| content (stringlengths, 7-928k) | avg_line_length (float64, 3.5-33.8k) | max_line_length (int64, 6-139k) | alphanum_fraction (float64, 0.08-0.96) | licenses (sequence) | repository_name (stringlengths, 7-104) | path (stringlengths, 4-230) | size (int64, 7-928k) | lang (stringclasses, 1 value) |
|---|---|---|---|---|---|---|---|---|
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Ludirium Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet load on startup.
Verify that a ludiriumd node can maintain a list of wallets to load on startup
"""
from test_framework.test_framework import LudiriumTestFramework
from test_framework.util import (
assert_equal,
)
class WalletStartupTest(LudiriumTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.supports_cli = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_nodes(self):
self.add_nodes(self.num_nodes)
self.start_nodes()
def run_test(self):
self.log.info('Should start without any wallets')
assert_equal(self.nodes[0].listwallets(), [])
assert_equal(self.nodes[0].listwalletdir(), {'wallets': []})
self.log.info('New default wallet should load by default when there are no other wallets')
self.nodes[0].createwallet(wallet_name='', load_on_startup=False)
self.restart_node(0)
assert_equal(self.nodes[0].listwallets(), [''])
self.log.info('Test load on startup behavior')
self.nodes[0].createwallet(wallet_name='w0', load_on_startup=True)
self.nodes[0].createwallet(wallet_name='w1', load_on_startup=False)
self.nodes[0].createwallet(wallet_name='w2', load_on_startup=True)
self.nodes[0].createwallet(wallet_name='w3', load_on_startup=False)
self.nodes[0].createwallet(wallet_name='w4', load_on_startup=False)
self.nodes[0].unloadwallet(wallet_name='w0', load_on_startup=False)
self.nodes[0].unloadwallet(wallet_name='w4', load_on_startup=False)
self.nodes[0].loadwallet(filename='w4', load_on_startup=True)
assert_equal(set(self.nodes[0].listwallets()), set(('', 'w1', 'w2', 'w3', 'w4')))
self.restart_node(0)
assert_equal(set(self.nodes[0].listwallets()), set(('', 'w2', 'w4')))
self.nodes[0].unloadwallet(wallet_name='', load_on_startup=False)
self.nodes[0].unloadwallet(wallet_name='w4', load_on_startup=False)
self.nodes[0].loadwallet(filename='w3', load_on_startup=True)
self.nodes[0].loadwallet(filename='')
self.restart_node(0)
assert_equal(set(self.nodes[0].listwallets()), set(('w2', 'w3')))
if __name__ == '__main__':
WalletStartupTest().main()
| 43.101695 | 98 | 0.69013 | ["MIT"] | ludirium/ludirium | test/functional/wallet_startup.py | 2,543 | Python |
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.kaldi.loader.utils import read_binary_bool_token, read_binary_integer32_token, collect_until_token, \
read_binary_float_token
from openvino.tools.mo.front.kaldi.utils import read_binary_vector, read_binary_matrix
from openvino.tools.mo.ops.tdnncomponent import TdnnComponent
class TdnnComponentFrontExtractor(FrontExtractorOp):
op = 'tdnncomponent'
enabled = True
@classmethod
def extract(cls, node):
pb = node.parameters
collect_until_token(pb, b'<MaxChange>')
max_change = read_binary_float_token(pb)
collect_until_token(pb, b'<L2Regularize>')
collect_until_token(pb, b'<LearningRate>')
collect_until_token(pb, b'<TimeOffsets>')
time_offsets = read_binary_vector(pb, False, np.int32)
collect_until_token(pb, b'<LinearParams>')
weights, weights_shape = read_binary_matrix(pb)
collect_until_token(pb, b'<BiasParams>')
bias_params = read_binary_vector(pb)
collect_until_token(pb, b'<OrthonormalConstraint>')
orthonormal_constraint = read_binary_float_token(pb)  # used only during training
collect_until_token(pb, b'<UseNaturalGradient>')
use_natural_grad = read_binary_bool_token(pb)  # used only during training
collect_until_token(pb, b'<NumSamplesHistory>')
num_samples_hist = read_binary_float_token(pb)
collect_until_token(pb, b'<AlphaInOut>')
alpha_in_out = read_binary_float_token(pb), read_binary_float_token(pb) # for training, usually (4, 4)
# according to Kaldi documentation http://kaldi-asr.org/doc/classkaldi_1_1nnet3_1_1TdnnComponent.html#details
# it looks like it's used only during training (but not 100% sure)
collect_until_token(pb, b'<RankInOut>')
rank_in_out = read_binary_integer32_token(pb), read_binary_integer32_token(pb)
biases = mo_array(bias_params) if len(bias_params) != 0 else None
attrs = {
'weights': np.reshape(weights, weights_shape),
'biases': biases,
'time_offsets': time_offsets,
}
TdnnComponent.update_node_stat(node, attrs)
return cls.enabled
| 40.6 | 130 | 0.719212 | ["Apache-2.0"] | 3Demonica/openvino | tools/mo/openvino/tools/mo/front/kaldi/extractors/tdnncomponent_ext.py | 2,436 | Python |
from fastcore.foundation import L
# Create an L containing the numbers 0-11 (use range)
t = ____________
print(t)
# Double the contents of the L
t __ 2
print(t)
# Find and return the positions that contain 0, i.e. (0, 12), by indexing with those positions
t_1 = t[_, __]
print(t_1)
# Find and return the positions that contain 0, i.e. (0, 12), using a boolean mask
# - Build the mask: put True only at positions 0 and 12
mask = L([True])
mask += L([False] * 11)
mask += L([True])
mask += L([False] * 11)
t_2 = t______
print(t_2)
| 16.73913 | 39 | 0.631169 | ["MIT"] | deep-diver/fastai-course | exercises/chapter01/exc_01_07.py | 555 | Python |
from setuptools import setup, find_packages
setup(name='nisrep', version='1.0', packages=find_packages())
| 26.75 | 61 | 0.775701 | ["MIT"] | NGoetz/NF | setup.py | 107 | Python |
'''
Advent of Code - 2019
--- Day 2: 1202 Program Alarm ---
'''
from utils import *
from intcode import IntcodeRunner, HaltExecution
def parse_input(day):
return day_input(day, integers)[0]
def part1(program, noun=12, verb=2):
runner = IntcodeRunner(program)
runner.set_mem(1, noun)
runner.set_mem(2, verb)
while True:
try:
next(runner.run())
except HaltExecution:
break
return runner.get_mem(0)
def part2(program, target=19690720):
runner = IntcodeRunner(program)
for noun in range(100, -1, -1):
for verb in range(100):
runner.set_mem(1, noun)
runner.set_mem(2, verb)
while True:
try:
next(runner.run())
except HaltExecution:
break
if runner.get_mem(0) == target:
return 100*noun+verb
runner.reset()
if __name__ == '__main__':
data = parse_input('02')
print(f'Part One: {part1(data)}')
print(f'Part Two: {part2(data)}')
| 21.82 | 48 | 0.549954 | ["MIT"] | basoares/advent-of-code | challenges/2019/python/d02.py | 1,091 | Python |
"""Workout schema module"""
import graphene
from exercises.schema import ExerciseType
from exercises.models import Exercise
class Query(graphene.ObjectType):
"""Workout query class"""
workout = graphene.List(ExerciseType,
body_part=graphene.String(),
exercise_name=graphene.String(),
equipment=graphene.String(),
level=graphene.String())
def resolve_workout(self, info, **kwargs):
"""query resolver for workout property"""
all_exercises = Exercise.objects.all()
if kwargs.get('body_part'):
all_exercises = all_exercises.select_related('body_part').filter(
body_part__name=kwargs.get('body_part').lower())
if kwargs.get('level'):
all_exercises = all_exercises.select_related('level').filter(
level__difficulty=kwargs.get('level').lower())
if kwargs.get('exercise_name'):
all_exercises = all_exercises.filter(
name__icontains=kwargs.get('exercise_name').lower())
if kwargs.get('equipment'):
all_exercises = all_exercises.select_related('equipment').filter(
equipment__name=kwargs.get('equipment').lower())
return all_exercises
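# Illustrative only (not part of the original module): a GraphQL query this
# resolver could serve. Argument names follow graphene's default camelCase
# conversion of the resolver kwargs; selecting `name` assumes ExerciseType
# exposes the Exercise model's name field.
EXAMPLE_WORKOUT_QUERY = """
query {
  workout(bodyPart: "legs", level: "beginner", equipment: "dumbbell") {
    name
  }
}
"""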
| 36.805556 | 77 | 0.612075 | ["MIT"] | adeoke/django-quarantine-workout-graphql | quarantineworkout/workout/schema.py | 1,325 | Python |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.variable_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
_NP_TO_TF = {
np.float32: dtypes.float32,
np.float64: dtypes.float64,
np.int32: dtypes.int32,
np.int64: dtypes.int64,
}
class VariableOpTest(test.TestCase):
def _initFetch(self, x, tftype, use_gpu=None):
with self.test_session(use_gpu=use_gpu):
p = state_ops.variable_op(x.shape, tftype)
op = state_ops.assign(p, x)
op.op.run()
return p.eval()
def _testTypes(self, vals):
for dtype in [np.float32, np.float64, np.int32, np.int64]:
self.setUp()
x = vals.astype(dtype)
tftype = _NP_TO_TF[dtype]
self.assertAllEqual(x, self._initFetch(x, tftype, use_gpu=False))
# NOTE(touts): the GPU test should pass for all types, whether the
# Variable op has an implementation for that type on GPU as we expect
# that Variable and Assign have GPU implementations for matching tf.
self.assertAllEqual(x, self._initFetch(x, tftype, use_gpu=True))
def testBasic(self):
self._testTypes(np.arange(0, 20).reshape([4, 5]))
def testset_shape(self):
p = state_ops.variable_op([1, 2], dtypes.float32)
self.assertEqual([1, 2], p.get_shape())
p = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), p.get_shape())
def testAssign(self):
value = np.array([[42.0, 43.0]])
var = state_ops.variable_op(value.shape, dtypes.float32)
self.assertShapeEqual(value, var)
assigned = state_ops.assign(var, value)
self.assertShapeEqual(value, assigned)
def testAssignNoValidateShape(self):
value = np.array([[42.0, 43.0]])
var = state_ops.variable_op(value.shape, dtypes.float32)
self.assertShapeEqual(value, var)
assigned = state_ops.assign(var, value, validate_shape=False)
self.assertShapeEqual(value, assigned)
def testAssignNoVarShape(self):
value = np.array([[42.0, 43.0]])
var = state_ops.variable_op(value.shape, dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
assigned = state_ops.assign(var, value)
self.assertShapeEqual(value, assigned)
def testAssignNoVarShapeNoValidateShape(self):
value = np.array([[42.0, 43.0]])
var = state_ops.variable_op(value.shape, dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
assigned = state_ops.assign(var, value, validate_shape=False)
self.assertShapeEqual(value, assigned)
def _NewShapelessTensor(self):
tensor = array_ops.placeholder(dtypes.float32)
self.assertEqual(tensor_shape.unknown_shape(), tensor.get_shape())
return tensor
def testAssignNoValueShape(self):
value = self._NewShapelessTensor()
shape = [1, 2]
var = state_ops.variable_op(shape, dtypes.float32)
assigned = state_ops.assign(var, value)
self.assertEqual(shape, var.get_shape())
self.assertEqual(shape, assigned.get_shape())
def testAssignNoValueShapeNoValidateShape(self):
value = self._NewShapelessTensor()
shape = [1, 2]
var = state_ops.variable_op(shape, dtypes.float32)
self.assertEqual(shape, var.get_shape())
assigned = state_ops.assign(var, value, validate_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), assigned.get_shape())
def testAssignNoShape(self):
with self.test_session():
value = self._NewShapelessTensor()
var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
self.assertEqual(tensor_shape.unknown_shape(),
state_ops.assign(var, value).get_shape())
def testAssignNoShapeNoValidateShape(self):
with self.test_session():
value = self._NewShapelessTensor()
var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
self.assertEqual(
tensor_shape.unknown_shape(),
state_ops.assign(
var, value, validate_shape=False).get_shape())
def testAssignUpdate(self):
var = state_ops.variable_op([1, 2], dtypes.float32)
added = state_ops.assign_add(var, [[2.0, 3.0]])
self.assertEqual([1, 2], added.get_shape())
subbed = state_ops.assign_sub(var, [[12.0, 13.0]])
self.assertEqual([1, 2], subbed.get_shape())
def testAssignUpdateNoVarShape(self):
var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
added = state_ops.assign_add(var, [[2.0, 3.0]])
self.assertEqual([1, 2], added.get_shape())
subbed = state_ops.assign_sub(var, [[12.0, 13.0]])
self.assertEqual([1, 2], subbed.get_shape())
def testAssignUpdateNoValueShape(self):
var = state_ops.variable_op([1, 2], dtypes.float32)
added = state_ops.assign_add(var, self._NewShapelessTensor())
self.assertEqual([1, 2], added.get_shape())
subbed = state_ops.assign_sub(var, self._NewShapelessTensor())
self.assertEqual([1, 2], subbed.get_shape())
def testAssignUpdateNoShape(self):
var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
added = state_ops.assign_add(var, self._NewShapelessTensor())
self.assertEqual(tensor_shape.unknown_shape(), added.get_shape())
subbed = state_ops.assign_sub(var, self._NewShapelessTensor())
self.assertEqual(tensor_shape.unknown_shape(), subbed.get_shape())
def testTemporaryVariable(self):
with self.test_session(use_gpu=True):
var = gen_state_ops._temporary_variable(
[1, 2], dtypes.float32, var_name="foo")
var = state_ops.assign(var, [[4.0, 5.0]])
var = state_ops.assign_add(var, [[6.0, 7.0]])
final = gen_state_ops._destroy_temporary_variable(var, var_name="foo")
self.assertAllClose([[10.0, 12.0]], final.eval())
def testDestroyNonexistentTemporaryVariable(self):
with self.test_session(use_gpu=True):
var = gen_state_ops._temporary_variable([1, 2], dtypes.float32)
final = gen_state_ops._destroy_temporary_variable(var, var_name="bad")
with self.assertRaises(errors.NotFoundError):
final.eval()
def testDuplicateTemporaryVariable(self):
with self.test_session(use_gpu=True):
var1 = gen_state_ops._temporary_variable(
[1, 2], dtypes.float32, var_name="dup")
var1 = state_ops.assign(var1, [[1.0, 2.0]])
var2 = gen_state_ops._temporary_variable(
[1, 2], dtypes.float32, var_name="dup")
var2 = state_ops.assign(var2, [[3.0, 4.0]])
final = var1 + var2
with self.assertRaises(errors.AlreadyExistsError):
final.eval()
def testDestroyTemporaryVariableTwice(self):
with self.test_session(use_gpu=True):
var = gen_state_ops._temporary_variable([1, 2], dtypes.float32)
val1 = gen_state_ops._destroy_temporary_variable(var, var_name="dup")
val2 = gen_state_ops._destroy_temporary_variable(var, var_name="dup")
final = val1 + val2
with self.assertRaises(errors.NotFoundError):
final.eval()
def testTemporaryVariableNoLeak(self):
with self.test_session(use_gpu=True):
var = gen_state_ops._temporary_variable(
[1, 2], dtypes.float32, var_name="bar")
final = array_ops.identity(var)
final.eval()
def testTwoTemporaryVariablesNoLeaks(self):
with self.test_session(use_gpu=True):
var1 = gen_state_ops._temporary_variable(
[1, 2], dtypes.float32, var_name="var1")
var2 = gen_state_ops._temporary_variable(
[1, 2], dtypes.float32, var_name="var2")
final = var1 + var2
final.eval()
def testAssignDependencyAcrossDevices(self):
with self.test_session(use_gpu=True):
# The variable and an op to increment it are on the GPU.
var = state_ops.variable_op([1], dtypes.float32)
state_ops.assign(var, [1.0]).eval()
increment = state_ops.assign_add(var, [1.0])
with ops.control_dependencies([increment]):
with ops.device("/cpu:0"):
# This mul op is pinned to the CPU, but reads the variable from the
# GPU. The test ensures that the dependency on 'increment' is still
# honored, i.e., the Send and Recv from GPU to CPU should take place
# only after the increment.
result = math_ops.multiply(var, var)
self.assertAllClose([4.0], result.eval())
def testIsVariableInitialized(self):
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
v0 = state_ops.variable_op([1, 2], dtypes.float32)
self.assertEqual(False, variables.is_variable_initialized(v0).eval())
state_ops.assign(v0, [[2.0, 3.0]]).eval()
self.assertEqual(True, variables.is_variable_initialized(v0).eval())
if __name__ == "__main__":
test.main()
| 41.110204 | 80 | 0.700655 | ["Apache-2.0"] | 1559603450/tensorflow | tensorflow/python/kernel_tests/variable_ops_test.py | 10,072 | Python |
import pylab as pl
from get_fish_info import get_fish_info
from fit_integrator_model import get_model_result, get_target_result
import numpy as np
from pathlib import Path
import gmm_model_fit
import pandas as pd
from pymoo.factory import get_problem, get_visualization, get_decomposition
# import random
#
# for dt in [0.001, 0.002, 0.005, 0.01, 0.1]:
#
# tau = 4
# Is = np.arange(0, 30, dt)
# xs = np.empty_like(Is)
# xs[0]
#
# for i in range(1, len(Is)):
# dx = random.gauss(0.2, 5) - xs[i - 1]
# xs[i] = xs[i - 1] + dx * dt / tau
# pl.plot(Is, xs)
# pl.show()
# sdf
root_path = Path("/Users/arminbahl/Desktop/mutant_behavior_data/surrogate_fish1")
#root_path = Path("/Users/arminbahl/Desktop/mutant_behavior_data/scn1lab_NIBR")
#root_path = Path("/Users/arminbahl/Desktop/mutant_behavior_data/disc1_hetinx")
df = pd.read_hdf(root_path / "all_data.h5", key="all_bouts")
#
# df_extracted_features, df_extracted_binned_features, \
# df_extracted_binned_features_same_direction, \
# df_extracted_binned_features_heading_angle_change_histograms, \
# df_extracted_binned_features_inter_bout_interval_histograms = get_mean_fish_info(df)
#
# print(df_extracted_features)
# pl.plot(df_extracted_features.loc["wt", :]["correctness"])
# pl.plot(df_extracted_features.loc["het", :]["correctness"])
# pl.plot(df_extracted_features.loc["hom", :]["correctness"])
#
# pl.figure()
# pl.plot(df_extracted_features.loc["wt", :]["inter_bout_interval"])
# pl.plot(df_extracted_features.loc["het", :]["inter_bout_interval"])
# pl.plot(df_extracted_features.loc["hom", :]["inter_bout_interval"])
#
# pl.figure()
# pl.plot(df_extracted_binned_features.loc["wt", 0])
# pl.plot(df_extracted_binned_features.loc["wt", 1])
# pl.plot(df_extracted_binned_features.loc["wt", 2])
# pl.plot(df_extracted_binned_features.loc["wt", 3])
#
# pl.figure()
# pl.plot(df_extracted_binned_features_same_direction.loc["wt"])
# pl.plot(df_extracted_binned_features_same_direction.loc["het"])
# pl.plot(df_extracted_binned_features_same_direction.loc["hom"])
#
#
# pl.figure()
# pl.plot(df_extracted_binned_features_heading_angle_change_histograms.loc["wt", 0])
# pl.plot(df_extracted_binned_features_heading_angle_change_histograms.loc["wt", 1])
# pl.plot(df_extracted_binned_features_heading_angle_change_histograms.loc["wt", 2])
# pl.plot(df_extracted_binned_features_heading_angle_change_histograms.loc["wt", 3])
#
# pl.show()
#
#
# pl.show()
#
#
# print(df_extracted_features)
# gg
# sdf
genotype = "hom"
target_df_correctness_as_function_of_coherence, \
target_df_inter_bout_interval_as_function_of_coherence, \
target_df_binned_correctness, \
target_df_binned_same_direction, \
target_df_binned_features_heading_angle_change_histograms, \
target_df_binned_features_inter_bout_interval_histograms, \
target_df_gmm_fitting_results = get_target_result(root_path, genotype)
# colors = ["#000000", "#330000", "#990000", "#CC3333"]
#
# for i in range(4):
# pl.plot(target_df_binned_features_heading_angle_change_histograms.loc[i, :].droplevel("stim"), label=f"Coherence {i*25}%", color=colors[i], linewidth=2)
#
# pl.xlabel("Heading angle change (deg)")
# pl.ylabel("Probability")
# pl.legend()
#
# fig = pl.figure()
# fig.suptitle("Target functions")
# pl.subplot(211)
# pl.plot(target_df_correctness_as_function_of_coherence, 'o-', color='black')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Correctness (%)")
# pl.subplot(212)
# pl.plot(target_df_inter_bout_interval_as_function_of_coherence, 'o-', color='black')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Inter-bout interval (s)")
#
medianprops = dict(linestyle='-.', linewidth=2.5, color='firebrick')
errornames = ["Error: 'Correctness as function of coherence'",
"Error: 'Inter-bout interval as function of coherence'",
"Error: 'Binned correctness at 25, 50, 100 %'",
"Error: 'Binned same direction'",
"Error: 'Histogram weights'"]
#errornames = ["Mixed"]
repeat = 1
X = np.load(root_path / f"leaky_integrator_model2_X_{genotype}_{repeat}.npy")
F = np.load(root_path / f"leaky_integrator_model2_F_{genotype}_{repeat}.npy")
#
#
# for i in range(7):
# F[-1, :, i] = F[-1, :, i] / np.max(F[-1, :, i])
# print(F.shape)
#
# i6 = np.argmin(F[-1, :, 0] + F[-1, :, 1] + F[-1, :, 2] + F[-1, :, 3] + F[-1, :, 4] + F[-1, :, 5] + F[-1, :, 6])
# print(F[-1, i6, 0])
# dd
#get_decomposition("asf").do(F[-1], [1, 1, 1, 1, 1, 1, 1]).argmin()
#print(I)
#sdfsdf
#X = np.load(root_path / f"leaky_integrator_model2_X_{genotype}_{repeat}_single_error.npy")
#F = np.load(root_path / f"leaky_integrator_model2_F_{genotype}_{repeat}_single_error.npy")
# from pymoo.factory import get_decision_making, get_reference_directions
#
# ref_dirs = get_reference_directions("das-dennis", 4, n_partitions=12)
# F = get_problem("dtlz1").pareto_front(ref_dirs)
#
# weights = np.array([10.25, 10.25, 0.25, 0.25])
# a, pseudo_weights = get_decision_making("pseudo-weights", weights).do(F, return_pseudo_weights=True)
# pl.plot(F[:, 0], F[:,1], 'o')
# pl.plot(F[a, 0], F[a,1], 'o')
# pl.show()
#
# print(a, pseudo_weights, F.shape)
# ghj
from pymoo.factory import get_decision_making, get_reference_directions
#weights = [1000, 1000, 1000, 0, 0, 0, 0]
#a, pseudo_weights = get_decision_making("pseudo-weights", weights).do(F[-1], return_pseudo_weights=True)
#print(pseudo_weights[0])
#print(a, pseudo_weights)
#dfg
for i in range(5):
#pl.hist(F[-1, :, i])
#pl.show()
#print(np.percentile(F[-1, :, i], 75))
#print(np.max(F[-1, :, i]) - np.min(F[-1, :, i]))
F[-1, :, i] = F[-1, :, i] / np.percentile(F[-1, :, i], 75)
# print(F.shape)
#
#i6 = a
#i1 = np.argmin(F[-1, :, 0])
# i2 = np.argmin(F[-1, :, 1])
# i3 = np.argmin(F[-1, :, 0] + F[-1, :, 1]*500)
# i4 = np.argmin(F[-1, :, 0] + F[-1, :, 1]*500 + F[-1, :, 3])
# i5 = np.argmin(F[-1, :, 0] + F[-1, :, 1]*500 + F[-1, :, 3] + F[-1, :, 5]*0.25)
# #i6 = np.argmin(F[-1, :, 0] + F[-1, :, 1]*500 + F[-1, :, 3] + F[-1, :, 5]*0.25 + F[-1, :, 6]*5800)
# #i6 = np.argmin(F[-1, :, 0] + F[-1, :, 1] * 2500 + F[-1, :, 3] * 5 + F[-1, :, 5] * 0.5 + F[-1, :, 6] * 6800)
# i6 = np.argmin(F[-1, :, 0]*500 + F[-1, :, 1]*2500 + F[-1, :, 3]*50 + F[-1, :, 5]*0.5 + F[-1, :, 6]*4500)
i6 = np.argmin(F[-1, :, 0] + 3*F[-1, :, 1] + F[-1, :, 2] + F[-1, :, 3] + F[-1, :, 4])
#from pymoo.factory import get_decision_making
#dm = get_decision_making("high-tradeoff")
#I = dm.do(pf)
# print(F.shape)
# np.set_printoptions(precision=4, suppress=True)
# print((X[-1, i]))
# #gdfgh
# for error_i in range(len(errornames)):
# pl.figure()
# pl.title(errornames[error_i])
# bp = pl.boxplot(F[:, :, error_i].T, whis=[5, 95], showfliers=False, medianprops=medianprops)
# for gen in range(50):
# sc = pl.scatter([gen+1], [F[gen, :, error_i].min()], s=5, marker='.', c='firebrick')
# pl.yscale("log")
# pl.xlabel("Generation")
# pl.ylabel("Log Error")
# pl.show()
# dd
#
# pl.figure()
# pl.title("Compromise between all error functions")
# #error = F[:, :, 0] + F[:, :, 1]*500 + F[:, :, 3] + F[:, :, 5]*0.25 + F[:, :, 6]*500
# error = F[:, :, 0] + F[:, :, 1]*2500 + F[:, :, 3]*5 + F[:, :, 5]*0.5 + F[:, :, 6]*1500
#
# bp = pl.boxplot(error.T, whis=[5, 95], showfliers=False, medianprops=medianprops)
# for gen in range(50):
# sc = pl.scatter([gen + 1], [error[gen].min()], s=10, marker='.', c='firebrick')
# pl.yscale("log")
# pl.xlabel("Generation")
# pl.ylabel("Log Error")
# pl.show()
# pl.figure()
# pl.scatter(F[-1, :, 0], F[-1, :, 1], s=10, marker='.', c='C0', label='Individual')
# pl.scatter(F[-1, i1, 0], F[-1, i1, 1], s=15, marker='o', c='C1', label="Best for 'Correctness as function of coherence'")
# pl.scatter(F[-1, i2, 0], F[-1, i2, 1], s=15, marker='o', c='C2', label="Best for 'Inter-bout interval as function of coherence'")
# pl.scatter(F[-1, i3, 0], F[-1, i3, 1], s=15, marker='o', c='C3', label="Compromise")
# pl.legend()
# pl.xlabel(errornames[0])
# pl.ylabel(errornames[1])
#
#
# pl.figure()
# pl.scatter(F[-1, :, 0] + F[-1, :, 1]*500, F[-1, :, 3], s=10, marker='.', c='C0', label='Individual')
# pl.scatter(F[-1, i1, 0] + F[-1, i1, 1]*500, F[-1, i1, 3], s=15, marker='o', c='C1', label="Best for 'Correctness as function of coherence'")
# pl.scatter(F[-1, i2, 0] + F[-1, i2, 1]*500, F[-1, i2, 3], s=15, marker='o', c='C2', label="Best for 'Inter-bout interval as function of coherence'")
# pl.scatter(F[-1, i3, 0] + F[-1, i3, 1]*500, F[-1, i3, 3], s=15, marker='o', c='C3', label="Compromise between 1 and 2")
# pl.scatter(F[-1, i4, 0] + F[-1, i4, 1]*500, F[-1, i4, 3], s=15, marker='o', c='C4', label="Compromise between all")
# pl.legend()
# pl.xlabel("Compromise between 1 and 2")
# pl.ylabel(errornames[3])
#
# pl.figure()
# pl.scatter(F[-1, :, 0] + F[-1, :, 1]*500 + F[-1, :, 3], F[-1, :, 5], s=10, marker='.', c='C0', label='Individual')
# pl.scatter(F[-1, i1, 0] + F[-1, i1, 1]*500 + F[-1, i1, 3], F[-1, i1, 5], s=15, marker='o', c='C1', label="Best for 'Correctness as function of coherence'")
# pl.scatter(F[-1, i2, 0] + F[-1, i2, 1]*500 + F[-1, i2, 3], F[-1, i2, 5], s=15, marker='o', c='C2', label="Best for 'Inter-bout interval as function of coherence'")
# pl.scatter(F[-1, i3, 0] + F[-1, i3, 1]*500 + F[-1, i3, 3], F[-1, i3, 5], s=15, marker='o', c='C3', label="Compromise between 1 and 2")
# pl.scatter(F[-1, i4, 0] + F[-1, i4, 1]*500 + F[-1, i4, 3], F[-1, i4, 5], s=15, marker='o', c='C4', label="Compromise between 1, 2, and 3")
# pl.scatter(F[-1, i5, 0] + F[-1, i5, 1]*500 + F[-1, i5, 3], F[-1, i5, 5], s=15, marker='o', c='C5', label="Compromise between all")
# pl.legend()
# pl.xlabel("Compromise between 1, 2, and 3")
# pl.ylabel(errornames[5])
#
#
# pl.figure()
# pl.scatter(F[-1, :, 0] + F[-1, :, 1]*500 + F[-1, :, 3] + F[-1, :, 5]*0.25, F[-1, :, 6], s=10, marker='.', c='C0', label='Individual')
# pl.scatter(F[-1, i1, 0] + F[-1, i1, 1]*500 + F[-1, i1, 3] + F[-1, i1, 5]*0.25, F[-1, i1, 6], s=15, marker='o', c='C1', label="Best for 'Correctness as function of coherence'")
# pl.scatter(F[-1, i2, 0] + F[-1, i2, 1]*500 + F[-1, i2, 3] + F[-1, i2, 5]*0.25, F[-1, i2, 6], s=15, marker='o', c='C2', label="Best for 'Inter-bout interval as function of coherence'")
# pl.scatter(F[-1, i3, 0] + F[-1, i3, 1]*500 + F[-1, i3, 3] + F[-1, i3, 5]*0.25, F[-1, i3, 6], s=15, marker='o', c='C3', label="Compromise between 1 and 2")
# pl.scatter(F[-1, i4, 0] + F[-1, i4, 1]*500 + F[-1, i4, 3] + F[-1, i4, 5]*0.25, F[-1, i4, 6], s=15, marker='o', c='C4', label="Compromise between 1, 2, and 3")
# pl.scatter(F[-1, i5, 0] + F[-1, i5, 1]*500 + F[-1, i5, 3] + F[-1, i5, 5]*0.25, F[-1, i5, 6], s=15, marker='o', c='C5', label="Compromise between 1, 2, 3, and 4")
# pl.scatter(F[-1, i6, 0] + F[-1, i6, 1]*500 + F[-1, i6, 3] + F[-1, i6, 5]*0.25, F[-1, i6, 6], s=15, marker='o', c='C6', label="Compromise between all")
# pl.legend()
# pl.xlabel("Compromise between 1, 2, 3, and 4")
# pl.ylabel(errornames[6])
#
# fig = pl.figure()
# model_df_correctness_as_function_of_coherence, \
# model_df_inter_bout_interval_as_function_of_coherence, \
# model_df_binned_correctness, \
# model_df_binned_same_direction, \
# model_df_binned_features_heading_angle_change_histograms, \
# model_df_binned_features_inter_bout_interval_histograms, \
# model_df_gmm_fitting_results = get_model_result(X[-1, i1])
# fig.suptitle("Best for 'Correctness as function of coherence'")
# pl.subplot(211)
# pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C1')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Correctness (%)")
# pl.subplot(212)
# pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C1')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Inter-bout interval (s)")
#
# fig = pl.figure()
# model_df_correctness_as_function_of_coherence, \
# model_df_inter_bout_interval_as_function_of_coherence, \
# model_df_binned_correctness, \
# model_df_binned_same_direction, \
# model_df_binned_features_heading_angle_change_histograms, \
# model_df_binned_features_inter_bout_interval_histograms, \
# model_df_gmm_fitting_results = get_model_result(X[-1, i2])
# fig.suptitle("Best for 'Inter-bout interval as function of coherence'")
# pl.subplot(211)
# pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C2')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Correctness (%)")
# pl.subplot(212)
# pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C2')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Inter-bout interval (s)")
#
# fig = pl.figure()
# model_df_correctness_as_function_of_coherence, \
# model_df_inter_bout_interval_as_function_of_coherence, \
# model_df_binned_correctness, \
# model_df_binned_same_direction, \
# model_df_binned_features_heading_angle_change_histograms, \
# model_df_binned_features_inter_bout_interval_histograms, \
# model_df_gmm_fitting_results = get_model_result(X[-1, i3])
# fig.suptitle("Compromise between 'Correctness and inter-bout interval as function of coherence'")
# pl.subplot(211)
# pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C3')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Correctness (%)")
# pl.subplot(212)
# pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C3')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Inter-bout interval (s)")
#
# fig = pl.figure()
# model_df_correctness_as_function_of_coherence, \
# model_df_inter_bout_interval_as_function_of_coherence, \
# model_df_binned_correctness, \
# model_df_binned_same_direction, \
# model_df_binned_features_heading_angle_change_histograms, \
# model_df_binned_features_inter_bout_interval_histograms, \
# model_df_gmm_fitting_results = get_model_result(X[-1, i3])
# fig.suptitle("Compromise between 'Correctness and inter-bout interval as function of coherence'")
# pl.subplot(221)
# pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C3')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Correctness (%)")
# pl.subplot(222)
# pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C3')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Inter-bout interval (s)")
# pl.subplot(223)
# for i in range(4):
# pl.plot(target_df_binned_correctness.loc[i, :].droplevel("stim"), 'o-', color='black')
# pl.plot(model_df_binned_correctness.loc[i, :].droplevel("stim"), 'o--', color='C3')
# pl.xlabel("Correctness (%)")
# pl.ylabel("Time (s)")
#
#
# fig = pl.figure()
# model_df_correctness_as_function_of_coherence, \
# model_df_inter_bout_interval_as_function_of_coherence, \
# model_df_binned_correctness, \
# model_df_binned_same_direction, \
# model_df_binned_features_heading_angle_change_histograms, \
# model_df_binned_features_inter_bout_interval_histograms, \
# model_df_gmm_fitting_results = get_model_result(X[-1, i4])
# fig.suptitle("Compromise between all three error functions")
# pl.subplot(221)
# pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C4')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Correctness (%)")
# pl.subplot(222)
# pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C4')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Inter-bout interval (s)")
# pl.subplot(223)
# for i in range(4):
# pl.plot(target_df_binned_correctness.loc[i, :].droplevel("stim"), 'o-', color='black')
# pl.plot(model_df_binned_correctness.loc[i, :].droplevel("stim"), 'o--', color='C4')
# pl.xlabel("Correctness (%)")
# pl.ylabel("Time (s)")
#
#
# fig = pl.figure()
# model_df_correctness_as_function_of_coherence, \
# model_df_inter_bout_interval_as_function_of_coherence, \
# model_df_binned_correctness, \
# model_df_binned_same_direction, \
# model_df_binned_features_heading_angle_change_histograms, \
# model_df_binned_features_inter_bout_interval_histograms, \
# model_df_gmm_fitting_results = get_model_result(X[-1, i5])
# fig.suptitle("Compromise between all four error functions")
# pl.subplot(221)
# pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C5')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Correctness (%)")
# pl.subplot(222)
# pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C5')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Inter-bout interval (s)")
# pl.subplot(223)
# for i in range(4):
# pl.plot(target_df_binned_correctness.loc[i, :].droplevel("stim"), 'o-', color='black')
# pl.plot(model_df_binned_correctness.loc[i, :].droplevel("stim"), 'o--', color='C5')
# pl.xlabel("Correctness (%)")
# pl.ylabel("Time (s)")
# pl.subplot(224)
# pl.plot(target_df_binned_same_direction, 'o-', color='black')
# pl.plot(model_df_binned_same_direction, 'o--', color='C5')
# pl.xlabel("Time since last bout (s)")
# pl.ylabel("Correctness (%)")
fig = pl.figure()
model_df_correctness_as_function_of_coherence, \
model_df_inter_bout_interval_as_function_of_coherence, \
model_df_binned_correctness, \
model_df_binned_same_direction, \
model_df_binned_features_heading_angle_change_histograms, \
model_df_binned_features_inter_bout_interval_histograms, \
model_df_gmm_fitting_results = get_model_result(X[-1, i6])
fig.suptitle("Compromise between all five error functions")
pl.subplot(231)
pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black')
pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C6')
pl.xlabel("Coherence (%)")
pl.ylabel("Correctness (%)")
pl.subplot(232)
pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black')
pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C6')
pl.xlabel("Coherence (%)")
pl.ylabel("Inter-bout interval (s)")
pl.subplot(233)
for i in range(4):
pl.plot(target_df_binned_correctness.loc[i, :].droplevel("stim"), 'o-', color='black')
pl.plot(model_df_binned_correctness.loc[i, :].droplevel("stim"), 'o--', color='C6')
pl.xlabel("Time (s)")
pl.ylabel("Correctness (%)")
pl.subplot(234)
pl.plot(target_df_binned_same_direction, 'o-', color='black')
pl.plot(model_df_binned_same_direction, 'o--', color='C6')
pl.xlabel("Time since last bout (s)")
pl.ylabel("Correctness (%)")
# pl.subplot(235)
# pl.plot(target_df_gmm_fitting_results.index*25, target_df_gmm_fitting_results["w_left"].values, '-o', color='black', label='s_left')
# pl.plot(target_df_gmm_fitting_results.index*25, target_df_gmm_fitting_results["w_center"].values, '-o', color='black', label='s_center')
# pl.plot(target_df_gmm_fitting_results.index*25, target_df_gmm_fitting_results["w_right"].values, '-o', color='black', label='s_right')
#
# pl.plot(model_df_gmm_fitting_results.index*25, model_df_gmm_fitting_results["w_left"].values, '--o', color='C6', label='s_left')
# pl.plot(model_df_gmm_fitting_results.index*25, model_df_gmm_fitting_results["w_center"].values, '--o', color='C6', label='s_center')
# pl.plot(model_df_gmm_fitting_results.index*25, model_df_gmm_fitting_results["w_right"].values, '--o', color='C6', label='s_right')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Weight")
# pl.legend()
pl.subplot(235)
for i in range(4):
pl.plot(target_df_binned_features_heading_angle_change_histograms.loc[i, :].droplevel("stim"), color=f"black")
pl.plot(model_df_binned_features_heading_angle_change_histograms.loc[i, :].droplevel("stim"), color=f"C6", linestyle='--')
pl.xlabel("Heading angle change")
pl.ylabel("Probability")
pl.show()
found_parameters = []
for repeat in range(12):
for genotype in ["wt", "het", "hom"]:
X = np.load(root_path / f"leaky_integrator_model2_X_{genotype}_{repeat}.npy")
F = np.load(root_path / f"leaky_integrator_model2_F_{genotype}_{repeat}.npy")
for i in range(5):
#F[-1, :, i] = F[-1, :, i] / np.median(F[-1, :, i])
F[-1, :, i] = F[-1, :, i] / np.percentile(F[-1, :, i], 75)
#i6 = np.argmin(F[-1, :, 0] + F[-1, :, 1] + 5 * F[-1, :, 3] + F[-1, :, 5] + 5 * F[-1, :, 6])
#i6 = np.argmin(F[-1, :, 0] + 5 * F[-1, :, 1] + 20 * F[-1, :, 4] + F[-1, :, 5] + 5 * F[-1, :, 6])
i6 = np.argmin(F[-1, :, 0] + 3 * F[-1, :, 1] + F[-1, :, 2] + F[-1, :, 3] + F[-1, :, 4])
#i6 = np.argmin(F[-1, :, 0] + 2 * F[-1, :, 1] + F[-1, :, 2] + 3 * F[-1, :, 3] + F[-1, :, 5] + F[-1, :, 6])
#i6 = np.argmin(F[-1, :, 0] + F[-1, :, 1] * 500 + F[-1, :, 3] + F[-1, :, 5] * 0.25 + F[-1, :, 6] * 500)
#i6 = np.argmin(F[-1, :, 0] + F[-1, :, 1] * 2500 + F[-1, :, 3] * 5 + F[-1, :, 5] * 0.5 + F[-1, :, 6] * 1500)
#i6 = np.argmin(F[-1, :, 0]*500 + F[-1, :, 1]*2500 + F[-1, :, 3]*50 + F[-1, :, 5]*0.5 + F[-1, :, 6]*4500)
found_parameters.append([genotype, repeat, 49] + list(X[-1, i6, :]))
df = pd.DataFrame(found_parameters,
columns=["genotype",
"repeat",
"gen",
"tau",
"sigma",
"T",
"p_below",
"p_above"]).astype(dtype={"repeat": "int64", "gen": "int64"}, copy=False)
df.set_index(["genotype", 'repeat', 'gen'], inplace=True)
df.sort_index(inplace=True)
df.to_hdf(root_path / "found_parameters.h5", key="parameters", complevel=9)
| 47.092975 | 185 | 0.660641 | ["MIT"] | arminbahl/mutant_zebrafish_behavior | armin_analysis/model_tests.py | 22,793 | Python |
from django.db import models
from django.utils import timezone
from django.core.exceptions import ValidationError
# from django.contrib.auth.models import User
from users.models import Student, College
from django.urls import reverse
from django.core import validators
class AbstractPostModel(models.Model):
title = models.CharField(validators=[validators.MinLengthValidator(10)],
null=False, max_length=500)
content = models.TextField(validators=[validators.MinLengthValidator(10)], null=False)
post_date = models.DateTimeField(default=timezone.now)
author = models.ForeignKey(Student, on_delete=models.CASCADE)
rating = models.IntegerField(default=0)
college = models.ForeignKey(College, on_delete=models.CASCADE)
class Meta:
abstract = True
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('post-detail', kwargs={'pk': self.pk, 'title': self.title})
class Question(AbstractPostModel):
is_answered = models.BooleanField(default=False)
class Answer(AbstractPostModel):
is_approved = models.BooleanField(default=False)
question = models.ForeignKey(Question, on_delete=models.CASCADE, null=True)
class Voter(models.Model):
Question = models.ForeignKey(Question, on_delete=models.CASCADE)
Answer = models.ForeignKey(Answer, on_delete=models.CASCADE, null=True)
user = models.ForeignKey(Student, on_delete=models.CASCADE)
def __str__(self):
return self.user.username + ' vote on post: ' + self.Question.title
class Comment(AbstractPostModel):
Question = models.ForeignKey(Question, on_delete=models.CASCADE)
author = models.ForeignKey(Student, on_delete=models.CASCADE)
content = models.TextField(null=False)
def __str__(self):
return self.author.username + ' comment on post: ' + self.Question.title
def get_absolute_url(self):
return reverse('post-detail', kwargs={'pk': self.pk, 'title': self.Question.title})
| 35.403509 | 91 | 0.733399 | ["MIT"] | franndyabreu/oncollegehub | app/blog/models.py | 2,018 | Python |
"""
Wrapper to get ROVA calendar from Rova's API
Access to this ROVA API has been simplified since version 0.2.1 of this wrapper.
Just use https://www.rova.nl/api/waste-calendar/upcoming?postalcode=1000AA&houseNumber=1&addition=&take=5
with an existing combination of postal code, house number and house number addition.
Be aware that this API has not been officially published by ROVA.
"""
from datetime import datetime
import random
import requests
__title__ = "rova"
__version__ = "0.3.0"
__author__ = "Gido Hakvoort and synoniem <[email protected]>"
__license__ = "MIT"
class Rova:
"""
ROVA class
"""
def __init__(self, zip_code, house_number, house_addition=""):
"""
To fetch the garbage calendar, you need to set a zip_code and house_number.
"""
self.zip_code = zip_code.replace(' ', '')
self.house_number = house_number.strip()
self.house_addition = house_addition.strip()
def is_rova_area(self):
"""
Check if ROVA collects garbage at this address
"""
url = 'https://www.rova.nl/api/waste-calendar/upcoming'
# request data from rova API and check if garbage is collected at this address
# requesting with a non-existing postalcode will result in a error message
response = requests.get(url, params={
'postalcode': self.zip_code,
'houseNumber': self.house_number,
'addition': self.house_addition,
'take': '1',
})
response.raise_for_status()
rova_response = response.text.strip()
if rova_response != '[]':
rova_response = "OK"
return rova_response == "OK"
def get_calendar_items(self, take=5):
"""
Get next pickup date for each garbage types
"""
url = 'https://www.rova.nl/api/waste-calendar/upcoming'
# request data from rova API and save response first 5 items (default)
response = requests.get(url, params={
'postalcode': self.zip_code,
'houseNumber': self.house_number,
'addition': self.house_addition,
'take': take,
})
response.raise_for_status()
rova_response = response.json()
items = []
types = []
# add next pickup date for each garbage type
for item in rova_response:
date = datetime.strptime(item["date"], "%Y-%m-%dT%H:%M:%SZ")
date = date.strftime("%Y-%m-%dT%H:%M:%S")
garbage_type = item["garbageTypeCode"].upper()
items.append({
'GarbageTypeCode': garbage_type,
'Date': date
})
types.append(garbage_type)
return items
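# Minimal usage sketch (not part of the original module). The postal code and
# house number below are the placeholder values from the API example in the
# module docstring, not a real address.
if __name__ == "__main__":
    rova = Rova("1000AA", "1")
    if rova.is_rova_area():
        for pickup in rova.get_calendar_items(take=3):
            print(pickup["GarbageTypeCode"], pickup["Date"])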
| 31.261364 | 105 | 0.608142 | ["MIT"] | synoniem/rova | rova/rova.py | 2,751 | Python |
import numpy as np
import random
from collections import namedtuple, deque
from model import QNetwork
import torch
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e5) # replay buffer size
BATCH_SIZE = 64 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-3 # for soft update of target parameters
LR = 5e-4 # learning rate
UPDATE_EVERY = 4 # how often to update the network
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size, seed):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
seed (int): random seed
"""
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(seed)
# Q-Network
self.qnetwork_local = QNetwork(state_size, action_size, seed).to(device)
self.qnetwork_target = QNetwork(state_size, action_size, seed).to(device)
self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)
# Replay memory
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
# Initialize time step (for updating every UPDATE_EVERY steps)
self.t_step = 0
def step(self, state, action, reward, next_state, done):
# Save experience in replay memory
self.memory.add(state, action, reward, next_state, done)
# Learn every UPDATE_EVERY time steps.
self.t_step = (self.t_step + 1) % UPDATE_EVERY
if self.t_step == 0:
# If enough samples are available in memory, get random subset and learn
if len(self.memory) > BATCH_SIZE:
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def act(self, state, eps=0.):
"""Returns actions for given state as per current policy.
Params
======
state (array_like): current state
eps (float): epsilon, for epsilon-greedy action selection
"""
# from_numpy creates tensor without copying numpy array data
# float == to(float), to() can be used for dtype and device conversions
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
# eval mode as opposed to training (ignores dropout, batchnorm)
self.qnetwork_local.eval()
with torch.no_grad():
# call the nn.Module rather than explicitly using nn.Module.forward()
action_values = self.qnetwork_local(state)
self.qnetwork_local.train()
# Epsilon-greedy action selection
if random.random() > eps:
return np.argmax(action_values.cpu().data.numpy())
else:
return random.choice(np.arange(self.action_size))
def learn(self, experiences, gamma):
"""Update value parameters using given batch of experience tuples.
Params
======
experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
states, actions, rewards, next_states, dones = experiences
## TODO: compute and minimize the loss
"*** YOUR CODE HERE ***"
# Max q value over all next actions given their next states (this is for a whole batch)
# i.e. max_a(Q(s_{j+1}, a, w-)) from the one-step look-ahead, computed with the
# slowly updated target network (the w- weights), which soft_update maintains below
Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)
# Compute Q targets for current states
Q_targets = rewards + gamma * Q_targets_next * (1 - dones) # set y_i = r if done
# Get expected Q values from local model - used in gradient update as diff from target
Q_expected = self.qnetwork_local(states).gather(1, actions)
# Compute Loss
loss = F.mse_loss(Q_expected, Q_targets)
# Minimise loss by backprop
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# ------------------- update target network ------------------- #
self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model (PyTorch model): weights will be copied from
target_model (PyTorch model): weights will be copied to
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, action_size, buffer_size, batch_size, seed):
"""Initialize a ReplayBuffer object.
Params
======
action_size (int): dimension of each action
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
seed (int): random seed
"""
self.action_size = action_size
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
self.seed = random.seed(seed)
def add(self, state, action, reward, next_state, done):
"""Add a new experience to memory."""
e = self.experience(state, action, reward, next_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).long().to(device)
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)
dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)
return (states, actions, rewards, next_states, dones)
def __len__(self):
"""Return the current size of internal memory."""
return len(self.memory)
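# Hedged usage sketch (not part of the original file): shows the intended
# act -> environment step -> agent.step calling pattern, using random
# transitions in place of a real environment; the state/action sizes here
# are arbitrary assumptions.
if __name__ == "__main__":
    demo_agent = Agent(state_size=8, action_size=4, seed=0)
    state = np.random.rand(8)
    for _ in range(10):
        action = demo_agent.act(state, eps=0.5)
        next_state = np.random.rand(8)
        demo_agent.step(state, action, float(np.random.rand()), next_state, done=False)
        state = next_state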
"MIT"
] | 0xtristan/deep-reinforcement-learning | dqn/exercise/dqn_agent.py | 6,836 | Python |
from __future__ import print_function, division, absolute_import
import os
import unittest
from six import string_types
from .. import *
from ..compat import as_text, as_str, as_bytes
DEFAULT_VP_TEST_HOST = '127.0.0.1'
DEFAULT_VP_TEST_PORT = 5433
DEFAULT_VP_TEST_USER = 'dbadmin'
DEFAULT_VP_TEST_PASSWD = ''
DEFAULT_VP_TEST_DB = 'docker'
DEFAULT_VP_TEST_TABLE = 'vertica_python_unit_test'
class VerticaPythonTestCase(unittest.TestCase):
"""Base class for tests that query Vertica."""
@classmethod
def setUpClass(cls):
cls._host = os.getenv('VP_TEST_HOST', DEFAULT_VP_TEST_HOST)
cls._port = int(os.getenv('VP_TEST_PORT', DEFAULT_VP_TEST_PORT))
cls._user = os.getenv('VP_TEST_USER', DEFAULT_VP_TEST_USER)
cls._password = os.getenv('VP_TEST_PASSWD', DEFAULT_VP_TEST_PASSWD)
cls._database = os.getenv('VP_TEST_DB', DEFAULT_VP_TEST_DB)
cls._table = os.getenv('VP_TEST_TABLE', DEFAULT_VP_TEST_TABLE)
cls._conn_info = {
'host': cls._host,
'port': cls._port,
'database': cls._database,
'user': cls._user,
'password': cls._password,
}
@classmethod
def tearDownClass(cls):
with cls._connect() as conn:
cur = conn.cursor()
cur.execute("DROP TABLE IF EXISTS {0}".format(cls._table))
@classmethod
def _connect(cls):
"""Connects to vertica.
:return: a connection to vertica.
"""
return connect(**cls._conn_info)
def _query_and_fetchall(self, query):
"""Creates a new connection, executes a query and fetches all the results.
:param query: query to execute
:return: all fetched results as returned by cursor.fetchall()
"""
with self._connect() as conn:
cur = conn.cursor()
cur.execute(query)
results = cur.fetchall()
return results
def _query_and_fetchone(self, query):
"""Creates a new connection, executes a query and fetches one result.
:param query: query to execute
:return: the first result fetched by cursor.fetchone()
"""
with self._connect() as conn:
cur = conn.cursor()
cur.execute(query)
result = cur.fetchone()
return result
def assertTextEqual(self, first, second, msg=None):
first_text = as_text(first)
second_text = as_text(second)
self.assertEqual(first=first_text, second=second_text, msg=msg)
def assertStrEqual(self, first, second, msg=None):
first_str = as_str(first)
second_str = as_str(second)
self.assertEqual(first=first_str, second=second_str, msg=msg)
def assertBytesEqual(self, first, second, msg=None):
first_bytes = as_bytes(first)
second_bytes = as_bytes(second)
self.assertEqual(first=first_bytes, second=second_bytes, msg=msg)
def assertResultEqual(self, value, result, msg=None):
if isinstance(value, string_types):
self.assertTextEqual(first=value, second=result, msg=msg)
else:
self.assertEqual(first=value, second=result, msg=msg)
def assertListOfListsEqual(self, list1, list2, msg=None):
self.assertEqual(len(list1), len(list2), msg=msg)
for l1, l2 in zip(list1, list2):
self.assertListEqual(l1, l2, msg=msg)
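# Hedged example (not part of the original module): a minimal subclass showing
# how the helpers above are meant to be used from a concrete test; the query
# itself is arbitrary.
class ExampleVersionTestCase(VerticaPythonTestCase):
    def test_select_version(self):
        row = self._query_and_fetchone("SELECT version()")
        self.assertIsInstance(row[0], string_types)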
| 32.971154 | 82 | 0.642461 | ["MIT"] | etsy/vertica-python | vertica_python/tests/base.py | 3,429 | Python |
from selenium_test.selenium_utils import *
from file_and_system.windows_os_utils import WindowsOsUtil
from python_common.global_param import GlobalParam
from http_request.request_utils import request_download_file_by_url
import cv2 as cv
import time
WindowsOsUtil.kill_process_by_name('MicrosoftWebDriver.exe')
# mail_lists=['mail.hoperun.com', 'mail.qq.com', 'mail.163.com']
mail_lists = ['mail.163.com']
mail_driver = init_driver('edge', GlobalParam.get_edge_driver_path())
open_browser_multi_tab(mail_driver, mail_lists)
wait_for_page_full_loaded(mail_driver)
def hoperun_login(hoperun_driver, user_name, user_pass):
hoperun_driver.execute_script("document.getElementById('usernameTip').removeAttribute('readonly');")
element = find_element_by_id(hoperun_driver, 'usernameTip')
element.click()
element = find_element_by_id(hoperun_driver, 'username')
element.send_keys(user_name)
element = find_element_by_id(hoperun_driver, 'userType')
element.click()
element = find_element_by_id(hoperun_driver, 'userTypePwd')
element.send_keys(user_pass)
element = find_element_by_id(hoperun_driver, 'wmSubBtn')
element.click()
def hoperun_check_mail(hoperun_driver, mail_sender, mail_title):
wait_for_frame_and_switch_to_frame(hoperun_driver, 'treeBox')
element = find_element_by_id(hoperun_driver, 'tree_folder_1_span')
element.click()
wait_for_page_full_loaded(hoperun_driver)
wait_for_frame_and_switch_to_frame(hoperun_driver, 'tabsHome')
wait_for_page_full_loaded(hoperun_driver)
element = hoperun_driver.find_elements_by_xpath(''.join(('//div[text()="', mail_sender, '"]/../../../..')))
for e in element:
if e.find_element_by_xpath('li[2]/div[3]/span').text.__contains__(mail_title):
e.find_element_by_xpath('li[2]/div[3]/span').click()
def qq_login(qq_driver, user_name, user_pass):
element = find_element_by_id(qq_driver, 'qqLoginTab')
element.click()
qq_driver.switch_to.frame('login_frame')
element = find_element_by_id(qq_driver, 'u')
element.click()
element.send_keys(user_name)
element = find_element_by_id(qq_driver, 'p')
element.click()
element.send_keys(user_pass)
element = find_element_by_id(qq_driver, 'login_button')
element.click()
wait_for_frame_and_switch_to_frame(qq_driver, 'tcaptcha_iframe')
img_element = find_element_by_id(qq_driver, 'slideBg')
wait_for_element_appeared(qq_driver, img_element)
big = img_element.get_attribute('src')
request_download_file_by_url(big, GlobalParam.get_test_image_path() + 'test_qq_mail_big.png')
img_element = find_element_by_id(qq_driver, 'slideBlock')
wait_for_element_appeared(qq_driver, img_element)
small = img_element.get_attribute('src')
request_download_file_by_url(small, GlobalParam.get_test_image_path() + 'test_qq_mail_small.png')
def netcase_163_login(netcase_163_driver, user_name, user_pass):
netcase_login_frame = netcase_163_driver.find_element_by_tag_name('iframe')
wait_for_frame_and_switch_to_frame(netcase_163_driver, netcase_login_frame)
wait_for_element_exist(netcase_163_driver, '//input[@name="email"]')
element = find_element_by_name(netcase_163_driver, 'email')
element.click()
element.send_keys(user_name)
wait_for_element_exist(netcase_163_driver, '//input[@name="password"]')
element = find_element_by_name(netcase_163_driver, 'password')
element.click()
element.send_keys(user_pass)
element = find_element_by_id(netcase_163_driver, 'dologin')
element.click()
# ------------------------ security mail captcha does not show ----------------------
# wait_for_element_exist(netcase_163_driver,'//div[@class="yidun_panel"]')
# element = find_element_by_class_name(netcase_163_driver, 'yidun_panel')
# netcase_163_driver.execute_script("arguments[0].style['display'] = 'block';",element)
# # element = find_element_by_class_name(netcase_163_driver, 'yidun_bg-img')
# # netcase_mail_captcha = element.get_attribute('src')
# # request_download_file_by_url(netcase_mail_captcha, test_image_path+'test_netcase_mail_captcha.png')
# time.sleep(4)
# element = find_element_by_class_name(netcase_163_driver, 'yidun_refresh')
# element.click()
#
# element = find_element_by_class_name(netcase_163_driver, 'yidun_tips__point')
# print(element.location)
#
# # element = find_element_by_class_name(netcase_163_driver, 'yidun_tips__point')
# # print(element.get_attribute("innerHTML"))
# ------------------------ security mail captcha does not show ----------------------
def netcase_163_check_mail(netcase_163_driver, mail_sender, mail_title):
wait_for_element_to_be_clickable(netcase_163_driver, '//div[@id="_mail_component_140_140"]/span[@title="收件箱"]')
time.sleep(2)
# rF0 kw0 nui-txt-flag0 : not read
    # rF0 nui-txt-flag0 : already read
# element = netcase_163_driver.find_elements_by_xpath('//div[@class="rF0 nui-txt-flag0"]/div/div[2]/span')
element = netcase_163_driver.find_elements_by_xpath('//div[@class="rF0 nui-txt-flag0"]')
for e in element:
print(e.find_element_by_xpath('.//div/div[2]/span').text)
# if e.text.__contains__(mail_title):
# print(e.text)
def qq_captcha_pass():
big_image = cv.imread(GlobalParam.get_test_image_path() + 'test_qq_mail_big.png')
small_image = cv.imread(GlobalParam.get_test_image_path() + 'test_qq_mail_small.png')
cv.imshow('1', small_image)
cv.waitKey(0)
def netcase_captcha_pass():
return ''
# login hoperun mail and check mail
# hoperun_login(mail_driver, 'user', 'password')
# wait_for_page_full_loaded(mail_driver)
# hoperun_check_mail(mail_driver, 'sender', 'title')
netcase_163_login(mail_driver, '****', '****')
wait_for_page_full_loaded(mail_driver)
netcase_163_check_mail(mail_driver, '', '123')
# qq_login(mail_driver, '', '')
# netcase_163_login(mail_driver, '', '')
# captcha_pass()
| 44.537313 | 115 | 0.744135 | [
"MIT"
] | ivanlevsky/cowabunga-potato | selenium_test/sele_test_mail_login.py | 5,974 | Python |
# -*- coding: utf-8 -*-
# @Time : 2020/10/3
# @Author : Changxin Tian
# @Email : [email protected]
r"""
KGNNLS
################################################
Reference:
Hongwei Wang et al. "Knowledge-aware Graph Neural Networks with Label Smoothness Regularization
for Recommender Systems." in KDD 2019.
Reference code:
https://github.com/hwwang55/KGNN-LS
"""
import torch
import torch.nn as nn
import numpy as np
import random
from recbole.utils import InputType
from recbole.model.abstract_recommender import KnowledgeRecommender
from recbole.model.loss import BPRLoss, EmbLoss
from recbole.model.init import xavier_normal_initialization
class KGNNLS(KnowledgeRecommender):
r"""KGNN-LS is a knowledge-based recommendation model.
    KGNN-LS transforms the knowledge graph into a user-specific weighted graph and then applies a graph neural network to
    compute personalized item embeddings. To provide a better inductive bias, KGNN-LS relies on the label smoothness
assumption, which posits that adjacent items in the knowledge graph are likely to have similar user relevance
labels/scores. Label smoothness provides regularization over the edge weights and it is equivalent to a label
propagation scheme on a graph.
"""
input_type = InputType.PAIRWISE
def __init__(self, config, dataset):
super(KGNNLS, self).__init__(config, dataset)
# load parameters info
self.embedding_size = config['embedding_size']
self.neighbor_sample_size = config['neighbor_sample_size']
self.aggregator_class = config['aggregator'] # which aggregator to use
# number of iterations when computing entity representation
self.n_iter = config['n_iter']
self.reg_weight = config['reg_weight'] # weight of l2 regularization
# weight of label Smoothness regularization
self.ls_weight = config['ls_weight']
# define embedding
self.user_embedding = nn.Embedding(self.n_users, self.embedding_size)
self.entity_embedding = nn.Embedding(
self.n_entities, self.embedding_size)
self.relation_embedding = nn.Embedding(
self.n_relations + 1, self.embedding_size)
# sample neighbors and construct interaction table
kg_graph = dataset.kg_graph(form='coo', value_field='relation_id')
adj_entity, adj_relation = self.construct_adj(kg_graph)
self.adj_entity, self.adj_relation = adj_entity.to(
self.device), adj_relation.to(self.device)
inter_feat = dataset.dataset.inter_feat.values
pos_users = torch.from_numpy(inter_feat[:, 0])
pos_items = torch.from_numpy(inter_feat[:, 1])
pos_label = torch.ones(pos_items.shape)
pos_interaction_table, self.offset = self.get_interaction_table(
pos_users, pos_items, pos_label)
self.interaction_table = self.sample_neg_interaction(
pos_interaction_table, self.offset)
# define function
self.softmax = nn.Softmax(dim=-1)
self.linear_layers = torch.nn.ModuleList()
for i in range(self.n_iter):
self.linear_layers.append(nn.Linear(
self.embedding_size if not self.aggregator_class == 'concat' else self.embedding_size * 2,
self.embedding_size))
self.ReLU = nn.ReLU()
self.Tanh = nn.Tanh()
self.bce_loss = nn.BCEWithLogitsLoss()
self.l2_loss = EmbLoss()
# parameters initialization
self.apply(xavier_normal_initialization)
def get_interaction_table(self, user_id, item_id, y):
r"""Get interaction_table that is used for fetching user-item interaction label in LS regularization.
Args:
user_id(torch.Tensor): the user id in user-item interactions, shape: [n_interactions, 1]
item_id(torch.Tensor): the item id in user-item interactions, shape: [n_interactions, 1]
y(torch.Tensor): the label in user-item interactions, shape: [n_interactions, 1]
Returns:
tuple:
- interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}
- offset(int): The offset that is used for calculating the key(index) in interaction_table
"""
offset = len(str(self.n_entities))
offset = 10 ** offset
keys = user_id * offset + item_id
keys = keys.int().cpu().numpy().tolist()
values = y.float().cpu().numpy().tolist()
interaction_table = dict(zip(keys, values))
return interaction_table, offset
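    # Worked illustration (added note, not from the original code): with n_entities = 104,
    # offset = 10 ** len("104") = 1000, so the interaction (user_id=7, item_id=42) is stored
    # under the single integer key 7 * 1000 + 42 = 7042, which keeps dictionary lookups cheap
    # during label-smoothness regularization.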
def sample_neg_interaction(self, pos_interaction_table, offset):
r"""Sample neg_interaction to construct train data.
Args:
pos_interaction_table(dict): the interaction_table that only contains pos_interaction.
offset(int): The offset that is used for calculating the key(index) in interaction_table
Returns:
interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}
"""
pos_num = len(pos_interaction_table)
neg_num = 0
neg_interaction_table = {}
while neg_num < pos_num:
user_id = random.randint(0, self.n_users)
item_id = random.randint(0, self.n_items)
keys = user_id * offset + item_id
if keys not in pos_interaction_table:
neg_interaction_table[keys] = 0.
neg_num += 1
interaction_table = {**pos_interaction_table, **neg_interaction_table}
return interaction_table
def construct_adj(self, kg_graph):
r"""Get neighbors and corresponding relations for each entity in the KG.
Args:
kg_graph(scipy.sparse.coo_matrix): an undirected graph
Returns:
tuple:
- adj_entity (torch.LongTensor): each line stores the sampled neighbor entities for a given entity,
shape: [n_entities, neighbor_sample_size]
- adj_relation (torch.LongTensor): each line stores the corresponding sampled neighbor relations,
shape: [n_entities, neighbor_sample_size]
"""
# self.logger.info('constructing knowledge graph ...')
# treat the KG as an undirected graph
kg_dict = dict()
for triple in zip(kg_graph.row, kg_graph.data, kg_graph.col):
head = triple[0]
relation = triple[1]
tail = triple[2]
if head not in kg_dict:
kg_dict[head] = []
kg_dict[head].append((tail, relation))
if tail not in kg_dict:
kg_dict[tail] = []
kg_dict[tail].append((head, relation))
# self.logger.info('constructing adjacency matrix ...')
# each line of adj_entity stores the sampled neighbor entities for a given entity
# each line of adj_relation stores the corresponding sampled neighbor relations
entity_num = kg_graph.shape[0]
adj_entity = np.zeros(
[entity_num, self.neighbor_sample_size], dtype=np.int64)
adj_relation = np.zeros(
[entity_num, self.neighbor_sample_size], dtype=np.int64)
for entity in range(entity_num):
if entity not in kg_dict.keys():
adj_entity[entity] = np.array(
[entity] * self.neighbor_sample_size)
adj_relation[entity] = np.array(
[0] * self.neighbor_sample_size)
continue
neighbors = kg_dict[entity]
n_neighbors = len(neighbors)
if n_neighbors >= self.neighbor_sample_size:
sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,
replace=False)
else:
sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,
replace=True)
adj_entity[entity] = np.array(
[neighbors[i][0] for i in sampled_indices])
adj_relation[entity] = np.array(
[neighbors[i][1] for i in sampled_indices])
return torch.from_numpy(adj_entity), torch.from_numpy(adj_relation)
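    # Illustrative example (added note): with neighbor_sample_size = 8, an entity that has only
    # 3 neighbors in the KG gets 8 samples drawn with replacement from those 3, while an entity
    # with 20 neighbors gets 8 distinct ones sampled without replacement; entities absent from
    # the KG fall back to themselves with the padding relation 0.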
def get_neighbors(self, items):
r"""Get neighbors and corresponding relations for each entity in items from adj_entity and adj_relation.
Args:
items(torch.LongTensor): The input tensor that contains item's id, shape: [batch_size, ]
Returns:
tuple:
- entities(list): Entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size, 1],
[batch_size, n_neighbor],
[batch_size, n_neighbor^2],
...,
[batch_size, n_neighbor^n_iter]}
- relations(list): Relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for
entities. Relations have the same shape as entities.
"""
items = torch.unsqueeze(items, dim=1)
entities = [items]
relations = []
for i in range(self.n_iter):
index = torch.flatten(entities[i])
neighbor_entities = torch.reshape(torch.index_select(
self.adj_entity, 0, index), (self.batch_size, -1))
neighbor_relations = torch.reshape(torch.index_select(
self.adj_relation, 0, index), (self.batch_size, -1))
entities.append(neighbor_entities)
relations.append(neighbor_relations)
return entities, relations
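    # Shape illustration (added note): with batch_size = 2, neighbor_sample_size = 4 and
    # n_iter = 2, `entities` holds tensors of shape [2, 1], [2, 4] and [2, 16], while
    # `relations` holds the matching [2, 4] and [2, 16] tensors.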
def aggregate(self, user_embeddings, entities, relations):
r"""For each item, aggregate the entity representation and its neighborhood representation into a single vector.
Args:
user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size, embedding_size]
entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size, 1],
[batch_size, n_neighbor],
[batch_size, n_neighbor^2],
...,
[batch_size, n_neighbor^n_iter]}
relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.
relations have the same shape as entities.
Returns:
item_embeddings(torch.FloatTensor): The embeddings of items, shape: [batch_size, embedding_size]
"""
entity_vectors = [self.entity_embedding(i) for i in entities]
relation_vectors = [self.relation_embedding(i) for i in relations]
for i in range(self.n_iter):
entity_vectors_next_iter = []
for hop in range(self.n_iter - i):
shape = (self.batch_size, -1,
self.neighbor_sample_size, self.embedding_size)
self_vectors = entity_vectors[hop]
neighbor_vectors = torch.reshape(
entity_vectors[hop + 1], shape)
neighbor_relations = torch.reshape(
relation_vectors[hop], shape)
# mix_neighbor_vectors
user_embeddings = torch.reshape(user_embeddings,
(self.batch_size, 1, 1, self.embedding_size)) # [batch_size, 1, 1, dim]
user_relation_scores = torch.mean(user_embeddings * neighbor_relations,
dim=-1) # [batch_size, -1, n_neighbor]
user_relation_scores_normalized = torch.unsqueeze(self.softmax(user_relation_scores),
dim=-1) # [batch_size, -1, n_neighbor, 1]
neighbors_agg = torch.mean(user_relation_scores_normalized * neighbor_vectors,
dim=2) # [batch_size, -1, dim]
if self.aggregator_class == 'sum':
output = torch.reshape(
self_vectors + neighbors_agg, (-1, self.embedding_size)) # [-1, dim]
elif self.aggregator_class == 'neighbor':
output = torch.reshape(
neighbors_agg, (-1, self.embedding_size)) # [-1, dim]
elif self.aggregator_class == 'concat':
# [batch_size, -1, dim * 2]
output = torch.cat([self_vectors, neighbors_agg], dim=-1)
output = torch.reshape(
output, (-1, self.embedding_size * 2)) # [-1, dim * 2]
else:
raise Exception("Unknown aggregator: " +
self.aggregator_class)
output = self.linear_layers[i](output)
# [batch_size, -1, dim]
output = torch.reshape(
output, [self.batch_size, -1, self.embedding_size])
if i == self.n_iter - 1:
vector = self.Tanh(output)
else:
vector = self.ReLU(output)
entity_vectors_next_iter.append(vector)
entity_vectors = entity_vectors_next_iter
res = torch.reshape(
entity_vectors[0], (self.batch_size, self.embedding_size))
return res
def label_smoothness_predict(self, user_embeddings, user, entities, relations):
r"""Predict the label of items by label smoothness.
Args:
user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size*2, embedding_size],
user(torch.FloatTensor): the index of users, shape: [batch_size*2]
entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size*2, 1],
[batch_size*2, n_neighbor],
[batch_size*2, n_neighbor^2],
...,
[batch_size*2, n_neighbor^n_iter]}
relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.
relations have the same shape as entities.
Returns:
predicted_labels(torch.FloatTensor): The predicted label of items, shape: [batch_size*2]
"""
# calculate initial labels; calculate updating masks for label propagation
entity_labels = []
# True means the label of this item is reset to initial value during label propagation
reset_masks = []
holdout_item_for_user = None
for entities_per_iter in entities:
users = torch.unsqueeze(user, dim=1) # [batch_size, 1]
user_entity_concat = users * self.offset + \
entities_per_iter # [batch_size, n_neighbor^i]
# the first one in entities is the items to be held out
if holdout_item_for_user is None:
holdout_item_for_user = user_entity_concat
def lookup_interaction_table(x, _):
x = int(x)
label = self.interaction_table.setdefault(x, 0.5)
return label
initial_label = user_entity_concat.clone().cpu().double()
initial_label.map_(initial_label, lookup_interaction_table)
initial_label = initial_label.float().to(self.device)
# False if the item is held out
holdout_mask = (holdout_item_for_user - user_entity_concat).bool()
# True if the entity is a labeled item
reset_mask = (initial_label - 0.5).bool()
reset_mask = torch.logical_and(
reset_mask, holdout_mask) # remove held-out items
initial_label = holdout_mask.float() * initial_label + torch.logical_not(
holdout_mask).float() * 0.5 # label initialization
reset_masks.append(reset_mask)
entity_labels.append(initial_label)
# we do not need the reset_mask for the last iteration
reset_masks = reset_masks[:-1]
# label propagation
relation_vectors = [self.relation_embedding(i) for i in relations]
for i in range(self.n_iter):
entity_labels_next_iter = []
for hop in range(self.n_iter - i):
masks = reset_masks[hop]
self_labels = entity_labels[hop]
neighbor_labels = torch.reshape(entity_labels[hop + 1],
[self.batch_size, -1, self.neighbor_sample_size])
neighbor_relations = torch.reshape(relation_vectors[hop],
[self.batch_size, -1, self.neighbor_sample_size,
self.embedding_size])
# mix_neighbor_labels
user_embeddings = torch.reshape(user_embeddings,
[self.batch_size, 1, 1, self.embedding_size]) # [batch_size, 1, 1, dim]
user_relation_scores = torch.mean(user_embeddings * neighbor_relations,
dim=-1) # [batch_size, -1, n_neighbor]
user_relation_scores_normalized = self.softmax(
user_relation_scores) # [batch_size, -1, n_neighbor]
neighbors_aggregated_label = torch.mean(user_relation_scores_normalized * neighbor_labels,
dim=2) # [batch_size, -1, dim] # [batch_size, -1]
output = masks.float() * self_labels + torch.logical_not(masks).float() * \
neighbors_aggregated_label
entity_labels_next_iter.append(output)
entity_labels = entity_labels_next_iter
predicted_labels = entity_labels[0].squeeze(-1)
return predicted_labels
def forward(self, user, item):
self.batch_size = item.shape[0]
# [batch_size, dim]
user_e = self.user_embedding(user)
# entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items. dimensions of entities:
# {[batch_size, 1], [batch_size, n_neighbor], [batch_size, n_neighbor^2], ..., [batch_size, n_neighbor^n_iter]}
entities, relations = self.get_neighbors(item)
# [batch_size, dim]
item_e = self.aggregate(user_e, entities, relations)
return user_e, item_e
def calculate_ls_loss(self, user, item, target):
r"""Calculate label smoothness loss.
Args:
user(torch.FloatTensor): the index of users, shape: [batch_size*2],
item(torch.FloatTensor): the index of items, shape: [batch_size*2],
target(torch.FloatTensor): the label of user-item, shape: [batch_size*2],
Returns:
ls_loss: label smoothness loss
"""
user_e = self.user_embedding(user)
entities, relations = self.get_neighbors(item)
predicted_labels = self.label_smoothness_predict(
user_e, user, entities, relations)
ls_loss = self.bce_loss(predicted_labels, target)
return ls_loss
def calculate_loss(self, interaction):
user = interaction[self.USER_ID]
pos_item = interaction[self.ITEM_ID]
neg_item = interaction[self.NEG_ITEM_ID]
target = torch.zeros(
len(user) * 2, dtype=torch.float32).to(self.device)
target[:len(user)] = 1
users = torch.cat((user, user))
items = torch.cat((pos_item, neg_item))
user_e, item_e = self.forward(users, items)
predict = torch.mul(user_e, item_e).sum(dim=1)
rec_loss = self.bce_loss(predict, target)
ls_loss = self.calculate_ls_loss(users, items, target)
l2_loss = self.l2_loss(user_e, item_e)
loss = rec_loss + self.ls_weight * ls_loss + self.reg_weight * l2_loss
return loss
def predict(self, interaction):
user = interaction[self.USER_ID]
item = interaction[self.ITEM_ID]
user_e, item_e = self.forward(user, item)
return torch.mul(user_e, item_e).sum(dim=1)
def full_sort_predict(self, interaction):
user_index = interaction[self.USER_ID]
item_index = torch.tensor(range(self.n_items)).to(self.device)
user = torch.unsqueeze(user_index, dim=1).repeat(
1, item_index.shape[0])
user = torch.flatten(user)
item = torch.unsqueeze(item_index, dim=0).repeat(
user_index.shape[0], 1)
item = torch.flatten(item)
user_e, item_e = self.forward(user, item)
score = torch.mul(user_e, item_e).sum(dim=1)
return score.view(-1)
| 46.091703 | 120 | 0.597726 | [
"MIT"
] | xingkongxiaxia/RecBole | recbole/model/knowledge_aware_recommender/kgnnls.py | 21,110 | Python |
"""
JSONField automatically serializes most Python terms to JSON data.
Creates a TEXT field with a default value of "{}". See test_json.py for
more information.
from django.db import models
from django_extensions.db.fields import json
class LOL(models.Model):
extra = json.JSONField()
"""
import datetime
from decimal import Decimal
from django.db import models
from django.conf import settings
from django.utils import simplejson
from django.utils.encoding import smart_unicode
class JSONEncoder(simplejson.JSONEncoder):
def default(self, obj):
if isinstance(obj, Decimal):
return str(obj)
elif isinstance(obj, datetime.datetime):
assert settings.TIME_ZONE == 'UTC'
return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
return simplejson.JSONEncoder.default(self, obj)
def dumps(value):
return JSONEncoder().encode(value)
def loads(txt):
value = simplejson.loads(
txt,
parse_float=Decimal,
encoding=settings.DEFAULT_CHARSET
)
return value
class JSONDict(dict):
"""
Hack so repr() called by dumpdata will output JSON instead of
Python formatted data. This way fixtures will work!
"""
def __repr__(self):
return dumps(self)
class JSONList(list):
"""
As above
"""
def __repr__(self):
return dumps(self)
class JSONField(models.TextField):
"""JSONField is a generic textfield that neatly serializes/unserializes
JSON objects seamlessly. Main thingy must be a dict object."""
# Used so to_python() is called
__metaclass__ = models.SubfieldBase
def __init__(self, *args, **kwargs):
if 'default' not in kwargs:
kwargs['default'] = '{}'
models.TextField.__init__(self, *args, **kwargs)
def to_python(self, value):
"""Convert our string value to JSON after we load it from the DB"""
if value is None or value == '':
return {}
elif isinstance(value, basestring):
res = loads(value)
if isinstance(res, dict):
return JSONDict(**res)
else:
return JSONList(res)
else:
return value
def get_db_prep_save(self, value, connection):
"""Convert our JSON object to a string before we save"""
if not isinstance(value, (list, dict)):
return super(JSONField, self).get_db_prep_save("", connection=connection)
else:
return super(JSONField, self).get_db_prep_save(dumps(value),
connection=connection)
def south_field_triple(self):
"Returns a suitable description of this field for South."
# We'll just introspect the _actual_ field.
from south.modelsinspector import introspector
field_class = "django.db.models.fields.TextField"
args, kwargs = introspector(self)
# That's our definition!
return (field_class, args, kwargs)
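# Rough usage sketch (illustration only, building on the LOL model from the module
# docstring above; not part of the original file):
#   lol = LOL.objects.create(extra={'likes': ['django', 'json']})
#   LOL.objects.get(pk=lol.pk).extra['likes']   # -> ['django', 'json']
# The dict is stored as a JSON string in the TEXT column and comes back as a JSONDict.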
| 29.607843 | 85 | 0.636755 | [
"BSD-3-Clause"
] | Mozilla-GitHub-Standards/b6a5bb5c98b18d87c72c770f29c4270008fc6fc6b787d531a2afcd382dc4cbad | vendor-local/src/django-extensions/build/lib/django_extensions/db/fields/json.py | 3,020 | Python |
import uuid
import arrow
from collections import namedtuple
HEADERS = ('start', 'stop', 'project', 'id', 'tags', 'updated_at')
class Frame(namedtuple('Frame', HEADERS)):
def __new__(cls, start, stop, project, id, tags=None, updated_at=None,):
try:
if not isinstance(start, arrow.Arrow):
start = arrow.get(start)
if not isinstance(stop, arrow.Arrow):
stop = arrow.get(stop)
if updated_at is None:
updated_at = arrow.utcnow()
elif not isinstance(updated_at, arrow.Arrow):
updated_at = arrow.get(updated_at)
except (ValueError, TypeError) as e:
from .watson import WatsonError
raise WatsonError("Error converting date: {}".format(e))
start = start.to('local')
stop = stop.to('local')
if tags is None:
tags = []
return super(Frame, cls).__new__(
cls, start, stop, project, id, tags, updated_at
)
def dump(self):
start = self.start.to('utc').int_timestamp
stop = self.stop.to('utc').int_timestamp
updated_at = self.updated_at.int_timestamp
return (start, stop, self.project, self.id, self.tags, updated_at)
@property
def day(self):
return self.start.floor('day')
def __lt__(self, other):
return self.start < other.start
    def __le__(self, other):
        # Python's <= hook is __le__ (a __lte__ method is never called)
        return self.start <= other.start
    def __gt__(self, other):
        return self.start > other.start
    def __ge__(self, other):
        # Python's >= hook is __ge__ (a __gte__ method is never called)
        return self.start >= other.start
class Span(object):
def __init__(self, start, stop, timeframe='day'):
self.timeframe = timeframe
self.start = start.floor(self.timeframe)
self.stop = stop.ceil(self.timeframe)
def overlaps(self, frame):
return frame.start <= self.stop and frame.stop >= self.start
def __contains__(self, frame):
return frame.start >= self.start and frame.stop <= self.stop
class Frames(object):
def __init__(self, frames=None):
if not frames:
frames = []
rows = [Frame(*frame) for frame in frames]
self._rows = rows
self.changed = False
def __len__(self):
return len(self._rows)
def __getitem__(self, key):
if key in HEADERS:
return tuple(self._get_col(key))
elif isinstance(key, int):
return self._rows[key]
else:
return self._rows[self._get_index_by_id(key)]
def __setitem__(self, key, value):
self.changed = True
if isinstance(value, Frame):
frame = value
else:
frame = self.new_frame(*value)
if isinstance(key, int):
self._rows[key] = frame
else:
frame = frame._replace(id=key)
try:
self._rows[self._get_index_by_id(key)] = frame
except KeyError:
self._rows.append(frame)
def __delitem__(self, key):
self.changed = True
if isinstance(key, int):
del self._rows[key]
else:
del self._rows[self._get_index_by_id(key)]
def _get_index_by_id(self, id):
try:
return next(
i for i, v in enumerate(self['id']) if v.startswith(id)
)
except StopIteration:
raise KeyError("Frame with id {} not found.".format(id))
def _get_col(self, col):
index = HEADERS.index(col)
for row in self._rows:
yield row[index]
def add(self, *args, **kwargs):
self.changed = True
frame = self.new_frame(*args, **kwargs)
self._rows.append(frame)
return frame
def new_frame(self, project, start, stop, tags=None, id=None,
updated_at=None):
if not id:
id = uuid.uuid4().hex
return Frame(start, stop, project, id, tags=tags,
updated_at=updated_at)
def dump(self):
return tuple(frame.dump() for frame in self._rows)
def filter(
self,
projects=None,
tags=None,
ignore_projects=None,
ignore_tags=None,
span=None,
include_partial_frames=False,
):
for frame in self._rows:
if projects is not None and frame.project not in projects:
continue
if ignore_projects is not None and\
frame.project in ignore_projects:
continue
if tags is not None and not any(tag in frame.tags for tag in tags):
continue
if ignore_tags is not None and\
any(tag in frame.tags for tag in ignore_tags):
continue
if span is None:
yield frame
elif frame in span:
yield frame
elif include_partial_frames and span.overlaps(frame):
# If requested, return the part of the frame that is within the
# span, for frames that are *partially* within span or reaching
# over span
start = span.start if frame.start < span.start else frame.start
stop = span.stop if frame.stop > span.stop else frame.stop
yield frame._replace(start=start, stop=stop)
def span(self, start, stop):
return Span(start, stop)
| 29.27957 | 79 | 0.565736 | [
"MIT"
] | blaulan/Watson | watson/frames.py | 5,446 | Python |
import torch
import numpy as np
from allennlp.nn import util
from relex.modules.offset_embedders import OffsetEmbedder
def position_encoding_init(n_position: int, embedding_dim: int):
position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / embedding_dim)
for j in range(embedding_dim)]
if pos != 0 else np.zeros(embedding_dim)
for pos in range(n_position)])
    # apply sin to the even embedding dimensions (0, 2, 4, ...)
    position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2])
    # apply cos to the odd embedding dimensions (1, 3, 5, ...)
    position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2])
return torch.from_numpy(position_enc).type(torch.FloatTensor)
@OffsetEmbedder.register("sine")
class SineOffsetEmbedder(OffsetEmbedder):
def __init__(self, n_position: int, embedding_dim: int) -> None:
super(SineOffsetEmbedder, self).__init__()
self._n_position = n_position
self._embedding_dim = embedding_dim
self._embedding = torch.nn.Embedding(2 * n_position + 1,
embedding_dim,
padding_idx=0)
self._embedding.weight.data = position_encoding_init(2 * n_position + 1,
embedding_dim)
# TODO: add zero vector for padding
def get_output_dim(self) -> int:
return self._embedding_dim
def is_additive(self) -> bool:
return True
def forward(self,
inputs: torch.Tensor,
mask: torch.Tensor,
span: torch.Tensor) -> torch.Tensor:
# pylint: disable=arguments-differ
# input -> [B x seq_len x d], offset -> [B x 2]
batch_size, seq_len, _ = inputs.size()
offset = span[:, 0]
position_range = util.get_range_vector(
seq_len, util.get_device_of(inputs)).repeat((batch_size, 1))
relative_positions = (1 + self._n_position
+ position_range
- offset.unsqueeze(dim=1))
# mask padding so it won't receive a positional embedding
relative_positions = relative_positions * mask.long()
return self._embedding(relative_positions)
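# Small sanity-check sketch (added illustration, not part of the original module):
if __name__ == "__main__":
    enc = position_encoding_init(n_position=4, embedding_dim=6)
    print(enc.shape)   # torch.Size([4, 6])
    print(enc[0])      # position 0 is kept as the all-zero padding row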
| 38.081967 | 81 | 0.584158 | [
"Apache-2.0"
] | DFKI-NLP/RelEx | relex/modules/offset_embedders/sine_offset_embedder.py | 2,323 | Python |
"""
Quotes API For Digital Portals
The quotes API combines endpoints for retrieving security end-of-day, delayed, and realtime prices with performance key figures and basic reference data on the security and market level. The API supports over 20 different price types for each quote and comes with basic search endpoints based on security identifiers and instrument names. Market coverage is included in the *Sample Use Cases* section below. The Digital Portal use case is focused on high-performance applications that are * serving millions of end-users, * accessible by client browsers via the internet, * supporting subscriptions for streamed updates out-of-the-box, * typically combining a wide variety of *for Digital Portals*-APIs into a highly use-case specific solution for customers, * integrated into complex infrastructures such as existing frontend frameworks, authentication services. All APIs labelled *for Digital Portals* have been designed for direct use by client web applications and feature extreme low latency: The average response time across all endpoints is 30 ms whereas 99% of all requests are answered in close to under 300ms. See the Time Series API for Digital Portals for direct access to price histories, and the News API for Digital Portals for searching and fetching related news. # noqa: E501
The version of the OpenAPI document: 2
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.QuotesAPIforDigitalPortals.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.QuotesAPIforDigitalPortals.exceptions import ApiAttributeError
def lazy_import():
from fds.sdk.QuotesAPIforDigitalPortals.model.inline_response20013_data import InlineResponse20013Data
from fds.sdk.QuotesAPIforDigitalPortals.model.inline_response200_meta import InlineResponse200Meta
globals()['InlineResponse20013Data'] = InlineResponse20013Data
globals()['InlineResponse200Meta'] = InlineResponse200Meta
class InlineResponse20013(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'data': ([InlineResponse20013Data],), # noqa: E501
'meta': (InlineResponse200Meta,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'data': 'data', # noqa: E501
'meta': 'meta', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""InlineResponse20013 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
            data ([InlineResponse20013Data]): List of Internet media types. [optional]  # noqa: E501
meta (InlineResponse200Meta): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""InlineResponse20013 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
            data ([InlineResponse20013Data]): List of Internet media types. [optional]  # noqa: E501
meta (InlineResponse200Meta): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 48.712687 | 1,302 | 0.603294 | [
"Apache-2.0"
] | factset/enterprise-sdk | code/python/QuotesAPIforDigitalPortals/v3/fds/sdk/QuotesAPIforDigitalPortals/model/inline_response20013.py | 13,055 | Python |
def calculalaNotafinalRGHT1():
    # define variables
    notaFinalRGHT = 0.0
    # input data
    pesoUnidadRGHT = float(input("Enter the unit weight (%): "))
    notaObtenidaRGHT = float(input("Enter the grade obtained: "))
    # process: each part of the course has a maximum weight and a minimum grade
    if pesoUnidadRGHT <= 20 and notaObtenidaRGHT >= 14:
        notaFinalRGHT = notaObtenidaRGHT
    elif pesoUnidadRGHT <= 15 and notaObtenidaRGHT >= 17:
        notaFinalRGHT = notaObtenidaRGHT * 2
    elif pesoUnidadRGHT <= 15 and notaObtenidaRGHT >= 15:
        notaFinalRGHT = notaObtenidaRGHT * 3
    elif pesoUnidadRGHT <= 50 and notaObtenidaRGHT >= 20:
        notaFinalRGHT = notaObtenidaRGHT * 4
    # output data
    print("Final grade for Fundamentos de programación:", notaFinalRGHT)
def bonoDocenteRGHT2():
    # define variables
    bonoObtenido = 0.0
    # input data
    salarioMinimoRGHT = float(input("Enter the minimum salary: "))
    puntuacionObtenidaRGHT = float(input("Enter the score obtained: "))
    # process
    if 0 <= puntuacionObtenidaRGHT <= 100:
        bonoObtenido = salarioMinimoRGHT
    elif 101 <= puntuacionObtenidaRGHT <= 150:
        bonoObtenido = salarioMinimoRGHT * 2
    elif puntuacionObtenidaRGHT > 150:
        bonoObtenido = salarioMinimoRGHT * 3
    # output data
    print("The teacher will receive a bonus of:", bonoObtenido)
calculalaNotafinalRGHT1()
# bonoDocenteRGHT2()
"Apache-2.0"
] | royturpo123/EXAMEN-01 | EstCondicional.py | 1,260 | Python |
import argparse
import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from ignite.metrics import IoU, Precision, Recall
import torchsat.transforms.transforms_cd as T
from torchsat.datasets.folder import ChangeDetectionDataset
from torchsat.models import FC_EF, FC_Siam_Conc, FC_Siam_Diff
def train_one_epoch(epoch, dataloader, model, criterion, optimizer, device, writer):
print('train epoch {}'.format(epoch))
model.train()
for idx, (pre_img, post_img, targets) in enumerate(dataloader):
pre_img, post_img, targets = pre_img.to(device), post_img.to(device), targets.to(device)
outputs = model(pre_img, post_img)
loss = criterion(outputs, targets)
optimizer.zero_grad()
loss.backward()
optimizer.step()
print('train-epoch:{} [{}/{}], loss: {:5.3}'.format(epoch, idx+1, len(dataloader), loss.item()))
writer.add_scalar('train/loss', loss.item(), len(dataloader)*epoch+idx)
def evalidation(epoch, dataloader, model, criterion, device, writer, tb_test_imgs):
print('\neval epoch {}'.format(epoch))
model.eval()
recall = Recall(lambda x: (x[0], x[1]))
precision = Precision(lambda x: (x[0], x[1]))
mean_recall = []
mean_precision = []
mean_loss = []
with torch.no_grad():
for idx, (pre_img, post_img, targets) in enumerate(dataloader):
pre_img, post_img, targets = pre_img.to(device), post_img.to(device), targets.to(device)
outputs = model(pre_img, post_img)
loss = criterion(outputs, targets)
preds = outputs.argmax(1)
precision.update((preds, targets))
recall.update((preds, targets))
mean_loss.append(loss.item())
mean_recall.append(recall.compute().item())
mean_precision.append(precision.compute().item())
# print('val-epoch:{} [{}/{}], loss: {:5.3}'.format(epoch, idx + 1, len(dataloader), loss.item()))
writer.add_scalar('test/loss', loss.item(), len(dataloader) * epoch + idx)
if idx < tb_test_imgs:
writer.add_image('test/pre', pre_img[0], idx)
writer.add_image('test/post', post_img[0], idx)
                    writer.add_image('test/label', targets[0].unsqueeze(0), idx)
writer.add_image('test/pred', preds, idx)
mean_precision, mean_recall = np.array(mean_precision).mean(), np.array(mean_recall).mean()
f1 = mean_precision * mean_recall * 2 / (mean_precision + mean_recall + 1e-20)
print('precision: {:07.5}, recall: {:07.5}, f1: {:07.5}\n'.format(mean_precision, mean_recall, f1))
writer.add_scalar('test/epoch-loss', np.array(mean_loss).mean(), epoch)
writer.add_scalar('test/f1', f1, epoch)
writer.add_scalar('test/precision', mean_precision, epoch)
writer.add_scalar('test/recall', mean_recall, epoch)
def load_data(traindir, valdir, **kwargs):
"""generate the train and val dataloader, you can change this for your specific task
Args:
traindir (str): train dataset dir
valdir (str): validation dataset dir
Returns:
tuple: the train dataset and validation dataset
"""
train_transform = T.Compose([
T.RandomCrop(512),
T.RandomHorizontalFlip(),
T.RandomVerticalFlip(),
T.ToTensor(),
T.Normalize(),
])
val_transform = T.Compose([
T.ToTensor(),
T.Normalize(),
])
dataset_train = ChangeDetectionDataset(traindir, extentions=kwargs['extensions'], transforms=train_transform, )
dataset_val = ChangeDetectionDataset(valdir, extentions=kwargs['extensions'], transforms=val_transform)
return dataset_train, dataset_val
def main(args):
torch.backends.cudnn.benchmark = True
device = torch.device('cuda' if args.device == 'cuda' else 'cpu')
# dataset and dataloader
train_data, val_data = load_data(args.train_path, args.val_path, extensions=args.extensions)
train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=True)
val_loader = DataLoader(val_data, batch_size=1, shuffle=False)
# model
# model = get_model(args.model, args.num_classes, pretrained=args.pretrained)
# model = FC_EF(num_classes=args.num_classes)
model = FC_Siam_Diff(num_classes=args.num_classes)
model.to(device)
if args.resume:
model.load_state_dict(torch.load(args.resume, map_location=device))
# TODO: resume learning rate
# loss
    # targets are integer class maps, so cross-entropy is the matching criterion
    criterion = nn.CrossEntropyLoss().to(device)
# optim and lr scheduler
optimizer = optim.Adam(model.parameters(), lr=args.lr)
lr_scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=10, T_mult=1, eta_min=1e-8)
# lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
writer = SummaryWriter(args.ckp_dir)
for epoch in range(args.epochs):
writer.add_scalar('train/lr', lr_scheduler.get_lr()[0], epoch)
train_one_epoch(epoch, train_loader, model, criterion, optimizer, device, writer)
evalidation(epoch, val_loader, model, criterion, device, writer, args.tb_test_imgs)
lr_scheduler.step()
if epoch % 2 == 0:
torch.save(model.state_dict(), os.path.join(args.ckp_dir, 'cd_epoch_{}.pth'.format(epoch)))
def parse_args():
parser = argparse.ArgumentParser(description='TorchSat Change Detection Training Script')
parser.add_argument('--train-path', help='train dataset path')
parser.add_argument('--val-path', help='validate dataset path')
parser.add_argument('--extensions', nargs='+', default='jpg', help='the train image extension')
parser.add_argument('--model', default="unet34", help='model name. default, unet34')
parser.add_argument('--pretrained', default=True, help='use ImageNet pretrained params')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--num-classes', default=3, type=int, help='num of classes')
parser.add_argument('--in-channels', default=3, type=int, help='input image channels')
parser.add_argument('--device', default='cpu', help='device')
parser.add_argument('-b', '--batch-size', default=16, type=int, help='batch size')
parser.add_argument('--epochs', default=90, type=int, help='epochs')
parser.add_argument('--lr', default=0.01, type=float, help='initial learning rate')
parser.add_argument('--print-freq', default=10, type=int, help='print frequency')
parser.add_argument('--ckp-dir', default='./', help='path to save checkpoint')
    parser.add_argument('--tb-test-imgs', default=10, type=int, help='the number of test images shown in tensorboard')
args = parser.parse_args()
return args
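# Example invocation (illustration only; the paths and values below are made up):
#   python train_cd.py --train-path data/cd/train --val-path data/cd/val \
#       --extensions jpg --num-classes 2 --device cuda -b 8 --epochs 60 \
#       --lr 0.001 --ckp-dir checkpoints/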
if __name__ == "__main__":
args = parse_args()
main(args)
| 42.393939 | 115 | 0.674482 | [
"MIT"
] | alina2204/contrastive_SSL_ship_detection | torchsat/scripts/train_cd.py | 6,995 | Python |
# -*- coding: utf-8 -*-
"""Converts .pyfr[m, s] files to a Paraview VTK UnstructuredGrid File"""
from collections import defaultdict
import os
import numpy as np
from pyfr.shapes import BaseShape
from pyfr.util import subclass_where
from pyfr.writers import BaseWriter
class ParaviewWriter(BaseWriter):
# Supported file types and extensions
name = 'paraview'
extn = ['.vtu', '.pvtu']
def __init__(self, args):
super().__init__(args)
self.dtype = np.dtype(args.precision).type
self.divisor = args.divisor or self.cfg.getint('solver', 'order')
def _get_npts_ncells_nnodes(self, mk):
m_inf = self.mesh_inf[mk]
# Get the shape and sub division classes
shapecls = subclass_where(BaseShape, name=m_inf[0])
subdvcls = subclass_where(BaseShapeSubDiv, name=m_inf[0])
# Number of vis points
npts = shapecls.nspts_from_order(self.divisor + 1)*m_inf[1][1]
# Number of sub cells and nodes
ncells = len(subdvcls.subcells(self.divisor))*m_inf[1][1]
nnodes = len(subdvcls.subnodes(self.divisor))*m_inf[1][1]
return npts, ncells, nnodes
def _get_array_attrs(self, mk=None):
dtype = 'Float32' if self.dtype == np.float32 else 'Float64'
dsize = np.dtype(self.dtype).itemsize
ndims = self.ndims
vvars = self.elementscls.visvarmap[ndims]
names = ['', 'connectivity', 'offsets', 'types']
types = [dtype, 'Int32', 'Int32', 'UInt8']
comps = ['3', '', '', '']
for fname, varnames in vvars.items():
names.append(fname.capitalize())
types.append(dtype)
comps.append(str(len(varnames)))
# If a mesh has been given the compute the sizes
if mk:
npts, ncells, nnodes = self._get_npts_ncells_nnodes(mk)
nb = npts*dsize
sizes = [3*nb, 4*nnodes, 4*ncells, ncells]
sizes.extend(len(varnames)*nb for varnames in vvars.values())
return names, types, comps, sizes
else:
return names, types, comps
def write_out(self):
name, extn = os.path.splitext(self.outf)
parallel = extn == '.pvtu'
parts = defaultdict(list)
for mk, sk in zip(self.mesh_inf, self.soln_inf):
prt = mk.split('_')[-1]
pfn = '{0}_{1}.vtu'.format(name, prt) if parallel else self.outf
parts[pfn].append((mk, sk))
write_s_to_fh = lambda s: fh.write(s.encode('utf-8'))
for pfn, misil in parts.items():
with open(pfn, 'wb') as fh:
write_s_to_fh('<?xml version="1.0" ?>\n<VTKFile '
'byte_order="LittleEndian" '
'type="UnstructuredGrid" '
'version="0.1">\n<UnstructuredGrid>\n')
# Running byte-offset for appended data
off = 0
# Header
for mk, sk in misil:
off = self._write_serial_header(fh, mk, off)
write_s_to_fh('</UnstructuredGrid>\n'
'<AppendedData encoding="raw">\n_')
# Data
for mk, sk in misil:
self._write_data(fh, mk, sk)
write_s_to_fh('\n</AppendedData>\n</VTKFile>')
if parallel:
with open(self.outf, 'wb') as fh:
write_s_to_fh('<?xml version="1.0" ?>\n<VTKFile '
'byte_order="LittleEndian" '
'type="PUnstructuredGrid" '
'version="0.1">\n<PUnstructuredGrid>\n')
# Header
self._write_parallel_header(fh)
# Constitutent pieces
for pfn in parts:
write_s_to_fh('<Piece Source="{0}"/>\n'
.format(os.path.basename(pfn)))
write_s_to_fh('</PUnstructuredGrid>\n</VTKFile>\n')
def _write_darray(self, array, vtuf, dtype):
array = array.astype(dtype)
np.uint32(array.nbytes).tofile(vtuf)
array.tofile(vtuf)
def _write_serial_header(self, vtuf, mk, off):
names, types, comps, sizes = self._get_array_attrs(mk)
npts, ncells = self._get_npts_ncells_nnodes(mk)[:2]
write_s = lambda s: vtuf.write(s.encode('utf-8'))
write_s('<Piece NumberOfPoints="{0}" NumberOfCells="{1}">\n'
.format(npts, ncells))
write_s('<Points>\n')
        # Write vtk DataArray headers
for i, (n, t, c, s) in enumerate(zip(names, types, comps, sizes)):
write_s('<DataArray Name="{0}" type="{1}" '
'NumberOfComponents="{2}" '
'format="appended" offset="{3}"/>\n'
.format(n, t, c, off))
off += 4 + s
# Write ends/starts of vtk file objects
if i == 0:
write_s('</Points>\n<Cells>\n')
elif i == 3:
write_s('</Cells>\n<PointData>\n')
# Write end of vtk element data
write_s('</PointData>\n</Piece>\n')
# Return the current offset
return off
def _write_parallel_header(self, vtuf):
names, types, comps = self._get_array_attrs()
write_s = lambda s: vtuf.write(s.encode('utf-8'))
write_s('<PPoints>\n')
        # Write vtk DataArray headers
for i, (n, t, s) in enumerate(zip(names, types, comps)):
write_s('<PDataArray Name="{0}" type="{1}" '
'NumberOfComponents="{2}"/>\n'.format(n, t, s))
if i == 0:
write_s('</PPoints>\n<PCells>\n')
elif i == 3:
write_s('</PCells>\n<PPointData>\n')
write_s('</PPointData>\n')
def _write_data(self, vtuf, mk, sk):
name = self.mesh_inf[mk][0]
mesh = self.mesh[mk]
soln = self.soln[sk]
# Get the shape and sub division classes
shapecls = subclass_where(BaseShape, name=name)
subdvcls = subclass_where(BaseShapeSubDiv, name=name)
# Dimensions
nspts, neles = mesh.shape[:2]
        # Sub division points inside a standard element
svpts = shapecls.std_ele(self.divisor)
nsvpts = len(svpts)
# Shape
soln_b = shapecls(nspts, self.cfg)
# Generate the operator matrices
mesh_vtu_op = soln_b.sbasis.nodal_basis_at(svpts)
soln_vtu_op = soln_b.ubasis.nodal_basis_at(svpts)
# Calculate node locations of vtu elements
vpts = np.dot(mesh_vtu_op, mesh.reshape(nspts, -1))
vpts = vpts.reshape(nsvpts, -1, self.ndims)
# Calculate solution at node locations of vtu elements
vsol = np.dot(soln_vtu_op, soln.reshape(-1, self.nvars*neles))
vsol = vsol.reshape(nsvpts, self.nvars, -1).swapaxes(0, 1)
# Append dummy z dimension for points in 2D
if self.ndims == 2:
vpts = np.pad(vpts, [(0, 0), (0, 0), (0, 1)], 'constant')
# Write element node locations to file
self._write_darray(vpts.swapaxes(0, 1), vtuf, self.dtype)
# Perform the sub division
nodes = subdvcls.subnodes(self.divisor)
# Prepare vtu cell arrays
vtu_con = np.tile(nodes, (neles, 1))
vtu_con += (np.arange(neles)*nsvpts)[:, None]
# Generate offset into the connectivity array
vtu_off = np.tile(subdvcls.subcelloffs(self.divisor), (neles, 1))
vtu_off += (np.arange(neles)*len(nodes))[:, None]
# Tile vtu cell type numbers
vtu_typ = np.tile(subdvcls.subcelltypes(self.divisor), neles)
# Write vtu node connectivity, connectivity offsets and cell types
self._write_darray(vtu_con, vtuf, np.int32)
self._write_darray(vtu_off, vtuf, np.int32)
self._write_darray(vtu_typ, vtuf, np.uint8)
# Primitive and visualisation variable maps
privarmap = self.elementscls.privarmap[self.ndims]
visvarmap = self.elementscls.visvarmap[self.ndims]
# Convert from conservative to primitive variables
vsol = np.array(self.elementscls.conv_to_pri(vsol, self.cfg))
# Write out the various fields
for vnames in visvarmap.values():
ix = [privarmap.index(vn) for vn in vnames]
self._write_darray(vsol[ix].T, vtuf, self.dtype)
class BaseShapeSubDiv(object):
vtk_types = dict(tri=5, quad=9, tet=10, pyr=14, pri=13, hex=12)
vtk_nodes = dict(tri=3, quad=4, tet=4, pyr=5, pri=6, hex=8)
@classmethod
def subcells(cls, n):
pass
@classmethod
def subcelloffs(cls, n):
return np.cumsum([cls.vtk_nodes[t] for t in cls.subcells(n)])
@classmethod
def subcelltypes(cls, n):
return np.array([cls.vtk_types[t] for t in cls.subcells(n)])
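    # Worked illustration (added note): for QuadShapeSubDiv with n = 2, subcells(2) is
    # ['quad'] * 4, so subcelltypes(2) -> [9, 9, 9, 9] (VTK_QUAD) and
    # subcelloffs(2) -> [4, 8, 12, 16], the running offsets into the connectivity array.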
@classmethod
def subnodes(cls, n):
pass
class TensorProdShapeSubDiv(BaseShapeSubDiv):
@classmethod
def subnodes(cls, n):
conbase = np.array([0, 1, n + 2, n + 1])
# Extend quad mapping to hex mapping
if cls.ndim == 3:
conbase = np.hstack((conbase, conbase + (1 + n)**2))
# Calculate offset of each subdivided element's nodes
nodeoff = np.zeros((n,)*cls.ndim)
for dim, off in enumerate(np.ix_(*(range(n),)*cls.ndim)):
nodeoff += off*(n + 1)**dim
# Tile standard element node ordering mapping, then apply offsets
internal_con = np.tile(conbase, (n**cls.ndim, 1))
internal_con += nodeoff.T.flatten()[:, None]
return np.hstack(internal_con)
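    # Worked illustration (added note): for a quad (ndim = 2) with n = 2 the standard
    # element is a 3 x 3 grid of points numbered 0..8, and subnodes(2) returns
    #   [0, 1, 4, 3,  1, 2, 5, 4,  3, 4, 7, 6,  4, 5, 8, 7]
    # i.e. four groups of four indices, one per sub-quad.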
class QuadShapeSubDiv(TensorProdShapeSubDiv):
name = 'quad'
ndim = 2
@classmethod
def subcells(cls, n):
return ['quad']*(n**2)
class HexShapeSubDiv(TensorProdShapeSubDiv):
name = 'hex'
ndim = 3
@classmethod
def subcells(cls, n):
return ['hex']*(n**3)
class TriShapeSubDiv(BaseShapeSubDiv):
name = 'tri'
@classmethod
def subcells(cls, n):
return ['tri']*(n**2)
@classmethod
def subnodes(cls, n):
conlst = []
for row in range(n, 0, -1):
# Lower and upper indices
l = (n - row)*(n + row + 3) // 2
u = l + row + 1
# Base offsets
off = [l, l + 1, u, u + 1, l + 1, u]
# Generate current row
subin = np.ravel(np.arange(row - 1)[..., None] + off)
subex = [ix + row - 1 for ix in off[:3]]
            # Extend list
conlst.extend([subin, subex])
return np.hstack(conlst)
class TetShapeSubDiv(BaseShapeSubDiv):
name = 'tet'
@classmethod
def subcells(cls, nsubdiv):
return ['tet']*(nsubdiv**3)
@classmethod
def subnodes(cls, nsubdiv):
conlst = []
jump = 0
for n in range(nsubdiv, 0, -1):
for row in range(n, 0, -1):
# Lower and upper indices
l = (n - row)*(n + row + 3) // 2 + jump
u = l + row + 1
# Lower and upper for one row up
ln = (n + 1)*(n + 2) // 2 + l - n + row
un = ln + row
rowm1 = np.arange(row - 1)[..., None]
# Base offsets
offs = [(l, l + 1, u, ln), (l + 1, u, ln, ln + 1),
(u, u + 1, ln + 1, un), (u, ln, ln + 1, un),
(l + 1, u, u+1, ln + 1), (u + 1, ln + 1, un, un + 1)]
# Current row
conlst.extend(rowm1 + off for off in offs[:-1])
conlst.append(rowm1[:-1] + offs[-1])
conlst.append([ix + row - 1 for ix in offs[0]])
jump += (n + 1)*(n + 2) // 2
return np.hstack(np.ravel(c) for c in conlst)
class PriShapeSubDiv(BaseShapeSubDiv):
name = 'pri'
@classmethod
def subcells(cls, n):
return ['pri']*(n**3)
@classmethod
def subnodes(cls, n):
# Triangle connectivity
tcon = TriShapeSubDiv.subnodes(n).reshape(-1, 3)
# Layer these rows of triangles to define prisms
loff = (n + 1)*(n + 2) // 2
lcon = [[tcon + i*loff, tcon + (i + 1)*loff] for i in range(n)]
return np.hstack(np.hstack(l).flat for l in lcon)
class PyrShapeSubDiv(BaseShapeSubDiv):
name = 'pyr'
@classmethod
def subcells(cls, n):
cells = []
for i in range(n, 0, -1):
cells += ['pyr']*(i**2 + (i - 1)**2)
cells += ['tet']*(2*i*(i - 1))
return cells
@classmethod
def subnodes(cls, nsubdiv):
lcon = []
# Quad connectivity
qcon = [QuadShapeSubDiv.subnodes(n + 1).reshape(-1, 4)
for n in range(nsubdiv)]
# Simple functions
def _row_in_quad(n, a=0, b=0):
return np.array([(n*i + j, n*i + j + 1)
for i in range(a, n + b)
for j in range(n - 1)])
def _col_in_quad(n, a=0, b=0):
return np.array([(n*i + j, n*(i + 1) + j)
for i in range(n - 1)
for j in range(a, n + b)])
u = 0
for n in range(nsubdiv, 0, -1):
l = u
u += (n + 1)**2
lower_quad = qcon[n - 1] + l
upper_pts = np.arange(n**2) + u
# First set of pyramids
lcon.append([lower_quad, upper_pts])
if n > 1:
upper_quad = qcon[n - 2] + u
lower_pts = np.hstack(range(k*(n + 1)+1, (k + 1)*n + k)
for k in range(1, n)) + l
# Second set of pyramids
lcon.append([upper_quad[:, ::-1], lower_pts])
lower_row = _row_in_quad(n + 1, 1, -1) + l
lower_col = _col_in_quad(n + 1, 1, -1) + l
upper_row = _row_in_quad(n) + u
upper_col = _col_in_quad(n) + u
# Tetrahedra
lcon.append([lower_col, upper_row])
lcon.append([lower_row[:, ::-1], upper_col])
return np.hstack(np.column_stack(l).flat for l in lcon)
| 31.074236 | 77 | 0.531057 | [
"BSD-3-Clause"
] | tjcorona/PyFR | pyfr/writers/paraview.py | 14,232 | Python |
#!/c/python27/python
import os
from utils import *
def cli_cpp(parms):
return os.path.join(parms['OVPN3'], "core", "test", "ovpncli", "cli.cpp")
def src_fn(parms, srcfile):
# Get source file name
if srcfile:
if '.' not in os.path.basename(srcfile):
srcfile += ".cpp"
else:
srcfile = cli_cpp(parms)
return srcfile
def is_unit_test(argv):
unit_test = False
if len(argv) >= 2:
unit_test = argv[1] == "unittest"
return unit_test
def src_fn_argv(parms, argv):
srcfile = None
if len(argv) >= 1:
srcfile = argv[0]
return src_fn(parms, srcfile)
def build(parms, srcfile, unit_test=False):
# Debug?
if parms['DEBUG']:
dbg_rel_flags = "/Zi"
else:
dbg_rel_flags = "/O2"
# Dictionary we will use to substitute parameters
# onto VC command line.
options = {
"ovpn3" : parms['OVPN3'],
"tap" : os.path.join(parms['TAP'], 'src'),
"tap_component_id" : parms['TAP_WIN_COMPONENT_ID'],
"asio" : os.path.join(build_dir(parms), "asio"),
"mbedtls" : os.path.join(build_dir(parms), "mbedtls"),
"lz4" : os.path.join(build_dir(parms), "lz4", "lib"),
"srcfile" : srcfile,
"extra_defs" : parms['CPP_EXTRA'],
"extra_inc" : "",
"extra_lib_path" : "",
"extra_lib" : "",
}
vc_parms(parms, options)
# Do we need to support XP and Win 2003?
arch = os.environ.get("ARCH", parms['ARCH'])
if arch == "x86_xp":
options['extra_defs'] += " /D_WIN32_WINNT=0x0501" # pre-Vista
else:
options['extra_defs'] += " /D_WIN32_WINNT=0x0600" # Vista and later
options['extra_lib'] += " fwpuclnt.lib"
# Add jsoncpp (optional)
if 'jsoncpp' in parms['LIB_VERSIONS']:
options["jsoncpp"] = os.path.join(build_dir(parms), "jsoncpp")
options['extra_inc'] += " /DHAVE_JSONCPP /I %(jsoncpp)s/dist" % options
options['extra_lib_path'] += " /LIBPATH:%(jsoncpp)s/dist" % options
options['extra_lib'] += " jsoncpp.lib"
if unit_test:
options['extra_lib'] += " gtest.lib"
options['extra_inc'] += " /I %s" % os.path.join(parms["GTEST_ROOT"], "googletest", "include")
options['extra_lib_path'] += " /LIBPATH:%s" % os.path.join(parms["GTEST_ROOT"], "googlemock", "gtest", "Debug")
# Build OpenVPN Connect
if parms.get("CONNECT"):
options['extra_inc'] += " /I " + os.path.join(parms['OVPN3'], "common")
# build it
vc_cmd(parms, r"cl %(extra_defs)s /DNOMINMAX /D_CRT_SECURE_NO_WARNINGS /DUSE_ASIO /DASIO_STANDALONE /DASIO_NO_DEPRECATED /I %(asio)s\asio\include /DUSE_MBEDTLS /I %(mbedtls)s\include /DHAVE_LZ4 /I %(lz4)s%(extra_inc)s -DTAP_WIN_COMPONENT_ID=%(tap_component_id)s /I %(tap)s /I %(ovpn3)s\core /EHsc %(link_static_dynamic_flags)s /W0 %(dbg_rel_flags)s /nologo %(srcfile)s /link /LIBPATH:%(mbedtls)s\library /LIBPATH:%(lz4)s%(extra_lib_path)s mbedtls.lib lz4.lib%(extra_lib)s ws2_32.lib crypt32.lib iphlpapi.lib winmm.lib user32.lib gdi32.lib advapi32.lib wininet.lib shell32.lib ole32.lib rpcrt4.lib" % options, arch=os.environ.get("ARCH"))
if __name__ == "__main__":
import sys
from parms import PARMS
# some parameters might be redefined, like in Jenkins multibranch pipeline case
PARMS['BUILD'] = os.environ.get('BUILD', PARMS['BUILD'])
PARMS['OVPN3'] = os.environ.get('OVPN3', PARMS['OVPN3'])
src = src_fn_argv(PARMS, sys.argv[1:])
unit_test = is_unit_test(sys.argv[1:])
build(PARMS, src, unit_test)
| 37.882979 | 641 | 0.624544 | ["MIT"] | february29/FchVPN | Carthage/Checkouts/openvpn-adapter/OpenVPN Adapter/Vendors/openvpn/win/build.py | 3,561 | Python |
#!/usr/bin/env python
## @package teleop_joy A node for controlling the P3DX with an XBox controller
import rospy
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from sensor_msgs.msg import Joy
import numpy as np
def quat2yaw(q):
return np.arctan2(2*(q.y*q.z + q.w*q.x), 1 - 2*(q.z**2 + q.w**2))
def joyCallback(msg):
global cmd_vel_pub
global linear_axis
global linear_scale
global rotation_axis
global rotation_scale
global yaw
cmd_vel_msg = Twist()
cmd_vel_msg.linear.x = msg.axes[linear_axis] * linear_scale
cmd_vel_msg.angular.z = msg.axes[rotation_axis] * rotation_scale
cmd_vel_msg.angular.y = np.inf
cmd_vel_pub.publish(cmd_vel_msg)
if __name__ == '__main__':
rospy.init_node('teleop_joy')
global cmd_vel_pub
global linear_axis
global linear_scale
global rotation_axis
global rotation_scale
global yaw
linear_axis = rospy.get_param('linear_axis' , 1)
linear_scale = rospy.get_param('linear_scale' , 5)
rotation_axis = rospy.get_param('rotation_axis' , 3)
rotation_scale = rospy.get_param('rotation_scale', 1)
cmd_vel_pub = rospy.Publisher("/asv/cmd_vel", Twist, queue_size=1)
rospy.Subscriber("joy", Joy, joyCallback)
rospy.spin()
| 24.615385 | 78 | 0.715625 | ["MIT"] | Lovestarni/asv_simulator | nodes/teleop_joy.py | 1,280 | Python |
import os
import fs
from .utils import Docs, custom_dedent
class TestTutorial(Docs):
def test_level_1(self):
expected = "world"
folder = "level-1-jinja2-cli"
self._moban(folder, expected)
def test_level_1_custom_define(self):
expected = "maailman"
folder = "level-1-jinja2-cli"
args = [
"moban",
"-d",
"hello=maailman",
"-t",
"a.template",
"-o",
"moban.output",
]
self.run_moban(args, folder, [("moban.output", expected)])
def test_level_2(self):
expected = """
========header============
world
========footer============
"""
expected = custom_dedent(expected)
folder = "level-2-template-inheritance"
self._moban(folder, expected)
def test_level_3(self):
expected = """
========header============
world
shijie
========footer============
"""
expected = custom_dedent(expected)
folder = "level-3-data-override"
self._moban(folder, expected)
def test_level_4(self):
expected = """
========header============
world
shijie
========footer============
"""
expected = custom_dedent(expected)
folder = "level-4-single-command"
self.run_moban(["moban"], folder, [("a.output", expected)])
def test_level_5(self):
expected = """
========header============
world
shijie
this demonstrates jinja2's include statement
========footer============
"""
expected = custom_dedent(expected)
folder = "level-5-custom-configuration"
self.run_moban(["moban"], folder, [("a.output", expected)])
def test_level_6(self):
expected = """
========header============
world2
shijie
this demonstrates jinja2's include statement
========footer============
"""
expected = custom_dedent(expected)
folder = "level-6-complex-configuration"
self.run_moban(["moban"], folder, [("a.output2", expected)])
def test_level_20(self):
expected = """
========header============
world2
shijie
this demonstrates jinja2's include statement
========footer============
"""
expected = custom_dedent(expected)
folder = "level-20-templates-configs-in-zip-or-tar"
self.run_moban_with_fs(
["moban"], folder, [("zip://a.zip!/a.output2", expected)]
)
def test_level_7(self):
expected = """
Hello, you are in level 7 example
Hello, you are not in level 7
"""
expected = custom_dedent(expected)
folder = "level-7-use-custom-jinja2-filter-test-n-global"
self.run_moban(["moban"], folder, [("test.output", expected)])
def test_level_8(self):
expected = "it is a test\n"
folder = "level-8-pass-a-folder-full-of-templates"
check_file = fs.path.join("templated-folder", "my")
self.run_moban(["moban"], folder, [(check_file, expected)])
def test_level_9(self):
expected = "pypi-mobans: moban dependency as pypi package"
folder = "level-9-moban-dependency-as-pypi-package"
self.run_moban(["moban"], folder, [("test.txt", expected)])
def test_level_24(self):
expected = "pypi-mobans: files over http protocol"
folder = "level-24-files-over-http"
self.run_moban(["moban"], folder, [("test.txt", expected)])
def test_level_9_deprecated(self):
expected = "pypi-mobans: moban dependency as pypi package"
folder = "deprecated-level-9-moban-dependency-as-pypi-package"
self.run_moban(["moban"], folder, [("test.txt", expected)])
def test_level_10(self):
expected = "pypi-mobans: moban dependency as git repo"
folder = "level-10-moban-dependency-as-git-repo"
self.run_moban(["moban"], folder, [("test.txt", expected)])
def test_level_10_deprecated(self):
expected = "pypi-mobans: moban dependency as git repo"
folder = "deprecated-level-10-moban-dependency-as-git-repo"
self.run_moban(["moban"], folder, [("test.txt", expected)])
def test_level_11(self):
expected = "handlebars does not support inheritance\n"
folder = "level-11-use-handlebars"
self.run_moban(["moban"], folder, [("a.output", expected)])
def test_level_12(self):
expected_a = """
world
world
world
world
b.template exists
a/b
Static text generator using any template, any data and any location.
"""
expected_b = """
142
42
142
"""
expected_a = custom_dedent(expected_a)
expected_b = custom_dedent(expected_b)
folder = "level-12-use-template-engine-extensions"
self.run_moban(
["moban"],
folder,
[("a.output", expected_a), ("b.output", expected_b)],
)
def test_level_13_json(self):
expected = """
========header============
world from child.json
shijie from parent.yaml
========footer============
"""
expected = custom_dedent(expected)
folder = "level-13-any-data-override-any-data"
commands = [
"moban",
"-c",
"child.json",
"-t",
"a.template",
"-o",
"moban.output",
]
self.run_moban(commands, folder, [("moban.output", expected)])
def test_level_13_yaml(self):
expected = """
========header============
world from child.yaml
shijie from parent.json
========footer============
"""
expected = custom_dedent(expected)
folder = "level-13-any-data-override-any-data"
commands = [
"moban",
"-c",
"child.yaml",
"-t",
"a.template",
"-o",
"moban.output",
]
self.run_moban(commands, folder, [("moban.output", expected)])
def test_level_14_custom(self):
expected = """
========header============
world from child.cusom
shijie from parent.json
========footer============
"""
expected = custom_dedent(expected)
folder = "level-14-custom-data-loader"
commands = ["moban"]
self.run_moban(commands, folder, [("a.output", expected)])
def test_level_15_copy_templates_as_target(self):
expected = "test file\n"
folder = "level-15-copy-templates-as-target"
assertions = [
("simple.file", expected),
(
"target_without_template_type",
"file extension will trigger copy engine\n",
),
(
"target_in_short_form",
(
"it is OK to have a short form, "
+ "but the file to be 'copied' shall have 'copy' extension, "
+ "so as to trigger ContentForwardEngine, 'copy' engine.\n"
),
),
(
"output_is_copied.same_file_extension",
"it is implicit copy as well",
),
]
self.run_moban(["moban"], folder, assertions)
def test_level_21_copy_templates_into_zips(self):
expected = "test file\n"
folder = "level-21-copy-templates-into-an-alien-file-system"
long_url = (
"zip://my.zip!/test-recursive-dir/sub_directory_is_copied"
+ "/because_star_star_is_specified.txt"
)
criterias = [
["zip://my.zip!/simple.file", expected],
[
"zip://my.zip!/target_without_template_type",
"file extension will trigger copy engine\n",
],
[
"zip://my.zip!/target_in_short_form",
(
"it is OK to have a short form, "
+ "but the file to be 'copied' shall have 'copy' extension, "
+ "so as to trigger ContentForwardEngine, 'copy' engine.\n"
),
],
["zip://my.zip!/test-dir/afile.txt", "dir for copying\n"],
[long_url, "dest_directory: source_directory/**\n"],
]
self.run_moban_with_fs(["moban"], folder, criterias)
def test_level_16_group_targets_using_template_type(self):
expected = "test file\n"
folder = "level-16-group-targets-using-template-type"
self.run_moban(["moban"], folder, [("simple.file", expected)])
def test_level_17_force_template_type_from_moban_file(self):
expected = "test file\n"
folder = "level-17-force-template-type-from-moban-file"
self.run_moban(["moban"], folder, [("simple.file", expected)])
def test_level_18_user_defined_template_types(self):
from datetime import datetime
expected = "{date}\n".format(date=datetime.now().strftime("%Y-%m-%d"))
folder = "level-18-user-defined-template-types"
self.run_moban(
["moban"],
folder,
[("a.output", expected), ("b.output", "shijie\n")],
)
def test_level_19_without_group_target(self):
expected = "test file\n"
folder = "level-19-moban-a-sub-group-in-targets"
assertions = [
("simple.file", expected),
("a.output", "I will not be selected in level 19\n"),
]
self.run_moban(["moban"], folder, assertions)
def test_level_19_with_group_target(self):
expected = "test file\n"
folder = "level-19-moban-a-sub-group-in-targets"
self.run_moban(
["moban", "-g", "copy"], folder, [("simple.file", expected)]
)
# make sure only copy target is executed
assert False == os.path.exists("a.output")
def test_level_22_intermediate_targets(self):
expected = "a world\n"
folder = "level-22-intermediate-targets"
self.run_moban(["moban"], folder, [("final", expected)])
assert os.path.exists("intermediate.jj2")
def test_level_25_delete_intermediate_targets(self):
expected = "a world\n"
folder = "level-25-delete-intermediate-targets"
self.run_moban(["moban"], folder, [("final", expected)])
assert not os.path.exists("intermediate.jj2")
assert not os.path.exists("intermediate2.jj2")
assert not os.path.exists("intermediate3.jj2")
def test_level_26_strip_intermediate_targets(self):
expected = "a world"
folder = "level-26-strip-rendered-content"
self.run_moban(["moban"], folder, [("final", expected)])
assert not os.path.exists("intermediate.strip")
def test_level_23_inherit_parent_moban_file(self):
folder = "level-23-inherit-organisational-moban-file"
self.run_moban(
["moban"],
folder,
[("output_a", "I am template a"), ("output_b", "I am template b")],
)
def test_misc_1(self):
expected = "test file\n"
folder = "misc-1-copying-templates"
self.run_moban(["moban"], folder, [("simple.file", expected)])
def _moban(self, folder, expected):
args = [
"moban",
"-c",
"data.yml",
"-t",
"a.template",
"-o",
"moban.output",
]
self.run_moban(args, folder, [("moban.output", expected)])
| 29.903553 | 81 | 0.534035 | ["MIT"] | chfw/moban | tests/test_docs.py | 11,782 | Python |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from parlai.core.agents import Agent
from parlai.core.dict import DictionaryAgent
from torch.autograd import Variable
from torch import optim
import torch.nn as nn
import torch
import os
import random
import pdb
class Seq2seqAgent(Agent):
"""Simple agent which uses an RNN to process incoming text observations.
The RNN generates a vector which is used to represent the input text,
conditioning on the context to generate an output token-by-token.
For more information, see Sequence to Sequence Learning with Neural Networks
`(Sutskever et al. 2014) <https://arxiv.org/abs/1409.3215>`_.
"""
@staticmethod
def add_cmdline_args(argparser):
"""Add command-line arguments specifically for this agent."""
DictionaryAgent.add_cmdline_args(argparser)
agent = argparser.add_argument_group('Seq2Seq Arguments')
agent.add_argument('-hs', '--hiddensize', type=int, default=128,
help='size of the hidden layers and embeddings')
agent.add_argument('-nl', '--numlayers', type=int, default=2,
help='number of hidden layers')
agent.add_argument('-lr', '--learningrate', type=float, default=0.001, help='learning rate')
agent.add_argument('-dr', '--dropout', type=float, default=0.1,
help='dropout rate')
# agent.add_argument('-att', '--attention', type='bool', default=False,
# help='whether to use attention over the context during decoding')
# agent.add_argument('-bi', '--bidirectional', type='bool', default=False,
# help='whether to encode the context with a bidirectional RNN')
agent.add_argument('--no-cuda', action='store_true', default=False,
help='disable GPUs even if available')
agent.add_argument('--gpu', type=int, default=-1,
help='which GPU device to use')
agent.add_argument('-rc', '--rank-candidates', type='bool', default=False,
help='rank candidates if available. this is done by computing the' +
' mean score per token for each candidate and selecting the ' +
'highest scoring one.')
def __init__(self, opt, shared=None):
# initialize defaults first
super().__init__(opt, shared)
if not shared:
# this is not a shared instance of this class, so do full
# initialization. if shared is set, only set up shared members.
# check for cuda
self.use_cuda = not opt.get('no_cuda') and torch.cuda.is_available()
if self.use_cuda:
print('[ Using CUDA ]')
torch.cuda.set_device(opt['gpu'])
if opt.get('model_file') and os.path.isfile(opt['model_file']):
# load model parameters if available
print('Loading existing model params from ' + opt['model_file'])
new_opt, self.states = self.load(opt['model_file'])
# override options with stored ones
opt = self.override_opt(new_opt)
self.dict = DictionaryAgent(opt)
self.id = 'Seq2Seq'
# we use START markers to start our output
self.START = self.dict.start_token
self.START_TENSOR = torch.LongTensor(self.dict.parse(self.START))
# we use END markers to end our output
self.END = self.dict.end_token
self.END_TENSOR = torch.LongTensor(self.dict.parse(self.END))
# get index of null token from dictionary (probably 0)
self.NULL_IDX = self.dict.txt2vec(self.dict.null_token)[0]
# logFile
#self.logFile = opt['logFile']
# store important params directly
hsz = opt['hiddensize']
self.hidden_size = hsz
self.num_layers = opt['numlayers']
self.learning_rate = opt['learningrate']
self.rank = opt['rank_candidates']
self.longest_label = 1
# set up tensors
self.zeros = torch.zeros(self.num_layers, 1, hsz)
self.xs = torch.LongTensor(1, 1)
self.ys = torch.LongTensor(1, 1)
self.cands = torch.LongTensor(1, 1, 1)
self.cand_scores = torch.FloatTensor(1)
self.cand_lengths = torch.LongTensor(1)
# set up modules
self.criterion = nn.NLLLoss()
# lookup table stores word embeddings
            self.lt = nn.Embedding(len(self.dict), hsz,
                                   padding_idx=self.NULL_IDX)
            # encoder captures the input text
            self.encoder = nn.GRU(hsz, hsz, self.num_layers)
            # decoder produces our output states
            self.decoder = nn.GRU(hsz, hsz, self.num_layers)
            # linear layer helps us produce outputs from final decoder state
            self.h2o = nn.Linear(hsz, len(self.dict))
            # dropout on the linear layer helps us generalize
self.dropout = nn.Dropout(opt['dropout'])
# softmax maps output scores to probabilities
self.softmax = nn.LogSoftmax()
# set up optims for each module
lr = opt['learningrate']
"""
self.optims = {
'lt': optim.SGD(self.lt.parameters(), lr=lr),
'encoder': optim.SGD(self.encoder.parameters(), lr=lr),
'decoder': optim.SGD(self.decoder.parameters(), lr=lr),
'h2o': optim.SGD(self.h2o.parameters(), lr=lr),
}
"""
self.optims = {
'lt': optim.Adam(self.lt.parameters(), lr=lr),
'encoder': optim.Adam(self.encoder.parameters(), lr=lr),
'decoder': optim.Adam(self.decoder.parameters(), lr=lr),
'h2o': optim.Adam(self.h2o.parameters(), lr=lr),
}
if hasattr(self, 'states'):
# set loaded states if applicable
self.set_states(self.states)
if self.use_cuda:
self.cuda()
self.episode_done = True
def override_opt(self, new_opt):
"""Print out each added key and each overriden key.
Only override args specific to the model.
"""
model_args = {'hiddensize', 'numlayers'}
for k, v in new_opt.items():
if k not in model_args:
# skip non-model args
continue
if k not in self.opt:
print('Adding new option [ {k}: {v} ]'.format(k=k, v=v))
elif self.opt[k] != v:
print('Overriding option [ {k}: {old} => {v}]'.format(
k=k, old=self.opt[k], v=v))
self.opt[k] = v
return self.opt
def parse(self, text):
return self.dict.txt2vec(text)
def v2t(self, vec):
return self.dict.vec2txt(vec)
def cuda(self):
self.START_TENSOR = self.START_TENSOR.cuda(async=True)
self.END_TENSOR = self.END_TENSOR.cuda(async=True)
self.zeros = self.zeros.cuda(async=True)
self.xs = self.xs.cuda(async=True)
self.ys = self.ys.cuda(async=True)
self.cands = self.cands.cuda(async=True)
self.cand_scores = self.cand_scores.cuda(async=True)
self.cand_lengths = self.cand_lengths.cuda(async=True)
self.criterion.cuda()
self.lt.cuda()
self.encoder.cuda()
self.decoder.cuda()
self.h2o.cuda()
self.dropout.cuda()
self.softmax.cuda()
def hidden_to_idx(self, hidden, dropout=False):
"""Converts hidden state vectors into indices into the dictionary."""
if hidden.size(0) > 1:
raise RuntimeError('bad dimensions of tensor:', hidden)
hidden = hidden.squeeze(0)
        # project the hidden state onto the vocabulary, optionally apply
        # dropout, then take log-softmax to get per-token scores
        scores = self.h2o(hidden)
        if dropout:
            scores = self.dropout(scores)
        scores = self.softmax(scores)
_max_score, idx = scores.max(1)
return idx, scores
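    # Illustrative sketch (added for clarity, not part of the original agent):
    # a standalone version of the projection done in hidden_to_idx, assuming a
    # hidden size of 4 and a 10-word vocabulary (uses the newer LogSoftmax API).
    @staticmethod
    def _hidden_to_idx_sketch():
        h2o = nn.Linear(4, 10)
        softmax = nn.LogSoftmax(dim=1)
        hidden = Variable(torch.zeros(3, 4))  # batch of 3 hidden states
        scores = softmax(h2o(hidden))         # log-probabilities over the vocab
        _max_score, idx = scores.max(1)       # greedy token choice per example
        return idx, scores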
def zero_grad(self):
for optimizer in self.optims.values():
optimizer.zero_grad()
def update_params(self):
for optimizer in self.optims.values():
optimizer.step()
def reset(self):
self.observation = None
self.episode_done = True
def observe(self, observation):
# shallow copy observation (deep copy can be expensive)
observation = observation.copy()
if not self.episode_done:
# if the last example wasn't the end of an episode, then we need to
# recall what was said in that example
prev_dialogue = self.observation['text']
observation['text'] = prev_dialogue + '\n' + observation['text']
self.observation = observation
self.episode_done = observation['episode_done']
return observation
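    # Illustrative sketch (added for clarity, not part of the original agent):
    # shows how observe() stitches consecutive turns of one episode together.
    @classmethod
    def _observe_sketch(cls):
        agent = object.__new__(cls)  # bypass full __init__ for this sketch
        agent.episode_done = True
        agent.observation = None
        first = agent.observe({'text': 'hello', 'episode_done': False})
        second = agent.observe({'text': 'how are you?', 'episode_done': True})
        assert first['text'] == 'hello'
        assert second['text'] == 'hello\nhow are you?'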
def predict(self, xs, ys=None, cands=None):
"""Produce a prediction from our model. Update the model using the
targets if available.
"""
batchsize = len(xs)
text_cand_inds = None
# first encode context
#xes = self.lt(xs).t()
xes = self.lt(xs).transpose(0,1) # Ken
if self.zeros.size(1) != batchsize:
self.zeros.resize_(self.num_layers, batchsize, self.hidden_size).fill_(0)
h0 = Variable(self.zeros)
_output, hn = self.encoder(xes, h0)
# next we use END as an input to kick off our decoder
x = Variable(self.START_TENSOR)
xe = self.lt(x).unsqueeze(1)
xes = xe.expand(xe.size(0), batchsize, xe.size(2))
# list of output tokens for each example in the batch
output_lines = [[] for _ in range(batchsize)]
if ys is not None:
# update the model based on the labels
self.zero_grad()
loss = 0
# keep track of longest label we've ever seen
self.longest_label = max(self.longest_label, ys.size(1))
for i in range(ys.size(1)):
output, hn = self.decoder(xes, hn)
preds, scores = self.hidden_to_idx(output, dropout=True)
y = ys.select(1, i)
loss += self.criterion(scores, y)
# use the true token as the next input instead of predicted
# this produces a biased prediction but better training
xes = self.lt(y).unsqueeze(0)
for b in range(batchsize):
# convert the output scores to tokens
token = self.v2t([preds.data[b][0]])
#token = self.v2t([preds.data[b]]) # Ken
output_lines[b].append(token)
loss.backward()
#pdb.set_trace()
self.update_params()
if random.random() < 0.01:
# sometimes output a prediction for debugging
self.nWord = ys.data.nonzero().size()[0]
self.nll_per_word = loss.data[0]/self.nWord
print('prediction:', ' '.join(output_lines[0]),
'\nlabel:', self.dict.vec2txt(ys.data[0]))
else:
# just produce a prediction without training the model
done = [False for _ in range(batchsize)]
total_done = 0
max_len = 0
if cands:
# score each candidate separately
# cands are exs_with_cands x cands_per_ex x words_per_cand
# cview is total_cands x words_per_cand
cview = cands.view(-1, cands.size(2))
cands_xes = xe.expand(xe.size(0), cview.size(0), xe.size(2))
sz = hn.size()
cands_hn = (
hn.view(sz[0], sz[1], 1, sz[2])
.expand(sz[0], sz[1], cands.size(1), sz[2])
.contiguous()
.view(sz[0], -1, sz[2])
)
cand_scores = Variable(
self.cand_scores.resize_(cview.size(0)).fill_(0))
cand_lengths = Variable(
self.cand_lengths.resize_(cview.size(0)).fill_(0))
for i in range(cview.size(1)):
output, cands_hn = self.decoder(cands_xes, cands_hn)
preds, scores = self.hidden_to_idx(output, dropout=False)
cs = cview.select(1, i)
non_nulls = cs.ne(self.NULL_IDX)
cand_lengths += non_nulls.long()
score_per_cand = torch.gather(scores, 1, cs.unsqueeze(1))
cand_scores += score_per_cand.squeeze() * non_nulls.float()
cands_xes = self.lt(cs).unsqueeze(0)
# set empty scores to -1, so when divided by 0 they become -inf
cand_scores -= cand_lengths.eq(0).float()
# average the scores per token
cand_scores /= cand_lengths.float()
cand_scores = cand_scores.view(cands.size(0), cands.size(1))
srtd_scores, text_cand_inds = cand_scores.sort(1, True)
text_cand_inds = text_cand_inds.data
# now, generate a response from scratch
while(total_done < batchsize) and max_len < self.longest_label:
# keep producing tokens until we hit END or max length for each
# example in the batch
#pdb.set_trace()
output, hn = self.decoder(xes, hn)
preds, scores = self.hidden_to_idx(output, dropout=False)
#xes = self.lt(preds.t()) # original
"""
if (self.opt['mode'] == 'train'):
xes = torch.unsqueeze(self.lt(preds),0) # torch.unsqueeze makes error when prediction
elif(self.opt['mode'] == 'interactive'):
xes = self.lt(preds)
"""
xes = torch.unsqueeze(self.lt(preds),0) # KB-KAIST
max_len += 1
for b in range(batchsize):
if not done[b]:
# only add more tokens for examples that aren't done yet
#pdb.set_trace()
#token = self.v2t(preds.data[b])
token = self.v2t([preds.data[b]]) # KB-KAIST
if token == self.END:
# if we produced END, we're done
done[b] = True
total_done += 1
else:
output_lines[b].append(token)
if (random.random() < 0.1 and self.opt['mode'] == 'train'):
# sometimes output a prediction for debugging
print('prediction:', ' '.join(output_lines[0]))
return output_lines, text_cand_inds
def batchify(self, observations):
"""Convert a list of observations into input & target tensors."""
# valid examples
exs = [ex for ex in observations if 'text' in ex]
# the indices of the valid (non-empty) tensors
valid_inds = [i for i, ex in enumerate(observations) if 'text' in ex]
# set up the input tensors
batchsize = len(exs)
# tokenize the text
xs = None
if batchsize > 0:
parsed = [self.parse(ex['text']) for ex in exs]
min_x_len = min([len(x) for x in parsed])
max_x_len = max([len(x) for x in parsed])
parsed_x_len = min(min_x_len + 12, max_x_len, 48)
# shrink xs to to limit batch computation
parsed = [x[:parsed_x_len] for x in parsed]
xs = torch.LongTensor(batchsize, parsed_x_len).fill_(0)
# pack the data to the right side of the tensor for this model
for i, x in enumerate(parsed):
offset = parsed_x_len - len(x)
for j, idx in enumerate(x):
xs[i][j + offset] = idx
if self.use_cuda:
# copy to gpu
self.xs.resize_(xs.size())
self.xs.copy_(xs, async=True)
xs = Variable(self.xs)
else:
xs = Variable(xs)
# set up the target tensors
ys = None
if batchsize > 0 and any(['labels' in ex for ex in exs]):
# randomly select one of the labels to update on, if multiple
# append END to each label
labels = [random.choice(ex.get('labels', [''])) + ' ' + self.END for ex in exs]
parsed = [self.parse(y) for y in labels]
min_y_len = min(len(y) for y in parsed)
max_y_len = max(len(y) for y in parsed)
# shrink ys to to limit batch computation
parsed_y_len = min(min_y_len + 6, max_y_len)
parsed = [y[:parsed_y_len] for y in parsed]
ys = torch.LongTensor(batchsize, parsed_y_len).fill_(0)
for i, y in enumerate(parsed):
for j, idx in enumerate(y):
ys[i][j] = idx
if self.use_cuda:
# copy to gpu
self.ys.resize_(ys.size())
self.ys.copy_(ys, async=True)
ys = Variable(self.ys)
else:
ys = Variable(ys)
# set up candidates
cands = None
valid_cands = None
if ys is None and self.rank:
# only do ranking when no targets available and ranking flag set
parsed = []
valid_cands = []
for i in valid_inds:
if 'label_candidates' in observations[i]:
# each candidate tuple is a pair of the parsed version and
# the original full string
cs = list(observations[i]['label_candidates'])
parsed.append([self.parse(c) for c in cs])
valid_cands.append((i, cs))
if len(parsed) > 0:
# TODO: store lengths of cands separately, so don't have zero
# padding for varying number of cands per example
# found cands, pack them into tensor
max_c_len = max(max(len(c) for c in cs) for cs in parsed)
max_c_cnt = max(len(cs) for cs in parsed)
cands = torch.LongTensor(len(parsed), max_c_cnt, max_c_len).fill_(0)
for i, cs in enumerate(parsed):
for j, c in enumerate(cs):
for k, idx in enumerate(c):
cands[i][j][k] = idx
if self.use_cuda:
# copy to gpu
self.cands.resize_(cands.size())
self.cands.copy_(cands, async=True)
cands = Variable(self.cands)
else:
cands = Variable(cands)
return xs, ys, valid_inds, cands, valid_cands
def batch_act(self, observations):
batchsize = len(observations)
# initialize a table of replies with this agent's id
batch_reply = [{'id': self.getID()} for _ in range(batchsize)]
# convert the observations into batches of inputs and targets
# valid_inds tells us the indices of all valid examples
# e.g. for input [{}, {'text': 'hello'}, {}, {}], valid_inds is [1]
# since the other three elements had no 'text' field
xs, ys, valid_inds, cands, valid_cands = self.batchify(observations)
if xs is None:
# no valid examples, just return the empty responses we set up
return batch_reply
# produce predictions either way, but use the targets if available
predictions, text_cand_inds = self.predict(xs, ys, cands)
for i in range(len(predictions)):
# map the predictions back to non-empty examples in the batch
# we join with spaces since we produce tokens one at a time
curr = batch_reply[valid_inds[i]]
curr['text'] = ' '.join(c for c in predictions[i] if c != self.END
and c != self.dict.null_token)
if text_cand_inds is not None:
for i in range(len(valid_cands)):
order = text_cand_inds[i]
batch_idx, curr_cands = valid_cands[i]
curr = batch_reply[batch_idx]
curr['text_candidates'] = [curr_cands[idx] for idx in order
if idx < len(curr_cands)]
return batch_reply
def act(self):
# call batch_act with this batch of one
return self.batch_act([self.observation])[0]
def save(self, path=None):
path = self.opt.get('model_file', None) if path is None else path
if path and hasattr(self, 'lt'):
model = {}
model['lt'] = self.lt.state_dict()
model['encoder'] = self.encoder.state_dict()
model['decoder'] = self.decoder.state_dict()
model['h2o'] = self.h2o.state_dict()
model['longest_label'] = self.longest_label
model['opt'] = self.opt
with open(path, 'wb') as write:
torch.save(model, write)
def shutdown(self):
"""Save the state of the model when shutdown."""
path = self.opt.get('model_file', None)
if path is not None:
self.save(path + '.shutdown_state')
super().shutdown()
def load(self, path):
"""Return opt and model states."""
with open(path, 'rb') as read:
model = torch.load(read)
return model['opt'], model
def set_states(self, states):
"""Set the state dicts of the modules from saved states."""
#pdb.set_trace()
self.lt.load_state_dict(states['lt'])
self.encoder.load_state_dict(states['encoder'])
self.decoder.load_state_dict(states['decoder'])
self.h2o.load_state_dict(states['h2o'])
        self.longest_label = states['longest_label']
| 42.251429 | 105 | 0.552069 | ["BSD-3-Clause"] | gmkim90/KBKAIST_Chatbot | parlai/agents/seq2seq/seq2seq.py | 22,182 | Python |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for cross_device_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as pycoll
import threading
from tensorflow.python.distribute import all_reduce
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nccl_ops
def aggregate_gradients_using_nccl(replica_grads):
"""Aggregate gradients using nccl allreduce."""
agg_all_g_and_v = []
for single_g_and_v in zip(*replica_grads):
single_grads = [g for g, _ in single_g_and_v]
agg_grads = nccl_ops.all_sum(single_grads)
agg_all_g_and_v.append(
[(g, v) for g, (_, v) in zip(agg_grads, single_g_and_v)])
agg_all_g_and_v = list(zip(*agg_all_g_and_v))
return agg_all_g_and_v
def aggregate_gradients_using_hierarchical_copy(avail_devices, replica_grads):
"""Aggregate gradients using hierarchical copies.
Args:
avail_devices: available GPU devices.
replica_grads: List of lists of (gradient, variable) tuples. The outer list
is over replicas. The inner list is over individual gradients.
Returns:
The list of (aggregated_gradient, variable), where the gradient has been
summed across all replicas and the variable is chosen from the first
replica.
"""
# This only works for DGX-1 type of machine topology
# Device peer to peer matrix
# DMA: 0 1 2 3 4 5 6 7
# 0: Y Y Y Y Y N N N
# 1: Y Y Y Y N Y N N
# 2: Y Y Y Y N N Y N
# 3: Y Y Y Y N N N Y
# 4: Y N N N Y Y Y Y
# 5: N Y N N Y Y Y Y
# 6: N N Y N Y Y Y Y
# 7: N N N Y Y Y Y Y
agg_grads = []
num_devices = len(avail_devices)
# In the special case of DGX-1 machine topology, the two groups have equal
# size.
group_size = num_devices // 2
for i, single_grads in enumerate(zip(*replica_grads)):
group_0_main_device = i % num_devices
group_1_main_device = (group_0_main_device + group_size) % num_devices
if group_0_main_device < group_size:
group_0_begin = 0
group_1_begin = group_size
else:
group_0_begin = group_size
group_1_begin = 0
# Aggregate the first group.
group_0_device_grads = single_grads[group_0_begin:
group_0_begin + group_size]
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads, _ = aggregate_single_gradient_using_copy(
group_0_device_grads, False, False)
# Aggregate the second group.
group_1_device_grads = single_grads[group_1_begin:
group_1_begin + group_size]
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads, _ = aggregate_single_gradient_using_copy(
group_1_device_grads, False, False)
# Aggregate between the groups.
with ops.device(avail_devices[group_0_main_device]):
(agg_total_grads, _), _ = aggregate_single_gradient_using_copy(
[group_0_agg_grads, group_1_agg_grads], False, False)
# Broadcast the result back into the root of each group.
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads_bcast = array_ops.identity(agg_total_grads)
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads_bcast = array_ops.identity(agg_total_grads)
agg_grads_bcast = []
for j in range(len(single_grads)):
with ops.device(avail_devices[j]):
# Broadcast the result back to each member in the group from the root.
if (group_0_main_device < group_size) == (j < group_size):
src_device_grad = group_0_agg_grads_bcast
else:
src_device_grad = group_1_agg_grads_bcast
agg_grads_bcast.append(array_ops.identity(src_device_grad))
agg_grads.append(
[(g, v) for g, (_, v) in zip(agg_grads_bcast, single_grads)])
agg_grads = list(zip(*agg_grads))
return agg_grads
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
check_inf_nan):
"""Calculate the average gradient for a shared variable across all replicas.
Note that this function provides a synchronization point across all replicas.
Args:
grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
(gradient, variable) pair within the outer list represents the gradient
of the variable calculated for a single replica, and the number of pairs
equals the number of replicas.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all replicas. The variable is chosen
from the first replica. The has_nan_or_inf indicates the grads has nan or
inf.
"""
grads = [g for g, _ in grad_and_vars]
grad = math_ops.add_n(grads)
if use_mean and len(grads) > 1:
grad = array_ops.multiply(grad, 1.0 / len(grads))
v = grad_and_vars[0][1]
if check_inf_nan:
has_nan_or_inf = array_ops.logical_not(
array_ops.reduce_all(array_ops.is_finite(grads)))
return (grad, v), has_nan_or_inf
else:
return (grad, v), None
def group_device_names(devices, group_size):
"""Group device names into groups of group_size.
Args:
devices: a list of canonical device strings.
group_size: integer which is equal to or greater than 1.
Returns:
list of lists of devices, where each inner list is group_size long,
and each device appears at least once in an inner list. If
len(devices) % group_size == 0 then each device will appear exactly once.
Raises:
ValueError: if group_size > len(devices)
"""
num_devices = len(devices)
if group_size > num_devices:
raise ValueError(
'only %d devices, but group_size=%d' % (num_devices, group_size))
num_groups = (
num_devices // group_size + (1 if (num_devices % group_size != 0) else 0))
groups = [[] for i in range(num_groups)]
for i in range(num_groups * group_size):
groups[i % num_groups].append(devices[i % num_devices])
return groups
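# Illustrative sketch (added for clarity, not part of the original module):
# three devices grouped in pairs; the first device is reused to fill the gap.
def _group_device_names_example():
  groups = group_device_names(['/gpu:0', '/gpu:1', '/gpu:2'], 2)
  assert groups == [['/gpu:0', '/gpu:2'], ['/gpu:1', '/gpu:0']]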
def split_grads_by_size(threshold_size, device_grads):
"""Break gradients into two sets according to tensor size.
Args:
threshold_size: int size cutoff for small vs large tensor.
device_grads: List of lists of (gradient, variable) tuples. The outer
list is over devices. The inner list is over individual gradients.
Returns:
small_grads: Subset of device_grads where shape is <= threshold_size
elements.
large_grads: Subset of device_grads where shape is > threshold_size
elements.
"""
small_grads = []
large_grads = []
for dl in device_grads:
small_dl = []
large_dl = []
for (g, v) in dl:
tensor_size = g.get_shape().num_elements()
if tensor_size <= threshold_size:
small_dl.append([g, v])
else:
large_dl.append([g, v])
if small_dl:
small_grads.append(small_dl)
if large_dl:
large_grads.append(large_dl)
return small_grads, large_grads
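# Illustrative sketch (added for clarity, not part of the original module):
# a tiny stand-in object replaces a real tensor just to show the size split.
class _FakeGrad(object):
  def __init__(self, num_elements):
    self._num_elements = num_elements
  def get_shape(self):
    return self  # reuse self so .num_elements() below works
  def num_elements(self):
    return self._num_elements
def _split_grads_by_size_example():
  small, large = _FakeGrad(10), _FakeGrad(1000)
  device_grads = [[(small, 'v0'), (large, 'v1')]]
  small_grads, large_grads = split_grads_by_size(100, device_grads)
  assert small_grads == [[[small, 'v0']]]
  assert large_grads == [[[large, 'v1']]]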
# threading.Lock() and threading.local() cannot be pickled and therefore cannot
# be a field of CollectiveKeys. Right now _thread_local is not necessary to be
# an instance member of CollectiveKeys since we always create a new thread for
# each replica.
_lock = threading.Lock()
_thread_local = threading.local()
# TODO(yuefengz): use random key starts to avoid reusing keys?
class CollectiveKeys(object):
"""Class that manages collective keys.
We need to manage three different keys for collective:
*Group key*: an integer key to identify the set of cooperative devices.
  Collective ops working under the same set of devices must use the same group
  key.
  *Instance key*: an integer key to identify the set of corresponding tensors
  on different devices in a device group that need to be all-reduced.
"Graph key": an integer key that is unique key graph. This is used to support
multiple graphs per client session. It must be non-zero and set in the
`config` argument of each call to `session.run`.
"""
def __init__(self,
group_key_start=1,
instance_key_start=100,
instance_key_with_id_start=10000):
"""Initializes the object.
Args:
group_key_start: the starting integer of group key.
instance_key_start: the starting integer of instance key.
instance_key_with_id_start: the starting integer of instance key that is
recorded with an id.
"""
self._group_key = group_key_start
self._group_key_table = dict()
# For instance keys with ids
self._instance_key_id_to_key_table = dict()
self._instance_key_with_id_counter = instance_key_with_id_start
# For instance keys without ids
self._instance_key_start = instance_key_start
def _get_thread_local_object(self):
# We make instance key without key ids thread local so that it will work
# with MirroredStrategy and distribute coordinator.
if not hasattr(_thread_local, 'instance_key'):
_thread_local.instance_key = self._instance_key_start
return _thread_local
def get_group_key(self, devices):
"""Returns a group key for the set of devices.
Args:
devices: list of strings naming devices in a collective group.
Returns:
int key uniquely identifying the set of device names.
"""
parsed = [pydev.DeviceSpec.from_string(d) for d in devices]
# In the between-graph replicated training, different workers need to get
# the same device key. So we remove the task_type and task_id from the
# devices.
# TODO(yuefengz): in the in-graph replicated training, we need to include
# task_type and task_id.
names = sorted(['%s:%d' % (d.device_type, d.device_index) for d in parsed])
key_id = ','.join(names)
with _lock:
if key_id not in self._group_key_table:
new_key = self._group_key
self._group_key += 1
self._group_key_table[key_id] = new_key
return self._group_key_table[key_id]
def get_instance_key(self, key_id=None):
"""Returns a new instance key for use in defining a collective op.
Args:
key_id: optional string. If set, key will be recorded and the same key
will be returned when the same key_id is provided. If not, an increasing
instance key will be returned.
"""
if key_id:
with _lock:
if key_id not in self._instance_key_id_to_key_table:
self._instance_key_with_id_counter += 1
self._instance_key_id_to_key_table[key_id] = (
self._instance_key_with_id_counter)
return self._instance_key_id_to_key_table[key_id]
else:
v = self._get_thread_local_object().instance_key
self._get_thread_local_object().instance_key += 1
return v
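# Illustrative sketch (added for clarity, not part of the original module):
# identical device sets (ignoring task) share one group key, and instance keys
# recorded under the same id are stable across calls.
def _collective_keys_example():
  keys = CollectiveKeys()
  g1 = keys.get_group_key(['/job:worker/task:0/device:GPU:0',
                           '/job:worker/task:0/device:GPU:1'])
  g2 = keys.get_group_key(['/job:worker/task:1/device:GPU:0',
                           '/job:worker/task:1/device:GPU:1'])
  assert g1 == g2
  assert keys.get_instance_key('layer0') == keys.get_instance_key('layer0')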
def build_collective_reduce(input_tensors,
num_workers,
collective_keys,
reduction_op='Add',
unary_op='Id'):
"""Build a subgraph that does one full all-reduce, using the collective Op.
Args:
input_tensors: tensors within a single worker graph that are to be reduced
together; must be one per device.
num_workers: total number of workers with identical independent graphs that
will be doing this same reduction. The reduction will actually include
the corresponding tensors at all these workers.
collective_keys: a CollectiveKeys object.
reduction_op: string naming the reduction op.
unary_op: string naming the unary final op.
Returns:
An array of final tensors, one per device, computed by the full reduction.
Raises:
ValueError: There must be at least two tensors over all the workers.
"""
group_size = len(input_tensors) * num_workers
if group_size < 2:
raise ValueError('num_workers * len(input_tensors) must be 2 or greater')
devices = [t.device for t in input_tensors]
num_devices = len(devices)
group_key = collective_keys.get_group_key(devices)
instance_key = collective_keys.get_instance_key()
out_tensors = []
subdiv_offsets = [0] # TODO(tucker): maybe support non-default subdiv spec
for d in range(num_devices):
with ops.device(devices[d]):
reduce_op = collective_ops.all_reduce(
input_tensors[d], group_size, group_key, instance_key, reduction_op,
unary_op, subdiv_offsets)
out_tensors.append(reduce_op)
return out_tensors
def sum_grad_and_var_all_reduce(grad_and_vars,
num_workers,
alg,
gpu_indices,
aux_devices=None,
num_shards=1):
"""Apply all-reduce algorithm over specified gradient tensors."""
with ops.name_scope('allreduce'):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
scaled_grads = [g for g, _ in grad_and_vars]
if alg == 'nccl':
summed_grads = nccl_ops.all_sum(scaled_grads)
elif alg == 'xring':
summed_grads = all_reduce.build_ring_all_reduce(
scaled_grads, num_workers, num_shards, gpu_indices, math_ops.add)
elif alg == 'nccl/xring':
summed_grads = all_reduce.build_nccl_then_ring(scaled_grads, num_shards,
math_ops.add)
elif alg == 'nccl/rechd':
summed_grads = all_reduce.build_nccl_then_recursive_hd(
scaled_grads, math_ops.add)
elif alg == 'nccl/pscpu':
summed_grads = all_reduce.build_nccl_then_shuffle(
scaled_grads, aux_devices, math_ops.add, math_ops.add_n)
elif alg == 'pscpu/pscpu':
second_gather_devices = aux_devices[:num_shards]
summed_grads = all_reduce.build_shuffle_then_shuffle(
scaled_grads, aux_devices, second_gather_devices, math_ops.add_n)
elif alg in ['pscpu', 'psgpu']:
summed_grads = all_reduce.build_shuffle_all_reduce(
scaled_grads, aux_devices, math_ops.add_n)
else:
raise ValueError('unsupported all_reduce alg: ', alg)
result = []
for (_, v), g in zip(grad_and_vars, summed_grads):
result.append([g, v])
return result
def sum_gradients_all_reduce(dev_prefixes, replica_grads, num_workers, alg,
num_shards, gpu_indices):
"""Apply all-reduce algorithm over specified gradient tensors.
Args:
dev_prefixes: list of prefix strings to use to generate PS device names.
replica_grads: the gradients to reduce.
num_workers: number of worker processes across entire job.
alg: the all-reduce algorithm to apply.
num_shards: alg-specific sharding factor.
gpu_indices: indices of local GPUs in order usable for ring-reduce.
Returns:
list of reduced tensors
"""
alg_contains_shuffle = any([n in alg for n in ['pscpu', 'psgpu']])
is_hierarchical = '/' in alg
if 'pscpu' in alg:
aux_devices = [prefix + '/cpu:0' for prefix in dev_prefixes]
elif 'psgpu' in alg:
aux_devices = [
prefix + '/gpu:%d' % i
for i in range(len(gpu_indices))
for prefix in dev_prefixes
]
else:
aux_devices = ['/job:localhost/cpu:0']
# Auxiliary devices for hierarchical all-reduces.
aux_device_groups = group_device_names(
aux_devices, num_shards if alg_contains_shuffle else 1)
group_index = 0
reduced_gv_list = []
for grad_and_vars in zip(*replica_grads):
reduced_gv_list.append(
sum_grad_and_var_all_reduce(
grad_and_vars, num_workers, alg, gpu_indices, aux_devices
if is_hierarchical else aux_device_groups[group_index], num_shards))
group_index = (group_index + 1) % len(aux_device_groups)
new_replica_grads = [list(x) for x in zip(*reduced_gv_list)]
return new_replica_grads
def extract_ranges(index_list, range_size_limit=32):
"""Extract consecutive ranges and singles from index_list.
Args:
index_list: List of monotone increasing non-negative integers.
range_size_limit: Largest size range to return. If a larger
consecutive range exists, it will be returned as multiple
ranges.
Returns:
(ranges, singles) where ranges is a list of [first, last] pairs of
consecutive elements in index_list, and singles is all of the
other elements, in original order.
"""
if not index_list:
return [], []
first = index_list[0]
last = first
ranges = []
singles = []
for i in index_list[1:]:
if i == last + 1 and (last - first) <= range_size_limit:
last = i
else:
if last > first:
ranges.append([first, last])
else:
singles.append(first)
first = i
last = i
if last > first:
ranges.append([first, last])
else:
singles.append(first)
return ranges, singles
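# Illustrative sketch (added for clarity, not part of the original module):
# consecutive indices fold into [first, last] pairs, the rest stay singles.
def _extract_ranges_example():
  ranges, singles = extract_ranges([0, 1, 2, 5, 7, 8])
  assert ranges == [[0, 2], [7, 8]]
  assert singles == [5]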
GradPackTuple = pycoll.namedtuple('GradPackTuple', 'indices vars shapes')
def pack_range(key, packing, grad_vars, rng):
"""Form the concatenation of a specified range of gradient tensors.
Args:
key: Value under which to store meta-data in packing that will be used
later to restore the grad_var list structure.
packing: Dict holding data describing packed ranges of small tensors.
grad_vars: List of (grad, var) pairs for one replica.
rng: A pair of integers giving the first, last indices of a consecutive
range of tensors to be packed.
Returns:
A tensor that is the concatenation of all the specified small tensors.
"""
to_pack = grad_vars[rng[0]:rng[1] + 1]
members = []
variables = []
restore_shapes = []
with ops.name_scope('pack'):
for g, v in to_pack:
variables.append(v)
restore_shapes.append(g.shape)
with ops.device(g.device):
members.append(array_ops.reshape(g, [-1]))
packing[key] = GradPackTuple(
indices=range(rng[0], rng[1] + 1),
vars=variables,
shapes=restore_shapes)
with ops.device(members[0].device):
return array_ops.concat(members, 0)
def unpack_grad_tuple(gv, gpt):
"""Unpack a previously packed collection of gradient tensors.
Args:
gv: A (grad, var) pair to be unpacked.
gpt: A GradPackTuple describing the packing operation that produced gv.
Returns:
A list of (grad, var) pairs corresponding to the values that were
originally packed into gv, maybe following subsequent operations like
reduction.
"""
elt_widths = [x.num_elements() for x in gpt.shapes]
with ops.device(gv[0][0].device):
with ops.name_scope('unpack'):
splits = array_ops.split(gv[0], elt_widths)
unpacked_gv = []
for idx, s in enumerate(splits):
unpacked_gv.append((array_ops.reshape(s, gpt.shapes[idx]),
gpt.vars[idx]))
return unpacked_gv
def pack_small_tensors(replica_grads, max_bytes=0, max_group=0):
"""Concatenate small gradient tensors together for reduction.
Args:
replica_grads: List of lists of (gradient, variable) tuples.
max_bytes: Int giving max number of bytes in a tensor that
may be considered small.
max_group: Int giving max number of small tensors that may be
concatenated into one new tensor.
Returns:
new_replica_grads, packing where new_replica_grads is identical to
replica_grads except that all feasible small_tensors have been removed
from their places and concatenated into larger tensors that are
now in the front of the list for each replica, and packing contains
the data necessary to restore the replica_grads structure.
Look through the first replica for gradients of the same type (float),
and small size, that are all sequential. For each such group,
replace by a new tensor that is a flattened concatenation. Note
that the corresponding variable will be absent, which doesn't matter
because it isn't used during all-reduce.
Requires:
Every gv_list in replicas must have isomorphic structure including identical
tensor sizes and types.
"""
small_indices = []
large_indices = []
for idx, (g, _) in enumerate(replica_grads[0]):
if g.dtype == dtypes.float32 and (4 * g.shape.num_elements()) <= max_bytes:
small_indices.append(idx)
else:
large_indices.append(idx)
small_ranges, small_singles = extract_ranges(
small_indices, range_size_limit=max_group)
large_indices = sorted(large_indices + small_singles)
num_gv = len(replica_grads[0])
packing = {}
if small_ranges:
new_replica_grads = []
for dev_idx, gv_list in enumerate(replica_grads):
assert len(gv_list) == num_gv
new_gv_list = []
for r in small_ranges:
key = '%d:%d' % (dev_idx, len(new_gv_list))
new_gv_list.append((pack_range(key, packing, gv_list, r),
'packing_var_placeholder'))
for i in large_indices:
new_gv_list.append(gv_list[i])
new_replica_grads.append(new_gv_list)
return new_replica_grads, packing
else:
return replica_grads, None
def unpack_small_tensors(replica_grads, packing):
"""Undo the structure alterations to replica_grads done by pack_small_tensors.
Args:
replica_grads: List of List of (grad, var) tuples.
packing: A dict generated by pack_small_tensors describing the changes
it made to replica_grads.
Returns:
new_replica_grads: identical to replica_grads except that concatenations
of small tensors have been split apart and returned to their original
positions, paired with their original variables.
"""
if not packing:
return replica_grads
new_replica_grads = []
num_devices = len(replica_grads)
num_packed = len(packing.keys()) // num_devices
for dev_idx, gv_list in enumerate(replica_grads):
gv_list = list(gv_list)
new_gv_list = gv_list[num_packed:]
for i in range(num_packed):
k = '%d:%d' % (dev_idx, i)
gpt = packing[k]
gv = unpack_grad_tuple(gv_list[i], gpt)
for gi, idx in enumerate(gpt.indices):
assert idx == gpt.indices[gi]
new_gv_list.insert(idx, gv[gi])
new_replica_grads.append(new_gv_list)
return new_replica_grads
def aggregate_tensors_or_indexed_slices(values, accumulation_fn=math_ops.add_n):
"""Aggregate tensors using `accumulation_fn` and IndexedSlices via concat."""
if any(isinstance(v, ops.IndexedSlices) for v in values):
return gradients_impl._AggregateIndexedSlicesGradients(values) # pylint: disable=protected-access
else:
return accumulation_fn(values)
def divide_by_n_tensors_or_indexed_slices(value, n):
if isinstance(value, ops.IndexedSlices):
value = gradients_impl._HandleNestedIndexedSlices(value) # pylint: disable=protected-access
return ops.IndexedSlices(
value.values / n, value.indices, value.dense_shape)
else:
return value / n
def copy_tensor_or_indexed_slices_to_device(value, device):
with ops.device(device):
if isinstance(value, ops.IndexedSlices):
copied_values = array_ops.identity(value.values)
copied_indices = array_ops.identity(value.indices)
copied_shape = array_ops.identity(value.dense_shape)
result = ops.IndexedSlices(copied_values, copied_indices, copied_shape)
else:
result = array_ops.identity(value)
return result
def contains_indexed_slices(value):
"""Check whether the value is `IndexedSlices` or contains `IndexedSlices`."""
if isinstance(value, ops.IndexedSlices):
return True
elif isinstance(value, (list, tuple)) and value:
return any(contains_indexed_slices(v) for v in value)
elif isinstance(value, value_lib.DistributedValues):
return contains_indexed_slices(list(value._index.values())) # pylint: disable=protected-access
else:
return False
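# Illustrative sketch (added for clarity, not part of the original module):
# a dense tensor is not detected, a list containing IndexedSlices is.
def _contains_indexed_slices_example():
  dense = array_ops.zeros([2, 2])
  sparse = ops.IndexedSlices(array_ops.zeros([1, 2]),
                             array_ops.zeros([1], dtype=dtypes.int64))
  assert not contains_indexed_slices([dense, dense])
  assert contains_indexed_slices([dense, sparse])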
| 37.004464 | 102 | 0.699441 | ["Apache-2.0"] | DeuroIO/Deuro-tensorflow | tensorflow/python/distribute/cross_device_utils.py | 24,867 | Python |
from time import sleep
from ec2mc import __main__
def test_user_commands():
"""test all user commands."""
assert __main__.main([
"user", "create", "ec2mc_test_user", "setup_users", "--default"
]) is not False
sleep(5)
assert __main__.main([
"user", "list"
]) is not False
assert __main__.main([
"user", "set_group", "EC2MC_TEST_USER", "basic_users"
]) is not False
assert __main__.main([
"user", "be", "takingitcasual"
]) is not False
assert __main__.main([
"user", "rotate_key", "Ec2Mc_TeSt_UsEr"
]) is not False
assert __main__.main([
"user", "delete", "eC2mC_tEsT_uSeR"
]) is not False
| 26.730769 | 71 | 0.604317 | ["MIT"] | TakingItCasual/easymc | tests/test_user_commands.py | 695 | Python |
import numpy as np
import tensorlayerx as tlx
import gammagl.mpops as mpops
from .num_nodes import maybe_num_nodes
from .check import check_is_numpy
def coalesce(edge_index, edge_attr=None, num_nodes=None, reduce="add", is_sorted=False, sort_by_row=True):
"""Row-wise sorts :obj:`edge_index` and removes its duplicated entries.
Duplicate entries in :obj:`edge_attr` are merged by scattering them
together according to the given :obj:`reduce` option.
Args:
edge_index (LongTensor): The edge indices.
edge_attr (Tensor or List[Tensor], optional): Edge weights or multi-
dimensional edge features.
If given as a list, will re-shuffle and remove duplicates for all
its entries. (default: :obj:`None`)
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)
reduce (string, optional): The reduce operation to use for merging edge
features (:obj:`"add"`, :obj:`"mean"`, :obj:`"min"`, :obj:`"max"`,
:obj:`"mul"`). (default: :obj:`"add"`)
is_sorted (bool, optional): If set to :obj:`True`, will expect
:obj:`edge_index` to be already sorted row-wise.
sort_by_row (bool, optional): If set to :obj:`False`, will sort
:obj:`edge_index` column-wise.
:rtype: :class:`LongTensor` if :attr:`edge_attr` is :obj:`None`, else
(:class:`LongTensor`, :obj:`Tensor` or :obj:`List[Tensor]]`)
"""
if tlx.is_tensor(edge_index):
edge_index = tlx.convert_to_numpy(edge_index)
nnz = edge_index.shape[1]
num_nodes = maybe_num_nodes(edge_index, num_nodes)
idx = np.zeros(nnz+1)
idx[0] = -1
idx[1:] = edge_index[1 - int(sort_by_row)]
idx[1:] = (np.add(np.multiply(idx[1:], num_nodes), edge_index[int(sort_by_row)]))
if not is_sorted:
perm = np.argsort(idx[1:])
idx[1:] = np.sort(idx[1:])
edge_index = edge_index[:, perm]
if edge_attr is not None and tlx.ops.is_tensor(edge_attr):
edge_attr = tlx.gather(edge_attr, tlx.convert_to_tensor(perm), axis=0)
elif edge_attr is not None and check_is_numpy(edge_attr):
edge_attr = edge_attr[perm]
elif edge_attr is not None: # edge_attr is List.
edge_attr = [tlx.gather(e, perm, axis=0) for e in edge_attr]
mask = idx[1:] > idx[:-1]
# Only perform expensive merging in case there exists duplicates:
if mask.all():
edge_index = tlx.convert_to_tensor(edge_index, dtype=tlx.int64)
return edge_index if edge_attr is None else (edge_index, edge_attr)
edge_index = edge_index[:, mask]
edge_index = tlx.convert_to_tensor(edge_index, dtype=tlx.int64)
if edge_attr is None:
return edge_index
idx = np.arange(0, nnz)
idx = tlx.convert_to_tensor(idx - (1 - mask).cumsum(axis=0))
if tlx.ops.is_tensor(edge_attr):
edge_attr = mpops.segment_sum(edge_attr, idx)
        return edge_index, edge_attr
| 42.704225 | 106 | 0.649406 | ["Apache-2.0"] | BUPT-GAMMA/GammaGL | gammagl/utils/coalesce.py | 3,032 | Python |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mox3 import mox
from nova.compute import power_state
from nova.compute import utils as compute_utils
from nova.conductor.tasks import live_migrate
from nova import db
from nova import exception
from nova import objects
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests.unit import fake_instance
class LiveMigrationTaskTestCase(test.NoDBTestCase):
def setUp(self):
super(LiveMigrationTaskTestCase, self).setUp()
self.context = "context"
self.instance_host = "host"
self.instance_uuid = "uuid"
self.instance_image = "image_ref"
db_instance = fake_instance.fake_db_instance(
host=self.instance_host,
uuid=self.instance_uuid,
power_state=power_state.RUNNING,
memory_mb=512,
image_ref=self.instance_image)
self.instance = objects.Instance._from_db_object(
self.context, objects.Instance(), db_instance)
self.destination = "destination"
self.block_migration = "bm"
self.disk_over_commit = "doc"
self._generate_task()
def _generate_task(self):
self.task = live_migrate.LiveMigrationTask(self.context,
self.instance, self.destination, self.block_migration,
self.disk_over_commit)
def test_execute_with_destination(self):
self.mox.StubOutWithMock(self.task, '_check_host_is_up')
self.mox.StubOutWithMock(self.task, '_check_requested_destination')
self.mox.StubOutWithMock(self.task.compute_rpcapi, 'live_migration')
self.task._check_host_is_up(self.instance_host)
self.task._check_requested_destination()
self.task.compute_rpcapi.live_migration(self.context,
host=self.instance_host,
instance=self.instance,
dest=self.destination,
block_migration=self.block_migration,
migrate_data=None).AndReturn("bob")
self.mox.ReplayAll()
self.assertEqual("bob", self.task.execute())
def test_execute_without_destination(self):
self.destination = None
self._generate_task()
self.assertIsNone(self.task.destination)
self.mox.StubOutWithMock(self.task, '_check_host_is_up')
self.mox.StubOutWithMock(self.task, '_find_destination')
self.mox.StubOutWithMock(self.task.compute_rpcapi, 'live_migration')
self.task._check_host_is_up(self.instance_host)
self.task._find_destination().AndReturn("found_host")
self.task.compute_rpcapi.live_migration(self.context,
host=self.instance_host,
instance=self.instance,
dest="found_host",
block_migration=self.block_migration,
migrate_data=None).AndReturn("bob")
self.mox.ReplayAll()
self.assertEqual("bob", self.task.execute())
def test_check_instance_is_running_passes(self):
self.task._check_instance_is_running()
def test_check_instance_is_running_fails_when_shutdown(self):
self.task.instance['power_state'] = power_state.SHUTDOWN
self.assertRaises(exception.InstanceNotRunning,
self.task._check_instance_is_running)
def test_check_instance_host_is_up(self):
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up')
db.service_get_by_compute_host(self.context,
"host").AndReturn("service")
self.task.servicegroup_api.service_is_up("service").AndReturn(True)
self.mox.ReplayAll()
self.task._check_host_is_up("host")
def test_check_instance_host_is_up_fails_if_not_up(self):
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up')
db.service_get_by_compute_host(self.context,
"host").AndReturn("service")
self.task.servicegroup_api.service_is_up("service").AndReturn(False)
self.mox.ReplayAll()
self.assertRaises(exception.ComputeServiceUnavailable,
self.task._check_host_is_up, "host")
def test_check_instance_host_is_up_fails_if_not_found(self):
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
db.service_get_by_compute_host(self.context,
"host").AndRaise(exception.NotFound)
self.mox.ReplayAll()
self.assertRaises(exception.ComputeServiceUnavailable,
self.task._check_host_is_up, "host")
def test_check_requested_destination(self):
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(self.task, '_get_compute_info')
self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up')
self.mox.StubOutWithMock(self.task.compute_rpcapi,
'check_can_live_migrate_destination')
db.service_get_by_compute_host(self.context,
self.destination).AndReturn("service")
self.task.servicegroup_api.service_is_up("service").AndReturn(True)
hypervisor_details = {
"hypervisor_type": "a",
"hypervisor_version": 6.1,
"free_ram_mb": 513
}
self.task._get_compute_info(self.destination)\
.AndReturn(hypervisor_details)
self.task._get_compute_info(self.instance_host)\
.AndReturn(hypervisor_details)
self.task._get_compute_info(self.destination)\
.AndReturn(hypervisor_details)
self.task.compute_rpcapi.check_can_live_migrate_destination(
self.context, self.instance, self.destination,
self.block_migration, self.disk_over_commit).AndReturn(
"migrate_data")
self.mox.ReplayAll()
self.task._check_requested_destination()
self.assertEqual("migrate_data", self.task.migrate_data)
def test_check_requested_destination_fails_with_same_dest(self):
self.task.destination = "same"
self.task.source = "same"
self.assertRaises(exception.UnableToMigrateToSelf,
self.task._check_requested_destination)
def test_check_requested_destination_fails_when_destination_is_up(self):
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
db.service_get_by_compute_host(self.context,
self.destination).AndRaise(exception.NotFound)
self.mox.ReplayAll()
self.assertRaises(exception.ComputeServiceUnavailable,
self.task._check_requested_destination)
def test_check_requested_destination_fails_with_not_enough_memory(self):
self.mox.StubOutWithMock(self.task, '_check_host_is_up')
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.task._check_host_is_up(self.destination)
db.service_get_by_compute_host(self.context,
self.destination).AndReturn({
"compute_node": [{"free_ram_mb": 511}]
})
self.mox.ReplayAll()
self.assertRaises(exception.MigrationPreCheckError,
self.task._check_requested_destination)
def test_check_requested_destination_fails_with_hypervisor_diff(self):
self.mox.StubOutWithMock(self.task, '_check_host_is_up')
self.mox.StubOutWithMock(self.task,
'_check_destination_has_enough_memory')
self.mox.StubOutWithMock(self.task, '_get_compute_info')
self.task._check_host_is_up(self.destination)
self.task._check_destination_has_enough_memory()
self.task._get_compute_info(self.instance_host).AndReturn({
"hypervisor_type": "b"
})
self.task._get_compute_info(self.destination).AndReturn({
"hypervisor_type": "a"
})
self.mox.ReplayAll()
self.assertRaises(exception.InvalidHypervisorType,
self.task._check_requested_destination)
def test_check_requested_destination_fails_with_hypervisor_too_old(self):
self.mox.StubOutWithMock(self.task, '_check_host_is_up')
self.mox.StubOutWithMock(self.task,
'_check_destination_has_enough_memory')
self.mox.StubOutWithMock(self.task, '_get_compute_info')
self.task._check_host_is_up(self.destination)
self.task._check_destination_has_enough_memory()
self.task._get_compute_info(self.instance_host).AndReturn({
"hypervisor_type": "a",
"hypervisor_version": 7
})
self.task._get_compute_info(self.destination).AndReturn({
"hypervisor_type": "a",
"hypervisor_version": 6
})
self.mox.ReplayAll()
self.assertRaises(exception.DestinationHypervisorTooOld,
self.task._check_requested_destination)
def test_find_destination_works(self):
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
compute_utils.get_image_metadata(self.context,
self.task.image_api, self.instance_image,
self.instance).AndReturn("image")
scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({})
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")
self.task._call_livem_checks_on_host("host1")
self.mox.ReplayAll()
self.assertEqual("host1", self.task._find_destination())
def test_find_destination_no_image_works(self):
self.instance['image_ref'] = ''
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
scheduler_utils.build_request_spec(self.context, None,
mox.IgnoreArg()).AndReturn({})
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")
self.task._call_livem_checks_on_host("host1")
self.mox.ReplayAll()
self.assertEqual("host1", self.task._find_destination())
def _test_find_destination_retry_hypervisor_raises(self, error):
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
compute_utils.get_image_metadata(self.context,
self.task.image_api, self.instance_image,
self.instance).AndReturn("image")
scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({})
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")\
.AndRaise(error)
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host, "host1"]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
[{'host': 'host2'}])
self.task._check_compatible_with_source_hypervisor("host2")
self.task._call_livem_checks_on_host("host2")
self.mox.ReplayAll()
self.assertEqual("host2", self.task._find_destination())
def test_find_destination_retry_with_old_hypervisor(self):
self._test_find_destination_retry_hypervisor_raises(
exception.DestinationHypervisorTooOld)
def test_find_destination_retry_with_invalid_hypervisor_type(self):
self._test_find_destination_retry_hypervisor_raises(
exception.InvalidHypervisorType)
def test_find_destination_retry_with_invalid_livem_checks(self):
self.flags(migrate_max_retries=1)
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
compute_utils.get_image_metadata(self.context,
self.task.image_api, self.instance_image,
self.instance).AndReturn("image")
scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({})
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")
self.task._call_livem_checks_on_host("host1")\
.AndRaise(exception.Invalid)
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host, "host1"]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
[{'host': 'host2'}])
self.task._check_compatible_with_source_hypervisor("host2")
self.task._call_livem_checks_on_host("host2")
self.mox.ReplayAll()
self.assertEqual("host2", self.task._find_destination())
def test_find_destination_retry_exceeds_max(self):
self.flags(migrate_max_retries=0)
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
compute_utils.get_image_metadata(self.context,
self.task.image_api, self.instance_image,
self.instance).AndReturn("image")
scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({})
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")\
.AndRaise(exception.DestinationHypervisorTooOld)
self.mox.ReplayAll()
self.assertRaises(exception.NoValidHost, self.task._find_destination)
def test_find_destination_when_runs_out_of_hosts(self):
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
compute_utils.get_image_metadata(self.context,
self.task.image_api, self.instance_image,
self.instance).AndReturn("image")
scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({})
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndRaise(
exception.NoValidHost(reason=""))
self.mox.ReplayAll()
self.assertRaises(exception.NoValidHost, self.task._find_destination)
def test_not_implemented_rollback(self):
self.assertRaises(NotImplementedError, self.task.rollback)
| 46.513514 | 78 | 0.665258 | ["Apache-2.0"] | Metaswitch/calico-nova | nova/tests/unit/conductor/tasks/test_live_migrate.py | 18,931 | Python |
"""
Script to show the wireframe of a given mesh (read from a file) in an interactive
Viewer.
"""
from viewer import *
from mesh.obj import OBJFile
import sys
if __name__ == "__main__":
app = Viewer()
if len(sys.argv) > 1:
try:
obj = OBJFile.read(sys.argv[1])
app.scene.addObject(obj)
app.title(sys.argv[1])
app.scene.setTarget(obj.centroid)
except Exception as e:
raise e
else:
print("No input file given. Nothing to render.")
print("Try 'python3 wireframe.py yourobj.obj'")
app.show()
| 23.038462 | 81 | 0.592654 | ["MIT"] | fwidmaier/mesh_handler | wireframe.py | 599 | Python |
# The MIT License (MIT)
# Copyright (c) 2015 Yanzheng Li
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
## -----------------------------------------------------------------------------
def test_assert_true():
try:
assert True
assert True, 'I want to believe.'
except AssertionError:
print 'This should not happen'
## -----------------------------------------------------------------------------
def test_assert_false():
try:
assert False
except AssertionError:
print 'I cannot believe'
## -----------------------------------------------------------------------------
def test_assert_on_truthy_exprs():
try:
assert 1
assert 1 + 1
assert 3.14 - 3.12
assert not False
except AssertionError:
print 'This should not happen'
## -----------------------------------------------------------------------------
def test_assert_on_falsy_exprs():
try:
assert 0
except AssertionError:
print 'I cannot believe'
try:
assert 0 - 1
except AssertionError:
print 'I cannot believe'
try:
assert not True
except AssertionError:
print 'I cannot believe'
try:
assert 3.12 - 3.14
except AssertionError:
print 'I cannot believe'
## -----------------------------------------------------------------------------
test_assert_true()
test_assert_false()
test_assert_on_truthy_exprs()
test_assert_on_falsy_exprs()
## -----------------------------------------------------------------------------
| 31.52439 | 80 | 0.572534 | ["MIT"] | mizuki-nana/coreVM | python/tests/assert.py | 2,585 | Python |
import logging
import pandas as pd
from flask import Flask, request
from gevent.pywsgi import WSGIServer
from time import sleep
from func import rms, meas_to_influx, rms_to_influx, config
logger = logging.getLogger(config['log_name'])
logger.setLevel(logging.INFO)
h_stream = logging.StreamHandler()
h_stream.setLevel(logging.INFO)
logger.addHandler(h_stream)
app = Flask(__name__)
@app.post('/save')
def save():
headers = request.headers
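    # Requests must carry the shared API key from the config; the short sleep on failure
    # is presumably there to slow down brute-force attempts before returning 401.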
if 'X-API-KEY' not in headers or headers['X-API-KEY'] != config['api_key']:
sleep(5)
return '', 401
data = request.json
dt = pd.Timestamp(data['dt'])
s_data, power = rms(data['payload'], data['ticks'], dt)
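    # Guard rails on the computed RMS power (assumed to be in watts): negative values
    # indicate a bad sample set and are only logged, while small readings below 100 are
    # acknowledged but not written to InfluxDB.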
if power < 0:
logger.error(data)
return '', 204
if power < 100:
return str(power)
# print(s_data)
# print(power)
rms_to_influx(power, dt)
meas_to_influx(s_data)
return str(power)
if __name__ == '__main__':
# app.run(host=config['url'], port=config['port'])
WSGIServer((config['url'], config['port']), app).serve_forever()
| 22.87234 | 79 | 0.664186 | ["MIT"] | dave-cz/esp32_power_meter | server/server.py | 1,075 | Python |
# coding: utf-8
"""
Provides the exporter tool. The exporter can be used to export ComodIT entities
to local directories.
"""
from __future__ import print_function
from builtins import object
import os
from comodit_client.api.collection import EntityNotFoundException
from comodit_client.api.exceptions import PythonApiException
from comodit_client.api.host import Host
from comodit_client.rest.exceptions import ApiException
from comodit_client.util.path import ensure
import six
from comodit_client.api import orchestration
class ExportException(Exception):
"""
Exception raised by exporter in case of error.
"""
pass
class Export(object):
"""
The exporter is a tool that enables to export entities to local
directories. Exported entities may later be (re-)imported (see L{Import}).
"""
def __init__(self, force = False):
"""
Creates an exporter instance. If force flag is set, all data already
present in a destination folder are overwritten on export.
@param force: If True, force flag is set. It is not otherwise.
@type force: bool
"""
self._force = force
def _export_files_content(self, entity, output_folder):
for template in entity.files():
file_name = template.name
try:
with open(os.path.join(output_folder, file_name), "w") as f:
if six.PY2:
f.write(template.read_content().encode('utf-8'))
else:
f.write(template.read_content())
except ApiException as e:
if e.code == 404:
pass
else:
raise e
def _export_entity(self, res, res_folder, export_files = False, export_thumb = False, backup = False):
if backup:
print("backup", res.name, "to", res_folder)
else:
print("exporting", res.name, "to", res_folder)
# Ensures local repository does not contain stale data
if(os.path.exists(res_folder) and len(os.listdir(res_folder)) > 0) and not self._force:
raise ExportException(res_folder + " already exists and is not empty.")
res.dump(res_folder)
if export_files:
# Dump files' content to disk
files_folder = os.path.join(res_folder, "files")
ensure(files_folder)
self._export_files_content(res, files_folder)
if export_thumb:
# Dump thumbnail to disk
try:
content = res.read_thumbnail_content()
with open(os.path.join(res_folder, "thumb"), "wb") as f:
f.write(content)
except ApiException as e:
if e.code == 404:
pass
else:
raise e
def export_application(self, app, path, backup = False):
"""
Exports an application to a local folder.
@param app: The application to export.
@type app: L{Application}
@param path: Path to local directory.
@type path: string
        @param backup: Indicates whether this export is a backup.
        @type backup: bool
"""
self._export_entity(app, path, True, True, backup)
def export_distribution(self, dist, path, backup = False):
"""
Exports a distribution to a local folder.
@param dist: The distribution to export.
@type dist: L{Distribution}
@param path: Path to local directory.
@type path: string
        @param backup: Indicates whether this export is a backup.
        @type backup: bool
"""
self._export_entity(dist, path, True, True, backup)
def export_platform(self, plat, path, backup = False):
"""
Exports a platform to a local folder.
@param plat: The platform to export.
@type plat: L{Platform}
@param path: Path to local directory.
@type path: string
        @param backup: Indicates whether this export is a backup.
        @type backup: bool
"""
self._export_entity(plat, path, True, backup=backup)
def export_environment(self, env, path):
"""
        Exports an environment to a local folder. Hosts of the environment
        are also exported.
@param env: The environment to export.
@type env: L{Environment}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(env, path)
hosts_folder = os.path.join(path, "hosts")
for host in env.hosts():
self.export_host(host, os.path.join(hosts_folder, host.name))
def export_job(self, job, path):
"""
Exports a job to a local folder.
@param job: The job to export.
@type job: L{Job}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(job, path)
def export_orchestration(self, orchestration, path):
"""
        Exports an orchestration to a local folder.
        @param orchestration: The orchestration to export.
@type orchestration: L{Orchestration}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(orchestration, path)
def export_notification(self, notification, path):
"""
        Exports a notification to a local folder.
@param notification: The notification to export.
@type notification: L{Notification}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(notification, path)
def export_host(self, host, path):
"""
        Exports a host to a local folder. Contexts and the instance are also
        exported.
@param host: The host to export.
@type host: L{Host}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(host, path)
# Export instance
if host.state != Host.State.DEFINED:
try:
instance = host.get_instance()
instance.dump_json(os.path.join(path, "instance.json"))
except PythonApiException:
pass
# Export application contexts
app_folder = os.path.join(path, "applications")
ensure(app_folder)
for context in host.applications():
context.dump_json(os.path.join(app_folder, context.application + ".json"))
# Export platform context
try:
host.get_platform().dump_json(os.path.join(path, "platform.json"))
except EntityNotFoundException:
pass
# Export distribution context
try:
host.get_distribution().dump_json(os.path.join(path, "distribution.json"))
except EntityNotFoundException:
pass
def export_organization(self, org, path):
"""
        Exports an organization to a local folder. Environments, applications,
        distributions and platforms are also exported.
@param org: The organization to export.
@type org: L{Organization}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(org, path)
for app in org.applications():
self.export_application(app, os.path.join(path, "applications", app.name))
for dist in org.distributions():
self.export_distribution(dist, os.path.join(path, "distributions", dist.name))
for plat in org.platforms():
self.export_platform(plat, os.path.join(path, "platforms", plat.name))
for job in org.jobs():
self.export_job(job, os.path.join(path, "jobs", job.name))
for orch in org.orchestrations():
self.export_orchestration(orch, os.path.join(path, "orchestrations", orch.name))
for env in org.environments():
            self.export_environment(env, os.path.join(path, "environments", env.name))
| 31.822134 | 106 | 0.600795 | ["MIT"] | geoco84/comodit-client | comodit_client/api/exporter.py | 8,051 | Python |
from __future__ import absolute_import, division, print_function
import argparse
import sys
import os
import py
import pytest
from _pytest.config import argparsing as parseopt
@pytest.fixture
def parser():
return parseopt.Parser()
class TestParser(object):
def test_no_help_by_default(self, capsys):
parser = parseopt.Parser(usage="xyz")
pytest.raises(SystemExit, lambda: parser.parse(["-h"]))
out, err = capsys.readouterr()
assert err.find("error: unrecognized arguments") != -1
def test_argument(self):
with pytest.raises(parseopt.ArgumentError):
# need a short or long option
argument = parseopt.Argument()
argument = parseopt.Argument("-t")
assert argument._short_opts == ["-t"]
assert argument._long_opts == []
assert argument.dest == "t"
argument = parseopt.Argument("-t", "--test")
assert argument._short_opts == ["-t"]
assert argument._long_opts == ["--test"]
assert argument.dest == "test"
argument = parseopt.Argument("-t", "--test", dest="abc")
assert argument.dest == "abc"
assert (
str(argument)
== ("Argument(_short_opts: ['-t'], _long_opts: ['--test'], dest: 'abc')")
)
def test_argument_type(self):
argument = parseopt.Argument("-t", dest="abc", type=int)
assert argument.type is int
argument = parseopt.Argument("-t", dest="abc", type=str)
assert argument.type is str
argument = parseopt.Argument("-t", dest="abc", type=float)
assert argument.type is float
with pytest.warns(DeprecationWarning):
with pytest.raises(KeyError):
argument = parseopt.Argument("-t", dest="abc", type="choice")
argument = parseopt.Argument(
"-t", dest="abc", type=str, choices=["red", "blue"]
)
assert argument.type is str
def test_argument_processopt(self):
argument = parseopt.Argument("-t", type=int)
argument.default = 42
argument.dest = "abc"
res = argument.attrs()
assert res["default"] == 42
assert res["dest"] == "abc"
def test_group_add_and_get(self, parser):
group = parser.getgroup("hello", description="desc")
assert group.name == "hello"
assert group.description == "desc"
def test_getgroup_simple(self, parser):
group = parser.getgroup("hello", description="desc")
assert group.name == "hello"
assert group.description == "desc"
group2 = parser.getgroup("hello")
assert group2 is group
def test_group_ordering(self, parser):
parser.getgroup("1")
parser.getgroup("2")
parser.getgroup("3", after="1")
groups = parser._groups
groups_names = [x.name for x in groups]
assert groups_names == list("132")
def test_group_addoption(self):
group = parseopt.OptionGroup("hello")
group.addoption("--option1", action="store_true")
assert len(group.options) == 1
assert isinstance(group.options[0], parseopt.Argument)
def test_group_addoption_conflict(self):
group = parseopt.OptionGroup("hello again")
group.addoption("--option1", "--option-1", action="store_true")
with pytest.raises(ValueError) as err:
group.addoption("--option1", "--option-one", action="store_true")
assert str({"--option1"}) in str(err.value)
def test_group_shortopt_lowercase(self, parser):
group = parser.getgroup("hello")
pytest.raises(
ValueError,
"""
group.addoption("-x", action="store_true")
""",
)
assert len(group.options) == 0
group._addoption("-x", action="store_true")
assert len(group.options) == 1
def test_parser_addoption(self, parser):
group = parser.getgroup("custom options")
assert len(group.options) == 0
group.addoption("--option1", action="store_true")
assert len(group.options) == 1
def test_parse(self, parser):
parser.addoption("--hello", dest="hello", action="store")
args = parser.parse(["--hello", "world"])
assert args.hello == "world"
assert not getattr(args, parseopt.FILE_OR_DIR)
def test_parse2(self, parser):
args = parser.parse([py.path.local()])
assert getattr(args, parseopt.FILE_OR_DIR)[0] == py.path.local()
def test_parse_known_args(self, parser):
parser.parse_known_args([py.path.local()])
parser.addoption("--hello", action="store_true")
ns = parser.parse_known_args(["x", "--y", "--hello", "this"])
assert ns.hello
assert ns.file_or_dir == ["x"]
def test_parse_known_and_unknown_args(self, parser):
parser.addoption("--hello", action="store_true")
ns, unknown = parser.parse_known_and_unknown_args(
["x", "--y", "--hello", "this"]
)
assert ns.hello
assert ns.file_or_dir == ["x"]
assert unknown == ["--y", "this"]
def test_parse_will_set_default(self, parser):
parser.addoption("--hello", dest="hello", default="x", action="store")
option = parser.parse([])
assert option.hello == "x"
del option.hello
parser.parse_setoption([], option)
assert option.hello == "x"
def test_parse_setoption(self, parser):
parser.addoption("--hello", dest="hello", action="store")
parser.addoption("--world", dest="world", default=42)
class A(object):
pass
option = A()
args = parser.parse_setoption(["--hello", "world"], option)
assert option.hello == "world"
assert option.world == 42
assert not args
def test_parse_special_destination(self, parser):
parser.addoption("--ultimate-answer", type=int)
args = parser.parse(["--ultimate-answer", "42"])
assert args.ultimate_answer == 42
def test_parse_split_positional_arguments(self, parser):
parser.addoption("-R", action="store_true")
parser.addoption("-S", action="store_false")
args = parser.parse(["-R", "4", "2", "-S"])
assert getattr(args, parseopt.FILE_OR_DIR) == ["4", "2"]
args = parser.parse(["-R", "-S", "4", "2", "-R"])
assert getattr(args, parseopt.FILE_OR_DIR) == ["4", "2"]
assert args.R is True
assert args.S is False
args = parser.parse(["-R", "4", "-S", "2"])
assert getattr(args, parseopt.FILE_OR_DIR) == ["4", "2"]
assert args.R is True
assert args.S is False
def test_parse_defaultgetter(self):
def defaultget(option):
if not hasattr(option, "type"):
return
if option.type is int:
option.default = 42
elif option.type is str:
option.default = "world"
parser = parseopt.Parser(processopt=defaultget)
parser.addoption("--this", dest="this", type=int, action="store")
parser.addoption("--hello", dest="hello", type=str, action="store")
parser.addoption("--no", dest="no", action="store_true")
option = parser.parse([])
assert option.hello == "world"
assert option.this == 42
assert option.no is False
def test_drop_short_helper(self):
parser = argparse.ArgumentParser(
formatter_class=parseopt.DropShorterLongHelpFormatter
)
parser.add_argument(
"-t", "--twoword", "--duo", "--two-word", "--two", help="foo"
).map_long_option = {
"two": "two-word"
}
# throws error on --deux only!
parser.add_argument(
"-d", "--deuxmots", "--deux-mots", action="store_true", help="foo"
).map_long_option = {
"deux": "deux-mots"
}
parser.add_argument("-s", action="store_true", help="single short")
parser.add_argument("--abc", "-a", action="store_true", help="bar")
parser.add_argument("--klm", "-k", "--kl-m", action="store_true", help="bar")
parser.add_argument(
"-P", "--pq-r", "-p", "--pqr", action="store_true", help="bar"
)
parser.add_argument(
"--zwei-wort", "--zweiwort", "--zweiwort", action="store_true", help="bar"
)
parser.add_argument(
"-x", "--exit-on-first", "--exitfirst", action="store_true", help="spam"
).map_long_option = {
"exitfirst": "exit-on-first"
}
parser.add_argument("files_and_dirs", nargs="*")
args = parser.parse_args(["-k", "--duo", "hallo", "--exitfirst"])
assert args.twoword == "hallo"
assert args.klm is True
assert args.zwei_wort is False
assert args.exit_on_first is True
assert args.s is False
args = parser.parse_args(["--deux-mots"])
with pytest.raises(AttributeError):
assert args.deux_mots is True
assert args.deuxmots is True
args = parser.parse_args(["file", "dir"])
assert "|".join(args.files_and_dirs) == "file|dir"
def test_drop_short_0(self, parser):
parser.addoption("--funcarg", "--func-arg", action="store_true")
parser.addoption("--abc-def", "--abc-def", action="store_true")
parser.addoption("--klm-hij", action="store_true")
args = parser.parse(["--funcarg", "--k"])
assert args.funcarg is True
assert args.abc_def is False
assert args.klm_hij is True
def test_drop_short_2(self, parser):
parser.addoption("--func-arg", "--doit", action="store_true")
args = parser.parse(["--doit"])
assert args.func_arg is True
def test_drop_short_3(self, parser):
parser.addoption("--func-arg", "--funcarg", "--doit", action="store_true")
args = parser.parse(["abcd"])
assert args.func_arg is False
assert args.file_or_dir == ["abcd"]
def test_drop_short_help0(self, parser, capsys):
parser.addoption("--func-args", "--doit", help="foo", action="store_true")
parser.parse([])
help = parser.optparser.format_help()
assert "--func-args, --doit foo" in help
# testing would be more helpful with all help generated
def test_drop_short_help1(self, parser, capsys):
group = parser.getgroup("general")
group.addoption("--doit", "--func-args", action="store_true", help="foo")
group._addoption(
"-h",
"--help",
action="store_true",
dest="help",
help="show help message and configuration info",
)
parser.parse(["-h"])
help = parser.optparser.format_help()
assert "-doit, --func-args foo" in help
def test_multiple_metavar_help(self, parser):
"""
Help text for options with a metavar tuple should display help
in the form "--preferences=value1 value2 value3" (#2004).
"""
group = parser.getgroup("general")
group.addoption(
"--preferences", metavar=("value1", "value2", "value3"), nargs=3
)
group._addoption("-h", "--help", action="store_true", dest="help")
parser.parse(["-h"])
help = parser.optparser.format_help()
assert "--preferences=value1 value2 value3" in help
def test_argcomplete(testdir, monkeypatch):
if not py.path.local.sysfind("bash"):
pytest.skip("bash not available")
script = str(testdir.tmpdir.join("test_argcomplete"))
pytest_bin = sys.argv[0]
if "pytest" not in os.path.basename(pytest_bin):
pytest.skip("need to be run with pytest executable, not %s" % (pytest_bin,))
with open(str(script), "w") as fp:
# redirect output from argcomplete to stdin and stderr is not trivial
# http://stackoverflow.com/q/12589419/1307905
# so we use bash
fp.write('COMP_WORDBREAKS="$COMP_WORDBREAKS" %s 8>&1 9>&2' % pytest_bin)
    # an alternative would be to extend Testdir.{run(),_run(),popen()} to be able
# to handle a keyword argument env that replaces os.environ in popen or
# extends the copy, advantage: could not forget to restore
monkeypatch.setenv("_ARGCOMPLETE", "1")
monkeypatch.setenv("_ARGCOMPLETE_IFS", "\x0b")
monkeypatch.setenv("COMP_WORDBREAKS", " \\t\\n\"\\'><=;|&(:")
arg = "--fu"
monkeypatch.setenv("COMP_LINE", "pytest " + arg)
monkeypatch.setenv("COMP_POINT", str(len("pytest " + arg)))
result = testdir.run("bash", str(script), arg)
if result.ret == 255:
# argcomplete not found
pytest.skip("argcomplete not available")
elif not result.stdout.str():
pytest.skip("bash provided no output, argcomplete not available?")
else:
result.stdout.fnmatch_lines(["--funcargs", "--fulltrace"])
os.mkdir("test_argcomplete.d")
arg = "test_argc"
monkeypatch.setenv("COMP_LINE", "pytest " + arg)
monkeypatch.setenv("COMP_POINT", str(len("pytest " + arg)))
result = testdir.run("bash", str(script), arg)
result.stdout.fnmatch_lines(["test_argcomplete", "test_argcomplete.d/"])
| 39.249258 | 86 | 0.597112 | ["BSD-3-Clause"] | 2-GARIK20/wpt | tools/third_party/pytest/testing/test_parseopt.py | 13,227 | Python |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# pylint: disable=too-few-public-methods, too-many-instance-attributes
# pylint: disable=super-init-not-called, too-many-lines
from enum import Enum
from azure.storage.blob import LeaseProperties as BlobLeaseProperties
from azure.storage.blob import AccountSasPermissions as BlobAccountSasPermissions
from azure.storage.blob import ResourceTypes as BlobResourceTypes
from azure.storage.blob import UserDelegationKey as BlobUserDelegationKey
from azure.storage.blob import ContentSettings as BlobContentSettings
from azure.storage.blob import AccessPolicy as BlobAccessPolicy
from azure.storage.blob import DelimitedTextDialect as BlobDelimitedTextDialect
from azure.storage.blob import DelimitedJsonDialect as BlobDelimitedJSON
from azure.storage.blob import ArrowDialect as BlobArrowDialect
from azure.storage.blob._models import ContainerPropertiesPaged
from ._shared.models import DictMixin
class FileSystemProperties(object):
"""File System properties class.
:ivar ~datetime.datetime last_modified:
A datetime object representing the last time the file system was modified.
:ivar str etag:
The ETag contains a value that you can use to perform operations
conditionally.
:ivar ~azure.storage.filedatalake.LeaseProperties lease:
Stores all the lease information for the file system.
:ivar str public_access: Specifies whether data in the file system may be accessed
publicly and the level of access.
:ivar bool has_immutability_policy:
Represents whether the file system has an immutability policy.
:ivar bool has_legal_hold:
Represents whether the file system has a legal hold.
:ivar dict metadata: A dict with name-value pairs to associate with the
file system as metadata.
Returned ``FileSystemProperties`` instances expose these values through a
dictionary interface, for example: ``file_system_props["last_modified"]``.
Additionally, the file system name is available as ``file_system_props["name"]``.
"""
def __init__(self):
self.name = None
self.last_modified = None
self.etag = None
self.lease = None
self.public_access = None
self.has_immutability_policy = None
self.has_legal_hold = None
self.metadata = None
@classmethod
def _from_generated(cls, generated):
props = cls()
props.name = generated.name
props.last_modified = generated.properties.last_modified
props.etag = generated.properties.etag
props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access
props.public_access = PublicAccess._from_generated( # pylint: disable=protected-access
generated.properties.public_access)
props.has_immutability_policy = generated.properties.has_immutability_policy
props.has_legal_hold = generated.properties.has_legal_hold
props.metadata = generated.metadata
return props
@classmethod
def _convert_from_container_props(cls, container_properties):
container_properties.__class__ = cls
container_properties.public_access = PublicAccess._from_generated( # pylint: disable=protected-access
container_properties.public_access)
container_properties.lease.__class__ = LeaseProperties
return container_properties
class FileSystemPropertiesPaged(ContainerPropertiesPaged):
"""An Iterable of File System properties.
:ivar str service_endpoint: The service URL.
:ivar str prefix: A file system name prefix being used to filter the list.
:ivar str marker: The continuation token of the current page of results.
:ivar int results_per_page: The maximum number of results retrieved per API call.
:ivar str continuation_token: The continuation token to retrieve the next page of results.
:ivar str location_mode: The location mode being used to list results. The available
options include "primary" and "secondary".
:ivar current_page: The current page of listed results.
:vartype current_page: list(~azure.storage.filedatalake.FileSystemProperties)
:param callable command: Function to retrieve the next page of items.
:param str prefix: Filters the results to return only file systems whose names
begin with the specified prefix.
:param int results_per_page: The maximum number of file system names to retrieve per
call.
:param str continuation_token: An opaque continuation token.
"""
def __init__(self, *args, **kwargs):
super(FileSystemPropertiesPaged, self).__init__(
*args,
**kwargs
)
@staticmethod
def _build_item(item):
return FileSystemProperties._from_generated(item) # pylint: disable=protected-access
class DirectoryProperties(DictMixin):
"""
:ivar str name: name of the directory
:ivar str etag: The ETag contains a value that you can use to perform operations
conditionally.
    :ivar bool deleted: Whether the current directory is marked as deleted.
:ivar dict metadata: Name-value pairs associated with the directory as metadata.
:ivar ~azure.storage.filedatalake.LeaseProperties lease:
Stores all the lease information for the directory.
:ivar ~datetime.datetime last_modified:
A datetime object representing the last time the directory was modified.
:ivar ~datetime.datetime creation_time:
Indicates when the directory was created, in UTC.
:ivar int remaining_retention_days: The number of days that the directory will be retained
before being permanently deleted by the service.
    :ivar ~azure.storage.filedatalake.ContentSettings content_settings:
        The content settings of the directory.
"""
def __init__(self, **kwargs):
self.name = kwargs.get('name')
self.etag = kwargs.get('ETag')
self.deleted = None
self.metadata = kwargs.get('metadata')
self.lease = LeaseProperties(**kwargs)
self.last_modified = kwargs.get('Last-Modified')
self.creation_time = kwargs.get('x-ms-creation-time')
self.deleted_time = None
self.remaining_retention_days = None
class FileProperties(DictMixin):
"""
:ivar str name: name of the file
:ivar str etag: The ETag contains a value that you can use to perform operations
conditionally.
    :ivar bool deleted: Whether the current file is marked as deleted.
:ivar dict metadata: Name-value pairs associated with the file as metadata.
:ivar ~azure.storage.filedatalake.LeaseProperties lease:
Stores all the lease information for the file.
:ivar ~datetime.datetime last_modified:
A datetime object representing the last time the file was modified.
:ivar ~datetime.datetime creation_time:
Indicates when the file was created, in UTC.
:ivar int size: size of the file
:ivar int remaining_retention_days: The number of days that the file will be retained
before being permanently deleted by the service.
    :ivar ~azure.storage.filedatalake.ContentSettings content_settings:
        The content settings of the file.
"""
def __init__(self, **kwargs):
self.name = kwargs.get('name')
self.etag = kwargs.get('ETag')
self.deleted = None
self.metadata = kwargs.get('metadata')
self.lease = LeaseProperties(**kwargs)
self.last_modified = kwargs.get('Last-Modified')
self.creation_time = kwargs.get('x-ms-creation-time')
self.size = kwargs.get('Content-Length')
self.deleted_time = None
self.expiry_time = kwargs.get("x-ms-expiry-time")
self.remaining_retention_days = None
self.content_settings = ContentSettings(**kwargs)
class PathProperties(object):
"""Path properties listed by get_paths api.
:ivar str name: the full path for a file or directory.
:ivar str owner: The owner of the file or directory.
    :ivar str group: The owning group of the file or directory.
:ivar str permissions: Sets POSIX access permissions for the file
owner, the file owning group, and others. Each class may be granted
read, write, or execute permission. The sticky bit is also supported.
Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
supported.
:ivar datetime last_modified: A datetime object representing the last time the directory/file was modified.
:ivar bool is_directory: is the path a directory or not.
:ivar str etag: The ETag contains a value that you can use to perform operations
conditionally.
    :ivar int content_length: The size of the file if the path is a file.
"""
def __init__(self, **kwargs):
super(PathProperties, self).__init__(
**kwargs
)
self.name = kwargs.pop('name', None)
self.owner = kwargs.get('owner', None)
self.group = kwargs.get('group', None)
self.permissions = kwargs.get('permissions', None)
self.last_modified = kwargs.get('last_modified', None)
self.is_directory = kwargs.get('is_directory', False)
self.etag = kwargs.get('etag', None)
self.content_length = kwargs.get('content_length', None)
@classmethod
def _from_generated(cls, generated):
path_prop = PathProperties()
path_prop.name = generated.name
path_prop.owner = generated.owner
path_prop.group = generated.group
path_prop.permissions = generated.permissions
path_prop.last_modified = generated.last_modified
path_prop.is_directory = bool(generated.is_directory)
path_prop.etag = generated.additional_properties.get('etag')
path_prop.content_length = generated.content_length
return path_prop
class LeaseProperties(BlobLeaseProperties):
"""DataLake Lease Properties.
:ivar str status:
The lease status of the file. Possible values: locked|unlocked
:ivar str state:
Lease state of the file. Possible values: available|leased|expired|breaking|broken
:ivar str duration:
When a file is leased, specifies whether the lease is of infinite or fixed duration.
"""
class ContentSettings(BlobContentSettings):
"""The content settings of a file or directory.
:ivar str content_type:
The content type specified for the file or directory. If no content type was
specified, the default content type is application/octet-stream.
:ivar str content_encoding:
If the content_encoding has previously been set
for the file, that value is stored.
:ivar str content_language:
If the content_language has previously been set
for the file, that value is stored.
:ivar str content_disposition:
content_disposition conveys additional information about how to
process the response payload, and also can be used to attach
additional metadata. If content_disposition has previously been set
for the file, that value is stored.
:ivar str cache_control:
If the cache_control has previously been set for
the file, that value is stored.
:ivar str content_md5:
If the content_md5 has been set for the file, this response
header is stored so that the client can check for message content
integrity.
:keyword str content_type:
The content type specified for the file or directory. If no content type was
specified, the default content type is application/octet-stream.
:keyword str content_encoding:
If the content_encoding has previously been set
for the file, that value is stored.
:keyword str content_language:
If the content_language has previously been set
for the file, that value is stored.
:keyword str content_disposition:
content_disposition conveys additional information about how to
process the response payload, and also can be used to attach
additional metadata. If content_disposition has previously been set
for the file, that value is stored.
:keyword str cache_control:
If the cache_control has previously been set for
the file, that value is stored.
:keyword str content_md5:
If the content_md5 has been set for the file, this response
header is stored so that the client can check for message content
integrity.
"""
def __init__(
self, **kwargs):
super(ContentSettings, self).__init__(
**kwargs
)
class AccountSasPermissions(BlobAccountSasPermissions):
def __init__(self, read=False, write=False, delete=False, list=False, # pylint: disable=redefined-builtin
create=False):
super(AccountSasPermissions, self).__init__(
read=read, create=create, write=write, list=list,
delete=delete
)
class FileSystemSasPermissions(object):
"""FileSystemSasPermissions class to be used with the
:func:`~azure.storage.filedatalake.generate_file_system_sas` function.
:param bool read:
Read the content, properties, metadata etc.
:param bool write:
Create or write content, properties, metadata. Lease the file system.
:param bool delete:
Delete the file system.
:param bool list:
List paths in the file system.
:keyword bool move:
Move any file in the directory to a new location.
Note the move operation can optionally be restricted to the child file or directory owner or
the parent directory owner if the saoid parameter is included in the token and the sticky bit is set
on the parent directory.
:keyword bool execute:
Get the status (system defined properties) and ACL of any file in the directory.
If the caller is the owner, set access control on any file in the directory.
:keyword bool manage_ownership:
Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory
within a folder that has the sticky bit set.
:keyword bool manage_access_control:
Allows the user to set permissions and POSIX ACLs on files and directories.
"""
def __init__(self, read=False, write=False, delete=False, list=False, # pylint: disable=redefined-builtin
**kwargs):
self.read = read
self.write = write
self.delete = delete
self.list = list
self.move = kwargs.pop('move', None)
self.execute = kwargs.pop('execute', None)
self.manage_ownership = kwargs.pop('manage_ownership', None)
self.manage_access_control = kwargs.pop('manage_access_control', None)
self._str = (('r' if self.read else '') +
('w' if self.write else '') +
('d' if self.delete else '') +
('l' if self.list else '') +
('m' if self.move else '') +
('e' if self.execute else '') +
('o' if self.manage_ownership else '') +
('p' if self.manage_access_control else ''))
def __str__(self):
return self._str
@classmethod
def from_string(cls, permission):
"""Create a FileSystemSasPermissions from a string.
To specify read, write, or delete permissions you need only to
include the first letter of the word in the string. E.g. For read and
write permissions, you would provide a string "rw".
        :param str permission: The string which dictates the read, write, delete,
            or list permissions.
:return: A FileSystemSasPermissions object
        :rtype: ~azure.storage.filedatalake.FileSystemSasPermissions
"""
p_read = 'r' in permission
p_write = 'w' in permission
p_delete = 'd' in permission
p_list = 'l' in permission
p_move = 'm' in permission
p_execute = 'e' in permission
p_manage_ownership = 'o' in permission
p_manage_access_control = 'p' in permission
parsed = cls(read=p_read, write=p_write, delete=p_delete,
list=p_list, move=p_move, execute=p_execute, manage_ownership=p_manage_ownership,
manage_access_control=p_manage_access_control)
return parsed
class DirectorySasPermissions(object):
"""DirectorySasPermissions class to be used with the
:func:`~azure.storage.filedatalake.generate_directory_sas` function.
:param bool read:
Read the content, properties, metadata etc.
:param bool create:
Create a new directory
:param bool write:
Create or write content, properties, metadata. Lease the directory.
:param bool delete:
Delete the directory.
:keyword bool list:
List any files in the directory. Implies Execute.
:keyword bool move:
Move any file in the directory to a new location.
Note the move operation can optionally be restricted to the child file or directory owner or
the parent directory owner if the saoid parameter is included in the token and the sticky bit is set
on the parent directory.
:keyword bool execute:
Get the status (system defined properties) and ACL of any file in the directory.
If the caller is the owner, set access control on any file in the directory.
:keyword bool manage_ownership:
Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory
within a folder that has the sticky bit set.
:keyword bool manage_access_control:
Allows the user to set permissions and POSIX ACLs on files and directories.
"""
def __init__(self, read=False, create=False, write=False,
delete=False, **kwargs):
self.read = read
self.create = create
self.write = write
self.delete = delete
self.list = kwargs.pop('list', None)
self.move = kwargs.pop('move', None)
self.execute = kwargs.pop('execute', None)
self.manage_ownership = kwargs.pop('manage_ownership', None)
self.manage_access_control = kwargs.pop('manage_access_control', None)
self._str = (('r' if self.read else '') +
('c' if self.create else '') +
('w' if self.write else '') +
('d' if self.delete else '') +
('l' if self.list else '') +
('m' if self.move else '') +
('e' if self.execute else '') +
('o' if self.manage_ownership else '') +
('p' if self.manage_access_control else ''))
def __str__(self):
return self._str
@classmethod
def from_string(cls, permission):
"""Create a DirectorySasPermissions from a string.
To specify read, create, write, or delete permissions you need only to
include the first letter of the word in the string. E.g. For read and
write permissions, you would provide a string "rw".
        :param str permission: The string which dictates the read, create, write,
            or delete permissions.
:return: A DirectorySasPermissions object
:rtype: ~azure.storage.filedatalake.DirectorySasPermissions
"""
p_read = 'r' in permission
p_create = 'c' in permission
p_write = 'w' in permission
p_delete = 'd' in permission
p_list = 'l' in permission
p_move = 'm' in permission
p_execute = 'e' in permission
p_manage_ownership = 'o' in permission
p_manage_access_control = 'p' in permission
parsed = cls(read=p_read, create=p_create, write=p_write, delete=p_delete,
list=p_list, move=p_move, execute=p_execute, manage_ownership=p_manage_ownership,
manage_access_control=p_manage_access_control)
return parsed
class FileSasPermissions(object):
"""FileSasPermissions class to be used with the
:func:`~azure.storage.filedatalake.generate_file_sas` function.
:param bool read:
Read the content, properties, metadata etc. Use the file as
the source of a read operation.
:param bool create:
Write a new file
:param bool write:
Create or write content, properties, metadata. Lease the file.
:param bool delete:
Delete the file.
:keyword bool move:
Move any file in the directory to a new location.
Note the move operation can optionally be restricted to the child file or directory owner or
the parent directory owner if the saoid parameter is included in the token and the sticky bit is set
on the parent directory.
:keyword bool execute:
Get the status (system defined properties) and ACL of any file in the directory.
If the caller is the owner, set access control on any file in the directory.
:keyword bool manage_ownership:
Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory
within a folder that has the sticky bit set.
:keyword bool manage_access_control:
Allows the user to set permissions and POSIX ACLs on files and directories.
"""
def __init__(self, read=False, create=False, write=False, delete=False, **kwargs):
self.read = read
self.create = create
self.write = write
self.delete = delete
self.list = list
self.move = kwargs.pop('move', None)
self.execute = kwargs.pop('execute', None)
self.manage_ownership = kwargs.pop('manage_ownership', None)
self.manage_access_control = kwargs.pop('manage_access_control', None)
self._str = (('r' if self.read else '') +
('c' if self.create else '') +
('w' if self.write else '') +
('d' if self.delete else '') +
('m' if self.move else '') +
('e' if self.execute else '') +
('o' if self.manage_ownership else '') +
('p' if self.manage_access_control else ''))
def __str__(self):
return self._str
@classmethod
def from_string(cls, permission):
"""Create a FileSasPermissions from a string.
To specify read, write, or delete permissions you need only to
include the first letter of the word in the string. E.g. For read and
write permissions, you would provide a string "rw".
        :param str permission: The string which dictates the read, create, write,
            or delete permissions.
:return: A FileSasPermissions object
        :rtype: ~azure.storage.filedatalake.FileSasPermissions
"""
p_read = 'r' in permission
p_create = 'c' in permission
p_write = 'w' in permission
p_delete = 'd' in permission
p_move = 'm' in permission
p_execute = 'e' in permission
p_manage_ownership = 'o' in permission
p_manage_access_control = 'p' in permission
parsed = cls(read=p_read, create=p_create, write=p_write, delete=p_delete,
move=p_move, execute=p_execute, manage_ownership=p_manage_ownership,
manage_access_control=p_manage_access_control)
return parsed
class AccessPolicy(BlobAccessPolicy):
"""Access Policy class used by the set and get access policy methods in each service.
A stored access policy can specify the start time, expiry time, and
permissions for the Shared Access Signatures with which it's associated.
Depending on how you want to control access to your resource, you can
specify all of these parameters within the stored access policy, and omit
them from the URL for the Shared Access Signature. Doing so permits you to
modify the associated signature's behavior at any time, as well as to revoke
it. Or you can specify one or more of the access policy parameters within
the stored access policy, and the others on the URL. Finally, you can
specify all of the parameters on the URL. In this case, you can use the
stored access policy to revoke the signature, but not to modify its behavior.
Together the Shared Access Signature and the stored access policy must
include all fields required to authenticate the signature. If any required
fields are missing, the request will fail. Likewise, if a field is specified
both in the Shared Access Signature URL and in the stored access policy, the
request will fail with status code 400 (Bad Request).
:param permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:type permission: str or ~azure.storage.datalake.FileSystemSasPermissions
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: ~datetime.datetime or str
:keyword start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:paramtype start: ~datetime.datetime or str
"""
def __init__(self, permission=None, expiry=None, **kwargs):
super(AccessPolicy, self).__init__(
permission=permission, expiry=expiry, start=kwargs.pop('start', None)
)
class ResourceTypes(BlobResourceTypes):
"""
Specifies the resource types that are accessible with the account SAS.
:param bool service:
        Access to service-level APIs (e.g. List File Systems)
:param bool file_system:
Access to file_system-level APIs (e.g., Create/Delete file system,
List Directories/Files)
:param bool object:
        Access to object-level APIs for
        files (e.g. Create File, etc.)
"""
def __init__(self, service=False, file_system=False, object=False # pylint: disable=redefined-builtin
):
super(ResourceTypes, self).__init__(service=service, container=file_system, object=object)
class UserDelegationKey(BlobUserDelegationKey):
"""
Represents a user delegation key, provided to the user by Azure Storage
based on their Azure Active Directory access token.
The fields are saved as simple strings since the user does not have to interact with this object;
    to generate an identity SAS, the user can simply pass it to the right API.
:ivar str signed_oid:
Object ID of this token.
:ivar str signed_tid:
Tenant ID of the tenant that issued this token.
:ivar str signed_start:
The datetime this token becomes valid.
:ivar str signed_expiry:
The datetime this token expires.
:ivar str signed_service:
What service this key is valid for.
:ivar str signed_version:
The version identifier of the REST service that created this token.
:ivar str value:
The user delegation key.
"""
@classmethod
def _from_generated(cls, generated):
delegation_key = cls()
delegation_key.signed_oid = generated.signed_oid
delegation_key.signed_tid = generated.signed_tid
delegation_key.signed_start = generated.signed_start
delegation_key.signed_expiry = generated.signed_expiry
delegation_key.signed_service = generated.signed_service
delegation_key.signed_version = generated.signed_version
delegation_key.value = generated.value
return delegation_key
class PublicAccess(str, Enum):
"""
Specifies whether data in the file system may be accessed publicly and the level of access.
"""
File = 'blob'
"""
    Specifies public read access for files. File data within this file system can be read
    via anonymous request, but file system data is not available. Clients cannot enumerate
    files within the file system via anonymous request.
"""
FileSystem = 'container'
"""
Specifies full public read access for file system and file data. Clients can enumerate
files within the file system via anonymous request, but cannot enumerate file systems
within the storage account.
"""
@classmethod
def _from_generated(cls, public_access):
if public_access == "blob": # pylint:disable=no-else-return
return cls.File
elif public_access == "container":
return cls.FileSystem
return None
class LocationMode(object):
"""
Specifies the location the request should be sent to. This mode only applies
for RA-GRS accounts which allow secondary read access. All other account types
must use PRIMARY.
"""
PRIMARY = 'primary' #: Requests should be sent to the primary location.
SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible.
class DelimitedJsonDialect(BlobDelimitedJSON):
"""Defines the input or output JSON serialization for a datalake query.
:keyword str delimiter: The line separator character, default value is '\n'
"""
class DelimitedTextDialect(BlobDelimitedTextDialect):
"""Defines the input or output delimited (CSV) serialization for a datalake query request.
:keyword str delimiter:
Column separator, defaults to ','.
:keyword str quotechar:
Field quote, defaults to '"'.
:keyword str lineterminator:
Record separator, defaults to '\n'.
:keyword str escapechar:
Escape char, defaults to empty.
:keyword bool has_header:
Whether the blob data includes headers in the first line. The default value is False, meaning that the
data will be returned inclusive of the first line. If set to True, the data will be returned exclusive
of the first line.
"""
class ArrowDialect(BlobArrowDialect):
"""field of an arrow schema.
All required parameters must be populated in order to send to Azure.
:param str type: Required.
:keyword str name: The name of the field.
:keyword int precision: The precision of the field.
:keyword int scale: The scale of the field.
"""
class ArrowType(str, Enum):
INT64 = "int64"
BOOL = "bool"
TIMESTAMP_MS = "timestamp[ms]"
STRING = "string"
DOUBLE = "double"
DECIMAL = 'decimal'
class DataLakeFileQueryError(object):
"""The error happened during quick query operation.
:ivar str error:
The name of the error.
:ivar bool is_fatal:
If true, this error prevents further query processing. More result data may be returned,
but there is no guarantee that all of the original data will be processed.
If false, this error does not prevent further query processing.
:ivar str description:
A description of the error.
:ivar int position:
The blob offset at which the error occurred.
"""
def __init__(self, error=None, is_fatal=False, description=None, position=None):
self.error = error
self.is_fatal = is_fatal
self.description = description
self.position = position
class AccessControlChangeCounters(DictMixin):
"""
AccessControlChangeCounters contains counts of operations that change Access Control Lists recursively.
:ivar int directories_successful:
Number of directories where Access Control List has been updated successfully.
:ivar int files_successful:
Number of files where Access Control List has been updated successfully.
:ivar int failure_count:
Number of paths where Access Control List update has failed.
"""
def __init__(self, directories_successful, files_successful, failure_count):
self.directories_successful = directories_successful
self.files_successful = files_successful
self.failure_count = failure_count
class AccessControlChangeResult(DictMixin):
"""
AccessControlChangeResult contains result of operations that change Access Control Lists recursively.
:ivar ~azure.storage.filedatalake.AccessControlChangeCounters counters:
Contains counts of paths changed from start of the operation.
:ivar str continuation:
Optional continuation token.
Value is present when operation is split into multiple batches and can be used to resume progress.
"""
def __init__(self, counters, continuation):
self.counters = counters
self.continuation = continuation
class AccessControlChangeFailure(DictMixin):
"""
Represents an entry that failed to update Access Control List.
:ivar str name:
Name of the entry.
:ivar bool is_directory:
Indicates whether the entry is a directory.
:ivar str error_message:
Indicates the reason why the entry failed to update.
"""
def __init__(self, name, is_directory, error_message):
self.name = name
self.is_directory = is_directory
self.error_message = error_message
class AccessControlChanges(DictMixin):
"""
AccessControlChanges contains batch and cumulative counts of operations
that change Access Control Lists recursively.
Additionally it exposes path entries that failed to update while these operations progress.
:ivar ~azure.storage.filedatalake.AccessControlChangeCounters batch_counters:
Contains counts of paths changed within single batch.
:ivar ~azure.storage.filedatalake.AccessControlChangeCounters aggregate_counters:
Contains counts of paths changed from start of the operation.
:ivar list(~azure.storage.filedatalake.AccessControlChangeFailure) batch_failures:
List of path entries that failed to update Access Control List within single batch.
:ivar str continuation:
An opaque continuation token that may be used to resume the operations in case of failures.
"""
def __init__(self, batch_counters, aggregate_counters, batch_failures, continuation):
self.batch_counters = batch_counters
self.aggregate_counters = aggregate_counters
self.batch_failures = batch_failures
self.continuation = continuation
class DataLakeAclChangeFailedError(Exception):
"""The error happened during set/update/remove acl recursive operation.
:ivar ~azure.core.exceptions.AzureError error:
The exception.
:ivar str description:
A description of the error.
:ivar str continuation:
An opaque continuation token that may be used to resume the operations in case of failures.
"""
def __init__(self, error, description, continuation):
self.error = error
self.description = description
self.continuation = continuation
| 42.770883 | 117 | 0.685843 | [
"MIT"
] | Co0olboi/azure-sdk-for-python | sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_models.py | 35,842 | Python |
#!/usr/bin/env python
from circuits import Component
from circuits.web import JSONRPC, Controller
from .helpers import urlopen
from .jsonrpclib import ServerProxy
class App(Component):
def eval(self, s):
return eval(s)
class Root(Controller):
def index(self):
return "Hello World!"
def test(webapp):
rpc = JSONRPC("/rpc")
test = App()
rpc.register(webapp)
test.register(webapp)
f = urlopen(webapp.server.http.base)
s = f.read()
assert s == b"Hello World!"
url = "%s/rpc" % webapp.server.http.base
jsonrpc = ServerProxy(url, allow_none=True, encoding='utf-8')
data = jsonrpc.eval("1 + 2")
assert data["result"] == 3
rpc.unregister()
test.unregister()
| 18.948718 | 65 | 0.645467 | [
"MIT"
] | hugovk/circuits | tests/web/test_jsonrpc.py | 739 | Python |
from output.models.saxon_data.all.all001_xsd.all001 import Doc
__all__ = [
"Doc",
]
| 14.833333 | 62 | 0.719101 | [
"MIT"
] | tefra/xsdata-w3c-tests | output/models/saxon_data/all/all001_xsd/__init__.py | 89 | Python |
from django import forms
from .models import Todo
class TodoForm(forms.ModelForm):
class Meta:
model = Todo
fields="__all__"
| 16.375 | 32 | 0.755725 | [
"MIT"
] | Ronlin1/To-Do-App | app/forms.py | 131 | Python |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Init module for TensorFlow Model Optimization Python API.
```
import tensorflow_model_optimization as tfmot
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# We need to put some imports inside a function call below, and the function
# call needs to come before the *actual* imports that populate the
# tensorflow_model_optimization namespace. Hence, we disable this lint check
# throughout the file.
#
# pylint: disable=g-import-not-at-top
# Ensure TensorFlow is importable and its version is sufficiently recent. This
# needs to happen before anything else, since the imports below will try to
# import tensorflow, too.
def _ensure_tf_install(): # pylint: disable=g-statement-before-imports
"""Attempt to import tensorflow, and ensure its version is sufficient.
Raises:
ImportError: if either tensorflow is not importable or its version is
inadequate.
"""
try:
import tensorflow as tf
except ImportError:
# Print more informative error message, then reraise.
print(
'\n\nFailed to import TensorFlow. Please note that TensorFlow is not '
'installed by default when you install TensorFlow Model Optimization. This '
'is so that users can decide whether to install the GPU-enabled '
'TensorFlow package. To use TensorFlow Model Optimization, please install '
'the most recent version of TensorFlow, by following instructions at '
'https://tensorflow.org/install.\n\n')
raise
import distutils.version
#
# Update this whenever we need to depend on a newer TensorFlow release.
#
required_tensorflow_version = '1.14.0'
if (distutils.version.LooseVersion(tf.version.VERSION) <
distutils.version.LooseVersion(required_tensorflow_version)):
raise ImportError(
'This version of TensorFlow Model Optimization requires TensorFlow '
'version >= {required}; Detected an installation of version {present}. '
'Please upgrade TensorFlow to proceed.'.format(
required=required_tensorflow_version, present=tf.__version__))
_ensure_tf_install()
import inspect as _inspect
import os as _os
import sys as _sys
# To ensure users only access the expected public API, the API structure is
# created in the `api` directory. Import all api modules.
# pylint: disable=wildcard-import
from tensorflow_model_optimization.python.core.api import *
# pylint: enable=wildcard-import
# Use sparsity module to fetch the path for the `api` directory.
# This handles all techniques, not just sparsity.
_API_MODULE = sparsity # pylint: disable=undefined-variable
# Returns $(install_dir)/tensorflow_model_optimization/api
_sparsity_api_dir = _os.path.dirname(
_os.path.dirname(_inspect.getfile(_API_MODULE)))
# Add the `api` directory to `__path__` so that `from * import module` works.
_current_module = _sys.modules[__name__]
if not hasattr(_current_module, '__path__'):
__path__ = [_sparsity_api_dir]
elif _os.path.dirname(_inspect.getfile(_API_MODULE)) not in __path__:
__path__.append(_sparsity_api_dir)
# Delete python module so that users only access the code using the API path
# rather than using the code directory structure.
# This will disallow usage such as `tfmot.python.core.sparsity.keras`.
# pylint: disable=undefined-variable
try:
del python
except NameError:
pass
# pylint: enable=undefined-variable
| 36.424779 | 84 | 0.744412 | [
"Apache-2.0"
] | 13957166977/model-optimization | tensorflow_model_optimization/__init__.py | 4,116 | Python |
"""
Constructor functions intended to be shared by pd.array, Series.__init__,
and Index.__new__.
These should not depend on core.internals.
"""
from __future__ import annotations
from collections import abc
from typing import TYPE_CHECKING, Any, Optional, Sequence, Union, cast
import numpy as np
import numpy.ma as ma
from pandas._libs import lib
from pandas._libs.tslibs import IncompatibleFrequency, OutOfBoundsDatetime
from pandas._typing import AnyArrayLike, ArrayLike, Dtype, DtypeObj
from pandas.core.dtypes.base import ExtensionDtype, registry
from pandas.core.dtypes.cast import (
construct_1d_arraylike_from_scalar,
construct_1d_ndarray_preserving_na,
construct_1d_object_array_from_listlike,
infer_dtype_from_scalar,
maybe_cast_to_datetime,
maybe_cast_to_integer_array,
maybe_castable,
maybe_convert_platform,
maybe_upcast,
)
from pandas.core.dtypes.common import (
is_datetime64_ns_dtype,
is_extension_array_dtype,
is_float_dtype,
is_integer_dtype,
is_iterator,
is_list_like,
is_object_dtype,
is_sparse,
is_string_dtype,
is_timedelta64_ns_dtype,
)
from pandas.core.dtypes.generic import (
ABCExtensionArray,
ABCIndexClass,
ABCPandasArray,
ABCSeries,
)
from pandas.core.dtypes.missing import isna
import pandas.core.common as com
if TYPE_CHECKING:
from pandas import ExtensionArray, Index, Series
def array(
data: Union[Sequence[object], AnyArrayLike],
dtype: Optional[Dtype] = None,
copy: bool = True,
) -> ExtensionArray:
"""
Create an array.
.. versionadded:: 0.24.0
Parameters
----------
data : Sequence of objects
The scalars inside `data` should be instances of the
scalar type for `dtype`. It's expected that `data`
represents a 1-dimensional array of data.
When `data` is an Index or Series, the underlying array
will be extracted from `data`.
dtype : str, np.dtype, or ExtensionDtype, optional
The dtype to use for the array. This may be a NumPy
dtype or an extension type registered with pandas using
:meth:`pandas.api.extensions.register_extension_dtype`.
If not specified, there are two possibilities:
1. When `data` is a :class:`Series`, :class:`Index`, or
:class:`ExtensionArray`, the `dtype` will be taken
from the data.
2. Otherwise, pandas will attempt to infer the `dtype`
from the data.
Note that when `data` is a NumPy array, ``data.dtype`` is
*not* used for inferring the array type. This is because
NumPy cannot represent all the types of data that can be
held in extension arrays.
Currently, pandas will infer an extension dtype for sequences of
============================== =====================================
Scalar Type Array Type
============================== =====================================
:class:`pandas.Interval` :class:`pandas.arrays.IntervalArray`
:class:`pandas.Period` :class:`pandas.arrays.PeriodArray`
:class:`datetime.datetime` :class:`pandas.arrays.DatetimeArray`
:class:`datetime.timedelta` :class:`pandas.arrays.TimedeltaArray`
:class:`int` :class:`pandas.arrays.IntegerArray`
:class:`float` :class:`pandas.arrays.FloatingArray`
:class:`str` :class:`pandas.arrays.StringArray`
:class:`bool` :class:`pandas.arrays.BooleanArray`
============================== =====================================
For all other cases, NumPy's usual inference rules will be used.
.. versionchanged:: 1.0.0
Pandas infers nullable-integer dtype for integer data,
string dtype for string data, and nullable-boolean dtype
for boolean data.
.. versionchanged:: 1.2.0
Pandas now also infers nullable-floating dtype for float-like
input data
copy : bool, default True
Whether to copy the data, even if not necessary. Depending
on the type of `data`, creating the new array may require
copying data, even if ``copy=False``.
Returns
-------
ExtensionArray
The newly created array.
Raises
------
ValueError
When `data` is not 1-dimensional.
See Also
--------
numpy.array : Construct a NumPy array.
Series : Construct a pandas Series.
Index : Construct a pandas Index.
arrays.PandasArray : ExtensionArray wrapping a NumPy array.
Series.array : Extract the array stored within a Series.
Notes
-----
Omitting the `dtype` argument means pandas will attempt to infer the
best array type from the values in the data. As new array types are
added by pandas and 3rd party libraries, the "best" array type may
change. We recommend specifying `dtype` to ensure that
1. the correct array type for the data is returned
2. the returned array type doesn't change as new extension types
are added by pandas and third-party libraries
Additionally, if the underlying memory representation of the returned
array matters, we recommend specifying the `dtype` as a concrete object
rather than a string alias or allowing it to be inferred. For example,
a future version of pandas or a 3rd-party library may include a
dedicated ExtensionArray for string data. In this event, the following
would no longer return a :class:`arrays.PandasArray` backed by a NumPy
array.
>>> pd.array(['a', 'b'], dtype=str)
<PandasArray>
['a', 'b']
Length: 2, dtype: str32
This would instead return the new ExtensionArray dedicated for string
data. If you really need the new array to be backed by a NumPy array,
specify that in the dtype.
>>> pd.array(['a', 'b'], dtype=np.dtype("<U1"))
<PandasArray>
['a', 'b']
Length: 2, dtype: str32
Finally, Pandas has arrays that mostly overlap with NumPy
* :class:`arrays.DatetimeArray`
* :class:`arrays.TimedeltaArray`
When data with a ``datetime64[ns]`` or ``timedelta64[ns]`` dtype is
passed, pandas will always return a ``DatetimeArray`` or ``TimedeltaArray``
rather than a ``PandasArray``. This is for symmetry with the case of
timezone-aware data, which NumPy does not natively support.
>>> pd.array(['2015', '2016'], dtype='datetime64[ns]')
<DatetimeArray>
['2015-01-01 00:00:00', '2016-01-01 00:00:00']
Length: 2, dtype: datetime64[ns]
>>> pd.array(["1H", "2H"], dtype='timedelta64[ns]')
<TimedeltaArray>
['0 days 01:00:00', '0 days 02:00:00']
Length: 2, dtype: timedelta64[ns]
Examples
--------
If a dtype is not specified, pandas will infer the best dtype from the values.
See the description of `dtype` for the types pandas infers for.
>>> pd.array([1, 2])
<IntegerArray>
[1, 2]
Length: 2, dtype: Int64
>>> pd.array([1, 2, np.nan])
<IntegerArray>
[1, 2, <NA>]
Length: 3, dtype: Int64
>>> pd.array([1.1, 2.2])
<FloatingArray>
[1.1, 2.2]
Length: 2, dtype: Float64
>>> pd.array(["a", None, "c"])
<StringArray>
['a', <NA>, 'c']
Length: 3, dtype: string
>>> pd.array([pd.Period('2000', freq="D"), pd.Period("2000", freq="D")])
<PeriodArray>
['2000-01-01', '2000-01-01']
Length: 2, dtype: period[D]
You can use the string alias for `dtype`
>>> pd.array(['a', 'b', 'a'], dtype='category')
['a', 'b', 'a']
Categories (2, object): ['a', 'b']
Or specify the actual dtype
>>> pd.array(['a', 'b', 'a'],
... dtype=pd.CategoricalDtype(['a', 'b', 'c'], ordered=True))
['a', 'b', 'a']
Categories (3, object): ['a' < 'b' < 'c']
If pandas does not infer a dedicated extension type a
:class:`arrays.PandasArray` is returned.
>>> pd.array([1 + 1j, 3 + 2j])
<PandasArray>
[(1+1j), (3+2j)]
Length: 2, dtype: complex128
As mentioned in the "Notes" section, new extension types may be added
in the future (by pandas or 3rd party libraries), causing the return
value to no longer be a :class:`arrays.PandasArray`. Specify the `dtype`
as a NumPy dtype if you need to ensure there's no future change in
behavior.
>>> pd.array([1, 2], dtype=np.dtype("int32"))
<PandasArray>
[1, 2]
Length: 2, dtype: int32
`data` must be 1-dimensional. A ValueError is raised when the input
has the wrong dimensionality.
>>> pd.array(1)
Traceback (most recent call last):
...
ValueError: Cannot pass scalar '1' to 'pandas.array'.
"""
from pandas.core.arrays import (
BooleanArray,
DatetimeArray,
FloatingArray,
IntegerArray,
IntervalArray,
PandasArray,
StringArray,
TimedeltaArray,
period_array,
)
if lib.is_scalar(data):
msg = f"Cannot pass scalar '{data}' to 'pandas.array'."
raise ValueError(msg)
if dtype is None and isinstance(
data, (ABCSeries, ABCIndexClass, ABCExtensionArray)
):
dtype = data.dtype
data = extract_array(data, extract_numpy=True)
# this returns None for not-found dtypes.
if isinstance(dtype, str):
dtype = registry.find(dtype) or dtype
if is_extension_array_dtype(dtype):
cls = cast(ExtensionDtype, dtype).construct_array_type()
return cls._from_sequence(data, dtype=dtype, copy=copy)
if dtype is None:
inferred_dtype = lib.infer_dtype(data, skipna=True)
if inferred_dtype == "period":
try:
return period_array(data, copy=copy)
except IncompatibleFrequency:
# We may have a mixture of frequencies.
# We choose to return an ndarray, rather than raising.
pass
elif inferred_dtype == "interval":
try:
return IntervalArray(data, copy=copy)
except ValueError:
# We may have a mixture of `closed` here.
# We choose to return an ndarray, rather than raising.
pass
elif inferred_dtype.startswith("datetime"):
# datetime, datetime64
try:
return DatetimeArray._from_sequence(data, copy=copy)
except ValueError:
# Mixture of timezones, fall back to PandasArray
pass
elif inferred_dtype.startswith("timedelta"):
# timedelta, timedelta64
return TimedeltaArray._from_sequence(data, copy=copy)
elif inferred_dtype == "string":
return StringArray._from_sequence(data, copy=copy)
elif inferred_dtype == "integer":
return IntegerArray._from_sequence(data, copy=copy)
elif inferred_dtype in ("floating", "mixed-integer-float"):
return FloatingArray._from_sequence(data, copy=copy)
elif inferred_dtype == "boolean":
return BooleanArray._from_sequence(data, copy=copy)
# Pandas overrides NumPy for
# 1. datetime64[ns]
# 2. timedelta64[ns]
# so that a DatetimeArray is returned.
if is_datetime64_ns_dtype(dtype):
return DatetimeArray._from_sequence(data, dtype=dtype, copy=copy)
elif is_timedelta64_ns_dtype(dtype):
return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy)
result = PandasArray._from_sequence(data, dtype=dtype, copy=copy)
return result
def extract_array(obj: AnyArrayLike, extract_numpy: bool = False) -> ArrayLike:
"""
Extract the ndarray or ExtensionArray from a Series or Index.
For all other types, `obj` is just returned as is.
Parameters
----------
obj : object
For Series / Index, the underlying ExtensionArray is unboxed.
For Numpy-backed ExtensionArrays, the ndarray is extracted.
extract_numpy : bool, default False
Whether to extract the ndarray from a PandasArray
Returns
-------
arr : object
Examples
--------
>>> extract_array(pd.Series(['a', 'b', 'c'], dtype='category'))
['a', 'b', 'c']
Categories (3, object): ['a', 'b', 'c']
Other objects like lists, arrays, and DataFrames are just passed through.
>>> extract_array([1, 2, 3])
[1, 2, 3]
For an ndarray-backed Series / Index a PandasArray is returned.
>>> extract_array(pd.Series([1, 2, 3]))
<PandasArray>
[1, 2, 3]
Length: 3, dtype: int64
To extract all the way down to the ndarray, pass ``extract_numpy=True``.
>>> extract_array(pd.Series([1, 2, 3]), extract_numpy=True)
array([1, 2, 3])
"""
if isinstance(obj, (ABCIndexClass, ABCSeries)):
obj = obj.array
if extract_numpy and isinstance(obj, ABCPandasArray):
obj = obj.to_numpy()
# error: Incompatible return value type (got "Index", expected "ExtensionArray")
# error: Incompatible return value type (got "Series", expected "ExtensionArray")
return obj # type: ignore[return-value]
def sanitize_array(
data,
index: Optional[Index],
dtype: Optional[DtypeObj] = None,
copy: bool = False,
raise_cast_failure: bool = False,
) -> ArrayLike:
"""
Sanitize input data to an ndarray or ExtensionArray, copy if specified,
coerce to the dtype if specified.
"""
if isinstance(data, ma.MaskedArray):
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = maybe_upcast(data, copy=True)
data.soften_mask() # set hardmask False if it was True
data[mask] = fill_value
else:
data = data.copy()
# extract ndarray or ExtensionArray, ensure we have no PandasArray
data = extract_array(data, extract_numpy=True)
# GH#846
if isinstance(data, np.ndarray):
if dtype is not None and is_float_dtype(data.dtype) and is_integer_dtype(dtype):
# possibility of nan -> garbage
try:
subarr = _try_cast(data, dtype, copy, True)
except ValueError:
if copy:
subarr = data.copy()
else:
subarr = np.array(data, copy=False)
else:
            # we will try to copy by-definition here
subarr = _try_cast(data, dtype, copy, raise_cast_failure)
elif isinstance(data, ABCExtensionArray):
# it is already ensured above this is not a PandasArray
subarr = data
if dtype is not None:
subarr = subarr.astype(dtype, copy=copy)
elif copy:
subarr = subarr.copy()
return subarr
elif isinstance(data, (list, tuple, abc.Set, abc.ValuesView)) and len(data) > 0:
if isinstance(data, set):
# Raise only for unordered sets, e.g., not for dict_keys
raise TypeError("Set type is unordered")
data = list(data)
if dtype is not None:
subarr = _try_cast(data, dtype, copy, raise_cast_failure)
else:
subarr = maybe_convert_platform(data)
subarr = maybe_cast_to_datetime(subarr, dtype)
elif isinstance(data, range):
# GH#16804
arr = np.arange(data.start, data.stop, data.step, dtype="int64")
subarr = _try_cast(arr, dtype, copy, raise_cast_failure)
elif lib.is_scalar(data) and index is not None and dtype is not None:
data = maybe_cast_to_datetime(data, dtype)
if not lib.is_scalar(data):
data = data[0]
subarr = construct_1d_arraylike_from_scalar(data, len(index), dtype)
else:
subarr = _try_cast(data, dtype, copy, raise_cast_failure)
# scalar like, GH
if getattr(subarr, "ndim", 0) == 0:
if isinstance(data, list): # pragma: no cover
subarr = np.array(data, dtype=object)
elif index is not None:
value = data
# figure out the dtype from the value (upcast if necessary)
if dtype is None:
dtype, value = infer_dtype_from_scalar(value, pandas_dtype=True)
else:
# need to possibly convert the value here
value = maybe_cast_to_datetime(value, dtype)
subarr = construct_1d_arraylike_from_scalar(value, len(index), dtype)
else:
return subarr.item()
# the result that we want
elif subarr.ndim == 1:
if index is not None:
# a 1-element ndarray
if len(subarr) != len(index) and len(subarr) == 1:
subarr = construct_1d_arraylike_from_scalar(
subarr[0], len(index), subarr.dtype
)
elif subarr.ndim > 1:
if isinstance(data, np.ndarray):
raise ValueError("Data must be 1-dimensional")
else:
subarr = com.asarray_tuplesafe(data, dtype=dtype)
if not (is_extension_array_dtype(subarr.dtype) or is_extension_array_dtype(dtype)):
# This is to prevent mixed-type Series getting all casted to
# NumPy string type, e.g. NaN --> '-1#IND'.
if issubclass(subarr.dtype.type, str):
# GH#16605
# If not empty convert the data to dtype
# GH#19853: If data is a scalar, subarr has already the result
if not lib.is_scalar(data):
if not np.all(isna(data)):
data = np.array(data, dtype=dtype, copy=False)
subarr = np.array(data, dtype=object, copy=copy)
is_object_or_str_dtype = is_object_dtype(dtype) or is_string_dtype(dtype)
if is_object_dtype(subarr.dtype) and not is_object_or_str_dtype:
inferred = lib.infer_dtype(subarr, skipna=False)
if inferred in {"interval", "period"}:
subarr = array(subarr)
return subarr
def _try_cast(arr, dtype: Optional[DtypeObj], copy: bool, raise_cast_failure: bool):
"""
Convert input to numpy ndarray and optionally cast to a given dtype.
Parameters
----------
arr : ndarray, scalar, list, tuple, iterator (catchall)
Excludes: ExtensionArray, Series, Index.
dtype : np.dtype, ExtensionDtype or None
copy : bool
If False, don't copy the data if not needed.
raise_cast_failure : bool
If True, and if a dtype is specified, raise errors during casting.
Otherwise an object array is returned.
"""
# perf shortcut as this is the most common case
if isinstance(arr, np.ndarray):
if maybe_castable(arr) and not copy and dtype is None:
return arr
if isinstance(dtype, ExtensionDtype) and (dtype.kind != "M" or is_sparse(dtype)):
# create an extension array from its dtype
# DatetimeTZ case needs to go through maybe_cast_to_datetime but
# SparseDtype does not
array_type = dtype.construct_array_type()._from_sequence
subarr = array_type(arr, dtype=dtype, copy=copy)
return subarr
try:
# GH#15832: Check if we are requesting a numeric dtype and
# that we can convert the data to the requested dtype.
if is_integer_dtype(dtype):
# this will raise if we have e.g. floats
maybe_cast_to_integer_array(arr, dtype)
subarr = arr
else:
subarr = maybe_cast_to_datetime(arr, dtype)
# Take care in creating object arrays (but iterators are not
# supported):
if is_object_dtype(dtype) and (
is_list_like(subarr)
and not (is_iterator(subarr) or isinstance(subarr, np.ndarray))
):
subarr = construct_1d_object_array_from_listlike(subarr)
elif not is_extension_array_dtype(subarr):
subarr = construct_1d_ndarray_preserving_na(subarr, dtype, copy=copy)
except OutOfBoundsDatetime:
# in case of out of bound datetime64 -> always raise
raise
except (ValueError, TypeError):
if dtype is not None and raise_cast_failure:
raise
else:
subarr = np.array(arr, dtype=object, copy=copy)
return subarr
def is_empty_data(data: Any) -> bool:
"""
Utility to check if a Series is instantiated with empty data,
which does not contain dtype information.
Parameters
----------
data : array-like, Iterable, dict, or scalar value
Contains data stored in Series.
Returns
-------
bool
"""
is_none = data is None
is_list_like_without_dtype = is_list_like(data) and not hasattr(data, "dtype")
is_simple_empty = is_list_like_without_dtype and not data
return is_none or is_simple_empty
def create_series_with_explicit_dtype(
data: Any = None,
index: Optional[Union[ArrayLike, Index]] = None,
dtype: Optional[Dtype] = None,
name: Optional[str] = None,
copy: bool = False,
fastpath: bool = False,
dtype_if_empty: Dtype = object,
) -> Series:
"""
Helper to pass an explicit dtype when instantiating an empty Series.
This silences a DeprecationWarning described in GitHub-17261.
Parameters
----------
data : Mirrored from Series.__init__
index : Mirrored from Series.__init__
dtype : Mirrored from Series.__init__
name : Mirrored from Series.__init__
copy : Mirrored from Series.__init__
fastpath : Mirrored from Series.__init__
dtype_if_empty : str, numpy.dtype, or ExtensionDtype
This dtype will be passed explicitly if an empty Series will
be instantiated.
Returns
-------
Series
"""
from pandas.core.series import Series
if is_empty_data(data) and dtype is None:
dtype = dtype_if_empty
return Series(
data=data, index=index, dtype=dtype, name=name, copy=copy, fastpath=fastpath
)
| 33.578867 | 88 | 0.626123 | [
"BSD-3-Clause"
] | BhavarthShah/pandas | pandas/core/construction.py | 21,927 | Python |
"""
DuckDuckGo (Images)
@website https://duckduckgo.com/
@provide-api yes (https://duckduckgo.com/api),
but images are not supported
@using-api no
@results JSON (site requires js to get images)
@stable no (JSON can change)
@parse url, title, img_src
@todo avoid extra request
"""
from json import loads
from searx.engines.xpath import extract_text
from searx.engines.duckduckgo import (
_fetch_supported_languages, supported_languages_url,
get_region_code, language_aliases
)
from searx.poolrequests import get
from searx.url_utils import urlencode
# engine dependent config
categories = ['images']
paging = True
language_support = True
safesearch = True
# search-url
images_url = 'https://duckduckgo.com/i.js?{query}&s={offset}&p={safesearch}&o=json&vqd={vqd}'
site_url = 'https://duckduckgo.com/?{query}&iar=images&iax=1&ia=images'
# run query in site to get vqd number needed for requesting images
# TODO: find a way to get this number without an extra request (is it a hash of the query?)
def get_vqd(query):
res = get(site_url.format(query=urlencode({'q': query})))
content = res.text
vqd = content[content.find('vqd=\'') + 5:]
vqd = vqd[:vqd.find('\'')]
return vqd
# do search-request
def request(query, params):
# to avoid running actual external requests when testing
if 'is_test' not in params:
vqd = get_vqd(query)
else:
vqd = '12345'
offset = (params['pageno'] - 1) * 50
safesearch = params['safesearch'] - 1
region_code = get_region_code(params['language'], lang_list=supported_languages)
params['url'] = images_url.format(
query=urlencode({'q': query, 'l': region_code}), offset=offset, safesearch=safesearch, vqd=vqd)
return params
# get response from search-request
def response(resp):
results = []
content = resp.text
try:
res_json = loads(content)
except:
return []
# parse results
for result in res_json['results']:
title = result['title']
url = result['url']
thumbnail = result['thumbnail']
image = result['image']
# append result
results.append({'template': 'images.html',
'title': title,
'content': '',
'thumbnail_src': thumbnail,
'img_src': image,
'url': url})
return results
| 27.076923 | 103 | 0.631088 | [
"MIT"
] | AlexRogalskiy/DevArtifacts | master/searx-master/searx/engines/duckduckgo_images.py | 2,464 | Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Havenir and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class PortOfLoading(Document):
pass
| 23.545455 | 49 | 0.776062 | [
"MIT"
] | umar567/shipment-repo | shipments/shipments/doctype/port_of_loading/port_of_loading.py | 259 | Python |
import os
import sys
import argparse
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import pickle
def normalize(image):
return (image - image.min()) / (image.max() - image.min())
layer_activations = None
def filter_explanation(x, model, cnnid, filterid, iteration=100, lr=1):
    # x: the input images used to drive the visualization
    # cnnid, filterid: which cnn layer, and which filter inside it, to inspect
    model.eval()
    def hook(model, input, output):
        global layer_activations
        layer_activations = output
    hook_handle = model.cnn[cnnid].register_forward_hook(hook)
    # once layer `cnnid` has finished its forward pass the hook is called first,
    # and only then does the forward pass continue to the next cnn layer
    # Filter activation: first observe the activation map of x for the chosen filter
    model(x.cuda())
    # actually execute the forward pass
    filter_activations = layer_activations[:, filterid, :, :].detach().cpu()
    # pull out the activation map of the filter selected by the `filterid` argument
    x = x.cuda()
    x.requires_grad_()
    optimizer = Adam([x], lr=lr)
    # use the gradients and the optimizer to gradually modify the input image
    # so that the filter activation becomes larger and larger
    for iter in range(iteration):
        optimizer.zero_grad()
        model(x)
        objective = -layer_activations[:, filterid, :, :].sum()
        # probe how small changes of the image affect the activation;
        # the minus sign turns minimization into maximization
        objective.backward()
        optimizer.step()
        # modify the input image to maximize the filter activation
    filter_visualization = x.detach().cpu().squeeze()[0]
    # the image update is finished and only needs to be plotted,
    # so it can be detached and converted to a cpu tensor
    hook_handle.remove()
    # once a hook is registered on the model it stays there; if more hooks keep
    # being registered, every forward pass has more and more extra work to do,
    # so the hook must be removed here
return filter_activations, filter_visualization
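# Illustrative call (a sketch, not part of the original assignment code): it
# assumes `model` is a trained CNN whose convolutional layers live in
# `model.cnn` (an nn.Sequential), `images` is a float tensor of shape
# (N, C, H, W), and a CUDA device is available, since the function moves the
# input to the GPU internally.
#
#     activations, visualization = filter_explanation(
#         images, model, cnnid=5, filterid=0, iteration=100, lr=0.1)
#     plt.imshow(normalize(visualization.permute(1, 2, 0)))
#     plt.show()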
| 28.578125 | 77 | 0.697649 | [
"MIT"
] | Zeng-WH/ML2020 | CNN/code/filter_visualiton.py | 2,173 | Python |
from functools import wraps
import sys
import traceback
from ploomber.io import TerminalWriter
from ploomber.exceptions import DAGBuildError, DAGRenderError
# TODO: there are two types of cli commands: the ones that execute user's
# code (ploomber build/task) and the ones that parse a dag/task but do not
# execute it. For the former, we want to capture errors and display them with
# colors so it's easier for the user to understand what went wrong with their
# code. For the latter, the errors are raise by us, hence, we only need to
# print the message and exit. Currently, all CLI end points (except ploomber
# nb) are decorated with @cli_endpoint but we should change it to
# @command_endpoint
def cli_endpoint(fn):
"""
Decorator for command line endpoints that execute dags or tasks. It runs
the decorated function, captures exception (if any), sends a colored
traceback to standard error and exits with code 1.
Notes
-----
Functions decorated with this must be called with keyword arguments
Call some_endpoint(catch_exception=False) to disable this behavior (e.g.
for testing)
"""
@wraps(fn)
def wrapper(catch_exception=True, **kwargs):
if catch_exception:
try:
fn(**kwargs)
# these already color output
except (DAGBuildError, DAGRenderError):
error = traceback.format_exc()
color = False
except Exception:
error = traceback.format_exc()
color = True
else:
error = None
if error:
if color:
tw = TerminalWriter(file=sys.stderr)
tw._write_source(error.splitlines())
else:
print(error, file=sys.stderr)
sys.exit(1)
else:
fn(**kwargs)
return wrapper
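# Usage sketch (illustration only; `build` is a made-up endpoint): functions
# wrapped with cli_endpoint must be called with keyword arguments, and tests
# can bypass the error handling with catch_exception=False.
#
#     @cli_endpoint
#     def build(force=False):
#         ...
#
#     build(force=True)                         # colored traceback + exit(1) on error
#     build(catch_exception=False, force=True)  # let exceptions propagate (testing)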
# FIXME: capture only certain types of exceptions. If it's something we dind't
# raise, we'd like to see the full traceback
def command_endpoint(fn):
"""
Decorator for command line endpoints that only parse dags or tasks but do
    not execute them. If it fails, it prints the error message to stderr and
    exits with code 1.
"""
@wraps(fn)
def wrapper(**kwargs):
try:
fn(**kwargs)
except Exception as e:
print(f'Error: {e}', file=sys.stderr)
sys.exit(1)
return wrapper
| 32.552632 | 78 | 0.630558 | [
"Apache-2.0"
] | abhishak3/ploomber | src/ploomber/cli/io.py | 2,474 | Python |
import json
import os
import shutil
import tempfile
def copytree(src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def call(command, ignore_error=False):
ret = os.system(command)
if ret != 0 and not ignore_error:
raise Exception("Command failed: %s" % command)
def clean_gh_pages():
call('git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*" 1>/dev/null')
call("git fetch origin -q")
call("git checkout gh-pages")
if os.path.exists("en"):
shutil.rmtree("en")
def build_and_copy(branch, folder_name, versions_available, themes_dir, validate_links=False):
call("git checkout %s" % branch)
call("git pull origin %s" % branch)
with open('versions.json', 'w') as f:
f.write(json.dumps(versions_available))
shutil.rmtree("_themes")
copytree(themes_dir, "_themes")
call("make html > /dev/null")
if validate_links:
call("make spelling > /dev/null")
call("make linkcheck")
call("make latexpdf > /dev/null")
tmp_dir = tempfile.mkdtemp()
copytree("_build/html/", tmp_dir)
shutil.copy2("_build/latex/conan.pdf", tmp_dir)
shutil.rmtree("_build")
# Go to deploy branch, copy new files and commit
call("git stash")
call("git stash drop || true")
call("git clean -d -f")
call("git checkout gh-pages")
if not os.path.exists("en"):
os.mkdir("en")
version_folders = ["en/%s" % folder_name]
if branch == "master":
version_folders.append("en/latest")
for version_folder in version_folders:
if os.path.exists(version_folder):
shutil.rmtree(version_folder)
os.mkdir(version_folder)
copytree(tmp_dir, version_folder)
call("git add -A .")
call("git commit --message 'committed version %s'" % folder_name, ignore_error=True)
def should_deploy():
if not os.getenv("TRAVIS_BRANCH", None) == "master":
print("Skipping deploy for not master branch")
return False
if os.getenv("TRAVIS_PULL_REQUEST", "") != "false":
print("Deploy skipped, This is a PR in the main repository")
return False
if not os.getenv("GITHUB_API_KEY"):
print("Deploy skipped, missing GITHUB_API_KEY. Is this a PR?")
return False
return True
def deploy():
call('rm -rf .git')
call('git init .')
call('git add .')
call('git checkout -b gh-pages')
call('git commit -m "Cleared web"')
call('git remote add origin-pages '
'https://%[email protected]/conan-io/docs.git > /dev/null 2>&1' % os.getenv("GITHUB_API_KEY"))
call('git push origin-pages gh-pages --force')
if __name__ == "__main__":
if should_deploy():
# Copy the _themes to be able to share them between old versions
themes_dir = tempfile.mkdtemp()
copytree("_themes", themes_dir)
clean_gh_pages()
versions_dict = {"master": "1.25",
"release/1.24.1": "1.24",
"release/1.23.0": "1.23",
"release/1.22.3": "1.22",
"release/1.21.3": "1.21",
"release/1.20.5": "1.20",
"release/1.19.3": "1.19",
"release/1.18.5": "1.18",
"release/1.17.2": "1.17",
"release/1.16.1": "1.16",
"release/1.15.2": "1.15",
"release/1.14.5": "1.14",
"release/1.13.3": "1.13",
"release/1.12.3": "1.12",
"release/1.11.2": "1.11",
"release/1.10.2": "1.10",
"release/1.9.4": "1.9",
"release/1.8.4": "1.8",
"release/1.7.4": "1.7",
"release/1.6.1": "1.6",
"release/1.5.2": "1.5",
"release/1.4.5": "1.4",
"release/1.3.3": "1.3"}
for branch, folder_name in versions_dict.items():
print("Building {}...".format(branch))
build_and_copy(branch, folder_name, versions_dict, themes_dir)
deploy()
else:
call("make html > /dev/null")
call("make spelling")
call("make linkcheck")
| 31.321918 | 98 | 0.535535 | [
"MIT"
] | claremacrae/docs | deploy_gh_pages.py | 4,573 | Python |
# -*- coding: utf-8 -*-
"""็ฌ่ซ้
็ฝฎๆไปถ"""
import os
# MYSQL
MYSQL_IP = "localhost"
MYSQL_PORT = 3306
MYSQL_DB = "feapder"
MYSQL_USER_NAME = "feapder"
MYSQL_USER_PASS = "feapder123"
# REDIS
# IP:PORT
REDISDB_IP_PORTS = "localhost:6379"
REDISDB_USER_PASS = ""
REDISDB_DB = 0
# # Spider-related settings
# # COLLECTOR
COLLECTOR_SLEEP_TIME = 1  # interval for pulling tasks from the task queue into the memory queue
COLLECTOR_TASK_COUNT = 100  # number of tasks fetched per pull
#
# # SPIDER
SPIDER_THREAD_COUNT = 1  # spider concurrency
# SPIDER_SLEEP_TIME = 0 # download interval (sleep time after parsing one response)
# SPIDER_MAX_RETRY_TIMES = 100 # maximum number of retries per request
# # Retry failed requests; a request counts as failed once it exceeds the maximum number of retries
# RETRY_FAILED_REQUESTS = False
# # Request lost timeout: a request is re-issued after this time (this is not the network timeout), in seconds
# REQUEST_LOST_TIMEOUT = 600 # 10 minutes
# # Save failed requests
# SAVE_FAILED_REQUEST = True
#
# # Download cache backed by redis; memory is limited, so only use it while testing
# RESPONSE_CACHED_ENABLE = False # enable the download cache; recommended (True) for data that is costly to fetch or re-collected often
# RESPONSE_CACHED_EXPIRE_TIME = 3600 # cache lifetime, in seconds
# RESPONSE_CACHED_USED = False # whether to read from the cache; can be set to True when backfilling data
#
# WARNING_FAILED_COUNT = 1000 # warn once the number of failed tasks exceeds WARNING_FAILED_COUNT
#
# # Spider start-up behaviour
# # Whether the spider stops automatically after finishing its requests, or keeps waiting for tasks
# AUTO_STOP_WHEN_SPIDER_DONE = True
#
#
# # Proxy settings
# PROXY_EXTRACT_API = None # proxy extraction API; the returned proxies are separated by \r\n
# PROXY_ENABLE = True
#
# # Random headers
# RANDOM_HEADERS = True
# # Use a session for requests
# USE_SESSION = False
#
# # Deduplication
# ITEM_FILTER_ENABLE = False # item deduplication
# REQUEST_FILTER_ENABLE = False # request deduplication
#
# # Alerts
# DINGDING_WARNING_URL = "" # DingTalk bot api
# DINGDING_WARNING_PHONE = "" # person to alert
# LINGXI_TOKEN = "" # Lingxi alert token
#
# LOG_NAME = os.path.basename(os.getcwd())
# LOG_PATH = "log/%s.log" % LOG_NAME # path where logs are stored
# LOG_LEVEL = "DEBUG"
# LOG_IS_WRITE_TO_FILE = False
# OTHERS_LOG_LEVAL = "ERROR" # log level for third-party libraries
| 23.309859 | 70 | 0.735347 | [
"MIT"
] | AYiXi/feapder | tests/spider/setting.py | 2,239 | Python |
import random
class Playlist:
"""
A list of routines (aka a list of light effect layer lists, or a list of
flame sequences) all intended for use in a single context (e.g. when the
headset is on). One routine in the playlist is selected at any given time.
"""
def __init__(self, routines, index = 0, shuffle=False):
self.routines = routines
self.selected = index
self.order = range(len(self.routines))
self.shuffle = shuffle
if shuffle:
random.shuffle(self.order)
self.print_selection()
def print_selection(self):
print "Playlist selecting index %d:" % self.selected
for x in self.routines[self.order[self.selected]]:
print " ", repr(x)
def selection(self):
return self.routines[self.order[self.selected]]
def advance(self):
"""
Switch the selected routine to the next one in the list, either
consecutively or randomly depending on whether shuffle is true
"""
if len(self.routines) > 1:
selected = self.selected + 1
if selected >= len(self.routines):
if self.shuffle:
random.shuffle(self.order)
selected = 0
self.selected = selected
self.print_selection()
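# Usage sketch (illustration only; the routine contents are placeholders):
#
#     playlist = Playlist([[layer_a], [layer_b, layer_c]], shuffle=True)
#     playlist.selection()   # the currently selected routine (a list of layers)
#     playlist.advance()     # move on; reshuffles once the list wraps around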
| 34.025 | 78 | 0.588538 | [
"Apache-2.0"
] | FlamingLotusGirls/soma | pier14/opc-client/playlist.py | 1,361 | Python |
"""
Small script to generate gdal_warp commands
for projecting rasters to the Behrmann projection
to be able to run the generated bat file you should have gdalwarp in your path or run it from an OSGeo4W Shell
"""
import os
root = r"D:\a\data\BioOracle_scenarios_30s_min250"
output = root + r"_equal_area" #os.path.abspath(os.path.join(root, r'..\ascii_equalarea'))
nodata = "-9999"
def create_bat():
proj = "+proj=cea +lon_0=0 +lat_ts=30 +x_0=0 +y_0=0 +datum=WGS84 +ellps=WGS84 +units=m +no_defs"
with open('project_to_behrmann.bat', 'w') as bat:
for r, dirs, files in os.walk(root):
for f in files:
n, ext = os.path.splitext(f)
if ext == '.asc':
## output of ascii files from gdalwarp is not supported
temptiff = os.path.join(output, n + '.tiff')
                    bat.write('gdalwarp -of GTiff -multi -srcnodata %s -dstnodata %s -t_srs "%s" "%s" "%s"\n' % (nodata, nodata, proj, os.path.join(r, f), temptiff))
## convert output tiff to ascii
outdir = r.replace(root, output)
if not os.path.exists(outdir): os.makedirs(outdir)
bat.write('gdal_translate -of AAIGrid "%s" "%s"\n' % (temptiff, os.path.join(outdir,f)))
## delete temp file
bat.write('del "%s"\n'%temptiff)
if __name__ == '__main__':
create_bat()
| 43.424242 | 157 | 0.586183 | [
"Unlicense"
] | samuelbosch/phd | rasters/project_to_behrmann.py | 1,433 | Python |
import json
import logging
from django.http import JsonResponse
logger = logging.getLogger('log')
from wxcloudrun.utils.SQL.DBUtils import DBUtils
def test1(request):
print(request.headers)
logger.info(request.headers)
rsp = JsonResponse({'code': 0, 'errorMsg': '๐'}, json_dumps_params={'ensure_ascii': False})
return rsp
| 19.166667 | 95 | 0.727536 | [
"MIT"
] | nixiaopan/nipan | wxcloudrun/np_test/views.py | 348 | Python |
import sqlite3
from checktheplug.models.Server import Server
"""
Operations to manage accessing the server database.
"""
class ServerDao:
"""
Sets up the object with the sql connection.
"""
def __init__(self, settings):
self.conn = sqlite3.connect(settings.database)
"""
Add Server to the database.
"""
def add(self, new_server):
if new_server:
try:
with self.conn:
cur = self.conn.cursor()
cur.execute("INSERT INTO servers(host, url) values(?, ?)", (new_server.host, new_server.url))
return(Server(cur.lastrowid, new_server.host, new_server.url), None)
except sqlite3.IntegrityError as er:
return (None, "There was a db issue: " + str(er))
else:
return (None, "No server passed in")
"""
Find all the servers for a particular app.
"""
def find_by_app_id(self, app_id):
try:
with self.conn:
cur = self.conn.cursor()
cur.execute("SELECT id, host, url from servers where app_id = ?", (app_id,))
server_rows = cur.fetchall()
return (list(map(lambda x: Server(x[0], x[1], x[2], app_id), server_rows)), None)
except Exception as er:
return (None, "There was a db issue: " + str(er))
"""
    Find up to `quantity` servers that are not yet tied to an app.
"""
def find_available_servers(self, quantity):
try:
with self.conn:
cur = self.conn.cursor()
cur.execute("SELECT id, host, url from servers where app_id = null limit = ?", (quantity,))
server_rows = cur.fetchall()
return (list(map(lambda x: Server(x[0], x[1], x[2], None), server_rows)), None)
except Exception as er:
return (None, "There was a db issue: " + str(er))
"""
Retrieve all servers.
"""
def retrieve_all_servers(self):
try:
with self.conn:
cur = self.conn.cursor()
cur.execute("SELECT id, host, url from servers")
server_rows = cur.fetchall()
return (list(map(lambda x: Server(x[0], x[1], x[2], None), server_rows)), None)
except Exception as er:
return (None, "There was a db issue: " + str(er))
"""
Tie an app to a number of servers.
"""
def tie_app_to_servers(self, app_id, available_servers):
try:
with self.conn:
cur = self.conn.cursor()
                server_id_string = ', '.join(["?"] * len(available_servers))
cur.execute("update servers set app_id = ? where id in ({0})".format(server_id_string), tuple([app_id] + available_servers))
return (None, "ok")
except Exception as er:
return (None, "There was a db issue: " + str(er)) | 37.012346 | 140 | 0.53936 | [
"MIT"
] | maximx1/checktheplug | checktheplug/data/ServerDao.py | 2,998 | Python |
from rest_framework import serializers
from apps.challenge.models import Clan
class ClanSerializer(serializers.ModelSerializer):
class Meta:
model = Clan
fields = (
'name', 'leader', 'image', 'score', 'wins', 'losses', 'draws'
)
| 22.666667 | 73 | 0.632353 | [
"MIT"
] | SharifAIChallenge/AIC21-Backend | apps/challenge/serializers/clan.py | 272 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=25
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[1])) # number=7
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=18
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=19
c.append(cirq.H.on(input_qubit[0])) # number=20
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=22
c.append(cirq.X.on(input_qubit[2])) # number=23
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=24
c.append(cirq.H.on(input_qubit[0])) # number=10
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=11
c.append(cirq.H.on(input_qubit[0])) # number=12
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=8
c.append(cirq.H.on(input_qubit[0])) # number=13
c.append(cirq.CZ.on(input_qubit[2],input_qubit[0])) # number=14
c.append(cirq.H.on(input_qubit[0])) # number=15
c.append(cirq.X.on(input_qubit[2])) # number=16
c.append(cirq.X.on(input_qubit[2])) # number=17
# circuit end
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =0
info = cirq.final_state_vector(circuit)
qubits = round(log2(len(info)))
frequencies = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
writefile = open("../data/startCirq_Class840.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | 32.068493 | 80 | 0.668518 | [
"BSD-3-Clause"
] | UCLA-SEAL/QDiff | data/cirq_new/cirq_program/startCirq_Class840.py | 2,341 | Python |
from seleniumbase import BaseCase
from werkzeug.security import generate_password_hash
from qa327_test.conftest import base_url
from qa327.models import User, Ticket
# Mock a sample user
TEST_USER = User(
email='[email protected]',
name='test_frontend',
password=generate_password_hash('test_frontend'),
balance=500
)
TEST_USER_SELLER = User(
email='[email protected]',
name='test_seller',
password=generate_password_hash('Password99!'),
balance=500
)
# Mock a sample ticket
TEST_TICKET = Ticket(
name='helloworld',
seller=TEST_USER_SELLER,
price=20,
quantity=20,
expires="20220101"
)
class GeekBaseCase(BaseCase):
'''
Selenium base case with some
GeekSeek utilities
'''
def assert_flash(self, text):
'''asserts that message exists in flashes'''
for flash_dom in self.find_elements('.flash'):
if flash_dom.text == text:
return
print(flash_dom.text)
raise AssertionError(f'Flash not found for text "{text}"')
def login_test_user(self, email=TEST_USER.email, password='test_frontend'):
'''login our test user'''
self.open(base_url+'/login')
self.input('#email', email)
self.input('#password', password)
self.click('#btn-submit') | 26.938776 | 79 | 0.669697 | [
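# Usage sketch (illustration only; the flash text is made up): concrete
# front-end tests subclass this helper and combine the utilities above.
#
#     class TestLogin(GeekBaseCase):
#         def test_login_flash(self):
#             self.login_test_user()
#             self.open(base_url + '/')
#             self.assert_flash('Hi test_frontend')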
"Apache-2.0",
"MIT"
] | nicoleooi/cmpe327 | qa327_test/frontend/geek_base.py | 1,320 | Python |
import pytest
from vdirsyncer.storage.dav import _BAD_XML_CHARS
from vdirsyncer.storage.dav import _merge_xml
from vdirsyncer.storage.dav import _parse_xml
def test_xml_utilities():
x = _parse_xml(
b"""<?xml version="1.0" encoding="UTF-8" ?>
<multistatus xmlns="DAV:">
<response>
<propstat>
<status>HTTP/1.1 404 Not Found</status>
<prop>
<getcontenttype/>
</prop>
</propstat>
<propstat>
<prop>
<resourcetype>
<collection/>
</resourcetype>
</prop>
</propstat>
</response>
</multistatus>
"""
)
response = x.find("{DAV:}response")
props = _merge_xml(response.findall("{DAV:}propstat/{DAV:}prop"))
assert props.find("{DAV:}resourcetype/{DAV:}collection") is not None
assert props.find("{DAV:}getcontenttype") is not None
@pytest.mark.parametrize("char", range(32))
def test_xml_specialchars(char):
x = _parse_xml(
'<?xml version="1.0" encoding="UTF-8" ?>'
"<foo>ye{}s\r\n"
"hello</foo>".format(chr(char)).encode("ascii")
)
if char in _BAD_XML_CHARS:
assert x.text == "yes\nhello"
| 29.085106 | 72 | 0.516459 | [
"BSD-3-Clause"
] | Bleala/vdirsyncer | tests/storage/dav/test_main.py | 1,367 | Python |
from __future__ import division
import numpy as np
from numpy.random import rand
import pandas as pd
# --- List of available filters
FILTERS=[
{'name':'Moving average','param':100,'paramName':'Window Size','paramRange':[0,100000],'increment':1},
{'name':'Low pass 1st order','param':1.0,'paramName':'Cutoff Freq.','paramRange':[0.0001,100000],'increment':0.1},
{'name':'High pass 1st order','param':1.0,'paramName':'Cutoff Freq.','paramRange':[0.0001,100000],'increment':0.1},
]
SAMPLERS=[
{'name':'Replace', 'param':[], 'paramName':'New x'},
{'name':'Insert', 'param':[], 'paramName':'Insert list'},
{'name':'Remove', 'param':[], 'paramName':'Remove list'},
{'name':'Every n', 'param':2 , 'paramName':'n'},
{'name':'Delta x', 'param':0.1, 'paramName':'dx'},
]
def reject_outliers(y, x=None, m = 2., replaceNaN=True):
""" Reject outliers:
If replaceNaN is true: they are replaced by NaN
Otherwise they are removed
"""
if m==0:
# No rejection...
pass
else:
dd = np.abs(y - np.nanmedian(y))
mdev = np.nanmedian(dd)
if mdev:
ss = dd/mdev
b=ss<m
if replaceNaN:
y=y.copy()
y[~b]=np.nan
else:
y=y[b]
if x is not None:
x= x[b]
if x is None:
return y
else:
return x, y
# --------------------------------------------------------------------------------}
# --- Resampling
# --------------------------------------------------------------------------------{
def multiInterp(x, xp, fp, extrap='bounded'):
j = np.searchsorted(xp, x) - 1
dd = np.zeros(len(x))
bOK = np.logical_and(j>=0, j< len(xp)-1)
bLower =j<0
bUpper =j>=len(xp)-1
jOK = j[bOK]
#import pdb; pdb.set_trace()
dd[bOK] = (x[bOK] - xp[jOK]) / (xp[jOK + 1] - xp[jOK])
jBef=j
jAft=j+1
#
# Use first and last values for anything beyond xp
jAft[bUpper] = len(xp)-1
jBef[bUpper] = len(xp)-1
jAft[bLower] = 0
jBef[bLower] = 0
if extrap=='bounded':
pass
# OK
elif extrap=='nan':
dd[~bOK] = np.nan
else:
raise NotImplementedError()
return (1 - dd) * fp[:,jBef] + fp[:,jAft] * dd
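# Quick sketch of what multiInterp returns (several signals sharing one x
# vector are interpolated row-wise; 'bounded' extrapolation clamps to the
# end values):
#
#     xp = np.array([0., 1., 2.])
#     fp = np.array([[0., 10., 20.],
#                    [5.,  5.,  5.]])
#     multiInterp(np.array([0.5, 2.5]), xp, fp)
#     # -> array([[ 5., 20.],
#     #           [ 5.,  5.]])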
def resample_interp(x_old, x_new, y_old=None, df_old=None):
#x_new=np.sort(x_new)
if df_old is not None:
# --- Method 1 (pandas)
#df_new = df_old.copy()
#df_new = df_new.set_index(x_old)
#df_new = df_new.reindex(df_new.index | x_new)
#df_new = df_new.interpolate().loc[x_new]
#df_new = df_new.reset_index()
# --- Method 2 interp storing dx
data_new=multiInterp(x_new, x_old, df_old.values.T)
df_new = pd.DataFrame(data=data_new.T, columns=df_old.columns.values)
return x_new, df_new
if y_old is not None:
return x_new, np.interp(x_new, x_old, y_old)
def applySamplerDF(df_old, x_col, sampDict):
x_old=df_old[x_col].values
x_new, df_new =applySampler(x_old, y_old=None, sampDict=sampDict, df_old=df_old)
df_new[x_col]=x_new
return df_new
def applySampler(x_old, y_old, sampDict, df_old=None):
param = np.asarray(sampDict['param']).ravel()
if sampDict['name']=='Replace':
if len(param)==0:
raise Exception('Error: At least one value is required to resample the x values with')
x_new = param
return resample_interp(x_old, x_new, y_old, df_old)
elif sampDict['name']=='Insert':
if len(param)==0:
raise Exception('Error: provide a list of values to insert')
x_new = np.sort(np.concatenate((x_old.ravel(),param)))
return resample_interp(x_old, x_new, y_old, df_old)
elif sampDict['name']=='Remove':
I=[]
if len(param)==0:
raise Exception('Error: provide a list of values to remove')
for d in param:
Ifound= np.where(np.abs(x_old-d)<1e-3)[0]
if len(Ifound)>0:
I+=list(Ifound.ravel())
x_new=np.delete(x_old,I)
return resample_interp(x_old, x_new, y_old, df_old)
elif sampDict['name']=='Delta x':
if len(param)==0:
raise Exception('Error: provide value for dx')
dx = param[0]
x_new = np.arange(x_old[0], x_old[-1]+dx/2, dx)
return resample_interp(x_old, x_new, y_old, df_old)
elif sampDict['name']=='Every n':
if len(param)==0:
raise Exception('Error: provide value for n')
n = int(param[0])
if n==0:
raise Exception('Error: |n| should be at least 1')
x_new=x_old[::n]
if df_old is not None:
return x_new, (df_old.copy()).iloc[::n,:]
if y_old is not None:
return x_new, y_old[::n]
else:
raise NotImplementedError('{}'.format(sampDict))
pass
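# A minimal usage sketch (editor's note, not part of the original module): resample a
# signal onto a regular grid with the 'Delta x' sampler defined in SAMPLERS above.
# The arrays below are illustrative only.
# >>> x = np.linspace(0, 10, 101); y = np.sin(x)
# >>> x_new, y_new = applySampler(x, y, {'name': 'Delta x', 'param': [0.5]})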
# --------------------------------------------------------------------------------}
# --- Filters
# --------------------------------------------------------------------------------{
# def moving_average(x, w):
# #t_new = np.arange(0,Tmax,dt)
# #nt = len(t_new)
# #nw=400
# #u_new = moving_average(np.floor(np.linspace(0,3,nt+nw-1))*3+3.5, nw)
# return np.convolve(x, np.ones(w), 'valid') / w
# def moving_average(x,N,mode='same'):
# y=np.convolve(x, np.ones((N,))/N, mode=mode)
# return y
def moving_average(a, n=3) :
"""
perform moving average, return a vector of same length as input
NOTE: also in kalman.filters
"""
a = a.ravel()
a = np.concatenate(([a[0]]*(n-1),a)) # repeating first values
ret = np.cumsum(a, dtype = float)
ret[n:] = ret[n:] - ret[:-n]
ret=ret[n - 1:] / n
return ret
def lowpass1(y, dt, fc=3) :
"""
1st order low pass filter
"""
tau=1/(2*np.pi*fc)
alpha=dt/(tau+dt)
y_filt=np.zeros(y.shape)
y_filt[0]=y[0]
for i in np.arange(1,len(y)):
y_filt[i]=alpha*y[i] + (1-alpha)*y_filt[i-1]
return y_filt
def highpass1(y, dt, fc=3) :
"""
1st order high pass filter
"""
tau=1/(2*np.pi*fc)
alpha=tau/(tau+dt)
y_filt=np.zeros(y.shape)
y_filt[0]=0
for i in np.arange(1,len(y)):
y_filt[i]=alpha*y_filt[i-1] + alpha*(y[i]-y[i-1])
m0=np.mean(y)
m1=np.mean(y_filt)
y_filt+=m0-m1
return y_filt
def applyFilter(x, y,filtDict):
if filtDict['name']=='Moving average':
return moving_average(y, n=np.round(filtDict['param']).astype(int))
elif filtDict['name']=='Low pass 1st order':
dt = x[1]-x[0]
return lowpass1(y, dt=dt, fc=filtDict['param'])
elif filtDict['name']=='High pass 1st order':
dt = x[1]-x[0]
return highpass1(y, dt=dt, fc=filtDict['param'])
else:
raise NotImplementedError('{}'.format(filtDict))
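# A minimal usage sketch (editor's note, not part of the original module): the filter
# dictionaries follow the FILTERS entries defined at the top of this file; the signal
# below is illustrative only.
# >>> t = np.arange(0, 10, 0.01)
# >>> y = np.sin(t) + 0.1*np.random.randn(len(t))
# >>> y_smooth = applyFilter(t, y, {'name': 'Low pass 1st order', 'param': 2.0})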
# --------------------------------------------------------------------------------}
# ---
# --------------------------------------------------------------------------------{
def zero_crossings(y,x=None,direction=None):
"""
Find zero-crossing points in a discrete vector, using linear interpolation.
direction: 'up' or 'down', to select only up-crossings or down-crossings
returns:
            x values xzc such that y(xzc)==0
            indices izc, such that the zero is between y[izc] (excluded) and y[izc+1] (included)
if direction is not provided, also returns:
sign, equal to 1 for up crossing
"""
if x is None:
x=np.arange(len(y))
if np.any((x[1:] - x[0:-1]) <= 0.0):
raise Exception('x values need to be in ascending order')
# Indices before zero-crossing
iBef = np.where(y[1:]*y[0:-1] < 0.0)[0]
# Find the zero crossing by linear interpolation
xzc = x[iBef] - y[iBef] * (x[iBef+1] - x[iBef]) / (y[iBef+1] - y[iBef])
# Selecting points that are exactly 0 and where neighbor change sign
iZero = np.where(y == 0.0)[0]
iZero = iZero[np.where((iZero > 0) & (iZero < x.size-1))]
iZero = iZero[np.where(y[iZero-1]*y[iZero+1] < 0.0)]
# Concatenate
xzc = np.concatenate((xzc, x[iZero]))
iBef = np.concatenate((iBef, iZero))
# Sort
iSort = np.argsort(xzc)
xzc, iBef = xzc[iSort], iBef[iSort]
# Return up-crossing, down crossing or both
sign = np.sign(y[iBef+1]-y[iBef])
if direction == 'up':
I= np.where(sign==1)[0]
return xzc[I],iBef[I]
elif direction == 'down':
I= np.where(sign==-1)[0]
return xzc[I],iBef[I]
elif direction is not None:
raise Exception('Direction should be either `up` or `down`')
return xzc, iBef, sign
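# A small worked example (editor's note, not part of the original module):
# >>> t = np.linspace(0, 1, 201)
# >>> xzc, izc, sign = zero_crossings(np.sin(2*np.pi*3*t), x=t)
# xzc is close to [1/6, 2/6, 3/6, 4/6, 5/6]; the first crossing is a down-crossing (sign -1).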
# --------------------------------------------------------------------------------}
# ---
# --------------------------------------------------------------------------------{
def correlation(x, nMax=80, dt=1, method='manual'):
"""
Compute auto correlation of a signal
"""
nvec = np.arange(0,nMax)
sigma2 = np.var(x)
R = np.zeros(nMax)
R[0] =1
for i,nDelay in enumerate(nvec[1:]):
R[i+1] = np.mean( x[0:-nDelay] * x[nDelay:] ) / sigma2
tau = nvec*dt
return R, tau
def correlated_signal(coeff, n=1000):
"""
Create a correlated random signal of length `n` based on the correlation coefficient `coeff`
value[t] = coeff * value[t-1] + (1-coeff) * random
"""
if coeff<0 or coeff>1:
raise Exception('Correlation coefficient should be between 0 and 1')
x = np.zeros(n)
rvec = rand(n)
x[0] = rvec[0]
for m in np.arange(1,n):
x[m] = coeff*x[m-1] + (1-coeff)*rvec[m]
x-=np.mean(x)
return x
if __name__=='__main__':
import numpy as np
import matplotlib.pyplot as plt
# Input
dt = 1
n = 10000
    coeff = 0.95  # 1: full correlation, 0: no correlation
nMax = 180
# Create a correlated time series
tvec = np.arange(0,n)*dt
ts = correlated_signal(coeff, n)
# --- Compute correlation coefficient
    R, tau = correlation(ts, nMax=nMax, dt=dt)
fig,axes = plt.subplots(2, 1, sharey=False, figsize=(6.4,4.8)) # (6.4,4.8)
fig.subplots_adjust(left=0.12, right=0.95, top=0.95, bottom=0.11, hspace=0.20, wspace=0.20)
ax=axes[0]
# Plot time series
ax.plot(tvec,ts)
ax.set_xlabel('t [s]')
ax.set_ylabel('u [m/s]')
ax.tick_params(direction='in')
# Plot correlation
ax=axes[1]
ax.plot(tau, R ,'b-o', label='computed')
ax.plot(tau, coeff**(tau/dt) , 'r--' ,label='coeff^{tau/dt}') # analytical coeff^n trend
ax.set_xlabel(r'$\tau$ [s]')
ax.set_ylabel(r'$R(\tau)$ [-]')
ax.legend()
plt.show()
| 31.002882 | 119 | 0.533278 | [
"MIT"
] | cdrtm/pyDatView | pydatview/tools/signal.py | 10,758 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-15 06:13
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import jsonfield.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('pinax_teams', '0002_add_simple_models'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('data', jsonfield.fields.JSONField()),
],
),
migrations.CreateModel(
name='ItemResponse',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('answer', models.TextField()),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='oxlos.Item')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=250)),
('description', models.TextField()),
('description_html', models.TextField(blank=True, editable=False)),
('team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_teams.SimpleTeam')),
],
),
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=250)),
('description', models.TextField()),
('description_html', models.TextField(blank=True, editable=False)),
('instructions', models.TextField()),
('instructions_html', models.TextField(blank=True, editable=False)),
('question_template', models.TextField()),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tasks', to='oxlos.Project')),
],
),
migrations.AddField(
model_name='item',
name='task',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='oxlos.Task'),
),
]
| 41.941176 | 134 | 0.596073 | [
"MIT"
] | jtauber/oxlos2 | oxlos/migrations/0001_initial.py | 2,852 | Python |
from setuptools import find_packages, setup
setup(
name="hacker_news",
version="dev",
author="Elementl",
author_email="[email protected]",
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Operating System :: OS Independent",
],
packages=find_packages(exclude=["test"]),
package_data={"hacker_news": ["hacker_news_dbt/*"]},
install_requires=[
"aiobotocore==1.3.3",
"dagster",
"dagster-aws",
"dagster-dbt",
"dagster-pandas",
"dagster-pyspark",
"dagster-slack",
"dagster-postgres",
"dagstermill",
"dbt>=0.19.0",
"mock",
# DataFrames were not written to Snowflake, causing errors
"pandas<1.4.0",
"pyarrow>=4.0.0",
"pyspark",
"requests",
"fsspec",
"s3fs",
"scipy",
"sklearn",
"snowflake-sqlalchemy",
"matplotlib",
],
extras_require={"tests": ["mypy", "pylint", "pytest"]},
)
| 26.5 | 66 | 0.539982 | [
"Apache-2.0"
] | kbd/dagster | examples/hacker_news/setup.py | 1,113 | Python |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making ่้ฒธๆบไบPaaSๅนณๅฐ็คพๅบ็ (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from requests_mock import ANY
from backend.components.cluster_manager import ClusterManagerClient
class TestClusterManagerClient:
def test_get_nodes(self, cluster_id, request_user, requests_mock):
expected_data = [{"innerIP": "127.0.0.1"}]
requests_mock.get(ANY, json={"code": 0, "data": expected_data})
client = ClusterManagerClient(request_user.token.access_token)
data = client.get_nodes(cluster_id)
assert data == expected_data
| 46.769231 | 115 | 0.763158 | [
"Apache-2.0"
] | jamesgetx/bk-bcs | bcs-ui/backend/tests/components/test_cm.py | 1,234 | Python |
import os
import sys
import json
import datetime
import numpy as np
import skimage.draw
from bs4 import BeautifulSoup as bs
import cv2
import imgaug
from utils import *
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Inference result directory
RESULTS_DIR = os.path.abspath("./inference/")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from configs import Config
# from mrcnn import model as modellib, utils
# from mrcnn import visualize
import matplotlib
# Agg backend runs without a display
matplotlib.use('Agg')
import matplotlib.pyplot as plt
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
DEFAULT_DATASET_YEAR = '2012'
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# VOC DATASET MASK MAP FUNCTION
# Following codes are mapping each mask color(SegmentationClass) to ground truth index.
# - reference: https://d2l.ai/chapter_computer-vision/semantic-segmentation-and-dataset.html
VOC_COLORMAP = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],
[0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],
[64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0],
[64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128],
[0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0],
[0, 64, 128]]
VOC_CLASSES = ['background', 'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person',
'potted plant', 'sheep', 'sofa', 'train', 'tv/monitor']
def build_colormap2label():
"""Build a RGB color to label mapping for segmentation."""
colormap2label = np.zeros(256 ** 3)
for i, colormap in enumerate(VOC_COLORMAP):
colormap2label[(colormap[0]*256 + colormap[1])*256 + colormap[2]] = i
return colormap2label
def voc_label_indices(colormap, colormap2label):
"""Map a RGB color to a label."""
colormap = colormap.astype('int32')
idx = ((colormap[:, :, 0] * 256 + colormap[:, :, 1]) * 256
+ colormap[:, :, 2])
return colormap2label[idx]
# VOC DATASET MASK MAP FUNCTION
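# A small worked example (editor's note, not part of the original script): map a
# synthetic 2x2 RGB mask to class indices with the helpers above.
# >>> cmap2lab = build_colormap2label()
# >>> rgb = np.zeros((2, 2, 3), dtype=np.uint8); rgb[0, 0] = [128, 0, 0]  # 'aeroplane'
# >>> voc_label_indices(rgb, cmap2lab)
# array([[1., 0.],
#        [0., 0.]])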
class VocConfig(Config):
NAME = "voc"
IMAGE_PER_GPU = 2
    NUM_CLASSES = 1 + 20  # VOC 2012 has 20 classes; "1" is for the background.
class InferenceConfig(VocConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
DETECTION_MIN_CONFIDENCE = 0
class VocDataset(Dataset):
def load_voc(self, dataset_dir, trainval, year='2012'):
"""Load a voc_year of the VOC dataset.
dataset_dir: The root directory of the VOC dataset, example: '/mnt/disk1/VOCdevkit'
trainval: 'train' or 'val' for Training or Validation
year: '2007' or '2012' for VOC dataset
"""
voc_year = 'VOC' + year
Segmentation = os.path.join(dataset_dir, voc_year, 'ImageSets', 'Segmentation')
JPEGImages = os.path.join(dataset_dir, voc_year, 'JPEGImages')
Annotations = os.path.join(dataset_dir, voc_year, 'Annotations')
SegmentationClass = os.path.join(dataset_dir, voc_year, 'SegmentationClass')
SegmentationObject = os.path.join(dataset_dir, voc_year, 'SegmentationObject')
# load classes of VOC, BG is initialed in parent class.
for idx, class_name in enumerate(VOC_CLASSES[1:]):
self.add_class("voc", idx + 1, class_name)
assert trainval in ['train', 'val']
# read segmentation annotation file
annotation_file = os.path.join(Segmentation, trainval + '.txt')
image_ids = []
with open(annotation_file) as f:
image_id_list = [line.strip() for line in f]
image_ids += image_id_list
for image_id in image_ids:
image_file_name = '{}.jpg'.format(image_id)
mask_file_name = '{}.png'.format(image_id)
xml_file_name = '{}.xml'.format(image_id)
image_path = os.path.join(JPEGImages, image_file_name)
# Parse Annotations XML File
with open(os.path.join(Annotations, xml_file_name)) as f:
soup = bs(f, 'lxml')
objects = soup.find_all('object')
image_contains_class_flag = False
for obj in objects:
class_name = obj.find('name').text
if class_name in VOC_CLASSES:
image_contains_class_flag = True
continue
if image_contains_class_flag:
class_mask_path = os.path.join(SegmentationClass, mask_file_name)
object_mask_path = os.path.join(SegmentationObject, mask_file_name)
self.add_image("voc",
image_id=image_file_name,
path=image_path,
class_mask_path=class_mask_path,
object_mask_path=object_mask_path)
def load_raw_mask(self, image_id, class_or_object):
        '''Load one of the two kinds of VOC masks.
        image_id: id of mask
        class_or_object: 'class_mask' or 'object_mask' for SegmentationClass or SegmentationObject
        Returns:
            image: numpy array of the mask image.
'''
assert class_or_object in ['class_mask', 'object_mask']
image = skimage.io.imread(self.image_info[image_id][class_or_object+'_path'])
if image.ndim != 3:
image = skimage.color.gray2rgb(image)
# If has an alpha channel, remove it for consistency
if image.shape[-1] == 4:
image = image[..., :3]
return image
def load_class_label(self, image_id):
        '''Map SegmentationClass image colors to ground-truth class indices.
        image_id: id of mask
        Return:
            class_label: [height, width] matrix containing values from 0 to 20
'''
raw_mask = self.load_raw_mask(image_id, 'class_mask')
class_label = voc_label_indices(raw_mask, build_colormap2label())
return class_label
def load_mask(self, image_id):
        '''Map annotation images to instance masks in the format Mask R-CNN needs.
image_id: id of mask
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
'''
class_label = self.load_class_label(image_id)
instance_mask = self.load_raw_mask(image_id, 'object_mask')
max_indice = int(np.max(class_label))
instance_label = []
instance_class = []
for i in range(1, max_indice+1):
if not np.any(class_label==i):
continue
gt_indice = i
object_filter = class_label == i
object_filter = object_filter.astype(np.uint8)
object_filter = np.dstack((object_filter,object_filter,object_filter))
filtered = np.multiply(object_filter, instance_mask)
gray = cv2.cvtColor(filtered, cv2.COLOR_RGB2GRAY)
max_gray = np.max(gray)
for sub_index in range(1, max_gray+1):
if not np.any(gray==sub_index):
continue
instance_filter = gray == sub_index
instance_label += [instance_filter]
instance_class += [gt_indice]
masks = np.asarray(instance_label).transpose((1,2,0))
classes_ids = np.asarray(instance_class)
return masks, classes_ids
############################################################
# Inference
############################################################
def inference(model, dataset, limit):
"""Run detection on images in the given directory."""
# Create directory
if not os.path.exists(RESULTS_DIR):
os.makedirs(RESULTS_DIR)
time_dir = "{:%Y%m%dT%H%M%S}".format(datetime.datetime.now())
time_dir = os.path.join(RESULTS_DIR, time_dir)
os.makedirs(time_dir)
# Load over images
for image_id in dataset.image_ids[:limit]:
# Load image and run detection
image = dataset.load_image(image_id)
# Detect objects
r = model.detect([image], verbose=0)[0]
# Encode image to RLE. Returns a string of multiple lines
source_id = dataset.image_info[image_id]["id"]
# Save image with masks
if len(r['class_ids']) > 0:
print('[*] {}th image has {} instance(s).'.format(image_id, len(r['class_ids'])))
visualize.display_instances(
image, r['rois'], r['masks'], r['class_ids'],
dataset.class_names, r['scores'],
show_bbox=True, show_mask=True,
title="Predictions")
plt.savefig("{}/{}".format(time_dir, dataset.image_info[image_id]["id"]))
plt.close()
else:
plt.imshow(image)
plt.savefig("{}/noinstance_{}".format(time_dir, dataset.image_info[image_id]["id"]))
print('[*] {}th image have no instance.'.format(image_id))
plt.close()
if __name__ == '__main__':
import argparse
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Train Mask R-CNN on PASCAL VOC.')
parser.add_argument("--command",
metavar="<command>",
default='train',
help="'train' or 'inference' on PASCAL VOC")
parser.add_argument('--dataset',
default="/data/lktime-seg-tp/dataset/PASCALVOC/VOCdevkit/",
help='Directory of the PASCAL VOC dataset')
parser.add_argument('--year',
default='2012',
help='Year of the PASCAL VOC dataset (2007 or 2012) (default=2012)')
parser.add_argument('--model',
default="/path/to/weights.h5",
help="Path to weights .h5 file or 'voc'")
parser.add_argument('--logs',
default='./logs',
metavar="/path/to/logs/",
help='Logs and checkpoints directory (default=logs/)')
parser.add_argument('--limit', required=False,
default=10,
metavar="<image count>",
help='Images to use for evaluation (default=10)')
# TODO
'''
parser.add_argument('--download', required=False,
default=False,
metavar="<True|False>",
help='Automatically download and unzip PASCAL VOC files (default=False)',
type=bool)
'''
args = parser.parse_args()
print("Command: ", args.command)
print("Model: ", args.model)
print("Dataset: ", args.dataset)
print("Year: ", args.year)
print("Logs: ", args.logs)
#print("Auto Download: ", args.download)
# Configurations
if args.command == "train":
config = VocConfig()
else:
config = InferenceConfig()
config.display()
# Create model
# if args.command == "train":
# model = modellib.MaskRCNN(mode="training", config=config,
# model_dir=args.logs)
# else:
# model = modellib.MaskRCNN(mode="inference", config=config,
# model_dir=args.logs)
# Select weights file to load
# if args.model.lower() == "coco":
# model_path = COCO_WEIGHTS_PATH
# elif args.model.lower() == "last":
# # Find last trained weights
# model_path = model.find_last()
# elif args.model.lower() == "imagenet":
# # Start from ImageNet trained weights
# model_path = model.get_imagenet_weights()
# else:
# model_path = args.model
# Load weights
# if args.model.lower() == "coco":
# # Exclude the last layers because they require a matching
# # number of classes
# model.load_weights(model_path, by_name=True, exclude=[
# "mrcnn_class_logits", "mrcnn_bbox_fc",
# "mrcnn_bbox", "mrcnn_mask"])
# else:
# print("Loading weights ", model_path)
# model.load_weights(model_path, by_name=True)
# Train or evaluate
if args.command == "train":
# Training dataset. Use the training set and 35K from the
        # validation set, as in the Mask R-CNN paper.
dataset_train = VocDataset()
dataset_train.load_voc(args.dataset, "train", year=args.year)
dataset_train.prepare()
# Validation dataset
dataset_val = VocDataset()
dataset_val.load_voc(args.dataset, "val", year=args.year)
dataset_val.prepare()
# Image Augmentation
# Right/Left flip 50% of the time
augmentation = imgaug.augmenters.Fliplr(0.5)
# *** This training schedule is an example. Update to your needs ***
# # Training - Stage 1
# print("Training network heads")
# model.train(dataset_train, dataset_val,
# learning_rate=config.LEARNING_RATE,
# epochs=40,
# layers='heads',
# augmentation=augmentation)
# # Training - Stage 2
# # Finetune layers from ResNet stage 4 and up
# print("Fine tune Resnet stage 4 and up")
# model.train(dataset_train, dataset_val,
# learning_rate=config.LEARNING_RATE,
# epochs=120,
# layers='4+',
# augmentation=augmentation)
# # Training - Stage 3
# # Fine tune all layers
# print("Fine tune all layers")
# model.train(dataset_train, dataset_val,
# learning_rate=config.LEARNING_RATE / 10,
# epochs=160,
# layers='all',
# augmentation=augmentation)
# elif args.command == "inference":
# #print("evaluate have not been implemented")
# # Validation dataset
# dataset_val = VocDataset()
# voc = dataset_val.load_voc(args.dataset, "val", year=args.year)
# dataset_val.prepare()
# print("Running voc inference on {} images.".format(args.limit))
# inference(model, dataset_val, int(args.limit))
# else:
# print("'{}' is not recognized. "
# "Use 'train' or 'inference'".format(args.command)) | 39.112601 | 98 | 0.582699 | [
"Apache-2.0"
] | yhpengtu/CenterIMask | tools/convet_voc2coco/voc2coco.py | 14,589 | Python |
from tint.ssl.context import PFSContextFactory
from tint.log import Logger
from tint.protocols.tintp import ConnectionPool
from tint.protocols.tintp import TintProtocolFactory
from tint.friends import FriendsList
class Peer(object):
def __init__(self, keyStore, storage, resolver):
self.keyStore = keyStore
self.storage = storage
self.contextFactory = PFSContextFactory(self.keyStore)
self.pool = ConnectionPool(resolver, self.contextFactory, self.keyStore, self.storage)
self.protocolFactory = TintProtocolFactory(self.pool)
self.friends = FriendsList(self.storage, self.keyStore, resolver)
self.log = Logger(system=self)
def getKeyId(self):
"""
Get the keyId used by this peer (this peer's identifier).
This is stored in the key store.
"""
return self.keyStore.getKeyId()
def getPublicKey(self):
"""
Get the keyId used by this peer (this peer's identifier).
This is stored in the key store.
"""
return self.keyStore.getPublicKey()
def set(self, hostKeyId, storagePath, storageValue):
"""
Set a value on a host.
@param hostKeyId: The key id for the destination host to set the
given key. This could be the local host, in which case the hostKey
will be the same as this C{Peer}'s keyStore keyId.
@param storagePath: The path to the key to set. For instance, this
could be something like /chat/<somekey>/inbox.
@param storageValue: The value to set.
"""
if hostKeyId == self.getKeyId():
return self.storage.set(hostKeyId, storagePath, storageValue)
return self.pool.send(hostKeyId, 'set', storagePath, storageValue)
def get(self, hostKeyId, storagePath):
"""
Get a value from a host.
@param hostKeyId: The key id for the destination host to get the
given key. This could be the local host, in which case the hostKey
will be the same as this C{Peer}'s keyStore keyId.
@param storagePath: The path to the key to get. For instance, this
could be something like /chat/<somekey>/inbox.
"""
if hostKeyId == self.getKeyId():
self.log.debug("getting storagePath %s on self" % storagePath)
return self.storage.get(hostKeyId, storagePath)
self.log.debug("getting storagePath %s on %s" % (storagePath, hostKeyId))
return self.pool.send(hostKeyId, 'get', storagePath)
def push(self, hostKeyId, storagePath, storageValue):
"""
Given key, create a new key at <key>/<id> with the given value, where <id>
is an auto-incrementing integer value starting at 0.
"""
if hostKeyId == self.getKeyId():
return self.storage.push(hostKeyId, storagePath, storageValue)
return self.pool.send(hostKeyId, 'push', storagePath, storageValue)
def ls(self, hostKeyId, storagePath, offset, length):
"""
Given key, get all children keys (with the given offset and length). Length cannot
be more than 1000.
"""
if hostKeyId == self.getKeyId():
return self.storage.ls(hostKeyId, storagePath, offset, length)
return self.pool.send(hostKeyId, 'ls', storagePath, offset, length)
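# A hedged usage sketch (editor's note, not part of the original module); constructing
# the key store, storage backend and resolver is assumed to happen elsewhere in tint.
# Remote calls go through the connection pool (typically returning Deferreds in this
# Twisted-based stack).
# >>> peer = Peer(keyStore, storage, resolver)
# >>> peer.set(peer.getKeyId(), '/chat/somekey/inbox', 'hello')   # local write
# >>> peer.get(remoteKeyId, '/chat/somekey/inbox')                # remote read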
| 39 | 94 | 0.651163 | [
"MIT"
] | 8468/tint | tint/peer.py | 3,354 | Python |
"""Count Encoder"""
import numpy as np
import pandas as pd
import category_encoders.utils as util
from copy import copy
from sklearn.base import BaseEstimator, TransformerMixin
__author__ = 'joshua t. dunn'
class CountEncoder(BaseEstimator, TransformerMixin):
def __init__(self, verbose=0, cols=None, drop_invariant=False,
return_df=True, handle_unknown=None,
handle_missing='count',
min_group_size=None, combine_min_nan_groups=True,
min_group_name=None, normalize=False):
"""Count encoding for categorical features.
For a given categorical feature, replace the names of the groups
with the group counts.
Parameters
----------
verbose: int
integer indicating verbosity of output. 0 for none.
cols: list
a list of columns to encode, if None, all string and categorical columns
will be encoded.
drop_invariant: bool
boolean for whether or not to drop columns with 0 variance.
return_df: bool
boolean for whether to return a pandas DataFrame from transform
(otherwise it will be a numpy array).
handle_missing: str
how to handle missing values at fit time. Options are 'error', 'return_nan',
            and 'count'. Default 'count', which treats NaNs as a countable category at
fit time.
handle_unknown: str, int or dict of.
            how to handle unknown labels at transform time. Options are 'error',
'return_nan' and an int. Defaults to None which uses NaN behaviour
specified at fit time. Passing an int will fill with this int value.
normalize: bool or dict of.
whether to normalize the counts to the range (0, 1). See Pandas `value_counts`
for more details.
min_group_size: int, float or dict of.
the minimal count threshold of a group needed to ensure it is not
combined into a "leftovers" group. If float in the range (0, 1),
`min_group_size` is calculated as int(X.shape[0] * min_group_size).
Note: This value may change type based on the `normalize` variable. If True
this will become a float. If False, it will be an int.
min_group_name: None, str or dict of.
Set the name of the combined minimum groups when the defaults become
too long. Default None. In this case the category names will be joined
alphabetically with a `_` delimiter.
            Note: The default name can be long and may keep changing, for example,
in cross-validation.
combine_min_nan_groups: bool or dict of.
whether to combine the leftovers group with NaN group. Default True. Can
also be forced to combine with 'force' meaning small groups are effectively
counted as NaNs. Force can only used when 'handle_missing' is 'count' or 'error'.
Example
-------
>>> import pandas as pd
>>> from sklearn.datasets import load_boston
>>> from category_encoders import CountEncoder
>>> bunch = load_boston()
>>> y = bunch.target
>>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names)
>>> enc = CountEncoder(cols=['CHAS', 'RAD']).fit(X, y)
>>> numeric_dataset = enc.transform(X)
>>> print(numeric_dataset.info())
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 506 entries, 0 to 505
Data columns (total 13 columns):
CRIM 506 non-null float64
ZN 506 non-null float64
INDUS 506 non-null float64
CHAS 506 non-null int64
NOX 506 non-null float64
RM 506 non-null float64
AGE 506 non-null float64
DIS 506 non-null float64
RAD 506 non-null int64
TAX 506 non-null float64
PTRATIO 506 non-null float64
B 506 non-null float64
LSTAT 506 non-null float64
dtypes: float64(11), int64(2)
memory usage: 51.5 KB
None
References
----------
"""
self.return_df = return_df
self.drop_invariant = drop_invariant
self.drop_cols = []
self.verbose = verbose
self.cols = cols
self._dim = None
self.mapping = None
self.handle_unknown = handle_unknown
self.handle_missing = handle_missing
self.normalize = normalize
self.min_group_size = min_group_size
self.min_group_name = min_group_name
self.combine_min_nan_groups = combine_min_nan_groups
self._min_group_categories = {}
self._normalize = {}
self._min_group_name = {}
self._combine_min_nan_groups = {}
self._min_group_size = {}
self._handle_unknown = {}
self._handle_missing = {}
def fit(self, X, y=None, **kwargs):
"""Fit encoder according to X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : encoder
Returns self.
"""
# first check the type
X = util.convert_input(X)
self._dim = X.shape[1]
# if columns aren't passed, just use every string column
if self.cols is None:
self.cols = util.get_obj_cols(X)
else:
self.cols = util.convert_cols_to_list(self.cols)
self._check_set_create_dict_attrs()
self._fit_count_encode(X, y)
if self.drop_invariant:
self.drop_cols = []
X_temp = self.transform(X)
generated_cols = util.get_generated_cols(X, X_temp, self.cols)
self.drop_cols = [
x for x in generated_cols if X_temp[x].var() <= 10e-5
]
return self
def transform(self, X, y=None):
"""Perform the transformation to new categorical data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
y : array-like, shape = [n_samples]
Returns
-------
p : array, shape = [n_samples, n_numeric + N]
Transformed values with encoding applied.
"""
if self._dim is None:
raise ValueError(
'Must train encoder before it can be used to transform data.'
)
# first check the type
X = util.convert_input(X)
# then make sure that it is the right size
if X.shape[1] != self._dim:
raise ValueError(
'Unexpected input dimension %d, expected %d'
% (X.shape[1], self._dim,)
)
if not self.cols:
return X
X, _ = self._transform_count_encode(X, y)
if self.drop_invariant:
for col in self.drop_cols:
X.drop(col, 1, inplace=True)
if self.return_df:
return X
else:
return X.values
def _fit_count_encode(self, X_in, y):
"""Perform the count encoding."""
X = X_in.copy(deep=True)
if self.cols is None:
self.cols = X.columns.values
self.mapping = {}
for col in self.cols:
if X[col].isna().any():
if self._handle_missing[col] == 'error':
raise ValueError(
'Missing data found in column %s at fit time.'
% (col,)
)
elif self._handle_missing[col] not in ['count', 'return_nan', 'error']:
raise ValueError(
'%s key in `handle_missing` should be one of: '
' `value`, `return_nan` and `error`.'
% (col,)
)
self.mapping[col] = X[col].value_counts(
normalize=self._normalize[col],
dropna=False
)
if self._handle_missing[col] == 'return_nan':
self.mapping[col][np.NaN] = np.NaN
if any([val is not None for val in self._min_group_size.values()]):
self.combine_min_categories(X)
def _transform_count_encode(self, X_in, y):
"""Perform the transform count encoding."""
X = X_in.copy(deep=True)
for col in self.cols:
if self._min_group_size is not None:
if col in self._min_group_categories.keys():
X[col] = (
X[col].map(self._min_group_categories[col])
.fillna(X[col])
)
X[col] = X[col].map(self.mapping[col])
if isinstance(self._handle_unknown[col], np.integer):
X[col] = X[col].fillna(self._handle_unknown[col])
elif (
self._handle_unknown[col] == 'error'
and X[col].isna().any()
):
raise ValueError(
'Missing data found in column %s at transform time.'
% (col,)
)
return X, self.mapping
def combine_min_categories(self, X):
"""Combine small categories into a single category."""
for col, mapper in self.mapping.items():
if self._normalize[col] and isinstance(self._min_group_size[col], int):
self._min_group_size[col] = self._min_group_size[col] / X.shape[0]
            elif not self._normalize[col] and isinstance(self._min_group_size[col], float):
self._min_group_size[col] = self._min_group_size[col] * X.shape[0]
if self._combine_min_nan_groups[col] is True:
min_groups_idx = mapper < self._min_group_size[col]
elif self._combine_min_nan_groups[col] == 'force':
min_groups_idx = (
(mapper < self._min_group_size[col])
| (mapper.index.isna())
)
else:
min_groups_idx = (
(mapper < self._min_group_size[col])
& (~mapper.index.isna())
)
min_groups_sum = mapper.loc[min_groups_idx].sum()
if min_groups_sum > 0 and (min_groups_idx).sum() > 1:
if isinstance(self._min_group_name[col], str):
min_group_mapper_name = self._min_group_name
else:
min_group_mapper_name = '_'.join([
str(idx)
for idx
in mapper.loc[min_groups_idx].index.astype(str).sort_values()
])
self._min_group_categories[col] = {
cat: min_group_mapper_name
for cat
in mapper.loc[min_groups_idx].index.tolist()
}
if not min_groups_idx.all():
mapper = mapper.loc[~min_groups_idx]
if mapper.index.is_categorical():
mapper.index = mapper.index.add_categories(
min_group_mapper_name
)
mapper[min_group_mapper_name] = min_groups_sum
self.mapping[col] = mapper
def _check_set_create_dict_attrs(self):
"""Check attributes that can be dicts and format for all self.cols."""
dict_attrs = {
'normalize': False,
'min_group_name': None,
'combine_min_nan_groups': True,
'min_group_size': None,
'handle_unknown': 'value',
'handle_missing': 'value',
}
for attr_name, attr_default in dict_attrs.items():
attr = copy(getattr(self, attr_name))
if isinstance(attr, dict):
for col in self.cols:
if col not in attr:
attr[col] = attr_default
setattr(self, '_' + attr_name, attr)
else:
attr_dict = {}
for col in self.cols:
attr_dict[col] = attr
setattr(self, '_' + attr_name, attr_dict)
for col in self.cols:
if (
self._handle_missing[col] == 'return_nan'
and self._combine_min_nan_groups[col] == 'force'
):
raise ValueError(
"Cannot have `handle_missing` == 'return_nan' and "
"'combine_min_nan_groups' == 'force' for columns `%s`."
% (col,)
)
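# A hedged sketch (editor's note, not part of the original module) of the per-column
# dict form that _check_set_create_dict_attrs normalizes; column names follow the
# Boston example in the class docstring above.
# >>> enc = CountEncoder(cols=['CHAS', 'RAD'],
# ...                    normalize={'CHAS': True, 'RAD': False},
# ...                    min_group_size={'CHAS': 2, 'RAD': 20})
# >>> enc.fit(X, y)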
| 36.139665 | 93 | 0.541738 | [
"BSD-3-Clause"
] | JoshuaC3/categorical-encoding | category_encoders/count.py | 12,938 | Python |
#The term schedule that gets displayed. Can do multiple terms in the case of displaying
#summer and fall at the same time. ie termNames ['2201','2208']
termNames=['2218']
majorTemplate='in/majorPage.html.mako'
#Add new majors here.
#Name: short name for the major
#classFile: the csv file containing all the classes in this major's curriculum
#asof: the date that the major curriculum was acquired
majors=[
{'name': 'SNRE', 'classFile': 'majorClassLists/SNREList.csv', 'asof': 'Oct 10,2015'},
{'name': 'WEC', 'classFile': 'majorClassLists/WECList.csv', 'asof': 'Oct 10,2015'}
]
#Add new semesters here.
#Name: The term code, see below.
#prettyName: the more comprehensible name, e.g. Fall 2015
#termSchedule: the filename for the downloaded csv file for the schedule. All should be semesterData/YYYYXX.csv
#
#The new API started being the sole source in spring 2020. With that term codes are:
# CYYM, where C = 2, YY = the last 2 digits of the year, and M is 8 or 1 for fall or spring
#
#TODO: New codes for Summer. It's special since it has several mini-terms.
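#Example: '2218' -> C=2, YY=21, M=8 -> Fall 2021; '2211' -> Spring 2021 (see the list below).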
terms=[
{'name' :'2218', 'prettyName':'Fall 2021', 'termSchedule': 'semesterData/fall2021.csv'},
{'name' :'2211', 'prettyName':'Spring 2021', 'termSchedule': 'semesterData/spring2021.csv'},
{'name' :'2208', 'prettyName':'Fall 2020', 'termSchedule': 'semesterData/fall2020.csv'},
{'name' :'2201', 'prettyName':'Spring 2020', 'termSchedule': 'semesterData/spring2020.csv'},
{'name' :'201908', 'prettyName':'Fall 2019', 'termSchedule': 'semesterData/201908.csv'},
{'name' :'201906', 'prettyName':'Summer 2019', 'termSchedule': 'semesterData/201906.csv'},
{'name' :'201901', 'prettyName':'Spring 2019', 'termSchedule': 'semesterData/201901.csv'},
{'name' :'201808', 'prettyName':'Fall 2018', 'termSchedule': 'semesterData/201808.csv'},
{'name' :'201806', 'prettyName':'Summer 2018', 'termSchedule': 'semesterData/201806.csv'},
{'name' :'201801', 'prettyName':'Spring 2018', 'termSchedule': 'semesterData/201801.csv'},
{'name' :'201708', 'prettyName':'Fall 2017', 'termSchedule': 'semesterData/201708.csv'},
{'name' :'201706', 'prettyName':'Summer 2017', 'termSchedule': 'semesterData/201706.csv'},
{'name' :'201701', 'prettyName':'Spring 2017', 'termSchedule': 'semesterData/201701.csv'},
{'name' :'201608', 'prettyName':'Fall 2016', 'termSchedule': 'semesterData/201608.csv'},
{'name' :'201606', 'prettyName':'Summer 2016', 'termSchedule': 'semesterData/201606.csv'},
{'name' :'201601', 'prettyName':'Spring 2016', 'termSchedule': 'semesterData/201601.csv'},
{'name' :'201508', 'prettyName':'Fall 2015', 'termSchedule': 'semesterData/201508.csv'},
{'name' :'201506', 'prettyName':'Summer 2015', 'termSchedule': 'semesterData/201506.csv'},
{'name' :'201501', 'prettyName':'Spring 2015', 'termSchedule': 'semesterData/201501.csv'},
{'name' :'201408', 'prettyName':'Fall 2014', 'termSchedule': 'semesterData/201408.csv'},
{'name' :'201406', 'prettyName':'Summer 2014', 'termSchedule': 'semesterData/201406.csv'},
{'name' :'201401', 'prettyName':'Spring 2014', 'termSchedule': 'semesterData/201401.csv'},
{'name' :'201308', 'prettyName':'Fall 2013', 'termSchedule': 'semesterData/201308.csv'},
{'name' :'201301', 'prettyName':'Spring 2013', 'termSchedule': 'semesterData/201301.csv'},
{'name' :'201208', 'prettyName':'Fall 2012', 'termSchedule': 'semesterData/201208.csv'},
{'name' :'201201', 'prettyName':'Spring 2012', 'termSchedule': 'semesterData/201201.csv'},
{'name' :'201108', 'prettyName':'Fall 2011', 'termSchedule': 'semesterData/201108.csv'},
{'name' :'201101', 'prettyName':'Spring 2011', 'termSchedule': 'semesterData/201101.csv'},
{'name' :'201008', 'prettyName':'Fall 2010', 'termSchedule': 'semesterData/201008.csv'}
]
#To deal with 100's of special topic classes that may or may not be on the curriculum (and if not, still deserve
#to be considered), show *all* special topics classes from a few relevant departments
relevantDepts=['BOT','ZOO','FAS','WIS','FOR','GEO','ENV']
#Exclude any classes with these titles. Designed for research credits which I don't need to have on the site
classTitleExclusions=['SUPERVISED RESEARCH','MASTERS RESEARCH','DOCTORAL RESEARCH','ADVANCED RESEARCH',
'SUPERVISED TEACHING','INDIVIDUAL WORK','INDIVIDUAL STUDIES','SPECIAL TOPICS']
#Every dept has 'Special Topic' codes that are not necessarily in the curriculum.
#Since they all share the same course codes with things that *are* in the curriculum,
#all special topics are included.
#This list is to go and find them and mark them "special topics" to indicate the class might
#need prior approval.
#There's probably a better way to account for these; maybe scrape the grad catalog website.
specialTopicClasses=['ZOO6927',
'WIS6934',
'SWS6932',
'ALS5932',
'AOM6932',
'AEC6932',
'STA6934',
'ANS6932',
'ENY6932',
'NEM6932',
'AEB6933',
'ABE6933',
'PHC6937',
'LAS6938',
'GEO6938',
'HOS6932',
'MCB6937',
'PBC6937',
'FAS6932',
'AGR6932',
'BOT6935',
'ANG6930',
'ENV6935',
'ENV6932',
'FOR6934',
'MAT6932',
'LAW6930',
'SYA7933',
'GEB6930',
'AFS6905',
'VME6934'
]
| 57.097087 | 112 | 0.60857 | [
"MIT"
] | sdtaylor/scheduleCrossRef | config.py | 5,881 | Python |
# ---------------------------------------------------------------------
# Syslog server
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import logging
import time
from typing import Tuple
# NOC modules
from noc.config import config
from noc.core.perf import metrics
from noc.core.ioloop.udpserver import UDPServer
from noc.core.comp import smart_text
logger = logging.getLogger(__name__)
class SyslogServer(UDPServer):
def __init__(self, service):
super().__init__()
self.service = service
def enable_reuseport(self):
return config.syslogcollector.enable_reuseport
def enable_freebind(self):
return config.syslogcollector.enable_freebind
def on_read(self, data: bytes, address: Tuple[str, int]):
metrics["syslog_msg_in"] += 1
cfg = self.service.lookup_config(address[0])
if not cfg:
return # Invalid event source
# Convert data to valid UTF8
data = smart_text(data, errors="ignore")
# Parse priority
priority = 0
if data.startswith("<"):
idx = data.find(">")
if idx == -1:
return
try:
priority = int(data[1:idx])
except ValueError:
pass
data = data[idx + 1 :].strip()
# Get timestamp
ts = int(time.time())
#
self.service.register_message(cfg, ts, data, facility=priority >> 3, severity=priority & 7)
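# Worked example (editor's note): a datagram starting with "<165>" carries priority 165,
# so facility = 165 >> 3 = 20 (local4) and severity = 165 & 7 = 5 (notice).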
| 30.072727 | 99 | 0.538089 | [
"BSD-3-Clause"
] | nocproject/noc | services/syslogcollector/syslogserver.py | 1,654 | Python |
import logging
import os
import subprocess
logger = logging.getLogger(__name__)
class Combiner(object):
def __init__(self, options):
self.options = options
def combine(self, page_file_names):
output_file_name = self.options.output_file_name[0]
logger.info("combine %d pages into %s", len(page_file_names), output_file_name)
combine_args = ['pdfunite']
combine_args += page_file_names
combine_args += [os.path.basename(output_file_name)]
logger.debug("call: %s", " ".join(combine_args))
returncode = subprocess.call(combine_args)
if returncode != 0:
logger.error("combine failed: %s", " ".join(combine_args))
if not os.path.exists(output_file_name):
logger.error("output file '%s' does not exist", output_file_name)
return returncode
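# Resulting call shape (editor's sketch; the page file names are illustrative):
#   pdfunite page-000.pdf page-001.pdf output.pdf
# i.e. the scanned page files followed by os.path.basename(output_file_name).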
| 29.689655 | 87 | 0.663182 | [
"Apache-2.0"
] | wuan/scan_pdf | src/scan_pdf/combine.py | 861 | Python |
import logging
import unittest
import numpy as np
import pandas as pd
import scipy.stats as stats
import diffxpy.api as de
class _TestPairwiseNull:
noise_model: str
def _prepate_data(
self,
n_cells: int,
n_genes: int,
n_groups: int
):
if self.noise_model == "nb":
from batchglm.api.models.glm_nb import Simulator
rand_fn_loc = lambda shape: np.random.uniform(0.1, 1, shape)
rand_fn_scale = lambda shape: np.random.uniform(0.5, 1, shape)
elif self.noise_model == "norm" or self.noise_model is None:
from batchglm.api.models.glm_norm import Simulator
rand_fn_loc = lambda shape: np.random.uniform(500, 1000, shape)
rand_fn_scale = lambda shape: np.random.uniform(1, 2, shape)
else:
raise ValueError("noise model %s not recognized" % self.noise_model)
sim = Simulator(num_observations=n_cells, num_features=n_genes)
sim.generate_sample_description(num_batches=0, num_conditions=0)
sim.generate_params(
rand_fn_loc=rand_fn_loc,
rand_fn_scale=rand_fn_scale
)
sim.generate_data()
random_sample_description = pd.DataFrame({
"condition": np.random.randint(n_groups, size=sim.nobs)
})
return sim, random_sample_description
def _test_null_distribution_basic(
self,
test: str,
lazy: bool,
quick_scale: bool = False,
n_cells: int = 3000,
n_genes: int = 200,
n_groups: int = 3
):
"""
        Test if de.test.pairwise() generates a uniform p-value distribution
        if it is given data simulated based on the null model. Returns the p-value
        of the two-sided Kolmogorov-Smirnov test for equality of the observed
        p-value distribution and a uniform distribution.
:param n_cells: Number of cells to simulate (number of observations per test).
:param n_genes: Number of genes to simulate (number of tests).
"""
        sim, sample_description = self._prepare_data(
n_cells=n_cells,
n_genes=n_genes,
n_groups=n_groups
)
test = de.test.pairwise(
data=sim.input_data,
sample_description=sample_description,
grouping="condition",
test=test,
lazy=lazy,
quick_scale=quick_scale,
noise_model=self.noise_model
)
_ = test.summary()
# Compare p-value distribution under null model against uniform distribution.
if lazy:
pval_h0 = stats.kstest(test.pval_pairs(groups0=0, groups1=1).flatten(), 'uniform').pvalue
else:
pval_h0 = stats.kstest(test.pval[0, 1, :].flatten(), 'uniform').pvalue
logging.getLogger("diffxpy").info('KS-test pvalue for null model match of wald(): %f' % pval_h0)
assert pval_h0 > 0.05, "KS-Test failed: pval_h0=%f is <= 0.05!" % np.round(pval_h0, 5)
return True
class TestPairwiseNullStandard(unittest.TestCase, _TestPairwiseNull):
def test_null_distribution_ttest(self):
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
np.random.seed(1)
self.noise_model = None
self._test_null_distribution_basic(test="t-test", lazy=False)
def test_null_distribution_rank(self):
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
np.random.seed(1)
self.noise_model = None
self._test_null_distribution_basic(test="rank", lazy=False)
class TestPairwiseNullNb(unittest.TestCase, _TestPairwiseNull):
def test_null_distribution_ztest(self):
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
np.random.seed(1)
self.noise_model = "nb"
self._test_null_distribution_basic(test="z-test", lazy=False, quick_scale=False)
self._test_null_distribution_basic(test="z-test", lazy=False, quick_scale=True)
def test_null_distribution_ztest_lazy(self):
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
np.random.seed(1)
self.noise_model = "nb"
self._test_null_distribution_basic(test="z-test", lazy=True, quick_scale=False)
self._test_null_distribution_basic(test="z-test", lazy=True, quick_scale=True)
def test_null_distribution_wald(self):
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
np.random.seed(1)
self.noise_model = "nb"
self._test_null_distribution_basic(test="wald", lazy=False, quick_scale=False)
self._test_null_distribution_basic(test="wald", lazy=False, quick_scale=True)
def test_null_distribution_lrt(self):
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
np.random.seed(1)
self.noise_model = "nb"
self._test_null_distribution_basic(test="lrt", lazy=False, quick_scale=False)
if __name__ == '__main__':
unittest.main()
| 37.649351 | 104 | 0.657641 | [
"BSD-3-Clause"
] | gokceneraslan/diffxpy | diffxpy/unit_test/test_pairwise.py | 5,798 | Python |
__all__ = ['CompilerSanitizer']
from enum import Enum, unique
@unique
class CompilerSanitizer(Enum):
NONE = 'none'
ADDRESS = 'address'
THREAD = 'thread'
UNDEFINED = 'undefined'
MEMORY = 'memory'
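# Editor's note (a sketch, not part of the original module): every member except NONE
# mirrors a value accepted by Clang's -fsanitize=<value> flag (GCC supports all of
# these except 'memory'), e.g.
# >>> f"-fsanitize={CompilerSanitizer.ADDRESS.value}"
# '-fsanitize=address'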
| 16.769231 | 31 | 0.66055 | [
"MIT"
] | benoit-dubreuil/template-cpp20-agnostic-build-ci-cd | conf/script/src/build_system/compiler/build_option/sanitizer.py | 218 | Python |
"""pythonic_orcfighter
This is one of the different GameUnits that are used in the design patterns examples.
:copyright: 2020, Jean Tardelli
:license: The MIT license (MIT). See LICENSE file for further details.
"""
from pythonic_abstractgameunit import AbstractGameUnit
class OrcFighter(AbstractGameUnit):
"""Create a OrcFighter instance"""
def info(self):
"""Print info about this unit, overrides superclass method."""
print("Grrr, I am the Orc Figher!")
| 30.3125 | 85 | 0.736082 | [
"MIT"
] | jeantardelli/wargameRepo | wargame/designpatterns/pythonic_orcfighter.py | 485 | Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('documents', '0004_uuidfield'),
('meetings', '0009_auto_20170106_1414'),
]
operations = [
migrations.AddField(
model_name='meeting',
name='documents_zip',
field=models.ForeignKey(to='documents.Document', related_name='zip_for_meeting', null=True, on_delete=django.db.models.deletion.SET_NULL),
),
]
| 26.5 | 150 | 0.656947 | [
"Apache-2.0"
] | ecs-org/ecs | ecs/meetings/migrations/0010_meeting_documents_zip.py | 583 | Python |
import collections
import functools
import itertools
import operator
from contextlib import suppress
from typing import Any, Dict, List
import numpy as np
import toolz
from cached_property import cached_property
import ibis.common.exceptions as com
import ibis.expr.datatypes as dt
import ibis.expr.rules as rlz
import ibis.expr.schema as sch
import ibis.expr.types as ir
from ibis import util
from ibis.expr.schema import HasSchema, Schema
from ibis.expr.signature import Annotable
from ibis.expr.signature import Argument as Arg
def _safe_repr(x, memo=None):
return x._repr(memo=memo) if isinstance(x, (ir.Expr, Node)) else repr(x)
# TODO: move to analysis
def distinct_roots(*expressions):
roots = toolz.concat(expr.op().root_tables() for expr in expressions)
return list(toolz.unique(roots))
class Node(Annotable):
__slots__ = '_expr_cached', '_hash'
def __repr__(self):
return self._repr()
def _repr(self, memo=None):
if memo is None:
from ibis.expr.format import FormatMemo
memo = FormatMemo()
opname = type(self).__name__
pprint_args = []
def _pp(x):
return _safe_repr(x, memo=memo)
for x in self.args:
if isinstance(x, (tuple, list)):
pp = repr(list(map(_pp, x)))
else:
pp = _pp(x)
pprint_args.append(pp)
return '{}({})'.format(opname, ', '.join(pprint_args))
def __getstate__(self) -> Dict[str, Any]:
"""The attributes _expr_cached and _hash are
used as caches; they can be excluded from
serialization without affecting correctness.
Excluding _expr_cached and _hash from serialization
will allow the serialized bytes to be the same for
        equivalent Node objects.
Returns
-------
Dict[str, Any]
A dictionary storing the objects attributes.
"""
excluded_slots = {'_expr_cached', '_hash'}
return {
slot: getattr(self, slot)
for slot in self.__slots__
if slot not in excluded_slots
}
def __setstate__(self, state: Dict[str, Any]) -> None:
"""
Parameters
----------
state: Dict[str, Any]
A dictionary storing the objects attributes.
"""
for slot in state:
setattr(self, slot, state[slot])
@property
def inputs(self):
return tuple(self.args)
def blocks(self):
        # The contents of this node are referentially distinct and may not be
# analyzed deeper
return False
def flat_args(self):
for arg in self.args:
if not isinstance(arg, str) and isinstance(
arg, collections.abc.Iterable
):
for x in arg:
yield x
else:
yield arg
def __hash__(self):
if not hasattr(self, '_hash'):
self._hash = hash(
(type(self),)
+ tuple(
element.op() if isinstance(element, ir.Expr) else element
for element in self.flat_args()
)
)
return self._hash
def __eq__(self, other):
return self.equals(other)
def equals(self, other, cache=None):
if cache is None:
cache = {}
key = self, other
try:
return cache[key]
except KeyError:
cache[key] = result = self is other or (
type(self) == type(other)
and all_equal(self.args, other.args, cache=cache)
)
return result
def compatible_with(self, other):
return self.equals(other)
def is_ancestor(self, other):
if isinstance(other, ir.Expr):
other = other.op()
return self.equals(other)
def to_expr(self):
if not hasattr(self, '_expr_cached'):
self._expr_cached = self._make_expr()
return self._expr_cached
def _make_expr(self):
klass = self.output_type()
return klass(self)
def output_type(self):
"""
This function must resolve the output type of the expression and return
the node wrapped in the appropriate ValueExpr type.
"""
raise NotImplementedError
class ValueOp(Node):
def root_tables(self):
exprs = [arg for arg in self.args if isinstance(arg, ir.Expr)]
return distinct_roots(*exprs)
def resolve_name(self):
raise com.ExpressionError(f'Expression is not named: {type(self)}')
def has_resolved_name(self):
return False
def all_equal(left, right, cache=None):
"""Check whether two objects `left` and `right` are equal.
Parameters
----------
left : Union[object, Expr, Node]
right : Union[object, Expr, Node]
cache : Optional[Dict[Tuple[Node, Node], bool]]
A dictionary indicating whether two Nodes are equal
"""
if cache is None:
cache = {}
if util.is_iterable(left):
# check that left and right are equal length iterables and that all
# of their elements are equal
return (
util.is_iterable(right)
and len(left) == len(right)
and all(
itertools.starmap(
functools.partial(all_equal, cache=cache), zip(left, right)
)
)
)
if hasattr(left, 'equals'):
return left.equals(right, cache=cache)
return left == right
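# A small illustration (editor's note, not part of the ibis source): all_equal compares
# scalars, Exprs/Nodes (via .equals) and nested iterables element-wise.
# >>> all_equal([1, 2, 3], [1, 2, 3])
# True
# >>> all_equal((1, [2, 3]), (1, [2, 4]))
# False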
_table_names = ('unbound_table_{:d}'.format(i) for i in itertools.count())
def genname():
return next(_table_names)
class TableNode(Node):
def get_type(self, name):
return self.schema[name]
def output_type(self):
return ir.TableExpr
def aggregate(self, this, metrics, by=None, having=None):
return Aggregation(this, metrics, by=by, having=having)
def sort_by(self, expr, sort_exprs):
return Selection(expr, [], sort_keys=sort_exprs)
def is_ancestor(self, other):
import ibis.expr.lineage as lin
if isinstance(other, ir.Expr):
other = other.op()
if self.equals(other):
return True
fn = lambda e: (lin.proceed, e.op()) # noqa: E731
expr = self.to_expr()
for child in lin.traverse(fn, expr):
if child.equals(other):
return True
return False
class TableColumn(ValueOp):
"""Selects a column from a TableExpr"""
name = Arg((str, int))
table = Arg(ir.TableExpr)
def __init__(self, name, table):
schema = table.schema()
if isinstance(name, int):
name = schema.name_at_position(name)
super().__init__(name, table)
def _validate(self):
if self.name not in self.table.schema():
raise com.IbisTypeError(
"'{}' is not a field in {}".format(
self.name, self.table.columns
)
)
def parent(self):
return self.table
def resolve_name(self):
return self.name
def has_resolved_name(self):
return True
def root_tables(self):
return self.table.op().root_tables()
def _make_expr(self):
dtype = self.table._get_type(self.name)
klass = dtype.column_type()
return klass(self, name=self.name)
class RowID(ValueOp):
"""The row number (an autonumeric) of the returned result."""
def output_type(self):
return dt.int64.column_type()
def resolve_name(self):
return 'rowid'
def has_resolved_name(self):
return True
def find_all_base_tables(expr, memo=None):
if memo is None:
memo = {}
node = expr.op()
if isinstance(expr, ir.TableExpr) and node.blocks():
if expr not in memo:
memo[node] = expr
return memo
for arg in expr.op().flat_args():
if isinstance(arg, ir.Expr):
find_all_base_tables(arg, memo)
return memo
class PhysicalTable(TableNode, HasSchema):
def blocks(self):
return True
class UnboundTable(PhysicalTable):
schema = Arg(sch.Schema)
name = Arg(str, default=genname)
class DatabaseTable(PhysicalTable):
name = Arg(str)
schema = Arg(sch.Schema)
source = Arg(rlz.client)
def change_name(self, new_name):
return type(self)(new_name, self.args[1], self.source)
class SQLQueryResult(TableNode, HasSchema):
"""A table sourced from the result set of a select query"""
query = Arg(rlz.noop)
schema = Arg(sch.Schema)
source = Arg(rlz.client)
def blocks(self):
return True
class TableArrayView(ValueOp):
"""
(Temporary?) Helper operation class for SQL translation (fully formed table
subqueries to be viewed as arrays)
"""
table = Arg(ir.TableExpr)
name = Arg(str)
def __init__(self, table):
schema = table.schema()
if len(schema) > 1:
raise com.ExpressionError('Table can only have a single column')
name = schema.names[0]
        super().__init__(table, name)
def _make_expr(self):
ctype = self.table._get_type(self.name)
klass = ctype.column_type()
return klass(self, name=self.name)
class UnaryOp(ValueOp):
arg = Arg(rlz.any)
class BinaryOp(ValueOp):
"""A binary operation"""
left = Arg(rlz.any)
right = Arg(rlz.any)
class Cast(ValueOp):
arg = Arg(rlz.any)
to = Arg(dt.dtype)
# see #396 for the issue preventing this
# def resolve_name(self):
# return self.args[0].get_name()
def output_type(self):
return rlz.shape_like(self.arg, dtype=self.to)
class TypeOf(UnaryOp):
output_type = rlz.shape_like('arg', dt.string)
class Negate(UnaryOp):
arg = Arg(rlz.one_of((rlz.numeric(), rlz.interval())))
output_type = rlz.typeof('arg')
class IsNull(UnaryOp):
"""Returns true if values are null
Returns
-------
isnull : boolean with dimension of caller
"""
output_type = rlz.shape_like('arg', dt.boolean)
class NotNull(UnaryOp):
"""Returns true if values are not null
Returns
-------
notnull : boolean with dimension of caller
"""
output_type = rlz.shape_like('arg', dt.boolean)
class ZeroIfNull(UnaryOp):
output_type = rlz.typeof('arg')
class IfNull(ValueOp):
"""Equivalent to (but perhaps implemented differently):
case().when(expr.notnull(), expr)
.else_(null_substitute_expr)
"""
arg = Arg(rlz.any)
ifnull_expr = Arg(rlz.any)
output_type = rlz.shape_like('args')
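# Usage sketch (illustrative, not part of this module). IfNull is described
# above as equivalent to a two-branch case expression; the `ifnull` column
# method name is an assumption based on this op's name, and `import ibis`
# plus a table `t` with an int64 column `x` are assumed:
#
#   filled = t.x.ifnull(0)                                        # IfNull op
#   same = ibis.case().when(t.x.notnull(), t.x).else_(0).end()    # equivalent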
class NullIf(ValueOp):
"""Set values to NULL if they equal the null_if_expr"""
arg = Arg(rlz.any)
null_if_expr = Arg(rlz.any)
output_type = rlz.shape_like('args')
class NullIfZero(ValueOp):
"""
    Set values to NULL if they are equal to zero. Commonly used in cases where
divide-by-zero would produce an overflow or infinity.
Equivalent to (value == 0).ifelse(ibis.NA, value)
Returns
-------
maybe_nulled : type of caller
"""
arg = Arg(rlz.numeric)
output_type = rlz.typeof('arg')
class IsNan(ValueOp):
arg = Arg(rlz.floating)
output_type = rlz.shape_like('arg', dt.boolean)
class IsInf(ValueOp):
arg = Arg(rlz.floating)
output_type = rlz.shape_like('arg', dt.boolean)
class CoalesceLike(ValueOp):
# According to Impala documentation:
# Return type: same as the initial argument value, except that integer
# values are promoted to BIGINT and floating-point values are promoted to
# DOUBLE; use CAST() when inserting into a smaller numeric column
arg = Arg(rlz.list_of(rlz.any))
def output_type(self):
first = self.arg[0]
if isinstance(first, (ir.IntegerValue, ir.FloatingValue)):
dtype = first.type().largest
else:
dtype = first.type()
# self.arg is a list of value expressions
return rlz.shape_like(self.arg, dtype)
class Coalesce(CoalesceLike):
pass
class Greatest(CoalesceLike):
pass
class Least(CoalesceLike):
pass
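# Usage sketch (illustrative only; assumes `import ibis` and the top-level
# functions ibis.coalesce / ibis.greatest / ibis.least). Per the comment in
# CoalesceLike, integer inputs are promoted to the largest integer type and
# floating-point inputs to double:
#
#   t = ibis.table([('a', 'int32'), ('b', 'int64')], name='t')
#   expr = ibis.coalesce(t.a, t.b, 0)   # result dtype: int64 (promoted)
#   hi = ibis.greatest(t.a, t.b)
#   lo = ibis.least(t.a, t.b)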
class Abs(UnaryOp):
"""Absolute value"""
output_type = rlz.typeof('arg')
class Ceil(UnaryOp):
"""
Round up to the nearest integer value greater than or equal to this value
Returns
-------
ceiled : type depending on input
Decimal values: yield decimal
      Other numeric values: yield integer (int64)
"""
arg = Arg(rlz.numeric)
def output_type(self):
if isinstance(self.arg.type(), dt.Decimal):
return self.arg._factory
return rlz.shape_like(self.arg, dt.int64)
class Floor(UnaryOp):
"""
Round down to the nearest integer value less than or equal to this value
Returns
-------
floored : type depending on input
Decimal values: yield decimal
      Other numeric values: yield integer (int64)
"""
arg = Arg(rlz.numeric)
def output_type(self):
if isinstance(self.arg.type(), dt.Decimal):
return self.arg._factory
return rlz.shape_like(self.arg, dt.int64)
class Round(ValueOp):
arg = Arg(rlz.numeric)
digits = Arg(rlz.numeric, default=None)
def output_type(self):
if isinstance(self.arg, ir.DecimalValue):
return self.arg._factory
elif self.digits is None:
return rlz.shape_like(self.arg, dt.int64)
else:
return rlz.shape_like(self.arg, dt.double)
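# Usage sketch (illustrative only; assumes a table `t` with a double column
# `x`). Round's output type follows the logic above: no digits yields an
# integer, explicit digits yield a double, and decimal inputs stay decimal:
#
#   t.x.round()     # -> int64 column (digits is None)
#   t.x.round(2)    # -> double column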
class Clip(ValueOp):
arg = Arg(rlz.strict_numeric)
lower = Arg(rlz.strict_numeric, default=None)
upper = Arg(rlz.strict_numeric, default=None)
output_type = rlz.typeof('arg')
class BaseConvert(ValueOp):
arg = Arg(rlz.one_of([rlz.integer, rlz.string]))
from_base = Arg(rlz.integer)
to_base = Arg(rlz.integer)
def output_type(self):
return rlz.shape_like(tuple(self.flat_args()), dt.string)
class MathUnaryOp(UnaryOp):
arg = Arg(rlz.numeric)
def output_type(self):
arg = self.arg
if isinstance(self.arg, ir.DecimalValue):
dtype = arg.type()
else:
dtype = dt.double
return rlz.shape_like(arg, dtype)
class ExpandingTypeMathUnaryOp(MathUnaryOp):
def output_type(self):
if not isinstance(self.arg, ir.DecimalValue):
return super().output_type()
arg = self.arg
return rlz.shape_like(arg, arg.type().largest)
class Exp(ExpandingTypeMathUnaryOp):
pass
class Sign(UnaryOp):
arg = Arg(rlz.numeric)
output_type = rlz.typeof('arg')
class Sqrt(MathUnaryOp):
pass
class Logarithm(MathUnaryOp):
arg = Arg(rlz.strict_numeric)
class Log(Logarithm):
arg = Arg(rlz.strict_numeric)
base = Arg(rlz.strict_numeric, default=None)
class Ln(Logarithm):
"""Natural logarithm"""
class Log2(Logarithm):
"""Logarithm base 2"""
class Log10(Logarithm):
"""Logarithm base 10"""
class Degrees(ExpandingTypeMathUnaryOp):
"""Converts radians to degrees"""
arg = Arg(rlz.numeric)
class Radians(MathUnaryOp):
"""Converts degrees to radians"""
arg = Arg(rlz.numeric)
# TRIGONOMETRIC OPERATIONS
class TrigonometricUnary(MathUnaryOp):
"""Trigonometric base unary"""
arg = Arg(rlz.numeric)
class TrigonometricBinary(BinaryOp):
"""Trigonometric base binary"""
left = Arg(rlz.numeric)
right = Arg(rlz.numeric)
output_type = rlz.shape_like('args', dt.float64)
class Acos(TrigonometricUnary):
"""Returns the arc cosine of x"""
class Asin(TrigonometricUnary):
"""Returns the arc sine of x"""
class Atan(TrigonometricUnary):
"""Returns the arc tangent of x"""
class Atan2(TrigonometricBinary):
"""Returns the arc tangent of x and y"""
class Cos(TrigonometricUnary):
"""Returns the cosine of x"""
class Cot(TrigonometricUnary):
"""Returns the cotangent of x"""
class Sin(TrigonometricUnary):
"""Returns the sine of x"""
class Tan(TrigonometricUnary):
"""Returns the tangent of x"""
class StringUnaryOp(UnaryOp):
arg = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class Uppercase(StringUnaryOp):
"""Convert string to all uppercase"""
class Lowercase(StringUnaryOp):
"""Convert string to all lowercase"""
class Reverse(StringUnaryOp):
"""Reverse string"""
class Strip(StringUnaryOp):
"""Remove whitespace from left and right sides of string"""
class LStrip(StringUnaryOp):
"""Remove whitespace from left side of string"""
class RStrip(StringUnaryOp):
"""Remove whitespace from right side of string"""
class Capitalize(StringUnaryOp):
"""Return a capitalized version of input string"""
class Substring(ValueOp):
arg = Arg(rlz.string)
start = Arg(rlz.integer)
length = Arg(rlz.integer, default=None)
output_type = rlz.shape_like('arg', dt.string)
class StrRight(ValueOp):
arg = Arg(rlz.string)
nchars = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.string)
class Repeat(ValueOp):
arg = Arg(rlz.string)
times = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.string)
class StringFind(ValueOp):
arg = Arg(rlz.string)
substr = Arg(rlz.string)
start = Arg(rlz.integer, default=None)
end = Arg(rlz.integer, default=None)
output_type = rlz.shape_like('arg', dt.int64)
class Translate(ValueOp):
arg = Arg(rlz.string)
from_str = Arg(rlz.string)
to_str = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class LPad(ValueOp):
arg = Arg(rlz.string)
length = Arg(rlz.integer)
pad = Arg(rlz.string, default=None)
output_type = rlz.shape_like('arg', dt.string)
class RPad(ValueOp):
arg = Arg(rlz.string)
length = Arg(rlz.integer)
pad = Arg(rlz.string, default=None)
output_type = rlz.shape_like('arg', dt.string)
class FindInSet(ValueOp):
needle = Arg(rlz.string)
values = Arg(rlz.list_of(rlz.string, min_length=1))
output_type = rlz.shape_like('needle', dt.int64)
class StringJoin(ValueOp):
sep = Arg(rlz.string)
arg = Arg(rlz.list_of(rlz.string, min_length=1))
def output_type(self):
return rlz.shape_like(tuple(self.flat_args()), dt.string)
class StartsWith(ValueOp):
arg = Arg(rlz.string)
start = Arg(rlz.string)
output_type = rlz.shape_like("arg", dt.boolean)
class EndsWith(ValueOp):
arg = Arg(rlz.string)
end = Arg(rlz.string)
output_type = rlz.shape_like("arg", dt.boolean)
class BooleanValueOp:
pass
class FuzzySearch(ValueOp, BooleanValueOp):
arg = Arg(rlz.string)
pattern = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.boolean)
class StringSQLLike(FuzzySearch):
arg = Arg(rlz.string)
pattern = Arg(rlz.string)
escape = Arg(str, default=None)
class StringSQLILike(StringSQLLike):
"""SQL ilike operation"""
class RegexSearch(FuzzySearch):
pass
class RegexExtract(ValueOp):
arg = Arg(rlz.string)
pattern = Arg(rlz.string)
index = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.string)
class RegexReplace(ValueOp):
arg = Arg(rlz.string)
pattern = Arg(rlz.string)
replacement = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class StringReplace(ValueOp):
arg = Arg(rlz.string)
pattern = Arg(rlz.string)
replacement = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class StringSplit(ValueOp):
arg = Arg(rlz.string)
delimiter = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.Array(dt.string))
class StringConcat(ValueOp):
arg = Arg(rlz.list_of(rlz.string))
output_type = rlz.shape_like('arg', dt.string)
class ParseURL(ValueOp):
arg = Arg(rlz.string)
extract = Arg(
rlz.isin(
{
'PROTOCOL',
'HOST',
'PATH',
'REF',
'AUTHORITY',
'FILE',
'USERINFO',
'QUERY',
}
)
)
key = Arg(rlz.string, default=None)
output_type = rlz.shape_like('arg', dt.string)
class StringLength(UnaryOp):
"""
Compute length of strings
Returns
-------
length : int32
"""
output_type = rlz.shape_like('arg', dt.int32)
class StringAscii(UnaryOp):
output_type = rlz.shape_like('arg', dt.int32)
# ----------------------------------------------------------------------
class Reduction(ValueOp):
_reduction = True
class Count(Reduction):
arg = Arg((ir.ColumnExpr, ir.TableExpr))
where = Arg(rlz.boolean, default=None)
def output_type(self):
return functools.partial(ir.IntegerScalar, dtype=dt.int64)
class Arbitrary(Reduction):
arg = Arg(rlz.column(rlz.any))
how = Arg(rlz.isin({'first', 'last', 'heavy'}), default=None)
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class BitAnd(Reduction):
"""Aggregate bitwise AND operation.
All elements in an integer column are ANDed together. This can be used
to determine which bit flags are set on all elements.
Resources:
* `BigQuery BIT_AND
<https://cloud.google.com/bigquery/docs/reference/standard-sql/aggregate_functions#bit_and>`_
* `MySQL BIT_AND
<https://dev.mysql.com/doc/refman/5.7/en/aggregate-functions.html#function_bit-and>`_
"""
arg = Arg(rlz.column(rlz.integer))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class BitOr(Reduction):
"""Aggregate bitwise OR operation.
All elements in an integer column are ORed together. This can be used
to determine which bit flags are set on any element.
Resources:
* `BigQuery BIT_OR
<https://cloud.google.com/bigquery/docs/reference/standard-sql/aggregate_functions#bit_or>`_
* `MySQL BIT_OR
<https://dev.mysql.com/doc/refman/5.7/en/aggregate-functions.html#function_bit-or>`_
"""
arg = Arg(rlz.column(rlz.integer))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class BitXor(Reduction):
"""Aggregate bitwise XOR operation.
All elements in an integer column are XORed together. This can be used
as a parity checksum of element values.
Resources:
* `BigQuery BIT_XOR
<https://cloud.google.com/bigquery/docs/reference/standard-sql/aggregate_functions#bit_xor>`_
* `MySQL BIT_XOR
<https://dev.mysql.com/doc/refman/5.7/en/aggregate-functions.html#function_bit-xor>`_
"""
arg = Arg(rlz.column(rlz.integer))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
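# Usage sketch (illustrative only). The bitwise aggregates above reduce an
# integer column with AND/OR/XOR; the column method names bit_and/bit_or/
# bit_xor are assumptions based on the op names:
#
#   t = ibis.table([('flags', 'int64')], name='t')
#   t.flags.bit_and()   # bits set on all rows
#   t.flags.bit_or()    # bits set on any row
#   t.flags.bit_xor()   # parity checksum of the column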
class Sum(Reduction):
arg = Arg(rlz.column(rlz.numeric))
where = Arg(rlz.boolean, default=None)
def output_type(self):
if isinstance(self.arg, ir.BooleanValue):
dtype = dt.int64
else:
dtype = self.arg.type().largest
return dtype.scalar_type()
class Mean(Reduction):
arg = Arg(rlz.column(rlz.numeric))
where = Arg(rlz.boolean, default=None)
def output_type(self):
if isinstance(self.arg, ir.DecimalValue):
dtype = self.arg.type()
else:
dtype = dt.float64
return dtype.scalar_type()
class Quantile(Reduction):
arg = Arg(rlz.any)
quantile = Arg(rlz.strict_numeric)
interpolation = Arg(
rlz.isin({'linear', 'lower', 'higher', 'midpoint', 'nearest'}),
default='linear',
)
def output_type(self):
return dt.float64.scalar_type()
class MultiQuantile(Quantile):
arg = Arg(rlz.any)
quantile = Arg(rlz.value(dt.Array(dt.float64)))
interpolation = Arg(
rlz.isin({'linear', 'lower', 'higher', 'midpoint', 'nearest'}),
default='linear',
)
def output_type(self):
return dt.Array(dt.float64).scalar_type()
class VarianceBase(Reduction):
arg = Arg(rlz.column(rlz.numeric))
how = Arg(rlz.isin({'sample', 'pop'}), default=None)
where = Arg(rlz.boolean, default=None)
def output_type(self):
if isinstance(self.arg, ir.DecimalValue):
dtype = self.arg.type().largest
else:
dtype = dt.float64
return dtype.scalar_type()
class StandardDev(VarianceBase):
pass
class Variance(VarianceBase):
pass
class Correlation(Reduction):
"""Coefficient of correlation of a set of number pairs."""
left = Arg(rlz.column(rlz.numeric))
right = Arg(rlz.column(rlz.numeric))
how = Arg(rlz.isin({'sample', 'pop'}), default=None)
where = Arg(rlz.boolean, default=None)
def output_type(self):
return dt.float64.scalar_type()
class Covariance(Reduction):
"""Covariance of a set of number pairs."""
left = Arg(rlz.column(rlz.numeric))
right = Arg(rlz.column(rlz.numeric))
how = Arg(rlz.isin({'sample', 'pop'}), default=None)
where = Arg(rlz.boolean, default=None)
def output_type(self):
return dt.float64.scalar_type()
class Max(Reduction):
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class Min(Reduction):
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class HLLCardinality(Reduction):
"""Approximate number of unique values using HyperLogLog algorithm.
Impala offers the NDV built-in function for this.
"""
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
def output_type(self):
# Impala 2.0 and higher returns a DOUBLE
# return ir.DoubleScalar
return functools.partial(ir.IntegerScalar, dtype=dt.int64)
class GroupConcat(Reduction):
arg = Arg(rlz.column(rlz.any))
sep = Arg(rlz.string, default=',')
where = Arg(rlz.boolean, default=None)
def output_type(self):
return dt.string.scalar_type()
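# Usage sketch (illustrative only; the group_concat method and its `sep`
# keyword are assumed from the op definition above):
#
#   t = ibis.table([('g', 'string'), ('name', 'string')], name='t')
#   t.group_by('g').aggregate([t.name.group_concat(sep='|').name('names')])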
class CMSMedian(Reduction):
"""
Compute the approximate median of a set of comparable values using the
Count-Min-Sketch algorithm. Exposed in Impala using APPX_MEDIAN.
"""
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
# ----------------------------------------------------------------------
# Analytic functions
class AnalyticOp(ValueOp):
pass
class WindowOp(ValueOp):
expr = Arg(rlz.noop)
window = Arg(rlz.noop)
output_type = rlz.array_like('expr')
display_argnames = False
def __init__(self, expr, window):
from ibis.expr.analysis import is_analytic
from ibis.expr.window import propagate_down_window
if not is_analytic(expr):
raise com.IbisInputError(
'Expression does not contain a valid window operation'
)
table = ir.find_base_table(expr)
if table is not None:
window = window.bind(table)
if window.max_lookback is not None:
error_msg = (
"'max lookback' windows must be ordered "
"by a timestamp column"
)
if len(window._order_by) != 1:
raise com.IbisInputError(error_msg)
order_var = window._order_by[0].op().args[0]
if not isinstance(order_var.type(), dt.Timestamp):
raise com.IbisInputError(error_msg)
expr = propagate_down_window(expr, window)
super().__init__(expr, window)
def over(self, window):
new_window = self.window.combine(window)
return WindowOp(self.expr, new_window)
@property
def inputs(self):
return self.expr.op().inputs[0], self.window
def root_tables(self):
return distinct_roots(
self.expr, *self.window._order_by, *self.window._group_by
)
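# Usage sketch (illustrative only; assumes `import ibis`). A WindowOp is
# normally created by calling .over() on an analytic or reduction expression
# with a window spec, which is what the constructor above validates:
#
#   t = ibis.table([('g', 'string'), ('ts', 'timestamp'), ('v', 'double')],
#                  name='t')
#   w = ibis.window(group_by=t.g, order_by=t.ts)
#   running_total = t.v.sum().over(w)   # becomes a WindowOp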
class ShiftBase(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
offset = Arg(rlz.one_of((rlz.integer, rlz.interval)), default=None)
default = Arg(rlz.any, default=None)
output_type = rlz.typeof('arg')
class Lag(ShiftBase):
pass
class Lead(ShiftBase):
pass
class RankBase(AnalyticOp):
def output_type(self):
return dt.int64.column_type()
class MinRank(RankBase):
"""
Compute position of first element within each equal-value group in sorted
order.
Examples
--------
values ranks
1 0
1 0
2 2
2 2
2 2
3 5
Returns
-------
ranks : Int64Column, starting from 0
"""
# Equivalent to SQL RANK()
arg = Arg(rlz.column(rlz.any))
class DenseRank(RankBase):
"""
Compute position of first element within each equal-value group in sorted
order, ignoring duplicate values.
Examples
--------
values ranks
1 0
1 0
2 1
2 1
2 1
3 2
Returns
-------
ranks : Int64Column, starting from 0
"""
# Equivalent to SQL DENSE_RANK()
arg = Arg(rlz.column(rlz.any))
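# Usage sketch (illustrative only; the rank()/dense_rank() column method
# names are assumed from the op names). Both produce 0-based ranks as shown
# in the docstring examples above:
#
#   t = ibis.table([('values', 'int64')], name='t')
#   t.values.rank()         # MinRank, SQL RANK() semantics
#   t.values.dense_rank()   # DenseRank, SQL DENSE_RANK() semantics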
class RowNumber(RankBase):
"""
Compute row number starting from 0 after sorting by column expression
Examples
--------
>>> import ibis
>>> t = ibis.table([('values', dt.int64)])
>>> w = ibis.window(order_by=t.values)
>>> row_num = ibis.row_number().over(w)
>>> result = t[t.values, row_num.name('row_num')]
Returns
-------
row_number : Int64Column, starting from 0
"""
# Equivalent to SQL ROW_NUMBER()
class CumulativeOp(AnalyticOp):
pass
class CumulativeSum(CumulativeOp):
"""Cumulative sum. Requires an order window."""
arg = Arg(rlz.column(rlz.numeric))
def output_type(self):
if isinstance(self.arg, ir.BooleanValue):
dtype = dt.int64
else:
dtype = self.arg.type().largest
return dtype.column_type()
class CumulativeMean(CumulativeOp):
"""Cumulative mean. Requires an order window."""
arg = Arg(rlz.column(rlz.numeric))
def output_type(self):
if isinstance(self.arg, ir.DecimalValue):
dtype = self.arg.type().largest
else:
dtype = dt.float64
return dtype.column_type()
class CumulativeMax(CumulativeOp):
"""Cumulative max. Requires an order window."""
arg = Arg(rlz.column(rlz.any))
output_type = rlz.array_like('arg')
class CumulativeMin(CumulativeOp):
"""Cumulative min. Requires an order window."""
arg = Arg(rlz.column(rlz.any))
output_type = rlz.array_like('arg')
class PercentRank(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
output_type = rlz.shape_like('arg', dt.double)
class NTile(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
buckets = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.int64)
class FirstValue(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
output_type = rlz.typeof('arg')
class LastValue(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
output_type = rlz.typeof('arg')
class NthValue(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
nth = Arg(rlz.integer)
output_type = rlz.typeof('arg')
# ----------------------------------------------------------------------
# Distinct stuff
class Distinct(TableNode, HasSchema):
"""
Distinct is a table-level unique-ing operation.
In SQL, you might have:
SELECT DISTINCT foo
FROM table
SELECT DISTINCT foo, bar
FROM table
"""
table = Arg(ir.TableExpr)
def _validate(self):
# check whether schema has overlapping columns or not
assert self.schema
@cached_property
def schema(self):
return self.table.schema()
def blocks(self):
return True
class DistinctColumn(ValueOp):
"""
    COUNT(DISTINCT ...) is really just syntactic sugar, but we provide a
distinct().count() nicety for users nonetheless.
    For all intents and purposes this behaves like Distinct, but it can be
    distinguished later during evaluation if the result should be array-like
    versus table-like, and it supports calling count().
"""
arg = Arg(rlz.noop)
output_type = rlz.typeof('arg')
def count(self):
"""Only valid if the distinct contains a single column"""
return CountDistinct(self.arg)
class CountDistinct(Reduction):
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
def output_type(self):
return dt.int64.scalar_type()
# ---------------------------------------------------------------------
# Boolean reductions and semi/anti join support
class Any(ValueOp):
# Depending on the kind of input boolean array, the result might either be
# array-like (an existence-type predicate) or scalar (a reduction)
arg = Arg(rlz.column(rlz.boolean))
@property
def _reduction(self):
roots = self.arg.op().root_tables()
return len(roots) < 2
def output_type(self):
if self._reduction:
return dt.boolean.scalar_type()
else:
return dt.boolean.column_type()
def negate(self):
return NotAny(self.arg)
class All(ValueOp):
arg = Arg(rlz.column(rlz.boolean))
output_type = rlz.scalar_like('arg')
_reduction = True
def negate(self):
return NotAll(self.arg)
class NotAny(Any):
def negate(self):
return Any(self.arg)
class NotAll(All):
def negate(self):
return All(self.arg)
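# Usage sketch (illustrative only). Over a single root table Any/All behave
# as boolean reductions; when the boolean argument spans more than one root
# table, Any instead acts as an existence predicate (see _reduction above):
#
#   t = ibis.table([('x', 'int64')], name='t')
#   (t.x > 0).any()   # scalar boolean reduction
#   (t.x > 0).all()   # scalar boolean reduction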
class CumulativeAny(CumulativeOp):
arg = Arg(rlz.column(rlz.boolean))
output_type = rlz.typeof('arg')
class CumulativeAll(CumulativeOp):
arg = Arg(rlz.column(rlz.boolean))
output_type = rlz.typeof('arg')
# ---------------------------------------------------------------------
class TypedCaseBuilder:
__slots__ = ()
def type(self):
types = [result.type() for result in self.results]
return dt.highest_precedence(types)
def else_(self, result_expr):
"""
        Specify the default result returned when no case matches.
Returns
-------
builder : CaseBuilder
"""
kwargs = {
slot: getattr(self, slot)
for slot in self.__slots__
if slot != 'default'
}
result_expr = ir.as_value_expr(result_expr)
kwargs['default'] = result_expr
# Maintain immutability
return type(self)(**kwargs)
def end(self):
default = self.default
if default is None:
default = ir.null().cast(self.type())
args = [
getattr(self, slot) for slot in self.__slots__ if slot != 'default'
]
args.append(default)
op = self.__class__.case_op(*args)
return op.to_expr()
class SimpleCase(ValueOp):
base = Arg(rlz.any)
cases = Arg(rlz.list_of(rlz.any))
results = Arg(rlz.list_of(rlz.any))
default = Arg(rlz.any)
def _validate(self):
assert len(self.cases) == len(self.results)
def root_tables(self):
return distinct_roots(
*itertools.chain(
[self.base],
self.cases,
self.results,
[] if self.default is None else [self.default],
)
)
def output_type(self):
exprs = self.results + [self.default]
return rlz.shape_like(self.base, dtype=exprs.type())
class SimpleCaseBuilder(TypedCaseBuilder):
__slots__ = 'base', 'cases', 'results', 'default'
case_op = SimpleCase
def __init__(self, base, cases=None, results=None, default=None):
self.base = base
self.cases = list(cases if cases is not None else [])
self.results = list(results if results is not None else [])
self.default = default
def when(self, case_expr, result_expr):
"""
Add a new case-result pair.
Parameters
----------
case : Expr
Expression to equality-compare with base expression. Must be
comparable with the base.
result : Expr
Value when the case predicate evaluates to true.
Returns
-------
builder : CaseBuilder
"""
case_expr = ir.as_value_expr(case_expr)
result_expr = ir.as_value_expr(result_expr)
if not rlz.comparable(self.base, case_expr):
raise TypeError(
'Base expression and passed case are not ' 'comparable'
)
cases = list(self.cases)
cases.append(case_expr)
results = list(self.results)
results.append(result_expr)
# Maintain immutability
return type(self)(self.base, cases, results, self.default)
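# Usage sketch (illustrative only; assumes a table `t` with an int64 column
# `x`). SimpleCaseBuilder backs the value.case() builder API, which
# equality-compares each case against the base expression:
#
#   label = (t.x.case()
#             .when(1, 'one')
#             .when(2, 'two')
#             .else_('other')
#             .end())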
class SearchedCase(ValueOp):
cases = Arg(rlz.list_of(rlz.boolean))
results = Arg(rlz.list_of(rlz.any))
default = Arg(rlz.any)
def _validate(self):
assert len(self.cases) == len(self.results)
def root_tables(self):
cases, results, default = self.args
return distinct_roots(
*itertools.chain(
cases.values,
results.values,
[] if default is None else [default],
)
)
def output_type(self):
exprs = self.results + [self.default]
dtype = rlz.highest_precedence_dtype(exprs)
return rlz.shape_like(self.cases, dtype)
class SearchedCaseBuilder(TypedCaseBuilder):
__slots__ = 'cases', 'results', 'default'
case_op = SearchedCase
def __init__(self, cases=None, results=None, default=None):
self.cases = list(cases if cases is not None else [])
self.results = list(results if results is not None else [])
self.default = default
def when(self, case_expr, result_expr):
"""
Add a new case-result pair.
Parameters
----------
case : Expr
Expression to equality-compare with base expression. Must be
comparable with the base.
result : Expr
Value when the case predicate evaluates to true.
Returns
-------
builder : CaseBuilder
"""
case_expr = ir.as_value_expr(case_expr)
result_expr = ir.as_value_expr(result_expr)
if not isinstance(case_expr, ir.BooleanValue):
raise TypeError(case_expr)
cases = list(self.cases)
cases.append(case_expr)
results = list(self.results)
results.append(result_expr)
# Maintain immutability
return type(self)(cases, results, self.default)
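# Usage sketch (illustrative only; assumes `import ibis` and a table `t`).
# SearchedCaseBuilder backs the top-level ibis.case() builder, where each
# case is an arbitrary boolean predicate:
#
#   expr = (ibis.case()
#             .when(t.x < 0, 'negative')
#             .when(t.x == 0, 'zero')
#             .else_('positive')
#             .end())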
class Where(ValueOp):
"""
Ternary case expression, equivalent to
bool_expr.case()
.when(True, true_expr)
.else_(false_or_null_expr)
"""
bool_expr = Arg(rlz.boolean)
true_expr = Arg(rlz.any)
false_null_expr = Arg(rlz.any)
def output_type(self):
return rlz.shape_like(self.bool_expr, self.true_expr.type())
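# Usage sketch (illustrative only). Where backs the ifelse method on boolean
# expressions, a shorthand for the two-branch searched case shown above:
#
#   clipped = (t.x > 100).ifelse(100, t.x)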
def _validate_join_tables(left, right):
if not isinstance(left, ir.TableExpr):
raise TypeError(
'Can only join table expressions, got {} for '
'left table'.format(type(left).__name__)
)
if not isinstance(right, ir.TableExpr):
raise TypeError(
'Can only join table expressions, got {} for '
'right table'.format(type(right).__name__)
)
def _make_distinct_join_predicates(left, right, predicates):
# see GH #667
# If left and right table have a common parent expression (e.g. they
# have different filters), must add a self-reference and make the
# appropriate substitution in the join predicates
if left.equals(right):
right = right.view()
predicates = _clean_join_predicates(left, right, predicates)
return left, right, predicates
def _clean_join_predicates(left, right, predicates):
import ibis.expr.analysis as L
result = []
if not isinstance(predicates, (list, tuple)):
predicates = [predicates]
for pred in predicates:
if isinstance(pred, tuple):
if len(pred) != 2:
                raise com.ExpressionError('Join key tuple must be length 2')
lk, rk = pred
lk = left._ensure_expr(lk)
rk = right._ensure_expr(rk)
pred = lk == rk
elif isinstance(pred, str):
pred = left[pred] == right[pred]
elif not isinstance(pred, ir.Expr):
raise NotImplementedError
if not isinstance(pred, ir.BooleanColumn):
raise com.ExpressionError('Join predicate must be comparison')
preds = L.flatten_predicate(pred)
result.extend(preds)
_validate_join_predicates(left, right, result)
return result
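# Usage sketch (illustrative only; assumes `import ibis`). As the cleaning
# logic above shows, join predicates may be a boolean expression, a
# (left_key, right_key) tuple, or a bare column name shared by both tables:
#
#   a = ibis.table([('key', 'string'), ('v', 'int64')], name='a')
#   b = ibis.table([('key', 'string'), ('w', 'int64')], name='b')
#   a.inner_join(b, a.key == b.key)      # boolean predicate
#   a.inner_join(b, [('key', 'key')])    # key-pair tuple
#   a.inner_join(b, ['key'])             # shared column name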
def _validate_join_predicates(left, right, predicates):
from ibis.expr.analysis import fully_originate_from
# Validate join predicates. Each predicate must be valid jointly when
# considering the roots of each input table
for predicate in predicates:
if not fully_originate_from(predicate, [left, right]):
raise com.RelationError(
'The expression {!r} does not fully '
'originate from dependencies of the table '
'expression.'.format(predicate)
)
class Join(TableNode):
left = Arg(rlz.noop)
right = Arg(rlz.noop)
predicates = Arg(rlz.noop)
def __init__(self, left, right, predicates):
_validate_join_tables(left, right)
left, right, predicates = _make_distinct_join_predicates(
left, right, predicates
)
super().__init__(left, right, predicates)
def _get_schema(self):
# For joins retaining both table schemas, merge them together here
left = self.left
right = self.right
if not left._is_materialized():
left = left.materialize()
if not right._is_materialized():
right = right.materialize()
sleft = left.schema()
sright = right.schema()
overlap = set(sleft.names) & set(sright.names)
if overlap:
raise com.RelationError(
'Joined tables have overlapping names: %s' % str(list(overlap))
)
return sleft.append(sright)
def has_schema(self):
return False
def root_tables(self):
if util.all_of([self.left.op(), self.right.op()], (Join, Selection)):
# Unraveling is not possible
return [self.left.op(), self.right.op()]
else:
return distinct_roots(self.left, self.right)
class InnerJoin(Join):
pass
class LeftJoin(Join):
pass
class RightJoin(Join):
pass
class OuterJoin(Join):
pass
class AnyInnerJoin(Join):
pass
class AnyLeftJoin(Join):
pass
class LeftSemiJoin(Join):
def _get_schema(self):
return self.left.schema()
class LeftAntiJoin(Join):
def _get_schema(self):
return self.left.schema()
class MaterializedJoin(TableNode, HasSchema):
join = Arg(ir.TableExpr)
def _validate(self):
assert isinstance(self.join.op(), Join)
# check whether the underlying schema has overlapping columns or not
assert self.schema
@cached_property
def schema(self):
return self.join.op()._get_schema()
def root_tables(self):
return self.join.op().root_tables()
def blocks(self):
return True
class CrossJoin(InnerJoin):
"""
    Some databases have a CROSS JOIN operator that may be preferable to an
    INNER JOIN with no predicates.
"""
def __init__(self, *args, **kwargs):
if 'prefixes' in kwargs:
raise NotImplementedError
if len(args) < 2:
raise com.IbisInputError('Must pass at least 2 tables')
left = args[0]
right = args[1]
for t in args[2:]:
right = right.cross_join(t)
InnerJoin.__init__(self, left, right, [])
class AsOfJoin(Join):
left = Arg(rlz.noop)
right = Arg(rlz.noop)
predicates = Arg(rlz.noop)
by = Arg(rlz.noop, default=None)
tolerance = Arg(rlz.interval(), default=None)
def __init__(self, left, right, predicates, by, tolerance):
super().__init__(left, right, predicates)
self.by = _clean_join_predicates(self.left, self.right, by)
self.tolerance = tolerance
self._validate_args(['by', 'tolerance'])
def _validate_args(self, args: List[str]):
for arg in args:
argument = self.signature[arg]
value = argument.validate(getattr(self, arg))
setattr(self, arg, value)
class SetOp(TableNode, HasSchema):
left = Arg(rlz.noop)
right = Arg(rlz.noop)
def _validate(self):
if not self.left.schema().equals(self.right.schema()):
raise com.RelationError(
'Table schemas must be equal for set operations'
)
@cached_property
def schema(self):
return self.left.schema()
def blocks(self):
return True
class Union(SetOp):
distinct = Arg(rlz.validator(bool), default=False)
class Intersection(SetOp):
pass
class Difference(SetOp):
pass
class Limit(TableNode):
table = Arg(ir.TableExpr)
n = Arg(rlz.validator(int))
offset = Arg(rlz.validator(int))
def blocks(self):
return True
@property
def schema(self):
return self.table.schema()
def has_schema(self):
return self.table.op().has_schema()
def root_tables(self):
return [self]
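# Usage sketch (illustrative only). Limit is produced by TableExpr.limit:
#
#   t.limit(10)             # first 10 rows
#   t.limit(10, offset=20)  # rows 21-30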
# --------------------------------------------------------------------
# Sorting
def to_sort_key(table, key):
if isinstance(key, DeferredSortKey):
key = key.resolve(table)
if isinstance(key, ir.SortExpr):
return key
if isinstance(key, (tuple, list)):
key, sort_order = key
else:
sort_order = True
if not isinstance(key, ir.Expr):
key = table._ensure_expr(key)
if isinstance(key, (ir.SortExpr, DeferredSortKey)):
return to_sort_key(table, key)
if isinstance(sort_order, str):
if sort_order.lower() in ('desc', 'descending'):
sort_order = False
elif not isinstance(sort_order, bool):
sort_order = bool(sort_order)
return SortKey(key, ascending=sort_order).to_expr()
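# Usage sketch (illustrative only). to_sort_key accepts several key forms,
# which is what TableExpr.sort_by passes through:
#
#   t.sort_by('a')                    # ascending by column name
#   t.sort_by([('a', False)])         # descending via (key, ascending) tuple
#   t.sort_by([('a', 'desc'), 'b'])   # 'desc'/'descending' strings also work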
class SortKey(Node):
expr = Arg(rlz.column(rlz.any))
ascending = Arg(rlz.validator(bool), default=True)
def __repr__(self):
# Temporary
rows = [
'Sort key:',
' ascending: {0!s}'.format(self.ascending),
util.indent(_safe_repr(self.expr), 2),
]
return '\n'.join(rows)
def output_type(self):
return ir.SortExpr
def root_tables(self):
return self.expr.op().root_tables()
def equals(self, other, cache=None):
# TODO: might generalize this equals based on fields
# requires a proxy class with equals for non expr values
return (
isinstance(other, SortKey)
and self.expr.equals(other.expr, cache=cache)
and self.ascending == other.ascending
)
def resolve_name(self):
return self.expr.get_name()
class DeferredSortKey:
def __init__(self, what, ascending=True):
self.what = what
self.ascending = ascending
def resolve(self, parent):
what = parent._ensure_expr(self.what)
return SortKey(what, ascending=self.ascending).to_expr()
class SelfReference(TableNode, HasSchema):
table = Arg(ir.TableExpr)
@cached_property
def schema(self):
return self.table.schema()
def root_tables(self):
# The dependencies of this operation are not walked, which makes the
# table expression holding this relationally distinct from other
# expressions, so things like self-joins are possible
return [self]
def blocks(self):
return True
class Selection(TableNode, HasSchema):
table = Arg(ir.TableExpr)
selections = Arg(rlz.noop, default=None)
predicates = Arg(rlz.noop, default=None)
sort_keys = Arg(rlz.noop, default=None)
def __init__(
self, table, selections=None, predicates=None, sort_keys=None
):
import ibis.expr.analysis as L
# Argument cleaning
selections = util.promote_list(
selections if selections is not None else []
)
projections = []
for selection in selections:
if isinstance(selection, str):
projection = table[selection]
else:
projection = selection
projections.append(projection)
sort_keys = [
to_sort_key(table, k)
for k in util.promote_list(
sort_keys if sort_keys is not None else []
)
]
predicates = list(
toolz.concat(
map(
L.flatten_predicate,
predicates if predicates is not None else [],
)
)
)
super().__init__(
table=table,
selections=projections,
predicates=predicates,
sort_keys=sort_keys,
)
def _validate(self):
from ibis.expr.analysis import FilterValidator
# Need to validate that the column expressions are compatible with the
# input table; this means they must either be scalar expressions or
# array expressions originating from the same root table expression
dependent_exprs = self.selections + self.sort_keys
self.table._assert_valid(dependent_exprs)
# Validate predicates
validator = FilterValidator([self.table])
validator.validate_all(self.predicates)
# Validate no overlapping columns in schema
assert self.schema
@cached_property
def schema(self):
# Resolve schema and initialize
if not self.selections:
return self.table.schema()
types = []
names = []
for projection in self.selections:
if isinstance(projection, ir.DestructColumn):
# If this is a destruct, then we destructure
# the result and assign to multiple columns
struct_type = projection.type()
for name in struct_type.names:
names.append(name)
types.append(struct_type[name])
elif isinstance(projection, ir.ValueExpr):
names.append(projection.get_name())
types.append(projection.type())
elif isinstance(projection, ir.TableExpr):
schema = projection.schema()
names.extend(schema.names)
types.extend(schema.types)
return Schema(names, types)
def blocks(self):
return bool(self.selections)
def substitute_table(self, table_expr):
return Selection(table_expr, self.selections)
def root_tables(self):
return [self]
def can_add_filters(self, wrapped_expr, predicates):
pass
@staticmethod
def empty_or_equal(lefts, rights):
return not lefts or not rights or all_equal(lefts, rights)
def compatible_with(self, other):
        # self and other are equivalent except for predicates, selections, or
        # sort keys, any of which is allowed to be empty. If both are
        # non-empty then they must be equal.
if self.equals(other):
return True
if not isinstance(other, type(self)):
return False
return self.table.equals(other.table) and (
self.empty_or_equal(self.predicates, other.predicates)
and self.empty_or_equal(self.selections, other.selections)
and self.empty_or_equal(self.sort_keys, other.sort_keys)
)
# Operator combination / fusion logic
def aggregate(self, this, metrics, by=None, having=None):
if len(self.selections) > 0:
return Aggregation(this, metrics, by=by, having=having)
else:
helper = AggregateSelection(this, metrics, by, having)
return helper.get_result()
def sort_by(self, expr, sort_exprs):
sort_exprs = util.promote_list(sort_exprs)
if not self.blocks():
resolved_keys = _maybe_convert_sort_keys(self.table, sort_exprs)
if resolved_keys and self.table._is_valid(resolved_keys):
return Selection(
self.table,
self.selections,
predicates=self.predicates,
sort_keys=self.sort_keys + resolved_keys,
)
return Selection(expr, [], sort_keys=sort_exprs)
class AggregateSelection:
# sort keys cannot be discarded because of order-dependent
# aggregate functions like GROUP_CONCAT
def __init__(self, parent, metrics, by, having):
self.parent = parent
self.op = parent.op()
self.metrics = metrics
self.by = by
self.having = having
def get_result(self):
if self.op.blocks():
return self._plain_subquery()
else:
return self._attempt_pushdown()
def _plain_subquery(self):
return Aggregation(
self.parent, self.metrics, by=self.by, having=self.having
)
def _attempt_pushdown(self):
metrics_valid, lowered_metrics = self._pushdown_exprs(self.metrics)
by_valid, lowered_by = self._pushdown_exprs(self.by)
having_valid, lowered_having = self._pushdown_exprs(
self.having or None
)
if metrics_valid and by_valid and having_valid:
return Aggregation(
self.op.table,
lowered_metrics,
by=lowered_by,
having=lowered_having,
predicates=self.op.predicates,
sort_keys=self.op.sort_keys,
)
else:
return self._plain_subquery()
def _pushdown_exprs(self, exprs):
import ibis.expr.analysis as L
if exprs is None:
return True, []
resolved = self.op.table._resolve(exprs)
subbed_exprs = []
valid = False
if resolved:
for x in util.promote_list(resolved):
subbed = L.sub_for(x, [(self.parent, self.op.table)])
subbed_exprs.append(subbed)
valid = self.op.table._is_valid(subbed_exprs)
else:
valid = False
return valid, subbed_exprs
def _maybe_convert_sort_keys(table, exprs):
try:
return [to_sort_key(table, k) for k in util.promote_list(exprs)]
except com.IbisError:
return None
class Aggregation(TableNode, HasSchema):
"""
metrics : per-group scalar aggregates
by : group expressions
having : post-aggregation predicate
TODO: not putting this in the aggregate operation yet
where : pre-aggregation predicate
"""
table = Arg(ir.TableExpr)
metrics = Arg(rlz.noop)
by = Arg(rlz.noop)
having = Arg(rlz.noop, default=None)
predicates = Arg(rlz.noop, default=None)
sort_keys = Arg(rlz.noop, default=None)
def __init__(
self,
table,
metrics,
by=None,
having=None,
predicates=None,
sort_keys=None,
):
# For tables, like joins, that are not materialized
metrics = self._rewrite_exprs(table, metrics)
by = [] if by is None else by
by = table._resolve(by)
having = [] if having is None else having
predicates = [] if predicates is None else predicates
# order by only makes sense with group by in an aggregation
sort_keys = [] if not by or sort_keys is None else sort_keys
sort_keys = [
to_sort_key(table, k) for k in util.promote_list(sort_keys)
]
by = self._rewrite_exprs(table, by)
having = self._rewrite_exprs(table, having)
predicates = self._rewrite_exprs(table, predicates)
sort_keys = self._rewrite_exprs(table, sort_keys)
super().__init__(
table=table,
metrics=metrics,
by=by,
having=having,
predicates=predicates,
sort_keys=sort_keys,
)
def _validate(self):
from ibis.expr.analysis import FilterValidator, is_reduction
# All aggregates are valid
for expr in self.metrics:
if not isinstance(expr, ir.ScalarExpr) or not is_reduction(expr):
raise TypeError(
'Passed a non-aggregate expression: %s' % _safe_repr(expr)
)
for expr in self.having:
if not isinstance(expr, ir.BooleanScalar):
raise com.ExpressionError(
'Having clause must be boolean '
'expression, was: {0!s}'.format(_safe_repr(expr))
)
# All non-scalar refs originate from the input table
all_exprs = self.metrics + self.by + self.having + self.sort_keys
self.table._assert_valid(all_exprs)
# Validate predicates
validator = FilterValidator([self.table])
validator.validate_all(self.predicates)
# Validate schema has no overlapping columns
assert self.schema
def _rewrite_exprs(self, table, what):
what = util.promote_list(what)
all_exprs = []
for expr in what:
if isinstance(expr, ir.ExprList):
all_exprs.extend(expr.exprs())
else:
bound_expr = ir.bind_expr(table, expr)
all_exprs.append(bound_expr)
return all_exprs
# TODO - #2832
# this optimization becomes O(n^2) when it calls into
# _lift_TableColumn in analysis.py, which itself is O(n) and is
# called on each input to the aggregation - thus creating the
# aggregation expression can be extremely slow on wide tables
# that contain a Selection.
# return [
# substitute_parents(x, past_projection=False) for x in all_exprs
# ]
def blocks(self):
return True
def substitute_table(self, table_expr):
return Aggregation(
table_expr, self.metrics, by=self.by, having=self.having
)
@cached_property
def schema(self):
names = []
types = []
for e in self.by + self.metrics:
if isinstance(e, ir.DestructValue):
# If this is a destruct, then we destructure
# the result and assign to multiple columns
struct_type = e.type()
for name in struct_type.names:
names.append(name)
types.append(struct_type[name])
else:
names.append(e.get_name())
types.append(e.type())
return Schema(names, types)
def sort_by(self, expr, sort_exprs):
sort_exprs = util.promote_list(sort_exprs)
resolved_keys = _maybe_convert_sort_keys(self.table, sort_exprs)
if resolved_keys and self.table._is_valid(resolved_keys):
return Aggregation(
self.table,
self.metrics,
by=self.by,
having=self.having,
predicates=self.predicates,
sort_keys=self.sort_keys + resolved_keys,
)
return Selection(expr, [], sort_keys=sort_exprs)
class NumericBinaryOp(BinaryOp):
left = Arg(rlz.numeric)
right = Arg(rlz.numeric)
class Add(NumericBinaryOp):
output_type = rlz.numeric_like('args', operator.add)
class Multiply(NumericBinaryOp):
output_type = rlz.numeric_like('args', operator.mul)
class Power(NumericBinaryOp):
def output_type(self):
if util.all_of(self.args, ir.IntegerValue):
return rlz.shape_like(self.args, dt.float64)
else:
return rlz.shape_like(self.args)
class Subtract(NumericBinaryOp):
output_type = rlz.numeric_like('args', operator.sub)
class Divide(NumericBinaryOp):
output_type = rlz.shape_like('args', dt.float64)
class FloorDivide(Divide):
output_type = rlz.shape_like('args', dt.int64)
class LogicalBinaryOp(BinaryOp):
left = Arg(rlz.boolean)
right = Arg(rlz.boolean)
output_type = rlz.shape_like('args', dt.boolean)
class Not(UnaryOp):
arg = Arg(rlz.boolean)
output_type = rlz.shape_like('arg', dt.boolean)
class Modulus(NumericBinaryOp):
output_type = rlz.numeric_like('args', operator.mod)
class And(LogicalBinaryOp):
pass
class Or(LogicalBinaryOp):
pass
class Xor(LogicalBinaryOp):
pass
class Comparison(BinaryOp, BooleanValueOp):
left = Arg(rlz.any)
right = Arg(rlz.any)
def __init__(self, left, right):
"""
Casting rules for type promotions (for resolving the output type) may
depend in some cases on the target backend.
TODO: how will overflows be handled? Can we provide anything useful in
Ibis to help the user avoid them?
:param left:
:param right:
"""
super().__init__(*self._maybe_cast_args(left, right))
def _maybe_cast_args(self, left, right):
# it might not be necessary?
with suppress(com.IbisTypeError):
return left, rlz.cast(right, left)
with suppress(com.IbisTypeError):
return rlz.cast(left, right), right
return left, right
def output_type(self):
if not rlz.comparable(self.left, self.right):
raise TypeError(
'Arguments with datatype {} and {} are '
'not comparable'.format(self.left.type(), self.right.type())
)
return rlz.shape_like(self.args, dt.boolean)
class Equals(Comparison):
pass
class NotEquals(Comparison):
pass
class GreaterEqual(Comparison):
pass
class Greater(Comparison):
pass
class LessEqual(Comparison):
pass
class Less(Comparison):
pass
class IdenticalTo(Comparison):
pass
class Between(ValueOp, BooleanValueOp):
arg = Arg(rlz.any)
lower_bound = Arg(rlz.any)
upper_bound = Arg(rlz.any)
def output_type(self):
arg, lower, upper = self.args
if not (rlz.comparable(arg, lower) and rlz.comparable(arg, upper)):
raise TypeError('Arguments are not comparable')
return rlz.shape_like(self.args, dt.boolean)
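# Usage sketch (illustrative only). Between backs the between method and
# requires both bounds to be comparable with the argument:
#
#   t.x.between(1, 10)   # boolean column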
class BetweenTime(Between):
arg = Arg(rlz.one_of([rlz.timestamp, rlz.time]))
lower_bound = Arg(rlz.one_of([rlz.time, rlz.string]))
upper_bound = Arg(rlz.one_of([rlz.time, rlz.string]))
class Contains(ValueOp, BooleanValueOp):
value = Arg(rlz.any)
options = Arg(
rlz.one_of(
[
rlz.list_of(rlz.any),
rlz.set_,
rlz.column(rlz.any),
rlz.array_of(rlz.any),
]
)
)
def __init__(self, value, options):
# it can be a single expression, like a column
if not isinstance(options, ir.Expr):
if util.any_of(options, ir.Expr):
# or a list of expressions
options = ir.sequence(options)
else:
# or a set of scalar values
options = frozenset(options)
super().__init__(value, options)
def output_type(self):
all_args = [self.value]
if isinstance(self.options, ir.ListExpr):
all_args += self.options
else:
all_args += [self.options]
return rlz.shape_like(all_args, dt.boolean)
class NotContains(Contains):
pass
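# Usage sketch (illustrative only). Contains/NotContains back the isin and
# notin methods; per the validator above, options may be literal values, a
# list of expressions, or a column:
#
#   t.x.isin([1, 2, 3])
#   t.x.notin([1, 2, 3])
#   t.x.isin(other_table.x)   # membership against another column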
class ReplaceValues(ValueOp):
"""
Apply a multi-value replacement on a particular column. As an example from
    SQL, given DAYOFWEEK(timestamp_col), replace 1 through 5 with "WEEKDAY" and
    6 and 7 with "WEEKEND".
"""
pass
class SummaryFilter(ValueOp):
expr = Arg(rlz.noop)
def output_type(self):
return dt.boolean.column_type()
class TopK(ValueOp):
arg = Arg(rlz.noop)
k = Arg(int)
by = Arg(rlz.noop)
def __init__(self, arg, k, by=None):
if by is None:
by = arg.count()
if not isinstance(arg, ir.ColumnExpr):
raise TypeError(arg)
if not isinstance(k, int) or k < 0:
            raise ValueError(
                'k must be a non-negative integer, was: {0}'.format(k)
            )
super().__init__(arg, k, by)
def output_type(self):
return ir.TopKExpr
def blocks(self):
return True
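# Usage sketch (illustrative only). TopK backs the topk method; by default it
# ranks by count, or by an explicit metric passed as `by` (see __init__):
#
#   t.city.topk(5)                       # 5 most frequent cities
#   t.city.topk(5, by=t.revenue.sum())   # top 5 cities by total revenue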
class Constant(ValueOp):
pass
class TimestampNow(Constant):
def output_type(self):
return dt.timestamp.scalar_type()
class RandomScalar(Constant):
def output_type(self):
return dt.float64.scalar_type()
class E(Constant):
def output_type(self):
return functools.partial(ir.FloatingScalar, dtype=dt.float64)
class Pi(Constant):
"""
The constant pi
"""
def output_type(self):
return functools.partial(ir.FloatingScalar, dtype=dt.float64)
class TemporalUnaryOp(UnaryOp):
arg = Arg(rlz.temporal)
class TimestampUnaryOp(UnaryOp):
arg = Arg(rlz.timestamp)
_date_units = {
'Y': 'Y',
'y': 'Y',
'year': 'Y',
'YEAR': 'Y',
'YYYY': 'Y',
'SYYYY': 'Y',
'YYY': 'Y',
'YY': 'Y',
'Q': 'Q',
'q': 'Q',
'quarter': 'Q',
'QUARTER': 'Q',
'M': 'M',
'month': 'M',
'MONTH': 'M',
'w': 'W',
'W': 'W',
'week': 'W',
'WEEK': 'W',
'd': 'D',
'D': 'D',
'J': 'D',
'day': 'D',
'DAY': 'D',
}
_time_units = {
'h': 'h',
'H': 'h',
'HH24': 'h',
'hour': 'h',
'HOUR': 'h',
'm': 'm',
'MI': 'm',
'minute': 'm',
'MINUTE': 'm',
's': 's',
'second': 's',
'SECOND': 's',
'ms': 'ms',
'millisecond': 'ms',
'MILLISECOND': 'ms',
'us': 'us',
    'microsecond': 'us',
    'MICROSECOND': 'us',
'ns': 'ns',
'nanosecond': 'ns',
'NANOSECOND': 'ns',
}
_timestamp_units = toolz.merge(_date_units, _time_units)
class TimestampTruncate(ValueOp):
arg = Arg(rlz.timestamp)
unit = Arg(rlz.isin(_timestamp_units))
output_type = rlz.shape_like('arg', dt.timestamp)
class DateTruncate(ValueOp):
arg = Arg(rlz.date)
unit = Arg(rlz.isin(_date_units))
output_type = rlz.shape_like('arg', dt.date)
class TimeTruncate(ValueOp):
arg = Arg(rlz.time)
unit = Arg(rlz.isin(_time_units))
output_type = rlz.shape_like('arg', dt.time)
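# Usage sketch (illustrative only; the truncate and date methods are assumed
# from the ops defined here). The truncate ops accept the unit aliases in
# _date_units/_time_units above:
#
#   t.ts.truncate('M')          # TimestampTruncate to month
#   t.ts.truncate('D')          # TimestampTruncate to day
#   t.ts.date().truncate('W')   # DateTruncate to week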
class Strftime(ValueOp):
arg = Arg(rlz.temporal)
format_str = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class StringToTimestamp(ValueOp):
arg = Arg(rlz.string)
format_str = Arg(rlz.string)
timezone = Arg(rlz.string, default=None)
output_type = rlz.shape_like('arg', dt.Timestamp(timezone='UTC'))
class ExtractTemporalField(TemporalUnaryOp):
output_type = rlz.shape_like('arg', dt.int32)
ExtractTimestampField = ExtractTemporalField
class ExtractDateField(ExtractTemporalField):
arg = Arg(rlz.one_of([rlz.date, rlz.timestamp]))
class ExtractTimeField(ExtractTemporalField):
arg = Arg(rlz.one_of([rlz.time, rlz.timestamp]))
class ExtractYear(ExtractDateField):
pass
class ExtractMonth(ExtractDateField):
pass
class ExtractDay(ExtractDateField):
pass
class ExtractDayOfYear(ExtractDateField):
pass
class ExtractQuarter(ExtractDateField):
pass
class ExtractEpochSeconds(ExtractDateField):
pass
class ExtractWeekOfYear(ExtractDateField):
pass
class ExtractHour(ExtractTimeField):
pass
class ExtractMinute(ExtractTimeField):
pass
class ExtractSecond(ExtractTimeField):
pass
class ExtractMillisecond(ExtractTimeField):
pass
class DayOfWeekIndex(UnaryOp):
arg = Arg(rlz.one_of([rlz.date, rlz.timestamp]))
output_type = rlz.shape_like('arg', dt.int16)
class DayOfWeekName(UnaryOp):
arg = Arg(rlz.one_of([rlz.date, rlz.timestamp]))
output_type = rlz.shape_like('arg', dt.string)
class DayOfWeekNode(Node):
arg = Arg(rlz.one_of([rlz.date, rlz.timestamp]))
def output_type(self):
return ir.DayOfWeek
class Time(UnaryOp):
output_type = rlz.shape_like('arg', dt.time)
class Date(UnaryOp):
output_type = rlz.shape_like('arg', dt.date)
class TimestampFromUNIX(ValueOp):
arg = Arg(rlz.any)
# Only pandas-based backends support 'ns'
unit = Arg(rlz.isin({'s', 'ms', 'us', 'ns'}))
output_type = rlz.shape_like('arg', dt.timestamp)
class DecimalUnaryOp(UnaryOp):
arg = Arg(rlz.decimal)
class DecimalPrecision(DecimalUnaryOp):
output_type = rlz.shape_like('arg', dt.int32)
class DecimalScale(UnaryOp):
output_type = rlz.shape_like('arg', dt.int32)
class Hash(ValueOp):
arg = Arg(rlz.any)
how = Arg(rlz.isin({'fnv', 'farm_fingerprint'}))
output_type = rlz.shape_like('arg', dt.int64)
class HashBytes(ValueOp):
arg = Arg(rlz.one_of({rlz.value(dt.string), rlz.value(dt.binary)}))
how = Arg(rlz.isin({'md5', 'sha1', 'sha256', 'sha512'}))
output_type = rlz.shape_like('arg', dt.binary)
class DateAdd(BinaryOp):
left = Arg(rlz.date)
right = Arg(rlz.interval(units={'Y', 'Q', 'M', 'W', 'D'}))
output_type = rlz.shape_like('left')
class DateSub(BinaryOp):
left = Arg(rlz.date)
right = Arg(rlz.interval(units={'Y', 'Q', 'M', 'W', 'D'}))
output_type = rlz.shape_like('left')
class DateDiff(BinaryOp):
left = Arg(rlz.date)
right = Arg(rlz.date)
output_type = rlz.shape_like('left', dt.Interval('D'))
class TimeAdd(BinaryOp):
left = Arg(rlz.time)
right = Arg(rlz.interval(units={'h', 'm', 's', 'ms', 'us', 'ns'}))
output_type = rlz.shape_like('left')
class TimeSub(BinaryOp):
left = Arg(rlz.time)
right = Arg(rlz.interval(units={'h', 'm', 's', 'ms', 'us', 'ns'}))
output_type = rlz.shape_like('left')
class TimeDiff(BinaryOp):
left = Arg(rlz.time)
right = Arg(rlz.time)
output_type = rlz.shape_like('left', dt.Interval('s'))
class TimestampAdd(BinaryOp):
left = Arg(rlz.timestamp)
right = Arg(
rlz.interval(
units={'Y', 'Q', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns'}
)
)
output_type = rlz.shape_like('left')
class TimestampSub(BinaryOp):
left = Arg(rlz.timestamp)
right = Arg(
rlz.interval(
units={'Y', 'Q', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns'}
)
)
output_type = rlz.shape_like('left')
class TimestampDiff(BinaryOp):
left = Arg(rlz.timestamp)
right = Arg(rlz.timestamp)
output_type = rlz.shape_like('left', dt.Interval('s'))
class IntervalBinaryOp(BinaryOp):
def output_type(self):
args = [
arg.cast(arg.type().value_type)
if isinstance(arg.type(), dt.Interval)
else arg
for arg in self.args
]
expr = rlz.numeric_like(args, self.__class__.op)(self)
left_dtype = self.left.type()
dtype_type = type(left_dtype)
additional_args = {
attr: getattr(left_dtype, attr)
for attr in dtype_type.__slots__
if attr not in {'unit', 'value_type'}
}
dtype = dtype_type(left_dtype.unit, expr.type(), **additional_args)
return rlz.shape_like(self.args, dtype=dtype)
class IntervalAdd(IntervalBinaryOp):
left = Arg(rlz.interval)
right = Arg(rlz.interval)
op = operator.add
class IntervalSubtract(IntervalBinaryOp):
left = Arg(rlz.interval)
right = Arg(rlz.interval)
op = operator.sub
class IntervalMultiply(IntervalBinaryOp):
left = Arg(rlz.interval)
right = Arg(rlz.numeric)
op = operator.mul
class IntervalFloorDivide(IntervalBinaryOp):
left = Arg(rlz.interval)
right = Arg(rlz.numeric)
op = operator.floordiv
class IntervalFromInteger(ValueOp):
arg = Arg(rlz.integer)
unit = Arg(
rlz.isin({'Y', 'Q', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns'})
)
@property
def resolution(self):
return dt.Interval(self.unit).resolution
def output_type(self):
dtype = dt.Interval(self.unit, self.arg.type())
return rlz.shape_like(self.arg, dtype=dtype)
class ArrayColumn(ValueOp):
cols = Arg(rlz.list_of(rlz.column(rlz.any), min_length=1))
def _validate(self):
if len({col.type() for col in self.cols}) > 1:
raise com.IbisTypeError(
f'The types of all input columns must match exactly in a '
f'{type(self).__name__} operation.'
)
def output_type(self):
first_dtype = self.cols[0].type()
return dt.Array(first_dtype).column_type()
class ArrayLength(UnaryOp):
arg = Arg(rlz.array)
output_type = rlz.shape_like('arg', dt.int64)
class ArraySlice(ValueOp):
arg = Arg(rlz.array)
start = Arg(rlz.integer)
stop = Arg(rlz.integer, default=None)
output_type = rlz.typeof('arg')
class ArrayIndex(ValueOp):
arg = Arg(rlz.array)
index = Arg(rlz.integer)
def output_type(self):
value_dtype = self.arg.type().value_type
return rlz.shape_like(self.arg, value_dtype)
class ArrayConcat(ValueOp):
left = Arg(rlz.array)
right = Arg(rlz.array)
output_type = rlz.shape_like('left')
def _validate(self):
left_dtype, right_dtype = self.left.type(), self.right.type()
if left_dtype != right_dtype:
raise com.IbisTypeError(
'Array types must match exactly in a {} operation. '
'Left type {} != Right type {}'.format(
type(self).__name__, left_dtype, right_dtype
)
)
class ArrayRepeat(ValueOp):
arg = Arg(rlz.array)
times = Arg(rlz.integer)
output_type = rlz.typeof('arg')
class ArrayCollect(Reduction):
arg = Arg(rlz.column(rlz.any))
def output_type(self):
dtype = dt.Array(self.arg.type())
return dtype.scalar_type()
class MapLength(ValueOp):
arg = Arg(rlz.mapping)
output_type = rlz.shape_like('arg', dt.int64)
class MapValueForKey(ValueOp):
arg = Arg(rlz.mapping)
key = Arg(rlz.one_of([rlz.string, rlz.integer]))
def output_type(self):
return rlz.shape_like(tuple(self.args), self.arg.type().value_type)
class MapValueOrDefaultForKey(ValueOp):
arg = Arg(rlz.mapping)
key = Arg(rlz.one_of([rlz.string, rlz.integer]))
default = Arg(rlz.any)
def output_type(self):
arg = self.arg
default = self.default
map_type = arg.type()
value_type = map_type.value_type
default_type = default.type()
if default is not None and not dt.same_kind(default_type, value_type):
raise com.IbisTypeError(
"Default value\n{}\nof type {} cannot be cast to map's value "
"type {}".format(default, default_type, value_type)
)
result_type = dt.highest_precedence((default_type, value_type))
return rlz.shape_like(tuple(self.args), result_type)
class MapKeys(ValueOp):
arg = Arg(rlz.mapping)
def output_type(self):
arg = self.arg
return rlz.shape_like(arg, dt.Array(arg.type().key_type))
class MapValues(ValueOp):
arg = Arg(rlz.mapping)
def output_type(self):
arg = self.arg
return rlz.shape_like(arg, dt.Array(arg.type().value_type))
class MapConcat(ValueOp):
left = Arg(rlz.mapping)
right = Arg(rlz.mapping)
output_type = rlz.typeof('left')
class StructField(ValueOp):
arg = Arg(rlz.struct)
field = Arg(str)
def output_type(self):
struct_dtype = self.arg.type()
value_dtype = struct_dtype[self.field]
return rlz.shape_like(self.arg, value_dtype)
class Literal(ValueOp):
value = Arg(rlz.noop)
dtype = Arg(dt.dtype)
def __repr__(self):
return '{}({})'.format(
type(self).__name__, ', '.join(map(repr, self.args))
)
def equals(self, other, cache=None):
# Check types
if not (
isinstance(other, Literal)
and isinstance(other.value, type(self.value))
and self.dtype == other.dtype
):
return False
# Check values
if isinstance(self.value, np.ndarray):
return np.array_equal(self.value, other.value)
else:
return self.value == other.value
def output_type(self):
return self.dtype.scalar_type()
def root_tables(self):
return []
def __hash__(self) -> int:
"""Return the hash of a literal value.
We override this method to make sure that we can handle things that
aren't eminently hashable like an ``array<array<int64>>``.
"""
return hash(self.dtype._literal_value_hash_key(self.value))
class NullLiteral(Literal):
"""Typeless NULL literal"""
value = Arg(type(None), default=None)
dtype = Arg(dt.Null, default=dt.null)
class ScalarParameter(ValueOp):
_counter = itertools.count()
dtype = Arg(dt.dtype)
counter = Arg(int, default=lambda: next(ScalarParameter._counter))
def resolve_name(self):
return 'param_{:d}'.format(self.counter)
def __repr__(self):
return '{}(type={})'.format(type(self).__name__, self.dtype)
def __hash__(self):
return hash((self.dtype, self.counter))
def output_type(self):
return self.dtype.scalar_type()
def equals(self, other, cache=None):
return (
isinstance(other, ScalarParameter)
and self.counter == other.counter
and self.dtype.equals(other.dtype, cache=cache)
)
@property
def inputs(self):
return ()
def root_tables(self):
return []
class ExpressionList(Node):
"""Data structure for a list of arbitrary expressions"""
exprs = Arg(rlz.noop)
def __init__(self, values):
super().__init__(list(map(rlz.any, values)))
@property
def inputs(self):
return (tuple(self.exprs),)
def root_tables(self):
return distinct_roots(self.exprs)
def output_type(self):
return ir.ExprList
class ValueList(ValueOp):
"""Data structure for a list of value expressions"""
values = Arg(rlz.noop)
display_argnames = False # disable showing argnames in repr
def __init__(self, values):
super().__init__(tuple(map(rlz.any, values)))
def output_type(self):
dtype = rlz.highest_precedence_dtype(self.values)
return functools.partial(ir.ListExpr, dtype=dtype)
def root_tables(self):
return distinct_roots(*self.values)
# ----------------------------------------------------------------------
# GeoSpatial operations
class GeoSpatialBinOp(BinaryOp):
"""Geo Spatial base binary"""
left = Arg(rlz.geospatial)
right = Arg(rlz.geospatial)
class GeoSpatialUnOp(UnaryOp):
"""Geo Spatial base unary"""
arg = Arg(rlz.geospatial)
class GeoDistance(GeoSpatialBinOp):
"""Returns minimum distance between two geo spatial data"""
output_type = rlz.shape_like('args', dt.float64)
class GeoContains(GeoSpatialBinOp):
"""Check if the first geo spatial data contains the second one"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoContainsProperly(GeoSpatialBinOp):
"""Check if the first geo spatial data contains the second one,
and no boundary points are shared."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoCovers(GeoSpatialBinOp):
"""Returns True if no point in Geometry B is outside Geometry A"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoCoveredBy(GeoSpatialBinOp):
"""Returns True if no point in Geometry/Geography A is
outside Geometry/Geography B"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoCrosses(GeoSpatialBinOp):
"""Returns True if the supplied geometries have some, but not all,
interior points in common."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoDisjoint(GeoSpatialBinOp):
"""Returns True if the Geometries do not โspatially intersectโ -
if they do not share any space together."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoEquals(GeoSpatialBinOp):
"""Returns True if the given geometries represent the same geometry."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoGeometryN(GeoSpatialUnOp):
"""Returns the Nth Geometry of a Multi geometry."""
n = Arg(rlz.integer)
output_type = rlz.shape_like('args', dt.geometry)
class GeoGeometryType(GeoSpatialUnOp):
"""Returns the type of the geometry."""
output_type = rlz.shape_like('args', dt.string)
class GeoIntersects(GeoSpatialBinOp):
"""Returns True if the Geometries/Geography โspatially intersect in 2Dโ
- (share any portion of space) and False if they donโt (they are Disjoint).
"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoIsValid(GeoSpatialUnOp):
"""Returns true if the geometry is well-formed."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoLineLocatePoint(GeoSpatialBinOp):
"""
Locate the distance a point falls along the length of a line.
Returns a float between zero and one representing the location of the
closest point on the linestring to the given point, as a fraction of the
total 2d line length.
"""
left = Arg(rlz.linestring)
right = Arg(rlz.point)
output_type = rlz.shape_like('args', dt.halffloat)
class GeoLineMerge(GeoSpatialUnOp):
"""
Merge a MultiLineString into a LineString.
Returns a (set of) LineString(s) formed by sewing together the
constituent line work of a multilinestring. If a geometry other than
a linestring or multilinestring is given, this will return an empty
geometry collection.
"""
output_type = rlz.shape_like('args', dt.geometry)
class GeoLineSubstring(GeoSpatialUnOp):
"""
Clip a substring from a LineString.
Returns a linestring that is a substring of the input one, starting
and ending at the given fractions of the total 2d length. The second
and third arguments are floating point values between zero and one.
This only works with linestrings.
"""
arg = Arg(rlz.linestring)
start = Arg(rlz.floating)
end = Arg(rlz.floating)
output_type = rlz.shape_like('args', dt.linestring)
class GeoOrderingEquals(GeoSpatialBinOp):
"""
Check if two geometries are equal and have the same point ordering.
Returns true if the two geometries are equal and the coordinates
are in the same order.
"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoOverlaps(GeoSpatialBinOp):
"""Returns True if the Geometries share space, are of the same dimension,
but are not completely contained by each other."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoTouches(GeoSpatialBinOp):
"""Returns True if the geometries have at least one point in common,
but their interiors do not intersect."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoUnaryUnion(Reduction):
"""Returns the pointwise union of the geometries in the column."""
arg = Arg(rlz.column(rlz.geospatial))
def output_type(self):
return dt.geometry.scalar_type()
class GeoUnion(GeoSpatialBinOp):
"""Returns the pointwise union of the two geometries."""
output_type = rlz.shape_like('args', dt.geometry)
class GeoArea(GeoSpatialUnOp):
"""Area of the geo spatial data"""
output_type = rlz.shape_like('args', dt.float64)
class GeoPerimeter(GeoSpatialUnOp):
"""Perimeter of the geo spatial data"""
output_type = rlz.shape_like('args', dt.float64)
class GeoLength(GeoSpatialUnOp):
"""Length of geo spatial data"""
output_type = rlz.shape_like('args', dt.float64)
class GeoMaxDistance(GeoSpatialBinOp):
"""Returns the 2-dimensional maximum distance between two geometries in
    projected units. If g1 and g2 are the same geometry, the function returns
    the distance between the two vertices farthest from each other in that
    geometry.
"""
output_type = rlz.shape_like('args', dt.float64)
class GeoX(GeoSpatialUnOp):
"""Return the X coordinate of the point, or NULL if not available.
Input must be a point
"""
output_type = rlz.shape_like('args', dt.float64)
class GeoY(GeoSpatialUnOp):
"""Return the Y coordinate of the point, or NULL if not available.
Input must be a point
"""
output_type = rlz.shape_like('args', dt.float64)
class GeoXMin(GeoSpatialUnOp):
"""Returns Y minima of a bounding box 2d or 3d or a geometry"""
output_type = rlz.shape_like('args', dt.float64)
class GeoXMax(GeoSpatialUnOp):
"""Returns X maxima of a bounding box 2d or 3d or a geometry"""
output_type = rlz.shape_like('args', dt.float64)
class GeoYMin(GeoSpatialUnOp):
"""Returns Y minima of a bounding box 2d or 3d or a geometry"""
output_type = rlz.shape_like('args', dt.float64)
class GeoYMax(GeoSpatialUnOp):
"""Returns Y maxima of a bounding box 2d or 3d or a geometry"""
output_type = rlz.shape_like('args', dt.float64)
class GeoStartPoint(GeoSpatialUnOp):
"""Returns the first point of a LINESTRING geometry as a POINT or
NULL if the input parameter is not a LINESTRING
"""
output_type = rlz.shape_like('arg', dt.point)
class GeoEndPoint(GeoSpatialUnOp):
"""Returns the last point of a LINESTRING geometry as a POINT or
NULL if the input parameter is not a LINESTRING
"""
output_type = rlz.shape_like('arg', dt.point)
class GeoPoint(GeoSpatialBinOp):
"""
Return a point constructed on the fly from the provided coordinate values.
Constant coordinates result in construction of a POINT literal.
"""
left = Arg(rlz.numeric)
right = Arg(rlz.numeric)
output_type = rlz.shape_like('args', dt.point)
class GeoPointN(GeoSpatialUnOp):
"""Return the Nth point in a single linestring in the geometry.
Negative values are counted backwards from the end of the LineString,
so that -1 is the last point. Returns NULL if there is no linestring in
the geometry
"""
n = Arg(rlz.integer)
output_type = rlz.shape_like('args', dt.point)
class GeoNPoints(GeoSpatialUnOp):
"""Return the number of points in a geometry. Works for all geometries"""
output_type = rlz.shape_like('args', dt.int64)
class GeoNRings(GeoSpatialUnOp):
"""If the geometry is a polygon or multi-polygon returns the number of
rings. It counts the outer rings as well
"""
output_type = rlz.shape_like('args', dt.int64)
class GeoSRID(GeoSpatialUnOp):
"""Returns the spatial reference identifier for the ST_Geometry."""
output_type = rlz.shape_like('args', dt.int64)
class GeoSetSRID(GeoSpatialUnOp):
"""Set the spatial reference identifier for the ST_Geometry."""
srid = Arg(rlz.integer)
output_type = rlz.shape_like('args', dt.geometry)
class GeoBuffer(GeoSpatialUnOp):
"""Returns a geometry that represents all points whose distance from this
Geometry is less than or equal to distance. Calculations are in the
Spatial Reference System of this Geometry.
"""
radius = Arg(rlz.floating)
output_type = rlz.shape_like('args', dt.geometry)
class GeoCentroid(GeoSpatialUnOp):
"""Returns the geometric center of a geometry."""
output_type = rlz.shape_like('arg', dt.point)
class GeoDFullyWithin(GeoSpatialBinOp):
"""Returns True if the geometries are fully within the specified distance
of one another.
"""
distance = Arg(rlz.floating)
output_type = rlz.shape_like('args', dt.boolean)
class GeoDWithin(GeoSpatialBinOp):
"""Returns True if the geometries are within the specified distance
of one another.
"""
distance = Arg(rlz.floating)
output_type = rlz.shape_like('args', dt.boolean)
class GeoEnvelope(GeoSpatialUnOp):
"""Returns a geometry representing the boundingbox of the supplied geometry.
"""
output_type = rlz.shape_like('arg', dt.polygon)
class GeoAzimuth(GeoSpatialBinOp):
"""Returns the angle in radians from the horizontal of the vector defined
by pointA and pointB. Angle is computed clockwise from down-to-up:
on the clock: 12=0; 3=PI/2; 6=PI; 9=3PI/2.
"""
left = Arg(rlz.point)
right = Arg(rlz.point)
output_type = rlz.shape_like('args', dt.float64)
class GeoWithin(GeoSpatialBinOp):
"""Returns True if the geometry A is completely inside geometry B"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoIntersection(GeoSpatialBinOp):
"""Returns a geometry that represents the point set intersection
of the Geometries.
"""
output_type = rlz.shape_like('args', dt.geometry)
class GeoDifference(GeoSpatialBinOp):
"""Returns a geometry that represents that part of geometry A
that does not intersect with geometry B
"""
output_type = rlz.shape_like('args', dt.geometry)
class GeoSimplify(GeoSpatialUnOp):
"""Returns a simplified version of the given geometry."""
tolerance = Arg(rlz.floating)
preserve_collapsed = Arg(rlz.boolean)
output_type = rlz.shape_like('arg', dt.geometry)
class GeoTransform(GeoSpatialUnOp):
"""Returns a transformed version of the given geometry into a new SRID."""
srid = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.geometry)
class GeoAsBinary(GeoSpatialUnOp):
"""Return the Well-Known Binary (WKB) representation of the
geometry/geography without SRID meta data.
"""
output_type = rlz.shape_like('arg', dt.binary)
class GeoAsEWKB(GeoSpatialUnOp):
"""Return the Well-Known Binary (WKB) representation of the
geometry/geography with SRID meta data.
"""
output_type = rlz.shape_like('arg', dt.binary)
class GeoAsEWKT(GeoSpatialUnOp):
"""Return the Well-Known Text (WKT) representation of the
geometry/geography with SRID meta data.
"""
output_type = rlz.shape_like('arg', dt.string)
class GeoAsText(GeoSpatialUnOp):
"""Return the Well-Known Text (WKT) representation of the
geometry/geography without SRID metadata.
"""
output_type = rlz.shape_like('arg', dt.string)
class ElementWiseVectorizedUDF(ValueOp):
"""Node for element wise UDF."""
func = Arg(callable)
func_args = Arg(tuple)
input_type = Arg(rlz.shape_like('func_args'))
_output_type = Arg(rlz.noop)
def __init__(self, func, args, input_type, output_type):
self.func = func
self.func_args = args
self.input_type = input_type
self._output_type = output_type
@property
def inputs(self):
return self.func_args
def output_type(self):
return self._output_type.column_type()
def root_tables(self):
return distinct_roots(*self.func_args)
class ReductionVectorizedUDF(Reduction):
"""Node for reduction UDF."""
func = Arg(callable)
func_args = Arg(tuple)
input_type = Arg(rlz.shape_like('func_args'))
_output_type = Arg(rlz.noop)
def __init__(self, func, args, input_type, output_type):
self.func = func
self.func_args = args
self.input_type = input_type
self._output_type = output_type
@property
def inputs(self):
return self.func_args
def output_type(self):
return self._output_type.scalar_type()
def root_tables(self):
return distinct_roots(*self.func_args)
class AnalyticVectorizedUDF(AnalyticOp):
"""Node for analytics UDF."""
func = Arg(callable)
func_args = Arg(tuple)
input_type = Arg(rlz.shape_like('func_args'))
_output_type = Arg(rlz.noop)
def __init__(self, func, args, input_type, output_type):
self.func = func
self.func_args = args
self.input_type = input_type
self._output_type = output_type
@property
def inputs(self):
return self.func_args
def output_type(self):
return self._output_type.column_type()
def root_tables(self):
return distinct_roots(*self.func_args)
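# A short sketch (not part of the original module) of how user code typically
# produces an ElementWiseVectorizedUDF node, assuming the
# ibis.udf.vectorized.elementwise decorator that accompanies these node classes;
# the function name and dtypes below are illustrative only. Wrapped in a helper
# so nothing runs at import time.
def _example_elementwise_udf():
    from ibis.udf.vectorized import elementwise  # local import avoids a circular import at module load
    @elementwise(input_type=[dt.double], output_type=dt.double)
    def add_one(col):
        # the decorated function receives a backend column (e.g. a pandas Series)
        # and must return a column of the same length
        return col + 1.0
    return add_one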
class ExistsSubquery(Node):
"""Helper class"""
foreign_table = Arg(rlz.noop)
predicates = Arg(rlz.noop)
def output_type(self):
return ir.ExistsExpr
class NotExistsSubquery(Node):
foreign_table = Arg(rlz.noop)
predicates = Arg(rlz.noop)
def output_type(self):
return ir.ExistsExpr
| 24.984392 | 99 | 0.624954 | ["Apache-2.0"] | odidev/ibis | ibis/expr/operations.py | 92,852 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['Schedule']
class Schedule(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
daily_recurrence: Optional[pulumi.Input[pulumi.InputType['DayDetailsArgs']]] = None,
hourly_recurrence: Optional[pulumi.Input[pulumi.InputType['HourDetailsArgs']]] = None,
lab_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
notification_settings: Optional[pulumi.Input[pulumi.InputType['NotificationSettingsArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[Union[str, 'EnableStatus']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
target_resource_id: Optional[pulumi.Input[str]] = None,
task_type: Optional[pulumi.Input[str]] = None,
time_zone_id: Optional[pulumi.Input[str]] = None,
weekly_recurrence: Optional[pulumi.Input[pulumi.InputType['WeekDetailsArgs']]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
A schedule.
API Version: 2018-09-15.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['DayDetailsArgs']] daily_recurrence: If the schedule will occur once each day of the week, specify the daily recurrence.
:param pulumi.Input[pulumi.InputType['HourDetailsArgs']] hourly_recurrence: If the schedule will occur multiple times a day, specify the hourly recurrence.
:param pulumi.Input[str] lab_name: The name of the lab.
:param pulumi.Input[str] location: The location of the resource.
:param pulumi.Input[str] name: The name of the schedule.
:param pulumi.Input[pulumi.InputType['NotificationSettingsArgs']] notification_settings: Notification settings.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Union[str, 'EnableStatus']] status: The status of the schedule (i.e. Enabled, Disabled)
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The tags of the resource.
:param pulumi.Input[str] target_resource_id: The resource ID to which the schedule belongs
:param pulumi.Input[str] task_type: The task type of the schedule (e.g. LabVmsShutdownTask, LabVmAutoStart).
:param pulumi.Input[str] time_zone_id: The time zone ID (e.g. Pacific Standard time).
:param pulumi.Input[pulumi.InputType['WeekDetailsArgs']] weekly_recurrence: If the schedule will occur only some days of the week, specify the weekly recurrence.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['daily_recurrence'] = daily_recurrence
__props__['hourly_recurrence'] = hourly_recurrence
if lab_name is None and not opts.urn:
raise TypeError("Missing required property 'lab_name'")
__props__['lab_name'] = lab_name
__props__['location'] = location
__props__['name'] = name
__props__['notification_settings'] = notification_settings
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['status'] = status
__props__['tags'] = tags
__props__['target_resource_id'] = target_resource_id
__props__['task_type'] = task_type
__props__['time_zone_id'] = time_zone_id
__props__['weekly_recurrence'] = weekly_recurrence
__props__['created_date'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
__props__['unique_identifier'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:devtestlab/latest:Schedule"), pulumi.Alias(type_="azure-nextgen:devtestlab/v20150521preview:Schedule"), pulumi.Alias(type_="azure-nextgen:devtestlab/v20160515:Schedule"), pulumi.Alias(type_="azure-nextgen:devtestlab/v20180915:Schedule")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Schedule, __self__).__init__(
'azure-nextgen:devtestlab:Schedule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Schedule':
"""
Get an existing Schedule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return Schedule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="createdDate")
def created_date(self) -> pulumi.Output[str]:
"""
The creation date of the schedule.
"""
return pulumi.get(self, "created_date")
@property
@pulumi.getter(name="dailyRecurrence")
def daily_recurrence(self) -> pulumi.Output[Optional['outputs.DayDetailsResponse']]:
"""
If the schedule will occur once each day of the week, specify the daily recurrence.
"""
return pulumi.get(self, "daily_recurrence")
@property
@pulumi.getter(name="hourlyRecurrence")
def hourly_recurrence(self) -> pulumi.Output[Optional['outputs.HourDetailsResponse']]:
"""
If the schedule will occur multiple times a day, specify the hourly recurrence.
"""
return pulumi.get(self, "hourly_recurrence")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
The location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="notificationSettings")
def notification_settings(self) -> pulumi.Output[Optional['outputs.NotificationSettingsResponse']]:
"""
Notification settings.
"""
return pulumi.get(self, "notification_settings")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning status of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def status(self) -> pulumi.Output[Optional[str]]:
"""
The status of the schedule (i.e. Enabled, Disabled)
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
The tags of the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="targetResourceId")
def target_resource_id(self) -> pulumi.Output[Optional[str]]:
"""
The resource ID to which the schedule belongs
"""
return pulumi.get(self, "target_resource_id")
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Output[Optional[str]]:
"""
The task type of the schedule (e.g. LabVmsShutdownTask, LabVmAutoStart).
"""
return pulumi.get(self, "task_type")
@property
@pulumi.getter(name="timeZoneId")
def time_zone_id(self) -> pulumi.Output[Optional[str]]:
"""
The time zone ID (e.g. Pacific Standard time).
"""
return pulumi.get(self, "time_zone_id")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="uniqueIdentifier")
def unique_identifier(self) -> pulumi.Output[str]:
"""
The unique immutable identifier of a resource (Guid).
"""
return pulumi.get(self, "unique_identifier")
@property
@pulumi.getter(name="weeklyRecurrence")
def weekly_recurrence(self) -> pulumi.Output[Optional['outputs.WeekDetailsResponse']]:
"""
If the schedule will occur only some days of the week, specify the weekly recurrence.
"""
return pulumi.get(self, "weekly_recurrence")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
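# A minimal usage sketch (not part of the generated SDK): a daily auto-shutdown
# schedule for a DevTest lab. The resource name, lab name, resource group and
# recurrence time below are hypothetical; DayDetailsArgs is assumed to be
# available via the `from ._inputs import *` at the top of this module, and the
# "1900" value assumes the HHmm time string used by DevTest Labs. Wrapped in a
# helper so importing the module does not create any resources.
def _example_shutdown_schedule():
    return Schedule(
        "example-shutdown",
        lab_name="example-lab",
        resource_group_name="example-rg",
        name="LabVmsShutdown",
        task_type="LabVmsShutdownTask",
        time_zone_id="Pacific Standard Time",
        status="Enabled",
        daily_recurrence=DayDetailsArgs(time="1900"),
    )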
| 42.712551 | 325 | 0.645308 | ["Apache-2.0"] | pulumi/pulumi-azure-nextgen | sdk/python/pulumi_azure_nextgen/devtestlab/schedule.py | 10,550 | Python |
data = "cqjxjnds"
import string
import re
lc = string.ascii_lowercase
# successor of each lowercase letter (note: this shadows the builtin next(), which is fine here)
next = dict(zip(lc[:-1], lc[1:]))
# every straight of three increasing letters: "abc", "bcd", ..., "xyz"
three_seq = ["".join(z) for z in zip(lc[:-2], lc[1:-1], lc[2:])]
def check(pw):
if "i" in pw or "o" in pw or "l" in pw:
return False
three_match = False
for seq in three_seq:
if seq in pw:
three_match = True
if not three_match:
return False
doubles = set(re.findall(r'(.)\1', pw))
if len(doubles) < 2:
return False
return True
def inc(pw):
    # increment the password like a base-26 number, e.g. inc("azz") -> "baa"
pw = list(pw)
i = -1
while pw[i] == 'z':
pw[i] = 'a'
i -= 1
pw[i] = next[pw[i]]
return "".join(pw)
# TEST
print(check("hijklmmn"))
print(check("abbceffg"))
print(check("abbcegjk"))
print(check("abcdffaa"))
print(check("ghjaabcc"))
# PART 1
pw = data
while not check(pw):
pw = inc(pw)
print(pw)
# PART 2
pw = inc(pw)
while not check(pw):
pw = inc(pw)
print(pw)
| 17.865385 | 64 | 0.557589 | ["MIT"] | lamperi/aoc | 2015/11/solve.py | 929 | Python |
# Qiwi module advanced usage example v1.00
# 17/05/2021
# https://t.me/ssleg © 2021
import logging
import qiwi_module
# set up the log file test.log; all errors and warnings will be written there.
lfile = logging.FileHandler('test.log', 'a', 'utf-8')
lfile.setFormatter(logging.Formatter('%(levelname)s %(module)-13s [%(asctime)s] %(message)s'))
# noinspection PyArgumentList
logging.basicConfig(level=logging.INFO, handlers=[lfile])
# a simpler usage example can be found in the file sample.py
# if you have customized the look of the payment form, you need to pass the
# theme code to the module. this is done once, when the module is initialized.
# the code itself and the form settings can be found on the page
# https://qiwi.com/p2p-admin/transfers/link
theme_code = 'Ivanov-XX-vvv-k_'
# before any use, the module must be initialized once.
qiwi_module.init(theme_code)
# create an invoice for 1 ruble. on success you get a url with the payment form
# for the client; on failure, False is returned and the details go to the log.
# you invent and store the invoice identifiers yourself; they must always be unique.
bill_id = 'bill_2021_00000002'
# by default the invoice is valid for 15 minutes, but you can set your own time, for example one day and 1 minute.
valid_hours = 24
valid_minutes = 1
# there is also a comment field that the client sees in the payment form;
# for example, order details can be put here.
comment = 'Left-hand thread screw for Sidorov.'
invoice_url = qiwi_module.create_bill(1.00, bill_id, comment, valid_hours, valid_minutes)
print(invoice_url)
# check the payment status.
# returns one of four possible values on success, otherwise False plus a log entry.
# 'WAITING' - the invoice has been issued and is awaiting payment.
# 'PAID' - the invoice has been paid.
# 'REJECTED' - the invoice was cancelled on your side.
# 'EXPIRED' - the invoice was not paid and its validity period has expired.
# this can be called every second, or less often.
pay_status = qiwi_module.bill_status(bill_id)
print(pay_status)
# cancel the invoice if you need to.
# returns 'REJECTED' on success, otherwise False and a log entry.
bill_status = qiwi_module.cancel_bill(bill_id)
print(bill_status)
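# A small extension of the example (not part of the original file): poll the
# status until the invoice reaches a final state, using only functions shown
# above; the 30-second interval is an arbitrary choice. Here the invoice was
# just cancelled, so the loop exits immediately; in a real integration you
# would poll before cancelling.
import time
final_states = ('PAID', 'REJECTED', 'EXPIRED')
status = qiwi_module.bill_status(bill_id)
while status not in final_states and status is not False:
    time.sleep(30)
    status = qiwi_module.bill_status(bill_id)
print('final status:', status)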
| 38.581818 | 112 | 0.780396 | ["MIT"] | ssleg/qiwi_module | adv_sample.py | 3,101 | Python |
# Parallel Monte Carlo estimate of pi mapped over an IPython/ipyparallel
# load-balanced view. `lview` is assumed to be created in the lecture notebook
# (e.g. lview = Client().load_balanced_view()); the estimate_pi worker that
# lview.map distributes is sketched after this function.
import time
import numpy as np
def estimate_pi_parallel(N, lview, N_per_trial=1E6):
result = lview.map(estimate_pi, [N_per_trial for i in range(N)])
while not result.ready():
print(result.progress)
time.sleep(0.5)
return np.mean(list(result))
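# A minimal sketch (not in the original solutions file) of the per-trial worker
# assumed above; the name and signature estimate_pi(N) are taken from the call
# inside estimate_pi_parallel.
def estimate_pi(N):
    import numpy as np  # local import so the function ships cleanly to the engines
    N = int(N)
    xy = np.random.random((N, 2))          # uniform points in the unit square
    inside = (xy ** 2).sum(axis=1) <= 1.0  # points inside the quarter circle of radius 1
    return 4.0 * inside.sum() / N          # 4 * (area ratio) approximates pi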
estimate_pi_parallel(100, lview)
| 30.444444 | 68 | 0.693431 | ["BSD-3-Clause"] | Peshal1067/PythonLectures | solutions/example2.py | 274 | Python |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring # Python 3
except NameError:
basestring = str # Python 2
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import CancelPending
from ccxt.base.errors import NotSupported
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import InvalidNonce
from ccxt.base.errors import RequestTimeout
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import TICK_SIZE
class okex(Exchange):
def describe(self):
return self.deep_extend(super(okex, self).describe(), {
'id': 'okex',
'name': 'OKEX',
'countries': ['CN', 'US'],
'version': 'v3',
            'rateLimit': 1000,  # up to 3000 requests per 5 minutes ≈ 600 requests per minute ≈ 10 requests per second ≈ 100 ms
'pro': True,
'has': {
'cancelOrder': True,
'CORS': False,
'createOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': False, # see below
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchLedger': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': False,
'fetchOrderTrades': True,
'fetchTime': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': True,
'fetchTransactions': False,
'fetchWithdrawals': True,
'futures': True,
'withdraw': True,
},
'timeframes': {
'1m': '60',
'3m': '180',
'5m': '300',
'15m': '900',
'30m': '1800',
'1h': '3600',
'2h': '7200',
'4h': '14400',
'6h': '21600',
'12h': '43200',
'1d': '86400',
'1w': '604800',
'1M': '2678400',
'3M': '8035200',
'6M': '16070400',
'1y': '31536000',
},
'hostname': 'okex.com',
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/32552768-0d6dd3c6-c4a6-11e7-90f8-c043b64756a7.jpg',
'api': {
'rest': 'https://www.{hostname}',
},
'www': 'https://www.okex.com',
'doc': 'https://www.okex.com/docs/en/',
'fees': 'https://www.okex.com/pages/products/fees.html',
'referral': 'https://www.okex.com/join/1888677',
'test': {
'rest': 'https://testnet.okex.com',
},
},
'api': {
'general': {
'get': [
'time',
],
},
'account': {
'get': [
'wallet',
'sub-account',
'asset-valuation',
'wallet/{currency}',
'withdrawal/history',
'withdrawal/history/{currency}',
'ledger',
'deposit/address',
'deposit/history',
'deposit/history/{currency}',
'currencies',
'withdrawal/fee',
],
'post': [
'transfer',
'withdrawal',
],
},
'spot': {
'get': [
'accounts',
'accounts/{currency}',
'accounts/{currency}/ledger',
'orders',
'orders_pending',
'orders/{order_id}',
'orders/{client_oid}',
'trade_fee',
'fills',
'algo',
# public
'instruments',
'instruments/{instrument_id}/book',
'instruments/ticker',
'instruments/{instrument_id}/ticker',
'instruments/{instrument_id}/trades',
'instruments/{instrument_id}/candles',
'instruments/{instrument_id}/history/candles',
],
'post': [
'order_algo',
'orders',
'batch_orders',
'cancel_orders/{order_id}',
'cancel_orders/{client_oid}',
'cancel_batch_algos',
'cancel_batch_orders',
],
},
'margin': {
'get': [
'accounts',
'accounts/{instrument_id}',
'accounts/{instrument_id}/ledger',
'accounts/availability',
'accounts/{instrument_id}/availability',
'accounts/borrowed',
'accounts/{instrument_id}/borrowed',
'orders',
'accounts/{instrument_id}/leverage',
'orders/{order_id}',
'orders/{client_oid}',
'orders_pending',
'fills',
# public
'instruments/{instrument_id}/mark_price',
],
'post': [
'accounts/borrow',
'accounts/repayment',
'orders',
'batch_orders',
'cancel_orders',
'cancel_orders/{order_id}',
'cancel_orders/{client_oid}',
'cancel_batch_orders',
'accounts/{instrument_id}/leverage',
],
},
'futures': {
'get': [
'position',
'{instrument_id}/position',
'accounts',
'accounts/{underlying}',
'accounts/{underlying}/leverage',
'accounts/{underlying}/ledger',
'order_algo/{instrument_id}',
'orders/{instrument_id}',
'orders/{instrument_id}/{order_id}',
'orders/{instrument_id}/{client_oid}',
'fills',
'trade_fee',
'accounts/{instrument_id}/holds',
'order_algo/{instrument_id}',
# public
'instruments',
'instruments/{instrument_id}/book',
'instruments/ticker',
'instruments/{instrument_id}/ticker',
'instruments/{instrument_id}/trades',
'instruments/{instrument_id}/candles',
'instruments/{instrument_id}/history/candles',
'instruments/{instrument_id}/index',
'rate',
'instruments/{instrument_id}/estimated_price',
'instruments/{instrument_id}/open_interest',
'instruments/{instrument_id}/price_limit',
'instruments/{instrument_id}/mark_price',
'instruments/{instrument_id}/liquidation',
],
'post': [
'accounts/{underlying}/leverage',
'order',
'orders',
'cancel_order/{instrument_id}/{order_id}',
'cancel_order/{instrument_id}/{client_oid}',
'cancel_batch_orders/{instrument_id}',
'accounts/margin_mode',
'close_position',
'cancel_all',
'order_algo',
'cancel_algos',
],
},
'swap': {
'get': [
'position',
'{instrument_id}/position',
'accounts',
'{instrument_id}/accounts',
'accounts/{instrument_id}/settings',
'accounts/{instrument_id}/ledger',
'orders/{instrument_id}',
'orders/{instrument_id}/{order_id}',
'orders/{instrument_id}/{client_oid}',
'fills',
'accounts/{instrument_id}/holds',
'trade_fee',
'order_algo/{instrument_id}',
# public
'instruments',
'instruments/{instrument_id}/depth',
'instruments/ticker',
'instruments/{instrument_id}/ticker',
'instruments/{instrument_id}/trades',
'instruments/{instrument_id}/candles',
'instruments/{instrument_id}/history/candles',
'instruments/{instrument_id}/index',
'rate',
'instruments/{instrument_id}/open_interest',
'instruments/{instrument_id}/price_limit',
'instruments/{instrument_id}/liquidation',
'instruments/{instrument_id}/funding_time',
'instruments/{instrument_id}/mark_price',
'instruments/{instrument_id}/historical_funding_rate',
],
'post': [
'accounts/{instrument_id}/leverage',
'order',
'orders',
'cancel_order/{instrument_id}/{order_id}',
'cancel_order/{instrument_id}/{client_oid}',
'cancel_batch_orders/{instrument_id}',
'order_algo',
'cancel_algos',
'close_position',
'cancel_all',
'order_algo',
'cancel_algos',
],
},
'option': {
'get': [
'accounts',
'position',
'{underlying}/position',
'accounts/{underlying}',
'orders/{underlying}',
'fills/{underlying}',
'accounts/{underlying}/ledger',
'trade_fee',
'orders/{underlying}/{order_id}',
'orders/{underlying}/{client_oid}',
# public
'underlying',
'instruments/{underlying}',
'instruments/{underlying}/summary',
'instruments/{underlying}/summary/{instrument_id}',
'instruments/{instrument_id}/book',
'instruments/{instrument_id}/trades',
'instruments/{instrument_id}/ticker',
'instruments/{instrument_id}/candles',
],
'post': [
'order',
'orders',
'cancel_order/{underlying}/{order_id}',
'cancel_order/{underlying}/{client_oid}',
'cancel_batch_orders/{underlying}',
'amend_order/{underlying}',
'amend_batch_orders/{underlying}',
],
},
'index': {
'get': [
'{instrument_id}/constituents',
],
},
},
'fees': {
'trading': {
'taker': 0.0015,
'maker': 0.0010,
},
'spot': {
'taker': 0.0015,
'maker': 0.0010,
},
'futures': {
'taker': 0.0005,
'maker': 0.0002,
},
'swap': {
'taker': 0.00075,
'maker': 0.00020,
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
'password': True,
},
'exceptions': {
# http error codes
                # 400 Bad Request - Invalid request format
                # 401 Unauthorized - Invalid API Key
                # 403 Forbidden - You do not have access to the requested resource
                # 404 Not Found
                # 429 Client Error: Too Many Requests for url
                # 500 Internal Server Error - We had a problem with our server
'exact': {
'1': ExchangeError, # {"code": 1, "message": "System error"}
# undocumented
'failure to get a peer from the ring-balancer': ExchangeNotAvailable, # {"message": "failure to get a peer from the ring-balancer"}
'Server is busy, please try again.': ExchangeNotAvailable, # {"message": "Server is busy, please try again."}
'An unexpected error occurred': ExchangeError, # {"message": "An unexpected error occurred"}
'System error': ExchangeError, # {"error_message":"System error","message":"System error"}
'4010': PermissionDenied, # {"code": 4010, "message": "For the security of your funds, withdrawals are not permitted within 24 hours after changing fund password / mobile number / Google Authenticator settings "}
# common
# '0': ExchangeError, # 200 successful,when the order placement / cancellation / operation is successful
'4001': ExchangeError, # no data received in 30s
'4002': ExchangeError, # Buffer full. cannot write data
# --------------------------------------------------------
'30001': AuthenticationError, # {"code": 30001, "message": 'request header "OK_ACCESS_KEY" cannot be blank'}
'30002': AuthenticationError, # {"code": 30002, "message": 'request header "OK_ACCESS_SIGN" cannot be blank'}
'30003': AuthenticationError, # {"code": 30003, "message": 'request header "OK_ACCESS_TIMESTAMP" cannot be blank'}
'30004': AuthenticationError, # {"code": 30004, "message": 'request header "OK_ACCESS_PASSPHRASE" cannot be blank'}
'30005': InvalidNonce, # {"code": 30005, "message": "invalid OK_ACCESS_TIMESTAMP"}
'30006': AuthenticationError, # {"code": 30006, "message": "invalid OK_ACCESS_KEY"}
'30007': BadRequest, # {"code": 30007, "message": 'invalid Content_Type, please use "application/json" format'}
'30008': RequestTimeout, # {"code": 30008, "message": "timestamp request expired"}
'30009': ExchangeError, # {"code": 30009, "message": "system error"}
'30010': AuthenticationError, # {"code": 30010, "message": "API validation failed"}
'30011': PermissionDenied, # {"code": 30011, "message": "invalid IP"}
'30012': AuthenticationError, # {"code": 30012, "message": "invalid authorization"}
'30013': AuthenticationError, # {"code": 30013, "message": "invalid sign"}
'30014': DDoSProtection, # {"code": 30014, "message": "request too frequent"}
'30015': AuthenticationError, # {"code": 30015, "message": 'request header "OK_ACCESS_PASSPHRASE" incorrect'}
'30016': ExchangeError, # {"code": 30015, "message": "you are using v1 apiKey, please use v1 endpoint. If you would like to use v3 endpoint, please subscribe to v3 apiKey"}
'30017': ExchangeError, # {"code": 30017, "message": "apikey's broker id does not match"}
'30018': ExchangeError, # {"code": 30018, "message": "apikey's domain does not match"}
'30019': ExchangeNotAvailable, # {"code": 30019, "message": "Api is offline or unavailable"}
'30020': BadRequest, # {"code": 30020, "message": "body cannot be blank"}
'30021': BadRequest, # {"code": 30021, "message": "Json data format error"}, {"code": 30021, "message": "json data format error"}
'30022': PermissionDenied, # {"code": 30022, "message": "Api has been frozen"}
'30023': BadRequest, # {"code": 30023, "message": "{0} parameter cannot be blank"}
'30024': BadSymbol, # {"code":30024,"message":"\"instrument_id\" is an invalid parameter"}
'30025': BadRequest, # {"code": 30025, "message": "{0} parameter category error"}
'30026': DDoSProtection, # {"code": 30026, "message": "requested too frequent"}
'30027': AuthenticationError, # {"code": 30027, "message": "login failure"}
'30028': PermissionDenied, # {"code": 30028, "message": "unauthorized execution"}
'30029': AccountSuspended, # {"code": 30029, "message": "account suspended"}
'30030': ExchangeNotAvailable, # {"code": 30030, "message": "endpoint request failed. Please try again"}
'30031': BadRequest, # {"code": 30031, "message": "token does not exist"}
'30032': BadSymbol, # {"code": 30032, "message": "pair does not exist"}
'30033': BadRequest, # {"code": 30033, "message": "exchange domain does not exist"}
'30034': ExchangeError, # {"code": 30034, "message": "exchange ID does not exist"}
'30035': ExchangeError, # {"code": 30035, "message": "trading is not supported in self website"}
'30036': ExchangeError, # {"code": 30036, "message": "no relevant data"}
'30037': ExchangeNotAvailable, # {"code": 30037, "message": "endpoint is offline or unavailable"}
# '30038': AuthenticationError, # {"code": 30038, "message": "user does not exist"}
'30038': OnMaintenance, # {"client_oid":"","code":"30038","error_code":"30038","error_message":"Matching engine is being upgraded. Please try in about 1 minute.","message":"Matching engine is being upgraded. Please try in about 1 minute.","order_id":"-1","result":false}
'30044': RequestTimeout, # {"code":30044, "message":"Endpoint request timeout"}
# futures
'32001': AccountSuspended, # {"code": 32001, "message": "futures account suspended"}
'32002': PermissionDenied, # {"code": 32002, "message": "futures account does not exist"}
'32003': CancelPending, # {"code": 32003, "message": "canceling, please wait"}
'32004': ExchangeError, # {"code": 32004, "message": "you have no unfilled orders"}
'32005': InvalidOrder, # {"code": 32005, "message": "max order quantity"}
'32006': InvalidOrder, # {"code": 32006, "message": "the order price or trigger price exceeds USD 1 million"}
'32007': InvalidOrder, # {"code": 32007, "message": "leverage level must be the same for orders on the same side of the contract"}
'32008': InvalidOrder, # {"code": 32008, "message": "Max. positions to open(cross margin)"}
'32009': InvalidOrder, # {"code": 32009, "message": "Max. positions to open(fixed margin)"}
'32010': ExchangeError, # {"code": 32010, "message": "leverage cannot be changed with open positions"}
'32011': ExchangeError, # {"code": 32011, "message": "futures status error"}
'32012': ExchangeError, # {"code": 32012, "message": "futures order update error"}
'32013': ExchangeError, # {"code": 32013, "message": "token type is blank"}
'32014': ExchangeError, # {"code": 32014, "message": "your number of contracts closing is larger than the number of contracts available"}
'32015': ExchangeError, # {"code": 32015, "message": "margin ratio is lower than 100% before opening positions"}
'32016': ExchangeError, # {"code": 32016, "message": "margin ratio is lower than 100% after opening position"}
'32017': ExchangeError, # {"code": 32017, "message": "no BBO"}
'32018': ExchangeError, # {"code": 32018, "message": "the order quantity is less than 1, please try again"}
'32019': ExchangeError, # {"code": 32019, "message": "the order price deviates from the price of the previous minute by more than 3%"}
'32020': ExchangeError, # {"code": 32020, "message": "the price is not in the range of the price limit"}
'32021': ExchangeError, # {"code": 32021, "message": "leverage error"}
'32022': ExchangeError, # {"code": 32022, "message": "self function is not supported in your country or region according to the regulations"}
'32023': ExchangeError, # {"code": 32023, "message": "self account has outstanding loan"}
'32024': ExchangeError, # {"code": 32024, "message": "order cannot be placed during delivery"}
'32025': ExchangeError, # {"code": 32025, "message": "order cannot be placed during settlement"}
'32026': ExchangeError, # {"code": 32026, "message": "your account is restricted from opening positions"}
'32027': ExchangeError, # {"code": 32027, "message": "cancelled over 20 orders"}
'32028': ExchangeError, # {"code": 32028, "message": "account is suspended and liquidated"}
'32029': ExchangeError, # {"code": 32029, "message": "order info does not exist"}
'32030': InvalidOrder, # The order cannot be cancelled
'32031': ArgumentsRequired, # client_oid or order_id is required.
'32038': AuthenticationError, # User does not exist
'32040': ExchangeError, # User have open contract orders or position
'32044': ExchangeError, # {"code": 32044, "message": "The margin ratio after submitting self order is lower than the minimum requirement({0}) for your tier."}
'32045': ExchangeError, # String of commission over 1 million
'32046': ExchangeError, # Each user can hold up to 10 trade plans at the same time
'32047': ExchangeError, # system error
'32048': InvalidOrder, # Order strategy track range error
'32049': ExchangeError, # Each user can hold up to 10 track plans at the same time
'32050': InvalidOrder, # Order strategy rang error
'32051': InvalidOrder, # Order strategy ice depth error
'32052': ExchangeError, # String of commission over 100 thousand
'32053': ExchangeError, # Each user can hold up to 6 ice plans at the same time
'32057': ExchangeError, # The order price is zero. Market-close-all function cannot be executed
'32054': ExchangeError, # Trade not allow
'32055': InvalidOrder, # cancel order error
'32056': ExchangeError, # iceberg per order average should between {0}-{1} contracts
'32058': ExchangeError, # Each user can hold up to 6 initiative plans at the same time
'32059': InvalidOrder, # Total amount should exceed per order amount
'32060': InvalidOrder, # Order strategy type error
'32061': InvalidOrder, # Order strategy initiative limit error
'32062': InvalidOrder, # Order strategy initiative range error
'32063': InvalidOrder, # Order strategy initiative rate error
'32064': ExchangeError, # Time Stringerval of orders should set between 5-120s
'32065': ExchangeError, # Close amount exceeds the limit of Market-close-all(999 for BTC, and 9999 for the rest tokens)
'32066': ExchangeError, # You have open orders. Please cancel all open orders before changing your leverage level.
'32067': ExchangeError, # Account equity < required margin in self setting. Please adjust your leverage level again.
'32068': ExchangeError, # The margin for self position will fall short of the required margin in self setting. Please adjust your leverage level or increase your margin to proceed.
'32069': ExchangeError, # Target leverage level too low. Your account balance is insufficient to cover the margin required. Please adjust the leverage level again.
'32070': ExchangeError, # Please check open position or unfilled order
'32071': ExchangeError, # Your current liquidation mode does not support self action.
                    '32072': ExchangeError,  # The highest available margin for your order's tier is {0}. Please edit your margin and place a new order.
'32073': ExchangeError, # The action does not apply to the token
'32074': ExchangeError, # The number of contracts of your position, open orders, and the current order has exceeded the maximum order limit of self asset.
'32075': ExchangeError, # Account risk rate breach
'32076': ExchangeError, # Liquidation of the holding position(s) at market price will require cancellation of all pending close orders of the contracts.
'32077': ExchangeError, # Your margin for self asset in futures account is insufficient and the position has been taken over for liquidation.(You will not be able to place orders, close positions, transfer funds, or add margin during self period of time. Your account will be restored after the liquidation is complete.)
'32078': ExchangeError, # Please cancel all open orders before switching the liquidation mode(Please cancel all open orders before switching the liquidation mode)
'32079': ExchangeError, # Your open positions are at high risk.(Please add margin or reduce positions before switching the mode)
'32080': ExchangeError, # Funds cannot be transferred out within 30 minutes after futures settlement
'32083': ExchangeError, # The number of contracts should be a positive multiple of %%. Please place your order again
# token and margin trading
'33001': PermissionDenied, # {"code": 33001, "message": "margin account for self pair is not enabled yet"}
'33002': AccountSuspended, # {"code": 33002, "message": "margin account for self pair is suspended"}
'33003': InsufficientFunds, # {"code": 33003, "message": "no loan balance"}
'33004': ExchangeError, # {"code": 33004, "message": "loan amount cannot be smaller than the minimum limit"}
'33005': ExchangeError, # {"code": 33005, "message": "repayment amount must exceed 0"}
'33006': ExchangeError, # {"code": 33006, "message": "loan order not found"}
'33007': ExchangeError, # {"code": 33007, "message": "status not found"}
'33008': InsufficientFunds, # {"code": 33008, "message": "loan amount cannot exceed the maximum limit"}
'33009': ExchangeError, # {"code": 33009, "message": "user ID is blank"}
'33010': ExchangeError, # {"code": 33010, "message": "you cannot cancel an order during session 2 of call auction"}
'33011': ExchangeError, # {"code": 33011, "message": "no new market data"}
'33012': ExchangeError, # {"code": 33012, "message": "order cancellation failed"}
'33013': InvalidOrder, # {"code": 33013, "message": "order placement failed"}
'33014': OrderNotFound, # {"code": 33014, "message": "order does not exist"}
'33015': InvalidOrder, # {"code": 33015, "message": "exceeded maximum limit"}
'33016': ExchangeError, # {"code": 33016, "message": "margin trading is not open for self token"}
'33017': InsufficientFunds, # {"code": 33017, "message": "insufficient balance"}
'33018': ExchangeError, # {"code": 33018, "message": "self parameter must be smaller than 1"}
'33020': ExchangeError, # {"code": 33020, "message": "request not supported"}
'33021': BadRequest, # {"code": 33021, "message": "token and the pair do not match"}
'33022': InvalidOrder, # {"code": 33022, "message": "pair and the order do not match"}
'33023': ExchangeError, # {"code": 33023, "message": "you can only place market orders during call auction"}
'33024': InvalidOrder, # {"code": 33024, "message": "trading amount too small"}
'33025': InvalidOrder, # {"code": 33025, "message": "base token amount is blank"}
'33026': ExchangeError, # {"code": 33026, "message": "transaction completed"}
'33027': InvalidOrder, # {"code": 33027, "message": "cancelled order or order cancelling"}
'33028': InvalidOrder, # {"code": 33028, "message": "the decimal places of the trading price exceeded the limit"}
'33029': InvalidOrder, # {"code": 33029, "message": "the decimal places of the trading size exceeded the limit"}
'33034': ExchangeError, # {"code": 33034, "message": "You can only place limit order after Call Auction has started"}
'33035': ExchangeError, # This type of order cannot be canceled(This type of order cannot be canceled)
'33036': ExchangeError, # Exceeding the limit of entrust order
'33037': ExchangeError, # The buy order price should be lower than 130% of the trigger price
'33038': ExchangeError, # The sell order price should be higher than 70% of the trigger price
'33039': ExchangeError, # The limit of callback rate is 0 < x <= 5%
'33040': ExchangeError, # The trigger price of a buy order should be lower than the latest transaction price
'33041': ExchangeError, # The trigger price of a sell order should be higher than the latest transaction price
'33042': ExchangeError, # The limit of price variance is 0 < x <= 1%
'33043': ExchangeError, # The total amount must be larger than 0
'33044': ExchangeError, # The average amount should be 1/1000 * total amount <= x <= total amount
'33045': ExchangeError, # The price should not be 0, including trigger price, order price, and price limit
'33046': ExchangeError, # Price variance should be 0 < x <= 1%
'33047': ExchangeError, # Sweep ratio should be 0 < x <= 100%
'33048': ExchangeError, # Per order limit: Total amount/1000 < x <= Total amount
'33049': ExchangeError, # Total amount should be X > 0
'33050': ExchangeError, # Time interval should be 5 <= x <= 120s
'33051': ExchangeError, # cancel order number not higher limit: plan and track entrust no more than 10, ice and time entrust no more than 6
'33059': BadRequest, # {"code": 33059, "message": "client_oid or order_id is required"}
'33060': BadRequest, # {"code": 33060, "message": "Only fill in either parameter client_oid or order_id"}
'33061': ExchangeError, # Value of a single market price order cannot exceed 100,000 USD
'33062': ExchangeError, # The leverage ratio is too high. The borrowed position has exceeded the maximum position of self leverage ratio. Please readjust the leverage ratio
'33063': ExchangeError, # Leverage multiple is too low, there is insufficient margin in the account, please readjust the leverage ratio
'33064': ExchangeError, # The setting of the leverage ratio cannot be less than 2, please readjust the leverage ratio
'33065': ExchangeError, # Leverage ratio exceeds maximum leverage ratio, please readjust leverage ratio
'33085': InvalidOrder, # The value of the position and buying order has reached the position limit, and no further buying is allowed.
# account
'21009': ExchangeError, # Funds cannot be transferred out within 30 minutes after swap settlement(Funds cannot be transferred out within 30 minutes after swap settlement)
'34001': PermissionDenied, # {"code": 34001, "message": "withdrawal suspended"}
'34002': InvalidAddress, # {"code": 34002, "message": "please add a withdrawal address"}
'34003': ExchangeError, # {"code": 34003, "message": "sorry, self token cannot be withdrawn to xx at the moment"}
'34004': ExchangeError, # {"code": 34004, "message": "withdrawal fee is smaller than minimum limit"}
'34005': ExchangeError, # {"code": 34005, "message": "withdrawal fee exceeds the maximum limit"}
'34006': ExchangeError, # {"code": 34006, "message": "withdrawal amount is lower than the minimum limit"}
'34007': ExchangeError, # {"code": 34007, "message": "withdrawal amount exceeds the maximum limit"}
'34008': InsufficientFunds, # {"code": 34008, "message": "insufficient balance"}
'34009': ExchangeError, # {"code": 34009, "message": "your withdrawal amount exceeds the daily limit"}
'34010': ExchangeError, # {"code": 34010, "message": "transfer amount must be larger than 0"}
'34011': ExchangeError, # {"code": 34011, "message": "conditions not met"}
'34012': ExchangeError, # {"code": 34012, "message": "the minimum withdrawal amount for NEO is 1, and the amount must be an integer"}
'34013': ExchangeError, # {"code": 34013, "message": "please transfer"}
'34014': ExchangeError, # {"code": 34014, "message": "transfer limited"}
'34015': ExchangeError, # {"code": 34015, "message": "subaccount does not exist"}
'34016': PermissionDenied, # {"code": 34016, "message": "transfer suspended"}
'34017': AccountSuspended, # {"code": 34017, "message": "account suspended"}
'34018': AuthenticationError, # {"code": 34018, "message": "incorrect trades password"}
'34019': PermissionDenied, # {"code": 34019, "message": "please bind your email before withdrawal"}
'34020': PermissionDenied, # {"code": 34020, "message": "please bind your funds password before withdrawal"}
'34021': InvalidAddress, # {"code": 34021, "message": "Not verified address"}
'34022': ExchangeError, # {"code": 34022, "message": "Withdrawals are not available for sub accounts"}
'34023': PermissionDenied, # {"code": 34023, "message": "Please enable futures trading before transferring your funds"}
'34026': RateLimitExceeded, # transfer too frequently(transfer too frequently)
'34036': ExchangeError, # Parameter is incorrect, please refer to API documentation
'34037': ExchangeError, # Get the sub-account balance interface, account type is not supported
'34038': ExchangeError, # Since your C2C transaction is unusual, you are restricted from fund transfer. Please contact our customer support to cancel the restriction
'34039': ExchangeError, # You are now restricted from transferring out your funds due to abnormal trades on C2C Market. Please transfer your fund on our website or app instead to verify your identity
# swap
'35001': ExchangeError, # {"code": 35001, "message": "Contract does not exist"}
'35002': ExchangeError, # {"code": 35002, "message": "Contract settling"}
'35003': ExchangeError, # {"code": 35003, "message": "Contract paused"}
'35004': ExchangeError, # {"code": 35004, "message": "Contract pending settlement"}
'35005': AuthenticationError, # {"code": 35005, "message": "User does not exist"}
'35008': InvalidOrder, # {"code": 35008, "message": "Risk ratio too high"}
'35010': InvalidOrder, # {"code": 35010, "message": "Position closing too large"}
'35012': InvalidOrder, # {"code": 35012, "message": "Incorrect order size"}
'35014': InvalidOrder, # {"code": 35014, "message": "Order price is not within limit"}
'35015': InvalidOrder, # {"code": 35015, "message": "Invalid leverage level"}
'35017': ExchangeError, # {"code": 35017, "message": "Open orders exist"}
'35019': InvalidOrder, # {"code": 35019, "message": "Order size too large"}
'35020': InvalidOrder, # {"code": 35020, "message": "Order price too high"}
'35021': InvalidOrder, # {"code": 35021, "message": "Order size exceeded current tier limit"}
'35022': BadRequest, # {"code": 35022, "message": "Contract status error"}
'35024': BadRequest, # {"code": 35024, "message": "Contract not initialized"}
'35025': InsufficientFunds, # {"code": 35025, "message": "No account balance"}
'35026': BadRequest, # {"code": 35026, "message": "Contract settings not initialized"}
'35029': OrderNotFound, # {"code": 35029, "message": "Order does not exist"}
'35030': InvalidOrder, # {"code": 35030, "message": "Order size too large"}
'35031': InvalidOrder, # {"code": 35031, "message": "Cancel order size too large"}
'35032': ExchangeError, # {"code": 35032, "message": "Invalid user status"}
'35037': ExchangeError, # No last traded price in cache
'35039': ExchangeError, # {"code": 35039, "message": "Open order quantity exceeds limit"}
'35040': InvalidOrder, # {"error_message":"Invalid order type","result":"true","error_code":"35040","order_id":"-1"}
'35044': ExchangeError, # {"code": 35044, "message": "Invalid order status"}
'35046': InsufficientFunds, # {"code": 35046, "message": "Negative account balance"}
'35047': InsufficientFunds, # {"code": 35047, "message": "Insufficient account balance"}
'35048': ExchangeError, # {"code": 35048, "message": "User contract is frozen and liquidating"}
'35049': InvalidOrder, # {"code": 35049, "message": "Invalid order type"}
'35050': InvalidOrder, # {"code": 35050, "message": "Position settings are blank"}
'35052': InsufficientFunds, # {"code": 35052, "message": "Insufficient cross margin"}
'35053': ExchangeError, # {"code": 35053, "message": "Account risk too high"}
'35055': InsufficientFunds, # {"code": 35055, "message": "Insufficient account balance"}
'35057': ExchangeError, # {"code": 35057, "message": "No last traded price"}
'35058': ExchangeError, # {"code": 35058, "message": "No limit"}
'35059': BadRequest, # {"code": 35059, "message": "client_oid or order_id is required"}
'35060': BadRequest, # {"code": 35060, "message": "Only fill in either parameter client_oid or order_id"}
'35061': BadRequest, # {"code": 35061, "message": "Invalid instrument_id"}
'35062': InvalidOrder, # {"code": 35062, "message": "Invalid match_price"}
'35063': InvalidOrder, # {"code": 35063, "message": "Invalid order_size"}
'35064': InvalidOrder, # {"code": 35064, "message": "Invalid client_oid"}
'35066': InvalidOrder, # Order interval error
'35067': InvalidOrder, # Time-weighted order ratio error
'35068': InvalidOrder, # Time-weighted order range error
'35069': InvalidOrder, # Time-weighted single transaction limit error
'35070': InvalidOrder, # Algo order type error
'35071': InvalidOrder, # Order total must be larger than single order limit
'35072': InvalidOrder, # Maximum 6 unfulfilled time-weighted orders can be held at the same time
'35073': InvalidOrder, # Order price is 0. Market-close-all not available
'35074': InvalidOrder, # Iceberg order single transaction average error
'35075': InvalidOrder, # Failed to cancel order
'35076': InvalidOrder, # LTC 20x leverage. Not allowed to open position
'35077': InvalidOrder, # Maximum 6 unfulfilled iceberg orders can be held at the same time
'35078': InvalidOrder, # Order amount exceeded 100,000
'35079': InvalidOrder, # Iceberg order price variance error
'35080': InvalidOrder, # Callback rate error
'35081': InvalidOrder, # Maximum 10 unfulfilled trail orders can be held at the same time
'35082': InvalidOrder, # Trail order callback rate error
'35083': InvalidOrder, # Each user can only hold a maximum of 10 unfulfilled stop-limit orders at the same time
'35084': InvalidOrder, # Order amount exceeded 1 million
'35085': InvalidOrder, # Order amount is not in the correct range
'35086': InvalidOrder, # Price exceeds 100 thousand
'35087': InvalidOrder, # Price exceeds 100 thousand
'35088': InvalidOrder, # Average amount error
'35089': InvalidOrder, # Price exceeds 100 thousand
'35090': ExchangeError, # No stop-limit orders available for cancelation
'35091': ExchangeError, # No trail orders available for cancellation
'35092': ExchangeError, # No iceberg orders available for cancellation
'35093': ExchangeError, # No trail orders available for cancellation
'35094': ExchangeError, # Stop-limit order last traded price error
'35095': BadRequest, # Instrument_id error
'35096': ExchangeError, # Algo order status error
'35097': ExchangeError, # Order status and order ID cannot exist at the same time
'35098': ExchangeError, # An order status or order ID must exist
'35099': ExchangeError, # Algo order ID error
'35102': RateLimitExceeded, # {"error_message":"The operation that close all at market price is too frequent","result":"true","error_code":"35102","order_id":"-1"}
# option
'36001': BadRequest, # Invalid underlying index.
'36002': BadRequest, # Instrument does not exist.
'36005': ExchangeError, # Instrument status is invalid.
'36101': AuthenticationError, # Account does not exist.
'36102': PermissionDenied, # Account status is invalid.
'36103': PermissionDenied, # Account is suspended due to ongoing liquidation.
'36104': PermissionDenied, # Account is not enabled for options trading.
'36105': PermissionDenied, # Please enable the account for option contract.
'36106': PermissionDenied, # Funds cannot be transferred in or out, as account is suspended.
'36107': PermissionDenied, # Funds cannot be transferred out within 30 minutes after option exercising or settlement.
'36108': InsufficientFunds, # Funds cannot be transferred in or out, as equity of the account is less than zero.
'36109': PermissionDenied, # Funds cannot be transferred in or out during option exercising or settlement.
'36201': PermissionDenied, # New order function is blocked.
'36202': PermissionDenied, # Account does not have permission to short option.
'36203': InvalidOrder, # Invalid format for client_oid.
'36204': ExchangeError, # Invalid format for request_id.
'36205': BadRequest, # Instrument id does not match underlying index.
'36206': BadRequest, # Order_id and client_oid can not be used at the same time.
'36207': InvalidOrder, # Either order price or fartouch price must be present.
'36208': InvalidOrder, # Either order price or size must be present.
'36209': InvalidOrder, # Either order_id or client_oid must be present.
'36210': InvalidOrder, # Either order_ids or client_oids must be present.
'36211': InvalidOrder, # Exceeding max batch size for order submission.
                '36212': InvalidOrder, # Exceeding max batch size for order cancellation.
'36213': InvalidOrder, # Exceeding max batch size for order amendment.
'36214': ExchangeError, # Instrument does not have valid bid/ask quote.
'36216': OrderNotFound, # Order does not exist.
'36217': InvalidOrder, # Order submission failed.
'36218': InvalidOrder, # Order cancellation failed.
'36219': InvalidOrder, # Order amendment failed.
'36220': InvalidOrder, # Order is pending cancel.
'36221': InvalidOrder, # Order qty is not valid multiple of lot size.
'36222': InvalidOrder, # Order price is breaching highest buy limit.
'36223': InvalidOrder, # Order price is breaching lowest sell limit.
'36224': InvalidOrder, # Exceeding max order size.
'36225': InvalidOrder, # Exceeding max open order count for instrument.
'36226': InvalidOrder, # Exceeding max open order count for underlying.
'36227': InvalidOrder, # Exceeding max open size across all orders for underlying
'36228': InvalidOrder, # Exceeding max available qty for instrument.
'36229': InvalidOrder, # Exceeding max available qty for underlying.
'36230': InvalidOrder, # Exceeding max position limit for underlying.
},
'broad': {
},
},
'precisionMode': TICK_SIZE,
'options': {
'fetchOHLCV': {
'type': 'Candles', # Candles or HistoryCandles
},
'createMarketBuyOrderRequiresPrice': True,
'fetchMarkets': ['spot', 'futures', 'swap', 'option'],
'defaultType': 'spot', # 'account', 'spot', 'margin', 'futures', 'swap', 'option'
'auth': {
'time': 'public',
'currencies': 'private',
'instruments': 'public',
'rate': 'public',
'{instrument_id}/constituents': 'public',
},
},
'commonCurrencies': {
# OKEX refers to ERC20 version of Aeternity(AEToken)
'AE': 'AET', # https://github.com/ccxt/ccxt/issues/4981
'BOX': 'DefiBox',
'HOT': 'Hydro Protocol',
'HSR': 'HC',
'MAG': 'Maggie',
'SBTC': 'Super Bitcoin',
'YOYO': 'YOYOW',
'WIN': 'WinToken', # https://github.com/ccxt/ccxt/issues/5701
},
})
def fetch_time(self, params={}):
response = self.generalGetTime(params)
#
# {
# "iso": "2015-01-07T23:47:25.201Z",
# "epoch": 1420674445.201
# }
#
return self.parse8601(self.safe_string(response, 'iso'))
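        #
        # Rough usage sketch (illustrative; assumes this class is exposed as ccxt.okex
        # and that the variable names below are placeholders):
        #
        #     import ccxt
        #     exchange = ccxt.okex()
        #     server_ms = exchange.fetch_time()  # server time in milliseconds
        #     drift = exchange.milliseconds() - server_ms  # rough local clock drift
        #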
def fetch_markets(self, params={}):
types = self.safe_value(self.options, 'fetchMarkets')
result = []
for i in range(0, len(types)):
markets = self.fetch_markets_by_type(types[i], params)
result = self.array_concat(result, markets)
return result
def parse_markets(self, markets):
result = []
for i in range(0, len(markets)):
result.append(self.parse_market(markets[i]))
return result
def parse_market(self, market):
#
# spot markets
#
# {
# base_currency: "EOS",
# instrument_id: "EOS-OKB",
# min_size: "0.01",
# quote_currency: "OKB",
# size_increment: "0.000001",
# tick_size: "0.0001"
# }
#
# futures markets
#
# {
# instrument_id: "XRP-USD-200320",
# underlying_index: "XRP",
# quote_currency: "USD",
# tick_size: "0.0001",
# contract_val: "10",
# listing: "2020-03-06",
# delivery: "2020-03-20",
# trade_increment: "1",
        #         alias: "this_week",
# underlying: "XRP-USD",
# base_currency: "XRP",
# settlement_currency: "XRP",
# is_inverse: "true",
# contract_val_currency: "USD",
# }
#
# swap markets
#
# {
# instrument_id: "BSV-USD-SWAP",
# underlying_index: "BSV",
# quote_currency: "USD",
# coin: "BSV",
# contract_val: "10",
# listing: "2018-12-21T07:53:47.000Z",
# delivery: "2020-03-14T08:00:00.000Z",
# size_increment: "1",
# tick_size: "0.01",
# base_currency: "BSV",
# underlying: "BSV-USD",
# settlement_currency: "BSV",
# is_inverse: "true",
# contract_val_currency: "USD"
# }
#
# options markets
#
# {
# instrument_id: 'BTC-USD-200327-4000-C',
# underlying: 'BTC-USD',
# settlement_currency: 'BTC',
# contract_val: '0.1000',
# option_type: 'C',
# strike: '4000',
# tick_size: '0.0005',
# lot_size: '1.0000',
# listing: '2019-12-25T08:30:36.302Z',
# delivery: '2020-03-27T08:00:00.000Z',
# state: '2',
# trading_start_time: '2019-12-25T08:30:36.302Z',
# timestamp: '2020-03-13T08:05:09.456Z',
# }
#
id = self.safe_string(market, 'instrument_id')
marketType = 'spot'
spot = True
future = False
swap = False
option = False
baseId = self.safe_string(market, 'base_currency')
quoteId = self.safe_string(market, 'quote_currency')
contractVal = self.safe_float(market, 'contract_val')
if contractVal is not None:
if 'option_type' in market:
marketType = 'option'
spot = False
option = True
underlying = self.safe_string(market, 'underlying')
parts = underlying.split('-')
baseId = self.safe_string(parts, 0)
quoteId = self.safe_string(parts, 1)
else:
marketType = 'swap'
spot = False
swap = True
futuresAlias = self.safe_string(market, 'alias')
if futuresAlias is not None:
swap = False
future = True
marketType = 'futures'
baseId = self.safe_string(market, 'underlying_index')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = (base + '/' + quote) if spot else id
lotSize = self.safe_float_2(market, 'lot_size', 'trade_increment')
precision = {
'amount': self.safe_float(market, 'size_increment', lotSize),
'price': self.safe_float(market, 'tick_size'),
}
minAmount = self.safe_float_2(market, 'min_size', 'base_min_size')
active = True
fees = self.safe_value_2(self.fees, marketType, 'trading', {})
return self.extend(fees, {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'info': market,
'type': marketType,
'spot': spot,
'futures': future,
'swap': swap,
'option': option,
'active': active,
'precision': precision,
'limits': {
'amount': {
'min': minAmount,
'max': None,
},
'price': {
'min': precision['price'],
'max': None,
},
'cost': {
'min': precision['price'],
'max': None,
},
},
})
def fetch_markets_by_type(self, type, params={}):
if type == 'option':
underlying = self.optionGetUnderlying(params)
result = []
for i in range(0, len(underlying)):
response = self.optionGetInstrumentsUnderlying({
'underlying': underlying[i],
})
#
# options markets
#
# [
# {
# instrument_id: 'BTC-USD-200327-4000-C',
# underlying: 'BTC-USD',
# settlement_currency: 'BTC',
# contract_val: '0.1000',
# option_type: 'C',
# strike: '4000',
# tick_size: '0.0005',
# lot_size: '1.0000',
# listing: '2019-12-25T08:30:36.302Z',
# delivery: '2020-03-27T08:00:00.000Z',
# state: '2',
# trading_start_time: '2019-12-25T08:30:36.302Z',
# timestamp: '2020-03-13T08:05:09.456Z',
# },
# ]
#
result = self.array_concat(result, response)
return self.parse_markets(result)
elif (type == 'spot') or (type == 'futures') or (type == 'swap'):
method = type + 'GetInstruments'
response = getattr(self, method)(params)
#
# spot markets
#
# [
# {
# base_currency: "EOS",
# instrument_id: "EOS-OKB",
# min_size: "0.01",
# quote_currency: "OKB",
# size_increment: "0.000001",
# tick_size: "0.0001"
# }
# ]
#
# futures markets
#
# [
# {
# instrument_id: "XRP-USD-200320",
# underlying_index: "XRP",
# quote_currency: "USD",
# tick_size: "0.0001",
# contract_val: "10",
# listing: "2020-03-06",
# delivery: "2020-03-20",
# trade_increment: "1",
        #         alias: "this_week",
# underlying: "XRP-USD",
# base_currency: "XRP",
# settlement_currency: "XRP",
# is_inverse: "true",
# contract_val_currency: "USD",
# }
# ]
#
# swap markets
#
# [
# {
# instrument_id: "BSV-USD-SWAP",
# underlying_index: "BSV",
# quote_currency: "USD",
# coin: "BSV",
# contract_val: "10",
# listing: "2018-12-21T07:53:47.000Z",
# delivery: "2020-03-14T08:00:00.000Z",
# size_increment: "1",
# tick_size: "0.01",
# base_currency: "BSV",
# underlying: "BSV-USD",
# settlement_currency: "BSV",
# is_inverse: "true",
# contract_val_currency: "USD"
# }
# ]
#
return self.parse_markets(response)
else:
raise NotSupported(self.id + ' fetchMarketsByType does not support market type ' + type)
def fetch_currencies(self, params={}):
# has['fetchCurrencies'] is currently set to False
# despite that their docs say these endpoints are public:
# https://www.okex.com/api/account/v3/withdrawal/fee
# https://www.okex.com/api/account/v3/currencies
# it will still reply with {"code":30001, "message": "OK-ACCESS-KEY header is required"}
# if you attempt to access it without authentication
response = self.accountGetCurrencies(params)
#
# [
# {
# name: '',
# currency: 'BTC',
# can_withdraw: '1',
# can_deposit: '1',
# min_withdrawal: '0.0100000000000000'
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'currency')
code = self.safe_currency_code(id)
precision = 0.00000001 # default precision, todo: fix "magic constants"
name = self.safe_string(currency, 'name')
canDeposit = self.safe_integer(currency, 'can_deposit')
canWithdraw = self.safe_integer(currency, 'can_withdraw')
active = True if (canDeposit and canWithdraw) else False
result[code] = {
'id': id,
'code': code,
'info': currency,
'type': None,
'name': name,
'active': active,
'fee': None, # todo: redesign
'precision': precision,
'limits': {
'amount': {'min': None, 'max': None},
'price': {'min': None, 'max': None},
'cost': {'min': None, 'max': None},
'withdraw': {
'min': self.safe_float(currency, 'min_withdrawal'),
'max': None,
},
},
}
return result
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
method = market['type'] + 'GetInstrumentsInstrumentId'
method += 'Depth' if (market['type'] == 'swap') else 'Book'
request = {
'instrument_id': market['id'],
}
if limit is not None:
request['size'] = limit # max 200
response = getattr(self, method)(self.extend(request, params))
#
# { asks: [["0.02685268", "0.242571", "1"],
# ["0.02685493", "0.164085", "1"],
# ...
# ["0.02779", "1.039", "1"],
# ["0.027813", "0.0876", "1"] ],
# bids: [["0.02684052", "10.371849", "1"],
# ["0.02684051", "3.707", "4"],
# ...
# ["0.02634963", "0.132934", "1"],
# ["0.02634962", "0.264838", "2"] ],
# timestamp: "2018-12-17T20:24:16.159Z" }
#
timestamp = self.parse8601(self.safe_string(response, 'timestamp'))
return self.parse_order_book(response, timestamp)
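        #
        # Usage sketch (illustrative): the REST path is chosen from the market type,
        # so spot symbols hit ...InstrumentIdBook while swap symbols hit ...InstrumentIdDepth;
        # `exchange` here is a placeholder for a loaded instance:
        #
        #     spot_book = exchange.fetch_order_book('BTC/USDT', 20)
        #     swap_book = exchange.fetch_order_book('BTC-USD-SWAP', 20)
        #     best_bid, best_ask = spot_book['bids'][0], spot_book['asks'][0]
        #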
def parse_ticker(self, ticker, market=None):
#
# { best_ask: "0.02665472",
# best_bid: "0.02665221",
# instrument_id: "ETH-BTC",
# product_id: "ETH-BTC",
# last: "0.02665472",
# ask: "0.02665472", # missing in the docs
# bid: "0.02665221", # not mentioned in the docs
# open_24h: "0.02645482",
# high_24h: "0.02714633",
# low_24h: "0.02614109",
# base_volume_24h: "572298.901923",
# timestamp: "2018-12-17T21:20:07.856Z",
# quote_volume_24h: "15094.86831261" }
#
timestamp = self.parse8601(self.safe_string(ticker, 'timestamp'))
symbol = None
marketId = self.safe_string(ticker, 'instrument_id')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
elif marketId is not None:
parts = marketId.split('-')
numParts = len(parts)
if numParts == 2:
baseId, quoteId = parts
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
else:
symbol = marketId
if (symbol is None) and (market is not None):
symbol = market['symbol']
last = self.safe_float(ticker, 'last')
open = self.safe_float(ticker, 'open_24h')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high_24h'),
'low': self.safe_float(ticker, 'low_24h'),
'bid': self.safe_float(ticker, 'best_bid'),
'bidVolume': self.safe_float(ticker, 'best_bid_size'),
'ask': self.safe_float(ticker, 'best_ask'),
'askVolume': self.safe_float(ticker, 'best_ask_size'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'base_volume_24h'),
'quoteVolume': self.safe_float(ticker, 'quote_volume_24h'),
'info': ticker,
}
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
method = market['type'] + 'GetInstrumentsInstrumentIdTicker'
request = {
'instrument_id': market['id'],
}
response = getattr(self, method)(self.extend(request, params))
#
# { best_ask: "0.02665472",
# best_bid: "0.02665221",
# instrument_id: "ETH-BTC",
# product_id: "ETH-BTC",
# last: "0.02665472",
# ask: "0.02665472",
# bid: "0.02665221",
# open_24h: "0.02645482",
# high_24h: "0.02714633",
# low_24h: "0.02614109",
# base_volume_24h: "572298.901923",
# timestamp: "2018-12-17T21:20:07.856Z",
# quote_volume_24h: "15094.86831261" }
#
return self.parse_ticker(response)
def fetch_tickers_by_type(self, type, symbols=None, params={}):
self.load_markets()
method = type + 'GetInstrumentsTicker'
response = getattr(self, method)(params)
result = {}
for i in range(0, len(response)):
ticker = self.parse_ticker(response[i])
symbol = ticker['symbol']
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
def fetch_tickers(self, symbols=None, params={}):
defaultType = self.safe_string_2(self.options, 'fetchTickers', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
return self.fetch_tickers_by_type(type, symbols, self.omit(params, 'type'))
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# spot trades
#
# {
# time: "2018-12-17T23:31:08.268Z",
# timestamp: "2018-12-17T23:31:08.268Z",
# trade_id: "409687906",
# price: "0.02677805",
# size: "0.923467",
# side: "sell"
# }
#
# futures trades, swap trades
#
# {
# trade_id: "1989230840021013",
# side: "buy",
# price: "92.42",
# qty: "184", # missing in swap markets
# size: "5", # missing in futures markets
# timestamp: "2018-12-17T23:26:04.613Z"
# }
#
# fetchOrderTrades(private)
#
# spot trades, margin trades
#
# {
# "created_at":"2019-03-15T02:52:56.000Z",
# "exec_type":"T", # whether the order is taker or maker
# "fee":"0.00000082",
# "instrument_id":"BTC-USDT",
# "ledger_id":"3963052721",
# "liquidity":"T", # whether the order is taker or maker
# "order_id":"2482659399697408",
# "price":"3888.6",
# "product_id":"BTC-USDT",
# "side":"buy",
# "size":"0.00055306",
# "timestamp":"2019-03-15T02:52:56.000Z"
# },
#
# futures trades, swap trades
#
# {
# "trade_id":"197429674631450625",
# "instrument_id":"EOS-USD-SWAP",
# "order_id":"6a-7-54d663a28-0",
# "price":"3.633",
# "order_qty":"1.0000",
# "fee":"-0.000551",
# "created_at":"2019-03-21T04:41:58.0Z", # missing in swap trades
# "timestamp":"2019-03-25T05:56:31.287Z", # missing in futures trades
# "exec_type":"M", # whether the order is taker or maker
# "side":"short", # "buy" in futures trades
# }
#
symbol = None
marketId = self.safe_string(trade, 'instrument_id')
base = None
quote = None
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
base = market['base']
quote = market['quote']
elif marketId is not None:
parts = marketId.split('-')
numParts = len(parts)
if numParts == 2:
baseId, quoteId = parts
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
else:
symbol = marketId
if (symbol is None) and (market is not None):
symbol = market['symbol']
base = market['base']
quote = market['quote']
timestamp = self.parse8601(self.safe_string_2(trade, 'timestamp', 'created_at'))
price = self.safe_float(trade, 'price')
amount = self.safe_float_2(trade, 'size', 'qty')
amount = self.safe_float(trade, 'order_qty', amount)
takerOrMaker = self.safe_string_2(trade, 'exec_type', 'liquidity')
if takerOrMaker == 'M':
takerOrMaker = 'maker'
elif takerOrMaker == 'T':
takerOrMaker = 'taker'
side = self.safe_string(trade, 'side')
cost = None
if amount is not None:
if price is not None:
cost = amount * price
feeCost = self.safe_float(trade, 'fee')
fee = None
if feeCost is not None:
feeCurrency = base if (side == 'buy') else quote
fee = {
# fee is either a positive number(invitation rebate)
# or a negative number(transaction fee deduction)
# therefore we need to invert the fee
# more about it https://github.com/ccxt/ccxt/issues/5909
'cost': -feeCost,
'currency': feeCurrency,
}
orderId = self.safe_string(trade, 'order_id')
return {
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': self.safe_string_2(trade, 'trade_id', 'ledger_id'),
'order': orderId,
'type': None,
'takerOrMaker': takerOrMaker,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
method = market['type'] + 'GetInstrumentsInstrumentIdTrades'
if (limit is None) or (limit > 100):
limit = 100 # maximum = default = 100
request = {
'instrument_id': market['id'],
'limit': limit,
# from: 'id',
# to: 'id',
}
response = getattr(self, method)(self.extend(request, params))
#
# spot markets
#
# [
# {
# time: "2018-12-17T23:31:08.268Z",
# timestamp: "2018-12-17T23:31:08.268Z",
# trade_id: "409687906",
# price: "0.02677805",
# size: "0.923467",
# side: "sell"
# }
# ]
#
# futures markets, swap markets
#
# [
# {
# trade_id: "1989230840021013",
# side: "buy",
# price: "92.42",
# qty: "184", # missing in swap markets
# size: "5", # missing in futures markets
# timestamp: "2018-12-17T23:26:04.613Z"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
#
# spot markets
#
# {
# close: "0.02684545",
# high: "0.02685084",
# low: "0.02683312",
# open: "0.02683894",
# time: "2018-12-17T20:28:00.000Z",
# volume: "101.457222"
# }
#
# futures markets
#
# [
# 1545072720000,
# 0.3159,
# 0.3161,
# 0.3144,
# 0.3149,
# 22886,
# 725179.26172331,
# ]
#
if isinstance(ohlcv, list):
numElements = len(ohlcv)
volumeIndex = 6 if (numElements > 6) else 5
timestamp = self.safe_value(ohlcv, 0)
if isinstance(timestamp, basestring):
timestamp = self.parse8601(timestamp)
return [
timestamp, # timestamp
self.safe_float(ohlcv, 1), # Open
self.safe_float(ohlcv, 2), # High
self.safe_float(ohlcv, 3), # Low
self.safe_float(ohlcv, 4), # Close
# self.safe_float(ohlcv, 5), # Quote Volume
# self.safe_float(ohlcv, 6), # Base Volume
self.safe_float(ohlcv, volumeIndex), # Volume, okex will return base volume in the 7th element for future markets
]
else:
return [
self.parse8601(self.safe_string(ohlcv, 'time')),
self.safe_float(ohlcv, 'open'), # Open
self.safe_float(ohlcv, 'high'), # High
self.safe_float(ohlcv, 'low'), # Low
self.safe_float(ohlcv, 'close'), # Close
self.safe_float(ohlcv, 'volume'), # Base Volume
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
duration = self.parse_timeframe(timeframe)
request = {
'instrument_id': market['id'],
'granularity': self.timeframes[timeframe],
}
options = self.safe_value(self.options, 'fetchOHLCV', {})
defaultType = self.safe_string(options, 'type', 'Candles') # Candles or HistoryCandles
type = self.safe_string(params, 'type', defaultType)
params = self.omit(params, 'type')
method = market['type'] + 'GetInstrumentsInstrumentId' + type
if type == 'Candles':
if since is not None:
if limit is not None:
request['end'] = self.iso8601(self.sum(since, limit * duration * 1000))
request['start'] = self.iso8601(since)
else:
if limit is not None:
now = self.milliseconds()
request['start'] = self.iso8601(now - limit * duration * 1000)
request['end'] = self.iso8601(now)
elif type == 'HistoryCandles':
if market['option']:
raise NotSupported(self.id + ' fetchOHLCV does not have ' + type + ' for ' + market['type'] + ' markets')
if since is not None:
if limit is None:
limit = 300 # default
request['start'] = self.iso8601(self.sum(since, limit * duration * 1000))
request['end'] = self.iso8601(since)
else:
if limit is not None:
now = self.milliseconds()
request['end'] = self.iso8601(now - limit * duration * 1000)
request['start'] = self.iso8601(now)
response = getattr(self, method)(self.extend(request, params))
#
# spot markets
#
# [
# {
# close: "0.02683401",
# high: "0.02683401",
# low: "0.02683401",
# open: "0.02683401",
# time: "2018-12-17T23:47:00.000Z",
# volume: "0"
# },
# {
# close: "0.02684545",
# high: "0.02685084",
# low: "0.02683312",
# open: "0.02683894",
# time: "2018-12-17T20:28:00.000Z",
# volume: "101.457222"
# }
# ]
#
# futures
#
# [
# [
# 1545090660000,
# 0.3171,
# 0.3174,
# 0.3171,
# 0.3173,
# 1648,
# 51930.38579450868
# ],
# [
# 1545072720000,
# 0.3159,
# 0.3161,
# 0.3144,
# 0.3149,
# 22886,
# 725179.26172331
# ]
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
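        #
        # Usage sketch (illustrative): with the default 'Candles' endpoint the since/limit
        # pair is turned into an ISO8601 start/end window of limit * duration milliseconds;
        # 'HistoryCandles' can be selected through the options or the params:
        #
        #     since = exchange.parse8601('2020-01-01T00:00:00Z')
        #     candles = exchange.fetch_ohlcv('BTC/USDT', '1m', since, 100)
        #     history = exchange.fetch_ohlcv('BTC/USDT', '1m', since, 100, {'type': 'HistoryCandles'})
        #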
def parse_account_balance(self, response):
#
# account
#
# [
# {
# balance: 0,
# available: 0,
# currency: "BTC",
# hold: 0
# },
# {
# balance: 0,
# available: 0,
# currency: "ETH",
# hold: 0
# }
# ]
#
# spot
#
# [
# {
# frozen: "0",
# hold: "0",
# id: "2149632",
# currency: "BTC",
# balance: "0.0000000497717339",
# available: "0.0000000497717339",
# holds: "0"
# },
# {
# frozen: "0",
# hold: "0",
# id: "2149632",
# currency: "ICN",
# balance: "0.00000000925",
# available: "0.00000000925",
# holds: "0"
# }
# ]
#
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_float(balance, 'balance')
account['used'] = self.safe_float(balance, 'hold')
account['free'] = self.safe_float(balance, 'available')
result[code] = account
return self.parse_balance(result)
def parse_margin_balance(self, response):
#
# [
# {
# "currency:BTC": {
# "available":"0",
# "balance":"0",
# "borrowed":"0",
# "can_withdraw":"0",
# "frozen":"0",
# "hold":"0",
# "holds":"0",
# "lending_fee":"0"
# },
# "currency:USDT": {
# "available":"100",
# "balance":"100",
# "borrowed":"0",
# "can_withdraw":"100",
# "frozen":"0",
# "hold":"0",
# "holds":"0",
# "lending_fee":"0"
# },
# "instrument_id":"BTC-USDT",
# "liquidation_price":"0",
# "product_id":"BTC-USDT",
# "risk_rate":""
# },
# ]
#
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
marketId = self.safe_string(balance, 'instrument_id')
market = self.safe_value(self.markets_by_id, marketId)
symbol = None
if market is None:
baseId, quoteId = marketId.split('-')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
else:
symbol = market['symbol']
omittedBalance = self.omit(balance, [
'instrument_id',
'liquidation_price',
'product_id',
'risk_rate',
'margin_ratio',
'maint_margin_ratio',
'tiers',
])
keys = list(omittedBalance.keys())
accounts = {}
for k in range(0, len(keys)):
key = keys[k]
marketBalance = balance[key]
if key.find(':') >= 0:
parts = key.split(':')
currencyId = parts[1]
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_float(marketBalance, 'balance')
account['used'] = self.safe_float(marketBalance, 'hold')
account['free'] = self.safe_float(marketBalance, 'available')
accounts[code] = account
else:
raise NotSupported(self.id + ' margin balance response format has changed!')
result[symbol] = self.parse_balance(accounts)
return result
def parse_futures_balance(self, response):
#
# {
# "info":{
# "eos":{
# "auto_margin":"0",
# "contracts": [
# {
# "available_qty":"40.37069445",
# "fixed_balance":"0",
# "instrument_id":"EOS-USD-190329",
# "margin_for_unfilled":"0",
# "margin_frozen":"0",
# "realized_pnl":"0",
# "unrealized_pnl":"0"
# },
# {
# "available_qty":"40.37069445",
# "fixed_balance":"14.54895721",
# "instrument_id":"EOS-USD-190628",
# "margin_for_unfilled":"0",
# "margin_frozen":"10.64042157",
# "realized_pnl":"-3.90853564",
# "unrealized_pnl":"-0.259"
# },
# ],
# "equity":"50.75220665",
# "margin_mode":"fixed",
# "total_avail_balance":"40.37069445"
# },
# }
# }
#
# their root field name is "info", so our info will contain their info
result = {'info': response}
info = self.safe_value(response, 'info', {})
ids = list(info.keys())
for i in range(0, len(ids)):
id = ids[i]
code = self.safe_currency_code(id)
balance = self.safe_value(info, id, {})
account = self.account()
totalAvailBalance = self.safe_float(balance, 'total_avail_balance')
if self.safe_string(balance, 'margin_mode') == 'fixed':
contracts = self.safe_value(balance, 'contracts', [])
free = totalAvailBalance
for i in range(0, len(contracts)):
contract = contracts[i]
fixedBalance = self.safe_float(contract, 'fixed_balance')
realizedPnl = self.safe_float(contract, 'realized_pnl')
marginFrozen = self.safe_float(contract, 'margin_frozen')
marginForUnfilled = self.safe_float(contract, 'margin_for_unfilled')
margin = self.sum(fixedBalance, realizedPnl) - marginFrozen - marginForUnfilled
free = self.sum(free, margin)
account['free'] = free
else:
realizedPnl = self.safe_float(balance, 'realized_pnl')
unrealizedPnl = self.safe_float(balance, 'unrealized_pnl')
marginFrozen = self.safe_float(balance, 'margin_frozen')
marginForUnfilled = self.safe_float(balance, 'margin_for_unfilled')
account['free'] = self.sum(totalAvailBalance, realizedPnl, unrealizedPnl) - marginFrozen - marginForUnfilled
# it may be incorrect to use total, free and used for swap accounts
account['total'] = self.safe_float(balance, 'equity')
result[code] = account
return self.parse_balance(result)
def parse_swap_balance(self, response):
#
# {
# "info": [
# {
# "equity":"3.0139",
# "fixed_balance":"0.0000",
# "instrument_id":"EOS-USD-SWAP",
# "margin":"0.5523",
# "margin_frozen":"0.0000",
# "margin_mode":"crossed",
# "margin_ratio":"1.0913",
# "realized_pnl":"-0.0006",
# "timestamp":"2019-03-25T03:46:10.336Z",
# "total_avail_balance":"3.0000",
# "unrealized_pnl":"0.0145"
# }
# ]
# }
#
# their root field name is "info", so our info will contain their info
result = {'info': response}
info = self.safe_value(response, 'info', [])
for i in range(0, len(info)):
balance = info[i]
marketId = self.safe_string(balance, 'instrument_id')
symbol = marketId
if marketId in self.markets_by_id:
symbol = self.markets_by_id[marketId]['symbol']
account = self.account()
# it may be incorrect to use total, free and used for swap accounts
account['total'] = self.safe_float(balance, 'equity')
account['free'] = self.safe_float(balance, 'total_avail_balance')
result[symbol] = account
return self.parse_balance(result)
def fetch_balance(self, params={}):
defaultType = self.safe_string_2(self.options, 'fetchBalance', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
if type is None:
raise ArgumentsRequired(self.id + " fetchBalance() requires a type parameter(one of 'account', 'spot', 'margin', 'futures', 'swap')")
self.load_markets()
suffix = 'Wallet' if (type == 'account') else 'Accounts'
method = type + 'Get' + suffix
query = self.omit(params, 'type')
response = getattr(self, method)(query)
#
# account
#
# [
# {
# balance: 0,
# available: 0,
# currency: "BTC",
# hold: 0
# },
# {
# balance: 0,
# available: 0,
# currency: "ETH",
# hold: 0
# }
# ]
#
# spot
#
# [
# {
# frozen: "0",
# hold: "0",
# id: "2149632",
# currency: "BTC",
# balance: "0.0000000497717339",
# available: "0.0000000497717339",
# holds: "0"
# },
# {
# frozen: "0",
# hold: "0",
# id: "2149632",
# currency: "ICN",
# balance: "0.00000000925",
# available: "0.00000000925",
# holds: "0"
# }
# ]
#
# margin
#
# [
# {
# "currency:BTC": {
# "available":"0",
# "balance":"0",
# "borrowed":"0",
# "can_withdraw":"0",
# "frozen":"0",
# "hold":"0",
# "holds":"0",
# "lending_fee":"0"
# },
# "currency:USDT": {
# "available":"100",
# "balance":"100",
# "borrowed":"0",
# "can_withdraw":"100",
# "frozen":"0",
# "hold":"0",
# "holds":"0",
# "lending_fee":"0"
# },
# "instrument_id":"BTC-USDT",
# "liquidation_price":"0",
# "product_id":"BTC-USDT",
# "risk_rate":""
# },
# ]
#
# futures
#
# {
# "info":{
# "eos":{
# "auto_margin":"0",
# "contracts": [
# {
# "available_qty":"40.37069445",
# "fixed_balance":"0",
# "instrument_id":"EOS-USD-190329",
# "margin_for_unfilled":"0",
# "margin_frozen":"0",
# "realized_pnl":"0",
# "unrealized_pnl":"0"
# },
# {
# "available_qty":"40.37069445",
# "fixed_balance":"14.54895721",
# "instrument_id":"EOS-USD-190628",
# "margin_for_unfilled":"0",
# "margin_frozen":"10.64042157",
# "realized_pnl":"-3.90853564",
# "unrealized_pnl":"-0.259"
# },
# ],
# "equity":"50.75220665",
# "margin_mode":"fixed",
# "total_avail_balance":"40.37069445"
# },
# }
# }
#
# swap
#
# {
# "info": [
# {
# "equity":"3.0139",
# "fixed_balance":"0.0000",
# "instrument_id":"EOS-USD-SWAP",
# "margin":"0.5523",
# "margin_frozen":"0.0000",
# "margin_mode":"crossed",
# "margin_ratio":"1.0913",
# "realized_pnl":"-0.0006",
# "timestamp":"2019-03-25T03:46:10.336Z",
# "total_avail_balance":"3.0000",
# "unrealized_pnl":"0.0145"
# }
# ]
# }
#
return self.parse_balance_by_type(type, response)
def parse_balance_by_type(self, type, response):
if (type == 'account') or (type == 'spot'):
return self.parse_account_balance(response)
elif type == 'margin':
return self.parse_margin_balance(response)
elif type == 'futures':
return self.parse_futures_balance(response)
elif type == 'swap':
return self.parse_swap_balance(response)
raise NotSupported(self.id + " fetchBalance does not support the '" + type + "' type(the type must be one of 'account', 'spot', 'margin', 'futures', 'swap')")
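        #
        # Usage sketch (illustrative): fetchBalance routes on the type parameter and
        # falls back to options['defaultType'] ('spot' above) when none is given:
        #
        #     spot_balance = exchange.fetch_balance({'type': 'spot'})
        #     swap_balance = exchange.fetch_balance({'type': 'swap'})
        #     free_usdt = spot_balance['USDT']['free'] if 'USDT' in spot_balance else None
        #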
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'instrument_id': market['id'],
# 'client_oid': 'abcdef1234567890', # [a-z0-9]{1,32}
            # 'order_type': '0', # 0 = Normal limit order, 1 = Post only, 2 = Fill Or Kill, 3 = Immediate Or Cancel, 4 = Market for futures only
}
clientOrderId = self.safe_string_2(params, 'client_oid', 'clientOrderId')
if clientOrderId is not None:
request['client_oid'] = clientOrderId
params = self.omit(params, ['client_oid', 'clientOrderId'])
method = None
if market['futures'] or market['swap']:
size = self.number_to_string(amount) if market['futures'] else self.amount_to_precision(symbol, amount)
request = self.extend(request, {
'type': type, # 1:open long 2:open short 3:close long 4:close short for futures
'size': size,
# 'match_price': '0', # Order at best counter party price?(0:no 1:yes). The default is 0. If it is set as 1, the price parameter will be ignored. When posting orders at best bid price, order_type can only be 0(regular order).
})
orderType = self.safe_string(params, 'order_type')
# order_type == '4' means a market order
isMarketOrder = (type == 'market') or (orderType == '4')
if isMarketOrder:
request['order_type'] = '4'
else:
request['price'] = self.price_to_precision(symbol, price)
if market['futures']:
request['leverage'] = '10' # or '20'
method = market['type'] + 'PostOrder'
else:
marginTrading = self.safe_string(params, 'margin_trading', '1') # 1 = spot, 2 = margin
request = self.extend(request, {
'side': side,
'type': type, # limit/market
'margin_trading': marginTrading, # 1 = spot, 2 = margin
})
if type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['size'] = self.amount_to_precision(symbol, amount)
elif type == 'market':
# for market buy it requires the amount of quote currency to spend
if side == 'buy':
notional = self.safe_float(params, 'notional')
createMarketBuyOrderRequiresPrice = self.safe_value(self.options, 'createMarketBuyOrderRequiresPrice', True)
if createMarketBuyOrderRequiresPrice:
if price is not None:
if notional is None:
notional = amount * price
elif notional is None:
raise InvalidOrder(self.id + " createOrder() requires the price argument with market buy orders to calculate total order cost(amount to spend), where cost = amount * price. Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options['createMarketBuyOrderRequiresPrice'] = False and supply the total cost value in the 'amount' argument or in the 'notional' extra parameter(the exchange-specific behaviour)")
else:
notional = amount if (notional is None) else notional
precision = market['precision']['price']
request['notional'] = self.decimal_to_precision(notional, TRUNCATE, precision, self.precisionMode)
else:
request['size'] = self.amount_to_precision(symbol, amount)
method = 'marginPostOrders' if (marginTrading == '2') else 'spotPostOrders'
response = getattr(self, method)(self.extend(request, params))
#
# {
# "client_oid":"oktspot79",
# "error_code":"",
# "error_message":"",
# "order_id":"2510789768709120",
# "result":true
# }
#
order = self.parse_order(response, market)
return self.extend(order, {
'type': type,
'side': side,
})
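        #
        # Usage sketches with illustrative values: a spot limit order, and a spot market
        # buy where createMarketBuyOrderRequiresPrice derives the quote notional from
        # amount * price unless it is disabled:
        #
        #     order = exchange.create_order('BTC/USDT', 'limit', 'buy', 0.001, 40000)
        #     order = exchange.create_order('BTC/USDT', 'market', 'buy', 0.001, 40000)  # spends ~40 USDT
        #     exchange.options['createMarketBuyOrderRequiresPrice'] = False
        #     order = exchange.create_order('BTC/USDT', 'market', 'buy', 40)  # 40 is the quote amount to spend
        #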
def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
type = None
if market['futures'] or market['swap']:
type = market['type']
else:
defaultType = self.safe_string_2(self.options, 'cancelOrder', 'defaultType', market['type'])
type = self.safe_string(params, 'type', defaultType)
if type is None:
raise ArgumentsRequired(self.id + " cancelOrder() requires a type parameter(one of 'spot', 'margin', 'futures', 'swap').")
method = type + 'PostCancelOrder'
request = {
'instrument_id': market['id'],
}
if market['futures'] or market['swap']:
method += 'InstrumentId'
else:
method += 's'
clientOrderId = self.safe_string_2(params, 'client_oid', 'clientOrderId')
if clientOrderId is not None:
method += 'ClientOid'
request['client_oid'] = clientOrderId
else:
method += 'OrderId'
request['order_id'] = id
query = self.omit(params, ['type', 'client_oid', 'clientOrderId'])
response = getattr(self, method)(self.extend(request, query))
result = response if ('result' in response) else self.safe_value(response, market['id'], {})
#
# spot, margin
#
# {
# "btc-usdt": [
# {
# "result":true,
# "client_oid":"a123",
# "order_id": "2510832677225473"
# }
# ]
# }
#
# futures, swap
#
# {
# "result": True,
# "client_oid": "oktfuture10", # missing if requested by order_id
# "order_id": "2517535534836736",
# "instrument_id": "EOS-USD-190628"
# }
#
return self.parse_order(result, market)
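        #
        # Usage sketch (illustrative ids): cancelling by exchange order id or by client
        # order id, the latter switching to the ...ClientOid endpoint selected above:
        #
        #     exchange.cancel_order('2510832677225473', 'BTC/USDT')
        #     exchange.cancel_order(None, 'BTC/USDT', {'client_oid': 'a123'})
        #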
def parse_order_status(self, status):
statuses = {
'-2': 'failed',
'-1': 'canceled',
'0': 'open',
'1': 'open',
'2': 'closed',
'3': 'open',
'4': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order_side(self, side):
sides = {
'1': 'buy', # open long
'2': 'sell', # open short
'3': 'sell', # close long
'4': 'buy', # close short
}
return self.safe_string(sides, side, side)
def parse_order(self, order, market=None):
#
# createOrder
#
# {
# "client_oid":"oktspot79",
# "error_code":"",
# "error_message":"",
# "order_id":"2510789768709120",
# "result":true
# }
#
# cancelOrder
#
# {
# "result": True,
# "client_oid": "oktfuture10", # missing if requested by order_id
# "order_id": "2517535534836736",
# # instrument_id is missing for spot/margin orders
# # available in futures and swap orders only
# "instrument_id": "EOS-USD-190628",
# }
#
# fetchOrder, fetchOrdersByState, fetchOpenOrders, fetchClosedOrders
#
# # spot and margin orders
#
# {
# "client_oid":"oktspot76",
# "created_at":"2019-03-18T07:26:49.000Z",
# "filled_notional":"3.9734",
# "filled_size":"0.001", # filled_qty in futures and swap orders
        #         "funds":"", # this is most likely the same as notional
# "instrument_id":"BTC-USDT",
# "notional":"",
# "order_id":"2500723297813504",
# "order_type":"0",
# "price":"4013",
# "product_id":"BTC-USDT", # missing in futures and swap orders
# "side":"buy",
# "size":"0.001",
# "status":"filled",
# "state": "2",
# "timestamp":"2019-03-18T07:26:49.000Z",
# "type":"limit"
# }
#
# # futures and swap orders
#
# {
# "instrument_id":"EOS-USD-190628",
# "size":"10",
# "timestamp":"2019-03-20T10:04:55.000Z",
# "filled_qty":"10", # filled_size in spot and margin orders
# "fee":"-0.00841043",
# "order_id":"2512669605501952",
# "price":"3.668",
# "price_avg":"3.567", # missing in spot and margin orders
# "status":"2",
# "state": "2",
# "type":"4",
# "contract_val":"10",
# "leverage":"10", # missing in swap, spot and margin orders
# "client_oid":"",
        #         "pnl":"1.09510794", # missing in swap, spot and margin orders
# "order_type":"0"
# }
#
id = self.safe_string(order, 'order_id')
timestamp = self.parse8601(self.safe_string(order, 'timestamp'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'type')
if (side != 'buy') and (side != 'sell'):
side = self.parse_order_side(type)
symbol = None
marketId = self.safe_string(order, 'instrument_id')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
else:
symbol = marketId
if market is not None:
if symbol is None:
symbol = market['symbol']
amount = self.safe_float(order, 'size')
filled = self.safe_float_2(order, 'filled_size', 'filled_qty')
remaining = None
if amount is not None:
if filled is not None:
amount = max(amount, filled)
remaining = max(0, amount - filled)
if type == 'market':
remaining = 0
cost = self.safe_float_2(order, 'filled_notional', 'funds')
price = self.safe_float(order, 'price')
average = self.safe_float(order, 'price_avg')
if cost is None:
if filled is not None and average is not None:
cost = average * filled
else:
if (average is None) and (filled is not None) and (filled > 0):
average = cost / filled
status = self.parse_order_status(self.safe_string(order, 'state'))
feeCost = self.safe_float(order, 'fee')
fee = None
if feeCost is not None:
feeCurrency = None
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
clientOrderId = self.safe_string(order, 'client_oid')
if (clientOrderId is not None) and (len(clientOrderId) < 1):
clientOrderId = None # fix empty clientOrderId string
stopPrice = self.safe_float(order, 'trigger_price')
return {
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': stopPrice,
'average': average,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': None,
}
def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
defaultType = self.safe_string_2(self.options, 'fetchOrder', 'defaultType', market['type'])
type = self.safe_string(params, 'type', defaultType)
if type is None:
raise ArgumentsRequired(self.id + " fetchOrder() requires a type parameter(one of 'spot', 'margin', 'futures', 'swap').")
instrumentId = 'InstrumentId' if (market['futures'] or market['swap']) else ''
method = type + 'GetOrders' + instrumentId
request = {
'instrument_id': market['id'],
# 'client_oid': 'abcdef12345', # optional, [a-z0-9]{1,32}
# 'order_id': id,
}
clientOid = self.safe_string(params, 'client_oid')
if clientOid is not None:
method += 'ClientOid'
request['client_oid'] = clientOid
else:
method += 'OrderId'
request['order_id'] = id
query = self.omit(params, 'type')
response = getattr(self, method)(self.extend(request, query))
#
# spot, margin
#
# {
# "client_oid":"oktspot70",
# "created_at":"2019-03-15T02:52:56.000Z",
# "filled_notional":"3.8886",
# "filled_size":"0.001",
# "funds":"",
# "instrument_id":"BTC-USDT",
# "notional":"",
# "order_id":"2482659399697408",
# "order_type":"0",
# "price":"3927.3",
# "product_id":"BTC-USDT",
# "side":"buy",
# "size":"0.001",
# "status":"filled",
# "state": "2",
# "timestamp":"2019-03-15T02:52:56.000Z",
# "type":"limit"
# }
#
# futures, swap
#
# {
# "instrument_id":"EOS-USD-190628",
# "size":"10",
# "timestamp":"2019-03-20T02:46:38.000Z",
# "filled_qty":"10",
# "fee":"-0.0080819",
# "order_id":"2510946213248000",
# "price":"3.712",
# "price_avg":"3.712",
# "status":"2",
# "state": "2",
# "type":"2",
# "contract_val":"10",
# "leverage":"10",
# "client_oid":"", # missing in swap orders
# "pnl":"0", # missing in swap orders
# "order_type":"0"
# }
#
return self.parse_order(response)
def fetch_orders_by_state(self, state, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrdersByState() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
type = None
if market['futures'] or market['swap']:
type = market['type']
else:
defaultType = self.safe_string_2(self.options, 'fetchOrder', 'defaultType', market['type'])
type = self.safe_string(params, 'type', defaultType)
if type is None:
raise ArgumentsRequired(self.id + " fetchOrdersByState() requires a type parameter(one of 'spot', 'margin', 'futures', 'swap').")
request = {
'instrument_id': market['id'],
# '-2': failed,
# '-1': cancelled,
# '0': open ,
# '1': partially filled,
# '2': fully filled,
# '3': submitting,
# '4': cancelling,
            # '6': incomplete(open+partially filled),
            # '7': complete(cancelled+fully filled),
'state': state,
}
method = type + 'GetOrders'
if market['futures'] or market['swap']:
method += 'InstrumentId'
query = self.omit(params, 'type')
response = getattr(self, method)(self.extend(request, query))
#
# spot, margin
#
# [
        #     # in fact, the documented API response does not correspond
# # to their actual API response for spot markets
# # OKEX v3 API returns a plain array of orders(see below)
# [
# {
# "client_oid":"oktspot76",
# "created_at":"2019-03-18T07:26:49.000Z",
# "filled_notional":"3.9734",
# "filled_size":"0.001",
# "funds":"",
# "instrument_id":"BTC-USDT",
# "notional":"",
# "order_id":"2500723297813504",
# "order_type":"0",
# "price":"4013",
# "product_id":"BTC-USDT",
# "side":"buy",
# "size":"0.001",
# "status":"filled",
# "state": "2",
# "timestamp":"2019-03-18T07:26:49.000Z",
# "type":"limit"
# },
# ],
# {
# "before":"2500723297813504",
# "after":"2500650881647616"
# }
# ]
#
# futures, swap
#
# {
# "result":true, # missing in swap orders
# "order_info": [
# {
# "instrument_id":"EOS-USD-190628",
# "size":"10",
# "timestamp":"2019-03-20T10:04:55.000Z",
# "filled_qty":"10",
# "fee":"-0.00841043",
# "order_id":"2512669605501952",
# "price":"3.668",
# "price_avg":"3.567",
# "status":"2",
# "state": "2",
# "type":"4",
# "contract_val":"10",
# "leverage":"10", # missing in swap orders
# "client_oid":"",
# "pnl":"1.09510794", # missing in swap orders
# "order_type":"0"
# },
# ]
# }
#
orders = None
if market['swap'] or market['futures']:
orders = self.safe_value(response, 'order_info', [])
else:
orders = response
responseLength = len(response)
if responseLength < 1:
return []
        # in fact, the documented API response does not correspond
# to their actual API response for spot markets
# OKEX v3 API returns a plain array of orders
if responseLength > 1:
before = self.safe_value(response[1], 'before')
if before is not None:
orders = response[0]
return self.parse_orders(orders, market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
# '-2': failed,
# '-1': cancelled,
# '0': open ,
# '1': partially filled,
# '2': fully filled,
# '3': submitting,
# '4': cancelling,
        # '6': incomplete(open+partially filled),
        # '7': complete(cancelled+fully filled),
return self.fetch_orders_by_state('6', symbol, since, limit, params)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
# '-2': failed,
# '-1': cancelled,
# '0': open ,
# '1': partially filled,
# '2': fully filled,
# '3': submitting,
# '4': cancelling,
        # '6': incomplete(open+partially filled),
        # '7': complete(cancelled+fully filled),
return self.fetch_orders_by_state('7', symbol, since, limit, params)
def parse_deposit_addresses(self, addresses):
result = {}
for i in range(0, len(addresses)):
address = self.parse_deposit_address(addresses[i])
code = address['currency']
result[code] = address
return result
def parse_deposit_address(self, depositAddress, currency=None):
#
# {
# address: '0x696abb81974a8793352cbd33aadcf78eda3cfdfa',
# currency: 'eth'
# tag: 'abcde12345', # will be missing if the token does not require a deposit tag
# payment_id: 'abcde12345', # will not be returned if the token does not require a payment_id
# # can_deposit: 1, # 0 or 1, documented but missing
# # can_withdraw: 1, # 0 or 1, documented but missing
# }
#
address = self.safe_string(depositAddress, 'address')
tag = self.safe_string_2(depositAddress, 'tag', 'payment_id')
tag = self.safe_string(depositAddress, 'memo', tag)
currencyId = self.safe_string(depositAddress, 'currency')
code = self.safe_currency_code(currencyId)
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': depositAddress,
}
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = self.accountGetDepositAddress(self.extend(request, params))
#
# [
# {
# address: '0x696abb81974a8793352cbd33aadcf78eda3cfdfa',
# currency: 'eth'
# }
# ]
#
addresses = self.parse_deposit_addresses(response)
address = self.safe_value(addresses, code)
if address is None:
raise InvalidAddress(self.id + ' fetchDepositAddress cannot return nonexistent addresses, you should create withdrawal addresses with the exchange website first')
return address
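        #
        # Usage sketch (illustrative): the address must already exist on the exchange
        # website, otherwise InvalidAddress is raised above:
        #
        #     deposit = exchange.fetch_deposit_address('ETH')
        #     address, tag = deposit['address'], deposit['tag']
        #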
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
self.load_markets()
currency = self.currency(code)
if tag:
address = address + ':' + tag
fee = self.safe_string(params, 'fee')
if fee is None:
            raise ArgumentsRequired(self.id + " withdraw() requires a `fee` string parameter, network transaction fee must be ≥ 0. Withdrawals to OKCoin or OKEx are fee-free, please set '0'. Withdrawing to external digital asset address requires network transaction fee.")
request = {
'currency': currency['id'],
'to_address': address,
'destination': '4', # 2 = OKCoin International, 3 = OKEx 4 = others
'amount': self.number_to_string(amount),
            'fee': fee, # String. Network transaction fee ≥ 0. Withdrawals to OKCoin or OKEx are fee-free, please set as 0. Withdrawal to external digital asset address requires network transaction fee.
}
if 'password' in params:
request['trade_pwd'] = params['password']
elif 'trade_pwd' in params:
request['trade_pwd'] = params['trade_pwd']
elif self.password:
request['trade_pwd'] = self.password
query = self.omit(params, ['fee', 'password', 'trade_pwd'])
if not ('trade_pwd' in request):
raise ExchangeError(self.id + ' withdraw() requires self.password set on the exchange instance or a password / trade_pwd parameter')
response = self.accountPostWithdrawal(self.extend(request, query))
#
# {
# "amount":"0.1",
# "withdrawal_id":"67485",
# "currency":"btc",
# "result":true
# }
#
return {
'info': response,
'id': self.safe_string(response, 'withdrawal_id'),
}
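        #
        # Usage sketch with illustrative values: a `fee` string is required, and the
        # trade password is taken from params['password'], params['trade_pwd'], or
        # self.password as implemented above:
        #
        #     result = exchange.withdraw('ETH', 0.1, '0x26a3CB49578F07000575405a57888681249c35Fd', None, {
        #         'fee': '0.005',
        #         'password': 'funding-password',
        #     })
        #     withdrawal_id = result['id']
        #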
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
method = 'accountGetDepositHistory'
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
method += 'Currency'
response = getattr(self, method)(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit, params)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
method = 'accountGetWithdrawalHistory'
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
method += 'Currency'
response = getattr(self, method)(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit, params)
def parse_transaction_status(self, status):
#
# deposit statuses
#
# {
# '0': 'waiting for confirmation',
# '1': 'confirmation account',
# '2': 'recharge success'
# }
#
# withdrawal statues
#
# {
# '-3': 'pending cancel',
# '-2': 'cancelled',
# '-1': 'failed',
# '0': 'pending',
# '1': 'sending',
# '2': 'sent',
# '3': 'email confirmation',
# '4': 'manual confirmation',
# '5': 'awaiting identity confirmation'
# }
#
statuses = {
'-3': 'pending',
'-2': 'canceled',
'-1': 'failed',
'0': 'pending',
'1': 'pending',
'2': 'ok',
'3': 'pending',
'4': 'pending',
'5': 'pending',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "amount":"0.1",
# "withdrawal_id":"67485",
# "currency":"btc",
# "result":true
# }
#
# fetchWithdrawals
#
# {
# amount: "4.72100000",
# withdrawal_id: "1729116",
# fee: "0.01000000eth",
        #         txid: "0xf653125bbf090bcfe4b5e8e7b8f586a9d87aa7de94598702758c0802b…",
# currency: "ETH",
# from: "7147338839",
# to: "0x26a3CB49578F07000575405a57888681249c35Fd",
# timestamp: "2018-08-17T07:03:42.000Z",
# status: "2"
# }
#
# fetchDeposits
#
# {
# "amount": "4.19511659",
# "txid": "14c9a8c925647cdb7e5b2937ea9aefe2b29b2c273150ad3f44b3b8a4635ed437",
# "currency": "XMR",
# "from": "",
# "to": "48PjH3ksv1fiXniKvKvyH5UtFs5WhfS2Vf7U3TwzdRJtCc7HJWvCQe56dRahyhQyTAViXZ8Nzk4gQg6o4BJBMUoxNy8y8g7",
# "tag": "1234567",
        #         "deposit_id": 11571659, <-- we can use this
# "timestamp": "2019-10-01T14:54:19.000Z",
# "status": "2"
# }
#
type = None
id = None
address = None
withdrawalId = self.safe_string(transaction, 'withdrawal_id')
addressFrom = self.safe_string(transaction, 'from')
addressTo = self.safe_string(transaction, 'to')
tagTo = self.safe_string(transaction, 'tag')
if withdrawalId is not None:
type = 'withdrawal'
id = withdrawalId
address = addressTo
else:
# the payment_id will appear on new deposits but appears to be removed from the response after 2 months
id = self.safe_string_2(transaction, 'payment_id', 'deposit_id')
type = 'deposit'
address = addressTo
currencyId = self.safe_string(transaction, 'currency')
code = self.safe_currency_code(currencyId)
amount = self.safe_float(transaction, 'amount')
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
txid = self.safe_string(transaction, 'txid')
timestamp = self.parse8601(self.safe_string(transaction, 'timestamp'))
feeCost = None
if type == 'deposit':
feeCost = 0
else:
if currencyId is not None:
feeWithCurrencyId = self.safe_string(transaction, 'fee')
if feeWithCurrencyId is not None:
# https://github.com/ccxt/ccxt/pull/5748
lowercaseCurrencyId = currencyId.lower()
feeWithoutCurrencyId = feeWithCurrencyId.replace(lowercaseCurrencyId, '')
feeCost = float(feeWithoutCurrencyId)
# todo parse tags
return {
'info': transaction,
'id': id,
'currency': code,
'amount': amount,
'addressFrom': addressFrom,
'addressTo': addressTo,
'address': address,
'tagFrom': None,
'tagTo': tagTo,
'tag': tagTo,
'status': status,
'type': type,
'updated': None,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': {
'currency': code,
'cost': feeCost,
},
}
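        #
        # Worked example of the withdrawal fee parsing above: for currency "ETH" a fee
        # string such as "0.01000000eth" has the lowercased currency id stripped before
        # conversion, while deposits always get a fee cost of 0:
        #
        #     '0.01000000eth'.replace('eth', '')  # -> '0.01000000' -> 0.01
        #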
def parse_my_trade(self, pair, market=None):
# check that trading symbols match in both entries
userTrade = self.safe_value(pair, 1)
otherTrade = self.safe_value(pair, 0)
firstMarketId = self.safe_string(otherTrade, 'instrument_id')
secondMarketId = self.safe_string(userTrade, 'instrument_id')
if firstMarketId != secondMarketId:
raise NotSupported(self.id + ' parseMyTrade() received unrecognized response format, differing instrument_ids in one fill, the exchange API might have changed, paste your verbose output: https://github.com/ccxt/ccxt/wiki/FAQ#what-is-required-to-get-help')
marketId = firstMarketId
market = self.safe_market(marketId, market)
symbol = market['symbol']
quoteId = market['quoteId']
side = None
amount = None
cost = None
receivedCurrencyId = self.safe_string(userTrade, 'currency')
feeCurrencyId = None
if receivedCurrencyId == quoteId:
side = self.safe_string(otherTrade, 'side')
amount = self.safe_float(otherTrade, 'size')
cost = self.safe_float(userTrade, 'size')
feeCurrencyId = self.safe_string(otherTrade, 'currency')
else:
side = self.safe_string(userTrade, 'side')
amount = self.safe_float(userTrade, 'size')
cost = self.safe_float(otherTrade, 'size')
feeCurrencyId = self.safe_string(userTrade, 'currency')
id = self.safe_string(userTrade, 'trade_id')
price = self.safe_float(userTrade, 'price')
feeCostFirst = self.safe_float(otherTrade, 'fee')
feeCostSecond = self.safe_float(userTrade, 'fee')
feeCurrencyCodeFirst = self.safe_currency_code(self.safe_string(otherTrade, 'currency'))
feeCurrencyCodeSecond = self.safe_currency_code(self.safe_string(userTrade, 'currency'))
fee = None
fees = None
# fee is either a positive number(invitation rebate)
# or a negative number(transaction fee deduction)
# therefore we need to invert the fee
# more about it https://github.com/ccxt/ccxt/issues/5909
if (feeCostFirst is not None) and (feeCostFirst != 0):
if (feeCostSecond is not None) and (feeCostSecond != 0):
fees = [
{
'cost': -feeCostFirst,
'currency': feeCurrencyCodeFirst,
},
{
'cost': -feeCostSecond,
'currency': feeCurrencyCodeSecond,
},
]
else:
fee = {
'cost': -feeCostFirst,
'currency': feeCurrencyCodeFirst,
}
elif (feeCostSecond is not None) and (feeCostSecond != 0):
fee = {
'cost': -feeCostSecond,
'currency': feeCurrencyCodeSecond,
}
else:
fee = {
'cost': 0,
'currency': self.safe_currency_code(feeCurrencyId),
}
#
# simplified structures to show the underlying semantics
#
# # market/limit sell
#
# {
# "currency":"USDT",
# "fee":"-0.04647925", # โ--- fee in received quote currency
# "price":"129.13", # โ------ price
# "size":"30.98616393", # โ-- cost
# },
# {
# "currency":"ETH",
# "fee":"0",
# "price":"129.13",
# "size":"0.23996099", # โ--- amount
# },
#
# # market/limit buy
#
# {
# "currency":"ETH",
# "fee":"-0.00036049", # โ--- fee in received base currency
# "price":"129.16", # โ------ price
# "size":"0.240322", # โ----- amount
# },
# {
# "currency":"USDT",
# "fee":"0",
# "price":"129.16",
# "size":"31.03998952", # โ-- cost
# }
#
timestamp = self.parse8601(self.safe_string_2(userTrade, 'timestamp', 'created_at'))
takerOrMaker = self.safe_string_2(userTrade, 'exec_type', 'liquidity')
if takerOrMaker == 'M':
takerOrMaker = 'maker'
elif takerOrMaker == 'T':
takerOrMaker = 'taker'
orderId = self.safe_string(userTrade, 'order_id')
result = {
'info': pair,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': id,
'order': orderId,
'type': None,
'takerOrMaker': takerOrMaker,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
if fees is not None:
result['fees'] = fees
return result
def parse_my_trades(self, trades, market=None, since=None, limit=None, params={}):
grouped = self.group_by(trades, 'trade_id')
tradeIds = list(grouped.keys())
result = []
for i in range(0, len(tradeIds)):
tradeId = tradeIds[i]
pair = grouped[tradeId]
# make sure it has exactly 2 trades, no more, no less
numTradesInPair = len(pair)
if numTradesInPair == 2:
trade = self.parse_my_trade(pair)
result.append(trade)
symbol = None
if market is not None:
symbol = market['symbol']
return self.filter_by_symbol_since_limit(result, symbol, since, limit)
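    # Sketch of the pairing above (values assumed): two ledger-style fills sharing
    # trade_id '18551601' -- one in the quote currency and one in the base currency --
    # are merged into one unified trade; groups without exactly two entries are skipped.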
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
# okex actually returns ledger entries instead of fills here, so each fill in the order
# is represented by two trades with opposite buy/sell sides, not one :\
        # this aspect renders the 'fills' endpoint unusable for fetchOrderTrades
        # until either OKEX fixes the API or we work around this on our side somehow
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
if (limit is not None) and (limit > 100):
limit = 100
request = {
'instrument_id': market['id'],
# 'order_id': id, # string
# 'after': '1', # pagination of data to return records earlier than the requested ledger_id
            # 'before': '1', # pagination of data to return records newer than the requested ledger_id
# 'limit': limit, # optional, number of results per request, default = maximum = 100
}
defaultType = self.safe_string_2(self.options, 'fetchMyTrades', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
method = type + 'GetFills'
response = getattr(self, method)(self.extend(request, query))
#
# [
# # sell
# {
# "created_at":"2020-03-29T11:55:25.000Z",
# "currency":"USDT",
# "exec_type":"T",
# "fee":"-0.04647925",
# "instrument_id":"ETH-USDT",
# "ledger_id":"10562924353",
# "liquidity":"T",
# "order_id":"4636470489136128",
# "price":"129.13",
# "product_id":"ETH-USDT",
# "side":"buy",
# "size":"30.98616393",
# "timestamp":"2020-03-29T11:55:25.000Z",
# "trade_id":"18551601"
# },
# {
# "created_at":"2020-03-29T11:55:25.000Z",
# "currency":"ETH",
# "exec_type":"T",
# "fee":"0",
# "instrument_id":"ETH-USDT",
# "ledger_id":"10562924352",
# "liquidity":"T",
# "order_id":"4636470489136128",
# "price":"129.13",
# "product_id":"ETH-USDT",
# "side":"sell",
# "size":"0.23996099",
# "timestamp":"2020-03-29T11:55:25.000Z",
# "trade_id":"18551601"
# },
# # buy
# {
# "created_at":"2020-03-29T11:55:16.000Z",
# "currency":"ETH",
# "exec_type":"T",
# "fee":"-0.00036049",
# "instrument_id":"ETH-USDT",
# "ledger_id":"10562922669",
# "liquidity":"T",
# "order_id": "4636469894136832",
# "price":"129.16",
# "product_id":"ETH-USDT",
# "side":"buy",
# "size":"0.240322",
# "timestamp":"2020-03-29T11:55:16.000Z",
# "trade_id":"18551600"
# },
# {
# "created_at":"2020-03-29T11:55:16.000Z",
# "currency":"USDT",
# "exec_type":"T",
# "fee":"0",
# "instrument_id":"ETH-USDT",
# "ledger_id":"10562922668",
# "liquidity":"T",
# "order_id":"4636469894136832",
# "price":"129.16",
# "product_id":"ETH-USDT",
# "side":"sell",
# "size":"31.03998952",
# "timestamp":"2020-03-29T11:55:16.000Z",
# "trade_id":"18551600"
# }
# ]
#
return self.parse_my_trades(response, market, since, limit, params)
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
request = {
# 'instrument_id': market['id'],
'order_id': id,
# 'after': '1', # return the page after the specified page number
# 'before': '1', # return the page before the specified page number
# 'limit': limit, # optional, number of results per request, default = maximum = 100
}
return self.fetch_my_trades(symbol, since, limit, self.extend(request, params))
def fetch_position(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
method = None
request = {
'instrument_id': market['id'],
# 'order_id': id, # string
# 'after': '1', # pagination of data to return records earlier than the requested ledger_id
            # 'before': '1', # pagination of data to return records newer than the requested ledger_id
# 'limit': limit, # optional, number of results per request, default = maximum = 100
}
type = market['type']
if (type == 'futures') or (type == 'swap'):
method = type + 'GetInstrumentIdPosition'
elif type == 'option':
underlying = self.safe_string(params, 'underlying')
if underlying is None:
raise ArgumentsRequired(self.id + ' fetchPosition() requires an underlying parameter for ' + type + ' market ' + symbol)
method = type + 'GetUnderlyingPosition'
else:
raise NotSupported(self.id + ' fetchPosition() does not support ' + type + ' market ' + symbol + ', supported market types are futures, swap or option')
response = getattr(self, method)(self.extend(request, params))
#
# futures
#
# crossed margin mode
#
# {
# "result": True,
# "holding": [
# {
# "long_qty": "2",
# "long_avail_qty": "2",
# "long_avg_cost": "8260",
# "long_settlement_price": "8260",
# "realised_pnl": "0.00020928",
# "short_qty": "2",
# "short_avail_qty": "2",
# "short_avg_cost": "8259.99",
# "short_settlement_price": "8259.99",
# "liquidation_price": "113.81",
# "instrument_id": "BTC-USD-191227",
# "leverage": "10",
# "created_at": "2019-09-25T07:58:42.129Z",
# "updated_at": "2019-10-08T14:02:51.029Z",
# "margin_mode": "crossed",
# "short_margin": "0.00242197",
# "short_pnl": "6.63E-6",
# "short_pnl_ratio": "0.002477997",
# "short_unrealised_pnl": "6.63E-6",
# "long_margin": "0.00242197",
# "long_pnl": "-6.65E-6",
# "long_pnl_ratio": "-0.002478",
# "long_unrealised_pnl": "-6.65E-6",
# "long_settled_pnl": "0",
# "short_settled_pnl": "0",
# "last": "8257.57"
# }
# ],
# "margin_mode": "crossed"
# }
#
# fixed margin mode
#
# {
# "result": True,
# "holding": [
# {
# "long_qty": "4",
# "long_avail_qty": "4",
# "long_margin": "0.00323844",
# "long_liqui_price": "7762.09",
# "long_pnl_ratio": "0.06052306",
# "long_avg_cost": "8234.43",
# "long_settlement_price": "8234.43",
# "realised_pnl": "-0.00000296",
# "short_qty": "2",
# "short_avail_qty": "2",
# "short_margin": "0.00241105",
# "short_liqui_price": "9166.74",
# "short_pnl_ratio": "0.03318052",
# "short_avg_cost": "8295.13",
# "short_settlement_price": "8295.13",
# "instrument_id": "BTC-USD-191227",
# "long_leverage": "15",
# "short_leverage": "10",
# "created_at": "2019-09-25T07:58:42.129Z",
# "updated_at": "2019-10-08T13:12:09.438Z",
# "margin_mode": "fixed",
# "short_margin_ratio": "0.10292507",
# "short_maint_margin_ratio": "0.005",
# "short_pnl": "7.853E-5",
# "short_unrealised_pnl": "7.853E-5",
# "long_margin_ratio": "0.07103743",
# "long_maint_margin_ratio": "0.005",
# "long_pnl": "1.9841E-4",
# "long_unrealised_pnl": "1.9841E-4",
# "long_settled_pnl": "0",
# "short_settled_pnl": "0",
# "last": "8266.99"
# }
# ],
# "margin_mode": "fixed"
# }
#
# swap
#
# crossed margin mode
#
# {
# "margin_mode": "crossed",
# "timestamp": "2019-09-27T03:49:02.018Z",
# "holding": [
# {
# "avail_position": "3",
# "avg_cost": "59.49",
# "instrument_id": "LTC-USD-SWAP",
# "last": "55.98",
# "leverage": "10.00",
# "liquidation_price": "4.37",
# "maint_margin_ratio": "0.0100",
# "margin": "0.0536",
# "position": "3",
# "realized_pnl": "0.0000",
# "unrealized_pnl": "0",
# "settled_pnl": "-0.0330",
# "settlement_price": "55.84",
# "side": "long",
# "timestamp": "2019-09-27T03:49:02.018Z"
# },
# ]
# }
#
# fixed margin mode
#
# {
# "margin_mode": "fixed",
# "timestamp": "2019-09-27T03:47:37.230Z",
# "holding": [
# {
# "avail_position": "20",
# "avg_cost": "8025.0",
# "instrument_id": "BTC-USD-SWAP",
# "last": "8113.1",
# "leverage": "15.00",
# "liquidation_price": "7002.6",
# "maint_margin_ratio": "0.0050",
# "margin": "0.0454",
# "position": "20",
# "realized_pnl": "-0.0001",
# "unrealized_pnl": "0",
# "settled_pnl": "0.0076",
# "settlement_price": "8279.2",
# "side": "long",
# "timestamp": "2019-09-27T03:47:37.230Z"
# }
# ]
# }
#
# option
#
# {
# "holding":[
# {
# "instrument_id":"BTC-USD-190927-12500-C",
# "position":"20",
# "avg_cost":"3.26",
# "avail_position":"20",
# "settlement_price":"0.017",
# "total_pnl":"50",
# "pnl_ratio":"0.3",
# "realized_pnl":"40",
# "unrealized_pnl":"10",
# "pos_margin":"100",
# "option_value":"70",
# "created_at":"2019-08-30T03:09:20.315Z",
# "updated_at":"2019-08-30T03:40:18.318Z"
# },
# {
# "instrument_id":"BTC-USD-190927-12500-P",
# "position":"20",
# "avg_cost":"3.26",
# "avail_position":"20",
# "settlement_price":"0.019",
# "total_pnl":"50",
# "pnl_ratio":"0.3",
# "realized_pnl":"40",
# "unrealized_pnl":"10",
# "pos_margin":"100",
# "option_value":"70",
# "created_at":"2019-08-30T03:09:20.315Z",
# "updated_at":"2019-08-30T03:40:18.318Z"
# }
# ]
# }
#
# todo unify parsePosition/parsePositions
return response
def fetch_positions(self, symbols=None, since=None, limit=None, params={}):
self.load_markets()
method = None
defaultType = self.safe_string_2(self.options, 'fetchPositions', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
if (type == 'futures') or (type == 'swap'):
method = type + 'GetPosition'
elif type == 'option':
underlying = self.safe_string(params, 'underlying')
if underlying is None:
raise ArgumentsRequired(self.id + ' fetchPositions() requires an underlying parameter for ' + type + ' markets')
method = type + 'GetUnderlyingPosition'
else:
raise NotSupported(self.id + ' fetchPositions() does not support ' + type + ' markets, supported market types are futures, swap or option')
params = self.omit(params, 'type')
response = getattr(self, method)(params)
#
# futures
#
# ...
#
#
# swap
#
# ...
#
# option
#
# {
# "holding":[
# {
# "instrument_id":"BTC-USD-190927-12500-C",
# "position":"20",
# "avg_cost":"3.26",
# "avail_position":"20",
# "settlement_price":"0.017",
# "total_pnl":"50",
# "pnl_ratio":"0.3",
# "realized_pnl":"40",
# "unrealized_pnl":"10",
# "pos_margin":"100",
# "option_value":"70",
# "created_at":"2019-08-30T03:09:20.315Z",
# "updated_at":"2019-08-30T03:40:18.318Z"
# },
# {
# "instrument_id":"BTC-USD-190927-12500-P",
# "position":"20",
# "avg_cost":"3.26",
# "avail_position":"20",
# "settlement_price":"0.019",
# "total_pnl":"50",
# "pnl_ratio":"0.3",
# "realized_pnl":"40",
# "unrealized_pnl":"10",
# "pos_margin":"100",
# "option_value":"70",
# "created_at":"2019-08-30T03:09:20.315Z",
# "updated_at":"2019-08-30T03:40:18.318Z"
# }
# ]
# }
#
# todo unify parsePosition/parsePositions
return response
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
self.load_markets()
defaultType = self.safe_string_2(self.options, 'fetchLedger', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
suffix = '' if (type == 'account') else 'Accounts'
argument = ''
request = {
# 'from': 'id',
# 'to': 'id',
}
if limit is not None:
request['limit'] = limit
currency = None
if (type == 'spot') or (type == 'futures'):
if code is None:
raise ArgumentsRequired(self.id + " fetchLedger() requires a currency code argument for '" + type + "' markets")
argument = 'Currency'
currency = self.currency(code)
request['currency'] = currency['id']
elif (type == 'margin') or (type == 'swap'):
if code is None:
raise ArgumentsRequired(self.id + " fetchLedger() requires a code argument(a market symbol) for '" + type + "' markets")
argument = 'InstrumentId'
market = self.market(code) # we intentionally put a market inside here for the margin and swap ledgers
currency = self.currency(market['base'])
request['instrument_id'] = market['id']
#
# if type == 'margin':
# #
# # 3. Borrow
# # 4. Repayment
# # 5. Interest
# # 7. Buy
# # 8. Sell
# # 9. From capital account
# # 10. From C2C
# # 11. From Futures
# # 12. From Spot
# # 13. From ETT
# # 14. To capital account
# # 15. To C2C
# # 16. To Spot
# # 17. To Futures
# # 18. To ETT
# # 19. Mandatory Repayment
# # 20. From Piggybank
# # 21. To Piggybank
# # 22. From Perpetual
# # 23. To Perpetual
# # 24. Liquidation Fee
# # 54. Clawback
# # 59. Airdrop Return.
# #
        #     request['type'] = 'number' # All types will be returned if this field is left blank
# }
#
elif type == 'account':
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
#
# #
# # 1. deposit
# # 2. withdrawal
# # 13. cancel withdrawal
# # 18. into futures account
# # 19. out of futures account
# # 20. into sub account
# # 21. out of sub account
# # 28. claim
# # 29. into ETT account
# # 30. out of ETT account
# # 31. into C2C account
# # 32. out of C2C account
# # 33. into margin account
# # 34. out of margin account
# # 37. into spot account
# # 38. out of spot account
# #
# request['type'] = 'number'
#
else:
raise NotSupported(self.id + " fetchLedger does not support the '" + type + "' type(the type must be one of 'account', 'spot', 'margin', 'futures', 'swap')")
method = type + 'Get' + suffix + argument + 'Ledger'
response = getattr(self, method)(self.extend(request, query))
#
# transfer funds transfer in/out
# trade funds moved as a result of a trade, spot and margin accounts only
# rebate fee rebate as per fee schedule, spot and margin accounts only
# match open long/open short/close long/close short(futures) or a change in the amount because of trades(swap)
# fee fee, futures only
# settlement settlement/clawback/settle long/settle short
# liquidation force close long/force close short/deliver close long/deliver close short
# funding funding fee, swap only
# margin a change in the amount after adjusting margin, swap only
#
# account
#
# [
# {
# "amount":0.00051843,
# "balance":0.00100941,
# "currency":"BTC",
# "fee":0,
# "ledger_id":8987285,
# "timestamp":"2018-10-12T11:01:14.000Z",
# "typename":"Get from activity"
# }
# ]
#
# spot
#
# [
# {
# "timestamp":"2019-03-18T07:08:25.000Z",
# "ledger_id":"3995334780",
# "created_at":"2019-03-18T07:08:25.000Z",
# "currency":"BTC",
# "amount":"0.0009985",
# "balance":"0.0029955",
# "type":"trade",
# "details":{
# "instrument_id":"BTC-USDT",
# "order_id":"2500650881647616",
# "product_id":"BTC-USDT"
# }
# }
# ]
#
# margin
#
# [
# [
# {
# "created_at":"2019-03-20T03:45:05.000Z",
# "ledger_id":"78918186",
# "timestamp":"2019-03-20T03:45:05.000Z",
# "currency":"EOS",
# "amount":"0", # ?
# "balance":"0.59957711",
# "type":"transfer",
# "details":{
# "instrument_id":"EOS-USDT",
# "order_id":"787057",
# "product_id":"EOS-USDT"
# }
# }
# ],
# {
# "before":"78965766",
# "after":"78918186"
# }
# ]
#
# futures
#
# [
# {
# "ledger_id":"2508090544914461",
# "timestamp":"2019-03-19T14:40:24.000Z",
# "amount":"-0.00529521",
# "balance":"0",
# "currency":"EOS",
# "type":"fee",
# "details":{
# "order_id":"2506982456445952",
# "instrument_id":"EOS-USD-190628"
# }
# }
# ]
#
# swap
#
# [
# {
# "amount":"0.004742",
# "fee":"-0.000551",
# "type":"match",
# "instrument_id":"EOS-USD-SWAP",
# "ledger_id":"197429674941902848",
# "timestamp":"2019-03-25T05:56:31.286Z"
# },
# ]
#
responseLength = len(response)
if responseLength < 1:
return []
isArray = isinstance(response[0], list)
isMargin = (type == 'margin')
entries = response[0] if (isMargin and isArray) else response
if type == 'swap':
ledgerEntries = self.parse_ledger(entries)
return self.filter_by_symbol_since_limit(ledgerEntries, code, since, limit)
return self.parse_ledger(entries, currency, since, limit)
def parse_ledger_entry_type(self, type):
types = {
'transfer': 'transfer', # # funds transfer in/out
'trade': 'trade', # funds moved as a result of a trade, spot and margin accounts only
'rebate': 'rebate', # fee rebate as per fee schedule, spot and margin accounts only
'match': 'trade', # open long/open short/close long/close short(futures) or a change in the amount because of trades(swap)
'fee': 'fee', # fee, futures only
'settlement': 'trade', # settlement/clawback/settle long/settle short
'liquidation': 'trade', # force close long/force close short/deliver close long/deliver close short
'funding': 'fee', # funding fee, swap only
'margin': 'margin', # a change in the amount after adjusting margin, swap only
}
return self.safe_string(types, type, type)
def parse_ledger_entry(self, item, currency=None):
#
#
# account
#
# {
# "amount":0.00051843,
# "balance":0.00100941,
# "currency":"BTC",
# "fee":0,
# "ledger_id":8987285,
# "timestamp":"2018-10-12T11:01:14.000Z",
# "typename":"Get from activity"
# }
#
# spot
#
# {
# "timestamp":"2019-03-18T07:08:25.000Z",
# "ledger_id":"3995334780",
# "created_at":"2019-03-18T07:08:25.000Z",
# "currency":"BTC",
# "amount":"0.0009985",
# "balance":"0.0029955",
# "type":"trade",
# "details":{
# "instrument_id":"BTC-USDT",
# "order_id":"2500650881647616",
# "product_id":"BTC-USDT"
# }
# }
#
# margin
#
# {
# "created_at":"2019-03-20T03:45:05.000Z",
# "ledger_id":"78918186",
# "timestamp":"2019-03-20T03:45:05.000Z",
# "currency":"EOS",
# "amount":"0", # ?
# "balance":"0.59957711",
# "type":"transfer",
# "details":{
# "instrument_id":"EOS-USDT",
# "order_id":"787057",
# "product_id":"EOS-USDT"
# }
# }
#
# futures
#
# {
# "ledger_id":"2508090544914461",
# "timestamp":"2019-03-19T14:40:24.000Z",
# "amount":"-0.00529521",
# "balance":"0",
# "currency":"EOS",
# "type":"fee",
# "details":{
# "order_id":"2506982456445952",
# "instrument_id":"EOS-USD-190628"
# }
# }
#
# swap
#
# {
# "amount":"0.004742",
# "fee":"-0.000551",
# "type":"match",
# "instrument_id":"EOS-USD-SWAP",
# "ledger_id":"197429674941902848",
# "timestamp":"2019-03-25T05:56:31.286Z"
# },
#
id = self.safe_string(item, 'ledger_id')
account = None
details = self.safe_value(item, 'details', {})
referenceId = self.safe_string(details, 'order_id')
referenceAccount = None
type = self.parse_ledger_entry_type(self.safe_string(item, 'type'))
code = self.safe_currency_code(self.safe_string(item, 'currency'), currency)
amount = self.safe_float(item, 'amount')
timestamp = self.parse8601(self.safe_string(item, 'timestamp'))
fee = {
'cost': self.safe_float(item, 'fee'),
'currency': code,
}
before = None
after = self.safe_float(item, 'balance')
status = 'ok'
marketId = self.safe_string(item, 'instrument_id')
symbol = None
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
return {
'info': item,
'id': id,
'account': account,
'referenceId': referenceId,
'referenceAccount': referenceAccount,
'type': type,
'currency': code,
'symbol': symbol,
'amount': amount,
'before': before, # balance before
'after': after, # balance after
'status': status,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
isArray = isinstance(params, list)
request = '/api/' + api + '/' + self.version + '/'
request += path if isArray else self.implode_params(path, params)
query = params if isArray else self.omit(params, self.extract_params(path))
url = self.implode_params(self.urls['api']['rest'], {'hostname': self.hostname}) + request
type = self.get_path_authentication_type(path)
if type == 'public':
if query:
url += '?' + self.urlencode(query)
elif type == 'private':
self.check_required_credentials()
timestamp = self.iso8601(self.milliseconds())
headers = {
'OK-ACCESS-KEY': self.apiKey,
'OK-ACCESS-PASSPHRASE': self.password,
'OK-ACCESS-TIMESTAMP': timestamp,
# 'OK-FROM': '',
# 'OK-TO': '',
# 'OK-LIMIT': '',
}
auth = timestamp + method + request
if method == 'GET':
if query:
urlencodedQuery = '?' + self.urlencode(query)
url += urlencodedQuery
auth += urlencodedQuery
else:
if isArray or query:
body = self.json(query)
auth += body
headers['Content-Type'] = 'application/json'
signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha256, 'base64')
headers['OK-ACCESS-SIGN'] = signature
return {'url': url, 'method': method, 'body': body, 'headers': headers}
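    # Signing sketch (illustrative values, not real credentials): a private GET of
    # /api/spot/v3/accounts at timestamp 2020-03-29T11:55:25.000Z uses the prehash
    # string '2020-03-29T11:55:25.000ZGET/api/spot/v3/accounts'; OK-ACCESS-SIGN is
    # the base64-encoded HMAC-SHA256 of that string keyed with the API secret.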
def get_path_authentication_type(self, path):
# https://github.com/ccxt/ccxt/issues/6651
        # a special case to handle the optionGetUnderlying interfering with
        # other endpoints containing this keyword
if path == 'underlying':
return 'public'
auth = self.safe_value(self.options, 'auth', {})
key = self.find_broadly_matched_key(auth, path)
return self.safe_string(auth, key, 'private')
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if not response:
return # fallback to default error handler
feedback = self.id + ' ' + body
if code == 503:
# {"message":"name resolution failed"}
raise ExchangeNotAvailable(feedback)
#
# {"error_message":"Order does not exist","result":"true","error_code":"35029","order_id":"-1"}
#
message = self.safe_string(response, 'message')
errorCode = self.safe_string_2(response, 'code', 'error_code')
nonEmptyMessage = ((message is not None) and (message != ''))
nonZeroErrorCode = (errorCode is not None) and (errorCode != '0')
if nonEmptyMessage:
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
if nonZeroErrorCode:
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
if nonZeroErrorCode or nonEmptyMessage:
raise ExchangeError(feedback) # unknown message
| 47.17664 | 521 | 0.469924 | [
"MIT"
] | TheMagooo/ccxt | python/ccxt/okex.py | 164,036 | Python |
from pipeline import *
class SentenceLimiter:
"""
Limit the text, word boundaries and
sentence boundaries of a given document
to the number of sentences given
"""
def run(self, document, number_sentences):
"""
        :param number_sentences: starts at 0 for the first sentence
"""
boundaries = (document.sentences_boundaries[0][0], document.sentences_boundaries[:number_sentences+1][-1][1])
document.text = document.text[boundaries[0]:boundaries[1]]
document.sentences_boundaries = self._limitSenteceBoundaries(document.sentences_boundaries, boundaries[1])
document.words_boundaries = self._limitWordBoundaries(document.words_boundaries, boundaries[1])
document.entities = self._limitEntities(document.entities, boundaries[1])
document.triples = self._limitTriples(document.triples, boundaries[1])
return document
def _limitSenteceBoundaries(self, sentences_boundaries, maxi):
sentences_boundaries_new = []
for sent in sentences_boundaries:
if sent[1] <= maxi:
sentences_boundaries_new.append(sent)
return sentences_boundaries_new
def _limitEntities(self, entities, maxi):
entities_new = []
for e in entities:
if e.boundaries[1] <= maxi:
entities_new.append(e)
return entities_new
def _limitTriples(self, triples, maxi):
triples_new = []
for t in triples:
if t.sentence_id == 0:
triples_new.append(t)
return triples_new
def _limitWordBoundaries(self, words_boundaries, maxi):
words_boundaries_new = []
for word in words_boundaries:
if word[1] <= maxi:
words_boundaries_new.append(word)
return words_boundaries_new
class MainEntityLimiter:
"""
Remove a document's content if the main entity is not aligned
"""
def run(self, document):
if not document.uri in [i.uri for i in document.entities]:
document = None
return document
class EntityTypeFilter:
"""
Remove all documents that are of a certain type
"""
def __init__(self, all_triples, entities):
"""
        :param all_triples: input TripleReaderTriples object
        :param entities: a list of entity types whose instances should be filtered out
"""
self.wikidata_triples = all_triples
self.entities = entities
def run(self, document):
# P31: instance of
prop_id = 'http://www.wikidata.org/prop/direct/P31'
if any([i for i in self.wikidata_triples.get(document.docid) if i[1] == prop_id and i[2] in self.entities]):
document = None
return document
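# Minimal usage sketch (hypothetical objects: `document` comes from the dataset
# pipeline and `wikidata_triples` from a TripleReaderTriples reader; Q5 is an
# illustrative type to filter):
#
#   doc = SentenceLimiter().run(document, number_sentences=0)  # keep the first sentence
#   if doc is not None:
#       doc = MainEntityLimiter().run(doc)
#   if doc is not None:
#       doc = EntityTypeFilter(wikidata_triples, ['http://www.wikidata.org/entity/Q5']).run(doc)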
| 35.076923 | 117 | 0.645468 | [
"MIT"
] | hadyelsahar/RE-NLG-Dataset | pipeline/filter.py | 2,736 | Python |
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from jinja2 import Template
from oslo_concurrency import processutils
from oslo_log import log as logging
from networking_bgp_ovn import constants
LOG = logging.getLogger(__name__)
ADD_VRF_TEMPLATE = '''
vrf {{ vrf_name }}
vni {{ vni }}
exit-vrf
router bgp {{ bgp_as }} vrf {{ vrf_name }}
address-family ipv4 unicast
redistribute connected
exit-address-family
address-family ipv6 unicast
redistribute connected
exit-address-family
address-family l2vpn evpn
advertise ipv4 unicast
advertise ipv6 unicast
exit-address-family
'''
DEL_VRF_TEMPLATE = '''
no vrf {{ vrf_name }}
no router bgp {{ bgp_as }} vrf {{ vrf_name }}
'''
LEAK_VRF_TEMPLATE = '''
router bgp {{ bgp_as }}
address-family ipv4 unicast
import vrf {{ vrf_name }}
exit-address-family
address-family ipv6 unicast
import vrf {{ vrf_name }}
exit-address-family
router bgp {{ bgp_as }} vrf {{ vrf_name }}
bgp router-id {{ bgp_router_id }}
address-family ipv4 unicast
redistribute connected
exit-address-family
address-family ipv6 unicast
redistribute connected
exit-address-family
'''
def _run_vtysh_config(frr_config_file):
vtysh_command = "copy {} running-config".format(frr_config_file)
full_args = ['/usr/bin/vtysh', '--vty_socket', constants.FRR_SOCKET_PATH,
'-c', vtysh_command]
try:
return processutils.execute(*full_args, run_as_root=True)
except Exception as e:
print("Unable to execute vtysh with {}. Exception: {}".format(
full_args, e))
raise
def _run_vtysh_command(command):
full_args = ['/usr/bin/vtysh', '--vty_socket', constants.FRR_SOCKET_PATH,
'-c', command]
try:
return processutils.execute(*full_args, run_as_root=True)[0]
except Exception as e:
print("Unable to execute vtysh with {}. Exception: {}".format(
full_args, e))
raise
def _get_router_id(bgp_as):
output = _run_vtysh_command(command='show ip bgp summary json')
return json.loads(output).get('ipv4Unicast', {}).get('routerId')
def vrf_leak(vrf, bgp_as, bgp_router_id=None):
LOG.info("Add VRF leak for VRF {} on router bgp {}".format(vrf, bgp_as))
if not bgp_router_id:
bgp_router_id = _get_router_id(bgp_as)
if not bgp_router_id:
LOG.error("Unknown router-id, needed for route leaking")
return
vrf_template = Template(LEAK_VRF_TEMPLATE)
vrf_config = vrf_template.render(vrf_name=vrf, bgp_as=bgp_as,
bgp_router_id=bgp_router_id)
frr_config_file = "frr-config-vrf-leak-{}".format(vrf)
with open(frr_config_file, 'w') as vrf_config_file:
vrf_config_file.write(vrf_config)
_run_vtysh_config(frr_config_file)
def vrf_reconfigure(evpn_info, action):
LOG.info("FRR reconfiguration (action = {}) for evpn: {}".format(
action, evpn_info))
frr_config_file = None
if action == "add-vrf":
vrf_template = Template(ADD_VRF_TEMPLATE)
vrf_config = vrf_template.render(
vrf_name="{}{}".format(constants.OVN_EVPN_VRF_PREFIX,
evpn_info['vni']),
bgp_as=evpn_info['bgp_as'],
vni=evpn_info['vni'])
frr_config_file = "frr-config-add-vrf-{}".format(evpn_info['vni'])
elif action == "del-vrf":
vrf_template = Template(DEL_VRF_TEMPLATE)
vrf_config = vrf_template.render(
vrf_name="{}{}".format(constants.OVN_EVPN_VRF_PREFIX,
evpn_info['vni']),
bgp_as=evpn_info['bgp_as'])
frr_config_file = "frr-config-del-vrf-{}".format(evpn_info['vni'])
else:
LOG.error("Unknown FRR reconfiguration action: %s", action)
return
with open(frr_config_file, 'w') as vrf_config_file:
vrf_config_file.write(vrf_config)
_run_vtysh_config(frr_config_file)
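# Usage sketch (illustrative values): given EVPN info discovered for a network,
# the FRR running-config can be extended and later cleaned up like so:
#
#   evpn_info = {'vni': 1001, 'bgp_as': '64999'}
#   vrf_reconfigure(evpn_info, 'add-vrf')    # render ADD_VRF_TEMPLATE and load it via vtysh
#   vrf_leak(constants.OVN_EVPN_VRF_PREFIX + str(evpn_info['vni']), evpn_info['bgp_as'])
#   vrf_reconfigure(evpn_info, 'del-vrf')    # tear the VRF back down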
| 31.165517 | 77 | 0.669617 | [
"Apache-2.0"
] | luis5tb/networking-bgp-ovn | networking_bgp_ovn/drivers/openstack/utils/frr.py | 4,519 | Python |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow import DAG
from airflow.contrib.operators.jenkins_job_trigger_operator import JenkinsJobTriggerOperator
from airflow.operators.python_operator import PythonOperator
from airflow.contrib.hooks.jenkins_hook import JenkinsHook
from six.moves.urllib.request import Request
import jenkins
from datetime import datetime
from datetime import timedelta
datetime_start_date = datetime(2018, 5, 3)
default_args = {
"owner": "airflow",
"start_date": datetime_start_date,
"retries": 1,
"retry_delay": timedelta(minutes=5),
"depends_on_past": False,
"concurrency": 8,
"max_active_runs": 8
}
dag = DAG("test_jenkins", default_args=default_args, schedule_interval=None)
# This DAG shouldn't be executed and is only here to provide an example of how to use the JenkinsJobTriggerOperator
# (it requires a Jenkins server to run against)
job_trigger = JenkinsJobTriggerOperator(
dag=dag,
task_id="trigger_job",
job_name="red-beta-build-deploy",
parameters={"BRANCH":"origin/master", "USER_ENV":"shameer"},
#parameters="resources/paremeter.json", You can also pass a path to a json file containing your param
jenkins_connection_id="jenkins_nqa" #The connection must be configured first
)
def grabArtifactFromJenkins(**context):
"""
    Grab an artifact from the previous job.
    The python-jenkins library doesn't expose a method for that,
    but it's possible to build the request manually.
"""
hook = JenkinsHook("jenkins_nqa")
jenkins_server = hook.get_jenkins_server()
url = context['task_instance'].xcom_pull(task_ids='trigger_job')
    # The JenkinsJobTriggerOperator stores the job url in the xcom variable corresponding to the task
#You can then use it to access things or to get the job number
#This url looks like : http://jenkins_url/job/job_name/job_number/
url = url + "artifact/myartifact.xml" #Or any other artifact name
self.log.info("url : %s", url)
request = Request(url)
response = jenkins_server.jenkins_open(request)
self.log.info("response: %s", response)
return response #We store the artifact content in a xcom variable for later use
artifact_grabber = PythonOperator(
task_id='artifact_grabber',
provide_context=True,
python_callable=grabArtifactFromJenkins,
dag=dag)
artifact_grabber.set_upstream(job_trigger)
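# Equivalently, the dependency can be declared with the bitshift syntax:
#   job_trigger >> artifact_grabber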
| 37.505882 | 111 | 0.754391 | [
"Apache-2.0"
] | shameerb/incubator-airflow | dags/jenkins_dag.py | 3,188 | Python |
# coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.3, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re
# python 2 and python 3 compatibility library
import six
from typing import List, Optional
from .. import models
class LinkAggregationGroupsApi(object):
def __init__(self, api_client):
self.api_client = api_client
def api23_link_aggregation_groups_delete_with_http_info(
self,
ids=None, # type: List[str]
names=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> None
"""DELETE link-aggregation-groups
Remove a link aggregation group to unbind the ports.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api23_link_aggregation_groups_delete_with_http_info(async_req=True)
>>> result = thread.get()
:param list[str] ids: A comma-separated list of resource IDs. If after filtering, there is not at least one resource that matches each of the elements of `ids`, then an error is returned. This cannot be provided together with the `name` or `names` query parameters.
:param list[str] names: A comma-separated list of resource names. If there is not at least one resource that matches each of the elements of `names`, then an error is returned.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
if ids is not None:
if not isinstance(ids, list):
ids = [ids]
if names is not None:
if not isinstance(names, list):
names = [names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
collection_formats = {}
path_params = {}
query_params = []
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.3/link-aggregation-groups', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api23_link_aggregation_groups_get_with_http_info(
self,
continuation_token=None, # type: str
filter=None, # type: str
ids=None, # type: List[str]
limit=None, # type: int
names=None, # type: List[str]
offset=None, # type: int
sort=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.LinkAggregationGroupGetResponse
"""GET link-aggregation-groups
List the status and attributes of the Ethernet ports in the configured link aggregation groups.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api23_link_aggregation_groups_get_with_http_info(async_req=True)
>>> result = thread.get()
:param str continuation_token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.
:param str filter: Exclude resources that don't match the specified criteria.
:param list[str] ids: A comma-separated list of resource IDs. If after filtering, there is not at least one resource that matches each of the elements of `ids`, then an error is returned. This cannot be provided together with the `name` or `names` query parameters.
:param int limit: Limit the size of the response to the specified number of resources. A `limit` of `0` can be used to get the number of resources without getting all of the resources. It will be returned in the `total_item_count` field. If a client asks for a page size larger than the maximum number, the request is still valid. In that case the server just returns the maximum number of items, disregarding the client's page size request.
:param list[str] names: A comma-separated list of resource names. If there is not at least one resource that matches each of the elements of `names`, then an error is returned.
:param int offset: The offset of the first resource to return from a collection.
:param list[str] sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name). NOTE: If you provide a sort you will not get a `continuation_token` in the response.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: LinkAggregationGroupGetResponse
If the method is called asynchronously,
returns the request thread.
"""
if ids is not None:
if not isinstance(ids, list):
ids = [ids]
if names is not None:
if not isinstance(names, list):
names = [names]
if sort is not None:
if not isinstance(sort, list):
sort = [sort]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if 'limit' in params and params['limit'] < 1:
raise ValueError("Invalid value for parameter `limit` when calling `api23_link_aggregation_groups_get`, must be a value greater than or equal to `1`")
if 'offset' in params and params['offset'] < 0:
raise ValueError("Invalid value for parameter `offset` when calling `api23_link_aggregation_groups_get`, must be a value greater than or equal to `0`")
collection_formats = {}
path_params = {}
query_params = []
if 'continuation_token' in params:
query_params.append(('continuation_token', params['continuation_token']))
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'offset' in params:
query_params.append(('offset', params['offset']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
collection_formats['sort'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.3/link-aggregation-groups', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LinkAggregationGroupGetResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api23_link_aggregation_groups_patch_with_http_info(
self,
link_aggregation_group=None, # type: models.Linkaggregationgroup
ids=None, # type: List[str]
names=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.LinkAggregationGroupResponse
"""PATCH link-aggregation-groups
Modify link aggregation groups by adding and removing Ethernet ports.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api23_link_aggregation_groups_patch_with_http_info(link_aggregation_group, async_req=True)
>>> result = thread.get()
:param Linkaggregationgroup link_aggregation_group: (required)
:param list[str] ids: A comma-separated list of resource IDs. If after filtering, there is not at least one resource that matches each of the elements of `ids`, then an error is returned. This cannot be provided together with the `name` or `names` query parameters.
:param list[str] names: A comma-separated list of resource names. If there is not at least one resource that matches each of the elements of `names`, then an error is returned.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: LinkAggregationGroupResponse
If the method is called asynchronously,
returns the request thread.
"""
if ids is not None:
if not isinstance(ids, list):
ids = [ids]
if names is not None:
if not isinstance(names, list):
names = [names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
# verify the required parameter 'link_aggregation_group' is set
if link_aggregation_group is None:
raise TypeError("Missing the required parameter `link_aggregation_group` when calling `api23_link_aggregation_groups_patch`")
collection_formats = {}
path_params = {}
query_params = []
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'link_aggregation_group' in params:
body_params = params['link_aggregation_group']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.3/link-aggregation-groups', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LinkAggregationGroupResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api23_link_aggregation_groups_post_with_http_info(
self,
link_aggregation_group=None, # type: models.LinkAggregationGroup
names=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.LinkAggregationGroupResponse
"""POST link-aggregation-groups
Create a link aggregation group of Ethernet ports on the array.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api23_link_aggregation_groups_post_with_http_info(link_aggregation_group, names, async_req=True)
>>> result = thread.get()
:param LinkAggregationGroup link_aggregation_group: (required)
:param list[str] names: A comma-separated list of resource names. (required)
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: LinkAggregationGroupResponse
If the method is called asynchronously,
returns the request thread.
"""
if names is not None:
if not isinstance(names, list):
names = [names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
# verify the required parameter 'link_aggregation_group' is set
if link_aggregation_group is None:
raise TypeError("Missing the required parameter `link_aggregation_group` when calling `api23_link_aggregation_groups_post`")
# verify the required parameter 'names' is set
if names is None:
raise TypeError("Missing the required parameter `names` when calling `api23_link_aggregation_groups_post`")
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'link_aggregation_group' in params:
body_params = params['link_aggregation_group']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.3/link-aggregation-groups', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LinkAggregationGroupResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
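# Usage sketch (assumes an already-authenticated `api_client`; the LAG name below is illustrative):
#
#   lag_api = LinkAggregationGroupsApi(api_client)
#   resp = lag_api.api23_link_aggregation_groups_get_with_http_info(limit=5)
#   lag_api.api23_link_aggregation_groups_delete_with_http_info(names=['uplink-lag'])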
| 44.544365 | 449 | 0.638331 | [
"BSD-2-Clause"
] | Flav-STOR-WL/py-pure-client | pypureclient/flashblade/FB_2_3/api/link_aggregation_groups_api.py | 18,575 | Python |
# model settings
model = dict(
type='CenterNet',
pretrained='./pretrain/darknet53.pth',
backbone=dict(
type='DarknetV3',
layers=[1, 2, 8, 8, 4],
inplanes=[3, 32, 64, 128, 256, 512],
planes=[32, 64, 128, 256, 512, 1024],
norm_cfg=dict(type='BN'),
out_indices=(1, 2, 3, 4),
frozen_stages=1,
norm_eval=False),
neck=dict(type='None'),
bbox_head=dict(
type='CXTHead',
inplanes=(128, 256, 512, 1024),
head_conv=128,
wh_conv=64,
use_deconv=False,
norm_after_upsample=False,
hm_head_conv_num=2,
wh_head_conv_num=2,
ct_head_conv_num=1,
fovea_hm=False,
num_classes=81,
use_exp_wh=False,
wh_offset_base=16,
wh_agnostic=True,
wh_heatmap=True,
shortcut_cfg=(1, 2, 3),
shortcut_attention=(False, False, False),
norm_cfg=dict(type='BN'),
norm_wh=False,
hm_center_ratio=0.27,
hm_init_value=None,
giou_weight=5.,
merge_weight=1.,
hm_weight=1.,
ct_weight=1.))
cudnn_benchmark = True
# training and testing settings
train_cfg = dict(
vis_every_n_iters=100,
debug=False)
test_cfg = dict(
score_thr=0.01,
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
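# NOTE: the `data` dict below expects `train_pipeline` and `test_pipeline` to be defined.
# Their original definitions are not included in this snippet; the following is an assumed
# sketch modelled on typical TTFNet-style mmdetection configs (512x512 training,
# single-scale testing) and may differ from the original file.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(512, 512),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=False),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]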
data = dict(
imgs_per_gpu=12,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0004,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 5,
step=[9, 11])
checkpoint_config = dict(save_every_n_steps=200, max_to_keep=1, keep_every_n_epochs=9)
bbox_head_hist_config = dict(
model_type=['ConvModule', 'DeformConvPack'],
sub_modules=['bbox_head'],
save_every_n_steps=200)
# yapf:disable
log_config = dict(interval=20)
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = 'ttf53_whh_3lr_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
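# To launch training with this config from an mmdetection-style checkout (path assumed):
#   python tools/train.py configs/eftnet/R2_ttf53_whh_3lr_1x.py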
| 29.59596 | 86 | 0.63959 | [
"Apache-2.0"
] | mrsempress/mmdetection | configs/eftnet/R2_ttf53_whh_3lr_1x.py | 2,930 | Python |
## @package onnx
#Module caffe2.python.onnx.onnxifi
"""
ONNXIFI a Caffe2 net
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import caffe2.python._import_c_extension as C
import numpy as np
def onnxifi_caffe2_net(
pred_net,
input_shapes,
max_batch_size=1,
max_seq_size=1,
debug=False,
use_onnx=True,
merge_fp32_inputs_into_fp16=False,
adjust_batch=True,
black_list=None,
weight_names=None):
"""
Transform the caffe2_net by collapsing ONNXIFI-runnable nodes into Onnxifi c2 ops
"""
shape_hints = {}
for k, v in input_shapes.items():
shape_hints[k] = v
pred_net_str = C.onnxifi(pred_net.SerializeToString(),
shape_hints,
black_list if black_list else [],
weight_names if weight_names is not None else [],
max_batch_size,
max_seq_size,
adjust_batch,
debug,
merge_fp32_inputs_into_fp16,
use_onnx)
pred_net_cut = caffe2_pb2.NetDef()
pred_net_cut.ParseFromString(pred_net_str)
return pred_net_cut
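# Usage sketch (assumes `pred_net` is a loaded caffe2_pb2.NetDef and the workspace
# already holds its external inputs; the blob name and shape are illustrative):
#
#   shape_hints = {'data': (1, 3, 224, 224)}
#   onnxifi_net = onnxifi_caffe2_net(pred_net, shape_hints, max_batch_size=1)
#   workspace.CreateNet(onnxifi_net)
#   workspace.RunNet(onnxifi_net.name)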
| 29.877551 | 85 | 0.602459 | [
"Apache-2.0"
] | JustinBear99/Mask_RCNN | detectron/lib/python3.6/site-packages/caffe2/python/onnx/onnxifi.py | 1,464 | Python |
#!/usr/bin/env python
"""TcEx Framework Validate Module."""
# standard library
import ast
import importlib
import json
import os
import sys
import traceback
from collections import deque
from pathlib import Path
from typing import Dict, Union
# third-party
import colorama as c
# from jsonschema import SchemaError, ValidationError, validate
from pydantic import ValidationError
from stdlib_list import stdlib_list
# first-party
from tcex.app_config.install_json import InstallJson
from tcex.app_config.job_json import JobJson
from tcex.app_config.layout_json import LayoutJson
from tcex.app_config.tcex_json import TcexJson
from tcex.bin.bin_abc import BinABC
try:
# standard library
import sqlite3
except ModuleNotFoundError:
# this module is only required for certain CLI commands
pass
class Validate(BinABC):
"""Validate syntax, imports, and schemas.
* Python and JSON file syntax
* Python import modules
* install.json schema
* layout.json schema
"""
def __init__(self, ignore_validation: bool) -> None:
"""Initialize Class properties."""
super().__init__()
self.ignore_validation = ignore_validation
# class properties
self._app_packages = []
self._install_json_schema = None
self._layout_json_schema = None
self.config = {}
self.ij = InstallJson()
self.invalid_json_files = []
self.lj = LayoutJson()
self.tj = TcexJson()
# initialize validation data
self.validation_data = self._validation_data
@property
def _validation_data(self) -> Dict[str, list]:
"""Return structure for validation data."""
return {
'errors': [],
'fileSyntax': [],
'layouts': [],
'moduleImports': [],
'schema': [],
'feeds': [],
}
def _check_node_import(self, node: Union[ast.Import, ast.ImportFrom], filename: str) -> None:
"""."""
if isinstance(node, ast.Import):
for n in node.names:
m = n.name.split('.')[0]
if not self.check_import_stdlib(m):
m_status = self.check_imported(m)
if not m_status:
self.validation_data['errors'].append(
f'Module validation failed for {filename} '
f'module "{m}" could not be imported).'
)
self.validation_data['moduleImports'].append(
{'filename': filename, 'module': m, 'status': m_status}
)
elif isinstance(node, ast.ImportFrom):
m = node.module.split('.')[0]
if not self.check_import_stdlib(m):
m_status = self.check_imported(m)
if not m_status:
self.validation_data['errors'].append(
f'Module validation failed for {filename} '
f'module "{m}" could not be imported).'
)
self.validation_data['moduleImports'].append(
{'filename': filename, 'module': m, 'status': m_status}
)
def check_imports(self) -> None:
"""Check the projects top level directory for missing imports.
This method will check only files ending in **.py** and does not handle imports validation
for sub-directories.
"""
for filename in sorted(os.listdir(self.app_path)):
if not filename.endswith('.py'):
continue
fq_path = os.path.join(self.app_path, filename)
with open(fq_path, 'rb') as f:
# TODO: [low] is there a better way?
code_lines = deque([(f.read(), 1)])
while code_lines:
code, _ = code_lines.popleft() # pylint: disable=unused-variable
try:
parsed_code = ast.parse(code)
for node in ast.walk(parsed_code):
self._check_node_import(node, filename)
except SyntaxError:
pass
@staticmethod
def check_import_stdlib(module: str) -> bool:
"""Check if module is in Python stdlib.
Args:
module: The name of the module to check.
Returns:
bool: Returns True if the module is in the stdlib or template.
"""
if (
module in stdlib_list('3.6')
or module in stdlib_list('3.7')
or module in stdlib_list('3.8')
or module
in ['app', 'args', 'base_app_input', 'job_app', 'playbook_app', 'run', 'service_app']
):
return True
return False
@staticmethod
def check_imported(module: str) -> bool:
"""Check whether the provide module can be imported (package installed).
Args:
module: The name of the module to check availability.
Returns:
bool: True if the module can be imported, False otherwise.
"""
try:
del sys.modules[module]
except (AttributeError, KeyError):
pass
# https://docs.python.org/3/library/importlib.html#checking-if-a-module-can-be-imported
find_spec = importlib.util.find_spec(module)
found = find_spec is not None
if found is True:
# if dist-packages|site-packages in module_path the import doesn't count
try:
if 'dist-packages' in find_spec.origin:
found = False
except TypeError:
pass
try:
if 'site-packages' in find_spec.origin:
found = False
except TypeError:
pass
return found
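    # For example (illustrative): importlib.util.find_spec('requests') only counts as
    # importable here when its origin is not under dist-packages/site-packages,
    # presumably so that only modules bundled with the App itself pass validation.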
def check_install_json(self) -> None:
"""Check all install.json files for valid schema."""
if 'install.json' in self.invalid_json_files:
return
status = True
try:
self.ij.model
except ValidationError as ex:
self.invalid_json_files.append(self.ij.fqfn.name)
status = False
for error in json.loads(ex.json()):
location = [str(location) for location in error.get('loc')]
self.validation_data['errors'].append(
'''Schema validation failed for install.json. '''
f'''{error.get('msg')}: {' -> '.join(location)}'''
)
except ValueError:
# any JSON decode error will be caught during syntax validation
return
self.validation_data['schema'].append({'filename': self.ij.fqfn.name, 'status': status})
def check_job_json(self) -> None:
"""Validate feed files for feed job apps."""
if 'install.json' in self.invalid_json_files:
# can't proceed if install.json can't be read
return
# use developer defined app version (deprecated) or package_version from InstallJson model
app_version = self.tj.model.package.app_version or self.ij.model.package_version
program_name = (f'''{self.tj.model.package.app_name}_{app_version}''').replace('_', ' ')
status = True
for feed in self.ij.model.feeds:
if feed.job_file in self.invalid_json_files:
                # no need to check schema if json is invalid
continue
jj = JobJson(filename=feed.job_file)
# validate the job file exists
if not jj.fqfn.is_file():
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. '''
f'''The job.json file could not be found.'''
)
continue
try:
# validate the schema
jj.model
except ValidationError as ex:
status = False
for error in json.loads(ex.json()):
location = [str(location) for location in error.get('loc')]
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. '''
f'''{error.get('msg')}: {' -> '.join(location)}'''
)
# validate program name
if status is True and jj.model.program_name != program_name:
status = False
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. '''
f'''The job.json programName {jj.model.program_name} != {program_name}.'''
)
# validate program version
if status is True and jj.model.program_version != self.ij.model.program_version:
status = False
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. The job.json program'''
f'''Version {jj.model.program_version} != {self.ij.model.program_version}.'''
)
self.validation_data['schema'].append({'filename': feed.job_file, 'status': status})
def check_layout_json(self) -> None:
"""Check all layout.json files for valid schema."""
if not self.lj.has_layout or 'layout.json' in self.invalid_json_files:
return
status = True
try:
self.lj.model
except ValidationError as ex:
self.invalid_json_files.append(self.ij.fqfn.name)
status = False
for error in json.loads(ex.json()):
location = [str(location) for location in error.get('loc')]
self.validation_data['errors'].append(
f'''Schema validation failed for layout.json. '''
f'''{error.get('msg')}: {' -> '.join(location)}'''
)
except ValueError:
# any JSON decode error will be caught during syntax validation
return
self.validation_data['schema'].append({'filename': self.lj.fqfn.name, 'status': status})
if status is True:
self.check_layout_params()
def check_layout_params(self) -> None:
"""Check that the layout.json is consistent with install.json.
        The layout.json file references params.name values from the install.json file. This method
        validates that no reference appears for an input that doesn't exist in install.json.
"""
# do not track hidden or serviceConfig inputs as they should not be in layouts.json
ij_input_names = list(self.ij.model.filter_params(service_config=False, hidden=False))
ij_output_names = [o.name for o in self.ij.model.playbook.output_variables]
# Check for duplicate inputs
for name in self.ij.validate.validate_duplicate_input():
self.validation_data['errors'].append(
f'Duplicate input name found in install.json ({name})'
)
status = False
# Check for duplicate sequence numbers
for sequence in self.ij.validate.validate_duplicate_sequence():
self.validation_data['errors'].append(
f'Duplicate sequence number found in install.json ({sequence})'
)
status = False
# Check for duplicate outputs variables
for output in self.ij.validate.validate_duplicate_output():
self.validation_data['errors'].append(
f'Duplicate output variable name found in install.json ({output})'
)
status = False
if 'sqlite3' in sys.modules:
# create temporary inputs tables
self.permutations.db_create_table(self.permutations._input_table, ij_input_names)
# inputs
status = True
for i in self.lj.model.inputs:
for p in i.parameters:
if p.name not in ij_input_names:
# update validation data errors
self.validation_data['errors'].append(
'Layouts input.parameters[].name validations failed '
f'''("{p.get('name')}" is defined in layout.json, '''
'but hidden or not found in install.json).'
)
status = False
else:
                    # any name left in this list afterwards is a problem (defined in install.json but missing from layout.json)
ij_input_names.remove(p.name)
if 'sqlite3' in sys.modules:
if p.display:
display_query = (
f'''SELECT * FROM {self.permutations._input_table}''' # nosec
f''' WHERE {p.display}'''
)
try:
self.permutations.db_conn.execute(display_query.replace('"', ''))
except sqlite3.Error:
self.validation_data['errors'].append(
'''Layouts input.parameters[].display validations failed '''
f'''("{p.display}" query is an invalid statement).'''
)
status = False
# update validation data for module
self.validation_data['layouts'].append({'params': 'inputs', 'status': status})
if ij_input_names:
input_names = ','.join(ij_input_names)
# update validation data errors
self.validation_data['errors'].append(
f'Layouts input.parameters[].name validations failed ("{input_names}" '
                'values from install.json were not included in layout.json).'
)
status = False
# outputs
status = True
for o in self.lj.model.outputs:
if o.name not in ij_output_names:
# update validation data errors
self.validation_data['errors'].append(
f'''Layouts output validations failed ({o.name} is defined '''
'''in layout.json, but not found in install.json).'''
)
status = False
if 'sqlite3' in sys.modules:
if o.display:
display_query = (
f'''SELECT * FROM {self.permutations._input_table} ''' # nosec
f'''WHERE {o.display}'''
)
try:
self.permutations.db_conn.execute(display_query.replace('"', ''))
except sqlite3.Error:
self.validation_data['errors'].append(
f"""Layouts outputs.display validations failed ("{o.display}" """
f"""query is an invalid statement)."""
)
status = False
# update validation data for module
self.validation_data['layouts'].append({'params': 'outputs', 'status': status})
def check_syntax(self, app_path=None) -> None:
"""Run syntax on each ".py" and ".json" file.
Args:
app_path (str, optional): The path of Python files.
"""
fqpn = Path(app_path or os.getcwd())
for fqfn in sorted(fqpn.iterdir()):
error = None
status = True
if fqfn.name.endswith('.py'):
try:
with fqfn.open(mode='rb') as fh:
ast.parse(fh.read(), filename=fqfn.name)
except SyntaxError:
status = False
# cleanup output
e = []
for line in traceback.format_exc().split('\n')[-5:-2]:
e.append(line.strip())
error = ' '.join(e)
elif fqfn.name.endswith('.json'):
try:
with fqfn.open() as fh:
json.load(fh)
except ValueError as e:
# update tracker for common files
self.invalid_json_files.append(fqfn.name)
status = False
error = e
else:
# skip unsupported file types
continue
if error:
# update validation data errors
self.validation_data['errors'].append(
f'Syntax validation failed for {fqfn.name} ({error}).'
)
# store status for this file
self.validation_data['fileSyntax'].append({'filename': fqfn.name, 'status': status})
def interactive(self) -> None:
"""[App Builder] Run in interactive mode."""
while True:
line = sys.stdin.readline().strip()
if line == 'quit':
sys.exit()
elif line == 'validate':
self.check_syntax()
self.check_imports()
self.check_install_json()
self.check_layout_json()
self.check_job_json()
self.print_json()
# reset validation_data
self.validation_data = self._validation_data
def print_json(self) -> None:
"""[App Builder] Print JSON output."""
print(json.dumps({'validation_data': self.validation_data}))
# TODO: [low] switch to typer echo?
def _print_file_syntax_results(self) -> None:
"""Print file syntax results."""
if self.validation_data.get('fileSyntax'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated File Syntax:')
print(f'''{c.Style.BRIGHT}{'File:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('fileSyntax'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f"{f.get('filename')!s:<60}{status_color}{status_value!s:<25}")
def _print_imports_results(self) -> None:
"""Print import results."""
if self.validation_data.get('moduleImports'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Imports:')
print(f'''{c.Style.BRIGHT}{'File:'!s:<30}{'Module:'!s:<30}{'Status:'!s:<25}''')
for f in self.validation_data.get('moduleImports'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(
f'''{f.get('filename')!s:<30}{c.Fore.WHITE}'''
f'''{f.get('module')!s:<30}{status_color}{status_value!s:<25}'''
)
def _print_schema_results(self) -> None:
"""Print schema results."""
if self.validation_data.get('schema'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Schema:')
print(f'''{c.Style.BRIGHT}{'File:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('schema'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f'''{f.get('filename')!s:<60}{status_color}{status_value!s:<25}''')
def _print_layouts_results(self) -> None:
"""Print layout results."""
if self.validation_data.get('layouts'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Layouts:')
print(f'''{c.Style.BRIGHT}{'Params:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('layouts'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f"{f.get('params')!s:<60}{status_color}{status_value!s:<25}")
def _print_feed_results(self) -> None:
"""Print feed results."""
if self.validation_data.get('feeds'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Feed Jobs:')
print(f'''{c.Style.BRIGHT}{'Feeds:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('feeds'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f"{f.get('name')!s:<60}{status_color}{status_value!s:<25}")
def _print_errors(self) -> None:
"""Print errors results."""
if self.validation_data.get('errors'):
print('\n') # separate errors from normal output
for error in self.validation_data.get('errors'):
# print all errors
print(f'* {c.Fore.RED}{error}')
            # set a non-zero exit code unless validation errors are ignored
if not self.ignore_validation:
self.exit_code = 1
def print_results(self) -> None:
"""Print results."""
# Validating Syntax
self._print_file_syntax_results()
# Validating Imports
self._print_imports_results()
# Validating Schema
self._print_schema_results()
# Validating Layouts
self._print_layouts_results()
# Validating Feed Job Definition Files
self._print_feed_results()
self._print_errors()
@staticmethod
def status_color(status) -> str:
"""Return the appropriate status color."""
return c.Fore.GREEN if status else c.Fore.RED
@staticmethod
def status_value(status) -> str:
"""Return the appropriate status color."""
return 'passed' if status else 'failed'
| 39.400362 | 98 | 0.542186 | [
"Apache-2.0"
] | benjaminPurdy/tcex | tcex/bin/validate.py | 21,749 | Python |
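# --- Illustrative sketch (not part of tcex/bin/validate.py): a standalone, minimal
# version of the import check performed above -- walk a file's AST for Import /
# ImportFrom nodes and ask importlib whether each top-level module resolves.
# The sample source string and module names below are hypothetical.
import ast
import importlib.util


def top_level_imports(source):
    """Return the set of top-level module names imported by *source*."""
    modules = set()
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Import):
            modules.update(alias.name.split('.')[0] for alias in node.names)
        elif isinstance(node, ast.ImportFrom) and node.module:
            modules.add(node.module.split('.')[0])
    return modules


def importable(module):
    """Return True if *module* can be resolved by the current interpreter."""
    return importlib.util.find_spec(module) is not None


if __name__ == '__main__':
    sample = 'import json\nfrom requests import Session\n'
    for name in sorted(top_level_imports(sample)):
        print(f'{name}: {"ok" if importable(name) else "missing"}')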
import os
from access import Access
from user import User
log = os.path.dirname(os.path.abspath(__file__)) + "/temp/access.log"
class UserDAO(object):
__database = None
__cursor = None
def __init__(self):
self.__database = Access()
self.__cursor = self.__database.getCursor()
self.initDatabase()
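    # creates the user table on first run; the bare except below silently swallows the
    # error sqlite raises when the table already exists (and any other failure)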
def initDatabase(self):
try:
self.__cursor.execute(""" create table user (name text, username text, password text) """)
self.__database.commit()
except:
pass
def insert(self, user):
if len(self.getUser(user.getUsername())) == 0:
users = [(user.getName(), user.getUsername() , user.getPassword()), ]
self.__cursor.executemany("INSERT INTO user VALUES (?,?,?)", users)
self.__database.commit()
def update(self, user):
users = [(user.getName(),user.getPassword(), user.getUsername())]
self.__cursor.executemany("UPDATE user SET name = ?, password = ? where username = ? ", users)
self.__database.commit()
def delete(self, username):
self.__cursor.execute("DELETE FROM user WHERE username = " + username)
self.__database.commit()
def list(self):
self.__cursor.execute("SELECT * FROM user")
print self.__cursor.fetchall()
def getUser(self, username):
self.__cursor.execute("SELECT * FROM user WHERE username = ?",[(username)] )
return self.__cursor.fetchall()
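    # note: inside log() the bare name "log" resolves to the module-level log file path
    # defined at the top of this file, not to this method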
def log(self, user, request):
flines = user.toString() + " >>> " + request + "\n"
f = open(log, 'a')
f.writelines([flines,])
f.close()
| 36.06383 | 102 | 0.59174 | [
"MIT"
] | saraivaufc/PySpy | database/userDAO.py | 1,695 | Python |
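# --- Illustrative sketch (not part of PySpy): the same user CRUD written directly
# against sqlite3, passing every value through "?" placeholders so nothing is ever
# spliced into the SQL string. Table and column names mirror the ones used above;
# the in-memory database and the sample values are just examples.
import sqlite3

conn = sqlite3.connect(':memory:')
cur = conn.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS user (name text, username text, password text)')
cur.execute('INSERT INTO user VALUES (?, ?, ?)', ('Alice', 'alice', 'secret'))
cur.execute('UPDATE user SET password = ? WHERE username = ?', ('hunter2', 'alice'))
cur.execute('SELECT * FROM user WHERE username = ?', ('alice',))
print(cur.fetchall())
cur.execute('DELETE FROM user WHERE username = ?', ('alice',))
conn.commit()
conn.close()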
import disnake, youtube_dl
import src.core.embeds as embeds
import src.core.functions as funcs
from disnake.ext import commands
prefix = funcs.get_prefix()
class Music(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.group(invoke_without_command=True, description="Connect/Leave VC")
@commands.has_guild_permissions(connect=True)
async def vc(self, ctx: commands.Context, command: str):
await ctx.reply("Command not Found!!")
@commands.group(
invoke_without_command=True, description="Play, Pause, Resume, Stop Music"
)
@commands.has_guild_permissions(connect=True)
async def music(self, ctx: commands.Context, command: str):
await ctx.reply("Command not Found!!")
@vc.command(
description="Joins the VC you are currently in", aliases=["connect", "c"]
)
@commands.has_guild_permissions(connect=True)
async def join(self, ctx: commands.Context):
if ctx.author.voice is None:
await ctx.reply("You are not Connected to a Voice Channel!!")
return
if ctx.voice_client is None:
voice_channel = ctx.author.voice.channel
try:
await voice_channel.connect()
await ctx.reply("Connected!!")
except disnake.HTTPException:
await ctx.reply("Can't Connect to this Voice Channel!!")
else:
await ctx.reply("I am already in a Voice Channel!!")
@vc.command(description="Leaves VC", aliases=["disconnect", "dc"])
@commands.has_guild_permissions(connect=True)
async def leave(self, ctx: commands.Context):
if ctx.voice_client:
await ctx.reply("Disconnected!!")
await ctx.voice_client.disconnect()
else:
await ctx.reply("I am not Connected to any Voice Channel!!")
@music.command(description="Plays the Music")
@commands.has_guild_permissions(connect=True)
async def play(self, ctx: commands.Context, *, music_name: str):
vc = ctx.voice_client
if vc:
FFMPEG_OPTIONS = {
"before_options": "-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5",
"options": "-vn",
}
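            # the reconnect flags tell ffmpeg to re-open the HTTP stream if it drops; -vn discards the video track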
YDL_OPTIONS = {"formats": "bestaudio"}
with youtube_dl.YoutubeDL(YDL_OPTIONS) as ydl:
info = {}
url = ""
if music_name.startswith("https://"):
info = ydl.extract_info(music_name, download=False)
url = info["formats"][0]["url"]
else:
info_ = ydl.extract_info(f"ytsearch:{music_name}", download=False)
url_ = info_["entries"][0]["webpage_url"]
info = ydl.extract_info(url_, download=False)
url = info["formats"][0]["url"]
if info:
await ctx.reply(embed=embeds.music_playing_embed(info))
source = disnake.FFmpegPCMAudio(url, **FFMPEG_OPTIONS)
vc.play(source)
else:
await ctx.reply("I am not Connected to any Voice Channel!!")
@music.command(description="Pauses the Music")
@commands.has_guild_permissions(connect=True)
async def pause(self, ctx: commands.Context):
vc = ctx.voice_client
if vc:
if ctx.voice_client.is_playing():
await ctx.reply("Song Paused!!")
await ctx.voice_client.pause()
else:
await ctx.reply("No Song is Playing!!")
else:
await ctx.reply("I am not Connected to any Voice Channel!!")
@music.command(description="Resumes the Music")
@commands.has_guild_permissions(connect=True)
async def resume(self, ctx: commands.Context):
vc = ctx.voice_client
if vc:
if ctx.voice_client.is_paused():
await ctx.reply("Song Resumed!!")
await ctx.voice_client.resume()
else:
await ctx.reply("No Song is Paused!!")
else:
await ctx.reply(" I am not Connected to any Voice Channel!!")
@music.command(description="Adjusts the Volume as per given amount")
@commands.has_guild_permissions(connect=True)
async def volume(self, ctx: commands.Context, volume: int):
vc = ctx.voice_client
if vc:
            if 0 <= volume <= 100:
volume = volume / 100
vc.source = disnake.PCMVolumeTransformer(original=vc.source, volume=1.0)
vc.source.volume = volume
await ctx.reply(f"Changed volume to {volume * 100}%")
else:
await ctx.reply("Volume must be between 0 to 100 (Inclusive)")
else:
await ctx.reply("I am not Connected to any Voice Channel!!")
@music.command(description="Stops the Music")
@commands.has_guild_permissions(connect=True)
async def stop(self, ctx: commands.Context):
vc = ctx.voice_client
if vc:
if ctx.voice_client.is_playing() or ctx.voice_client.is_paused():
await ctx.reply("Song Stopped!!")
await ctx.voice_client.stop()
else:
await ctx.reply("No Song is Playing")
else:
await ctx.reply("I am not Connected to any Voice Channel!!")
def setup(bot: commands.Bot):
bot.add_cog(Music(bot))
| 37.258503 | 94 | 0.59266 | [
"MIT"
] | Jonak-Adipta-Kalita/JAK-Discord-Bot | src/cogs/commands/music.py | 5,477 | Python |
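# --- Illustrative sketch (not part of this cog): how a search term or URL can be
# resolved to a direct audio stream URL with youtube_dl, the way the play command
# above does. Assumes youtube_dl is installed; the query string is just an example,
# and the fallback covers versions where the URL only appears under "formats".
import youtube_dl


def resolve_stream_url(query):
    ydl_opts = {'format': 'bestaudio', 'quiet': True}
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        if query.startswith('https://'):
            info = ydl.extract_info(query, download=False)
        else:
            # "ytsearch:" wraps results in a playlist-like dict; take the first entry
            info = ydl.extract_info(f'ytsearch:{query}', download=False)['entries'][0]
        return info.get('url') or info['formats'][0]['url']


if __name__ == '__main__':
    print(resolve_stream_url('lofi hip hop'))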
# -*- coding: utf-8 -*-
#Chucky_Bot
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
from bs4 import BeautifulSoup
from threading import Thread
from googletrans import Translator
from gtts import gTTS
import time,random,sys,json,codecs,threading,glob,urllib,urllib2,urllib3,re,ast,os,subprocess,requests,tempfile
nadya = LINETCR.LINE()
#nadya.login(qr=True)
nadya.login(token='Eq8HO0fhYMrll5V2r6v3.uFyCY3rEW6udwsHCnFj70W.KD1Mlw3UQ67PLM8N+4pVdjTi1joYo3zu7hlhQV6XWuo=')
nadya.loginResult()
print "Nadya-Login Success\n\n=====[Sukses Login]====="
reload(sys)
sys.setdefaultencoding('utf-8')
selfMessage ="""
โโโโโโโโโโโโโโโโโโโโโโโโโโ
โ โโ FRYANT S E L F โโ
โ โโโโโโโโโโโโโโโโโโโโโโโโโ
โ โฉใHiใ
โ โฉใMeใ
โ โฉใMymidใ
โ โฉใMid @ใ
โ โฉใSearchID: (ID LINE)ใ
โ โฉใCheckdate (DD/MM/YY)ใ
โ โฉใKalenderใ
โ โฉใSteal contactใ
โ โฉใPp @ใ
โ โฉใCover @ใ
โ โฉใAuto likeใ
โ โฉใScbc Textใ
โ โฉใCbc Textใ
โ โฉใGbc Textใ
โ โฉใGetbio @ใ
โ โฉใGetinfo @ใ
โ โฉใGetname @ใ
โ โฉใGetprofile @ใ
โ โฉใGetcontact @ใ
โ โฉใGetvid @ใ
โ โฉใFriendlistใ
โ โฉใMicadd @ใ
โ โฉใMicdel @ใ
โ โฉใMiclistใ
โ โโโโโโโโโโโโโโโโโโโโโโโโโ
โ เผเฝฒเงกโ๏ปฟแถสฐแต+Sepri๏ปฟ๏ปฟโฎเฟเงกย
SelfBot Versi 124V
โโโโโโโโโโโโโโโโโโโโโโโโโโ
"""
botMessage ="""
โโโโโโโโโโโโโโโโโโโโโโโโโโ
โ โโ FRYANT B O T โโ
โ โโโโโโโโโโโโโโโโโโโโโโโโโ
โ โฉใAbsenใ
โ โฉใResponใ
โ โฉใRuntimeใ
โ โฉใMycopy @ใ
โ โฉใCopycontactใ
โ โฉใMybackupใ
โ โฉใMybio (Text)ใ
โ โฉใMyname (Text)ใ
โ โฉใ@byeใ
โ โฉใBot on/offใ
โ โโโโโโโโโโโโโโโโโโโโโโโโโ
โ เผเฝฒเงกโ๏ปฟแถสฐแต+Sepri๏ปฟ๏ปฟโฎเฟเงกย
SelfBot Versi 124V
โโโโโโโโโโโโโโโโโโโโโโโโโโ
"""
mediaMessage ="""
โโโโโโโโโโโโโโโโโโโโโโโโโโ
โ โโ FRYANT M E D I A โโ
โ โโโโโโโโโโโโโโโโโโโโโโโโโ
โ โฉใGiftใ
โ โฉใGift1 @ s/d Gift10 @ใ
โ โฉใGiftbycontactใ
โ โฉใGif goreใ
โ โฉใGoogle: (Text)ใ
โ โฉใPlaystore NamaAppใ
โ โฉใFancytext: Textใ
โ โฉใ/musik Judul-Penyanyiใ
โ โฉใ/lirik Judul-Penyanyiใ
โ โฉใ/musrik Judul-Penyanyiใ
โ โฉใ/ig UrsnameInstagramใ
โ โฉใCheckig UrsnameInstagramใ
โ โฉใ/apakah Text (Kerang Ajaib)ใ
โ โฉใ/kapan Text (Kerang Ajaib)ใ
โ โฉใ/hari Text (Kerang Ajaib)ใ
โ โฉใ/berapa Text (Kerang Ajaib)ใ
โ โฉใ/berapakah Textใ
โ โฉใYoutubelink: Judul Videoใ
โ โฉใYoutubevideo: Judul Videoใ
โ โฉใYoutubesearch: Judul Videoใ
โ โฉใImage NamaGambarใ
โ โฉใSay-id Textใ
โ โฉใSay-en Textใ
โ โฉใSay-jp Textใ
โ โฉใImage NamaGambarใ
โ โฉใTr-id Text (Translate En Ke IDใ
โ โฉใTr-en Text (Translate ID Ke Enใ
โ โฉใTr-th Text (Translate ID Ke Thใ
โ โฉใId@en Text (Translate ID Ke Enใ
โ โฉใId@th Text (Translate ID Ke THใ
โ โฉใEn@id Text (Translate En Ke IDใ
โ โโโโโโโโโโโโโโโโโโโโโโโโโ
โ เผเฝฒเงกโ๏ปฟแถสฐแต+Sepri๏ปฟ๏ปฟโฎเฟเงกย
SelfBot Versi 124V
โโโโโโโโโโโโโโโโโโโโโโโโโโ
"""
groupMessage ="""
โโโโโโโโโโโโโโโโโโโโโโโโโโ
โ โโ FRYANT G R O U P โโ
โ โโโโโโโโโโโโโโโโโโโโโโโโโ
โ โฉใWelcomeใ
โ โฉใSay welcomeใ
โ โฉใInvite creatorใ
โ โฉใSetviewใ
โ โฉใViewseenใ
โ โฉใGn: (NamaGroup)ใ
โ โฉใTag allใ
โ โฉใRecoverใ
โ โฉใCancelใ
โ โฉใCancelallใ
โ โฉใGcreatorใ
โ โฉใGinfoใ
โ โฉใGurlใ
โ โฉใList groupใ
โ โฉใPict group: (NamaGroup)ใ
โ โฉใSpam: (Text)ใ
โ โฉใAdd allใ
โ โฉใKick: (Mid)ใ
โ โฉใInvite: (Mid)ใ
โ โฉใInviteใ
โ โฉใMemlistใ
โ โฉใGetgroup imageใ
โ โฉใUrlgroup Imageใ
โ โโโโโโโโโโโโโโโโโโโโโโโโโ
โ เผเฝฒเงกโ๏ปฟแถสฐแต+Sepri๏ปฟ๏ปฟโฎเฟเงกย
SelfBot Versi 124V
โโโโโโโโโโโโโโโโโโโโโโโโโโ
"""
tjia="u9f09cfcb17d037e2936b751bd9d40ead"
setMessage ="""
โโโโโโโโโโโโโโโโโโโโโโโโโโ
โ โโ FRYANT S E T โโ
โ โโโโโโโโโโโโโโโโโโโโโโโโโ
โ โฉใSambutan on/offใ
โ โฉใMimic on/offใ
โ โฉใUrl on/offใ
โ โฉใAlwaysread on/offใ
โ โฉใSider on/offใ
โ โฉใContact on/offใ
โ โฉใSticker onใ
โ โฉใSimisimi on/offใ
โ โโโโโโโโโโโโโโโโโโโโโโโโโ
โ เผเฝฒเงกโ๏ปฟแถสฐแต+Sepri๏ปฟ๏ปฟโฎเฟเงกย
SelfBot Versi 124V
โโโโโโโโโโโโโโโโโโโโโโโโโโ
"""
creatorMessage ="""
โโโโโโโโโโโโโโโโโโโโโโโโโโ
โ โโ FRYANT C R E A T O R โโ
โ โโโโโโโโโโโโโโโโโโโโโโโโโ
โ โฉใCrashใ
โ โฉใKickallใ
โ โฉใBc: (Text)ใ
โ โฉใJoin group: (NamaGroupใ
โ โฉใLeave group: (NamaGroupใ
โ โฉใLeave all groupใ
โ โฉใTag on/offใ
โ โฉใBot restartใ
โ โฉใTurn offใ
โ โโโโโโโโโโโโโโโโโโโโโโโโโ
โ เผเฝฒเงกโ๏ปฟแถสฐแต+Sepri๏ปฟ๏ปฟโฎเฟเงกย
SelfBot Versi 124V
โโโโโโโโโโโโโโโโโโโโโโโโโโ
"""
adminMessage ="""
โโโโโโโโโโโโโโโโโโโโโโโโโโ
โ โโ A D M I N โโ
โ โโโโโโโโโโโโโโโโโโโโโโโโโ
โ โฉใAllprotect on/offใ
โ โฉใBanใ
โ โฉใUnbanใ
โ โฉใBan @ใ
โ โฉใUnban @ใ
โ โฉใBan listใ
โ โฉใClear banใ
โ โฉใKillใ
โ โฉใKick @ใ
โ โฉใSet member: (Jumblah)ใ
โ โฉใBan group: (NamaGroupใ
โ โฉใDel ban: (NamaGroupใ
โ โฉใList banใ
โ โฉใKill banใ
โ โฉใGlistใ
โ โฉใGlistmidใ
โ โฉใDetails group: (Gid)ใ
โ โฉใCancel invite: (Gid)ใ
โ โฉใInvitemeto: (Gid)ใ
โ โฉใAcc inviteใ
โ โฉใRemovechatใ
โ โฉใQr on/offใ
โ โฉใAutokick on/offใ
โ โฉใAutocancel on/offใ
โ โฉใInvitepro on/offใ
โ โฉใJoin on/offใ
โ โฉใJoincancel on/offใ
โ โฉใRespon1 on/offใ
โ โฉใRespon2 on/offใ
โ โฉใRespon3 on/offใ
โ โฉใResponkick on/offใ
โ โโโโโโโโโโโโโโโโโโโโโโโโโ
โ เผเฝฒเงกโ๏ปฟแถสฐแต+Sepri๏ปฟ๏ปฟโฎเฟเงกย
SelfBot Versi 124V
โโโโโโโโโโโโโโโโโโโโโโโโโโ
"""
helpMessage ="""
โโโโโโโโโโโโโโโโโโโโโโโโโโ
โ โโ FRYANT H E L P โโ
โ โโโโโโโโโโโโโโโโโโโโโโโโโ
โ โฉใHelp selfใ
โ โฉใHelp botใ
โ โฉใHelp groupใ
โ โฉใHelp setใ
โ โฉใHelp mediaใ
โ โฉใHelp adminใ
โ โฉใHelp creatorใ
โ โฉใOwnerใ
โ โฉใPap ownerใ
โ โฉใSpeedใ
โ โฉใSpeed testใ
โ โฉใStatusใ
โ โโโโโโโโโโโโโโโโโโโโโโโโโ
โ เผเฝฒเงกโ๏ปฟแถสฐแต+Sepri๏ปฟ๏ปฟโฎเฟเงกย
SelfBot Versi 124V
โโโโโโโโโโโโโโโโโโโโโโโโโโ
"""
KAC=[nadya]
mid = nadya.getProfile().mid
Bots=[mid]
Creator=["u51f61ccb745ec3a50359285c35f27bd3"]
admin=["u51f61ccb745ec3a50359285c35f27bd3"]
contact = nadya.getProfile()
backup1 = nadya.getProfile()
backup1.displayName = contact.displayName
backup1.statusMessage = contact.statusMessage
backup1.pictureStatus = contact.pictureStatus
responsename = nadya.getProfile().displayName
wait = {
"LeaveRoom":True,
"Bot":True,
"AutoJoin":False,
"AutoJoinCancel":False,
"memberscancel":30,
"Members":1,
"AutoCancel":False,
"AutoKick":False,
'pap':{},
'invite':{},
'steal':{},
'gift':{},
'copy':{},
'likeOn':{},
'detectMention':False,
'detectMention2':False,
'detectMention3':True,
'kickMention':False,
'sticker':False,
'timeline':True,
"Timeline":True,
"comment":"Bot Auto Like ยฉBy : Nadya\nContact Me : ๐ line.me/ti/p/~sepriche.",
"commentOn":True,
"commentBlack":{},
"message":"Thx For Add Me (^_^)\nInvite Me To Your Group ใ(^_^)ใ",
"blacklist":{},
"wblacklist":False,
"dblacklist":False,
"Qr":False,
"Contact":False,
"Sambutan":True,
"inviteprotect":False,
"alwaysRead":False,
"Sider":{},
"Simi":{},
"lang":"JP",
"BlGroup":{}
}
settings = {
"simiSimi":{}
}
cctv = {
"cyduk":{},
"point":{},
"sidermem":{}
}
wait2 = {
"readPoint":{},
"readMember":{},
"setTime":{},
"ROM":{}
}
mimic = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
setTime = {}
setTime = wait2['setTime']
mulai = time.time()
def download_page(url):
version = (3,0)
cur_version = sys.version_info
if cur_version >= version:
        import urllib.request
        try:
            headers = {}
            headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
            req = urllib.request.Request(url, headers = headers)
            resp = urllib.request.urlopen(req)
respData = str(resp.read())
return respData
except Exception as e:
print(str(e))
else:
import urllib2
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
req = urllib2.Request(url, headers = headers)
response = urllib2.urlopen(req)
page = response.read()
return page
except:
return"Page Not found"
def _images_get_next_item(s):
start_line = s.find('rg_di')
if start_line == -1:
end_quote = 0
link = "no_links"
return link, end_quote
else:
start_line = s.find('"class="rg_meta"')
start_content = s.find('"ou"',start_line+90)
end_content = s.find(',"ow"',start_content-90)
content_raw = str(s[start_content+6:end_content-1])
return content_raw, end_content
def _images_get_all_items(page):
items = []
while True:
item, end_content = _images_get_next_item(page)
if item == "no_links":
break
else:
items.append(item)
time.sleep(0.1)
page = page[end_content:]
return items
def waktu(secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
return '%02d Jam %02d Menit %02d Detik' % (hours, mins, secs)
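# e.g. waktu(3700) -> '01 Jam 01 Menit 40 Detik'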
def cms(string, commands): #/XXX, >XXX, ;XXX, ^XXX, %XXX, $XXX...
tex = ["+","@","/",">",";","^","%","$","๏ผพ","ใตใใฉ:","ใตใใฉ:","ใตใใฉ๏ผ","ใตใใฉ๏ผ"]
for texX in tex:
for command in commands:
if string ==command:
return True
return False
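# note: cms() reduces to a plain equality check against each command; the prefix list "tex" is iterated but texX is never used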
def upload_tempimage(client):
'''
Upload a picture of a kitten. We don't ship one, so get creative!
'''
config = {
'album': album,
'name': 'bot auto upload',
'title': 'bot auto upload',
'description': 'bot auto upload'
}
print("Uploading image... ")
image = client.upload_from_path(image_path, config=config, anon=False)
print("Done")
print()
return image
def sendAudio(self, to_, path):
M = Message()
M.text = None
M.to = to_
M.contentMetadata = None
M.contentPreview = None
M.contentType = 3
M_id = self._client.sendMessage(0,M).id
files = {
'file': open(path, 'rb'),
}
def sendMessage(to, text, contentMetadata={}, contentType=0):
    # build a Message and send it through the client
    mes = Message()
    mes.to = to
    mes.text = text
    mes.contentType, mes.contentMetadata = contentType, contentMetadata
    nadya.sendMessage(mes)
def sendImage(self, to_, path):
M = Message(to=to_, text=None, contentType = 1)
M.contentMetadata = None
M.contentPreview = None
M2 = self._client.sendMessage(0,M)
M_id = M2.id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'image',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self.post_content('https://obs-sg.line-apps.com/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload image failure.')
return True
def sendImageWithURL(self, to_, url):
path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(path, 'w') as f:
shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download image failure.')
try:
self.sendImage(to_, path)
except:
try:
self.sendImage(to_, path)
except Exception as e:
raise e
def sendAudioWithURL(self, to_, url):
path = self.downloadFileWithURL(url)
try:
self.sendAudio(to_, path)
except Exception as e:
raise Exception(e)
def sendAudioWithUrl(self, to_, url):
path = '%s/pythonLine-%1.data' % (tempfile.gettempdir(), randint(0, 9))
r = requests.get(url, stream=True, verify=False)
if r.status_code == 200:
with open(path, 'w') as f:
shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download audio failure.')
try:
self.sendAudio(to_, path)
except Exception as e:
raise e
def downloadFileWithURL(self, fileUrl):
saveAs = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))
r = self.get_content(fileUrl)
if r.status_code == 200:
with open(saveAs, 'wb') as f:
shutil.copyfileobj(r.raw, f)
return saveAs
else:
raise Exception('Download file failure.')
def summon(to, nama):
aa = ""
bb = ""
strt = int(14)
akh = int(14)
nm = nama
for mm in nm:
akh = akh + 2
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 6
akh = akh + 4
bb += "\xe2\x95\xa0 @x \n"
aa = (aa[:int(len(aa)-1)])
msg = Message()
msg.to = to
msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
print "[Command] Tag All"
try:
nadya.sendMessage(msg)
except Exception as error:
print error
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
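# re-execs the current interpreter with the same argv, i.e. a full restart of this script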
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
nadya.findAndAddContactsByMid(op.param1)
if(wait["message"]in[""," ","\n",None]):
pass
else:
nadya.sendText(op.param1,str(wait["message"]))
if op.type == 55:
try:
group_id = op.param1
user_id=op.param2
subprocess.Popen('echo "'+ user_id+'|'+str(op.createdTime)+'" >> dataSeen/%s.txt' % group_id, shell=True, stdout=subprocess.PIPE, )
except Exception as e:
print e
if op.type == 55:
try:
if cctv['cyduk'][op.param1]==True:
if op.param1 in cctv['point']:
Name = nadya.getContact(op.param2).displayName
# Name = summon(op.param2)
if Name in cctv['sidermem'][op.param1]:
pass
else:
cctv['sidermem'][op.param1] += "\nโข " + Name
if " " in Name:
nick = Name.split(' ')
if len(nick) == 2:
nadya.sendText(op.param1, "Haii " + "โ " + Name + " โ" + "\nNgintip Aja Niih. . .\nChat Kek Idiih (-__-) ")
time.sleep(0.2)
summon(op.param1,[op.param2])
else:
nadya.sendText(op.param1, "Haii " + "โ " + Name + " โ" + "\nBetah Banget Jadi Penonton. . .\nChat Napa (-__-) ")
time.sleep(0.2)
summon(op.param1,[op.param2])
else:
nadya.sendText(op.param1, "Haii " + "โ " + Name + " โ" + "\nNgapain Kak Ngintip Aja???\nSini Gabung Chat... ")
time.sleep(0.2)
summon(op.param1,[op.param2])
else:
pass
else:
pass
except:
pass
else:
pass
if op.type == 22:
nadya.leaveRoom(op.param1)
if op.type == 21:
nadya.leaveRoom(op.param1)
if op.type == 13:
print op.param3
if op.param3 in mid:
if op.param2 in Creator:
nadya.acceptGroupInvitation(op.param1)
if mid in op.param3:
if wait["AutoJoinCancel"] == True:
G = nadya.getGroup(op.param1)
if len(G.members) <= wait["memberscancel"]:
nadya.acceptGroupInvitation(op.param1)
nadya.sendText(op.param1,"Maaf " + nadya.getContact(op.param2).displayName + "\nMember Kurang Dari 30 Orang\nUntuk Info, Silahkan Chat Owner Kami!")
nadya.leaveGroup(op.param1)
else:
nadya.acceptGroupInvitation(op.param1)
nadya.sendText(op.param1,"โKetik โHelpโ Untuk Bantuanโ\nโHarap Gunakan Dengan Bijak ^_^ โ")
if mid in op.param3:
if wait["AutoJoin"] == True:
G = nadya.getGroup(op.param1)
if len(G.members) <= wait["Members"]:
nadya.rejectGroupInvitation(op.param1)
else:
nadya.acceptGroupInvitation(op.param1)
nadya.sendText(op.param1,"โKetik โHelpโ Untuk Bantuanโ\nโHarap Gunakan Dengan Bijak ^_^ โ")
else:
if wait["AutoCancel"] == True:
if op.param3 in Bots:
pass
else:
nadya.cancelGroupInvitation(op.param1, [op.param3])
else:
if op.param3 in wait["blacklist"]:
nadya.cancelGroupInvitation(op.param1, [op.param3])
nadya.sendText(op.param1, "Blacklist Detected")
else:
pass
if op.type == 13:
if op.param2 not in Creator:
if op.param2 not in admin:
if op.param2 not in Bots:
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
nadya.cancelGroupInvitation(op.param1,[op.param3])
nadya.kickoutFromGroup(op.param1,[op.param2])
if op.param2 not in Creator:
if op.param2 not in admin:
if op.param2 not in Bots:
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
if op.type == 19:
if wait["AutoKick"] == True:
try:
if op.param3 in Creator:
if op.param3 in admin:
if op.param3 in Bots:
pass
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
else:
nadya.kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
nadya.inviteIntoGroup(op.param1,[op.param3])
except:
try:
if op.param2 not in Creator:
if op.param2 not in admin:
if op.param2 not in Bots:
nadya.kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
nadya.inviteIntoGroup(op.param1,[op.param3])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
else:
pass
if mid in op.param3:
if op.param2 in Creator:
if op.param2 in Bots:
pass
try:
nadya.kickoutFromGroup(op.param1,[op.param2])
nadya.kickoutFromGroup(op.param1,[op.param2])
except:
try:
nadya.kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Creator in op.param3:
if admin in op.param3:
if op.param2 in Bots:
pass
try:
nadya.kickoutFromGroup(op.param1,[op.param2])
nadya.kickoutFromGroup(op.param1,[op.param2])
except:
try:
if op.param2 not in Bots:
nadya.kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
nadya.inviteIntoGroup(op.param1,[op.param3])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
nadya.inviteIntoGroup(op.param1,[op.param3])
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if op.type == 11:
if wait["Qr"] == True:
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
else:
nadya.kickoutFromGroup(op.param1,[op.param2])
else:
pass
if op.type == 17:
if wait["Sambutan"] == True:
if op.param2 in Creator:
return
ginfo = nadya.getGroup(op.param1)
contact = nadya.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
nadya.sendText(op.param1,"Hallo " + nadya.getContact(op.param2).displayName + "\nWelcome To โ " + str(ginfo.name) + " โ" + "\nBudayakan Cek Note\nDan Semoga Betah Disini ^_^")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
nadya.sendMessage(c)
nadya.sendImageWithURL(op.param1,image)
d = Message(to=op.param1, from_=None, text=None, contentType=7)
d.contentMetadata={
"STKID": "13269548",
"STKPKGID": "1329191",
"STKVER": "1" }
nadya.sendMessage(d)
print "MEMBER JOIN TO GROUP"
if op.type == 15:
if wait["Sambutan"] == True:
if op.param2 in Creator:
return
nadya.sendText(op.param1,"Good Bye " + nadya.getContact(op.param2).displayName + "\nSee You Next Time . . . (pโฒ๏ธตโตใ) ๐ค")
d = Message(to=op.param1, from_=None, text=None, contentType=7)
d.contentMetadata={
"STKID": "13269542",
"STKPKGID": "1329191",
"STKVER": "1" }
nadya.sendMessage(d)
print "MEMBER HAS LEFT THE GROUP"
if op.type == 26:
msg = op.message
if msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
text = msg.text
if text is not None:
nadya.sendText(msg.to,text)
if msg.to in settings["simiSimi"]:
if settings["simiSimi"][msg.to] == True:
if msg.text is not None:
text = msg.text
r = requests.get("http://api.ntcorp.us/chatbot/v1/?text=" + text.replace(" ","+") + "&key=beta1.nt")
data = r.text
data = json.loads(data)
if data['status'] == 200:
if data['result']['result'] == 100:
nadya.sendText(msg.to,data['result']['response'].encode('utf-8'))
if 'MENTION' in msg.contentMetadata.keys() != None:
if wait["kickMention"] == True:
contact = nadya.getContact(msg.from_)
cName = contact.displayName
balas = ["Aku Bilang Jangan Ngetag Lagi " + cName + "\nAku Kick Kamu! Sorry, Byee!!!"]
ret_ = random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
nadya.sendText(msg.to,ret_)
nadya.kickoutFromGroup(msg.to,[msg.from_])
break
if 'MENTION' in msg.contentMetadata.keys() != None:
if wait["detectMention"] == True:
contact = nadya.getContact(msg.from_)
cName = contact.displayName
balas = ["Sekali tag, berarti naksir aim๐
",cName + " Follow ya id smuleku @Fryant_BSS1",cName + " Iya sayank, I love you too muacchhh๐","aih, org ganteng, ditag mulu๐", cName + " kaka mau nikung aku yah??๐","kalau mau didesahin\npm aja kak๐ " + cName, "kangen ya sayank??๐ " + cName, "Follow id smule ku ya ka @Fryant_BSS1 " + cName + "๐๐๐", "Kaka mau nikung aku yah " + cName + "๐ฐ","orang ganteng " + cName + " pasti ditag mulu ๐"]
ret_ = random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
nadya.sendText(msg.to,ret_)
break
if 'MENTION' in msg.contentMetadata.keys() != None:
if wait["detectMention2"] == True:
contact = nadya.getContact(msg.from_)
cName = contact.displayName
balas = ["kenapa sayank,, kangen yah??","jangan tag kalau ga mau aku hamilin","jangan tag " + cName + " tuan muda lagi meeting"]
ret_ = random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
nadya.sendText(msg.to,ret_)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "157",
"STKPKGID": "2",
"STKVER": "100" }
nadya.sendMessage(msg)
break
if 'MENTION' in msg.contentMetadata.keys() != None:
if wait["detectMention3"] == True:
contact = nadya.getContact(msg.from_)
cName = contact.displayName
balas = ["Iya sayank " + cName + ", Syg kangen ya...aku lg kerja buat menata masa depan kita"]
balas1 = "Supaya aq dan kamu, bahagia selalu๐๐๐"
ret_ = random.choice(balas)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
nadya.sendText(msg.to,ret_)
nadya.sendText(msg.to,balas1)
nadya.sendImageWithURL(msg.to,image)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "11764508",
"STKPKGID": "6641",
"STKVER": "1" }
nadya.sendMessage(msg)
break
if op.type == 25:
msg = op.message
if msg.text in ["Bot on"]:
wait["Bot"] = True
nadya.sendText(msg.to,"Bot Sudah On Kembali.")
if op.type == 25:
if wait["Bot"] == True:
msg = op.message
if msg.contentType == 7:
if wait["sticker"] == True:
msg.contentType = 0
stk_id = msg.contentMetadata['STKID']
stk_ver = msg.contentMetadata['STKVER']
pkg_id = msg.contentMetadata['STKPKGID']
filler = "ใ Sticker Check ใ\nSTKID : %s\nSTKPKGID : %s\nSTKVER : %s\nใ Link ใ\nline://shop/detail/%s" % (stk_id,pkg_id,stk_ver,pkg_id)
nadya.sendText(msg.to, filler)
wait["sticker"] = False
else:
pass
if wait["alwaysRead"] == True:
if msg.toType == 0:
nadya.sendChatChecked(msg.from_,msg.id)
else:
nadya.sendChatChecked(msg.to,msg.id)
if msg.contentType == 16:
if wait['likeOn'] == True:
url = msg.contentMetadata["postEndUrl"]
nadya.like(url[25:58], url[66:], likeType=1005)
nadya.comment(url[25:58], url[66:], wait["comment"])
nadya.sendText(msg.to,"Like Success")
wait['likeOn'] = False
if msg.contentType == 13:
if wait["wblacklist"] == True:
if msg.contentMetadata["mid"] not in admin:
if msg.contentMetadata["mid"] in wait["blacklist"]:
nadya.sendText(msg.to,"Sudah")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
nadya.sendText(msg.to,"Ditambahkan")
else:
nadya.sendText(msg.to,"Admin Detected~")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
nadya.sendText(msg.to,"Terhapus")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
nadya.sendText(msg.to,"Tidak Ada Black List")
elif wait["Contact"] == True:
msg.contentType = 0
nadya.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = nadya.getContact(msg.contentMetadata["mid"])
try:
cu = nadya.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
nadya.sendText(msg.to,"Nama:\n" + msg.contentMetadata["displayName"] + "\n\nMid:\n" + msg.contentMetadata["mid"] + "\n\nStatus:\n" + contact.statusMessage + "\n\nPhoto Profile:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nPhoto Cover:\n" + str(cu))
else:
contact = nadya.getContact(msg.contentMetadata["mid"])
try:
cu = nadya.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
nadya.sendText(msg.to,"Nama:\n" + msg.contentMetadata["displayName"] + "\n\nMid:\n" + msg.contentMetadata["mid"] + "\n\nStatus:\n" + contact.statusMessage + "\n\nPhoto Profile:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nPhoto Cover:\n" + str(cu))
elif msg.text == "Ginfo":
if msg.toType == 2:
ginfo = nadya.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
u = "close"
else:
u = "open"
nadya.sendText(msg.to,"[Group name]\n" + str(ginfo.name) + "\n\n[Gid]\n" + msg.to + "\n\n[Group creator]\n" + gCreator + "\n\n[Profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\n\nMembers:" + str(len(ginfo.members)) + "members\nPending:" + sinvitee + "people\nURL:" + u + "it is inside")
else:
nadya.sendText(msg.to,"[group name]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[group creator]\n" + gCreator + "\n[profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
else:
if wait["lang"] == "JP":
nadya.sendText(msg.to,"Can not be used outside the group")
else:
nadya.sendText(msg.to,"Not for use less than group")
elif msg.text is None:
return
elif msg.text in ["Creator","Owner"]:
msg.contentType = 13
msg.contentMetadata = {'mid': tjia}
nadya.sendMessage(msg)
nadya.sendText(msg.to,"Itu Majikan Kami (^_^)")
elif msg.text in ["Group creator","Gcreator","gcreator"]:
ginfo = nadya.getGroup(msg.to)
gCreator = ginfo.creator.mid
msg.contentType = 13
msg.contentMetadata = {'mid': gCreator}
nadya.sendMessage(msg)
nadya.sendText(msg.to,"Itu Yang Buat Grup Ini")
elif msg.contentType == 16:
if wait["Timeline"] == True:
msg.contentType = 0
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
nadya.sendText(msg.to,msg.text)
if msg.contentType == 13:
if wait["steal"] == True:
_name = msg.contentMetadata["displayName"]
copy = msg.contentMetadata["mid"]
groups = nadya.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
print "[Target] Stealed"
break
else:
targets.append(copy)
if targets == []:
pass
else:
for target in targets:
try:
nadya.findAndAddContactsByMid(target)
contact = nadya.getContact(target)
cu = nadya.channel.getCover(target)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
nadya.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + msg.contentMetadata["mid"] + "\n\nBio :\n" + contact.statusMessage)
nadya.sendText(msg.to,"Profile Picture " + contact.displayName)
nadya.sendImageWithURL(msg.to,image)
nadya.sendText(msg.to,"Cover " + contact.displayName)
nadya.sendImageWithURL(msg.to,path)
wait["steal"] = False
break
except:
pass
if msg.contentType == 13:
if wait["gift"] == True:
_name = msg.contentMetadata["displayName"]
copy = msg.contentMetadata["mid"]
groups = nadya.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
print "[Target] Gift"
break
else:
targets.append(copy)
if targets == []:
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,"Gift Sudah Terkirim!")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '1',
'STKPKGID': '1296261'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
wait['gift'] = False
break
except:
msg.contentMetadata = {'mid': target}
wait["gift"] = False
break
if msg.contentType == 13:
if wait["copy"] == True:
_name = msg.contentMetadata["displayName"]
copy = msg.contentMetadata["mid"]
groups = nadya.getGroup(msg.to)
targets = []
for s in groups.members:
if _name in s.displayName:
print "[Target] Copy"
break
else:
targets.append(copy)
if targets == []:
nadya.sendText(msg.to, "Not Found...")
pass
else:
for target in targets:
try:
nadya.CloneContactProfile(target)
nadya.sendText(msg.to, "Copied (^_^)")
wait['copy'] = False
break
except:
msg.contentMetadata = {'mid': target}
wait["copy"] = False
break
if msg.contentType == 13:
if wait['invite'] == True:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = nadya.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
nadya.sendText(msg.to, _name + " Berada DiGrup Ini")
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
nadya.findAndAddContactsByMid(target)
nadya.inviteIntoGroup(msg.to,[target])
nadya.sendText(msg.to,"Invite " + _name)
wait['invite'] = False
break
except:
nadya.sendText(msg.to,"Limit Invite")
wait['invite'] = False
break
elif msg.text in ["Key creator","help creator","Fryant 1"]:
nadya.sendText(msg.to,creatorMessage)
elif msg.text in ["Key group","help group","Fryant 2"]:
nadya.sendText(msg.to,groupMessage)
elif msg.text in ["Key","Fryant","Help"]:
nadya.sendText(msg.to,helpMessage)
elif msg.text in ["Key self","help self","Fryant 3"]:
nadya.sendText(msg.to,selfMessage)
elif msg.text in ["Key bot","help bot","Fryant 4"]:
nadya.sendText(msg.to,botMessage)
elif msg.text in ["Key set","help set","Fryant 5"]:
nadya.sendText(msg.to,setMessage)
elif msg.text in ["Key media","help media","Fryant 6"]:
nadya.sendText(msg.to,mediaMessage)
elif msg.text in ["Key admin","help admin","Fryant 7"]:
nadya.sendText(msg.to,adminMessage)
elif msg.text in ["Fryant group"]:
gid = nadya.getGroupIdsJoined()
h = ""
jml = 0
for i in gid:
gn = nadya.getGroup(i).name
h += "โฆใ%sใ\n" % (gn)
jml += 1
nadya.sendText(msg.to,"=======[List Group]=======\n"+ h +"\nTotal Group: "+str(jml))
elif "Ban group: " in msg.text:
grp = msg.text.replace("Ban group: ","")
gid = nadya.getGroupIdsJoined()
if msg.from_ in admin:
for i in gid:
h = nadya.getGroup(i).name
if h == grp:
wait["BlGroup"][i]=True
nadya.sendText(msg.to, "Success Ban Group : "+grp)
else:
pass
else:
nadya.sendText(msg.to, "Khusus Nadya")
elif msg.text in ["List ban","List ban group"]:
if msg.from_ in admin:
if wait["BlGroup"] == {}:
nadya.sendText(msg.to,"Tidak Ada")
else:
mc = ""
for gid in wait["BlGroup"]:
mc += "-> " +nadya.getGroup(gid).name + "\n"
nadya.sendText(msg.to,"===[Ban Group]===\n"+mc)
else:
nadya.sendText(msg.to, "Khusus Admin")
elif msg.text in ["Del ban: "]:
if msg.from_ in admin:
ng = msg.text.replace("Del ban: ","")
for gid in wait["BlGroup"]:
if nadya.getGroup(gid).name == ng:
del wait["BlGroup"][gid]
nadya.sendText(msg.to, "Success del ban "+ng)
else:
pass
else:
nadya.sendText(msg.to, "Khusus Nadya")
elif "Join group: " in msg.text:
ng = msg.text.replace("Join group: ","")
gid = nadya.getGroupIdsJoined()
try:
if msg.from_ in Creator:
for i in gid:
h = nadya.getGroup(i).name
if h == ng:
nadya.inviteIntoGroup(i,[Creator])
nadya.sendText(msg.to,"Success Join To ["+ h +"] Group")
else:
pass
else:
nadya.sendText(msg.to,"Khusus Nadya")
except Exception as e:
nadya.sendText(msg.to, str(e))
elif "Leave group: " in msg.text:
ng = msg.text.replace("Leave group: ","")
gid = nadya.getGroupIdsJoined()
if msg.from_ in Creator:
for i in gid:
h = nadya.getGroup(i).name
if h == ng:
nadya.sendText(i,"Bot Di Paksa Keluar Oleh Owner!")
nadya.leaveGroup(i)
nadya.sendText(msg.to,"Success Left ["+ h +"] group")
else:
pass
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif "Leave all group" == msg.text:
gid = nadya.getGroupIdsJoined()
if msg.from_ in Creator:
for i in gid:
nadya.sendText(i,"Bot Di Paksa Keluar Oleh Owner!")
nadya.leaveGroup(i)
nadya.sendText(msg.to,"Success Leave All Group")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif "Pict group: " in msg.text:
saya = msg.text.replace('Pict group: ','')
gid = nadya.getGroupIdsJoined()
for i in gid:
h = nadya.getGroup(i).name
gna = nadya.getGroup(i)
if h == saya:
nadya.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus)
elif msg.text in ["cancelall","Cancelall"]:
if msg.toType == 2:
X = nadya.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
nadya.cancelGroupInvitation(msg.to, gInviMids)
else:
nadya.sendText(msg.to,"Tidak Ada Yang Pending")
else:
nadya.sendText(msg.to,"Tidak Bisa Digunakan Diluar Group")
elif msg.text in ["Ourl","Url on"]:
if msg.toType == 2:
X = nadya.getGroup(msg.to)
X.preventJoinByTicket = False
nadya.updateGroup(X)
nadya.sendText(msg.to,"Url Sudah Aktif")
else:
nadya.sendText(msg.to,"Can not be used outside the group")
elif msg.text in ["Curl","Url off"]:
if msg.toType == 2:
X = nadya.getGroup(msg.to)
X.preventJoinByTicket = True
nadya.updateGroup(X)
nadya.sendText(msg.to,"Url Sudah Di Nonaktifkan")
else:
nadya.sendText(msg.to,"Can not be used outside the group")
elif msg.text in ["Join on","Autojoin on"]:
if msg.from_ in admin:
wait["AutoJoin"] = True
wait["AutoJoinCancel"] = False
nadya.sendText(msg.to,"Auto Join Sudah Aktif")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Join off","Autojoin off"]:
if msg.from_ in admin:
wait["AutoJoin"] = False
nadya.sendText(msg.to,"Auto Join Sudah Di Nonaktifkan")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Joincancel on","Autojoincancel on"]:
if msg.from_ in admin:
wait["AutoJoinCancel"] = True
wait["AutoJoin"] = False
nadya.sendText(msg.to,"Auto Join Cancel Sudah Aktif")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Joincancel off","Autojoincancel off"]:
if msg.from_ in admin:
wait["AutoJoinCancel"] = False
nadya.sendText(msg.to,"Auto Join Cancel Sudah Di Nonaktifkan")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Respon1 on"]:
if msg.from_ in admin:
wait["detectMention"] = True
wait["detectMention2"] = False
wait["detectMention3"] = False
wait["kickMention"] = False
nadya.sendText(msg.to,"Auto Respon1 Sudah Aktif")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Respon1 off"]:
if msg.from_ in admin:
wait["detectMention"] = False
nadya.sendText(msg.to,"Auto Respon1 Sudah Off")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Respon2 on"]:
if msg.from_ in admin:
wait["detectMention"] = False
wait["detectMention2"] = True
wait["detectMention3"] = False
wait["kickMention"] = False
nadya.sendText(msg.to,"Auto Respon2 Sudah Aktif")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Respon2 off"]:
if msg.from_ in admin:
wait["detectMention2"] = False
nadya.sendText(msg.to,"Auto Respon2 Sudah Off")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Respon3 on"]:
if msg.from_ in admin:
wait["detectMention"] = False
wait["detectMention2"] = False
wait["detectMention3"] = True
wait["kickMention"] = False
nadya.sendText(msg.to,"Auto Respon3 Sudah Aktif")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Respon3 off"]:
if msg.from_ in admin:
wait["detectMention3"] = False
nadya.sendText(msg.to,"Auto Respon3 Sudah Off")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Responkick on"]:
if msg.from_ in admin:
wait["kickMention"] = True
wait["detectMention"] = False
wait["detectMention2"] = False
wait["detectMention3"] = False
nadya.sendText(msg.to,"Auto Respon Kick Sudah Aktif")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Responkick off"]:
if msg.from_ in admin:
wait["kickMention"] = False
nadya.sendText(msg.to,"Auto Respon Kick Sudah Off")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Autocancel on"]:
if msg.from_ in admin:
wait["AutoCancel"] = True
nadya.sendText(msg.to,"Auto Cancel Sudah Aktif")
print wait["AutoCancel"]
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Autocancel off"]:
if msg.from_ in admin:
wait["AutoCancel"] = False
nadya.sendText(msg.to,"Auto Cancel Sudah Di Nonaktifkan")
print wait["AutoCancel"]
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Invitepro on"]:
if msg.from_ in admin:
wait["inviteprotect"] = True
nadya.sendText(msg.to,"Invite Protect Sudah Aktif")
print wait["inviteprotect"]
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Invitepro off"]:
if msg.from_ in admin:
wait["inviteprotect"] = False
nadya.sendText(msg.to,"Invite Protect Sudah Di Nonaktifkan")
print wait["inviteprotect"]
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif "Qr on" in msg.text:
if msg.from_ in admin:
wait["Qr"] = True
nadya.sendText(msg.to,"QR Protect Sudah Aktif")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif "Qr off" in msg.text:
if msg.from_ in admin:
wait["Qr"] = False
nadya.sendText(msg.to,"Qr Protect Sudah Di Nonaktifkan")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif "Autokick on" in msg.text:
if msg.from_ in admin:
wait["AutoKick"] = True
nadya.sendText(msg.to,"Auto Kick Sudah Aktif")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif "Autokick off" in msg.text:
if msg.from_ in admin:
wait["AutoKick"] = False
nadya.sendText(msg.to,"Auto Kick Sudah Di Nonaktifkan")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Allprotect on"]:
if msg.from_ in admin:
wait["AutoCancel"] = True
wait["inviteprotect"] = True
wait["AutoKick"] = True
wait["Qr"] = True
nadya.sendText(msg.to,"All Protect Sudah Aktif Semua")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Allprotect off"]:
if msg.from_ in admin:
wait["AutoCancel"] = False
wait["inviteprotect"] = False
wait["AutoKick"] = False
wait["Qr"] = False
nadya.sendText(msg.to,"All Protect Sudah Di Nonaktifkan Semua")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["K on","Contact on"]:
wait["Contact"] = True
nadya.sendText(msg.to,"Contact Sudah Aktif")
elif msg.text in ["K off","Contact off"]:
wait["Contact"] = False
nadya.sendText(msg.to,"Contact Sudah Di Nonaktifkan")
elif msg.text in ["Alwaysread on"]:
wait["alwaysRead"] = True
nadya.sendText(msg.to,"Always Read Sudah Aktif")
elif msg.text in ["Alwaysread off"]:
wait["alwaysRead"] = False
nadya.sendText(msg.to,"Always Read Sudah Di Nonaktifkan")
elif msg.text in ["Sambutan on"]:
if wait["Sambutan"] == True:
if wait["lang"] == "JP":
nadya.sendText(msg.to,"Sambutan Di Aktifkanใพ(*ยดโ๏ฝ*)๏พ")
else:
wait["Sambutan"] = True
if wait["lang"] == "JP":
nadya.sendText(msg.to,"Sudah Onใฝ(ยดโฝ๏ฝ)/")
elif msg.text in ["Sambutan off"]:
if wait["Sambutan"] == False:
if wait["lang"] == "JP":
nadya.sendText(msg.to,"Sambutan Di Nonaktifkan(ใ๏ผพโ๏ผพ)")
else:
wait["Sambutan"] = False
if wait["lang"] == "JP":
nadya.sendText(msg.to,"Sudah Off(pโฒ๏ธตโตใ)")
elif "Sider on" in msg.text:
try:
del cctv['point'][msg.to]
del cctv['sidermem'][msg.to]
del cctv['cyduk'][msg.to]
except:
pass
cctv['point'][msg.to] = msg.id
cctv['sidermem'][msg.to] = ""
cctv['cyduk'][msg.to]=True
wait["Sider"] = True
nadya.sendText(msg.to,"Siap On Cek Sider")
elif "Sider off" in msg.text:
if msg.to in cctv['point']:
cctv['cyduk'][msg.to]=False
wait["Sider"] = False
nadya.sendText(msg.to, "Cek Sider Off")
else:
nadya.sendText(msg.to, "Heh Belom Di Set")
elif msg.text in ["Status"]:
md = ""
if wait["Sambutan"] == True: md+="โ โฉโ๏ธ Sambutan : On\n"
else:md+="โ โฉโ Sambutan : Off\n"
if wait["AutoJoin"] == True: md+="โ โฉโ๏ธ Auto Join : On\n"
else: md +="โ โฉโ Auto Join : Off\n"
if wait["AutoJoinCancel"] == True: md+="โ โฉโ๏ธ Auto Join Cancel : On\n"
else: md +="โ โฉโ Auto Join Cancel : Off\n"
if wait["Contact"] == True: md+="โ โฉโ๏ธ Info Contact : On\n"
else: md+="โ โฉโ Info Contact : Off\n"
if wait["AutoCancel"] == True:md+="โ โฉโ๏ธ Auto Cancel : On\n"
else: md+= "โ โฉโ Auto Cancel : Off\n"
if wait["inviteprotect"] == True:md+="โ โฉโ๏ธ Invite Protect : On\n"
else: md+= "โ โฉโ Invite Protect : Off\n"
if wait["Qr"] == True: md+="โ โฉโ๏ธ Qr Protect : On\n"
else:md+="โ โฉโ Qr Protect : Off\n"
if wait["AutoKick"] == True: md+="โ โฉโ๏ธ Auto Kick : On\n"
else:md+="โ โฉโ Auto Kick : Off\n"
if wait["alwaysRead"] == True: md+="โ โฉโ๏ธ Always Read : On\n"
else:md+="โ โฉโ Always Read: Off\n"
if wait["detectMention"] == True: md+="โ โฉโ๏ธ Auto Respon1 : On\n"
else:md+="โ โฉโ Auto Respon1 : Off\n"
if wait["detectMention2"] == True: md+="โ โฉโ๏ธ Auto Respon2 : On\n"
else:md+="โ โฉโ Auto Respon2 : Off\n"
if wait["detectMention3"] == True: md+="โ โฉโ๏ธ Auto Respon3 : On\n"
else:md+="โ โฉโ Auto Respon3 : Off\n"
if wait["kickMention"] == True: md+="โ โฉโ๏ธ Auto Respon Kick : On\n"
else:md+="โ โฉโ Auto Respon Kick : Off\n"
if wait["Sider"] == True: md+="โ โฉโ๏ธ Auto Sider : On\n"
else:md+="โ โฉโ Auto Sider: Off\n"
if wait["Simi"] == True: md+="โ โฉโ๏ธ Simisimi : On\n"
else:md+="โ โฉโ Simisimi: Off\n"
nadya.sendText(msg.to,"โโโโโโโโโโโโโโโโโโโโโ\n""โ โโ F R Y A N T S T A T U S โโ\n""โ โโโโโโโโโโโโโโโโโโโโ\n"+md+"โโโโโโโโโโโโโโโโโโโโโ")
elif msg.text in ["Gift","gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
nadya.sendMessage(msg)
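        # Gift1..Gift10 share one pattern: resolve the mentioned display name to mids,
        # then push a preset gift/sticker payload (contentType 9) to each matched member.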
elif "Gift1 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift1 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = nadya.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
                nadya.sendText(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '1',
'STKPKGID': '1380280'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift2 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift2 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = nadya.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
                nadya.sendText(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '2',
'STKPKGID': '1360738'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift3 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift3 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = nadya.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
                nadya.sendText(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '3',
'STKPKGID': '1395389'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift4 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift4 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = nadya.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
                nadya.sendText(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '4',
'STKPKGID': '1329191'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift5 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift5 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = nadya.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
                nadya.sendText(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '1',
'STKPKGID': '9057'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift6 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift6 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = nadya.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
                nadya.sendText(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '2',
'STKPKGID': '9167'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift7 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift7 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = nadya.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
                nadya.sendText(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '3',
'STKPKGID': '7334'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift8 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift8 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = nadya.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
                nadya.sendText(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '1',
'STKPKGID': '1380280'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift9 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift9 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = nadya.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
                nadya.sendText(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '4',
'STKPKGID': '1405277'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift10 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift10 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = nadya.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
                nadya.sendText(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '1',
'STKPKGID': '1296261'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
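        # Keyword sticker replies: contentType 7 with STKID/STKPKGID/STKVER picks a
        # sticker from LINE's built-in pack 1 for common chat words.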
elif msg.text.lower() in ["wkwkwk","wkwk","hahaha","haha"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '100',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["hehehe","hehe"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '10',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["galau"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '9',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["you","kau","kamu"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '7',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["marah","hadeuh","hadeh"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '6',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["please","pliss","mohon","tolong"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '4',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["haa","haaa","kaget"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '3',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["lucu","ngakak","lol"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '110',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["hmm","hmmm"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '101',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["tidur"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '1',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["gemes"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '2',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["cantik","imut"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '5',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["nyanyi","lalala"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '11',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["gugup"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '8',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["ok","oke","okay","oce","okee","sip","siph"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '13',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["mantab","mantap","nice","keren"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '14',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["ngejek"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '15',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["nangis","sedih"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '16',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["woi","kampret"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '102',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["huft"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '104',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif "tag all" == msg.text.lower():
group = nadya.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
            jml = len(nama)
            # Mention in batches of 100 (LINE's per-message mention limit); stepping the
            # slice by 100 covers every member, so nobody is skipped at the boundaries.
            if jml <= 500:
                for i in range(0, jml, 100):
                    summon(msg.to, nama[i:i + 100])
if jml > 500:
print "Terlalu Banyak Men 500+"
cnt = Message()
cnt.text = "Jumlah:\n" + str(jml) + " Members"
cnt.to = msg.to
nadya.sendMessage(cnt)
elif "tagall" == msg.text.lower():
group = nadya.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
            jml = len(nama)
            # Mention in batches of 100 (LINE's per-message mention limit); stepping the
            # slice by 100 covers every member, so nobody is skipped at the boundaries.
            if jml <= 500:
                for i in range(0, jml, 100):
                    summon(msg.to, nama[i:i + 100])
if jml > 500:
print "Terlalu Banyak Men 500+"
cnt = Message()
cnt.text = "Jumlah:\n" + str(jml) + " Members"
cnt.to = msg.to
nadya.sendMessage(cnt)
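        # Read-receipt tracking: "Setview" resets dataSeen/<group>.txt, and "Viewseen"
        # parses the "mid|timestamp" lines collected there and resolves them to names.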
elif msg.text in ["Setview","Setpoint","Cctv"]:
subprocess.Popen("echo '' > dataSeen/"+msg.to+".txt", shell=True, stdout=subprocess.PIPE)
nadya.sendText(msg.to, "โCheckpoint Checkedโ")
print "Setview"
elif msg.text in ["Viewseen","Check","Ciduk","Cyduk"]:
lurkGroup = ""
dataResult, timeSeen, contacts, userList, timelist, recheckData = [], [], [], [], [], []
with open('dataSeen/'+msg.to+'.txt','r') as rr:
contactArr = rr.readlines()
for v in xrange(len(contactArr) -1,0,-1):
num = re.sub(r'\n', "", contactArr[v])
contacts.append(num)
pass
contacts = list(set(contacts))
for z in range(len(contacts)):
arg = contacts[z].split('|')
userList.append(arg[0])
timelist.append(arg[1])
uL = list(set(userList))
for ll in range(len(uL)):
try:
getIndexUser = userList.index(uL[ll])
timeSeen.append(time.strftime("%H:%M:%S", time.localtime(int(timelist[getIndexUser]) / 1000)))
recheckData.append(userList[getIndexUser])
except IndexError:
                    pass  # viewer entry could not be matched to a contact; skip it
contactId = nadya.getContacts(recheckData)
for v in range(len(recheckData)):
dataResult.append(contactId[v].displayName + ' ('+timeSeen[v]+')')
pass
if len(dataResult) > 0:
tukang = "โโโโโโโโโโโโโโโโโโโโโโโโโโ\nโ โโ LIST VIEWERS โโ\nโ โโโโโโโโโโโโโโโโโโโโโโโโโ\nโ โฉ"
grp = '\nโ โฉ '.join(str(f) for f in dataResult)
total = '\nโ โโโโโโโโโโโโโโโโโโโโโโโโโ\nโ โฉ Total %i Viewers (%s)' % (len(dataResult), datetime.now().strftime('%H:%M:%S')) + "\nโโโโโโโโโโโโโโโโโโโโโโโโโโ"
nadya.sendText(msg.to, "%s %s %s" % (tukang, grp, total))
subprocess.Popen("echo '' > dataSeen/"+msg.to+".txt", shell=True, stdout=subprocess.PIPE)
nadya.sendText(msg.to, "โAuto Checkpointโ")
else:
nadya.sendText(msg.to, "โBelum Ada Viewersโ")
print "Viewseen"
elif "Kick " in msg.text:
if msg.from_ in admin:
                if 'MENTION' in msg.contentMetadata.keys():
names = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
print mentionees
for mention in mentionees:
nadya.kickoutFromGroup(msg.to,[mention['M']])
elif "Set member: " in msg.text:
if msg.from_ in admin:
jml = msg.text.replace("Set member: ","")
wait["Members"] = int(jml)
nadya.sendText(msg.to, "Jumlah minimal member telah di set : "+jml)
elif "Add all" in msg.text:
thisgroup = nadya.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
nadya.findAndAddContactsByMids(mi_d)
nadya.sendText(msg.to,"Success Add all")
elif msg.text in ["Invite"]:
wait["invite"] = True
nadya.sendText(msg.to,"Send Contact")
elif msg.text in ["Auto like"]:
wait["likeOn"] = True
nadya.sendText(msg.to,"Shere Post Kamu Yang Mau Di Like!")
elif msg.text in ["Steal contact"]:
wait["steal"] = True
nadya.sendText(msg.to,"Send Contact")
elif msg.text in ["Giftbycontact"]:
wait["gift"] = True
nadya.sendText(msg.to,"Send Contact")
elif msg.text in ["Copycontact"]:
wait["copy"] = True
nadya.sendText(msg.to,"Send Contact")
elif msg.text in ["Sticker on"]:
wait["sticker"] = True
nadya.sendText(msg.to,"Sticker ID Detect Already On.")
elif msg.text in ["Bot off"]:
wait["Bot"] = False
nadya.sendText(msg.to,"Bot Sudah Di Nonaktifkan.")
elif "Recover" in msg.text:
thisgroup = nadya.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
nadya.createGroup("Recover", mi_d)
nadya.sendText(msg.to,"Success recover")
elif ("Gn: " in msg.text):
if msg.toType == 2:
X = nadya.getGroup(msg.to)
X.name = msg.text.replace("Gn: ","")
nadya.updateGroup(X)
else:
nadya.sendText(msg.to,"It can't be used besides the group.")
elif "Kick: " in msg.text:
midd = msg.text.replace("Kick: ","")
if midd not in admin:
nadya.kickoutFromGroup(msg.to,[midd])
else:
nadya.sendText(msg.to,"Admin Detected")
elif "Invite: " in msg.text:
midd = msg.text.replace("Invite: ","")
nadya.findAndAddContactsByMid(midd)
nadya.inviteIntoGroup(msg.to,[midd])
elif "Invite creator" in msg.text:
midd = "u14f64e139a3817afaabe27d237afb36b"
nadya.inviteIntoGroup(msg.to,[midd])
elif msg.text in ["Welcome","welcome","Welkam","welkam","Wc","wc"]:
gs = nadya.getGroup(msg.to)
nadya.sendText(msg.to,"Selamat Datang Di "+ gs.name)
msg.contentType = 7
msg.contentMetadata={'STKID': '247',
'STKPKGID': '3',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif "Bc: " in msg.text:
bc = msg.text.replace("Bc: ","")
gid = nadya.getGroupIdsJoined()
if msg.from_ in Creator:
for i in gid:
nadya.sendText(i,"=======[BROADCAST]=======\n\n"+bc+"\n\nContact Me : line.me/ti/p/~nad_nad.")
nadya.sendText(msg.to,"Success BC BosQ")
else:
nadya.sendText(msg.to,"Khusus Admin")
elif msg.text in ["Cancel"]:
gid = nadya.getGroupIdsInvited()
for i in gid:
nadya.rejectGroupInvitation(i)
nadya.sendText(msg.to,"All invitations have been refused")
elif msg.text in ["Gurl"]:
if msg.toType == 2:
x = nadya.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
nadya.updateGroup(x)
gurl = nadya.reissueGroupTicket(msg.to)
nadya.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
nadya.sendText(msg.to,"Can't be used outside the group")
else:
nadya.sendText(msg.to,"Not for use less than group")
elif msg.text in ["timeline"]:
try:
url = nadya.activity(limit=5)
nadya.sendText(msg.to,url['result']['posts'][0]['postInfo']['postId'])
except Exception as E:
print E
elif msg.text in ["@bye","@Bye"]:
nadya.leaveGroup(msg.to)
elif msg.text in ["Absen"]:
nadya.sendText(msg.to,"Hadir!!")
elif msg.text.lower() in ["respon"]:
nadya.sendText(msg.to,responsename)
elif msg.text in ["Sp","Speed","speed"]:
start = time.time()
print("Speed")
elapsed_time = time.time() - start
nadya.sendText(msg.to, "Progress...")
nadya.sendText(msg.to, "%sseconds" % (elapsed_time))
elif msg.text in ["Speed test"]:
start = time.time()
nadya.sendText(msg.to, "Progress...")
elapsed_time = time.time() - start
nadya.sendText(msg.to, "%sseconds" % (elapsed_time))
elif msg.text in ["Ban"]:
if msg.from_ in admin:
wait["wblacklist"] = True
nadya.sendText(msg.to,"send contact")
elif msg.text in ["Unban"]:
if msg.from_ in admin:
wait["dblacklist"] = True
nadya.sendText(msg.to,"send contact")
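        # Blacklist management: Ban/Unban store mids in wait["blacklist"] and persist the
        # dict to st2__b.json; "Kill"/"Kill ban" kick any blacklisted member in the group.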
elif "Ban @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "@Ban by mention"
_name = msg.text.replace("Ban @","")
_nametarget = _name.rstrip(' ')
gs = nadya.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
nadya.sendText(msg.to,"Not found")
else:
for target in targets:
if target not in admin:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
nadya.sendText(msg.to,"Succes BosQ")
except:
nadya.sendText(msg.to,"Error")
else:
nadya.sendText(msg.to,"Admin Detected~")
elif msg.text in ["Banlist","Ban list"]:
if msg.from_ in admin:
if wait["blacklist"] == {}:
nadya.sendText(msg.to,"Tidak Ada")
else:
mc = ""
for mi_d in wait["blacklist"]:
mc += "->" +nadya.getContact(mi_d).displayName + "\n"
nadya.sendText(msg.to,"===[Blacklist User]===\n"+mc)
elif "Unban @" in msg.text:
if msg.toType == 2:
print "@Unban by mention"
if msg.from_ in admin:
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip(' ')
gs = nadya.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
nadya.sendText(msg.to,"Not found")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
nadya.sendText(msg.to,"Succes BosQ")
except:
nadya.sendText(msg.to,"Succes BosQ")
elif msg.text.lower() == 'clear ban':
if msg.from_ in admin:
wait["blacklist"] = {}
nadya.sendText(msg.to,"ใฝ( ^ฯ^)๏พโ โUnbanned All Successโ โ")
elif msg.text in ["Kill ban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = nadya.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
nadya.sendText(msg.to,"There was no blacklist user")
return
for jj in matched_list:
nadya.kickoutFromGroup(msg.to,[jj])
nadya.sendText(msg.to,"Blacklist emang pantas tuk di usir")
else:
nadya.sendText(msg.to, "Khusus creator")
elif msg.text in ["Kill"]:
if msg.toType == 2:
if msg.from_ in admin:
group = nadya.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
nadya.sendText(msg.to,"Fuck You")
return
for jj in matched_list:
try:
nadya.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
elif "Kickall" == msg.text:
if msg.from_ in Creator:
if msg.toType == 2:
print "Kick all member"
_name = msg.text.replace("Kickall","")
gs = nadya.getGroup(msg.to)
nadya.sendText(msg.to,"Dadaaah~")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
nadya.sendText(msg.to,"Not found.")
else:
for target in targets:
if target not in admin:
try:
nadya.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except Exception as e:
nadya.sendText(msg.to,str(e))
nadya.inviteIntoGroup(msg.to, targets)
elif msg.text in ["Bot restart","Reboot"]:
if msg.from_ in Creator:
nadya.sendText(msg.to, "Bot Has Been Restarted...")
restart_program()
print "@Restart"
else:
nadya.sendText(msg.to, "No Access")
elif msg.text in ["Turn off"]:
if msg.from_ in Creator:
try:
import sys
sys.exit()
except:
pass
elif 'Crash' in msg.text:
if msg.from_ in Creator:
msg.contentType = 13
msg.contentMetadata = {'mid': "NADYA,'"}
nadya.sendMessage(msg)
elif "Mycopy @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("Mycopy @","")
_nametarget = _name.rstrip(' ')
gs = nadya.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
nadya.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
nadya.CloneContactProfile(target)
nadya.sendText(msg.to, "Copied (^_^)")
except Exception as e:
print e
elif msg.text in ["Mybackup"]:
try:
nadya.updateDisplayPicture(backup1.pictureStatus)
nadya.updateProfile(backup1)
nadya.sendText(msg.to, "Done (^_^)")
except Exception as e:
nadya.sendText(msg.to, str(e))
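        # Music commands query the joox proxy API: /musik sends the track and download
        # link, /lirik sends lyrics only, /musrik sends the track together with lyrics.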
elif "/musik " in msg.text:
songname = msg.text.replace("/musik ","")
params = {"songname": songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
abc = song[3].replace('https://','http://')
nadya.sendText(msg.to, "Title : " + song[0] + "\nLength : " + song[1] + "\nLink download : " + song[4])
nadya.sendText(msg.to, "Lagu " + song[0] + "\nSedang Di Prosses... Tunggu Sebentar ^_^ ")
nadya.sendAudioWithURL(msg.to,abc)
nadya.sendText(msg.to, "Selamat Mendengarkan Lagu " + song[0])
elif '/lirik ' in msg.text.lower():
try:
songname = msg.text.lower().replace('/lirik ','')
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'Lyric Lagu ('
hasil += song[0]
hasil += ')\n\n'
hasil += song[5]
nadya.sendText(msg.to, hasil)
except Exception as wak:
nadya.sendText(msg.to, str(wak))
elif "/musrik " in msg.text:
songname = msg.text.replace("/musrik ","")
params = {"songname": songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
abc = song[3].replace('https://','http://')
hasil = 'Lyric Lagu ('
hasil += song[0]
hasil += ')\n\n'
hasil += song[5]
nadya.sendText(msg.to, "Lagu " + song[0] + "\nSedang Di Prosses... Tunggu Sebentar ^_^ ")
nadya.sendAudioWithURL(msg.to,abc)
nadya.sendText(msg.to, "Title : " + song[0] + "\nLength : " + song[1] + "\nLink download : " + song[4] +"\n\n" + hasil)
nadya.sendText(msg.to, "Selamat Mendengarkan Lagu " + song[0])
elif "Fancytext: " in msg.text:
txt = msg.text.replace("Fancytext: ", "")
nadya.kedapkedip(msg.to,txt)
print "[Command] Kedapkedip"
elif "cover @" in msg.text:
if msg.toType == 2:
cover = msg.text.replace("cover @","")
_nametarget = cover.rstrip(' ')
gs = nadya.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
nadya.sendText(msg.to,"Not found")
else:
for target in targets:
try:
h = nadya.channel.getHome(target)
objId = h["result"]["homeInfo"]["objectId"]
nadya.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/myhome/c/download.nhn?userid=" + target + "&oid=" + objId)
except Exception as error:
print error
nadya.sendText(msg.to,"Upload image failed.")
elif "Cover @" in msg.text:
if msg.toType == 2:
cover = msg.text.replace("Cover @","")
_nametarget = cover.rstrip(' ')
gs = nadya.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
nadya.sendText(msg.to,"Not found")
else:
for target in targets:
try:
h = nadya.channel.getHome(target)
objId = h["result"]["homeInfo"]["objectId"]
nadya.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/myhome/c/download.nhn?userid=" + target + "&oid=" + objId)
except Exception as error:
print error
nadya.sendText(msg.to,"Upload image failed.")
elif "Cpp" in msg.text:
if msg.from_ in admin:
path = "nadya.jpg"
nadya.sendText(msg.to,"Update PP :")
nadya.sendImage(msg.to,path)
nadya.updateProfilePicture(path)
elif "pp @" in msg.text:
if msg.toType == 2:
cover = msg.text.replace("pp @","")
_nametarget = cover.rstrip(' ')
gs = nadya.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
nadya.sendText(msg.to,"Not found")
else:
for target in targets:
try:
h = nadya.getContact(target)
nadya.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
except Exception as error:
print error
nadya.sendText(msg.to,"Upload image failed.")
elif "Pp @" in msg.text:
if msg.toType == 2:
cover = msg.text.replace("Pp @","")
_nametarget = cover.rstrip(' ')
gs = nadya.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
nadya.sendText(msg.to,"Not found")
else:
for target in targets:
try:
h = nadya.getContact(target)
nadya.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
except Exception as error:
print error
nadya.sendText(msg.to,"Upload image failed.")
elif msg.text.lower() in ["pap owner","pap creator"]:
link = ["http://dl.profile.line-cdn.net/0hNPsZWL9WEX9OIz0lhyFuKHJmHxI5DRc3NkJaETwkRklqGwQoJkNbTGklHRo2G1B7cxFXH2NxSU03"]
pilih = random.choice(link)
nadya.sendImageWithURL(msg.to,pilih)
elif "Spam: " in msg.text:
bctxt = msg.text.replace("Spam: ", "")
t = 10
while(t):
nadya.sendText(msg.to, (bctxt))
t-=1
elif "Scbc " in msg.text:
bctxt = msg.text.replace("Scbc ", "")
orang = nadya.getAllContactIds()
            for manusia in orang:
                t = 20  # reset the counter per contact so every contact gets the full burst
                while(t):
                    nadya.sendText(manusia, (bctxt))
                    t-=1
elif "Cbc " in msg.text:
broadcasttxt = msg.text.replace("Cbc ", "")
orang = nadya.getAllContactIds()
for manusia in orang:
nadya.sendText(manusia, (broadcasttxt))
elif '/ig ' in msg.text.lower():
try:
instagram = msg.text.lower().replace("/ig ","")
html = requests.get('https://www.instagram.com/' + instagram + '/?')
soup = BeautifulSoup(html.text, 'html.parser')
data = soup.find_all('meta', attrs={'property':'og:description'})
text = data[0].get('content').split()
data1 = soup.find_all('meta', attrs={'property':'og:image'})
text1 = data1[0].get('content').split()
tj = text1[0].replace("s150x150/","")
user = "Name: " + text[-2] + "\n"
user1 = "Username: " + text[-1] + "\n"
followers = "Followers: " + text[0] + "\n"
following = "Following: " + text[2] + "\n"
post = "Post: " + text[4] + "\n"
link = "Link: " + "https://www.instagram.com/" + instagram
detail = "========INSTAGRAM INFO ========\n"
details = "\n========INSTAGRAM INFO ========"
nadya.sendText(msg.to, detail + user + user1 + followers + following + post + link + details)
nadya.sendImageWithURL(msg.to, tj)
except Exception as njer:
nadya.sendText(msg.to, str(njer))
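        # Checkig pages through an Instagram profile by parsing the window._sharedData
        # JSON embedded in the HTML and re-sending each photo or video it finds.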
elif "Checkig " in msg.text:
separate = msg.text.split(" ")
user = msg.text.replace(separate[0] + " ","")
if user.startswith("@"):
user = user.replace("@","")
profile = "https://www.instagram.com/" + user
with requests.session() as x:
x.headers['user-agent'] = 'Mozilla/5.0'
end_cursor = ''
for count in range(1, 999):
print('PAGE: ', count)
r = x.get(profile, params={'max_id': end_cursor})
data = re.search(r'window._sharedData = (\{.+?});</script>', r.text).group(1)
j = json.loads(data)
for node in j['entry_data']['ProfilePage'][0]['user']['media']['nodes']:
if node['is_video']:
page = 'https://www.instagram.com/p/' + node['code']
r = x.get(page)
url = re.search(r'"video_url": "([^"]+)"', r.text).group(1)
print(url)
nadya.sendVideoWithURL(msg.to,url)
else:
print (node['display_src'])
nadya.sendImageWithURL(msg.to,node['display_src'])
end_cursor = re.search(r'"end_cursor": "([^"]+)"', r.text).group(1)
elif 'Youtubelink: ' in msg.text:
try:
                textToSearch = (msg.text).replace('Youtubelink: ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class':'yt-uix-tile-link'})
nadya.sendText(msg.to,'https://www.youtube.com' + results['href'])
except:
nadya.sendText(msg.to,"Could not find it")
elif 'Youtubevideo: ' in msg.text:
try:
textToSearch = (msg.text).replace('Youtubevideo: ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class': 'yt-uix-tile-link'})
nadya.sendVideoWithURL(msg.to,'https://www.youtube.com' + results['href'])
except:
nadya.sendText(msg.to, "Could not find it")
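        # The Say-* commands synthesize speech with gTTS in the requested language and
        # send the generated hasil.mp3 back as an audio message.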
elif "Say-id " in msg.text:
say = msg.text.replace("Say-id ","")
lang = 'id'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif "Say-en " in msg.text:
say = msg.text.replace("Say-en ","")
lang = 'en'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif "Say-jp " in msg.text:
say = msg.text.replace("Say-jp ","")
lang = 'ja'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif "Say welcome" in msg.text:
gs = nadya.getGroup(msg.to)
say = msg.text.replace("Say welcome","Selamat Datang Di "+ gs.name)
lang = 'id'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower() in ["hi","hai","halo","hallo"]:
            beb = "Hi Sayang " + nadya.getContact(msg.from_).displayName
nadya.sendText(msg.to,beb)
elif "playstore " in msg.text.lower():
tob = msg.text.lower().replace("playstore ","")
nadya.sendText(msg.to,"Sedang Mencari...")
nadya.sendText(msg.to,"Title : "+tob+"\nSource : Google Play\nLink : https://play.google.com/store/search?q=" + tob)
nadya.sendText(msg.to,"Tuh Linknya Kak (^_^)")
elif "Mid @" in msg.text:
_name = msg.text.replace("Mid @","")
_nametarget = _name.rstrip(' ')
gs = nadya.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
nadya.sendText(msg.to, g.mid)
else:
pass
elif "Mybio " in msg.text:
string = msg.text.replace("Mybio ","")
if len(string.decode('utf-8')) <= 500:
profile = nadya.getProfile()
profile.statusMessage = string
nadya.updateProfile(profile)
nadya.sendText(msg.to,"Done")
elif "Myname " in msg.text:
if msg.from_ in Creator:
string = msg.text.replace("Myname ","")
if len(string.decode('utf-8')) <= 5000:
profile = nadya.getProfile()
profile.displayName = string
nadya.updateProfile(profile)
nadya.sendText(msg.to,"Done")
elif msg.text.lower() in ["mymid","myid"]:
middd = "Name : " +nadya.getContact(msg.from_).displayName + "\nMid : " +msg.from_
nadya.sendText(msg.to,middd)
elif msg.text.lower() in ["me"]:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.from_}
nadya.sendMessage(msg)
elif "/apakah " in msg.text:
apk = msg.text.replace("/apakah ","")
rnd = ["Ya","Tidak","Bisa Jadi","Mungkin"]
p = random.choice(rnd)
lang = 'id'
tts = gTTS(text=p, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif "/hari " in msg.text:
apk = msg.text.replace("/hari ","")
rnd = ["Senin","Selasa","Rabu","Kamis","Jumat","Sabtu","Minggu"]
p = random.choice(rnd)
lang = 'id'
tts = gTTS(text=p, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif "/berapa " in msg.text:
apk = msg.text.replace("/berapa ","")
rnd = ['10%','20%','30%','40%','50%','60%','70%','80%','90%','100%','0%']
p = random.choice(rnd)
lang = 'id'
tts = gTTS(text=p, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif "/berapakah " in msg.text:
apk = msg.text.replace("/berapakah ","")
rnd = ['1','2','3','4','5','6','7','8','9','10','Tidak Ada']
p = random.choice(rnd)
lang = 'id'
tts = gTTS(text=p, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif "/kapan " in msg.text:
apk = msg.text.replace("/kapan ","")
rnd = ["kapan kapan","besok","satu abad lagi","Hari ini","Tahun depan","Minggu depan","Bulan depan","Sebentar lagi","Tidak Akan Pernah"]
p = random.choice(rnd)
lang = 'id'
tts = gTTS(text=p, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text in ["Simisimi on","Simisimi:on"]:
settings["simiSimi"][msg.to] = True
wait["Simi"] = True
nadya.sendText(msg.to," Simisimi Di Aktifkan")
elif msg.text in ["Simisimi off","Simisimi:off"]:
settings["simiSimi"][msg.to] = False
wait["Simi"] = False
nadya.sendText(msg.to,"Simisimi Di Nonaktifkan")
elif "Image " in msg.text:
search = msg.text.replace("Image ","")
url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search
raw_html = (download_page(url))
items = []
items = items + (_images_get_all_items(raw_html))
path = random.choice(items)
print path
try:
nadya.sendImageWithURL(msg.to,path)
except:
pass
elif "Youtubesearch: " in msg.text:
            query = msg.text.replace("Youtubesearch: ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'http://www.youtube.com/results'
params = {'search_query': query}
r = s.get(url, params=params)
soup = BeautifulSoup(r.content, 'html.parser')
hasil = ""
for a in soup.select('.yt-lockup-title > a[title]'):
if '&list=' not in a['href']:
hasil += ''.join((a['title'],'\nUrl : http://www.youtube.com' + a['href'],'\n\n'))
nadya.sendText(msg.to,hasil)
print '[Command] Youtube Search'
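        # Two translation paths are used below: Tr-* goes through the Translator() client,
        # while Id@en/En@id/Id@th/Th@id scrape translate.google.com/m directly.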
elif "Tr-id " in msg.text:
isi = msg.text.replace("Tr-id ","")
translator = Translator()
hasil = translator.translate(isi, dest='id')
A = hasil.text
A = A.encode('utf-8')
nadya.sendText(msg.to, A)
elif "Tr-en " in msg.text:
isi = msg.text.replace("Tr-en ","")
translator = Translator()
hasil = translator.translate(isi, dest='en')
A = hasil.text
A = A.encode('utf-8')
nadya.sendText(msg.to, A)
elif "Tr-th " in msg.text:
isi = msg.text.replace("Tr-th ","")
translator = Translator()
hasil = translator.translate(isi, dest='th')
A = hasil.text
A = A.encode('utf-8')
nadya.sendText(msg.to, A)
elif "Id@en" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'en'
kata = msg.text.replace("Id@en ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
nadya.sendText(msg.to,"----Dari Indonesia----\n" + "" + kata + "\n\n----Ke Inggris----\n" + "" + result)
elif "En@id" in msg.text:
bahasa_awal = 'en'
bahasa_tujuan = 'id'
kata = msg.text.replace("En@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
nadya.sendText(msg.to,"----Dari Inggris----\n" + "" + kata + "\n\n----Ke Indonesia----\n" + "" + result)
elif "Id@th" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'th'
            kata = msg.text.replace("Id@th ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
nadya.sendText(msg.to,"----Dari Indonesia----\n" + "" + kata + "\n\n----Ke Thailand----\n" + "" + result)
elif "Th@id" in msg.text:
bahasa_awal = 'th'
bahasa_tujuan = 'id'
            kata = msg.text.replace("Th@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
nadya.sendText(msg.to,"----Dari Thailand----\n" + "" + kata + "\n\n----Ke Indonesia----\n" + "" + result)
elif msg.text in ["Friendlist"]:
contactlist = nadya.getAllContactIds()
kontak = nadya.getContacts(contactlist)
num=1
msgs="โโโโโโโโโList Friendโโโโโโโโโ"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\nโโโโโโโโโList Friendโโโโโโโโโ\n\nTotal Friend : %i" % len(kontak)
nadya.sendText(msg.to, msgs)
elif msg.text in ["Memlist"]:
kontak = nadya.getGroup(msg.to)
group = kontak.members
num=1
            msgs="โโโโโโโโโList Memberโโโโโโโโโ"
for ids in group:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\nโโโโโโโโโList Memberโโโโโโโโโ\n\nTotal Members : %i" % len(group)
nadya.sendText(msg.to, msgs)
elif "Getvid @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Getvid @","")
_nametarget = _name.rstrip(' ')
gs = nadya.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
nadya.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = nadya.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
nadya.sendVideoWithURL(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
elif "Getgroup image" in msg.text:
group = nadya.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
nadya.sendImageWithURL(msg.to,path)
elif "Urlgroup image" in msg.text:
group = nadya.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
nadya.sendText(msg.to,path)
elif "Getname" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = nadya.getContact(key1)
cu = nadya.channel.getCover(key1)
try:
nadya.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
except:
nadya.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
elif "Getprofile" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = nadya.getContact(key1)
cu = nadya.channel.getCover(key1)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
nadya.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
nadya.sendText(msg.to,"Profile Picture " + contact.displayName)
nadya.sendImageWithURL(msg.to,image)
nadya.sendText(msg.to,"Cover " + contact.displayName)
nadya.sendImageWithURL(msg.to,path)
except:
pass
elif "Getcontact" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mmid = nadya.getContact(key1)
msg.contentType = 13
msg.contentMetadata = {"mid": key1}
nadya.sendMessage(msg)
elif "Getinfo" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = nadya.getContact(key1)
cu = nadya.channel.getCover(key1)
try:
nadya.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nHeader :\n" + str(cu))
except:
nadya.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\n" + str(cu))
elif "Getbio" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = nadya.getContact(key1)
cu = nadya.channel.getCover(key1)
try:
nadya.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
except:
nadya.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
elif msg.text.lower() == 'runtime':
eltime = time.time() - mulai
van = "Bot Sudah Berjalan Selama :\n"+waktu(eltime)
nadya.sendText(msg.to,van)
elif "Checkdate " in msg.text:
tanggal = msg.text.replace("Checkdate ","")
r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
data=r.text
data=json.loads(data)
lahir = data["data"]["lahir"]
usia = data["data"]["usia"]
ultah = data["data"]["ultah"]
zodiak = data["data"]["zodiak"]
nadya.sendText(msg.to,"========== I N F O R M A S I ==========\n"+"Date Of Birth : "+lahir+"\nAge : "+usia+"\nUltah : "+ultah+"\nZodiak : "+zodiak+"\n========== I N F O R M A S I ==========")
elif msg.text in ["Kalender","Time","Waktu"]:
timeNow = datetime.now()
timeHours = datetime.strftime(timeNow,"(%H:%M)")
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
inihari = datetime.today()
hr = inihari.strftime('%A')
bln = inihari.strftime('%m')
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
            bln = bulan[int(bln) - 1]  # '%m' is zero-padded ('01'..'12'), so index the month name directly
rst = hasil + ", " + inihari.strftime('%d') + " - " + bln + " - " + inihari.strftime('%Y') + "\nJam : [ " + inihari.strftime('%H:%M:%S') + " ]"
nadya.sendText(msg.to, rst)
elif "SearchID: " in msg.text:
userid = msg.text.replace("SearchID: ","")
contact = nadya.findContactsByUserid(userid)
msg.contentType = 13
msg.contentMetadata = {'mid': contact.mid}
nadya.sendMessage(msg)
elif "Searchid: " in msg.text:
userid = msg.text.replace("Searchid: ","")
contact = nadya.findContactsByUserid(userid)
msg.contentType = 13
msg.contentMetadata = {'mid': contact.mid}
nadya.sendMessage(msg)
elif "removechat" in msg.text.lower():
if msg.from_ in admin:
try:
nadya.removeAllMessages(op.param2)
print "[Command] Remove Chat"
nadya.sendText(msg.to,"Done")
except Exception as error:
print error
nadya.sendText(msg.to,"Error")
elif "Invitemeto: " in msg.text:
if msg.from_ in admin:
gid = msg.text.replace("Invitemeto: ","")
if gid == "":
nadya.sendText(msg.to,"Invalid group id")
else:
try:
nadya.findAndAddContactsByMid(msg.from_)
nadya.inviteIntoGroup(gid,[msg.from_])
except:
nadya.sendText(msg.to,"Mungkin Saya Tidak Di Dalaam Grup Itu")
elif msg.text in ["Glist"]:
nadya.sendText(msg.to, "Tunggu Sebentar. . .")
gid = nadya.getGroupIdsJoined()
h = ""
for i in gid:
h += "โ โฉ" + "%s\n" % (nadya.getGroup(i).name +" ~> ["+str(len(nadya.getGroup(i).members))+"]")
nadya.sendText(msg.to,"โโโโโโโโโโโโโโโโโโโโโโโโโโ\nโ โโ LIST GROUPSโโ\nโ โโโโโโโโโโโโโโโโโโโโโโโโโ\n" + h + "โ โโโโโโโโโโโโโโโโโโโโโโโโโ" + "\nโ Total Groups =" +" ["+str(len(gid))+"]\nโโโโโโโโโโโโโโโโโโโโโโโโโโ")
elif msg.text in ["Glistmid"]:
gruplist = nadya.getGroupIdsJoined()
kontak = nadya.getGroups(gruplist)
num=1
msgs="โโโโโโโโโList GrupMidโโโโโโโโโ"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.id)
num=(num+1)
msgs+="\nโโโโโโโโโList GrupMidโโโโโโโโโ\n\nTotal Grup : %i" % len(kontak)
nadya.sendText(msg.to, msgs)
elif "Google: " in msg.text:
a = msg.text.replace("Google: ","")
b = urllib.quote(a)
nadya.sendText(msg.to,"Sedang Mencari...")
nadya.sendText(msg.to, "https://www.google.com/" + b)
nadya.sendText(msg.to,"Itu Dia Linknya. . .")
elif "Details group: " in msg.text:
if msg.from_ in admin:
gid = msg.text.replace("Details group: ","")
if gid in [""," "]:
nadya.sendText(msg.to,"Grup id tidak valid")
else:
try:
groups = nadya.getGroup(gid)
if groups.members is not None:
members = str(len(groups.members))
else:
members = "0"
if groups.invitee is not None:
pendings = str(len(groups.invitee))
else:
pendings = "0"
h = "[" + groups.name + "]\n -+GroupID : " + gid + "\n -+Members : " + members + "\n -+MembersPending : " + pendings + "\n -+Creator : " + groups.creator.displayName + "\n -+GroupPicture : http://dl.profile.line.naver.jp/" + groups.pictureStatus
nadya.sendText(msg.to,h)
except Exception as error:
nadya.sendText(msg.to,(error))
elif "Cancel invite: " in msg.text:
if msg.from_ in admin:
                gids = msg.text.replace("Cancel invite: ","")
                # getGroup returns a single Group object, so handle it directly instead of
                # iterating over it.
                try:
                    gid = nadya.getGroup(gids)
                except:
                    gid = None
                if gid is not None:
                    try:
                        nadya.rejectGroupInvitation(gids)
                        nadya.sendText(msg.to,"Berhasil tolak undangan dari grup " + gid.name)
                    except:
                        nadya.sendText(msg.to,"Error!")
                else:
                    nadya.sendText(msg.to,"Grup tidak ditemukan")
elif msg.text in ["Acc invite"]:
if msg.from_ in admin:
gid = nadya.getGroupIdsInvited()
_list = ""
for i in gid:
if i is not None:
gids = nadya.getGroup(i)
_list += gids.name
nadya.acceptGroupInvitation(i)
else:
break
if gid is not None:
nadya.sendText(msg.to,"Berhasil terima semua undangan dari grup :\n" + _list)
else:
nadya.sendText(msg.to,"Tidak ada grup yang tertunda saat ini")
elif "Gif gore" in msg.text:
gif = ("https://media.giphy.com/media/l2JHVsQiOZrNMGzYs/giphy.gif","https://media.giphy.com/media/OgltQ2hbilzJS/200w.gif")
gore = random.choice(gif)
nadya.sendGifWithURL(msg.to,gore)
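        # Mimic controls: Micadd/Micdel edit the mimic["target"] dict, Miclist shows it,
        # and "Mimic on/off" plus "Mimic target me|target" switch the behaviour flags.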
elif ("Micadd " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
mimic["target"][target] = True
nadya.sendText(msg.to,"Target ditambahkan!")
break
except:
nadya.sendText(msg.to,"Fail !")
break
elif ("Micdel " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del mimic["target"][target]
nadya.sendText(msg.to,"Target dihapuskan!")
break
except:
nadya.sendText(msg.to,"Fail !")
break
elif msg.text in ["Miclist"]:
if mimic["target"] == {}:
nadya.sendText(msg.to,"Nothing")
else:
mc = "Target Mimic User:\n"
for mi_d in mimic["target"]:
mc += "?? "+nadya.getContact(mi_d).displayName + "\n"
nadya.sendText(msg.to,mc)
elif "Mimic target " in msg.text:
if mimic["copy"] == True:
siapa = msg.text.replace("Mimic target ","")
if siapa.rstrip(' ') == "me":
mimic["copy2"] = "me"
nadya.sendText(msg.to,"Mimic change to me")
elif siapa.rstrip(' ') == "target":
mimic["copy2"] = "target"
nadya.sendText(msg.to,"Mimic change to target")
else:
nadya.sendText(msg.to,"I dont know")
elif "Mimic " in msg.text:
cmd = msg.text.replace("Mimic ","")
if cmd == "on":
if mimic["status"] == False:
mimic["status"] = True
nadya.sendText(msg.to,"Reply Message on")
else:
nadya.sendText(msg.to,"Sudah on")
elif cmd == "off":
if mimic["status"] == True:
mimic["status"] = False
nadya.sendText(msg.to,"Reply Message off")
else:
nadya.sendText(msg.to,"Sudah off")
if op.type == 59:
print op
except Exception as error:
print error
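# Main long-polling loop: fetchOps blocks until new operations arrive, the revision
# cursor is advanced past everything except END_OF_OPERATION, and each op goes to bot().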
while True:
try:
Ops = nadya.fetchOps(nadya.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(nadya.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
nadya.Poll.rev = max(nadya.Poll.rev, Op.revision)
bot(Op)
| 41.510932 | 453 | 0.419462 | [
"MIT"
] | sahrukanja/fryant1 | Chuckysb.py | 148,684 | Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, TUSHAR TAJNE and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe import _
class District(Document):
def validate(self):
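		# Name the District document after the capitalized district field.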
name = str(self.district.capitalize())
self.name = _(name)
pass
| 25.785714 | 51 | 0.759003 | [
"MIT"
] | tushar7724/SPS | sps/sps/doctype/district/district.py | 361 | Python |
def appendAndDelete(s, t, k):
iter=0
    s = list(s)
    t = list(t)
while s:
s.pop(0)
iter+=1
for i in t:
s.append(i)
iter+=1
if iter==k:
print("Yes")
else:
print("No")
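# Example of intended use (assumed input, HackerRank "Append and Delete" style):
# appendAndDelete("hackerhappy", "hackerrank", 9)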
| 13.052632 | 29 | 0.362903 | [
"MIT"
] | kasyap1234/codingproblems | append and delete.py | 248 | Python |
from django.db import models
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from tinymce import HTMLField
# Create your models here.
User = get_user_model()
class PostView(models.Model):
user = models.ForeignKey(User, verbose_name=_(
"User"), on_delete=models.CASCADE)
post = models.ForeignKey('Post', verbose_name=_(
"Post"), on_delete=models.CASCADE)
def __str__(self):
return self.user.username
class Author(models.Model):
user = models.OneToOneField(User, verbose_name=_(
"Author"), on_delete=models.CASCADE)
profile_picture = models.ImageField(_("Profile picture"))
def __str__(self):
return self.user.username
class Category(models.Model):
title = models.CharField(_("Title"), max_length=50)
def __str__(self):
return self.title
class Comment(models.Model):
user = models.ForeignKey(
User, verbose_name=_("User"), on_delete=models.CASCADE)
timestamp = models.DateTimeField(_("Timestamp"), auto_now_add=True)
content = models.TextField(_("Comment text"))
post = models.ForeignKey('Post', verbose_name=_(
"Post"), related_name='comments', on_delete=models.CASCADE)
def __str__(self):
return self.user.username
class Post(models.Model):
title = models.CharField(_("Title"), max_length=50)
overview = models.TextField(_("Overview"))
timestamp = models.DateTimeField(
_("Timestamp"), auto_now=False, auto_now_add=True)
content = HTMLField()
# comment_count = models.IntegerField(_("Comment count"), default=0)
# view_count = models.IntegerField(_("View count"), default=0)
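    # The counts above are intentionally not stored as fields; they are exposed as the
    # comment_count and view_count properties below, computed from related rows.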
author = models.ForeignKey(Author, verbose_name=_(
"Author"), on_delete=models.CASCADE)
thumbnail = models.ImageField(_("Thumbnail"))
categories = models.ManyToManyField(Category, verbose_name=_("Categories"))
featured = models.BooleanField(_("Featured"), default=False)
previous_post = models.ForeignKey("self", verbose_name=_(
"Previous post"), related_name='previous',
on_delete=models.SET_NULL, blank=True, null=True)
next_post = models.ForeignKey("self", verbose_name=_(
"Next post"), related_name='next',
on_delete=models.SET_NULL, blank=True, null=True)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("post-detail", kwargs={"pk": self.pk})
def get_update_url(self):
return reverse("post-update", kwargs={"pk": self.pk})
def get_delete_url(self):
return reverse("post-delete", kwargs={"pk": self.pk})
@property
def get_comments(self):
return self.comments.all().order_by('-timestamp')
@property
def comment_count(self):
return Comment.objects.filter(post=self).count()
@property
def view_count(self):
return PostView.objects.filter(post=self).count()
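# Example query (sketch, assumed usage): latest featured posts with computed counts:
#   Post.objects.filter(featured=True).order_by('-timestamp')[:5]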
| 33.11828 | 80 | 0.667208 | [
"MIT"
] | zulune/Just-Django-Blog | src/posts/models.py | 3,080 | Python |
import unittest
import os
import numpy as np
from dotenv import load_dotenv
from nlpaug.util import AudioLoader
import nlpaug.augmenter.spectrogram as nas
class TestLoudnessSpec(unittest.TestCase):
@classmethod
def setUpClass(cls):
env_config_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", "..", "..", ".env")
)
load_dotenv(env_config_path)
# https://freewavesamples.com/yamaha-v50-rock-beat-120-bpm
cls.sample_wav_file = os.path.join(
os.environ.get("TEST_DIR"),
"res",
"audio",
"Yamaha-V50-Rock-Beat-120bpm.wav",
)
def test_no_change_source(self):
data = AudioLoader.load_mel_spectrogram(self.sample_wav_file, n_mels=128)
aug = nas.LoudnessAug(stateless=False)
aug_data = aug.augment(data)
comparison = data == aug_data
self.assertFalse(comparison.all())
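    # The augmenter is stateful here (stateless=False), so time_start/time_end recorded
    # on the aug object delimit the only window allowed to differ from the source below.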
def test_substitute(self):
data = AudioLoader.load_mel_spectrogram(self.sample_wav_file, n_mels=128)
aug = nas.LoudnessAug(stateless=False)
aug_data = aug.augment(data)
comparison = (
data[:, aug.time_start : aug.time_end]
== aug_data[:, aug.time_start : aug.time_end]
)
self.assertFalse(comparison.all())
comparison = data[:, : aug.time_start] == aug_data[:, : aug.time_start]
self.assertTrue(comparison.all())
comparison = data[:, aug.time_end :] == aug_data[:, aug.time_end :]
self.assertTrue(comparison.all())
| 33.382979 | 81 | 0.6297 | [
"MIT"
] | lucidworks/nlpaug | test/augmenter/spectrogram/test_loudness_spec.py | 1,569 | Python |
"""
Create the numpy.core.multiarray namespace for backward compatibility. In v1.16
the multiarray and umath c-extension modules were merged into a single
_multiarray_umath extension module. So we replicate the old namespace
by importing from the extension module.
"""
import functools
import warnings
from . import overrides
from . import _multiarray_umath
import numpy as np
from numpy.core._multiarray_umath import *
from numpy.core._multiarray_umath import (
_fastCopyAndTranspose, _flagdict, _insert, _reconstruct, _vec_string,
_ARRAY_API, _monotonicity
)
__all__ = [
'_ARRAY_API', 'ALLOW_THREADS', 'BUFSIZE', 'CLIP', 'DATETIMEUNITS',
'ITEM_HASOBJECT', 'ITEM_IS_POINTER', 'LIST_PICKLE', 'MAXDIMS',
'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'NEEDS_INIT', 'NEEDS_PYAPI',
'RAISE', 'USE_GETITEM', 'USE_SETITEM', 'WRAP', '_fastCopyAndTranspose',
'_flagdict', '_insert', '_reconstruct', '_vec_string', '_monotonicity',
'add_docstring', 'arange', 'array', 'bincount', 'broadcast',
'busday_count', 'busday_offset', 'busdaycalendar', 'can_cast',
'compare_chararrays', 'concatenate', 'copyto', 'correlate', 'correlate2',
'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data',
'digitize', 'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype',
'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat',
'frombuffer', 'fromfile', 'fromiter', 'fromstring', 'getbuffer', 'inner',
'int_asbuffer', 'interp', 'interp_complex', 'is_busday', 'lexsort',
'matmul', 'may_share_memory', 'min_scalar_type', 'ndarray', 'nditer',
'nested_iters', 'newbuffer', 'normalize_axis_index', 'packbits',
'promote_types', 'putmask', 'ravel_multi_index', 'result_type', 'scalar',
'set_datetimeparse_function', 'set_legacy_print_mode', 'set_numeric_ops',
'set_string_function', 'set_typeDict', 'shares_memory', 'test_interrupt',
'tracemalloc_domain', 'typeinfo', 'unpackbits', 'unravel_index', 'vdot',
'where', 'zeros']
# For backward compatibility, make sure pickle imports these functions from here
_reconstruct.__module__ = 'numpy.core.multiarray'
scalar.__module__ = 'numpy.core.multiarray'
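# These are also re-exported at the top level; point __module__ at 'numpy' so
# introspection and documentation refer to the public namespace.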
arange.__module__ = 'numpy'
array.__module__ = 'numpy'
datetime_data.__module__ = 'numpy'
empty.__module__ = 'numpy'
frombuffer.__module__ = 'numpy'
fromfile.__module__ = 'numpy'
fromiter.__module__ = 'numpy'
frompyfunc.__module__ = 'numpy'
fromstring.__module__ = 'numpy'
geterrobj.__module__ = 'numpy'
may_share_memory.__module__ = 'numpy'
nested_iters.__module__ = 'numpy'
promote_types.__module__ = 'numpy'
set_numeric_ops.__module__ = 'numpy'
seterrobj.__module__ = 'numpy'
zeros.__module__ = 'numpy'
# We can't verify dispatcher signatures because NumPy's C functions don't
# support introspection.
array_function_from_c_func_and_dispatcher = functools.partial(
overrides.array_function_from_dispatcher,
module='numpy', docs_from_dispatcher=True, verify=False)
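# Each decorated function below is only a dispatcher for the
# __array_function__ protocol: it returns the tuple of array-like arguments
# that overrides are resolved against, while the actual work is done by the
# C implementation of the same name in _multiarray_umath. The docstrings
# written here are copied onto the public functions (docs_from_dispatcher=True).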
@array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like)
def empty_like(prototype, dtype=None, order=None, subok=None):
"""
empty_like(prototype, dtype=None, order='K', subok=True)
Return a new array with the same shape and type as a given array.
Parameters
----------
prototype : array_like
The shape and data-type of `prototype` define these same attributes
of the returned array.
dtype : data-type, optional
Overrides the data type of the result.
.. versionadded:: 1.6.0
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if ``prototype`` is Fortran
contiguous, 'C' otherwise. 'K' means match the layout of ``prototype``
as closely as possible.
.. versionadded:: 1.6.0
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of 'a', otherwise it will be a base-class array. Defaults
to True.
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data with the same
shape and type as `prototype`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
Notes
-----
This function does *not* initialize the returned array; to do that use
`zeros_like` or `ones_like` instead. It may be marginally faster than
the functions that do set the array values.
Examples
--------
>>> a = ([1,2,3], [4,5,6]) # a is array-like
>>> np.empty_like(a)
array([[-1073741821, -1073741821, 3], #random
[ 0, 0, -1073741821]])
>>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
>>> np.empty_like(a)
array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000],#random
[ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]])
"""
return (prototype,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate)
def concatenate(arrays, axis=None, out=None):
"""
concatenate((a1, a2, ...), axis=0, out=None)
Join a sequence of arrays along an existing axis.
Parameters
----------
a1, a2, ... : sequence of array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. If axis is None,
arrays are flattened before use. Default is 0.
out : ndarray, optional
If provided, the destination to place the result. The shape must be
correct, matching that of what concatenate would have returned if no
out argument were specified.
Returns
-------
res : ndarray
The concatenated array.
See Also
--------
ma.concatenate : Concatenate function that preserves input masks.
array_split : Split an array into multiple sub-arrays of equal or
near-equal size.
split : Split array into a list of multiple sub-arrays of equal size.
hsplit : Split array into multiple sub-arrays horizontally (column wise)
vsplit : Split array into multiple sub-arrays vertically (row wise)
dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
stack : Stack a sequence of arrays along a new axis.
hstack : Stack arrays in sequence horizontally (column wise)
vstack : Stack arrays in sequence vertically (row wise)
dstack : Stack arrays in sequence depth wise (along third dimension)
block : Assemble arrays from blocks.
Notes
-----
When one or more of the arrays to be concatenated is a MaskedArray,
this function will return a MaskedArray object instead of an ndarray,
but the input masks are *not* preserved. In cases where a MaskedArray
is expected as input, use the ma.concatenate function from the masked
array module instead.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> b = np.array([[5, 6]])
>>> np.concatenate((a, b), axis=0)
array([[1, 2],
[3, 4],
[5, 6]])
>>> np.concatenate((a, b.T), axis=1)
array([[1, 2, 5],
[3, 4, 6]])
>>> np.concatenate((a, b), axis=None)
array([1, 2, 3, 4, 5, 6])
This function will not preserve masking of MaskedArray inputs.
>>> a = np.ma.arange(3)
>>> a[1] = np.ma.masked
>>> b = np.arange(2, 5)
>>> a
masked_array(data=[0, --, 2],
mask=[False, True, False],
fill_value=999999)
>>> b
array([2, 3, 4])
>>> np.concatenate([a, b])
masked_array(data=[0, 1, 2, 2, 3, 4],
mask=False,
fill_value=999999)
>>> np.ma.concatenate([a, b])
masked_array(data=[0, --, 2, 2, 3, 4],
mask=[False, True, False, False, False, False],
fill_value=999999)
"""
if out is not None:
# optimize for the typical case where only arrays is provided
arrays = list(arrays)
arrays.append(out)
return arrays
@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner)
def inner(a, b):
"""
inner(a, b)
Inner product of two arrays.
Ordinary inner product of vectors for 1-D arrays (without complex
conjugation), in higher dimensions a sum product over the last axes.
Parameters
----------
a, b : array_like
If `a` and `b` are nonscalar, their last dimensions must match.
Returns
-------
out : ndarray
`out.shape = a.shape[:-1] + b.shape[:-1]`
Raises
------
ValueError
If the last dimension of `a` and `b` has different size.
See Also
--------
tensordot : Sum products over arbitrary axes.
dot : Generalised matrix product, using second last dimension of `b`.
einsum : Einstein summation convention.
Notes
-----
For vectors (1-D arrays) it computes the ordinary inner-product::
np.inner(a, b) = sum(a[:]*b[:])
More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::
np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
or explicitly::
np.inner(a, b)[i0,...,ir-1,j0,...,js-1]
= sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])
In addition `a` or `b` may be scalars, in which case::
np.inner(a,b) = a*b
Examples
--------
Ordinary inner product for vectors:
>>> a = np.array([1,2,3])
>>> b = np.array([0,1,0])
>>> np.inner(a, b)
2
A multidimensional example:
>>> a = np.arange(24).reshape((2,3,4))
>>> b = np.arange(4)
>>> np.inner(a, b)
array([[ 14, 38, 62],
[ 86, 110, 134]])
An example where `b` is a scalar:
>>> np.inner(np.eye(2), 7)
array([[ 7., 0.],
[ 0., 7.]])
"""
return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.where)
def where(condition, x=None, y=None):
"""
where(condition, [x, y])
Return elements chosen from `x` or `y` depending on `condition`.
.. note::
When only `condition` is provided, this function is a shorthand for
``np.asarray(condition).nonzero()``. Using `nonzero` directly should be
preferred, as it behaves correctly for subclasses. The rest of this
documentation covers only the case where all three arguments are
provided.
Parameters
----------
condition : array_like, bool
Where True, yield `x`, otherwise yield `y`.
x, y : array_like
Values from which to choose. `x`, `y` and `condition` need to be
broadcastable to some shape.
Returns
-------
out : ndarray
An array with elements from `x` where `condition` is True, and elements
from `y` elsewhere.
See Also
--------
choose
nonzero : The function that is called when x and y are omitted
Notes
-----
If all the arrays are 1-D, `where` is equivalent to::
[xv if c else yv
for c, xv, yv in zip(condition, x, y)]
Examples
--------
>>> a = np.arange(10)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.where(a < 5, a, 10*a)
array([ 0, 1, 2, 3, 4, 50, 60, 70, 80, 90])
This can be used on multidimensional arrays too:
>>> np.where([[True, False], [True, True]],
... [[1, 2], [3, 4]],
... [[9, 8], [7, 6]])
array([[1, 8],
[3, 4]])
The shapes of x, y, and the condition are broadcast together:
>>> x, y = np.ogrid[:3, :4]
>>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast
array([[10, 0, 0, 0],
[10, 11, 1, 1],
[10, 11, 12, 2]])
>>> a = np.array([[0, 1, 2],
... [0, 2, 4],
... [0, 3, 6]])
>>> np.where(a < 4, a, -1) # -1 is broadcast
array([[ 0, 1, 2],
[ 0, 2, -1],
[ 0, 3, -1]])
"""
return (condition, x, y)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort)
def lexsort(keys, axis=None):
"""
lexsort(keys, axis=-1)
Perform an indirect stable sort using a sequence of keys.
Given multiple sorting keys, which can be interpreted as columns in a
spreadsheet, lexsort returns an array of integer indices that describes
the sort order by multiple columns. The last key in the sequence is used
for the primary sort order, the second-to-last key for the secondary sort
order, and so on. The keys argument must be a sequence of objects that
can be converted to arrays of the same shape. If a 2D array is provided
    for the keys argument, its rows are interpreted as the sorting keys and
sorting is according to the last row, second last row etc.
Parameters
----------
keys : (k, N) array or tuple containing k (N,)-shaped sequences
The `k` different "columns" to be sorted. The last column (or row if
`keys` is a 2D array) is the primary sort key.
axis : int, optional
Axis to be indirectly sorted. By default, sort over the last axis.
Returns
-------
indices : (N,) ndarray of ints
Array of indices that sort the keys along the specified axis.
See Also
--------
argsort : Indirect sort.
ndarray.sort : In-place sort.
sort : Return a sorted copy of an array.
Examples
--------
Sort names: first by surname, then by name.
>>> surnames = ('Hertz', 'Galilei', 'Hertz')
>>> first_names = ('Heinrich', 'Galileo', 'Gustav')
>>> ind = np.lexsort((first_names, surnames))
>>> ind
array([1, 2, 0])
>>> [surnames[i] + ", " + first_names[i] for i in ind]
['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich']
Sort two columns of numbers:
>>> a = [1,5,1,4,3,4,4] # First column
>>> b = [9,4,0,4,0,2,1] # Second column
>>> ind = np.lexsort((b,a)) # Sort by a, then by b
>>> print(ind)
[2 0 4 6 5 3 1]
>>> [(a[i],b[i]) for i in ind]
[(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]
Note that sorting is first according to the elements of ``a``.
Secondary sorting is according to the elements of ``b``.
A normal ``argsort`` would have yielded:
>>> [(a[i],b[i]) for i in np.argsort(a)]
[(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)]
Structured arrays are sorted lexically by ``argsort``:
>>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)],
... dtype=np.dtype([('x', int), ('y', int)]))
>>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))
array([2, 0, 4, 6, 5, 3, 1])
"""
if isinstance(keys, tuple):
return keys
else:
return (keys,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast)
def can_cast(from_, to, casting=None):
"""
can_cast(from_, to, casting='safe')
Returns True if cast between data types can occur according to the
casting rule. If from is a scalar or array scalar, also returns
True if the scalar value can be cast without overflow or truncation
to an integer.
Parameters
----------
from_ : dtype, dtype specifier, scalar, or array
Data type, scalar, or array to cast from.
to : dtype or dtype specifier
Data type to cast to.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Returns
-------
out : bool
True if cast can occur according to the casting rule.
Notes
-----
Starting in NumPy 1.9, can_cast function now returns False in 'safe'
casting mode for integer/float dtype and string dtype if the string dtype
length is not long enough to store the max integer/float value converted
to a string. Previously can_cast in 'safe' mode returned True for
integer/float dtype and a string dtype of any length.
See also
--------
dtype, result_type
Examples
--------
Basic examples
>>> np.can_cast(np.int32, np.int64)
True
>>> np.can_cast(np.float64, complex)
True
>>> np.can_cast(complex, float)
False
>>> np.can_cast('i8', 'f8')
True
>>> np.can_cast('i8', 'f4')
False
>>> np.can_cast('i4', 'S4')
False
Casting scalars
>>> np.can_cast(100, 'i1')
True
>>> np.can_cast(150, 'i1')
False
>>> np.can_cast(150, 'u1')
True
>>> np.can_cast(3.5e100, np.float32)
False
>>> np.can_cast(1000.0, np.float32)
True
Array scalar checks the value, array does not
>>> np.can_cast(np.array(1000.0), np.float32)
True
>>> np.can_cast(np.array([1000.0]), np.float32)
False
Using the casting rules
>>> np.can_cast('i8', 'i8', 'no')
True
>>> np.can_cast('<i8', '>i8', 'no')
False
>>> np.can_cast('<i8', '>i8', 'equiv')
True
>>> np.can_cast('<i4', '>i8', 'equiv')
False
>>> np.can_cast('<i4', '>i8', 'safe')
True
>>> np.can_cast('<i8', '>i4', 'safe')
False
>>> np.can_cast('<i8', '>i4', 'same_kind')
True
>>> np.can_cast('<i8', '>u4', 'same_kind')
False
>>> np.can_cast('<i8', '>u4', 'unsafe')
True
"""
return (from_,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type)
def min_scalar_type(a):
"""
min_scalar_type(a)
For scalar ``a``, returns the data type with the smallest size
and smallest scalar kind which can hold its value. For non-scalar
array ``a``, returns the vector's dtype unmodified.
Floating point values are not demoted to integers,
and complex values are not demoted to floats.
Parameters
----------
a : scalar or array_like
The value whose minimal data type is to be found.
Returns
-------
out : dtype
The minimal data type.
Notes
-----
.. versionadded:: 1.6.0
See Also
--------
result_type, promote_types, dtype, can_cast
Examples
--------
>>> np.min_scalar_type(10)
dtype('uint8')
>>> np.min_scalar_type(-260)
dtype('int16')
>>> np.min_scalar_type(3.1)
dtype('float16')
>>> np.min_scalar_type(1e50)
dtype('float64')
>>> np.min_scalar_type(np.arange(4,dtype='f8'))
dtype('float64')
"""
return (a,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.result_type)
def result_type(*arrays_and_dtypes):
"""
result_type(*arrays_and_dtypes)
Returns the type that results from applying the NumPy
type promotion rules to the arguments.
Type promotion in NumPy works similarly to the rules in languages
like C++, with some slight differences. When both scalars and
arrays are used, the array's type takes precedence and the actual value
of the scalar is taken into account.
For example, calculating 3*a, where a is an array of 32-bit floats,
intuitively should result in a 32-bit float output. If the 3 is a
32-bit integer, the NumPy rules indicate it can't convert losslessly
into a 32-bit float, so a 64-bit float should be the result type.
By examining the value of the constant, '3', we see that it fits in
an 8-bit integer, which can be cast losslessly into the 32-bit float.
Parameters
----------
arrays_and_dtypes : list of arrays and dtypes
The operands of some operation whose result type is needed.
Returns
-------
out : dtype
The result type.
See also
--------
dtype, promote_types, min_scalar_type, can_cast
Notes
-----
.. versionadded:: 1.6.0
The specific algorithm used is as follows.
Categories are determined by first checking which of boolean,
integer (int/uint), or floating point (float/complex) the maximum
kind of all the arrays and the scalars are.
If there are only scalars or the maximum category of the scalars
is higher than the maximum category of the arrays,
the data types are combined with :func:`promote_types`
to produce the return value.
Otherwise, `min_scalar_type` is called on each array, and
the resulting data types are all combined with :func:`promote_types`
to produce the return value.
The set of int values is not a subset of the uint values for types
with the same number of bits, something not reflected in
:func:`min_scalar_type`, but handled as a special case in `result_type`.
Examples
--------
>>> np.result_type(3, np.arange(7, dtype='i1'))
dtype('int8')
>>> np.result_type('i4', 'c8')
dtype('complex128')
>>> np.result_type(3.0, -2)
dtype('float64')
"""
return arrays_and_dtypes
@array_function_from_c_func_and_dispatcher(_multiarray_umath.dot)
def dot(a, b, out=None):
"""
dot(a, b, out=None)
Dot product of two arrays. Specifically,
- If both `a` and `b` are 1-D arrays, it is inner product of vectors
(without complex conjugation).
- If both `a` and `b` are 2-D arrays, it is matrix multiplication,
but using :func:`matmul` or ``a @ b`` is preferred.
- If either `a` or `b` is 0-D (scalar), it is equivalent to :func:`multiply`
and using ``numpy.multiply(a, b)`` or ``a * b`` is preferred.
- If `a` is an N-D array and `b` is a 1-D array, it is a sum product over
the last axis of `a` and `b`.
- If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a
sum product over the last axis of `a` and the second-to-last axis of `b`::
dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])
Parameters
----------
a : array_like
First argument.
b : array_like
Second argument.
out : ndarray, optional
Output argument. This must have the exact kind that would be returned
if it was not used. In particular, it must have the right type, must be
C-contiguous, and its dtype must be the dtype that would be returned
for `dot(a,b)`. This is a performance feature. Therefore, if these
conditions are not met, an exception is raised, instead of attempting
to be flexible.
Returns
-------
output : ndarray
Returns the dot product of `a` and `b`. If `a` and `b` are both
scalars or both 1-D arrays then a scalar is returned; otherwise
an array is returned.
If `out` is given, then it is returned.
Raises
------
ValueError
If the last dimension of `a` is not the same size as
the second-to-last dimension of `b`.
See Also
--------
vdot : Complex-conjugating dot product.
tensordot : Sum products over arbitrary axes.
einsum : Einstein summation convention.
matmul : '@' operator as method with out parameter.
Examples
--------
>>> np.dot(3, 4)
12
Neither argument is complex-conjugated:
>>> np.dot([2j, 3j], [2j, 3j])
(-13+0j)
For 2-D arrays it is the matrix product:
>>> a = [[1, 0], [0, 1]]
>>> b = [[4, 1], [2, 2]]
>>> np.dot(a, b)
array([[4, 1],
[2, 2]])
>>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
>>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3))
>>> np.dot(a, b)[2,3,2,1,2,2]
499128
>>> sum(a[2,3,2,:] * b[1,2,:,2])
499128
"""
return (a, b, out)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot)
def vdot(a, b):
"""
vdot(a, b)
Return the dot product of two vectors.
The vdot(`a`, `b`) function handles complex numbers differently than
dot(`a`, `b`). If the first argument is complex the complex conjugate
of the first argument is used for the calculation of the dot product.
Note that `vdot` handles multidimensional arrays differently than `dot`:
it does *not* perform a matrix product, but flattens input arguments
to 1-D vectors first. Consequently, it should only be used for vectors.
Parameters
----------
a : array_like
If `a` is complex the complex conjugate is taken before calculation
of the dot product.
b : array_like
Second argument to the dot product.
Returns
-------
output : ndarray
Dot product of `a` and `b`. Can be an int, float, or
complex depending on the types of `a` and `b`.
See Also
--------
dot : Return the dot product without using the complex conjugate of the
first argument.
Examples
--------
>>> a = np.array([1+2j,3+4j])
>>> b = np.array([5+6j,7+8j])
>>> np.vdot(a, b)
(70-8j)
>>> np.vdot(b, a)
(70+8j)
Note that higher-dimensional arrays are flattened!
>>> a = np.array([[1, 4], [5, 6]])
>>> b = np.array([[4, 1], [2, 2]])
>>> np.vdot(a, b)
30
>>> np.vdot(b, a)
30
>>> 1*4 + 4*1 + 5*2 + 6*2
30
"""
return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount)
def bincount(x, weights=None, minlength=None):
"""
bincount(x, weights=None, minlength=0)
Count number of occurrences of each value in array of non-negative ints.
The number of bins (of size 1) is one larger than the largest value in
`x`. If `minlength` is specified, there will be at least this number
of bins in the output array (though it will be longer if necessary,
depending on the contents of `x`).
Each bin gives the number of occurrences of its index value in `x`.
If `weights` is specified the input array is weighted by it, i.e. if a
value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead
of ``out[n] += 1``.
Parameters
----------
x : array_like, 1 dimension, nonnegative ints
Input array.
weights : array_like, optional
Weights, array of the same shape as `x`.
minlength : int, optional
A minimum number of bins for the output array.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray of ints
The result of binning the input array.
The length of `out` is equal to ``np.amax(x)+1``.
Raises
------
ValueError
If the input is not 1-dimensional, or contains elements with negative
values, or if `minlength` is negative.
TypeError
If the type of the input is float or complex.
See Also
--------
histogram, digitize, unique
Examples
--------
>>> np.bincount(np.arange(5))
array([1, 1, 1, 1, 1])
>>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
array([1, 3, 1, 1, 0, 0, 0, 1])
>>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
>>> np.bincount(x).size == np.amax(x)+1
True
The input array needs to be of integer dtype, otherwise a
TypeError is raised:
>>> np.bincount(np.arange(5, dtype=float))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: array cannot be safely cast to required type
A possible use of ``bincount`` is to perform sums over
variable-size chunks of an array, using the ``weights`` keyword.
>>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
>>> x = np.array([0, 1, 1, 2, 2, 2])
>>> np.bincount(x, weights=w)
array([ 0.3, 0.7, 1.1])
"""
return (x, weights)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index)
def ravel_multi_index(multi_index, dims, mode=None, order=None):
"""
ravel_multi_index(multi_index, dims, mode='raise', order='C')
Converts a tuple of index arrays into an array of flat
indices, applying boundary modes to the multi-index.
Parameters
----------
multi_index : tuple of array_like
A tuple of integer arrays, one array for each dimension.
dims : tuple of ints
The shape of array into which the indices from ``multi_index`` apply.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices are handled. Can specify
either one mode or a tuple of modes, one mode per index.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
In 'clip' mode, a negative index which would normally
wrap will clip to 0 instead.
order : {'C', 'F'}, optional
Determines whether the multi-index should be viewed as
indexing in row-major (C-style) or column-major
(Fortran-style) order.
Returns
-------
raveled_indices : ndarray
An array of indices into the flattened version of an array
of dimensions ``dims``.
See Also
--------
unravel_index
Notes
-----
.. versionadded:: 1.6.0
Examples
--------
>>> arr = np.array([[3,6,6],[4,5,1]])
>>> np.ravel_multi_index(arr, (7,6))
array([22, 41, 37])
>>> np.ravel_multi_index(arr, (7,6), order='F')
array([31, 41, 13])
>>> np.ravel_multi_index(arr, (4,6), mode='clip')
array([22, 23, 19])
>>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap'))
array([12, 13, 13])
>>> np.ravel_multi_index((3,1,4,1), (6,7,8,9))
1621
"""
return multi_index
@array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index)
def unravel_index(indices, shape=None, order=None, dims=None):
"""
unravel_index(indices, shape, order='C')
Converts a flat index or array of flat indices into a tuple
of coordinate arrays.
Parameters
----------
indices : array_like
An integer array whose elements are indices into the flattened
version of an array of dimensions ``shape``. Before version 1.6.0,
this function accepted just one index value.
shape : tuple of ints
The shape of the array to use for unraveling ``indices``.
.. versionchanged:: 1.16.0
Renamed from ``dims`` to ``shape``.
order : {'C', 'F'}, optional
Determines whether the indices should be viewed as indexing in
row-major (C-style) or column-major (Fortran-style) order.
.. versionadded:: 1.6.0
Returns
-------
unraveled_coords : tuple of ndarray
Each array in the tuple has the same shape as the ``indices``
array.
See Also
--------
ravel_multi_index
Examples
--------
>>> np.unravel_index([22, 41, 37], (7,6))
(array([3, 6, 6]), array([4, 5, 1]))
>>> np.unravel_index([31, 41, 13], (7,6), order='F')
(array([3, 6, 6]), array([4, 5, 1]))
>>> np.unravel_index(1621, (6,7,8,9))
(3, 1, 4, 1)
"""
if dims is not None:
warnings.warn("'shape' argument should be used instead of 'dims'",
DeprecationWarning, stacklevel=3)
return (indices,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto)
def copyto(dst, src, casting=None, where=None):
"""
copyto(dst, src, casting='same_kind', where=True)
Copies values from one array to another, broadcasting as necessary.
Raises a TypeError if the `casting` rule is violated, and if
`where` is provided, it selects which elements to copy.
.. versionadded:: 1.7.0
Parameters
----------
dst : ndarray
The array into which values are copied.
src : array_like
The array from which values are copied.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when copying.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
where : array_like of bool, optional
A boolean array which is broadcasted to match the dimensions
of `dst`, and selects elements to copy from `src` to `dst`
wherever it contains the value True.
"""
return (dst, src, where)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask)
def putmask(a, mask, values):
"""
putmask(a, mask, values)
Changes elements of an array based on conditional and input values.
Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
If `values` is not the same size as `a` and `mask` then it will repeat.
This gives behavior different from ``a[mask] = values``.
Parameters
----------
a : array_like
Target array.
mask : array_like
Boolean mask array. It has to be the same shape as `a`.
values : array_like
Values to put into `a` where `mask` is True. If `values` is smaller
than `a` it will be repeated.
See Also
--------
place, put, take, copyto
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> np.putmask(x, x>2, x**2)
>>> x
array([[ 0, 1, 2],
[ 9, 16, 25]])
If `values` is smaller than `a` it is repeated:
>>> x = np.arange(5)
>>> np.putmask(x, x>1, [-33, -44])
>>> x
array([ 0, 1, -33, -44, -33])
"""
return (a, mask, values)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits)
def packbits(myarray, axis=None):
"""
packbits(myarray, axis=None)
Packs the elements of a binary-valued array into bits in a uint8 array.
The result is padded to full bytes by inserting zero bits at the end.
Parameters
----------
myarray : array_like
An array of integers or booleans whose elements should be packed to
bits.
axis : int, optional
The dimension over which bit-packing is done.
``None`` implies packing the flattened array.
Returns
-------
packed : ndarray
Array of type uint8 whose elements represent bits corresponding to the
logical (0 or nonzero) value of the input elements. The shape of
`packed` has the same number of dimensions as the input (unless `axis`
is None, in which case the output is 1-D).
See Also
--------
unpackbits: Unpacks elements of a uint8 array into a binary-valued output
array.
Examples
--------
>>> a = np.array([[[1,0,1],
... [0,1,0]],
... [[1,1,0],
... [0,0,1]]])
>>> b = np.packbits(a, axis=-1)
>>> b
array([[[160],[64]],[[192],[32]]], dtype=uint8)
Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000,
and 32 = 0010 0000.
"""
return (myarray,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits)
def unpackbits(myarray, axis=None):
"""
unpackbits(myarray, axis=None)
Unpacks elements of a uint8 array into a binary-valued output array.
Each element of `myarray` represents a bit-field that should be unpacked
into a binary-valued output array. The shape of the output array is either
1-D (if `axis` is None) or the same shape as the input array with unpacking
done along the axis specified.
Parameters
----------
myarray : ndarray, uint8 type
Input array.
axis : int, optional
The dimension over which bit-unpacking is done.
``None`` implies unpacking the flattened array.
Returns
-------
unpacked : ndarray, uint8 type
The elements are binary-valued (0 or 1).
See Also
--------
packbits : Packs the elements of a binary-valued array into bits in a uint8
array.
Examples
--------
>>> a = np.array([[2], [7], [23]], dtype=np.uint8)
>>> a
array([[ 2],
[ 7],
[23]], dtype=uint8)
>>> b = np.unpackbits(a, axis=1)
>>> b
array([[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)
"""
return (myarray,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory)
def shares_memory(a, b, max_work=None):
"""
shares_memory(a, b, max_work=None)
Determine if two arrays share memory
Parameters
----------
a, b : ndarray
Input arrays
max_work : int, optional
Effort to spend on solving the overlap problem (maximum number
of candidate solutions to consider). The following special
values are recognized:
max_work=MAY_SHARE_EXACT (default)
The problem is solved exactly. In this case, the function returns
True only if there is an element shared between the arrays.
max_work=MAY_SHARE_BOUNDS
Only the memory bounds of a and b are checked.
Raises
------
numpy.TooHardError
Exceeded max_work.
Returns
-------
out : bool
See Also
--------
may_share_memory
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
"""
return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory)
def may_share_memory(a, b, max_work=None):
"""
may_share_memory(a, b, max_work=None)
Determine if two arrays might share memory
A return of True does not necessarily mean that the two arrays
share any element. It just means that they *might*.
Only the memory bounds of a and b are checked by default.
Parameters
----------
a, b : ndarray
Input arrays
max_work : int, optional
Effort to spend on solving the overlap problem. See
`shares_memory` for details. Default for ``may_share_memory``
is to do a bounds check.
Returns
-------
out : bool
See Also
--------
shares_memory
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
>>> x = np.zeros([3, 4])
>>> np.may_share_memory(x[:,0], x[:,1])
True
"""
return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday)
def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None):
"""
is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None)
Calculates which of the given dates are valid days, and which are not.
.. versionadded:: 1.7.0
Parameters
----------
dates : array_like of datetime64[D]
The array of dates to process.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of bool, optional
If provided, this array is filled with the result.
Returns
-------
out : array of bool
An array with the same shape as ``dates``, containing True for
each valid day, and False for each invalid day.
See Also
--------
busdaycalendar: An object that specifies a custom set of valid days.
busday_offset : Applies an offset counted in valid days.
busday_count : Counts how many valid days are in a half-open date range.
Examples
--------
>>> # The weekdays are Friday, Saturday, and Monday
... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'],
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
array([False, False, True], dtype='bool')
"""
return (dates, weekmask, holidays, out)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset)
def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None,
busdaycal=None, out=None):
"""
busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None)
First adjusts the date to fall on a valid day according to
the ``roll`` rule, then applies offsets to the given dates
counted in valid days.
.. versionadded:: 1.7.0
Parameters
----------
dates : array_like of datetime64[D]
The array of dates to process.
offsets : array_like of int
The array of offsets, which is broadcast with ``dates``.
roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional
How to treat dates that do not fall on a valid day. The default
is 'raise'.
* 'raise' means to raise an exception for an invalid day.
* 'nat' means to return a NaT (not-a-time) for an invalid day.
* 'forward' and 'following' mean to take the first valid day
later in time.
* 'backward' and 'preceding' mean to take the first valid day
earlier in time.
* 'modifiedfollowing' means to take the first valid day
later in time unless it is across a Month boundary, in which
case to take the first valid day earlier in time.
* 'modifiedpreceding' means to take the first valid day
earlier in time unless it is across a Month boundary, in which
case to take the first valid day later in time.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of datetime64[D], optional
If provided, this array is filled with the result.
Returns
-------
out : array of datetime64[D]
An array with a shape from broadcasting ``dates`` and ``offsets``
together, containing the dates with offsets applied.
See Also
--------
busdaycalendar: An object that specifies a custom set of valid days.
is_busday : Returns a boolean array indicating valid days.
busday_count : Counts how many valid days are in a half-open date range.
Examples
--------
>>> # First business day in October 2011 (not accounting for holidays)
... np.busday_offset('2011-10', 0, roll='forward')
numpy.datetime64('2011-10-03','D')
>>> # Last business day in February 2012 (not accounting for holidays)
... np.busday_offset('2012-03', -1, roll='forward')
numpy.datetime64('2012-02-29','D')
>>> # Third Wednesday in January 2011
... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed')
numpy.datetime64('2011-01-19','D')
>>> # 2012 Mother's Day in Canada and the U.S.
... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun')
numpy.datetime64('2012-05-13','D')
>>> # First business day on or after a date
... np.busday_offset('2011-03-20', 0, roll='forward')
numpy.datetime64('2011-03-21','D')
>>> np.busday_offset('2011-03-22', 0, roll='forward')
numpy.datetime64('2011-03-22','D')
>>> # First business day after a date
... np.busday_offset('2011-03-20', 1, roll='backward')
numpy.datetime64('2011-03-21','D')
>>> np.busday_offset('2011-03-22', 1, roll='backward')
numpy.datetime64('2011-03-23','D')
"""
return (dates, offsets, weekmask, holidays, out)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count)
def busday_count(begindates, enddates, weekmask=None, holidays=None,
busdaycal=None, out=None):
"""
busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None)
Counts the number of valid days between `begindates` and
`enddates`, not including the day of `enddates`.
If ``enddates`` specifies a date value that is earlier than the
corresponding ``begindates`` date value, the count will be negative.
.. versionadded:: 1.7.0
Parameters
----------
begindates : array_like of datetime64[D]
The array of the first dates for counting.
enddates : array_like of datetime64[D]
The array of the end dates for counting, which are excluded
from the count themselves.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of int, optional
If provided, this array is filled with the result.
Returns
-------
out : array of int
An array with a shape from broadcasting ``begindates`` and ``enddates``
together, containing the number of valid days between
the begin and end dates.
See Also
--------
busdaycalendar: An object that specifies a custom set of valid days.
is_busday : Returns a boolean array indicating valid days.
busday_offset : Applies an offset counted in valid days.
Examples
--------
>>> # Number of weekdays in January 2011
... np.busday_count('2011-01', '2011-02')
21
>>> # Number of weekdays in 2011
... np.busday_count('2011', '2012')
260
>>> # Number of Saturdays in 2011
... np.busday_count('2011', '2012', weekmask='Sat')
53
"""
return (begindates, enddates, weekmask, holidays, out)
@array_function_from_c_func_and_dispatcher(
_multiarray_umath.datetime_as_string)
def datetime_as_string(arr, unit=None, timezone=None, casting=None):
"""
datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind')
Convert an array of datetimes into an array of strings.
Parameters
----------
arr : array_like of datetime64
The array of UTC timestamps to format.
unit : str
One of None, 'auto', or a :ref:`datetime unit <arrays.dtypes.dateunits>`.
timezone : {'naive', 'UTC', 'local'} or tzinfo
Timezone information to use when displaying the datetime. If 'UTC', end
with a Z to indicate UTC time. If 'local', convert to the local timezone
first, and suffix with a +-#### timezone offset. If a tzinfo object,
then do as with 'local', but use the specified timezone.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}
Casting to allow when changing between datetime units.
Returns
-------
str_arr : ndarray
An array of strings the same shape as `arr`.
Examples
--------
>>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]')
>>> d
array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30',
'2002-10-27T07:30'], dtype='datetime64[m]')
Setting the timezone to UTC shows the same information, but with a Z suffix
>>> np.datetime_as_string(d, timezone='UTC')
array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z',
'2002-10-27T07:30Z'], dtype='<U35')
Note that we picked datetimes that cross a DST boundary. Passing in a
``pytz`` timezone object will print the appropriate offset
>>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern'))
array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400',
'2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='<U39')
Passing in a unit will change the precision
>>> np.datetime_as_string(d, unit='h')
array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'],
dtype='<U32')
>>> np.datetime_as_string(d, unit='s')
array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00',
'2002-10-27T07:30:00'], dtype='<U38')
'casting' can be used to specify whether precision can be changed
>>> np.datetime_as_string(d, unit='h', casting='safe')
TypeError: Cannot create a datetime string as units 'h' from a NumPy
datetime with units 'm' according to the rule 'safe'
"""
return (arr,)
| 32.274235 | 128 | 0.619571 | ["MIT"] | 180Studios/LoginApp | venv/lib/python3.7/site-packages/numpy/core/multiarray.py | 50,606 | Python |
from os import environ
from kombu import Queue, Exchange
CELERY_BROKER_URL = environ.get('CELERY_BROKER_URL')
CELERY_RESULT_BACKEND = environ.get('CELERY_RESULT_BACKEND')
CELERY_TIMEZONE = environ.get('TZ', 'UTC')
CELERY_RESULT_PERSISTENT = True
CELERY_TASK_TRACK_STARTED = True
CELERY_TASK_TIME_LIMIT = 30 * 60
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_BROKER_HEARTBEAT_CHECKRATE = 10
CELERY_EVENT_QUEUE_EXPIRES = 10
CELERY_EVENT_QUEUE_TTL = 10
CELERY_TASK_SOFT_TIME_LIMIT = 60
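# Broker connection retry policy: retry immediately, back off in 0.5 s steps
# up to a 3 s ceiling, and give up after 4 attempts.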
CELERY_BROKER_TRANSPORT_OPTIONS = {
'max_retries': 4,
'interval_start': 0,
'interval_step': 0.5,
'interval_max': 3,
}
celery_exchange = Exchange('celery', type='direct') # topic, fanout
CELERY_TASK_ROUTES = {
'*': {'queue': 'celery'},
}
CELERY_TASK_QUEUES = (
Queue('celery', exchange=celery_exchange, queue_arguments={'x-queue-mode': 'lazy'}),
)
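# Illustrative wiring only (not part of this settings module): a Django-style
# Celery app is assumed to pick these values up through the CELERY_ prefix,
# e.g.
#   app = Celery('web')
#   app.config_from_object('django.conf:settings', namespace='CELERY')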
| 24.694444 | 88 | 0.752531 | ["MIT"] | SilinAlexander/django-chat | web/src/additional_settings/celery_settings.py | 889 | Python |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import csv
import math
import json
import time
import random
import logging
import functools
import traceback
from collections import defaultdict
from _thread import start_new_thread
from multiprocessing import Queue, Process
import numpy as np
from tqdm import tqdm
import paddle
import paddle.distributed as dist
def set_seed(seed):
"""Set seed for reproduction.
"""
seed = seed + dist.get_rank()
random.seed(seed)
np.random.seed(seed)
paddle.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
def set_logger(args):
"""Write logs to console and log file.
"""
log_file = os.path.join(args.save_path, 'train.log')
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S',
filename=log_file,
filemode='a+')
if args.print_on_screen:
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
for arg in vars(args):
logging.info('{:20}:{}'.format(arg, getattr(args, arg)))
def print_log(step, interval, log, timer, time_sum):
"""Print log to logger.
"""
logging.info(
'[GPU %d] step: %d, loss: %.5f, reg: %.4e, speed: %.2f steps/s, time: %.2f s' %
(dist.get_rank(), step, log['loss'] / interval, log['reg'] / interval,
interval / time_sum, time_sum))
logging.info('sample: %f, forward: %f, backward: %f, update: %f' % (
timer['sample'], timer['forward'], timer['backward'], timer['update']))
def uniform(low, high, size, dtype=np.float32, seed=0):
"""Memory efficient uniform implementation.
"""
rng = np.random.default_rng(seed)
out = (high - low) * rng.random(size, dtype=dtype) + low
return out
def timer_wrapper(name):
"""Time counter wrapper.
"""
def decorate(func):
"""decorate func
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
"""wrapper func
"""
logging.info(f'[{name}] start...')
ts = time.time()
result = func(*args, **kwargs)
te = time.time()
costs = te - ts
if costs < 1e-4:
cost_str = '%f sec' % costs
elif costs > 3600:
cost_str = '%.4f sec (%.4f hours)' % (costs, costs / 3600.)
else:
cost_str = '%.4f sec' % costs
            logging.info(f'[{name}] finished! It took {cost_str}')
return result
return wrapper
return decorate
def calculate_metrics(scores, corr_idxs, filter_list):
"""Calculate metrics according to scores.
"""
logs = []
for i in range(scores.shape[0]):
rank = (scores[i] > scores[i][corr_idxs[i]]).astype('float32')
if filter_list is not None:
mask = paddle.ones(rank.shape, dtype='float32')
mask[filter_list[i]] = 0.
rank = rank * mask
rank = paddle.sum(rank) + 1
logs.append({
'MRR': 1.0 / rank,
'MR': float(rank),
'HITS@1': 1.0 if rank <= 1 else 0.0,
'HITS@3': 1.0 if rank <= 3 else 0.0,
'HITS@10': 1.0 if rank <= 10 else 0.0,
})
return logs
def evaluate_wikikg2(model, loader, mode, save_path):
from ogb.linkproppred import Evaluator
evaluator = Evaluator(name='ogbl-wikikg2')
model.eval()
with paddle.no_grad():
y_pred_pos = []
y_pred_neg = []
for h, r, t, neg_h, neg_t in tqdm(loader):
pos_h = model._get_ent_embedding(h)
pos_r = model._get_rel_embedding(r)
pos_t = model._get_ent_embedding(t)
y_pred_pos.append(model(pos_h, pos_r, pos_t).numpy())
y_neg_head = model.predict(t, r, neg_h, mode='head').numpy()
y_neg_tail = model.predict(h, r, neg_t, mode='tail').numpy()
y_pred_neg.append(np.concatenate([y_neg_head, y_neg_tail], axis=1))
y_pred_pos = np.concatenate(y_pred_pos, axis=0)
y_pred_neg = np.concatenate(y_pred_neg, axis=0)
input_dict = {'y_pred_pos': y_pred_pos, 'y_pred_neg': y_pred_neg}
result = evaluator.eval(input_dict)
logging.info('-- %s results ------------' % mode)
logging.info(' ' + ' '.join(
['{}: {}'.format(k, v.mean()) for k, v in result.items()]))
def evaluate_wikikg90m(model, loader, mode, save_path):
from ogb.lsc import WikiKG90MEvaluator
evaluator = WikiKG90MEvaluator()
model.eval()
with paddle.no_grad():
top_tens = []
corr_idx = []
for h, r, t_idx, cand_t in tqdm(loader):
score = model.predict(h, r, cand_t)
rank = paddle.argsort(score, axis=1, descending=True)
top_tens.append(rank[:, :10].numpy())
corr_idx.append(t_idx.numpy())
t_pred_top10 = np.concatenate(top_tens, axis=0)
t_correct_index = np.concatenate(corr_idx, axis=0)
input_dict = {}
if mode == 'valid':
input_dict['h,r->t'] = {
't_pred_top10': t_pred_top10,
't_correct_index': t_correct_index
}
result = evaluator.eval(input_dict)
logging.info('-- %s results -------------' % mode)
logging.info(' '.join(
['{}: {}'.format(k, v) for k, v in result.items()]))
else:
input_dict['h,r->t'] = {'t_pred_top10': t_pred_top10}
evaluator.save_test_submission(
input_dict=input_dict, dir_path=save_path)
@timer_wrapper('evaluation')
def evaluate(model,
loader,
evaluate_mode='test',
filter_dict=None,
save_path='./tmp/',
data_mode='hrt'):
"""Evaluate given KGE model.
"""
if data_mode == 'wikikg2':
evaluate_wikikg2(model, loader, evaluate_mode, save_path)
elif data_mode == 'wikikg90m':
evaluate_wikikg90m(model, loader, evaluate_mode, save_path)
else:
model.eval()
with paddle.no_grad():
h_metrics = []
t_metrics = []
output = {'h,r->t': {}, 't,r->h': {}, 'average': {}}
for h, r, t in tqdm(loader):
t_score = model.predict(h, r, mode='tail')
h_score = model.predict(t, r, mode='head')
if filter_dict is not None:
h_filter_list = [
filter_dict['head'][(ti, ri)]
for ti, ri in zip(t.numpy(), r.numpy())
]
t_filter_list = [
filter_dict['tail'][(hi, ri)]
for hi, ri in zip(h.numpy(), r.numpy())
]
else:
h_filter_list = None
t_filter_list = None
h_metrics += calculate_metrics(h_score, h, h_filter_list)
t_metrics += calculate_metrics(t_score, t, t_filter_list)
for metric in h_metrics[0].keys():
output['t,r->h'][metric] = np.mean(
[x[metric] for x in h_metrics])
output['h,r->t'][metric] = np.mean(
[x[metric] for x in t_metrics])
output['average'][metric] = (
output['t,r->h'][metric] + output['h,r->t'][metric]) / 2
logging.info('-------------- %s result --------------' %
evaluate_mode)
logging.info('t,r->h |' + ' '.join(
['{}: {}'.format(k, v) for k, v in output['t,r->h'].items()]))
logging.info('h,r->t |' + ' '.join(
['{}: {}'.format(k, v) for k, v in output['h,r->t'].items()]))
logging.info('average |' + ' '.join(
['{}: {}'.format(k, v) for k, v in output['average'].items()]))
logging.info('-----------------------------------------')
def gram_schimidt_process(embeds, num_elem, use_scale):
""" Orthogonalize embeddings.
"""
num_embed = embeds.shape[0]
assert embeds.shape[1] == num_elem
assert embeds.shape[2] == (num_elem + int(use_scale))
if use_scale:
scales = embeds[:, :, -1]
embeds = embeds[:, :, :num_elem]
u = [embeds[:, 0]]
uu = [0] * num_elem
uu[0] = (u[0] * u[0]).sum(axis=-1)
u_d = embeds[:, 1:]
ushape = (num_embed, 1, -1)
for i in range(1, num_elem):
tmp_a = (embeds[:, i:] * u[i - 1].reshape(ushape)).sum(axis=-1)
tmp_b = uu[i - 1].reshape((num_embed, -1))
tmp_u = (tmp_a / tmp_b).reshape((num_embed, -1, 1))
u_d = u_d - u[-1].reshape(ushape) * tmp_u
u_i = u_d[:, 0]
if u_d.shape[1] > 1:
u_d = u_d[:, 1:]
uu[i] = (u_i * u_i).sum(axis=-1)
u.append(u_i)
u = np.stack(u, axis=1)
u_norm = np.linalg.norm(u, axis=-1, keepdims=True)
u = u / u_norm
if use_scale:
u = np.concatenate([u, scales.reshape((num_embed, -1, 1))], axis=-1)
return u
| 34.989324 | 87 | 0.54465 | ["ECL-2.0", "Apache-2.0"] | LemonNoel/PGL | apps/Graph4KG/utils.py | 9,832 | Python |
# Copyright 2018 Contributors to Hyperledger Sawtooth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
"""Test User Addresser"""
import logging
import pytest
from rbac.common import addresser
from tests.rbac.common.assertions import TestAssertions
LOGGER = logging.getLogger(__name__)
@pytest.mark.addressing
@pytest.mark.library
class TestUserAddresser(TestAssertions):
"""Test User Addresser"""
def test_address(self):
"""Tests address makes an address that identifies as the correct AddressSpace"""
user_id = addresser.user.unique_id()
user_address = addresser.user.address(user_id)
self.assertIsAddress(user_address)
self.assertEqual(
addresser.get_address_type(user_address), addresser.AddressSpace.USER
)
def test_unique_id(self):
"""Tests that unique_id generates a unique identifier and is unique"""
id1 = addresser.user.unique_id()
id2 = addresser.user.unique_id()
self.assertIsIdentifier(id1)
self.assertIsIdentifier(id2)
self.assertNotEqual(id1, id2)
def test_get_address_type(self):
"""Tests that get_address_type returns AddressSpace.USER if it is a user
address, and None if it is of another address type"""
user_address = addresser.user.address(addresser.user.unique_id())
role_address = addresser.role.address(addresser.role.unique_id())
self.assertEqual(
addresser.get_address_type(user_address), addresser.AddressSpace.USER
)
self.assertEqual(
addresser.user.get_address_type(user_address), addresser.AddressSpace.USER
)
self.assertIsNone(addresser.user.get_address_type(role_address))
self.assertEqual(
addresser.get_address_type(role_address),
addresser.AddressSpace.ROLES_ATTRIBUTES,
)
def test_get_addresser(self):
"""Test that get_addresser returns the addresser class if it is a
user address, and None if it is of another address type"""
user_address = addresser.user.address(addresser.user.unique_id())
other_address = addresser.role.address(addresser.role.unique_id())
self.assertIsInstance(
addresser.get_addresser(user_address), type(addresser.user)
)
self.assertIsInstance(
addresser.user.get_addresser(user_address), type(addresser.user)
)
self.assertIsNone(addresser.user.get_addresser(other_address))
def test_user_parse(self):
"""Test addresser.user.parse returns a parsed address if it is a user address"""
user_id = addresser.user.unique_id()
user_address = addresser.user.address(user_id)
parsed = addresser.user.parse(user_address)
self.assertEqual(parsed.object_type, addresser.ObjectType.USER)
self.assertEqual(parsed.related_type, addresser.ObjectType.NONE)
self.assertEqual(
parsed.relationship_type, addresser.RelationshipType.ATTRIBUTES
)
self.assertEqual(parsed.address_type, addresser.AddressSpace.USER)
self.assertEqual(parsed.object_id, user_id)
self.assertEqual(parsed.related_id, None)
def test_addresser_parse(self):
"""Test addresser.parse returns a parsed address"""
user_id = addresser.user.unique_id()
user_address = addresser.user.address(user_id)
parsed = addresser.parse(user_address)
self.assertEqual(parsed.object_type, addresser.ObjectType.USER)
self.assertEqual(parsed.related_type, addresser.ObjectType.NONE)
self.assertEqual(
parsed.relationship_type, addresser.RelationshipType.ATTRIBUTES
)
self.assertEqual(parsed.address_type, addresser.AddressSpace.USER)
self.assertEqual(parsed.object_id, user_id)
self.assertEqual(parsed.related_id, None)
def test_parse_other(self):
"""Test that parse returns None if it is not a user address"""
other_address = addresser.role.address(addresser.role.unique_id())
self.assertIsNone(addresser.user.parse(other_address))
def test_addresses_are(self):
"""Test that addresses_are returns True if all addresses are a user
addresses, and False if any addresses are if a different address type"""
user_address1 = addresser.user.address(addresser.user.unique_id())
user_address2 = addresser.user.address(addresser.user.unique_id())
other_address = addresser.role.address(addresser.role.unique_id())
self.assertTrue(addresser.user.addresses_are([user_address1]))
self.assertTrue(addresser.user.addresses_are([user_address1, user_address2]))
self.assertFalse(addresser.user.addresses_are([other_address]))
self.assertFalse(addresser.user.addresses_are([user_address1, other_address]))
self.assertFalse(addresser.user.addresses_are([other_address, user_address1]))
self.assertTrue(addresser.user.addresses_are([]))
def test_address_deterministic(self):
"""Tests address makes an address that identifies as the correct AddressSpace"""
user_id1 = addresser.user.unique_id()
user_address1 = addresser.user.address(user_id1)
user_address2 = addresser.user.address(user_id1)
self.assertIsAddress(user_address1)
self.assertIsAddress(user_address2)
self.assertEqual(user_address1, user_address2)
self.assertEqual(
addresser.get_address_type(user_address1), addresser.AddressSpace.USER
)
def test_address_random(self):
"""Tests address makes a unique address given different inputs"""
user_id1 = addresser.user.unique_id()
user_id2 = addresser.user.unique_id()
user_address1 = addresser.user.address(user_id1)
user_address2 = addresser.user.address(user_id2)
self.assertIsAddress(user_address1)
self.assertIsAddress(user_address2)
self.assertNotEqual(user_address1, user_address2)
self.assertEqual(
addresser.get_address_type(user_address1), addresser.AddressSpace.USER
)
self.assertEqual(
addresser.get_address_type(user_address2), addresser.AddressSpace.USER
)
| 44.993421 | 88 | 0.702588 | ["Apache-2.0"] | kthblmfld/sawtooth-next-directory | tests/rbac/common/addresser/user_test.py | 6,839 | Python |
import sys, os
import json
from requests import get
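# Query the Home Assistant Core API through the Supervisor proxy for a single
# entity's state: the add-on supplies SUPERVISOR_TOKEN in the environment and
# the entity id is expected as the first command-line argument.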
sensorID = sys.argv[1]
SupervisorToken = os.environ["SUPERVISOR_TOKEN"]
url = "http://supervisor/core/api/states/"+sensorID
headers = {
"Authorization": "Bearer "+SupervisorToken,
"content-type": "application/json",
}
ha_sensor_data_request = get(url, headers=headers)
ha_sensor = json.loads(ha_sensor_data_request.text)
# Sensor state output
print(ha_sensor["state"]) | 21.7 | 51 | 0.748848 | [
"MIT"
] | PecceG2/HASS-SNMP-Sensors-and-Entities-addon | data/get-sensor-data.py | 434 | Python |
# List the type colors for the editor
AIR = (0, 0, 0)
GRASS = (100, 200, 40)
ROCK = (106, 106, 106)
LAVA = (252, 144, 3)
WATER = (0, 0, 255)
PLAYER = (155, 191, 250)
PLAYER_END = (40, 30, 100)
SPIKE_UP = (204, 24, 24)
SPIKE_DOWN = (166, 8, 8)
# List all the used types
types = ['GRASS', 'ROCK', 'LAVA', 'WATER', 'PLAYER', 'SPIKE_UP', 'SPIKE_DOWN', 'PLAYER_END', 'AIR']
colorTypes = [GRASS, ROCK, LAVA, WATER, PLAYER, SPIKE_UP, SPIKE_DOWN, PLAYER_END, AIR]
# Set default type
select = 'GRASS'
colorSelect = GRASS
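# Minimal lookup sketch (assumed helper, not part of the original editor):
# the two parallel lists above map a tile type name to its editor colour.
def color_for(type_name):
    """Return the RGB colour for a tile type name, e.g. color_for('LAVA')."""
    return colorTypes[types.index(type_name)]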
| 27.052632 | 99 | 0.640078 | ["MIT"] | Kronifer/cj8-repo | tools/LevelCreator/ezTypes.py | 514 | Python
"""
Matrix operations for neuroswarms models.
Author: Joseph Monaco ([email protected])
Affiliation: Johns Hopkins University
Created: 2019-05-12
Updated: 2020-11-16
Related paper:
Monaco, J.D., Hwang, G.M., Schultz, K.M. et al. Cognitive swarming in complex
environments with attractor dynamics and oscillatory computing. Biol Cybern
114, 269–284 (2020). https://doi.org/10.1007/s00422-020-00823-z
This software is provided AS IS under the terms of the Open Source MIT License.
See http://www.opensource.org/licenses/mit-license.php
"""
__all__ = ('tile_index', 'pairwise_tile_index', 'pairwise_distances',
'distances', 'pairwise_phasediffs', 'pairwise_unit_diffs',
'somatic_motion_update', 'reward_motion_update')
from numpy import (empty, zeros, newaxis as AX, swapaxes, hypot, sin, inf,
broadcast_arrays, broadcast_to)
from .utils.types import *
DEBUGGING = False
def _check_ndim(Mstr, M, ndim):
assert M.ndim == ndim, f'{Mstr}.ndim != {ndim}'
def _check_shape(Mstr, M, shape, axis=None):
if axis is None:
assert M.shape == shape, f'{Mstr}.shape != {shape}'
else:
assert M.shape[axis] == shape, f'{Mstr}.shape[{axis}] != {shape}'
def tile_index(A, B):
"""
Entrywise comparison index of tile index (column) vectors.
"""
AA, BB = broadcast_arrays(A, B)
if DEBUGGING:
shape = (max(A.shape[0], B.shape[0]), 1)
_check_shape('AA', AA, shape)
_check_shape('BB', BB, shape)
return (AA, BB)
def pairwise_tile_index(A, B):
"""
Pairwise comparison index of tile index (column) vectors.
"""
AA, BB = broadcast_arrays(A, B.T)
if DEBUGGING:
shape = (len(A), len(B))
_check_shape('AA', AA, shape)
_check_shape('BB', BB, shape)
return (AA, BB)
def pairwise_phasediffs(A, B):
"""
Compute synchronizing phase differences between phase pairs.
"""
N_A = len(A)
N_B = len(B)
DD_shape = (N_A, N_B)
if DEBUGGING:
_check_ndim('A', A, 2)
_check_ndim('B', B, 2)
_check_shape('A', A, 1, axis=1)
_check_shape('B', B, 1, axis=1)
return B.T - A
def distances(A, B):
"""
Compute distances between points in entrywise order.
"""
AA, BB = broadcast_arrays(A, B)
shape = AA.shape
if DEBUGGING:
_check_ndim('AA', AA, 2)
_check_ndim('BB', BB, 2)
_check_shape('AA', AA, 2, axis=1)
_check_shape('BB', BB, 2, axis=1)
return hypot(AA[:,0] - BB[:,0], AA[:,1] - BB[:,1])[:,AX]
def pairwise_unit_diffs(A, B):
"""
Compute attracting unit-vector differences between pairs of points.
"""
DD = pairwise_position_deltas(A, B)
D_norm = hypot(DD[...,0], DD[...,1])
nz = D_norm.nonzero()
DD[nz] /= D_norm[nz][...,AX]
return DD
def pairwise_distances(A, B):
"""
Compute distances between pairs of points.
"""
DD = pairwise_position_deltas(A, B)
return hypot(DD[...,0], DD[...,1])
def pairwise_position_deltas(A, B):
"""
Compute attracting component deltas between pairs of points.
"""
N_A = len(A)
N_B = len(B)
if DEBUGGING:
_check_ndim('A', A, 2)
_check_ndim('B', B, 2)
_check_shape('A', A, 2, axis=1)
_check_shape('B', B, 2, axis=1)
# Broadcast the first position matrix
AA = empty((N_A,N_B,2), DISTANCE_DTYPE)
AA[:] = A[:,AX,:]
return B[AX,...] - AA
def somatic_motion_update(D_up, D_cur, X, V):
"""
Compute updated positions by averaging pairwise difference vectors for
mutually visible pairs with equal bidirectional adjustments within each
pair. The updated distance matrix does not need to be symmetric; it
represents 'desired' updates based on recurrent learning.
:D_up: R(N,N)-matrix of updated distances
:D_cur: R(N,N)-matrix of current distances
:X: R(N,2)-matrix of current positions
:V: {0,1}(N,2)-matrix of current agent visibility
:returns: R(N,2)-matrix of updated positions
"""
N = len(X)
D_shape = (N, N)
if DEBUGGING:
_check_ndim('X', X, 2)
_check_shape('X', X, 2, axis=1)
_check_shape('D_up', D_up, D_shape)
_check_shape('D_cur', D_cur, D_shape)
_check_shape('V', V, D_shape)
# Broadcast field position matrix and its transpose
XX = empty((N,N,2))
XX[:] = X[:,AX,:]
XT = swapaxes(XX, 0, 1)
# Find visible & valid values (i.e., corresponding to non-zero weights)
#
# NOTE: The normalizing factor is divided by 2 because the somatic update
# represents one half of the change in distance between a pair of units.
D_inf = D_up == inf
norm = V * ~D_inf
N = norm.sum(axis=1)
valid = N.nonzero()[0]
norm[valid] /= 2*N[valid,AX]
# Zero out the inf elements of the updated distance matrix and corresponding
# elements in the current distance matrix
D_up[D_inf] = D_cur[D_inf] = 0.0
# Construct the agent-agent avoidant unit vectors
DX = XX - XT
DX_norm = hypot(DX[...,0], DX[...,1])
valid = DX_norm.nonzero()
DX[valid] /= DX_norm[valid][:,AX]
return (norm[...,AX]*(D_up - D_cur)[...,AX]*DX).sum(axis=1)
def reward_motion_update(D_up, D_cur, X, R, V):
"""
Compute updated positions by averaging reward-based unit vectors for
adjustments of the point only. The updated distance matrix represents
'desired' updates based on reward learning.
:D_up: R(N,N_R)-matrix of updated distances between points and rewards
:D_cur: R(N,N_R)-matrix of current distances between points and rewards
:X: R(N,2)-matrix of current point positions
:R: R(N_R,2)-matrix of current reward positions
:V: {0,1}(N_R,2)-matrix of current agent-reward visibility
:returns: R(N,2)-matrix of updated positions
"""
N = len(X)
N_R = len(R)
D_shape = (N, N_R)
if DEBUGGING:
_check_ndim('X', X, 2)
_check_ndim('R', R, 2)
_check_shape('X', X, 2, axis=1)
_check_shape('R', R, 2, axis=1)
_check_shape('D_up', D_up, D_shape)
_check_shape('D_cur', D_cur, D_shape)
_check_shape('V', V, D_shape)
# Broadcast field position matrix
XX = empty((N,N_R,2))
XX[:] = X[:,AX,:]
# Find valid values (i.e., corresponding to non-zero weights)
D_inf = D_up == inf
norm = V * ~D_inf
N = norm.sum(axis=1)
valid = N.nonzero()[0]
norm[valid] /= N[valid,AX]
# Zero out the inf elements of the updated distance matrix and corresponding
# elements in the current distance matrix
D_up[D_inf] = D_cur[D_inf] = 0.0
# Construct the agent-reward avoidant unit vectors
DR = XX - R[AX]
DR_norm = hypot(DR[...,0], DR[...,1])
valid = DR_norm.nonzero()
DR[valid] /= DR_norm[valid][:,AX]
return (norm[...,AX]*(D_up - D_cur)[...,AX]*DR).sum(axis=1)
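# Minimal usage sketch (added for illustration; assumes the neuroswarms
# package is importable, shapes follow the docstrings above, and the random
# inputs are placeholders):
#
#   import numpy as np
#   from neuroswarms.matrix import pairwise_distances, pairwise_unit_diffs
#
#   X = np.random.rand(8, 2)           # 8 agent positions
#   R = np.random.rand(3, 2)           # 3 reward positions
#   pairwise_distances(X, R).shape     # -> (8, 3)
#   pairwise_unit_diffs(X, R).shape    # -> (8, 3, 2), attracting unit vectors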
| 30.376106 | 81 | 0.621267 | ["MIT"] | jdmonaco/neuroswarms | neuroswarms/matrix.py | 6,867 | Python
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This submodule contains the discrete-variable quantum operations that perform
arithmetic operations on their input states.
"""
# pylint:disable=abstract-method,arguments-differ,protected-access
import numpy as np
import pennylane as qml
from pennylane.operation import Operation
class QubitCarry(Operation):
r"""QubitCarry(wires)
Apply the ``QubitCarry`` operation to four input wires.
This operation performs the transformation:
.. math::
|a\rangle |b\rangle |c\rangle |d\rangle \rightarrow |a\rangle |b\rangle |b\oplus c\rangle |bc \oplus d\oplus (b\oplus c)a\rangle
.. figure:: ../../_static/ops/QubitCarry.svg
:align: center
:width: 60%
:target: javascript:void(0);
See `here <https://arxiv.org/abs/quant-ph/0008033v1>`__ for more information.
.. note::
The first wire should be used to input a carry bit from previous operations. The final wire
holds the carry bit of this operation and the input state on this wire should be
:math:`|0\rangle`.
**Details:**
* Number of wires: 4
* Number of parameters: 0
Args:
wires (Sequence[int]): the wires the operation acts on
**Example**
The ``QubitCarry`` operation maps the state :math:`|0110\rangle` to :math:`|0101\rangle`, where
the last qubit denotes the carry value:
.. code-block::
input_bitstring = (0, 1, 1, 0)
@qml.qnode(dev)
def circuit(basis_state):
qml.BasisState(basis_state, wires=[0, 1, 2, 3])
qml.QubitCarry(wires=[0, 1, 2, 3])
return qml.probs(wires=[0, 1, 2, 3])
probs = circuit(input_bitstring)
probs_indx = np.argwhere(probs == 1).flatten()[0]
bitstrings = list(itertools.product(range(2), repeat=4))
output_bitstring = bitstrings[probs_indx]
The output bitstring is
>>> output_bitstring
(0, 1, 0, 1)
The action of ``QubitCarry`` is to add wires ``1`` and ``2``. The modulo-two result is output
in wire ``2`` with a carry value output in wire ``3``. In this case, :math:`1 \oplus 1 = 0` with
a carry, so we have:
>>> bc_sum = output_bitstring[2]
>>> bc_sum
0
>>> carry = output_bitstring[3]
>>> carry
1
"""
num_wires = 4
num_params = 0
_mat = np.array(
[
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
]
)
@classmethod
def _matrix(cls, *params):
return QubitCarry._mat
@staticmethod
def decomposition(wires):
decomp_ops = [
qml.Toffoli(wires=wires[1:]),
qml.CNOT(wires=[wires[1], wires[2]]),
qml.Toffoli(wires=[wires[0], wires[2], wires[3]]),
]
return decomp_ops
class QubitSum(Operation):
r"""QubitSum(wires)
Apply a ``QubitSum`` operation on three input wires.
This operation performs the transformation:
.. math::
|a\rangle |b\rangle |c\rangle \rightarrow |a\rangle |b\rangle |a\oplus b\oplus c\rangle
.. figure:: ../../_static/ops/QubitSum.svg
:align: center
:width: 40%
:target: javascript:void(0);
See `here <https://arxiv.org/abs/quant-ph/0008033v1>`__ for more information.
**Details:**
* Number of wires: 3
* Number of parameters: 0
Args:
wires (Sequence[int]): the wires the operation acts on
**Example**
The ``QubitSum`` operation maps the state :math:`|010\rangle` to :math:`|011\rangle`, with the
final wire holding the modulo-two sum of the first two wires:
.. code-block::
input_bitstring = (0, 1, 0)
@qml.qnode(dev)
def circuit(basis_state):
qml.BasisState(basis_state, wires = [0, 1, 2])
qml.QubitSum(wires=[0, 1, 2])
return qml.probs(wires=[0, 1, 2])
probs = circuit(input_bitstring)
probs_indx = np.argwhere(probs == 1).flatten()[0]
bitstrings = list(itertools.product(range(2), repeat=3))
output_bitstring = bitstrings[probs_indx]
The output bitstring is
>>> output_bitstring
(0, 1, 1)
The action of ``QubitSum`` is to add wires ``0``, ``1``, and ``2``. The modulo-two result is
output in wire ``2``. In this case, :math:`0 \oplus 1 \oplus 0 = 1`, so we have:
>>> abc_sum = output_bitstring[2]
>>> abc_sum
1
"""
num_wires = 3
num_params = 0
_mat = np.array(
[
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
]
)
def label(self, decimals=None, base_label=None):
        return super().label(decimals=decimals, base_label=base_label or "Σ")
@classmethod
def _matrix(cls, *params):
return QubitSum._mat
@staticmethod
def decomposition(wires):
decomp_ops = [
qml.CNOT(wires=[wires[1], wires[2]]),
qml.CNOT(wires=[wires[0], wires[2]]),
]
return decomp_ops
def adjoint(self):
return QubitSum(wires=self.wires)
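# Minimal full-adder sketch composed from the two gates above (added for
# illustration; the device name and wire layout are assumptions):
#
#   import pennylane as qml
#
#   dev = qml.device("default.qubit", wires=4)
#
#   @qml.qnode(dev)
#   def full_adder(basis_state):
#       # wires: [carry_in, a, b, 0]; afterwards wire 2 holds the sum bit
#       # (carry_in XOR a XOR b) and wire 3 holds the carry-out bit.
#       qml.BasisState(basis_state, wires=[0, 1, 2, 3])
#       qml.QubitCarry(wires=[0, 1, 2, 3])
#       qml.CNOT(wires=[1, 2])
#       qml.QubitSum(wires=[0, 1, 2])
#       return qml.probs(wires=[2, 3])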
| 31.342593 | 136 | 0.550812 | ["Apache-2.0"] | Omid-Hassasfar/pennylane | pennylane/ops/qubit/arithmetic_ops.py | 6,771 | Python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class SiteRecoveryManagementClientConfiguration(Configuration):
"""Configuration for SiteRecoveryManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The subscription Id.
:type subscription_id: str
:param resource_group_name: The name of the resource group where the recovery services vault is present.
:type resource_group_name: str
:param resource_name: The name of the recovery services vault.
:type resource_name: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> None:
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
if resource_group_name is None:
raise ValueError("Parameter 'resource_group_name' must not be None.")
if resource_name is None:
raise ValueError("Parameter 'resource_name' must not be None.")
super(SiteRecoveryManagementClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.resource_group_name = resource_group_name
self.resource_name = resource_name
self.api_version = "2021-06-01"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-recoveryservicessiterecovery/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
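# Minimal usage sketch (added for illustration; the configuration is normally
# built for you by SiteRecoveryManagementClient, and the ids/names below are
# placeholders):
#
#   from azure.identity.aio import DefaultAzureCredential
#
#   config = SiteRecoveryManagementClientConfiguration(
#       credential=DefaultAzureCredential(),
#       subscription_id="00000000-0000-0000-0000-000000000000",
#       resource_group_name="my-resource-group",
#       resource_name="my-recovery-vault",
#   )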
| 48.35 | 134 | 0.70243 | ["MIT"] | AFengKK/azure-sdk-for-python | sdk/recoveryservices/azure-mgmt-recoveryservicessiterecovery/azure/mgmt/recoveryservicessiterecovery/aio/_configuration.py | 3,868 | Python
from talon import Module, Context
import appscript
mod = Module()
ctx = Context()
ctx.matches = r"""
os: mac
"""
@mod.action_class
class Actions:
def run_shortcut(name: str):
"""Runs a shortcut on macOS"""
pass
@ctx.action_class("user")
class UserActions:
def run_shortcut(name: str):
appscript.app(id='com.apple.shortcuts.events').shortcuts[name].run_()
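# Usage sketch (added for illustration; the voice trigger and shortcut name
# are placeholders). A .talon file on macOS might bind:
#   run my shortcut: user.run_shortcut("My Shortcut")
# which invokes the macOS Shortcuts app via the action implemented above.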
| 17.434783 | 77 | 0.653367 | ["MIT"] | palexjo/pokey_talon | code/platforms/mac/user.py | 401 | Python
# Definition for binary tree with next pointer.
# class TreeLinkNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# self.next = None
class Solution:
# @param root, a tree link node
# @return nothing
def connect(self, root):
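        # Recursive top-down wiring (comments added for clarity): in a perfect
        # binary tree, a node's left child points to its right child, and its
        # right child points to the left child of the node's `next` neighbour,
        # if that neighbour exists. Then recurse into both subtrees.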
if root and root.left and root.right:
root.left.next = root.right
root.right.next = root.next and root.next.left
            return self.connect(root.left) or self.connect(root.right)
| 33.625 | 70 | 0.585502 | ["MIT"] | Fenghuapiao/PyLeetcode | LeetcodeAlgorithms/116. Populating Next Right Pointers in Each Node/populating-next-right-pointers-in-each-node.py | 538 | Python
import datetime
from platform import python_version
from six import integer_types, string_types, text_type
class _NO_VALUE(object):
pass
# we don't use NOTHING because it might be returned from various APIs
NO_VALUE = _NO_VALUE()
_SUPPORTED_TYPES = (float, bool, str, datetime.datetime, type(None)) + \
string_types + integer_types + (text_type, bytes) + (type,)
if python_version() < '3.0':
dict_type = dict
else:
from collections import abc
dict_type = abc.Mapping
def diff(a, b, path=None):
path = _make_path(path)
if isinstance(a, (list, tuple)):
return _diff_sequences(a, b, path)
if type(a).__name__ == 'SON':
a = dict(a)
if type(b).__name__ == 'SON':
b = dict(b)
if isinstance(a, dict_type):
return _diff_dicts(a, b, path)
if type(a).__name__ == 'ObjectId':
a = str(a)
if type(b).__name__ == 'ObjectId':
b = str(b)
if type(a).__name__ == 'Int64':
a = int(a)
if type(b).__name__ == 'Int64':
b = int(b)
if not isinstance(a, _SUPPORTED_TYPES):
raise NotImplementedError(
'Unsupported diff type: {0}'.format(type(a))) # pragma: no cover
if not isinstance(b, _SUPPORTED_TYPES):
raise NotImplementedError(
'Unsupported diff type: {0}'.format(type(b))) # pragma: no cover
if a != b:
return [(path[:], a, b)]
return []
def _diff_dicts(a, b, path):
if not isinstance(a, type(b)):
return [(path[:], type(a), type(b))]
returned = []
for key in set(a) | set(b):
a_value = a.get(key, NO_VALUE)
b_value = b.get(key, NO_VALUE)
path.append(key)
if a_value is NO_VALUE or b_value is NO_VALUE:
returned.append((path[:], a_value, b_value))
else:
returned.extend(diff(a_value, b_value, path))
path.pop()
return returned
def _diff_sequences(a, b, path):
if len(a) != len(b):
return [(path[:], a, b)]
returned = []
for i, a_i in enumerate(a):
path.append(i)
returned.extend(diff(a_i, b[i], path))
path.pop()
return returned
def _make_path(path):
if path is None:
return []
return path
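# Minimal usage sketch (added for illustration; the documents are made up and
# the order of the returned entries may vary):
#
#   a = {'name': 'Ada', 'tags': ['x', 'y'], 'age': 36}
#   b = {'name': 'Ada', 'tags': ['x', 'z'], 'age': 37}
#   diff(a, b)
#   # -> [(['tags', 1], 'y', 'z'), (['age'], 36, 37)]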
| 26.86747 | 77 | 0.590583 | ["BSD-3-Clause"] | kdeyev/mongomock | tests/diff.py | 2,230 | Python