Dataset schema (one row per source file):

| column | type | range / classes |
|---|---|---|
| blob_id | string | lengths 40 to 40 |
| directory_id | string | lengths 40 to 40 |
| path | string | lengths 3 to 616 |
| content_id | string | lengths 40 to 40 |
| detected_licenses | sequence | lengths 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5 to 115 |
| snapshot_id | string | lengths 40 to 40 |
| revision_id | string | lengths 40 to 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | lengths 3 to 10.2M |
| authors | sequence | lengths 1 to 1 |
| author_id | string | lengths 1 to 132 |

---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
115a5f4de9b5a815764712b22925f2dff071cb0c | acef5161a1eeb107b116f9763114bb9f77d701b4 | /pytorch/深度学习之PyTorch入门/廖星宇教程/14_googlenet.py | 92ed042f6f1974bc2acbf433b857c3b0917fb789 | [] | no_license | lingxiao00/PyTorch_Tutorials | aadb68582edbaa093ab200724c670b36763156b7 | 285bcfb0c60860e47343485daeb54947cd715f97 | refs/heads/master | 2021-10-20T16:56:21.275740 | 2019-03-01T02:46:42 | 2019-03-01T02:46:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,188 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date    : 2018-12-25 09:07:53
# @Author  : cdl ([email protected])
# @Link    : https://github.com/cdlwhm1217096231/python3_spider
# @Version : $Id$

import torch
import torchvision
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision.datasets import CIFAR10
import torchvision.transforms as tfs
import numpy as np
from utils import train
import torch.nn as nn
import sys
sys.path.append("..")

"""Inception module"""


# A basic layer block: one convolution followed by a batchnorm and a relu activation
def conv_relu(in_channel, out_channel, kernel, stride=1, padding=0):
    layer = nn.Sequential(
        nn.Conv2d(in_channel, out_channel, kernel, stride, padding),
        nn.BatchNorm2d(out_channel, eps=1e-3),
        nn.ReLU(True),
    )
    return layer

# The inception module
class inception(nn.Module):

    def __init__(self, in_channel, out1_1, out2_1, out2_3, out3_1, out3_5, out4_1):
        super(inception, self).__init__()
        # branch 1
        self.branch1x1 = conv_relu(in_channel, out1_1, 1)
        # branch 2
        self.branch3x3 = nn.Sequential(
            conv_relu(in_channel, out2_1, 1),
            conv_relu(out2_1, out2_3, 3, padding=1)
        )
        # branch 3
        self.branch5x5 = nn.Sequential(
            conv_relu(in_channel, out3_1, 1),
            conv_relu(out3_1, out3_5, 5, padding=2)
        )
        # branch 4
        self.branch_pool = nn.Sequential(
            nn.MaxPool2d(3, stride=1, padding=1),
            conv_relu(in_channel, out4_1, 1)
        )

    def forward(self, x):
        f1 = self.branch1x1(x)
        f2 = self.branch3x3(x)
        f3 = self.branch5x5(x)
        f4 = self.branch_pool(x)
        output = torch.cat((f1, f2, f3, f4), dim=1)
        return output

test_net = inception(3, 64, 48, 64, 64, 96, 32)
test_x = Variable(torch.zeros(1, 3, 96, 96))
print('input shape: {} x {} x {}'.format(
    test_x.shape[1], test_x.shape[2], test_x.shape[3]))
test_y = test_net(test_x)
print('output shape: {} x {} x {}'.format(
    test_y.shape[1], test_y.shape[2], test_y.shape[3]))
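# Sanity-check note (added, not part of the original tutorial): the four
# branches contribute out1_1 + out2_3 + out3_5 + out4_1 = 64 + 64 + 96 + 32
# = 256 channels, and each branch preserves H x W (1x1 convs, padded 3x3/5x5,
# stride-1 pooling), so the print above should report 256 x 96 x 96.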

# Define the GoogLeNet network
class googlenet(nn.Module):

    def __init__(self, in_channel, num_classes, verbose=False):
        super(googlenet, self).__init__()
        self.verbose = verbose

        self.block1 = nn.Sequential(
            conv_relu(in_channel, out_channel=64,
                      kernel=7, stride=2, padding=3),
            nn.MaxPool2d(3, 2)
        )
        self.block2 = nn.Sequential(
            conv_relu(64, 64, kernel=1),
            conv_relu(64, 192, kernel=3, padding=1),
            nn.MaxPool2d(3, 2)
        )
        self.block3 = nn.Sequential(
            inception(192, 64, 96, 128, 16, 32, 32),
            inception(256, 128, 128, 192, 32, 96, 64),
            nn.MaxPool2d(3, 2)
        )
        self.block4 = nn.Sequential(
            inception(480, 192, 96, 208, 16, 48, 64),
            inception(512, 160, 112, 224, 24, 64, 64),
            inception(512, 128, 128, 256, 24, 64, 64),
            inception(512, 112, 144, 288, 32, 64, 64),
            inception(528, 256, 160, 320, 32, 128, 128),
            nn.MaxPool2d(3, 2)
        )
        self.block5 = nn.Sequential(
            inception(832, 256, 160, 320, 32, 128, 128),
            inception(832, 384, 182, 384, 48, 128, 128),
            nn.AvgPool2d(2)
        )
        self.classifier = nn.Linear(1024, num_classes)

    def forward(self, x):
        x = self.block1(x)
        if self.verbose:
            print('block 1 output: {}'.format(x.shape))
        x = self.block2(x)
        if self.verbose:
            print('block 2 output: {}'.format(x.shape))
        x = self.block3(x)
        if self.verbose:
            print('block 3 output: {}'.format(x.shape))
        x = self.block4(x)
        if self.verbose:
            print('block 4 output: {}'.format(x.shape))
        x = self.block5(x)
        if self.verbose:
            print('block 5 output: {}'.format(x.shape))
        x = x.view(x.shape[0], -1)
        x = self.classifier(x)
        return x

test_net = googlenet(3, 10, True)
test_x = Variable(torch.zeros(1, 3, 96, 96))  # (bs, n_c, n_h, n_w)
test_y = test_net(test_x)
print('output: {}'.format(test_y.shape))


# data preprocessing
def data_tf(x):
    x = x.resize((96, 96), 2)
    x = np.array(x, dtype="float32") / 255
    x = (x - 0.5) / 0.5
    x = x.transpose((2, 0, 1))  # move the channel axis to the front
    x = torch.from_numpy(x)
    return x


# build the training and test sets from the preprocessed data
train_set = CIFAR10(root="./datasets/", train=True, transform=data_tf)
train_data = torch.utils.data.DataLoader(
    train_set, batch_size=64, shuffle=True)
test_set = CIFAR10(root="./datasets/", train=False, transform=data_tf)
test_data = torch.utils.data.DataLoader(
    test_set, batch_size=128, shuffle=False)

# define the optimizer and the loss function
net = googlenet(3, 10)
optimizer = torch.optim.SGD(net.parameters(), lr=0.01)
loss_func = nn.CrossEntropyLoss()

# train the network
# train(net, train_data, test_data, 20, optimizer, loss_func)
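# `train` is imported from a local `utils` module that is not included in
# this file. If it is unavailable, a minimal stand-in with the same call
# shape could look like the commented sketch below (an assumption about the
# helper's behavior, not the tutorial's actual code):
#
# def train(net, train_data, test_data, num_epochs, optimizer, criterion):
#     for epoch in range(num_epochs):
#         net.train()
#         for im, label in train_data:
#             im, label = Variable(im), Variable(label)
#             out = net(im)
#             loss = criterion(out, label)
#             optimizer.zero_grad()
#             loss.backward()
#             optimizer.step()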
| [
"[email protected]"
] | |
1d32e1151eba68baf6a8345f309bbe74e4e2f45e | 3cb59da879c7865dd6ddb246b7ea92d5a71cb838 | /documentation/book/src/conf.py | da1eca7ba63789d464e1cc8f1721a2ecc13e5f7b | [] | no_license | iNarcissuss/Cuckoodroid-1 | 9110be112f45327ffe8a74b2216fe8ad6a153485 | 0170c52aa9536ffc5b8391190dec892c9bd7ba0b | refs/heads/master | 2020-04-07T08:53:28.011215 | 2018-06-17T22:25:29 | 2018-06-17T22:30:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,078 | py | # -*- coding: utf-8 -*-
#
# Cuckoo Sandbox documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CuckooDroid'
copyright = u'2014-2015, Checkpoint Software Technologies'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = en
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "%s v%s Book" % (project, version)
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_images/logo/cuckoo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'CuckooSandboxdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'CuckooSandbox.tex', u'Cuckoo Sandbox Book',
     u'Cuckoo Sandbox', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'cuckoosandbox', u'Cuckoo Sandbox Book',
     [u'Cuckoo Sandbox'], 1)
]
| [
"[email protected]"
] | |
9e204b2a44c4a6faeafac15f08663c30bceeb24e | 53784d3746eccb6d8fca540be9087a12f3713d1c | /res/packages/scripts/scripts/client/gui/prb_control/entities/random/squad/actions_handler.py | 5f596563a646d6a33336b7a766a43818fd80f3e2 | [] | no_license | webiumsk/WOT-0.9.17.1-CT | 736666d53cbd0da6745b970e90a8bac6ea80813d | d7c3cf340ae40318933e7205bf9a17c7e53bac52 | refs/heads/master | 2021-01-09T06:00:33.898009 | 2017-02-03T21:40:17 | 2017-02-03T21:40:17 | 80,870,824 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 3,310 | py | # 2017.02.03 21:48:45 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/prb_control/entities/random/squad/actions_handler.py
from CurrentVehicle import g_currentVehicle
from constants import MIN_VEHICLE_LEVEL, MAX_VEHICLE_LEVEL
from gui import DialogsInterface
from gui.Scaleform.daapi.view.dialogs import I18nConfirmDialogMeta
from gui.prb_control.entities.base.squad.actions_handler import SquadActionsHandler


class RandomSquadActionsHandler(SquadActionsHandler):
    """
    Random squad actions handler
    """
    pass


class BalancedSquadActionsHandler(RandomSquadActionsHandler):
    """
    Random balanced squad actions handler
    """

    def execute(self):
        if self._entity.isCommander():
            func = self._entity
            fullData = func.getUnitFullData(unitIdx=self._entity.getUnitIdx())
            notReadyCount = 0
            for slot in fullData.slotsIterator:
                slotPlayer = slot.player
                if slotPlayer:
                    if slotPlayer.isInArena() or fullData.playerInfo.isInSearch() or fullData.playerInfo.isInQueue():
                        DialogsInterface.showI18nInfoDialog('squadHavePlayersInBattle', lambda result: None)
                        return True
                    if not slotPlayer.isReady:
                        notReadyCount += 1

            if not fullData.playerInfo.isReady:
                notReadyCount -= 1
            if fullData.stats.occupiedSlotsCount == 1:
                DialogsInterface.showDialog(I18nConfirmDialogMeta('squadHaveNoPlayers'), self._confirmCallback)
                return True
            if notReadyCount > 0:
                if notReadyCount == 1:
                    DialogsInterface.showDialog(I18nConfirmDialogMeta('squadHaveNotReadyPlayer'), self._confirmCallback)
                    return True
                DialogsInterface.showDialog(I18nConfirmDialogMeta('squadHaveNotReadyPlayers'), self._confirmCallback)
                return True
            if not g_currentVehicle.isLocked():
                _, unit = self._entity.getUnit()
                playerVehicles = unit.getVehicles()
                if playerVehicles:
                    commanderLevel = g_currentVehicle.item.level
                    lowerBound, upperBound = self._entity.getSquadLevelBounds()
                    minLevel = max(MIN_VEHICLE_LEVEL, commanderLevel + lowerBound)
                    maxLevel = min(MAX_VEHICLE_LEVEL, commanderLevel + upperBound)
                    levelRange = range(minLevel, maxLevel + 1)
                    for _, unitVehicles in playerVehicles.iteritems():
                        for vehicle in unitVehicles:
                            if vehicle.vehLevel not in levelRange:
                                DialogsInterface.showDialog(I18nConfirmDialogMeta('squadHaveNoPlayers'), self._confirmCallback)
                                return True

            self._setCreatorReady()
        else:
            self._entity.togglePlayerReadyAction(True)
        return True
# okay decompyling c:\Users\PC\wotsources\files\originals\res\packages\scripts\scripts\client\gui\prb_control\entities\random\squad\actions_handler.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.02.03 21:48:45 Střední Evropa (běžný čas)
"[email protected]"
] | |
94f99e7b82a5ec22f9c330c1c3424c80ddffd340 | 68cf7c25bb614883c50d21e5051fbea8dbf18ccb | /ecommercejockey/premier/migrations/0004_premiermanufacturer_is_relevant.py | 7fffb9a4c92c2468f267399e76f0273e040662dc | [
"MIT"
] | permissive | anniethiessen/ecommerce-jockey | 63bf5af6212a46742dee98d816d0bc2cdb411708 | 9268b72553845a4650cdfe7c88b398db3cf92258 | refs/heads/master | 2022-12-14T02:29:25.140796 | 2021-05-15T01:20:30 | 2021-05-15T01:20:30 | 211,400,595 | 1 | 1 | MIT | 2022-12-08T06:45:40 | 2019-09-27T20:57:19 | Python | UTF-8 | Python | false | false | 420 | py | # Generated by Django 2.2.5 on 2019-10-24 21:49
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('premier', '0003_premierproduct_vendor_part_number'),
    ]

    operations = [
        migrations.AddField(
            model_name='premiermanufacturer',
            name='is_relevant',
            field=models.BooleanField(default=False),
        ),
    ]
| [
"[email protected]"
] | |
91ee91326e2c26dfbfda3d24dd84086ee64b226b | bfc25f1ad7bfe061b57cfab82aba9d0af1453491 | /data/external/repositories/124701/kaggle-bike-sharing-demand-master/support_vector_regression.py | 759c9ec148be5704e863f8be68bcb9bf1a95db7a | [
"MIT"
] | permissive | Keesiu/meta-kaggle | 77d134620ebce530d183467202cf45639d9c6ff2 | 87de739aba2399fd31072ee81b391f9b7a63f540 | refs/heads/master | 2020-03-28T00:23:10.584151 | 2018-12-20T19:09:50 | 2018-12-20T19:09:50 | 147,406,338 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,644 | py | # Kaggle Bike Sharing Demand
# Joey L. Maalouf
# Approach: Support Vector Regression

# -- import any necessary modules ----------------------------------------------
import csv
from sklearn.svm import SVR
# from sklearn.grid_search import GridSearchCV


# -- define our functions ------------------------------------------------------
def to_int(input):
    try:
        return int(input)
    except TypeError:
        return [int(input[0]), int(input[1])]


def read_data(filename, xy):
    datalist = []
    # read in the file
    data = open(filename)
    reader = csv.reader(data, delimiter=",")
    for row in reader:
        if (xy == "x"):
            # store just the hour and weather
            datalist.append([row[0][11:13], row[4]])
        elif (xy == "y"):
            # store just the count
            datalist.append(row[11])
        elif (xy == "xx"):
            datalist.append(row[0])
    return datalist[1:] if xy == "xx" else [to_int(i) for i in datalist[1:]]


# -- read in the data ----------------------------------------------------------
print("Let's start reading in the data...")
x_train = read_data("train.csv", "x")
y_train = read_data("train.csv", "y")
x_test = read_data("test.csv", "x")
print("Finished reading in the data!\n")

# -- fit regression model ------------------------------------------------------
print("Let's start instantiating our model...")
# parameters = \
#     [
#         {
#             "kernel": ["rbf"],
#             "C": [1e3, 1e2, 1e1],
#             "gamma": [1e0, 1e-1, 1e-2, 1e-3]
#         },
#         {
#             "kernel": ["poly"],
#             "C": [1e3, 1e2, 1e1],
#             "gamma": [1e0, 1e-1, 1e-2, 1e-3],
#             "degree": [2, 3, 4]
#         }
#     ]
# svr = GridSearchCV(SVR(), parameters)
svr = SVR(kernel="rbf", C=1000, gamma=0.1)
print("Finished instantiating our model!\n")
print("Let's start training our model...")
model = svr.fit(x_train, y_train)
print("Finished training our model!\n")
print("Let's start predicting our new data...")
y_test = model.predict(x_test)
print("Finished predicting our new data!\n")
# print("\nBest estimator:")
# print(svr.best_estimator_)
# print("\nBest parameters:")
# print(svr.best_params_)
# print("\nScorer:")
# print(svr.scorer_)
# print("\nGrid scores:")
# for s in svr.grid_scores_:
#     print(s)

# -- output the results --------------------------------------------------------
datetime = read_data("test.csv", "xx")
with open("predicted_output_sv.csv", "w") as output:
    output.write("datetime,count\n")
    for i in range(len(y_test)):
        output.write("%s,%d\n" % (datetime[i], y_test[i]))
| [
"[email protected]"
] | |
6b6a068af11269018cfc37811340df981f155484 | 760c354ab910fb9ad5f1ea44221e1dc724f1108b | /tests/test_extrusion_stiffness.py | 8b49218dfee3067009ce6ae4e0e2c5320fc400a6 | [
"MIT"
] | permissive | yijiangh/assembly_instances | dabeafc7c5fc8b8b2b9ce7003ab493ad0f421db8 | b97a4924d9998b64815c692cada85f4f595e023f | refs/heads/master | 2021-06-13T13:27:27.579912 | 2020-01-30T02:40:51 | 2020-01-30T02:40:51 | 174,738,145 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,286 | py | import os
import pytest
import numpy as np

from pyconmech import StiffnessChecker


@pytest.fixture
def stiffness_tol():
    trans_tol = 0.0015
    rot_tol = 5 * np.pi / 180
    return trans_tol, rot_tol


@pytest.fixture
def known_failure():
    return ['klein_bottle_trail.json', 'rotated_dented_cube.json']


def create_stiffness_checker(extrusion_path, trans_tol=0.0015, rot_tol=5*np.pi/180, verbose=False):
    # TODO: the stiffness checker likely has a memory leak
    if not os.path.exists(extrusion_path):
        raise FileNotFoundError(extrusion_path)
    checker = StiffnessChecker(json_file_path=extrusion_path, verbose=verbose)
    checker.set_self_weight_load(True)
    checker.set_nodal_displacement_tol(trans_tol=trans_tol, rot_tol=rot_tol)
    return checker
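
# Note (assumption, not stated in this file): `extrusion_dir` and
# `extrusion_problem` are not defined in this module, so pytest presumably
# resolves them as fixtures from a sibling conftest.py, with
# `extrusion_problem` parametrized over the available extrusion instances.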

def test_extrusion_stiffness(extrusion_dir, extrusion_problem, stiffness_tol, known_failure):
    p = os.path.join(extrusion_dir, extrusion_problem)
    checker = create_stiffness_checker(p, trans_tol=stiffness_tol[0], rot_tol=stiffness_tol[1], verbose=False)
    is_stiff = checker.solve()
    success, nodal_displacement, fixities_reaction, element_reaction = checker.get_solved_results()
    assert is_stiff == success
    trans_tol, rot_tol = checker.get_nodal_deformation_tol()
    max_trans, max_rot, max_trans_vid, max_rot_vid = checker.get_max_nodal_deformation()
    compliance = checker.get_compliance()
    assert compliance > 0, 'Compliance must be bigger than zero (no matter how small the value is); otherwise something is likely wrong with the material / cross-sectional properties. Does it have cross section area, Jx, Ix, Iy, Iz values (compared to the radius)?'
    if not success:
        print('\n' + '='*6)
        print('Test stiffness on problem: {}'.format(p))
        # The inverse of stiffness is flexibility or compliance
        print('Stiff: {} | Compliance: {}'.format(is_stiff, compliance))
        print('Max translation deformation: {0:.5f} / {1:.5} = {2:.5}, at node #{3}'.format(
            max_trans, trans_tol, max_trans / trans_tol, max_trans_vid))
        print('Max rotation deformation: {0:.5f} / {1:.5} = {2:.5}, at node #{3}'.format(
            max_rot, rot_tol, max_rot / rot_tol, max_rot_vid))
    if extrusion_problem not in known_failure:
        assert success
| [
"[email protected]"
] | |
5b641a6aa3fe121f56f85194dd5c3617d7814729 | e2220b78a968d8bff21061bdf67b027d933bb2be | /rentoMojo/rentoMojo/asgi.py | 5399867dc8911d3da9c3bd8daa85d9e095910552 | [] | no_license | amannvl/rentoMojo | eab2ed103ed32e9e81424093571019160d9c49c0 | 0f4f0076f00ccd7178f42dee0081ee0d58027874 | refs/heads/master | 2023-03-08T10:24:44.339962 | 2021-02-20T09:48:30 | 2021-02-20T09:48:30 | 340,601,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
ASGI config for rentoMojo project.

It exposes the ASGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""

import os

from django.core.asgi import get_asgi_application

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rentoMojo.settings')

application = get_asgi_application()
| [
"Your-Email"
] | Your-Email |
d7eaedaf350f3b86d019a17c881535f17a757277 | 7b1de4a2607e3125b719c499a05bf6e2d3ec532d | /design_patterns/Command/ex2/actions.py | 12e98f5164f5d62f49fffacf1ac797ab96234447 | [] | no_license | ganqzz/sandbox_py | 61345ac7bddb09081e02decb78507daa3030c1e8 | cc9e1ecca2ca99f350a3e2c3f51bbdb5eabc60e1 | refs/heads/master | 2022-12-01T21:54:38.461718 | 2021-09-04T03:47:14 | 2021-09-04T03:47:14 | 125,375,767 | 0 | 1 | null | 2023-04-16T00:55:51 | 2018-03-15T14:00:47 | Python | UTF-8 | Python | false | false | 599 | py | # Receiver classes

class Appliance(object):

    def __init__(self, name):
        self._name = name

    def on(self):
        print('%s has been turned on.' % self._name)

    def off(self):
        print('%s has been turned off.' % self._name)


class Door(object):

    def __init__(self, name):
        self.name = name

    def lock(self):
        print("%s is locked." % self.name)

    def unlock(self):
        print("%s is unlocked." % self.name)


class Security(object):

    def arm(self):
        print('Security system armed')

    def disarm(self):
        print('Security disarmed')
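
# The module above only defines the receivers of the Command pattern. For
# illustration, a minimal Command and invoker loop driving them could look
# like this (a hypothetical sketch, not part of the original example):


class CommandSketch(object):
    """Hypothetical command binding a receiver method to execute()."""

    def __init__(self, action, *args):
        self._action = action
        self._args = args

    def execute(self):
        self._action(*self._args)


if __name__ == '__main__':
    light = Appliance('Living room light')
    # the invoker simply queues commands and replays them later
    commands = [CommandSketch(light.on), CommandSketch(light.off)]
    for command in commands:
        command.execute()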
| [
"[email protected]"
] | |
41a9e2c99011225acf96d6969a1bfc6ac8265ef4 | 837c1bd7e021f071fbee78e2e4c7c27695ff62db | /meiduo_lianxi/apps/areas/migrations/0001_initial.py | 9296f6f3a0d7380538a93604e5129fed0e6b4aa4 | [
"MIT"
] | permissive | Wang-TaoTao/lianxiku | 0ae87b3db98e9f869a2a9901c24da80ccb63fe6f | 0f58b6859b4dcd4e81d8f7c4f67be68f245811cc | refs/heads/master | 2020-07-06T01:21:59.869064 | 2019-09-16T16:05:24 | 2019-09-16T16:05:24 | 202,844,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 968 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-08-23 09:00
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Area',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20, verbose_name='名称')),
                ('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='subs', to='areas.Area', verbose_name='上级行政区划')),
            ],
            options={
                'verbose_name': '省市区',
                'db_table': 'tb_areas',
                'verbose_name_plural': '省市区',
            },
        ),
    ]
| [
"[email protected]"
] | |
47f5d95e1f02be15883f5396f1c1136658fc91af | 8660f77e77948f7646e2f0e4e46b46c0c7b318c5 | /examples/ps-simulator.py | 1e61d5fc5c05526a910c34ea8942a6492baf3424 | [] | no_license | ajoubertza/icalepcs-workshop | b6c227c4e6f79181222c40a9c505d3c41a8d068f | 06ff925fe4724a71c17bbd193e3387884a919e51 | refs/heads/gh-pages | 2020-08-03T09:28:09.959380 | 2019-10-06T01:04:06 | 2019-10-06T01:04:06 | 211,702,036 | 1 | 1 | null | 2019-10-05T16:28:27 | 2019-09-29T17:45:48 | HTML | UTF-8 | Python | false | false | 3,665 | py | #!/usr/bin/env python3

import time
import random
import logging

import gevent.server

DEFAULT_BIND = ''
DEFAULT_PORT = 45000


class Attr:

    def __init__(self, *, initial_value=0.,
                 encode=lambda x: bytes(str(x), 'ascii'),
                 decode=float):
        self.value = initial_value
        self.encode = encode
        self.decode = decode

    def get(self):
        return self.encode(self.value)

    def set(self, value):
        self.value = self.decode(value)


class Calibrate(Attr):

    def set(self, value):
        self.ts = time.time()
        super().set(value)


class State(Attr):

    def __init__(self, calib, *args, **kwargs):
        kwargs['initial_value'] = 0
        kwargs['decode'] = int
        super().__init__(*args, **kwargs)
        self.calib = calib
        calib.ts = 0

    def get(self):
        self.value = 0
        if time.time() - self.calib.ts < 2:
            self.value = 1
        return super().get()


class PSSimulator(gevent.server.StreamServer):

    class Error(Exception):
        pass

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.log = logging.getLogger(f'simulator.{self.server_port}')
        calib = Calibrate(initial_value=0)
        self.attrs = {
            b'stat': State(calib),
            b'vol': Attr(initial_value=0.1),
            b'curr': Attr(initial_value=0.),
            b'calib': calib,
        }

    def __getitem__(self, name):
        return self.attrs[name].get()

    def __setitem__(self, name, value):
        self.attrs[name].set(value)

    def handle(self, sock, addr):
        log = self.log
        log.info('new connection from %r', addr)
        fileobj = sock.makefile(mode='rb')
        while True:
            request = fileobj.readline()
            if not request:
                log.info('disconnected %r', addr)
                break
            log.info('request %r', request)
            reply = b'ERROR'
            try:
                reply = self.handle_request(request)
            except PSSimulator.Error:
                pass
            except:
                log.exception('Unforeseen error')
            gevent.sleep(1e-1)
            sock.sendall(reply + b'\n')
            log.info('replied %r', reply)
        fileobj.close()

    def handle_request(self, request):
        req_lower = request.strip().lower()
        is_query = b'?' in req_lower
        pars = req_lower.split()
        name = pars[0]
        if is_query:
            name = name[:-1]  # take out '?'
        if is_query:
            return self[name]
        self[name] = pars[1]
        return b'OK'


def main(number=1, bind=DEFAULT_BIND, port=DEFAULT_PORT, **kwargs):
    servers = []
    logging.info('starting simulator...')
    for i in range(number):
        address = bind, port + i
        server = PSSimulator(address)
        server.start()
        servers.append(server)
        server.log.info('simulator listening on %r!', address)
    try:
        while True:
            # gevent.joinall(servers)
            gevent.sleep(1)
    except KeyboardInterrupt:
        logging.info('Ctrl-C pressed. Bailing out!')
        for server in servers:
            server.stop()


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--port', type=int, default=DEFAULT_PORT)
    parser.add_argument('--bind', default=DEFAULT_BIND)
    parser.add_argument('--log-level', default='info')
    parser.add_argument('--number', type=int, default=1)
    args = parser.parse_args()
    logging.basicConfig(level=args.log_level.upper())
    main(**vars(args))
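
# The protocol implemented above is plain line-based text over TCP. A manual
# test session could look like the commented sketch below (an assumption:
# simulator running locally with the default port):
#
# import socket
# sock = socket.create_connection(('localhost', 45000))
# f = sock.makefile('rb')
# sock.sendall(b'vol?\n'); print(f.readline())      # query  -> b'0.1\n'
# sock.sendall(b'curr 1.5\n'); print(f.readline())  # write  -> b'OK\n'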
| [
"[email protected]"
] | |
4672862d3327093a2d281b7b1a5b7c8ee31255f7 | fafbddf21e669a20e3329d85f8edb06fb03d5a82 | /wp2txt2json_run.py | fd6589378173f0054d69a1acae25e2572e872c3d | [] | no_license | quesada/runs-gensim | 07e188ca971d9734989c1981f297f00d7813eedc | aae75cc3188b99bd571fe7bbef008ac94bf3918a | refs/heads/master | 2020-06-05T00:46:10.856429 | 2011-09-01T19:14:35 | 2011-09-01T19:14:35 | 1,306,724 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,042 | py | #!/usr/bin/env python
# encoding: utf-8
"""
wp2txt2json.py

Created by Stephan Gabler on 2011-06-09.
Copyright (c) 2011 __MyCompanyName__. All rights reserved.
"""

from os import path
import codecs
import glob
import json
import os
import re
import sys
import time
import tools


def main(param_file=None):

    # setup
    p, base_path, output_dir = tools.setup(param_file)
    logger = tools.get_logger('gensim', path.join(output_dir, "run.log"))
    logger.info("running %s" % ' '.join(sys.argv))

    # in test case
    if param_file:
        files = [path.join(base_path, p['wiki_txt'])]
    else:
        files = glob.glob(path.join(base_path, p['wiki_txt']) + '*.txt')

    out = codecs.open(os.path.join(output_dir, 'wiki.json'), mode='w', encoding='utf-8')

    headline = re.compile('\[\[(.*)\]\]')
    level2 = re.compile('== (.*) ==')
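    # Assumed input shape (inferred from the two regexes above): wp2txt-style
    # dumps in which an article begins with a "[[Title]]" line, second-level
    # sections appear as "== Section ==" lines, and every other non-blank
    # line is body text collected under the current section.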
    t0 = time.time()
    c = 0
    res = {}
    for file in files:
        print 'work on: %s' % file
        with codecs.open(file, encoding='utf-8') as f:
            for line in f:
                # ignore linebreaks
                if line == '\n':
                    continue
                # if headline found
                if headline.search(line):
                    if len(res) > 0:
                        out.write(json.dumps(res, encoding='utf-8', ensure_ascii=False) + '\n')
                    topic = headline.search(line).groups()[0]
                    res = {topic: {}}
                    sub = None
                elif level2.search(line):
                    sub = level2.search(line).groups()[0]
                else:
                    if not sub:
                        res[topic].setdefault('desc', []).append(line.strip())
                    else:
                        res[topic].setdefault(sub, []).append(line.strip())
        c += 1
        print 'average execution time: %f' % ((time.time() - t0) / c)
    out.write(json.dumps(res, encoding='utf-8', ensure_ascii=False) + '\n')
    print time.time() - t0


if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
4279424d2d636b5183bcdaca92e5e30219f6fbc4 | 0a9949a7dbe5f7d70028b22779b3821c62eb6510 | /static/rice/tools.py | 542ed77243cfdf7ef84167c7dd93f31ce1e57930 | [] | no_license | 744996162/warehouse | ed34f251addb9438a783945b6eed5eabe18ef5a2 | 3efd299a59a0703a1a092c58a6f7dc2564b92e4d | refs/heads/master | 2020-06-04T22:10:14.727156 | 2015-07-03T09:40:09 | 2015-07-03T09:40:09 | 35,603,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,322 | py | #ecoding=utf-8
__author__ = 'Administrator'

import os
import xlwt
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
import datetime
from email.mime.image import MIMEImage
# import sys
# reload(sys)
# sys.setdefaultencoding('utf-8')


def getYestaday():
    now_time = datetime.datetime.now()
    yes_time = now_time + datetime.timedelta(days=-1)
    return yes_time.strftime('%Y%m%d')


def excel_write(text_in="0128/result.txt", excel_out="0128/result0128.xls", table_name="sheet1"):
    fr_in = open(text_in)
    wbk = xlwt.Workbook(encoding='utf-8')
    sheet = wbk.add_sheet(table_name, True)
    for i, line in enumerate(fr_in.readlines()):
        stringArr = line.strip().split("\t")
        for j, str in enumerate(stringArr):
            if j == 6:
                str = int(str)
            sheet.write(i, j, str)
        # print i, stringArr
    export = excel_out
    wbk.save(export)
    return excel_out


def excel_write2(text_in1="0128/result.txt", text_in2="0128/error.txt", excel_out="0128/result0128.xls", table_name1="sheet1", table_name2="sheet2"):
    fr_in1 = open(text_in1)
    fr_in2 = open(text_in2)
    wbk = xlwt.Workbook(encoding='utf-8')
    sheet1 = wbk.add_sheet(table_name1, True)
    for i, line in enumerate(fr_in1.readlines()):
        stringArr = line.strip().split("\t")
        for j, str in enumerate(stringArr):
            if j == 6:
                str = int(str)
            sheet1.write(i, j, str)
    sheet2 = wbk.add_sheet(table_name2, True)
    for i, line in enumerate(fr_in2.readlines()):
        stringArr = line.strip().split("\t")
        for j, str in enumerate(stringArr):
            if j == 6:
                str = int(str)
            sheet2.write(i, j, str)
    export = excel_out
    wbk.save(export)
    return excel_out


def send_mail(to_list, sub, content):
    #############
    # to_list: recipients
    # sub: mail subject
    # content: mail body
    ###############
    # mail server, username, password and mailbox domain suffix
    mail_host = "smtp.qq.com"
    mail_user = "744996162"
    mail_pass = "a1b2c3e48517343"
    mail_postfix = "qq.com"
    me = mail_user + "<" + mail_user + "@" + mail_postfix + ">"
    msg = MIMEText(content)
    msg['Subject'] = sub
    msg['From'] = me
    msg['To'] = to_list
    try:
        s = smtplib.SMTP()
        s.connect(mail_host)
        s.login(mail_user, mail_pass)
        s.sendmail(me, to_list, msg.as_string())
        s.close()
        return True
    except Exception as e:
        print(e)
        return False


def send_mail2(to_list, sub, content, attach_file="0128/result.txt"):
    #############
    # to_list: recipients
    # sub: mail subject
    # content: mail body
    ###############
    # mail server, username, password and mailbox domain suffix
    mail_host = "smtp.qq.com"
    mail_user = "744996162"
    mail_pass = "a1b2c3e48517343"
    mail_postfix = "qq.com"
    me = mail_user + "<" + mail_user + "@" + mail_postfix + ">"
    msg = MIMEMultipart()
    msg['Subject'] = sub
    msg['From'] = me
    msg['To'] = to_list
    att = MIMEApplication(file(attach_file, 'rb').read())
    att["Content-Type"] = 'application/octet-stream'
    att.add_header('content-disposition', 'attachment', filename=attach_file)
    msg.attach(att)
    try:
        s = smtplib.SMTP()
        s.connect(mail_host)
        s.login(mail_user, mail_pass)
        s.sendmail(me, to_list, msg.as_string())
        s.close()
        return True
    except Exception as e:
        print(e)
        return False
    pass


def send_mail_attach_dir(to_list, sub, content, dir_path="20150128"):
    #############
    # to_list: recipients
    # sub: mail subject
    # content: mail body
    ###############
    # mail server, username, password and mailbox domain suffix
    mail_host = "smtp.qq.com"
    mail_user = "744996162"
    mail_pass = "a1b2c3e48517343"
    mail_postfix = "qq.com"
    me = mail_user + "<" + mail_user + "@" + mail_postfix + ">"
    msg = MIMEMultipart()
    msg['Subject'] = sub
    msg['From'] = me
    msg['To'] = to_list
    path_list = get_all_file(dir_path)
    for file_path in path_list:
        try:
            att = MIMEApplication(file(file_path, 'rb').read())
            att["Content-Type"] = 'application/octet-stream'
            att.add_header('content-disposition', 'attachment', filename=file_path)
            msg.attach(att)
        except Exception as e:
            print(e)
    try:
        s = smtplib.SMTP()
        s.connect(mail_host)
        s.login(mail_user, mail_pass)
        s.sendmail(me, to_list, msg.as_string())
        s.close()
        return True
    except Exception as e:
        print(e)
        return False
    pass


def get_all_file(dir_path="20150128"):
    file_list = []
    if dir_path is None:
        raise Exception("floder_path is None")
    for dirpath, dirnames, filenames in os.walk(dir_path):
        for name in filenames:
            # print(name)
            file_list.append(dirpath + '/' + name)
    return file_list


if __name__ == "__main__":
    # excel_write2()
    # to_list = "[email protected]"
    # send_mail2(to_list, "hello", "mail test")
    floder_path = "20150128"
    file_list = get_all_file(floder_path)
    for i in file_list:
        print i
    pass
    to_list = "[email protected]"
    send_mail_attach_dir(to_list, "hello", "mail test")
| [
"[email protected]"
] | |
7d35d105cdd8d6255538d78fb8262fabfedcd14f | 1e17cce2124d772871eaa7086a37e98b5af14adf | /Alphabets/Q.py | 59937c56bc6e491340c16d5360de8e91efd68e2d | [] | no_license | venukumarbv/PythonPatternPrinting | b5e62e23ac2e9d929a74f5b58a42116f27163889 | 3318907b8e930ada6e367cc04a0b6e314666ec24 | refs/heads/master | 2022-12-23T02:02:18.793081 | 2020-07-03T07:21:23 | 2020-07-03T07:21:23 | 276,833,884 | 0 | 1 | null | 2020-09-30T19:44:18 | 2020-07-03T07:14:53 | Python | UTF-8 | Python | false | false | 508 | py | '''
  * * *
*       *
*       *
*       *
*   *   *
*     * *
  * * * *
'''

for row in range(7):
    for col in range(5):
        if row in {0, 6} and col in {1, 2, 3}:
            print('*', end=' ')
        elif row in range(1, 6) and col in {0, 4}:
            print('*', end=' ')
        elif (row == 4 and col == 2) or (row == 5 and col == 3):
            print('*', end=' ')
        elif row == 6 and col == 4:
            print('*', end=' ')
        else:
            print(' ', end=' ')
    print()
| [
"[email protected]"
] | |
6f349d3c153769bb6bfa800268372e30579abeb6 | a82aa8430e32eaf62df0f44b20afb0e7d50c3d7b | /ippon/tournament/seralizers.py | 8c29e4688e49aed3e000d0f5c7bfc44a4784bbeb | [
"MIT"
] | permissive | morynicz/ippon_back | 314daac99f79247b749dc46d59a645a6eb840263 | dce901bfc649c6f8efbbf0907654e0860606b3e3 | refs/heads/master | 2022-12-20T23:33:10.898738 | 2021-10-17T09:25:39 | 2021-10-17T09:25:39 | 124,851,931 | 0 | 2 | MIT | 2022-12-08T12:37:26 | 2018-03-12T07:43:17 | Python | UTF-8 | Python | false | false | 3,502 | py | from django.contrib.auth.models import User
from rest_framework import serializers
from rest_framework.exceptions import ValidationError

import ippon.models.player as plm
import ippon.models.tournament as tm
import ippon.player.serializers as pls


class TournamentParticipationSerializer(serializers.ModelSerializer):
    is_age_ok = serializers.BooleanField(source='check_is_age_ok', read_only=True)
    is_rank_ok = serializers.BooleanField(source='check_is_rank_ok', read_only=True)
    is_sex_ok = serializers.BooleanField(source='check_is_sex_ok', read_only=True)
    tournament_id = serializers.IntegerField(source='tournament.id')
    player = pls.ShallowPlayerSerializer()

    class Meta:
        model = tm.TournamentParticipation
        fields = (
            'id',
            'is_paid',
            'is_registered',
            'is_qualified',
            'is_age_ok',
            'is_rank_ok',
            'is_sex_ok',
            'player',
            'tournament_id',
            'notes'
        )

    def create(self, validated_data):
        if not isinstance(self.initial_data['player']['id'], int):
            raise ValidationError('player.id must be an integer')
        filtered = plm.Player.objects.filter(pk=self.initial_data['player']['id'])
        if not filtered.exists():
            raise ValidationError('no such player')
        participation = tm.TournamentParticipation.objects.create(
            player=filtered.first(),
            tournament=tm.Tournament.objects.get(pk=validated_data['tournament']['id'])
        )
        return participation

    def update(self, instance, validated_data):
        instance.is_paid = validated_data['is_paid']
        instance.is_registered = validated_data['is_registered']
        instance.is_qualified = validated_data['is_qualified']
        instance.notes = validated_data['notes']
        instance.save()
        return instance


class TournamentAdminSerializer(serializers.ModelSerializer):
    tournament_id = serializers.IntegerField(source='tournament.id')
    user = serializers.DictField(source='get_user')

    class Meta:
        model = tm.TournamentAdmin
        fields = (
            'tournament_id',
            'id',
            'is_master',
            'user'
        )
        read_only_fields = ('user',)

    def create(self, validated_data):
        if not isinstance(self.initial_data['user']['id'], int):
            raise ValidationError('user.id must be an integer')
        admin = tm.TournamentAdmin.objects.create(
            user=User.objects.get(pk=self.initial_data['user']['id']),
            tournament=tm.Tournament.objects.get(pk=validated_data['tournament']['id']),
            is_master=False
        )
        return admin

    def update(self, instance, validated_data):
        instance.is_master = validated_data['is_master']
        instance.save()
        return instance


class TournamentSerializer(serializers.ModelSerializer):

    class Meta:
        model = tm.Tournament
        fields = (
            'id',
            'name',
            'date',
            'city',
            'address',
            'description',
            'webpage',
            'team_size',
            'group_match_length',
            'ko_match_length',
            'final_match_length',
            'finals_depth',
            'age_constraint',
            'sex_constraint',
            'rank_constraint',
            'rank_constraint_value',
            'age_constraint_value'
        )
| [
"[email protected]"
] | |
a62739539e974b10b70d26d979a69301251dca72 | c49590eb7f01df37c8ec5fef00d0ffc7250fa321 | /test/test_res_mtf_order_cancel.py | b4faf84610f01187cc9e405cac99e3cb98b887c9 | [] | no_license | harshad5498/ks-orderapi-python | 373a4b85a56ff97e2367eebd076f67f972e92f51 | 237da6fc3297c02e85f0fff1a34857aaa4c1d295 | refs/heads/master | 2022-12-09T19:55:21.938764 | 2020-09-03T05:22:51 | 2020-09-03T05:22:51 | 293,533,651 | 0 | 0 | null | 2020-09-07T13:19:25 | 2020-09-07T13:19:24 | null | UTF-8 | Python | false | false | 1,481 | py | # coding: utf-8
"""
KS Trade API's
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import openapi_client
from openapi_client.models.res_mtf_order_cancel import ResMTFOrderCancel # noqa: E501
from openapi_client.rest import ApiException
class TestResMTFOrderCancel(unittest.TestCase):
"""ResMTFOrderCancel unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test ResMTFOrderCancel
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = openapi_client.models.res_mtf_order_cancel.ResMTFOrderCancel() # noqa: E501
if include_optional :
return ResMTFOrderCancel(
orderId = '0',
message = '0'
)
else :
return ResMTFOrderCancel(
)
def testResMTFOrderCancel(self):
"""Test ResMTFOrderCancel"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
62605144fd6035421844ef44be1d02b01acb69ce | 2fe58e7f6bfc3efdb78ca56f72a4e2a75a24c270 | /eric/eric6/E5Gui/E5Led.py | 4571e0736687cac051cfd8c678a3d5649eb747e9 | [] | no_license | testerclub/eric6-20.3 | 3053e0e6962060b213f5df329ee331a4893d18e6 | bba0b9f13fa3eb84938422732d751219bc3e29e2 | refs/heads/master | 2023-03-18T08:24:03.472297 | 2020-03-14T06:44:14 | 2020-03-14T06:44:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,710 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2006 - 2020 Detlev Offenbach <[email protected]>
#

"""
Module implementing a LED widget.

It was inspired by KLed.
"""

from PyQt5.QtCore import pyqtSignal, Qt, QSize, QPoint
from PyQt5.QtGui import QColor, QRadialGradient, QPalette, QPainter, QBrush
from PyQt5.QtWidgets import QWidget

E5LedRectangular = 0
E5LedCircular = 1


class E5Led(QWidget):
    """
    Class implementing a LED widget.
    """
    def __init__(self, parent=None, color=None, shape=E5LedCircular,
                 rectRatio=1):
        """
        Constructor

        @param parent reference to parent widget (QWidget)
        @param color color of the LED (QColor)
        @param shape shape of the LED (E5LedCircular, E5LedRectangular)
        @param rectRatio ratio width to height, if shape is rectangular (float)
        """
        super(E5Led, self).__init__(parent)

        if color is None:
            color = QColor("green")

        self.__led_on = True
        self.__dark_factor = 300
        self.__offcolor = color.darker(self.__dark_factor)
        self.__led_color = color
        self.__framedLed = True
        self.__shape = shape
        self.__rectRatio = rectRatio

        self.setColor(color)

    def paintEvent(self, evt):
        """
        Protected slot handling the paint event.

        @param evt paint event object (QPaintEvent)
        @exception TypeError The E5Led has an unsupported shape type.
        """
        if self.__shape == E5LedCircular:
            self.__paintRound()
        elif self.__shape == E5LedRectangular:
            self.__paintRectangular()
        else:
            raise TypeError("Unsupported shape type for E5Led.")

    def __getBestRoundSize(self):
        """
        Private method to calculate the width of the LED.

        @return new width of the LED (integer)
        """
        width = min(self.width(), self.height())
        width -= 2  # leave one pixel border
        return width > -1 and width or 0

    def __paintRound(self):
        """
        Private method to paint a round raised LED.
        """
        # Initialize coordinates, width and height of the LED
        width = self.__getBestRoundSize()

        # Calculate the gradient for the LED
        wh = width / 2
        color = self.__led_on and self.__led_color or self.__offcolor
        gradient = QRadialGradient(wh, wh, wh, 0.8 * wh, 0.8 * wh)
        gradient.setColorAt(0.0, color.lighter(200))
        gradient.setColorAt(0.6, color)
        if self.__framedLed:
            gradient.setColorAt(0.9, color.darker())
            gradient.setColorAt(1.0, self.palette().color(QPalette.Dark))
        else:
            gradient.setColorAt(1.0, color.darker())

        # now do the drawing
        paint = QPainter(self)
        paint.setRenderHint(QPainter.Antialiasing, True)
        paint.setBrush(QBrush(gradient))
        paint.setPen(Qt.NoPen)
        paint.drawEllipse(1, 1, width, width)
        paint.end()

    def __paintRectangular(self):
        """
        Private method to paint a rectangular raised LED.
        """
        # Initialize coordinates, width and height of the LED
        width = self.height() * self.__rectRatio
        left = max(0, int((self.width() - width) / 2) - 1)
        right = min(int((self.width() + width) / 2), self.width())
        height = self.height()

        # now do the drawing
        painter = QPainter(self)
        painter.setRenderHint(QPainter.Antialiasing, True)
        color = self.__led_on and self.__led_color or self.__offcolor

        painter.setPen(color.lighter(200))
        painter.drawLine(left, 0, left, height - 1)
        painter.drawLine(left + 1, 0, right - 1, 0)
        if self.__framedLed:
            painter.setPen(self.palette().color(QPalette.Dark))
        else:
            painter.setPen(color.darker())
        painter.drawLine(left + 1, height - 1, right - 1, height - 1)
        painter.drawLine(right - 1, 1, right - 1, height - 1)
        painter.fillRect(left + 1, 1, right - 2, height - 2, QBrush(color))
        painter.end()

    def isOn(self):
        """
        Public method to return the LED state.

        @return flag indicating the light state (boolean)
        """
        return self.__led_on

    def shape(self):
        """
        Public method to return the LED shape.

        @return LED shape (E5LedCircular, E5LedRectangular)
        """
        return self.__shape

    def ratio(self):
        """
        Public method to return the LED rectangular ratio [= width / height].

        @return LED rectangular ratio (float)
        """
        return self.__rectRatio

    def color(self):
        """
        Public method to return the LED color.

        @return color of the LED (QColor)
        """
        return self.__led_color

    def setOn(self, state):
        """
        Public method to set the LED to on.

        @param state new state of the LED (boolean)
        """
        if self.__led_on != state:
            self.__led_on = state
            self.update()

    def setShape(self, shape):
        """
        Public method to set the LED shape.

        @param shape new LED shape (E5LedCircular, E5LedRectangular)
        """
        if self.__shape != shape:
            self.__shape = shape
            self.update()

    def setRatio(self, ratio):
        """
        Public method to set the LED rectangular ratio (width / height).

        @param ratio new LED rectangular ratio (float)
        """
        if self.__rectRatio != ratio:
            self.__rectRatio = ratio
            self.update()

    def setColor(self, color):
        """
        Public method to set the LED color.

        @param color color for the LED (QColor)
        """
        if self.__led_color != color:
            self.__led_color = color
            self.__offcolor = color.darker(self.__dark_factor)
            self.update()

    def setDarkFactor(self, darkfactor):
        """
        Public method to set the dark factor.

        @param darkfactor value to set for the dark factor (integer)
        """
        if self.__dark_factor != darkfactor:
            self.__dark_factor = darkfactor
            self.__offcolor = self.__led_color.darker(darkfactor)
            self.update()

    def darkFactor(self):
        """
        Public method to return the dark factor.

        @return the current dark factor (integer)
        """
        return self.__dark_factor

    def toggle(self):
        """
        Public slot to toggle the LED state.
        """
        self.setOn(not self.__led_on)

    def on(self):
        """
        Public slot to set the LED to on.
        """
        self.setOn(True)

    def off(self):
        """
        Public slot to set the LED to off.
        """
        self.setOn(False)

    def setFramed(self, framed):
        """
        Public slot to set the __framedLed attribute.

        @param framed flag indicating the framed state (boolean)
        """
        if self.__framedLed != framed:
            self.__framedLed = framed
            self.__off_map = None
            self.__on_map = None
            self.update()

    def isFramed(self):
        """
        Public method to return the framed state.

        @return flag indicating the current framed state (boolean)
        """
        return self.__framedLed

    def sizeHint(self):
        """
        Public method to give a hint about our desired size.

        @return size hint (QSize)
        """
        return QSize(18, 18)

    def minimumSizeHint(self):
        """
        Public method to give a hint about our minimum size.

        @return size hint (QSize)
        """
        return QSize(18, 18)


class E5ClickableLed(E5Led):
    """
    Class implementing a clickable LED widget.

    @signal clicked(QPoint) emitted upon a click on the LED with the
        left button
    @signal middleClicked(QPoint) emitted upon a click on the LED with
        the middle button or CTRL and left button
    """
    clicked = pyqtSignal(QPoint)
    middleClicked = pyqtSignal(QPoint)

    def __init__(self, parent=None, color=None, shape=E5LedCircular,
                 rectRatio=1):
        """
        Constructor

        @param parent reference to parent widget (QWidget)
        @param color color of the LED (QColor)
        @param shape shape of the LED (E5LedCircular, E5LedRectangular)
        @param rectRatio ratio width to height, if shape is rectangular (float)
        """
        super(E5ClickableLed, self).__init__(parent, color, shape, rectRatio)

        self.setCursor(Qt.PointingHandCursor)

    def mouseReleaseEvent(self, evt):
        """
        Protected method handling mouse release events.

        @param evt mouse event (QMouseEvent)
        """
        if evt.button() == Qt.LeftButton and self.rect().contains(evt.pos()):
            if evt.modifiers() == Qt.ControlModifier:
                self.middleClicked.emit(evt.globalPos())
            else:
                self.clicked.emit(evt.globalPos())
        elif (
            evt.button() == Qt.MidButton and
            self.rect().contains(evt.pos())
        ):
            self.middleClicked.emit(evt.globalPos())
        else:
            super(E5ClickableLed, self).mouseReleaseEvent(evt)
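
# Minimal usage sketch (an illustration, not part of eric6 itself; it assumes
# a running QApplication):
#
# if __name__ == '__main__':
#     import sys
#     from PyQt5.QtWidgets import QApplication
#     app = QApplication(sys.argv)
#     led = E5ClickableLed(color=QColor("red"), shape=E5LedRectangular, rectRatio=2)
#     led.clicked.connect(lambda pos: led.toggle())
#     led.show()
#     sys.exit(app.exec_())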
| [
"[email protected]"
] | |
5ffe3a1f93e81e9e2774504dd769dcc13ce6537b | c0c533728e049d41206282bb929bf66aedc1d154 | /apps/application/migrations/0011_auto_20180516_0149.py | 9ce644b45d34365c8c220b0648d3c407cf6e19b2 | [] | no_license | nolan1299/madras | 5df425a7796fae71f3c9d6763a06aa08145d50e0 | 489f6279160622e72fcaf8654ec0c1bed7413fe8 | refs/heads/master | 2020-03-17T16:30:34.728894 | 2018-05-25T18:02:13 | 2018-05-25T18:02:13 | 133,751,296 | 0 | 0 | null | 2018-05-17T03:03:48 | 2018-05-17T03:03:48 | null | UTF-8 | Python | false | false | 832 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-05-16 01:49
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone


class Migration(migrations.Migration):

    dependencies = [
        ('application', '0010_resume'),
    ]

    operations = [
        migrations.AddField(
            model_name='resume',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='resume',
            name='application',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='resumes', to='application.Application'),
        ),
    ]
| [
"[email protected]"
] | |
6f8b879786c5c0c3dc31c1641bfe4a723a5afa62 | b2cce36e9f7dba3f393ce5a177cd4d03eced094a | /PageObject/me/CollectSwipeDellPage.py | f4c7122b39fce18cf1115b05b2baee5de90966a1 | [] | no_license | tachibana814/appium | 180192ba0727f4ab0cdfaec54f332ce603e203b4 | 1f8945f8fa2057a46f2291d03de152af2566ad55 | refs/heads/master | 2021-08-22T22:34:26.056831 | 2017-12-01T13:47:57 | 2017-12-01T13:47:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,383 | py | from Base.BaseStatistics import countSum, countInfo
from Base.BaseYaml import getYam
from Base.BaseOperate import OperateElement
from Base.BaseElementEnmu import Element as be
import re


class CollectSwipeDelPage:
    '''
    Swipe left to delete a favorited item.
    isOperate: if any operation step fails, the checkpoint fails as well.
    kwargs: WebDriver driver, String path (yaml configuration file)
    '''

    def __init__(self, **kwargs):
        self.driver = kwargs["driver"]
        if kwargs.get("launch_app", "0") == "0":  # if not set, relaunch the app
            self.driver.launch_app()
        self.path = kwargs["path"]
        self.operateElement = OperateElement(self.driver)
        self.isOperate = True
        test_msg = getYam(self.path)
        self.testInfo = test_msg["testinfo"]
        self.testCase = test_msg["testcase"]
        self.testcheck = test_msg["check"]
        self.get_value = []
        self.msg = ""

    '''
    Operation steps.
    logTest: the test logger
    '''

    def operate(self, logTest):
        for item in self.testCase:
            result = self.operateElement.operate(item, self.testInfo, logTest)
            if not result["result"]:
                m_s_g = self.msg + "\n" if self.msg != "" else ""
                msg = m_s_g + "执行过程中失败,请检查元素是否存在" + item["element_info"]
                print(msg)
                self.testInfo[0]["msg"] = msg
                self.msg = m_s_g + msg
                self.isOperate = False
                return False
            if item.get("operate_type", "0") == be.SWIPE_LEFT:  # swipe left on the element
                web_element = self.driver.find_elements_by_id(item["element_info"])[item["index"]]
                start = web_element.location
                # top-left coordinates of the control
                startx = start["x"]
                starty = start["y"]
                # extent of the control
                size1 = web_element.size
                width = size1["width"]
                height = size1["height"]
                # bottom-right coordinates of the control
                endX = width + startx
                endY = height + starty
                self.driver.swipe(endX-50, endY, starty+500, endY)
            if item.get("operate_type", "0") == be.GET_VALUE:
                self.get_value.append(result["text"])
        return True

    def checkPoint(self, **kwargs):
        result = self.check(**kwargs)
        if result is not True and be.RE_CONNECT:
            self.msg = "用例失败重连过一次,失败原因:" + self.testInfo[0]["msg"]
            kwargs["logTest"].buildStartLine(kwargs["caseName"] + "_失败重连")  # log the retry
            # self.operateElement.switchToNative()
            self.driver.launch_app()
            self.isOperate = True
            self.get_value = []
            self.operate(kwargs["logTest"])
            result = self.check(**kwargs)
            self.testInfo[0]["msg"] = self.msg
        self.operateElement.switchToNative()
        countSum(result)
        countInfo(result=result, testInfo=self.testInfo, caseName=kwargs["caseName"],
                  driver=self.driver, logTest=kwargs["logTest"], devices=kwargs["devices"], testCase=self.testCase,
                  testCheck=self.testcheck)
        return result

    '''
    Checkpoint.
    caseName: the test case function name, used for statistics
    logTest: the test logger
    devices: device name
    '''

    def check(self, **kwargs):
        result = True
        m_s_g = self.msg + "\n" if self.msg != "" else ""
        if self.isOperate:
            for item in self.testcheck:
                resp = self.operateElement.operate(item, self.testInfo, kwargs["logTest"])
                if not resp["result"]:  # an operation error here is treated as a passing check
                    print("操作失败,简单点为成功")
                    result = True
                    break
                if resp["text"] in self.get_value:  # compare data after the deletion
                    msg = m_s_g + "删除数据失败,删除前数据为:" + ".".join(self.get_value) + "当前获取的数据为:" + resp["text"]
                    self.msg = m_s_g + msg
                    print(msg)
                    self.testInfo[0]["msg"] = msg
                    break
        else:
            result = False
        return result


if __name__ == "__main__":
    pass
| [
"[email protected]"
] | |
ffdf703a0923d7b64e0319499dffc354cf526805 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Scripts/pyinstaller/build/lib/PyInstaller/hooks/hook-dynaconf.py | d8b99ecb1e4dab7a0cca50558645cd76d92825ed | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:837c53561512a82c5654188db01cd8d6fbe3b2a9eb1608240134ac0b77dda545
size 886
| [
"[email protected]"
] | |
2f1a8a7c8912e0d47a5e9ad908e93778deb29aa0 | 6b7ae49d83c51c298f1ed4e5a8324db7ee393f06 | /rms/urls.py | 9c10cde55095b8e9e47bb26c15768b2c486c9e7b | [] | no_license | MoTechStore/ma | 6837596be92532f613e4b8f2bd91cbcf6daa103d | 21a4c63f66681e2b919c8bfcfe3acf12de385205 | refs/heads/main | 2023-08-22T01:49:22.739313 | 2021-10-05T10:03:25 | 2021-10-05T10:03:25 | 411,613,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,502 | py | from django.urls import include, path
from . import views
from django.contrib import admin
from django.contrib.auth import views as auth_views

urlpatterns = [
    path('', views.ma, name='ma'),
    path('engine/', views.ai_search, name='engine'),
    path('rms/', views.index, name='rms'),
    path('report/', views.download_file, name='report'),
    path('doc/', views.doc_insight, name='doc'),
    path('cv/', views.cv_insight, name='cv'),
    path('media/', views.media_check, name='media_check'),
    path('loletter/', views.LOutgoingLetter.as_view(), name='loletter'),
    path('liletter/', views.LIcomingLetter.as_view(), name='liletter'),
    path('dashboard/', views.dashboard, name='dashboard'),
    path('manage-files/<int:pk>', views.crudAdmin.found, name='found'),
    path('save-data/', views.save_data, name='save_data'),
    path('letter/', views.save_letter, name='save_letter'),
    path('addfile/', views.add_file, name='addfile'),
    path('addletter/', views.add_letter, name='addletter'),
    path('save_file/', views.save_file, name='savefile'),
    path('outletter/', views.out_letter, name='outletter'),
    path('search/', views.search_ai, name='search'),
    path('usearch/', views.usearch, name='usearch'),
    path('sentletter/', views.outing_letter, name='sentletter'),
    path('manage-files/', views.ManageFileView.as_view(), name='manage_files'),
    path('search/', views.search_view, name='search'),
    path('lsearch/', views.search_letter, name='search_letter'),
    path('test/', views.test, name='test'),
    path('r_admin_list/<int:pk>/update_file/', views.update_file, name='update_file'),
    path('list-files/', views.FileListView.as_view(), name='list_file'),
    path('logout/', views.logout_view, name='logout'),
    path('signup/', views.signup, name='signup'),
    path('r_admin_list/<int:pk>', views.AdminListReadView.as_view(), name='r_admin_list'),
    path('al_update/<int:pk>', views.AlUpdateView.as_view(), name='al_update'),
    path('u_update/<int:pk>', views.UserUpdateView.as_view(), name='u_update'),
    path('admin_list_delete/<int:pk>', views.ListItDeleteView.as_view(), name='admin_list_delete'),
    path('delete_user/<int:pk>', views.DeleteUserView.as_view(), name='delete_user'),
    path('users/', views.UserView.as_view(), name='users'),
    path('r_admin_truck/<int:pk>', views.AListTruckReadView.as_view(), name='r_admin_truck'),
    path('login/', auth_views.LoginView.as_view(template_name='crow/login.html'), name='login'),
]
| [
"[email protected]"
] | |
cd4554e2cfdb845fb82245f7c366f0dfe311f709 | 3506d8c9a8391be52d24cff54f27537a92a7228c | /HackerRank/Implementation/Picking_Numbers.py | 8120850a099a18dbfc581f4ef6b163926e96aed7 | [] | no_license | saumya-singh/CodeLab | 04ef2c61c516c417c03c6a510e8b5e6e498fbe5d | 9371f0d6bd45e5592dae25b50f0d04ba45ae67cf | refs/heads/master | 2021-09-12T05:01:17.491312 | 2018-04-14T19:48:40 | 2018-04-14T19:48:40 | 81,596,628 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 970 | py | #!/bin/python3
#https://www.hackerrank.com/challenges/picking-numbers/problem
def pickingNumbers(n, a):
    dictionary = {}
    for element in a:
        value = dictionary.get(element, "None")
        if value == "None":
            dictionary[element] = 1
        else:
            dictionary[element] = int(value) + 1
    list_d = list(dictionary.items())
    list_d.sort()
    # print(list_d)
    if len(list_d) == 1:
        return list_d[0][1]
    max = 0
    for i in range(len(list_d)):
        if max < list_d[i][1]:
            max = list_d[i][1]
    for i in range(len(list_d) - 1):
        if abs(list_d[i][0] - list_d[i + 1][0]) == 1:
            add_result = list_d[i][1] + list_d[i + 1][1]
            if max < add_result:
                max = add_result
    return max
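# Worked example (illustrative, not part of the original submission): for
# a = [4, 6, 5, 3, 3, 1] the frequency table is {1: 1, 3: 2, 4: 1, 5: 1, 6: 1};
# the best pair of adjacent values is (3, 4) with 2 + 1 = 3, so:
#
#     pickingNumbers(6, [4, 6, 5, 3, 3, 1])  # -> 3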
n = int(input().strip())
a = [int(a_temp) for a_temp in input().strip().split(' ')]
count = pickingNumbers(n, a)
print(count) | [
"[email protected]"
] | |
d3c4ff20dda9054f73a27d12749f649b646cc97c | 0791b310393b0a88ae03e05593abf921e1920951 | /resourses/convertui.py | 5653855d28da8e3db4670c41f89c569103ed4a0c | [
"MIT"
] | permissive | sashgorokhov-heaven/python-vkontakte-music-gui-old | 482ae8f84e43abbc1137f920fe445a67d81522a3 | b57d80686d404292a35d9055ba43b0da0e5aaab2 | refs/heads/master | 2021-06-01T04:30:05.525614 | 2014-06-24T16:42:08 | 2014-06-24T16:42:08 | 20,318,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | __author__ = 'sashgorokhov'
__email__ = '[email protected]'
import os, time
os.startfile('convertui.cmd')
files = [
    r'..\modules\forms\downloadform\ui.py',
    r'..\modules\forms\mainform\ui.py',
    r'..\modules\forms\mainform\components\audiolist\components\audiolistitemwidget\ui.py',
    r'..\modules\forms\downloadform\components\audiolist\components\audiolistitemwidget\ui.py'
]
time.sleep(3)
for file in files:
    with open(file, 'r') as f:
        lines = f.read().split('\n')
    lines.reverse()
    parsed = list()
    found = False
    for line in lines:
        if line == 'import resourses_rc':
            if found:
                continue
            found = True
            parsed.append('import resourses.resourses_rc')
        else:
            parsed.append(line)
    parsed.reverse()
    with open(file, 'w') as f:
        f.write('\n'.join(parsed)) | [
"[email protected]"
] | |
facebbe13610d58711da7060347d0e724d170152 | 4427916fafe69a32626cb5d8c02bc55c7c87f642 | /FortyTwo/plugin.py | df71696ddb0010cbf8d1a2244f0c02d37bdc3f81 | [] | no_license | ki113d/Supybot-plugins | 13f91f2fe0c01c9769a562e3d480d4f0e7fa8739 | 0d6b4447004b822acbe41fd7075cc85d8476a289 | refs/heads/master | 2021-04-15T07:57:27.272322 | 2012-12-30T21:35:56 | 2012-12-30T21:35:56 | 4,077,461 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,562 | py | ###
# Copyright (c) 2011, Valentin Lorentz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import re
import time
import fnmatch
from xml.dom import minidom
import supybot.utils as utils
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
try:
    from supybot.i18n import PluginInternationalization
    from supybot.i18n import internationalizeDocstring
    _ = PluginInternationalization('FortyTwo')
except:
    # These are placeholder functions that allow the plugin to run on a bot
    # without the i18n plugin.
    _ = lambda x: x
    internationalizeDocstring = lambda x: x


class Domain:
    def __init__(self, dom, warnings):
        self.domain = None
        self.purpose = None
        for node in dom.childNodes:
            if not node.nodeName in 'domain purpose'.split():
                warnings.append(_("Unknown node '%s'") % node.nodeName)
                continue
            try:
                data = node.firstChild.data
            except AttributeError:
                # Empty purpose, for instance
                data = ''
            self.__dict__.update({node.nodeName: data})
        assert None not in (self.domain, self.purpose)


@internationalizeDocstring
class FortyTwo(callbacks.Plugin):
    """Add the help for "@plugin help 42Chan" here
    This should describe *how* to use this plugin."""

    @internationalizeDocstring
    def find(self, irc, msg, args, optlist):
        """[--domain <glob>] [--purpose <glob>]

        Returns all the domains that match the search. --domain and
        --purpose take a glob (a string with wildcards) that has to match
        the results, --resolves means the domain is resolved, and --http is
        the HTTP response status (000 for a domain that isn't resolved)."""
        def translate(glob):
            return re.compile(fnmatch.translate(glob), re.I)
        domain, purpose = translate('*'), translate('*')
        resolve, http = None, None
        for name, value in optlist:
            if name == 'domain': domain = translate(value)
            if name == 'purpose': purpose = translate(value)
        if not hasattr(self, '_lastRefresh') or \
                self._lastRefresh < time.time() - self.registryValue('lifetime'):
            self._refreshCache()
        results = []
        for obj in self._domains:
            if not domain.match(obj.domain) or not purpose.match(obj.purpose):
                continue
            results.append(obj.domain)
        if results == []:
            irc.error(_('No such domain'))
        else:
            irc.reply(_(', ').join(results))
    find = wrap(find, [getopts({'domain': 'glob', 'purpose': 'glob'})])

    @internationalizeDocstring
    def fetch(self, irc, msg, args):
        """takes no arguments

        Fetches data from the domains list source."""
        self._refreshCache()
        irc.replySuccess()

    @internationalizeDocstring
    def purpose(self, irc, msg, args, domain):
        """<domain>

        Returns the purpose of the given domain."""
        if not hasattr(self, '_lastRefresh') or \
                self._lastRefresh < time.time() - self.registryValue('lifetime'):
            self._refreshCache()
        for obj in self._domains:
            if obj.domain == domain:
                irc.reply(obj.purpose)
                return
        irc.error(_('No such domain'))
    purpose = wrap(purpose, ['somethingWithoutSpaces'])

    def _refreshCache(self):
        self._lastRefresh = time.time()
        xml = utils.web.getUrl(self.registryValue('source'))
        dom = minidom.parseString(xml)
        warnings = []
        root = None
        for child in dom.childNodes:
            if child.nodeName == 'domains':
                root = child
                break
        assert root is not None
        self._domains = [Domain(child, warnings) for child in root.childNodes
                         if child.nodeName == 'item']
Class = FortyTwo
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| [
"[email protected]"
] | |
39f23942de022147dd5876c2b5e9383005d57383 | bb970bbe151d7ac48d090d86fe1f02c6ed546f25 | /arouse/_dj/utils/_os.py | 4f586b4c0305d41a7a7a672366300d62577ff519 | [
"Python-2.0",
"BSD-3-Clause"
] | permissive | thektulu/arouse | 95016b4028c2b8e9b35c5062a175ad04286703b6 | 97cadf9d17c14adf919660ab19771a17adc6bcea | refs/heads/master | 2021-01-13T12:51:15.888494 | 2017-01-09T21:43:32 | 2017-01-09T21:43:32 | 78,466,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,593 | py | from __future__ import unicode_literals
import os
import sys
import tempfile
from os.path import abspath, dirname, isabs, join, normcase, normpath, sep
from arouse._dj.core.exceptions import SuspiciousFileOperation
from arouse._dj.utils import six
from arouse._dj.utils.encoding import force_text
if six.PY2:
    fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()


# Under Python 2, define our own abspath function that can handle joining
# unicode paths to a current working directory that has non-ASCII characters
# in it. This isn't necessary on Windows since the Windows version of abspath
# handles this correctly. It also handles drive letters differently than the
# pure Python implementation, so it's best not to replace it.
if six.PY3 or os.name == 'nt':
    abspathu = abspath
else:
    def abspathu(path):
        """
        Version of os.path.abspath that uses the unicode representation
        of the current working directory, thus avoiding a UnicodeDecodeError
        in join when the cwd has non-ASCII characters.
        """
        if not isabs(path):
            path = join(os.getcwdu(), path)
        return normpath(path)


def upath(path):
    """
    Always return a unicode path.
    """
    if six.PY2 and not isinstance(path, six.text_type):
        return path.decode(fs_encoding)
    return path


def npath(path):
    """
    Always return a native path, that is unicode on Python 3 and bytestring on
    Python 2.
    """
    if six.PY2 and not isinstance(path, bytes):
        return path.encode(fs_encoding)
    return path


def safe_join(base, *paths):
    """
    Joins one or more path components to the base path component intelligently.
    Returns a normalized, absolute version of the final path.

    The final path must be located inside of the base path component (otherwise
    a SuspiciousFileOperation is raised).
    """
    base = force_text(base)
    paths = [force_text(p) for p in paths]
    final_path = abspathu(join(base, *paths))
    base_path = abspathu(base)
    # Ensure final_path starts with base_path (using normcase to ensure we
    # don't false-negative on case insensitive operating systems like Windows),
    # further, one of the following conditions must be true:
    #  a) The next character is the path separator (to prevent conditions like
    #     safe_join("/dir", "/../d"))
    #  b) The final path must be the same as the base path.
    #  c) The base path must be the most root path (meaning either "/" or "C:\\")
    if (not normcase(final_path).startswith(normcase(base_path + sep)) and
            normcase(final_path) != normcase(base_path) and
            dirname(normcase(base_path)) != normcase(base_path)):
        raise SuspiciousFileOperation(
            'The joined path ({}) is located outside of the base path '
            'component ({})'.format(final_path, base_path))
    return final_path
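# Illustrative behaviour sketch (assumed paths, not part of the original module):
#
#     safe_join('/var/www', 'static', 'app.css')  # -> '/var/www/static/app.css'
#     safe_join('/var/www', '..', 'etc/passwd')   # raises SuspiciousFileOperation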
def symlinks_supported():
    """
    A function to check if creating symlinks are supported in the
    host platform and/or if they are allowed to be created (e.g.
    on Windows it requires admin permissions).
    """
    tmpdir = tempfile.mkdtemp()
    original_path = os.path.join(tmpdir, 'original')
    symlink_path = os.path.join(tmpdir, 'symlink')
    os.makedirs(original_path)
    try:
        os.symlink(original_path, symlink_path)
        supported = True
    except (OSError, NotImplementedError, AttributeError):
        supported = False
    else:
        os.remove(symlink_path)
    finally:
        os.rmdir(original_path)
        os.rmdir(tmpdir)
    return supported
| [
"[email protected]"
] | |
7ea6777f121d25cecdd3df4ad37bc8958faa33f6 | f82349a5d9cb285ced7c52db1ce95c65f5fd0cf0 | /mars/tensor/execution/optimizes/ne.py | 34e7ee8d39dbabd98b8b5eb609277840c853c493 | [
"MIT",
"BSD-3-Clause",
"OFL-1.1",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | pingrunhuang/mars | 8d2602356b6f4d9eb7c6dfe4b2c4536b4bdfc229 | ae920c374e9844d7426d0cc09c0d97059dc5341c | refs/heads/master | 2020-04-17T03:42:11.147774 | 2019-01-18T06:49:29 | 2019-01-18T06:49:29 | 166,196,676 | 0 | 0 | Apache-2.0 | 2019-01-17T09:17:25 | 2019-01-17T09:17:25 | null | UTF-8 | Python | false | false | 4,936 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...expressions import arithmetic, reduction
from ...expressions.fuse import TensorNeFuseChunk
REDUCTION_OP = {reduction.TensorSum, reduction.TensorProd,
                reduction.TensorMax, reduction.TensorMin}
SUPPORT_OP = {
    arithmetic.TensorAdd, arithmetic.TensorAddConstant,
    arithmetic.TensorSubtract, arithmetic.TensorSubConstant,
    arithmetic.TensorMultiply, arithmetic.TensorMulConstant,
    arithmetic.TensorDivide, arithmetic.TensorDivConstant,
    arithmetic.TensorPower, arithmetic.TensorPowConstant,
    arithmetic.TensorMod, arithmetic.TensorModConstant,
    arithmetic.TensorNegative,
    arithmetic.TensorAbs,
    arithmetic.TensorConj,
    arithmetic.TensorExp,
    arithmetic.TensorLog,
    arithmetic.TensorLog10,
    arithmetic.TensorExpm1,
    arithmetic.TensorLog1p,
    arithmetic.TensorSqrt,
    arithmetic.TensorEqual, arithmetic.TensorEqConstant,
    arithmetic.TensorNotEqual, arithmetic.TensorNeConstant,
    arithmetic.TensorLessThan, arithmetic.TensorLtConstant,
    arithmetic.TensorLessEqual, arithmetic.TensorLeConstant,
    arithmetic.TensorGreaterThan, arithmetic.TensorGtConstant,
    arithmetic.TensorGreaterEqual, arithmetic.TensorGeConstant,
    arithmetic.TensorSin,
    arithmetic.TensorCos,
    arithmetic.TensorTan,
    arithmetic.TensorArcsin,
    arithmetic.TensorArccos,
    arithmetic.TensorArctan,
    arithmetic.TensorSinh,
    arithmetic.TensorCosh,
    arithmetic.TensorTanh,
    arithmetic.TensorArcsinh,
    arithmetic.TensorArccosh,
    arithmetic.TensorArctanh,
    arithmetic.TensorLshift, arithmetic.TensorLshiftConstant,
    arithmetic.TensorRshift, arithmetic.TensorRshiftConstant,
    arithmetic.TensorTreeAdd,
    arithmetic.TensorTreeMultiply,
    reduction.TensorSum,
    reduction.TensorProd,
    reduction.TensorMax,
    reduction.TensorMin
}


def _check_reduction_axis(node):
    return len(node.op.axis) == 1 or len(node.op.axis) == node.ndim


def _support(node):
    op_type = type(node.op)
    if op_type in REDUCTION_OP:
        return _check_reduction_axis(node)
    return op_type in SUPPORT_OP


def _transfer_op(node):
    op = node.op
    if type(op) in REDUCTION_OP and not _check_reduction_axis(node):
        return op
    return op


class NeOptimizer(object):
    def __init__(self, graph):
        self._graph = graph

    def optimize(self, keys=None):
        self.compose(keys=keys)

    def _compose_graph(self, composes):
        graph = self._graph
        composed_nodes = []

        for c in composes:
            head_node = c[0]
            tail_node = c[-1]
            op = TensorNeFuseChunk(dtype=tail_node.dtype)
            composed_chunk = op(c)
            graph.add_node(composed_chunk)
            for node in graph.iter_successors(tail_node):
                graph.add_edge(composed_chunk, node)
            for node in graph.iter_predecessors(head_node):
                graph.add_edge(node, composed_chunk)
            for node in c:
                graph.remove_node(node)
            composed_nodes.append(composed_chunk)

        return composed_nodes

    def compose(self, keys=None):
        composes = []
        explored = set()
        keys = set(keys or [])

        graph = self._graph
        for v in graph.bfs():
            if v.op.gpu or v.op.sparse:
                # break out
                return []
            if type(v.op) not in SUPPORT_OP or v.key in keys:
                continue
            if v in explored or type(v.op) in REDUCTION_OP:  # TODO: check logic here
                continue
            if graph.count_successors(v) != 1:
                continue
            selected = [v]
            # add successors
            cur_node = graph.successors(v)[0]
            while graph.count_predecessors(cur_node) == 1 \
                    and _support(cur_node) and cur_node.key not in keys:
                selected.append(cur_node)
                if graph.count_successors(cur_node) != 1 \
                        or type(cur_node.op) in REDUCTION_OP:
                    break
                else:
                    cur_node = graph.successors(cur_node)[0]
            if len(selected) > 1:
                explored.update(selected)
                composes.append(list(selected))
        return self._compose_graph(composes)
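# Illustrative usage sketch (assumed; a real chunk graph comes from Mars'
# tiling machinery rather than being built by hand):
#
#     optimizer = NeOptimizer(chunk_graph)
#     optimizer.optimize(keys=result_chunk_keys)  # fuses eligible chains of
#                                                 # chunks into TensorNeFuseChunk nodes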
| [
"[email protected]"
] | |
84246ba2dc3ebc23ea5633a69655136b4e07164e | 2ada0217e09e02c6d1ab1af6c12c1262eb66ea06 | /NTWebsite/migrations/0033_auto_20190518_1729.py | 73af9010eb07ba2cb93c8ef71bfd283d8ac97d07 | [] | no_license | lianglianggou/Django-Python-NagetiveWeb-Beta | 74545d2f19d7a65b974e7b88c06cbe1bae450568 | e19175d14541debf59d6ea1223fef54727bd2150 | refs/heads/master | 2021-01-07T00:43:16.696519 | 2019-12-12T14:02:13 | 2019-12-12T14:02:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,028 | py | # Generated by Django 2.0.6 on 2019-05-18 09:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):

    dependencies = [
        ('NTWebsite', '0032_auto_20190518_1714'),
    ]

    operations = [
        migrations.AlterField(
            model_name='commentattitude',
            name='ObjectID',
            field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='NTWebsite.CommentInfo', verbose_name='评论'),
        ),
        migrations.AlterField(
            model_name='commentinfo',
            name='TopicID',
            field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='NTWebsite.TopicInfo', verbose_name='文章'),
        ),
        migrations.AlterField(
            model_name='topicattitude',
            name='ObjectID',
            field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='NTWebsite.TopicInfo', verbose_name='文章'),
        ),
    ]
| [
"[email protected]"
] | |
53df6c2ebd3e371dd21d207caaa24b7293593289 | fa7e75212e9f536eed7a78237a5fa9a4021a206b | /python/smqtk/tests/representation/DataElement/test_DataElement_abstract.py | 80492106247f0fbfc59c332c3d5f27c062eb74f8 | [] | no_license | kod3r/SMQTK | 3d40730c956220a3d9bb02aef65edc8493bbf527 | c128e8ca38c679ee37901551f4cc021cc43d00e6 | refs/heads/master | 2020-12-03T09:12:41.163643 | 2015-10-19T14:56:55 | 2015-10-19T14:56:55 | 44,916,678 | 1 | 0 | null | 2015-10-25T15:47:35 | 2015-10-25T15:47:35 | null | UTF-8 | Python | false | false | 8,164 | py | """
Tests for DataElement abstract interface class methods that provide
functionality.
"""
import hashlib
import mock
import nose.tools as ntools
import os.path as osp
import tempfile
import unittest
import smqtk.representation.data_element
__author__ = "[email protected]"
# because this has a stable mimetype conversion
EXPECTED_CONTENT_TYPE = "image/png"
EXPECTED_BYTES = "hello world"
EXPECTED_UUID = 1234567890
EXPECTED_MD5 = hashlib.md5(EXPECTED_BYTES).hexdigest()
EXPECTED_SHA1 = hashlib.sha1(EXPECTED_BYTES).hexdigest()
# Caches the temp directory before we start mocking things out that would
# otherwise be required for the tempfile module to determine the temp directory.
tempfile.gettempdir()
class DummyDataElement (smqtk.representation.data_element.DataElement):
    # abstract methods have no base functionality

    def get_config(self):
        return {}

    def content_type(self):
        return EXPECTED_CONTENT_TYPE

    def get_bytes(self):
        # Aligned with the checksum strings in test class setUp method
        return EXPECTED_BYTES

    def uuid(self):
        return EXPECTED_UUID


class TestDataElementAbstract (unittest.TestCase):

    def test_md5(self):
        de = DummyDataElement()
        ntools.assert_is_none(de._md5_cache)

        md5 = de.md5()
        sha1 = de.sha1()

        ntools.assert_is_not_none(de._md5_cache)
        ntools.assert_equal(de._md5_cache, EXPECTED_MD5)
        ntools.assert_equal(md5, EXPECTED_MD5)
        ntools.assert_equal(de._sha1_cache, EXPECTED_SHA1)
        ntools.assert_equal(sha1, EXPECTED_SHA1)

        # When called a second time, should use cache instead of recomputing
        with mock.patch("smqtk.representation.data_element.hashlib") as mock_hashlib:
            md5 = de.md5()
            ntools.assert_false(mock_hashlib.md5.called)
            ntools.assert_equal(md5, EXPECTED_MD5)
            sha1 = de.sha1()
            ntools.assert_false(mock_hashlib.sha1.called)
            ntools.assert_equal(sha1, EXPECTED_SHA1)

    def test_del(self):
        de = DummyDataElement()
        m_clean_temp = de.clean_temp = mock.Mock()
        del de
        ntools.assert_true(m_clean_temp.called)

    def test_hashing(self):
        # Hash should be that of the UUID of the element
        de = DummyDataElement()
        ntools.assert_equal(hash(de), hash(EXPECTED_UUID))

    # Cases:
    #   - no existing temps, no specific dir
    #   - no existing temps, given specific dir
    #   - existing temps, no specific dir
    #   - existing temps, given specific dir
    #
    # Mocking open, os.open, os.close and fcntl to actual file interaction
    #   - os.open is used under the hood of tempfile to open a file (which also
    #     creates it on disk).

    @mock.patch('smqtk.representation.data_element.file_utils.safe_create_dir')
    @mock.patch('fcntl.fcntl')  # global
    @mock.patch('os.close')  # global
    @mock.patch('os.open')  # global
    @mock.patch('__builtin__.open')
    def test_writeTemp_noExisting_noDir(self,
                                        mock_open, mock_os_open, mock_os_close,
                                        mock_fcntl, mock_scd):
        # no existing temps, no specific dir
        fp = DummyDataElement().write_temp()

        ntools.assert_false(mock_scd.called)
        ntools.assert_true(mock_open.called)
        ntools.assert_equal(osp.dirname(fp), tempfile.gettempdir())

    @mock.patch('smqtk.representation.data_element.file_utils.safe_create_dir')
    @mock.patch('fcntl.fcntl')  # global
    @mock.patch('os.close')  # global
    @mock.patch('os.open')  # global
    @mock.patch('__builtin__.open')
    def test_writeTemp_noExisting_givenDir(self,
                                           mock_open, mock_os_open,
                                           mock_os_close, mock_fcntl, mock_scd):
        # no existing temps, given specific dir
        target_dir = '/some/dir/somewhere'

        fp = DummyDataElement().write_temp(target_dir)

        mock_scd.assert_called_once_with(target_dir)
        ntools.assert_true(mock_open.called)
        ntools.assert_not_equal(osp.dirname(fp), tempfile.gettempdir())
        ntools.assert_equal(osp.dirname(fp), target_dir)

    @mock.patch('smqtk.representation.data_element.file_utils.safe_create_dir')
    @mock.patch('fcntl.fcntl')  # global
    @mock.patch('os.close')  # global
    @mock.patch('os.open')  # global
    @mock.patch('__builtin__.open')
    def test_writeTemp_hasExisting_noDir(self,
                                         mock_open, mock_os_open, mock_os_close,
                                         mock_fcntl, mock_scd):
        # existing temps, no specific dir
        prev_0 = '/tmp/file.txt'
        prev_1 = '/tmp/file_two.png'

        de = DummyDataElement()
        de._temp_filepath_stack.append(prev_0)
        de._temp_filepath_stack.append(prev_1)

        fp = de.write_temp()

        ntools.assert_false(mock_scd.called)
        ntools.assert_false(mock_open.called)
        ntools.assert_equal(fp, prev_1)

    @mock.patch('smqtk.representation.data_element.file_utils.safe_create_dir')
    @mock.patch('fcntl.fcntl')  # global
    @mock.patch('os.close')  # global
    @mock.patch('os.open')  # global
    @mock.patch('__builtin__.open')
    def test_writeTemp_hasExisting_givenNewDir(self, mock_open, mock_os_open,
                                               mock_os_close, mock_fcntl,
                                               mock_scd):
        # existing temps, given specific dir
        prev_0 = '/tmp/file.txt'
        prev_1 = '/tmp/file_two.png'

        target_dir = '/some/specific/dir'

        de = DummyDataElement()
        de._temp_filepath_stack.append(prev_0)
        de._temp_filepath_stack.append(prev_1)

        fp = de.write_temp(temp_dir=target_dir)

        ntools.assert_true(mock_scd.called)
        ntools.assert_true(mock_open.called)
        ntools.assert_equal(osp.dirname(fp), target_dir)

    @mock.patch('smqtk.representation.data_element.file_utils.safe_create_dir')
    @mock.patch('fcntl.fcntl')  # global
    @mock.patch('os.close')  # global
    @mock.patch('os.open')  # global
    @mock.patch('__builtin__.open')
    def test_writeTemp_hasExisting_givenExistingDir(self, mock_open,
                                                    mock_os_open, mock_os_close,
                                                    mock_fcntl, mock_scd):
        # existing temps, given specific dir already in stack
        prev_0 = '/dir1/file.txt'
        prev_1 = '/tmp/things/file_two.png'
        prev_2 = '/some/specific/dir'

        de = DummyDataElement()
        de._temp_filepath_stack.append(prev_0)
        de._temp_filepath_stack.append(prev_1)
        de._temp_filepath_stack.append(prev_2)

        target_dir = "/tmp/things"

        fp = de.write_temp(temp_dir=target_dir)

        ntools.assert_false(mock_scd.called)
        ntools.assert_false(mock_open.called)
        ntools.assert_equal(fp, prev_1)

    @mock.patch("smqtk.representation.data_element.os")
    def test_cleanTemp_noTemp(self, mock_os):
        # should do all of nothing
        de = DummyDataElement()

        de.clean_temp()

        ntools.assert_false(mock_os.path.isfile.called)
        ntools.assert_false(mock_os.remove.called)

    @mock.patch("smqtk.representation.data_element.os")
    def test_cleanTemp_hasTemp_badPath(self, mock_os):
        de = DummyDataElement()
        de._temp_filepath_stack.append('tmp/thing')
        mock_os.path.isfile.return_value = False

        de.clean_temp()

        mock_os.path.isfile.assert_called_once_with('tmp/thing')
        ntools.assert_false(mock_os.remove.called)

    @mock.patch("smqtk.representation.data_element.os")
    def test_cleanTemp_hasTemp_validPath(self, mock_os):
        expected_path = '/tmp/something'

        de = DummyDataElement()
        de._temp_filepath_stack.append(expected_path)
        mock_os.path.isfile.return_value = True

        de.clean_temp()

        mock_os.path.isfile.assert_called_once_with(expected_path)
        mock_os.remove.assert_called_once_with(expected_path)
| [
"[email protected]"
] | |
e1361bf2b8bd4dd6a0afeb1667b5967a9269d8a3 | 159d4ae61f4ca91d94e29e769697ff46d11ae4a4 | /venv/lib/python3.9/site-packages/jedi/third_party/django-stubs/django-stubs/utils/datastructures.pyi | 7b5f7b2d182a86676ef63a639789ad5293c52f11 | [
"MIT"
] | permissive | davidycliao/bisCrawler | 729db002afe10ae405306b9eed45b782e68eace8 | f42281f35b866b52e5860b6a062790ae8147a4a4 | refs/heads/main | 2023-05-24T00:41:50.224279 | 2023-01-22T23:17:51 | 2023-01-22T23:17:51 | 411,470,732 | 8 | 0 | MIT | 2023-02-09T16:28:24 | 2021-09-28T23:48:13 | Python | UTF-8 | Python | false | false | 2,624 | pyi | from typing import (
    Any,
    Callable,
    Dict,
    Iterable,
    List,
    Mapping,
    MutableMapping,
    MutableSet,
    Tuple,
    TypeVar,
    Union,
    overload,
    Iterator,
    Optional,
)

from typing_extensions import Literal

_K = TypeVar("_K")
_V = TypeVar("_V")

class OrderedSet(MutableSet[_K]):
    dict: Dict[_K, None] = ...
    def __init__(self, iterable: Optional[Iterable[_K]] = ...) -> None: ...
    def __contains__(self, item: object) -> bool: ...
    def __iter__(self) -> Iterator[_K]: ...
    def __len__(self) -> int: ...
    def add(self, x: _K) -> None: ...
    def discard(self, item: _K) -> None: ...

class MultiValueDictKeyError(KeyError): ...

_D = TypeVar("_D", bound="MultiValueDict")

class MultiValueDict(MutableMapping[_K, _V]):
    @overload
    def __init__(self, key_to_list_mapping: Mapping[_K, Optional[List[_V]]] = ...) -> None: ...
    @overload
    def __init__(self, key_to_list_mapping: Iterable[Tuple[_K, List[_V]]] = ...) -> None: ...
    def getlist(self, key: _K, default: Any = ...) -> List[_V]: ...
    def setlist(self, key: _K, list_: List[_V]) -> None: ...
    def setlistdefault(self, key: _K, default_list: Optional[List[_V]] = ...) -> List[_V]: ...
    def appendlist(self, key: _K, value: _V) -> None: ...
    def lists(self) -> Iterable[Tuple[_K, List[_V]]]: ...
    def dict(self) -> Dict[_K, Union[_V, List[_V]]]: ...
    def copy(self: _D) -> _D: ...
    # These overrides are needed to convince mypy that this isn't an abstract class
    def __delitem__(self, item: _K) -> None: ...
    def __getitem__(self, item: _K) -> Union[_V, Literal[[]]]: ...  # type: ignore
    def __setitem__(self, k: _K, v: Union[_V, List[_V]]) -> None: ...
    def __len__(self) -> int: ...
    def __iter__(self) -> Iterator[_K]: ...

class ImmutableList(Tuple[_V, ...]):
    warning: str = ...
    def __init__(self, *args: Any, warning: str = ..., **kwargs: Any) -> None: ...
    def complain(self, *wargs: Any, **kwargs: Any) -> None: ...

class DictWrapper(Dict[str, _V]):
    func: Callable[[_V], _V] = ...
    prefix: str = ...
    @overload
    def __init__(self, data: Mapping[str, _V], func: Callable[[_V], _V], prefix: str) -> None: ...
    @overload
    def __init__(self, data: Iterable[Tuple[str, _V]], func: Callable[[_V], _V], prefix: str) -> None: ...

_T = TypeVar("_T", bound="CaseInsensitiveMapping")

class CaseInsensitiveMapping(Mapping):
    def __init__(self, data: Any) -> None: ...
    def __getitem__(self, key: str) -> Any: ...
    def __len__(self) -> int: ...
    def __iter__(self) -> Iterator[str]: ...
    def copy(self: _T) -> _T: ...
| [
"[email protected]"
] | |
d9bd2890dfcfbc0a88d99138185203be8d152d8b | 13d8ede6d23ed0a375bbc9310d93be035fd164e9 | /InterviewBits/arrays/flip.py | f9d5783e746d6b7c2e6a9d020b5caec7cd947ea2 | [] | no_license | iamrishap/PythonBits | 192d3fb7bce101485eb81da2153e5b0c82b6872a | dcbc5f087ad78110a98e78dd6e5943ed971309c2 | refs/heads/master | 2022-03-10T07:16:08.601170 | 2019-11-17T04:01:00 | 2019-11-17T04:01:00 | 206,778,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,038 | py | """
You are given a binary string(i.e. with characters 0 and 1) S consisting of characters S1, S2, …, SN.
In a single operation, you can choose two indices L and R such that 1 ≤ L ≤ R ≤ N and
flip the characters SL, SL+1, …, SR. By flipping, we mean change character 0 to 1 and vice-versa.
Your aim is to perform ATMOST one operation such that in final string number of 1s is maximised.
If you don’t want to perform the operation, return an empty array. Else, return an array consisting of two
elements denoting L and R. If there are multiple solutions, return the lexicographically smallest pair of L and R.
Notes:
Pair (a, b) is lexicographically smaller than pair (c, d) if a < c or, if a == c and b < d.
For example,
S = 010
Pair of [L, R] | Final string
_______________|_____________
[1 1] | 110
[1 2] | 100
[1 3] | 101
[2 2] | 000
[2 3] | 001
We see that two pairs [1, 1] and [1, 3] give same number of 1s in final string. So, we return [1, 1].
Another example,
If S = 111
No operation can give us more than three 1s in final string. So, we return empty array [].
"""
# Say it has A 0s and B 1s. Eventually, there are B 0s and A 1s.
# So, number of 1s increase by A - B. We want to choose a subarray which maximises this.
# Note, if we change 1s to -1, then sum of values will give us A - B.
# Then, we have to find a subarray with maximum sum, which can be done via Kadane’s Algorithm.
class Solution:
    # @param A : string
    # @return a list of integers
    def flip(self, A):
        max_diff = 0
        diff = 0
        start = 0
        ans = None
        for i, a in enumerate(A):
            diff += (1 if a == '0' else -1)
            if diff < 0:
                diff = 0
                start = i + 1
                continue
            if diff > max_diff:
                max_diff = diff
                ans = [start, i]
        if ans is None:
            return []
        return list(map(lambda x: x + 1, ans))
s = Solution()
print(s.flip('010'))
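# A second illustrative check (assumed, mirroring the docstring's '111' case):
# an all-ones string gains nothing from any flip, so the empty pair is returned.
#
#     print(s.flip('111'))  # -> []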
| [
"[email protected]"
] | |
a27ec35f7aa311ffa7144d14cdcef4c0bea23acd | 4bebd76e65768c6a2fe9f8019c99ae9e579dbafd | /scripts/LAPipe/NumPy/PART2/P2.10RW.py | c37313dde9fcf99a541e71b68a800c74863e1648 | [] | no_license | hadad-paper/HADAD_SIGMOD2021 | 7e0f7687bfdb3601b817570a2c10c2b923970fd9 | 6bfef6838a5549288adca6bdd71ec0d3497d3f2e | refs/heads/master | 2023-03-05T22:54:36.894263 | 2021-02-22T23:24:43 | 2021-02-22T23:24:43 | 296,177,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,159 | py | with open(__file__) as fh: print fh.read()
import os
import sys
import datetime
import time
import numpy as np
import numpy.linalg as alg
import pandas as pd
from numpy import genfromtxt
def timeOp(string, cleanup=None):
    times = []
    time_stamp = datetime.datetime.fromtimestamp(
        time.time()).strftime('%Y%m%d%H%M%S')
    for ix in range(5):
        try:
            start = time.time()
            res = eval(string)
            stop = time.time()
            times.append(stop-start)
        except MemoryError:
            return np.nan
        if cleanup is not None:
            eval(cleanup)
    return times

def Query_Call(M, N):
    return np.matmul(M, np.sum(N, axis=1))
path = 'results.out'
colnames = ['Query','time1','time2','time3','time4','time5']
runTimes = pd.DataFrame(np.zeros((1,len(colnames))))
runTimes.columns = colnames
M = genfromtxt(str(sys.argv[1]), delimiter=',')
N = genfromtxt(str(sys.argv[2]), delimiter=',')
Query = 'Query_Call(M,N)'
runTimes.ix[:,'Query'] = "P2.10RW"
runTimes.ix[:,1:] = timeOp(Query)
writeHeader = not os.path.exists(path)
runTimes.to_csv(path, index=False, header = writeHeader, mode = 'a') | [
"[email protected]"
] | |
ff05fe771f713034457ab59fd6057ba63edd7043 | aaf4a46f6bde17bfcbb8334f83d4e5972c1ed9cc | /HiggsFastSim/launchVBFHToETau_8TeVBeamspot_20PU.py | e0c79f9fdcff8d4d7ad7c6fb44b64c6cda31e6b2 | [] | no_license | taroni/usercode | 72811057e54691edfd3eee1f5cd2eab163ff97c6 | 94888ed661f1c31f0fb2c8593add5efd8ecaafa4 | refs/heads/master | 2021-06-06T20:52:41.776442 | 2019-07-09T13:31:39 | 2019-07-09T13:31:39 | 11,950,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,325 | py | #!/bin/env python
from os import popen
# initial number
i = 10
# start of the loop
while i < 20:
    # beginning of the cfg file
    cfg = """
import FWCore.ParameterSet.Config as cms
process = cms.Process('HLT')
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('FastSimulation.Configuration.EventContent_cff')
process.load('FastSimulation.PileUpProducer.PileUpSimulator_E13TeV_AVE_20_inTimeOnly_cff')
process.load('FastSimulation.Configuration.Geometries_MC_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_PostLS1_cff')
process.load('Configuration.StandardSequences.Generator_cff')
#process.load('IOMC.EventVertexGenerators.VtxSmearedNominalCollision2015_cfi')
process.load('IOMC.EventVertexGenerators.VtxSmearedRealistic8TeVCollision_cfi')
process.load('GeneratorInterface.Core.genFilterSummary_cff')
process.load('FastSimulation.Configuration.FamosSequences_cff')
process.load('FastSimulation.Configuration.FamosSequences_cff')
process.load('CommonTools.ParticleFlow.EITopPAG_cff')
process.load('HLTrigger.Configuration.HLT_GRun_Famos_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(2000)
)
# Input source
process.source = cms.Source("EmptySource",
    firstLuminosityBlock=cms.untracked.uint32("""+str(1+i)+"""),
    firstEvent=cms.untracked.uint32("""+str(1+1000*i)+""")
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
    version = cms.untracked.string('$Revision: 1.19 $'),
    annotation = cms.untracked.string('VBFToHtautau_13TeV_pythia8_cff nevts:50'),
    name = cms.untracked.string('Applications')
)
# Output definition
process.AODoutput = cms.OutputModule("PoolOutputModule",
    compressionLevel = cms.untracked.int32(4),
    compressionAlgorithm = cms.untracked.string('LZMA'),
    eventAutoFlushCompressedSize = cms.untracked.int32(15728640),
    outputCommands = process.AODSIMEventContent.outputCommands,
    fileName = cms.untracked.string('VBFHetau_8TeVBeamSpot_20PU_AODSIM_"""+str(100+i)+""".root'),
    dataset = cms.untracked.PSet(
        filterName = cms.untracked.string(''),
        dataTier = cms.untracked.string('')
    ),
    SelectEvents = cms.untracked.PSet(
        SelectEvents = cms.vstring('generation_step')
    ),
    dropMetaData = cms.untracked.string('ALL'),
    fastCloning = cms.untracked.bool(False),
    overrideInputFileSplitLevels = cms.untracked.bool(True)
)
# Additional output definition
# Other statements
from FastSimulation.PileUpProducer.PileUpSimulator_E13TeV_AVE_20_inTimeOnly_cff import famosPileUp
process.famosPileUp.PileUpSimulator.averageNumber = 20.
process.famosPileUp.PileUpSimulator.usePoisson = True
process.genstepfilter.triggerConditions=cms.vstring("generation_step")
process.HLTEndSequence = cms.Sequence( process.dummyModule )
process.simulation = cms.Sequence( process.dummyModule )
from Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_mc', '')
process.generator = cms.EDFilter("Pythia8GeneratorFilter",
    PythiaParameters = cms.PSet(
        parameterSets = cms.vstring('pythia8CommonSettings',
            'pythia8CUEP8M1Settings',
            'processParameters'),
        processParameters = cms.vstring('HiggsSM:gg2H = off',
            'HiggsSM:ff2Hff(t:ZZ) = on',
            'HiggsSM:ff2Hff(t:WW) = on',
            'HiggsSM:ffbar2HZ = off',
            'HiggsSM:ffbar2HW = off',
            'TauDecays:mode = 2',
            'TauDecays:tauPolarization = 0',
            'TauDecays:tauMother = 25',
            '6:m0 = 172.04',
            '25:m0 = 125.0',
            '25:addChannel 1 0.1 100 15 -11',
            '25:addChannel 1 0.1 100 11 -15',
            '25:onMode = off',
            '25:onIfMatch 15 11'),
        pythia8CUEP8M1Settings = cms.vstring('Tune:pp 14',
            'Tune:ee 7',
            'MultipartonInteractions:pT0Ref=2.4024',
            'MultipartonInteractions:ecmPow=0.25208',
            'MultipartonInteractions:expPow=1.6'),
        pythia8CommonSettings = cms.vstring('Main:timesAllowErrors = 10000',
            'Check:epTolErr = 0.01',
            'Beams:setProductionScalesFromLHEF = on',
            'SLHA:keepSM = on',
            'SLHA:minMassSM = 1000.',
            'ParticleDecays:limitTau0 = on',
            'ParticleDecays:tau0Max = 10',
            'ParticleDecays:allowPhotonRadiation = on')
    ),
    comEnergy = cms.double(13000.0),
    filterEfficiency = cms.untracked.double(1),
    maxEventsToPrint = cms.untracked.int32(1),
    pythiaHepMCVerbosity = cms.untracked.bool(False),
    pythiaPylistVerbosity = cms.untracked.int32(1)
)
process.RandomNumberGeneratorService.generator.initialSeed = """+str(123456789+1000000*i)+"""
process.ProductionFilterSequence = cms.Sequence(process.generator)
# Path and EndPath definitions
process.generation_step = cms.Path(process.pgen)
process.simulation_step = cms.Path(process.simulationWithFamos)
process.reconstruction_step = cms.Path(process.reconstructionWithFamos)
process.eventinterpretaion_step = cms.Path(process.EIsequence)
process.genfiltersummary_step = cms.EndPath(process.genFilterSummary)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.AODoutput_step = cms.EndPath(process.AODoutput)
# Schedule definition
process.schedule = cms.Schedule(process.generation_step,process.genfiltersummary_step,process.simulation_step,process.reconstruction_step,process.eventinterpretaion_step)
process.schedule.extend(process.HLTSchedule)
process.schedule.extend([process.endjob_step,process.AODoutput_step ])
# filter all path with the production filter sequence
for path in process.paths:
    getattr(process,path)._seq = process.ProductionFilterSequence * getattr(process,path)._seq
# customisation of the process.
# Automatic addition of the customisation function from FastSimulation.Configuration.MixingModule_Full2Fast
from FastSimulation.Configuration.MixingModule_Full2Fast import setVertexGeneratorPileUpProducer
#call to customisation function setVertexGeneratorPileUpProducer imported from FastSimulation.Configuration.MixingModule_Full2Fast
process = setVertexGeneratorPileUpProducer(process)
# Automatic addition of the customisation function from HLTrigger.Configuration.customizeHLTforMC
from HLTrigger.Configuration.customizeHLTforMC import customizeHLTforMC
#call to customisation function customizeHLTforMC imported from HLTrigger.Configuration.customizeHLTforMC
process = customizeHLTforMC(process)
# Automatic addition of the customisation function from SLHCUpgradeSimulations.Configuration.postLS1Customs
from SLHCUpgradeSimulations.Configuration.postLS1Customs import customisePostLS1
#call to customisation function customisePostLS1 imported from SLHCUpgradeSimulations.Configuration.postLS1Customs
process = customisePostLS1(process)
# End of customisation functions
"""
    # write out the configuration file
    cfg_file = open("vbfHiggsETau_8TeVBeamSpot_20PU-"+str(i)+"_cfg.py","w")
    cfg_file.write(cfg)
    cfg_file.close()
    # create the script that submits the job to the cluster
    sh = """#!/bin/tcsh -f
set W_DIR = \"/afs/cern.ch/user/t/taroni/scratch1/LFVH/CMSSW_7_2_3/src/FastSimulation/Event/test\"
set CFG = \"/afs/cern.ch/user/t/taroni/scratch1/LFVH/CMSSW_7_2_3/src/FastSimulation/Event/test/vbfHiggsETau_8TeVBeamSpot_20PU-"""+str(i)+"""_cfg.py\"
cd $W_DIR
eval `scramv1 runtime -csh`
cd -
cmsRun $CFG
cmsStage VBFHetau_8TeVBeamSpot_20PU_AODSIM_"""+str(100+i)+""".root /store/user/taroni/723/vbfHiggs/
exit
"""
    # write the script
    sh_file = open("submitvbfHiggsETau_8TeVBeamSpot_20PU-"+str(i)+".sh","w")
    sh_file.write(sh)
    sh_file.close()
    # submit the script
    popen("chmod a+x submitvbfHiggsETau_8TeVBeamSpot_20PU-"+str(i)+".sh" )
    popen("bsub -q 1nw submitvbfHiggsETau_8TeVBeamSpot_20PU-"+str(i)+".sh")
    i += 1
| [
"[email protected]"
] | |
6e523c8eb1f2c2e9af921a9ad5ae44de98465a3f | c8f4cdee426f04fcf0e7c4865515508178cdc580 | /NFL_Draftkings/__init__.py | 4a572416a3b992c5d443c98ea2d1fb2048bd2cc4 | [
"MIT"
] | permissive | jkope892/NFL_Draftkings | 734d5dd6145cea1ba8cc2f18ccb3f56098c67458 | 3744d53c63c9f58a5c0a1ce4cf6ebce989b3c0da | refs/heads/master | 2021-05-17T08:37:45.698445 | 2016-09-05T00:18:39 | 2016-09-05T00:18:39 | 250,710,985 | 1 | 0 | MIT | 2020-03-28T04:04:24 | 2020-03-28T04:04:23 | null | UTF-8 | Python | false | false | 80 | py | __author__ = 'Kacper Adach'
__version__='0.4'
from GetPlayerDKScores import *
| [
"[email protected]"
] | |
ff68e33fd5b4b5b0f46bab5d93a2456434b1b756 | fe6775ca8c5b42710785e3a923974ae079f92c8f | /code/111. 二叉树的最小深度.py | 3e4de4635aa4de1d9e8f7a31cf17e83b061248e2 | [] | no_license | AiZhanghan/Leetcode | 41bda6676fa1a25fa19e393553c1148ed51fdf72 | 101bce2fac8b188a4eb2f5e017293d21ad0ecb21 | refs/heads/master | 2021-06-28T10:48:07.865968 | 2020-11-20T09:45:15 | 2020-11-20T09:45:15 | 188,155,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | # Definition for a binary tree node.
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
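# Illustrative usage (assumed driver code, not part of the original snippet):
# for the tree [3, 9, 20, None, None, 15, 7] the shallowest leaf is 9, so the
# minimum depth is 2.
#
#     root = TreeNode(3)
#     root.left, root.right = TreeNode(9), TreeNode(20)
#     root.right.left, root.right.right = TreeNode(15), TreeNode(7)
#     assert Solution().minDepth(root) == 2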
class Solution:
    def minDepth(self, root):
        """
        Args:
            root: TreeNode
        Return:
            int
        """
        if not root:
            return 0
        if not root.left and not root.right:
            return 1
        res = float("inf")
        if root.left:
            res = min(res, self.minDepth(root.left))
        if root.right:
            res = min(res, self.minDepth(root.right))
        return res + 1 | [
"[email protected]"
] | |
8cbcd942da4735224385ab3dbc3923e725f2b36b | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/request/AlipayMarketingCampaignDiscountBudgetQueryRequest.py | 1c336dd763456ee791f4e197b59b28af345ec219 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 4,052 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayMarketingCampaignDiscountBudgetQueryModel import AlipayMarketingCampaignDiscountBudgetQueryModel
class AlipayMarketingCampaignDiscountBudgetQueryRequest(object):

    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def biz_content(self):
        return self._biz_content

    @biz_content.setter
    def biz_content(self, value):
        if isinstance(value, AlipayMarketingCampaignDiscountBudgetQueryModel):
            self._biz_content = value
        else:
            self._biz_content = AlipayMarketingCampaignDiscountBudgetQueryModel.from_alipay_dict(value)

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        params = dict()
        params[P_METHOD] = 'alipay.marketing.campaign.discount.budget.query'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        multipart_params = dict()
        return multipart_params
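# Illustrative usage sketch (assumed, not part of the generated SDK file):
#
#     request = AlipayMarketingCampaignDiscountBudgetQueryRequest()
#     request.biz_content = {"camp_id": "20170000000001"}  # hypothetical field
#     params = request.get_params()  # dict of gateway parameters, including
#                                    # the serialized 'biz_content' JSON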
| [
"[email protected]"
] | |
ec38695fe5da3b2dbd32e767f06c00c40ededda7 | b05761d771bb5a85d39d370c649567c1ff3eb089 | /venv/lib/python3.10/site-packages/clikit/io/input_stream/null_input_stream.py | 028240ec0f8db6eac4aaef02276bb18a572c1cad | [] | no_license | JawshyJ/Coding_Practice | 88c49cab955eab04609ec1003b6b8c20f103fc06 | eb6b229d41aa49b1545af2120e6bee8e982adb41 | refs/heads/master | 2023-02-19T10:18:04.818542 | 2023-02-06T21:22:58 | 2023-02-06T21:22:58 | 247,788,631 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | /home/runner/.cache/pip/pool/a1/7e/50/f915f39cc05a20660c21d59727fecc2940e54cafe21c2b2ba707d671e7 | [
"[email protected]"
] | |
8b69cd4779156df3932f49444b300b44d248ecf2 | 5464dcbcf10bafdda01d04fcbff71eeafbe3c432 | /src/metrics/user_metric.py | 54ed042d3ff0d3a92dea242d52c2127ae685c3c2 | [
"BSD-3-Clause"
] | permissive | dsc/E3_analysis | caeeb29a8cf7381f67652ebb7059c0da1ce53b08 | 5bd27b038d6b56632205a81137450ebb884ca1bb | refs/heads/master | 2023-07-31T20:34:53.990017 | 2013-02-08T00:20:37 | 2013-02-08T00:20:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,206 | py | """
This module will be used to define Wikimedia Foundation user metrics. The
Strategy behavioural pattern(http://en.wikipedia.org/wiki/Strategy_pattern)
will be used to implement the metrics generation. In general the UserMetric
type utilizes the process() function attribute to produce an internal list
of metrics for a specified set of user handles (typically ID but user names
may also be specified) passed to the method on call. The execution of
process() produces a nested list that can be accessed via generator with
an object call to __iter__().
The class structure is generally as follows: ::
    class Metric(object):
        def __init__(self):
            # initialize base metric
            return

        def process(self):
            # base metric implementation
            return metric_value

    class DerivedMetric(Metric):
        def __init__(self):
            super(DerivedMetric, self)
            # initialize derived metric
            return

        def process(self):
            # derived metric implementation
            return metric_value
These metrics will be used to support experimentation and measurement
at the Wikimedia Foundation. The guidelines for this development may
be found at https://meta.wikimedia.org/wiki/Research:Metrics.
"""
__author__ = "Ryan Faulkner"
__date__ = "July 27th, 2012"
__license__ = "GPL (version 2 or later)"
import src.etl.data_loader as dl
from collections import namedtuple
from dateutil.parser import parse as date_parse
from datetime import datetime, timedelta
def pre_metrics_init(init_f):
    """ Decorator function for subclassed metrics __init__ """
    def wrapper(self, **kwargs):
        # Add params from base class
        self.append_params(UserMetric)
        self.apply_default_kwargs(kwargs, 'init')
        # Call init
        init_f(self, **kwargs)
    return wrapper
# Define aggregator processing methods, method attributes, and namedtuple
# class for packaging aggregate data
# Respectively:
#
# 1. flag attribute for a type of metric aggregation methods
# 2. header attribute for a type of metric aggregation methods
# 3. name attribute for a type of metric aggregation methods
# 4. keyword arg attribute for a type of metric aggregation methods
METRIC_AGG_METHOD_FLAG = 'metric_agg_flag'
METRIC_AGG_METHOD_HEAD = 'metric_agg_head'
METRIC_AGG_METHOD_NAME = 'metric_agg_name'
METRIC_AGG_METHOD_KWARGS = 'metric_agg_kwargs'
# Class for storing aggregate data
aggregate_data_class = namedtuple("AggregateData", "header data")
def aggregator(agg_method, metric, data_header):
    """ Method for wrapping and executing aggregated data """
    if hasattr(agg_method, METRIC_AGG_METHOD_FLAG) and getattr(agg_method,
            METRIC_AGG_METHOD_FLAG):
        # These are metric specific aggregators. The method must also define
        # the header.
        agg_header = getattr(agg_method, METRIC_AGG_METHOD_HEAD) if hasattr(
            agg_method, METRIC_AGG_METHOD_HEAD) else 'No header specified.'
        kwargs = getattr(agg_method, METRIC_AGG_METHOD_KWARGS) if hasattr(
            agg_method, METRIC_AGG_METHOD_KWARGS) else {}
        data = [getattr(agg_method, METRIC_AGG_METHOD_NAME)] + agg_method(
            metric, **kwargs)
    else:
        # Generic aggregators that are metric agnostic
        agg_header = ['type'] + [data_header[i] for i in metric._agg_indices[
            agg_method.__name__]]
        data = [agg_method.__name__] + agg_method(metric.__iter__(),
            metric._agg_indices[agg_method.__name__])
    return aggregate_data_class(agg_header, data)
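# Illustrative sketch (assumed, not part of the original module): a
# metric-specific aggregator advertises itself through the attributes above,
# and then gets wrapped by aggregator(). The function below is hypothetical.
#
#     def mean_edits(metric, threshold=0):
#         ...  # returns a list of aggregate values
#
#     setattr(mean_edits, METRIC_AGG_METHOD_FLAG, True)
#     setattr(mean_edits, METRIC_AGG_METHOD_NAME, 'mean_edits')
#     setattr(mean_edits, METRIC_AGG_METHOD_HEAD, ['type', 'mean'])
#     setattr(mean_edits, METRIC_AGG_METHOD_KWARGS, {'threshold': 5})
#
#     result = aggregator(mean_edits, some_metric, header)  # -> AggregateData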
class UserMetric(object):

    ALL_NAMESPACES = 'all_namespaces'
    DATETIME_STR_FORMAT = "%Y%m%d%H%M%S"

    # Default number of days for a metric computation
    DEFAULT_DATA_RANGE = 14

    _data_model_meta = dict()
    _agg_indices = dict()

    # Structure that defines parameters for UserMetric class
    _param_types = {
        'init': {
            'date_start': ['str|datetime', 'Earliest date metric '
                                           'is measured.',
                           datetime.now() + timedelta(DEFAULT_DATA_RANGE)],
            'date_end': ['str|datetime', 'Latest date metric is measured.',
                         datetime.now()],
            'project': ['str', 'The project (language) being inspected.',
                        'enwiki'],
            'namespace': ['int|set', 'The namespace over which the '
                                     'metric is computed.', 0],
        },
        'process': {}
    }

    def apply_default_kwargs(self, kwargs, arg_type):
        """ Apply parameter defaults where necessary """
        if hasattr(kwargs, '__iter__') and arg_type in self._param_types:
            for k in self._param_types[arg_type]:
                if not k in kwargs or not kwargs[k]:
                    kwargs[k] = self._param_types[arg_type][k][2]

    def __init__(self, **kwargs):
        self._data_source_ = dl.Connector(instance='slave')
        self._results = list()  # Stores results of a process request

        # Set metric time bounds
        self._start_ts_ = self._get_timestamp(kwargs['date_start'])
        self._end_ts_ = self._get_timestamp(kwargs['date_end'])

        self._project_ = kwargs['project']
        namespace = kwargs['namespace']
        if not namespace == self.ALL_NAMESPACES:
            if not hasattr(namespace, '__iter__'): namespace = [namespace]
            self._namespace_ = set(namespace)
        else:
            self._namespace_ = namespace

    def __str__(self): return "\n".join([str(self._data_source_._db_),
                                         str(self.__class__),
                                         str(self._namespace_),
                                         self._project_])

    def __iter__(self): return (r for r in self._results)

    def __del__(self):
        if hasattr(self, '_data_source_') and hasattr(self._data_source_,
                                                      'close_db'):
            self._data_source_.close_db()

    def append_params(self, class_ref):
        """ Append params from class reference """
        if hasattr(class_ref, '_param_types'):
            for k, v in class_ref._param_types['init'].iteritems():
                self.__class__._param_types['init'][k] = v
            for k, v in class_ref._param_types['process'].iteritems():
                self.__class__._param_types['process'][k] = v

    @property
    def date_start(self): return self._start_ts_

    @property
    def date_end(self): return self._end_ts_

    @classmethod
    def _construct_data_point(cls): return namedtuple(cls.__name__,
                                                      cls.header())

    @classmethod
    def _get_timestamp(cls, ts_representation):
        """
        Helper method. Takes a representation of a date object (String or
        datetime.datetime object) and formats as a timestamp:
        "YYYY-MM-DD HH:II:SS"

        - Parameters:
            - *date_representation* - String or datetime. A formatted
              timestamp representation

        - Return:
            - String. Timestamp derived from argument in format
              "YYYY-MM-DD HH:II:SS".
        """
        try:
            # timestamp strings should
            datetime_obj = date_parse(ts_representation[:19])
        except AttributeError:
            datetime_obj = ts_representation
        except TypeError:
            datetime_obj = ts_representation

        # datetime_obj
        try:
            timestamp = datetime_obj.strftime(cls.DATETIME_STR_FORMAT)
            return timestamp
        except ValueError:
            raise cls.UserMetricError(message='Could not parse timestamp: %s'
                                              % datetime_obj.__str__())

    @classmethod
    def _format_namespace(cls, namespace):
        # format the namespace condition
        ns_cond = ''
        if hasattr(namespace, '__iter__'):
            if len(namespace) == 1:
                ns_cond = 'page_namespace = ' + str(namespace.pop())
            else:
                ns_cond = 'page_namespace in (' + ",".join(dl.DataLoader().
                    cast_elems_to_string(list(namespace))) + ')'
        return ns_cond

    @staticmethod
    def header(): raise NotImplementedError

    @staticmethod
    def pre_process_users(proc_func):
        def wrapper(self, users, **kwargs):
            # Duck-type the "cohort" ref for a ID generating interface
            # see src/metrics/users.py
            if hasattr(users, 'get_users'):
                users = [u for u in users.get_users(self._start_ts_, self._end_ts_)]
            return proc_func(self, users, **kwargs)
        return wrapper

    def process(self, users, **kwargs):
        raise NotImplementedError()

    class UserMetricError(Exception):
        """ Basic exception class for UserMetric types """
        def __init__(self, message="Unable to process results using "
                                   "strategy."):
            Exception.__init__(self, message)
| [
"[email protected]"
] | |
10d0c0440ab0d5f8b17d79217c02ccde8d00ed6f | fd03997cf671c295e1c36fea5a7ff0888af4620e | /tests/pyfilter/operations/test_setproperties.py | c0083a326aa4e089ce2ef711d31153d39c17ce39 | [] | no_license | johnathandinh/Houdini-Toolbox | 5d8ae22ee6e50db97eb48ad1aa8ff4c3dd5cb70b | 975e11fc59e42953938d8e03a1d49099abbc7b34 | refs/heads/master | 2020-12-01T16:52:37.978855 | 2019-11-16T20:05:26 | 2019-11-16T20:05:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,463 | py | """Test the ht.pyfilter.operations.setproperties module."""
# =============================================================================
# IMPORTS
# =============================================================================
# Standard Library Imports
import argparse
# Third Party Library Imports
import pytest
# Houdini Toolbox Imports
from ht.pyfilter.manager import PyFilterManager
from ht.pyfilter.operations import setproperties
# =============================================================================
# FIXTURES
# =============================================================================
@pytest.fixture
def init_manager(mocker):
    """Fixture to initialize the manager class."""
    mocker.patch.object(setproperties.PropertySetterManager, "__init__", lambda x, y: None)

    def _create():
        return setproperties.PropertySetterManager(None)

    return _create


@pytest.fixture
def init_masked_setter(mocker):
    """Fixture to initialize the masked setter class."""
    mocker.patch.object(setproperties.MaskedPropertySetter, "__init__", lambda x, y, z, w: None)

    def _create():
        return setproperties.MaskedPropertySetter(None, None, None)

    return _create


@pytest.fixture
def init_operation(mocker):
    """Fixture to initialize an operation."""
    mocker.patch.object(setproperties.SetProperties, "__init__", lambda x, y: None)

    def _create():
        return setproperties.SetProperties(None)

    return _create


@pytest.fixture
def init_setter(mocker):
    """Fixture to initialize the setter class."""
    mocker.patch.object(setproperties.PropertySetter, "__init__", lambda x, y: None)

    def _create():
        return setproperties.PropertySetter(None)

    return _create


@pytest.fixture
def properties(mocker):
    """Fixture to handle mocking (get|set)_property calls."""
    _mock_get = mocker.patch("ht.pyfilter.operations.setproperties.get_property")
    _mock_set = mocker.patch("ht.pyfilter.operations.setproperties.set_property")

    class Properties(object):
        """Fake class for accessing and setting properties."""

        @property
        def mock_get(self):
            """Access get_property."""
            return _mock_get

        @property
        def mock_set(self):
            """Access set_property."""
            return _mock_set

    return Properties()
# =============================================================================
# CLASSES
# =============================================================================
class Test_PropertySetterManager(object):
"""Test the ht.pyfilter.operations.setproperties.PropertySetterManager class."""
def test___init__(self):
"""Test object initialization."""
op = setproperties.PropertySetterManager()
assert op._properties == {}
# Properties
def test_properties(self, init_manager, mocker):
value = mocker.MagicMock(spec=dict)
op = init_manager()
op._properties = value
assert op.properties == value
# Methods
def test__load_from_data(self, init_manager, mocker):
mock_process_render = mocker.patch("ht.pyfilter.operations.setproperties._process_rendertype_block")
mock_process_block = mocker.patch("ht.pyfilter.operations.setproperties._process_block")
mock_stage1 = mocker.MagicMock(spec=str)
mock_stage2 = mocker.MagicMock(spec=str)
mock_property1 = mocker.MagicMock(spec=str)
mock_property1.startswith.return_value = True
mock_property2 = mocker.MagicMock(spec=str)
mock_property2.startswith.return_value = False
mock_block1 = mocker.MagicMock(spec=dict)
mock_block2 = mocker.MagicMock(spec=dict)
data = {
mock_stage1: {
mock_property1: mock_block1
},
mock_stage2: {
mock_property2: mock_block2
}
}
properties = {}
mock_properties = mocker.PropertyMock()
mock_properties.return_value = properties
op = init_manager()
type(op).properties = mock_properties
op._load_from_data(data)
assert mock_stage1 in properties
assert mock_stage2 in properties
mock_process_render.assert_called_with(
[], mock_stage1, mock_property1.split.return_value[1], mock_block1
)
mock_process_block.assert_called_with(
[], mock_stage2, mock_property2, mock_block2
)
# load_from_file
def test_load_from_file(self, init_manager, mocker):
mock_from_data = mocker.patch.object(setproperties.PropertySetterManager, "_load_from_data")
mock_json_load = mocker.patch("ht.pyfilter.operations.setproperties.json.load")
mock_path = mocker.MagicMock(spec=str)
op = init_manager()
mock_handle = mocker.mock_open()
mocker.patch("__builtin__.open", mock_handle)
op.load_from_file(mock_path)
mock_handle.assert_called_with(mock_path)
mock_json_load.assert_called_with(mock_handle.return_value)
mock_from_data.assert_called_with(mock_json_load.return_value)
def test_parse_from_string(self, init_manager, mocker):
mock_from_data = mocker.patch.object(setproperties.PropertySetterManager, "_load_from_data")
mock_json_loads = mocker.patch("ht.pyfilter.operations.setproperties.json.loads")
mock_string = mocker.MagicMock(spec=str)
op = init_manager()
op.parse_from_string(mock_string)
mock_json_loads.assert_called_with(mock_string)
mock_from_data.assert_called_with(mock_json_loads.return_value)
# set_properties
def test_set_properties__has_stage(self, init_manager, mocker):
mock_properties = mocker.patch.object(
setproperties.PropertySetterManager, "properties", new_callable=mocker.PropertyMock
)
mock_stage = mocker.MagicMock(spec=str)
mock_property = mocker.MagicMock(spec=setproperties.PropertySetter)
properties = {mock_stage: [mock_property]}
mock_properties.return_value = properties
op = init_manager()
op.set_properties(mock_stage)
mock_property.set_property.assert_called()
def test_set_properties__no_stage(self, init_manager, mocker):
mock_properties = mocker.patch.object(
setproperties.PropertySetterManager, "properties", new_callable=mocker.PropertyMock
)
mock_stage1 = mocker.MagicMock(spec=str)
mock_stage2 = mocker.MagicMock(spec=str)
mock_property = mocker.MagicMock(spec=setproperties.PropertySetter)
properties = {mock_stage1: [mock_property]}
mock_properties.return_value = properties
op = init_manager()
op.set_properties(mock_stage2)
mock_property.set_property.assert_not_called()
class Test_PropertySetter(object):
"""Test the ht.pyfilter.operations.setproperties.PropertySetter class."""
def test___init___no_findfile(self, mocker):
"""Test object initialization without finding a file."""
mocker.patch.object(
setproperties.PropertySetter, "find_file", new_callable=mocker.PropertyMock(return_value=False)
)
mock_name = mocker.MagicMock(spec=str)
mock_value = mocker.MagicMock()
mock_rendertype = mocker.MagicMock(spec=str)
block = {
"value": mock_value,
"rendertype": mock_rendertype
}
op = setproperties.PropertySetter(mock_name, block)
assert op._name == mock_name
assert op._value == mock_value
assert not op._find_file
assert op._rendertype == mock_rendertype
def test___init___findfile(self, patch_hou, mocker):
"""Test object initialization with finding a file."""
mocker.patch.object(
setproperties.PropertySetter, "find_file", new_callable=mocker.PropertyMock(return_value=True)
)
mock_name = mocker.MagicMock(spec=str)
mock_value = mocker.MagicMock()
block = {
"value": mock_value,
"findfile": True
}
op = setproperties.PropertySetter(mock_name, block)
assert op._name == mock_name
assert op._value == patch_hou["hou"].findFile.return_value
assert op._find_file
assert op._rendertype is None
patch_hou["hou"].findFile.assert_called_with(mock_value)
# Properties
def test_find_file(self, init_setter, mocker):
value = mocker.MagicMock(spec=bool)
op = init_setter()
op._find_file = value
assert op.find_file == value
def test_name(self, init_setter, mocker):
value = mocker.MagicMock(spec=str)
op = init_setter()
op._name = value
assert op.name == value
def test_rendertype(self, init_setter, mocker):
value = mocker.MagicMock(spec=str)
op = init_setter()
op._rendertype = value
assert op.rendertype == value
def test_value(self, init_setter, mocker):
value = mocker.MagicMock()
op = init_setter()
op._value = value
assert op.value == value
# Methods
# set_property
def test_set_property__rendertype_no_match(self, init_setter, properties, mocker, patch_hou):
mock_rendertype = mocker.patch.object(setproperties.PropertySetter, "rendertype", new_callable=mocker.PropertyMock)
patch_hou["hou"].patternMatch.return_value = False
op = init_setter()
op.set_property()
properties.mock_get.assert_called_with("renderer:rendertype")
patch_hou["hou"].patternMatch.assert_called_with(mock_rendertype.return_value, properties.mock_get.return_value)
properties.mock_set.assert_not_called()
def test_set_property__rendertype_match(self, init_setter, properties, mocker, patch_hou):
mock_rendertype = mocker.patch.object(setproperties.PropertySetter, "rendertype", new_callable=mocker.PropertyMock)
mock_name = mocker.patch.object(setproperties.PropertySetter, "name", new_callable=mocker.PropertyMock)
mock_value = mocker.patch.object(setproperties.PropertySetter, "value", new_callable=mocker.PropertyMock)
patch_hou["hou"].patternMatch.return_value = True
op = init_setter()
op.set_property()
properties.mock_get.assert_called_with("renderer:rendertype")
patch_hou["hou"].patternMatch.assert_called_with(mock_rendertype.return_value, properties.mock_get.return_value)
properties.mock_set.assert_called_with(mock_name.return_value, mock_value.return_value)
def test_set_property__no_rendertype(self, init_setter, properties, mocker, patch_hou):
mocker.patch.object(setproperties.PropertySetter, "rendertype", new_callable=mocker.PropertyMock(return_value=None))
mock_name = mocker.patch.object(setproperties.PropertySetter, "name", new_callable=mocker.PropertyMock)
mock_value = mocker.patch.object(setproperties.PropertySetter, "value", new_callable=mocker.PropertyMock)
patch_hou["hou"].patternMatch.return_value = True
op = init_setter()
op.set_property()
properties.mock_get.assert_not_called()
properties.mock_set.assert_called_with(mock_name.return_value, mock_value.return_value)
class Test_MaskedPropertySetter(object):
"""Test the ht.pyfilter.operations.setproperties.MaskedPropertySetter class."""
def test___init__(self, mocker):
mock_super_init = mocker.patch.object(setproperties.PropertySetter, "__init__")
mock_name = mocker.MagicMock(spec=str)
mock_block = mocker.MagicMock(spec=str)
mock_mask = mocker.MagicMock(spec=str)
op = setproperties.MaskedPropertySetter(mock_name, mock_block, mock_mask)
mock_super_init.assert_called_with(mock_name, mock_block)
assert op._mask == mock_block["mask"]
assert op._mask_property_name == mock_mask
# Properties
def test_mask(self, init_masked_setter, mocker):
value = mocker.MagicMock(spec=str)
op = init_masked_setter()
op._mask = value
assert op.mask == value
def test_mask_property_name(self, init_masked_setter, mocker):
value = mocker.MagicMock(spec=str)
op = init_masked_setter()
op._mask_property_name = value
assert op.mask_property_name == value
# Methods
# set_property
def test_set_property__mask_no_match(self, init_masked_setter, mocker, patch_hou):
mock_super_set = mocker.patch.object(setproperties.PropertySetter, "set_property")
mock_mask = mocker.patch.object(setproperties.MaskedPropertySetter, "mask", new_callable=mocker.PropertyMock)
mock_mask_name = mocker.patch.object(setproperties.MaskedPropertySetter, "mask_property_name",
new_callable=mocker.PropertyMock)
mock_get = mocker.patch("ht.pyfilter.operations.setproperties.get_property")
patch_hou["hou"].patternMatch.return_value = False
op = init_masked_setter()
op.set_property()
mock_get.assert_called_with(mock_mask_name.return_value)
patch_hou["hou"].patternMatch.assert_called_with(mock_mask.return_value, mock_get.return_value)
mock_super_set.assert_not_called()
def test_set_property__mask_match(self, init_masked_setter, mocker, patch_hou):
mock_super_set = mocker.patch.object(setproperties.PropertySetter, "set_property")
mock_mask = mocker.patch.object(setproperties.MaskedPropertySetter, "mask", new_callable=mocker.PropertyMock)
mock_mask_name = mocker.patch.object(setproperties.MaskedPropertySetter, "mask_property_name", new_callable=mocker.PropertyMock)
mock_get = mocker.patch("ht.pyfilter.operations.setproperties.get_property")
patch_hou["hou"].patternMatch.return_value = True
op = init_masked_setter()
op.set_property()
mock_get.assert_called_with(mock_mask_name.return_value)
patch_hou["hou"].patternMatch.assert_called_with(mock_mask.return_value, mock_get.return_value)
mock_super_set.assert_called()
def test_set_property__no_mask(self, init_masked_setter, mocker):
mock_super_set = mocker.patch.object(setproperties.PropertySetter, "set_property")
mocker.patch.object(setproperties.MaskedPropertySetter, "mask", new_callable=mocker.PropertyMock(return_value=None))
mock_get = mocker.patch("ht.pyfilter.operations.setproperties.get_property")
op = init_masked_setter()
op.set_property()
mock_get.assert_not_called()
mock_super_set.assert_called()
class Test_SetProperties(object):
"""Test the ht.pyfilter.operations.setproperties.SetProperties class."""
def test___init__(self, mocker):
"""Test object initialization."""
mock_super_init = mocker.patch.object(setproperties.PyFilterOperation, "__init__")
mock_prop_manager = mocker.patch("ht.pyfilter.operations.setproperties.PropertySetterManager", autospec=True)
mock_manager = mocker.MagicMock(spec=PyFilterManager)
op = setproperties.SetProperties(mock_manager)
mock_super_init.assert_called_with(mock_manager)
assert op._property_manager == mock_prop_manager.return_value
# Properties
def test_property_manager(self, init_operation, mocker):
"""Test the 'property_manager' property."""
mock_value = mocker.MagicMock(spec=setproperties.PropertySetterManager)
op = init_operation()
op._property_manager = mock_value
assert op.property_manager == mock_value
# Static Methods
# build_arg_string
def test_build_arg_string(self, mocker):
"""Test arg string construction."""
assert setproperties.SetProperties.build_arg_string() == ""
# Test properties flag.
mock_dumps = mocker.patch("ht.pyfilter.operations.setproperties.json.dumps")
mock_properties = mocker.MagicMock(spec=dict)
mock_result = mocker.MagicMock(spec=str)
mock_dumps.return_value.replace.return_value = mock_result
result = setproperties.SetProperties.build_arg_string(properties=mock_properties)
assert result == '--properties="{}"'.format(mock_result)
mock_dumps.assert_called_with(mock_properties)
mock_dumps.return_value.replace.assert_called_with('"', '\\"')
# Test properties-file flag.
mock_path = mocker.MagicMock(spec=str)
result = setproperties.SetProperties.build_arg_string(properties_file=mock_path)
assert result == "--properties-file={}".format(mock_path)
# register_parser_args
def test_register_parser_args(self, mocker):
mock_parser = mocker.MagicMock(spec=argparse.ArgumentParser)
setproperties.SetProperties.register_parser_args(mock_parser)
calls = [
mocker.call("--properties", nargs=1, action="store"),
mocker.call("--properties-file", nargs="*", action="store", dest="properties_file"),
]
mock_parser.add_argument.assert_has_calls(calls)
# Methods
def test_filter_camera(self, init_operation, mocker):
mock_prop_manager = mocker.patch.object(setproperties.SetProperties, "property_manager", new_callable=mocker.PropertyMock)
mock_manager = mocker.MagicMock(spec=setproperties.PropertySetterManager)
mock_prop_manager.return_value = mock_manager
op = init_operation()
op.filter_camera()
mock_manager.set_properties.assert_called_with("camera")
def test_filter_instance(self, init_operation, mocker, patch_soho):
mock_prop_manager = mocker.patch.object(setproperties.SetProperties, "property_manager", new_callable=mocker.PropertyMock)
mock_manager = mocker.MagicMock(spec=setproperties.PropertySetterManager)
mock_prop_manager.return_value = mock_manager
op = init_operation()
op.filter_instance()
mock_manager.set_properties.assert_called_with("instance")
def test_filter_light(self, init_operation, mocker, patch_soho):
mock_prop_manager = mocker.patch.object(setproperties.SetProperties, "property_manager", new_callable=mocker.PropertyMock)
mock_manager = mocker.MagicMock(spec=setproperties.PropertySetterManager)
mock_prop_manager.return_value = mock_manager
op = init_operation()
op.filter_light()
mock_manager.set_properties.assert_called_with("light")
# process_parsed_args
def test_process_parsed_args__noop(self, init_operation, mocker):
mock_prop_manager = mocker.patch.object(setproperties.SetProperties, "property_manager", new_callable=mocker.PropertyMock)
mock_mgr = mocker.MagicMock(spec=setproperties.PropertySetterManager)
mock_prop_manager.return_value = mock_mgr
mock_namespace = mocker.MagicMock(spec=argparse.Namespace)
mock_namespace.properties = None
mock_namespace.properties_file = None
op = init_operation()
op.process_parsed_args(mock_namespace)
mock_mgr.parse_from_string.assert_not_called()
mock_mgr.load_from_file.assert_not_called()
def test_process_parsed_args__properties(self, init_operation, mocker):
mock_prop_manager = mocker.patch.object(setproperties.SetProperties, "property_manager", new_callable=mocker.PropertyMock)
mock_mgr = mocker.MagicMock(spec=setproperties.PropertySetterManager)
mock_prop_manager.return_value = mock_mgr
mock_prop1 = mocker.MagicMock(spec=str)
mock_prop2 = mocker.MagicMock(spec=str)
mock_namespace = mocker.MagicMock(spec=argparse.Namespace)
mock_namespace.properties = [mock_prop1, mock_prop2]
mock_namespace.properties_file = None
op = init_operation()
op.process_parsed_args(mock_namespace)
calls = [mocker.call(mock_prop1), mocker.call(mock_prop2)]
mock_mgr.parse_from_string.assert_has_calls(calls)
mock_mgr.load_from_file.assert_not_called()
def test_process_parsed_args__properties_file(self, init_operation, mocker):
mock_prop_manager = mocker.patch.object(setproperties.SetProperties, "property_manager", new_callable=mocker.PropertyMock)
mock_mgr = mocker.MagicMock(spec=setproperties.PropertySetterManager)
mock_prop_manager.return_value = mock_mgr
mock_file1 = mocker.MagicMock(spec=str)
mock_file2 = mocker.MagicMock(spec=str)
mock_namespace = mocker.MagicMock(spec=argparse.Namespace)
mock_namespace.properties_file = [mock_file1, mock_file2]
mock_namespace.properties = None
op = init_operation()
op.process_parsed_args(mock_namespace)
calls = [mocker.call(mock_file1), mocker.call(mock_file2)]
mock_mgr.parse_from_string.assert_not_called()
mock_mgr.load_from_file.assert_has_calls(calls)
# should_run
def test_should_run__false(self, init_operation, mocker):
mock_prop_manager = mocker.patch.object(setproperties.SetProperties, "property_manager", new_callable=mocker.PropertyMock)
mock_properties = mocker.MagicMock(spec=dict)
mock_mgr = mocker.MagicMock(spec=setproperties.PropertySetterManager)
type(mock_mgr).properties = mocker.PropertyMock(return_value=mock_properties)
mock_prop_manager.return_value = mock_mgr
op = init_operation()
result = op.should_run()
assert not result
def test_should_run__true(self, init_operation, mocker):
mock_prop_manager = mocker.patch.object(setproperties.SetProperties, "property_manager", new_callable=mocker.PropertyMock)
mock_properties = {"key": "value"}
mock_mgr = mocker.MagicMock(spec=setproperties.PropertySetterManager)
type(mock_mgr).properties = mocker.PropertyMock(return_value=mock_properties)
mock_prop_manager.return_value = mock_mgr
op = init_operation()
result = op.should_run()
assert result
class Test__create_property_setter(object):
"""Test the ht.pyfilter.operations.setproperties._create_property_setter."""
def test_property(self, mocker):
mock_setter = mocker.patch("ht.pyfilter.operations.setproperties.PropertySetter", autospec=True)
mock_name = mocker.MagicMock(spec=str)
mock_block = mocker.MagicMock(spec=dict)
mock_stage = mocker.MagicMock(spec=str)
result = setproperties._create_property_setter(mock_name, mock_block, mock_stage)
assert result == mock_setter.return_value
mock_setter.assert_called_with(mock_name, mock_block)
def test_mask_plane(self, mocker):
mock_setter = mocker.patch("ht.pyfilter.operations.setproperties.MaskedPropertySetter", autospec=True)
mock_name = mocker.MagicMock(spec=str)
mock_block = mocker.MagicMock(spec=dict)
mock_block.__contains__.return_value = True
stage = "plane"
result = setproperties._create_property_setter(mock_name, mock_block, stage)
assert result == mock_setter.return_value
mock_block.__contains__.assert_called_with("mask")
mock_setter.assert_called_with(mock_name, mock_block, "plane:variable")
def test_mask_fog(self, mocker):
mock_setter = mocker.patch("ht.pyfilter.operations.setproperties.MaskedPropertySetter", autospec=True)
mock_name = mocker.MagicMock(spec=str)
mock_block = mocker.MagicMock(spec=dict)
mock_block.__contains__.return_value = True
stage = "fog"
result = setproperties._create_property_setter(mock_name, mock_block, stage)
assert result == mock_setter.return_value
mock_block.__contains__.assert_called_with("mask")
mock_setter.assert_called_with(mock_name, mock_block, "object:name")
def test_mask_light(self, mocker):
mock_setter = mocker.patch("ht.pyfilter.operations.setproperties.MaskedPropertySetter", autospec=True)
mock_name = mocker.MagicMock(spec=str)
mock_block = mocker.MagicMock(spec=dict)
mock_block.__contains__.return_value = True
stage = "light"
result = setproperties._create_property_setter(mock_name, mock_block, stage)
assert result == mock_setter.return_value
mock_block.__contains__.assert_called_with("mask")
mock_setter.assert_called_with(mock_name, mock_block, "object:name")
def test_mask_instance(self, mocker):
mock_setter = mocker.patch("ht.pyfilter.operations.setproperties.MaskedPropertySetter", autospec=True)
mock_name = mocker.MagicMock(spec=str)
mock_block = mocker.MagicMock(spec=dict)
mock_block.__contains__.return_value = True
stage = "instance"
result = setproperties._create_property_setter(mock_name, mock_block, stage)
assert result == mock_setter.return_value
mock_block.__contains__.assert_called_with("mask")
mock_setter.assert_called_with(mock_name, mock_block, "object:name")
def test_mask_unknown_stage(self, mocker):
mock_setter = mocker.patch("ht.pyfilter.operations.setproperties.PropertySetter", autospec=True)
mock_logger = mocker.patch("ht.pyfilter.operations.setproperties._logger")
mock_name = mocker.MagicMock(spec=str)
mock_block = mocker.MagicMock(spec=dict)
mock_block.__contains__.return_value = True
stage = mocker.MagicMock(spec=str)
result = setproperties._create_property_setter(mock_name, mock_block, stage)
assert result == mock_setter.return_value
mock_block.__contains__.assert_called_with("mask")
mock_logger.warning.assert_called()
mock_setter.assert_called_with(mock_name, mock_block)
class Test__process_block(object):
"""Test the ht.pyfilter.operations.setproperties._process_block."""
def test_dict(self, mocker):
mock_create = mocker.patch("ht.pyfilter.operations.setproperties._create_property_setter")
properties = []
mock_stage = mocker.MagicMock(spec=str)
mock_name = mocker.MagicMock(spec=str)
mock_block = mocker.MagicMock(spec=dict)
setproperties._process_block(properties, mock_stage, mock_name, mock_block)
assert properties == [mock_create.return_value]
mock_create.assert_called_with(mock_name, mock_block, mock_stage)
def test_list(self, mocker):
mock_create = mocker.patch("ht.pyfilter.operations.setproperties._create_property_setter")
properties = []
mock_stage = mocker.MagicMock(spec=str)
mock_name = mocker.MagicMock(spec=str)
mock_block = mocker.MagicMock(spec=dict)
setproperties._process_block(properties, mock_stage, mock_name, [mock_block])
assert properties == [mock_create.return_value]
mock_create.assert_called_with(mock_name, mock_block, mock_stage)
def test_noniterable(self, mocker):
mock_create = mocker.patch("ht.pyfilter.operations.setproperties._create_property_setter")
properties = []
mock_stage = mocker.MagicMock(spec=str)
mock_name = mocker.MagicMock(spec=str)
mock_block = mocker.MagicMock(spec=int)
setproperties._process_block(properties, mock_stage, mock_name, mock_block)
assert properties == []
mock_create.assert_not_called()
class Test__process_rendertype_block(object):
"""Test the ht.pyfilter.operations.setproperties._process_rendertype_block."""
def test_dict(self, mocker):
mock_process = mocker.patch("ht.pyfilter.operations.setproperties._process_block")
mock_properties = mocker.MagicMock(spec=list)
mock_stage = mocker.MagicMock(spec=str)
mock_rendertype = mocker.MagicMock(spec=str)
mock_name = mocker.MagicMock(spec=str)
block = {}
property_block = {
mock_name: block
}
setproperties._process_rendertype_block(mock_properties, mock_stage, mock_rendertype, property_block)
mock_process.assert_called_with(mock_properties, mock_stage, mock_name, {"rendertype": mock_rendertype})
def test_list(self, mocker):
mock_process = mocker.patch("ht.pyfilter.operations.setproperties._process_block")
mock_properties = mocker.MagicMock(spec=list)
mock_stage = mocker.MagicMock(spec=str)
mock_rendertype = mocker.MagicMock(spec=str)
mock_name = mocker.MagicMock(spec=str)
block = {}
property_block = {
mock_name: [block]
}
setproperties._process_rendertype_block(mock_properties, mock_stage, mock_rendertype, property_block)
mock_process.assert_called_with(mock_properties, mock_stage, mock_name, [{"rendertype": mock_rendertype}])
def test_error(self, mocker):
mock_properties = mocker.MagicMock(spec=list)
mock_stage = mocker.MagicMock(spec=str)
mock_rendertype = mocker.MagicMock(spec=str)
mock_name = mocker.MagicMock(spec=str)
property_block = {
mock_name: mocker.MagicMock()
}
with pytest.raises(TypeError):
setproperties._process_rendertype_block(mock_properties, mock_stage, mock_rendertype, property_block)
| [
"[email protected]"
] | |
37c277ed6f27a9c565bc80f5f94a5ff4a42bba0b | eb7513f3e59cf5ab1dda5611627793e4391582f4 | /fab_bundle/django.py | 78fb35a4743d2e5d3d4dc088cb680677edfc4fa5 | [] | no_license | linovia/fab-bundle | 69e51224216cf8d365fb128fd98165b79564fc1b | 640544bd1d9131f8814e5dc8b4ea0d050889b502 | refs/heads/master | 2021-01-18T14:31:40.857998 | 2015-05-20T05:54:49 | 2015-05-20T05:54:49 | 7,828,777 | 1 | 0 | null | 2015-01-16T13:32:22 | 2013-01-25T21:21:08 | Python | UTF-8 | Python | false | false | 2,021 | py | from fabric.api import env, run
from .utils import die, template
from .db import postgres
def manage(command, noinput=True):
"""Runs a management command"""
noinput = '--noinput' if noinput else ''
run('%s/env/bin/django-admin.py %s %s --settings=settings' % (
env.bundle_root, command, noinput))
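# e.g. manage('collectstatic') executes on the remote host:
#   <bundle_root>/env/bin/django-admin.py collectstatic --noinput --settings=settings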
def database_migration():
if 'migrations' in env:
if env.migrations == 'nashvegas':
bundle_name = env.http_host
manage('upgradedb -l', noinput=False) # This creates the migration
# tables
installed = postgres(
'psql %s %s -c "select id from nashvegas_migration limit 1;"' %
('%s', bundle_name))
installed = '0 rows' not in installed
if installed:
manage('upgradedb -e', noinput=False)
else:
# 1st deploy, force syncdb and seed migrations.
manage('syncdb')
manage('upgradedb -s', noinput=False)
elif env.migrations == 'south':
manage('syncdb')
manage('migrate')
elif env.migrations == 'migrations':
manage('migrate')
else:
die("%s is not supported for migrations." % env.migrations)
else:
manage('syncdb')
def collectstatic():
if env.staticfiles:
manage('collectstatic')
def setup():
if 'media_url' not in env:
env.media_url = '/media/'
if 'media_root' not in env:
env.media_root = env.bundle_root + '/public' + env.media_url
if 'static_url' not in env:
env.static_url = '/static/'
if 'static_root' not in env:
env.static_root = env.bundle_root + '/public' + env.static_url
    if 'staticfiles' not in env:
        env.staticfiles = True
    if 'cache' not in env:
        env.cache = 0  # redis DB
template('settings.py', '%s/settings.py' % env.bundle_root)
template('wsgi.py', '%s/wsgi.py' % env.bundle_root)
| [
"[email protected]"
] | |
339acf442deb6e5204b27de4e42be183a8496e8c | 59453f279255ed7e65ba6b134ab428e5b8c3a565 | /chapter4/ans_34.py | b4b3668a3cccd7a562615202cfff8e5c6bc2952a | [] | no_license | takapy0210/nlp_2020 | b42497f0db95f947e7ad61ec058769e824bbbc9c | 085747a8c22573a095658e202faccb7197a7041e | refs/heads/master | 2023-07-12T20:32:34.225335 | 2021-08-21T04:31:12 | 2021-08-21T04:31:12 | 258,741,277 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | """
Extract noun sequences (consecutively occurring nouns) using longest match.
"""
def parse_morpheme(morpheme):
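    # each MeCab (ipadic) line has the form:
    #   surface\tpos,pos1,pos2,pos3,conj_type,conj_form,base,reading,pronunciation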
(surface, attr) = morpheme.split('\t')
attr = attr.split(',')
morpheme_dict = {
'surface': surface,
'base': attr[6],
'pos': attr[0],
'pos1': attr[1]
}
return morpheme_dict
def get_value(items):
ret = []
noun_list = []
    for i, x in enumerate(items):
        if x['pos'] == '名詞':
            # bounds-checked lookahead so the last morpheme cannot raise IndexError
            if i + 1 < len(items) and items[i + 1]['pos'] == '名詞':
noun_list.append(x['surface'])
else:
if len(noun_list) >= 1:
noun_list.append(x['surface'])
ret.append(noun_list)
noun_list = []
return ret
file = 'neko.txt.mecab'
with open(file, mode='rt', encoding='utf-8') as f:
    # drop MeCab's 'EOS' sentinel lines and blanks; strip('EOS\n') would also
    # remove leading/trailing 'E', 'O' or 'S' characters from real lines
    morphemes_list = [s.rstrip('\n') for s in f.readlines()]
    morphemes_list = [s for s in morphemes_list if s not in ('', 'EOS')]
ans_list = list(map(parse_morpheme, morphemes_list))
ans = get_value(ans_list)
print(ans[:5])
| [
"[email protected]"
] | |
7c92ef5096f56c6a4b16819e0acc383015d28bf6 | 40f4908483b98fc4f370ff4f2d520e1284d045b3 | /phase02/immortals_repo/harness/pymmortals/generated/com/securboration/immortals/ontology/expression/expressionnodenumerical.py | febd6b50e97fc22a14dfb1027c13f4ae466cba44 | [] | no_license | TF-185/bbn-immortals | 7f70610bdbbcbf649f3d9021f087baaa76f0d8ca | e298540f7b5f201779213850291337a8bded66c7 | refs/heads/master | 2023-05-31T00:16:42.522840 | 2019-10-24T21:45:07 | 2019-10-24T21:45:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | from pymmortals.generated.com.securboration.immortals.ontology.expression.booleanexpressionnode import BooleanExpressionNode
# noinspection PyPep8Naming
class ExpressionNodeNumerical(BooleanExpressionNode):
_validator_values = dict()
_types = dict()
def __init__(self):
super().__init__()
| [
"[email protected]"
] | |
7a95e48001ec08aa7b2bedd788bde129b64e0428 | d7016f69993570a1c55974582cda899ff70907ec | /sdk/desktopvirtualization/azure-mgmt-desktopvirtualization/azure/mgmt/desktopvirtualization/aio/__init__.py | 94f7d29315942145af837775f21461b65c304732 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 896 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._desktop_virtualization_mgmt_client import DesktopVirtualizationMgmtClient
try:
from ._patch import __all__ as _patch_all
from ._patch import * # type: ignore # pylint: disable=unused-wildcard-import
except ImportError:
_patch_all = []
from ._patch import patch_sdk as _patch_sdk
__all__ = [
"DesktopVirtualizationMgmtClient",
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
| [
"[email protected]"
] | |
a67884aee78455d36ecfc302f9c6662415a78945 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03807/s795555323.py | e475574fc854fb02f0a1a7c16e5c54a3109a3d00 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | N = int(input())
A = [int(j) for j in input().split()]
if sum(A)%2==0:
print('YES')
else:
print('NO') | [
"[email protected]"
] | |
d35d55dc09e71211c9c1ab9c5f08da0e330fe2f5 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02261/s604578340.py | adf89a926b10d9913875be65ef10f9e9f300870a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 943 | py | def BubbleTrump(C, N):
i = 0
while i < N:
j = N - 1
while j > i:
if int(C[j][1]) < int(C[j-1][1]):
c = C[j]
C[j] = C[j - 1]
C[j - 1] = c
j -= 1
i += 1
return C
def SelectionTrump(C, N):
i = 0
while i < N:
minj = i
j = i
while j < N:
if int(C[j][1]) < int(C[minj][1]):
minj = j
j += 1
if minj != i:
c = C[i]
C[i] = C[minj]
C[minj] = c
i += 1
return C
n = int(input())
C = list(map(str, input().split(' ')))
ans = ''
Cb = BubbleTrump(C.copy(), n)
ans += ' '.join(map(str, Cb)) + '\nStable\n'
Cs = SelectionTrump(C.copy(), n)
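# selection sort is not stable: if its output differs anywhere from the
# (stable) bubble sort output, the result is reported as "Not stable"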
q = 0
f = 1
while q < n:
if Cb[q] != Cs[q]:
f = 0
q += 1
ans += ' '.join(map(str, Cs)) + '\n'
if f == 0:
ans += 'Not stable'
else:
ans += 'Stable'
print(ans)
| [
"[email protected]"
] | |
6c2769a2bf8a7ef756a5322aeb6a586e6aa66ebf | 5ede9cb1b4a13286c7844680f3ca3e7befb0f06d | /lang/clang32/files/patch-utils_llvm-build_llvmbuild_main.py | 3c878e708b1cbf3a19a033ce53b311e83d92c93e | [] | no_license | waynemareci/DPorts | 3fdeb479f3aaee1188704799c143acaa0191de38 | 8f47230488a8a169641b023a357dbc9d50858d30 | refs/heads/master | 2021-01-17T08:52:43.282910 | 2014-12-16T05:42:03 | 2014-12-16T05:42:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py |
$FreeBSD: head/lang/clang32/files/patch-utils_llvm-build_llvmbuild_main.py 340725 2014-01-22 17:40:44Z mat $
--- utils/llvm-build/llvmbuild/main.py.orig
+++ utils/llvm-build/llvmbuild/main.py
@@ -633,7 +633,13 @@
# We handle a few special cases of target names here for historical
# reasons, as these are the names configure currently comes up with.
- native_target_name = { 'x86' : 'X86',
+ native_target_name = { 'amd64' : 'X86',
+ 'arm' : 'ARM',
+ 'i386' : 'X86',
+ 'mips' : 'Mips',
+ 'powerpc' : 'PowerPC',
+ 'sparc64' : 'Sparc',
+ 'x86' : 'X86',
'x86_64' : 'X86',
'Unknown' : None }.get(opts.native_target,
opts.native_target)
| [
"[email protected]"
] | |
52af64e512464476b3c7a71be4c14934d950a785 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03095/s821591285.py | 5426f4262b305dbd1540275eb9ed8bdc4c475f98 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | from collections import Counter
n = int(input())
s = input()
mod = 10**9+7
scnt = Counter(s).most_common()
ans = 1
for i in range(len(scnt)):
ans = ans*(scnt[i][1]+1)%mod
ans = (ans - 1 + mod)%mod
print(ans) | [
"[email protected]"
] | |
54e92565033d5c76621554dac95020cc7b579dc2 | fa5713863cada0177d15e56f5327b79d907a119f | /test/compare_releases.py | 70cf5e5907f8577f16a47c9a3461b04ef5c48543 | [] | no_license | rappoccio/EXOVV | 1500c126d8053b47fbc425d1c2f9e76f14cb75c5 | db96edf661398b5bab131bbeba36d331b180d12d | refs/heads/master | 2020-04-03T20:12:57.959191 | 2018-08-24T01:30:03 | 2018-08-24T01:30:03 | 39,910,319 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,404 | py | #!/usr/bin/env python
from optparse import OptionParser
from jettools import getJER
from math import sqrt
parser = OptionParser()
parser.add_option('--outname', type='string', action='store',
dest='outname',
default = "",
help='Histogram to plot')
(options, args) = parser.parse_args()
argv = []
print options
import ROOT
import array
import math
import random
ROOT.gStyle.SetOptStat(000000)
#ROOT.gROOT.Macro("rootlogon.C")
#ROOT.gStyle.SetPadRightMargin(0.15)
ROOT.gStyle.SetOptStat(000000)
ROOT.gStyle.SetTitleFont(43)
#ROOT.gStyle.SetTitleFontSize(0.05)
ROOT.gStyle.SetTitleFont(43, "XYZ")
ROOT.gStyle.SetTitleSize(30, "XYZ")
ROOT.gStyle.SetTitleOffset(3.5, "X")
ROOT.gStyle.SetTitleOffset(1.5, "Y")
ROOT.gStyle.SetLabelFont(43, "XYZ")
ROOT.gStyle.SetLabelSize(24, "XYZ")
def setupPads(canv, pads):
canv.cd()
pad1 = ROOT.TPad('pad' + canv.GetName() + '1', 'pad' + canv.GetName() + '1', 0., 0.3, 1.0, 1.0)
pad1.SetBottomMargin(0)
pad2 = ROOT.TPad('pad' + canv.GetName() + '2', 'pad' + canv.GetName() + '2', 0., 0.0, 1.0, 0.3)
pad2.SetTopMargin(0)
pad1.SetLeftMargin(0.20)
pad2.SetLeftMargin(0.20)
pad2.SetBottomMargin(0.5)
pad1.Draw()
pad2.Draw()
pads.append( [pad1,pad2] )
return [pad1, pad2]
samples = [
['responses_repdf_otherway_qcdmc_2dplots.root', '7.6.x', 1, 1],
['responses_otherway_qcdmc.root', '7.4.x', 2, 2]
]
names = [
"h_pt_meas",
"h_y_meas",
"h_phi_meas",
"h_m_meas",
"h_msd_meas",
"h_rho_meas",
"h_tau21_meas",
"h_dphi_meas",
"h_ptasym_meas",
]
hists = []
stacks = []
f = []
legs = []
for isample in xrange( len(samples) ) :
f.append( ROOT.TFile(samples[isample][0] ) )
hists.append( [] )
for iname in xrange( len(names) ) :
htemp = f[isample].Get(names[iname] )
htemp.UseCurrentStyle()
#if htemp.Integral() > 0 :
# htemp.Scale( 1.0 / htemp.Integral() )
htemp.SetLineStyle(samples[isample][2])
htemp.SetLineColor(samples[isample][3])
hists[isample].append( htemp )
canvs = []
allpads = []
ratios = []
for iname in xrange( len(names) ) :
c = ROOT.TCanvas("c" + str(iname), "c" + str(iname), 800, 600 )
pads = setupPads(c, allpads)
pads[0].cd()
hists[0][iname].Draw("hist")
hists[1][iname].Draw("hist same")
leg = ROOT.TLegend(0.6, 0.6, 0.85, 0.85)
leg.SetFillColor(0)
leg.SetBorderSize(0)
leg.AddEntry( hists[0][iname], samples[0][1], 'l' )
leg.AddEntry( hists[1][iname], samples[1][1], 'l' )
leg.Draw()
legs.append(leg)
max0 = hists[0][iname].GetMaximum()
max1 = hists[1][iname].GetMaximum()
maxtot = max( max0, max1) * 1.2
hists[0][iname].SetMaximum(maxtot)
pads[0].SetLogy()
pads[0].Update()
pads[1].cd()
ratio = hists[1][iname].Clone( hists[1][iname].GetName() + "clone")
ratio.Divide( hists[0][iname] )
ratio.SetTitle("")
ratio.GetYaxis().SetTitle("Ratio")
ratio.Draw("e")
ratio.GetYaxis().SetRangeUser(0.9,1.1)
ratio.GetYaxis().SetNdivisions(2,4,0,False)
ratios.append(ratio)
pads[1].Update()
c.Update()
c.Print( 'compare_' + options.outname + names[iname] + ".png", "png")
c.Print( 'compare_' + options.outname + names[iname] + ".pdf", "pdf")
canvs.append(c)
| [
"[email protected]"
] | |
9f78ccf6f31955eb84196daddffa4cab5da805dd | 18c6f7ee10526583d8c65acc5ce04579a91fdeeb | /ch_01/7.integers.py | 0883a24b2e0b30ba2b91364da434831ca9e1cb38 | [] | no_license | cloudsecuritylabs/pythonProject_1 | 97273634df25e306d0a2aed56fcf5c836d2ac33c | 8fc0d17b549d7195f8de46a227e5bb5d9f2ed4ed | refs/heads/master | 2023-07-22T16:06:14.550571 | 2021-08-24T03:09:00 | 2021-08-24T03:09:00 | 399,319,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5 | py | (4+4) | [
"[email protected]"
] | |
35ee5dd2a7c74bc6385b06d89e2b1c6463b7d74e | 814872021c865b44d6b8c1d1db6e8e16971163fc | /IPTVPlayer/hosts/hostanyfiles.py | 6aab25ea4d647cd34ad62796da5ac8c758effa5b | [] | no_license | friman/iptvplayer-for-e2-1 | b661a31567a00616e7d04768a1600f8bdeed307b | 88292aa16a742cd1387568927490cc07722b8316 | refs/heads/master | 2021-01-14T14:03:06.537602 | 2015-11-27T23:20:27 | 2015-11-27T23:20:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,239 | py | # -*- coding: utf-8 -*-
# Based on (root)/trunk/xbmc-addons/src/plugin.video.polishtv.live/self.HOSTs/ @ 419 - Version 636
# TODO:
# Bug when searching for movies with Polish characters
###################################################
# LOCAL import
###################################################
from Plugins.Extensions.IPTVPlayer.components.iptvplayerinit import TranslateTXT as _, SetIPTVPlayerLastHostError
from Plugins.Extensions.IPTVPlayer.components.ihost import CHostBase, CBaseHostClass, CDisplayListItem, RetHost, CUrlItem
from Plugins.Extensions.IPTVPlayer.libs.anyfilesapi import AnyFilesVideoUrlExtractor
from Plugins.Extensions.IPTVPlayer.tools.iptvtools import printDBG, printExc, CSearchHistoryHelper, GetLogoDir, GetCookieDir
from Plugins.Extensions.IPTVPlayer.libs.youtube_dl.utils import clean_html
###################################################
# FOREIGN import
###################################################
import re, string
from Components.config import config, ConfigSelection, ConfigYesNo, ConfigText, getConfigListEntry
###################################################
###################################################
# E2 GUI COMMPONENTS
###################################################
from Plugins.Extensions.IPTVPlayer.components.asynccall import MainSessionWrapper
from Screens.MessageBox import MessageBox
###################################################
###################################################
# Config options for HOST
###################################################
config.plugins.iptvplayer.anyfilespl_login = ConfigText(default = "", fixed_size = False)
config.plugins.iptvplayer.anyfilespl_password = ConfigText(default = "", fixed_size = False)
def GetConfigList():
optionList = []
optionList.append(getConfigListEntry("Anyfiles.pl " + _('login:'), config.plugins.iptvplayer.anyfilespl_login))
optionList.append(getConfigListEntry("Anyfiles.pl " + _('password:'), config.plugins.iptvplayer.anyfilespl_password))
return optionList
###################################################
def gettytul():
return 'AnyFiles'
class AnyFiles(CBaseHostClass):
MAIN_URL = 'http://video.anyfiles.pl'
SEARCH_URL = MAIN_URL + '/Search.jsp'
MAIN_CAT_TAB = [{'category':'genres', 'title': _('Genres'), 'url':MAIN_URL + '/pageloading/index-categories-loader.jsp', 'icon':''},
{'category':'list_movies', 'title': _('Newest'), 'url':MAIN_URL + '/najnowsze/0', 'icon':''},
{'category':'list_movies', 'title': _('Most Popular'), 'url':MAIN_URL + '/najpopularniejsze/0', 'icon':''},
{'category':'search', 'title': _('Search'), 'search_item':True},
{'category':'search_history', 'title': _('Search history')} ]
def __init__(self):
CBaseHostClass.__init__(self, {'history':'AnyFiles', 'cookie':'anyfiles.cookie'})
self.defaultParams = {'use_cookie': True, 'load_cookie': True, 'save_cookie': True, 'cookiefile': self.COOKIE_FILE}
self.anyfiles = AnyFilesVideoUrlExtractor()
def _getFullUrl(self, url):
if 0 < len(url) and not url.startswith('http'):
url = self.MAIN_URL + url
if not self.MAIN_URL.startswith('https://'):
url = url.replace('https://', 'http://')
return url
def listsTab(self, tab, cItem, type='dir'):
printDBG("AnyFiles.listsTab")
for item in tab:
params = dict(cItem)
params.update(item)
params['name'] = 'category'
if type == 'dir':
self.addDir(params)
else: self.addVideo(params)
def listGenres(self, cItem, category):
printDBG("AnyFiles.listGenres")
sts, data = self.cm.getPage(cItem['url'], self.defaultParams)
if not sts: return
data = data.split('<div class="thumbnail"')
if len(data): del data[0]
for item in data:
url = self.cm.ph.getSearchGroups(item, 'href="([^"]+?)"', 1)[0]
title = self.cm.ph.getDataBeetwenMarkers(item, '<strong>', '</strong>', False)[1]
icon = self.cm.ph.getSearchGroups(item, 'src="([^"]+?)"', 1)[0]
params = dict(cItem)
params.update( {'category':category, 'title':title, 'url':self._getFullUrl(url), 'icon':self._getFullUrl(icon)} )
self.addDir(params)
def listMovies(self, cItem, m1='<div class="kat-box-div">', m2='<script type="text/javascript">', reTitle='class="kat-box-name">([^<]+?)<'):
printDBG("AnyFiles.listMovies")
cItem = dict(cItem)
page = cItem.get('page', 1)
if 1 == page:
sts, data = self.cm.getPage(self._getFullUrl('/all.jsp?reset_f=true'), self.defaultParams)
if not sts: return
url = cItem['url']
else: url = cItem['url'] + str(page * cItem['page_size'])
post_data = cItem.get('post_data', None)
httpParams = dict(self.defaultParams)
ContentType = cItem.get('Content-Type', None)
Referer = cItem.get('Referer', None)
if None != Referer: httpParams['header'] = {'DNT': '1', 'Referer':Referer, 'User-Agent':self.cm.HOST}
else: {'DNT':'1', 'User-Agent':self.cm.HOST}
sts, data = self.cm.getPage(url, httpParams, post_data)
if not sts: return
#printDBG(data)
tmp = self.cm.ph.getSearchGroups(data, 'new Paginator\("paginator0",[^,]*?([0-9]+?)\,[^,]*?[0-9]+?\,[^,]*?[0-9]+?\,[^"]*?"([^"]+?)"\,[^,]*?([0-9]+?)[^0-9]', 3)
if '' == tmp[1]:
tmp = self.cm.ph.getDataBeetwenMarkers(data, '.paginator(', ');', False)[1]
printDBG(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> %s" % tmp)
tmpTab = []
tmpTab.append( self.cm.ph.getSearchGroups(tmp, 'pagesTotal:[^0-9]([0-9]+?)[^0-9]', 1)[0])
tmpTab.append( '/all.jsp?st=')
tmpTab.append( self.cm.ph.getSearchGroups(tmp, 'numberObjects:[^0-9]([0-9]+?)[^0-9]', 1)[0])
tmp = tmpTab
try: cItem['num_of_pages'] = int(tmp[0])
except: cItem['num_of_pages'] = 1
try: cItem['url'] = self._getFullUrl(tmp[1])
except: pass
try: cItem['page_size'] = int(tmp[2])
except: cItem['page_size'] = 1
if '/pageloading/all-loader.jsp' in data:
httpParams['header'] = {'DNT': '1', 'Referer':url, 'User-Agent':self.cm.HOST}
url = self._getFullUrl('/pageloading/all-loader.jsp?ads=false')
sts, data = self.cm.getPage(url, httpParams, None)
if not sts: return
#printDBG(data)
m1 = '<div class="thumbnail"'
m2 = '</li>'
newhandle = True
else:
newhandle = False
data = self.cm.ph.getDataBeetwenMarkers(data, m1, m2, False)[1]
data = data.split(m1)
#if len(data): del data[0]
for item in data:
url = self.cm.ph.getSearchGroups(item, 'href="([^"]+?)"', 1)[0]
icon = self.cm.ph.getSearchGroups(item, 'src="([^"]+?)"', 1)[0]
if newhandle:
title = self.cm.ph.getDataBeetwenMarkers(item, '<strong>', '</strong>', False)[1]
try: desc = self.cleanHtmlStr(item.split('</div>')[1])
except: desc = ''
else:
title = self.cm.ph.getSearchGroups(item, reTitle, 1)[0]
try: desc = self.cleanHtmlStr(item.split('</tr>')[1])
except: desc = ''
if title != '' and url != '':
params = dict(cItem)
params.update( {'title':title, 'url':self._getFullUrl(url), 'icon':self._getFullUrl(icon), 'desc':desc} )
self.addVideo(params)
if page < cItem['num_of_pages']:
params = dict(cItem)
params.update({'title':_('Next page'), 'page':page+1})
self.addDir(params)
def listSearchResult(self, cItem, searchPattern, searchType):
printDBG("AnyFiles.searchTab")
post_data = cItem.get('post_data', None)
if None == post_data:
sts, data = self.cm.getPage(self.MAIN_URL, self.defaultParams)
if not sts: return
data = self.cm.ph.getDataBeetwenMarkers(data, 'POST', ';', False)[1]
data = re.compile('[ ]*?se:[ ]*?"([^"]+?)"').findall(data)
post_data = {}
for item in data:
post_data['se'] = item
post_data['q'] = searchPattern
cItem = dict(cItem)
cItem['post_data'] = post_data
cItem['url'] = self.SEARCH_URL
cItem['Referer'] = self.SEARCH_URL
self.listMovies(cItem, m1='<div class="u-hr-div" >', reTitle='<a [^>]+?>([^<]+?)</a>')
def getLinksForVideo(self, cItem):
return self.getLinksForFavourite(cItem['url'])
def getFavouriteData(self, cItem):
return cItem['url']
def getLinksForFavourite(self, fav_data):
printDBG("AnyFiles.getLinksForFavourite [%s]" % fav_data)
data = self.anyfiles.getVideoUrl(fav_data)
for item in data:
item['need_resolve'] = 0
return data
def handleService(self, index, refresh = 0, searchPattern = '', searchType = ''):
printDBG('handleService start')
CBaseHostClass.handleService(self, index, refresh, searchPattern, searchType)
name = self.currItem.get("name", '')
category = self.currItem.get("category", '')
printDBG( "handleService: |||||||||||||||||||||||||||||||||||| name[%s], category[%s] " % (name, category) )
self.currList = []
#MAIN MENU
if name == None:
self.listsTab(self.MAIN_CAT_TAB, {'name':'category'})
#MOVIES
elif category == 'genres':
self.listGenres(self.currItem, 'list_movies')
elif category == 'list_movies':
self.listMovies(self.currItem)
#SEARCH
elif category in ["search", "search_next_page"]:
cItem = dict(self.currItem)
cItem.update({'search_item':False, 'name':'category'})
self.listSearchResult(cItem, searchPattern, searchType)
        #SEARCH HISTORY
elif category == "search_history":
self.listsHistory({'name':'history', 'category': 'search'}, 'desc', _("Type: "))
else:
printExc()
CBaseHostClass.endHandleService(self, index, refresh)
class IPTVHost(CHostBase):
def __init__(self):
CHostBase.__init__(self, AnyFiles(), True, [CDisplayListItem.TYPE_VIDEO, CDisplayListItem.TYPE_AUDIO])
def getLogoPath(self):
return RetHost(RetHost.OK, value = [GetLogoDir('anyfileslogo.png')])
def getLinksForVideo(self, Index = 0, selItem = None):
retCode = RetHost.ERROR
retlist = []
if not self.isValidIndex(Index): return RetHost(retCode, value=retlist)
urlList = self.host.getLinksForVideo(self.host.currList[Index])
for item in urlList:
retlist.append(CUrlItem(item["name"], item["url"], item['need_resolve']))
return RetHost(RetHost.OK, value = retlist)
# end getLinksForVideo
def getResolvedURL(self, url):
# resolve url to get direct url to video file
retlist = []
urlList = self.host.getVideoLinks(url)
for item in urlList:
need_resolve = 0
retlist.append(CUrlItem(item["name"], item["url"], need_resolve))
return RetHost(RetHost.OK, value = retlist)
def converItem(self, cItem):
hostList = []
searchTypesOptions = [] # ustawione alfabetycznie
hostLinks = []
type = CDisplayListItem.TYPE_UNKNOWN
possibleTypesOfSearch = None
if 'category' == cItem['type']:
if cItem.get('search_item', False):
type = CDisplayListItem.TYPE_SEARCH
possibleTypesOfSearch = searchTypesOptions
else:
type = CDisplayListItem.TYPE_CATEGORY
elif cItem['type'] == 'video':
type = CDisplayListItem.TYPE_VIDEO
elif 'more' == cItem['type']:
type = CDisplayListItem.TYPE_MORE
elif 'audio' == cItem['type']:
type = CDisplayListItem.TYPE_AUDIO
if type in [CDisplayListItem.TYPE_AUDIO, CDisplayListItem.TYPE_VIDEO]:
url = cItem.get('url', '')
if '' != url:
hostLinks.append(CUrlItem("Link", url, 1))
title = cItem.get('title', '')
description = cItem.get('desc', '')
icon = cItem.get('icon', '')
return CDisplayListItem(name = title,
description = description,
type = type,
urlItems = hostLinks,
urlSeparateRequest = 1,
iconimage = icon,
possibleTypesOfSearch = possibleTypesOfSearch)
# end converItem
def getSearchItemInx(self):
try:
list = self.host.getCurrList()
for i in range( len(list) ):
if list[i]['category'] == 'search':
return i
except:
printDBG('getSearchItemInx EXCEPTION')
return -1
def setSearchPattern(self):
try:
list = self.host.getCurrList()
if 'history' == list[self.currIndex]['name']:
pattern = list[self.currIndex]['title']
search_type = list[self.currIndex]['search_type']
self.host.history.addHistoryItem( pattern, search_type)
self.searchPattern = pattern
self.searchType = search_type
except:
printDBG('setSearchPattern EXCEPTION')
self.searchPattern = ''
self.searchType = ''
return
| [
"[email protected]"
] | |
b6d448d6dfd72df5f4a0ff1d9f654247d4fed8d2 | 6ff0f16f7207a058c8aa4703031168ec14a2299a | /autoregressive_pybullet.py | c5139497ca0391efc249fae0a839906c48957f83 | [] | no_license | modanesh/recurrent_implicit_quantile_networks | 528ca85187cd36d05e536f03121674f38d8f49b1 | 8a31d988158856b43bac145f983a93c2f4f3b6cd | refs/heads/master | 2023-08-28T10:12:51.189662 | 2021-10-24T06:33:28 | 2021-10-24T06:33:28 | 259,685,919 | 13 | 3 | null | null | null | null | UTF-8 | Python | false | false | 36,261 | py | import os
import gym
import math
import torch
import random
import sklearn
import argparse
import numpy as np
import pybullet_envs
import torch.nn as nn
import seaborn as sns
import env_preparation
import matplotlib.pyplot as plt
from detecta import detect_cusum
from stable_baselines3 import TD3
from sklearn.metrics import roc_curve
import torch.nn.functional as functional
from sklearn.neighbors import NearestNeighbors
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.vec_env import VecNormalize, VecVideoRecorder, DummyVecEnv
sns.set()
class AutoregressiveRecurrentIQN_v2(nn.Module):
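    """Recurrent Implicit Quantile Network used as an autoregressive model of
    the next observation: sampled quantile fractions tau are embedded with a
    cosine basis (phi), gated into the GRU features, and decoded back to the
    feature space, so each forward pass returns one quantile sample of the
    next state per tau.
    """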
def __init__(self, feature_len, gru_size, quantile_embedding_dim, num_quantile_sample, device, fc1_units=64):
super(AutoregressiveRecurrentIQN_v2, self).__init__()
self.gru_size = gru_size
self.quantile_embedding_dim = quantile_embedding_dim
self.num_quantile_sample = num_quantile_sample
self.device = device
self.feature_len = feature_len
self.fc_1 = nn.Linear(feature_len, fc1_units)
self.gru = nn.GRUCell(fc1_units, gru_size)
self.fc_2 = nn.Linear(gru_size, gru_size)
self.fc_3 = nn.Linear(gru_size, feature_len)
self.phi = nn.Linear(self.quantile_embedding_dim, gru_size)
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
def forward(self, state, hx, tau, num_quantiles):
input_size = state.size()[0] # batch_size(train) or 1(get_action)
tau = tau.expand(input_size * num_quantiles, self.quantile_embedding_dim)
pi_mtx = torch.Tensor(np.pi * np.arange(0, self.quantile_embedding_dim)).expand(input_size * num_quantiles,
self.quantile_embedding_dim)
cos_tau = torch.cos(tau * pi_mtx).to(self.device)
phi = self.phi(cos_tau)
phi = functional.relu(phi)
state_tile = state.expand(input_size, num_quantiles, self.feature_len)
state_tile = state_tile.flatten().view(-1, self.feature_len).to(self.device)
x = functional.relu(self.fc_1(state_tile))
ghx = self.gru(x, hx)
x = ghx + functional.relu(self.fc_2(ghx))
x = self.fc_3(x * phi)
z = x.view(-1, num_quantiles, self.feature_len)
z = z.transpose(1, 2) # [input_size, num_output, num_quantile]
return z, ghx
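# Illustrative usage sketch (shapes only; the concrete values are assumptions,
# not from the original training loop). With feature_len=8, gru_size=64 and 32
# quantile samples:
#   model = AutoregressiveRecurrentIQN_v2(8, 64, 128, 32, torch.device("cpu"))
#   hx = torch.zeros(1 * 32, 64)                    # (batch * taus, gru_size)
#   tau = torch.Tensor(np.random.rand(1 * 32, 1))
#   z, hx = model(torch.zeros(1, 8), hx, tau, 32)   # z: [1, 8, 32]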
class AutoregressiveIQN(nn.Module):
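    """Memoryless (feed-forward) counterpart of the recurrent model above;
    identical quantile embedding, but without the GRU hidden state.
    """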
def __init__(self, feature_len, quantile_embedding_dim, num_quantile_sample, device, fc1_units=64, fc2_units=64):
super(AutoregressiveIQN, self).__init__()
self.quantile_embedding_dim = quantile_embedding_dim
self.num_quantile_sample = num_quantile_sample
self.device = device
self.feature_len = feature_len
self.fc_1 = nn.Linear(feature_len, fc1_units)
self.fc_2 = nn.Linear(fc1_units, fc2_units)
self.fc_3 = nn.Linear(fc2_units, feature_len)
self.phi = nn.Linear(self.quantile_embedding_dim, 64)
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
def forward(self, state, tau, num_quantiles):
input_size = state.size()[0] # batch_size(train) or 1(get_action)
tau = tau.expand(input_size * num_quantiles, self.quantile_embedding_dim)
pi_mtx = torch.Tensor(np.pi * np.arange(0, self.quantile_embedding_dim)).expand(input_size * num_quantiles,
self.quantile_embedding_dim)
cos_tau = torch.cos(tau * pi_mtx).to(self.device)
phi = self.phi(cos_tau)
phi = functional.relu(phi)
state_tile = state.expand(input_size, num_quantiles, self.feature_len)
state_tile = state_tile.flatten().view(-1, self.feature_len).to(self.device)
x = functional.relu(self.fc_1(state_tile))
x = functional.relu(self.fc_2(x))
x = self.fc_3(x * phi)
z = x.view(-1, num_quantiles, self.feature_len)
z = z.transpose(1, 2) # [input_size, num_output, num_quantile]
return z
def construct_batch_data(feature_len, dataset, device):
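    # zero-pad every episode to the longest length and pair each state s_t
    # with its successor s_{t+1} as (input, target) for autoregression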
episodes_states = []
episodes_next_states = []
episodes_len = []
for i, episode in enumerate(dataset):
episodes_len.append(len(episode))
max_len = max(episodes_len) - 1
for i, episode in enumerate(dataset):
# get rid of features added by TimeFeatureWrapper
episode = np.array(episode).squeeze(1)[:,:-1]
episodes_states.append(torch.Tensor(
np.concatenate((episode[:-1, :], np.zeros((max_len - len(episode[:-1, :]), feature_len))), axis=0)))
episodes_next_states.append(torch.Tensor(
np.concatenate((episode[1:, :], np.zeros((max_len - len(episode[1:, :]), feature_len))), axis=0)))
episodes_states = torch.stack(episodes_states).to(device)
episodes_next_states = torch.stack(episodes_next_states).to(device)
tensor_dataset = torch.utils.data.TensorDataset(episodes_states, episodes_next_states)
return tensor_dataset
def data_splitting(tensor_dataset, batch_size, features_min, features_max, device):
# prevent division by zero in normalization
no_need_normalization = np.where((features_min == features_max))[0]
normalized_states = (tensor_dataset[0][0].cpu().numpy() - features_min) / (features_max - features_min)
normalized_n_states = (tensor_dataset[0][1].cpu().numpy() - features_min) / (features_max - features_min)
for index in no_need_normalization:
normalized_states[:, index] = features_min[index]
normalized_n_states[:, index] = features_min[index]
normalized_tensor_dataset = torch.utils.data.TensorDataset(torch.Tensor(normalized_states).to(device),
torch.Tensor(normalized_n_states).to(device))
all_indices = np.arange(len(normalized_tensor_dataset))
max_len = len(normalized_tensor_dataset[0][0])
np.random.shuffle(all_indices)
train_indices = all_indices[:int(len(all_indices) * 90 / 100)]
test_indices = all_indices[int(len(all_indices) * 90 / 100):]
train_sampler = SubsetRandomSampler(train_indices)
test_sampler = SubsetRandomSampler(test_indices)
train_dl = DataLoader(normalized_tensor_dataset, batch_size, sampler=train_sampler)
test_dl = DataLoader(normalized_tensor_dataset, batch_size, sampler=test_sampler)
return train_dl, test_dl, max_len
def train_model(model, optimizer, hx, states, target, batch_size, num_tau_sample, device, clip_value, feature_len):
tau = torch.Tensor(np.random.rand(batch_size * num_tau_sample, 1))
states = states.reshape(states.shape[0], 1, -1)
if hx is not None:
z, hx = model(states, hx, tau, num_tau_sample)
else:
z = model(states, tau, num_tau_sample)
T_z = target.reshape(target.shape[0], 1, -1).expand(-1, num_tau_sample, feature_len).transpose(1, 2)
error_loss = T_z - z
huber_loss = functional.smooth_l1_loss(z, T_z.detach(), reduction='none')
if num_tau_sample == 1:
tau = torch.arange(0, 1, 1 / 100).view(1, 100)
else:
tau = torch.arange(0, 1, 1 / num_tau_sample).view(1, num_tau_sample)
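    # quantile Huber loss: rho_tau(u) = |tau - 1{u < 0}| * Huber(u); note that
    # the loss uses this evenly spaced tau grid, not the tau sampled above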
loss = (tau.to(device) - (error_loss < 0).float()).abs() * huber_loss
loss = loss.mean()
optimizer.zero_grad()
loss.backward()
if clip_value is not None:
torch.nn.utils.clip_grad_norm_(model.parameters(), clip_value)
optimizer.step()
return z.squeeze(2), loss, hx
def test_model(model, hx, states, target, batch_size, num_tau_sample, device, feature_len):
tau = torch.Tensor(np.random.rand(batch_size * num_tau_sample, 1))
states = states.reshape(states.shape[0], 1, -1)
if hx is not None:
z, hx = model(states, hx, tau, num_tau_sample)
else:
z = model(states, tau, num_tau_sample)
T_z = target.reshape(target.shape[0], 1, -1).expand(-1, num_tau_sample, feature_len).transpose(1, 2)
error_loss = T_z - z
huber_loss = functional.smooth_l1_loss(z, T_z.detach(), reduction='none')
if num_tau_sample == 1:
tau = torch.arange(0, 1, 1 / 100).view(1, 100)
else:
tau = torch.arange(0, 1, 1 / num_tau_sample).view(1, num_tau_sample)
loss = (tau.to(device) - (error_loss < 0).float()).abs() * huber_loss
loss = loss.mean()
return z, loss, hx
def feed_forward(model, hx, states, batch_size, num_tau_sample, sampling_size, tree_root=False):
states = states.reshape(states.shape[0], 1, -1)
if tree_root:
tau = torch.Tensor(np.random.rand(batch_size * sampling_size, 1))
if hx is not None:
z, hx = model(states, hx, tau, sampling_size)
else:
z = model(states, tau, sampling_size)
else:
tau = torch.Tensor(np.random.rand(batch_size * num_tau_sample, 1))
if hx is not None:
z, hx = model(states, hx, tau, num_tau_sample)
else:
z = model(states, tau, num_tau_sample)
return z, hx
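# Note: with tree_root=True the state is queried with `sampling_size` random
# quantiles at once, fanning one input out into a set of sampled next states;
# with tree_root=False each already-sampled branch is advanced using
# `num_tau_sample` quantiles. ar_anomaly_detection below relies on this to
# grow a tree of sampled trajectories over the prediction horizon.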
def ss_learn_model(model, optimizer, memory, max_len, gru_size, num_tau_sample, device, epsilon, clip_value, feature_len,
has_memory):
total_loss = 0
count = 0
model.train()
s_hat = None
for s_batch, mc_returns in memory:
if has_memory:
h_memory = None
for i in range(max_len):
s, mc_return = s_batch[:, i], mc_returns[:, i]
if h_memory is None:
h_memory = torch.zeros(len(s_batch) * num_tau_sample, gru_size)
if random.random() <= epsilon or s_hat is None:
s_hat, loss, h_memory = train_model(model, optimizer, h_memory.detach().to(device), s, mc_return,
len(s_batch), num_tau_sample, device, clip_value, feature_len)
else:
if len(s_hat) != len(s):
s_hat = s_hat[:len(s)]
s_hat, loss, h_memory = train_model(model, optimizer, h_memory.detach().to(device), s_hat.detach(),
mc_return, len(s_batch), num_tau_sample, device, clip_value,
feature_len)
total_loss += loss.item()
count += 1
else:
for i in range(max_len):
s, mc_return = s_batch[:, i], mc_returns[:, i]
if random.random() <= epsilon or s_hat is None:
s_hat, loss, _ = train_model(model, optimizer, None, s, mc_return, len(s_batch), num_tau_sample,
device, clip_value, feature_len)
else:
if len(s_hat) != len(s):
s_hat = s_hat[:len(s)]
s_hat, loss, _ = train_model(model, optimizer, None, s_hat.detach(), mc_return, len(s_batch),
num_tau_sample, device, clip_value, feature_len)
total_loss += loss.item()
count += 1
return total_loss / count
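# Scheduled-sampling note: epsilon is the teacher-forcing probability. The
# per-step choice made above is essentially
#     if random.random() <= epsilon: next_input = ground_truth_state
#     else:                          next_input = previous_prediction.detach()
# epsilon starts at 1 and is decayed by epsilon_decay() in the training loop,
# so the model is gradually weaned onto its own predictions.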
def ss_evaluate_model(model, memory, max_len, gru_size, num_tau_sample, device, best_total_loss, path, epsilon, feature_len, has_memory):
total_loss = 0
count = 0
model.eval()
s_hat = None
for s_batch, mc_returns in memory:
if has_memory:
h_memory = None
for i in range(max_len):
s, mc_return = s_batch[:, i], mc_returns[:, i]
if h_memory is None:
h_memory = torch.zeros(len(s_batch) * num_tau_sample, gru_size)
if random.random() <= epsilon or s_hat is None:
s_hat, loss, h_memory = test_model(model, h_memory.detach().to(device), s, mc_return, len(s_batch),
num_tau_sample, device, feature_len)
else:
if len(s_hat) != len(s):
s_hat = s_hat[:len(s)]
s_hat, loss, h_memory = test_model(model, h_memory.detach().to(device), s_hat.detach(), mc_return,
len(s_batch), num_tau_sample, device, feature_len)
s_hat = s_hat.squeeze(2)
total_loss += loss
count += 1
else:
for i in range(max_len):
s, mc_return = s_batch[:, i], mc_returns[:, i]
if random.random() <= epsilon or s_hat is None:
s_hat, loss, _ = test_model(model, None, s, mc_return, len(s_batch), num_tau_sample, device,
feature_len)
else:
if len(s_hat) != len(s):
s_hat = s_hat[:len(s)]
s_hat, loss, _ = test_model(model, None, s_hat.detach(), mc_return, len(s_batch), num_tau_sample,
device, feature_len)
s_hat = s_hat.squeeze(2)
total_loss += loss
count += 1
print("test loss :", total_loss.item() / count)
if total_loss.item() / count <= best_total_loss:
print("Saving the best model!")
best_total_loss = total_loss.item() / count
torch.save(model.state_dict(), path)
return total_loss.item() / count, best_total_loss
def plot_losses(train_loss, test_loss, results_folder, has_memory, scheduled_sampling=False):
plt.plot(train_loss, label="training loss")
plt.plot(test_loss, label="test loss")
plt.legend()
path_suffix = "_ss" if scheduled_sampling else ""
if has_memory:
plt.savefig(os.path.join(results_folder, "rnn_autoregressive_loss" + path_suffix + ".png"))
else:
plt.savefig(os.path.join(results_folder, "ff_autoregressive_loss" + path_suffix + ".png"))
plt.clf()
def epsilon_decay(epsilon, num_iterations, iteration, decay_type="linear", k=0.997):
if decay_type == "linear":
step = 1 / (num_iterations * 2)
return round(epsilon - step, 6)
elif decay_type == "exponential":
return max(k ** iteration, 0.5)
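# Example decay values (under the defaults above): linear decay subtracts
# 1/(2 * num_iterations) per call, reaching ~0.5 after num_iterations calls;
# exponential decay with k = 0.997 gives epsilon = max(0.997**i, 0.5), i.e.
# ~0.74 at i = 100, clamped at 0.5 from roughly i = 231 onwards.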
def measure_as(distribution, actual_return, input_len):
anomaly_scores = []
for i in range(input_len):
anomaly_scores.append(k_nearest_neighbors(distribution[i, :], actual_return[i]))
return np.array(anomaly_scores)
def k_nearest_neighbors(distribution, actual_return):
neigh = NearestNeighbors(n_neighbors=distribution.shape[0])
neigh.fit(distribution.reshape(-1, 1))
distances, indices = neigh.kneighbors(np.array(actual_return).reshape(-1, 1))
return distances.mean()
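# Note: n_neighbors equals the full sample count, so this "k-NN" score is just
# the mean distance from the observed return to every sample of the predicted
# distribution -- the further the observation sits from the predicted mass,
# the larger the anomaly score.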
def separated_confusion_matrix(scores, anom_occurrence, input_len):
results = {}
for i in range(input_len):
fpr, tpr, thresholds = roc_curve(anom_occurrence, np.array(scores)[:, i])
auc = sklearn.metrics.auc(fpr, tpr)
results[i] = (fpr, tpr, thresholds, auc)
return results
def false_alarm_rater(thresholds, scores, nominal_len):
fa_rates = []
for th in thresholds:
no_false_alarms = len(scores[:nominal_len][scores[:nominal_len] > th])
fa_rates.append(no_false_alarms / nominal_len)
return np.array(fa_rates).mean()
def merged_confusion_matrix(scores, anom_occurrence):
fpr, tpr, thresholds = roc_curve(anom_occurrence, scores)
auc = sklearn.metrics.auc(fpr, tpr)
nominal_len = np.where(anom_occurrence==1)[0][0]
far = false_alarm_rater(thresholds, scores, nominal_len)
return auc, far
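# Note: merged_confusion_matrix computes one ROC/AUC over a single merged
# score per time step (the per-feature scores averaged or maxed upstream),
# whereas separated_confusion_matrix returns one (fpr, tpr, thresholds, auc)
# tuple per feature; the false-alarm rate averages over all ROC thresholds
# on the pre-anomaly (nominal) segment.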
def ar_anomaly_detection(predictor, gru_size, num_tau_sample, sampling_size, device, feature_len, episode, horizon,
anomaly_occurrence, has_memory):
predictor.eval()
estimated_dists = []
anomaly_scores = []
if has_memory:
h_memory = torch.zeros(len(episode[0]) * sampling_size, gru_size)
for i in range(len(episode) - horizon):
state = episode[i][:, :feature_len]
state = torch.Tensor(state)
value_return, h_memory = feed_forward(predictor, h_memory.detach().to(device), state, len(state),
num_tau_sample, sampling_size, tree_root=True)
# unaffected_h_memory: a trick to keep memory of rnn unaffected
unaffected_h_memory = h_memory
for j in range(1, horizon):
tmp_h_memory = []
tmp_value_return = []
value_return_t = value_return
h_memory_t = h_memory
for sample in range(sampling_size):
value_return, h_memory = feed_forward(predictor, h_memory_t[sample, :].detach().reshape(1, -1),
value_return_t[:, :, sample], len(value_return_t),
num_tau_sample, sampling_size, tree_root=False)
tmp_h_memory.append(h_memory)
tmp_value_return.append(value_return)
h_memory = torch.stack(tmp_h_memory).squeeze(1)
value_return = torch.stack(tmp_value_return).squeeze(1).reshape(1, feature_len, -1)
h_memory = unaffected_h_memory
estimated_dists.append(value_return.squeeze(0).detach().cpu().numpy())
anomaly_score = measure_as(value_return.squeeze(0).detach().cpu().numpy(),
episode[i + horizon][:, :feature_len].squeeze(0), feature_len)
anomaly_scores.append(anomaly_score)
else:
for i in range(len(episode) - horizon):
state = episode[i][:, :feature_len]
state = torch.Tensor(state)
value_return, _ = feed_forward(predictor, None, state, len(state), num_tau_sample, sampling_size,
tree_root=True)
for j in range(1, horizon):
tmp_value_return = []
value_return_t = value_return
for sample in range(sampling_size):
value_return, _ = feed_forward(predictor, None, value_return_t[:, :, sample], len(value_return_t),
num_tau_sample, sampling_size, tree_root=False)
tmp_value_return.append(value_return)
value_return = torch.stack(tmp_value_return).squeeze(1).reshape(1, feature_len, -1)
estimated_dists.append(value_return.squeeze(0).detach().cpu().numpy())
anomaly_score = measure_as(value_return.squeeze(0).detach().cpu().numpy(),
episode[i + horizon][:, :feature_len].squeeze(0), feature_len)
anomaly_scores.append(anomaly_score)
separated_results = separated_confusion_matrix(anomaly_scores, anomaly_occurrence, feature_len)
averaged_as = np.array(anomaly_scores).mean(axis=1)
maxed_as = np.array(anomaly_scores).max(axis=1)
merged_avg_auc, avg_fa_rate = merged_confusion_matrix(averaged_as, anomaly_occurrence)
merged_max_auc, max_fa_rate = merged_confusion_matrix(maxed_as, anomaly_occurrence)
# print("Averaged AUC:", merged_avg_auc)
# print("Max AUC:", merged_max_auc)
return separated_results, np.array(episode).squeeze(1), np.array(estimated_dists), merged_avg_auc, merged_max_auc, \
anomaly_scores, avg_fa_rate, max_fa_rate
def plot_accuracy(feature_len, mc_returns, h_distributions, result_folder, anomaly_insertion, horizons, env_name):
fig, axs = plt.subplots(math.ceil(feature_len / 3), 3, figsize=(20, 20))
colors = ['deepskyblue', 'chartreuse', 'violet']
labels = ["True returns", "Anomaly injection"]
used_colors = ['black', 'red']
for h_i, h in enumerate(horizons):
r, c = 0, 0
for i in range(feature_len):
for xxx in range(len(h_distributions[h][:, i])):
axs[r, c].scatter(np.zeros(len(h_distributions[h][:, i][xxx])) + xxx + h, h_distributions[h][:, i][xxx],
marker='.', color=colors[h_i])
axs[r, c].plot(mc_returns[:, i], color='black')
axs[r, c].axvline(x=anomaly_insertion, color='red')
axs[r, c].set_title("Feature: " + str(i))
axs[r, c].set(xlabel='time', ylabel='value')
if r < math.ceil(feature_len / 3) - 1:
r += 1
else:
c += 1
r = 0
labels.append("H="+str(h))
used_colors.append(colors[h_i])
fig.legend(labels=labels, labelcolor=used_colors, handlelength=0)
fig.suptitle("Autoregressive model predictions vs. true data\n"
"Horizon: " + str(horizons) + "\n"
"Env: " + env_name + "\n")
fig.tight_layout()
fig.savefig(os.path.join(result_folder, "rnn_predictions_vs_truedata_h" + str(horizons) + ".png"))
fig.show()
plt.clf()
plt.cla()
plt.close()
def original_cusum(anomaly_scores, feature_len=18):
cusums = {}
for key in range(feature_len):
cusums[key] = []
cusums[key] = detect_cusum(anomaly_scores[:, key], threshold=0.01, drift=.0018, ending=True, show=False)[0]
return cusums
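# Note: detect_cusum appears to be the cumulative-sum change-point detector
# from the `detecta` package; `threshold` and `drift` are its CUSUM
# parameters, and index [0] keeps only the detected alarm (change-point)
# indices, one list per feature.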
def states_min_max_finder(train_dataset):
features_min = train_dataset[0][0].min(axis=0).values.cpu().numpy()
features_max = train_dataset[0][0].max(axis=0).values.cpu().numpy()
return features_min, features_max
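# Note: these per-feature minima/maxima drive the min-max normalization
# x' = (x - min) / (max - min) used in data_splitting and again at detection
# time; features with min == max are special-cased there to avoid dividing
# by zero.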
def input_arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--test_policy', action='store_true', default=False,
help="To test the policy")
parser.add_argument('--train_predictive_model', action='store_true', default=False,
help="To train the predictive models")
parser.add_argument('--env_name', type=str,
help="Environment of interest")
parser.add_argument('--iterations', type=int, default=1000000,
help="Training iterations")
parser.add_argument('--clip_obs', type=float, default=10.,
help="Clipping observations for normalization")
parser.add_argument('--power', type=float,
help="Power applied to the taken actions (as the nominal power). Being used for model path")
parser.add_argument('--anomalous_power', type=float, default=None,
help="Power applied to the taken actions (as the anomalous power)")
parser.add_argument('--anomaly_injection', type=int, default=None,
help="When to inject anomaly")
parser.add_argument('--horizons', nargs='+', type=int,
help="Horizon to go forward in time")
parser.add_argument('--n_eval_episodes', type=int, default=10,
help="Number of evaluation episodes")
parser.add_argument('--batch_size', type=int, default=128,
help="Batch size")
parser.add_argument('--quantile_embedding_dim', type=int, default=128,
help="Quantiles embedding dimension in IQN")
parser.add_argument('--gru_units', type=int, default=64,
help="Number of cells in the GRU")
parser.add_argument('--num_quantile_sample', type=int, default=64,
help="Number of quantile samples for IQN")
parser.add_argument('--decay_type', type=str, choices=["linear", "exponential"], default="linear",
help="How to decay epsilon in Scheduled sampling")
parser.add_argument('--lr', type=float, default=0.001,
help="Learning rate")
parser.add_argument('--clip_value', type=int, default=None,
help="Clipping gradients")
parser.add_argument('--num_tau_sample', type=int, default=1,
help="Number of tau samples for IQN, sets the distribution sampling size.")
parser.add_argument('--test_interval', type=int, default=10,
help="Intervals between train and test")
parser.add_argument('--anomaly_detection', action='store_true', default=False,
help="Do the AD when anomalies injected into the system")
parser.add_argument('--sampling_sizes', nargs='+', type=int,
help="Size of the sampling to build the tree of distributions at time t")
parser.add_argument('--case', type=int,
help="Which case of environment to run. Works like -v suffix in standard environment naming.")
parser.add_argument('--is_recurrent_v2', action='store_true', default=False,
help="Determines whether the model has memory or not -- v2 RNN model")
args = parser.parse_args()
return args
if __name__ == "__main__":
powers_dict = {"Ant": 2.5, "HalfCheetah": 0.9, "Hopper": 0.75, "Walker2D": 0.4}
anomalous_powers_dict = {"Ant": 1.5, "HalfCheetah": 0.6, "Hopper": 0.65, "Walker2D": 0.35}
args = input_arg_parser()
env_dir = os.path.join("./models", args.env_name)
if not os.path.exists(env_dir):
os.mkdir(env_dir)
optimal_memory_path = os.path.join(env_dir, "optimal_memory.pt")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if args.is_recurrent_v2:
predictive_model_path = os.path.join(env_dir, "rnn_autoregressive_v2_ss.pt")
else:
predictive_model_path = os.path.join(env_dir, "ff_autoregressive_v2_ss.pt")
policy_model_path = os.path.join(env_dir, "best_model")
num_features = gym.make(args.env_name).observation_space.shape[0]
if args.test_policy:
# Load the agent
model = TD3.load(policy_model_path)
random_seed = random.randint(0, 1000)
env = DummyVecEnv(
[env_preparation.make_env(args.env_name, 0, random_seed, wrapper_class=env_preparation.TimeFeatureWrapper,
env_kwargs={'power': args.anomalous_power,
'anomaly_injection': args.anomaly_injection,
'case': args.case})])
env = VecVideoRecorder(env, env_dir, record_video_trigger=lambda x: x == 0, video_length=1000,
name_prefix="ap" + str(args.anomalous_power) + "_ai" + str(args.anomaly_injection))
env = VecNormalize(env, norm_obs=True, norm_reward=False, clip_obs=args.clip_obs)
# do not update them at test time
env.training = False
# reward normalization is not needed at test time
env.norm_reward = False
mean_reward, std_reward, observations = evaluate_policy(model, env, n_eval_episodes=args.n_eval_episodes)
tensor_observations = construct_batch_data(num_features, observations, device)
torch.save(tensor_observations, optimal_memory_path)
print(f"Best model mean reward = {mean_reward:.2f} +/- {std_reward:.2f}")
elif args.train_predictive_model:
print("Loading predictor's training data!")
memory_rb = torch.load(optimal_memory_path, map_location=device)
# normalizing data
states_min, states_max = states_min_max_finder(memory_rb)
train_rb, test_rb, max_len = data_splitting(memory_rb, args.batch_size, states_min, states_max, device)
print("Predictor's data loaded!")
if args.is_recurrent_v2:
predictive_model = AutoregressiveRecurrentIQN_v2(num_features, args.gru_units, args.quantile_embedding_dim,
args.num_quantile_sample, device)
else:
predictive_model = AutoregressiveIQN(num_features, args.quantile_embedding_dim, args.num_quantile_sample,
device)
if os.path.exists(predictive_model_path):
print("Loading pre-trained model!")
predictive_model.load_state_dict(torch.load(predictive_model_path, map_location=device))
print("Pre-trained model loaded:", predictive_model_path)
optimizer = torch.optim.Adam(predictive_model.parameters(), lr=args.lr)
predictive_model.to(device)
predictive_model.train()
epsilon = 1
all_train_losses, all_test_losses = [], []
best_total_loss = float("inf")
for i in range(args.iterations):
total_loss = ss_learn_model(predictive_model, optimizer, train_rb, max_len, args.gru_units,
args.num_tau_sample, device, epsilon, args.clip_value, num_features,
args.is_recurrent_v2)
if i % args.test_interval == 0:
print("train loss : {}".format(total_loss))
all_train_losses.append(total_loss)
avg_eval_loss, best_total_loss = ss_evaluate_model(predictive_model, test_rb, max_len, args.gru_units,
args.num_tau_sample, device, best_total_loss,
predictive_model_path, epsilon, num_features,
args.is_recurrent_v2)
all_test_losses.append(avg_eval_loss)
plot_losses(all_train_losses, all_test_losses, env_dir, args.is_recurrent_v2, scheduled_sampling=True)
epsilon = epsilon_decay(epsilon, args.iterations, i, args.decay_type)
final_model_path = predictive_model_path.replace(".pt", "_final.pt")
print("Saving the last model!")
torch.save(predictive_model.state_dict(), final_model_path)
elif args.anomaly_detection:
if args.is_recurrent_v2:
predictive_model = AutoregressiveRecurrentIQN_v2(num_features, args.gru_units, args.quantile_embedding_dim,
args.num_quantile_sample, device)
else:
predictive_model = AutoregressiveIQN(num_features, args.quantile_embedding_dim, args.num_quantile_sample,
device)
predictive_model.load_state_dict(torch.load(predictive_model_path, map_location=device))
print("Trained model loaded:", predictive_model_path)
predictive_model.to(device)
predictive_model.eval()
policy_model = TD3.load(policy_model_path)
print("Loading predictor's training data!")
memory_rb = torch.load(optimal_memory_path, map_location=device)
states_min, states_max = states_min_max_finder(memory_rb)
# prevent division by zero in normalization
no_need_normalization = np.where((states_min == states_max))[0]
individual_feature_auc = {}
for h in args.horizons:
individual_feature_auc[h] = {}
for f in range(num_features):
individual_feature_auc[h][f] = []
for h in args.horizons:
for ss in args.sampling_sizes:
all_avg_aucs = []
all_max_aucs = []
all_avg_false_alarm_rates = []
all_max_false_alarm_rates = []
on_features_original_cusums = []
on_scores_original_cusums = []
for _ in range(args.n_eval_episodes):
random_seed = random.randint(0, 1000)
env = DummyVecEnv(
[env_preparation.make_env(args.env_name, 0, random_seed, wrapper_class=env_preparation.TimeFeatureWrapper,
env_kwargs={'power': args.anomalous_power,
'anomaly_injection': args.anomaly_injection,
'case': args.case})])
env = VecNormalize(env, norm_obs=True, norm_reward=False, clip_obs=args.clip_obs)
# do not update them at test time
env.training = False
# reward normalization is not needed at test time
env.norm_reward = False
mean_reward, std_reward, observations = evaluate_policy(policy_model, env, n_eval_episodes=1)
# print(f"Best model mean reward = {mean_reward:.2f} +/- {std_reward:.2f}")
# normalizing data
tmp_observations = ((np.array(observations[0])[:, :, :num_features] - states_min) / (states_max - states_min))
normalized_observations = [np.concatenate((tmp_observations, np.array(observations[0])[:, :, num_features].reshape(-1, 1, 1)), axis=2)]
normalized_observations = np.array(normalized_observations)
for index in no_need_normalization:
normalized_observations[0, :, 0, index] = states_min[index]
dists_per_horizon = {}
when_anomaly_occurred = np.zeros(len(normalized_observations[0]) - h)
when_anomaly_occurred[args.anomaly_injection - h:] = 1
sep_features_r, true_r, dist_r, merg_avg_auc, merg_max_auc, ass,\
avg_f_a_rate, max_f_a_rate = ar_anomaly_detection(predictive_model, args.gru_units, args.num_tau_sample,
                                                                           ss, device, num_features, normalized_observations[0],
h, when_anomaly_occurred, args.is_recurrent_v2)
all_avg_aucs.append(merg_avg_auc)
all_max_aucs.append(merg_max_auc)
all_avg_false_alarm_rates.append(avg_f_a_rate)
all_max_false_alarm_rates.append(max_f_a_rate)
on_features_cusum_changepoints = original_cusum(true_r, num_features)
on_scores_cusum_changepoints = original_cusum(np.array(ass), num_features)
on_features_original_cusums.append(on_features_cusum_changepoints[0])
on_scores_original_cusums.append(on_scores_cusum_changepoints[0])
dists_per_horizon[h] = dist_r
for f, value in sep_features_r.items():
individual_feature_auc[h][f].append(value[3])
warmup = 0
on_features_original_changes = []
for item in on_features_original_cusums:
change_points = [x[1] for x in enumerate(item) if x[1] > warmup]
if len(change_points) > 0:
on_features_original_changes.append(change_points[0])
on_scores_original_changes = []
for item in on_scores_original_cusums:
change_points = [x[1] for x in enumerate(item) if x[1] > warmup]
if len(change_points) > 0:
on_scores_original_changes.append(change_points[0])
print("********************************* H, SS:", h, ss)
if len(all_avg_aucs) != 0:
print("Averaging all avg AUCs:", round(sum(all_avg_aucs) / len(all_avg_aucs), 2))
if len(all_max_aucs) != 0:
print("Averaging all max AUCs:", round(sum(all_max_aucs) / len(all_max_aucs), 2))
print("Average of change-point detection times - using features and original CUSUM:",
np.array(on_features_original_changes).mean())
print("Average of change-point detection times - using anomaly scores and original CUSUM:",
np.array(on_scores_original_changes).mean())
print("False alarm rate - using average scores:", round(np.array(all_avg_false_alarm_rates).mean(), 2))
print("False alarm rate - using max scores:", round(np.array(all_max_false_alarm_rates).mean(), 2)) | [
"[email protected]"
] | |
4d422bbbbeeb2c15974cfa4de441a3c6671ccf70 | 3a857528f238c9460fd7c14fc0477a9bee0974a5 | /ipycanvas/_frontend.py | 2da2ede4429a6af5aed88050218d375c62a47c5c | [
"BSD-3-Clause"
] | permissive | amoeba/ipycanvas | aafb38b341828f0ff69cc4816ea3ab169096c251 | 0cc98b4d0cd1dc7a0b4057fe8d5d276efdb55928 | refs/heads/master | 2023-02-04T21:58:01.556211 | 2020-12-17T20:42:01 | 2020-12-17T20:42:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | #!/usr/bin/env python
# coding: utf-8
# Copyright (c) Martin Renou.
# Distributed under the terms of the Modified BSD License.
"""
Information about the frontend package of the widgets.
"""
module_name = "ipycanvas"
module_version = "^0.7.0"
| [
"[email protected]"
] | |
e7ad97b4def6fb6fa2145820b08d36b0816395ea | 5e31705389a23939101b9ebe9f8814bee8d27317 | /exrs/ifelse1.py | 041f8f55fe22728d3ef1a915088ddf33228e3f2a | [] | no_license | Maga1807/PP2 | 4c7b837bfa61d22b4a6ccee11144a982969c959d | 3e42e9d79ccf048b34cf460502fe92171254b838 | refs/heads/main | 2023-07-17T01:29:35.807121 | 2021-07-29T07:22:13 | 2021-07-29T07:22:13 | 379,923,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53 | py |
a = 50
b = 10
if a > b:
print("Hello World") | [
"[email protected]"
] | |
8f0e1b6af08c0ba68ced1a36c3dd18973cf224ab | 5292b03998384c0d2bb5858058892d7e45c5365b | /InCTF/2021/crypto/Lost_Baggage/main.py | 2db4763071280ad4c8a5294fa28c99cb3bb0c207 | [
"MIT"
] | permissive | TheusZer0/ctf-archives | 430ef80d367b44fd81449bcb108e367842cb8e39 | 033ccf8dab0abdbdbbaa4f0092ab589288ddb4bd | refs/heads/main | 2023-09-04T17:56:24.416820 | 2021-11-21T06:51:27 | 2021-11-21T06:51:27 | 430,603,430 | 1 | 0 | MIT | 2021-11-22T07:24:08 | 2021-11-22T07:24:07 | null | UTF-8 | Python | false | false | 1,119 | py | #!/usr/bin/python3
from random import getrandbits as rand
from gmpy2 import next_prime, invert
import pickle
FLAG = open('flag.txt', 'rb').read()
BUF = 16
def encrypt(msg, key):
msg = format(int(msg.hex(), 16), f'0{len(msg)*8}b')[::-1]
assert len(msg) == len(key)
return sum([k if m=='1' else 0 for m, k in zip(msg, key)])
def decrypt(ct, pv):
b, r, q = pv
ct = (invert(r, q)*ct)%q
msg = ''
for i in b[::-1]:
if ct >= i:
msg += '1'
ct -= i
else:
msg += '0'
return bytes.fromhex(hex(int(msg, 2))[2:])
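# Note: this is Merkle-Hellman-style knapsack decryption -- multiplying by
# r^{-1} mod q removes the modular mask, and because gen_inc_list builds a
# superincreasing private sequence b (each element exceeds the sum of all
# previous ones), the greedy subtraction above recovers one plaintext bit
# per weight.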
def gen_inc_list(size, tmp=5):
b = [next_prime(tmp+rand(BUF))]
while len(b)!=size:
val = rand(BUF)
while tmp<sum(b)+val:
tmp = next_prime(tmp<<1)
b += [tmp]
return list(map(int, b))
def gen_key(size):
b = gen_inc_list(size)
q = b[-1]
for i in range(rand(BUF//2)):
q = int(next_prime(q<<1))
r = b[-1]+rand(BUF<<3)
pb = [(r*i)%q for i in b]
return (b, r, q), pb
if __name__ == '__main__':
pvkey, pbkey = gen_key(len(FLAG) * 8)
cip = encrypt(FLAG, pbkey)
assert FLAG == decrypt(cip, pvkey)
pickle.dump({'cip': cip, 'pbkey': pbkey}, open('enc.pickle', 'wb'))
| [
"[email protected]"
] | |
12cf8b1fc6f4b7f9991b27ccfa0db18d53281139 | d96ffbadf4526db6c30a3278f644c1bc25ff4054 | /src/flickr/city.py | 54ab51925d971f99e12f845cbc74900f03e05b10 | [
"MIT"
] | permissive | dballesteros7/master-thesis-2015 | 07c03726f6ceb66e6d706ffe06e4e5eb37dcda75 | 8c0bf9a6eef172fc8167a30780ae0666f8ea2d88 | refs/heads/master | 2021-05-03T11:22:28.333473 | 2016-04-26T14:00:30 | 2016-04-26T14:00:30 | 44,601,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | from enum import Enum
from flickr.boundingbox import BoundingBox
class City(Enum):
LONDON = ('london', BoundingBox(51.672343, 0.148271, 51.384940, -0.351468))
ZURICH = ('zurich', BoundingBox(47.434680, 8.625370, 47.320230, 8.448060))
NEW_YORK = ('new-york', BoundingBox(40.915256, -73.700272, 40.491370, -74.259090))
SHANGHAI = ('shanghai', BoundingBox(31.868217, 122.247066, 30.680270, 120.858217))
def __init__(self, city_name: str, bounding_box: BoundingBox):
self.city_name = city_name
self.bounding_box = bounding_box
| [
"[email protected]"
] | |
52ea565222d6051d93f5b73b48e501c55e9bd1ce | 736032949e6ec4291dba0b06a441efe9d8ad2b82 | /colegio/models.py | 451bf327c02291b724848dab273618cf88c656d4 | [] | no_license | CoriAle/Examen-Final | b93498e10818d91cb672ef216dbc1663cb1973ef | bcbdecce989ee55e85c0dbfbf22c20950bfd2199 | refs/heads/master | 2021-08-26T05:32:34.204416 | 2017-11-21T18:56:45 | 2017-11-21T18:56:45 | 111,577,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,611 | py | from django.db import models
from django.utils import timezone
from django.contrib import admin
class Alumno(models.Model):
carnet=models.CharField(max_length=50)
nombre=models.CharField(max_length=50)
apellido=models.CharField(max_length=50)
fecha_nacimiento = models.DateField()
def __str__(self):
return '{}'.format(self.nombre)
class Profesor(models.Model):
nombre=models.CharField(max_length=50)
apellido=models.CharField(max_length=50)
fecha_nacimiento = models.DateField()
profesion=models.CharField(max_length=50)
def __str__(self):
return '{}'.format(self.nombre)
class Materia(models.Model):
nombre=models.CharField(max_length=50)
creditos= models.IntegerField()
alumno = models.ManyToManyField(Alumno, through='Nota')
    profesor = models.ForeignKey(Profesor, null=True, on_delete=models.SET_NULL)
def __str__(self):
return '{}'.format(self.nombre)
class Nota (models.Model):
material = models.ForeignKey(Materia, on_delete=models.CASCADE)
alumno = models.ForeignKey(Alumno, on_delete=models.CASCADE)
nota = models.CharField(max_length=50, blank=True)
class NotaInLine(admin.TabularInline):
model = Nota
extra = 1
class MateriaAdmin(admin.ModelAdmin):
inlines = (NotaInLine,)
class AlumnoAdmin (admin.ModelAdmin):
inlines = (NotaInLine,)
class Grado(models.Model):
"""docstring forGrado."""
nombre = models.CharField(max_length=50)
seccion = models.CharField(max_length=10)
materia = models.ManyToManyField(Materia, blank=True)
def __str__(self):
return '{}'.format(self.nombre)
| [
"[email protected]"
] | |
127b3622e28069bc808f7dc1542354d21e9dce3c | 80b5bc903e5ceb368f374f9a1313e1dc7ac698c2 | /bcoffice/members/views/member_force_disable.py | 1dbe2b8b190ef21b05374aca59a95a45fed30cbc | [] | no_license | Bobur-kobilov/back | 830471f7d36f9120aa04e3ae884f7091f0ba4333 | 5a852c4d24da3db6226ce17c437c4ae9c6b01141 | refs/heads/master | 2022-10-01T22:50:24.073570 | 2019-08-02T01:51:19 | 2019-08-02T01:51:19 | 200,145,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,360 | py | from . import *
import pytz
# Force-change a member's account status
class MemberForceDisable(APIView):
name = "member-force-disable"
permission_classes = [MemberForceDisablePermission]
def put(self, request, *args, **kwargs):
member_id = request.data.get('member_id', None)
reason = request.data.get('reason', None)
authType = request.data.get('authType', None)
if member_id is None:
            return Response( # "The {0} parameter is missing."
data=ResponseMessage.getMessageData(ResponseMessage.MESSAGE_ERR00017.format('member_id'))
, status=status.HTTP_400_BAD_REQUEST
)
auth_type = None
request_after = None
if authType == 'factor_auth':
auth_type = request.data.get('auth_type', None)
if auth_type is None:
                return Response( # "The {0} parameter is missing."
data=ResponseMessage.getMessageData(ResponseMessage.MESSAGE_ERR00017.format('auth_type'))
, status=status.HTTP_400_BAD_REQUEST
)
else:
request_after = request.data.get('after', None)
if request_after is None:
                return Response( # "The {0} parameter is missing."
data=ResponseMessage.getMessageData(ResponseMessage.MESSAGE_ERR00017.format('after'))
, status=status.HTTP_400_BAD_REQUEST
)
category = ""
if authType == 'factor_auth':
factor_type = ""
if auth_type == 'app':
category = "OTP 인증해제"
factor_type = "TwoFactor::App"
elif auth_type == 'sms':
category = "SMS 인증해제"
factor_type = "TwoFactor::Sms"
before = (TwoFactors.objects.using("exchange").filter(member_id=member_id).filter(type=factor_type))[0].activated
after = int(not before)
else:
VALUE_BY_TYPE = {
                'kyc_auth' : {'category': '신분증 인증', 'db_column': 'kyc_activated'},  # "ID verification"
                'disable': {'category': '계정비활성화', 'db_column': 'disabled'},  # "account deactivation"
                'restrict': {'category': '이용제한', 'db_column': 'restricted'},  # "usage restriction"
                'deleted': {'category': '회원탈퇴', 'db_column': 'deleted'},  # "membership withdrawal"
}
category = VALUE_BY_TYPE[authType]['category']
column = VALUE_BY_TYPE[authType]['db_column']
before = Members.objects.using("exchange").values(column).get(id=member_id)[column]
after = request_after
if before == after:
            return Response( # "The current state and the requested new state are identical."
data=ResponseMessage.getMessageData(ResponseMessage.MESSAGE_ERR00032)
, status=status.HTTP_400_BAD_REQUEST
)
user = User.objects.get(id=request.user.get_id())
body = {
"user_id": request.user.get_id()
, "emp_no": user.emp_no
, "member_id": int(member_id)
, "category": category
, "before": before
, "after": after
, "reason": reason
, "created_at": str(datetime.now(pytz.timezone('Asia/Seoul')))
}
API_BY_TYPE = {
'factor_auth': {'api': APIService.MEMBER_TWO_FACTOR, 'params': {'member_id': member_id, 'auth_type': auth_type}},
'kyc_auth': {'api': APIService.MEMBER_KYC_AUTH, 'params': {'member_id': member_id, 'active': request_after}},
'disable': {'api': APIService.MEMBER_DISABLE, 'params': {'member_id': member_id, 'disable': request_after}},
'restrict': {'api': APIService.MEMBER_RESTRICT, 'params': {'member_id': member_id, 'restrict': request_after}},
'deleted': {'api': APIService.MEMBER_DELETED, 'params': {'member_id': member_id}},
}
response = APIService.request_api(request, API_BY_TYPE[authType]['api'], API_BY_TYPE[authType]['params'])
log = CentralLogging()
log.setLog(body, request, UPDATE, response.status_code, 1200)
        # Append the audit log entry to MongoDB
# body = json.dumps(log).encode('utf8')
logging_utils.set_log(TBL_BCOFFICE_MEMBER_MOD, log.toJsonString())
return response | [
"[email protected]"
] | |
5e8d232feecaf3739a9c05723fb003384adf5152 | 23ae328d533fafdeb8b49b8a66911a9e9aef4e83 | /exercises/1901090010/1001S02E06_stats_word.py | 0b2d0b586169d3cc48703ba0459b9c517888bf39 | [] | no_license | sky3116391/selfteaching-python-camp | 85b35ba16e014e7d1c2545e450a22021acd668e7 | 635a74035b37bdd4e37919a81848e86bdb853a11 | refs/heads/master | 2020-05-25T15:51:30.853311 | 2019-05-30T06:49:20 | 2019-05-30T06:49:20 | 187,006,599 | 0 | 0 | null | 2019-05-16T10:32:55 | 2019-05-16T10:32:55 | null | UTF-8 | Python | false | false | 3,945 | py | import re
#(1) Define a function named stats_text_en
def stats_text_en(text):
    #(2) The function takes a string `text` as its argument; if it is not a string, prompt the caller
    if not isinstance(text,str):
        return 'Please enter a string'
    #(3) Count the occurrences of every English word in the argument
    # 1. Replace all punctuation with spaces
    word_str = text.replace(','," ").replace('.'," ").replace('!'," ").replace('*'," ").replace('--'," ")
    # 2. Split the text into words on whitespace
    word_list = word_str.split()
    # 3. Deduplicate the words; they become the dictionary keys
    word_one = set(word_list)
    # 4. Build a word-frequency dictionary
    dict = {}
    for word in word_one:
        dict[word] = word_list.count(word)
    # 5. Sort the frequency dictionary by value, descending
    d_list = sorted(dict.items(),key=lambda e:e[1],reverse=True)
    return d_list
#(1) Define a function named stats_text_cn
def stats_text_cn(text):
    #(2) The function takes a string `text` as its argument; if it is not a string, prompt the caller
    if not isinstance(text,str):
        return 'Please enter a string'
    # 1. Strip out all punctuation
    d = text.replace(',','').replace('-',' ').replace('.','').replace(':','').replace('《','').replace(';','').replace('"','').replace('!','').replace('?','').replace('》',' ').replace('、','').replace(',','').replace('。','').replace('“','').replace('”','').replace(':','').replace(';','').replace('\n','').replace('!','').replace('?','').replace('/','').replace('*',' ').replace(' ','').replace("'",'')
    # 2. Use a regular expression to remove all English letters and digits from the string above
    d = re.sub("[A-Za-z0-9]", "", d)
    print(d)
    # 3. Deduplicate the Chinese characters in the string; they become the dictionary keys
    d_list = list(d)
    print(d_list)
    d_index = set(d_list)
    # 4. Build a character-frequency dictionary
    dict = {}
    for i in d_index:
        dict[i] = d_list.count(i)
    # 5. Sort the frequency dictionary by value, descending
    d_list = sorted(dict.items(),key=lambda e:e[1],reverse=True)
    return d_list
if __name__ == "__main__":
    # Test the English word-frequency function
text = '''
Fall Day (by J. B. Leishman)
Lord, it is time. This was a very big summer.
Lay your shadows over the sundial,
and let the winds loose on the fields.
Command the last fruits to be full;
give them two more sunny days,
urge them on to fulfillment and throw
the last sweetness into the heavy wine.
Who has no house now, will never build one.
Whoever is alone now, will long remain so,
Will watch, read, write long letters
and will wander in the streets, here and there
restlessly, when the leaves blow.
'''
    # Test the case where the input is not a string
test_num = 1
    # Test the normal case
array = stats_text_en(text)
print(array)
    # Test the Chinese character-frequency function
text = '''
English : Fall Day by J. B. Leishman
Lord, it is time. This was a very big summer.
Lay your shadows over the sundial,
and let the winds loose on the fields.
Command the last fruits to be full;
give them two more sunny days,
urge them on to fulfillment and throw
the last sweetness into the heavy wine.
Who has no house now, will never build one.
Whoever is alone now, will long remain so,
Will watch, read, write long letters
and will wander in the streets, here and there
restlessly, when the leaves blow.
中译一:《秋日》 冯至
1905-1993。著名诗人、翻译家。
主啊!是时候了。夏日曾经很盛大。
把你的阴影落在日规上,
让秋风刮过田野。
让最后的果实长得丰满,
再给它们两天南方的气候,
迫使它们成熟,
把最后的甘甜酿入浓酒。
谁这时没有房屋,就不必建筑,
谁这时孤独,就永远孤独,
就醒着,读着,写着长信,
在林荫道上来回
不安地游荡,当着落叶纷飞。
'''
    # Run the test of the Chinese character-frequency function
array = stats_text_cn(text)
print(array) | [
"[email protected]"
] | |
049dbb89411218fa3f9f32a259e522708a2d52eb | db4de90863ba763f9fc0954a7a534518ec9d5778 | /striplog/markov.py | 9a1d8ce264ebef1247120ce05f963e40b5ab371b | [
"Apache-2.0"
] | permissive | Skuxley/striplog | 744cab01e3352107aac5275907af84b6554f5b92 | 02ae02ec4ec7c31990faa06e5156924a9b87254f | refs/heads/master | 2022-11-11T12:22:32.094127 | 2020-05-26T15:35:04 | 2020-05-26T15:35:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,618 | py | # -*- coding: utf-8 -*-
"""
Markov chains for the striplog package.
"""
from collections import namedtuple
import numpy as np
import scipy.stats
import matplotlib.pyplot as plt
from .utils import hollow_matrix
class MarkovError(Exception):
pass
def regularize(sequence, strings_are_states=False) -> tuple:
"""
Turn a sequence or sequence of sequences into a tuple of
the unique elements in the sequence(s), plus a sequence
of sequences (sort of equivalent to `np.atleast_2d()`).
Args
sequence (list-like): A list-like container of either
states, or of list-likes of states.
strings_are_states (bool): True if the strings are
themselves states (i.e. words or tokens) and not
sequences of one-character states. For example,
set to True if you provide something like:
['sst', 'mud', 'mud', 'sst', 'lst', 'lst']
Returns
tuple. A tuple of the unique states, and a sequence
of sequences.
"""
if strings_are_states:
if isinstance(sequence[0], str):
seq_of_seqs = [sequence]
else:
seq_of_seqs = sequence
else:
# Just try to iterate over the contents of the sequence.
try:
seq_of_seqs = [list(i) if len(i) > 1 else i for i in sequence]
except TypeError:
seq_of_seqs = [list(sequence)]
# Annoyingly, still have to fix case of single sequence of
# strings... this seems really hacky.
if len(seq_of_seqs[0]) == 1:
seq_of_seqs = [seq_of_seqs]
# Now we know we have a sequence of sequences.
uniques = set()
for seq in seq_of_seqs:
for i in seq:
uniques.add(i)
return np.array(sorted(uniques)), seq_of_seqs
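# Example (sketch):
#   >>> regularize(['sst', 'mud', 'mud', 'sst'], strings_are_states=True)
#   (array(['mud', 'sst'], dtype='<U3'), [['sst', 'mud', 'mud', 'sst']])
# i.e. the sorted unique states plus the input coerced to a sequence of
# sequences.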
class Markov_chain(object):
"""
Markov_chain object.
TODO
- Integrate into `striplog` or move into own project.
- Pretty transition matrix printing with state names and row/col sums.
- Allow self-transitions. See also this:
https://stackoverflow.com/q/49340520/3381305
- Hidden Markov model?
- 'Joint' Markov model... where you have lithology and bioturbation index
(say). Not sure if this is really a thing, I just made it up.
- More generally, explore other sequence models, eg LSTM.
"""
def __init__(self,
observed_counts,
states=None,
step=1,
include_self=False
):
"""
Initialize the MarkovChain instance.
Args
observed_counts (ndarray): A 2-D array representing the counts
of change of state in the Markov Chain.
states (array-like): An array-like representing the possible states
of the Markov Chain. Must be in the same order as `observed
counts`.
step (int): The maximum step size, default 1.
include_self (bool): Whether to include self-to-self transitions.
"""
self.step = step
self.include_self = include_self
self.observed_counts = np.atleast_2d(observed_counts).astype(int)
if states is not None:
self.states = np.asarray(states)
elif self.observed_counts is not None:
self.states = np.arange(self.observed_counts.shape[0])
else:
self.states = None
self.expected_counts = self._compute_expected()
return
def __repr__(self):
trans = f"Markov_chain({np.sum(self.observed_counts):.0f} transitions"
states = '[{}]'.format(", ".join("\'{}\'".format(s) for s in self.states))
return f"{trans}, states={states}, step={self.step})"
@staticmethod
def _compute_freqs(C):
"""
Compute frequencies from counts.
"""
epsilon = 1e-12
return (C.T / (epsilon+np.sum(C.T, axis=0))).T
@staticmethod
def _stop_iter(a, b, tol=0.01):
a_small = np.all(np.abs(a[-1] - a[-2]) < tol*a[-1])
b_small = np.all(np.abs(b[-1] - b[-2]) < tol*b[-1])
return (a_small and b_small)
@property
def _index_dict(self):
if self.states is None:
return {}
return {self.states[index]: index for index in range(len(self.states))}
@property
def _state_dict(self):
if self.states is None:
return {}
return {index: self.states[index] for index in range(len(self.states))}
@property
def observed_freqs(self):
return self._compute_freqs(self.observed_counts)
@property
def expected_freqs(self):
return self._compute_freqs(self.expected_counts)
@property
def _state_counts(self):
s = self.observed_counts.copy()
for axis in range(self.observed_counts.ndim - 2):
s = np.sum(s, axis=0)
a = np.sum(s, axis=0)
b = np.sum(s, axis=1)
return np.maximum(a, b)
@property
def _state_probs(self):
return self._state_counts / np.sum(self._state_counts)
@property
def normalized_difference(self):
O = self.observed_counts
E = self.expected_counts
epsilon = 1e-12
return (O - E) / np.sqrt(E + epsilon)
@classmethod
def from_sequence(cls,
sequence,
states=None,
strings_are_states=False,
include_self=False,
step=1,
ngram=False,
):
"""
Parse a sequence and make the transition matrix of the specified order.
**Provide sequence ordered in upwards direction.**
Args
sequence (list-like): A list-like, or list-like of list-likes.
The inner list-likes represent sequences of states.
For example, can be a string or list of strings, or
a list or list of lists.
states (list-like): A list or array of the names of the states.
If not provided, it will be inferred from the data.
            strings_are_states (bool): True if the strings are
themselves states (i.e. words or tokens) and not
sequences of one-character states. For example,
set to True if you provide something like:
['sst', 'mud', 'mud', 'sst', 'lst', 'lst']
include_self (bool): Whether to include self-to-self
transitions (default is `False`: do not include them).
step (integer): The distance to step. Default is 1: use
the previous state only. If 2, then the previous-but-
one state is used; but if ngram is true then both
the previous and the previous-but-one are used (and
the matrix is commensurately bigger).
ngram (bool): If True, we compute transitions from n-grams,
so the matrix will have one row for every combination
of n states. You will want to set return_states to
True to see the state n-grams.
return_states (bool): Whether to return the states.
TODO:
- Use `states` to figure out whether 'strings_are_states'.
"""
uniques, seq_of_seqs = regularize(sequence, strings_are_states=strings_are_states)
if states is None:
states = uniques
else:
states = np.asarray(list(states))
O = np.zeros(tuple(states.size for _ in range(step+1)))
for seq in seq_of_seqs:
seq = np.array(seq)
_, integer_seq = np.where(seq.reshape(-1, 1) == states)
for idx in zip(*[integer_seq[n:] for n in range(step+1)]):
O[idx] += 1
if not include_self:
O = hollow_matrix(O)
return cls(observed_counts=np.array(O),
states=states,
include_self=include_self
)
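    # Usage sketch: for an upward-ordered lithology log,
    #   mc = Markov_chain.from_sequence(['sst', 'mud', 'mud', 'sst', 'lst'],
    #                                   strings_are_states=True)
    # gives a 3x3 transition-count matrix over states ['lst', 'mud', 'sst'];
    # the self-transition (mud -> mud) is zeroed because include_self
    # defaults to False.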
def _conditional_probs(self, state):
"""
Conditional probabilities of each state, given a
current state.
"""
return self.observed_freqs[self._index_dict[state]]
def _next_state(self, current_state: str) -> str:
"""
Returns the state of the random variable at the next time
instance.
Args
current_state (str): The current state of the system.
Returns
str. One realization of the next state.
"""
return np.random.choice(self.states,
p=self._conditional_probs(current_state)
)
def generate_states(self, n:int=10, current_state:str=None) -> list:
"""
Generates the next states of the system.
Args
n (int): The number of future states to generate.
current_state (str): The state of the current random variable.
Returns
list. The next n states.
"""
if current_state is None:
current_state = np.random.choice(self.states, p=self._state_probs)
future_states = []
for _ in range(n):
next_state = self._next_state(current_state)
future_states.append(next_state)
current_state = next_state
return future_states
def _compute_expected(self):
"""
Try to use Powers & Easterling, fall back on Monte Carlo sampling
based on the proportions of states in the data.
"""
try:
E = self._compute_expected_pe()
except:
E = self._compute_expected_mc()
return E
def _compute_expected_mc(self, n=100000, verbose=False):
"""
If we can't use Powers & Easterling's method, and it's possible there's
a way to extend it to higher dimensions (which we have for step > 1),
the next best thing might be to use brute force and just compute a lot
of random sequence transitions, given the observed proportions. If I'm
not mistaken, this is what P & E's method tries to estimate iteratively.
"""
seq = np.random.choice(self.states, size=n, p=self._state_probs)
E = self.from_sequence(seq).observed_counts
E = np.sum(self.observed_counts) * E / np.sum(E)
if not self.include_self:
return hollow_matrix(E)
else:
return E
def _compute_expected_pe(self, max_iter=100, verbose=False):
"""
Compute the independent trials matrix, using method of
Powers & Easterling 1982.
"""
m = len(self.states)
M = self.observed_counts
a, b = [], []
# Loop 1
a.append(np.sum(M, axis=1) / (m - 1))
b.append(np.sum(M, axis=0) / (np.sum(a[-1]) - a[-1]))
i = 2
while i < max_iter:
if verbose:
print(f"iteration: {i-1}")
print(f"a: {a[-1]}")
print(f"b: {b[-1]}")
print()
a.append(np.sum(M, axis=1) / (np.sum(b[-1]) - b[-1]))
b.append(np.sum(M, axis=0) / (np.sum(a[-1]) - a[-1]))
# Check for stopping criterion.
if self._stop_iter(a, b, tol=0.001):
break
i += 1
E = a[-1] * b[-1].reshape(-1, 1)
if not self.include_self:
return hollow_matrix(E)
else:
return E
@property
def degrees_of_freedom(self) -> int:
m = len(self.states)
return (m - 1)**2 - m
def _chi_squared_critical(self, q=0.95, df=None):
"""
The chi-squared critical value for a confidence level q
and degrees of freedom df.
"""
if df is None:
df = self.degrees_of_freedom
return scipy.stats.chi2.ppf(q=q, df=df)
def _chi_squared_percentile(self, x, df=None):
"""
The chi-squared critical value for a confidence level q
and degrees of freedom df.
"""
if df is None:
df = self.degrees_of_freedom
return scipy.stats.chi2.cdf(x, df=df)
def chi_squared(self, q=0.95):
"""
The chi-squared statistic for the given transition
frequencies.
Also returns the critical statistic at the given confidence
level q (default 95%).
If the first number is bigger than the second number,
then you can reject the hypothesis that the sequence
is randomly ordered.
"""
# Observed and Expected matrices:
O = self.observed_counts
E = self.expected_counts
# Adjustment for divide-by-zero
epsilon = 1e-12
chi2 = np.sum((O - E)**2 / (E + epsilon))
crit = self._chi_squared_critical(q=q)
perc = self._chi_squared_percentile(x=chi2)
Chi2 = namedtuple('Chi2', ['chi2', 'crit', 'perc'])
return Chi2(chi2, crit, perc)
def as_graph(self, directed=True):
if self.normalized_difference.ndim > 2:
raise MarkovError("You can only graph one-step chains.")
try:
import networkx as nx
except ImportError:
nx = None
if nx is None:
print("Please install networkx with `pip install networkx`.")
return
if directed:
alg = nx.DiGraph
else:
alg = nx.Graph
G = nx.from_numpy_array(self.normalized_difference, create_using=alg)
nx.set_node_attributes(G, self._state_dict, 'state')
return G
def plot_graph(self, ax=None,
figsize=None,
max_size=1000,
directed=True,
edge_labels=False,
draw_neg=False
):
if self.normalized_difference.ndim > 2:
raise MarkovError("You can only graph one-step chains.")
try:
import networkx as nx
except ImportError:
nx = None
if nx is None:
print("Please install networkx with `pip install networkx`.")
return
G = self.as_graph(directed=directed)
return_ax = True
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
return_ax = False
e_neg = {(u, v):round(d['weight'],1) for (u, v, d) in G.edges(data=True) if d['weight'] <= -1.0}
e_small = {(u, v):round(d['weight'],1) for (u, v, d) in G.edges(data=True) if -1.0 < d['weight'] <= 1.0}
e_med = {(u, v):round(d['weight'],1) for (u, v, d) in G.edges(data=True) if 1.0 < d['weight'] <= 2.0}
e_large = {(u, v):round(d['weight'],1) for (u, v, d) in G.edges(data=True) if d['weight'] > 2.0}
pos = nx.spring_layout(G)
sizes = max_size * (self._state_counts / max(self._state_counts))
nx.draw_networkx_nodes(G, pos, ax=ax, node_size=sizes, node_color='orange')
nx.draw_networkx_edges(G, pos, ax=ax, edgelist=e_large, width=10, arrowsize=40, splines='curved')
nx.draw_networkx_edges(G, pos, ax=ax, edgelist=e_med, width=4, arrowsize=20)
nx.draw_networkx_edges(G, pos, ax=ax, edgelist=e_small,
width=3,
alpha=0.1,
edge_color='k')
if draw_neg:
nx.draw_networkx_edges(G, pos, ax=ax, edgelist=e_neg,
width=2,
alpha=0.1,
edge_color='k')
if edge_labels:
nx.draw_networkx_edge_labels(G,pos,edge_labels=e_large)
nx.draw_networkx_edge_labels(G,pos,edge_labels=e_med)
labels = nx.get_node_attributes(G, 'state')
ax = nx.draw_networkx_labels(G, pos, labels=labels,
font_size=20,
font_family='sans-serif',
font_color='blue')
if return_ax:
return ax
else:
plt.axis('off')
plt.show()
return
def plot_norm_diff(self, ax=None, cmap='RdBu', center_zero=True):
if self.normalized_difference.ndim > 2:
raise MarkovError("You can only plot one-step chains.")
return_ax = True
if ax is None:
fig, ax = plt.subplots(figsize=(1 + self.states.size/2, self.states.size/2))
return_ax = False
ma = np.ceil(np.max(self.normalized_difference))
if center_zero:
vmin, vmax = -ma, ma
else:
vmin, vmax = None, None
im = ax.imshow(self.normalized_difference, cmap=cmap, vmin=vmin, vmax=vmax)
plt.colorbar(im)
ax.tick_params(axis='x', which='both',
bottom=False, labelbottom=False,
top=False, labeltop=True,
)
ax.tick_params(axis='y', which='both',
left=False, labelleft=True,
right=False, labelright=False,
)
ticks = np.arange(self.states.size)
ax.set_yticks(ticks)
ax.set_xticks(ticks)
labels = [str(s) for s in self.states]
ax.set_xticklabels(labels)
ax.set_yticklabels(labels)
# Deal with probable bug in matplotlib 3.1.1
ax.set_ylim(reversed(ax.get_xlim()))
if return_ax:
return ax
else:
plt.show()
return
| [
"[email protected]"
] | |
99a090ec7d96de49b32002c3d389094efe608890 | b6233af6a39e7ab500743d6b2ac7d52f68ae3be2 | /19/00/2.py | 9a4163a95b325fbdda531426194e1e107c5ac754 | [
"CC0-1.0"
] | permissive | pylangstudy/201712 | 9754526e1d8f1c0519fcce98bc7df803f456cc4e | f18f1251074729c4a3865b113edc89ec06b54130 | refs/heads/master | 2021-09-02T06:08:08.278115 | 2017-12-30T23:04:55 | 2017-12-30T23:04:55 | 112,670,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | import asyncio
try:
from socket import socketpair
except ImportError:
from asyncio.windows_utils import socketpair
# Create a pair of connected file descriptors
rsock, wsock = socketpair()
loop = asyncio.get_event_loop()
def reader():
data = rsock.recv(100)
print("Received:", data.decode())
# We are done: unregister the file descriptor
loop.remove_reader(rsock)
# Stop the event loop
loop.stop()
# Register the file descriptor for read event
loop.add_reader(rsock, reader)
# Simulate the reception of data from the network
loop.call_soon(wsock.send, 'abc'.encode())
# Run the event loop
loop.run_forever()
# We are done, close sockets and the event loop
rsock.close()
wsock.close()
loop.close()
| [
"[email protected]"
] | |
ffff5d674aabfb39a7fe604058e907e6a8469b8d | af71dc3825a4ad9f8f3582a1532828d680005dea | /social/__init__.py | 895ff9af9158dab005e6dc20c74f0c3c076e27b9 | [
"BSD-2-Clause",
"Python-2.0",
"BSD-3-Clause"
] | permissive | loitd/python-social-auth | 48c81b05858e9aabecd7989f6721018da14406d7 | 3a2e40c1d4341a0237363e28928b540ba7e7a49b | refs/heads/master | 2021-07-15T17:07:17.700728 | 2016-04-29T16:40:05 | 2016-04-29T16:42:53 | 58,113,389 | 0 | 0 | NOASSERTION | 2021-03-20T05:01:35 | 2016-05-05T07:40:35 | Python | UTF-8 | Python | false | false | 212 | py | """
python-social-auth application, allows OpenId or OAuth user
registration/authentication just adding a few configurations.
"""
version = (0, 2, 19)
extra = ''
__version__ = '.'.join(map(str, version)) + extra
| [
"[email protected]"
] | |
30aadff3b0ad693ad37410c5ddadb1a597999933 | 9f86a677c78db9b670759595b3b8b1a7f233acfd | /listings/admin.py | f8e5077d979804e2802b8dda4a1599caec15823d | [] | no_license | husainr/btre_project | 0337f8e705ca59460a62da06f54905fff4b50841 | e5bfbdc5d9e214d04aa212badeaca09fb2f37090 | refs/heads/master | 2020-04-20T07:00:44.152929 | 2019-01-28T14:26:25 | 2019-01-28T14:26:25 | 168,700,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | from django.contrib import admin
from .models import Listing
class ListingAdmin(admin.ModelAdmin):
list_display = ('id','title','is_published','price','list_date','realtor')
list_display_links = ('id','title')
list_filter = ('realtor',)
list_editable = ('is_published',)
search_fields = ('title','description','address','city','price')
list_per_page = 25
admin.site.register(Listing, ListingAdmin)
| [
"[email protected]"
] | |
827af1cf985b10a20d8f337fcaae5cbb62d5988b | 23165420a2ced2306c1154cbd479e67006860e87 | /Algorithms/二叉搜索树的最近公共祖先.py | 91ed039655c7be3b5e96ef60887d3ebe524a141a | [] | no_license | pppineapple/LeetCode | 8a446a65f99e2f2c572696caca2550c6e3bd8acc | e41a86e9d4615079247ef3ef9a35537f4b40d338 | refs/heads/master | 2020-04-06T14:54:02.475837 | 2018-11-19T09:38:56 | 2018-11-19T09:38:56 | 157,558,169 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,737 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 23 22:29:22 2018
@author: pineapple
"""
'''
Given a binary search tree, find the lowest common ancestor of two given
nodes in that tree.
The definition of lowest common ancestor (per Baidu Baike): "For two nodes
p and q of a rooted tree T, the lowest common ancestor is a node x such that
x is an ancestor of both p and q and the depth of x is as large as possible
(a node may also be its own ancestor)."
For example, given the following binary search tree: root = [6,2,8,0,4,7,9,null,null,3,5]
        _______6______
       /              \
    ___2__          ___8__
   /      \        /      \
   0      _4       7       9
          /  \
          3   5
Example 1:
Input: root = [6,2,8,0,4,7,9,null,null,3,5], p = 2, q = 8
Output: 6
Explanation: the lowest common ancestor of nodes 2 and 8 is 6.
Example 2:
Input: root = [6,2,8,0,4,7,9,null,null,3,5], p = 2, q = 4
Output: 2
Explanation: the lowest common ancestor of nodes 2 and 4 is 2, since by definition a node may be its own ancestor.
Notes:
All node values are unique.
p and q are distinct nodes and both exist in the given binary search tree.
'''
'''
Someone else's solution: time complexity O(n), space complexity O(1)
Idea: recursion
Because the problem states this is a binary search tree, i.e. root.left < root < root.right,
the lowest common ancestor of p and q must satisfy p.val <= root.val <= q.val
(assuming p.val < q.val; the function actually takes the max and min of p.val and q.val).
So the recursion is: first take the max and min of p.val and q.val,
    minn = min(p.val, q.val)
    maxn = max(p.val, q.val)
If minn <= root.val <= maxn holds, then root is the lowest common ancestor.
Otherwise, if root.val > maxn, both p and q are in root's left subtree,
so just recurse with self.lowestCommonAncestor(root.left, p, q).
Otherwise, if root.val < minn, both p and q are in root's right subtree,
so just recurse with self.lowestCommonAncestor(root.right, p, q).
'''
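# Worked trace for the tree above: with p = 2, q = 8 we get minn = 2,
# maxn = 8 and 2 <= 6 <= 8, so the root (6) is returned immediately.
# With p = 3, q = 5: 6 > maxn recurses left to node 2, then 2 < minn
# recurses right to node 4, and 3 <= 4 <= 5 returns node 4.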
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def lowestCommonAncestor(self, root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
if not root:
return None
minn = min(p.val, q.val)
maxn = max(p.val, q.val)
if minn <= root.val <= maxn:
return root
elif root.val > maxn:
return self.lowestCommonAncestor(root.left, p, q)
elif root.val < minn:
return self.lowestCommonAncestor(root.right, p, q) | [
"[email protected]"
] | |
9969c9d8301f7eb0cef375d5b636e00db8126cfb | d2cacbd1bde10e464faabc22ad5936f1aaf4e2ef | /data/DescLearning/SummerTests/RGBtrainD/Alexnet/BUTF/OTS/MAC-true/main.py | 07ef2c62f3b6a26072f5d33198b09fcb7f045794 | [] | no_license | npiasco/dl_management | a26950a3b53c720d881a8b7ac3fa81161a048256 | 11c29a3637efa5fd223b36664d62c704e8166bab | refs/heads/master | 2021-03-16T05:44:39.806437 | 2019-09-06T13:52:52 | 2019-09-06T13:52:52 | 124,055,338 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,619 | py | #!/usr/bin/env python
import os, sys
import setlog
conf_file = os.environ['DEV'] + 'dl_management/.log/logging.yaml'
save_file = os.path.abspath(sys.argv[0])[:-len(sys.argv[0])] + 'log/'
setlog.reconfigure(conf_file, save_file)
import system.DescriptorLearning as System
if __name__ == '__main__':
machine = System.MultNet(root=os.path.abspath(sys.argv[0])[:-len(sys.argv[0])],
cnn_type='cnn.yaml',
dataset_file='../../../../../datasets/cmu_lt.yaml')
action = input('Exec:\n[t]\ttrain\n[e]\ttest\n[p]\tprint (console)\n[P]\tprint (full)\n[ ]\ttrain+test\n')
if action == 't':
machine.train()
elif action == 'e':
machine.test()
machine.plot(print_loss=False, print_val=False)
elif action == 'p':
machine.plot(print_loss=False, print_val=False)
elif action == 'P':
machine.plot()
elif action == '':
machine.train()
machine.test()
machine.plot(print_loss=False, print_val=False)
elif action == 's':
machine.serialize_net(final=False)
elif action == 'sf':
machine.serialize_net(final=True)
elif action == 'm':
machine.map_print('Main', final=False)
elif action == 'mf':
machine.map_print('Main', final=True)
elif action == 'jet':
machine.map_print()
elif action == 'dataset':
machine.print('train')
elif action == 'testq':
machine.print('test_query')
elif action == 'testd':
machine.print('test_data')
else:
raise ValueError('Unknown cmd: {}'.format(action))
| [
"[email protected]"
] | |
b856dc6e44247152863978aafc57924c228d7b01 | 9c85d132b2ed8c51f021f42ed9f20652827bca45 | /source/res/scripts/client/gui/scaleform/locale/messenger.py | 88742dcdfb19b86cca8c151693bcde5ffae16696 | [] | no_license | Mododejl/WorldOfTanks-Decompiled | 0f4063150c7148184644768b55a9104647f7e098 | cab1b318a58db1e428811c41efc3af694906ba8f | refs/heads/master | 2020-03-26T18:08:59.843847 | 2018-06-12T05:40:05 | 2018-06-12T05:40:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89,831 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/locale/MESSENGER.py
from debug_utils import LOG_WARNING
class MESSENGER(object):
CONTACTS_SETTINGSVIEW_TOOLTIPS_BTNS_APPLY = '#messenger:contacts/settingsView/tooltips/btns/apply'
CONTACTS_SETTINGSVIEW_TOOLTIPS_BTNS_CLOSE = '#messenger:contacts/settingsView/tooltips/btns/close'
CONTACTS_SEARCHVIEW_TOOLTIPS_BTNS_ADD = '#messenger:contacts/searchView/tooltips/btns/add'
CONTACTS_SEARCHVIEW_TOOLTIPS_BTNS_CLOSE = '#messenger:contacts/searchView/tooltips/btns/close'
CONTACTS_CREATEGROUPVIEW_TOOLTIPS_BTNS_APPLY = '#messenger:contacts/createGroupView/tooltips/btns/apply'
CONTACTS_CREATEGROUPVIEW_TOOLTIPS_BTNS_CLOSE = '#messenger:contacts/createGroupView/tooltips/btns/close'
CONTACTS_GROUPRENAMEVIEW_TOOLTIPS_BTNS_APPLY = '#messenger:contacts/groupRenameView/tooltips/btns/apply'
CONTACTS_GROUPRENAMEVIEW_TOOLTIPS_BTNS_CLOSE = '#messenger:contacts/groupRenameView/tooltips/btns/close'
CONTACTS_GROUPDELETEVIEW_TOOLTIPS_BTNS_APPLY = '#messenger:contacts/groupDeleteView/tooltips/btns/apply'
CONTACTS_GROUPDELETEVIEW_TOOLTIPS_BTNS_CLOSE = '#messenger:contacts/groupDeleteView/tooltips/btns/close'
MESSENGER_CONTACTS_EDITNOTE_TOOLTIPS_BTNS_CLOSE = '#messenger:messenger/contacts/editNote/tooltips/btns/close'
MESSENGER_CONTACTS_EDITNOTE_TOOLTIPS_BTNS_OK = '#messenger:messenger/contacts/editNote/tooltips/btns/ok'
MESSENGER_CONTACTS_CREATENOTE_TOOLTIPS_BTNS_OK = '#messenger:messenger/contacts/createNote/tooltips/btns/ok'
MESSENGER_CONTACTS_CREATENOTE_TOOLTIPS_BTNS_CLOSE = '#messenger:messenger/contacts/createNote/tooltips/btns/close'
LISTVIEW_TITLE = '#messenger:listView/title'
LISTVIEW_EMPTYLIST_TEMPLATE = '#messenger:listView/emptyList/template'
LISTVIEW_EMPTYLIST_INFO = '#messenger:listView/emptyList/info'
LISTVIEW_EMPTYLIST_INVITE = '#messenger:listView/emptyList/invite'
LISTVIEW_EMPTYLIST_OFFER = '#messenger:listView/emptyList/offer'
STATUS_ONLINE = '#messenger:status/online'
STATUS_OFFLINE = '#messenger:status/offline'
LOBBY_BUTTONS_CHANNELS = '#messenger:lobby/buttons/channels'
LOBBY_BUTTONS_CONTACTS = '#messenger:lobby/buttons/contacts'
LOBBY_BUTTONS_SEND = '#messenger:lobby/buttons/send'
LOBBY_BUTTONS_CANCEL = '#messenger:lobby/buttons/cancel'
LOBBY_BSCHANNELS_TITLE_STARTTIME = '#messenger:lobby/bsChannels/title/startTime'
LOBBY_CHANNELS_ACTIONS_REMOVE = '#messenger:lobby/channels/actions/remove'
LOBBY_CHANNELS_ACTIONS_SETTIME = '#messenger:lobby/channels/actions/setTime'
LOBBY_CHANNELS_ACTIONS_SETDAY = '#messenger:lobby/channels/actions/setDay'
LOBBY_USERS_ACTIONS_SHOWINFO = '#messenger:lobby/users/actions/showInfo'
LOBBY_USERS_ACTIONS_CREATEPRIVATECHANNEL = '#messenger:lobby/users/actions/createPrivateChannel'
LOBBY_USERS_ACTIONS_ADDTOFRIENDS = '#messenger:lobby/users/actions/addToFriends'
LOBBY_USERS_ACTIONS_ADDTOIGNORE = '#messenger:lobby/users/actions/addToIgnore'
LOBBY_USERS_ACTIONS_COPYNAMETOCLIPBOARD = '#messenger:lobby/users/actions/copyNameToClipboard'
LOBBY_FAQ_TITLE = '#messenger:lobby/faq/title'
LOBBY_FAQ_CLOSE = '#messenger:lobby/faq/close'
BATTLE_HINTS_CHANGERECEIVER = '#messenger:battle/hints/changeReceiver'
BATTLE_TOOLTIPS_USINGCHAT = '#messenger:battle/toolTips/usingChat'
BATTLE_TOOLTIPS_STARTCHAT = '#messenger:battle/toolTips/startChat'
BATTLE_TOOLTIPS_DEFAULT = '#messenger:battle/toolTips/default'
BATTLE_TOOLTIPS_WITHSQUAD = '#messenger:battle/toolTips/withSquad'
BATTLE_TOOLTIPS_DURINGCHAT = '#messenger:battle/toolTips/duringChat'
BATTLE_TOOLTIPS_CHANGERECEIVER = '#messenger:battle/toolTips/changeReceiver'
BATTLE_TOOLTIPS_SEND = '#messenger:battle/toolTips/send'
BATTLE_TOOLTIPS_CANCEL = '#messenger:battle/toolTips/cancel'
BATTLE_TOOLTIPS_MUTEPLAYER = '#messenger:battle/toolTips/mutePlayer'
BATTLE_RECEIVERS_TEAM = '#messenger:battle/receivers/team'
BATTLE_RECEIVERS_COMMON = '#messenger:battle/receivers/common'
BATTLE_RECEIVERS_SQUAD = '#messenger:battle/receivers/squad'
BATTLE_RECEIVERS_CHATISLOCKED = '#messenger:battle/receivers/chatIsLocked'
BATTLE_RECEIVERS_CHATISLOCKEDTOOLTIP = '#messenger:battle/receivers/chatIsLockedToolTip'
BATTLE_UNKNOWN_ALLY = '#messenger:battle/unknown/ally'
BATTLE_UNKNOWN_ENEMY = '#messenger:battle/unknown/enemy'
BATTLE_HISTORY_HEADER = '#messenger:battle/history/header'
BATTLE_HISTORY_TITLE = '#messenger:battle/history/title'
BATTLE_HISTORY_UPMESSAGE = '#messenger:battle/history/upMessage'
BATTLE_HISTORY_DOWNMESSAGE = '#messenger:battle/history/downMessage'
BATTLE_HISTORY_LATESTMESSAGE = '#messenger:battle/history/latestMessage'
DIALOGS_USERINFO_TITLE = '#messenger:dialogs/userInfo/title'
DIALOGS_USERINFO_LABELS_STATUS = '#messenger:dialogs/userInfo/labels/status'
DIALOGS_USERINFO_LABELS_COMMON = '#messenger:dialogs/userInfo/labels/common'
DIALOGS_USERINFO_LABELS_CREATIONTIME = '#messenger:dialogs/userInfo/labels/creationTime'
DIALOGS_USERINFO_LABELS_LASTBATTLETIME = '#messenger:dialogs/userInfo/labels/lastBattleTime'
DIALOGS_USERINFO_BUTTONS_CREATEPRIVATECHANNEL = '#messenger:dialogs/userInfo/buttons/createPrivateChannel'
DIALOGS_USERINFO_BUTTONS_ADDTOFRIENDS = '#messenger:dialogs/userInfo/buttons/addToFriends'
DIALOGS_USERINFO_BUTTONS_ADDTOIGNORED = '#messenger:dialogs/userInfo/buttons/addToIgnored'
DIALOGS_USERINFO_BUTTONS_ADDMUTED = '#messenger:dialogs/userInfo/buttons/addMuted'
DIALOGS_USERINFO_BUTTONS_REMOVEMUTED = '#messenger:dialogs/userInfo/buttons/removeMuted'
DIALOGS_USERINFO_BUTTONS_CLOSE = '#messenger:dialogs/userInfo/buttons/close'
DIALOGS_USERINFO_BAN_REASON_TITLE = '#messenger:dialogs/userInfo/ban_reason/title'
DIALOGS_USERINFO_BAN_REASON_SUSPICION = '#messenger:dialogs/userInfo/ban_reason/suspicion'
DIALOGS_USERINFO_BAN_REASON_GUILTY = '#messenger:dialogs/userInfo/ban_reason/guilty'
DIALOGS_CHANNELS_TITLE = '#messenger:dialogs/channels/title'
DIALOGS_CHANNELS_BUTTONS_CREATE = '#messenger:dialogs/channels/buttons/create'
DIALOGS_CHANNELS_BUTTONS_SEARCHANDJOIN = '#messenger:dialogs/channels/buttons/searchAndJoin'
DIALOGS_CHANNELS_BUTTONS_CLOSE = '#messenger:dialogs/channels/buttons/close'
DIALOGS_CREATECHANNEL_TITLE = '#messenger:dialogs/createChannel/title'
DIALOGS_CREATECHANNEL_LABELS_NAME = '#messenger:dialogs/createChannel/labels/name'
DIALOGS_CREATECHANNEL_LABELS_PASSWORD = '#messenger:dialogs/createChannel/labels/password'
DIALOGS_CREATECHANNEL_LABELS_USEPASSWORD = '#messenger:dialogs/createChannel/labels/usePassword'
DIALOGS_CREATECHANNEL_LABELS_FILLPASSWORD = '#messenger:dialogs/createChannel/labels/fillPassword'
DIALOGS_CREATECHANNEL_LABELS_RETYPEPASSWORD = '#messenger:dialogs/createChannel/labels/retypePassword'
DIALOGS_CREATECHANNEL_BUTTONS_CREATE = '#messenger:dialogs/createChannel/buttons/create'
DIALOGS_CREATECHANNEL_BUTTONS_CLOSE = '#messenger:dialogs/createChannel/buttons/close'
DIALOGS_SEARCHCHANNEL_TITLE = '#messenger:dialogs/searchChannel/title'
DIALOGS_SEARCHCHANNEL_LABELS_SEARCH = '#messenger:dialogs/searchChannel/labels/search'
DIALOGS_SEARCHCHANNEL_LABELS_RESULT = '#messenger:dialogs/searchChannel/labels/result'
DIALOGS_SEARCHCHANNEL_BUTTONS_SEARCH = '#messenger:dialogs/searchChannel/buttons/search'
DIALOGS_SEARCHCHANNEL_BUTTONS_JOIN = '#messenger:dialogs/searchChannel/buttons/join'
DIALOGS_SEARCHCHANNEL_BUTTONS_CLOSE = '#messenger:dialogs/searchChannel/buttons/close'
DIALOGS_CONTACTS_TITLE = '#messenger:dialogs/contacts/title'
DIALOGS_CONTACTS_TREE_FRIENDS = '#messenger:dialogs/contacts/tree/friends'
DIALOGS_CONTACTS_TREE_CLAN = '#messenger:dialogs/contacts/tree/clan'
DIALOGS_CONTACTS_TREE_IGNORED = '#messenger:dialogs/contacts/tree/ignored'
DIALOGS_CONTACTS_TREE_MUTED = '#messenger:dialogs/contacts/tree/muted'
DIALOGS_CONTACTS_TREE_FRIENDSHIP_REQUEST = '#messenger:dialogs/contacts/tree/friendship_request'
DIALOGS_CONTACTS_CONTACT_USERINFO = '#messenger:dialogs/contacts/contact/userInfo'
DIALOGS_CONTACTS_CONTACT_MONEYTRANSFER = '#messenger:dialogs/contacts/contact/moneyTransfer'
DIALOGS_CONTACTS_CONTACT_CREATEPRIVATECHANNEL = '#messenger:dialogs/contacts/contact/createPrivateChannel'
DIALOGS_CONTACTS_CONTACT_ADDTOFRIENDS = '#messenger:dialogs/contacts/contact/addToFriends'
DIALOGS_CONTACTS_CONTACT_ADDTOCLAN = '#messenger:dialogs/contacts/contact/addToClan'
DIALOGS_CONTACTS_CONTACT_REMOVEFROMFRIENDS = '#messenger:dialogs/contacts/contact/removeFromFriends'
DIALOGS_CONTACTS_CONTACT_ADDTOIGNORED = '#messenger:dialogs/contacts/contact/addToIgnored'
DIALOGS_CONTACTS_CONTACT_REMOVEFROMIGNORED = '#messenger:dialogs/contacts/contact/removeFromIgnored'
DIALOGS_CONTACTS_CONTACT_COPYTOCLIPBORAD = '#messenger:dialogs/contacts/contact/copyToClipBorad'
DIALOGS_CONTACTS_GROUP_SORTBYNAME = '#messenger:dialogs/contacts/group/sortByName'
DIALOGS_CONTACTS_GROUP_SORTBYSTATUS = '#messenger:dialogs/contacts/group/sortByStatus'
DIALOGS_CONTACTS_BUTTONS_CLOSE = '#messenger:dialogs/contacts/buttons/close'
DIALOGS_SEARCHCONTACT_TITLE = '#messenger:dialogs/searchContact/title'
DIALOGS_SEARCHCONTACT_LABELS_SEARCH = '#messenger:dialogs/searchContact/labels/search'
DIALOGS_SEARCHCONTACT_LABELS_RESULT = '#messenger:dialogs/searchContact/labels/result'
DIALOGS_SEARCHCONTACT_BUTTONS_SEARCH = '#messenger:dialogs/searchContact/buttons/search'
DIALOGS_SEARCHCONTACT_BUTTONS_ADDTOFRIENDS = '#messenger:dialogs/searchContact/buttons/addToFriends'
DIALOGS_SEARCHCONTACT_BUTTONS_ADDTOIGNORED = '#messenger:dialogs/searchContact/buttons/addToIgnored'
DIALOGS_SEARCHCONTACT_BUTTONS_USERINFO = '#messenger:dialogs/searchContact/buttons/userInfo'
DIALOGS_SEARCHCONTACT_BUTTONS_CLOSE = '#messenger:dialogs/searchContact/buttons/close'
DIALOGS_CONNECTINGTOSECURECHANNEL_TITLE = '#messenger:dialogs/connectingToSecureChannel/title'
DIALOGS_CONNECTINGTOSECURECHANNEL_LABELS_WARRNING = '#messenger:dialogs/connectingToSecureChannel/labels/warrning'
DIALOGS_CONNECTINGTOSECURECHANNEL_LABELS_INFO = '#messenger:dialogs/connectingToSecureChannel/labels/info'
DIALOGS_CONNECTINGTOSECURECHANNEL_LABELS_PASSWORD = '#messenger:dialogs/connectingToSecureChannel/labels/password'
DIALOGS_CONNECTINGTOSECURECHANNEL_BUTTONS_CONNECT = '#messenger:dialogs/connectingToSecureChannel/buttons/connect'
DIALOGS_CONNECTINGTOSECURECHANNEL_BUTTONS_CLOSE = '#messenger:dialogs/connectingToSecureChannel/buttons/close'
DIALOGS_CONNECTINGTOSECURECHANNEL_ERRORS_INVALIDPASSWORD_TITLE = '#messenger:dialogs/connectingToSecureChannel/errors/invalidPassword/title'
DIALOGS_CONNECTINGTOSECURECHANNEL_ERRORS_INVALIDPASSWORD_MESSAGE = '#messenger:dialogs/connectingToSecureChannel/errors/invalidPassword/message'
DIALOGS_SERVICECHANNEL_TITLE = '#messenger:dialogs/serviceChannel/title'
DIALOGS_SQUAD_MESSAGE_ALLREADY = '#messenger:dialogs/squad/message/allReady'
DIALOGS_SQUAD_MESSAGE_GETREADY = '#messenger:dialogs/squad/message/getReady'
DIALOGS_SQUAD_MESSAGE_GETNOTREADY = '#messenger:dialogs/squad/message/getNotReady'
DIALOGS_SQUAD_EVENT_VEHICLE = '#messenger:dialogs/squad/event/vehicle'
DIALOGS_SQUAD_MESSAGE_INVALIDVEHICLELEVEL = '#messenger:dialogs/squad/message/invalidVehicleLevel'
DIALOGS_SQUAD_MESSAGE_SPGFORBIDDEN = '#messenger:dialogs/squad/message/spgForbidden'
DIALOGS_SQUAD_MESSAGE_SPGFULL = '#messenger:dialogs/squad/message/spgFull'
DIALOGS_SQUAD_MESSAGE_VEHICLES_DIFFERENTLEVELS = '#messenger:dialogs/squad/message/vehicles/differentLevels'
DIALOGS_FALLOUTSQUADCHANNEL_VEHICLES = '#messenger:dialogs/falloutSquadChannel/vehicles'
DIALOGS_FALLOUTSQUADCHANNEL_VEHICLES_STEELHUNTING = '#messenger:dialogs/falloutSquadChannel/vehicles/steelHunting'
DIALOGS_FALLOUTSQUADCHANNEL_VEHICLES_DOMINATION = '#messenger:dialogs/falloutSquadChannel/vehicles/domination'
DIALOGS_FALLOUTSQUADCHANNEL_VEHICLESREQUIRED_STEELHUNTING = '#messenger:dialogs/falloutSquadChannel/vehiclesRequired/steelHunting'
DIALOGS_FALLOUTSQUADCHANNEL_VEHICLESREQUIRED_DOMINATION_MAX = '#messenger:dialogs/falloutSquadChannel/vehiclesRequired/domination/max'
DIALOGS_FALLOUTSQUADCHANNEL_VEHICLESREQUIRED_DOMINATION_MIN = '#messenger:dialogs/falloutSquadChannel/vehiclesRequired/domination/min'
DIALOGS_FALLOUTSQUADCHANNEL_VEHICLENOTIFY = '#messenger:dialogs/falloutSquadChannel/vehicleNotify'
DIALOGS_FALLOUTSQUADCHANNEL_VEHICLENOTIFYRANGE = '#messenger:dialogs/falloutSquadChannel/vehicleNotifyRange'
DIALOGS_FALLOUTSQUADCHANNEL_VEHICLENOTIFYMULTITEAM = '#messenger:dialogs/falloutSquadChannel/vehicleNotifyMultiteam'
DIALOGS_SIMPLESQUAD_VEHICLE_RECOMMENDATIONLEVEL = '#messenger:dialogs/simpleSquad/vehicle/recommendationLevel'
DIALOGS_SQUADCHANNEL_CHATNAME = '#messenger:dialogs/squadChannel/chatName'
DIALOGS_SQUADCHANNEL_SIMPLECHATNAME = '#messenger:dialogs/squadChannel/simpleChatName'
DIALOGS_SQUADCHANNEL_MEMBERS = '#messenger:dialogs/squadChannel/members'
DIALOGS_SQUADCHANNEL_VEHICLES = '#messenger:dialogs/squadChannel/vehicles'
DIALOGS_SQUADCHANNEL_VEHICLESLBL = '#messenger:dialogs/squadChannel/vehiclesLbl'
DIALOGS_SIMPLESQUAD_VEHICLELEVEL = '#messenger:dialogs/simpleSquad/vehicleLevel'
DIALOGS_EVENTSQUAD_VEHICLE = '#messenger:dialogs/eventSquad/vehicle'
DIALOGS_SQUADCHANNEL_BATTLETYPE = '#messenger:dialogs/squadChannel/battleType'
DIALOGS_SQUADCHANNEL_BATTLETYPEDOMINATION = '#messenger:dialogs/squadChannel/battleTypeDomination'
DIALOGS_SQUADCHANNEL_BUTTONS_INVITE = '#messenger:dialogs/squadChannel/buttons/invite'
DIALOGS_SQUADCHANNEL_BUTTONS_RECOMMEND = '#messenger:dialogs/squadChannel/buttons/recommend'
DIALOGS_SIMPLESQUAD_RECOMMENDATIONLEVEL = '#messenger:dialogs/simpleSquad/recommendationLevel'
DIALOGS_SQUADCHANNEL_BUTTONS_LEAVE = '#messenger:dialogs/squadChannel/buttons/leave'
DIALOGS_SQUADCHANNEL_BUTTONS_READY = '#messenger:dialogs/squadChannel/buttons/ready'
DIALOGS_SQUADCHANNEL_BUTTONS_NOTREADY = '#messenger:dialogs/squadChannel/buttons/notReady'
DIALOGS_SQUADCHANNEL_BUTTONS_DISMISS = '#messenger:dialogs/squadChannel/buttons/dismiss'
DIALOGS_SQUADCHANNEL_BUTTONS_AFK = '#messenger:dialogs/squadChannel/buttons/afk'
DIALOGS_SQUADCHANNEL_MESSEGE_NOPREMIUM = '#messenger:dialogs/squadChannel/messege/noPremium'
DIALOGS_SQUADCHANNEL_CONTEXT_KICKPLAYER = '#messenger:dialogs/squadChannel/context/kickPlayer'
DIALOGS_SQUADCHANNEL_HEADERMSG_SQUADFORMATION = '#messenger:dialogs/squadChannel/headerMsg/squadFormation'
DIALOGS_SQUADCHANNEL_HEADERMSG_SQUADFORMATIONRESTRICTION = '#messenger:dialogs/squadChannel/headerMsg/squadFormationRestriction'
DIALOGS_SQUADCHANNEL_HEADERMSG_EVENTFORMATIONRESTRICTION = '#messenger:dialogs/squadChannel/headerMsg/eventFormationRestriction'
DIALOGS_SQUADCHANNEL_HEADERMSG_EPICBATTLEFORMATIONRESTRICTION = '#messenger:dialogs/squadChannel/headerMsg/epicBattleFormationRestriction'
DIALOGS_SQUADCHANNEL_HEADERMSG_DYNSQUAD = '#messenger:dialogs/squadChannel/headerMsg/dynSquad'
DIALOGS_BSCHANNEL_CONTEXT_KICKPLAYER = '#messenger:dialogs/bsChannel/context/kickPlayer'
DIALOGS_TEAMCHANNEL_BUTTONS_DISMISS = '#messenger:dialogs/teamChannel/buttons/dismiss'
DIALOGS_TEAMCHANNEL_BUTTONS_INVITE = '#messenger:dialogs/teamChannel/buttons/invite'
DIALOGS_SQUADCHANNEL_TOOLTIPS_STATUS_OFFLINE = '#messenger:dialogs/squadChannel/tooltips/status/offline'
DIALOGS_SQUADCHANNEL_TOOLTIPS_STATUS_NOTREADY = '#messenger:dialogs/squadChannel/tooltips/status/notReady'
DIALOGS_SQUADCHANNEL_TOOLTIPS_STATUS_READY = '#messenger:dialogs/squadChannel/tooltips/status/ready'
DIALOGS_SQUADCHANNEL_TOOLTIPS_STATUS_INBATTLE = '#messenger:dialogs/squadChannel/tooltips/status/inBattle'
DIALOGS_SQUADCHANNEL_TOOLTIPS_STATUS_AFK = '#messenger:dialogs/squadChannel/tooltips/status/afk'
SERVER_ERRORS_INTERNALERROR_TITLE = '#messenger:server/errors/internalError/title'
SERVER_ERRORS_INTERNALERROR_MESSAGE = '#messenger:server/errors/internalError/message'
SERVER_ERRORS_CHANNELALREADYEXISTS_TITLE = '#messenger:server/errors/channelAlreadyExists/title'
SERVER_ERRORS_CHANNELALREADYEXISTS_MESSAGE = '#messenger:server/errors/channelAlreadyExists/message'
SERVER_ERRORS_CHANNELDESTROYED_TITLE = '#messenger:server/errors/channelDestroyed/title'
SERVER_ERRORS_CHANNELDESTROYED_MESSAGE = '#messenger:server/errors/channelDestroyed/message'
SERVER_ERRORS_PASSWORDREQUIRED_TITLE = '#messenger:server/errors/passwordRequired/title'
SERVER_ERRORS_PASSWORDREQUIRED_MESSAGE = '#messenger:server/errors/passwordRequired/message'
SERVER_ERRORS_INCORRECTPASSWORD_TITLE = '#messenger:server/errors/incorrectPassword/title'
SERVER_ERRORS_INCORRECTPASSWORD_MESSAGE = '#messenger:server/errors/incorrectPassword/message'
SERVER_ERRORS_MEMBERBANNED_TITLE = '#messenger:server/errors/memberBanned/title'
SERVER_ERRORS_MEMBERBANNED_MESSAGE = '#messenger:server/errors/memberBanned/message'
SERVER_ERRORS_CHATBANNED_TITLE = '#messenger:server/errors/chatBanned/title'
SERVER_ERRORS_CHATBANNED_MESSAGE = '#messenger:server/errors/chatBanned/message'
SERVER_ERRORS_MEMBERDISCONNECTING_TITLE = '#messenger:server/errors/memberDisconnecting/title'
SERVER_ERRORS_MEMBERDISCONNECTING_MESSAGE = '#messenger:server/errors/memberDisconnecting/message'
SERVER_ERRORS_NOTALLOWED_TITLE = '#messenger:server/errors/notAllowed/title'
SERVER_ERRORS_NOTALLOWED_MESSAGE = '#messenger:server/errors/notAllowed/message'
SERVER_ERRORS_CONNECTTIMEOUT_TITLE = '#messenger:server/errors/connectTimeout/title'
SERVER_ERRORS_CONNECTTIMEOUT_MESSAGE = '#messenger:server/errors/connectTimeout/message'
SERVER_ERRORS_INITIALIZATIONFAILURE_TITLE = '#messenger:server/errors/initializationFailure/title'
SERVER_ERRORS_INITIALIZATIONFAILURE_MESSAGE = '#messenger:server/errors/initializationFailure/message'
SERVER_ERRORS_USERNOTEXISTS_TITLE = '#messenger:server/errors/userNotExists/title'
SERVER_ERRORS_USERNOTEXISTS_MESSAGE = '#messenger:server/errors/userNotExists/message'
SERVER_ERRORS_USERSROSTERLIMITREACHED_TITLE = '#messenger:server/errors/usersRosterLimitReached/title'
SERVER_ERRORS_USERSROSTERLIMITREACHED_MESSAGE = '#messenger:server/errors/usersRosterLimitReached/message'
SERVER_ERRORS_ACTIVECHANNELSLIMITREACHED_TITLE = '#messenger:server/errors/activeChannelsLimitReached/title'
SERVER_ERRORS_ACTIVECHANNELSLIMITREACHED_MESSAGE = '#messenger:server/errors/activeChannelsLimitReached/message'
SERVER_ERRORS_SQLERROR_TITLE = '#messenger:server/errors/sqlError/title'
SERVER_ERRORS_SQLERROR_MESSAGE = '#messenger:server/errors/sqlError/message'
SERVER_ERRORS_INCORRECTCHARACTER_TITLE = '#messenger:server/errors/incorrectCharacter/title'
SERVER_ERRORS_INCORRECTCHARACTER_MESSAGE = '#messenger:server/errors/incorrectCharacter/message'
SERVER_ERRORS_ADDFRIENDERROR_TITLE = '#messenger:server/errors/addFriendError/title'
SERVER_ERRORS_ADDFRIENDERROR_MESSAGE = '#messenger:server/errors/addFriendError/message'
SERVER_ERRORS_ADDIGNOREDERROR_TITLE = '#messenger:server/errors/addIgnoredError/title'
SERVER_ERRORS_ADDIGNOREDERROR_MESSAGE = '#messenger:server/errors/addIgnoredError/message'
SERVER_ERRORS_USERIGNOREDERROR_TITLE = '#messenger:server/errors/userIgnoredError/title'
SERVER_ERRORS_USERIGNOREDERROR_MESSAGE = '#messenger:server/errors/userIgnoredError/message'
SERVER_ERRORS_CHATCOMMANDERROR_TITLE = '#messenger:server/errors/chatCommandError/title'
SERVER_ERRORS_CHATCOMMANDERROR_MESSAGE = '#messenger:server/errors/chatCommandError/message'
SERVER_ERRORS_MEMBERALREADYBANNED_TITLE = '#messenger:server/errors/memberAlreadyBanned/title'
SERVER_ERRORS_MEMBERALREADYBANNED_MESSAGE = '#messenger:server/errors/memberAlreadyBanned/message'
SERVER_ERRORS_MEMBERALREADYMODERATOR_TITLE = '#messenger:server/errors/memberAlreadyModerator/title'
SERVER_ERRORS_MEMBERALREADYMODERATOR_MESSAGE = '#messenger:server/errors/memberAlreadyModerator/message'
SERVER_ERRORS_MEMBERNOTMODERATOR_TITLE = '#messenger:server/errors/memberNotModerator/title'
SERVER_ERRORS_MEMBERNOTMODERATOR_MESSAGE = '#messenger:server/errors/memberNotModerator/message'
SERVER_ERRORS_COMMANDINCOOLDOWN_TITLE = '#messenger:server/errors/commandInCooldown/title'
SERVER_ERRORS_COMMANDINCOOLDOWN_MESSAGE = '#messenger:server/errors/commandInCooldown/message'
SERVER_ERRORS_CREATEPRIVATEERROR_TITLE = '#messenger:server/errors/createPrivateError/title'
SERVER_ERRORS_CREATEPRIVATEERROR_MESSAGE = '#messenger:server/errors/createPrivateError/message'
SERVER_ERRORS_ACTIONINCOOLDOWN_TITLE = '#messenger:server/errors/actionInCooldown/title'
SERVER_ERRORS_ACTIONINCOOLDOWN_MESSAGE = '#messenger:server/errors/actionInCooldown/message'
SERVER_ERRORS_INVITECOMMANDERROR_TITLE = '#messenger:server/errors/inviteCommandError/title'
SERVER_ERRORS_INVITECOMMANDERROR_MESSAGE = '#messenger:server/errors/inviteCommandError/message'
SERVER_ERRORS_MEMBERSLIMITREACHED_TITLE = '#messenger:server/errors/membersLimitReached/title'
SERVER_ERRORS_MEMBERSLIMITREACHED_MESSAGE = '#messenger:server/errors/membersLimitReached/message'
SERVER_ERRORS_INCORRECTCOMMANDARGUMENT_TITLE = '#messenger:server/errors/incorrectCommandArgument/title'
SERVER_ERRORS_INCORRECTCOMMANDARGUMENT_MESSAGE = '#messenger:server/errors/incorrectCommandArgument/message'
SERVER_ERRORS_INVALIDCHANNELNAME_TITLE = '#messenger:server/errors/invalidChannelName/title'
SERVER_ERRORS_INVALIDCHANNELNAME_MESSAGE = '#messenger:server/errors/invalidChannelName/message'
CLIENT_INFORMATION_ADDTOFRIENDS_MESSAGE = '#messenger:client/information/addToFriends/message'
CLIENT_INFORMATION_ADDTOIGNORED_MESSAGE = '#messenger:client/information/addToIgnored/message'
CLIENT_INFORMATION_ADDTOTMPIGNORED_MESSAGE = '#messenger:client/information/addToTmpIgnored/message'
CLIENT_INFORMATION_SETMUTED_MESSAGE = '#messenger:client/information/setMuted/message'
CLIENT_INFORMATION_UNSETMUTED_MESSAGE = '#messenger:client/information/unsetMuted/message'
CLIENT_INFORMATION_REMOVEFROMFRIENDS_MESSAGE = '#messenger:client/information/removeFromFriends/message'
CLIENT_INFORMATION_REMOVEFROMIGNORED_MESSAGE = '#messenger:client/information/removeFromIgnored/message'
CLIENT_INFORMATION_REMOVEFROMTMPIGNORED_MESSAGE = '#messenger:client/information/removeFromTmpIgnored/message'
CLIENT_INFORMATION_EMPTYSEARCHRESULT_MESSAGE = '#messenger:client/information/emptySearchResult/message'
CLIENT_WARNING_EMPTYUSERSEARCHTOKEN_TITLE = '#messenger:client/warning/emptyUserSearchToken/title'
CLIENT_WARNING_EMPTYUSERSEARCHTOKEN_MESSAGE = '#messenger:client/warning/emptyUserSearchToken/message'
CLIENT_WARNING_INVALIDUSERSEARCHTOKEN_TITLE = '#messenger:client/warning/invalidUserSearchToken/title'
CLIENT_WARNING_INVALIDUSERSEARCHTOKEN_MESSAGE = '#messenger:client/warning/invalidUserSearchToken/message'
CLIENT_ERROR_BROADCASTINCOOLDOWN = '#messenger:client/error/broadcastInCooldown'
CLIENT_ERROR_COMMANDINCOOLDOWN_LIMITED = '#messenger:client/error/commandInCooldown/limited'
CLIENT_ERROR_COMMANDINCOOLDOWN_UNLIMITED = '#messenger:client/error/commandInCooldown/unlimited'
CLIENT_DYNSQUAD_INVITESENT = '#messenger:client/dynSquad/inviteSent'
CLIENT_DYNSQUAD_INVITERECEIVED = '#messenger:client/dynSquad/inviteReceived'
CLIENT_DYNSQUAD_INVITEACCEPTED_USER = '#messenger:client/dynSquad/inviteAccepted/user'
CLIENT_DYNSQUAD_INVITEACCEPTED_MYSELF_ENABLEVOIP = '#messenger:client/dynSquad/inviteAccepted/myself/enableVOIP'
CLIENT_DYNSQUAD_INVITEACCEPTED_MYSELF_DISABLEVOIP = '#messenger:client/dynSquad/inviteAccepted/myself/disableVOIP'
CLIENT_DYNSQUAD_INVITEACCEPTED_MYSELF_SPECIFYVOIP = '#messenger:client/dynSquad/inviteAccepted/myself/specifyVOIP'
CLIENT_DYNSQUAD_INVITEACCEPTED_MYSELF_WITHOUTVOIP = '#messenger:client/dynSquad/inviteAccepted/myself/withoutVOIP'
CLIENT_DYNSQUAD_CREATED_OWNER_ENABLEVOIP = '#messenger:client/dynSquad/created/owner/enableVOIP'
CLIENT_DYNSQUAD_CREATED_OWNER_DISABLEVOIP = '#messenger:client/dynSquad/created/owner/disableVOIP'
CLIENT_DYNSQUAD_CREATED_OWNER_SPECIFYVOIP = '#messenger:client/dynSquad/created/owner/specifyVOIP'
CLIENT_DYNSQUAD_CREATED_OWNER_WITHOUTVOIP = '#messenger:client/dynSquad/created/owner/withoutVOIP'
CLIENT_DYNSQUAD_CREATED_RECRUIT_ENABLEVOIP = '#messenger:client/dynSquad/created/recruit/enableVOIP'
CLIENT_DYNSQUAD_CREATED_RECRUIT_DISABLEVOIP = '#messenger:client/dynSquad/created/recruit/disableVOIP'
CLIENT_DYNSQUAD_CREATED_RECRUIT_SPECIFYVOIP = '#messenger:client/dynSquad/created/recruit/specifyVOIP'
CLIENT_DYNSQUAD_CREATED_RECRUIT_WITHOUTVOIP = '#messenger:client/dynSquad/created/recruit/withoutVOIP'
CLIENT_DYNSQUAD_ENABLEVOIP = '#messenger:client/dynSquad/enableVOIP'
CLIENT_DYNSQUAD_DISABLEVOIP = '#messenger:client/dynSquad/disableVOIP'
CLIENT_DYNSQUAD_CREATED_ALLIES = '#messenger:client/dynSquad/created/allies'
CLIENT_DYNSQUAD_CREATED_ENEMIES = '#messenger:client/dynSquad/created/enemies'
COMMAND_FINDUSER = '#messenger:command/findUser'
COMMAND_FINDCHATCHANNELS = '#messenger:command/findChatChannels'
COMMAND_ADDFRIEND = '#messenger:command/addFriend'
COMMAND_ADDIGNORED = '#messenger:command/addIgnored'
COMMAND_REMOVEFRIEND = '#messenger:command/removeFriend'
COMMAND_REMOVEIGNORED = '#messenger:command/removeIgnored'
COMMAND_SETMUTED = '#messenger:command/setMuted'
COMMAND_UNSETMUTED = '#messenger:command/unsetMuted'
COMMAND_CREATEPRIVATE = '#messenger:command/createPrivate'
COMMAND_ACCEPTINVITE = '#messenger:command/acceptInvite'
COMMAND_REJECTINVITE = '#messenger:command/rejectInvite'
COMMAND_GETACTIVEINVITES = '#messenger:command/getActiveInvites'
COMMAND_GETARCHIVEINVITES = '#messenger:command/getArchiveInvites'
COMMAND_GETMEMBERSCOUNT = '#messenger:command/getMembersCount'
COMMAND_REQUESTSYSTEMCHATCHANNELS = '#messenger:command/requestSystemChatChannels'
COMMAND_CREATECHATCHANNEL = '#messenger:command/createChatChannel'
COMMAND_BROADCAST = '#messenger:command/broadcast'
COMMAND_CREATEINVITE = '#messenger:command/createInvite'
COMMAND_LEAVECHATCHANNEL = '#messenger:command/leaveChatChannel'
COMMAND_REQUESTCHATCHANNELMEMBERS = '#messenger:command/requestChatChannelMembers'
EVENTS_ENTERCHANNEL = '#messenger:events/enterChannel'
EVENTS_LEAVECHANNEL = '#messenger:events/leaveChannel'
SERVICECHANNELMESSAGES_PRIORITYMESSAGETITLE = '#messenger:serviceChannelMessages/priorityMessageTitle'
SERVICECHANNELMESSAGES_BUTTONS_CLOSE = '#messenger:serviceChannelMessages/buttons/close'
SERVICECHANNELMESSAGES_SERVERREBOOT = '#messenger:serviceChannelMessages/serverReboot'
SERVICECHANNELMESSAGES_SERVERREBOOTCANCELLED = '#messenger:serviceChannelMessages/serverRebootCancelled'
SERVICECHANNELMESSAGES_BATTLERESULTS_BUTTON = '#messenger:serviceChannelMessages/battleResults/button'
SERVICECHANNELMESSAGES_BATTLERESULTS_VICTORY = '#messenger:serviceChannelMessages/battleResults/victory'
SERVICECHANNELMESSAGES_BATTLERESULTS_TECHVICTORY = '#messenger:serviceChannelMessages/battleResults/techVictory'
SERVICECHANNELMESSAGES_BATTLERESULTS_DRAWGAME = '#messenger:serviceChannelMessages/battleResults/drawGame'
SERVICECHANNELMESSAGES_BATTLERESULTS_ENDED = '#messenger:serviceChannelMessages/battleResults/ended'
SERVICECHANNELMESSAGES_BATTLERESULTS_DEFEAT = '#messenger:serviceChannelMessages/battleResults/defeat'
SERVICECHANNELMESSAGES_BATTLERESULTS_TECHDEFEAT = '#messenger:serviceChannelMessages/battleResults/techDefeat'
SERVICECHANNELMESSAGES_BATTLERESULTS_BATTLE = '#messenger:serviceChannelMessages/battleResults/battle'
SERVICECHANNELMESSAGES_BATTLERESULTS_TRAINING_BATTLE = '#messenger:serviceChannelMessages/battleResults/training_battle'
SERVICECHANNELMESSAGES_BATTLERESULTS_TANKS = '#messenger:serviceChannelMessages/battleResults/tanks'
SERVICECHANNELMESSAGES_BATTLERESULTS_EXPERIENCE = '#messenger:serviceChannelMessages/battleResults/experience'
SERVICECHANNELMESSAGES_BATTLERESULTS_FREEXP = '#messenger:serviceChannelMessages/battleResults/freeXP'
SERVICECHANNELMESSAGES_BATTLERESULTS_CREDITS = '#messenger:serviceChannelMessages/battleResults/credits'
SERVICECHANNELMESSAGES_BATTLERESULTS_CREDITS_NOTACCRUED = '#messenger:serviceChannelMessages/battleResults/credits/notAccrued'
SERVICECHANNELMESSAGES_BATTLERESULTS_GOLD = '#messenger:serviceChannelMessages/battleResults/gold'
SERVICECHANNELMESSAGES_BATTLERESULTS_CRYSTAL = '#messenger:serviceChannelMessages/battleResults/crystal'
SERVICECHANNELMESSAGES_BATTLERESULTS_DOUBLEXPFACTOR = '#messenger:serviceChannelMessages/battleResults/doubleXpFactor'
SERVICECHANNELMESSAGES_BATTLERESULTS_PENALTYFORDAMAGEALLIES = '#messenger:serviceChannelMessages/battleResults/penaltyForDamageAllies'
SERVICECHANNELMESSAGES_BATTLERESULTS_CONTRIBUTIONFORDAMAGEALLIES = '#messenger:serviceChannelMessages/battleResults/contributionForDamageAllies'
SERVICECHANNELMESSAGES_BATTLERESULTS_ACTION = '#messenger:serviceChannelMessages/battleResults/action'
SERVICECHANNELMESSAGES_BATTLERESULTS_ACTIONS = '#messenger:serviceChannelMessages/battleResults/actions'
SERVICECHANNELMESSAGES_BATTLERESULTS_ACTIONSRESULTSGOLD = '#messenger:serviceChannelMessages/battleResults/actionsResultsGold'
SERVICECHANNELMESSAGES_BATTLERESULTS_ACTIONSRESULTSCREDITS = '#messenger:serviceChannelMessages/battleResults/actionsResultsCredits'
SERVICECHANNELMESSAGES_BATTLERESULTS_ACTIONSRESULTSCRYSTAL = '#messenger:serviceChannelMessages/battleResults/actionsResultsCrystal'
SERVICECHANNELMESSAGES_BATTLERESULTS_ACTIONSRESULTSFREEXP = '#messenger:serviceChannelMessages/battleResults/actionsResultsFreeXP'
SERVICECHANNELMESSAGES_BATTLERESULTS_ACHIEVES = '#messenger:serviceChannelMessages/battleResults/achieves'
SERVICECHANNELMESSAGES_BATTLERESULTS_QUESTS = '#messenger:serviceChannelMessages/battleResults/quests'
SERVICECHANNELMESSAGES_BATTLERESULTS_QUESTS_VEHICLESLOTS = '#messenger:serviceChannelMessages/battleResults/quests/vehicleSlots'
SERVICECHANNELMESSAGES_BATTLERESULTS_QUESTS_FREEXP = '#messenger:serviceChannelMessages/battleResults/quests/freeXP'
SERVICECHANNELMESSAGES_BATTLERESULTS_QUESTS_CREDITS = '#messenger:serviceChannelMessages/battleResults/quests/credits'
SERVICECHANNELMESSAGES_BATTLERESULTS_QUESTS_GOLD = '#messenger:serviceChannelMessages/battleResults/quests/gold'
SERVICECHANNELMESSAGES_BATTLERESULTS_QUESTS_CRYSTAL = '#messenger:serviceChannelMessages/battleResults/quests/crystal'
SERVICECHANNELMESSAGES_BATTLERESULTS_QUESTS_BERTHS = '#messenger:serviceChannelMessages/battleResults/quests/berths'
SERVICECHANNELMESSAGES_BATTLERESULTS_QUESTS_ACHIEVEMENTS = '#messenger:serviceChannelMessages/battleResults/quests/achievements'
SERVICECHANNELMESSAGES_BATTLERESULTS_QUESTS_PREMIUM = '#messenger:serviceChannelMessages/battleResults/quests/premium'
SERVICECHANNELMESSAGES_BATTLERESULTS_QUESTS_ITEMS = '#messenger:serviceChannelMessages/battleResults/quests/items'
SERVICECHANNELMESSAGES_BATTLERESULTS_QUESTS_ITEMS_NAME = '#messenger:serviceChannelMessages/battleResults/quests/items/name'
SERVICECHANNELMESSAGES_BATTLERESULTS_REFSYSTEMQUESTS = '#messenger:serviceChannelMessages/battleResults/refSystemQuests'
SERVICECHANNELMESSAGES_BATTLERESULTS_REFSYSTEMBOUGHTVEHICLE = '#messenger:serviceChannelMessages/battleResults/refSystemBoughtVehicle'
SERVICECHANNELMESSAGES_BATTLERESULTS_REFSYSTEMCONTRIBUTEXP = '#messenger:serviceChannelMessages/battleResults/refSystemContributeXp'
SERVICECHANNELMESSAGES_BATTLERESULTS_PERSONALMISSIONS = '#messenger:serviceChannelMessages/battleResults/personalMissions'
SERVICECHANNELMESSAGES_BATTLERESULTS_FORTRESOURCE = '#messenger:serviceChannelMessages/battleResults/fortResource'
SERVICECHANNELMESSAGES_BATTLERESULTS_FORTBUILDING = '#messenger:serviceChannelMessages/battleResults/fortBuilding'
SERVICECHANNELMESSAGES_BATTLERESULTS_FORTRESOURCE_CLAN = '#messenger:serviceChannelMessages/battleResults/fortResource/clan'
SERVICECHANNELMESSAGES_BATTLERESULTS_FORTRESOURCE_PLAYER = '#messenger:serviceChannelMessages/battleResults/fortResource/player'
SERVICECHANNELMESSAGES_BATTLERESULTS_FORT_CLAN = '#messenger:serviceChannelMessages/battleResults/fort/clan'
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_WIN_RANKEARNED = '#messenger:serviceChannelMessages/battleResults/rankedState/win/rankEarned'
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_WIN_RANKLOST = '#messenger:serviceChannelMessages/battleResults/rankedState/win/rankLost'
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_WIN_STEPSEARNED = '#messenger:serviceChannelMessages/battleResults/rankedState/win/stepsEarned'
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_WIN_STEPEARNED = '#messenger:serviceChannelMessages/battleResults/rankedState/win/stepEarned'
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_WIN_STEPLOST = '#messenger:serviceChannelMessages/battleResults/rankedState/win/stepLost'
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_WIN_NOTHINGCHANGED = '#messenger:serviceChannelMessages/battleResults/rankedState/win/nothingChanged'
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_WIN_RANKPOINT = '#messenger:serviceChannelMessages/battleResults/rankedState/win/rankPoint'
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_LOSE_RANKEARNED = '#messenger:serviceChannelMessages/battleResults/rankedState/lose/rankEarned'
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_LOSE_RANKLOST = '#messenger:serviceChannelMessages/battleResults/rankedState/lose/rankLost'
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_LOSE_STEPSEARNED = '#messenger:serviceChannelMessages/battleResults/rankedState/lose/stepsEarned'
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_LOSE_STEPEARNED = '#messenger:serviceChannelMessages/battleResults/rankedState/lose/stepEarned'
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_LOSE_STEPLOST = '#messenger:serviceChannelMessages/battleResults/rankedState/lose/stepLost'
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_LOSE_NOTHINGCHANGED = '#messenger:serviceChannelMessages/battleResults/rankedState/lose/nothingChanged'
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_LOSE_RANKPOINT = '#messenger:serviceChannelMessages/battleResults/rankedState/lose/rankPoint'
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_SHIELD_SHIELD_LOSE = '#messenger:serviceChannelMessages/battleResults/rankedState/shield/shield_lose'
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_SHIELD_SHIELD_LOSE_STEP = '#messenger:serviceChannelMessages/battleResults/rankedState/shield/shield_lose_step'
SERVICECHANNELMESSAGES_BATTLETUTORIAL_RESULTS_FINISHED = '#messenger:serviceChannelMessages/battleTutorial/results/finished'
SERVICECHANNELMESSAGES_BATTLETUTORIAL_RESULTS_FAILED = '#messenger:serviceChannelMessages/battleTutorial/results/failed'
SERVICECHANNELMESSAGES_BATTLETUTORIAL_REASONS_FINISHED = '#messenger:serviceChannelMessages/battleTutorial/reasons/finished'
SERVICECHANNELMESSAGES_BATTLETUTORIAL_REASONS_FAILED = '#messenger:serviceChannelMessages/battleTutorial/reasons/failed'
SERVICECHANNELMESSAGES_BATTLETUTORIAL_REASONS_TIMEOUT = '#messenger:serviceChannelMessages/battleTutorial/reasons/timeout'
SERVICECHANNELMESSAGES_BATTLETUTORIAL_REASONS_EXTERMINATION = '#messenger:serviceChannelMessages/battleTutorial/reasons/extermination'
SERVICECHANNELMESSAGES_BOOTCAMP_FINISHED = '#messenger:serviceChannelMessages/bootcamp/finished'
SERVICECHANNELMESSAGES_BOOTCAMP_AWARDS = '#messenger:serviceChannelMessages/bootcamp/awards'
SERVICECHANNELMESSAGES_BOOTCAMP_NO_AWARDS = '#messenger:serviceChannelMessages/bootcamp/no_awards'
SERVICECHANNELMESSAGES_BOOTCAMP_DEVICES = '#messenger:serviceChannelMessages/bootcamp/devices'
SERVICECHANNELMESSAGES_BOOTCAMP_CREW = '#messenger:serviceChannelMessages/bootcamp/crew'
SERVICECHANNELMESSAGES_GOLDRECEIVED_FINANCIAL_TRANSACTION = '#messenger:serviceChannelMessages/goldReceived/financial_transaction'
SERVICECHANNELMESSAGES_GOLDRECEIVED_DATE = '#messenger:serviceChannelMessages/goldReceived/date'
SERVICECHANNELMESSAGES_GOLDRECEIVED_RECEIVED = '#messenger:serviceChannelMessages/goldReceived/received'
SERVICECHANNELMESSAGES_INVOICERECEIVED_GOLDACCRUED = '#messenger:serviceChannelMessages/invoiceReceived/goldAccrued'
SERVICECHANNELMESSAGES_INVOICERECEIVED_GOLDDEBITED = '#messenger:serviceChannelMessages/invoiceReceived/goldDebited'
SERVICECHANNELMESSAGES_INVOICERECEIVED_CRYSTALACCRUED = '#messenger:serviceChannelMessages/invoiceReceived/crystalAccrued'
SERVICECHANNELMESSAGES_INVOICERECEIVED_CRYSTALDEBITED = '#messenger:serviceChannelMessages/invoiceReceived/crystalDebited'
SERVICECHANNELMESSAGES_INVOICERECEIVED_CREDITSACCRUED = '#messenger:serviceChannelMessages/invoiceReceived/creditsAccrued'
SERVICECHANNELMESSAGES_INVOICERECEIVED_CREDITSDEBITED = '#messenger:serviceChannelMessages/invoiceReceived/creditsDebited'
SERVICECHANNELMESSAGES_INVOICERECEIVED_PREMIUM_ACCOUNT = '#messenger:serviceChannelMessages/invoiceReceived/premium_account'
SERVICECHANNELMESSAGES_INVOICERECEIVED_PREMIUMACCRUED = '#messenger:serviceChannelMessages/invoiceReceived/premiumAccrued'
SERVICECHANNELMESSAGES_INVOICERECEIVED_PREMIUMDEBITED = '#messenger:serviceChannelMessages/invoiceReceived/premiumDebited'
SERVICECHANNELMESSAGES_INVOICERECEIVED_FREEXP = '#messenger:serviceChannelMessages/invoiceReceived/freeXp'
SERVICECHANNELMESSAGES_INVOICERECEIVED_FREEXPACCRUED = '#messenger:serviceChannelMessages/invoiceReceived/freeXpAccrued'
SERVICECHANNELMESSAGES_INVOICERECEIVED_FREEXPDEBITED = '#messenger:serviceChannelMessages/invoiceReceived/freeXpDebited'
SERVICECHANNELMESSAGES_ACHIEVEMENTRECEIVED_MESSAGE = '#messenger:serviceChannelMessages/achievementReceived/message'
SERVICECHANNELMESSAGES_INVOICERECEIVED_INVOICE = '#messenger:serviceChannelMessages/invoiceReceived/invoice'
SERVICECHANNELMESSAGES_INVOICERECEIVED_ITEMSACCRUED = '#messenger:serviceChannelMessages/invoiceReceived/itemsAccrued'
SERVICECHANNELMESSAGES_INVOICERECEIVED_ITEMSDEBITED = '#messenger:serviceChannelMessages/invoiceReceived/itemsDebited'
SERVICECHANNELMESSAGES_INVOICERECEIVED_VEHICLESACCRUED = '#messenger:serviceChannelMessages/invoiceReceived/vehiclesAccrued'
SERVICECHANNELMESSAGES_INVOICERECEIVED_VEHICLESDEBITED = '#messenger:serviceChannelMessages/invoiceReceived/vehiclesDebited'
SERVICECHANNELMESSAGES_INVOICERECEIVED_VEHICLESRENTED = '#messenger:serviceChannelMessages/invoiceReceived/vehiclesRented'
SERVICECHANNELMESSAGES_INVOICERECEIVED_SLOTSACCRUED = '#messenger:serviceChannelMessages/invoiceReceived/slotsAccrued'
SERVICECHANNELMESSAGES_INVOICERECEIVED_SLOTSDEBITED = '#messenger:serviceChannelMessages/invoiceReceived/slotsDebited'
SERVICECHANNELMESSAGES_INVOICERECEIVED_BERTHSACCRUED = '#messenger:serviceChannelMessages/invoiceReceived/berthsAccrued'
SERVICECHANNELMESSAGES_INVOICERECEIVED_BERTHSDEBITED = '#messenger:serviceChannelMessages/invoiceReceived/berthsDebited'
SERVICECHANNELMESSAGES_INVOICERECEIVED_DOSSIERSACCRUED = '#messenger:serviceChannelMessages/invoiceReceived/dossiersAccrued'
SERVICECHANNELMESSAGES_INVOICERECEIVED_DOSSIERSDEBITED = '#messenger:serviceChannelMessages/invoiceReceived/dossiersDebited'
SERVICECHANNELMESSAGES_INVOICERECEIVED_COMPENSATION = '#messenger:serviceChannelMessages/invoiceReceived/compensation'
SERVICECHANNELMESSAGES_INVOICERECEIVED_COMPENSATION_PAINT = '#messenger:serviceChannelMessages/invoiceReceived/compensation/paint'
SERVICECHANNELMESSAGES_INVOICERECEIVED_COMPENSATION_CAMOUFLAGE = '#messenger:serviceChannelMessages/invoiceReceived/compensation/camouflage'
SERVICECHANNELMESSAGES_INVOICERECEIVED_COMPENSATION_MODIFICATION = '#messenger:serviceChannelMessages/invoiceReceived/compensation/modification'
SERVICECHANNELMESSAGES_INVOICERECEIVED_COMPENSATION_STYLE = '#messenger:serviceChannelMessages/invoiceReceived/compensation/style'
SERVICECHANNELMESSAGES_INVOICERECEIVED_COMPENSATION_EMBLEM = '#messenger:serviceChannelMessages/invoiceReceived/compensation/emblem'
SERVICECHANNELMESSAGES_INVOICERECEIVED_COMPENSATION_INSCRIPTION = '#messenger:serviceChannelMessages/invoiceReceived/compensation/inscription'
SERVICECHANNELMESSAGES_INVOICERECEIVED_COMPENSATION_GOLD = '#messenger:serviceChannelMessages/invoiceReceived/compensation/gold'
SERVICECHANNELMESSAGES_INVOICERECEIVED_COMPENSATION_CREDITS = '#messenger:serviceChannelMessages/invoiceReceived/compensation/credits'
SERVICECHANNELMESSAGES_INVOICERECEIVED_COMPENSATION_CRYSTAL = '#messenger:serviceChannelMessages/invoiceReceived/compensation/crystal'
SERVICECHANNELMESSAGES_INVOICERECEIVED_TANKMEN = '#messenger:serviceChannelMessages/invoiceReceived/tankmen'
SERVICECHANNELMESSAGES_INVOICERECEIVED_BOOSTERS = '#messenger:serviceChannelMessages/invoiceReceived/boosters'
SERVICECHANNELMESSAGES_INVOICERECEIVED_DISCOUNTS = '#messenger:serviceChannelMessages/invoiceReceived/discounts'
SERVICECHANNELMESSAGES_INVOICERECEIVED_PIECES = '#messenger:serviceChannelMessages/invoiceReceived/pieces'
SERVICECHANNELMESSAGES_INVOICERECEIVED_RENTDAYS = '#messenger:serviceChannelMessages/invoiceReceived/rentDays'
SERVICECHANNELMESSAGES_INVOICERECEIVED_CREWONVEHICLE = '#messenger:serviceChannelMessages/invoiceReceived/crewOnVehicle'
SERVICECHANNELMESSAGES_INVOICERECEIVED_CREWWITHLVLDROPPEDTOBARRACKS = '#messenger:serviceChannelMessages/invoiceReceived/crewWithLvlDroppedToBarracks'
SERVICECHANNELMESSAGES_INVOICERECEIVED_CREWDROPPEDTOBARRACKS = '#messenger:serviceChannelMessages/invoiceReceived/crewDroppedToBarracks'
SERVICECHANNELMESSAGES_INVOICERECEIVED_CREWWITHDRAWN = '#messenger:serviceChannelMessages/invoiceReceived/crewWithdrawn'
SERVICECHANNELMESSAGES_INVOICERECEIVED_TANKMENFREEXPACCRUED = '#messenger:serviceChannelMessages/invoiceReceived/tankmenFreeXpAccrued'
SERVICECHANNELMESSAGES_INVOICERECEIVED_TANKMENFREEXPDEBITED = '#messenger:serviceChannelMessages/invoiceReceived/tankmenFreeXpDebited'
SERVICECHANNELMESSAGES_INVOICERECEIVED_FREEAWARDLISTSACCRUED = '#messenger:serviceChannelMessages/invoiceReceived/freeAwardListsAccrued'
SERVICECHANNELMESSAGES_INVOICERECEIVED_FREEAWARDLISTSDEBITED = '#messenger:serviceChannelMessages/invoiceReceived/freeAwardListsDebited'
SERVICECHANNELMESSAGES_DURATIONOFPREMIUMACCOUNEXPIRES = '#messenger:serviceChannelMessages/durationOfPremiumAccounExpires'
SERVICECHANNELMESSAGES_PREMIUMBOUGHT = '#messenger:serviceChannelMessages/premiumBought'
SERVICECHANNELMESSAGES_PREMIUMEXTENDED = '#messenger:serviceChannelMessages/premiumExtended'
SERVICECHANNELMESSAGES_PREMIUMEXPIRED = '#messenger:serviceChannelMessages/premiumExpired'
SERVICECHANNELMESSAGES_BOOSTEREXPIRED = '#messenger:serviceChannelMessages/boosterExpired'
SERVICECHANNELMESSAGES_BOOSTERDISABLED = '#messenger:serviceChannelMessages/boosterDisabled'
SERVICECHANNELMESSAGES_CREDITSANDGOLDRECEIVEDASGIFT = '#messenger:serviceChannelMessages/creditsAndGoldReceivedAsGift'
SERVICECHANNELMESSAGES_CREDITSRECEIVEDASGIFT = '#messenger:serviceChannelMessages/creditsReceivedAsGift'
SERVICECHANNELMESSAGES_GOLDRECEIVEDASGIFT = '#messenger:serviceChannelMessages/goldReceivedAsGift'
SERVICECHANNELMESSAGES_XPRECEIVEDASGIFT = '#messenger:serviceChannelMessages/xpReceivedAsGift'
SERVICECHANNELMESSAGES_PREMIUMRECEIVEDASGIFT = '#messenger:serviceChannelMessages/premiumReceivedAsGift'
SERVICECHANNELMESSAGES_ITEMRECEIVEDASGIFT = '#messenger:serviceChannelMessages/itemReceivedAsGift'
SERVICECHANNELMESSAGES_VEHICLERECEIVEDASGIFT = '#messenger:serviceChannelMessages/vehicleReceivedAsGift'
SERVICECHANNELMESSAGES_FORTIFICATIONSTARTUP = '#messenger:serviceChannelMessages/fortificationStartUp'
SERVICECHANNELMESSAGES_WARESBOUGHTASGOLD = '#messenger:serviceChannelMessages/waresBoughtAsGold'
SERVICECHANNELMESSAGES_WARESSOLDASGOLD = '#messenger:serviceChannelMessages/waresSoldAsGold'
SERVICECHANNELMESSAGES_VEHICLELOCKEXPIRED = '#messenger:serviceChannelMessages/vehicleLockExpired'
SERVICECHANNELMESSAGES_VEHICLELOCK = '#messenger:serviceChannelMessages/vehicleLock'
SERVICECHANNELMESSAGES_VEHICLESALLLOCKEXPIRED = '#messenger:serviceChannelMessages/vehiclesAllLockExpired'
SERVICECHANNELMESSAGES_AUTOREPAIRERROR = '#messenger:serviceChannelMessages/autoRepairError'
SERVICECHANNELMESSAGES_AUTOLOADERROR = '#messenger:serviceChannelMessages/autoLoadError'
SERVICECHANNELMESSAGES_AUTOEQUIPERROR = '#messenger:serviceChannelMessages/autoEquipError'
SERVICECHANNELMESSAGES_AUTOEQUIPBOOSTERERROR = '#messenger:serviceChannelMessages/autoEquipBoosterError'
SERVICECHANNELMESSAGES_AUTORENTSTYLEERROR = '#messenger:serviceChannelMessages/autoRentStyleError'
SERVICECHANNELMESSAGES_AUTOREPAIRERRORNOWALLET = '#messenger:serviceChannelMessages/autoRepairErrorNoWallet'
SERVICECHANNELMESSAGES_AUTOLOADERRORNOWALLET = '#messenger:serviceChannelMessages/autoLoadErrorNoWallet'
SERVICECHANNELMESSAGES_AUTOEQUIPERRORNOWALLET = '#messenger:serviceChannelMessages/autoEquipErrorNoWallet'
SERVICECHANNELMESSAGES_AUTOBOOSTERERRORNOWALLET = '#messenger:serviceChannelMessages/autoBoosterErrorNoWallet'
SERVICECHANNELMESSAGES_AUTORENTSTYLEERRORNOWALLET = '#messenger:serviceChannelMessages/autoRentStyleErrorNoWallet'
SERVICECHANNELMESSAGES_AUTORENTSTYLERENTISOVER_TEXT = '#messenger:serviceChannelMessages/autoRentStyleRentIsOver/text'
SERVICECHANNELMESSAGES_AUTORENTSTYLERENTISOVER_BUTTON = '#messenger:serviceChannelMessages/autoRentStyleRentIsOver/button'
SERVICECHANNELMESSAGES_AUTOREPAIRSUCCESS = '#messenger:serviceChannelMessages/autoRepairSuccess'
SERVICECHANNELMESSAGES_AUTOLOADSUCCESS = '#messenger:serviceChannelMessages/autoLoadSuccess'
SERVICECHANNELMESSAGES_AUTOEQUIPSUCCESS = '#messenger:serviceChannelMessages/autoEquipSuccess'
SERVICECHANNELMESSAGES_AUTOEQUIPBOOSTERSUCCESS = '#messenger:serviceChannelMessages/autoEquipBoosterSuccess'
SERVICECHANNELMESSAGES_AUTORENTSTYLESUCCESS = '#messenger:serviceChannelMessages/autoRentStyleSuccess'
SERVICECHANNELMESSAGES_AUTOREPAIRSKIPPED = '#messenger:serviceChannelMessages/autoRepairSkipped'
SERVICECHANNELMESSAGES_AUTOLOADSKIPPED = '#messenger:serviceChannelMessages/autoLoadSkipped'
SERVICECHANNELMESSAGES_AUTOEQUIPSKIPPED = '#messenger:serviceChannelMessages/autoEquipSkipped'
SERVICECHANNELMESSAGES_AUTOEQUIPBOOSTERSKIPPED = '#messenger:serviceChannelMessages/autoEquipBoosterSkipped'
SERVICECHANNELMESSAGES_AUTORENTSTYLESKIPPED = '#messenger:serviceChannelMessages/autoRentStyleSkipped'
SERVICECHANNELMESSAGES_AUTOREPAIRDISABLEDOPTION = '#messenger:serviceChannelMessages/autoRepairDisabledOption'
SERVICECHANNELMESSAGES_AUTOLOADDISABLEDOPTION = '#messenger:serviceChannelMessages/autoLoadDisabledOption'
SERVICECHANNELMESSAGES_AUTOEQUIPDISABLEDOPTION = '#messenger:serviceChannelMessages/autoEquipDisabledOption'
SERVICECHANNELMESSAGES_AUTOEQUIPBOOSTERDISABLEDOPTION = '#messenger:serviceChannelMessages/autoEquipBoosterDisabledOption'
SERVICECHANNELMESSAGES_AUTORENTSTYLEDISABLEDOPTION = '#messenger:serviceChannelMessages/autoRentStyleDisabledOption'
SERVICECHANNELMESSAGES_VEHCAMOUFLAGETIMEDOUT = '#messenger:serviceChannelMessages/vehCamouflageTimedOut'
SERVICECHANNELMESSAGES_VEHEMBLEMTIMEDOUT = '#messenger:serviceChannelMessages/vehEmblemTimedOut'
SERVICECHANNELMESSAGES_VEHINSCRIPTIONTIMEDOUT = '#messenger:serviceChannelMessages/vehInscriptionTimedOut'
SERVICECHANNELMESSAGES_SYSMSG_TITLES_ERROR = '#messenger:serviceChannelMessages/sysMsg/titles/error'
SERVICECHANNELMESSAGES_SYSMSG_TITLES_POWERLEVEL = '#messenger:serviceChannelMessages/sysMsg/titles/powerlevel'
SERVICECHANNELMESSAGES_SYSMSG_TITLES_FINANCIALTRANSACTION = '#messenger:serviceChannelMessages/sysMsg/titles/financialTransaction'
SERVICECHANNELMESSAGES_SYSMSG_TITLES_PURCHASE = '#messenger:serviceChannelMessages/sysMsg/titles/purchase'
SERVICECHANNELMESSAGES_SYSMSG_TITLES_RESTORE = '#messenger:serviceChannelMessages/sysMsg/titles/restore'
SERVICECHANNELMESSAGES_SYSMSG_TITLES_DISMANTLING = '#messenger:serviceChannelMessages/sysMsg/titles/dismantling'
SERVICECHANNELMESSAGES_SYSMSG_TITLES_SELLING = '#messenger:serviceChannelMessages/sysMsg/titles/selling'
SERVICECHANNELMESSAGES_SYSMSG_TITLES_REMOVE = '#messenger:serviceChannelMessages/sysMsg/titles/remove'
SERVICECHANNELMESSAGES_SYSMSG_TITLES_REPAIR = '#messenger:serviceChannelMessages/sysMsg/titles/repair'
SERVICECHANNELMESSAGES_SYSMSG_TITLES_CUSTOMIZATION = '#messenger:serviceChannelMessages/sysMsg/titles/customization'
SERVICECHANNELMESSAGES_SYSMSG_TITLES_PRIMETIME = '#messenger:serviceChannelMessages/sysMsg/titles/primeTime'
SERVICECHANNELMESSAGES_SYSMSG_TITLES_RANKEDBATTLESAVAILABLE = '#messenger:serviceChannelMessages/sysMsg/titles/rankedBattlesAvailable'
SERVICECHANNELMESSAGES_PREBATTLE_BATTLETYPE_TOURNAMENT = '#messenger:serviceChannelMessages/prebattle/battleType/tournament'
SERVICECHANNELMESSAGES_PREBATTLE_BATTLETYPE_CLAN = '#messenger:serviceChannelMessages/prebattle/battleType/clan'
SERVICECHANNELMESSAGES_PREBATTLE_BATTLETYPE_PREBATTLE = '#messenger:serviceChannelMessages/prebattle/battleType/prebattle'
SERVICECHANNELMESSAGES_PREBATTLE_FINISH_BASE_DRAFTGAME = '#messenger:serviceChannelMessages/prebattle/finish/base/draftGame'
SERVICECHANNELMESSAGES_PREBATTLE_FINISH_BASE_DEFEAT = '#messenger:serviceChannelMessages/prebattle/finish/base/defeat'
SERVICECHANNELMESSAGES_PREBATTLE_FINISH_BASE_UNDEFINED = '#messenger:serviceChannelMessages/prebattle/finish/base/undefined'
SERVICECHANNELMESSAGES_PREBATTLE_FINISH_BASE_WIN = '#messenger:serviceChannelMessages/prebattle/finish/base/win'
SERVICECHANNELMESSAGES_PREBATTLE_FINISH_TECHNICAL_DRAFTGAME = '#messenger:serviceChannelMessages/prebattle/finish/technical/draftGame'
SERVICECHANNELMESSAGES_PREBATTLE_FINISH_TECHNICAL_DEFEAT = '#messenger:serviceChannelMessages/prebattle/finish/technical/defeat'
SERVICECHANNELMESSAGES_PREBATTLE_FINISH_TECHNICAL_WIN = '#messenger:serviceChannelMessages/prebattle/finish/technical/win'
SERVICECHANNELMESSAGES_PREBATTLE_FINISH_TECHNICAL_UNDEFINED = '#messenger:serviceChannelMessages/prebattle/finish/technical/undefined'
SERVICECHANNELMESSAGES_PREBATTLE_FINISH_FAILURE = '#messenger:serviceChannelMessages/prebattle/finish/failure'
SERVICECHANNELMESSAGES_PREBATTLE_FINISH_CREATORLEFT = '#messenger:serviceChannelMessages/prebattle/finish/creatorLeft'
SERVICECHANNELMESSAGES_PREBATTLE_FINISH_PLAYERKICK = '#messenger:serviceChannelMessages/prebattle/finish/playerKick'
SERVICECHANNELMESSAGES_PREBATTLE_FINISH_TIMEOUT = '#messenger:serviceChannelMessages/prebattle/finish/timeout'
SERVICECHANNELMESSAGES_PREBATTLE_NUMBEROFBATTLE = '#messenger:serviceChannelMessages/prebattle/numberOfBattle'
SERVICECHANNELMESSAGES_PREBATTLE_SUBTOTAL = '#messenger:serviceChannelMessages/prebattle/subtotal'
SERVICECHANNELMESSAGES_PREBATTLE_TOTAL = '#messenger:serviceChannelMessages/prebattle/total'
SERVICECHANNELMESSAGES_PREBATTLE_FINISHEDTIME = '#messenger:serviceChannelMessages/prebattle/finishedTime'
PREBATTLE_ADDALLBTN_TOOLTIP_HEADER = '#messenger:prebattle/addAllBtn/tooltip/header'
PREBATTLE_ADDALLBTN_TOOLTIP_BODY = '#messenger:prebattle/addAllBtn/tooltip/body'
PREBATTLE_ADDBTN_TOOLTIP_HEADER = '#messenger:prebattle/addBtn/tooltip/header'
PREBATTLE_ADDBTN_TOOLTIP_BODY = '#messenger:prebattle/addBtn/tooltip/body'
PREBATTLE_REMOVEALLBTN_TOOLTIP_HEADER = '#messenger:prebattle/removeAllBtn/tooltip/header'
PREBATTLE_REMOVEALLBTN_TOOLTIP_BODY = '#messenger:prebattle/removeAllBtn/tooltip/body'
PREBATTLE_REMOVEBTN_TOOLTIP_HEADER = '#messenger:prebattle/removeBtn/tooltip/header'
PREBATTLE_REMOVEBTN_TOOLTIP_BODY = '#messenger:prebattle/removeBtn/tooltip/body'
SERVICECHANNELMESSAGES_SERVERDOWNTIMECOMPENSATION = '#messenger:serviceChannelMessages/serverDowntimeCompensation'
SERVICECHANNELMESSAGES_SERVERDOWNTIMECOMPENSATION_PREMIUM = '#messenger:serviceChannelMessages/serverDowntimeCompensation/premium'
SERVICECHANNELMESSAGES_SERVERDOWNTIMECOMPENSATION_CAMOUFLAGES = '#messenger:serviceChannelMessages/serverDowntimeCompensation/camouflages'
SERVICECHANNELMESSAGES_SERVERDOWNTIMECOMPENSATION_PLAYEREMBLEMS = '#messenger:serviceChannelMessages/serverDowntimeCompensation/playerEmblems'
SERVICECHANNELMESSAGES_SERVERDOWNTIMECOMPENSATION_PLAYERINSCRIPTIONS = '#messenger:serviceChannelMessages/serverDowntimeCompensation/playerInscriptions'
SERVICECHANNELMESSAGES_SERVERDOWNTIMECOMPENSATION_RENTALS = '#messenger:serviceChannelMessages/serverDowntimeCompensation/rentals'
SERVICECHANNELMESSAGES_ACTIONBEGIN = '#messenger:serviceChannelMessages/actionBegin'
SERVICECHANNELMESSAGES_ACTIONEND = '#messenger:serviceChannelMessages/actionEnd'
SERVICECHANNELMESSAGES_SYSMSG_CONVERTER_TITLE = '#messenger:serviceChannelMessages/sysMsg/converter/title'
SERVICECHANNELMESSAGES_SYSMSG_CONVERTER_EMBLEMS = '#messenger:serviceChannelMessages/sysMsg/converter/emblems'
SERVICECHANNELMESSAGES_SYSMSG_CONVERTER_INSCRIPTIONS = '#messenger:serviceChannelMessages/sysMsg/converter/inscriptions'
SERVICECHANNELMESSAGES_SYSMSG_CONVERTER_CAMOUFLAGES = '#messenger:serviceChannelMessages/sysMsg/converter/camouflages'
SERVICECHANNELMESSAGES_SYSMSG_CONVERTER_CUSTOMIZATIONS = '#messenger:serviceChannelMessages/sysMsg/converter/customizations'
SERVICECHANNELMESSAGES_SYSMSG_CONVERTER_CUSTOMIZATIONSBUY = '#messenger:serviceChannelMessages/sysMsg/converter/customizationsBuy'
SERVICECHANNELMESSAGES_SYSMSG_CUSTOMIZATIONS_SELL = '#messenger:serviceChannelMessages/sysMsg/customizations/sell'
SERVICECHANNELMESSAGES_SYSMSG_CUSTOMIZATIONS_BUY = '#messenger:serviceChannelMessages/sysMsg/customizations/buy'
SERVICECHANNELMESSAGES_SYSMSG_CONVERTER_GOLDRECEIVED = '#messenger:serviceChannelMessages/sysMsg/converter/goldReceived'
SERVICECHANNELMESSAGES_SYSMSG_CONVERTER_GOLDWITHDRAWN = '#messenger:serviceChannelMessages/sysMsg/converter/goldWithdrawn'
SERVICECHANNELMESSAGES_SYSMSG_CONVERTER_CRYSTALRECEIVED = '#messenger:serviceChannelMessages/sysMsg/converter/crystalReceived'
SERVICECHANNELMESSAGES_SYSMSG_CONVERTER_CRYSTALWITHDRAWN = '#messenger:serviceChannelMessages/sysMsg/converter/crystalWithdrawn'
SERVICECHANNELMESSAGES_SYSMSG_CONVERTER_CREDITSRECEIVED = '#messenger:serviceChannelMessages/sysMsg/converter/creditsReceived'
SERVICECHANNELMESSAGES_SYSMSG_CONVERTER_CREDITSWITHDRAWN = '#messenger:serviceChannelMessages/sysMsg/converter/creditsWithdrawn'
SERVICECHANNELMESSAGES_SYSMSG_CONVERTER_FREEXPRECEIVED = '#messenger:serviceChannelMessages/sysMsg/converter/freeXPReceived'
SERVICECHANNELMESSAGES_SYSMSG_CONVERTER_FREEXPWITHDRAWN = '#messenger:serviceChannelMessages/sysMsg/converter/freeXPWithdrawn'
SERVICECHANNELMESSAGES_SYSMSG_CONVERTER_SLOTSRECEIVED = '#messenger:serviceChannelMessages/sysMsg/converter/slotsReceived'
SERVICECHANNELMESSAGES_SYSMSG_CONVERTER_SLOTSWITHDRAWN = '#messenger:serviceChannelMessages/sysMsg/converter/slotsWithdrawn'
SERVICECHANNELMESSAGES_SYSMSG_CONVERTER_VEHICLESRECEIVED = '#messenger:serviceChannelMessages/sysMsg/converter/vehiclesReceived'
SERVICECHANNELMESSAGES_SYSMSG_CONVERTER_VEHICLESWITHDRAWN = '#messenger:serviceChannelMessages/sysMsg/converter/vehiclesWithdrawn'
KEYBOARDCHAT = '#messenger:keyboardChat'
SERVICECHANNELMESSAGES_CLAN_LEFT_CLAN = '#messenger:serviceChannelMessages/clan/LEFT_CLAN'
SERVICECHANNELMESSAGES_FORT_FORT_READY = '#messenger:serviceChannelMessages/fort/FORT_READY'
SERVICECHANNELMESSAGES_FORT_FORT_GOT_8_LEVEL = '#messenger:serviceChannelMessages/fort/FORT_GOT_8_LEVEL'
SERVICECHANNELMESSAGES_FORT_DEF_HOUR_SHUTDOWN = '#messenger:serviceChannelMessages/fort/DEF_HOUR_SHUTDOWN'
SERVICECHANNELMESSAGES_FORT_RESERVE_ACTIVATED_TITLE = '#messenger:serviceChannelMessages/fort/RESERVE_ACTIVATED/title'
SERVICECHANNELMESSAGES_FORT_RESERVE_ACTIVATED = '#messenger:serviceChannelMessages/fort/RESERVE_ACTIVATED'
SERVICECHANNELMESSAGES_FORT_RESERVE_EXPIRED = '#messenger:serviceChannelMessages/fort/RESERVE_EXPIRED'
SERVICECHANNELMESSAGES_FORT_RESERVE_PRODUCED = '#messenger:serviceChannelMessages/fort/RESERVE_PRODUCED'
SERVICECHANNELMESSAGES_FORT_STORAGE_OVERFLOW = '#messenger:serviceChannelMessages/fort/STORAGE_OVERFLOW'
SERVICECHANNELMESSAGES_FORT_ORDER_CANCELED = '#messenger:serviceChannelMessages/fort/ORDER_CANCELED'
SERVICECHANNELMESSAGES_FORT_REATTACHED_TO_BASE = '#messenger:serviceChannelMessages/fort/REATTACHED_TO_BASE'
SERVICECHANNELMESSAGES_FORT_DEF_HOUR_CHANGED = '#messenger:serviceChannelMessages/fort/DEF_HOUR_CHANGED'
SERVICECHANNELMESSAGES_FORT_DEF_HOUR_ACTIVATED = '#messenger:serviceChannelMessages/fort/DEF_HOUR_ACTIVATED'
SERVICECHANNELMESSAGES_FORT_OFF_DAY_ACTIVATED = '#messenger:serviceChannelMessages/fort/OFF_DAY_ACTIVATED'
SERVICECHANNELMESSAGES_FORT_NO_OFF_DAY_ACTIVATED = '#messenger:serviceChannelMessages/fort/NO_OFF_DAY_ACTIVATED'
SERVICECHANNELMESSAGES_FORT_VACATION_STARTED = '#messenger:serviceChannelMessages/fort/VACATION_STARTED'
SERVICECHANNELMESSAGES_FORT_VACATION_FINISHED = '#messenger:serviceChannelMessages/fort/VACATION_FINISHED'
SERVICECHANNELMESSAGES_FORT_PERIPHERY_CHANGED = '#messenger:serviceChannelMessages/fort/PERIPHERY_CHANGED'
SERVICECHANNELMESSAGES_FORT_BUILDING_DAMAGED = '#messenger:serviceChannelMessages/fort/BUILDING_DAMAGED'
SERVICECHANNELMESSAGES_FORT_BUILDING_DAMAGED_MILITARY_BASE = '#messenger:serviceChannelMessages/fort/BUILDING_DAMAGED_MILITARY_BASE'
SERVICECHANNELMESSAGES_FORT_BASE_DESTROYED = '#messenger:serviceChannelMessages/fort/BASE_DESTROYED'
SERVICECHANNELMESSAGES_FORT_ORDER_COMPENSATED = '#messenger:serviceChannelMessages/fort/ORDER_COMPENSATED'
SERVICECHANNELMESSAGES_FORT_ATTACK_PLANNED = '#messenger:serviceChannelMessages/fort/ATTACK_PLANNED'
SERVICECHANNELMESSAGES_FORT_DEFENCE_PLANNED = '#messenger:serviceChannelMessages/fort/DEFENCE_PLANNED'
SERVICECHANNELMESSAGES_FORT_BATTLE_DELETED = '#messenger:serviceChannelMessages/fort/BATTLE_DELETED'
SERVICECHANNELMESSAGES_FORT_BATTLE_DELETED_LEVEL = '#messenger:serviceChannelMessages/fort/BATTLE_DELETED_LEVEL'
SERVICECHANNELMESSAGES_FORT_SPECIAL_ORDER_EXPIRED_EVACUATION = '#messenger:serviceChannelMessages/fort/SPECIAL_ORDER_EXPIRED_EVACUATION'
SERVICECHANNELMESSAGES_FORT_SPECIAL_ORDER_EXPIRED_EVACUATION_ADDITIONAL = '#messenger:serviceChannelMessages/fort/SPECIAL_ORDER_EXPIRED_EVACUATION_ADDITIONAL'
SERVICECHANNELMESSAGES_FORT_SPECIAL_ORDER_EXPIRED_REQUISITION = '#messenger:serviceChannelMessages/fort/SPECIAL_ORDER_EXPIRED_REQUISITION'
SERVICECHANNELMESSAGES_FORT_SPECIAL_ORDER_EXPIRED_REQUISITION_ADDITIONAL = '#messenger:serviceChannelMessages/fort/SPECIAL_ORDER_EXPIRED_REQUISITION_ADDITIONAL'
SERVICECHANNELMESSAGES_FORT_PROM_RESOURCE_EARNED = '#messenger:serviceChannelMessages/fort/PROM_RESOURCE_EARNED'
SERVICECHANNELMESSAGES_FORT_PROM_RESOURCE_WITHDRAWN = '#messenger:serviceChannelMessages/fort/PROM_RESOURCE_WITHDRAWN'
SERVICECHANNELMESSAGES_FORT_RESERVES_EARNED = '#messenger:serviceChannelMessages/fort/RESERVES_EARNED'
SERVICECHANNELMESSAGES_FORT_RESERVES_WITHDRAWN = '#messenger:serviceChannelMessages/fort/RESERVES_WITHDRAWN'
SERVICECHANNELMESSAGES_BADGEACHIEVEMENT = '#messenger:serviceChannelMessages/badgeAchievement'
SERVICECHANNELMESSAGES_REMOVEDBADGEACHIEVEMENT = '#messenger:serviceChannelMessages/removedBadgeAchievement'
COMMAND_SUCCESS_USERBAN = '#messenger:command/success/USERBAN'
COMMAND_SUCCESS_USERUNBAN = '#messenger:command/success/USERUNBAN'
CHAT_ERROR_NOT_ALLOWED = '#messenger:chat_error/NOT_ALLOWED'
CHAT_ERROR_USER_NOT_FOUND = '#messenger:chat_error/USER_NOT_FOUND'
CHAT_ERROR_CANNOT_BAN_ONESELF = '#messenger:chat_error/CANNOT_BAN_ONESELF'
CHAT_ERROR_WRONG_ARGS = '#messenger:chat_error/WRONG_ARGS'
CHAT_ERROR_NOT_READY = '#messenger:chat_error/NOT_READY'
CHAT_ERROR_IS_BUSY = '#messenger:chat_error/IS_BUSY'
CHAT_ERROR_GENERIC_ERROR = '#messenger:chat_error/GENERIC_ERROR'
CLIENT_ERROR_COMMAND_NOT_SUPPORTED = '#messenger:client_error/command/not_supported'
CLIENT_ERROR_COMMAND_WRONG_BAN_TYPE = '#messenger:client_error/command/wrong_ban_type'
CLIENT_ERROR_COMMAND_WRONG_PLAYER_NAME = '#messenger:client_error/command/wrong_player_name'
CLIENT_ERROR_COMMAND_WRONG_BAN_PERIOD = '#messenger:client_error/command/wrong_ban_period'
CLIENT_ERROR_COMMAND_IN_COOLDOWN_WO_NAME = '#messenger:client_error/command/in_cooldown_wo_name'
CLIENT_ERROR_COMMAND_GENERIC_ERROR = '#messenger:client_error/command/generic_error'
CLIENT_ERROR_ACTION_IN_COOLDOWN = '#messenger:client_error/action/in_cooldown'
CLIENT_ERROR_ACTION_IN_COOLDOWN_WO_PERIOD = '#messenger:client_error/action/in_cooldown_wo_period'
CLIENT_ERROR_SHARED_GENERIC = '#messenger:client_error/shared/GENERIC'
CLIENT_ERROR_SHARED_LOCKED = '#messenger:client_error/shared/LOCKED'
CLIENT_ERROR_SHARED_NOT_CONNECTED = '#messenger:client_error/shared/NOT_CONNECTED'
CLIENT_ERROR_SHARED_WRONG_ARGS = '#messenger:client_error/shared/WRONG_ARGS'
CLIENT_ERROR_SHARED_NOT_SUPPORTED = '#messenger:client_error/shared/NOT_SUPPORTED'
CLIENT_ERROR_SHARED_DBID_INVALID = '#messenger:client_error/shared/DBID_INVALID'
CLIENT_ERROR_SHARED_NAME_EMPTY = '#messenger:client_error/shared/NAME_EMPTY'
CLIENT_ERROR_SHARED_NAME_INVALID = '#messenger:client_error/shared/NAME_INVALID'
CLIENT_ERROR_SHARED_WAITING_BEFORE_START = '#messenger:client_error/shared/WAITING_BEFORE_START'
CLIENT_ERROR_CONTACT_CONTACT_ITEM_NOT_FOUND = '#messenger:client_error/contact/CONTACT_ITEM_NOT_FOUND'
CLIENT_ERROR_CONTACT_ROSTER_ITEM_EXISTS = '#messenger:client_error/contact/ROSTER_ITEM_EXISTS'
CLIENT_ERROR_CONTACT_ROSTER_ITEM_NOT_FOUND = '#messenger:client_error/contact/ROSTER_ITEM_NOT_FOUND'
CLIENT_ERROR_CONTACT_FRIENDSHIP_APPROVED = '#messenger:client_error/contact/FRIENDSHIP_APPROVED'
CLIENT_ERROR_CONTACT_FRIENDSHIP_CANCELED = '#messenger:client_error/contact/FRIENDSHIP_CANCELED'
CLIENT_ERROR_CONTACT_FRIENDSHIP_RQ_PROCESS = '#messenger:client_error/contact/FRIENDSHIP_RQ_PROCESS'
CLIENT_ERROR_CONTACT_BLOCK_ITEM_EXISTS = '#messenger:client_error/contact/BLOCK_ITEM_EXISTS'
CLIENT_ERROR_CONTACT_BLOCK_ITEM_NOT_FOUND = '#messenger:client_error/contact/BLOCK_ITEM_NOT_FOUND'
CLIENT_ERROR_CONTACT_MUTED_ITEM_NOT_FOUND = '#messenger:client_error/contact/MUTED_ITEM_NOT_FOUND'
CLIENT_ERROR_CONTACT_GROUP_EMPTY = '#messenger:client_error/contact/GROUP_EMPTY'
CLIENT_ERROR_CONTACT_GROUP_EXISTS = '#messenger:client_error/contact/GROUP_EXISTS'
CLIENT_ERROR_CONTACT_GROUP_NOT_FOUND = '#messenger:client_error/contact/GROUP_NOT_FOUND'
CLIENT_ERROR_CONTACT_GROUP_INVALID_NAME = '#messenger:client_error/contact/GROUP_INVALID_NAME'
CLIENT_ERROR_CONTACT_NOTE_EMPTY = '#messenger:client_error/contact/NOTE_EMPTY'
CLIENT_ERROR_CONTACT_NOTE_NOT_FOUND = '#messenger:client_error/contact/NOTE_NOT_FOUND'
CLIENT_ERROR_LIMIT_MAX_ROSTER_ITEMS = '#messenger:client_error/limit/MAX_ROSTER_ITEMS'
CLIENT_ERROR_LIMIT_MAX_GROUP = '#messenger:client_error/limit/MAX_GROUP'
CLIENT_ERROR_LIMIT_MAX_BLOCK_ITEMS = '#messenger:client_error/limit/MAX_BLOCK_ITEMS'
CLIENT_ERROR_LIMIT_GROUP_INVALID_LENGTH = '#messenger:client_error/limit/GROUP_INVALID_LENGTH'
CLIENT_ERROR_LIMIT_NOTE_INVALID_LENGTH = '#messenger:client_error/limit/NOTE_INVALID_LENGTH'
XMPP_ERROR_SIMPLE = '#messenger:xmpp_error/simple'
XMPP_ERROR_ACTION = '#messenger:xmpp_error/action'
XMPP_ERROR_USER_ROOM_CREATION = '#messenger:xmpp_error/user_room_creation'
CLIENT_ERROR_LIMIT_CHANNEL_INVALID_LENGTH = '#messenger:client_error/limit/CHANNEL_INVALID_LENGTH'
CLIENT_ERROR_LIMIT_PWD_INVALID_LENGTH = '#messenger:client_error/limit/PWD_INVALID_LENGTH'
CLIENT_ERROR_CHANNEL_NAME_EMPTY = '#messenger:client_error/channel/NAME_EMPTY'
CLIENT_ERROR_CHANNEL_NAME_INVALID = '#messenger:client_error/channel/NAME_INVALID'
CLIENT_ERROR_CHANNEL_PASSWORD_EMPTY = '#messenger:client_error/channel/PASSWORD_EMPTY'
CLIENT_ERROR_CHANNEL_PASSWORD_INVALID = '#messenger:client_error/channel/PASSWORD_INVALID'
CLIENT_ERROR_CHANNEL_RETYPE_EMPTY = '#messenger:client_error/channel/RETYPE_EMPTY'
CLIENT_ERROR_CHANNEL_RETYPE_INVALID = '#messenger:client_error/channel/RETYPE_INVALID'
CLIENT_ERROR_CHANNEL_PASSWORDS_NOT_EQUALS = '#messenger:client_error/channel/PASSWORDS_NOT_EQUALS'
CLIENT_ERROR_CHANNEL_LIMIT_REACHED = '#messenger:client_error/channel/LIMIT_REACHED'
CLIENT_ERROR_CHANNEL_NAME_ALREADY_EXISTS = '#messenger:client_error/channel/NAME_ALREADY_EXISTS'
SERVER_ERROR_USER_ROOM_CREATION_NAME_EXISTS = '#messenger:server_error/user_room_creation/NAME_EXISTS'
SERVER_ERROR_USER_ROOM_CREATION_LIMIT_COUNT = '#messenger:server_error/user_room_creation/LIMIT_COUNT'
SERVER_ERROR_USER_ROOM_CREATION_LIMIT_PASS = '#messenger:server_error/user_room_creation/LIMIT_PASS'
SERVER_ERROR_USER_ROOM_CREATION_LIMIT_NAME = '#messenger:server_error/user_room_creation/LIMIT_NAME'
SERVER_ERROR_USER_ROOM_CREATION_WRONG_SYMBOL = '#messenger:server_error/user_room_creation/WRONG_SYMBOL'
SERVER_ERROR_USER_ROOM_CREATION_WRONG_WORD = '#messenger:server_error/user_room_creation/WRONG_WORD'
CHAT_ACTION_BROADCAST_UNIT_MESSAGE = '#messenger:chat_action/BROADCAST_UNIT_MESSAGE'
CHAT_ACTION_BROADCAST_BATTLE_MESSAGE = '#messenger:chat_action/BROADCAST_BATTLE_MESSAGE'
CHAT_ACTION_FIND_USERS_BY_NAME = '#messenger:chat_action/FIND_USERS_BY_NAME'
CHAT_ACTION_GET_VOIP_CREDENTIALS = '#messenger:chat_action/GET_VOIP_CREDENTIALS'
CLIENT_ACTION_ADD_FRIEND = '#messenger:client_action/ADD_FRIEND'
CLIENT_ACTION_REMOVE_FRIEND = '#messenger:client_action/REMOVE_FRIEND'
CLIENT_ACTION_ADD_IGNORED = '#messenger:client_action/ADD_IGNORED'
CLIENT_ACTION_REMOVE_IGNORED = '#messenger:client_action/REMOVE_IGNORED'
CLIENT_ACTION_SET_MUTE = '#messenger:client_action/SET_MUTE'
CLIENT_ACTION_UNSET_MUTE = '#messenger:client_action/UNSET_MUTE'
CLIENT_ACTION_ADD_GROUP = '#messenger:client_action/ADD_GROUP'
CLIENT_ACTION_CHANGE_GROUP = '#messenger:client_action/CHANGE_GROUP'
CLIENT_ACTION_RQ_FRIENDSHIP = '#messenger:client_action/RQ_FRIENDSHIP'
CLIENT_ACTION_APPROVE_FRIENDSHIP = '#messenger:client_action/APPROVE_FRIENDSHIP'
CLIENT_ACTION_CANCEL_FRIENDSHIP = '#messenger:client_action/CANCEL_FRIENDSHIP'
CLIENT_ACTION_SEND_MESSAGE = '#messenger:client_action/SEND_MESSAGE'
CLIENT_ACTION_SET_NOTE = '#messenger:client_action/SET_NOTE'
CLIENT_ACTION_REMOVE_NOTE = '#messenger:client_action/REMOVE_NOTE'
CLIENT_ACTION_RQ_HISTORY = '#messenger:client_action/RQ_HISTORY'
CLIENT_ACTION_CREATE_USER_ROOM = '#messenger:client_action/CREATE_USER_ROOM'
CLIENT_ACTION_JOIN_USER_ROOM = '#messenger:client_action/JOIN_USER_ROOM'
CLIENT_ACTION_LEAVE_USER_ROOM = '#messenger:client_action/LEAVE_USER_ROOM'
CLIENT_ACTION_SEARCH_USER_ROOM = '#messenger:client_action/SEARCH_USER_ROOM'
CLIENT_ACTION_FIND_USERS_BY_PREFIX = '#messenger:client_action/FIND_USERS_BY_PREFIX'
CUSTOM_CLIENT_ACTION_ADMIN_CHAT_COMMAND = '#messenger:custom_client_action/ADMIN_CHAT_COMMAND'
SERVICECHANNELMESSAGES_VEHICLERENTED = '#messenger:serviceChannelMessages/vehicleRented'
SERVICECHANNELMESSAGES_RENTALSEXPIRED = '#messenger:serviceChannelMessages/rentalsExpired'
SERVICECHANNELMESSAGES_RENTCOMPENSATION_COMPENSATION = '#messenger:serviceChannelMessages/rentCompensation/compensation'
SERVICECHANNELMESSAGES_RENTCOMPENSATION_GOLDRECEIVED = '#messenger:serviceChannelMessages/rentCompensation/goldReceived'
SERVICECHANNELMESSAGES_RENTCOMPENSATION_CREDITSRECEIVED = '#messenger:serviceChannelMessages/rentCompensation/creditsReceived'
MESSENGER_CONTACTS_TITLE = '#messenger:messenger/contacts/title'
MESSENGER_CONTACTS_SEARCHINPUTPROMPT = '#messenger:messenger/contacts/searchInputPrompt'
MESSENGER_CONTACTS_DROPCONTACTPROMPT = '#messenger:messenger/contacts/dropContactPrompt'
MESSENGER_CONTACTS_MAINGROPS_FRIENDS = '#messenger:messenger/contacts/mainGrops/Friends'
MESSENGER_CONTACTS_MAINGROPS_FORMATIONS = '#messenger:messenger/contacts/mainGrops/Formations'
MESSENGER_CONTACTS_MAINGROPS_OTHER = '#messenger:messenger/contacts/mainGrops/Other'
MESSENGER_CONTACTS_MAINGROPS_OTHER_IGNORED = '#messenger:messenger/contacts/mainGrops/Other/Ignored'
MESSENGER_CONTACTS_MAINGROPS_OTHER_FRIENDSHIPREQUEST = '#messenger:messenger/contacts/mainGrops/Other/friendshipRequest'
MESSENGER_CONTACTS_MAINGROUPS_OTHER_REFERRALS = '#messenger:messenger/contacts/mainGroups/other/referrals'
MESSENGER_CONTACTS_MAINGROUPS_OTHER_REFERRERS = '#messenger:messenger/contacts/mainGroups/other/referrers'
MESSENGER_CONTACTS_BUTTONS_EXTERNALSEARCH = '#messenger:messenger/contacts/buttons/externalSearch'
MESSENGER_CONTACTS_BUTTONS_EXTERNALSEARCHCANCEL = '#messenger:messenger/contacts/buttons/externalSearchCancel'
MESSENGER_CONTACTS_SEARCHUSERS_LISTEMPTYPROMPT_TITLE = '#messenger:messenger/contacts/searchUsers/listEmptyPrompt/title'
MESSENGER_CONTACTS_SEARCHUSERS_LISTEMPTYPROMPT_DESCR = '#messenger:messenger/contacts/searchUsers/listEmptyPrompt/descr'
MESSENGER_CONTACTS_SEARCHUSERS_NOONLINECONTACTS_TITLE = '#messenger:messenger/contacts/searchUsers/noOnlineContacts/title'
MESSENGER_CONTACTS_SEARCHUSERS_NOONLINECONTACTS_DESCR = '#messenger:messenger/contacts/searchUsers/noOnlineContacts/descr'
MESSENGER_CONTACTS_SEARCHUSERS_SEARCHFAIL_TITLE = '#messenger:messenger/contacts/searchUsers/searchFail/title'
MESSENGER_CONTACTS_SEARCHUSERS_SEARCHFAIL_DESCR = '#messenger:messenger/contacts/searchUsers/searchFail/descr'
MESSENGER_CONTACTS_SEARCHUSERS_SEARCHINPUTPROMPT = '#messenger:messenger/contacts/searchUsers/searchInputPrompt'
MESSENGER_CONTACTS_EXTERNALSEARCH_EMPTYSEARCHRESULTPROMPT = '#messenger:messenger/contacts/externalSearch/emptySearchResultPrompt'
MESSENGER_CONTACTS_CONTEXTMENU_EDITGROUP = '#messenger:messenger/contacts/contextmenu/editGroup'
MESSENGER_CONTACTS_CONTEXTMENU_REMOVEGROUP = '#messenger:messenger/contacts/contextmenu/removeGroup'
MESSENGER_CONTACTS_CONTEXTMENU_REMOVEFROMGROUP = '#messenger:messenger/contacts/contextmenu/removeFromGroup'
MESSENGER_CONTACTS_VIEW_ADDUSER_TITLE = '#messenger:messenger/contacts/view/addUser/title'
MESSENGER_CONTACTS_VIEW_SEARCHUSER_TITLE = '#messenger:messenger/contacts/view/searchUser/title'
MESSENGER_CONTACTS_VIEW_ADDUSER_BTNOK_LABEL = '#messenger:messenger/contacts/view/addUser/btnOk/Label'
MESSENGER_CONTACTS_VIEW_ADDUSER_BTNCANCEL_LABEL = '#messenger:messenger/contacts/view/addUser/btnCancel/Label'
MESSENGER_CONTACTS_VIEW_MANAGEGROUP_CREATEGROUP_SEARCHINPUTPROMPT = '#messenger:messenger/contacts/view/manageGroup/createGroup/searchInputPrompt'
MESSENGER_CONTACTS_VIEW_MANAGEGROUP_CREATEGROUP_MAINLABEL = '#messenger:messenger/contacts/view/manageGroup/createGroup/mainLabel'
MESSENGER_CONTACTS_VIEW_MANAGEGROUP_CREATEGROUP_BTNOK_LABEL = '#messenger:messenger/contacts/view/manageGroup/createGroup/btnOk/Label'
MESSENGER_CONTACTS_VIEW_MANAGEGROUP_CREATEGROUP_BTNCANCEL_LABEL = '#messenger:messenger/contacts/view/manageGroup/createGroup/btnCancel/Label'
MESSENGER_CONTACTS_VIEW_MANAGENOTE_INPUT_PROMPT = '#messenger:messenger/contacts/view/manageNote/input/prompt'
MESSENGER_CONTACTS_VIEW_MANAGENOTE_INPUT_TOOLTIP = '#messenger:messenger/contacts/view/manageNote/input/tooltip'
MESSENGER_CONTACTS_VIEW_CREATENOTE_MAINLABEL = '#messenger:messenger/contacts/view/createNote/mainLabel'
MESSENGER_CONTACTS_VIEW_CREATENOTE_BTNOK_LABEL = '#messenger:messenger/contacts/view/createNote/btnOk/Label'
MESSENGER_CONTACTS_VIEW_CREATENOTE_BTNCANCEL_LABEL = '#messenger:messenger/contacts/view/createNote/btnCancel/Label'
MESSENGER_CONTACTS_CREATENOTE_TOOLTIPS_BTNS_CLOSE_HEADER = '#messenger:messenger/contacts/createNote/tooltips/btns/close/header'
MESSENGER_CONTACTS_CREATENOTE_TOOLTIPS_BTNS_CLOSE_BODY = '#messenger:messenger/contacts/createNote/tooltips/btns/close/body'
MESSENGER_CONTACTS_CREATENOTE_TOOLTIPS_BTNS_OK_HEADER = '#messenger:messenger/contacts/createNote/tooltips/btns/ok/header'
MESSENGER_CONTACTS_CREATENOTE_TOOLTIPS_BTNS_OK_BODY = '#messenger:messenger/contacts/createNote/tooltips/btns/ok/body'
MESSENGER_CONTACTS_VIEW_EDITNOTE_MAINLABEL = '#messenger:messenger/contacts/view/editNote/mainLabel'
MESSENGER_CONTACTS_VIEW_EDITNOTE_BTNOK_LABEL = '#messenger:messenger/contacts/view/editNote/btnOk/Label'
MESSENGER_CONTACTS_VIEW_EDITNOTE_BTNCANCEL_LABEL = '#messenger:messenger/contacts/view/editNote/btnCancel/Label'
MESSENGER_CONTACTS_EDITNOTE_TOOLTIPS_BTNS_CLOSE_HEADER = '#messenger:messenger/contacts/editNote/tooltips/btns/close/header'
MESSENGER_CONTACTS_EDITNOTE_TOOLTIPS_BTNS_CLOSE_BODY = '#messenger:messenger/contacts/editNote/tooltips/btns/close/body'
MESSENGER_CONTACTS_EDITNOTE_TOOLTIPS_BTNS_OK_HEADER = '#messenger:messenger/contacts/editNote/tooltips/btns/ok/header'
MESSENGER_CONTACTS_EDITNOTE_TOOLTIPS_BTNS_OK_BODY = '#messenger:messenger/contacts/editNote/tooltips/btns/ok/body'
MESSENGER_CONTACTS_VIEW_EDITNOTE_USERNAME = '#messenger:messenger/contacts/view/editNote/userName'
MESSENGER_CONTACTS_VIEW_EDITNOTE_INPUT_TOOLTIP = '#messenger:messenger/contacts/view/editNote/input/tooltip'
MESSENGER_CONTACTS_VIEW_MANAGEGROUP_RENAMEGROUP_SEARCHINPUTPROMPT = '#messenger:messenger/contacts/view/manageGroup/renameGroup/searchInputPrompt'
MESSENGER_CONTACTS_VIEW_MANAGEGROUP_RENAMEGROUP_MAINLABEL = '#messenger:messenger/contacts/view/manageGroup/renameGroup/mainLabel'
MESSENGER_CONTACTS_VIEW_MANAGEGROUP_RENAMEGROUP_BTNOK_LABEL = '#messenger:messenger/contacts/view/manageGroup/renameGroup/btnOk/Label'
MESSENGER_CONTACTS_VIEW_MANAGEGROUP_RENAMEGROUP_BTNCANCEL_LABEL = '#messenger:messenger/contacts/view/manageGroup/renameGroup/btnCancel/Label'
MESSENGER_CONTACTS_VIEW_MANAGEGROUP_DELETEGROUP_MAINLABEL = '#messenger:messenger/contacts/view/manageGroup/deleteGroup/mainLabel'
MESSENGER_CONTACTS_VIEW_MANAGEGROUP_DELETEGROUP_BTNOK_LABEL = '#messenger:messenger/contacts/view/manageGroup/deleteGroup/btnOk/Label'
MESSENGER_CONTACTS_VIEW_MANAGEGROUP_DELETEGROUP_BTNCANCEL_LABEL = '#messenger:messenger/contacts/view/manageGroup/deleteGroup/btnCancel/Label'
MESSENGER_CONTACTS_VIEW_MANAGEGROUP_DELETEGROUP_DELETEWITHMEMBERS_LABEL = '#messenger:messenger/contacts/view/manageGroup/deleteGroup/DeleteWithMembers/Label'
MESSENGER_CONTACTS_VIEW_MANAGEGROUP_DELETEGROUP_GROUPNAME_LABEL = '#messenger:messenger/contacts/view/manageGroup/deleteGroup/groupName/Label'
MESSENGER_CONTACTS_VIEW_SETTINGS_MAINLABEL = '#messenger:messenger/contacts/view/settings/mainLabel'
MESSENGER_CONTACTS_VIEW_SETTINGS_BTNOK_LABEL = '#messenger:messenger/contacts/view/settings/btnOk/Label'
MESSENGER_CONTACTS_VIEW_SETTINGS_BTNCANCEL_LABEL = '#messenger:messenger/contacts/view/settings/btnCancel/Label'
MESSENGER_CONTACTS_VIEW_SETTINGS_ISSHOWOFFLINEUSERS_LABEL = '#messenger:messenger/contacts/view/settings/IsShowOfflineUsers/Label'
MESSENGER_CONTACTS_VIEW_SETTINGS_ISSHOWOTHERS_LABEL = '#messenger:messenger/contacts/view/settings/IsShowOthers/Label'
MESSENGER_CONTACTS_VIEW_SETTINGS_LABEL = '#messenger:messenger/contacts/view/settings/Label'
MESSENGER_CONTACTS_VIEW_SETTINGS_RECEIVEFROMFRIENDSONLY_LABEL = '#messenger:messenger/contacts/view/settings/ReceiveFromFriendsOnly/Label'
MESSENGER_CONTACTS_VIEW_SETTINGS_GROUPMESSAGES_LABEL = '#messenger:messenger/contacts/view/settings/GroupMessages/Label'
MESSENGER_CONTACTS_VIEW_ADDUSER_ERROR_GROUPALREADYEXIST = '#messenger:messenger/contacts/view/addUser/error/groupAlreadyExist'
CONTACTS_TOOLTIPS_BTNS_SEARCH_HEADER = '#messenger:contacts/tooltips/btns/search/header'
CONTACTS_TOOLTIPS_BTNS_SEARCH_BODY = '#messenger:contacts/tooltips/btns/search/body'
CONTACTS_TOOLTIPS_BTNS_ADDGROUP_HEADER = '#messenger:contacts/tooltips/btns/addGroup/header'
CONTACTS_TOOLTIPS_BTNS_ADDGROUP_BODY = '#messenger:contacts/tooltips/btns/addGroup/body'
CONTACTS_TOOLTIPS_BTNS_SETTINGS_HEADER = '#messenger:contacts/tooltips/btns/settings/header'
CONTACTS_TOOLTIPS_BTNS_SETTINGS_BODY = '#messenger:contacts/tooltips/btns/settings/body'
CONTACTS_TOOLTIPS_BTNS_EXTERNALSEARCH_HEADER = '#messenger:contacts/tooltips/btns/externalSearch/header'
CONTACTS_TOOLTIPS_BTNS_EXTERNALSEARCH_BODY = '#messenger:contacts/tooltips/btns/externalSearch/body'
CONTACTS_TOOLTIPS_SEARCHINPUT_CREDENTIALS = '#messenger:contacts/tooltips/searchInput/credentials'
CONTACTS_SEARCHVIEW_TOOLTIPS_BTNS_SEARCH_HEADER = '#messenger:contacts/searchView/tooltips/btns/search/header'
CONTACTS_SEARCHVIEW_TOOLTIPS_BTNS_SEARCH_BODY = '#messenger:contacts/searchView/tooltips/btns/search/body'
CONTACTS_SEARCHVIEW_TOOLTIPS_BTNS_ADD_HEADER = '#messenger:contacts/searchView/tooltips/btns/add/header'
CONTACTS_SEARCHVIEW_TOOLTIPS_BTNS_ADD_BODY = '#messenger:contacts/searchView/tooltips/btns/add/body'
CONTACTS_SEARCHVIEW_TOOLTIPS_BTNS_CLOSE_HEADER = '#messenger:contacts/searchView/tooltips/btns/close/header'
CONTACTS_SEARCHVIEW_TOOLTIPS_BTNS_CLOSE_BODY = '#messenger:contacts/searchView/tooltips/btns/close/body'
CONTACTS_SEARCHVIEW_TOOLTIPS_SEARCHDISABLED_BODY = '#messenger:contacts/searchView/tooltips/searchDisabled/body'
CONTACTS_SEARCHVIEW_TOOLTIPS_SEARCHCREDENTIALS_BODY = '#messenger:contacts/searchView/tooltips/searchCredentials/body'
CONTACTS_SETTINGSVIEW_TOOLTIPS_BTNS_APPLY_HEADER = '#messenger:contacts/settingsView/tooltips/btns/apply/header'
CONTACTS_SETTINGSVIEW_TOOLTIPS_BTNS_APPLY_BODY = '#messenger:contacts/settingsView/tooltips/btns/apply/body'
CONTACTS_SETTINGSVIEW_TOOLTIPS_BTNS_CLOSE_HEADER = '#messenger:contacts/settingsView/tooltips/btns/close/header'
CONTACTS_SETTINGSVIEW_TOOLTIPS_BTNS_CLOSE_BODY = '#messenger:contacts/settingsView/tooltips/btns/close/body'
CONTACTS_MANAGEGROUPVIEW_TOOLTIPS_INPUT = '#messenger:contacts/manageGroupView/tooltips/input'
CONTACTS_CREATEGROUPVIEW_TOOLTIPS_BTNS_APPLY_HEADER = '#messenger:contacts/createGroupView/tooltips/btns/apply/header'
CONTACTS_CREATEGROUPVIEW_TOOLTIPS_BTNS_APPLY_BODY = '#messenger:contacts/createGroupView/tooltips/btns/apply/body'
CONTACTS_CREATEGROUPVIEW_TOOLTIPS_BTNS_CLOSE_HEADER = '#messenger:contacts/createGroupView/tooltips/btns/close/header'
CONTACTS_CREATEGROUPVIEW_TOOLTIPS_BTNS_CLOSE_BODY = '#messenger:contacts/createGroupView/tooltips/btns/close/body'
CONTACTS_GROUPRENAMEVIEW_TOOLTIPS_BTNS_APPLY_HEADER = '#messenger:contacts/groupRenameView/tooltips/btns/apply/header'
CONTACTS_GROUPRENAMEVIEW_TOOLTIPS_BTNS_APPLY_BODY = '#messenger:contacts/groupRenameView/tooltips/btns/apply/body'
CONTACTS_GROUPRENAMEVIEW_TOOLTIPS_BTNS_CLOSE_HEADER = '#messenger:contacts/groupRenameView/tooltips/btns/close/header'
CONTACTS_GROUPRENAMEVIEW_TOOLTIPS_BTNS_CLOSE_BODY = '#messenger:contacts/groupRenameView/tooltips/btns/close/body'
CONTACTS_GROUPDELETEVIEW_TOOLTIPS_BTNS_APPLY_HEADER = '#messenger:contacts/groupDeleteView/tooltips/btns/apply/header'
CONTACTS_GROUPDELETEVIEW_TOOLTIPS_BTNS_APPLY_BODY = '#messenger:contacts/groupDeleteView/tooltips/btns/apply/body'
CONTACTS_GROUPDELETEVIEW_TOOLTIPS_BTNS_CLOSE_HEADER = '#messenger:contacts/groupDeleteView/tooltips/btns/close/header'
CONTACTS_GROUPDELETEVIEW_TOOLTIPS_BTNS_CLOSE_BODY = '#messenger:contacts/groupDeleteView/tooltips/btns/close/body'
CHAT_POSTBATTLESHARING_LABEL = '#messenger:chat/postBattleSharing/label'
CHAT_TOXICMESSAGES_BLOCKEDMESSAGE = '#messenger:chat/toxicMessages/blockedMessage'
BATTLE_TOOLTIPS_TOXICMESSAGE = '#messenger:battle/toolTips/toxicMessage'
PERSONALMISSIONS_COMPLETIONTOKENGAIN = '#messenger:personalMissions/completionTokenGain'
CHAT_PERSONALMESSAGE_WARNINGHEAD = '#messenger:chat/personalMessage/warningHead'
CHAT_PERSONALMESSAGE_WARNINGBODY = '#messenger:chat/personalMessage/warningBody'
CHAT_ERROR_ENUM = (CHAT_ERROR_NOT_ALLOWED,
CHAT_ERROR_USER_NOT_FOUND,
CHAT_ERROR_CANNOT_BAN_ONESELF,
CHAT_ERROR_WRONG_ARGS,
CHAT_ERROR_NOT_READY,
CHAT_ERROR_IS_BUSY,
CHAT_ERROR_GENERIC_ERROR)
SERVER_ERROR_USER_ROOM_CREATION_ENUM = (SERVER_ERROR_USER_ROOM_CREATION_NAME_EXISTS,
SERVER_ERROR_USER_ROOM_CREATION_LIMIT_COUNT,
SERVER_ERROR_USER_ROOM_CREATION_LIMIT_PASS,
SERVER_ERROR_USER_ROOM_CREATION_LIMIT_NAME,
SERVER_ERROR_USER_ROOM_CREATION_WRONG_SYMBOL,
SERVER_ERROR_USER_ROOM_CREATION_WRONG_WORD)
CHAT_ACTION_ENUM = (CHAT_ACTION_BROADCAST_UNIT_MESSAGE,
CHAT_ACTION_BROADCAST_BATTLE_MESSAGE,
CHAT_ACTION_FIND_USERS_BY_NAME,
CHAT_ACTION_GET_VOIP_CREDENTIALS)
CLIENT_ERROR_SHARED_ENUM = (CLIENT_ERROR_SHARED_GENERIC,
CLIENT_ERROR_SHARED_LOCKED,
CLIENT_ERROR_SHARED_NOT_CONNECTED,
CLIENT_ERROR_SHARED_WRONG_ARGS,
CLIENT_ERROR_SHARED_NOT_SUPPORTED,
CLIENT_ERROR_SHARED_DBID_INVALID,
CLIENT_ERROR_SHARED_NAME_EMPTY,
CLIENT_ERROR_SHARED_NAME_INVALID,
CLIENT_ERROR_SHARED_WAITING_BEFORE_START)
CLIENT_ERROR_CONTACT_ENUM = (CLIENT_ERROR_CONTACT_CONTACT_ITEM_NOT_FOUND,
CLIENT_ERROR_CONTACT_ROSTER_ITEM_EXISTS,
CLIENT_ERROR_CONTACT_ROSTER_ITEM_NOT_FOUND,
CLIENT_ERROR_CONTACT_FRIENDSHIP_APPROVED,
CLIENT_ERROR_CONTACT_FRIENDSHIP_CANCELED,
CLIENT_ERROR_CONTACT_FRIENDSHIP_RQ_PROCESS,
CLIENT_ERROR_CONTACT_BLOCK_ITEM_EXISTS,
CLIENT_ERROR_CONTACT_BLOCK_ITEM_NOT_FOUND,
CLIENT_ERROR_CONTACT_MUTED_ITEM_NOT_FOUND,
CLIENT_ERROR_CONTACT_GROUP_EMPTY,
CLIENT_ERROR_CONTACT_GROUP_EXISTS,
CLIENT_ERROR_CONTACT_GROUP_NOT_FOUND,
CLIENT_ERROR_CONTACT_GROUP_INVALID_NAME,
CLIENT_ERROR_CONTACT_NOTE_EMPTY,
CLIENT_ERROR_CONTACT_NOTE_NOT_FOUND)
CLIENT_ERROR_CHANNEL_ENUM = (CLIENT_ERROR_CHANNEL_NAME_EMPTY,
CLIENT_ERROR_CHANNEL_NAME_INVALID,
CLIENT_ERROR_CHANNEL_PASSWORD_EMPTY,
CLIENT_ERROR_CHANNEL_PASSWORD_INVALID,
CLIENT_ERROR_CHANNEL_RETYPE_EMPTY,
CLIENT_ERROR_CHANNEL_RETYPE_INVALID,
CLIENT_ERROR_CHANNEL_PASSWORDS_NOT_EQUALS,
CLIENT_ERROR_CHANNEL_LIMIT_REACHED,
CLIENT_ERROR_CHANNEL_NAME_ALREADY_EXISTS)
LISTVIEW_EMPTYLIST_ENUM = (LISTVIEW_EMPTYLIST_TEMPLATE,
LISTVIEW_EMPTYLIST_INFO,
LISTVIEW_EMPTYLIST_INVITE,
LISTVIEW_EMPTYLIST_OFFER)
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_ALL_ENUM = (SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_WIN_RANKEARNED,
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_WIN_RANKLOST,
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_WIN_STEPSEARNED,
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_WIN_STEPEARNED,
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_WIN_STEPLOST,
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_WIN_NOTHINGCHANGED,
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_WIN_RANKPOINT,
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_LOSE_RANKEARNED,
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_LOSE_RANKLOST,
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_LOSE_STEPSEARNED,
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_LOSE_STEPEARNED,
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_LOSE_STEPLOST,
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_LOSE_NOTHINGCHANGED,
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_LOSE_RANKPOINT,
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_SHIELD_SHIELD_LOSE,
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_SHIELD_SHIELD_LOSE_STEP)
CLIENT_ERROR_LIMIT_ENUM = (CLIENT_ERROR_LIMIT_MAX_ROSTER_ITEMS,
CLIENT_ERROR_LIMIT_MAX_GROUP,
CLIENT_ERROR_LIMIT_MAX_BLOCK_ITEMS,
CLIENT_ERROR_LIMIT_GROUP_INVALID_LENGTH,
CLIENT_ERROR_LIMIT_NOTE_INVALID_LENGTH,
CLIENT_ERROR_LIMIT_CHANNEL_INVALID_LENGTH,
CLIENT_ERROR_LIMIT_PWD_INVALID_LENGTH)
CLIENT_ACTION_ENUM = (CLIENT_ACTION_ADD_FRIEND,
CLIENT_ACTION_REMOVE_FRIEND,
CLIENT_ACTION_ADD_IGNORED,
CLIENT_ACTION_REMOVE_IGNORED,
CLIENT_ACTION_SET_MUTE,
CLIENT_ACTION_UNSET_MUTE,
CLIENT_ACTION_ADD_GROUP,
CLIENT_ACTION_CHANGE_GROUP,
CLIENT_ACTION_RQ_FRIENDSHIP,
CLIENT_ACTION_APPROVE_FRIENDSHIP,
CLIENT_ACTION_CANCEL_FRIENDSHIP,
CLIENT_ACTION_SEND_MESSAGE,
CLIENT_ACTION_SET_NOTE,
CLIENT_ACTION_REMOVE_NOTE,
CLIENT_ACTION_RQ_HISTORY,
CLIENT_ACTION_CREATE_USER_ROOM,
CLIENT_ACTION_JOIN_USER_ROOM,
CLIENT_ACTION_LEAVE_USER_ROOM,
CLIENT_ACTION_SEARCH_USER_ROOM,
CLIENT_ACTION_FIND_USERS_BY_PREFIX)
SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_SHIELD_ENUM = (SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_SHIELD_SHIELD_LOSE, SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_SHIELD_SHIELD_LOSE_STEP)
COMMAND_SUCCESS_ENUM = (COMMAND_SUCCESS_USERBAN, COMMAND_SUCCESS_USERUNBAN)
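    # The generated helpers below all share one pattern: build the full
    # '#messenger:...' key from a raw suffix, validate it against the matching
    # *_ENUM tuple above, and return None (after logging) for unknown keys.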
@classmethod
def client_error_shared(cls, key0):
outcome = '#messenger:client_error/shared/{}'.format(key0)
if outcome not in cls.CLIENT_ERROR_SHARED_ENUM:
LOG_WARNING('Localization key "{}" not found'.format(outcome))
return None
else:
return outcome
@classmethod
def client_error_contact(cls, key0):
outcome = '#messenger:client_error/contact/{}'.format(key0)
if outcome not in cls.CLIENT_ERROR_CONTACT_ENUM:
LOG_WARNING('Localization key "{}" not found'.format(outcome))
return None
else:
return outcome
@classmethod
def client_error_channel(cls, key0):
outcome = '#messenger:client_error/channel/{}'.format(key0)
if outcome not in cls.CLIENT_ERROR_CHANNEL_ENUM:
LOG_WARNING('Localization key "{}" not found'.format(outcome))
return None
else:
return outcome
@classmethod
def client_error_limit(cls, key0):
outcome = '#messenger:client_error/limit/{}'.format(key0)
if outcome not in cls.CLIENT_ERROR_LIMIT_ENUM:
LOG_WARNING('Localization key "{}" not found'.format(outcome))
return None
else:
return outcome
@classmethod
def server_error_user_room_creation(cls, key0):
outcome = '#messenger:server_error/user_room_creation/{}'.format(key0)
if outcome not in cls.SERVER_ERROR_USER_ROOM_CREATION_ENUM:
LOG_WARNING('Localization key "{}" not found'.format(outcome))
return None
else:
return outcome
@classmethod
def client_action(cls, key0):
outcome = '#messenger:client_action/{}'.format(key0)
if outcome not in cls.CLIENT_ACTION_ENUM:
LOG_WARNING('Localization key "{}" not found'.format(outcome))
return None
else:
return outcome
@classmethod
def chat_error(cls, key0):
outcome = '#messenger:chat_error/{}'.format(key0)
if outcome not in cls.CHAT_ERROR_ENUM:
LOG_WARNING('Localization key "{}" not found'.format(outcome))
return None
else:
return outcome
@classmethod
def chat_action(cls, key0):
outcome = '#messenger:chat_action/{}'.format(key0)
if outcome not in cls.CHAT_ACTION_ENUM:
LOG_WARNING('Localization key "{}" not found'.format(outcome))
return None
else:
return outcome
@classmethod
def command_success(cls, key0):
outcome = '#messenger:command/success/{}'.format(key0)
if outcome not in cls.COMMAND_SUCCESS_ENUM:
LOG_WARNING('Localization key "{}" not found'.format(outcome))
return None
else:
return outcome
@classmethod
def listview_emptylist(cls, key0):
outcome = '#messenger:listView/emptyList/{}'.format(key0)
if outcome not in cls.LISTVIEW_EMPTYLIST_ENUM:
LOG_WARNING('Localization key "{}" not found'.format(outcome))
return None
else:
return outcome
@classmethod
def rankedStateChange(cls, isWon, key0):
outcome = '#messenger:serviceChannelMessages/battleResults/rankedState/{}/{}'.format(isWon, key0)
if outcome not in cls.SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_ALL_ENUM:
LOG_WARNING('Localization key "{}" not found'.format(outcome))
return None
else:
return outcome
@classmethod
def rankedShieldStateChange(cls, key0):
outcome = '#messenger:serviceChannelMessages/battleResults/rankedState/shield/{}'.format(key0)
if outcome not in cls.SERVICECHANNELMESSAGES_BATTLERESULTS_RANKEDSTATE_SHIELD_ENUM:
LOG_WARNING('Localization key "{}" not found'.format(outcome))
return None
else:
return outcome
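# Usage sketch (illustrative; assumes this generated enum class is exposed as
# MESSENGER, as in the game client's gui.Scaleform.locale package, and that
# helpers.i18n.makeString resolves '#messenger:...' keys to localized text):
#
#     from helpers import i18n
#     key = MESSENGER.client_error_shared('NOT_CONNECTED')
#     if key is not None:
#         text = i18n.makeString(key)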
| [
"[email protected]"
] | |
b83333bde4c5f401bfdfa13b41e2fcfc9a51c187 | 42fa1862effc3e494859904b76c43ce2bcd623a0 | /low_high_band_pass_filtering.py | f4ad03423a9a9bded4c998b4dc162316a4c715fa | [] | no_license | PaulHalloran/desktop_python_scripts | 3e83aedf3e232da610b5f7477e4d7e8fb0253f99 | 325e923527278a5c3e9ab8c978f29b2816dab087 | refs/heads/master | 2021-01-01T19:52:06.828997 | 2015-06-27T21:14:10 | 2015-06-27T21:14:10 | 38,155,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,945 | py | import iris
import matplotlib.pyplot as plt
import scipy.signal  # explicit submodule import: `import scipy` alone does not expose scipy.signal
import iris.quickplot as qplt
import numpy as np
'''
We will use the following functions, so make sure they are available
'''
def butter_bandpass(lowcut, cutoff):
    # 2nd-order Butterworth design. `lowcut` is the cutoff timescale in
    # timesteps (years for annual data) and is converted to the normalised
    # frequency 1/lowcut; `cutoff` is scipy's btype ('low' or 'high').
    order = 2
    low = 1.0 / lowcut
    b, a = scipy.signal.butter(order, low, btype=cutoff, analog=False)
    return b, a
def low_pass_filter(cube,limit_years):
b1, a1 = butter_bandpass(limit_years, 'low')
output = scipy.signal.filtfilt(b1, a1, cube,axis = 0)
return output
def high_pass_filter(cube,limit_years):
b1, a1 = butter_bandpass(limit_years, 'high')
output = scipy.signal.filtfilt(b1, a1, cube,axis = 0)
return output
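# Minimal self-contained check of the helpers above (illustrative only):
# filtfilt works on plain numpy arrays, so no iris cube is needed. Mixing a
# slow (40-step) and a fast (3-step) sine, the low-pass should keep mostly the
# slow component and the high-pass mostly the fast one.
def _demo_filters():
    t = np.arange(200.0)
    mixed = np.sin(2 * np.pi * t / 40.0) + np.sin(2 * np.pi * t / 3.0)
    slow = low_pass_filter(mixed, 10.0)   # multi-decadal part survives
    fast = high_pass_filter(mixed, 10.0)  # interannual part survives
    return slow, fast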
'''
Initially just reading in a dataset to work with, and averaging lats and longs to give us a timeseries to plot - you can obviously swap in your timeseries
'''
file = '/media/usb_external1/cmip5/tas_regridded/MPI-ESM-P_tas_piControl_regridded.nc'
cube = iris.load_cube(file)
timeseries1 = cube.collapsed(['latitude','longitude'],iris.analysis.MEAN)
'''
Low-pass: filtering out everything happening on timescales shorter than X years (where X is called lower_limit_years)
'''
lower_limit_years = 10.0
output_cube = cube.copy()
output_cube.data = low_pass_filter(cube.data,lower_limit_years)
timeseries2 = output_cube.collapsed(['latitude','longitude'],iris.analysis.MEAN)
plt.close('all')
qplt.plot(timeseries1 - np.mean(timeseries1.data),'r',alpha = 0.5,linewidth = 2)
qplt.plot(timeseries2 - np.mean(timeseries2.data),'g',alpha = 0.5,linewidth = 2)
plt.show(block = True)
'''
High-pass: filtering out everything happening on timescales longer than X years (where X is called upper_limit_years)
'''
upper_limit_years = 5.0
output_cube = cube.copy()
output_cube.data = high_pass_filter(cube.data,upper_limit_years)
timeseries3 = output_cube.collapsed(['latitude','longitude'],iris.analysis.MEAN)
plt.close('all')
qplt.plot(timeseries1 - np.mean(timeseries1.data),'r',alpha = 0.5,linewidth = 2)
qplt.plot(timeseries3 - np.mean(timeseries3.data),'b',alpha = 0.5,linewidth = 2)
plt.show(block = True)
'''
Band-pass: filtering out everything on timescales longer than X years (upper_limit_years) and shorter than Y years (lower_limit_years), i.e. keeping only the band in between
'''
upper_limit_years = 50.0
output_cube = cube.copy()
output_cube.data = high_pass_filter(cube.data,upper_limit_years)
lower_limit_years = 5.0
output_cube.data = low_pass_filter(output_cube.data,lower_limit_years)
timeseries4 = output_cube.collapsed(['latitude','longitude'],iris.analysis.MEAN)
plt.close('all')
qplt.plot(timeseries1 - np.mean(timeseries1.data),'r',alpha = 0.5,linewidth = 2)
qplt.plot(timeseries4 - np.mean(timeseries4.data),'y',alpha = 0.5,linewidth = 2)
plt.show(block = True)
'''
Hopefully this tells you everything you need. Just be aware that strange things can happen at the ends of the timeseries (just check it is doing something sensible)
'''
| [
"[email protected]"
] | |
a031ad5ed05e689e5acac722ec476741ddb709b9 | 55d4e10ff2c71ac0f0042bda930d7e3dcc7a1a76 | /freezing/web/scripts/fix_photo_urls.py | 6950149827302ee3339431889337c9bc2382c8c6 | [] | no_license | freezingsaddles/freezing-legacy | 28f813690b0c81d6a075fe2fc4ebe6d644ab7417 | d2c0a445e189e0a359fa134258e0d5e14bd9de56 | refs/heads/master | 2021-05-09T11:24:56.219056 | 2018-01-23T16:25:57 | 2018-01-23T16:25:57 | 118,989,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,024 | py | from instagram import InstagramAPIError
from freezing.model import meta
from freezing.model.orm import RidePhoto
from freezing.web.autolog import log
from freezing.web.scripts import BaseCommand
from freezing.web.utils.insta import configured_instagram_client
class FixPhotoUrls(BaseCommand):
@property
def name(self):
return 'sync-photos'
def build_parser(self):
parser = super(FixPhotoUrls, self).build_parser()
#
# parser.add_option("--rewrite", action="store_true", dest="rewrite", default=False,
# help="Whether to rewrite the ride photo data already in database.")
return parser
def execute(self, options, args):
# if options.rewrite:
# meta.engine.execute(model.RidePhoto.__table__.delete())
# meta.session_factory().query(model.Ride).update({"photos_fetched": False})
        # Only photos that never received a thumbnail URL need fixing.
        q = meta.session_factory().query(RidePhoto)
        q = q.filter_by(img_t=None)
        insta_client = configured_instagram_client()
        del_q = []  # photo ids to purge (e.g. owner accounts gone private)
for ride_photo in q:
self.logger.debug("Updating URLs for photo {}".format(ride_photo))
try:
media = insta_client.media(ride_photo.id)
ride_photo.img_l = media.get_standard_resolution_url()
ride_photo.img_t = media.get_thumbnail_url()
meta.session_factory().commit()
except InstagramAPIError as e:
if e.status_code == 400:
self.logger.error("Skipping photo {}; user is set to private".format(ride_photo))
del_q.append(ride_photo.id)
else:
self.logger.exception("Error fetching instagram photo {0} (skipping)".format(ride_photo))
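        # Photos whose owners went private are purged in a single bulk DELETE,
        # issued only once the iteration above has finished.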
if del_q:
meta.engine.execute(RidePhoto.__table__.delete().where(RidePhoto.id.in_(del_q)))
meta.session_factory().commit()
def main():
FixPhotoUrls().run()
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
b867f04b4d5338ab74beb5d363063aedb323f8bc | 9b2255e0a474555d8a4d90f586e280d40224a181 | /apps/navigation/api.py | c97f598f1ff10310113981f52a3913081ccd3fed | [] | no_license | rogeriofalcone/redirector | 85f496f7c3a3c755b2d9f86f90d25ace783842e4 | 8255be80ce4e3245317864dcc580a1ef68a7c244 | refs/heads/master | 2020-04-08T07:03:19.053680 | 2012-08-12T19:13:35 | 2012-08-12T19:13:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,083 | py | import copy
object_navigation = {}
multi_object_navigation = {}
model_list_columns = {}
sidebar_templates = {}
top_menu_entries = []
def register_multi_item_links(src, links, menu_name=None):
"""
    Register a multiple-item action to be displayed in the
generic list template
"""
multi_object_navigation.setdefault(menu_name, {})
if hasattr(src, '__iter__'):
for one_src in src:
multi_object_navigation[menu_name].setdefault(one_src, {'links': []})
multi_object_navigation[menu_name][one_src]['links'].extend(links)
else:
multi_object_navigation[menu_name].setdefault(src, {'links': []})
multi_object_navigation[menu_name][src]['links'].extend(links)
def register_links(src, links, menu_name=None, position=None):
"""
    Associate a link with a model, a view, or a URL
"""
object_navigation.setdefault(menu_name, {})
if hasattr(src, '__iter__'):
for one_src in src:
object_navigation[menu_name].setdefault(one_src, {'links': []})
if position is not None:
for link in reversed(links):
object_navigation[menu_name][one_src]['links'].insert(position, link)
else:
object_navigation[menu_name][one_src]['links'].extend(links)
else:
object_navigation[menu_name].setdefault(src, {'links': []})
if position is not None:
for link in reversed(links):
object_navigation[menu_name][src]['links'].insert(position, link)
else:
object_navigation[menu_name][src]['links'].extend(links)
def register_top_menu(name, link, children_views=None,
children_path_regex=None, children_view_regex=None,
position=None):
"""
Register a new menu entry for the main menu displayed at the top
of the page
"""
entry = {'link': link, 'name': name}
if children_views:
entry['children_views'] = children_views
if children_path_regex:
entry['children_path_regex'] = children_path_regex
if children_view_regex:
entry['children_view_regex'] = children_view_regex
if position is not None:
entry['position'] = position
top_menu_entries.insert(position, entry)
else:
length = len(top_menu_entries)
entry['position'] = length
top_menu_entries.append(entry)
sort_menu_entries()
return entry
def sort_menu_entries():
global top_menu_entries
top_menu_entries = sorted(top_menu_entries, key=lambda k: (k['position'] < 0, k['position']))
def register_model_list_columns(model, columns):
"""
Define which columns will be displayed in the generic list template
for a given model
"""
model_list_columns.setdefault(model, [])
model_list_columns[model].extend(columns)
def register_sidebar_template(source_list, template_name):
for source in source_list:
sidebar_templates.setdefault(source, [])
sidebar_templates[source].append(template_name)
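# Usage sketch (illustrative; the 'document_*' names and link dicts are made-up
# placeholders -- callers supply their own views, models and link definitions):
#
#     register_top_menu('documents', link={'text': 'Documents', 'view': 'document_list'})
#     register_links(['document_list'], [{'text': 'Create', 'view': 'document_create'}])
#     register_model_list_columns(Document, [{'name': 'Title', 'attribute': 'title'}])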
| [
"[email protected]"
] | |
fbe5e008b57babb92e2dd06f1be7edb60f2f4dac | 299e5934971f9de638692e2667d6e270bcab5cbd | /13.罗马数字转整数.py | 6aa2f8131eac1ddc741f6bd0b4331f804a96ec00 | [] | no_license | ycj123/Leetcode-Python3 | 14bcd6c9f4d26191d5d40c77e923df4d0be4c0e5 | 1593960cdf2655ef1dcf68e3517e7121670c6ac3 | refs/heads/master | 2022-12-16T23:12:19.326702 | 2020-09-18T00:17:45 | 2020-09-18T00:17:45 | 295,302,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 757 | py | #
# @lc app=leetcode.cn id=13 lang=python3
#
# [13] Roman to Integer
#
# Greedily match two-character tokens (subtractive pairs like 'IV') first
# @lc code=start
class Solution:
def romanToInt(self, s: str) -> int:
helper = {
'I':1,
'V':5,
'IV':4,
'X':10,
'IX':9,
'L':50,
'C':100,
'XL':40,
'XC':90,
'D':500,
'M':1000,
'CD':400,
'CM':900
}
length = len(s)
i = 0
res = 0
while i < length:
if s[i:i+2] in helper:
res += helper[s[i:i+2]]
i+=2
else:
res += helper[s[i]]
i+=1
return res
# @lc code=end
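# Quick self-check, kept outside the LeetCode submission markers:
# 'MCMXCIV' -> 1000 + 900 + 90 + 4 = 1994
if __name__ == '__main__':
    assert Solution().romanToInt('MCMXCIV') == 1994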
| [
"[email protected]"
] | |
9ebb2d8c25c8e4c6f8e7973204a6127aa4844a89 | 40530b1d97c237944e4a01826f2e7a746d98acc0 | /modifydevicetree.py | e555e1cd8a21521c84bf59a3b2062e527d8e0050 | [
"CC0-1.0"
] | permissive | Hackveda/XNUQEMUScripts | e0270b27a3cb889822a8429a0842e548507a34c8 | d5492cbffe2c1ce29026901b833c0d9cd295f37b | refs/heads/master | 2021-01-05T22:40:01.532712 | 2018-07-21T17:07:54 | 2018-07-21T17:07:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,320 | py | import sys
from devicetreefromim4p import *
#removeCompatibles = [b"aic,1", b"pmgr1,t8015", b"wdt,t8015\x00wdt,s5l8960x", b"gpio,t8015\x00gpio,s5l8960x", b"sochot,s5l8955x", b"tempsensor,t8015", b"aes,s8000"]
keepCompatibles = [b"uart-1,samsung", b"D22AP\x00iPhone10,3\x00AppleARM"]
removeNames = [b"wdt", b"backlight"]
removeDeviceTypes = [b"wdt", b"backlight"]
# Binary layout per XNU's pexpert/pexpert/device_tree.h: a node is
# (u32 nProperties, u32 nChildren), then its properties, then its child nodes.
# Each property is a 32-byte NUL-padded name, a u32 length (top bit is a flag,
# cleared by the walker below) and the value padded to a 4-byte boundary.
# All integers are little-endian.
def u32(a, i):
return a[i] | a[i+1] << 8 | a[i+2] << 16 | a[i+3] << 24
def w32(a, i, v):
a[i] = v & 0xff
a[i+1] = (v >> 8) & 0xff
a[i+2] = (v >> 16) & 0xff
a[i+3] = (v >> 24) & 0xff
def writenode(nodebytes, nodeoffset, nodedepth):
nProperties = u32(nodebytes, nodeoffset)
nChildren = u32(nodebytes, nodeoffset + 4)
ptr = 8
for p in range(nProperties):
ptr += writeproperty(nodebytes, nodeoffset + ptr, nodedepth)
for c in range(nChildren):
ptr += writenode(nodebytes, nodeoffset + ptr, nodedepth + 1)
return ptr
def padStringNull(instr, lenstr=32):
return instr.encode("ascii") + b"\x00"*(lenstr - len(instr))
def writeproperty(nodebytes, nodeoffset, nodedepth):
kPropNameLength = 32
propname = nodebytes[nodeoffset:nodeoffset + kPropNameLength].rstrip(b"\x00").decode("utf-8")
ptr = kPropNameLength
proplen = u32(nodebytes, nodeoffset + ptr) & 0x7fffffff
if u32(nodebytes, nodeoffset + ptr) != proplen:
w32(nodebytes, nodeoffset + ptr, proplen)
ptr += 4
if propname == "timebase-frequency" and u32(nodebytes, nodeoffset + ptr) == 0:
print("setting timebase")
w32(nodebytes, nodeoffset + ptr, (1000 * 1000 * 1000)//16)
if propname == "random-seed":
print("setting random seed")
w32(nodebytes, nodeoffset + ptr, 0xdeadf00d)
if propname == "dram-vendor-id":
print("Removing dram-vendor-id")
nodebytes[nodeoffset:nodeoffset + kPropNameLength] = padStringNull("chip-epoch")
nodebytes[nodeoffset + ptr:nodeoffset + ptr + proplen] = b"\x00" * proplen
if propname == "display-corner-radius":
print("Removing display-corner-radius")
nodebytes[nodeoffset:nodeoffset + kPropNameLength] = padStringNull("security-domain")
nodebytes[nodeoffset + ptr:nodeoffset + ptr + proplen] = b"\x00" * proplen
if propname == "compatible" and not nodebytes[nodeoffset+ptr:nodeoffset+ptr+proplen-1] in keepCompatibles:
print("removing compatible for", nodebytes[nodeoffset+ptr:nodeoffset+ptr+proplen-1].decode("ascii"))
nodebytes[nodeoffset+ptr:nodeoffset + ptr + proplen - 1] = b"~" * (proplen - 1)
if propname == "name" and nodebytes[nodeoffset+ptr:nodeoffset+ptr+proplen-1] in removeNames:
print("removing name for", nodebytes[nodeoffset+ptr:nodeoffset+ptr+proplen-1].decode("ascii"))
nodebytes[nodeoffset+ptr] = ord("~")
if propname == "device_type" and nodebytes[nodeoffset+ptr:nodeoffset+ptr+proplen-1] in removeDeviceTypes:
print("removing device type for", nodebytes[nodeoffset+ptr:nodeoffset+ptr+proplen-1].decode("ascii"))
nodebytes[nodeoffset+ptr] = ord("~")
ptr += proplen
ptr = (ptr + 0x3) & ~0x3 #round up to nearest 4
return ptr
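def _demo_minimal_node():
    # Illustrative self-check: build a one-property, zero-child node in memory
    # with a zeroed 'timebase-frequency' value and walk it with writenode(),
    # which should patch the value to (1 GHz / 16) and consume every byte.
    import struct
    prop = padStringNull("timebase-frequency") + struct.pack("<II", 4, 0)
    node = bytearray(struct.pack("<II", 1, 0) + prop)
    consumed = writenode(node, 0, 0)
    assert consumed == len(node)
    assert u32(node, 8 + 32 + 4) == (1000 * 1000 * 1000) // 16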
def printone(filename, outname):
with open(filename, "rb") as infile:
indata = infile.read()
devicetreebytes = bytearray(devicetreefromim4p(indata))
size = writenode(devicetreebytes, 0, 0)
with open(outname, "wb") as outfile:
outfile.write(devicetreebytes[:size])
if __name__ == "__main__":
printone(sys.argv[1], sys.argv[2])
| [
"[email protected]"
] | |
95509adab1d7fd1668388a55b0f78c9747fc7365 | 23744d0acc0119d0222c003179335af5b3259a67 | /DOTA_configs/_base_/datasets/NV10.py | 5ce6a3f114d7cd51496ba0e3c9b8a205c7f78373 | [
"Apache-2.0"
] | permissive | yawudede/mmd_rs | f3db3c0288eea67b78fdde09600d9b1ef4e60478 | a0a468933e69bf90d9bc71c37c8626d8dda7fd24 | refs/heads/master | 2023-03-18T01:00:11.029808 | 2021-03-23T03:41:14 | 2021-03-23T03:41:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,976 | py | dataset_type = 'CocoDataset'
data_root = 'data/NWPU_VHR_10/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(800, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(800, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
cat_name_list = ['airplane', 'ship', 'storage tank', 'baseball diamond', 'tennis court',
'basketball court', 'ground track field', 'harbor', 'bridge', 'vehicle']
num_classes = len(cat_name_list) # 10
data = dict(
samples_per_gpu=4,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'train_val_coco_ann.json',
img_prefix=data_root + 'images',
classes=cat_name_list,
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'test_coco_ann.json',
img_prefix=data_root + 'images',
classes=cat_name_list,
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'test_coco_ann.json',
img_prefix=data_root + 'images',
classes=cat_name_list,
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
max_bbox_per_img = 100
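# Usage sketch (illustrative; assumes mmdetection 2.x, where this base config
# is consumed through mmcv's Config loader rather than imported directly):
#
#     from mmcv import Config
#     from mmdet.datasets import build_dataset
#     cfg = Config.fromfile('DOTA_configs/_base_/datasets/NV10.py')
#     train_dataset = build_dataset(cfg.data.train)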
| [
"[email protected]"
] | |
c98407abf255ab9a3130d1de2aec9835447aed79 | d1ea752332028ad6e621b68c32496cad5ae33fa4 | /backend/testing_module_options_1/urls.py | 89bfdd81e27b4afcd715afbaecd9cc0510ba8d08 | [] | no_license | crowdbotics-apps/testing-module-options-1 | 872d24a6dd8e34c0b73b61e066b3187f2a4adfab | 1a061e46115787df3b1013aeb5282891f825b473 | refs/heads/master | 2023-07-27T02:01:22.644918 | 2021-09-16T14:50:17 | 2021-09-16T14:50:17 | 407,200,684 | 0 | 0 | null | 2021-09-16T14:51:11 | 2021-09-16T14:40:27 | Python | UTF-8 | Python | false | false | 2,291 | py | """testing_module_options_1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "testing module options"
admin.site.site_title = "testing module options Admin Portal"
admin.site.index_title = "testing module options Admin"
# swagger
api_info = openapi.Info(
title="testing module options API",
default_version="v1",
description="API documentation for testing module options App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
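# Keep this catch-all last: it matches every remaining path and serves the
# single-page app's index.html, so any route added after it would be unreachable.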
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
| [
"[email protected]"
] | |
5c1df1d7fc9d51ab054ea9607fffab52a876136c | 55a729cee20bb37f9a2bab323b1afb148e0768d2 | /pyigm/cgm/cgmsurvey.py | d2d1529f9d090035854331d413db2ec16708549b | [] | no_license | SunilSimha/pyigm | 502b1d46533dfec60dacfa41498b38bcc9622298 | cd09dedce40cf959ed15eaecd4f01f05856dd9f2 | refs/heads/master | 2021-01-25T06:24:29.282634 | 2017-06-01T18:54:39 | 2017-06-01T18:54:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,740 | py | """ Classes for CGM Surveys
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import os
import warnings
import pdb
import json, io
from astropy.table import Table, Column
from astropy.coordinates import SkyCoord
from pyigm.utils import lst_to_array
from pyigm.surveys.igmsurvey import GenericIGMSurvey
from pyigm.cgm.cgm import CGMAbsSys
try:
    basestring
except NameError:  # For Python 3
    basestring = str
    unicode = str  # to_json_tarball() below relies on this Python 2 name too
class CGMAbsSurvey(object):
"""A CGM Survey class in absorption
Attributes
----------
survey : str, optional
Survey name
ref : str, optional
Reference(s)
"""
@classmethod
def from_tarball(cls, tfile, debug=False, **kwargs):
""" Load the COS-Halos survey from a tarball of JSON files
Parameters
----------
tfile : str
Returns
-------
"""
import tarfile
import json
from linetools.lists.linelist import LineList
llist = LineList('ISM')
slf = cls(**kwargs)
# Load
tar = tarfile.open(tfile)
for kk, member in enumerate(tar.getmembers()):
if '.' not in member.name:
print('Skipping a likely folder: {:s}'.format(member.name))
continue
# Debug
if debug and (kk == 5):
break
# Extract
f = tar.extractfile(member)
tdict = json.load(f)
# Generate
cgmsys = CGMAbsSys.from_dict(tdict, chk_vel=False, chk_sep=False, chk_data=False,
use_coord=True, use_angrho=True,
linelist=llist, **kwargs)
slf.cgm_abs.append(cgmsys)
tar.close()
# Return
return slf
def __init__(self, survey='', ref='', **kwargs):
"""
Parameters
----------
survey : str, optional
ref : str, optional
Returns
-------
"""
# Name of survey
self.survey = survey
self.ref = ref
self.cgm_abs = []
self.mask = None
@property
def nsys(self):
""" Number of systems
Returns
-------
nsys : int
"""
return len(self.cgm_abs)
def to_json_tarball(self, outfil):
""" Generates a gzipped tarball of JSON files, one per system
Parameters
----------
outfil : str
"""
import subprocess
tmpdir = 'CGM_JSON'
try:
os.mkdir(tmpdir)
except OSError:
pass
jfiles = []
# Loop on systems
for cgm_abs in self.cgm_abs:
# Dict
cdict = cgm_abs.to_dict()
# Temporary JSON file
json_fil = tmpdir+'/'+cgm_abs.name+'.json'
jfiles.append(json_fil)
with io.open(json_fil, 'w', encoding='utf-8') as f:
#try:
f.write(unicode(json.dumps(cdict, sort_keys=True, indent=4,
separators=(',', ': '))))
# Tar
warnings.warn("Modify to write directly to tar file")
subprocess.call(['tar', '-czf', outfil, tmpdir])
print('Wrote: {:s}'.format(outfil))
# Clean up
for jfile in jfiles:
os.remove(jfile)
os.rmdir(tmpdir)
def ion_tbl(self, Zion, fill_ion=True):
""" Generate a Table of Ionic column densities for an input ion
Parameters
----------
Zion : tuple or str
fill_ion : bool, optional
Fill each ionN table in the survey (a bit slow)
Returns
-------
tbl : astropy.Table
"""
from linetools.abund.ions import name_to_ion
if isinstance(Zion, basestring):
Zion = name_to_ion(Zion)
# Generate dummy IGMSurvey
dumb = GenericIGMSurvey()
names = []
for cgmabs in self.cgm_abs:
if fill_ion:
cgmabs.igm_sys.fill_ionN()
if cgmabs.igm_sys._ionN is not None:
dumb._abs_sys.append(cgmabs.igm_sys)
# Names
names.append(cgmabs.name)
# Run ions
tbl = dumb.ions(Zion)
# Add CGM name
tbl.add_column(Column(names, name='cgm_name'))
# Return
return tbl
def trans_tbl(self, inp, fill_ion=True):
""" Generate a Table of Data on a given transition, e.g. SiIII 1206
Parameters
----------
inp : str or Quantity
str -- Name of the transition, e.g. 'CII 1334'
Quantity -- Rest wavelength of the transition, e.g. 1334.53*u.AA to 0.01 precision
Returns
-------
tbl : astropy.Table
"""
# Generate dummy IGMSurvey
dumb = GenericIGMSurvey()
names = []
for cgmabs in self.cgm_abs:
dumb._abs_sys.append(cgmabs.igm_sys)
# Names
names.append(cgmabs.name)
# Run ions
tbl = dumb.trans(inp)
# Add CGM name
tbl.add_column(Column(names, name='cgm_name'))
# Return
return tbl
def abs_kin(self, lbl):
""" Create a Table of the Kinematic info
Parameters
----------
lbl : string
Label for the Kinematics dict
TODO:
Add wrest!!
"""
from astropy.table import Table
keys = self.cgm_abs[0].igm_sys.kin[lbl].keys
t = Table(names=keys,
dtype=self.cgm_abs[0].igm_sys.kin[lbl].key_dtype)
for cgm_abs in self.cgm_abs:
try:
kdict = cgm_abs.igm_sys.kin[lbl]
except KeyError:
# No dict. Filling in zeros
row = [0 for key in keys]
t.add_row( row )
continue
# Filling
row = [kdict[key] for key in keys]
t.add_row( row )
return t
def __getattr__(self, k):
# Try Self first
try:
lst = [getattr(cgm_abs, k) for cgm_abs in self.cgm_abs]
except AttributeError:
# Galaxy?
try:
lst = [getattr(cgm_abs.galaxy, k) for cgm_abs in self.cgm_abs]
except AttributeError:
# Try AbsLine_Sys last
try:
lst = [getattr(cgm_abs.igm_sys, k) for cgm_abs in self.cgm_abs]
except AttributeError:
print('cgm.core: Attribute not found!')
pdb.set_trace()
# Special cases
if k == 'coord':
ra = [coord.fk5.ra.value for coord in lst]
dec = [coord.fk5.dec.value for coord in lst]
lst = SkyCoord(ra=ra, dec=dec, unit='deg')
if self.mask is not None:
return lst[self.mask]
else:
return lst
elif k == 'scoord': # Sightline coordinates
lst = [getattr(cgm_abs.igm_sys, 'coord') for cgm_abs in self.cgm_abs]
ra = [coord.fk5.ra.value for coord in lst]
dec = [coord.fk5.dec.value for coord in lst]
lst = SkyCoord(ra=ra, dec=dec, unit='deg')
if self.mask is not None:
return lst[self.mask]
else:
return lst
# Return array
return lst_to_array(lst, mask=self.mask)
def __repr__(self):
str1 = '<CGM_Survey: {:s} nsys={:d}, ref={:s}>\n'.format(self.survey, self.nsys, self.ref)
for ii in range(self.nsys):
str1 = str1+self.cgm_abs[ii].igm_sys.__repr__()+'\n'
return str1
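# Usage sketch (illustrative; 'cos-halos.tar.gz' is a placeholder for any
# tarball written by to_json_tarball()):
#
#     survey = CGMAbsSurvey.from_tarball('cos-halos.tar.gz', survey='COS-Halos')
#     print(survey.nsys)
#     si3 = survey.trans_tbl('SiIII 1206')  # per-system data for one transition
#     hi = survey.ion_tbl((1, 1))           # HI column densities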
| [
"[email protected]"
] | |
e33058af373d06e2f01f0dedd2779cbce9ab58b6 | cef082f9fd218e807ad6deedfc95b485fe4152a0 | /SecondWeek/example_http.py | 601ec43cf64d916ec85671ec7715c03db19b71b8 | [] | no_license | banziha104/DjangoProjects | bdb814dbcaa9a0c50d05b42ee7e319a5129dfef9 | 802e412fcabedd5e08abbf14f2587bd2231491ff | refs/heads/master | 2021-01-23T05:29:17.563194 | 2017-10-17T10:28:16 | 2017-10-17T10:28:16 | 102,469,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | # python3 -m http.server 8000
# localhost:8000
# python3 -m http.server 8000 --bind 127.0.0.1
# python3 -m http.server --cgi 8000
import http.server
import socketserver
PORT = 8000
Handler = http.server.SimpleHTTPRequestHandler  # serves files from the current working directory
with socketserver.TCPServer(("", PORT), Handler) as httpd:
print("serving at port", PORT)
httpd.serve_forever()
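# Quick client-side check while the server is running (shown as a comment
# because serve_forever() blocks this script):
#
#     import urllib.request
#     print(urllib.request.urlopen('http://localhost:8000/').status)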
| [
"[email protected]"
] | |
3cd443b57e159232737995be844e35ac1210e2c3 | 72b77f97876983025eb05a5aa1d6f248a1be3074 | /difference_between_element_sum_and_digit_sum_of_an_array.py | 8bac67b7f16134a05d0c93079b75484b8335f70b | [
"Apache-2.0"
] | permissive | erjan/coding_exercises | 4c6bccb2cdac65ccbc3107a482914275ecd157f7 | 68dac358a6d4dabd41d47dbd4addb2ec50e0ca11 | refs/heads/master | 2023-09-02T07:25:30.886175 | 2023-08-27T06:13:06 | 2023-08-27T06:13:06 | 236,281,070 | 5 | 0 | Apache-2.0 | 2020-05-05T15:08:49 | 2020-01-26T07:32:09 | Python | UTF-8 | Python | false | false | 727 | py | '''
You are given a positive integer array nums.
The element sum is the sum of all the elements in nums.
The digit sum is the sum of all the digits (not necessarily distinct) that appear in nums.
Return the absolute difference between the element sum and digit sum of nums.
Note that the absolute difference between two integers x and y is defined as |x - y|.
'''
from typing import List  # needed to run outside the LeetCode harness

class Solution:
def differenceOfSum(self, nums: List[int]) -> int:
elementsum = sum(nums)
digitsum = 0
for n in nums:
item = list(str(n))
temp = 0
for x in item:
temp+= int(x)
digitsum+=temp
return abs(elementsum-digitsum)
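# Quick self-check using the problem's own example: [1, 15, 6, 3] has element
# sum 25 and digit sum 1+1+5+6+3 = 16, so the answer is 9.
if __name__ == '__main__':
    assert Solution().differenceOfSum([1, 15, 6, 3]) == 9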
| [
"[email protected]"
] | |
958cfc9e90e536eeeaddfdc8e2dc87dfd64c8875 | faa83d63a23aec7c4f45c6ce6d140985a9fb2d50 | /tests/conftest.py | 7baac17e56d03fac2affcd5502b8e75d9cc40e56 | [
"LicenseRef-scancode-other-permissive",
"MIT"
] | permissive | thomasyi17/diana2 | dbf23382f5f84bd9cf86ce531f46452f0083e7f6 | 983e58ef0a5fe0d820a56c41c823369754019171 | refs/heads/master | 2023-03-24T15:13:29.421614 | 2022-06-12T21:42:28 | 2022-06-12T21:42:28 | 167,248,482 | 0 | 0 | MIT | 2019-06-25T19:41:36 | 2019-01-23T20:22:50 | Python | UTF-8 | Python | false | false | 1,973 | py | import sys
import pytest
import docker
from diana.utils.endpoint import Containerized
sys.path.append('utils')
@pytest.fixture(scope="session")
def setup_orthanc0():
S = mk_orthanc()
yield S
print("Tearing down orthanc fixture")
S.stop_container()
@pytest.fixture(scope="session")
def setup_orthanc1():
S = mk_orthanc(8043, 4243, 8042, 4242)
yield S
print("Tearing down orthanc fixture")
S.stop_container()
def mk_orthanc(http_port=8042, dcm_port=4242, remote_peer=8043, remote_mod=4243):
    """Stand up a dockerized Orthanc wired to a remote peer/modality on the given ports."""
    print("Standing up orthanc fixture")
    dkr_name = "orthanc-{}".format(http_port)
import socket
host_name = socket.gethostname()
host_ip = socket.gethostbyname(host_name)
S = Containerized(
dkr_name = dkr_name,
dkr_image = "derekmerck/orthanc-confd",
dkr_ports = {"8042/tcp": http_port, "4242/tcp": dcm_port},
dkr_env = {"ORTHANC_MOD_0": "mod0,ORTHANC{},{},{}".format(
remote_mod, host_ip, remote_mod),
"ORTHANC_PEER_0": "peer0,http://{}:{},orthanc,passw0rd!".format(
host_ip, remote_peer),
"ORTHANC_AET": "ORTHANC{}".format(dcm_port),
"ORTHANC_VERBOSE": "true"}
)
S.start_container()
client = docker.from_env()
c = client.containers.get(dkr_name)
print("{}: {}".format(S.dkr_name, c.status))
return S
@pytest.fixture(scope="session")
def setup_redis():
S = mk_redis()
yield S
print("Tearing down redis fixture")
S.stop_container()
def mk_redis():
print("Standing up redis fixture")
dkr_name = "redis"
S = Containerized(
dkr_name = dkr_name,
dkr_image = "redis",
dkr_ports = {"6379/tcp": 6379}
)
S.start_container()
client = docker.from_env()
c = client.containers.get(dkr_name)
print("{}: {}".format(S.dkr_name, c.status))
return S
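# Example consumer (would live in a test module -- tests defined inside
# conftest.py itself are not collected by pytest). The URL and credentials are
# assumptions based on the container configuration above:
#
#     def test_orthanc_alive(setup_orthanc0):
#         import requests
#         r = requests.get("http://localhost:8042/system", auth=("orthanc", "passw0rd!"))
#         assert r.status_code == 200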
| [
"[email protected]"
] | |
32138084abc6dd3ae53877add615df18640ec606 | 92f69f1f33f6b3aa29dc4f3ccce7d4a06eb24bdf | /deploy/infer_onnx_tensorrt.py | be847017b35e952bf6f78ecc7da8ac525342cab4 | [
"MIT"
] | permissive | carlsummer/lcnn | 5d0b4c81e3b626e0380fdd36ad5685f3a6b9eb8f | b7ad7fa5502243ac50ca15a355e0001c5992d050 | refs/heads/master | 2023-06-21T05:45:44.910052 | 2021-07-29T00:55:33 | 2021-07-29T00:55:33 | 384,020,516 | 0 | 0 | MIT | 2021-07-08T06:13:42 | 2021-07-08T06:13:41 | null | UTF-8 | Python | false | false | 1,530 | py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author zengxiaohui
# Datetime: 7/26/2021 10:12 AM
# @File:infer_net1_onnx.py
import argparse
import onnx
import onnx_tensorrt.backend as backend
import torch
from imutils import paths
from deploy.torch2onnx import get_image
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Fetch the large images that Hangzhou inspectors judged defective")
    parser.add_argument('--devices',
                        default=r"0",
                        help="CUDA device id(s) to run on")
    parser.add_argument('--onnx_path',
                        default=r"/home/zengxh/workspace/lcnn/logs/210726-144038-88f281a-baseline/checkpoint_best.onnx",
                        help="path to the exported ONNX checkpoint")
    parser.add_argument('--predict_dir',
                        default=r"/home/zengxh/medias/data/ext/creepageDistance/20210714/smallimg/tb/org",
                        help="directory of input images to run inference on")
    parser.add_argument('--predict_type',
                        default=r"tb",
                        help="image orientation/type flag passed to get_image()")
opt = parser.parse_args()
model = onnx.load(opt.onnx_path)
engine = backend.prepare(model, device='CUDA:0')
image_paths = list(paths.list_images(opt.predict_dir))
image_path = image_paths[0]
image = get_image(image_path, opt.predict_type).cuda()
junc = torch.zeros(1, 2).cuda()
jtyp = torch.zeros(1, dtype=torch.uint8).cuda()
Lpos = torch.zeros(2, 2, dtype=torch.uint8).cuda()
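    # Note (assumption, not from the source): onnx_tensorrt's backend typically
    # expects NumPy input, so the call below may need to be
    #     ret = engine.run(image.cpu().numpy())
    # Also, junc/jtyp/Lpos above are built but never passed to the engine here.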
ret = engine.run(image)
print(ret) | [
"[email protected]"
] | |
7306eaafeb9996f56c25f6148352a8b7c266f68f | fcfc5d6b6fe509072ace7522a8b48e7c3a8d80a8 | /api/upload/urls.py | 0f1e7170bdc27c20e7ede08af83e29b54b07da69 | [] | no_license | Torque-Webdev/ChamipnshpDatabase | 0323d13d39f27aa5516b68891e3cfde51ad063f4 | a3f44f7786d98afb58744e0410904750906f5ae0 | refs/heads/master | 2023-05-01T22:19:14.180758 | 2021-05-21T16:41:16 | 2021-05-21T16:41:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | from django.urls import path
from .views import FileViewSet
from rest_framework import routers
from django.urls import path, include
app_name = "upload"
router = routers.DefaultRouter()
router.register('file', FileViewSet)
urlpatterns = [
path('', include(router.urls))
]
| [
"[email protected]"
] | |
d118b958141f7a74f558cabfc48f8c64170a1520 | f87f51ec4d9353bc3836e22ac4a944951f9c45c0 | /.history/HW03_20210706182647.py | 1346d8dad64a99274877a7859b1338bb7f9edce1 | [] | no_license | sanjayMamidipaka/cs1301 | deaffee3847519eb85030d1bd82ae11e734bc1b7 | 9ddb66596497382d807673eba96853a17884d67b | refs/heads/main | 2023-06-25T04:52:28.153535 | 2021-07-26T16:42:44 | 2021-07-26T16:42:44 | 389,703,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,882 | py | """
Georgia Institute of Technology - CS1301
HW03 - Strings and Lists
Collaboration Statement:
"""
#########################################
"""
Function Name: movieNight()
Parameters: subtitle (str)
Returns: fixed subtitle (str)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def movieNight(subtitle):
newSubtitle = ''
for i in subtitle:
if not i.isdigit():
newSubtitle += i
return newSubtitle
"""
Function Name: longestWord()
Parameters: sentence (str)
Returns: longest word (str)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def longestWord(sentence):
    return max(sentence.split(), key=len)  # longest word by length; bare max() would compare alphabetically
"""
Function Name: tennisMatch()
Parameters: player1 (str), player2 (str), matchRecord (str)
Returns: game statement (str)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
"""
Function Name: freshFruit()
Parameters: barcodes (list), startIndex (int), stopIndex (int)
Returns: freshest barcode (int)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
"""
Function Name: highestSum()
Parameters: stringList (list)
Returns: highest sum index (int)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
# subtitle = "Mr. and M4rs. Dursley of nu28mber four, Privet Drive, wer903e proud to say th6at they we6re perfectly norm3al, tha894nk you ve89ry much."
# print(movieNight(subtitle))
sentence = " Left foot, right foot, levitatin’ "
print(longestWord(sentence)) | [
"[email protected]"
] | |
68da85645730e39e77b20e6b4145c05c4e7f8e65 | c8b427f7d548d2028911682ec1fcdcd0150fd1c3 | /encoding/datasets/cocostuff.py | e3e06d059fbc752baa2bd5098006bcabb120653b | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | yougoforward/GSFramework | 544f120681777752cda9dcd4872170118e2f6073 | 000060691fcf252cbdf834326db19415f9754cdf | refs/heads/master | 2022-12-21T07:59:48.571069 | 2020-07-10T12:57:34 | 2020-07-10T12:57:34 | 271,774,293 | 0 | 1 | NOASSERTION | 2022-12-19T09:48:36 | 2020-06-12T10:44:20 | Python | UTF-8 | Python | false | false | 3,726 | py | ###########################################################################
# Created by: CASIA IVA
# Email: [email protected]
# Copyright (c) 2018
###########################################################################
import os
import sys
import numpy as np
import random
import math
from PIL import Image, ImageOps, ImageFilter
import torch
import torch.utils.data as data
import torchvision.transforms as transform
import re
from tqdm import tqdm
from .base import BaseDataset
class CocostuffSegmentation(BaseDataset):
BASE_DIR = 'cocostuff'
NUM_CLASS = 171
def __init__(self, root='./datasets', split='train',
mode=None, transform=None, target_transform=None, **kwargs):
super(CocostuffSegmentation, self).__init__(
root, split, mode, transform, target_transform, **kwargs)
# assert exists
root = os.path.join(root, self.BASE_DIR)
assert os.path.exists(root), "Please download the dataset!!"
self.images, self.masks = _get_cocostuff_pairs(root, split)
if split != 'vis':
assert (len(self.images) == len(self.masks))
if len(self.images) == 0:
            raise RuntimeError("Found 0 images in subfolders of: " + root + "\n")
def __getitem__(self, index):
img = Image.open(self.images[index]).convert('RGB')
if self.mode == 'vis':
if self.transform is not None:
img = self.transform(img)
return img, os.path.basename(self.images[index])
mask = Image.open(self.masks[index])
# synchrosized transform
if self.mode == 'train':
img, mask = self._sync_transform(img, mask)
elif self.mode == 'val':
img, mask = self._val_sync_transform(img, mask)
else:
assert self.mode == 'testval'
mask = self._mask_transform(mask)
# general resize, normalize and toTensor
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
mask = self.target_transform(mask)
return img, mask
def _mask_transform(self, mask):
target = np.array(mask).astype('int32')-1
# target[target == 255] = -1
return torch.from_numpy(target).long()
def __len__(self):
return len(self.images)
@property
def pred_offset(self):
return 0
def _get_cocostuff_pairs(folder, split='train'):
def get_path_pairs(folder, split_f):
img_paths = []
mask_paths = []
with open(split_f, 'r') as lines:
for line in tqdm(lines):
ll_str = re.split(' ', line)
                imgpath = os.path.join(folder, ll_str[0].lstrip('/').rstrip())
maskpath = os.path.join(folder, ll_str[1].lstrip('/').rstrip())
if os.path.isfile(maskpath):
img_paths.append(imgpath)
mask_paths.append(maskpath)
else:
print('cannot find the mask:', maskpath)
return img_paths, mask_paths
if split == 'train':
split_f = os.path.join(folder, 'train.txt')
img_paths, mask_paths = get_path_pairs(folder, split_f)
elif split == 'val':
split_f = os.path.join(folder, 'val.txt')
img_paths, mask_paths = get_path_pairs(folder, split_f)
elif split == 'test':
split_f = os.path.join(folder, 'test.txt')
img_paths, mask_paths = get_path_pairs(folder, split_f)
else:
split_f = os.path.join(folder, 'all.txt')
img_paths, mask_paths = get_path_pairs(folder, split_f)
return img_paths, mask_paths
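# Assumed split-file layout, inferred from the parsing above: each line of
# train.txt / val.txt / test.txt / all.txt holds two space-separated paths
# (image, mask), e.g. "/images/xxx.jpg /annotations/xxx.png".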
| [
"[email protected]"
] | |
3abf98dbe914d4e4050166a3786a346691c89178 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/5/nmw.py | 24096ea6412cc5e2a05bb57f81dff109435bf570 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'nMW':
printFunction(data[1:])
else:
print 'ERROR'
return
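# Example (hypothetical input file line; the quotes must be standalone tokens):
#   nMW " hello world "
# printFunction receives ['"', 'hello', 'world', '"'] and prints: hello world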
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
70ecb1031c4cffdd7b880aa97025bd23e23d1211 | 240c225039581d7454e3229f8e35cce2e2f6b529 | /httprunner/context.py | f812b7e7b1b39f2170652635bd02036028d6b6ae | [
"MIT"
] | permissive | leo1001/HttpRunner | 2588666286476ff4e4d00f2647ef3bf008b768ae | bb2343470a328f7444c42582162b4c32f3b331b7 | refs/heads/master | 2020-03-16T08:44:02.810857 | 2018-08-21T07:57:18 | 2018-08-21T07:57:18 | 132,601,259 | 0 | 0 | MIT | 2018-08-21T07:57:19 | 2018-05-08T11:49:22 | Python | UTF-8 | Python | false | false | 20,851 | py | # encoding: utf-8
import copy
import os
import random
import re
import sys
from httprunner import built_in, exceptions, loader, logger, parser, utils
from httprunner.compat import OrderedDict, basestring, builtin_str, str
def parse_parameters(parameters, testset_path=None):
""" parse parameters and generate cartesian product.
Args:
        parameters (list): parameter names and values.
            A parameter value may take one of three forms:
(1) data list, e.g. ["iOS/10.1", "iOS/10.2", "iOS/10.3"]
(2) call built-in parameterize function, "${parameterize(account.csv)}"
(3) call custom function in debugtalk.py, "${gen_app_version()}"
testset_path (str): testset file path, used for locating csv file and debugtalk.py
Returns:
list: cartesian product list
Examples:
>>> parameters = [
{"user_agent": ["iOS/10.1", "iOS/10.2", "iOS/10.3"]},
{"username-password": "${parameterize(account.csv)}"},
{"app_version": "${gen_app_version()}"}
]
>>> parse_parameters(parameters)
"""
testcase_parser = TestcaseParser(file_path=testset_path)
parsed_parameters_list = []
for parameter in parameters:
parameter_name, parameter_content = list(parameter.items())[0]
parameter_name_list = parameter_name.split("-")
if isinstance(parameter_content, list):
# (1) data list
# e.g. {"app_version": ["2.8.5", "2.8.6"]}
# => [{"app_version": "2.8.5", "app_version": "2.8.6"}]
# e.g. {"username-password": [["user1", "111111"], ["test2", "222222"]}
# => [{"username": "user1", "password": "111111"}, {"username": "user2", "password": "222222"}]
parameter_content_list = []
for parameter_item in parameter_content:
if not isinstance(parameter_item, (list, tuple)):
# "2.8.5" => ["2.8.5"]
parameter_item = [parameter_item]
# ["app_version"], ["2.8.5"] => {"app_version": "2.8.5"}
# ["username", "password"], ["user1", "111111"] => {"username": "user1", "password": "111111"}
parameter_content_dict = dict(zip(parameter_name_list, parameter_item))
parameter_content_list.append(parameter_content_dict)
else:
# (2) & (3)
parsed_parameter_content = testcase_parser.eval_content_with_bindings(parameter_content)
# e.g. [{'app_version': '2.8.5'}, {'app_version': '2.8.6'}]
# e.g. [{"username": "user1", "password": "111111"}, {"username": "user2", "password": "222222"}]
if not isinstance(parsed_parameter_content, list):
raise exceptions.ParamsError("parameters syntax error!")
parameter_content_list = [
# get subset by parameter name
{key: parameter_item[key] for key in parameter_name_list}
for parameter_item in parsed_parameter_content
]
parsed_parameters_list.append(parameter_content_list)
return utils.gen_cartesian_product(*parsed_parameters_list)
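# Minimal illustration (hypothetical input): parameters = [{"a": [1, 2]}, {"b": [3]}]
# expands to the cartesian product [{"a": 1, "b": 3}, {"a": 2, "b": 3}].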
class TestcaseParser(object):
def __init__(self, variables={}, functions={}, file_path=None):
self.update_binded_variables(variables)
self.bind_functions(functions)
self.file_path = file_path
def update_binded_variables(self, variables):
""" bind variables to current testcase parser
@param (dict) variables, variables binds mapping
{
"authorization": "a83de0ff8d2e896dbd8efb81ba14e17d",
"random": "A2dEx",
"data": {"name": "user", "password": "123456"},
"uuid": 1000
}
"""
self.variables = variables
def bind_functions(self, functions):
""" bind functions to current testcase parser
@param (dict) functions, functions binds mapping
{
"add_two_nums": lambda a, b=1: a + b
}
"""
self.functions = functions
def _get_bind_item(self, item_type, item_name):
""" get specified function or variable.
Args:
item_type(str): functions or variables
item_name(str): function name or variable name
Returns:
object: specified function or variable object.
"""
if item_type == "functions":
if item_name in self.functions:
return self.functions[item_name]
try:
# check if builtin functions
item_func = eval(item_name)
if callable(item_func):
# is builtin function
return item_func
except (NameError, TypeError):
# is not builtin function, continue to search
pass
else:
# item_type == "variables":
if item_name in self.variables:
return self.variables[item_name]
debugtalk_module = loader.load_debugtalk_module(self.file_path)
return loader.get_module_item(debugtalk_module, item_type, item_name)
def get_bind_function(self, func_name):
return self._get_bind_item("functions", func_name)
def get_bind_variable(self, variable_name):
return self._get_bind_item("variables", variable_name)
def load_csv_list(self, csv_file_name, fetch_method="Sequential"):
""" locate csv file and load csv content.
Args:
csv_file_name (str): csv file name
fetch_method (str): fetch data method, defaults to Sequential.
If set to "random", csv data list will be reordered in random.
Returns:
list: csv data list
"""
csv_file_path = loader.locate_file(self.file_path, csv_file_name)
csv_content_list = loader.load_file(csv_file_path)
if fetch_method.lower() == "random":
random.shuffle(csv_content_list)
return csv_content_list
def _eval_content_functions(self, content):
functions_list = parser.extract_functions(content)
for func_content in functions_list:
function_meta = parser.parse_function(func_content)
func_name = function_meta['func_name']
args = function_meta.get('args', [])
kwargs = function_meta.get('kwargs', {})
args = self.eval_content_with_bindings(args)
kwargs = self.eval_content_with_bindings(kwargs)
if func_name in ["parameterize", "P"]:
eval_value = self.load_csv_list(*args, **kwargs)
else:
func = self.get_bind_function(func_name)
eval_value = func(*args, **kwargs)
func_content = "${" + func_content + "}"
if func_content == content:
                # content is exactly one function call
content = eval_value
else:
                # content embeds one or more function calls
content = content.replace(
func_content,
str(eval_value), 1
)
return content
def _eval_content_variables(self, content):
""" replace all variables of string content with mapping value.
@param (str) content
@return (str) parsed content
e.g.
variable_mapping = {
"var_1": "abc",
"var_2": "def"
}
$var_1 => "abc"
$var_1#XYZ => "abc#XYZ"
/$var_1/$var_2/var3 => "/abc/def/var3"
${func($var_1, $var_2, xyz)} => "${func(abc, def, xyz)}"
"""
variables_list = parser.extract_variables(content)
for variable_name in variables_list:
variable_value = self.get_bind_variable(variable_name)
if "${}".format(variable_name) == content:
# content is a variable
content = variable_value
else:
# content contains one or several variables
if not isinstance(variable_value, str):
variable_value = builtin_str(variable_value)
content = content.replace(
"${}".format(variable_name),
variable_value, 1
)
return content
def eval_content_with_bindings(self, content):
""" parse content recursively, each variable and function in content will be evaluated.
@param (dict) content in any data structure
{
"url": "http://127.0.0.1:5000/api/users/$uid/${add_two_nums(1, 1)}",
"method": "POST",
"headers": {
"Content-Type": "application/json",
"authorization": "$authorization",
"random": "$random",
"sum": "${add_two_nums(1, 2)}"
},
"body": "$data"
}
@return (dict) parsed content with evaluated bind values
{
"url": "http://127.0.0.1:5000/api/users/1000/2",
"method": "POST",
"headers": {
"Content-Type": "application/json",
"authorization": "a83de0ff8d2e896dbd8efb81ba14e17d",
"random": "A2dEx",
"sum": 3
},
"body": {"name": "user", "password": "123456"}
}
"""
if content is None:
return None
if isinstance(content, (list, tuple)):
return [
self.eval_content_with_bindings(item)
for item in content
]
if isinstance(content, dict):
evaluated_data = {}
for key, value in content.items():
eval_key = self.eval_content_with_bindings(key)
eval_value = self.eval_content_with_bindings(value)
evaluated_data[eval_key] = eval_value
return evaluated_data
if isinstance(content, basestring):
# content is in string format here
content = content.strip()
# replace functions with evaluated value
# Notice: _eval_content_functions must be called before _eval_content_variables
content = self._eval_content_functions(content)
# replace variables with binding value
content = self._eval_content_variables(content)
return content
class Context(object):
""" Manages context functions and variables.
context has two levels, testset and testcase.
"""
def __init__(self):
self.testset_shared_variables_mapping = OrderedDict()
self.testcase_variables_mapping = OrderedDict()
self.testcase_parser = TestcaseParser()
self.evaluated_validators = []
self.init_context()
def init_context(self, level='testset'):
"""
testset level context initializes when a file is loaded,
testcase level context initializes when each testcase starts.
"""
if level == "testset":
self.testset_functions_config = {}
self.testset_request_config = {}
self.testset_shared_variables_mapping = OrderedDict()
# testcase config shall inherit from testset configs,
# but can not change testset configs, that's why we use copy.deepcopy here.
self.testcase_functions_config = copy.deepcopy(self.testset_functions_config)
self.testcase_variables_mapping = copy.deepcopy(self.testset_shared_variables_mapping)
self.testcase_parser.bind_functions(self.testcase_functions_config)
self.testcase_parser.update_binded_variables(self.testcase_variables_mapping)
if level == "testset":
self.import_module_items(built_in)
def config_context(self, config_dict, level):
if level == "testset":
self.testcase_parser.file_path = config_dict.get("path", None)
variables = config_dict.get('variables') \
or config_dict.get('variable_binds', OrderedDict())
self.bind_variables(variables, level)
def bind_functions(self, function_binds, level="testcase"):
""" Bind named functions within the context
This allows for passing in self-defined functions in testing.
e.g. function_binds:
{
"add_one": lambda x: x + 1, # lambda function
"add_two_nums": "lambda x, y: x + y" # lambda function in string
}
"""
eval_function_binds = {}
for func_name, function in function_binds.items():
if isinstance(function, str):
function = eval(function)
eval_function_binds[func_name] = function
self.__update_context_functions_config(level, eval_function_binds)
def import_module_items(self, imported_module):
""" import module functions and variables and bind to testset context
"""
module_mapping = loader.load_python_module(imported_module)
self.__update_context_functions_config("testset", module_mapping["functions"])
self.bind_variables(module_mapping["variables"], "testset")
def bind_variables(self, variables, level="testcase"):
""" bind variables to testset context or current testcase context.
variables in testset context can be used in all testcases of current test suite.
        @param (list or OrderedDict) variables, a variable can be a value or a custom function call.
            if the value is a function call, it will be evaluated and the result bound to the variable.
e.g.
            OrderedDict({
"TOKEN": "debugtalk",
"random": "${gen_random_string(5)}",
"json": {'name': 'user', 'password': '123456'},
"md5": "${gen_md5($TOKEN, $json, $random)}"
})
"""
if isinstance(variables, list):
variables = utils.convert_to_order_dict(variables)
for variable_name, value in variables.items():
variable_eval_value = self.eval_content(value)
if level == "testset":
self.testset_shared_variables_mapping[variable_name] = variable_eval_value
self.bind_testcase_variable(variable_name, variable_eval_value)
def bind_testcase_variable(self, variable_name, variable_value):
""" bind and update testcase variables mapping
"""
self.testcase_variables_mapping[variable_name] = variable_value
self.testcase_parser.update_binded_variables(self.testcase_variables_mapping)
def bind_extracted_variables(self, variables):
""" bind extracted variables to testset context
        @param (OrderedDict) variables
extracted value do not need to evaluate.
"""
for variable_name, value in variables.items():
self.testset_shared_variables_mapping[variable_name] = value
self.bind_testcase_variable(variable_name, value)
def __update_context_functions_config(self, level, config_mapping):
"""
@param level: testset or testcase
@param config_type: functions
@param config_mapping: functions config mapping
"""
if level == "testset":
self.testset_functions_config.update(config_mapping)
self.testcase_functions_config.update(config_mapping)
self.testcase_parser.bind_functions(self.testcase_functions_config)
def eval_content(self, content):
""" evaluate content recursively, take effect on each variable and function in content.
content may be in any data structure, include dict, list, tuple, number, string, etc.
"""
return self.testcase_parser.eval_content_with_bindings(content)
def get_parsed_request(self, request_dict, level="testcase"):
""" get parsed request with bind variables and functions.
@param request_dict: request config mapping
@param level: testset or testcase
"""
if level == "testset":
request_dict = self.eval_content(
request_dict
)
self.testset_request_config.update(request_dict)
testcase_request_config = utils.deep_update_dict(
copy.deepcopy(self.testset_request_config),
request_dict
)
parsed_request = self.eval_content(
testcase_request_config
)
return parsed_request
def eval_check_item(self, validator, resp_obj):
""" evaluate check item in validator
@param (dict) validator
{"check": "status_code", "comparator": "eq", "expect": 201}
{"check": "$resp_body_success", "comparator": "eq", "expect": True}
@param (object) resp_obj
@return (dict) validator info
{
"check": "status_code",
"check_value": 200,
"expect": 201,
"comparator": "eq"
}
"""
check_item = validator["check"]
# check_item should only be the following 5 formats:
# 1, variable reference, e.g. $token
# 2, function reference, e.g. ${is_status_code_200($status_code)}
# 3, dict or list, maybe containing variable/function reference, e.g. {"var": "$abc"}
# 4, string joined by delimiter. e.g. "status_code", "headers.content-type"
# 5, regex string, e.g. "LB[\d]*(.*)RB[\d]*"
if isinstance(check_item, (dict, list)) \
or parser.extract_variables(check_item) \
or parser.extract_functions(check_item):
# format 1/2/3
check_value = self.eval_content(check_item)
else:
# format 4/5
check_value = resp_obj.extract_field(check_item)
validator["check_value"] = check_value
# expect_value should only be in 2 types:
# 1, variable reference, e.g. $expect_status_code
# 2, actual value, e.g. 200
expect_value = self.eval_content(validator["expect"])
validator["expect"] = expect_value
validator["check_result"] = "unchecked"
return validator
def do_validation(self, validator_dict):
""" validate with functions
"""
# TODO: move comparator uniform to init_test_suites
comparator = utils.get_uniform_comparator(validator_dict["comparator"])
validate_func = self.testcase_parser.get_bind_function(comparator)
if not validate_func:
raise exceptions.FunctionNotFound("comparator not found: {}".format(comparator))
check_item = validator_dict["check"]
check_value = validator_dict["check_value"]
expect_value = validator_dict["expect"]
if (check_value is None or expect_value is None) \
and comparator not in ["is", "eq", "equals", "=="]:
raise exceptions.ParamsError("Null value can only be compared with comparator: eq/equals/==")
validate_msg = "validate: {} {} {}({})".format(
check_item,
comparator,
expect_value,
type(expect_value).__name__
)
try:
validator_dict["check_result"] = "pass"
validate_func(check_value, expect_value)
validate_msg += "\t==> pass"
logger.log_debug(validate_msg)
except (AssertionError, TypeError):
validate_msg += "\t==> fail"
validate_msg += "\n{}({}) {} {}({})".format(
check_value,
type(check_value).__name__,
comparator,
expect_value,
type(expect_value).__name__
)
logger.log_error(validate_msg)
validator_dict["check_result"] = "fail"
raise exceptions.ValidationFailure(validate_msg)
def validate(self, validators, resp_obj):
""" make validations
"""
if not validators:
return
logger.log_info("start to validate.")
self.evaluated_validators = []
validate_pass = True
for validator in validators:
# evaluate validators with context variable mapping.
evaluated_validator = self.eval_check_item(
parser.parse_validator(validator),
resp_obj
)
try:
self.do_validation(evaluated_validator)
except exceptions.ValidationFailure:
validate_pass = False
self.evaluated_validators.append(evaluated_validator)
if not validate_pass:
raise exceptions.ValidationFailure
| [
"httprunner"
] | httprunner |
74c32424b84aeca23312e1bc1337aeae4ce88b19 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5769900270288896_0/Python/Yizow/noisyNeighbors.py | 56699b486ce068641289c157a81da28e58850b66 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,779 | py | import sys
def main():
inputFileName = sys.argv[1]
outputFileName = sys.argv[2]
with open(inputFileName, 'r') as inputFile:
with open(outputFileName, 'w') as outputFile:
numTestCases = int(inputFile.readline())
for testNum in range(numTestCases):
if testNum > 0:
outputFile.write("\n")
print testNum
line1 = inputFile.readline().split()
R, C, N = int(line1[0]), int(line1[1]), int(line1[2])
outputFile.write("Case #%d: %d" % (testNum+1, calcHappy(R,C,N)))
def calcHappy(R, C, N):
if N == R*C:
return totalWalls(R,C)
if N <= (R*C+1)/2:
return 0
else:
empty = R*C-N
if min(R, C) == 1:
return totalWalls(R,C) - empty * 2
if min(R, C) == 2:
length = max(R,C)
if empty == length - 1:
return totalWalls(R, C) - ( 3*empty - 1)
return totalWalls(R, C) - (3 * empty)
inR, inC = R-2, C-2
innerRooms = inR*inC
if innerRooms == 1:
if N == 8:
return totalWalls(R, C) - 4
if N == 7:
return totalWalls(R, C) - 6
if N == 6:
return totalWalls(R, C) - 9
if (innerRooms+1)/2 >= empty:
return totalWalls(R, C) - empty*4
happy = 4*((innerRooms+1)/2)
empty -= (innerRooms+1)/2
edgeRooms = (R*2+(C-2)*2)/2
if R%2 == 0 or C%2 == 0:
edgeRooms += 2
if empty < edgeRooms:
happy += 3*empty
return totalWalls(R, C) - happy
    return 1/0  # deliberate crash marker: inputs were assumed never to reach this branch
def totalWalls(R, C):
return R*(C-1) + C*(R-1)
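# Quick sanity check (added): totalWalls(2, 3) == 2*2 + 3*1 == 7 shared walls
# in a 2 x 3 grid of rooms.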
main() | [
"[email protected]"
] | |
e636ed229257f4495131784688b2d673f17fedc4 | 86f8b6366fefdc91f9bd658efcb292001fe02da0 | /src/rosbag_to_file/scripts/rosbag_to_csv.py | cbc70a3e2768d76ec7e4b9324606857c49befc69 | [
"MIT"
] | permissive | mingtsung86/dodobot-ros | 32a1a42fbf5b7b9e7415f5e4ba21e9574827e5ba | 85971caeac958db2f592eeeca13b01b4d365eebd | refs/heads/master | 2023-06-09T08:07:22.867868 | 2021-07-02T05:58:03 | 2021-07-02T05:58:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,946 | py | import csv
import rospy
from datetime import datetime
import utils
def bag_to_csv(options):
writers = dict()
for topic, msg, timestamp in utils.enumerate_bag(options, options.path):
if topic in writers:
writer = writers[topic][0]
else:
f = open_csv(options, topic)
writer = csv.writer(f)
writers[topic] = writer, f
# header
if options.header:
header_row = ["date", "time"]
message_type_to_csv(header_row, msg)
writer.writerow(header_row)
row = [
utils.to_datestr(timestamp.to_time()),
timestamp.to_time()
]
message_to_csv(row, msg, flatten=not options.header)
writer.writerow(row)
for writer, f in writers.values():
f.close()
def open_csv(options, topic_name):
path = utils.get_output_path(options, topic_name)
path += ".csv"
return open(path, 'w')
def message_to_csv(row, msg, flatten=False):
"""
row: list
msg: message
"""
for key, value in utils.iter_msg(msg):
msg_str = str(value)
if msg_str.find(",") is not -1:
if flatten:
msg_str = msg_str.strip("(")
msg_str = msg_str.strip(")")
msg_str = msg_str.strip(" ")
else:
msg_str = "\"" + msg_str + "\""
row.append(msg_str)
def format_header_key(key):
header = ""
for index, subfield in enumerate(key):
if type(subfield) == int:
header += "[%s]" % subfield
else:
if index == 0:
header += subfield
else:
header += "." + subfield
return header
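# Illustration based on the branching above:
# format_header_key(("pose", "position", 0)) -> "pose.position[0]"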
def message_type_to_csv(row, msg, parent_content_name=""):
"""
row: list
msg: message
"""
for key, value in utils.iter_msg(msg):
row.append(format_header_key(key))
| [
"[email protected]"
] | |
e5a0abd466402d924ea93b9c01766280787d1b41 | 5ac15873df4df7aeef3908ea7aebe0805cca0181 | /flask_mrbob/templates/project/+project.name+/base/context_processors.py | 833ad7be23d69c4d1f2722e10bb1eaacf874d3c6 | [
"BSD-3-Clause"
] | permissive | jstacoder/flask-manage | 0a942cef5a32b2966dc73cf14c5a58682b0af0f2 | 76f1802c4b7042c482dcdcc884fbc0e4fd114b5e | refs/heads/master | 2020-04-19T18:18:11.872007 | 2014-07-02T15:24:38 | 2014-07-02T15:24:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | # -*- coding: utf-8 -*-
"""
base.context_processors
~~~~~~~~~~~~~~~~~~~~~~~
The most common context processors
for the whole project.
"""
from flask.helpers import url_for
def common_context():
return {
'my_email': '[email protected]',
'type':type,
'dir':dir,
}
def common_forms():
return {}
| [
"[email protected]"
] | |
4a6230ff0f148f3c5547d32f390c7c2905610b99 | 89dedd7f3c7acc81d12e2bcb2e716f9af9e5fa04 | /sandbox/linux/sandbox_linux.gypi | 039e9b405174b708ad4fc36049f0f97c6b1ddcd3 | [
"BSD-3-Clause"
] | permissive | bino7/chromium | 8d26f84a1b6e38a73d1b97fea6057c634eff68cb | 4666a6bb6fdcb1114afecf77bdaa239d9787b752 | refs/heads/master | 2022-12-22T14:31:53.913081 | 2016-09-06T10:05:11 | 2016-09-06T10:05:11 | 67,410,510 | 1 | 3 | BSD-3-Clause | 2022-12-17T03:08:52 | 2016-09-05T10:11:59 | null | UTF-8 | Python | false | false | 13,026 | gypi | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'conditions': [
['OS=="linux"', {
'compile_suid_client': 1,
'compile_credentials': 1,
'use_base_test_suite': 1,
}, {
'compile_suid_client': 0,
'compile_credentials': 0,
'use_base_test_suite': 0,
}],
['OS=="linux" and (target_arch=="ia32" or target_arch=="x64" or '
'target_arch=="mipsel")', {
'compile_seccomp_bpf_demo': 1,
}, {
'compile_seccomp_bpf_demo': 0,
}],
],
},
'target_defaults': {
'target_conditions': [
# All linux/ files will automatically be excluded on Android
# so make sure we re-include them explicitly.
['OS == "android"', {
'sources/': [
['include', '^linux/'],
],
}],
],
},
'targets': [
# We have two principal targets: sandbox and sandbox_linux_unittests
# All other targets are listed as dependencies.
# There is one notable exception: for historical reasons, chrome_sandbox is
# the setuid sandbox and is its own target.
{
'target_name': 'sandbox',
'type': 'none',
'dependencies': [
'sandbox_services',
],
'conditions': [
[ 'compile_suid_client==1', {
'dependencies': [
'suid_sandbox_client',
],
}],
# Compile seccomp BPF when we support it.
[ 'use_seccomp_bpf==1', {
'dependencies': [
'seccomp_bpf',
],
}],
],
},
{
'target_name': 'sandbox_linux_test_utils',
'type': 'static_library',
'dependencies': [
'../testing/gtest.gyp:gtest',
],
'include_dirs': [
'../..',
],
'sources': [
'tests/sandbox_test_runner.cc',
'tests/sandbox_test_runner.h',
'tests/sandbox_test_runner_function_pointer.cc',
'tests/sandbox_test_runner_function_pointer.h',
'tests/test_utils.cc',
'tests/test_utils.h',
'tests/unit_tests.cc',
'tests/unit_tests.h',
],
'conditions': [
[ 'use_seccomp_bpf==1', {
'sources': [
'seccomp-bpf/bpf_tester_compatibility_delegate.h',
'seccomp-bpf/bpf_tests.h',
'seccomp-bpf/sandbox_bpf_test_runner.cc',
'seccomp-bpf/sandbox_bpf_test_runner.h',
],
'dependencies': [
'seccomp_bpf',
]
}],
[ 'use_base_test_suite==1', {
'dependencies': [
'../base/base.gyp:test_support_base',
],
'defines': [
'SANDBOX_USES_BASE_TEST_SUITE',
],
}],
],
},
{
# The main sandboxing test target.
'target_name': 'sandbox_linux_unittests',
'includes': [
'sandbox_linux_test_sources.gypi',
],
'type': 'executable',
'conditions': [
[ 'OS == "android"', {
'variables': {
'test_type': 'gtest',
'test_suite_name': '<(_target_name)',
},
'includes': [
'../../build/android/test_runner.gypi',
],
}]
]
},
{
'target_name': 'seccomp_bpf',
'type': '<(component)',
'sources': [
'bpf_dsl/bpf_dsl.cc',
'bpf_dsl/bpf_dsl.h',
'bpf_dsl/bpf_dsl_forward.h',
'bpf_dsl/bpf_dsl_impl.h',
'bpf_dsl/codegen.cc',
'bpf_dsl/codegen.h',
'bpf_dsl/cons.h',
'bpf_dsl/errorcode.h',
'bpf_dsl/linux_syscall_ranges.h',
'bpf_dsl/policy.cc',
'bpf_dsl/policy.h',
'bpf_dsl/policy_compiler.cc',
'bpf_dsl/policy_compiler.h',
'bpf_dsl/seccomp_macros.h',
'bpf_dsl/seccomp_macros.h',
'bpf_dsl/syscall_set.cc',
'bpf_dsl/syscall_set.h',
'bpf_dsl/trap_registry.h',
'seccomp-bpf-helpers/baseline_policy.cc',
'seccomp-bpf-helpers/baseline_policy.h',
'seccomp-bpf-helpers/sigsys_handlers.cc',
'seccomp-bpf-helpers/sigsys_handlers.h',
'seccomp-bpf-helpers/syscall_parameters_restrictions.cc',
'seccomp-bpf-helpers/syscall_parameters_restrictions.h',
'seccomp-bpf-helpers/syscall_sets.cc',
'seccomp-bpf-helpers/syscall_sets.h',
'seccomp-bpf/die.cc',
'seccomp-bpf/die.h',
'seccomp-bpf/sandbox_bpf.cc',
'seccomp-bpf/sandbox_bpf.h',
'seccomp-bpf/syscall.cc',
'seccomp-bpf/syscall.h',
'seccomp-bpf/trap.cc',
'seccomp-bpf/trap.h',
],
'dependencies': [
'../base/base.gyp:base',
'sandbox_services',
'sandbox_services_headers',
],
'defines': [
'SANDBOX_IMPLEMENTATION',
],
'includes': [
# Disable LTO due to compiler bug
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=57703
'../../build/android/disable_gcc_lto.gypi',
],
'include_dirs': [
'../..',
],
},
{
# The setuid sandbox, for Linux
'target_name': 'chrome_sandbox',
'type': 'executable',
'sources': [
'suid/common/sandbox.h',
'suid/common/suid_unsafe_environment_variables.h',
'suid/process_util.h',
'suid/process_util_linux.c',
'suid/sandbox.c',
],
'cflags': [
# For ULLONG_MAX
'-std=gnu99',
],
'include_dirs': [
'../..',
],
# Do not use any sanitizer tools with this binary. http://crbug.com/382766
'cflags/': [
['exclude', '-fsanitize'],
],
'ldflags/': [
['exclude', '-fsanitize'],
],
},
{ 'target_name': 'sandbox_services',
'type': '<(component)',
'sources': [
'services/init_process_reaper.cc',
'services/init_process_reaper.h',
'services/proc_util.cc',
'services/proc_util.h',
'services/resource_limits.cc',
'services/resource_limits.h',
'services/scoped_process.cc',
'services/scoped_process.h',
'services/syscall_wrappers.cc',
'services/syscall_wrappers.h',
'services/thread_helpers.cc',
'services/thread_helpers.h',
'services/yama.cc',
'services/yama.h',
'syscall_broker/broker_channel.cc',
'syscall_broker/broker_channel.h',
'syscall_broker/broker_client.cc',
'syscall_broker/broker_client.h',
'syscall_broker/broker_common.h',
'syscall_broker/broker_file_permission.cc',
'syscall_broker/broker_file_permission.h',
'syscall_broker/broker_host.cc',
'syscall_broker/broker_host.h',
'syscall_broker/broker_policy.cc',
'syscall_broker/broker_policy.h',
'syscall_broker/broker_process.cc',
'syscall_broker/broker_process.h',
],
'dependencies': [
'../base/base.gyp:base',
],
'defines': [
'SANDBOX_IMPLEMENTATION',
],
'conditions': [
['compile_credentials==1', {
'sources': [
'services/credentials.cc',
'services/credentials.h',
'services/namespace_sandbox.cc',
'services/namespace_sandbox.h',
'services/namespace_utils.cc',
'services/namespace_utils.h',
],
'dependencies': [
# for capability.h.
'sandbox_services_headers',
],
}],
],
'include_dirs': [
'..',
],
},
{ 'target_name': 'sandbox_services_headers',
'type': 'none',
'sources': [
'system_headers/arm64_linux_syscalls.h',
'system_headers/arm64_linux_ucontext.h',
'system_headers/arm_linux_syscalls.h',
'system_headers/arm_linux_ucontext.h',
'system_headers/capability.h',
'system_headers/i386_linux_ucontext.h',
'system_headers/linux_futex.h',
'system_headers/linux_seccomp.h',
'system_headers/linux_syscalls.h',
'system_headers/linux_time.h',
'system_headers/linux_ucontext.h',
'system_headers/mips_linux_syscalls.h',
'system_headers/mips_linux_ucontext.h',
'system_headers/x86_32_linux_syscalls.h',
'system_headers/x86_64_linux_syscalls.h',
],
'include_dirs': [
'..',
],
},
{
'target_name': 'suid_sandbox_client',
'type': '<(component)',
'sources': [
'suid/common/sandbox.h',
'suid/common/suid_unsafe_environment_variables.h',
'suid/client/setuid_sandbox_client.cc',
'suid/client/setuid_sandbox_client.h',
'suid/client/setuid_sandbox_host.cc',
'suid/client/setuid_sandbox_host.h',
],
'defines': [
'SANDBOX_IMPLEMENTATION',
],
'dependencies': [
'../base/base.gyp:base',
'sandbox_services',
],
'include_dirs': [
'..',
],
},
{
'target_name': 'bpf_dsl_golden',
'type': 'none',
'actions': [
{
'action_name': 'generate',
'inputs': [
'bpf_dsl/golden/generate.py',
'bpf_dsl/golden/i386/ArgSizePolicy.txt',
'bpf_dsl/golden/i386/BasicPolicy.txt',
'bpf_dsl/golden/i386/ElseIfPolicy.txt',
'bpf_dsl/golden/i386/MaskingPolicy.txt',
'bpf_dsl/golden/i386/MoreBooleanLogicPolicy.txt',
'bpf_dsl/golden/i386/NegativeConstantsPolicy.txt',
'bpf_dsl/golden/i386/SwitchPolicy.txt',
'bpf_dsl/golden/x86-64/ArgSizePolicy.txt',
'bpf_dsl/golden/x86-64/BasicPolicy.txt',
'bpf_dsl/golden/x86-64/BooleanLogicPolicy.txt',
'bpf_dsl/golden/x86-64/ElseIfPolicy.txt',
'bpf_dsl/golden/x86-64/MaskingPolicy.txt',
'bpf_dsl/golden/x86-64/MoreBooleanLogicPolicy.txt',
'bpf_dsl/golden/x86-64/NegativeConstantsPolicy.txt',
'bpf_dsl/golden/x86-64/SwitchPolicy.txt',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/sandbox/linux/bpf_dsl/golden/golden_files.h',
],
'action': [
'python',
'linux/bpf_dsl/golden/generate.py',
'<(SHARED_INTERMEDIATE_DIR)/sandbox/linux/bpf_dsl/golden/golden_files.h',
'linux/bpf_dsl/golden/i386/ArgSizePolicy.txt',
'linux/bpf_dsl/golden/i386/BasicPolicy.txt',
'linux/bpf_dsl/golden/i386/ElseIfPolicy.txt',
'linux/bpf_dsl/golden/i386/MaskingPolicy.txt',
'linux/bpf_dsl/golden/i386/MoreBooleanLogicPolicy.txt',
'linux/bpf_dsl/golden/i386/NegativeConstantsPolicy.txt',
'linux/bpf_dsl/golden/i386/SwitchPolicy.txt',
'linux/bpf_dsl/golden/x86-64/ArgSizePolicy.txt',
'linux/bpf_dsl/golden/x86-64/BasicPolicy.txt',
'linux/bpf_dsl/golden/x86-64/BooleanLogicPolicy.txt',
'linux/bpf_dsl/golden/x86-64/ElseIfPolicy.txt',
'linux/bpf_dsl/golden/x86-64/MaskingPolicy.txt',
'linux/bpf_dsl/golden/x86-64/MoreBooleanLogicPolicy.txt',
'linux/bpf_dsl/golden/x86-64/NegativeConstantsPolicy.txt',
'linux/bpf_dsl/golden/x86-64/SwitchPolicy.txt',
],
'message': 'Generating header from golden files ...',
},
],
},
],
'conditions': [
[ 'OS=="android"', {
'targets': [
{
'target_name': 'sandbox_linux_unittests_deps',
'type': 'none',
'dependencies': [
'sandbox_linux_unittests',
],
'variables': {
'output_dir': '<(PRODUCT_DIR)/sandbox_linux_unittests__dist/',
'native_binary': '<(PRODUCT_DIR)/sandbox_linux_unittests',
'include_main_binary': 1,
},
'includes': [
'../../build/android/native_app_dependencies.gypi'
],
}],
}],
[ 'OS=="android"', {
'conditions': [
['test_isolation_mode != "noop"', {
'targets': [
{
'target_name': 'sandbox_linux_unittests_apk_run',
'type': 'none',
'dependencies': [
'sandbox_linux_unittests',
],
'includes': [
'../../build/isolate.gypi',
],
'sources': [
'../sandbox_linux_unittests_apk.isolate',
],
},
],
},
],
],
}],
['test_isolation_mode != "noop"', {
'targets': [
{
'target_name': 'sandbox_linux_unittests_run',
'type': 'none',
'dependencies': [
'sandbox_linux_unittests',
],
'includes': [
'../../build/isolate.gypi',
],
'sources': [
'../sandbox_linux_unittests.isolate',
],
},
],
}],
],
}
| [
"[email protected]"
] | |
7fea42d5d6e2f953ca1bc59e1a18b1b4ebe9d8f1 | 0cc4eb3cb54f8394c127ace62d3108fdb5230c85 | /.spack-env/view/lib/python3.7/ctypes/test/test_slicing.py | c84caaf7855f513682056335febb29c971fb9bf0 | [] | no_license | jacobmerson/spack-develop-env | 5b2d76f58c0b64ae97c64f77a3c4d33a770c71c8 | 5fca20ca343b1a76f05fc635c87f94ed25417d94 | refs/heads/master | 2022-07-04T02:22:50.264727 | 2020-05-06T05:13:50 | 2020-05-06T05:13:50 | 261,657,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | /lore/mersoj/spack/spack/opt/spack/linux-rhel7-x86_64/gcc-7.3.0/python-3.7.7-oihhthdoxtgh4krvzpputn5ozwcnq2by/lib/python3.7/ctypes/test/test_slicing.py | [
"[email protected]"
] | |
b9095da8a1ed6d8c33f37c41cd94d2c31737a97d | cc2f91415451ba988a009c0e68303ef6a0b083c1 | /trydjango/settings.py | adefa67a5c8bcd1037fcebd4c8ffbb300e50b2d1 | [] | no_license | Jordan-Rob/Jcfe-Django-tutorial | 12a4ed169c37020cef2708202cb2e2279ae5a691 | fa970f92943cd613c455d8db187a22ce3508e7d4 | refs/heads/master | 2020-12-21T13:35:24.264645 | 2020-01-28T16:44:12 | 2020-01-28T16:44:12 | 236,446,098 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,316 | py | """
Django settings for trydjango project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'yv#a7f8-6c7fgt_)bmng@o2igm$60ym-#y=nd$6z1=%n3_!k7j'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'products.apps.ProductsConfig',
'pages.apps.PagesConfig',
'Blog.apps.BlogConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'trydjango.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'trydjango.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'trydjango',
'USER': 'root',
'PASSWORD': 'thinking23house',
'HOST': '127.0.0.1',
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
75ae3936f8f66921218c81a81973c7132ac7ba0c | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/django-1.5/django/contrib/localflavor/se/utils.py | 3a6cdf966ef46c2f855cef9304e79e2e6bc2de00 | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.5/django/contrib/localflavor/se/utils.py | [
"[email protected]"
] | |
008b269902e9e83c6d68e09034f245162da4c7ff | 2342b8737b9ffeb9715158b8ec74a33c7a4947f6 | /koku/masu/util/azure/common.py | 8456c9274467140607943451c04e58784967db68 | [
"Apache-2.0"
] | permissive | project-koku/koku | 444d8df05da5416c9cee606c42481c99be45f13d | 0416e5216eb1ec4b41c8dd4999adde218b1ab2e1 | refs/heads/main | 2023-08-20T11:30:17.510182 | 2023-08-17T18:27:30 | 2023-08-17T18:27:30 | 126,496,611 | 225 | 94 | Apache-2.0 | 2023-09-14T17:38:08 | 2018-03-23T14:29:23 | Python | UTF-8 | Python | false | false | 7,156 | py | #
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Common util functions."""
import datetime
import logging
import re
import uuid
from enum import Enum
from itertools import chain
import pandas as pd
from django_tenants.utils import schema_context
from api.models import Provider
from masu.database.azure_report_db_accessor import AzureReportDBAccessor
from masu.database.provider_db_accessor import ProviderDBAccessor
from masu.util.ocp.common import match_openshift_labels
LOG = logging.getLogger(__name__)
INGRESS_REQUIRED_COLUMNS = {
"SubscriptionGuid",
"ResourceGroup",
"ResourceLocation",
"UsageDateTime",
"MeterCategory",
"MeterSubcategory",
"MeterId",
"MeterName",
"MeterRegion",
"UsageQuantity",
"ResourceRate",
"PreTaxCost",
"ConsumedService",
"ResourceType",
"InstanceId",
"OfferId",
"AdditionalInfo",
"ServiceInfo1",
"ServiceInfo2",
"ServiceName",
"ServiceTier",
"Currency",
"UnitOfMeasure",
}
INGRESS_ALT_COLUMNS = {
"SubscriptionId",
"ResourceGroup",
"ResourceLocation",
"Date",
"MeterCategory",
"MeterSubCategory",
"MeterId",
"MeterName",
"MeterRegion",
"UnitOfMeasure",
"Quantity",
"EffectivePrice",
"CostInBillingCurrency",
"ConsumedService",
"ResourceId",
"OfferId",
"AdditionalInfo",
"ServiceInfo1",
"ServiceInfo2",
"ResourceName",
"ReservationId",
"ReservationName",
"UnitPrice",
"PublisherType",
"PublisherName",
"ChargeType",
"BillingAccountId",
"BillingAccountName",
"BillingCurrencyCode",
"BillingPeriodStartDate",
"BillingPeriodEndDate",
"ServiceFamily",
}
class AzureBlobExtension(Enum):
manifest = "_manifest.json"
csv = ".csv"
json = ".json"
def extract_uuids_from_string(source_string):
"""
Extract uuids out of a given source string.
Args:
source_string (Source): string to locate UUIDs.
Returns:
([]) List of UUIDs found in the source string
"""
uuid_regex = "[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}"
found_uuid = re.findall(uuid_regex, source_string, re.IGNORECASE)
return found_uuid
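# Illustration (reusing the sample key from get_local_file_name's docstring below):
# >>> extract_uuids_from_string("/koku/20180701-20180801/882083b7-ea62-4aab-aa6a-f0d08d65ee2b/koku-1.csv.gz")
# ['882083b7-ea62-4aab-aa6a-f0d08d65ee2b']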
def get_local_file_name(cur_key):
"""
Return the local file name for a given cost usage report key.
If an assemblyID is present in the key, it will prepend it to the filename.
Args:
cur_key (String): reportKey value from manifest file.
example:
With AssemblyID: /koku/20180701-20180801/882083b7-ea62-4aab-aa6a-f0d08d65ee2b/koku-1.csv.gz
Without AssemblyID: /koku/20180701-20180801/koku-Manifest.json
Returns:
(String): file name for the local file,
example:
With AssemblyID: "882083b7-ea62-4aab-aa6a-f0d08d65ee2b-koku-1.csv.gz"
Without AssemblyID: "koku-Manifest.json"
"""
local_file_name = cur_key.split("/")[-1]
return local_file_name
def get_bills_from_provider(provider_uuid, schema, start_date=None, end_date=None):
"""
Return the Azure bill IDs given a provider UUID.
Args:
provider_uuid (str): Provider UUID.
schema (str): Tenant schema
start_date (datetime): Start date for bill IDs.
end_date (datetime) End date for bill IDs.
Returns:
(list): Azure cost entry bill objects.
"""
if isinstance(start_date, (datetime.datetime, datetime.date)):
start_date = start_date.replace(day=1)
start_date = start_date.strftime("%Y-%m-%d")
if isinstance(end_date, (datetime.datetime, datetime.date)):
end_date = end_date.strftime("%Y-%m-%d")
with ProviderDBAccessor(provider_uuid) as provider_accessor:
provider = provider_accessor.get_provider()
if provider.type not in (Provider.PROVIDER_AZURE, Provider.PROVIDER_AZURE_LOCAL):
err_msg = f"Provider UUID is not an Azure type. It is {provider.type}"
LOG.warning(err_msg)
return []
with AzureReportDBAccessor(schema) as report_accessor:
with schema_context(schema):
bills = report_accessor.get_cost_entry_bills_query_by_provider(provider.uuid)
if start_date:
bills = bills.filter(billing_period_start__gte=start_date)
if end_date:
bills = bills.filter(billing_period_start__lte=end_date)
bills = list(bills.all())
return bills
def match_openshift_resources_and_labels(data_frame, cluster_topologies, matched_tags):
"""Filter a dataframe to the subset that matches an OpenShift source."""
nodes = chain.from_iterable(cluster_topology.get("nodes", []) for cluster_topology in cluster_topologies)
volumes = chain.from_iterable(
cluster_topology.get("persistent_volumes", []) for cluster_topology in cluster_topologies
)
matchable_resources = list(nodes) + list(volumes)
resource_id_df = data_frame["resourceid"]
if resource_id_df.isna().values.all():
resource_id_df = data_frame["instanceid"]
LOG.info("Matching OpenShift on Azure by resource ID.")
resource_id_matched = resource_id_df.str.contains("|".join(matchable_resources))
data_frame["resource_id_matched"] = resource_id_matched
tags = data_frame["tags"]
tags = tags.str.lower()
special_case_tag_matched = tags.str.contains(
"|".join(["openshift_cluster", "openshift_project", "openshift_node"])
)
data_frame["special_case_tag_matched"] = special_case_tag_matched
if matched_tags:
tag_keys = []
tag_values = []
for tag in matched_tags:
tag_keys.extend(list(tag.keys()))
tag_values.extend(list(tag.values()))
tag_matched = tags.str.contains("|".join(tag_keys)) & tags.str.contains("|".join(tag_values))
data_frame["tag_matched"] = tag_matched
any_tag_matched = tag_matched.any()
if any_tag_matched:
tag_df = pd.concat([tags, tag_matched], axis=1)
tag_df.columns = ("tags", "tag_matched")
tag_subset = tag_df[tag_df.tag_matched == True].tags # noqa: E712
LOG.info("Matching OpenShift on Azure tags.")
matched_tag = tag_subset.apply(match_openshift_labels, args=(matched_tags,))
data_frame["matched_tag"] = matched_tag
data_frame["matched_tag"].fillna(value="", inplace=True)
else:
data_frame["matched_tag"] = ""
else:
data_frame["tag_matched"] = False
data_frame["matched_tag"] = ""
openshift_matched_data_frame = data_frame[
(data_frame["resource_id_matched"] == True) # noqa: E712
| (data_frame["special_case_tag_matched"] == True) # noqa: E712
| (data_frame["matched_tag"] != "") # noqa: E712
]
openshift_matched_data_frame["uuid"] = openshift_matched_data_frame.apply(lambda _: str(uuid.uuid4()), axis=1)
openshift_matched_data_frame = openshift_matched_data_frame.drop(
columns=["special_case_tag_matched", "tag_matched"]
)
return openshift_matched_data_frame
| [
"[email protected]"
] | |
ce7cef500f46983152daf5c63552763ab26651fc | bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d | /lib/third_party/google/auth/metrics.py | f7303282c9410598e9ec61290b667f7e8fed6423 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | google-cloud-sdk-unofficial/google-cloud-sdk | 05fbb473d629195f25887fc5bfaa712f2cbc0a24 | 392abf004b16203030e6efd2f0af24db7c8d669e | refs/heads/master | 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 | NOASSERTION | 2022-10-29T20:49:13 | 2021-02-02T05:47:30 | Python | UTF-8 | Python | false | false | 5,089 | py | # Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" We use x-goog-api-client header to report metrics. This module provides
the constants and helper methods to construct x-goog-api-client header.
"""
import platform
from google.auth import version
API_CLIENT_HEADER = "x-goog-api-client"
# Auth request type
REQUEST_TYPE_ACCESS_TOKEN = "auth-request-type/at"
REQUEST_TYPE_ID_TOKEN = "auth-request-type/it"
REQUEST_TYPE_MDS_PING = "auth-request-type/mds"
REQUEST_TYPE_REAUTH_START = "auth-request-type/re-start"
REQUEST_TYPE_REAUTH_CONTINUE = "auth-request-type/re-cont"
# Credential type
CRED_TYPE_USER = "cred-type/u"
CRED_TYPE_SA_ASSERTION = "cred-type/sa"
CRED_TYPE_SA_JWT = "cred-type/jwt"
CRED_TYPE_SA_MDS = "cred-type/mds"
CRED_TYPE_SA_IMPERSONATE = "cred-type/imp"
# Versions
def python_and_auth_lib_version():
return "gl-python/{} auth/{}".format(platform.python_version(), version.__version__)
# Token request metric header values
# x-goog-api-client header value for access token request via metadata server.
# Example: "gl-python/3.7 auth/1.1 auth-request-type/at cred-type/mds"
def token_request_access_token_mds():
return "{} {} {}".format(
python_and_auth_lib_version(), REQUEST_TYPE_ACCESS_TOKEN, CRED_TYPE_SA_MDS
)
# x-goog-api-client header value for ID token request via metadata server.
# Example: "gl-python/3.7 auth/1.1 auth-request-type/it cred-type/mds"
def token_request_id_token_mds():
return "{} {} {}".format(
python_and_auth_lib_version(), REQUEST_TYPE_ID_TOKEN, CRED_TYPE_SA_MDS
)
# x-goog-api-client header value for impersonated credentials access token request.
# Example: "gl-python/3.7 auth/1.1 auth-request-type/at cred-type/imp"
def token_request_access_token_impersonate():
return "{} {} {}".format(
python_and_auth_lib_version(),
REQUEST_TYPE_ACCESS_TOKEN,
CRED_TYPE_SA_IMPERSONATE,
)
# x-goog-api-client header value for impersonated credentials ID token request.
# Example: "gl-python/3.7 auth/1.1 auth-request-type/it cred-type/imp"
def token_request_id_token_impersonate():
return "{} {} {}".format(
python_and_auth_lib_version(), REQUEST_TYPE_ID_TOKEN, CRED_TYPE_SA_IMPERSONATE
)
# x-goog-api-client header value for service account credentials access token
# request (assertion flow).
# Example: "gl-python/3.7 auth/1.1 auth-request-type/at cred-type/sa"
def token_request_access_token_sa_assertion():
return "{} {} {}".format(
python_and_auth_lib_version(), REQUEST_TYPE_ACCESS_TOKEN, CRED_TYPE_SA_ASSERTION
)
# x-goog-api-client header value for service account credentials ID token
# request (assertion flow).
# Example: "gl-python/3.7 auth/1.1 auth-request-type/it cred-type/sa"
def token_request_id_token_sa_assertion():
return "{} {} {}".format(
python_and_auth_lib_version(), REQUEST_TYPE_ID_TOKEN, CRED_TYPE_SA_ASSERTION
)
# x-goog-api-client header value for user credentials token request.
# Example: "gl-python/3.7 auth/1.1 cred-type/u"
def token_request_user():
return "{} {}".format(python_and_auth_lib_version(), CRED_TYPE_USER)
# Miscellaneous metrics
# x-goog-api-client header value for metadata server ping.
# Example: "gl-python/3.7 auth/1.1 auth-request-type/mds"
def mds_ping():
return "{} {}".format(python_and_auth_lib_version(), REQUEST_TYPE_MDS_PING)
# x-goog-api-client header value for reauth start endpoint calls.
# Example: "gl-python/3.7 auth/1.1 auth-request-type/re-start"
def reauth_start():
return "{} {}".format(python_and_auth_lib_version(), REQUEST_TYPE_REAUTH_START)
# x-goog-api-client header value for reauth continue endpoint calls.
# Example: "gl-python/3.7 auth/1.1 cred-type/re-cont"
def reauth_continue():
return "{} {}".format(python_and_auth_lib_version(), REQUEST_TYPE_REAUTH_CONTINUE)
def add_metric_header(headers, metric_header_value):
"""Add x-goog-api-client header with the given value.
Args:
headers (Mapping[str, str]): The headers to which we will add the
metric header.
metric_header_value (Optional[str]): If value is None, do nothing;
if headers already has a x-goog-api-client header, append the value
to the existing header; otherwise add a new x-goog-api-client
header with the given value.
"""
if not metric_header_value:
return
if API_CLIENT_HEADER not in headers:
headers[API_CLIENT_HEADER] = metric_header_value
else:
headers[API_CLIENT_HEADER] += " " + metric_header_value
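# Usage sketch (assumed, not from the source):
# headers = {}
# add_metric_header(headers, token_request_user())
# headers["x-goog-api-client"] is now "gl-python/<ver> auth/<ver> cred-type/u"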

# === file: tests/tests_top/test_topchart_music.py (repo: dbeley/senscritiquescraper, license: MIT) ===

from senscritiquescraper.utils.row_utils import row_utils
def test_music_rank(topchart_row_music):
rank = row_utils.get_rank(topchart_row_music)
assert rank == "1"
def test_music_title(topchart_row_music):
title = row_utils.get_title(topchart_row_music)
assert title == "The Dark Side of the Moon"
def test_music_url(topchart_row_music):
url = row_utils.get_url(topchart_row_music)
assert url.startswith("https")
def test_music_year(topchart_row_music):
year = row_utils.get_year(topchart_row_music)
assert year == "1973"
def test_music_release_date(topchart_row_music):
release_date = row_utils.get_baseline_0(topchart_row_music)
assert release_date == "23 mars 1973"
def test_music_genre(topchart_row_music):
genre = row_utils.get_baseline_1(topchart_row_music)
assert genre == "Art rock etprog rock"
def test_music_number_songs(topchart_row_music):
length = row_utils.get_number_of_seasons(topchart_row_music)
assert length == "10 morceaux"
def test_music_cover(topchart_row_music):
cover_url = row_utils.get_picture_url(topchart_row_music)
assert cover_url.startswith("https")
def test_music_artist(topchart_row_music):
artist = row_utils.get_producer(topchart_row_music)
assert artist == "Pink Floyd"
def test_music_average_rating(topchart_row_music):
average_rating = row_utils.get_average_rating(topchart_row_music)
assert len(average_rating) == 3
def test_music_number_ratings(topchart_row_music):
number_ratings = row_utils.get_number_of_ratings(topchart_row_music)
assert int(number_ratings) > 35000

# === file: _CodeTopics/LeetCode_contest/weekly/weekly2020/193/WA--193_2.py (repo: BIAOXYZ/variousCodes) ===

class Solution(object):
def findLeastNumOfUniqueInts(self, arr, k):
"""
:type arr: List[int]
:type k: int
:rtype: int
"""
length = len(arr)
numdic = dict()
for i in range(length):
            if arr[i] in numdic:
numdic[arr[i]] += 1
else:
numdic[arr[i]] = 1
# valuelist = numdic.values().sort()
        # Note: the commented line above would set valuelist to None,
        # because list.sort() sorts in place and returns None.
valuelist = sorted(numdic.values())
valuetypes = len(valuelist)
for i in range(valuetypes):
k -= valuelist[i]
            if k >= 0:
                continue
            else:
                return valuetypes - i
        # Fix: when k removals cover every distinct value, nothing is left.
        # The original fell through and implicitly returned None here, which
        # caused the wrong answer documented below (e.g. arr=[1], k=1 now
        # correctly yields 0).
        return 0
    """
    https://leetcode-cn.com/contest/weekly-contest-193/submissions/detail/78798394/
    Original submission: 41 / 43 test cases passed
    Status: Wrong Answer
    Input:    [1]
              1
    Output:   null
    Expected: 0
    """

# === file: predict/predictor.py (repo: ductri/few_shot_learning) ===

import tensorflow as tf
import logging
import pickle
import numpy as np
class Predictor:
def __init__(self, path_to_params, path_to_model):
with open(path_to_params, 'rb') as input_file:
self.params_dict = pickle.load(input_file)
self.graph = tf.Graph()
with self.graph.as_default():
self.sess = tf.Session()
with self.sess.as_default():
saver = tf.train.import_meta_graph('{}.meta'.format(path_to_model))
saver.restore(self.sess, path_to_model)
logging.info('Restored saved model at %s', path_to_model)
def predict(self, list_images_1, list_images_2):
X = list(zip(list_images_1, list_images_2))
return self._predict(X)
def _predict(self, X):
tf_predict = self.graph.get_tensor_by_name(self.params_dict['tf_predict'])
feed_dict = self.__build_feed_dict(X)
return np.squeeze(self.sess.run(tf_predict, feed_dict=feed_dict))
def _predict_prob(self, X):
tf_predict_prob = self.graph.get_tensor_by_name(self.params_dict['tf_predict_prob'])
feed_dict = self.__build_feed_dict(X)
return np.squeeze(self.sess.run(tf_predict_prob, feed_dict=feed_dict))
def predict_prob(self, list_images_1, list_images_2):
X = list(zip(list_images_1, list_images_2))
return self._predict_prob(X)
def __build_feed_dict(self, X):
tf_X = self.graph.get_tensor_by_name(self.params_dict['tf_X'])
feed_dict_func = self.params_dict['feed_dict_for_infer_func']
feed_dict = feed_dict_func(tf_X, X)
return feed_dict
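
# Hedged usage sketch (added for illustration only): the paths and image
# arrays below are hypothetical placeholders, not artifacts shipped with
# this repo.
#
#     predictor = Predictor('checkpoint/params.pkl', 'checkpoint/model.ckpt')
#     probs = predictor.predict_prob(images_a, images_b)  # pairwise match probabilities
#     labels = predictor.predict(images_a, images_b)      # hard predictions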

# === file: eve-8.21.494548/eve/client/script/ui/shared/incursionJournal.py (repo: Pluckyduck/eve) ===

#Embedded file name: c:/depot/games/branches/release/EVE-TRANQUILITY/eve/client/script/ui/shared/incursionJournal.py
import uiconst
import uicls
import uiutil
from mapcommon import STARMODE_INCURSION, STARMODE_FRIENDS_CORP
import uix
import util
import taleCommon
import localization
MIDDLECONTAINER_WIDTH = 200
MARGIN = 8
class IncursionTab:
GlobalReport, Encounters, LPLog = range(3)
class GlobalIncursionReportEntry(uicls.SE_BaseClassCore):
__guid__ = 'listentry.GlobalIncursionReportEntry'
MARGIN = 8
TEXT_OFFSET = 84
BUTTON_OFFSET = 295
def ApplyAttributes(self, attributes):
uicls.SE_BaseClassCore.ApplyAttributes(self, attributes)
self.iconsize = iconsize = 44
uicls.Line(parent=self, align=uiconst.TOBOTTOM, color=(1, 1, 1, 0.25))
self.factionParent = uicls.Container(name='factionParent', parent=self, align=uiconst.TOLEFT, pos=(0, 0, 64, 64), padding=MARGIN)
middleCont = uicls.Container(parent=self, name='middleContainer', width=MIDDLECONTAINER_WIDTH, align=uiconst.TOLEFT, padTop=MARGIN, clipChildren=True)
self.constellationLabel = BigReportLabel(name='constellationName', parent=middleCont, fontsize=20, align=uiconst.TOTOP, state=uiconst.UI_NORMAL)
self.statusText = SmallReportLabel(parent=middleCont, align=uiconst.TOTOP, uppercase=True)
SmallReportLabel(name='systemInfluence', parent=middleCont, align=uiconst.TOTOP, text=localization.GetByLabel('UI/Incursion/Common/HUDInfluenceTitle'))
self.statusBar = uicls.SystemInfluenceBar(parent=middleCont, pos=(0, 0, 200, 10), align=uiconst.TOTOP, padding=(0, 4, 0, 4))
self.stagingText = SmallReportLabel(parent=middleCont, align=uiconst.TOTOP, state=uiconst.UI_NORMAL)
self.bossIcon = uicls.IncursionBossIcon(parent=middleCont, left=3, top=3, align=uiconst.TOPRIGHT, idx=0)
btn = uix.GetBigButton(iconsize, self, left=self.BUTTON_OFFSET, top=MARGIN, align=uiconst.BOTTOMLEFT)
btn.hint = localization.GetByLabel('UI/Incursion/Journal/ShowActiveCorpMembersInMap')
btn.sr.icon.LoadIcon('ui_7_64_4', ignoreSize=True)
uicls.Icon(name='subicon', icon='ui_7_64_6', parent=btn, idx=0, size=32, align=uiconst.BOTTOMRIGHT, ignoreSize=True, color=(1, 1, 1, 0.85), state=uiconst.UI_DISABLED)
self.corpMapButton = btn
btn = uix.GetBigButton(iconsize, self, left=self.BUTTON_OFFSET + 50, top=MARGIN, align=uiconst.BOTTOMLEFT)
btn.hint = localization.GetByLabel('UI/Incursion/Journal/ShowOnStarMap')
btn.sr.icon.LoadIcon('ui_7_64_4', ignoreSize=True)
self.mapButton = btn
btn = uix.GetBigButton(iconsize, self, left=self.BUTTON_OFFSET + 100, top=MARGIN, align=uiconst.BOTTOMLEFT)
btn.hint = localization.GetByLabel('UI/Incursion/Journal/StagingAsAutopilotDestination')
btn.sr.icon.LoadIcon('ui_9_64_5', ignoreSize=True)
self.autopilotButton = btn
btn = uix.GetBigButton(iconsize, self, left=self.BUTTON_OFFSET, top=MARGIN)
btn.hint = localization.GetByLabel('UI/Incursion/Journal/ViewLoyaltyPointLog')
btn.sr.icon.LoadIcon('ui_70_64_11', ignoreSize=True)
self.lpButton = btn
self.loyaltyPoints = ReportNumber(name='loyaltyPoints', parent=self, pos=(self.BUTTON_OFFSET + 50,
MARGIN,
100,
iconsize), number=0, hint=localization.GetByLabel('UI/Incursion/Journal/LoyaltyPointsWin'), padding=(4, 4, 4, 4))
def Load(self, data):
iconsize = 48
self.factionParent.Flush()
if data.factionID:
owner = cfg.eveowners.Get(data.factionID)
uiutil.GetLogoIcon(parent=self.factionParent, align=uiconst.RELATIVE, size=64, itemID=data.factionID, ignoreSize=True, hint=localization.GetByLabel('UI/Incursion/Journal/FactionStagingRuler', faction=owner.ownerName))
else:
uicls.Icon(parent=self.factionParent, size=64, icon='ui_94_64_16', ignoreSize=True, hint=localization.GetByLabel('UI/Incursion/Journal/StagingSystemUnclaimed'), align=uiconst.RELATIVE)
rowHeader = localization.GetByLabel('UI/Incursion/Journal/ReportRowHeader', constellation=data.constellationID, constellationInfo=('showinfo', const.typeConstellation, data.constellationID))
self.constellationLabel.SetText(rowHeader)
incursionStateMessages = [localization.GetByLabel('UI/Incursion/Journal/Withdrawing'), localization.GetByLabel('UI/Incursion/Journal/Mobilizing'), localization.GetByLabel('UI/Incursion/Journal/Established')]
self.statusText.SetText(incursionStateMessages[data.state])
if data.jumps is not None:
distanceAwayText = localization.GetByLabel('UI/Incursion/Journal/ReportRowNumJumps', jumps=data.jumps)
else:
distanceAwayText = localization.GetByLabel('UI/Incursion/Journal/ReportRowSystemUnreachable')
bodyText = localization.GetByLabel('UI/Incursion/Journal/ReportRowBody', color='<color=' + sm.GetService('map').GetSystemColorString(data.stagingSolarSystemID) + '>', security=data.security, securityColor=sm.GetService('map').GetSystemColorString(data.stagingSolarSystemID), system=data.stagingSolarSystemID, systemInfo=('showinfo', const.typeSolarSystem, data.stagingSolarSystemID), distanceAway=distanceAwayText)
self.stagingText.SetText(bodyText)
self.statusBar.SetInfluence(taleCommon.CalculateDecayedInfluence(data.influenceData), None, animate=False)
self.bossIcon.SetBossSpawned(data.hasBoss)
self.corpMapButton.OnClick = lambda : sm.GetService('viewState').ActivateView('starmap', interestID=data.constellationID, starColorMode=STARMODE_FRIENDS_CORP)
self.mapButton.OnClick = lambda : sm.GetService('viewState').ActivateView('starmap', interestID=data.constellationID, starColorMode=STARMODE_INCURSION)
self.autopilotButton.OnClick = lambda : sm.GetService('starmap').SetWaypoint(data.stagingSolarSystemID, clearOtherWaypoints=True)
self.lpButton.OnClick = lambda : sm.GetService('journal').ShowIncursionTab(flag=IncursionTab.LPLog, taleID=data.taleID, constellationID=data.constellationID)
self.loyaltyPoints.number.SetText(localization.GetByLabel('UI/Incursion/Journal/NumberLoyaltyPointsAcronym', points=util.FmtAmt(data.loyaltyPoints)))
def GetDynamicHeight(node, width):
rowHeader = localization.GetByLabel('UI/Incursion/Journal/ReportRowHeader', constellation=node.constellationID, constellationInfo=('showinfo', const.typeConstellation, node.constellationID))
headerWidth, headerHeight = BigReportLabel.MeasureTextSize(rowHeader, fontsize=20, width=MIDDLECONTAINER_WIDTH)
incursionStateMessages = [localization.GetByLabel('UI/Incursion/Journal/Withdrawing'), localization.GetByLabel('UI/Incursion/Journal/Mobilizing'), localization.GetByLabel('UI/Incursion/Journal/Established')]
statusWidth, statusHeight = SmallReportLabel.MeasureTextSize(incursionStateMessages[node.state], width=MIDDLECONTAINER_WIDTH)
influenceWidth, influenceHeight = SmallReportLabel.MeasureTextSize(localization.GetByLabel('UI/Incursion/Common/HUDInfluenceTitle'), width=MIDDLECONTAINER_WIDTH)
statusBar = 18
if node.jumps is not None:
distanceAwayText = localization.GetByLabel('UI/Incursion/Journal/ReportRowNumJumps', jumps=node.jumps)
else:
distanceAwayText = localization.GetByLabel('UI/Incursion/Journal/ReportRowSystemUnreachable')
bodyText = localization.GetByLabel('UI/Incursion/Journal/ReportRowBody', color='<color=' + sm.GetService('map').GetSystemColorString(node.stagingSolarSystemID) + '>', security=node.security, securityColor=sm.GetService('map').GetSystemColorString(node.stagingSolarSystemID), system=node.stagingSolarSystemID, systemInfo=('showinfo', const.typeSolarSystem, node.stagingSolarSystemID), distanceAway=distanceAwayText)
bodyWidth, bodyHeight = SmallReportLabel.MeasureTextSize(bodyText, width=MIDDLECONTAINER_WIDTH)
return max(114, headerHeight + statusHeight + influenceHeight + statusBar + bodyHeight + MARGIN * 2)
class ReportNumber(uicls.Container):
__guid__ = 'incursion.ReportNumber'
default_align = uiconst.RELATIVE
default_clipChildren = True
def ApplyAttributes(self, attributes):
uicls.Container.ApplyAttributes(self, attributes)
number = attributes.Get('number', 0)
self.number = BigReportLabel(name='bignumber', parent=self, align=uiconst.CENTERRIGHT, text=str(number), fontsize=18, hint=attributes.Get('hint', None))
class SmallReportLabel(uicls.Label):
default_align = uiconst.RELATIVE
default_fontsize = 13
class BigReportLabel(uicls.Label):
__guid__ = 'incursion.ReportLabel'
default_fontsize = 20
default_letterspace = 0
default_align = uiconst.RELATIVE
default_maxLines = None
exports = {'incursion.IncursionTab': IncursionTab}

# === file: djangogirls/settings.py (repo: mkone112/my-first-blog) ===

"""
Django settings for djangogirls project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vk8-ro6&3n$&8wp5cmyk*h_z66gmu(00p8h0@mxkbof$tse-&#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com', 'localhost']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangogirls.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djangogirls.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')

# === file: file_server_box_sync/aiofiles/__init__.py (repo: dtrodger/file-server-box-syncer-public, licenses: Apache-2.0, MIT) ===

"""Utilities for asyncio-friendly file handling."""
from file_server_box_sync.aiofiles.threadpool import open
from file_server_box_sync.aiofiles import os
__version__ = "0.5.0.dev0"
__all__ = ['open', 'os']
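
# Hedged usage sketch (added for illustration; mirrors the upstream aiofiles
# API re-exported above -- 'example.txt' is a hypothetical path):
#
#     import asyncio
#     from file_server_box_sync import aiofiles
#
#     async def read_file(path):
#         async with aiofiles.open(path) as f:
#             return await f.read()
#
#     print(asyncio.get_event_loop().run_until_complete(read_file('example.txt')))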

# === file: post/admin.py (repo: aaogoltcov/blogAPI) ===

from django.contrib import admin
from mptt.admin import MPTTModelAdmin
from post.models import Post, Comment
@admin.register(Post)
class PhotoAdmin(admin.ModelAdmin):
pass
admin.site.register(Comment, MPTTModelAdmin)

# === file: descnucleotide/ENAC.py (repo: sirpan/iLearn) ===

#!/usr/bin/env python
# _*_coding:utf-8_*_
import re, sys, os, platform
from collections import Counter
import argparse
pPath = os.path.split(os.path.realpath(__file__))[0]
sys.path.append(pPath)
father_path = os.path.abspath(
os.path.dirname(pPath) + os.path.sep + ".") + r'\pubscripts' if platform.system() == 'Windows' else os.path.abspath(
os.path.dirname(pPath) + os.path.sep + ".") + r'/pubscripts'
sys.path.append(father_path)
import read_fasta_sequences
import save_file
import check_sequences
def ENAC(fastas, window=5, **kw):
    if check_sequences.check_fasta_with_equal_length(fastas) == False:  # the original compared the bare function object, so this check could never fire
        print('Error: for "ENAC" encoding, the input fasta sequences should have equal length. \n\n')
return 0
if window < 1:
print('Error: the sliding window should be greater than zero' + '\n\n')
return 0
if check_sequences.get_min_sequence_length(fastas) < window:
        print('Error: all sequences must be at least as long as the sliding window: ' + str(window) + '\n\n')
return 0
AA = kw['order'] if kw['order'] != None else 'ACGT'
encodings = []
header = ['#', 'label']
for w in range(1, len(fastas[0][1]) - window + 2):
for aa in AA:
header.append('SW.' + str(w) + '.' + aa)
encodings.append(header)
for i in fastas:
name, sequence, label = i[0], i[1], i[2]
code = [name, label]
for j in range(len(sequence)):
if j < len(sequence) and j + window <= len(sequence):
count = Counter(sequence[j:j + window])
for key in count:
count[key] = count[key] / len(sequence[j:j + window])
for aa in AA:
code.append(count[aa])
encodings.append(code)
return encodings
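
# Worked toy example (added for illustration; not part of the original
# script). For sequence 'ACGTA' with window=2 the sliding windows are
# AC, CG, GT, TA, and each window contributes four frequencies in ACGT
# order, e.g. AC -> [0.5, 0.5, 0.0, 0.0]:
#
#     fastas = [['seq1', 'ACGTA', '1']]
#     enc = ENAC(fastas, window=2, order='ACGT')
#     # enc[0] is the header row; enc[1][2:6] == [0.5, 0.5, 0.0, 0.0]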
if __name__ == '__main__':
parser = argparse.ArgumentParser(usage="it's usage tip.",
description="Generating ENAC feature vector for nucleotide sequences")
parser.add_argument("--file", required=True, help="input fasta file")
parser.add_argument("--slwindow", type=int, default=5, help="the sliding window of ENAC descriptor")
parser.add_argument("--format", choices=['csv', 'tsv', 'svm', 'weka'], default='svm', help="the encoding type")
parser.add_argument("--out", help="the generated descriptor file")
args = parser.parse_args()
output = args.out if args.out != None else 'encoding.txt'
kw = {'order': 'ACGT'}
fastas = read_fasta_sequences.read_nucleotide_sequences(args.file)
encodings = ENAC(fastas, window=args.slwindow, **kw)
save_file.save_file(encodings, args.format, output)

# === file: nth_of_fibb.py (repo: veronicaerick/coding-challenges) ===

def fib(n):
if n < 2:
return n
else:
    return fib(n-2) + fib(n-1)
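
# The recursive fib above recomputes overlapping subproblems, so it runs in
# exponential time. A linear-time iterative variant, added for comparison:
def fib_iter(n):
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a

assert fib_iter(10) == fib(10) == 55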
f460f77829192bbc19a1f0ff08e7bac4d07a4833 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/metricsadvisor/azure-ai-metricsadvisor/tests/test_metrics_advisor_client_live.py | 34797d48de53f2cc544d611c93a490e005584cce | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 10,790 | py | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
import datetime
import os
import functools
from azure.ai.metricsadvisor.models import (
AnomalyFeedback,
ChangePointFeedback,
CommentFeedback,
PeriodFeedback,
)
from devtools_testutils import recorded_by_proxy
from azure.ai.metricsadvisor import MetricsAdvisorClient
from base_testcase import TestMetricsAdvisorClientBase, MetricsAdvisorClientPreparer, CREDENTIALS, API_KEY, ids
MetricsAdvisorPreparer = functools.partial(MetricsAdvisorClientPreparer, MetricsAdvisorClient)
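
# Binding the client class once means each test below can request a
# configured MetricsAdvisorClient via the @MetricsAdvisorPreparer()
# decorator, while pytest parametrizes it over the credential types
# (API key / AAD) imported from base_testcase.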
class TestMetricsAdvisorClient(TestMetricsAdvisorClientBase):
@pytest.mark.parametrize("credential", API_KEY, ids=ids)
@MetricsAdvisorPreparer()
@recorded_by_proxy
def test_list_anomalies_for_detection_configuration(self, client):
results = list(client.list_anomalies(
detection_configuration_id=self.anomaly_detection_configuration_id,
start_time=datetime.datetime(2022, 2, 28),
end_time=datetime.datetime(2022, 9, 29),
))
assert len(results) > 0
@pytest.mark.parametrize("credential", API_KEY, ids=ids)
@MetricsAdvisorPreparer()
@recorded_by_proxy
def test_list_anomaly_dimension_values(self, client):
results = list(client.list_anomaly_dimension_values(
detection_configuration_id=self.anomaly_detection_configuration_id,
dimension_name="Dim1",
start_time=datetime.datetime(2022, 2, 28),
end_time=datetime.datetime(2022, 9, 29),
))
assert len(results) > 0
@pytest.mark.parametrize("credential", API_KEY, ids=ids)
@MetricsAdvisorPreparer()
@recorded_by_proxy
def test_list_incidents_for_detection_configuration(self, client):
results = list(client.list_incidents(
detection_configuration_id=self.anomaly_detection_configuration_id,
start_time=datetime.datetime(2022, 2, 28),
end_time=datetime.datetime(2022, 9, 29),
))
assert len(results) > 0
@pytest.mark.parametrize("credential", API_KEY, ids=ids)
@MetricsAdvisorPreparer()
@recorded_by_proxy
def test_list_metric_dimension_values(self, client):
results = list(client.list_metric_dimension_values(
metric_id=self.metric_id,
dimension_name="Dim1",
))
assert len(results) > 0
@pytest.mark.parametrize("credential", API_KEY, ids=ids)
@MetricsAdvisorPreparer()
@recorded_by_proxy
def test_list_incident_root_cause(self, client):
results = list(client.list_incident_root_causes(
detection_configuration_id=self.anomaly_detection_configuration_id,
incident_id=self.incident_id,
))
assert len(results) > 0
@pytest.mark.parametrize("credential", API_KEY, ids=ids)
@MetricsAdvisorPreparer()
@recorded_by_proxy
def test_list_metric_enriched_series_data(self, client):
series_identity = {"Dim1": "USD"}
results = list(client.list_metric_enriched_series_data(
detection_configuration_id=self.anomaly_detection_configuration_id,
start_time=datetime.datetime(2022, 2, 28),
end_time=datetime.datetime(2022, 9, 29),
series=[series_identity]
))
assert len(results) > 0
@pytest.mark.parametrize("credential", API_KEY, ids=ids)
@MetricsAdvisorPreparer()
@recorded_by_proxy
def test_list_metric_enrichment_status(self, client):
results = list(client.list_metric_enrichment_status(
metric_id=self.metric_id,
start_time=datetime.datetime(2022, 2, 28),
end_time=datetime.datetime(2022, 9, 29),
))
assert len(results) > 0
@pytest.mark.skip()
@pytest.mark.parametrize("credential", CREDENTIALS, ids=ids)
@MetricsAdvisorPreparer()
@recorded_by_proxy
def test_list_alerts(self, client):
results = list(client.list_alerts(
alert_configuration_id=self.anomaly_alert_configuration_id,
start_time=datetime.datetime(2022, 2, 28),
end_time=datetime.datetime(2022, 9, 29),
time_mode="AnomalyTime",
))
assert len(list(results)) > 0
@pytest.mark.parametrize("credential", API_KEY, ids=ids)
@MetricsAdvisorPreparer()
@recorded_by_proxy
def test_list_metrics_series_data(self, client):
results = list(client.list_metric_series_data(
metric_id=self.metric_id,
start_time=datetime.datetime(2022, 2, 28),
end_time=datetime.datetime(2022, 9, 29),
series_keys=[
{"Dim1": "USD", "Dim2": "US"}
]
))
assert len(results) > 0
@pytest.mark.parametrize("credential", API_KEY, ids=ids)
@MetricsAdvisorPreparer()
@recorded_by_proxy
def test_list_metric_series_definitions(self, client):
results = list(client.list_metric_series_definitions(
metric_id=self.metric_id,
active_since=datetime.datetime(2022, 3, 1),
))
assert len(results) > 0
@pytest.mark.skip("https://github.com/Azure/azure-sdk-for-python/issues/26569")
@pytest.mark.parametrize("credential", API_KEY, ids=ids) # only using API key for now since service issue with AAD
@MetricsAdvisorPreparer()
@recorded_by_proxy
def test_add_anomaly_feedback(self, client):
anomaly_feedback = AnomalyFeedback(metric_id=self.metric_id,
dimension_key={"Dim1": "USD"},
start_time=datetime.datetime(2022, 3, 1),
end_time=datetime.datetime(2022, 9, 29),
value="NotAnomaly")
client.add_feedback(anomaly_feedback)
@pytest.mark.skip("https://github.com/Azure/azure-sdk-for-python/issues/26569")
@pytest.mark.parametrize("credential", API_KEY, ids=ids) # only using API key for now since service issue with AAD
@MetricsAdvisorPreparer()
@recorded_by_proxy
def test_add_change_point_feedback(self, client):
change_point_feedback = ChangePointFeedback(metric_id=self.metric_id,
dimension_key={"Dim1": "USD"},
start_time=datetime.datetime(2022, 3, 1),
end_time=datetime.datetime(2022, 9, 29),
value="NotChangePoint")
client.add_feedback(change_point_feedback)
@pytest.mark.skip("https://github.com/Azure/azure-sdk-for-python/issues/26569")
@pytest.mark.parametrize("credential", API_KEY, ids=ids) # only using API key for now since service issue with AAD
@MetricsAdvisorPreparer()
@recorded_by_proxy
def test_add_comment_feedback(self, client):
comment_feedback = CommentFeedback(metric_id=self.metric_id,
dimension_key={"Dim1": "USD"},
start_time=datetime.datetime(2022, 3, 1),
end_time=datetime.datetime(2022, 9, 29),
value="comment")
client.add_feedback(comment_feedback)
@pytest.mark.skip("https://github.com/Azure/azure-sdk-for-python/issues/26569")
@pytest.mark.parametrize("credential", API_KEY, ids=ids) # only using API key for now since service issue with AAD
@MetricsAdvisorPreparer()
@recorded_by_proxy
def test_add_period_feedback(self, client):
period_feedback = PeriodFeedback(metric_id=self.metric_id,
dimension_key={"Dim1": "USD"},
start_time=datetime.datetime(2022, 3, 1),
end_time=datetime.datetime(2022, 9, 29),
period_type="AssignValue",
value=2)
client.add_feedback(period_feedback)
@pytest.mark.skip("InternalServerError")
@pytest.mark.parametrize("credential", API_KEY, ids=ids)
@MetricsAdvisorPreparer()
@recorded_by_proxy
def test_list_feedback(self, client):
results = list(client.list_feedback(
metric_id=self.metric_id,
start_time=datetime.datetime(2022, 3, 1),
end_time=datetime.datetime(2022, 9, 29),
time_mode="FeedbackCreatedTime"
))
assert len(results) > 0
@pytest.mark.skip("InternalServerError")
@pytest.mark.parametrize("credential", API_KEY, ids=ids)
@MetricsAdvisorPreparer()
@recorded_by_proxy
def test_get_feedback(self, client):
result = client.get_feedback(feedback_id=self.feedback_id)
assert result
@pytest.mark.skip()
@pytest.mark.parametrize("credential", CREDENTIALS, ids=ids)
@MetricsAdvisorPreparer()
@recorded_by_proxy
def test_list_anomalies_for_alert(self, client):
result = list(client.list_anomalies(
alert_configuration_id=self.anomaly_alert_configuration_id,
alert_id=self.alert_id,
))
assert len(result) > 0
@pytest.mark.skip()
@pytest.mark.parametrize("credential", CREDENTIALS, ids=ids)
@MetricsAdvisorPreparer()
@recorded_by_proxy
def test_list_incidents_for_alert(self, client):
results = list(client.list_incidents(
alert_configuration_id=self.anomaly_alert_configuration_id,
alert_id=self.alert_id,
))
assert len(results) > 0
def test_models_removed(self):
with pytest.raises(ImportError):
from azure.ai.metricsadvisor.models import AlertResultList
with pytest.raises(ImportError):
from azure.ai.metricsadvisor.models import AnomalyAlertingConfigurationList
with pytest.raises(ImportError):
from azure.ai.metricsadvisor.models import MetricDimensionList
with pytest.raises(ImportError):
from azure.ai.metricsadvisor.models import MetricFeedbackList
with pytest.raises(ImportError):
from azure.ai.metricsadvisor.models import MetricSeriesList
with pytest.raises(ImportError):
from azure.ai.metricsadvisor.models import RootCauseList
with pytest.raises(ImportError):
from azure.ai.metricsadvisor.models import SeriesResultList

# === file: Opuslog/settings/common.py (repo: rushil02/opuslog) ===

"""
Django settings for Opuslog project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import psycopg2
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^@j4m-ff2wcago%qkos@4$q(8#0jm6rkp#k3#hcq#9xdy2a9lr'
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'cities_light',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.facebook',
'allauth.socialaccount.providers.google',
'debug_toolbar',
'tinymce',
'rest_framework', # TODO: remove?
'user_custom',
'publication',
'write_up',
'engagement',
'essential',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Opuslog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Opuslog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'OpuslogDB',
'USER': 'MooPoint',
'PASSWORD': 'root',
'HOST': 'localhost',
'OPTIONS': {
'isolation_level': psycopg2.extensions.ISOLATION_LEVEL_REPEATABLE_READ, # TODO: change if needed
}
}
}
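
# Note (added comment): psycopg2 applies this isolation level to every
# connection Django opens against this database; the inline TODO above
# marks the choice as provisional.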
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
MEDIA_URL = '/media/'
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
SITE_ID = 1
# Cities-light settings
CITIES_LIGHT_TRANSLATION_LANGUAGES = ['en', ]
CITIES_LIGHT_INCLUDE_COUNTRIES = ['IN', ]
# Email settings
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = 'sappy8086'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Domain name settings
WEBSITE_DOMAIN = 'www.opuslog.com'
# django-tinymce
TINYMCE_SPELLCHECKER = True
TINYMCE_COMPRESSOR = True
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAdminUser',),
'PAGE_SIZE': 10
}