content | avg_line_length | max_line_length | alphanum_fraction | licenses | repository_name | path | size | lang |
---|---|---|---|---|---|---|---|---|
stringlengths 7-928k | float64 3.5-33.8k | int64 6-139k | float64 0.08-0.96 | sequence | stringlengths 7-104 | stringlengths 4-230 | int64 7-928k | stringclasses 1 value |
"""Top-level package for Bactopia."""
__version__ = '2.1.0'
__all__ = [
'const',
'parse',
'summary'
]
from bactopia import *
| 11.666667 | 37 | 0.585714 | ["MIT"] | bactopia/bactopia-ap | bactopia/__init__.py | 140 | Python
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 28 12:54:52 2016
@author: YPC
"""
import matplotlib.pyplot as plt
import json
import numpy
import datetime
f=open('../petitions.json', 'r')
met=json.load(f)
f.close()
s=[]
s2=[]
""" this plots the signature distribution function"""
if False:
for i in range(len(met)):
s.append( met[i]['petition']['signature_count'])
s=numpy.array(s)
s=sorted(s, reverse=True)
fig=plt.figure(figsize=(7, 4.5))
plt.rcParams.update({'font.size': 15})
plt.rc('font', family='serif')
plt.loglog(s,'-',color='darkblue',marker='x')
plt.loglog([1,len(s)],[100000,100000])
plt.loglog([1,len(s)],[10000,10000])
plt.title("Signatures distribution")
plt.xlabel("Rank")
plt.ylabel("Number of Signatures")
plt.tight_layout()
plt.legend(['Signatures','100,000','10,000'],loc=3,fontsize=12)
fig.savefig('Signatures_dist', dpi=500)
#%%
""" this plots the distr of len(text) """
if True:
for i in range(len(met)):
s.append( len(met[i]['petition']['description']))
if len(met[i]['petition']['description']) ==1000:
            print(met[i]['petition']['description'])
fig=plt.figure(figsize=(4.5, 4.5))
plt.rcParams.update({'font.size': 15})
plt.rc('font', family='serif')
    plt.hist(s, bins=1000)
plt.title("Histogram of textlengths")
plt.ylabel("Number of petitions")
plt.xlabel("Length of text")
plt.tight_layout()
#fig.savefig('textlen_dist', dpi=500)
#%%
""" this plots the distr of len(text) """
if False:
for i in range(len(met)):
if met[i]['petition']['signature_count'] >1000:
s.append( len(met[i]['petition']['description']))
s2.append( len(met[i]['petition']['description']))
plt.rcParams.update({'font.size': 12})
plt.rc('font', family='serif')
    _, bins, _ = plt.hist(s, bins=50)
fig, ax1 = plt.subplots()
fig.set_size_inches(7,4.5)
ax1.hist(s2,bins=bins,color='k',histtype='step')
ax1.set_ylabel('Petitions', color='k')
ax2 = ax1.twinx()
ax2.hist(s,bins=bins,color='b',histtype='step')
ax2.set_ylabel('Petitions with \n >1,000 signatures',color='b')
plt.title("Histogram of textlengths")
ax1.set_xlabel("Length of text")
plt.show()
fig.tight_layout()
fig.savefig('textlen_s_dist', dpi=500)
#%%
""" this plots the cum number of len(text)"""
if False:
k=0
for i in range(len(met)):
k=k+1
t = met[i]['petition']['created_datetime']
dt = t.encode()[0:10]
t0 = datetime.datetime(int(dt[0:4]),int(dt[5:7]),int(dt[8:10]))
s.append(t0)
s2.append(k)
fig, ax = plt.subplots()
fig.set_size_inches(8,3)
plt.rcParams.update({'font.size': 15})
plt.rc('font', family='serif')
ax.plot(s,s2,color='darkblue')
plt.title("Cumulative number of petitions")
plt.ylabel("Number of Petitions",fontsize=15)
ax.set_xlim([734300,735687])
for label in ax.xaxis.get_ticklabels()[1::2]:
label.set_visible(False)
ax.xaxis.get_ticklabels()[0].set_visible(True)
ax.xaxis.get_ticklabels()[-1].set_visible(True)
ax.tick_params(axis='both', which='major', labelsize=10)
plt.tight_layout()
fig.savefig('pets_vs_time', dpi=500)
#%%
if False:
for i in range(len(met)):
if int(met[i]['petition']['signature_count'])<100000 and int(met[i]['petition']['signature_count'])>5000:
            print(met[i]['petition']['id'], met[i]['petition']['signature_count'])
#347 148373
#885 149470
#1535 113490
#2199 156218
#7337 258276
#8903 118875
#19149 118475
#19658 145544
#22321 102701
#22670 179466
#29349 154662
#29399 110704
#29664 108848
#31778 114499
#33133 117469
#35788 109306
#37180 174578
#38257 304255
#40925 106210
#41492 153828
#43154 104818
#45969 134835
#46455 170931
#48389 106410
#48628 104068
#49528 111572
#52740 110561
#53523 123881
#56810 107261
#58166 103063
#60164 113797
#62385 327877
#62490 123307
#63445 103479
#64331 118956
#64997 112285
#67165 124511
#67911 102170
#71455 118068
#73911 103841
#74830 135408
| 27.888158 | 114 | 0.610757 | ["MIT"] | yukimasano/pet_forecast | network/get_sig_histogram.py | 4,239 | Python
# model settings
model = dict(
type='RetinaNet',
pretrained='torchvision://resnet50',
backbone=dict(type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch'),
neck=[
dict(type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
num_outs=5),
dict(
type='SEPC',
out_channels=256,
Pconv_num=4,
pconv_deform=True,
lcconv_deform=True,
iBN=True, # when open, please set imgs/gpu >= 4
)
],
bbox_head=dict(type='SepcFreeAnchorRetinaHead',
num_classes=81,
in_channels=256,
stacked_convs=0,
feat_channels=256,
octave_base_scale=4,
scales_per_octave=3,
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[8, 16, 32, 64, 128],
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2],
loss_bbox=dict(type='SmoothL1Loss',
beta=0.11,
loss_weight=0.75)))
# training and testing settings
train_cfg = dict(assigner=dict(type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False)
test_cfg = dict(nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=4,
workers_per_gpu=2,
train=dict(type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=1,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/retinanet_free_anchor_r50_fpn_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
| 35.359375 | 75 | 0.53977 | ["Apache-2.0"] | jshilong/SEPC | sepc/exp/freeanchor/sepc_freeanchor.py | 4,526 | Python
"""
PollingDelayConfig
"""
from typing import Any
class PollingDelayConfig(object):
"""
Summarized configuration for the polling_delay settings in the Gateway Application.
"""
DEFAULT_MIN_TRANSACTION_POLLING_DELAY_S = 0.0
DEFAULT_MAX_TRANSACTION_POLLING_DELAY_S = 60.0
DEFAULT_MIN_ATTEMPT_LIST_WORKER_DELAY_S = 0.1
DEFAULT_MAX_ATTEMPT_LIST_WORKER_DELAY_S = 60.0
def __init__(self,
coin_min_polling_delay_s: float = DEFAULT_MIN_TRANSACTION_POLLING_DELAY_S,
coin_max_polling_delay_s: float = DEFAULT_MAX_TRANSACTION_POLLING_DELAY_S,
waves_min_polling_delay_s: float = DEFAULT_MIN_TRANSACTION_POLLING_DELAY_S,
waves_max_polling_delay_s: float = DEFAULT_MAX_TRANSACTION_POLLING_DELAY_S,
attempt_list_worker_min_polling_delay_s: float = DEFAULT_MIN_ATTEMPT_LIST_WORKER_DELAY_S,
attempt_list_worker_max_polling_delay_s: float = DEFAULT_MAX_ATTEMPT_LIST_WORKER_DELAY_S) -> None:
self._coin_polling_delay_s_min = coin_min_polling_delay_s
self._coin_polling_delay_s_max = coin_max_polling_delay_s
self._waves_polling_delay_s_min = waves_min_polling_delay_s
self._waves_polling_delay_s_max = waves_max_polling_delay_s
self._attempt_list_worker_min_polling_delay_s = attempt_list_worker_min_polling_delay_s
self._attempt_list_worker_max_polling_delay_s = attempt_list_worker_max_polling_delay_s
@staticmethod
def from_single_polling_delay(polling_delay_s: float) -> Any:
return PollingDelayConfig(
coin_min_polling_delay_s=polling_delay_s,
coin_max_polling_delay_s=polling_delay_s,
waves_min_polling_delay_s=polling_delay_s,
waves_max_polling_delay_s=polling_delay_s,
attempt_list_worker_min_polling_delay_s=polling_delay_s,
attempt_list_worker_max_polling_delay_s=polling_delay_s)
@property
def waves_max_polling_delay_s(self) -> float:
return self._waves_polling_delay_s_max
@property
def waves_min_polling_delay_s(self) -> float:
return self._waves_polling_delay_s_min
@property
def coin_min_polling_delay_s(self) -> float:
return self._coin_polling_delay_s_min
@property
def coin_max_polling_delay_s(self) -> float:
return self._coin_polling_delay_s_max
@property
def attempt_list_worker_min_polling_delay_s(self) -> float:
return self._attempt_list_worker_min_polling_delay_s
@property
def attempt_list_worker_max_polling_delay_s(self) -> float:
return self._attempt_list_worker_max_polling_delay_s
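# Usage sketch (added for illustration; not part of the original module). It relies
# only on the class above: from_single_polling_delay mirrors one value into every
# min/max slot, while the bare constructor falls back to the DEFAULT_* constants.
if __name__ == '__main__':
    uniform = PollingDelayConfig.from_single_polling_delay(5.0)
    assert uniform.coin_min_polling_delay_s == 5.0
    assert uniform.attempt_list_worker_max_polling_delay_s == 5.0
    defaults = PollingDelayConfig()
    assert defaults.waves_max_polling_delay_s == PollingDelayConfig.DEFAULT_MAX_TRANSACTION_POLLING_DELAY_S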
| 41.71875 | 115 | 0.762921 | ["MIT"] | NeolithEra/WavesGatewayFramework | waves_gateway/model/polling_delay_config.py | 2,670 | Python
import time
from options.train_options import TrainOptions
from data import DataLoader
from models import create_model
from util.writer import Writer
from test import run_test
if __name__ == '__main__':
opt = TrainOptions().parse()
# opt.serial_batches = True # no shuffle
print('Creating DataLoader...')
dataset = DataLoader(opt)
print('DataLoader created!')
    dataset_size = len(dataset)
    print('#training meshes = %d' % dataset_size)
model = create_model(opt)
writer = Writer(opt)
total_steps = 0
for epoch in range(opt.epoch_count,
opt.niter + opt.niter_decay + 1):
epoch_start_time = time.time()
iter_data_time = time.time()
epoch_iter = 0
o_ncorrect = 0
o_nexamples = 0
o_pr = 0
o_re = 0
model.save_network(0)
for i, data in enumerate(dataset):
print(i)
iter_start_time = time.time()
if total_steps % opt.print_freq == 0:
t_data = iter_start_time - iter_data_time
total_steps += opt.batch_size
epoch_iter += opt.batch_size
model.set_input(data)
ncorrect, nexamples, pr, re = model.optimize_parameters()
o_ncorrect += ncorrect
o_nexamples += nexamples
o_pr += pr
o_re += re
if total_steps % opt.print_freq == 0:
loss = model.loss
t = (time.time() - iter_start_time) / opt.batch_size
writer.print_current_losses(epoch, epoch_iter, loss, t, t_data)
writer.plot_loss(loss, epoch, epoch_iter, dataset_size)
if i % opt.save_latest_freq == 0:
print('saving the latest model (epoch %d, total_steps %d)' %
(epoch, total_steps))
model.save_network('latest')
iter_data_time = time.time()
if epoch % opt.save_epoch_freq == 0:
print('saving the model at the end of epoch %d, iters %d' %
(epoch, total_steps))
model.save_network('latest')
model.save_network(epoch)
print('End of epoch %d / %d \t Time Taken: %d sec' %
(epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
model.update_learning_rate()
if opt.verbose_plot:
writer.plot_model_wts(model, epoch)
if epoch % opt.run_test_freq == 0:
acc, pr, re = run_test(epoch)
writer.plot_acc(acc, epoch)
writer.plot_pr(pr, epoch)
writer.plot_re(re, epoch)
writer.plot_train_acc(float(o_ncorrect)/o_nexamples, epoch)
writer.plot_train_pr(float(o_pr)/o_nexamples, epoch)
writer.plot_train_re(float(o_re)/o_nexamples, epoch)
writer.close()
| 34.289157 | 83 | 0.576599 | ["MIT"] | blufzzz/MeshCNN | train.py | 2,846 | Python
# Tests that do not require the benchmark-operator pod
from benchmark_runner.common.oc.oc import OC
from tests.integration.benchmark_runner.test_environment_variables import *
import tempfile
import tarfile
import time
from benchmark_runner.common.prometheus.prometheus_snapshot import PrometheusSnapshot
def test_oc_get_ocp_server_version():
"""
    This method gets the ocp server version
:return:
"""
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.get_ocp_server_version()
def test_oc_get_kata_version():
"""
This method gets the sandboxed containers (kata) version
:return:
"""
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.get_kata_version()
def test_oc_get_cnv_version():
"""
    This method gets the cnv version
:return:
"""
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.get_cnv_version()
def test_oc_get_ocs_version():
"""
    This method gets the ocs version
:return:
"""
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.get_ocs_version()
def test_oc_get_master_nodes():
"""
    This method tests getting the master nodes
:return:
"""
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.get_master_nodes()
def test_login():
"""
    This method tests login
:return:
"""
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
assert oc.login()
def test_oc_get_pod_name():
"""
    This test runs oc get pod by name
:return:
"""
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
assert oc._get_pod_name(pod_name='erererer', namespace=test_environment_variable['namespace']) == ''
def test_oc_get_pods():
"""
    This test runs oc get pods
:return:
"""
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
assert oc.get_pods()
def test_get_prom_token():
"""
    This method returns the prom token from the cluster
:return:
"""
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.get_prom_token()
def test_is_cnv_installed():
"""
    This method checks if the cnv operator is installed
:return:
"""
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.is_cnv_installed()
def test_is_kata_installed():
"""
This method checks if the sandboxed containers (kata) operator is installed
:return:
"""
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.is_kata_installed()
def test_is_ocs_installed():
"""
    This method checks if the ocs operator is installed
:return:
"""
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.is_ocs_installed()
def test_is_kata_installed():
"""
    This method checks if the kata operator is installed
:return:
"""
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.is_kata_installed()
def test_oc_exec():
"""
Test that oc exec works
:return:
"""
test_message = "I am here"
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
answer = oc.exec(pod_name="prometheus-k8s-0", namespace="openshift-monitoring", container='prometheus', command=f'echo "{test_message}"')
assert answer == test_message
def test_collect_prometheus():
"""
Test that Prometheus data can be collected. TBD test that data is valid.
:return:
"""
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
with tempfile.TemporaryDirectory() as dirname:
snapshot = PrometheusSnapshot(oc=oc, artifacts_path=dirname, verbose=True)
snapshot.prepare_for_snapshot(pre_wait_time=1)
time.sleep(10)
tarball = snapshot.retrieve_snapshot(post_wait_time=1)
assert tarfile.is_tarfile(tarball)
| 26.060976 | 141 | 0.704024 | ["Apache-2.0"] | RobertKrawitz/benchmark-runner | tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py | 4,274 | Python
from openpyxl.workbook import Workbook
from scrapy import Selector
from selenium import webdriver
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager
import time
from csv import writer
driver_path = 'D:\\Application\\installers\\ChromeDriver\\chromedriver.exe'
def append_list_as_row(file_name, list_of_elem):
# Open file in append mode
with open(file_name, 'a+', newline='', encoding="utf-8") as write_obj:
# Create a writer object from csv module
csv_writer = writer(write_obj)
# Add contents of list as last row in the csv file
csv_writer.writerow(list_of_elem)
def parse_house(link):
driver2 = webdriver.Chrome(executable_path=driver_path)
driver2.maximize_window()
driver2.get(link)
house_info = []
page_source = driver2.page_source
response2 = Selector(text=page_source)
title = response2.css('.kt-page-title h1::text').get()
address = response2.css('.kt-page-title__subtitle.kt-page-title__subtitle--responsive-sized::text').get()
area = response2.css('.kt-group-row-item__value::text').get()
year = response2.css('.kt-group-row-item__value::text')[1].get()
rooms = response2.css('.kt-group-row-item__value::text')[2].get()
price = response2.css('.kt-unexpandable-row__value::text').get()
price_per_meter = response2.css('.kt-unexpandable-row__value::text')[1].get()
elevator = response2.css('span.kt-group-row-item__value.kt-body.kt-body--stable::text')[0].get()
parking = response2.css('span.kt-group-row-item__value.kt-body.kt-body--stable::text')[1].get()
warehouse = response2.css('span.kt-group-row-item__value.kt-body.kt-body--stable::text')[2].get()
date = response2.css('.time::text').get()
house_info.append(title)
house_info.append(address)
house_info.append(area)
house_info.append(year)
house_info.append(rooms)
house_info.append(price)
house_info.append(price_per_meter)
house_info.append(elevator)
house_info.append(parking)
house_info.append(warehouse)
house_info.append(date)
append_list_as_row('Tehran House Data.csv', house_info)
driver2.quit()
def parse_neighborhood(link):
driver1 = webdriver.Chrome(executable_path=driver_path)
driver1.maximize_window()
driver1.get(link)
for i in range(8):
driver1.execute_script("window.scrollTo(0, document.body.scrollHeight)")
time.sleep(1)
sel = driver1.page_source
response1 = Selector(text=sel)
for cards in response1.css('div.post-card-item.kt-col-6.kt-col-xxl-4'):
link = cards.css('a').attrib['href']
house_link = "https://divar.ir" + link
parse_house(house_link)
time.sleep(1)
driver1.quit()
def parse():
driver = webdriver.Chrome(executable_path=driver_path)
driver.maximize_window()
driver.get("https://divar.ir/s/tehran/buy-apartment")
driver.implicitly_wait(5)
driver.find_element(By.XPATH, "/html/body/div[1]/div[2]/aside/div/div[1]/div[2]/div[1]").click()
driver.find_element(By.XPATH, "/html/body/div[1]/div[2]/aside/div/div[1]/div[2]/div[2]/div/button").click()
component = driver.find_element(By.XPATH, "/html/body/div[2]/div/article/div/div/div/div")
neighborhoods = []
subtitles = []
links = []
for number in range(0, 29280, 650):
driver.execute_script(f"arguments[0].scrollTop = {number}", component)
sel = driver.page_source
response = Selector(text=sel)
for part in response.css('div.kt-control-row.kt-control-row--large.kt-control-row--clickable'):
neighborhood = part.css('.kt-control-row__title::text').get()
neighborhoods.append(neighborhood)
subtitle = part.css('.kt-base-row__description.kt-body--sm::text').get()
subtitles.append(subtitle)
link = part.css('.kt-control-row__title').attrib['href']
links.append(link)
print(type(links))
counter = 1
set_links = set(links)
for element in set_links:
counter += 1
if counter <= 5:
continue
neighborhood_link = "https://divar.ir" + element
parse_neighborhood(neighborhood_link)
parse()
| 35.691667 | 111 | 0.676162 | ["Apache-2.0"] | sr-comp/Divar | selenium_test.py | 4,283 | Python
# RUN: python %s | llvm-mc -filetype=obj -triple i686-pc-win32 - | llvm-readobj -h | FileCheck %s
from __future__ import print_function
# This test checks that the COFF object emitter can produce objects with
# more than 65279 sections.
# While we only generate 65277 sections, an implicit .text, .data and .bss will
# also be emitted. This brings the total to 65280.
num_sections = 65277
# CHECK: ImageFileHeader {
# CHECK-NEXT: Machine: IMAGE_FILE_MACHINE_I386
# CHECK-NEXT: SectionCount: 65280
# CHECK-NEXT: TimeDateStamp: {{[0-9]+}}
# CHECK-NEXT: PointerToSymbolTable: 0x{{[0-9A-F]+}}
# CHECK-NEXT: SymbolCount: 195837
# CHECK-NEXT: OptionalHeaderSize: 0
# CHECK-NEXT: Characteristics [ (0x0)
# CHECK-NEXT: ]
# CHECK-NEXT: }
for i in range(0, num_sections):
print(""" .section .bss,"bw",discard,_b%d
.globl _b%d # @b%d
_b%d:
.byte 0 # 0x0
""" % (i, i, i, i))
| 32.241379 | 97 | 0.656684 | ["Apache-2.0"] | 2henwei/llvm | test/MC/COFF/bigobj.py | 935 | Python
# Calculation the Elastic Constants from given deformations
import os
import subprocess
from pymatgen import Structure, Lattice, Specie
from pymatgen.analysis.elasticity import DeformedStructureSet, Strain, Stress, ElasticTensor
from pmg_lammps import RelaxSet, LammpsLog, LammpsData, LammpsPotentials
supercell = (5, 5, 5)
a = 4.1990858 # From evaluation of potential
lattice = Lattice.from_parameters(a, a, a, 90, 90, 90)
mg = Specie('Mg', 1.4)
o = Specie('O', -1.4)
atoms = [mg, o]
sites = [[0, 0, 0], [0.5, 0.5, 0.5]]
structure = Structure.from_spacegroup(225, lattice, atoms, sites)
initial_structure = structure * supercell
directory = 'runs/elastic'
num_normal = 10
num_shear = 10
max_normal = 0.03
max_shear = 0.08
lammps_potentials = LammpsPotentials(pair={
(mg, mg): '1309362.2766468062 0.104 0.0',
(mg, o ): '9892.357 0.20199 0.0',
(o , o ): '2145.7345 0.3 30.2222'
})
mgo_potential_settings = [
('pair_style', 'buck/coul/long 10.0'),
('kspace_style', 'pppm 1.0e-5'),
]
print('Performing Strained Calculations')
strained_structures = []
deformation_set = DeformedStructureSet(structure, nd=max_normal, ns=max_shear,
num_norm=num_normal, num_shear=num_shear)
for i, deformation in enumerate(deformation_set.deformations):
deformation_directory = os.path.join(directory, str(i))
print('Deformation', i)
strain = Strain.from_deformation(deformation)
strained_structure = deformation.apply_to_structure(initial_structure)
lammps_data = LammpsData.from_structure(strained_structure, potentials=lammps_potentials,
include_charge=True)
lammps_set = RelaxSet(lammps_data, relax_box=False, user_lammps_settings=[
] + mgo_potential_settings)
lammps_set.write_input(deformation_directory)
subprocess.call(['lammps', '-i', 'lammps.in'], cwd=deformation_directory, stdout=subprocess.PIPE)
lammps_log = LammpsLog(os.path.join(deformation_directory, 'lammps.log'))
stress = Stress(lammps_log.get_stress(-1))
strained_structures.append({
'strain': strain,
        'structure': strained_structure,
'stress': stress / -10000.0 # bar to GPa
})
strains = [defo['strain'] for defo in strained_structures]
stresses = [defo['stress'] for defo in strained_structures]
elastic = ElasticTensor.from_pseudoinverse(strains, stresses)
print('Stiffness Tensor')
for row in elastic.voigt:
print('{:+8.1f} {:+8.1f} {:+8.1f} {:+8.1f} {:+8.1f} {:+8.1f}\n'.format(*row))
print('Shear Modulus G_V', elastic.g_voigt)
print('Shear Modulus G_R', elastic.g_reuss)
print('Shear Modulus G_vrh', elastic.g_vrh)
print('Bulk Modulus K_V', elastic.k_voigt)
print('Bulk Modulus K_R', elastic.k_reuss)
print('Bulk Modulus K_vrh', elastic.k_vrh)
print('Elastic Anisotropy', elastic.universal_anisotropy)
print('Poissons Ratio', elastic.homogeneous_poisson)
| 35.240964 | 101 | 0.709744 | ["MIT"] | costrouc/pymatgen-lammps | examples/elastic.py | 2,925 | Python
# -*- coding: utf-8 -*-
from mootdx.quotes import Quotes
client = Quotes.factory(market='std')  # standard market
# client = Quotes.factory(market='ext', multithread=True, heartbeat=True)  # extended market
quote = client.bars(symbol='600036', frequency=9, offset=10)
print(quote)
quote = client.index(symbol='000001', frequency=9)
print(quote)
quote = client.minute(symbol='000001')
print(quote)
| 25.2 | 80 | 0.719577 | ["MIT"] | Pr-Chen/mootdx | sample/basic_quotes.py | 394 | Python
import numpy as np
def generate_A(filename1, filename2, noise = 'gau'):
exp_T = 4000
big_y_true_gau = []
big_y_noise_gau = []
big_y_true_t2 = []
big_y_noise_t2 = []
for times in range(100):
y_true_gau = np.zeros((exp_T, 1, 1))
y_true_gau[0] = np.random.rand()
y_true_gau[1] = np.random.rand()
y_true_t2 = np.zeros((exp_T, 1, 1))
y_true_t2[0] = np.random.rand()
y_true_t2[1] = np.random.rand()
y_noise_gau = y_true_gau.copy()
y_noise_t2 = y_true_t2.copy()
e_gau = np.random.normal(0, 0.3, (exp_T, 1))
e_t2 = np.random.standard_t(2, (exp_T,1))
y_noise_gau[0] = y_true_gau[0] + e_gau[0]
y_noise_gau[1] = y_true_gau[1] + e_gau[1]
y_noise_t2[0] = y_true_t2[0] + e_t2[0]
y_noise_t2[1] = y_true_t2[1] + e_t2[1]
for t in range(2, exp_T):
y_true_gau[t] = (3./2.)*np.sin(np.pi / 2. * y_noise_gau[t - 1]) - np.sin(np.pi / 2. * y_noise_gau[t - 2])
y_noise_gau[t] = y_true_gau[t] + 2* e_gau[t]
y_true_t2[t] = np.sin(np.pi / 2. * y_noise_t2[t - 1]) -np.sin(np.pi / 2. * y_noise_t2[t - 2])
y_noise_t2[t] = y_true_t2[t] + 2* e_t2[t]
big_y_true_gau.append(y_true_gau)
big_y_noise_gau.append(y_noise_gau)
big_y_true_t2.append(y_true_t2)
big_y_noise_t2.append(y_noise_t2)
if noise == 'gau':
with open(filename1, 'wb') as f:
np.save(f, np.array(big_y_true_gau))
with open(filename2, 'wb') as f:
np.save(f, np.array(big_y_noise_gau))
else:
with open(filename1, 'wb') as f:
np.save(f, np.array(big_y_true_t2))
with open(filename2, 'wb') as f:
np.save(f, np.array(big_y_noise_t2))
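# Example driver (added for illustration; the output file names are hypothetical,
# not paths used elsewhere in this repository). It writes the clean and noisy
# series for each of the two noise models handled above.
if __name__ == '__main__':
    generate_A('exampleA_true_gau.npy', 'exampleA_noise_gau.npy', noise='gau')
    generate_A('exampleA_true_t2.npy', 'exampleA_noise_t2.npy', noise='t2')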
| 41.186047 | 117 | 0.570299 | ["Apache-2.0"] | FengxiangHe/SpHAM | generate_exampleA.py | 1,771 | Python
# pylint: disable=missing-docstring, line-too-long, invalid-name, arguments-differ, no-member, pointless-statement
from functools import partial
import torch
from e3nn import Kernel, rs
from e3nn.non_linearities.norm import Norm
from e3nn.non_linearities.rescaled_act import swish
from e3nn.point.operations import Convolution
from e3nn.radial import GaussianRadialModel
# Define the input and output representations
Rs_in = [(1, 0), (2, 1)] # Input = One scalar plus two vectors
Rs_out = [(1, 1)] # Output = One single vector
# Radial model: R+ -> R^d
RadialModel = partial(GaussianRadialModel, max_radius=3.0, number_of_basis=3, h=100, L=1, act=swish)
# kernel: composed on a radial part that contains the learned parameters
# and an angular part given by the spherical harmonics and the Clebsch-Gordan coefficients
K = partial(Kernel, RadialModel=RadialModel)
# Create the convolution module
conv = Convolution(K(Rs_in, Rs_out))
# Module to compute the norm of each irreducible component
norm = Norm(Rs_out)
n = 5 # number of input points
features = rs.randn(1, n, Rs_in, requires_grad=True)
in_geometry = torch.randn(1, n, 3)
out_geometry = torch.zeros(1, 1, 3) # One point at the origin
out = norm(conv(features, in_geometry, out_geometry))
out.backward()
print(out)
print(features.grad)
| 31.214286 | 114 | 0.762777 | ["MIT"] | L-sky/e3nn | tests/readme_example_test.py | 1,311 | Python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
from ckeditor.fields import RichTextField
from theJekyllProject.choices import BlogTemplates
class Contact(models.Model):
first_name = models.CharField(
max_length=200
)
last_name = models.CharField(
max_length=200,
null=True,
blank=True
)
email = models.EmailField(
max_length=200,
null=True,
blank=True
)
message = models.CharField(
max_length=5000
)
def __str__(self):
return '%s sent message %s' % (self.email, self.message)
class Repo(models.Model):
user = models.ForeignKey(
User,
on_delete=models.CASCADE
)
repo = models.CharField(
max_length=200,
)
main = models.BooleanField(
default=False
)
template = models.CharField(
max_length=2,
choices=BlogTemplates.choices,
default=BlogTemplates.TEMPLATE_NOT_SET
)
def __str__(self):
return '%s has made %s and is %s' % (self.user, self.repo, self.main)
class CName(models.Model):
"""
    CName model is used to store the CNAME info of the repo
"""
repo = models.OneToOneField(
Repo,
on_delete=models.CASCADE,
)
c_name = models.CharField(max_length=200)
class Post(models.Model):
repo = models.ForeignKey(
Repo,
on_delete=models.CASCADE,
)
author = models.CharField(max_length=100, null=True, blank=True)
comments = models.BooleanField(default=True)
date = models.DateField(auto_now_add=True,)
time = models.TimeField(auto_now_add=True,)
layouts = (
('post', 'post'),
)
layout = models.CharField(
max_length=100,
choices=layouts,
null=True,
blank=True
)
title = models.CharField(max_length=2000)
slug = models.CharField(max_length=2000, null=True, blank=True)
content = RichTextField()
background = models.ImageField(upload_to='pictures/', null=True,
blank=True)
def __str__(self):
return '%s on %s' % (self.title, self.date)
class Page(models.Model):
repo = models.ForeignKey(
Repo,
on_delete=models.CASCADE
)
title = models.CharField(max_length=2000)
permalink = models.CharField(max_length=2000)
layout = models.CharField(max_length=2000)
description = models.CharField(
max_length=2000,
default='Description of the Page',
)
background = models.ImageField(upload_to='pictures/', null=True,
blank=True)
content = RichTextField()
class PostCategory(models.Model):
post = models.ForeignKey(Post)
category = models.CharField(max_length=200, null=True, blank=True)
class SiteData(models.Model):
repo = models.OneToOneField(
Repo,
on_delete=models.CASCADE,
primary_key=True,
)
name = models.CharField(
max_length=200,
default='Your site title',
)
description = models.CharField(
max_length=2000,
default='Description of the site',
)
avatar = models.URLField(
null=True,
blank=True
)
author = models.CharField(
max_length=2000,
default='Author of the site',
null=True,
blank=True
)
baseurl = models.CharField(
max_length=200,
default='/jekyllblog',
null=True,
blank=True
)
url = models.CharField(
max_length=200,
default='http://blog.jeklog.com',
null=True,
blank=True
)
class SiteSocialProfile(models.Model):
repo = models.OneToOneField(
Repo,
on_delete=models.CASCADE,
primary_key=True,
)
dribbble = models.CharField(
max_length=200,
null=True,
blank=True
)
email = models.EmailField(
max_length=200,
null=True,
blank=True
)
facebook = models.CharField(
max_length=200,
null=True,
blank=True
)
flickr = models.CharField(
max_length=200,
null=True,
blank=True
)
github = models.CharField(
max_length=200,
null=True,
blank=True
)
instagram = models.CharField(
max_length=200,
null=True,
blank=True
)
linkedin = models.CharField(
max_length=200,
null=True,
blank=True
)
pinterest = models.CharField(
max_length=200,
null=True,
blank=True
)
rss = models.CharField(
max_length=200,
null=True,
blank=True
)
twitter = models.CharField(
max_length=200,
null=True,
blank=True
)
stackoverflow = models.CharField(
max_length=200,
null=True,
blank=True
)
youtube = models.CharField(
max_length=200,
null=True,
blank=True
)
googleplus = models.CharField(
max_length=200,
null=True,
blank=True
)
disqus = models.CharField(
max_length=200,
null=True,
blank=True
)
google_analytics = models.CharField(
max_length=200,
null=True,
blank=True
)
class SitePlugin(models.Model):
repo = models.ForeignKey(
Repo,
on_delete=models.CASCADE,
)
plugin = models.CharField(
max_length=200,
)
class SiteExclude(models.Model):
repo = models.ForeignKey(
Repo,
on_delete=models.CASCADE,
)
exclude = models.CharField(
max_length=200,
)
class SiteTheme(models.Model):
repo = models.OneToOneField(
Repo,
on_delete=models.CASCADE,
primary_key=True,
)
theme = models.CharField(
max_length=200,
)
| 22.471698 | 77 | 0.589253 | ["MIT"] | silvrwolfboy/theJekyllProject | djangoFiles/theJekyllProject/models.py | 5,955 | Python
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import platform
import socket
import sys
from oslo.config import cfg
from nova.compute import flavors
import nova.context
import nova.db
from nova import exception
from nova.image import glance
from nova.network import minidns
from nova.network import model as network_model
from nova.objects import instance as instance_obj
CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')
def get_test_admin_context():
return nova.context.get_admin_context()
def get_test_image_info(context, instance_ref):
if not context:
context = get_test_admin_context()
image_ref = instance_ref['image_ref']
image_service, image_id = glance.get_remote_image_service(context,
image_ref)
return image_service.show(context, image_id)
def get_test_flavor(context=None, options=None):
options = options or {}
if not context:
context = get_test_admin_context()
test_flavor = {'name': 'kinda.big',
'flavorid': 'someid',
'memory_mb': 2048,
'vcpus': 4,
'root_gb': 40,
'ephemeral_gb': 80,
'swap': 1024}
test_flavor.update(options)
try:
flavor_ref = nova.db.flavor_create(context, test_flavor)
except (exception.FlavorExists, exception.FlavorIdExists):
flavor_ref = nova.db.flavor_get_by_name(context, 'kinda.big')
return flavor_ref
def get_test_instance(context=None, flavor=None, obj=False):
if not context:
context = get_test_admin_context()
if not flavor:
flavor = get_test_flavor(context)
metadata = {}
flavors.save_flavor_info(metadata, flavor, '')
test_instance = {'memory_kb': '2048000',
'basepath': '/some/path',
'bridge_name': 'br100',
'vcpus': 4,
'root_gb': 40,
'project_id': 'fake',
'bridge': 'br101',
'image_ref': 'cedef40a-ed67-4d10-800e-17455edce175',
'instance_type_id': '5',
'system_metadata': metadata,
'extra_specs': {}}
if obj:
instance = instance_obj.Instance(context, **test_instance)
instance.create()
else:
instance = nova.db.instance_create(context, test_instance)
return instance
def get_test_network_info(count=1):
ipv6 = CONF.use_ipv6
fake = 'fake'
fake_ip = '0.0.0.0'
fake_netmask = '255.255.255.255'
fake_vlan = 100
fake_bridge_interface = 'eth0'
def current():
subnet_4 = network_model.Subnet(cidr=fake_ip,
dns=[network_model.IP(fake_ip),
network_model.IP(fake_ip)],
gateway=network_model.IP(fake_ip),
ips=[network_model.IP(fake_ip),
network_model.IP(fake_ip)],
routes=None,
dhcp_server=fake_ip)
subnet_6 = network_model.Subnet(cidr=fake_ip,
gateway=network_model.IP(fake_ip),
ips=[network_model.IP(fake_ip),
network_model.IP(fake_ip),
network_model.IP(fake_ip)],
routes=None,
version=6)
subnets = [subnet_4]
if ipv6:
subnets.append(subnet_6)
network = network_model.Network(id=None,
bridge=fake,
label=None,
subnets=subnets,
vlan=fake_vlan,
bridge_interface=fake_bridge_interface,
injected=False)
vif = network_model.VIF(id='vif-xxx-yyy-zzz',
address=fake,
network=network,
type=network_model.VIF_TYPE_BRIDGE,
devname=None,
ovs_interfaceid=None)
return vif
return network_model.NetworkInfo([current() for x in xrange(0, count)])
def is_osx():
return platform.mac_ver()[0] != ''
test_dns_managers = []
def dns_manager():
global test_dns_managers
manager = minidns.MiniDNS()
test_dns_managers.append(manager)
return manager
def cleanup_dns_managers():
global test_dns_managers
for manager in test_dns_managers:
manager.delete_dns_file()
test_dns_managers = []
def killer_xml_body():
return (("""<!DOCTYPE x [
<!ENTITY a "%(a)s">
<!ENTITY b "%(b)s">
<!ENTITY c "%(c)s">]>
<foo>
<bar>
<v1>%(d)s</v1>
</bar>
</foo>""") % {
'a': 'A' * 10,
'b': '&a;' * 10,
'c': '&b;' * 10,
'd': '&c;' * 9999,
}).strip()
def is_ipv6_supported():
has_ipv6_support = socket.has_ipv6
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
s.close()
except socket.error as e:
if e.errno == errno.EAFNOSUPPORT:
has_ipv6_support = False
else:
raise
# check if there is at least one interface with ipv6
if has_ipv6_support and sys.platform.startswith('linux'):
try:
with open('/proc/net/if_inet6') as f:
if not f.read():
has_ipv6_support = False
except IOError:
has_ipv6_support = False
return has_ipv6_support
| 31.859223 | 79 | 0.534359 | ["Apache-2.0"] | bopopescu/nova-35 | nova/tests/utils.py | 6,563 | Python
import os.path
from importlib import import_module
basedir = os.path.abspath(os.path.dirname(__file__))
env = os.getenv('ENVIRONMENT', 'local')
if not env in ['local', 'test']:
config_file = '/path/to/config/directory/' + env + '.py'
if not os.path.isfile(config_file):
env = 'local'
config_name = 'path.to.config.directory.' + env
module = import_module(config_name)
config = module.config
config.MIGRATIONS_PATH = os.path.join(basedir, 'migrations')
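# Note (added for illustration): the per-environment modules imported above are not
# shown here. A hypothetical path/to/config/directory/local.py would only need to
# expose a `config` object, for example:
#
#     class Config(object):
#         DEBUG = True  # assumed setting, purely illustrative
#
#     config = Config()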
| 27.764706 | 60 | 0.709746 | ["Apache-2.0"] | gotitinc/code-samples | misc/configuration/config.py | 472 | Python
"""distutils.command.build
Implements the Distutils 'build' command."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: build.py 62197 2008-04-07 01:53:39Z mark.hammond $"
import sys, os
from distutils.core import Command
from distutils.errors import DistutilsOptionError
from distutils.util import get_platform
def show_compilers ():
from distutils.ccompiler import show_compilers
show_compilers()
class build (Command):
description = "build everything needed to install"
user_options = [
('build-base=', 'b',
"base directory for build library"),
('build-purelib=', None,
"build directory for platform-neutral distributions"),
('build-platlib=', None,
"build directory for platform-specific distributions"),
('build-lib=', None,
"build directory for all distribution (defaults to either " +
"build-purelib or build-platlib"),
('build-scripts=', None,
"build directory for scripts"),
('build-temp=', 't',
"temporary build directory"),
('plat-name=', 'p',
"platform name to build for, if supported "
"(default: %s)" % get_platform()),
('compiler=', 'c',
"specify the compiler type"),
('debug', 'g',
"compile extensions and libraries with debugging information"),
('force', 'f',
"forcibly build everything (ignore file timestamps)"),
('executable=', 'e',
"specify final destination interpreter path (build.py)"),
]
boolean_options = ['debug', 'force']
help_options = [
('help-compiler', None,
"list available compilers", show_compilers),
]
def initialize_options (self):
self.build_base = 'build'
# these are decided only after 'build_base' has its final value
# (unless overridden by the user or client)
self.build_purelib = None
self.build_platlib = None
self.build_lib = None
self.build_temp = None
self.build_scripts = None
self.compiler = None
self.plat_name = None
self.debug = None
self.force = 0
self.executable = None
def finalize_options (self):
if self.plat_name is None:
self.plat_name = get_platform()
else:
# plat-name only supported for windows (other platforms are
# supported via ./configure flags, if at all). Avoid misleading
# other platforms.
if os.name != 'nt':
raise DistutilsOptionError(
"--plat-name only supported on Windows (try "
"using './configure --help' on your platform)")
plat_specifier = ".%s-%s" % (self.plat_name, sys.version[0:3])
# Make it so Python 2.x and Python 2.x with --with-pydebug don't
# share the same build directories. Doing so confuses the build
# process for C modules
if hasattr(sys, 'gettotalrefcount'):
plat_specifier += '-pydebug'
# 'build_purelib' and 'build_platlib' just default to 'lib' and
# 'lib.<plat>' under the base build directory. We only use one of
# them for a given distribution, though --
if self.build_purelib is None:
self.build_purelib = os.path.join(self.build_base, 'lib')
if self.build_platlib is None:
self.build_platlib = os.path.join(self.build_base,
'lib' + plat_specifier)
# 'build_lib' is the actual directory that we will use for this
# particular module distribution -- if user didn't supply it, pick
# one of 'build_purelib' or 'build_platlib'.
if self.build_lib is None:
if self.distribution.ext_modules:
self.build_lib = self.build_platlib
else:
self.build_lib = self.build_purelib
# 'build_temp' -- temporary directory for compiler turds,
# "build/temp.<plat>"
if self.build_temp is None:
self.build_temp = os.path.join(self.build_base,
'temp' + plat_specifier)
if self.build_scripts is None:
self.build_scripts = os.path.join(self.build_base,
'scripts-' + sys.version[0:3])
if self.executable is None:
self.executable = os.path.normpath(sys.executable)
# finalize_options ()
def run (self):
# Run all relevant sub-commands. This will be some subset of:
# - build_py - pure Python modules
# - build_clib - standalone C libraries
# - build_ext - Python extensions
# - build_scripts - (Python) scripts
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
# -- Predicates for the sub-command list ---------------------------
def has_pure_modules (self):
return self.distribution.has_pure_modules()
def has_c_libraries (self):
return self.distribution.has_c_libraries()
def has_ext_modules (self):
return self.distribution.has_ext_modules()
def has_scripts (self):
return self.distribution.has_scripts()
sub_commands = [('build_py', has_pure_modules),
('build_clib', has_c_libraries),
('build_ext', has_ext_modules),
('build_scripts', has_scripts),
]
# class build
| 35.207547 | 76 | 0.58771 | ["Apache-2.0"] | Alexisblues/sl4a | python/src/Lib/distutils/command/build.py | 5,598 | Python
"""
This script:
- Train Sobolev Alignment.
- Save the two networks.
"""
import os, sys, getopt
import pandas as pd
import numpy as np
import re
from anndata import AnnData
import torch
from pickle import dump, load
from copy import deepcopy
import gc
from sobolev_alignment import SobolevAlignment
# Import params
from model_III_synthetic_params import *
from read_data import read_data
# Import parameters
n_artificial_samples = None
tmp_file = None
opts, args = getopt.getopt(sys.argv[1:],'o:d:n:t:j:p:',['output=', 'data=', 'artifsamples=', 'temp=', 'job=', 'perm='])
for opt, arg in opts:
if opt in ("-o", "--output"):
output_folder = str(arg)
elif opt in ("-d", "--data"):
data_subfolder = str(arg)
elif opt in ('-n', '--artifsamples'):
n_artificial_samples = int(arg)
elif opt in ('-t', '--temp'):
tmp_file = str(arg)
elif opt in ('-j', '--job'):
n_jobs = int(arg)
n_artificial_samples = n_artificial_samples if n_artificial_samples is not None else 10**6
n_artificial_samples = int(n_artificial_samples)
tmp_file = tmp_file if tmp_file is not None else '/tmp/SM/'
###
# IMPORT DATA
###
X_source, X_target = read_data(data_folder, data_subfolder)
gc.collect()
###
# Sobolev Alignment start
###
# Read best parameters
cell_line_scvi_params, tumor_scvi_params = read_scvi_params(output_folder)
sobolev_alignment_clf = SobolevAlignment(
source_scvi_params=cell_line_scvi_params,
target_scvi_params=tumor_scvi_params,
source_krr_params=default_krr_params,
target_krr_params=default_krr_params,
n_jobs=n_jobs
)
###
# Training Sobolev Alignment if not already saved.
###
if 'sobolev_alignment_model' not in os.listdir(output_folder):
pass
else:
sys.exit("VAE ALREADY TRAINED")
sobolev_alignment_clf.n_jobs = n_jobs
sobolev_alignment_clf.fit(
X_source=X_source,
X_target=X_target,
source_batch_name=batch_name,
target_batch_name=batch_name,
continuous_covariate_names=continuous_covariate_names,
n_artificial_samples=100,
fit_vae=True,
sample_artificial=False,
krr_approx=False,
n_samples_per_sample_batch=10**6,
frac_save_artificial=1.,
save_mmap=tmp_file,
log_input=log_input,
no_posterior_collapse=no_posterior_collapse,
frob_norm_source=frob_norm_source
)
if 'sobolev_alignment_model' not in os.listdir(output_folder):
sobolev_alignment_clf.save('%s/sobolev_alignment_model/'%(output_folder), with_krr=False)
gc.collect()
# Save embedding
for x in sobolev_alignment_clf.scvi_models:
np.savetxt(
'%s/scvi_embedding_%s.csv'%(output_folder, x),
sobolev_alignment_clf.scvi_models[x].get_latent_representation()
)
torch.cuda.empty_cache()
gc.collect()
sys.exit("FINISH VAE TRAINING") | 26.443396 | 119 | 0.727435 | [
"MIT"
] | saroudant/sobolev_alignment_manuscript | figure_3/model_III/sobolev_alignment/train_VAE.py | 2,803 | Python |
import json
from django.test.utils import override_settings
from tastypie.exceptions import NotFound
from basic.models import Note
from testcases import TestCaseWithFixture
from django.test.testcases import SimpleTestCase
@override_settings(ROOT_URLCONF='validation.api.urls')
class FilteringErrorsTestCase(TestCaseWithFixture):
def test_valid_date(self):
resp = self.client.get('/api/v1/notes/', data={
'format': 'json',
'created__gte': '2010-03-31 00:00:00Z'
})
self.assertEqual(resp.status_code, 200)
deserialized = json.loads(resp.content.decode('utf-8'))
self.assertEqual(len(deserialized['objects']), Note.objects.filter(created__gte='2010-03-31 00:00:00Z').count())
def test_invalid_date(self):
resp = self.client.get('/api/v1/notes/', data={
'format': 'json',
'created__gte': 'foo-baz-bar'
})
self.assertEqual(resp.status_code, 400)
@override_settings(ROOT_URLCONF='validation.api.urls')
class PostRelatedUrlValidationTestCase(TestCaseWithFixture):
def test_valid_url(self):
data_with_pk = json.dumps({
'title': 'Test title related pk',
'slug': 'test-slug-related-pk',
'content': 'This is the content',
'user': {'pk': 1},
})
data_with_url = json.dumps({
'title': 'Test title related url',
'slug': 'test-slug-related-url',
'content': 'This is the content',
'user': '/api/v1/users/1/',
})
resp_with_pk = self.client.post('/api/v1/notes/', data=data_with_pk, content_type='application/json')
self.assertEqual(resp_with_pk.status_code, 201)
note_posted_with_pk = json.loads(self.client.get(resp_with_pk['location']).content.decode('utf-8'))
resp_with_url = self.client.post('/api/v1/notes/', data=data_with_url, content_type='application/json')
self.assertEqual(resp_with_url.status_code, 201)
note_posted_with_url = json.loads(self.client.get(resp_with_url['location']).content.decode('utf-8'))
self.assertEqual(note_posted_with_pk['user'], note_posted_with_url['user'])
def test_invalid_url(self):
data = json.dumps({
'title': 'Test title related url',
'slug': 'test-slug-related-url',
'content': 'This is the content',
'user': 'invalid-url',
})
with self.assertRaises(NotFound):
self.client.post('/api/v1/notes/', data=data, content_type='application/json')
@override_settings(ROOT_URLCONF='validation.api.urls')
class PostNestResouceValidationTestCase(TestCaseWithFixture):
def test_valid_data(self):
data = json.dumps({
'title': 'Test Title',
'slug': 'test-title',
'content': 'This is the content',
'user': {'pk': 1}, # loaded from fixtures
'annotated': {'annotations': 'This is an annotations'},
})
resp = self.client.post('/api/v1/notes/', data=data, content_type='application/json')
self.assertEqual(resp.status_code, 201)
note = json.loads(self.client.get(resp['location']).content.decode('utf-8'))
self.assertTrue(note['annotated'])
def test_invalid_data(self):
data = json.dumps({
'title': '',
'slug': 'test-title',
'content': 'This is the content',
'user': {'pk': 1}, # loaded from fixtures
'annotated': {'annotations': ''},
})
resp = self.client.post('/api/v1/notes/', data=data, content_type='application/json')
self.assertEqual(resp.status_code, 400)
self.assertEqual(json.loads(resp.content.decode('utf-8')), {
'notes': {
'title': ['This field is required.']
},
'annotated': {
'annotations': ['This field is required.']
}
})
@override_settings(ROOT_URLCONF='validation.api.urls')
class PutDetailNestResouceValidationTestCase(TestCaseWithFixture):
def test_valid_data(self):
data = json.dumps({
'title': 'Test Title',
'slug': 'test-title',
'content': 'This is the content',
'annotated': {'annotations': 'This is another annotations'},
})
resp = self.client.put('/api/v1/notes/1/', data=data, content_type='application/json')
self.assertEqual(resp.status_code, 204)
note = json.loads(self.client.get('/api/v1/notes/1/', content_type='application/json').content.decode('utf-8'))
self.assertTrue(note['annotated'])
self.assertEqual('test-title', note['slug'])
def test_invalid_data(self):
data = json.dumps({
'title': '',
'slug': '',
'content': 'This is the content',
'annotated': {'annotations': None},
})
resp = self.client.put('/api/v1/notes/1/', data=data, content_type='application/json')
self.assertEqual(resp.status_code, 400)
self.assertEqual(json.loads(resp.content.decode('utf-8')), {
'notes': {
'slug': ['This field is required.'],
'title': ['This field is required.']
},
'annotated': {
'annotations': ['This field is required.']
}
})
@override_settings(ROOT_URLCONF='validation.api.urls')
class PutListNestResouceValidationTestCase(TestCaseWithFixture):
def test_valid_data(self):
data = json.dumps({'objects': [
{
'id': 1,
'title': 'Test Title',
'slug': 'test-title',
'content': 'This is the content',
'annotated': {'annotations': 'This is another annotations'},
'user': {'id': 1}
},
{
'id': 2,
'title': 'Test Title',
'slug': 'test-title',
'content': 'This is the content',
'annotated': {'annotations': 'This is the third annotations'},
'user': {'id': 1}
}
]})
resp = self.client.put('/api/v1/notes/', data=data, content_type='application/json')
self.assertEqual(resp.status_code, 204)
note = json.loads(self.client.get('/api/v1/notes/1/', content_type='application/json').content.decode('utf-8'))
self.assertTrue(note['annotated'])
note = json.loads(self.client.get('/api/v1/notes/2/', content_type='application/json').content.decode('utf-8'))
self.assertTrue(note['annotated'])
def test_invalid_data(self):
data = json.dumps({'objects': [
{
'id': 1,
'title': 'Test Title',
'slug': 'test-title',
'annotated': {'annotations': None},
'user': {'id': 1}
},
{
'id': 2,
'title': 'Test Title',
'annotated': {'annotations': None},
'user': {'id': 1}
}
]})
resp = self.client.put('/api/v1/notes/', data=data, content_type='application/json')
self.assertEqual(resp.status_code, 400)
self.assertEqual(json.loads(resp.content.decode('utf-8')), {
'notes': {
'content': ['This field is required.']
},
'annotated': {
'annotations': ['This field is required.']
}
})
class TestJSONPValidation(SimpleTestCase):
"""
Explicitly run the doctests for tastypie.utils.validate_jsonp
"""
def test_jsonp(self):
import tastypie.utils.validate_jsonp
import doctest
doctest.testmod(tastypie.utils.validate_jsonp)
| 38.131707 | 120 | 0.567609 | ["BSD-3-Clause"] | 7Geese/django-tastypie | tests/validation/tests.py | 7,817 | Python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.http import HttpResponseServerError
from django.shortcuts import redirect
from django.shortcuts import render
from django.shortcuts import render_to_response
from django.template import loader
from django.template.context import RequestContext
from django.views.decorators.csrf import csrf_exempt
from networkapiclient.exception import NetworkAPIClientError
from networkapiclient.Pagination import Pagination
from CadVlan.Auth.AuthSession import AuthSession
from CadVlan.forms import DeleteForm
from CadVlan.messages import error_messages
from CadVlan.messages import healthcheck_messages
from CadVlan.messages import pool_messages
from CadVlan.permissions import ENVIRONMENT_MANAGEMENT
from CadVlan.permissions import EQUIPMENT_MANAGEMENT
from CadVlan.permissions import HEALTH_CHECK_EXPECT
from CadVlan.permissions import POOL_ALTER_SCRIPT
from CadVlan.permissions import POOL_CREATE_SCRIPT
from CadVlan.permissions import POOL_MANAGEMENT
from CadVlan.permissions import POOL_REMOVE_SCRIPT
from CadVlan.permissions import VIPS_REQUEST
from CadVlan.Pool import facade
from CadVlan.Pool.forms import PoolFormV3
from CadVlan.Pool.forms import PoolGroupUsersForm
from CadVlan.Pool.forms import PoolHealthcheckForm
from CadVlan.Pool.forms import SearchPoolForm
from CadVlan.templates import AJAX_IPLIST_EQUIPMENT_REAL_SERVER_HTML
from CadVlan.templates import POOL_DATATABLE
from CadVlan.templates import POOL_DATATABLE_NEW
from CadVlan.templates import POOL_FORM
from CadVlan.templates import POOL_LIST
from CadVlan.templates import POOL_LIST_NEW
from CadVlan.templates import POOL_MANAGE_TAB1
from CadVlan.templates import POOL_MANAGE_TAB2
from CadVlan.templates import POOL_MANAGE_TAB3
from CadVlan.templates import POOL_MANAGE_TAB4
from CadVlan.templates import POOL_MEMBER_ITEMS
from CadVlan.templates import POOL_REQVIP_DATATABLE
from CadVlan.templates import POOL_SPM_DATATABLE
from CadVlan.Util.converters.util import split_to_array
from CadVlan.Util.Decorators import has_perm
from CadVlan.Util.Decorators import has_perm_external
from CadVlan.Util.Decorators import log
from CadVlan.Util.Decorators import login_required
from CadVlan.Util.shortcuts import render_message_json
from CadVlan.Util.utility import DataTablePaginator
from CadVlan.Util.utility import get_param_in_request
logger = logging.getLogger(__name__)
@log
@login_required
@has_perm([{'permission': POOL_MANAGEMENT, 'read': True}])
def list_all(request):
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
environments = client.create_pool().list_environments_with_pools()
lists = dict()
lists['delete_form'] = DeleteForm()
lists['search_form'] = SearchPoolForm(environments)
return render_to_response(POOL_LIST, lists, context_instance=RequestContext(request))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect('home')
@log
@login_required
@has_perm([{'permission': POOL_MANAGEMENT, 'read': True}])
def list_all_new(request):
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
search = {
'extends_search': [{'serverpool__environment__isnull': False}],
'start_record': 0,
'custom_search': '',
'end_record': 10000,
'asorting_cols': [],
'searchable_columns': []}
fields = ['id', 'name']
environments = client.create_api_environment().search(search=search,
fields=fields)
lists = {'delete_form': DeleteForm(),
'search_form': SearchPoolForm(environments['environments'])}
return render_to_response(POOL_LIST_NEW, lists,
context_instance=RequestContext(request))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect('home')
@log
@login_required
@has_perm([{'permission': POOL_MANAGEMENT, 'read': True}])
def datatable(request):
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
environment_id = int(request.GET.get('pEnvironment'))
column_index_name_map = {
0: '',
1: 'identifier',
2: 'default_port',
3: 'healthcheck__healthcheck_type',
4: 'environment',
5: 'pool_created',
6: ''
}
dtp = DataTablePaginator(request, column_index_name_map)
dtp.build_server_side_list()
dtp.searchable_columns = [
'identifier',
'default_port',
'pool_created',
'healthcheck__healthcheck_type',
]
pagination = Pagination(
dtp.start_record,
dtp.end_record,
dtp.asorting_cols,
dtp.searchable_columns,
dtp.custom_search
)
data = dict()
data['start_record'] = pagination.start_record
data['end_record'] = pagination.end_record
data['asorting_cols'] = pagination.asorting_cols
data['searchable_columns'] = pagination.searchable_columns
data['custom_search'] = pagination.custom_search or ''
data['extends_search'] = [
{'environment': environment_id}] if environment_id else []
pools = client.create_pool().list_pool(data)
return dtp.build_response(
pools['server_pools'],
pools['total'],
POOL_DATATABLE,
request
)
except NetworkAPIClientError, e:
logger.error(e.error)
return render_message_json(e.error, messages.ERROR)
@log
@login_required
@has_perm([{'permission': POOL_MANAGEMENT, 'read': True}])
def datatable_new(request):
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
environment_id = int(request.GET.get('pEnvironment'))
column_index_name_map = {
0: '',
1: 'identifier',
2: 'default_port',
3: 'healthcheck__healthcheck_type',
4: 'environment',
5: 'pool_created',
6: '',
}
dtp = DataTablePaginator(request, column_index_name_map)
dtp.build_server_side_list()
dtp.searchable_columns = [
'identifier',
'default_port',
'pool_created',
'healthcheck__healthcheck_type',
]
dtp.asorting_cols = ['identifier']
pagination = Pagination(
dtp.start_record,
dtp.end_record,
dtp.asorting_cols,
dtp.searchable_columns,
dtp.custom_search
)
search = {'start_record': pagination.start_record,
'end_record': pagination.end_record,
'asorting_cols': pagination.asorting_cols,
'searchable_columns': pagination.searchable_columns,
'custom_search': pagination.custom_search or '',
'extends_search': [{'environment': environment_id}]
if environment_id else []}
fields = [
'id',
'identifier',
'default_port',
'healthcheck__healthcheck_type',
'environment__details',
'pool_created'
]
pools = client.create_api_pool().search(search=search,
fields=fields)
return dtp.build_response(
pools['server_pools'],
pools['total'],
POOL_DATATABLE_NEW,
request
)
except NetworkAPIClientError, e:
logger.error(e.error)
return render_message_json(e.error, messages.ERROR)
@log
@login_required
@has_perm([{'permission': POOL_MANAGEMENT, 'read': True}])
def spm_datatable(request, id_server_pool, checkstatus):
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
column_index_name_map = {
0: '',
1: 'identifier',
2: 'ip',
3: 'port_real',
4: 'priority',
5: 'member_status',
6: 'member_status',
7: 'member_status',
8: 'last_status_update'
}
dtp = DataTablePaginator(request, column_index_name_map)
dtp.build_server_side_list()
pools = client.create_pool().get_pool_members(id_server_pool, checkstatus)
members = pools['server_pools'][0]['server_pool_members']
return dtp.build_response(members, len(members), POOL_SPM_DATATABLE, request)
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return HttpResponseServerError(e, mimetype='application/javascript')
@log
@login_required
@has_perm([{'permission': POOL_MANAGEMENT, 'read': True}])
def reqvip_datatable(request, id_server_pool):
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
column_index_name_map = {
0: '',
1: 'id',
2: 'Nome(s) do VIP',
3: 'IPv4',
4: 'IPv6',
5: 'Equipamento(s)',
6: 'Ambiente VIP',
7: 'criado',
8: ''
}
dtp = DataTablePaginator(request, column_index_name_map)
# Make params
dtp.build_server_side_list()
# Set params in simple Pagination class
pagination = Pagination(
dtp.start_record,
dtp.end_record,
dtp.asorting_cols,
dtp.searchable_columns,
dtp.custom_search)
data = dict()
data['start_record'] = pagination.start_record
data['end_record'] = pagination.end_record
data['asorting_cols'] = pagination.asorting_cols
data['searchable_columns'] = pagination.searchable_columns
data['custom_search'] = pagination.custom_search or ''
data['extends_search'] = [
{'viprequestport__viprequestportpool__server_pool': id_server_pool}]
requisicoes_vip = client.create_api_vip_request().search(
search=data,
kind='details',
fields=['id', 'name', 'environmentvip', 'ipv4',
'ipv6', 'equipments', 'created'])
return dtp.build_response(requisicoes_vip['vips'], requisicoes_vip['total'],
POOL_REQVIP_DATATABLE, request)
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return HttpResponseServerError(e, mimetype='application/javascript')
@log
@login_required
@has_perm([
{'permission': POOL_MANAGEMENT, 'write': True},
{'permission': POOL_ALTER_SCRIPT, 'write': True}]
)
def add_form(request):
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
lists = dict()
environment_choices = facade.populate_enviroments_choices(client)
lb_method_choices = facade.populate_optionsvips_choices(client)
servicedownaction_choices = facade.populate_servicedownaction_choices(
client)
group_users_list = client.create_grupo_usuario().listar()
groups_of_logged_user = client.create_usuario().get_by_id(
request.session['user']._User__id)['usuario']['grupos']
lists['action'] = reverse('pool.add.form')
lists['label_tab'] = u'Cadastro de Pool'
lists['pool_created'] = False
if request.method == 'GET':
lists['pool_members'] = list()
lists['healthcheck_expect'] = ''
lists['healthcheck_request'] = ''
form_pool = PoolFormV3(
environment_choices,
lb_method_choices,
servicedownaction_choices
)
form_group_users_initial = {
'group_users': groups_of_logged_user
if not isinstance(groups_of_logged_user, basestring) else [groups_of_logged_user]
}
form_group_users = PoolGroupUsersForm(
group_users_list, False, initial=form_group_users_initial)
form_healthcheck = PoolHealthcheckForm()
if request.method == 'POST':
# Get Data From Request Post To Save
pool_id = request.POST.get('id')
environment_id = request.POST.get('environment')
members = dict()
members['id_pool_member'] = request.POST.getlist('id_pool_member')
members['id_equips'] = request.POST.getlist('id_equip')
members['name_equips'] = request.POST.getlist('equip')
members['priorities'] = request.POST.getlist('priority')
members['ports_reals'] = request.POST.getlist('ports_real_reals')
members['weight'] = request.POST.getlist('weight')
members['id_ips'] = request.POST.getlist('id_ip')
members['ips'] = request.POST.getlist('ip')
members['environment'] = environment_id
healthcheck_choices = facade.populate_healthcheck_choices(client)
form_pool = PoolFormV3(
environment_choices,
lb_method_choices,
servicedownaction_choices,
request.POST
)
form_healthcheck = PoolHealthcheckForm(
healthcheck_choices,
request.POST
)
form_group_users = PoolGroupUsersForm(
group_users_list, False, request.POST)
if form_pool.is_valid() and form_healthcheck.is_valid() and form_group_users.is_valid():
pool = dict()
pool['id'] = pool_id
servicedownaction = facade.format_servicedownaction(
client, form_pool)
healthcheck = facade.format_healthcheck(request)
group_users = form_group_users.cleaned_data['group_users']
groups_permissions = []
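                # Every user group selected in the form is granted full
                # permissions (read, write, change_config, delete) on the pool.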
if len(group_users) > 0:
for id in group_users:
groups_permissions.append({
'user_group': int(id),
'read': True,
'write': True,
'change_config': True,
'delete': True
})
pool['groups_permissions'] = groups_permissions
pool['permissions'] = {'replace': False}
pool['identifier'] = str(form_pool.cleaned_data['identifier'])
pool['default_port'] = int(
form_pool.cleaned_data['default_port'])
pool['environment'] = int(
form_pool.cleaned_data['environment'])
pool['servicedownaction'] = servicedownaction
pool['lb_method'] = str(form_pool.cleaned_data['balancing'])
pool['healthcheck'] = healthcheck
pool['default_limit'] = int(form_pool.cleaned_data['maxcon'])
server_pool_members = facade.format_server_pool_members(
request, pool['default_limit'])
pool['server_pool_members'] = server_pool_members
client.create_pool().save_pool(pool)
messages.add_message(
request, messages.SUCCESS, pool_messages.get('success_insert'))
return redirect('pool.list')
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
lists['form_pool'] = form_pool
lists['form_healthcheck'] = form_healthcheck
lists['form_group_users'] = form_group_users
return render_to_response(POOL_FORM, lists, context_instance=RequestContext(request))
@log
@login_required
@has_perm([
{'permission': POOL_MANAGEMENT, 'write': True, 'read': True},
{'permission': POOL_ALTER_SCRIPT, 'write': True}]
)
def edit_form(request, id_server_pool):
auth = AuthSession(request.session)
client = auth.get_clientFactory()
lists = dict()
environment_choices = facade.populate_enviroments_choices(client)
lb_method_choices = facade.populate_optionsvips_choices(client)
servicedownaction_choices = facade.populate_servicedownaction_choices(
client)
group_users_list = client.create_grupo_usuario().listar()
lists['action'] = reverse('pool.edit.form', args=[id_server_pool])
lists['label_tab'] = u'Edição de Pool'
lists['id_server_pool'] = id_server_pool
try:
pool = client.create_api_pool()\
.get([id_server_pool], kind='details',
include=['groups_permissions'])['server_pools'][0]
group_users_list_selected = []
for group in pool['groups_permissions']:
group_users_list_selected.append(group['user_group']['id'])
pool_created = lists['pool_created'] = pool['pool_created']
if pool_created:
return redirect(reverse('pool.manage.tab1', args=[id_server_pool]))
environment_id = pool['environment']['id']
if request.method == 'GET':
server_pool_members = list()
server_pool_members_raw = pool.get('server_pool_members')
if server_pool_members_raw:
for obj_member in server_pool_members_raw:
ipv4 = obj_member.get('ip')
ipv6 = obj_member.get('ipv6')
ip_obj = ipv4 or ipv6
                    # equipment = client.create_pool().get_equip_by_ip(ip_obj.get('id'))
                    # get_equip_by_ip can return many equipments related to those IPs,
                    # which is a problem because the equipment returned may not
                    # be the same one
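                    # member_status is a small bit field; its three lowest bits
                    # are kept as a zero-padded binary string so the 'hab' flag
                    # (index 1) and the up/down flag (index 2) can be shown
                    # separately in the template.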
mbs = bin(int(obj_member.get('member_status')))[
2:5].zfill(3)
server_pool_members.append({
'id': obj_member['id'],
'id_equip': obj_member['equipment']['id'],
'nome_equipamento': obj_member['equipment']['name'],
'priority': obj_member['priority'],
'port_real': obj_member['port_real'],
'weight': obj_member['weight'],
'id_ip': ip_obj.get('id'),
'member_status': obj_member.get('member_status'),
'member_status_hab': mbs[1],
'member_status_updown': mbs[2],
'ip': ip_obj.get('ip_formated')
})
healthcheck = pool['healthcheck']['healthcheck_type']
healthcheck_expect = pool['healthcheck']['healthcheck_expect']
healthcheck_request = pool['healthcheck']['healthcheck_request']
healthcheck_destination = pool['healthcheck']['destination'].split(':')[
1]
healthcheck_destination = healthcheck_destination if healthcheck_destination != '*' else ''
form_initial = {
'id': id_server_pool,
'environment': environment_id,
'default_port': pool.get('default_port'),
'balancing': pool.get('lb_method'),
'servicedownaction': pool.get('servicedownaction').get('id'),
'maxcon': pool.get('default_limit'),
'identifier': pool.get('identifier')
}
healthcheck_choices = facade.populate_healthcheck_choices(client)
form_pool = PoolFormV3(
environment_choices,
lb_method_choices,
servicedownaction_choices,
initial=form_initial
)
form_initial = {
'group_users': group_users_list_selected
}
form_group_users = PoolGroupUsersForm(
group_users_list, True, initial=form_initial)
form_initial = {
'healthcheck': healthcheck,
'healthcheck_request': healthcheck_request,
'healthcheck_expect': healthcheck_expect,
'healthcheck_destination': healthcheck_destination
}
form_healthcheck = PoolHealthcheckForm(
healthcheck_choices,
initial=form_initial
)
lists['pool_members'] = server_pool_members
if request.method == 'POST':
members = dict()
members['id_pool_member'] = request.POST.getlist('id_pool_member')
members['id_equips'] = request.POST.getlist('id_equip')
members['name_equips'] = request.POST.getlist('equip')
members['priorities'] = request.POST.getlist('priority')
members['ports_reals'] = request.POST.getlist('ports_real_reals')
members['weight'] = request.POST.getlist('weight')
members['id_ips'] = request.POST.getlist('id_ip')
members['ips'] = request.POST.getlist('ip')
# member_status = '1%s%s' % (
# request.POST.getlist('member_status_hab'),
# request.POST.getlist('member_status_updown')
# )
# members["member_status"] = int(member_status)
members['environment'] = environment_id
healthcheck_choices = facade.populate_healthcheck_choices(client)
form_pool = PoolFormV3(
environment_choices,
lb_method_choices,
servicedownaction_choices,
request.POST
)
form_healthcheck = PoolHealthcheckForm(
healthcheck_choices,
request.POST
)
form_group_users = PoolGroupUsersForm(
group_users_list, True, request.POST)
if form_pool.is_valid() and form_healthcheck.is_valid() and form_group_users.is_valid():
pool = dict()
pool['id'] = int(id_server_pool)
servicedownaction = facade.format_servicedownaction(
client, form_pool)
healthcheck = facade.format_healthcheck(request)
pool['identifier'] = str(form_pool.cleaned_data['identifier'])
pool['default_port'] = int(
form_pool.cleaned_data['default_port'])
pool['environment'] = int(
form_pool.cleaned_data['environment'])
pool['servicedownaction'] = servicedownaction
pool['lb_method'] = str(form_pool.cleaned_data['balancing'])
pool['healthcheck'] = healthcheck
pool['default_limit'] = int(form_pool.cleaned_data['maxcon'])
server_pool_members = facade.format_server_pool_members(
request, pool['default_limit'])
pool['server_pool_members'] = server_pool_members
group_users = form_group_users.cleaned_data['group_users']
groups_permissions = []
if len(group_users) > 0:
for id in group_users:
groups_permissions.append({
'user_group': int(id),
'read': True,
'write': True,
'change_config': True,
'delete': True
})
pool['groups_permissions'] = groups_permissions
pool['permissions'] = {
'replace': form_group_users.cleaned_data['overwrite']}
client.create_pool().update_pool(pool, id_server_pool)
messages.add_message(
request, messages.SUCCESS, pool_messages.get('success_update'))
return redirect(lists['action'])
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
lists['form_pool'] = form_pool
lists['form_healthcheck'] = form_healthcheck
lists['form_group_users'] = form_group_users
return render_to_response(POOL_FORM, lists, context_instance=RequestContext(request))
@log
@csrf_exempt
@has_perm_external([
{'permission': POOL_MANAGEMENT, 'read': True, 'write': True},
{'permission': EQUIPMENT_MANAGEMENT, 'read': True, }
])
def ajax_modal_ip_real_server_external(request, form_acess, client):
return _modal_ip_list_real(request, client)
@log
@login_required
@has_perm([
{'permission': POOL_MANAGEMENT, 'read': True, 'write': True},
{'permission': EQUIPMENT_MANAGEMENT, 'read': True, }
])
def ajax_modal_ip_real_server(request):
auth = AuthSession(request.session)
client_api = auth.get_clientFactory()
return _modal_ip_list_real(request, client_api)
def _modal_ip_list_real(request, client_api):
lists = {'msg': str(), 'ips': []}
ips = {}
status_code = 200
ambiente = get_param_in_request(request, 'id_environment')
equip_name = get_param_in_request(request, 'equip_name')
try:
column_index_name_map = {
0: '',
1: 'id',
9: ''}
dtp = DataTablePaginator(request, column_index_name_map)
# Make params
dtp.build_server_side_list()
# Set params in simple Pagination class
pagination = Pagination(
dtp.start_record,
dtp.end_record,
dtp.asorting_cols,
dtp.searchable_columns,
dtp.custom_search)
extends_search = facade.format_name_ip_search(equip_name)
data = dict()
data['start_record'] = pagination.start_record
data['end_record'] = pagination.end_record
data['asorting_cols'] = pagination.asorting_cols
data['searchable_columns'] = pagination.searchable_columns
data['custom_search'] = pagination.custom_search or ''
data['extends_search'] = [extends_search] if extends_search else []
        # Validate the equipment and fetch its IPv4/IPv6 addresses in the
        # given environment
equip = client_api.create_api_equipment().search(
search=data,
include=[
'ipv4__basic__networkipv4__basic',
'ipv6__basic__networkipv6__basic',
'model__details__brand__details',
'equipment_type__details'
],
environment=ambiente
).get('equipments')[0]
except NetworkAPIClientError, e:
logger.error(e)
status_code = 500
return HttpResponse(json.dumps({'message': e.error, 'status': 'error'}), status=status_code,
content_type='application/json')
# if not ips_list['list_ipv4'] and not ips_list['list_ipv6']:
# return HttpResponse(json.dumps({'message': u'Esse equipamento não tem nenhum IP que '
# u'possa ser utilizado nos pools desse ambiente.',
# 'status': 'error'}), status=status_code, content_type='application/json')
ips['list_ipv4'] = equip['ipv4']
ips['list_ipv6'] = equip['ipv6']
lists['ips'] = ips
lists['equip'] = equip
return HttpResponse(
loader.render_to_string(
AJAX_IPLIST_EQUIPMENT_REAL_SERVER_HTML,
lists,
context_instance=RequestContext(request)
), status=status_code)
@log
@csrf_exempt
@has_perm_external([{'permission': POOL_MANAGEMENT, 'read': True}])
def ajax_get_opcoes_pool_by_ambiente_external(request, form_acess, client):
return _get_opcoes_pool_by_ambiente(request, client)
@log
@login_required
@has_perm([{'permission': POOL_MANAGEMENT, 'read': True}])
def ajax_get_opcoes_pool_by_ambiente(request):
auth = AuthSession(request.session)
client_api = auth.get_clientFactory()
return _get_opcoes_pool_by_ambiente(request, client_api)
def _get_opcoes_pool_by_ambiente(request, client_api):
opcoes_pool = dict()
opcoes_pool['options_pool'] = []
try:
ambiente = get_param_in_request(request, 'id_environment')
opcoes_pool = client_api.create_pool().get_opcoes_pool_by_environment(ambiente)
except NetworkAPIClientError, e:
logger.error(e)
return HttpResponse(json.dumps(opcoes_pool['options_pool']), content_type='application/json')
@log
@login_required
@has_perm([
{'permission': POOL_MANAGEMENT, 'write': True},
{'permission': POOL_ALTER_SCRIPT, 'write': True}]
)
def delete(request):
"""Delete Pool Into Database"""
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
form = DeleteForm(request.POST)
if form.is_valid():
ids = form.cleaned_data['ids']
client.create_pool().delete_pool(ids)
messages.add_message(request, messages.SUCCESS,
pool_messages.get('success_delete'))
else:
messages.add_message(request, messages.ERROR,
error_messages.get('select_one'))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect('pool.list')
@log
@login_required
@has_perm([{'permission': POOL_REMOVE_SCRIPT, 'write': True}])
def remove(request):
"""Remove Pool Running Script and Update to Not Created"""
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
form = DeleteForm(request.POST)
if form.is_valid():
ids = form.cleaned_data['ids']
client.create_pool().deploy_remove_pool(ids)
messages.add_message(request, messages.SUCCESS,
pool_messages.get('success_remove'))
else:
messages.add_message(request, messages.ERROR,
error_messages.get('select_one'))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect('pool.list')
@log
@login_required
@has_perm([{'permission': POOL_CREATE_SCRIPT, 'write': True}])
def create(request):
"""Remove Pool Running Script and Update to Not Created"""
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
form = DeleteForm(request.POST)
if form.is_valid():
ids = form.cleaned_data['ids']
client.create_pool().deploy_create_pool(ids)
messages.add_message(request, messages.SUCCESS,
pool_messages.get('success_create'))
else:
messages.add_message(request, messages.ERROR,
error_messages.get('select_one'))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect('pool.list')
@log
@login_required
@has_perm([
{'permission': POOL_MANAGEMENT, 'write': True},
{'permission': POOL_ALTER_SCRIPT, 'write': True}]
)
def delete_new(request):
"""Delete Pool Into Database"""
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
form = DeleteForm(request.POST)
if form.is_valid():
ids = form.cleaned_data['ids']
client.create_pool().delete_pool(ids)
messages.add_message(request, messages.SUCCESS,
pool_messages.get('success_delete'))
else:
messages.add_message(request, messages.ERROR,
error_messages.get('select_one'))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect('pool.list.new')
@log
@login_required
@has_perm([{'permission': POOL_REMOVE_SCRIPT, 'write': True}])
def remove_new(request):
"""Remove Pool Running Script and Update to Not Created"""
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
form = DeleteForm(request.POST)
if form.is_valid():
ids = form.cleaned_data['ids']
client.create_pool().deploy_remove_pool(ids)
messages.add_message(request, messages.SUCCESS,
pool_messages.get('success_remove'))
else:
messages.add_message(request, messages.ERROR,
error_messages.get('select_one'))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect('pool.list.new')
@log
@login_required
@has_perm([{'permission': POOL_CREATE_SCRIPT, 'write': True}])
def create_new(request):
"""Remove Pool Running Script and Update to Not Created"""
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
form = DeleteForm(request.POST)
if form.is_valid():
ids = form.cleaned_data['ids']
client.create_pool().deploy_create_pool(ids)
messages.add_message(request, messages.SUCCESS,
pool_messages.get('success_create'))
else:
messages.add_message(request, messages.ERROR,
error_messages.get('select_one'))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect('pool.list.new')
@log
@login_required
@has_perm([{'permission': POOL_ALTER_SCRIPT, 'write': True}])
def status_change(request):
"""Enable Pool Member Running Script"""
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
id_server_pool = request.POST.get('id_server_pool')
ids = request.POST.get('ids')
action = request.POST.get('action')
if id_server_pool and ids:
pools = client.create_pool().get_pool_members(id_server_pool)
members = pools['server_pools'][0]['server_pool_members']
for member in members:
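                # member_status is edited as a binary string: the second-to-last
                # character of `action` overrides the enabled bit unless it is
                # 'x', in which case the last character overrides the up/down bit.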
member_status = list(bin(member['member_status']))
if action[-2] != 'x':
member_status[-2] = action[-2]
else:
member_status[-1] = action[-1]
member_status = int(''.join(member_status), 2)
if member_status != member['member_status'] and str(member['id']) in ids.split(';'):
member['member_status'] = member_status
client.create_pool().deploy_update_pool_members(
id_server_pool, pools['server_pools'][0])
messages.add_message(request, messages.SUCCESS,
pool_messages.get('success_status_change'))
else:
messages.add_message(request, messages.ERROR,
error_messages.get('select_one'))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect(reverse('pool.manage.tab2', args=[id_server_pool]))
@log
@login_required
@has_perm([{'permission': POOL_ALTER_SCRIPT, 'write': True}])
def enable(request):
"""Enable Pool Member Running Script"""
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
id_server_pool = request.POST.get('id_server_pool')
ids = request.POST.get('ids')
if id_server_pool and ids:
client.create_pool().enable(split_to_array(ids))
messages.add_message(request, messages.SUCCESS,
pool_messages.get('success_enable'))
else:
messages.add_message(request, messages.ERROR,
error_messages.get('select_one'))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect(reverse('pool.manage.tab2', args=[id_server_pool]))
@log
@login_required
@has_perm([{'permission': POOL_ALTER_SCRIPT, 'write': True}])
def disable(request):
"""
Disable Pool Member Running Script
"""
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
id_server_pool = request.POST.get('id_server_pool')
ids = request.POST.get('ids')
if id_server_pool and ids:
client.create_pool().disable(split_to_array(ids))
messages.add_message(
request, messages.SUCCESS, pool_messages.get('success_disable'))
else:
messages.add_message(
request, messages.ERROR, error_messages.get('select_one'))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect(reverse('pool.manage.tab2', args=[id_server_pool]))
@log
@csrf_exempt
@has_perm_external([{'permission': HEALTH_CHECK_EXPECT, 'write': True}])
def add_healthcheck_expect_external(request, form_acess, client):
return _add_healthcheck_expect_shared(request, client)
@log
@login_required
@has_perm([{'permission': HEALTH_CHECK_EXPECT, 'write': True}])
def add_healthcheck_expect(request):
auth = AuthSession(request.session)
client = auth.get_clientFactory()
return _add_healthcheck_expect_shared(request, client)
def _add_healthcheck_expect_shared(request, client):
lists = dict()
try:
if request.method == 'GET':
expect_string = request.GET.get('expect_string')
id_environment = request.GET.get('id_environment')
if expect_string != '':
client.create_ambiente().add_healthcheck_expect(id_ambiente=id_environment, expect_string=expect_string,
match_list=expect_string)
lists['expect_string'] = expect_string
lists['mensagem'] = healthcheck_messages.get('success_create')
except NetworkAPIClientError, e:
logger.error(e)
lists['mensagem'] = healthcheck_messages.get('error_create')
messages.add_message(request, messages.ERROR, e)
return HttpResponse(json.dumps(lists), content_type='application/json')
@log
@login_required
@has_perm([{'permission': POOL_MANAGEMENT, 'write': True}, ])
def pool_member_items(request):
try:
auth = AuthSession(request.session)
client_api = auth.get_clientFactory()
pool_id = request.GET.get('pool_id')
pool_data = client_api.create_pool().get_by_pk(pool_id)
return render(request, POOL_MEMBER_ITEMS, pool_data)
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
@log
@login_required
@has_perm([
    {'permission': POOL_MANAGEMENT, 'write': True, 'read': True},
    {'permission': POOL_ALTER_SCRIPT, 'write': True},
    {'permission': VIPS_REQUEST, 'read': True},
    {'permission': ENVIRONMENT_MANAGEMENT, 'read': True}
])
def manage_tab1(request, id_server_pool):
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
lists = dict()
lists['id_server_pool'] = id_server_pool
pool = client.create_api_pool()\
.get([id_server_pool], kind='details',
include=['groups_permissions'])['server_pools'][0]
lists['environment'] = pool['environment']['name']
lists['identifier'] = pool['identifier']
lists['default_port'] = pool['default_port']
lists['balancing'] = pool['lb_method']
lists['servicedownaction'] = pool['servicedownaction']['name']
lists['max_con'] = pool['default_limit']
lists['pool_created'] = pool['pool_created']
lists['health_check'] = pool['healthcheck'][
'healthcheck_type'] if pool['healthcheck'] else None
if not pool['pool_created']:
return redirect(reverse('pool.edit.form', args=[id_server_pool]))
return render_to_response(POOL_MANAGE_TAB1, lists, context_instance=RequestContext(request))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
@log
@login_required
@has_perm([
{'permission': POOL_MANAGEMENT, 'write': True, 'read': True},
{'permission': POOL_ALTER_SCRIPT, 'write': True},
{'permission': ENVIRONMENT_MANAGEMENT, 'read': True}
])
def manage_tab2(request, id_server_pool):
auth = AuthSession(request.session)
client = auth.get_clientFactory()
lists = dict()
lists['id_server_pool'] = id_server_pool
try:
pool = client.create_pool().get_pool(id_server_pool)
server_pools = pool['server_pools'][0]
lists['environment'] = None
if server_pools['environment']:
environment = client.create_ambiente().buscar_por_id(
server_pools['environment'])
lists['environment'] = environment['ambiente']['ambiente_rede']
lists['health_check'] = server_pools['healthcheck'][
'healthcheck_type'] if server_pools['healthcheck'] else None
lists['identifier'] = server_pools['identifier']
lists['default_port'] = server_pools['default_port']
lists['balancing'] = server_pools['lb_method']
lists['servicedownaction'] = server_pools['servicedownaction']['name']
lists['max_con'] = server_pools['default_limit']
lists['pool_created'] = server_pools['pool_created']
if not lists['pool_created']:
return redirect(reverse('pool.edit.form', args=[id_server_pool]))
return render_to_response(POOL_MANAGE_TAB2, lists, context_instance=RequestContext(request))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return render_to_response(POOL_MANAGE_TAB2, lists, context_instance=RequestContext(request))
@log
@login_required
@has_perm([
{'permission': POOL_MANAGEMENT, 'write': True},
{'permission': POOL_ALTER_SCRIPT, 'write': True},
{'permission': ENVIRONMENT_MANAGEMENT, 'read': True}
])
def manage_tab3(request, id_server_pool):
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
lists = dict()
lb_method_choices = facade.populate_optionsvips_choices(client)
servicedownaction_choices = facade.populate_servicedownaction_choices(
client)
group_users_list = client.create_grupo_usuario().listar()
pool = client.create_api_pool()\
.get([id_server_pool], kind='details',
include=['groups_permissions'])['server_pools'][0]
group_users_list_selected = []
for group in pool['groups_permissions']:
group_users_list_selected.append(group['user_group']['id'])
environment_id = pool['environment']['id']
members = pool['server_pool_members']
healthcheck_choices = facade.populate_healthcheck_choices(client)
environment_choices = [(pool.get('environment').get('id'),
pool.get('environment').get('name'))]
if not pool['pool_created']:
return redirect(reverse('pool.edit.form', args=[id_server_pool]))
healthcheck = pool['healthcheck']['healthcheck_type']
healthcheck_expect = pool['healthcheck']['healthcheck_expect']
healthcheck_request = pool['healthcheck']['healthcheck_request']
healthcheck_destination = pool['healthcheck']['destination'].split(':')[
1]
healthcheck_destination = healthcheck_destination if healthcheck_destination != '*' else ''
lists['action'] = reverse('pool.manage.tab3', args=[id_server_pool])
lists['id_server_pool'] = id_server_pool
lists['identifier'] = pool['identifier']
lists['default_port'] = pool['default_port']
lists['balancing'] = pool['lb_method']
lists['servicedownaction'] = pool['servicedownaction']['name']
lists['max_con'] = pool['default_limit']
lists['healthcheck'] = healthcheck
lists['environment'] = pool['environment']['name']
if request.method == 'POST':
form = PoolFormV3(
environment_choices,
lb_method_choices,
servicedownaction_choices,
request.POST)
form_group_users = PoolGroupUsersForm(
group_users_list, True, request.POST)
form_healthcheck = PoolHealthcheckForm(
healthcheck_choices,
request.POST)
if form.is_valid() and form_healthcheck.is_valid() and form_group_users.is_valid():
healthcheck = facade.format_healthcheck(request)
servicedownaction = facade.format_servicedownaction(
client, form)
groups_permissions = []
group_users = form_group_users.cleaned_data['group_users']
if len(group_users) > 0:
for id in group_users:
groups_permissions.append({
'user_group': int(id),
'read': True,
'write': True,
'change_config': True,
'delete': True
})
overwrite = form_group_users.cleaned_data['overwrite']
pool = format_pool(client, form, members, healthcheck,
servicedownaction, groups_permissions, overwrite, int(id_server_pool))
client.create_pool().deploy_update_pool(pool, id_server_pool)
messages.add_message(
request, messages.SUCCESS, pool_messages.get('success_update'))
return redirect(reverse('pool.manage.tab3', args=[id_server_pool]))
if request.method == 'GET':
form_initial = {
'id': id_server_pool,
'pool_created': pool['pool_created'],
'environment': environment_id,
'default_port': pool.get('default_port'),
'balancing': pool.get('lb_method'),
'servicedownaction': pool.get('servicedownaction').get('id'),
'maxcon': pool.get('default_limit'),
'identifier': pool.get('identifier')
}
form = PoolFormV3(
environment_choices,
lb_method_choices,
servicedownaction_choices,
initial=form_initial
)
form_initial_gu = {
'group_users': group_users_list_selected
}
form_group_users = PoolGroupUsersForm(
group_users_list, True, initial=form_initial_gu)
form_initial_hc = {
'healthcheck': healthcheck,
'healthcheck_request': healthcheck_request,
'healthcheck_expect': healthcheck_expect,
'healthcheck_destination': healthcheck_destination
}
form_healthcheck = PoolHealthcheckForm(
healthcheck_choices,
initial=form_initial_hc
)
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
form = PoolFormV3(
environment_choices,
lb_method_choices,
servicedownaction_choices,
request.POST)
form_group_users = PoolGroupUsersForm(
group_users_list, True, request.POST)
lists['form_pool'] = form
lists['form_healthcheck'] = form_healthcheck
lists['form_group_users'] = form_group_users
return render_to_response(POOL_MANAGE_TAB3, lists, context_instance=RequestContext(request))
@log
@login_required
@has_perm([
{'permission': POOL_MANAGEMENT, 'write': True},
{'permission': POOL_ALTER_SCRIPT, 'write': True},
{'permission': ENVIRONMENT_MANAGEMENT, 'read': True}
])
def manage_tab4(request, id_server_pool):
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
lists = dict()
lists['action'] = reverse('pool.manage.tab4', args=[id_server_pool])
lists['id_server_pool'] = id_server_pool
pool = client.create_api_pool().get(
[id_server_pool], include=['groups_permissions'])
server_pools = pool['server_pools'][0]
lists['pool_created'] = pool_created = server_pools['pool_created']
if not pool_created:
return redirect(reverse('pool.edit.form', args=[id_server_pool]))
lists['environment_desc'] = None
if server_pools['environment']:
environment = client.create_ambiente().buscar_por_id(
server_pools['environment'])
lists['environment_desc'] = environment[
'ambiente']['ambiente_rede']
lists['health_check'] = server_pools['healthcheck'][
'healthcheck_type'] if server_pools['healthcheck'] else None
lists['identifier'] = server_pools['identifier']
lists['default_port'] = server_pools['default_port']
lists['balancing'] = server_pools['lb_method']
lists['servicedownaction'] = server_pools['servicedownaction']['name']
lists['max_con'] = server_pools['default_limit']
lists['environment_id'] = server_pools['environment']
lists['groups_permissions'] = server_pools['groups_permissions']
if request.method == 'POST':
server_pool_members = facade.format_server_pool_members(request, lists[
'max_con'])
server_pools['server_pool_members'] = server_pool_members
client.create_pool().deploy_update_pool(server_pools, id_server_pool)
messages.add_message(request, messages.SUCCESS,
pool_messages.get('success_update'))
return redirect(lists['action'])
if request.method == 'GET':
lists['pool_members'] = facade.populate_pool_members_by_obj(
server_pools['server_pool_members'])
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return render_to_response(POOL_MANAGE_TAB4, lists, context_instance=RequestContext(request))
def format_pool(client, form, server_pool_members, healthcheck, servicedownaction, groups_permissions, overwrite, pool_id=None):
pool = dict()
pool['id'] = pool_id
pool['identifier'] = str(form.cleaned_data['identifier'])
pool['default_port'] = int(form.cleaned_data['default_port'])
pool['environment'] = int(form.cleaned_data['environment'])
pool['servicedownaction'] = servicedownaction
pool['lb_method'] = str(form.cleaned_data['balancing'])
pool['healthcheck'] = healthcheck
pool['default_limit'] = int(form.cleaned_data['maxcon'])
pool['server_pool_members'] = server_pool_members
pool['groups_permissions'] = groups_permissions
pool['permissions'] = {'replace': overwrite}
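    # The pool-wide connection limit overrides the limit of every member entry.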
for member in server_pool_members:
member['limit'] = pool['default_limit']
return pool
| 35.237716 | 128 | 0.618772 | [
"Apache-2.0"
] | pantuza/GloboNetworkAPI-WebUI | CadVlan/Pool/views.py | 53,071 | Python |
# Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Performance generation from score in Tensor2Tensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
from magenta.models.score2perf import datagen_beam
from magenta.models.score2perf import modalities
from magenta.models.score2perf import music_encoders
from note_seq import chord_symbols_lib
from note_seq import sequences_lib
from tensor2tensor.data_generators import problem
from tensor2tensor.layers import modalities as t2t_modalities
from tensor2tensor.models import transformer
from tensor2tensor.utils import registry
import tensorflow.compat.v1 as tf
# TODO(iansimon): figure out the best way not to hard-code these constants
NUM_VELOCITY_BINS = 32
STEPS_PER_SECOND = 100
MIN_PITCH = 21
MAX_PITCH = 108
# pylint: disable=line-too-long
MAESTRO_TFRECORD_PATHS = {
'train': 'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_train.tfrecord',
'dev': 'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_validation.tfrecord',
'test': 'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_test.tfrecord'
}
# pylint: enable=line-too-long
class Score2PerfProblem(problem.Problem):
"""Base class for musical score-to-performance problems.
Data files contain tf.Example protos with encoded performance in 'targets' and
optional encoded score in 'inputs'.
"""
@property
def splits(self):
"""Dictionary of split names and probabilities. Must sum to one."""
raise NotImplementedError()
@property
def min_hop_size_seconds(self):
"""Minimum hop size in seconds at which to split input performances."""
raise NotImplementedError()
@property
def max_hop_size_seconds(self):
"""Maximum hop size in seconds at which to split input performances."""
raise NotImplementedError()
@property
def num_replications(self):
"""Number of times entire input performances will be split."""
return 1
@property
def add_eos_symbol(self):
"""Whether to append EOS to encoded performances."""
raise NotImplementedError()
@property
def absolute_timing(self):
"""Whether or not score should use absolute (vs. tempo-relative) timing."""
return False
@property
def stretch_factors(self):
"""Temporal stretch factors for data augmentation (in datagen)."""
return [1.0]
@property
def transpose_amounts(self):
"""Pitch transposition amounts for data augmentation (in datagen)."""
return [0]
@property
def random_crop_length_in_datagen(self):
"""Randomly crop targets to this length in datagen."""
return None
@property
def random_crop_in_train(self):
"""Whether to randomly crop each training example when preprocessing."""
return False
@property
def split_in_eval(self):
"""Whether to split each eval example when preprocessing."""
return False
def performances_input_transform(self, tmp_dir):
"""Input performances beam transform (or dictionary thereof) for datagen."""
raise NotImplementedError()
def generate_data(self, data_dir, tmp_dir, task_id=-1):
del task_id
def augment_note_sequence(ns, stretch_factor, transpose_amount):
"""Augment a NoteSequence by time stretch and pitch transposition."""
augmented_ns = sequences_lib.stretch_note_sequence(
ns, stretch_factor, in_place=False)
try:
_, num_deleted_notes = sequences_lib.transpose_note_sequence(
augmented_ns, transpose_amount,
min_allowed_pitch=MIN_PITCH, max_allowed_pitch=MAX_PITCH,
in_place=True)
except chord_symbols_lib.ChordSymbolError:
raise datagen_beam.DataAugmentationError(
'Transposition of chord symbol(s) failed.')
if num_deleted_notes:
raise datagen_beam.DataAugmentationError(
'Transposition caused out-of-range pitch(es).')
return augmented_ns
augment_params = itertools.product(
self.stretch_factors, self.transpose_amounts)
augment_fns = [
functools.partial(augment_note_sequence,
stretch_factor=s, transpose_amount=t)
for s, t in augment_params
]
datagen_beam.generate_examples(
input_transform=self.performances_input_transform(tmp_dir),
output_dir=data_dir,
problem_name=self.dataset_filename(),
splits=self.splits,
min_hop_size_seconds=self.min_hop_size_seconds,
max_hop_size_seconds=self.max_hop_size_seconds,
min_pitch=MIN_PITCH,
max_pitch=MAX_PITCH,
num_replications=self.num_replications,
encode_performance_fn=self.performance_encoder().encode_note_sequence,
encode_score_fns=dict((name, encoder.encode_note_sequence)
for name, encoder in self.score_encoders()),
augment_fns=augment_fns,
absolute_timing=self.absolute_timing,
random_crop_length=self.random_crop_length_in_datagen)
def hparams(self, defaults, model_hparams):
del model_hparams # unused
perf_encoder = self.get_feature_encoders()['targets']
defaults.modality = {'targets': t2t_modalities.ModalityType.SYMBOL}
defaults.vocab_size = {'targets': perf_encoder.vocab_size}
if self.has_inputs:
score_encoder = self.get_feature_encoders()['inputs']
if isinstance(score_encoder.vocab_size, list):
# TODO(trandustin): We default to not applying any transformation; to
# apply one, pass modalities.bottom to the model's hparams.bottom. In
# future, refactor the tuple of the "inputs" feature to be part of the
# features dict itself, i.e., have multiple inputs each with its own
# modality and vocab size.
modality_cls = t2t_modalities.ModalityType.IDENTITY
else:
modality_cls = t2t_modalities.ModalityType.SYMBOL
defaults.modality['inputs'] = modality_cls
defaults.vocab_size['inputs'] = score_encoder.vocab_size
def performance_encoder(self):
"""Encoder for target performances."""
return music_encoders.MidiPerformanceEncoder(
steps_per_second=STEPS_PER_SECOND,
num_velocity_bins=NUM_VELOCITY_BINS,
min_pitch=MIN_PITCH,
max_pitch=MAX_PITCH,
add_eos=self.add_eos_symbol)
def score_encoders(self):
"""List of (name, encoder) tuples for input score components."""
return []
def feature_encoders(self, data_dir):
del data_dir
encoders = {
'targets': self.performance_encoder()
}
score_encoders = self.score_encoders()
if score_encoders:
if len(score_encoders) > 1:
# Create a composite score encoder, only used for inference.
encoders['inputs'] = music_encoders.CompositeScoreEncoder(
[encoder for _, encoder in score_encoders])
else:
# If only one score component, just use its encoder.
_, encoders['inputs'] = score_encoders[0]
return encoders
def example_reading_spec(self):
data_fields = {
'targets': tf.VarLenFeature(tf.int64)
}
for name, _ in self.score_encoders():
data_fields[name] = tf.VarLenFeature(tf.int64)
# We don't actually "decode" anything here; the encodings are simply read as
# tensors.
data_items_to_decoders = None
return data_fields, data_items_to_decoders
def preprocess_example(self, example, mode, hparams):
if self.has_inputs:
# Stack encoded score components depthwise as inputs.
inputs = []
for name, _ in self.score_encoders():
inputs.append(tf.expand_dims(example[name], axis=1))
del example[name]
example['inputs'] = tf.stack(inputs, axis=2)
if self.random_crop_in_train and mode == tf.estimator.ModeKeys.TRAIN:
# Take a random crop of the training example.
assert not self.has_inputs
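      # tf.random_uniform requires maxval > 0, so guard with tf.cond and fall
      # back to offset 0 when the example already fits within the length limit.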
max_offset = tf.maximum(
tf.shape(example['targets'])[0] - hparams.max_target_seq_length, 0)
offset = tf.cond(
max_offset > 0,
lambda: tf.random_uniform([], maxval=max_offset, dtype=tf.int32),
lambda: 0
)
example['targets'] = (
example['targets'][offset:offset + hparams.max_target_seq_length])
return example
elif self.split_in_eval and mode == tf.estimator.ModeKeys.EVAL:
# Split the example into non-overlapping segments.
assert not self.has_inputs
length = tf.shape(example['targets'])[0]
extra_length = tf.mod(length, hparams.max_target_seq_length)
examples = {
'targets': tf.reshape(
example['targets'][:length - extra_length],
[-1, hparams.max_target_seq_length, 1, 1])
}
extra_example = {
'targets': tf.reshape(
example['targets'][-extra_length:], [1, -1, 1, 1])
}
dataset = tf.data.Dataset.from_tensor_slices(examples)
extra_dataset = tf.data.Dataset.from_tensor_slices(extra_example)
return dataset.concatenate(extra_dataset)
else:
# If not cropping or splitting, do standard preprocessing.
return super(Score2PerfProblem, self).preprocess_example(
example, mode, hparams)
class ConditionalScore2PerfProblem(Score2PerfProblem):
"""Lightweight version of base class for musical score-to-performance problems.
This version incorporates one performance conditioning signal.
Data files contain tf.Example protos with encoded performance in 'targets' and
optional encoded score in 'inputs'.
"""
def generate_data(self, data_dir, tmp_dir, task_id=-1):
del task_id
def augment_note_sequence(ns, stretch_factor, transpose_amount):
"""Augment a NoteSequence by time stretch and pitch transposition."""
augmented_ns = sequences_lib.stretch_note_sequence(
ns, stretch_factor, in_place=False)
try:
_, num_deleted_notes = sequences_lib.transpose_note_sequence(
augmented_ns, transpose_amount,
min_allowed_pitch=MIN_PITCH, max_allowed_pitch=MAX_PITCH,
in_place=True)
except chord_symbols_lib.ChordSymbolError:
raise datagen_beam.DataAugmentationError(
'Transposition of chord symbol(s) failed.')
if num_deleted_notes:
raise datagen_beam.DataAugmentationError(
'Transposition caused out-of-range pitch(es).')
return augmented_ns
augment_params = itertools.product(
self.stretch_factors, self.transpose_amounts)
augment_fns = [
functools.partial(augment_note_sequence,
stretch_factor=s, transpose_amount=t)
for s, t in augment_params
]
datagen_beam.generate_conditional_examples(
input_transform=self.performances_input_transform(tmp_dir),
output_dir=data_dir,
problem_name=self.dataset_filename(),
splits=self.splits,
min_pitch=MIN_PITCH,
max_pitch=MAX_PITCH,
melody=False,
noisy=False,
encode_performance_fn=self.performance_encoder().encode_note_sequence,
encode_score_fns=dict((name, encoder.encode_note_sequence)
for name, encoder in self.score_encoders()),
augment_fns=augment_fns,
num_replications=self.num_replications)
def example_reading_spec(self):
data_fields = {
'inputs': tf.VarLenFeature(tf.int64),
'targets': tf.VarLenFeature(tf.int64)
}
for name, _ in self.score_encoders():
data_fields[name] = tf.VarLenFeature(tf.int64)
# We don't actually "decode" anything here; the encodings are simply read as
# tensors.
data_items_to_decoders = None
return data_fields, data_items_to_decoders
def preprocess_example(self, example, mode, hparams):
return problem.preprocess_example_common(example, mode, hparams)
class ConditionalMelodyScore2PerfProblem(Score2PerfProblem):
"""Lightweight version of base class for musical score-to-performance problems.
This version incorporates one performance conditioning signal.
Data files contain tf.Example protos with encoded performance in 'targets' and
encoded score in 'melody' and 'performance'.
"""
def generate_data(self, data_dir, tmp_dir, task_id=-1):
del task_id
def augment_note_sequence(ns, stretch_factor, transpose_amount):
"""Augment a NoteSequence by time stretch and pitch transposition."""
augmented_ns = sequences_lib.stretch_note_sequence(
ns, stretch_factor, in_place=False)
try:
_, num_deleted_notes = sequences_lib.transpose_note_sequence(
augmented_ns, transpose_amount,
min_allowed_pitch=MIN_PITCH, max_allowed_pitch=MAX_PITCH,
in_place=True)
except chord_symbols_lib.ChordSymbolError:
raise datagen_beam.DataAugmentationError(
'Transposition of chord symbol(s) failed.')
if num_deleted_notes:
raise datagen_beam.DataAugmentationError(
'Transposition caused out-of-range pitch(es).')
return augmented_ns
augment_params = itertools.product(
self.stretch_factors, self.transpose_amounts)
augment_fns = [
functools.partial(augment_note_sequence,
stretch_factor=s, transpose_amount=t)
for s, t in augment_params
]
datagen_beam.generate_conditional_examples(
input_transform=self.performances_input_transform(tmp_dir),
output_dir=data_dir,
problem_name=self.dataset_filename(),
splits=self.splits,
min_pitch=MIN_PITCH,
max_pitch=MAX_PITCH,
melody=True,
noisy=False,
encode_performance_fn=self.performance_encoder().encode_note_sequence,
encode_score_fns=dict((name, encoder.encode_note_sequence)
for name, encoder in self.score_encoders()),
augment_fns=augment_fns,
num_replications=self.num_replications)
def hparams(self, defaults, model_hparams):
del model_hparams # unused
perf_encoder = self.get_feature_encoders()['targets']
defaults.modality = {'targets': t2t_modalities.ModalityType.SYMBOL}
defaults.vocab_size = {'targets': perf_encoder.vocab_size}
if self.has_inputs:
score_encoder = self.score_encoders()
# iterate over each score encoder and update modality/vocab_size
for name, se in score_encoder:
defaults.modality[name] = t2t_modalities.ModalityType.SYMBOL
defaults.vocab_size[name] = se.vocab_size
def feature_encoders(self, data_dir):
del data_dir
encoders = {
'targets': self.performance_encoder()
}
score_encoders = self.score_encoders()
# CompositeScoreEncoder is tricky, so using a list of encoders instead.
if len(score_encoders) > 1:
for name, encoder in score_encoders:
encoders[name] = encoder
else:
# If only one score component, just use its encoder.
_, encoders['inputs'] = score_encoders[0]
return encoders
def example_reading_spec(self):
data_fields = {
'targets': tf.VarLenFeature(tf.int64),
}
for name, _ in self.score_encoders():
data_fields[name] = tf.VarLenFeature(tf.int64)
# We don't actually "decode" anything here; the encodings are simply read as
# tensors.
data_items_to_decoders = None
return data_fields, data_items_to_decoders
def preprocess_example(self, example, mode, hparams):
return problem.preprocess_example_common(example, mode, hparams)
class ConditionalMelodyNoisyScore2PerfProblem(
ConditionalMelodyScore2PerfProblem):
"""Lightweight version of base class for musical score-to-performance problems.
This version incorporates one performance conditioning signal.
Data files contain tf.Example protos with encoded performance in 'targets' and
encoded score in 'melody' and 'performance'.
"""
def generate_data(self, data_dir, tmp_dir, task_id=-1):
del task_id
def augment_note_sequence(ns, stretch_factor, transpose_amount):
"""Augment a NoteSequence by time stretch and pitch transposition."""
augmented_ns = sequences_lib.stretch_note_sequence(
ns, stretch_factor, in_place=False)
try:
_, num_deleted_notes = sequences_lib.transpose_note_sequence(
augmented_ns, transpose_amount,
min_allowed_pitch=MIN_PITCH, max_allowed_pitch=MAX_PITCH,
in_place=True)
except chord_symbols_lib.ChordSymbolError:
raise datagen_beam.DataAugmentationError(
'Transposition of chord symbol(s) failed.')
if num_deleted_notes:
raise datagen_beam.DataAugmentationError(
'Transposition caused out-of-range pitch(es).')
return augmented_ns
augment_params = itertools.product(
self.stretch_factors, self.transpose_amounts)
augment_fns = [
functools.partial(augment_note_sequence,
stretch_factor=s, transpose_amount=t)
for s, t in augment_params
]
datagen_beam.generate_conditional_examples(
input_transform=self.performances_input_transform(tmp_dir),
output_dir=data_dir,
problem_name=self.dataset_filename(),
splits=self.splits,
min_pitch=MIN_PITCH,
max_pitch=MAX_PITCH,
melody=True,
noisy=True,
encode_performance_fn=self.performance_encoder().encode_note_sequence,
encode_score_fns=dict((name, encoder.encode_note_sequence)
for name, encoder in self.score_encoders()),
augment_fns=augment_fns,
num_replications=self.num_replications)
class Chords2PerfProblem(Score2PerfProblem):
"""Base class for musical chords-to-performance problems."""
def score_encoders(self):
return [('chords', music_encoders.TextChordsEncoder(steps_per_quarter=1))]
class Melody2PerfProblem(Score2PerfProblem):
"""Base class for musical melody-to-performance problems."""
def score_encoders(self):
return [
('melody', music_encoders.TextMelodyEncoder(
steps_per_quarter=4, min_pitch=MIN_PITCH, max_pitch=MAX_PITCH))
]
class AbsoluteMelody2PerfProblem(Score2PerfProblem):
"""Base class for musical (absolute-timed) melody-to-performance problems."""
@property
def absolute_timing(self):
return True
def score_encoders(self):
return [
('melody', music_encoders.TextMelodyEncoderAbsolute(
steps_per_second=10, min_pitch=MIN_PITCH, max_pitch=MAX_PITCH))
]
class LeadSheet2PerfProblem(Score2PerfProblem):
"""Base class for musical lead-sheet-to-performance problems."""
def score_encoders(self):
return [
('chords', music_encoders.TextChordsEncoder(steps_per_quarter=4)),
('melody', music_encoders.TextMelodyEncoder(
steps_per_quarter=4, min_pitch=MIN_PITCH, max_pitch=MAX_PITCH))
]
@registry.register_problem('score2perf_maestro_language_uncropped_aug')
class Score2PerfMaestroLanguageUncroppedAug(Score2PerfProblem):
"""Piano performance language model on the MAESTRO dataset."""
def performances_input_transform(self, tmp_dir):
del tmp_dir
return dict(
(split_name, datagen_beam.ReadNoteSequencesFromTFRecord(tfrecord_path))
for split_name, tfrecord_path in MAESTRO_TFRECORD_PATHS.items())
@property
def splits(self):
return None
@property
def min_hop_size_seconds(self):
return 0.0
@property
def max_hop_size_seconds(self):
return 0.0
@property
def add_eos_symbol(self):
return False
@property
def stretch_factors(self):
# Stretch by -5%, -2.5%, 0%, 2.5%, and 5%.
return [0.95, 0.975, 1.0, 1.025, 1.05]
@property
def transpose_amounts(self):
# Transpose no more than a minor third.
return [-3, -2, -1, 0, 1, 2, 3]
@property
def random_crop_in_train(self):
return True
@property
def split_in_eval(self):
return True
@registry.register_problem('score2perf_maestro_absmel2perf_5s_to_30s_aug10x')
class Score2PerfMaestroAbsMel2Perf5sTo30sAug10x(AbsoluteMelody2PerfProblem):
"""Generate performances from an absolute-timed melody, with augmentation."""
def performances_input_transform(self, tmp_dir):
del tmp_dir
return dict(
(split_name, datagen_beam.ReadNoteSequencesFromTFRecord(tfrecord_path))
for split_name, tfrecord_path in MAESTRO_TFRECORD_PATHS.items())
@property
def splits(self):
return None
@property
def min_hop_size_seconds(self):
return 5.0
@property
def max_hop_size_seconds(self):
return 30.0
@property
def num_replications(self):
return 10
@property
def add_eos_symbol(self):
return True
@property
def stretch_factors(self):
# Stretch by -5%, -2.5%, 0%, 2.5%, and 5%.
return [0.95, 0.975, 1.0, 1.025, 1.05]
@property
def transpose_amounts(self):
# Transpose no more than a minor third.
return [-3, -2, -1, 0, 1, 2, 3]
@registry.register_problem('score2perf_maestro_perf_conditional_aug_10x')
class Score2PerfMaestroPerfConditionalAug10x(ConditionalScore2PerfProblem):
"""Generate performances from scratch (or from primer)."""
def performances_input_transform(self, tmp_dir):
del tmp_dir
return dict(
(split_name, datagen_beam.ReadNoteSequencesFromTFRecord(tfrecord_path))
for split_name, tfrecord_path in MAESTRO_TFRECORD_PATHS.items())
@property
def splits(self):
    return None
@property
def num_replications(self):
return 10
@property
def add_eos_symbol(self):
return False
@property
def stretch_factors(self):
# Stretch by -5%, -2.5%, 0%, 2.5%, and 5%.
return [0.95, 0.975, 1.0, 1.025, 1.05]
@property
def transpose_amounts(self):
# Transpose no more than a minor third.
return [-3, -2, -1, 0, 1, 2, 3]
@property
def has_inputs(self):
encoders = self.get_feature_encoders()
return ('performance' in encoders) or ('inputs' in encoders)
def score_encoders(self):
return [
('performance', music_encoders.MidiPerformanceEncoder(
steps_per_second=100,
num_velocity_bins=32,
min_pitch=21,
max_pitch=108,
add_eos=self.add_eos_symbol))
]
@registry.register_problem('score2perf_maestro_mel_perf_conditional_aug_10x')
class Score2PerfMaestroMelPerfConditionalAug10x(
ConditionalMelodyScore2PerfProblem):
"""Generate performances from scratch (or from primer)."""
def performances_input_transform(self, tmp_dir):
del tmp_dir
return dict(
(split_name, datagen_beam.ReadNoteSequencesFromTFRecord(tfrecord_path))
for split_name, tfrecord_path in MAESTRO_TFRECORD_PATHS.items())
@property
def splits(self):
    return None
@property
def num_replications(self):
return 10
@property
def add_eos_symbol(self):
return False
@property
def stretch_factors(self):
# Stretch by -5%, -2.5%, 0%, 2.5%, and 5%.
return [0.95, 0.975, 1.0, 1.025, 1.05]
@property
def transpose_amounts(self):
# Transpose no more than a minor third.
return [-3, -2, -1, 0, 1, 2, 3]
@property
def has_inputs(self):
encoders = self.get_feature_encoders()
return ('performance' in encoders) or ('inputs' in encoders)
def score_encoders(self):
return [
('performance', music_encoders.MidiPerformanceEncoder(
steps_per_second=100,
num_velocity_bins=32,
min_pitch=21,
max_pitch=108,
add_eos=self.add_eos_symbol)),
('melody', music_encoders.TextMelodyEncoderAbsolute(
steps_per_second=10, min_pitch=21, max_pitch=108))
]
@registry.register_problem('score2perf_maestro_mel_perf_conditional_noisy_10x')
class Score2PerfMaestroMelPerfConditionalNoisy10x(
ConditionalMelodyNoisyScore2PerfProblem):
"""Generate performances from scratch (or from primer)."""
def performances_input_transform(self, tmp_dir):
del tmp_dir
return dict(
(split_name, datagen_beam.ReadNoteSequencesFromTFRecord(tfrecord_path))
for split_name, tfrecord_path in MAESTRO_TFRECORD_PATHS.items())
@property
def splits(self):
    return None
@property
def num_replications(self):
return 10
@property
def add_eos_symbol(self):
return False
@property
def stretch_factors(self):
# Stretch by -5%, -2.5%, 0%, 2.5%, and 5%.
return [0.95, 0.975, 1.0, 1.025, 1.05]
@property
def transpose_amounts(self):
# Transpose no more than a minor third.
return [-3, -2, -1, 0, 1, 2, 3]
@property
def has_inputs(self):
encoders = self.get_feature_encoders()
return ('performance' in encoders) or ('inputs' in encoders)
def score_encoders(self):
return [
('performance', music_encoders.MidiPerformanceEncoder(
steps_per_second=100,
num_velocity_bins=32,
min_pitch=21,
max_pitch=108,
add_eos=self.add_eos_symbol)),
('melody', music_encoders.TextMelodyEncoderAbsolute(
steps_per_second=10, min_pitch=21, max_pitch=108))
]
@registry.register_hparams
def score2perf_transformer_base():
hparams = transformer.transformer_base()
hparams.bottom['inputs'] = modalities.bottom
return hparams
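# Illustrative note (not part of the original module): once registered, these
# problems are typically consumed through the tensor2tensor tooling, roughly:
#   t2t-datagen --problem=score2perf_maestro_perf_conditional_aug_10x \
#     --data_dir=/path/to/data --tmp_dir=/path/to/tmp
# The exact flags depend on the tensor2tensor version in use.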
| 33.594278 | 89 | 0.70384 | ["Apache-2.0"] | flyingleafe/magenta | magenta/models/score2perf/score2perf.py | 25,834 | Python
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import warnings
# import git
import torch
import yaml
from pythia.common.registry import registry
from pythia.utils.distributed_utils import is_main_process, synchronize
from pythia.utils.general import (ckpt_name_from_core_args,
foldername_from_config_override, updir)
class Checkpoint:
def __init__(self, trainer):
"""
        Generates a path for saving the model, which can also be used for
        resuming from a checkpoint.
"""
self.trainer = trainer
self.config = self.trainer.config
self.save_dir = self.config.training_parameters.save_dir
self.model_name = self.config.model
self.ckpt_foldername = ckpt_name_from_core_args(self.config)
self.ckpt_foldername += foldername_from_config_override(self.trainer.args)
self.device = registry.get("current_device")
self.ckpt_prefix = ""
if hasattr(self.trainer.model, "get_ckpt_name"):
self.ckpt_prefix = self.trainer.model.get_ckpt_name() + "_"
self.config["log_foldername"] = self.ckpt_foldername
self.ckpt_foldername = os.path.join(self.save_dir, self.ckpt_foldername)
self.pth_filepath = os.path.join(
self.ckpt_foldername, self.ckpt_prefix + self.model_name + getattr(self.config.model_attributes,
self.model_name).code_name + "_final.pth"
)
self.models_foldername = os.path.join(self.ckpt_foldername, "models")
if not os.path.exists(self.models_foldername):
os.makedirs(self.models_foldername)
self.save_config()
self.repo_path = updir(os.path.abspath(__file__), n=3)
# self.repo = git.Repo(self.repo_path)
def save_config(self):
cfg_file = os.path.join(self.ckpt_foldername, "config.yaml")
with open(cfg_file, "w") as f:
# Pop out config_override if present to remove clutter in
# saved configuration yaml file
self.config.pop("config_override", None)
f.write(str(self.config))
def load_state_dict(self):
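        # Descriptive note: an explicit training_parameters.resume_file takes
        # precedence; otherwise, when resume is enabled, fall back to
        # "<ckpt_foldername>/<ckpt_prefix>best.ckpt" if it exists.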
tp = self.config.training_parameters
if tp.resume_file is not None:
if os.path.exists(tp.resume_file):
self._load(tp.resume_file)
return
else:
raise RuntimeError("{} doesn't exist".format(tp.resume_file))
ckpt_filepath = os.path.join(
self.ckpt_foldername, self.ckpt_prefix + "best.ckpt"
)
if tp.resume is True:
if os.path.exists(ckpt_filepath):
self._load(ckpt_filepath)
else:
warnings.warn(
"Tried to resume but checkpoint filepath {} "
"is not present. Skipping.".format(ckpt_filepath)
)
def _load(self, file):
self.trainer.writer.write("Loading checkpoint")
ckpt = self._torch_load(file)
data_parallel = registry.get("data_parallel")
if "model" in ckpt:
ckpt_model = ckpt["model"]
else:
ckpt_model = ckpt
ckpt = {"model": ckpt}
pretrained_mapping = self.config.training_parameters.pretrained_mapping
if not self.config.training_parameters.load_pretrained:
pretrained_mapping = {}
new_dict = {}
# TODO: Move to separate function
for attr in ckpt_model:
if "fa_history" in attr:
new_dict[attr.replace("fa_history", "fa_context")] = ckpt_model[attr]
elif data_parallel is False and attr.startswith("module."):
# In case the ckpt was actually a data parallel model
# replace first module. from dataparallel with empty string
new_dict[attr.replace("module.", "", 1)] = ckpt_model[attr]
else:
new_dict[attr] = ckpt_model[attr]
if len(pretrained_mapping.items()) == 0:
final_dict = new_dict
self.trainer.model.load_state_dict(final_dict)
if "optimizer" in ckpt:
self.trainer.optimizer.load_state_dict(ckpt["optimizer"])
else:
warnings.warn(
"'optimizer' key is not present in the "
"checkpoint asked to be loaded. Skipping."
)
self.trainer.early_stopping.init_from_checkpoint(ckpt)
self.trainer.writer.write("Checkpoint loaded")
if "best_iteration" in ckpt:
self.trainer.current_iteration = ckpt["best_iteration"]
registry.register("current_iteration", self.trainer.current_iteration)
if "best_epoch" in ckpt:
self.trainer.current_epoch = ckpt["best_epoch"]
registry.register("current_epoch", self.trainer.current_epoch)
else:
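            # Pretrained-mapping branch: instead of loading the full state dict,
            # copy only the checkpoint tensors whose prefix (mapping key) has a
            # counterpart prefix (mapping value) in the current model, leaving
            # all other parameters untouched.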
final_dict = {}
model = self.trainer.model
own_state = model.state_dict()
for key, value in pretrained_mapping.items():
key += "."
value += "."
for attr in new_dict:
for own_attr in own_state:
if (
key in attr
and value in own_attr
and attr.replace(key, "") == own_attr.replace(value, "")
):
self.trainer.writer.write(
"Copying " + attr + " " + own_attr
)
own_state[own_attr].copy_(new_dict[attr])
self.trainer.writer.write("Pretrained model loaded")
def _load_state_dict_mapping(self, ckpt_model):
model = self.trainer.model
attr_mapping = {
"image_feature_encoders": "img_feat_encoders",
"image_feature_embeddings_list": "img_embeddings_list",
"image_text_multi_modal_combine_layer": "multi_modal_combine_layer",
"text_embeddings": "text_embeddings",
"classifier": "classifier",
}
data_parallel = registry.get("data_parallel")
if not data_parallel:
            # Normalize mapping keys by stripping any "module." prefix; iterate
            # over a snapshot so the dict can be mutated safely.
            for key in list(attr_mapping):
                stripped_key = key.replace("module.", "")
                if stripped_key != key:
                    attr_mapping[stripped_key] = attr_mapping.pop(key)
for key in attr_mapping:
getattr(model, key).load_state_dict(ckpt_model[attr_mapping[key]])
def _torch_load(self, file):
if "cuda" in str(self.device):
return torch.load(file)
else:
return torch.load(file, map_location=lambda storage, loc: storage)
# def _get_vcs_fields(self):
# """Returns a dict with git fields of the current repository
#
# To reproduce an experiment directly from a checkpoint
#
# 1) Export `config` key as a yaml
# 2) Clone repository and checkout at given commit on given branch
# 3) Any local change (diff) while running the experiment is stored
# in the value with key `git/diff`, output the diff to a `path.diff`
# file and apply the patch to the current state by simply
#
# `patch -p0 < path.diff`
# """
#
# return {
# "git/branch": self.repo.active_branch.name,
# "git/commit_hash": self.repo.head.commit.name_rev,
# "git/commit_author": self.repo.head.commit.author.name,
# "git/commit_message": self.repo.head.commit.message,
# "git/diff": self.repo.git.diff("--no-prefix"),
# }
def save(self, iteration, update_best=False):
# Only save in main process
if not is_main_process():
return
ckpt_filepath = os.path.join(
self.models_foldername, "model_%d.ckpt" % iteration
)
best_ckpt_filepath = os.path.join(
self.ckpt_foldername, self.ckpt_prefix + "best.ckpt"
)
best_iteration = self.trainer.early_stopping.best_monitored_iteration
best_metric = self.trainer.early_stopping.best_monitored_value
ckpt = {
"model": self.trainer.model.state_dict(),
"optimizer": self.trainer.optimizer.state_dict(),
"best_iteration": best_iteration,
"best_metric_value": best_metric,
"config": self.config,
}
# git_metadata_dict = self._get_vcs_fields()
# ckpt.update(git_metadata_dict)
torch.save(ckpt, ckpt_filepath)
if update_best:
torch.save(ckpt, best_ckpt_filepath)
def restore(self):
self.trainer.writer.write("Restoring checkpoint")
best_path = os.path.join(self.ckpt_foldername, self.ckpt_prefix + "best.ckpt")
if os.path.exists(best_path):
ckpt = self._torch_load(best_path)
self.trainer.model.load_state_dict(ckpt["model"])
self.trainer.optimizer.load_state_dict(ckpt["optimizer"])
def finalize(self):
torch.save(self.trainer.model.state_dict(), self.pth_filepath)
| 37.510121 | 120 | 0.586508 | ["BSD-3-Clause"] | likenneth/mmgnn_textvqa | pythia/utils/checkpoint.py | 9,265 | Python
from pycronserver.server import get_pycronserver, execute_funct
from pycronserver.local import get_local_pycronserver, create_config_folder
| 46.666667 | 75 | 0.9 | ["BSD-3-Clause"] | pyscioffice/pycronserver | pycronserver/__init__.py | 140 | Python
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.codegen.wire.java.register import build_file_aliases as register_codegen
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.build_graph.register import build_file_aliases as register_core
from pants.java.jar.jar_dependency import JarDependency
from pants.testutil.task_test_base import TaskTestBase
from pants.contrib.thrifty.java_thrifty_gen import JavaThriftyGen
from pants.contrib.thrifty.java_thrifty_library import JavaThriftyLibrary
class JavaThriftyGenTest(TaskTestBase):
TARGET_WORKDIR = ".pants.d/bogus/workdir"
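  # The bogus workdir is only interpolated into the expected --out argument by
  # the assertions below; no files are written there.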
@classmethod
def task_type(cls):
return JavaThriftyGen
@classmethod
def alias_groups(cls):
return register_core().merge(register_codegen())
def _create_fake_thrifty_tool(self):
self.make_target(':thrifty-compiler', JarLibrary, jars=[
JarDependency(org='com.microsoft.thrifty', name='thrifty-compiler', rev='0.4.3'),
])
def test_compiler_args(self):
self._create_fake_thrifty_tool()
target = self.make_target('src/thrifty:simple-thrifty-target', JavaThriftyLibrary,
sources=['foo.thrift'])
context = self.context(target_roots=[target])
task = self.create_task(context)
self.assertEqual([
'--out={}'.format(self.TARGET_WORKDIR),
'--path={}/src/thrifty'.format(self.build_root),
'src/thrifty/foo.thrift'],
task.format_args_for_target(target, self.TARGET_WORKDIR))
def test_compiler_args_deps(self):
self._create_fake_thrifty_tool()
upstream = self.make_target('src/thrifty:upstream', JavaThriftyLibrary,
sources=['upstream.thrift'])
downstream = self.make_target('src/thrifty:downstream', JavaThriftyLibrary,
sources=['downstream.thrift'], dependencies=[upstream])
context = self.context(target_roots=[upstream, downstream])
task = self.create_task(context)
self.assertEqual([
'--out={}'.format(self.TARGET_WORKDIR),
'--path={}/src/thrifty'.format(self.build_root),
'src/thrifty/downstream.thrift'],
task.format_args_for_target(downstream, self.TARGET_WORKDIR))
| 41.563636 | 91 | 0.728346 | ["Apache-2.0"] | SergeKireev/pants | contrib/thrifty/tests/python/pants_test/pants/contrib/thrifty/test_thrifty_gen.py | 2,286 | Python
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
from django.conf import settings
# Create your models here.
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
"""Create and saves a new user"""
if not email:
raise ValueError('Users must have email address')
        user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom user model that supports using email instead of username"""
email = models.EmailField(max_length=225, unique=True)
name = models.CharField(max_length=225)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
    USERNAME_FIELD = 'email'
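    # Illustrative usage (not part of the original file); assumes this model is
    # configured as AUTH_USER_MODEL:
    #   user = User.objects.create_user('user@example.com', 'password123')
    #   admin = User.objects.create_superuser('admin@example.com', 'password123')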
class Tag(models.Model):
name = models.CharField(max_length=225)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
def __str__(self):
return self.name
class Ingredient(models.Model):
name = models.CharField(max_length=225)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
def __str__(self):
return self.name
class Recipe(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
title = models.CharField(max_length=255)
time_minutes = models.IntegerField()
price = models.DecimalField(max_digits=5, decimal_places=2)
link = models.CharField(max_length=255, blank=True)
ingredients = models.ManyToManyField('Ingredient')
tags = models.ManyToManyField('Tag')
def __str__(self):
return self.title
| 28.786667 | 78 | 0.677165 | ["MIT"] | shadow-smoke/recipe-app-api | app/core/models.py | 2,159 | Python
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Red Hat, Inc
# Written by Seth Vidal <skvidal at fedoraproject.org>
# Copyright: (c) 2014, Epic Games, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: yum
version_added: historical
short_description: Manages packages with the I(yum) package manager
description:
  - Installs, upgrades, downgrades, removes, and lists packages and groups with the I(yum) package manager.
- This module only works on Python 2. If you require Python 3 support see the M(dnf) module.
options:
use_backend:
description:
      - This module supports C(yum) (as it always has); this is known as C(yum3)/C(YUM3)/C(yum-deprecated) by
        upstream yum developers. As of Ansible 2.7+, this module also supports C(YUM4), which is the
        "new yum" and has a C(dnf) backend.
- By default, this module will select the backend based on the C(ansible_pkg_mgr) fact.
required: false
default: "auto"
choices: [ auto, yum, yum4, dnf ]
version_added: "2.7"
name:
description:
- A package name or package specifier with version, like C(name-1.0).
- If a previous version is specified, the task also needs to turn C(allow_downgrade) on.
See the C(allow_downgrade) documentation for caveats with downgrading packages.
- When using state=latest, this can be C('*') which means run C(yum -y update).
- You can also pass a url or a local path to a rpm file (using state=present).
To operate on several packages this can accept a comma separated string of packages or (as of 2.0) a list of packages.
aliases: [ pkg ]
exclude:
description:
- Package name(s) to exclude when state=present, or latest
version_added: "2.0"
list:
description:
- "Package name to run the equivalent of yum list <package> against. In addition to listing packages,
        you can also list the following: C(installed), C(updates), C(available) and C(repos)."
state:
description:
- Whether to install (C(present) or C(installed), C(latest)), or remove (C(absent) or C(removed)) a package.
- C(present) and C(installed) will simply ensure that a desired package is installed.
- C(latest) will update the specified package if it's not of the latest available version.
- C(absent) and C(removed) will remove the specified package.
      - Default is C(None); however, in effect the default action is C(present) unless the C(autoremove) option is
        enabled for this module, in which case C(absent) is inferred.
choices: [ absent, installed, latest, present, removed ]
enablerepo:
description:
- I(Repoid) of repositories to enable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a C(",").
      - As of Ansible 2.7, this can alternatively be a list instead of a
        C(",")-separated string.
version_added: "0.9"
disablerepo:
description:
- I(Repoid) of repositories to disable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a C(",").
      - As of Ansible 2.7, this can alternatively be a list instead of a
        C(",")-separated string.
version_added: "0.9"
conf_file:
description:
- The remote yum configuration file to use for the transaction.
version_added: "0.6"
disable_gpg_check:
description:
- Whether to disable the GPG checking of signatures of packages being
installed. Has an effect only if state is I(present) or I(latest).
type: bool
default: "no"
version_added: "1.2"
skip_broken:
description:
      - Skip packages with broken dependencies (depsolve) that are causing problems.
type: bool
default: "no"
version_added: "2.3"
update_cache:
description:
- Force yum to check if cache is out of date and redownload if needed.
Has an effect only if state is I(present) or I(latest).
type: bool
default: "no"
aliases: [ expire-cache ]
version_added: "1.9"
validate_certs:
description:
      - This only applies if using an https url as the source of the rpm, e.g. for localinstall. If set to C(no), the SSL certificates will not be validated.
      - This should only be set to C(no) when used on personally controlled sites using self-signed certificates, as it avoids verifying the source site.
- Prior to 2.1 the code worked as if this was set to C(yes).
type: bool
default: "yes"
version_added: "2.1"
update_only:
description:
- When using latest, only update installed packages. Do not install packages.
- Has an effect only if state is I(latest)
required: false
default: "no"
type: bool
version_added: "2.5"
installroot:
description:
- Specifies an alternative installroot, relative to which all packages
will be installed.
default: "/"
version_added: "2.3"
security:
description:
- If set to C(yes), and C(state=latest) then only installs updates that have been marked security related.
type: bool
default: "no"
version_added: "2.4"
bugfix:
description:
- If set to C(yes), and C(state=latest) then only installs updates that have been marked bugfix related.
required: false
default: "no"
version_added: "2.6"
allow_downgrade:
description:
- Specify if the named package and version is allowed to downgrade
a maybe already installed higher version of that package.
Note that setting allow_downgrade=True can make this module
behave in a non-idempotent way. The task could end up with a set
of packages that does not match the complete list of specified
packages to install (because dependencies between the downgraded
package and others can cause changes to the packages which were
in the earlier transaction).
type: bool
default: "no"
version_added: "2.4"
enable_plugin:
description:
- I(Plugin) name to enable for the install/update operation.
The enabled plugin will not persist beyond the transaction.
required: false
version_added: "2.5"
disable_plugin:
description:
- I(Plugin) name to disable for the install/update operation.
The disabled plugins will not persist beyond the transaction.
required: false
version_added: "2.5"
releasever:
description:
- Specifies an alternative release from which all packages will be
installed.
required: false
version_added: "2.7"
default: null
autoremove:
description:
- If C(yes), removes all "leaf" packages from the system that were originally
installed as dependencies of user-installed packages but which are no longer
required by any such package. Should be used alone or when state is I(absent)
- "NOTE: This feature requires yum >= 3.4.3 (RHEL/CentOS 7+)"
type: bool
default: false
version_added: "2.7"
disable_excludes:
description:
- Disable the excludes defined in YUM config files.
- If set to C(all), disables all excludes.
- If set to C(main), disable excludes defined in [main] in yum.conf.
- If set to C(repoid), disable excludes defined for given repo id.
required: false
version_added: "2.7"
download_only:
description:
- Only download the packages, do not install them.
required: false
default: "no"
type: bool
version_added: "2.7"
notes:
  - When used with a `loop:`, each package will be processed individually;
    it is much more efficient to pass the list directly to the `name` option.
- In versions prior to 1.9.2 this module installed and removed each package
given to the yum module separately. This caused problems when packages
specified by filename or url had to be installed or removed together. In
1.9.2 this was fixed so that packages are installed in one yum
transaction. However, if one of the packages adds a new yum repository
that the other packages come from (such as epel-release) then that package
needs to be installed in a separate task. This mimics yum's command line
behaviour.
- 'Yum itself has two types of groups. "Package groups" are specified in the
rpm itself while "environment groups" are specified in a separate file
(usually by the distribution). Unfortunately, this division becomes
apparent to ansible users because ansible needs to operate on the group
of packages in a single transaction and yum requires groups to be specified
in different ways when used in that way. Package groups are specified as
"@development-tools" and environment groups are "@^gnome-desktop-environment".
Use the "yum group list" command to see which category of group the group
you want to install falls into.'
# informational: requirements for nodes
requirements:
- yum
author:
- Ansible Core Team
- Seth Vidal
- Eduard Snesarev (@verm666)
- Berend De Schouwer (@berenddeschouwer)
- Abhijeet Kasurde (@Akasurde)
- Adam Miller (@maxamillion)
'''
EXAMPLES = '''
- name: install the latest version of Apache
yum:
name: httpd
state: latest
- name: ensure a list of packages installed
yum:
name: "{{ packages }}"
vars:
packages:
- httpd
- httpd-tools
- name: remove the Apache package
yum:
name: httpd
state: absent
- name: install the latest version of Apache from the testing repo
yum:
name: httpd
enablerepo: testing
state: present
- name: install one specific version of Apache
yum:
name: httpd-2.2.29-1.4.amzn1
state: present
- name: upgrade all packages
yum:
name: '*'
state: latest
- name: upgrade all packages, excluding kernel & foo related packages
yum:
name: '*'
state: latest
exclude: kernel*,foo*
- name: install the nginx rpm from a remote repo
yum:
name: http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm
state: present
- name: install nginx rpm from a local file
yum:
name: /usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm
state: present
- name: install the 'Development tools' package group
yum:
name: "@Development tools"
state: present
- name: install the 'Gnome desktop' environment group
yum:
name: "@^gnome-desktop-environment"
state: present
- name: List ansible packages and register result to print with debug later.
yum:
list: ansible
register: result
- name: Install package with multiple repos enabled
yum:
name: sos
enablerepo: "epel,ol7_latest"
- name: Install package with multiple repos disabled
yum:
name: sos
disablerepo: "epel,ol7_latest"
- name: Install a list of packages
yum:
name:
- nginx
- postgresql
- postgresql-server
state: present
- name: Download the nginx package but do not install it
yum:
name:
- nginx
state: latest
download_only: true
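
# The examples below are illustrative additions (not part of the original
# module documentation); package names are placeholders.
- name: Remove a package and any leaf dependencies it pulled in
  yum:
    name: httpd
    state: absent
    autoremove: yes

- name: Apply only updates that are marked as security related
  yum:
    name: '*'
    state: latest
    security: yes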
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.yumdnf import YumDnf, yumdnf_argument_spec
import os
import re
import tempfile
try:
import rpm
HAS_RPM_PYTHON = True
except ImportError:
HAS_RPM_PYTHON = False
try:
import yum
HAS_YUM_PYTHON = True
except ImportError:
HAS_YUM_PYTHON = False
try:
from yum.misc import find_unfinished_transactions, find_ts_remaining
from rpmUtils.miscutils import splitFilename, compareEVR
transaction_helpers = True
except ImportError:
transaction_helpers = False
from contextlib import contextmanager
def_qf = "%{epoch}:%{name}-%{version}-%{release}.%{arch}"
rpmbin = None
# 64k. Number of bytes to read at a time when manually downloading pkgs via a url
BUFSIZE = 65536
class YumModule(YumDnf):
"""
Yum Ansible module back-end implementation
"""
def __init__(self, module):
# state=installed name=pkgspec
# state=removed name=pkgspec
# state=latest name=pkgspec
#
# informational commands:
# list=installed
# list=updates
# list=available
# list=repos
# list=pkgspec
# This populates instance vars for all argument spec params
super(YumModule, self).__init__(module)
def fetch_rpm_from_url(self, spec):
# FIXME: Remove this once this PR is merged:
# https://github.com/ansible/ansible/pull/19172
# download package so that we can query it
package_name, dummy = os.path.splitext(str(spec.rsplit('/', 1)[1]))
package_file = tempfile.NamedTemporaryFile(dir=self.module.tmpdir, prefix=package_name, suffix='.rpm', delete=False)
self.module.add_cleanup_file(package_file.name)
try:
rsp, info = fetch_url(self.module, spec)
if not rsp:
self.module.fail_json(msg="Failure downloading %s, %s" % (spec, info['msg']))
data = rsp.read(BUFSIZE)
while data:
package_file.write(data)
data = rsp.read(BUFSIZE)
package_file.close()
except Exception as e:
self.module.fail_json(msg="Failure downloading %s, %s" % (spec, to_native(e)))
return package_file.name
def yum_base(self):
my = yum.YumBase()
my.preconf.debuglevel = 0
my.preconf.errorlevel = 0
my.preconf.plugins = True
my.preconf.enabled_plugins = self.enable_plugin
my.preconf.disabled_plugins = self.disable_plugin
if self.releasever:
my.preconf.releasever = self.releasever
if self.installroot != '/':
# do not setup installroot by default, because of error
# CRITICAL:yum.cli:Config Error: Error accessing file for config file:////etc/yum.conf
# in old yum version (like in CentOS 6.6)
my.preconf.root = self.installroot
my.conf.installroot = self.installroot
if self.conf_file and os.path.exists(self.conf_file):
my.preconf.fn = self.conf_file
if os.geteuid() != 0:
if hasattr(my, 'setCacheDir'):
my.setCacheDir()
else:
cachedir = yum.misc.getCacheDir()
my.repos.setCacheDir(cachedir)
my.conf.cache = 0
if self.disable_excludes:
my.conf.disable_excludes = self.disable_excludes
return my
def po_to_envra(self, po):
if hasattr(po, 'ui_envra'):
return po.ui_envra
return '%s:%s-%s-%s.%s' % (po.epoch, po.name, po.version, po.release, po.arch)
def is_group_env_installed(self, name):
name_lower = name.lower()
my = self.yum_base()
if yum.__version_info__ >= (3, 4):
groups_list = my.doGroupLists(return_evgrps=True)
else:
groups_list = my.doGroupLists()
# list of the installed groups on the first index
groups = groups_list[0]
for group in groups:
if name_lower.endswith(group.name.lower()) or name_lower.endswith(group.groupid.lower()):
return True
if yum.__version_info__ >= (3, 4):
# list of the installed env_groups on the third index
envs = groups_list[2]
for env in envs:
if name_lower.endswith(env.name.lower()) or name_lower.endswith(env.environmentid.lower()):
return True
return False
def is_installed(self, repoq, pkgspec, qf=None, is_pkg=False):
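        # Descriptive note: two lookup paths. Without a repoquery helper the
        # yum Python API / rpmdb is queried directly; with one, `rpm -q` is run
        # (falling back to --whatprovides) and its output is parsed.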
if qf is None:
qf = "%{epoch}:%{name}-%{version}-%{release}.%{arch}\n"
if not repoq:
pkgs = []
try:
my = self.yum_base()
for rid in self.disablerepo:
my.repos.disableRepo(rid)
for rid in self.enablerepo:
my.repos.enableRepo(rid)
e, m, _ = my.rpmdb.matchPackageNames([pkgspec])
pkgs = e + m
if not pkgs and not is_pkg:
pkgs.extend(my.returnInstalledPackagesByDep(pkgspec))
except Exception as e:
self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))
return [self.po_to_envra(p) for p in pkgs]
else:
global rpmbin
if not rpmbin:
rpmbin = self.module.get_bin_path('rpm', required=True)
cmd = [rpmbin, '-q', '--qf', qf, pkgspec]
if self.installroot != '/':
cmd.extend(['--root', self.installroot])
# rpm localizes messages and we're screen scraping so make sure we use
# the C locale
lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
rc, out, err = self.module.run_command(cmd, environ_update=lang_env)
if rc != 0 and 'is not installed' not in out:
self.module.fail_json(msg='Error from rpm: %s: %s' % (cmd, err))
if 'is not installed' in out:
out = ''
pkgs = [p for p in out.replace('(none)', '0').split('\n') if p.strip()]
if not pkgs and not is_pkg:
cmd = [rpmbin, '-q', '--qf', qf, '--whatprovides', pkgspec]
if self.installroot != '/':
cmd.extend(['--root', self.installroot])
rc2, out2, err2 = self.module.run_command(cmd, environ_update=lang_env)
else:
rc2, out2, err2 = (0, '', '')
if rc2 != 0 and 'no package provides' not in out2:
self.module.fail_json(msg='Error from rpm: %s: %s' % (cmd, err + err2))
if 'no package provides' in out2:
out2 = ''
pkgs += [p for p in out2.replace('(none)', '0').split('\n') if p.strip()]
return pkgs
return []
def is_available(self, repoq, pkgspec, qf=def_qf):
if not repoq:
pkgs = []
try:
my = self.yum_base()
for rid in self.disablerepo:
my.repos.disableRepo(rid)
for rid in self.enablerepo:
my.repos.enableRepo(rid)
e, m, _ = my.pkgSack.matchPackageNames([pkgspec])
pkgs = e + m
if not pkgs:
pkgs.extend(my.returnPackagesByDep(pkgspec))
except Exception as e:
self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))
return [self.po_to_envra(p) for p in pkgs]
else:
myrepoq = list(repoq)
r_cmd = ['--disablerepo', ','.join(self.disablerepo)]
myrepoq.extend(r_cmd)
r_cmd = ['--enablerepo', ','.join(self.enablerepo)]
myrepoq.extend(r_cmd)
cmd = myrepoq + ["--qf", qf, pkgspec]
rc, out, err = self.module.run_command(cmd)
if rc == 0:
return [p for p in out.split('\n') if p.strip()]
else:
self.module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err))
return []
def is_update(self, repoq, pkgspec, qf=def_qf):
if not repoq:
pkgs = []
updates = []
try:
my = self.yum_base()
for rid in self.disablerepo:
my.repos.disableRepo(rid)
for rid in self.enablerepo:
my.repos.enableRepo(rid)
pkgs = my.returnPackagesByDep(pkgspec) + my.returnInstalledPackagesByDep(pkgspec)
if not pkgs:
e, m, _ = my.pkgSack.matchPackageNames([pkgspec])
pkgs = e + m
updates = my.doPackageLists(pkgnarrow='updates').updates
except Exception as e:
self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))
retpkgs = (pkg for pkg in pkgs if pkg in updates)
return set(self.po_to_envra(p) for p in retpkgs)
else:
myrepoq = list(repoq)
r_cmd = ['--disablerepo', ','.join(self.disablerepo)]
myrepoq.extend(r_cmd)
r_cmd = ['--enablerepo', ','.join(self.enablerepo)]
myrepoq.extend(r_cmd)
cmd = myrepoq + ["--pkgnarrow=updates", "--qf", qf, pkgspec]
rc, out, err = self.module.run_command(cmd)
if rc == 0:
return set(p for p in out.split('\n') if p.strip())
else:
self.module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err))
return set()
def what_provides(self, repoq, req_spec, qf=def_qf):
if not repoq:
pkgs = []
try:
my = self.yum_base()
for rid in self.disablerepo:
my.repos.disableRepo(rid)
for rid in self.enablerepo:
my.repos.enableRepo(rid)
try:
pkgs = my.returnPackagesByDep(req_spec) + my.returnInstalledPackagesByDep(req_spec)
except Exception as e:
# If a repo with `repo_gpgcheck=1` is added and the repo GPG
                # key was never accepted, querying this repo will throw an
# error: 'repomd.xml signature could not be verified'. In that
# situation we need to run `yum -y makecache` which will accept
# the key and try again.
if 'repomd.xml signature could not be verified' in to_native(e):
self.module.run_command(self.yum_basecmd + ['makecache'])
pkgs = my.returnPackagesByDep(req_spec) + my.returnInstalledPackagesByDep(req_spec)
else:
raise
if not pkgs:
e, m, _ = my.pkgSack.matchPackageNames([req_spec])
pkgs.extend(e)
pkgs.extend(m)
e, m, _ = my.rpmdb.matchPackageNames([req_spec])
pkgs.extend(e)
pkgs.extend(m)
except Exception as e:
self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))
return set(self.po_to_envra(p) for p in pkgs)
else:
myrepoq = list(repoq)
r_cmd = ['--disablerepo', ','.join(self.disablerepo)]
myrepoq.extend(r_cmd)
r_cmd = ['--enablerepo', ','.join(self.enablerepo)]
myrepoq.extend(r_cmd)
cmd = myrepoq + ["--qf", qf, "--whatprovides", req_spec]
rc, out, err = self.module.run_command(cmd)
cmd = myrepoq + ["--qf", qf, req_spec]
rc2, out2, err2 = self.module.run_command(cmd)
if rc == 0 and rc2 == 0:
out += out2
pkgs = set([p for p in out.split('\n') if p.strip()])
if not pkgs:
pkgs = self.is_installed(repoq, req_spec, qf=qf)
return pkgs
else:
self.module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2))
return set()
def transaction_exists(self, pkglist):
"""
checks the package list to see if any packages are
involved in an incomplete transaction
"""
conflicts = []
if not transaction_helpers:
return conflicts
# first, we create a list of the package 'nvreas'
# so we can compare the pieces later more easily
pkglist_nvreas = (splitFilename(pkg) for pkg in pkglist)
# next, we build the list of packages that are
# contained within an unfinished transaction
unfinished_transactions = find_unfinished_transactions()
for trans in unfinished_transactions:
steps = find_ts_remaining(trans)
for step in steps:
# the action is install/erase/etc., but we only
# care about the package spec contained in the step
(action, step_spec) = step
(n, v, r, e, a) = splitFilename(step_spec)
# and see if that spec is in the list of packages
# requested for installation/updating
for pkg in pkglist_nvreas:
# if the name and arch match, we're going to assume
# this package is part of a pending transaction
# the label is just for display purposes
label = "%s-%s" % (n, a)
if n == pkg[0] and a == pkg[4]:
if label not in conflicts:
conflicts.append("%s-%s" % (n, a))
break
return conflicts
def local_envra(self, path):
"""return envra of a local rpm passed in"""
ts = rpm.TransactionSet()
ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
fd = os.open(path, os.O_RDONLY)
try:
header = ts.hdrFromFdno(fd)
except rpm.error as e:
return None
finally:
os.close(fd)
return '%s:%s-%s-%s.%s' % (
header[rpm.RPMTAG_EPOCH] or '0',
header[rpm.RPMTAG_NAME],
header[rpm.RPMTAG_VERSION],
header[rpm.RPMTAG_RELEASE],
header[rpm.RPMTAG_ARCH]
)
@contextmanager
def set_env_proxy(self):
# setting system proxy environment and saving old, if exists
my = self.yum_base()
namepass = ""
proxy_url = ""
scheme = ["http", "https"]
old_proxy_env = [os.getenv("http_proxy"), os.getenv("https_proxy")]
try:
if my.conf.proxy:
if my.conf.proxy_username:
namepass = namepass + my.conf.proxy_username
proxy_url = my.conf.proxy
if my.conf.proxy_password:
namepass = namepass + ":" + my.conf.proxy_password
elif '@' in my.conf.proxy:
namepass = my.conf.proxy.split('@')[0].split('//')[-1]
proxy_url = my.conf.proxy.replace("{0}@".format(namepass), "")
if namepass:
namepass = namepass + '@'
for item in scheme:
os.environ[item + "_proxy"] = re.sub(
r"(http://)",
r"\1" + namepass, proxy_url
)
yield
except yum.Errors.YumBaseError:
raise
finally:
# revert back to previously system configuration
for item in scheme:
if os.getenv("{0}_proxy".format(item)):
del os.environ["{0}_proxy".format(item)]
if old_proxy_env[0]:
os.environ["http_proxy"] = old_proxy_env[0]
if old_proxy_env[1]:
os.environ["https_proxy"] = old_proxy_env[1]
def pkg_to_dict(self, pkgstr):
if pkgstr.strip():
n, e, v, r, a, repo = pkgstr.split('|')
else:
return {'error_parsing': pkgstr}
d = {
'name': n,
'arch': a,
'epoch': e,
'release': r,
'version': v,
'repo': repo,
'envra': '%s:%s-%s-%s.%s' % (e, n, v, r, a)
}
if repo == 'installed':
d['yumstate'] = 'installed'
else:
d['yumstate'] = 'available'
return d
def repolist(self, repoq, qf="%{repoid}"):
cmd = repoq + ["--qf", qf, "-a"]
rc, out, _ = self.module.run_command(cmd)
if rc == 0:
return set(p for p in out.split('\n') if p.strip())
else:
return []
def list_stuff(self, repoquerybin, stuff):
qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|%{repoid}"
# is_installed goes through rpm instead of repoquery so it needs a slightly different format
is_installed_qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|installed\n"
repoq = [repoquerybin, '--show-duplicates', '--plugins', '--quiet']
if self.disablerepo:
repoq.extend(['--disablerepo', ','.join(self.disablerepo)])
if self.enablerepo:
repoq.extend(['--enablerepo', ','.join(self.enablerepo)])
if self.installroot != '/':
repoq.extend(['--installroot', self.installroot])
if self.conf_file and os.path.exists(self.conf_file):
repoq += ['-c', self.conf_file]
if stuff == 'installed':
return [self.pkg_to_dict(p) for p in sorted(self.is_installed(repoq, '-a', qf=is_installed_qf)) if p.strip()]
if stuff == 'updates':
return [self.pkg_to_dict(p) for p in sorted(self.is_update(repoq, '-a', qf=qf)) if p.strip()]
if stuff == 'available':
return [self.pkg_to_dict(p) for p in sorted(self.is_available(repoq, '-a', qf=qf)) if p.strip()]
if stuff == 'repos':
return [dict(repoid=name, state='enabled') for name in sorted(self.repolist(repoq)) if name.strip()]
return [
self.pkg_to_dict(p) for p in
sorted(self.is_installed(repoq, stuff, qf=is_installed_qf) + self.is_available(repoq, stuff, qf=qf))
if p.strip()
]
def exec_install(self, items, action, pkgs, res):
cmd = self.yum_basecmd + [action] + pkgs
if self.module.check_mode:
self.module.exit_json(changed=True, results=res['results'], changes=dict(installed=pkgs))
lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
rc, out, err = self.module.run_command(cmd, environ_update=lang_env)
if rc == 1:
for spec in items:
# Fail on invalid urls:
if ('://' in spec and ('No package %s available.' % spec in out or 'Cannot open: %s. Skipping.' % spec in err)):
err = 'Package at %s could not be installed' % spec
self.module.fail_json(changed=False, msg=err, rc=rc)
res['rc'] = rc
res['results'].append(out)
res['msg'] += err
res['changed'] = True
if ('Nothing to do' in out and rc == 0) or ('does not have any packages' in err):
res['changed'] = False
if rc != 0:
res['changed'] = False
self.module.fail_json(**res)
# Fail if yum prints 'No space left on device' because that means some
# packages failed executing their post install scripts because of lack of
# free space (e.g. kernel package couldn't generate initramfs). Note that
# yum can still exit with rc=0 even if some post scripts didn't execute
# correctly.
if 'No space left on device' in (out or err):
res['changed'] = False
res['msg'] = 'No space left on device'
self.module.fail_json(**res)
# FIXME - if we did an install - go and check the rpmdb to see if it actually installed
# look for each pkg in rpmdb
# look for each pkg via obsoletes
return res
def install(self, items, repoq):
pkgs = []
downgrade_pkgs = []
res = {}
res['results'] = []
res['msg'] = ''
res['rc'] = 0
res['changed'] = False
for spec in items:
pkg = None
downgrade_candidate = False
# check if pkgspec is installed (if possible for idempotence)
if spec.endswith('.rpm'):
if '://' not in spec and not os.path.exists(spec):
res['msg'] += "No RPM file matching '%s' found on system" % spec
res['results'].append("No RPM file matching '%s' found on system" % spec)
res['rc'] = 127 # Ensure the task fails in with-loop
self.module.fail_json(**res)
if '://' in spec:
with self.set_env_proxy():
package = self.fetch_rpm_from_url(spec)
else:
package = spec
# most common case is the pkg is already installed
envra = self.local_envra(package)
if envra is None:
self.module.fail_json(msg="Failed to get nevra information from RPM package: %s" % spec)
installed_pkgs = self.is_installed(repoq, envra)
if installed_pkgs:
res['results'].append('%s providing %s is already installed' % (installed_pkgs[0], package))
continue
(name, ver, rel, epoch, arch) = splitFilename(envra)
installed_pkgs = self.is_installed(repoq, name)
                # case of two packages with the same EVR but different archs, like x86_64 and i686
if len(installed_pkgs) == 2:
(cur_name0, cur_ver0, cur_rel0, cur_epoch0, cur_arch0) = splitFilename(installed_pkgs[0])
(cur_name1, cur_ver1, cur_rel1, cur_epoch1, cur_arch1) = splitFilename(installed_pkgs[1])
cur_epoch0 = cur_epoch0 or '0'
cur_epoch1 = cur_epoch1 or '0'
compare = compareEVR((cur_epoch0, cur_ver0, cur_rel0), (cur_epoch1, cur_ver1, cur_rel1))
if compare == 0 and cur_arch0 != cur_arch1:
for installed_pkg in installed_pkgs:
if installed_pkg.endswith(arch):
installed_pkgs = [installed_pkg]
if len(installed_pkgs) == 1:
installed_pkg = installed_pkgs[0]
(cur_name, cur_ver, cur_rel, cur_epoch, cur_arch) = splitFilename(installed_pkg)
cur_epoch = cur_epoch or '0'
compare = compareEVR((cur_epoch, cur_ver, cur_rel), (epoch, ver, rel))
# compare > 0 -> higher version is installed
# compare == 0 -> exact version is installed
# compare < 0 -> lower version is installed
if compare > 0 and self.allow_downgrade:
downgrade_candidate = True
elif compare >= 0:
continue
# else: if there are more installed packages with the same name, that would mean
# kernel, gpg-pubkey or like, so just let yum deal with it and try to install it
pkg = package
# groups
elif spec.startswith('@'):
if self.is_group_env_installed(spec):
continue
pkg = spec
# range requires or file-requires or pkgname :(
else:
# most common case is the pkg is already installed and done
# short circuit all the bs - and search for it as a pkg in is_installed
# if you find it then we're done
if not set(['*', '?']).intersection(set(spec)):
installed_pkgs = self.is_installed(repoq, spec, is_pkg=True)
if installed_pkgs:
res['results'].append('%s providing %s is already installed' % (installed_pkgs[0], spec))
continue
# look up what pkgs provide this
pkglist = self.what_provides(repoq, spec)
if not pkglist:
res['msg'] += "No package matching '%s' found available, installed or updated" % spec
res['results'].append("No package matching '%s' found available, installed or updated" % spec)
res['rc'] = 126 # Ensure the task fails in with-loop
self.module.fail_json(**res)
# if any of the packages are involved in a transaction, fail now
# so that we don't hang on the yum operation later
conflicts = self.transaction_exists(pkglist)
if conflicts:
res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
res['rc'] = 125 # Ensure the task fails in with-loop
self.module.fail_json(**res)
# if any of them are installed
# then nothing to do
found = False
for this in pkglist:
if self.is_installed(repoq, this, is_pkg=True):
found = True
res['results'].append('%s providing %s is already installed' % (this, spec))
break
# if the version of the pkg you have installed is not in ANY repo, but there are
# other versions in the repos (both higher and lower) then the previous checks won't work.
# so we check one more time. This really only works for pkgname - not for file provides or virt provides
# but virt provides should be all caught in what_provides on its own.
# highly irritating
if not found:
if self.is_installed(repoq, spec):
found = True
res['results'].append('package providing %s is already installed' % (spec))
if found:
continue
# Downgrade - The yum install command will only install or upgrade to a spec version, it will
# not install an older version of an RPM even if specified by the install spec. So we need to
# determine if this is a downgrade, and then use the yum downgrade command to install the RPM.
if self.allow_downgrade:
for package in pkglist:
# Get the NEVRA of the requested package using pkglist instead of spec because pkglist
# contains consistently-formatted package names returned by yum, rather than user input
# that is often not parsed correctly by splitFilename().
(name, ver, rel, epoch, arch) = splitFilename(package)
# Check if any version of the requested package is installed
inst_pkgs = self.is_installed(repoq, name, is_pkg=True)
if inst_pkgs:
(cur_name, cur_ver, cur_rel, cur_epoch, cur_arch) = splitFilename(inst_pkgs[0])
compare = compareEVR((cur_epoch, cur_ver, cur_rel), (epoch, ver, rel))
if compare > 0:
downgrade_candidate = True
else:
downgrade_candidate = False
break
# If package needs to be installed/upgraded/downgraded, then pass in the spec
# we could get here if nothing provides it but that's not
# the error we're catching here
pkg = spec
if downgrade_candidate and self.allow_downgrade:
downgrade_pkgs.append(pkg)
else:
pkgs.append(pkg)
if downgrade_pkgs:
res = self.exec_install(items, 'downgrade', downgrade_pkgs, res)
if pkgs:
res = self.exec_install(items, 'install', pkgs, res)
return res
def remove(self, items, repoq):
pkgs = []
res = {}
res['results'] = []
res['msg'] = ''
res['changed'] = False
res['rc'] = 0
for pkg in items:
if pkg.startswith('@'):
installed = self.is_group_env_installed(pkg)
else:
installed = self.is_installed(repoq, pkg)
if installed:
pkgs.append(pkg)
else:
res['results'].append('%s is not installed' % pkg)
if pkgs:
if self.module.check_mode:
self.module.exit_json(changed=True, results=res['results'], changes=dict(removed=pkgs))
# run an actual yum transaction
if self.autoremove:
cmd = self.yum_basecmd + ["autoremove"] + pkgs
else:
cmd = self.yum_basecmd + ["remove"] + pkgs
rc, out, err = self.module.run_command(cmd)
res['rc'] = rc
res['results'].append(out)
res['msg'] = err
if rc != 0:
if self.autoremove:
if 'No such command' not in out:
self.module.fail_json(msg='Version of YUM too old for autoremove: Requires yum 3.4.3 (RHEL/CentOS 7+)')
else:
self.module.fail_json(**res)
# compile the results into one batch. If anything is changed
# then mark changed
# at the end - if we've end up failed then fail out of the rest
# of the process
# at this point we check to see if the pkg is no longer present
for pkg in pkgs:
if pkg.startswith('@'):
installed = self.is_group_env_installed(pkg)
else:
installed = self.is_installed(repoq, pkg)
if installed:
self.module.fail_json(**res)
res['changed'] = True
return res
def run_check_update(self):
# run check-update to see if we have packages pending
rc, out, err = self.module.run_command(self.yum_basecmd + ['check-update'])
return rc, out, err
@staticmethod
def parse_check_update(check_update_output):
updates = {}
obsoletes = {}
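        # Shape of the returned dicts (illustrative, matching the parsing below):
        #   updates = {'ceph': {'version': '1:11.2.0-0.el7', 'dist': 'x86_64',
        #                       'repo': 'ceph'}}
        #   obsoletes uses the same structure, built from columns 4-6 of
        #   obsoletes lines.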
# remove incorrect new lines in longer columns in output from yum check-update
# yum line wrapping can move the repo to the next line
#
# Meant to filter out sets of lines like:
# some_looooooooooooooooooooooooooooooooooooong_package_name 1:1.2.3-1.el7
# some-repo-label
#
# But it also needs to avoid catching lines like:
# Loading mirror speeds from cached hostfile
#
# ceph.x86_64 1:11.2.0-0.el7 ceph
# preprocess string and filter out empty lines so the regex below works
out = re.sub(r'\n[^\w]\W+(.*)', r' \1', check_update_output)
available_updates = out.split('\n')
# build update dictionary
for line in available_updates:
line = line.split()
# ignore irrelevant lines
# '*' in line matches lines like mirror lists:
# * base: mirror.corbina.net
# len(line) != 3 or 6 could be junk or a continuation
# len(line) = 6 is package obsoletes
#
# FIXME: what is the '.' not in line conditional for?
if '*' in line or len(line) not in [3, 6] or '.' not in line[0]:
continue
else:
pkg, version, repo = line[0], line[1], line[2]
name, dist = pkg.rsplit('.', 1)
updates.update({name: {'version': version, 'dist': dist, 'repo': repo}})
if len(line) == 6:
obsolete_pkg, obsolete_version, obsolete_repo = line[3], line[4], line[5]
obsolete_name, obsolete_dist = obsolete_pkg.rsplit('.', 1)
obsoletes.update({obsolete_name: {'version': obsolete_version, 'dist': obsolete_dist, 'repo': obsolete_repo}})
return updates, obsoletes
def latest(self, items, repoq):
res = {}
res['results'] = []
res['msg'] = ''
res['changed'] = False
res['rc'] = 0
pkgs = {}
pkgs['update'] = []
pkgs['install'] = []
updates = {}
obsoletes = {}
update_all = False
cmd = None
# determine if we're doing an update all
if '*' in items:
update_all = True
rc, out, err = self.run_check_update()
if rc == 0 and update_all:
res['results'].append('Nothing to do here, all packages are up to date')
return res
elif rc == 100:
updates, obsoletes = self.parse_check_update(out)
elif rc == 1:
res['msg'] = err
res['rc'] = rc
self.module.fail_json(**res)
if update_all:
cmd = self.yum_basecmd + ['update']
will_update = set(updates.keys())
will_update_from_other_package = dict()
else:
will_update = set()
will_update_from_other_package = dict()
for spec in items:
# some guess work involved with groups. update @<group> will install the group if missing
if spec.startswith('@'):
pkgs['update'].append(spec)
will_update.add(spec)
continue
# check if pkgspec is installed (if possible for idempotence)
# localpkg
elif spec.endswith('.rpm') and '://' not in spec:
if not os.path.exists(spec):
res['msg'] += "No RPM file matching '%s' found on system" % spec
res['results'].append("No RPM file matching '%s' found on system" % spec)
res['rc'] = 127 # Ensure the task fails in with-loop
self.module.fail_json(**res)
# get the pkg e:name-v-r.arch
envra = self.local_envra(spec)
if envra is None:
self.module.fail_json(msg="Failed to get nevra information from RPM package: %s" % spec)
# local rpm files can't be updated
if self.is_installed(repoq, envra):
pkgs['update'].append(spec)
else:
pkgs['install'].append(spec)
continue
# URL
elif '://' in spec:
# download package so that we can check if it's already installed
with self.set_env_proxy():
package = self.fetch_rpm_from_url(spec)
envra = self.local_envra(package)
if envra is None:
self.module.fail_json(msg="Failed to get nevra information from RPM package: %s" % spec)
# local rpm files can't be updated
if self.is_installed(repoq, envra):
pkgs['update'].append(spec)
else:
pkgs['install'].append(spec)
continue
# dep/pkgname - find it
else:
if self.is_installed(repoq, spec):
pkgs['update'].append(spec)
else:
pkgs['install'].append(spec)
pkglist = self.what_provides(repoq, spec)
# FIXME..? may not be desirable to throw an exception here if a single package is missing
if not pkglist:
res['msg'] += "No package matching '%s' found available, installed or updated" % spec
res['results'].append("No package matching '%s' found available, installed or updated" % spec)
res['rc'] = 126 # Ensure the task fails in with-loop
self.module.fail_json(**res)
nothing_to_do = True
for pkg in pkglist:
if spec in pkgs['install'] and self.is_available(repoq, pkg):
nothing_to_do = False
break
# this contains the full NVR and spec could contain wildcards
# or virtual provides (like "python-*" or "smtp-daemon") while
# updates contains name only.
pkgname, _, _, _, _ = splitFilename(pkg)
if spec in pkgs['update'] and pkgname in updates:
nothing_to_do = False
will_update.add(spec)
# Massage the updates list
if spec != pkgname:
# For reporting what packages would be updated more
# succinctly
will_update_from_other_package[spec] = pkgname
break
if not self.is_installed(repoq, spec) and self.update_only:
res['results'].append("Packages providing %s not installed due to update_only specified" % spec)
continue
if nothing_to_do:
res['results'].append("All packages providing %s are up to date" % spec)
continue
# if any of the packages are involved in a transaction, fail now
# so that we don't hang on the yum operation later
conflicts = self.transaction_exists(pkglist)
if conflicts:
res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
res['results'].append("The following packages have pending transactions: %s" % ", ".join(conflicts))
res['rc'] = 128 # Ensure the task fails in with-loop
self.module.fail_json(**res)
# check_mode output
if self.module.check_mode:
to_update = []
for w in will_update:
if w.startswith('@'):
to_update.append((w, None))
elif w not in updates:
other_pkg = will_update_from_other_package[w]
to_update.append(
(
w,
'because of (at least) %s-%s.%s from %s' % (
other_pkg,
updates[other_pkg]['version'],
updates[other_pkg]['dist'],
updates[other_pkg]['repo']
)
)
)
else:
to_update.append((w, '%s.%s from %s' % (updates[w]['version'], updates[w]['dist'], updates[w]['repo'])))
if self.update_only:
res['changes'] = dict(installed=[], updated=to_update)
else:
res['changes'] = dict(installed=pkgs['install'], updated=to_update)
if will_update or pkgs['install']:
res['changed'] = True
if obsoletes:
res['obsoletes'] = obsoletes
return res
# run commands
if cmd: # update all
rc, out, err = self.module.run_command(cmd)
res['changed'] = True
elif self.update_only:
if pkgs['update']:
cmd = self.yum_basecmd + ['update'] + pkgs['update']
lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
rc, out, err = self.module.run_command(cmd, environ_update=lang_env)
out_lower = out.strip().lower()
if not out_lower.endswith("no packages marked for update") and \
not out_lower.endswith("nothing to do"):
res['changed'] = True
else:
rc, out, err = [0, '', '']
elif pkgs['install'] or will_update and not self.update_only:
cmd = self.yum_basecmd + ['install'] + pkgs['install'] + pkgs['update']
lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
rc, out, err = self.module.run_command(cmd, environ_update=lang_env)
out_lower = out.strip().lower()
if not out_lower.endswith("no packages marked for update") and \
not out_lower.endswith("nothing to do"):
res['changed'] = True
else:
rc, out, err = [0, '', '']
res['rc'] = rc
res['msg'] += err
res['results'].append(out)
if rc:
res['failed'] = True
if obsoletes:
res['obsoletes'] = obsoletes
return res
def ensure(self, repoq):
pkgs = self.names
# autoremove was provided without `name`
if not self.names and self.autoremove:
pkgs = []
self.state = 'absent'
if self.conf_file and os.path.exists(self.conf_file):
self.yum_basecmd += ['-c', self.conf_file]
if repoq:
repoq += ['-c', self.conf_file]
if self.skip_broken:
self.yum_basecmd.extend(['--skip-broken'])
if self.disablerepo:
self.yum_basecmd.extend(['--disablerepo=%s' % ','.join(self.disablerepo)])
if self.enablerepo:
self.yum_basecmd.extend(['--enablerepo=%s' % ','.join(self.enablerepo)])
if self.enable_plugin:
self.yum_basecmd.extend(['--enableplugin', ','.join(self.enable_plugin)])
if self.disable_plugin:
self.yum_basecmd.extend(['--disableplugin', ','.join(self.disable_plugin)])
if self.exclude:
e_cmd = ['--exclude=%s' % ','.join(self.exclude)]
self.yum_basecmd.extend(e_cmd)
if self.disable_excludes:
self.yum_basecmd.extend(['--disableexcludes=%s' % self.disable_excludes])
if self.download_only:
self.yum_basecmd.extend(['--downloadonly'])
if self.installroot != '/':
# do not setup installroot by default, because of error
# CRITICAL:yum.cli:Config Error: Error accessing file for config file:////etc/yum.conf
# in old yum version (like in CentOS 6.6)
e_cmd = ['--installroot=%s' % self.installroot]
self.yum_basecmd.extend(e_cmd)
if self.state in ('installed', 'present', 'latest'):
""" The need of this entire if conditional has to be chalanged
this function is the ensure function that is called
in the main section.
This conditional tends to disable/enable repo for
install present latest action, same actually
can be done for remove and absent action
As solution I would advice to cal
try: my.repos.disableRepo(disablerepo)
and
try: my.repos.enableRepo(enablerepo)
right before any yum_cmd is actually called regardless
of yum action.
Please note that enable/disablerepo options are general
options, this means that we can call those with any action
option. https://linux.die.net/man/8/yum
This docstring will be removed together when issue: #21619
will be solved.
This has been triggered by: #19587
"""
if self.update_cache:
self.module.run_command(self.yum_basecmd + ['clean', 'expire-cache'])
my = self.yum_base()
try:
if self.disablerepo:
for rid in self.disablerepo:
my.repos.disableRepo(rid)
current_repos = my.repos.repos.keys()
if self.enablerepo:
try:
for rid in self.enablerepo:
my.repos.enableRepo(rid)
new_repos = my.repos.repos.keys()
for i in new_repos:
if i not in current_repos:
rid = my.repos.getRepo(i)
a = rid.repoXML.repoid # nopep8 - https://github.com/ansible/ansible/pull/21475#pullrequestreview-22404868
current_repos = new_repos
except yum.Errors.YumBaseError as e:
self.module.fail_json(msg="Error setting/accessing repos: %s" % to_native(e))
except yum.Errors.YumBaseError as e:
self.module.fail_json(msg="Error accessing repos: %s" % to_native(e))
if self.state == 'latest' or self.update_only:
if self.disable_gpg_check:
self.yum_basecmd.append('--nogpgcheck')
if self.security:
self.yum_basecmd.append('--security')
if self.bugfix:
self.yum_basecmd.append('--bugfix')
res = self.latest(pkgs, repoq)
elif self.state in ('installed', 'present'):
if self.disable_gpg_check:
self.yum_basecmd.append('--nogpgcheck')
res = self.install(pkgs, repoq)
elif self.state in ('removed', 'absent'):
res = self.remove(pkgs, repoq)
else:
# should be caught by AnsibleModule argument_spec
self.module.fail_json(
msg="we should never get here unless this all failed",
changed=False,
results='',
errors='unexpected state'
)
return res
@staticmethod
def has_yum():
return HAS_YUM_PYTHON
def run(self):
"""
actually execute the module code backend
"""
error_msgs = []
if not HAS_RPM_PYTHON:
error_msgs.append('The Python 2 bindings for rpm are needed for this module. If you require Python 3 support use the `dnf` Ansible module instead.')
if not HAS_YUM_PYTHON:
error_msgs.append('The Python 2 yum module is needed for this module. If you require Python 3 support use the `dnf` Ansible module instead.')
if self.disable_excludes and yum.__version_info__ < (3, 4):
self.module.fail_json(msg="'disable_includes' is available in yum version 3.4 and onwards.")
if error_msgs:
self.module.fail_json(msg='. '.join(error_msgs))
# fedora will redirect yum to dnf, which has incompatibilities
# with how this module expects yum to operate. If yum-deprecated
# is available, use that instead to emulate the old behaviors.
if self.module.get_bin_path('yum-deprecated'):
yumbin = self.module.get_bin_path('yum-deprecated')
else:
yumbin = self.module.get_bin_path('yum')
# need debug level 2 to get 'Nothing to do' for groupinstall.
self.yum_basecmd = [yumbin, '-d', '2', '-y']
repoquerybin = self.module.get_bin_path('repoquery', required=False)
if self.install_repoquery and not repoquerybin and not self.module.check_mode:
yum_path = self.module.get_bin_path('yum')
if yum_path:
self.module.run_command('%s -y install yum-utils' % yum_path)
repoquerybin = self.module.get_bin_path('repoquery', required=False)
if self.list:
if not repoquerybin:
self.module.fail_json(msg="repoquery is required to use list= with this module. Please install the yum-utils package.")
results = {'results': self.list_stuff(repoquerybin, self.list)}
else:
# If rhn-plugin is installed and no rhn-certificate is available on
# the system then users will see an error message using the yum API.
# Use repoquery in those cases.
my = self.yum_base()
            # A side effect of accessing conf is that the configuration is
            # loaded and plugins are discovered
my.conf
repoquery = None
try:
yum_plugins = my.plugins._plugins
except AttributeError:
pass
else:
if 'rhnplugin' in yum_plugins:
if repoquerybin:
repoquery = [repoquerybin, '--show-duplicates', '--plugins', '--quiet']
if self.installroot != '/':
repoquery.extend(['--installroot', self.installroot])
results = self.ensure(repoquery)
if repoquery:
results['msg'] = '%s %s' % (
results.get('msg', ''),
'Warning: Due to potential bad behaviour with rhnplugin and certificates, used slower repoquery calls instead of Yum API.'
)
self.module.exit_json(**results)
def main():
# state=installed name=pkgspec
# state=removed name=pkgspec
# state=latest name=pkgspec
#
# informational commands:
# list=installed
# list=updates
# list=available
# list=repos
# list=pkgspec
yumdnf_argument_spec['argument_spec']['use_backend'] = dict(default='auto', choices=['auto', 'yum', 'yum4', 'dnf'])
module = AnsibleModule(
**yumdnf_argument_spec
)
module_implementation = YumModule(module)
module_implementation.run()
if __name__ == '__main__':
main()
| 39.533504 | 160 | 0.557216 | [
"Apache-2.0"
] | aburan28/ansible-devops-pipeline | venv/lib/python2.7/site-packages/ansible/modules/packaging/os/yum.py | 61,950 | Python |
# -*- coding: utf-8 -*-
import os
import sys
from click import (
argument,
command,
echo,
edit,
group,
Group,
option,
pass_context,
Option,
version_option,
BadParameter,
)
from click_completion import init as init_completion
from click_completion import get_code
from click_didyoumean import DYMCommandCollection
import crayons
import delegator
from .__version__ import __version__
from . import environments
from .environments import *
from .utils import is_valid_url
# Enable shell completion.
init_completion()
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
class PipenvGroup(Group):
"""Custom Group class provides formatted main help"""
def get_help_option(self, ctx):
from .core import format_help
"""Override for showing formatted main help via --help and -h options"""
help_options = self.get_help_option_names(ctx)
if not help_options or not self.add_help_option:
return
def show_help(ctx, param, value):
if value and not ctx.resilient_parsing:
if not ctx.invoked_subcommand:
# legit main help
echo(format_help(ctx.get_help()))
else:
# legit sub-command help
echo(ctx.get_help(), color=ctx.color)
ctx.exit()
return Option(
help_options,
is_flag=True,
is_eager=True,
expose_value=False,
callback=show_help,
help='Show this message and exit.',
)
def setup_verbose(ctx, param, value):
if value:
import logging
logging.getLogger('pip').setLevel(logging.INFO)
return value
def validate_python_path(ctx, param, value):
# Validating the Python path is complicated by accepting a number of
# friendly options: the default will be boolean False to enable
# autodetection but it may also be a value which will be searched in
# the path or an absolute path. To report errors as early as possible
# we'll report absolute paths which do not exist:
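    # For example (illustrative only): a value such as "3.6" is left for the
    # later PATH search, while "/usr/bin/python3.6" must already exist here or
    # a BadParameter is raised.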
if isinstance(value, (str, bytes)):
if os.path.isabs(value) and not os.path.isfile(value):
raise BadParameter('Expected Python at path %s does not exist' % value)
return value
def validate_pypi_mirror(ctx, param, value):
if value and not is_valid_url(value):
raise BadParameter('Invalid PyPI mirror URL: %s' % value)
return value
@group(
cls=PipenvGroup,
invoke_without_command=True,
context_settings=CONTEXT_SETTINGS,
)
@option(
'--where',
is_flag=True,
default=False,
help="Output project home information.",
)
@option(
'--venv',
is_flag=True,
default=False,
help="Output virtualenv information.",
)
@option(
'--py',
is_flag=True,
default=False,
help="Output Python interpreter information.",
)
@option(
'--envs',
is_flag=True,
default=False,
help="Output Environment Variable options.",
)
@option(
'--rm', is_flag=True, default=False, help="Remove the virtualenv."
)
@option('--bare', is_flag=True, default=False, help="Minimal output.")
@option(
'--completion',
is_flag=True,
default=False,
help="Output completion (to be eval'd).",
)
@option('--man', is_flag=True, default=False, help="Display manpage.")
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
@option(
'--site-packages',
is_flag=True,
default=False,
help="Enable site-packages for the virtualenv.",
)
@version_option(
prog_name=crayons.normal('pipenv', bold=True), version=__version__
)
@pass_context
def cli(
ctx,
where=False,
venv=False,
rm=False,
bare=False,
three=False,
python=False,
help=False,
py=False,
site_packages=False,
envs=False,
man=False,
completion=False,
):
if completion: # Handle this ASAP to make shell startup fast.
if PIPENV_SHELL:
echo(
get_code(
shell=PIPENV_SHELL.split(os.sep)[-1], prog_name='pipenv'
)
)
else:
echo(
'Please ensure that the {0} environment variable '
'is set.'.format(crayons.normal('SHELL', bold=True)),
err=True,
)
sys.exit(1)
sys.exit(0)
from .core import (
system_which,
do_py,
warn_in_virtualenv,
do_where,
project,
spinner,
cleanup_virtualenv,
ensure_project,
format_help
)
if man:
if system_which('man'):
path = os.sep.join([os.path.dirname(__file__), 'pipenv.1'])
os.execle(system_which('man'), 'man', path, os.environ)
else:
echo(
'man does not appear to be available on your system.', err=True
)
if envs:
echo(
'The following environment variables can be set, to do various things:\n'
)
for key in environments.__dict__:
if key.startswith('PIPENV'):
echo(' - {0}'.format(crayons.normal(key, bold=True)))
echo(
'\nYou can learn more at:\n {0}'.format(
crayons.green(
'http://docs.pipenv.org/advanced/#configuration-with-environment-variables'
)
)
)
sys.exit(0)
warn_in_virtualenv()
if ctx.invoked_subcommand is None:
# --where was passed...
if where:
do_where(bare=True)
sys.exit(0)
elif py:
do_py()
sys.exit()
# --venv was passed...
elif venv:
# There is no virtualenv yet.
if not project.virtualenv_exists:
echo(
crayons.red(
'No virtualenv has been created for this project yet!'
),
err=True,
)
sys.exit(1)
else:
echo(project.virtualenv_location)
sys.exit(0)
# --rm was passed...
elif rm:
# Abort if --system (or running in a virtualenv).
if PIPENV_USE_SYSTEM:
echo(
crayons.red(
'You are attempting to remove a virtualenv that '
'Pipenv did not create. Aborting.'
)
)
sys.exit(1)
if project.virtualenv_exists:
loc = project.virtualenv_location
echo(
crayons.normal(
u'{0} ({1})…'.format(
crayons.normal('Removing virtualenv', bold=True),
crayons.green(loc),
)
)
)
with spinner():
# Remove the virtualenv.
cleanup_virtualenv(bare=True)
sys.exit(0)
else:
echo(
crayons.red(
'No virtualenv has been created for this project yet!',
bold=True,
),
err=True,
)
sys.exit(1)
# --two / --three was passed...
if (python or three is not None) or site_packages:
ensure_project(
three=three, python=python, warn=True, site_packages=site_packages
)
# Check this again before exiting for empty ``pipenv`` command.
elif ctx.invoked_subcommand is None:
# Display help to user, if no commands were passed.
echo(format_help(ctx.get_help()))
@command(
short_help="Installs provided packages and adds them to Pipfile, or (if none is given), installs all packages.",
context_settings=dict(ignore_unknown_options=True, allow_extra_args=True),
)
@argument('package_name', default=False)
@argument('more_packages', nargs=-1)
@option(
'--dev',
'-d',
is_flag=True,
default=False,
help="Install package(s) in [dev-packages].",
)
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
@option(
'--pypi-mirror',
default=PIPENV_PYPI_MIRROR,
nargs=1,
callback=validate_pypi_mirror,
help="Specify a PyPI mirror.",
)
@option(
'--system', is_flag=True, default=False, help="System pip management."
)
@option(
'--requirements',
'-r',
nargs=1,
default=False,
help="Import a requirements.txt file.",
)
@option(
'--code', '-c', nargs=1, default=False, help="Import from codebase."
)
@option(
'--verbose',
'-v',
is_flag=True,
default=False,
help="Verbose mode.",
callback=setup_verbose,
)
@option(
'--ignore-pipfile',
is_flag=True,
default=False,
help="Ignore Pipfile when installing, using the Pipfile.lock.",
)
@option(
'--sequential',
is_flag=True,
default=False,
help="Install dependencies one-at-a-time, instead of concurrently.",
)
@option(
'--skip-lock',
is_flag=True,
default=False,
help=u"Ignore locking mechanisms when installing—use the Pipfile, instead.",
)
@option(
'--deploy',
is_flag=True,
default=False,
help=u"Abort if the Pipfile.lock is out–of–date, or Python version is wrong.",
)
@option(
'--pre', is_flag=True, default=False, help=u"Allow pre–releases."
)
@option(
'--keep-outdated',
is_flag=True,
default=False,
help=u"Keep out–dated dependencies from being updated in Pipfile.lock.",
)
@option(
'--selective-upgrade',
is_flag=True,
default=False,
help="Update specified packages.",
)
def install(
package_name=False,
more_packages=False,
dev=False,
three=False,
python=False,
pypi_mirror=None,
system=False,
lock=True,
ignore_pipfile=False,
skip_lock=False,
verbose=False,
requirements=False,
sequential=False,
pre=False,
code=False,
deploy=False,
keep_outdated=False,
selective_upgrade=False,
):
from .core import do_install
do_install(
package_name=package_name,
more_packages=more_packages,
dev=dev,
three=three,
python=python,
pypi_mirror=pypi_mirror,
system=system,
lock=lock,
ignore_pipfile=ignore_pipfile,
skip_lock=skip_lock,
verbose=verbose,
requirements=requirements,
sequential=sequential,
pre=pre,
code=code,
deploy=deploy,
keep_outdated=keep_outdated,
selective_upgrade=selective_upgrade,
)
@command(
short_help="Un-installs a provided package and removes it from Pipfile."
)
@argument('package_name', default=False)
@argument('more_packages', nargs=-1)
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
@option(
'--system', is_flag=True, default=False, help="System pip management."
)
@option(
'--verbose',
'-v',
is_flag=True,
default=False,
help="Verbose mode.",
callback=setup_verbose,
)
@option('--lock', is_flag=True, default=True, help="Lock afterwards.")
@option(
'--all-dev',
is_flag=True,
default=False,
help="Un-install all package from [dev-packages].",
)
@option(
'--all',
is_flag=True,
default=False,
help="Purge all package(s) from virtualenv. Does not edit Pipfile.",
)
@option(
'--keep-outdated',
is_flag=True,
default=False,
help=u"Keep out–dated dependencies from being updated in Pipfile.lock.",
)
@option(
'--pypi-mirror',
default=PIPENV_PYPI_MIRROR,
nargs=1,
callback=validate_pypi_mirror,
help="Specify a PyPI mirror.",
)
def uninstall(
package_name=False,
more_packages=False,
three=None,
python=False,
system=False,
lock=False,
all_dev=False,
all=False,
verbose=False,
keep_outdated=False,
pypi_mirror=None,
):
from .core import do_uninstall
do_uninstall(
package_name=package_name,
more_packages=more_packages,
three=three,
python=python,
system=system,
lock=lock,
all_dev=all_dev,
all=all,
verbose=verbose,
keep_outdated=keep_outdated,
pypi_mirror=pypi_mirror,
)
@command(short_help="Generates Pipfile.lock.")
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
@option(
'--pypi-mirror',
default=PIPENV_PYPI_MIRROR,
nargs=1,
callback=validate_pypi_mirror,
help="Specify a PyPI mirror.",
)
@option(
'--verbose',
'-v',
is_flag=True,
default=False,
help="Verbose mode.",
callback=setup_verbose,
)
@option(
'--requirements',
'-r',
is_flag=True,
default=False,
help="Generate output compatible with requirements.txt.",
)
@option(
'--dev',
'-d',
is_flag=True,
default=False,
help="Generate output compatible with requirements.txt for the development dependencies.",
)
@option(
'--clear', is_flag=True, default=False, help="Clear the dependency cache."
)
@option(
'--pre', is_flag=True, default=False, help=u"Allow pre–releases."
)
@option(
'--keep-outdated',
is_flag=True,
default=False,
help=u"Keep out–dated dependencies from being updated in Pipfile.lock.",
)
def lock(
three=None,
python=False,
pypi_mirror=None,
verbose=False,
requirements=False,
dev=False,
clear=False,
pre=False,
keep_outdated=False,
):
from .core import ensure_project, do_init, do_lock
# Ensure that virtualenv is available.
ensure_project(three=three, python=python)
if requirements:
do_init(dev=dev, requirements=requirements, pypi_mirror=pypi_mirror)
do_lock(
verbose=verbose, clear=clear, pre=pre, keep_outdated=keep_outdated, pypi_mirror=pypi_mirror
)
@command(
short_help="Spawns a shell within the virtualenv.",
context_settings=dict(ignore_unknown_options=True, allow_extra_args=True),
)
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
@option(
'--fancy',
is_flag=True,
default=False,
help="Run in shell in fancy mode (for elegantly configured shells).",
)
@option(
'--anyway',
is_flag=True,
default=False,
help="Always spawn a subshell, even if one is already spawned.",
)
@argument('shell_args', nargs=-1)
def shell(
three=None, python=False, fancy=False, shell_args=None, anyway=False
):
from .core import load_dot_env, do_shell
# Prevent user from activating nested environments.
if 'PIPENV_ACTIVE' in os.environ:
# If PIPENV_ACTIVE is set, VIRTUAL_ENV should always be set too.
venv_name = os.environ.get(
'VIRTUAL_ENV', 'UNKNOWN_VIRTUAL_ENVIRONMENT'
)
if not anyway:
echo(
'{0} {1} {2}\nNo action taken to avoid nested environments.'.format(
crayons.normal('Shell for'),
crayons.green(venv_name, bold=True),
crayons.normal('already activated.', bold=True),
),
err=True,
)
sys.exit(1)
# Load .env file.
load_dot_env()
# Use fancy mode for Windows.
if os.name == 'nt':
fancy = True
do_shell(
three=three, python=python, fancy=fancy, shell_args=shell_args
)
@command(
add_help_option=False,
short_help="Spawns a command installed into the virtualenv.",
context_settings=dict(
ignore_unknown_options=True,
allow_interspersed_args=False,
allow_extra_args=True,
),
)
@argument('command')
@argument('args', nargs=-1)
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
def run(command, args, three=None, python=False):
from .core import do_run
do_run(command=command, args=args, three=three, python=python)
@command(
short_help="Checks for security vulnerabilities and against PEP 508 markers provided in Pipfile.",
context_settings=dict(ignore_unknown_options=True, allow_extra_args=True),
)
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
@option(
'--system', is_flag=True, default=False, help="Use system Python."
)
@option(
'--unused',
nargs=1,
default=False,
help="Given a code path, show potentially unused dependencies.",
)
@argument('args', nargs=-1)
def check(
three=None,
python=False,
system=False,
unused=False,
style=False,
args=None,
):
from .core import do_check
do_check(
three=three, python=python, system=system, unused=unused, args=args
)
@command(short_help="Runs lock, then sync.")
@argument('more_packages', nargs=-1)
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
@option(
'--pypi-mirror',
default=PIPENV_PYPI_MIRROR,
nargs=1,
callback=validate_pypi_mirror,
help="Specify a PyPI mirror.",
)
@option(
'--verbose',
'-v',
is_flag=True,
default=False,
help="Verbose mode.",
callback=setup_verbose,
)
@option(
'--dev',
'-d',
is_flag=True,
default=False,
help="Install package(s) in [dev-packages].",
)
@option(
'--clear', is_flag=True, default=False, help="Clear the dependency cache."
)
@option('--bare', is_flag=True, default=False, help="Minimal output.")
@option(
'--pre', is_flag=True, default=False, help=u"Allow pre–releases."
)
@option(
'--keep-outdated',
is_flag=True,
default=False,
help=u"Keep out–dated dependencies from being updated in Pipfile.lock.",
)
@option(
'--sequential',
is_flag=True,
default=False,
help="Install dependencies one-at-a-time, instead of concurrently.",
)
@option(
'--outdated',
is_flag=True,
default=False,
help=u"List out–of–date dependencies.",
)
@option(
'--dry-run',
is_flag=True,
default=None,
help=u"List out–of–date dependencies.",
)
@argument('package', default=False)
@pass_context
def update(
ctx,
three=None,
python=False,
pypi_mirror=None,
system=False,
verbose=False,
clear=False,
keep_outdated=False,
pre=False,
dev=False,
bare=False,
sequential=False,
package=None,
dry_run=None,
outdated=False,
more_packages=None,
):
from .core import (
ensure_project,
do_outdated,
do_lock,
do_sync,
ensure_lockfile,
do_install,
project,
)
ensure_project(three=three, python=python, warn=True)
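    # --dry-run is treated as an alias for --outdated: report what is out of date.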
if not outdated:
outdated = bool(dry_run)
if outdated:
do_outdated(pypi_mirror=pypi_mirror)
if not package:
echo(
'{0} {1} {2} {3}{4}'.format(
crayons.white('Running', bold=True),
crayons.red('$ pipenv lock', bold=True),
crayons.white('then', bold=True),
crayons.red('$ pipenv sync', bold=True),
crayons.white('.', bold=True),
)
)
do_lock(
verbose=verbose, clear=clear, pre=pre, keep_outdated=keep_outdated, pypi_mirror=pypi_mirror
)
do_sync(
ctx=ctx,
install=install,
dev=dev,
three=three,
python=python,
bare=bare,
dont_upgrade=False,
user=False,
verbose=verbose,
clear=clear,
unused=False,
sequential=sequential,
pypi_mirror=pypi_mirror,
)
else:
for package in ([package] + list(more_packages) or []):
if package not in project.all_packages:
echo(
'{0}: {1} was not found in your Pipfile! Aborting.'
''.format(
crayons.red('Warning', bold=True),
crayons.green(package, bold=True),
),
err=True,
)
sys.exit(1)
ensure_lockfile(keep_outdated=project.lockfile_exists, pypi_mirror=pypi_mirror)
# Install the dependencies.
do_install(
package_name=package,
more_packages=more_packages,
dev=dev,
three=three,
python=python,
pypi_mirror=pypi_mirror,
system=system,
lock=True,
ignore_pipfile=False,
skip_lock=False,
verbose=verbose,
requirements=False,
sequential=sequential,
pre=pre,
code=False,
deploy=False,
keep_outdated=True,
selective_upgrade=True,
)
@command(
short_help=u"Displays currently–installed dependency graph information."
)
@option('--bare', is_flag=True, default=False, help="Minimal output.")
@option('--json', is_flag=True, default=False, help="Output JSON.")
@option('--json-tree', is_flag=True, default=False, help="Output JSON in nested tree.")
@option(
'--reverse', is_flag=True, default=False, help="Reversed dependency graph."
)
def graph(bare=False, json=False, json_tree=False, reverse=False):
from .core import do_graph
do_graph(bare=bare, json=json, json_tree=json_tree, reverse=reverse)
@command(short_help="View a given module in your editor.", name="open")
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
@argument('module', nargs=1)
def run_open(module, three=None, python=None):
from .core import which, ensure_project
# Ensure that virtualenv is available.
ensure_project(three=three, python=python, validate=False)
c = delegator.run(
'{0} -c "import {1}; print({1}.__file__);"'.format(
which('python'), module
)
)
try:
assert c.return_code == 0
except AssertionError:
echo(crayons.red('Module not found!'))
sys.exit(1)
if '__init__.py' in c.out:
p = os.path.dirname(c.out.strip().rstrip('cdo'))
else:
p = c.out.strip().rstrip('cdo')
echo(
crayons.normal('Opening {0!r} in your EDITOR.'.format(p), bold=True)
)
edit(filename=p)
sys.exit(0)
@command(short_help="Installs all packages specified in Pipfile.lock.")
@option(
'--verbose',
'-v',
is_flag=True,
default=False,
help="Verbose mode.",
callback=setup_verbose,
)
@option(
'--dev',
'-d',
is_flag=True,
default=False,
help="Additionally install package(s) in [dev-packages].",
)
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
@option(
'--pypi-mirror',
default=PIPENV_PYPI_MIRROR,
nargs=1,
callback=validate_pypi_mirror,
help="Specify a PyPI mirror.",
)
@option('--bare', is_flag=True, default=False, help="Minimal output.")
@option(
'--clear', is_flag=True, default=False, help="Clear the dependency cache."
)
@option(
'--sequential',
is_flag=True,
default=False,
help="Install dependencies one-at-a-time, instead of concurrently.",
)
@pass_context
def sync(
ctx,
dev=False,
three=None,
python=None,
bare=False,
dont_upgrade=False,
user=False,
verbose=False,
clear=False,
unused=False,
package_name=None,
sequential=False,
pypi_mirror=None,
):
from .core import do_sync
do_sync(
ctx=ctx,
install=install,
dev=dev,
three=three,
python=python,
bare=bare,
dont_upgrade=dont_upgrade,
user=user,
verbose=verbose,
clear=clear,
unused=unused,
sequential=sequential,
pypi_mirror=pypi_mirror,
)
@command(
short_help="Uninstalls all packages not specified in Pipfile.lock."
)
@option(
'--verbose',
'-v',
is_flag=True,
default=False,
help="Verbose mode.",
callback=setup_verbose,
)
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
@option(
'--dry-run',
is_flag=True,
default=False,
help="Just output unneeded packages.",
)
@pass_context
def clean(
ctx,
three=None,
python=None,
dry_run=False,
bare=False,
user=False,
verbose=False,
):
from .core import do_clean
do_clean(
ctx=ctx, three=three, python=python, dry_run=dry_run, verbose=verbose
)
# Install click commands.
cli.add_command(graph)
cli.add_command(install)
cli.add_command(uninstall)
cli.add_command(sync)
cli.add_command(lock)
cli.add_command(check)
cli.add_command(clean)
cli.add_command(shell)
cli.add_command(run)
cli.add_command(update)
cli.add_command(run_open)
# Only invoke the "did you mean" when an argument wasn't passed (it breaks those).
if '-' not in ''.join(sys.argv) and len(sys.argv) > 1:
cli = DYMCommandCollection(sources=[cli])
if __name__ == '__main__':
cli()
| 25.212766 | 116 | 0.60444 | [
"MIT"
] | mlhamel/pipenv | pipenv/cli.py | 27,287 | Python |
import sys
import pandas as pd
def pci_wld_eci(eci_file_path, pci_file_path, ymp, ymw, year):
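    """Attach complexity columns to the monthly aggregates.
    Inferred from the code below: the PCI file (one value per hs_id product)
    and the ECI file (one value per wld_id partner) are read, re-indexed by
    (year, month, id), and joined onto the ymp and ymw dataframes as the
    'pci' and 'eci' columns respectively.
    """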
pcis = pd.read_csv(pci_file_path, sep="\t", compression="bz2", converters={"hs_id": str})
pcis["year"] = int(year)
pcis["month"] = "00"
pcis = pcis.set_index(["year", "month", "hs_id"])
ecis = pd.read_csv(eci_file_path, sep="\t", compression="bz2")
ecis["year"] = int(year)
ecis["month"] = "00"
ecis = ecis.set_index(["year", "month", "wld_id"])
ymp["pci"] = pcis["pci"]
ymw["eci"] = ecis["eci"]
return [ymp, ymw]
| 28.8 | 93 | 0.583333 | [
"MIT"
] | DataViva/dataviva-scripts | scripts/secex_monthly/_pci_wld_eci.py | 576 | Python |
import json
import os
import argparse
def main(split):
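    """Flatten the interaction file for the given split into one example per turn.
    For each turn the current utterance is suffixed with '>>>' and concatenated
    with the previous turns' questions, '*' characters are stripped, and the
    flattened list is written back to <data_path>/<split>.json.
    """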
with open(args.data_path + '/' + split + '.json') as f:
data = json.load(f)
sparc = []
for i in range(len(data)):
d = data[i]
for j in range(len(d['interaction'])):
turn = d['interaction'][j]
sparc.append({})
sparc[-1]['interaction_id'] = i + 1
sparc[-1]['turn_id'] = j + 1
sparc[-1]['db_id'] = d['database_id']
sparc[-1]['query'] = turn['query']
sparc[-1]['question'] = turn['utterance'].replace('“', '\"').replace(
'”', '\"').replace('‘', '\"').replace('’', '\"') + '>>>'
sparc[-1]['query_toks_no_value'] = turn['query_toks_no_value']
sparc[-1]['question_toks'] = turn['utterance_toks']
if j:
sparc[-1]['question'] = sparc[-1]['question'] + \
sparc[-2]['question']
sparc[-1]['sql'] = turn['sql']
sparc[-1]['question'] = sparc[-1]['question'].replace('*', '')
sparc[-1]['question_toks'] = [tok.replace('*', '')
for tok in sparc[-1]['question_toks'] if tok != '*']
with open(os.path.join(args.data_path, split) + '.json', 'w') as f:
json.dump(sparc, f, ensure_ascii=False, indent=2)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--data_path", '-dp', type=str)
args = parser.parse_args()
for split in ['train', 'dev']:
main(split)
print('convert done')
| 35.840909 | 94 | 0.492708 | [
"MIT"
] | WDZRMPCBIT/chase | DuoratChar/data/convert.py | 1,585 | Python |
#!/usr/bin/env python3
import sys
import math
import numpy as np
import sympy as sp
from selfdrive.locationd.models.constants import ObservationKind
from rednose.helpers.ekf_sym import EKF_sym, gen_code
i = 0
def _slice(n):
global i
s = slice(i, i + n)
i += n
return s
class States():
# Vehicle model params
STIFFNESS = _slice(1) # [-]
STEER_RATIO = _slice(1) # [-]
ANGLE_OFFSET = _slice(1) # [rad]
ANGLE_OFFSET_FAST = _slice(1) # [rad]
VELOCITY = _slice(2) # (x, y) [m/s]
YAW_RATE = _slice(1) # [rad/s]
STEER_ANGLE = _slice(1) # [rad]
class CarKalman():
name = 'car'
x_initial = np.array([
1.0,
15.0,
0.0,
0.0,
10.0, 0.0,
0.0,
0.0,
])
# process noise
Q = np.diag([
(.05/100)**2,
.01**2,
math.radians(0.002)**2,
math.radians(0.1)**2,
.1**2, .01**2,
math.radians(0.1)**2,
math.radians(0.1)**2,
])
P_initial = Q.copy()
obs_noise = {
ObservationKind.STEER_ANGLE: np.atleast_2d(math.radians(0.01)**2),
ObservationKind.ANGLE_OFFSET_FAST: np.atleast_2d(math.radians(5.0)**2),
ObservationKind.STEER_RATIO: np.atleast_2d(5.0**2),
ObservationKind.STIFFNESS: np.atleast_2d(5.0**2),
ObservationKind.ROAD_FRAME_X_SPEED: np.atleast_2d(0.1**2),
}
maha_test_kinds = [] # [ObservationKind.ROAD_FRAME_YAW_RATE, ObservationKind.ROAD_FRAME_XY_SPEED]
global_vars = [
sp.Symbol('mass'),
sp.Symbol('rotational_inertia'),
sp.Symbol('center_to_front'),
sp.Symbol('center_to_rear'),
sp.Symbol('stiffness_front'),
sp.Symbol('stiffness_rear'),
]
@staticmethod
def generate_code(generated_dir):
dim_state = CarKalman.x_initial.shape[0]
name = CarKalman.name
maha_test_kinds = CarKalman.maha_test_kinds
# globals
m, j, aF, aR, cF_orig, cR_orig = CarKalman.global_vars
# make functions and jacobians with sympy
# state variables
state_sym = sp.MatrixSymbol('state', dim_state, 1)
state = sp.Matrix(state_sym)
# Vehicle model constants
x = state[States.STIFFNESS, :][0, 0]
cF, cR = x * cF_orig, x * cR_orig
angle_offset = state[States.ANGLE_OFFSET, :][0, 0]
angle_offset_fast = state[States.ANGLE_OFFSET_FAST, :][0, 0]
sa = state[States.STEER_ANGLE, :][0, 0]
sR = state[States.STEER_RATIO, :][0, 0]
u, v = state[States.VELOCITY, :]
r = state[States.YAW_RATE, :][0, 0]
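    # Lateral dynamics written as x_dot = A*x + B*steer for x = [v, r]
    # (lateral velocity, yaw rate): the standard linear single-track
    # ("bicycle") model, linearized around the current forward speed u.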
A = sp.Matrix(np.zeros((2, 2)))
A[0, 0] = -(cF + cR) / (m * u)
A[0, 1] = -(cF * aF - cR * aR) / (m * u) - u
A[1, 0] = -(cF * aF - cR * aR) / (j * u)
A[1, 1] = -(cF * aF**2 + cR * aR**2) / (j * u)
B = sp.Matrix(np.zeros((2, 1)))
B[0, 0] = cF / m / sR
B[1, 0] = (cF * aF) / j / sR
x = sp.Matrix([v, r]) # lateral velocity, yaw rate
x_dot = A * x + B * (sa - angle_offset - angle_offset_fast)
dt = sp.Symbol('dt')
state_dot = sp.Matrix(np.zeros((dim_state, 1)))
state_dot[States.VELOCITY.start + 1, 0] = x_dot[0]
state_dot[States.YAW_RATE.start, 0] = x_dot[1]
    # Basic discretization, 1st order integrator
# Can be pretty bad if dt is big
f_sym = state + dt * state_dot
#
# Observation functions
#
obs_eqs = [
[sp.Matrix([r]), ObservationKind.ROAD_FRAME_YAW_RATE, None],
[sp.Matrix([u, v]), ObservationKind.ROAD_FRAME_XY_SPEED, None],
[sp.Matrix([u]), ObservationKind.ROAD_FRAME_X_SPEED, None],
[sp.Matrix([sa]), ObservationKind.STEER_ANGLE, None],
[sp.Matrix([angle_offset_fast]), ObservationKind.ANGLE_OFFSET_FAST, None],
[sp.Matrix([sR]), ObservationKind.STEER_RATIO, None],
[sp.Matrix([x]), ObservationKind.STIFFNESS, None],
]
gen_code(generated_dir, name, f_sym, dt, state_sym, obs_eqs, dim_state, dim_state, maha_test_kinds=maha_test_kinds, global_vars=CarKalman.global_vars)
def __init__(self, generated_dir, steer_ratio=15, stiffness_factor=1, angle_offset=0):
self.dim_state = self.x_initial.shape[0]
x_init = self.x_initial
x_init[States.STEER_RATIO] = steer_ratio
x_init[States.STIFFNESS] = stiffness_factor
x_init[States.ANGLE_OFFSET] = angle_offset
# init filter
self.filter = EKF_sym(generated_dir, self.name, self.Q, self.x_initial, self.P_initial, self.dim_state, self.dim_state, maha_test_kinds=self.maha_test_kinds, global_vars=self.global_vars)
@property
def x(self):
return self.filter.state()
@property
def P(self):
return self.filter.covs()
def predict(self, t):
return self.filter.predict(t)
def rts_smooth(self, estimates):
return self.filter.rts_smooth(estimates, norm_quats=False)
def get_R(self, kind, n):
obs_noise = self.obs_noise[kind]
dim = obs_noise.shape[0]
R = np.zeros((n, dim, dim))
for i in range(n):
R[i, :, :] = obs_noise
return R
def init_state(self, state, covs_diag=None, covs=None, filter_time=None):
if covs_diag is not None:
P = np.diag(covs_diag)
elif covs is not None:
P = covs
else:
P = self.filter.covs()
self.filter.init_state(state, P, filter_time)
def predict_and_observe(self, t, kind, data, R=None):
if len(data) > 0:
data = np.atleast_2d(data)
if R is None:
R = self.get_R(kind, len(data))
self.filter.predict_and_update_batch(t, kind, data, R)
if __name__ == "__main__":
generated_dir = sys.argv[2]
CarKalman.generate_code(generated_dir)
| 27.494898 | 191 | 0.642234 | [
"MIT"
] | qiubit/openpilot | selfdrive/locationd/models/car_kf.py | 5,389 | Python |
import os
def iterate_files(directory):
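    """Yield paths of Trillian XML log files found under ``directory``.
    Files are matched by _is_log_file below: the extension must be '.xml'
    and the basename must not end with '-assets'.
    """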
for root, dirs, filenames in os.walk(directory):
for filename in filenames:
if _is_log_file(filename):
yield os.path.join(root, filename)
def _is_log_file(filename):
basename, extension = os.path.splitext(filename)
return ((extension == '.xml') and (not basename.endswith('-assets')))
| 26.928571 | 73 | 0.660477 | [
"MIT"
] | mtlynch/chat_unifier | chat_unifier/file_iterators/trillian_xml.py | 377 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests_cache
import datetime
import pandas as pd
from datetime import timedelta
from pandas.io.common import ZipFile
from pandas.compat import BytesIO, StringIO, PY2
def main():
expire_after = timedelta(days=1)
if PY2:
filename = 'cache_py2'
else:
filename = 'cache'
session = requests_cache.CachedSession(cache_name=filename, expire_after=expire_after)
dt = pd.to_datetime("2014-01-01")
symbol = "AUD/USD"
symbol = symbol.replace("/", "").upper()
year = dt.year
month = dt.month
month_name = datetime.datetime(year=1970, month=month, day=1).strftime('%B').upper()
#url = "http://www.truefx.com/dev/data/2014/JANUARY-2014/AUDUSD-2014-01.zip"
url = "http://www.truefx.com/dev/data/{year:04d}/{month_name}-{year:04d}/{symbol}-{year:04d}-{month:02d}.zip".format(year=year, month=month, symbol=symbol, month_name=month_name)
response = session.get(url)
zip_data = BytesIO(response.content)
filename = "{symbol}-{year:04d}-{month:02d}.csv".format(year=year, month=month, symbol=symbol)
with ZipFile(zip_data, 'r') as zf:
#filename = zf.namelist()[0]
zfile = zf.open(filename)
#print(zfile)
#(symb, dt, ask, bid) = zfile.read().split(',')
#print(zfile.__dict__)
data = zfile.readlines()
#df = pd.read_csv(zfile._fileobj) # ToFix: can't make it work correctly
#return
df = pd.DataFrame(data)
#df = df[:100] # just for test
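    # Each raw line appears to be "SYMBOL,timestamp,bid,ask" (TrueFX tick CSV);
    # decode the bytes, split on commas and spread the fields into typed columns.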
df[0] = df[0].str.decode('utf8')
df[0] = df[0].str.replace('\n', '')
df[0] = df[0].map(lambda s: s.split(','))
df['Symbol'] = df[0].map(lambda t: t[0])
df['Date'] = df[0].map(lambda t: pd.to_datetime(t[1]))
df['Bid'] = df[0].map(lambda t: t[2]).astype(float)
df['Ask'] = df[0].map(lambda t: t[3]).astype(float)
del df[0]
df = df.set_index('Date')
print(df)
if __name__ == "__main__":
main()
| 34.37931 | 182 | 0.622869 | [
"BSD-3-Clause"
] | femtotrader/pandas_datareaders | draft/truefx/truefx_tick.py | 1,994 | Python |
from rasa.core.events import ActionExecuted, SlotSet, UserUttered
from rasa.core.training import visualization
def test_style_transfer():
r = visualization._transfer_style({"class": "dashed great"}, {"class": "myclass"})
assert r["class"] == "myclass dashed"
def test_style_transfer_empty():
r = visualization._transfer_style({"class": "dashed great"}, {"something": "else"})
assert r["class"] == "dashed"
def test_common_action_prefix():
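    # the two trackers share the first four events, three of which are
    # ActionExecuted events, so the common *action* prefix has length 3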
this = [
ActionExecuted("action_listen"),
ActionExecuted("greet"),
UserUttered("hey"),
ActionExecuted("amazing"),
# until this point they are the same
SlotSet("my_slot", "a"),
ActionExecuted("a"),
ActionExecuted("after_a"),
]
other = [
ActionExecuted("action_listen"),
ActionExecuted("greet"),
UserUttered("hey"),
ActionExecuted("amazing"),
# until this point they are the same
SlotSet("my_slot", "b"),
ActionExecuted("b"),
ActionExecuted("after_b"),
]
num_common = visualization._length_of_common_action_prefix(this, other)
assert num_common == 3
def test_common_action_prefix_equal():
this = [
ActionExecuted("action_listen"),
ActionExecuted("greet"),
UserUttered("hey"),
ActionExecuted("amazing"),
]
other = [
ActionExecuted("action_listen"),
ActionExecuted("greet"),
UserUttered("hey"),
ActionExecuted("amazing"),
]
num_common = visualization._length_of_common_action_prefix(this, other)
assert num_common == 3
def test_common_action_prefix_unequal():
this = [
ActionExecuted("action_listen"),
ActionExecuted("greet"),
UserUttered("hey"),
]
other = [
ActionExecuted("greet"),
ActionExecuted("action_listen"),
UserUttered("hey"),
]
num_common = visualization._length_of_common_action_prefix(this, other)
assert num_common == 0
async def test_graph_persistence(default_domain, tmpdir):
from os.path import isfile
from networkx.drawing import nx_pydot
from rasa.core.training.dsl import StoryFileReader
from rasa.core.interpreter import RegexInterpreter
story_steps = await StoryFileReader.read_from_file(
"data/test_stories/stories.md", default_domain, interpreter=RegexInterpreter()
)
out_file = tmpdir.join("graph.html").strpath
generated_graph = await visualization.visualize_stories(
story_steps,
default_domain,
output_file=out_file,
max_history=3,
should_merge_nodes=False,
)
generated_graph = nx_pydot.to_pydot(generated_graph)
assert isfile(out_file)
with open(out_file, "r") as graph_file:
content = graph_file.read()
assert "isClient = true" in content
assert "graph = `{}`".format(generated_graph.to_string()) in content
| 28.754902 | 87 | 0.65837 | [
"Apache-2.0"
] | 1tupac/rasa | tests/core/test_visualization.py | 2,933 | Python |
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
"""
This module contains a collection of graph theory routines used mainly
to reorder matrices for iterative steady state solvers.
"""
import numpy as np
import scipy.sparse as sp
from qutip.cy.graph_utils import (
_pseudo_peripheral_node, _breadth_first_search, _node_degrees,
_rcm, _bfs_matching, _weighted_bfs_matching)
from qutip.settings import debug
from warnings import warn
if debug:
import inspect
def graph_degree(A):
"""
Returns the degree for the nodes (rows) of a symmetric
graph in sparse CSR or CSC format, or a qobj.
Parameters
----------
A : qobj, csr_matrix, csc_matrix
Input quantum object or csr_matrix.
Returns
-------
degree : array
Array of integers giving the degree for each node (row).
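    Examples
    --------
    A minimal sketch (illustrative only; ``A`` is assumed to be a symmetric
    sparse CSR matrix or Qobj built elsewhere):
    >>> deg = graph_degree(A)  # deg[i] is the degree of node (row) i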
"""
if A.__class__.__name__=='Qobj':
return _node_degrees(A.data.indices, A.data.indptr, A.shape[0])
else:
return _node_degrees(A.indices, A.indptr, A.shape[0])
def breadth_first_search(A,start):
"""
Breadth-First-Search (BFS) of a graph in CSR or CSC matrix format starting
from a given node (row). Takes Qobjs and CSR or CSC matrices as inputs.
This function requires a matrix with symmetric structure.
Use A+trans(A) if original matrix is not symmetric or not sure.
Parameters
----------
A : qobj, csr_matrix
Input graph in CSR matrix form
start : int
Staring node for BFS traversal.
Returns
-------
order : array
Order in which nodes are traversed from starting node.
levels : array
Level of the nodes in the order that they are traversed.
"""
if A.__class__.__name__=='Qobj':
A=A.data
num_rows=A.shape[0]
start=int(start)
order, levels = _breadth_first_search(A.indices,A.indptr, num_rows, start)
    # since maybe not all nodes are in the search, check for unused entries in the arrays
return order[order!=-1], levels[levels!=-1]
def symrcm(A, sym=False):
"""
Returns the permutation array that orders a sparse CSR or CSC matrix or Qobj
in Reverse-Cuthill McKee ordering. Since the input matrix must be symmetric,
this routine works on the matrix A+Trans(A) if the sym flag is set to False (Default).
It is assumed by default (*sym=False*) that the input matrix is not symmetric. This
is because it is faster to do A+Trans(A) than it is to check for symmetry for
a generic matrix. If you are guaranteed that the matrix is symmetric in structure
(values of matrix element do not matter) then set *sym=True*
Parameters
----------
A : csr_matrix, qobj
Input sparse csr_matrix or Qobj.
sym : bool {False, True}
Flag to set whether input matrix is symmetric.
Returns
-------
perm : array
Array of permuted row and column indices.
Notes
-----
This routine is used primarily for internal reordering of Lindblad super-operators
for use in iterative solver routines.
References
----------
E. Cuthill and J. McKee, "Reducing the Bandwidth of Sparse Symmetric Matrices",
ACM '69 Proceedings of the 1969 24th national conference, (1969).
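    Examples
    --------
    A minimal sketch (illustrative only; ``A`` is assumed to be a square
    sparse CSR matrix built elsewhere):
    >>> perm = symrcm(A)
    >>> A_rcm = A[perm, :][:, perm]  # rows and columns permuted into RCM order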
"""
nrows = A.shape[0]
if A.__class__.__name__=='Qobj':
if not sym:
A = A.data+A.data.transpose()
return _rcm(A.indices, A.indptr, nrows)
else:
return _rcm(A.data.indices, A.data.indptr, nrows)
else:
if not sym:
A=A+A.transpose()
return _rcm(A.indices, A.indptr, nrows)
def bfs_matching(A):
"""
    Returns an array of row permutations that removes zero elements
from the diagonal of a nonsingular square CSC sparse matrix. Such
a permutation is always possible provided that the matrix is
nonsingular.
This function looks at the structure of the matrix only.
Parameters
----------
A : csc_matrix
Input matrix
Returns
-------
perm : array
Array of row permutations.
Notes
-----
This function relies on a maximum cardinality bipartite matching algorithm
based on a breadth-first search (BFS) of the underlying graph[1]_.
References
----------
.. [1] I. S. Duff, K. Kaya, and B. Ucar, "Design, Implementation, and
Analysis of Maximum Transversal Algorithms", ACM Trans. Math. Softw.
38, no. 2, (2011).
"""
nrows = A.shape[0]
if A.shape[0]!=A.shape[1]:
raise ValueError('bfs_matching requires a square matrix.')
if A.__class__.__name__=='Qobj':
A = A.data.tocsc()
elif not sp.isspmatrix_csc(A):
A = sp.csc_matrix(A)
warn('bfs_matching requires CSC matrix format.',
sp.SparseEfficiencyWarning)
perm = _bfs_matching(A.indices, A.indptr, nrows)
if np.any(perm==-1):
raise Exception('Possibly singular input matrix.')
return perm
def weighted_bfs_matching(A):
"""
Returns an array of row permutations that attempts to maximize
the product of the ABS values of the diagonal elements in
a nonsingular square CSC sparse matrix. Such a permutation is
always possible provided that the matrix is nonsingular.
This function looks at both the structure and ABS values of the
underlying matrix.
Parameters
----------
A : csc_matrix
Input matrix
Returns
-------
perm : array
Array of row permutations.
Notes
-----
This function uses a weighted maximum cardinality bipartite matching
algorithm based on breadth-first search (BFS). The columns are weighted
according to the element of max ABS value in the associated rows and
are traversed in descending order by weight. When performing the BFS
traversal, the row associated to a given column is the one with maximum
weight. Unlike other techniques[1]_, this algorithm does not guarantee the
product of the diagonal is maximized. However, this limitation is offset
by the substantially faster runtime of this method.
References
----------
.. [1] I. S. Duff and J. Koster, "The design and use of algorithms for
permuting large entries to the diagonal of sparse matrices", SIAM J.
Matrix Anal. and Applics. 20, no. 4, 889 (1997).
"""
nrows = A.shape[0]
if A.shape[0]!=A.shape[1]:
raise ValueError('weighted_bfs_matching requires a square matrix.')
if A.__class__.__name__=='Qobj':
A = A.data.tocsc()
elif not sp.isspmatrix_csc(A):
A = sp.csc_matrix(A)
warn('weighted_bfs_matching requires CSC matrix format',
sp.SparseEfficiencyWarning)
perm = _weighted_bfs_matching(
np.asarray(np.abs(A.data), dtype=float),
A.indices, A.indptr, nrows)
if np.any(perm==-1):
raise Exception('Possibly singular input matrix.')
return perm
| 34.862205 | 90 | 0.658385 | [
"BSD-3-Clause"
] | trxw/qutip | qutip/graph.py | 8,855 | Python |
#!/usr/bin/env python
import sys
import json
import re
import logging
import os
import os.path
import codecs
import time
import conf
import logs
import kqueue
log = logging.getLogger(__name__)
# constant to select bind() for attaching the socket
BIND = 1
# constant to select connect() for attaching the socket
CONNECT = 2
SERVICE = ""
INITTED = False
KCONNECTION = None
def init(args=None):
# init logger
# load/get the config
# eventually this needs a search path for the config
# should be env(QFU_CONFIG);./queue.conf;/etc/embers/queue.conf;tcp://localhost:3473
# use 3473 as the global control channel
global SERVICE, INITTED
cf = None
conf.init(args)
if args and args.service:
SERVICE = args.service
else:
SERVICE = os.environ.get('UPSTART_JOB', "")
INITTED = True
def connect(force_new=False):
global KCONNECTION
if force_new:
return kqueue.connect()
else:
if not KCONNECTION:
KCONNECTION = kqueue.connect()
return KCONNECTION
class JsonMarshal(object):
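    """Encode outgoing messages as JSON text and decode incoming JSON payloads.
    When remove_newline is set, Unicode line-separator characters inside the
    encoded message are replaced so that each message stays on a single line.
    """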
def __init__(self, encoding='utf8', **kw):
# raises an error if you get a bogus encoding
codecs.lookup(encoding)
self.encoding = encoding
self.remove_newline = kw.get('remove_newline', False)
def encode(self, obj):
msg = json.dumps(obj, encoding=self.encoding, ensure_ascii=False)
# U+0085(Next Line), U+2028(Line Separator), U+2029(Paragraph Separator)
if self.remove_newline:
msg = re.sub(ur'[\u0085\u2028\u2029\n\r\f\v]+', ur'\\n', msg)
#msg = re.sub(ur'[\u0085\u2028\u2029\n\r\f\v]+|\\n|\\r|\\f|\\v', '\\n', msg)
#msg = msg.replace("…", '')
if isinstance(msg, str):
msg = unicode(msg)
return msg
def decode(self, data):
return json.loads(data, encoding=self.encoding)
#def send(self, socket, data, flags=0):
# socket.send_unicode(data, encoding=self.encoding, flags=flags)
#def recv(self, socket, flags=0):
# b = socket.recv(flags=flags)
# return unicode(b, encoding=self.encoding, errors='replace')
class UnicodeMarshal(JsonMarshal):
def __init__(self, **kw):
super(UnicodeMarshal, self).__init__(**kw)
def encode(self, obj):
return unicode(obj)
def decode(self, data):
# exception if this is not decodeable (str, stream etc.)
return unicode(data)
# send and recv are handled in JsonMarshall
class RawMarshal(object):
def encode(self, obj):
return obj
def decode(self, obj):
return obj
#def send(self, socket, data, flags=0):
# if isinstance(data, unicode):
# socket.send_unicode(data, flags)
# else:
# socket.send(data, flags=flags)
#def recv(self, socket, flags=0):
# return socket.recv(flags=flags)
class StreamCaptureProbe(object):
def __init__(self, encoding='utf8', stream=sys.stdout):
self._s = codecs.getwriter(encoding)(stream)
self._s.flush() # make sure its good
def __call__(self, action, message):
if action == Queue.SENT:
self._s.write(message)
self._s.write('\n')
self._s.flush()
class QueueStatsProbe(object):
def __init__(self, interval_min=5):
self.interval = datetime.timedelta(minutes=interval_min)
self.start = datetime.datetime.now()
self.sent_bytes = 0
self.sent_msg = 0
self.recv_bytes = 0
self.recv_msg = 0
def __call__(self, action, message):
if action == Queue.SENT:
self.sent_bytes += len(message)
self.sent_msg += 1
if action == Queue.RECEIVED:
self.recv_bytes += len(message)
self.recv_msg += 1
# TODO - if delta past period report the stats
class Queue(object):
"""Docstring for Queue """
SENT = 1
RECEIVED = 2
def __init__(self, ename, mode, qname="", no_ack=True, capture=False,
remove_newline=False, marshal=None, force_new_connection=False):
"""@todo: to be defined
:param ename: @todo
:param mode: @todo
:param qname: @todo
:param no_ack: @todo
:param capture: @todo
:param remove_newline: @todo
"""
if not INITTED:
log.warn("QUEUE INIT Not called, calling")
init()
self._ename = ename
self._mode = mode
self._qname = qname
self._no_ack = no_ack
self._probes = [] # probes for tracing events
self._last_poll = None
self._marshal = marshal or JsonMarshal()
self.connection = connect(force_new_connection)
if not isinstance(self._ename, list):
self._ename = [self._ename]
exclusive = (SERVICE == "")
self._exchanges = [kqueue.Exchange(e[0], type="fanout", durable=False) for e in self._ename]
self._queues = [kqueue.Queue(e[1], ex, exclusive=exclusive)
for e, ex in zip(self._ename, self._exchanges)]
self._name = [e[0] for e in self._ename]
def open(self):
"""@todo: Docstring for open
:returns: @todo
"""
if not INITTED:
init()
if "r" in self._mode:
self._queue = kqueue.KReadQueue(self.connection,
self._queues,
no_ack=self._no_ack,
queue_declare=True)
elif "w" in self._mode:
self._queue = kqueue.KWriteQueue(self.connection,
self._queues[0],
exchange_declare=True)
def read(self):
"""Reads one message from the queue
:returns: @todo
"""
if self._last_poll is not None:
msg = self._last_poll
self._last_poll = None
else:
msg = self._queue.get(block=True)
msg = msg.payload
self.notify(Queue.RECEIVED, msg)
msg = self._marshal.decode(msg)
return msg
def read_without_polling(self):
"""Reads socket without first polling it, guaranteed block if no data
exists.
:returns: @todo
"""
return self.read()
def poll(self, timeout=None, flags=0):
if self._last_poll is not None:
return True
else:
try:
msg = self._queue.get(block=True, timeout=timeout)
except kqueue.Empty:
msg = None
self._last_poll = msg
return self._last_poll is not None
def write(self, data):
"""@todo: Docstring for write
:param data: @todo
:returns: @todo
"""
data = self._marshal.encode(data)
self._queue.put(data)
self.notify(Queue.SENT, data)
def get_name(self):
if not self._name:
return None
elif isinstance(self._name, basestring):
return self._name
else:
return ",".join(self._name)
# be an iterator
# http://docs.python.org/library/stdtypes.html#iterator-types
def __iter__(self):
return self
def next(self):
return self.read()
# support contextmanager
# see http://docs.python.org/library/stdtypes.html#context-manager-types
# with queue.open(...) as q: ...
def __enter__(self):
return self
def __exit__(self, ex_type, ex_val, ex_trace):
self.close()
# tell any open control channels we are exiting
return False
def close(self):
"""@todo: Docstring for close
:returns: @todo
"""
pass
# probes for tracing messages
# this is how you can do dumps of messages as they are read/written
# and stuff like collecting metrics on messages
def add_probe(self, probe):
assert hasattr(probe, '__call__'), "Object must be callable."
self._probes.append(probe)
def notify(self, action, msg):
for p in self._probes:
try:
p(action, json.dumps(msg))
except KeyboardInterrupt:
raise
except:
log.exception('Failed to notify probe.')
class StreamQueue(object):
"""
An object to make a stream (typically stdin or stdout)
conform to the Queue interface so we can write code that treats
them interchangeably.
"""
def __init__(self, stream,
mode='r',
name=None,
encoding='utf8',
marshal=JsonMarshal(),
end_of_record='\n',
**ignore):
assert stream, "Need to a stream to read or write to."
assert marshal, "Need a message marshaller to encode and decode messages."
self._marshal = marshal
self.end_of_record = end_of_record
if encoding:
if mode == 'w':
self._stream = codecs.getwriter(encoding)(stream, 'replace')
else: # default read
self._stream = codecs.getreader(encoding)(stream, 'replace')
else: # accept what they give you
self._stream = stream
if not name:
self._name = None
else:
self._name = name
def get_name(self):
if not self._name:
return None
elif isinstance(self._name, basestring):
return self._name
else:
l = len(self._name)
if l == 1:
return self._name[0]
elif l > 1:
sout = self._name[0]
for i in range(1, l):
sout = sout + "," + self._name[i]
return sout
else:
return None
def poll(self, timeout=None, flags=0): # zmq.POLLIN):
raise NotImplementedError
def read(self, flags=0):
"""Read the next item from the stream.
This deals with blank lines and EOF by passing
on the values from the stream's read(). Blanks lines
are a string with a newline (and maybe other whitespace)
and EOF is returned as ''. I.e. not s.read() => EOF.
"""
msg = self._stream.readline()
if msg.strip(): # skip empty lines
return self._marshal.decode(msg)
else: # pass it on - blank line is '\n', EOF is ''
return msg
def write(self, obj, flags=0):
if not obj:
return
msg = self._marshal.encode(obj).strip()
self._stream.write(msg)
self._stream.write(self.end_of_record)
def __iter__(self):
self._iter = self._stream.__iter__()
return self
def next(self):
if self._iter:
msg = self._iter.next()
if msg.strip(): # skip blank lines
return self._marshal.decode(msg)
else:
return msg
else:
raise Exception('No iterator initialized')
def close(self): # No action necessary. Stubbed so this class can follow the usage patterns of other I/O classes
return
def __enter__(self):
self._ctx = self._stream.__enter__()
return self._ctx
def __exit__(self, ex_type, ex_val, ex_trace):
if self._ctx:
return self._ctx.__exit__()
else:
return False
def resolve_address(qname, qtype="r", attach=None):
"""
Resolve qname into a queue specification,
either from embers.conf or by treating it as a
fully qualified name if it is not in the conf.
Minimal check on form of fully qualified name.
The attach parameter overrides the default attachment type
(BIND or CONNECT) for queues doing special connections.
"""
#(host, port) = conf.get_queue_info(qname)
if qtype in ("w", ): # (zmq.PUB, zmq.REP):
result = (qname, "")
elif qtype in ("r", ):
result = (qname, SERVICE)
else:
assert False, "Invalid type, Queue no longer supports zmq"
return result
def get_conf_entry(qname):
"""
Return the entire JSON expression for a given qname.
"""
return conf.get_conf_entry(qname)
def open(name, mode='r', capture=False, service=None, exclusive=None, **kw):
"""
Open a queue with file-like semantics. E.g.:
q = open('sample-1', 'w') - publish
q = open('sample-1', 'r') - subscribe
options:
name - a queue name, either a full ZMQ-style URL or a name found in queue.conf
    mode - the queue open mode. One of r (read/subscribe) or w (write/publish).
marshal - class to use to marshal messages, default JsonMarshal
capture - capture and log messages as they are sent. Can be True, or a stream, or a Capture instance.
"""
# this is somewhat goofy, but once you have
# a metaphor you might as well run it into the ground
assert mode in {"r", "w"}, 'Mode %s is not a valid mode. Use one of r, w'
typ = mode
service = service or SERVICE
# special case '-' -> use stdin or stdout
if isinstance(name, list) and '-' in name or name == '-':
if mode in ('w', ):
s = sys.stdout
name = 'stdout'
else:
s = sys.stdin
name = 'stdin'
log.info('Reading from stdin' if name == 'stdin' else 'Writing to stdout')
return StreamQueue(s, name=name, mode=mode, **kw)
# normal queue case
if typ in ("w", ):
if not name:
name = conf.get_default_queue_names(service, 'out')
log.info('Writing to %s' % name)
else:
if not name:
name = conf.get_default_queue_names(service, 'in')
log.info('Reading from %s' % name)
if isinstance(name, basestring):
addr = [resolve_address(name,
qtype=typ,
attach=kw.get('attach', None))]
else:
addr = [resolve_address(n,
qtype=typ,
attach=kw.get('attach', None))
for n in name]
if "qname" in kw:
qname = kw["qname"]
addr = [(e[0], qname) for e in addr]
result = Queue(addr, typ, **kw)
assert addr, "Could not resolve an address from %s." % (name,)
result.open()
if capture:
result.add_probe(StreamCaptureProbe())
return result
def main():
"""
A little utility to handle reading and writing streams
to and from a queue.
--pub <queue> : publish what's read from stdin to <queue>
--sub <queue> : read from <queue> and write the messages to stdout
--cat : when used with --pub, write all published messages to stdout
--clean : check in incoming and outgoing messages.
Verify the message is correct JSON and add
an embersId if needed.
--log_file : Path to write the log file to
--log_level : Logging level
Other standard EMBERS options (e.g. --verbose).
"""
import args
import message
global log
ap = args.get_parser()
ap.add_argument('--clean', action="store_true",
help='Verify message format and add standard fields such as embersId.')
ap.add_argument('--addfeed', action="store_true", help='Add feed and feedPath fields to published message.')
ap.add_argument('--cat', action="store_true", help='Write all published messages to stdout.')
ap.add_argument('--rm', nargs="+", help="delete queue")
arg = ap.parse_args()
log = logs.getLogger(log_name=arg.log_file)
logs.init(arg, l=arg.log_level, logfile=arg.log_file)
init(arg)
if arg.rm and not arg.sub:
for queue in arg.rm:
print "Deleting", queue,
queue = kqueue.Queue(queue)
queue.maybe_bind(connect())
queue.delete()
print "."
return
try:
# need to use the raw/utf handler unless we are doing clean
marshal = UnicodeMarshal()
if arg.clean or arg.addfeed:
marshal = JsonMarshal()
if arg.sub is None and os.environ.get('UPSTART_JOB') is None:
arg.sub = '-' # stdin
subq = open(arg.sub, 'r') #, marshal=marshal, ssh_key=arg.ssh_key, ssh_conn=arg.tunnel)
if arg.pub is None and os.environ.get('UPSTART_JOB') is None:
arg.pub = '-' # stdout
pubq = open(arg.pub, 'w', capture=arg.cat, marshal=marshal)
except Exception as e:
log.exception("Exception opening queues: %s" % e)
# "Human-readable" queue name can be retrieved as
#
# sname = subq.get_name()
# pname = pubq.get_name()
rc = 0
try:
it = subq.__iter__()
while True:
m = ''
try:
m = it.next()
if arg.clean:
m = message.clean(m)
if m:
if arg.addfeed:
m = message.add_embers_ids(m, feed=pubq.get_name(), feedPath=pubq.get_name())
pubq.write(m)
except StopIteration:
break
except KeyboardInterrupt:
break
except Exception as e:
rc += 1
if m:
log.exception('Could not process message %s: %s' % (m, e))
else:
log.exception('Unknown processing error %s' % e)
except KeyboardInterrupt:
pass
except Exception as e:
rc = 1
log.exception('Top level exception %s' % e)
return rc
if __name__ == '__main__':
sys.exit(main())
# -*- coding: utf-8 -*-
"""Base exchange class"""
# -----------------------------------------------------------------------------
__version__ = '1.18.575'
# -----------------------------------------------------------------------------
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import NetworkError
from ccxt.base.errors import NotSupported
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidAddress
# -----------------------------------------------------------------------------
from ccxt.base.decimal_to_precision import decimal_to_precision
from ccxt.base.decimal_to_precision import DECIMAL_PLACES, TRUNCATE, ROUND
from ccxt.base.decimal_to_precision import number_to_string
# -----------------------------------------------------------------------------
__all__ = [
'Exchange',
]
# -----------------------------------------------------------------------------
# Python 2 & 3
import types
import logging
import base64
import calendar
import collections
import datetime
from email.utils import parsedate
import functools
import gzip
import hashlib
import hmac
import io
import json
import math
from numbers import Number
import re
from requests import Session
from requests.utils import default_user_agent
from requests.exceptions import HTTPError, Timeout, TooManyRedirects, RequestException
# import socket
from ssl import SSLError
# import sys
import time
import uuid
import zlib
from decimal import Decimal
# -----------------------------------------------------------------------------
try:
basestring # basestring was removed in Python 3
except NameError:
basestring = str
try:
long # long integer was removed in Python 3
except NameError:
long = int
# -----------------------------------------------------------------------------
try:
import urllib.parse as _urlencode # Python 3
except ImportError:
import urllib as _urlencode # Python 2
# -----------------------------------------------------------------------------
# web3/0x imports
try:
# from web3.auto import w3
from web3 import Web3, HTTPProvider
from web3.utils.encoding import hex_encode_abi_type
except ImportError:
Web3 = HTTPProvider = None # web3/0x not supported in Python 2
# -----------------------------------------------------------------------------
class Exchange(object):
"""Base exchange class"""
id = None
version = None
certified = False
# rate limiter settings
enableRateLimit = False
rateLimit = 2000 # milliseconds = seconds * 1000
timeout = 10000 # milliseconds = seconds * 1000
asyncio_loop = None
aiohttp_proxy = None
aiohttp_trust_env = False
session = None # Session () by default
logger = None # logging.getLogger(__name__) by default
userAgent = None
userAgents = {
'chrome': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
'chrome39': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',
}
verbose = False
markets = None
symbols = None
fees = {
'trading': {
'percentage': True, # subclasses should rarely have to redefine this
},
'funding': {
'withdraw': {},
'deposit': {},
},
}
loaded_fees = {
'trading': {
'percentage': True,
},
'funding': {
'withdraw': {},
'deposit': {},
},
}
ids = None
tickers = None
api = None
parseJsonResponse = True
proxy = ''
origin = '*' # CORS origin
proxies = None
hostname = None # in case of inaccessibility of the "main" domain
apiKey = ''
secret = ''
password = ''
uid = ''
privateKey = '' # a "0x"-prefixed hexstring private key for a wallet
walletAddress = '' # the wallet address "0x"-prefixed hexstring
token = '' # reserved for HTTP auth in some cases
twofa = None
marketsById = None
markets_by_id = None
currencies_by_id = None
precision = None
exceptions = None
limits = {
'amount': {
'min': None,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
}
httpExceptions = {
'422': ExchangeError,
'418': DDoSProtection,
'429': DDoSProtection,
'404': ExchangeNotAvailable,
'409': ExchangeNotAvailable,
'500': ExchangeNotAvailable,
'501': ExchangeNotAvailable,
'502': ExchangeNotAvailable,
'520': ExchangeNotAvailable,
'521': ExchangeNotAvailable,
'522': ExchangeNotAvailable,
'525': ExchangeNotAvailable,
'526': ExchangeNotAvailable,
'400': ExchangeNotAvailable,
'403': ExchangeNotAvailable,
'405': ExchangeNotAvailable,
'503': ExchangeNotAvailable,
'530': ExchangeNotAvailable,
'408': RequestTimeout,
'504': RequestTimeout,
'401': AuthenticationError,
'511': AuthenticationError,
}
headers = None
balance = None
orderbooks = None
orders = None
trades = None
transactions = None
currencies = None
    options = None  # Python does not allow defining properties at run-time with setattr
accounts = None
requiredCredentials = {
'apiKey': True,
'secret': True,
'uid': False,
'login': False,
'password': False,
'twofa': False, # 2-factor authentication (one-time password key)
'privateKey': False, # a "0x"-prefixed hexstring private key for a wallet
'walletAddress': False, # the wallet address "0x"-prefixed hexstring
'token': False, # reserved for HTTP auth in some cases
}
# API method metainfo
has = {
'cancelAllOrders': False,
'cancelOrder': True,
'cancelOrders': False,
'CORS': False,
'createDepositAddress': False,
'createLimitOrder': True,
'createMarketOrder': True,
'createOrder': True,
'deposit': False,
'editOrder': 'emulated',
'fetchBalance': True,
'fetchClosedOrders': False,
'fetchCurrencies': False,
'fetchDepositAddress': False,
'fetchDeposits': False,
'fetchFundingFees': False,
'fetchL2OrderBook': True,
'fetchLedger': False,
'fetchMarkets': True,
'fetchMyTrades': False,
'fetchOHLCV': 'emulated',
'fetchOpenOrders': False,
'fetchOrder': False,
'fetchOrderBook': True,
'fetchOrderBooks': False,
'fetchOrders': False,
'fetchTicker': True,
'fetchTickers': False,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': False,
'fetchFundingFee': False,
'fetchTradingLimits': False,
'fetchTransactions': False,
'fetchWithdrawals': False,
'privateAPI': True,
'publicAPI': True,
'withdraw': False,
}
precisionMode = DECIMAL_PLACES
minFundingAddressLength = 1 # used in check_address
substituteCommonCurrencyCodes = True
lastRestRequestTimestamp = 0
lastRestPollTimestamp = 0
restRequestQueue = None
restPollerLoopIsRunning = False
rateLimitTokens = 16
rateLimitMaxTokens = 16
rateLimitUpdateTime = 0
enableLastHttpResponse = True
enableLastJsonResponse = True
enableLastResponseHeaders = True
last_http_response = None
last_json_response = None
last_response_headers = None
requiresWeb3 = False
web3 = None
commonCurrencies = {
'XBT': 'BTC',
'BCC': 'BCH',
'DRK': 'DASH',
'BCHABC': 'BCH',
'BCHSV': 'BSV',
}
def __init__(self, config={}):
self.precision = dict() if self.precision is None else self.precision
self.limits = dict() if self.limits is None else self.limits
self.exceptions = dict() if self.exceptions is None else self.exceptions
self.headers = dict() if self.headers is None else self.headers
self.balance = dict() if self.balance is None else self.balance
self.orderbooks = dict() if self.orderbooks is None else self.orderbooks
self.orders = dict() if self.orders is None else self.orders
self.trades = dict() if self.trades is None else self.trades
self.transactions = dict() if self.transactions is None else self.transactions
self.currencies = dict() if self.currencies is None else self.currencies
        self.options = dict() if self.options is None else self.options  # Python does not allow defining properties at run-time with setattr
self.decimal_to_precision = decimal_to_precision
self.number_to_string = number_to_string
# version = '.'.join(map(str, sys.version_info[:3]))
# self.userAgent = {
# 'User-Agent': 'ccxt/' + __version__ + ' (+https://github.com/ccxt/ccxt) Python/' + version
# }
self.userAgent = default_user_agent()
settings = self.deep_extend(self.describe(), config)
for key in settings:
if hasattr(self, key) and isinstance(getattr(self, key), dict):
setattr(self, key, self.deep_extend(getattr(self, key), settings[key]))
else:
setattr(self, key, settings[key])
if self.api:
self.define_rest_api(self.api, 'request')
if self.markets:
self.set_markets(self.markets)
# convert all properties from underscore notation foo_bar to camelcase notation fooBar
cls = type(self)
for name in dir(self):
if name[0] != '_' and name[-1] != '_' and '_' in name:
parts = name.split('_')
camelcase = parts[0] + ''.join(self.capitalize(i) for i in parts[1:])
attr = getattr(self, name)
if isinstance(attr, types.MethodType):
setattr(cls, camelcase, getattr(cls, name))
else:
setattr(self, camelcase, attr)
self.tokenBucket = self.extend({
'refillRate': 1.0 / self.rateLimit,
'delay': 0.001,
'capacity': 1.0,
'defaultCost': 1.0,
}, getattr(self, 'tokenBucket') if hasattr(self, 'tokenBucket') else {})
self.session = self.session if self.session else Session()
self.logger = self.logger if self.logger else logging.getLogger(__name__)
if self.requiresWeb3 and Web3 and not self.web3:
# self.web3 = w3 if w3 else Web3(HTTPProvider())
self.web3 = Web3(HTTPProvider())
def __del__(self):
if self.session:
self.session.close()
def describe(self):
return {}
def set_sandbox_mode(self, enabled):
if enabled:
if 'test' in self.urls:
self.urls['api_backup'] = self.urls['api']
self.urls['api'] = self.urls['test']
else:
raise NotSupported(self.id + ' does not have a sandbox URL')
elif 'api_backup' in self.urls:
self.urls['api'] = self.urls['api_backup']
del self.urls['api_backup']
@classmethod
def define_rest_api(cls, api, method_name, options={}):
delimiters = re.compile('[^a-zA-Z0-9]')
entry = getattr(cls, method_name) # returns a function (instead of a bound method)
for api_type, methods in api.items():
for http_method, urls in methods.items():
for url in urls:
url = url.strip()
split_path = delimiters.split(url)
uppercase_method = http_method.upper()
lowercase_method = http_method.lower()
camelcase_method = lowercase_method.capitalize()
camelcase_suffix = ''.join([Exchange.capitalize(x) for x in split_path])
lowercase_path = [x.strip().lower() for x in split_path]
underscore_suffix = '_'.join([k for k in lowercase_path if len(k)])
camelcase = api_type + camelcase_method + Exchange.capitalize(camelcase_suffix)
underscore = api_type + '_' + lowercase_method + '_' + underscore_suffix.lower()
if 'suffixes' in options:
if 'camelcase' in options['suffixes']:
camelcase += options['suffixes']['camelcase']
if 'underscore' in options['suffixes']:
underscore += options['suffixes']['underscore']
def partialer():
outer_kwargs = {'path': url, 'api': api_type, 'method': uppercase_method}
@functools.wraps(entry)
def inner(_self, params=None):
"""
Inner is called when a generated method (publicGetX) is called.
_self is a reference to self created by function.__get__(exchange, type(exchange))
                            This closure is equivalent to functools.partial, see https://en.wikipedia.org/wiki/Closure_(computer_programming)
"""
inner_kwargs = dict(outer_kwargs) # avoid mutation
if params is not None:
inner_kwargs['params'] = params
return entry(_self, **inner_kwargs)
return inner
to_bind = partialer()
setattr(cls, camelcase, to_bind)
setattr(cls, underscore, to_bind)
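    # Illustrative sketch: given a hypothetical API map such as
    #
    #   api = {'public': {'get': ['markets', 'ticker/{symbol}']}}
    #
    # define_rest_api() attaches the generated methods in both notations, e.g.
    #
    #   exchange.publicGetMarkets(params)        # same as exchange.public_get_markets(params)
    #   exchange.publicGetTickerSymbol(params)   # same as exchange.public_get_ticker_symbol(params)
    #
    # each of which forwards to request(path, api='public', method='GET', params=params).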
def raise_error(self, exception_type, url=None, method=None, error=None, details=None):
if error:
error = str(error)
output = ' '.join([self.id] + [var for var in (url, method, error, details) if var is not None])
raise exception_type(output)
def throttle(self):
now = float(self.milliseconds())
elapsed = now - self.lastRestRequestTimestamp
if elapsed < self.rateLimit:
delay = self.rateLimit - elapsed
time.sleep(delay / 1000.0)
def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None):
"""A better wrapper over request for deferred signing"""
if self.enableRateLimit:
self.throttle()
self.lastRestRequestTimestamp = self.milliseconds()
request = self.sign(path, api, method, params, headers, body)
return self.fetch(request['url'], request['method'], request['headers'], request['body'])
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
"""Exchange.request is the entry point for all generated methods"""
return self.fetch2(path, api, method, params, headers, body)
@staticmethod
def gzip_deflate(response, text):
encoding = response.info().get('Content-Encoding')
if encoding in ('gzip', 'x-gzip', 'deflate'):
if encoding == 'deflate':
return zlib.decompress(text, -zlib.MAX_WBITS)
else:
return gzip.GzipFile('', 'rb', 9, io.BytesIO(text)).read()
return text
def find_broadly_matched_key(self, broad, string):
"""A helper method for matching error strings exactly vs broadly"""
keys = list(broad.keys())
for i in range(0, len(keys)):
key = keys[i]
if string.find(key) >= 0:
return key
return None
def handle_errors(self, code, reason, url, method, headers, body, response):
pass
def prepare_request_headers(self, headers=None):
headers = headers or {}
headers.update(self.headers)
if self.userAgent:
if type(self.userAgent) is str:
headers.update({'User-Agent': self.userAgent})
elif (type(self.userAgent) is dict) and ('User-Agent' in self.userAgent):
headers.update(self.userAgent)
if self.proxy:
headers.update({'Origin': self.origin})
headers.update({'Accept-Encoding': 'gzip, deflate'})
return headers
def fetch(self, url, method='GET', headers=None, body=None):
"""Perform a HTTP request and return decoded JSON data"""
request_headers = self.prepare_request_headers(headers)
url = self.proxy + url
if self.verbose:
print("\nRequest:", method, url, request_headers, body)
self.logger.debug("%s %s, Request: %s %s", method, url, request_headers, body)
if body:
body = body.encode()
self.session.cookies.clear()
response = None
http_response = None
json_response = None
try:
response = self.session.request(
method,
url,
data=body,
headers=request_headers,
timeout=int(self.timeout / 1000),
proxies=self.proxies
)
http_response = response.text
json_response = self.parse_json(http_response) if self.is_json_encoded_object(http_response) else None
headers = response.headers
# FIXME remove last_x_responses from subclasses
if self.enableLastHttpResponse:
self.last_http_response = http_response
if self.enableLastJsonResponse:
self.last_json_response = json_response
if self.enableLastResponseHeaders:
self.last_response_headers = headers
if self.verbose:
print("\nResponse:", method, url, response.status_code, headers, http_response)
self.logger.debug("%s %s, Response: %s %s %s", method, url, response.status_code, headers, http_response)
response.raise_for_status()
except Timeout as e:
self.raise_error(RequestTimeout, method, url, e)
except TooManyRedirects as e:
self.raise_error(ExchangeError, url, method, e)
except SSLError as e:
self.raise_error(ExchangeError, url, method, e)
except HTTPError as e:
self.handle_errors(response.status_code, response.reason, url, method, headers, http_response, json_response)
self.handle_rest_errors(e, response.status_code, http_response, url, method)
self.raise_error(ExchangeError, url, method, e, http_response)
except RequestException as e: # base exception class
error_string = str(e)
if ('ECONNRESET' in error_string) or ('Connection aborted.' in error_string):
self.raise_error(NetworkError, url, method, e)
else:
self.raise_error(ExchangeError, url, method, e)
self.handle_errors(response.status_code, response.reason, url, method, headers, http_response, json_response)
self.handle_rest_response(http_response, json_response, url, method, headers, body)
if json_response is not None:
return json_response
return http_response
def handle_rest_errors(self, exception, http_status_code, response, url, method='GET'):
error = None
string_code = str(http_status_code)
if string_code in self.httpExceptions:
error = self.httpExceptions[string_code]
if error == ExchangeNotAvailable:
if re.search('(cloudflare|incapsula|overload|ddos)', response, flags=re.IGNORECASE):
error = DDoSProtection
if error:
self.raise_error(error, url, method, exception if exception else http_status_code, response)
def handle_rest_response(self, response, json_response, url, method='GET', headers=None, body=None):
if self.is_json_encoded_object(response) and json_response is None:
ddos_protection = re.search('(cloudflare|incapsula|overload|ddos)', response, flags=re.IGNORECASE)
exchange_not_available = re.search('(offline|busy|retry|wait|unavailable|maintain|maintenance|maintenancing)', response, flags=re.IGNORECASE)
if ddos_protection:
self.raise_error(DDoSProtection, method, url, None, response)
if exchange_not_available:
message = response + ' exchange downtime, exchange closed for maintenance or offline, DDoS protection or rate-limiting in effect'
self.raise_error(ExchangeNotAvailable, method, url, None, message)
self.raise_error(ExchangeError, method, url, ValueError('failed to decode json'), response)
def parse_json(self, http_response):
try:
if Exchange.is_json_encoded_object(http_response):
return json.loads(http_response)
except ValueError: # superclass of JsonDecodeError (python2)
pass
@staticmethod
def safe_float(dictionary, key, default_value=None):
value = default_value
try:
if isinstance(dictionary, list) and isinstance(key, int) and len(dictionary) > key:
value = float(dictionary[key])
else:
value = float(dictionary[key]) if (key is not None) and (key in dictionary) and (dictionary[key] is not None) else default_value
except ValueError as e:
value = default_value
return value
@staticmethod
def safe_string(dictionary, key, default_value=None):
return str(dictionary[key]) if key is not None and (key in dictionary) and dictionary[key] is not None else default_value
@staticmethod
def safe_integer(dictionary, key, default_value=None):
if key is None or (key not in dictionary):
return default_value
value = dictionary[key]
if isinstance(value, Number) or (isinstance(value, basestring) and value.isnumeric()):
return int(value)
return default_value
@staticmethod
def safe_value(dictionary, key, default_value=None):
return dictionary[key] if key is not None and (key in dictionary) and dictionary[key] is not None else default_value
# we're not using safe_floats with a list argument as we're trying to save some cycles here
# we're not using safe_float_3 either because those cases are too rare to deserve their own optimization
@staticmethod
def safe_float_2(dictionary, key1, key2, default_value=None):
return Exchange.safe_either(Exchange.safe_float, dictionary, key1, key2, default_value)
@staticmethod
def safe_string_2(dictionary, key1, key2, default_value=None):
return Exchange.safe_either(Exchange.safe_string, dictionary, key1, key2, default_value)
@staticmethod
def safe_integer_2(dictionary, key1, key2, default_value=None):
return Exchange.safe_either(Exchange.safe_integer, dictionary, key1, key2, default_value)
@staticmethod
def safe_value_2(dictionary, key1, key2, default_value=None):
return Exchange.safe_either(Exchange.safe_value, dictionary, key1, key2, default_value)
@staticmethod
def safe_either(method, dictionary, key1, key2, default_value=None):
"""A helper-wrapper for the safe_value_2() family."""
value = method(dictionary, key1)
return value if value is not None else method(dictionary, key2, default_value)
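    # Illustrative examples of the safe_* accessors (missing or None values fall back to the default):
    #
    #   Exchange.safe_float({'price': '1.25'}, 'price')        # 1.25
    #   Exchange.safe_float({}, 'price', 0.0)                  # 0.0
    #   Exchange.safe_string({'id': 42}, 'id')                 # '42'
    #   Exchange.safe_value_2({'a': None, 'b': 3}, 'a', 'b')   # 3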
@staticmethod
def truncate(num, precision=0):
"""Deprecated, use decimal_to_precision instead"""
if precision > 0:
decimal_precision = math.pow(10, precision)
return math.trunc(num * decimal_precision) / decimal_precision
return int(Exchange.truncate_to_string(num, precision))
@staticmethod
def truncate_to_string(num, precision=0):
"""Deprecated, todo: remove references from subclasses"""
if precision > 0:
parts = ('{0:.%df}' % precision).format(Decimal(num)).split('.')
decimal_digits = parts[1][:precision].rstrip('0')
decimal_digits = decimal_digits if len(decimal_digits) else '0'
return parts[0] + '.' + decimal_digits
return ('%d' % num)
@staticmethod
def uuid():
return str(uuid.uuid4())
@staticmethod
def capitalize(string): # first character only, rest characters unchanged
# the native pythonic .capitalize() method lowercases all other characters
# which is an unwanted behaviour, therefore we use this custom implementation
# check it yourself: print('foobar'.capitalize(), 'fooBar'.capitalize())
if len(string) > 1:
return "%s%s" % (string[0].upper(), string[1:])
return string.upper()
@staticmethod
def keysort(dictionary):
return collections.OrderedDict(sorted(dictionary.items(), key=lambda t: t[0]))
@staticmethod
def extend(*args):
if args is not None:
result = None
if type(args[0]) is collections.OrderedDict:
result = collections.OrderedDict()
else:
result = {}
for arg in args:
result.update(arg)
return result
return {}
@staticmethod
def deep_extend(*args):
result = None
for arg in args:
if isinstance(arg, dict):
if not isinstance(result, dict):
result = {}
for key in arg:
result[key] = Exchange.deep_extend(result[key] if key in result else None, arg[key])
else:
result = arg
return result
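    # Illustrative example: dicts are merged recursively, later arguments win on conflicts:
    #
    #   Exchange.deep_extend({'a': {'x': 1, 'y': 2}}, {'a': {'y': 3}, 'b': 4})
    #   # -> {'a': {'x': 1, 'y': 3}, 'b': 4}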
@staticmethod
def filter_by(array, key, value=None):
if value:
grouped = Exchange.group_by(array, key)
if value in grouped:
return grouped[value]
return []
return array
    @staticmethod
    def filterBy(array, key, value=None):
        return Exchange.filter_by(array, key, value)
@staticmethod
def group_by(array, key):
result = {}
array = Exchange.to_array(array)
array = [entry for entry in array if (key in entry) and (entry[key] is not None)]
for entry in array:
if entry[key] not in result:
result[entry[key]] = []
result[entry[key]].append(entry)
return result
@staticmethod
def groupBy(array, key):
return Exchange.group_by(array, key)
@staticmethod
def index_by(array, key):
result = {}
if type(array) is dict:
array = Exchange.keysort(array).values()
for element in array:
if (key in element) and (element[key] is not None):
k = element[key]
result[k] = element
return result
@staticmethod
def sort_by(array, key, descending=False):
return sorted(array, key=lambda k: k[key] if k[key] is not None else "", reverse=descending)
@staticmethod
def array_concat(a, b):
return a + b
@staticmethod
def in_array(needle, haystack):
return needle in haystack
@staticmethod
def is_empty(object):
return not object
@staticmethod
def extract_params(string):
return re.findall(r'{([\w-]+)}', string)
@staticmethod
def implode_params(string, params):
for key in params:
string = string.replace('{' + key + '}', str(params[key]))
return string
@staticmethod
def url(path, params={}):
result = Exchange.implode_params(path, params)
query = Exchange.omit(params, Exchange.extract_params(path))
if query:
result += '?' + _urlencode.urlencode(query)
return result
@staticmethod
def urlencode(params={}):
if (type(params) is dict) or isinstance(params, collections.OrderedDict):
return _urlencode.urlencode(params)
return params
@staticmethod
def rawencode(params={}):
return _urlencode.unquote(Exchange.urlencode(params))
@staticmethod
def encode_uri_component(uri):
return _urlencode.quote(uri, safe="~()*!.'")
@staticmethod
def omit(d, *args):
result = d.copy()
for arg in args:
if type(arg) is list:
for key in arg:
if key in result:
del result[key]
else:
if arg in result:
del result[arg]
return result
@staticmethod
def unique(array):
return list(set(array))
@staticmethod
def pluck(array, key):
return [
element[key]
for element in array
if (key in element) and (element[key] is not None)
]
@staticmethod
def sum(*args):
return sum([arg for arg in args if isinstance(arg, (float, int))])
@staticmethod
def ordered(array):
return collections.OrderedDict(array)
@staticmethod
def aggregate(bidasks):
ordered = Exchange.ordered({})
for [price, volume] in bidasks:
if volume > 0:
ordered[price] = (ordered[price] if price in ordered else 0) + volume
result = []
items = list(ordered.items())
for price, volume in items:
result.append([price, volume])
return result
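    # Illustrative example: volumes at identical price levels are summed, zero-volume levels are dropped:
    #
    #   Exchange.aggregate([[100.0, 1.0], [100.0, 2.0], [99.5, 0.0]])
    #   # -> [[100.0, 3.0]]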
@staticmethod
def sec():
return Exchange.seconds()
@staticmethod
def msec():
return Exchange.milliseconds()
@staticmethod
def usec():
return Exchange.microseconds()
@staticmethod
def seconds():
return int(time.time())
@staticmethod
def milliseconds():
return int(time.time() * 1000)
@staticmethod
def microseconds():
return int(time.time() * 1000000)
@staticmethod
def iso8601(timestamp=None):
if timestamp is None:
return timestamp
if not isinstance(timestamp, (int, long)):
return None
if int(timestamp) < 0:
return None
try:
utc = datetime.datetime.utcfromtimestamp(timestamp // 1000)
return utc.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-6] + "{:03d}".format(int(timestamp) % 1000) + 'Z'
except (TypeError, OverflowError, OSError):
return None
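    # Illustrative example (input is a timestamp in milliseconds, output is UTC):
    #
    #   Exchange.iso8601(1546300800000)   # '2019-01-01T00:00:00.000Z'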
@staticmethod
def dmy(timestamp, infix='-'):
utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
return utc_datetime.strftime('%m' + infix + '%d' + infix + '%Y')
@staticmethod
def ymd(timestamp, infix='-'):
utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
return utc_datetime.strftime('%Y' + infix + '%m' + infix + '%d')
@staticmethod
def ymdhms(timestamp, infix=' '):
utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
return utc_datetime.strftime('%Y-%m-%d' + infix + '%H:%M:%S')
@staticmethod
def parse_date(timestamp=None):
if timestamp is None:
return timestamp
if not isinstance(timestamp, str):
return None
if 'GMT' in timestamp:
try:
string = ''.join([str(value) for value in parsedate(timestamp)[:6]]) + '.000Z'
dt = datetime.datetime.strptime(string, "%Y%m%d%H%M%S.%fZ")
return calendar.timegm(dt.utctimetuple()) * 1000
except (TypeError, OverflowError, OSError):
return None
else:
return Exchange.parse8601(timestamp)
@staticmethod
def parse8601(timestamp=None):
if timestamp is None:
return timestamp
yyyy = '([0-9]{4})-?'
mm = '([0-9]{2})-?'
dd = '([0-9]{2})(?:T|[\\s])?'
h = '([0-9]{2}):?'
m = '([0-9]{2}):?'
s = '([0-9]{2})'
ms = '(\\.[0-9]{1,3})?'
tz = '(?:(\\+|\\-)([0-9]{2})\\:?([0-9]{2})|Z)?'
regex = r'' + yyyy + mm + dd + h + m + s + ms + tz
try:
match = re.search(regex, timestamp, re.IGNORECASE)
if match is None:
return None
yyyy, mm, dd, h, m, s, ms, sign, hours, minutes = match.groups()
ms = ms or '.000'
msint = int(ms[1:])
sign = sign or ''
sign = int(sign + '1') * -1
hours = int(hours or 0) * sign
minutes = int(minutes or 0) * sign
offset = datetime.timedelta(hours=hours, minutes=minutes)
string = yyyy + mm + dd + h + m + s + ms + 'Z'
dt = datetime.datetime.strptime(string, "%Y%m%d%H%M%S.%fZ")
dt = dt + offset
return calendar.timegm(dt.utctimetuple()) * 1000 + msint
except (TypeError, OverflowError, OSError, ValueError):
return None
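    # Illustrative example (the inverse of iso8601, returns milliseconds since the epoch):
    #
    #   Exchange.parse8601('2019-01-01T00:00:00.000Z')   # 1546300800000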
@staticmethod
def hash(request, algorithm='md5', digest='hex'):
h = hashlib.new(algorithm, request)
if digest == 'hex':
return h.hexdigest()
elif digest == 'base64':
return base64.b64encode(h.digest())
return h.digest()
@staticmethod
def hmac(request, secret, algorithm=hashlib.sha256, digest='hex'):
h = hmac.new(secret, request, algorithm)
if digest == 'hex':
return h.hexdigest()
elif digest == 'base64':
return base64.b64encode(h.digest())
return h.digest()
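    # Illustrative example: hex-encoded HMAC-SHA256 over a byte payload, as typically
    # used by exchange-specific sign() implementations:
    #
    #   Exchange.hmac(Exchange.encode('payload'), Exchange.encode('secret'))   # hex digest string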
@staticmethod
def binary_concat(*args):
result = bytes()
for arg in args:
result = result + arg
return result
@staticmethod
def binary_to_string(s):
return s.decode('ascii')
@staticmethod
def base64urlencode(s):
return Exchange.decode(base64.urlsafe_b64encode(s)).replace('=', '')
@staticmethod
def jwt(request, secret, algorithm=hashlib.sha256, alg='HS256'):
header = Exchange.encode(Exchange.json({
'alg': alg,
'typ': 'JWT',
}))
encodedHeader = Exchange.base64urlencode(header)
encodedData = Exchange.base64urlencode(Exchange.encode(Exchange.json(request)))
token = encodedHeader + '.' + encodedData
hmac = Exchange.hmac(Exchange.encode(token), Exchange.encode(secret), algorithm, 'binary')
signature = Exchange.base64urlencode(hmac)
return token + '.' + signature
@staticmethod
def unjson(input):
return json.loads(input)
@staticmethod
def json(data, params=None):
return json.dumps(data, separators=(',', ':'))
@staticmethod
def is_json_encoded_object(input):
return (isinstance(input, basestring) and
(len(input) >= 2) and
((input[0] == '{') or (input[0] == '[')))
@staticmethod
def encode(string):
return string.encode()
@staticmethod
def decode(string):
return string.decode()
@staticmethod
def to_array(value):
return list(value.values()) if type(value) is dict else value
def nonce(self):
return Exchange.seconds()
def check_required_credentials(self, error=True):
keys = list(self.requiredCredentials.keys())
for key in keys:
if self.requiredCredentials[key] and not getattr(self, key):
if error:
self.raise_error(AuthenticationError, details='requires `' + key + '`')
else:
return error
def check_address(self, address):
"""Checks an address is not the same character repeated or an empty sequence"""
if address is None:
self.raise_error(InvalidAddress, details='address is None')
if all(letter == address[0] for letter in address) or len(address) < self.minFundingAddressLength or ' ' in address:
self.raise_error(InvalidAddress, details='address is invalid or has less than ' + str(self.minFundingAddressLength) + ' characters: "' + str(address) + '"')
return address
def account(self):
return {
'free': 0.0,
'used': 0.0,
'total': 0.0,
}
def common_currency_code(self, currency):
if not self.substituteCommonCurrencyCodes:
return currency
return self.safe_string(self.commonCurrencies, currency, currency)
def currency_id(self, commonCode):
if self.currencies:
if commonCode in self.currencies:
return self.currencies[commonCode]['id']
currencyIds = {v: k for k, v in self.commonCurrencies.items()}
return self.safe_string(currencyIds, commonCode, commonCode)
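    # Illustrative examples (with the default commonCurrencies mapping above and
    # substituteCommonCurrencyCodes enabled):
    #
    #   exchange.common_currency_code('XBT')   # 'BTC'
    #   exchange.currency_id('BTC')            # 'XBT' when no exchange-specific currencies are loaded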
def precision_from_string(self, string):
parts = re.sub(r'0+$', '', string).split('.')
return len(parts[1]) if len(parts) > 1 else 0
def cost_to_precision(self, symbol, cost):
return self.decimal_to_precision(cost, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
def price_to_precision(self, symbol, price):
return self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
def amount_to_precision(self, symbol, amount):
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], self.precisionMode)
def fee_to_precision(self, symbol, fee):
return self.decimal_to_precision(fee, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
def currency_to_precision(self, currency, fee):
return self.decimal_to_precision(fee, ROUND, self.currencies[currency]['precision'], self.precisionMode)
def set_markets(self, markets, currencies=None):
values = list(markets.values()) if type(markets) is dict else markets
for i in range(0, len(values)):
values[i] = self.extend(
self.fees['trading'],
{'precision': self.precision, 'limits': self.limits},
values[i]
)
self.markets = self.index_by(values, 'symbol')
self.markets_by_id = self.index_by(values, 'id')
self.marketsById = self.markets_by_id
self.symbols = sorted(list(self.markets.keys()))
self.ids = sorted(list(self.markets_by_id.keys()))
if currencies:
self.currencies = self.deep_extend(currencies, self.currencies)
else:
base_currencies = [{
'id': market['baseId'] if 'baseId' in market else market['base'],
'numericId': market['baseNumericId'] if 'baseNumericId' in market else None,
'code': market['base'],
'precision': (
market['precision']['base'] if 'base' in market['precision'] else (
market['precision']['amount'] if 'amount' in market['precision'] else None
)
) if 'precision' in market else 8,
} for market in values if 'base' in market]
quote_currencies = [{
'id': market['quoteId'] if 'quoteId' in market else market['quote'],
'numericId': market['quoteNumericId'] if 'quoteNumericId' in market else None,
'code': market['quote'],
'precision': (
market['precision']['quote'] if 'quote' in market['precision'] else (
market['precision']['price'] if 'price' in market['precision'] else None
)
) if 'precision' in market else 8,
} for market in values if 'quote' in market]
currencies = self.sort_by(base_currencies + quote_currencies, 'code')
self.currencies = self.deep_extend(self.index_by(currencies, 'code'), self.currencies)
self.currencies_by_id = self.index_by(list(self.currencies.values()), 'id')
return self.markets
def load_markets(self, reload=False, params={}):
if not reload:
if self.markets:
if not self.markets_by_id:
return self.set_markets(self.markets)
return self.markets
currencies = None
if self.has['fetchCurrencies']:
currencies = self.fetch_currencies()
markets = self.fetch_markets(params)
return self.set_markets(markets, currencies)
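    # Typical usage sketch (the symbol is hypothetical): markets must be loaded
    # before any symbol-based lookup:
    #
    #   exchange.load_markets()
    #   market = exchange.market('ETH/BTC')   # raises ExchangeError for unknown symbols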
def load_accounts(self, reload=False, params={}):
if reload:
self.accounts = self.fetch_accounts(params)
else:
if self.accounts:
return self.accounts
else:
self.accounts = self.fetch_accounts(params)
self.accountsById = self.index_by(self.accounts, 'id')
return self.accounts
def load_fees(self, reload=False):
if not reload:
if self.loaded_fees != Exchange.loaded_fees:
return self.loaded_fees
self.loaded_fees = self.deep_extend(self.loaded_fees, self.fetch_fees())
return self.loaded_fees
def fetch_markets(self, params={}):
# markets are returned as a list
# currencies are returned as a dict
# this is for historical reasons
# and may be changed for consistency later
return self.to_array(self.markets)
def fetch_currencies(self, params={}):
# markets are returned as a list
# currencies are returned as a dict
# this is for historical reasons
# and may be changed for consistency later
return self.currencies
def fetch_fees(self):
trading = {}
funding = {}
if self.has['fetchTradingFees']:
trading = self.fetch_trading_fees()
if self.has['fetchFundingFees']:
funding = self.fetch_funding_fees()
return {
'trading': trading,
'funding': funding,
}
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.raise_error(NotSupported, details='create_order() not supported yet')
def cancel_order(self, id, symbol=None, params={}):
self.raise_error(NotSupported, details='cancel_order() not supported yet')
def fetch_bids_asks(self, symbols=None, params={}):
        self.raise_error(NotSupported, details='API does not allow fetching all prices at once with a single call to fetch_bids_asks() for now')
def fetch_tickers(self, symbols=None, params={}):
        self.raise_error(NotSupported, details='API does not allow fetching all tickers at once with a single call to fetch_tickers() for now')
def fetch_order_status(self, id, symbol=None, params={}):
order = self.fetch_order(id, symbol, params)
return order['status']
def purge_cached_orders(self, before):
orders = self.to_array(self.orders)
orders = [order for order in orders if (order['status'] == 'open') or (order['timestamp'] >= before)]
self.orders = self.index_by(orders, 'id')
return self.orders
def fetch_order(self, id, symbol=None, params={}):
self.raise_error(NotSupported, details='fetch_order() is not supported yet')
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
self.raise_error(NotSupported, details='fetch_orders() is not supported yet')
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.raise_error(NotSupported, details='fetch_open_orders() is not supported yet')
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
self.raise_error(NotSupported, details='fetch_closed_orders() is not supported yet')
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.raise_error(NotSupported, details='fetch_my_trades() is not supported yet')
def fetch_order_trades(self, id, symbol=None, params={}):
self.raise_error(NotSupported, details='fetch_order_trades() is not supported yet')
def fetch_transactions(self, symbol=None, since=None, limit=None, params={}):
self.raise_error(NotSupported, details='fetch_transactions() is not supported yet')
def fetch_deposits(self, symbol=None, since=None, limit=None, params={}):
self.raise_error(NotSupported, details='fetch_deposits() is not supported yet')
def fetch_withdrawals(self, symbol=None, since=None, limit=None, params={}):
self.raise_error(NotSupported, details='fetch_withdrawals() is not supported yet')
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
return ohlcv[0:6] if isinstance(ohlcv, list) else ohlcv
def parse_ohlcvs(self, ohlcvs, market=None, timeframe='1m', since=None, limit=None):
ohlcvs = self.to_array(ohlcvs)
num_ohlcvs = len(ohlcvs)
result = []
i = 0
while i < num_ohlcvs:
if limit and (len(result) >= limit):
break
ohlcv = self.parse_ohlcv(ohlcvs[i], market, timeframe, since, limit)
i = i + 1
if since and (ohlcv[0] < since):
continue
result.append(ohlcv)
return self.sort_by(result, 0)
    def parse_bid_ask(self, bidask, price_key=0, amount_key=1):
return [float(bidask[price_key]), float(bidask[amount_key])]
def parse_bids_asks(self, bidasks, price_key=0, amount_key=1):
result = []
if len(bidasks):
if type(bidasks[0]) is list:
for bidask in bidasks:
if bidask[price_key] and bidask[amount_key]:
result.append(self.parse_bid_ask(bidask, price_key, amount_key))
elif type(bidasks[0]) is dict:
for bidask in bidasks:
if (price_key in bidask) and (amount_key in bidask) and (bidask[price_key] and bidask[amount_key]):
result.append(self.parse_bid_ask(bidask, price_key, amount_key))
else:
self.raise_error(ExchangeError, details='unrecognized bidask format: ' + str(bidasks[0]))
return result
def fetch_l2_order_book(self, symbol, limit=None, params={}):
orderbook = self.fetch_order_book(symbol, limit, params)
return self.extend(orderbook, {
'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True),
'asks': self.sort_by(self.aggregate(orderbook['asks']), 0),
})
def parse_order_book(self, orderbook, timestamp=None, bids_key='bids', asks_key='asks', price_key=0, amount_key=1):
return {
'bids': self.sort_by(self.parse_bids_asks(orderbook[bids_key], price_key, amount_key) if (bids_key in orderbook) and isinstance(orderbook[bids_key], list) else [], 0, True),
'asks': self.sort_by(self.parse_bids_asks(orderbook[asks_key], price_key, amount_key) if (asks_key in orderbook) and isinstance(orderbook[asks_key], list) else [], 0),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp) if timestamp is not None else None,
'nonce': None,
}
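    # Illustrative example of the normalized order book structure:
    #
    #   exchange.parse_order_book({'bids': [['100.0', '2.0']], 'asks': [['101.0', '1.5']]}, 1546300800000)
    #   # -> {'bids': [[100.0, 2.0]], 'asks': [[101.0, 1.5]], 'timestamp': 1546300800000,
    #   #     'datetime': '2019-01-01T00:00:00.000Z', 'nonce': None}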
def parse_balance(self, balance):
currencies = self.omit(balance, 'info').keys()
for account in ['free', 'used', 'total']:
balance[account] = {}
for currency in currencies:
balance[account][currency] = balance[currency][account]
return balance
def fetch_partial_balance(self, part, params={}):
balance = self.fetch_balance(params)
return balance[part]
def fetch_free_balance(self, params={}):
return self.fetch_partial_balance('free', params)
def fetch_used_balance(self, params={}):
return self.fetch_partial_balance('used', params)
def fetch_total_balance(self, params={}):
return self.fetch_partial_balance('total', params)
    def fetch_trading_fees(self, params={}):
self.raise_error(NotSupported, details='fetch_trading_fees() not supported yet')
def fetch_trading_fee(self, symbol, params={}):
if not self.has['fetchTradingFees']:
self.raise_error(NotSupported, details='fetch_trading_fee() not supported yet')
return self.fetch_trading_fees(params)
def fetch_funding_fees(self, params={}):
self.raise_error(NotSupported, details='fetch_funding_fees() not supported yet')
def fetch_funding_fee(self, code, params={}):
if not self.has['fetchFundingFees']:
self.raise_error(NotSupported, details='fetch_funding_fee() not supported yet')
return self.fetch_funding_fees(params)
def load_trading_limits(self, symbols=None, reload=False, params={}):
if self.has['fetchTradingLimits']:
if reload or not('limitsLoaded' in list(self.options.keys())):
response = self.fetch_trading_limits(symbols)
for i in range(0, len(symbols)):
symbol = symbols[i]
self.markets[symbol] = self.deep_extend(self.markets[symbol], response[symbol])
self.options['limitsLoaded'] = self.milliseconds()
return self.markets
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if not self.has['fetchTrades']:
self.raise_error(NotSupported, details='fetch_ohlcv() not supported yet')
self.load_markets()
trades = self.fetch_trades(symbol, since, limit, params)
return self.build_ohlcv(trades, timeframe, since, limit)
def fetchOHLCV(self, symbol, timeframe='1m', since=None, limit=None, params={}):
return self.fetch_ohlcv(symbol, timeframe, since, limit, params)
def parse_trading_view_ohlcv(self, ohlcvs, market=None, timeframe='1m', since=None, limit=None):
result = self.convert_trading_view_to_ohlcv(ohlcvs)
return self.parse_ohlcvs(result, market, timeframe, since, limit)
def convert_trading_view_to_ohlcv(self, ohlcvs):
result = []
for i in range(0, len(ohlcvs['t'])):
result.append([
ohlcvs['t'][i] * 1000,
ohlcvs['o'][i],
ohlcvs['h'][i],
ohlcvs['l'][i],
ohlcvs['c'][i],
ohlcvs['v'][i],
])
return result
def convert_ohlcv_to_trading_view(self, ohlcvs):
result = {
't': [],
'o': [],
'h': [],
'l': [],
'c': [],
'v': [],
}
for i in range(0, len(ohlcvs)):
result['t'].append(int(ohlcvs[i][0] / 1000))
result['o'].append(ohlcvs[i][1])
result['h'].append(ohlcvs[i][2])
result['l'].append(ohlcvs[i][3])
result['c'].append(ohlcvs[i][4])
result['v'].append(ohlcvs[i][5])
return result
def build_ohlcv(self, trades, timeframe='1m', since=None, limit=None):
ms = self.parse_timeframe(timeframe) * 1000
ohlcvs = []
(high, low, close, volume) = (2, 3, 4, 5)
num_trades = len(trades)
oldest = (num_trades - 1) if limit is None else min(num_trades - 1, limit)
for i in range(0, oldest):
trade = trades[i]
if (since is not None) and (trade['timestamp'] < since):
continue
opening_time = int(math.floor(trade['timestamp'] / ms) * ms) # Shift the edge of the m/h/d (but not M)
j = len(ohlcvs)
if (j == 0) or opening_time >= ohlcvs[j - 1][0] + ms:
# moved to a new timeframe -> create a new candle from opening trade
ohlcvs.append([
opening_time,
trade['price'],
trade['price'],
trade['price'],
trade['price'],
trade['amount'],
])
else:
# still processing the same timeframe -> update opening trade
ohlcvs[j - 1][high] = max(ohlcvs[j - 1][high], trade['price'])
ohlcvs[j - 1][low] = min(ohlcvs[j - 1][low], trade['price'])
ohlcvs[j - 1][close] = trade['price']
ohlcvs[j - 1][volume] += trade['amount']
return ohlcvs
@staticmethod
def parse_timeframe(timeframe):
amount = int(timeframe[0:-1])
unit = timeframe[-1]
if 'y' in unit:
scale = 60 * 60 * 24 * 365
elif 'M' in unit:
scale = 60 * 60 * 24 * 30
elif 'w' in unit:
scale = 60 * 60 * 24 * 7
elif 'd' in unit:
scale = 60 * 60 * 24
elif 'h' in unit:
scale = 60 * 60
else:
scale = 60 # 1m by default
return amount * scale
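    # Illustrative examples (returns the timeframe length in seconds):
    #
    #   Exchange.parse_timeframe('1m')   # 60
    #   Exchange.parse_timeframe('1h')   # 3600
    #   Exchange.parse_timeframe('1d')   # 86400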
def parse_trades(self, trades, market=None, since=None, limit=None):
array = self.to_array(trades)
array = [self.parse_trade(trade, market) for trade in array]
array = self.sort_by(array, 'timestamp')
symbol = market['symbol'] if market else None
return self.filter_by_symbol_since_limit(array, symbol, since, limit)
def parse_ledger(self, data, currency=None, since=None, limit=None):
array = self.to_array(data)
array = [self.parse_ledger_entry(item, currency) for item in array]
array = self.sort_by(array, 'timestamp')
code = currency['code'] if currency else None
return self.filter_by_currency_since_limit(array, code, since, limit)
def parse_transactions(self, transactions, currency=None, since=None, limit=None, params={}):
array = self.to_array(transactions)
array = [self.extend(self.parse_transaction(transaction, currency), params) for transaction in array]
array = self.sort_by(array, 'timestamp')
code = currency['code'] if currency else None
return self.filter_by_currency_since_limit(array, code, since, limit)
def parse_orders(self, orders, market=None, since=None, limit=None):
array = self.to_array(orders)
array = [self.parse_order(order, market) for order in array]
array = self.sort_by(array, 'timestamp')
symbol = market['symbol'] if market else None
return self.filter_by_symbol_since_limit(array, symbol, since, limit)
def safe_currency_code(self, data, key, currency=None):
code = None
currency_id = self.safe_string(data, key)
if currency_id in self.currencies_by_id:
currency = self.currencies_by_id[currency_id]
else:
code = self.common_currency_code(currency_id)
if currency is not None:
code = currency['code']
return code
def filter_by_value_since_limit(self, array, field, value=None, since=None, limit=None):
array = self.to_array(array)
if value:
array = [entry for entry in array if entry[field] == value]
if since:
array = [entry for entry in array if entry['timestamp'] >= since]
if limit:
array = array[0:limit]
return array
def filter_by_symbol_since_limit(self, array, symbol=None, since=None, limit=None):
return self.filter_by_value_since_limit(array, 'symbol', symbol, since, limit)
def filter_by_currency_since_limit(self, array, code=None, since=None, limit=None):
return self.filter_by_value_since_limit(array, 'currency', code, since, limit)
def filter_by_since_limit(self, array, since=None, limit=None):
array = self.to_array(array)
if since:
array = [entry for entry in array if entry['timestamp'] >= since]
if limit:
array = array[0:limit]
return array
def filter_by_symbol(self, array, symbol=None):
array = self.to_array(array)
if symbol:
return [entry for entry in array if entry['symbol'] == symbol]
return array
def filter_by_array(self, objects, key, values=None, indexed=True):
objects = self.to_array(objects)
# return all of them if no values were passed in
if values is None:
return self.index_by(objects, key) if indexed else objects
result = []
for i in range(0, len(objects)):
value = objects[i][key] if key in objects[i] else None
if value in values:
result.append(objects[i])
return self.index_by(result, key) if indexed else result
def currency(self, code):
if not self.currencies:
self.raise_error(ExchangeError, details='Currencies not loaded')
if isinstance(code, basestring) and (code in self.currencies):
return self.currencies[code]
self.raise_error(ExchangeError, details='Does not have currency code ' + str(code))
def find_market(self, string):
if not self.markets:
self.raise_error(ExchangeError, details='Markets not loaded')
if isinstance(string, basestring):
if string in self.markets_by_id:
return self.markets_by_id[string]
if string in self.markets:
return self.markets[string]
return string
def find_symbol(self, string, market=None):
if market is None:
market = self.find_market(string)
if isinstance(market, dict):
return market['symbol']
return string
def market(self, symbol):
if not self.markets:
self.raise_error(ExchangeError, details='Markets not loaded')
if isinstance(symbol, basestring) and (symbol in self.markets):
return self.markets[symbol]
self.raise_error(ExchangeError, details='No market symbol ' + str(symbol))
def market_ids(self, symbols):
return [self.market_id(symbol) for symbol in symbols]
def market_id(self, symbol):
market = self.market(symbol)
return market['id'] if type(market) is dict else symbol
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
market = self.markets[symbol]
rate = market[takerOrMaker]
cost = float(self.cost_to_precision(symbol, amount * price))
return {
'rate': rate,
'type': takerOrMaker,
'currency': market['quote'],
'cost': float(self.fee_to_precision(symbol, rate * cost)),
}
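    # Illustrative sketch (assumes a loaded market 'ETH/BTC' with a 0.002 taker rate and a price precision of 8):
    #
    #   exchange.calculate_fee('ETH/BTC', 'limit', 'buy', 1, 0.05, 'taker')
    #   # -> {'rate': 0.002, 'type': 'taker', 'currency': 'BTC', 'cost': 0.0001}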
def edit_limit_buy_order(self, id, symbol, *args):
return self.edit_limit_order(id, symbol, 'buy', *args)
def edit_limit_sell_order(self, id, symbol, *args):
return self.edit_limit_order(id, symbol, 'sell', *args)
def edit_limit_order(self, id, symbol, *args):
return self.edit_order(id, symbol, 'limit', *args)
def edit_order(self, id, symbol, *args):
if not self.enableRateLimit:
self.raise_error(ExchangeError, details='edit_order() requires enableRateLimit = true')
self.cancel_order(id, symbol)
return self.create_order(symbol, *args)
def create_limit_order(self, symbol, *args):
return self.create_order(symbol, 'limit', *args)
def create_market_order(self, symbol, *args):
return self.create_order(symbol, 'market', *args)
def create_limit_buy_order(self, symbol, *args):
return self.create_order(symbol, 'limit', 'buy', *args)
def create_limit_sell_order(self, symbol, *args):
return self.create_order(symbol, 'limit', 'sell', *args)
def create_market_buy_order(self, symbol, amount, params={}):
return self.create_order(symbol, 'market', 'buy', amount, None, params)
def create_market_sell_order(self, symbol, amount, params={}):
return self.create_order(symbol, 'market', 'sell', amount, None, params)
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
raise NotSupported(self.id + ' sign() pure method must be redefined in derived classes')
# -------------------------------------------------------------------------
# web3 / 0x methods
@staticmethod
def has_web3():
return Web3 is not None
def check_required_dependencies(self):
if not Exchange.has_web3():
raise NotSupported("Web3 functionality requires Python3 and web3 package installed: https://github.com/ethereum/web3.py")
def eth_decimals(self, unit='ether'):
units = {
'wei': 0, # 1
'kwei': 3, # 1000
'babbage': 3, # 1000
'femtoether': 3, # 1000
'mwei': 6, # 1000000
'lovelace': 6, # 1000000
'picoether': 6, # 1000000
'gwei': 9, # 1000000000
'shannon': 9, # 1000000000
'nanoether': 9, # 1000000000
'nano': 9, # 1000000000
'szabo': 12, # 1000000000000
'microether': 12, # 1000000000000
'micro': 12, # 1000000000000
'finney': 15, # 1000000000000000
'milliether': 15, # 1000000000000000
'milli': 15, # 1000000000000000
'ether': 18, # 1000000000000000000
'kether': 21, # 1000000000000000000000
'grand': 21, # 1000000000000000000000
'mether': 24, # 1000000000000000000000000
'gether': 27, # 1000000000000000000000000000
'tether': 30, # 1000000000000000000000000000000
}
return self.safe_value(units, unit)
def eth_unit(self, decimals=18):
units = {
0: 'wei', # 1000000000000000000
3: 'kwei', # 1000000000000000
6: 'mwei', # 1000000000000
9: 'gwei', # 1000000000
12: 'szabo', # 1000000
15: 'finney', # 1000
18: 'ether', # 1
21: 'kether', # 0.001
24: 'mether', # 0.000001
27: 'gether', # 0.000000001
30: 'tether', # 0.000000000001
}
return self.safe_value(units, decimals)
def fromWei(self, amount, unit='ether', decimals=18):
if Web3 is None:
self.raise_error(NotSupported, details="ethereum web3 methods require Python 3: https://pythonclock.org")
if amount is None:
return amount
if decimals != 18:
if decimals % 3:
amount = int(amount) * (10 ** (18 - decimals))
else:
unit = self.eth_unit(decimals)
return float(Web3.fromWei(int(amount), unit))
def toWei(self, amount, unit='ether', decimals=18):
if Web3 is None:
self.raise_error(NotSupported, details="ethereum web3 methods require Python 3: https://pythonclock.org")
if amount is None:
return amount
if decimals != 18:
if decimals % 3:
# this case has known yet unsolved problems:
# toWei(1.999, 'ether', 17) == '199900000000000011'
# toWei(1.999, 'ether', 19) == '19989999999999999991'
# the best solution should not involve additional dependencies
amount = Decimal(amount) / Decimal(10 ** (18 - decimals))
else:
unit = self.eth_unit(decimals)
return str(Web3.toWei(amount, unit))
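    # Illustrative examples (require the web3 package to be installed):
    #
    #   exchange.fromWei('1000000000000000000')    # 1.0 ether
    #   exchange.toWei(1.5, 'ether')               # '1500000000000000000'
    #   exchange.fromWei('1230000', 'ether', 6)    # 1.23 for a 6-decimal token amount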
def decryptAccountFromJSON(self, value, password):
return self.decryptAccount(json.loads(value) if isinstance(value, basestring) else value, password)
def decryptAccount(self, key, password):
return self.web3.eth.accounts.decrypt(key, password)
def decryptAccountFromPrivateKey(self, privateKey):
return self.web3.eth.accounts.privateKeyToAccount(privateKey)
def soliditySha3(self, array):
values = self.solidityValues(array)
types = self.solidityTypes(values)
return self.web3.soliditySha3(types, values).hex()
def soliditySha256(self, values):
types = self.solidityTypes(values)
solidity_values = self.solidityValues(values)
encoded_values = [hex_encode_abi_type(abi_type, value)[2:] for abi_type, value in zip(types, solidity_values)]
hex_string = '0x' + ''.join(encoded_values)
return '0x' + self.hash(self.encode(self.web3.toText(hex_string)), 'sha256')
def solidityTypes(self, array):
return ['address' if self.web3.isAddress(value) else 'uint256' for value in array]
def solidityValues(self, array):
return [self.web3.toChecksumAddress(value) if self.web3.isAddress(value) else int(value) for value in array]
def getZeroExOrderHash2(self, order):
return self.soliditySha3([
order['exchangeContractAddress'], # address
order['maker'], # address
order['taker'], # address
order['makerTokenAddress'], # address
order['takerTokenAddress'], # address
order['feeRecipient'], # address
order['makerTokenAmount'], # uint256
order['takerTokenAmount'], # uint256
order['makerFee'], # uint256
order['takerFee'], # uint256
order['expirationUnixTimestampSec'], # uint256
order['salt'], # uint256
])
def getZeroExOrderHash(self, order):
unpacked = [
self.web3.toChecksumAddress(order['exchangeContractAddress']), # { value: order.exchangeContractAddress, type: types_1.SolidityTypes.Address },
self.web3.toChecksumAddress(order['maker']), # { value: order.maker, type: types_1.SolidityTypes.Address },
self.web3.toChecksumAddress(order['taker']), # { value: order.taker, type: types_1.SolidityTypes.Address },
self.web3.toChecksumAddress(order['makerTokenAddress']), # { value: order.makerTokenAddress, type: types_1.SolidityTypes.Address },
self.web3.toChecksumAddress(order['takerTokenAddress']), # { value: order.takerTokenAddress, type: types_1.SolidityTypes.Address },
self.web3.toChecksumAddress(order['feeRecipient']), # { value: order.feeRecipient, type: types_1.SolidityTypes.Address },
int(order['makerTokenAmount']), # { value: bigNumberToBN(order.makerTokenAmount), type: types_1.SolidityTypes.Uint256, },
int(order['takerTokenAmount']), # { value: bigNumberToBN(order.takerTokenAmount), type: types_1.SolidityTypes.Uint256, },
int(order['makerFee']), # { value: bigNumberToBN(order.makerFee), type: types_1.SolidityTypes.Uint256, },
int(order['takerFee']), # { value: bigNumberToBN(order.takerFee), type: types_1.SolidityTypes.Uint256, },
int(order['expirationUnixTimestampSec']), # { value: bigNumberToBN(order.expirationUnixTimestampSec), type: types_1.SolidityTypes.Uint256, },
int(order['salt']), # { value: bigNumberToBN(order.salt), type: types_1.SolidityTypes.Uint256 },
]
types = [
'address', # { value: order.exchangeContractAddress, type: types_1.SolidityTypes.Address },
'address', # { value: order.maker, type: types_1.SolidityTypes.Address },
'address', # { value: order.taker, type: types_1.SolidityTypes.Address },
'address', # { value: order.makerTokenAddress, type: types_1.SolidityTypes.Address },
'address', # { value: order.takerTokenAddress, type: types_1.SolidityTypes.Address },
'address', # { value: order.feeRecipient, type: types_1.SolidityTypes.Address },
'uint256', # { value: bigNumberToBN(order.makerTokenAmount), type: types_1.SolidityTypes.Uint256, },
'uint256', # { value: bigNumberToBN(order.takerTokenAmount), type: types_1.SolidityTypes.Uint256, },
'uint256', # { value: bigNumberToBN(order.makerFee), type: types_1.SolidityTypes.Uint256, },
'uint256', # { value: bigNumberToBN(order.takerFee), type: types_1.SolidityTypes.Uint256, },
'uint256', # { value: bigNumberToBN(order.expirationUnixTimestampSec), type: types_1.SolidityTypes.Uint256, },
'uint256', # { value: bigNumberToBN(order.salt), type: types_1.SolidityTypes.Uint256 },
]
return self.web3.soliditySha3(types, unpacked).hex()
def remove_0x_prefix(self, value):
if value[:2] == '0x':
return value[2:]
return value
def getZeroExOrderHashV2(self, order):
# https://github.com/0xProject/0x-monorepo/blob/development/python-packages/order_utils/src/zero_ex/order_utils/__init__.py
def pad_20_bytes_to_32(twenty_bytes):
return bytes(12) + twenty_bytes
def int_to_32_big_endian_bytes(i):
return i.to_bytes(32, byteorder="big")
def to_bytes(value):
if not isinstance(value, str):
raise TypeError("Value must be an instance of str")
if len(value) % 2:
value = "0x0" + self.remove_0x_prefix(value)
return base64.b16decode(self.remove_0x_prefix(value), casefold=True)
domain_struct_header = b"\x91\xab=\x17\xe3\xa5\n\x9d\x89\xe6?\xd3\x0b\x92\xbe\x7fS6\xb0;({\xb9Fxz\x83\xa9\xd6*'f\xf0\xf2F\x18\xf4\xc4\xbe\x1eb\xe0&\xfb\x03\x9a \xef\x96\xf4IR\x94\x81}\x10'\xff\xaam\x1fp\xe6\x1e\xad|[\xef\x02x\x16\xa8\x00\xda\x176DO\xb5\x8a\x80~\xf4\xc9`;xHg?~:h\xeb\x14\xa5"
order_schema_hash = b'w\x05\x01\xf8\x8a&\xed\xe5\xc0J \xef\x87yi\xe9a\xeb\x11\xfc\x13\xb7\x8a\xafAKc=\xa0\xd4\xf8o'
header = b"\x19\x01"
domain_struct_hash = self.web3.sha3(
domain_struct_header +
pad_20_bytes_to_32(to_bytes(order["exchangeAddress"]))
)
order_struct_hash = self.web3.sha3(
order_schema_hash +
pad_20_bytes_to_32(to_bytes(order["makerAddress"])) +
pad_20_bytes_to_32(to_bytes(order["takerAddress"])) +
pad_20_bytes_to_32(to_bytes(order["feeRecipientAddress"])) +
pad_20_bytes_to_32(to_bytes(order["senderAddress"])) +
int_to_32_big_endian_bytes(int(order["makerAssetAmount"])) +
int_to_32_big_endian_bytes(int(order["takerAssetAmount"])) +
int_to_32_big_endian_bytes(int(order["makerFee"])) +
int_to_32_big_endian_bytes(int(order["takerFee"])) +
int_to_32_big_endian_bytes(int(order["expirationTimeSeconds"])) +
int_to_32_big_endian_bytes(int(order["salt"])) +
self.web3.sha3(to_bytes(order["makerAssetData"])) +
self.web3.sha3(to_bytes(order["takerAssetData"]))
)
sha3 = self.web3.sha3(
header +
domain_struct_hash +
order_struct_hash
)
return '0x' + base64.b16encode(sha3).decode('ascii').lower()
def signZeroExOrder(self, order, privateKey):
orderHash = self.getZeroExOrderHash(order)
signature = self.signMessage(orderHash[-64:], privateKey)
return self.extend(order, {
'orderHash': orderHash,
'ecSignature': signature, # todo fix v if needed
})
def signZeroExOrderV2(self, order, privateKey):
orderHash = self.getZeroExOrderHashV2(order)
signature = self.signMessage(orderHash[-64:], privateKey)
return self.extend(order, {
'orderHash': orderHash,
'signature': self._convertECSignatureToSignatureHex(signature),
})
def _convertECSignatureToSignatureHex(self, signature):
# https://github.com/0xProject/0x-monorepo/blob/development/packages/order-utils/src/signature_utils.ts
v = signature["v"]
if v != 27 and v != 28:
v = v + 27
return (
"0x" +
self.remove_0x_prefix(hex(v)) +
self.remove_0x_prefix(signature["r"]) +
self.remove_0x_prefix(signature["s"]) +
"03"
)
def hashMessage(self, message):
message_bytes = bytes.fromhex(message)
return self.web3.sha3(b"\x19Ethereum Signed Message:\n" + str(len(message_bytes)).encode() + message_bytes).hex()
def signHash(self, hash, privateKey):
signature = self.web3.eth.account.signHash(hash[-64:], private_key=privateKey[-64:])
return {
'v': signature.v, # integer
'r': self.web3.toHex(signature.r), # '0x'-prefixed hex string
's': self.web3.toHex(signature.s), # '0x'-prefixed hex string
}
def signMessage(self, message, privateKey):
#
        # The following comment relates to MetaMask; we use the first (upper) signature-prefix variant shown below:
#
# z.ecSignOrderHashAsync ('0xcfdb0a485324ff37699b4c8557f6858f25916fc6fce5993b32fe018aea510b9f',
# '0x731fc101bbe102221c91c31ed0489f1ddfc439a3', {
# prefixType: 'ETH_SIGN',
# shouldAddPrefixBeforeCallingEthSign: true
# }).then ((e, r) => console.log (e,r))
#
# { ↓
# v: 28,
# r: "0xea7a68268b47c48d5d7a4c900e6f9af0015bf70951b3db2f1d835c5d544aaec2",
# s: "0x5d1db2a060c955c1fde4c967237b995c2361097405407b33c6046c8aeb3ccbdf"
# }
#
# --------------------------------------------------------------------
#
# z.ecSignOrderHashAsync ('0xcfdb0a485324ff37699b4c8557f6858f25916fc6fce5993b32fe018aea510b9f',
# '0x731fc101bbe102221c91c31ed0489f1ddfc439a3', {
# prefixType: 'NONE',
# shouldAddPrefixBeforeCallingEthSign: true
# }).then ((e, r) => console.log (e,r))
#
# { ↓
# v: 27,
# r: "0xc8c710022c57de4f529d448e9b40517dd9bfb49ff1eb245f5856664b865d14a6",
# s: "0x0740bb21f4f094fbbdbafa903bb8f057f82e0c6e4fe65d19a1daed4ed97cd394"
# }
#
message_hash = self.hashMessage(message)
signature = self.signHash(message_hash[-64:], privateKey[-64:])
return signature
def oath(self):
if self.twofa is not None:
return self.totp(self.twofa)
else:
raise ExchangeError(self.id + ' set .twofa to use this feature')
@staticmethod
def totp(key):
def dec_to_bytes(n):
if n > 0:
return dec_to_bytes(n // 256) + bytes([n % 256])
else:
return b''
def hex_to_dec(n):
return int(n, base=16)
def base32_to_bytes(n):
missing_padding = len(n) % 8
padding = 8 - missing_padding if missing_padding > 0 else 0
padded = n.upper() + ('=' * padding)
return base64.b32decode(padded) # throws an error if the key is invalid
epoch = int(time.time()) // 30
hmac_res = Exchange.hmac(dec_to_bytes(epoch).rjust(8, b'\x00'), base32_to_bytes(key.replace(' ', '')), hashlib.sha1, 'hex')
offset = hex_to_dec(hmac_res[-1]) * 2
otp = str(hex_to_dec(hmac_res[offset: offset + 8]) & 0x7fffffff)
return otp[-6:]
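    # Usage sketch (the base32 secret below is hypothetical):
    #   Exchange.totp('JBSWY3DPEHPK3PXP')  # -> current 6-digit RFC 6238 code for the 30-second window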
| 40.56631 | 299 | 0.595025 | ["MIT"] | newdime/ccxt | python/ccxt/base/exchange.py | 75,863 | Python
"""
Utility for cloning ScrapyCloud jobs
Features tagging of cloned from/to jobs (both source and destination) and avoids to clone source jobs already cloned.
By default cloned jobs are scheduled in the same project as source job. If --project-id is given, target project
is overriden.
"""
import logging
from shub_workflow.script import BaseScript
from shub_workflow.utils import dash_retry_decorator
_LOG = logging.getLogger(__name__)
_LOG.setLevel(logging.INFO)
def _transform_cmd(job_cmd):
if isinstance(job_cmd, list):
return " ".join(["'%s'" % cmd for cmd in job_cmd[1:]])
return job_cmd
_COPIED_FROM_META = {
"job_cmd": ("cmd_args", _transform_cmd),
"units": (None, None),
"spider_args": ("job_args", None),
"tags": ("add_tag", None),
"job_settings": (None, None),
}
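# The mapping above pairs source-job metadata keys with the schedule parameter names expected by
# project.jobs.run(); the second tuple element is an optional transform (None means copy verbatim).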
class BaseClonner(BaseScript):
@staticmethod
def is_cloned(job):
for tag in job.metadata.get("tags") or []:
if tag.startswith("ClonedTo="):
_LOG.warning(f"Job {job.key} already cloned. Skipped.")
return True
return False
@dash_retry_decorator
def is_cloned_by_jobkey(self, jobkey):
job = self.client.get_job(jobkey)
return self.is_cloned(job)
def job_params_hook(self, job_params):
pass
def clone_job(self, job_key, units=None, extra_tags=None):
extra_tags = extra_tags or []
job = self.client.get_job(job_key)
spider = job.metadata.get("spider")
job_params = dict()
for key, (target_key, _) in _COPIED_FROM_META.items():
if target_key is None:
target_key = key
job_params[target_key] = job.metadata.get(key)
add_tag = job_params.setdefault("add_tag", [])
add_tag = list(filter(lambda x: not x.startswith("ClonedFrom="), add_tag))
add_tag.append(f"ClonedFrom={job_key}")
add_tag.extend(extra_tags)
job_params["add_tag"] = add_tag
if units is not None:
job_params["units"] = units
self.job_params_hook(job_params)
for key, (target_key, transform) in _COPIED_FROM_META.items():
target_key = target_key or key
if transform is None:
def transform(x):
return x
job_params[target_key] = transform(job_params[target_key])
project_id, _, _ = job_key.split("/")
project = self.get_project(self.project_id or project_id)
new_job = self.schedule_generic(project, spider, **job_params)
_LOG.info("Cloned %s to %s", job_key, new_job.key)
jobtags = job.metadata.get("tags")
jobtags.append(f"ClonedTo={new_job.key}")
job.metadata.update({"tags": jobtags})
return job, new_job
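    # Usage sketch (job key and tag are hypothetical):
    #   source_job, new_job = clonner.clone_job("123456/1/42", units=2, extra_tags=["RETRY"])
    # The source job is tagged ClonedTo=<new key> and the clone ClonedFrom=<source key>.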
@dash_retry_decorator
def schedule_generic(self, project, spider, **job_params):
return project.jobs.run(spider, **job_params)
class CloneJobScript(BaseClonner):
flow_id_required = False
@property
def description(self):
return __doc__
def parse_project_id(self, args):
project_id = super().parse_project_id(args)
if project_id:
return project_id
if args.key:
return args.key[0].split("/")[0]
if args.tag_spider:
return args.tag_spider.split("/")[0]
def add_argparser_options(self):
super().add_argparser_options()
self.argparser.add_argument(
"--key",
type=str,
action="append",
default=[],
help="Target job key. Can be given multiple times. All must be in same project.",
)
self.argparser.add_argument(
"--tag-spider",
help="In format <project_id>/<tag>/<spider name>," "clone given spider from given project id, by tag",
)
self.argparser.add_argument("--units", help="Set number of units. Default is the same as cloned job.", type=int)
def run(self):
if self.args.key:
keys = filter(lambda x: not self.is_cloned_by_jobkey(x), self.args.key)
elif self.args.tag_spider:
keys = []
project_id, tag, spider = self.args.tag_spider.split("/")
for job in self.get_project(project_id).jobs.iter(spider=spider, state=["finished"], has_tag=tag):
if not self.is_cloned_by_jobkey(job["key"]):
keys.append(job["key"])
else:
self.argparser.error("You must provide either --key or --tag-spider.")
for job_key in keys:
try:
self.clone_job(job_key, self.args.units, self.args.tag)
except Exception as e:
_LOG.error("Could not restart job %s: %s", job_key, e)
if __name__ == "__main__":
script = CloneJobScript()
script.run()
| 32.125 | 120 | 0.612124 | ["BSD-3-Clause"] | curita/shub-workflow | shub_workflow/clone_job.py | 4,883 | Python
import os
from conans import tools, ConanFile, CMake
from conans.errors import ConanInvalidConfiguration, ConanException
class CMakeConan(ConanFile):
name = "cmake"
description = "Conan installer for CMake"
topics = ("conan", "cmake", "build", "installer")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/Kitware/CMake"
license = "BSD-3-Clause"
generators = "cmake"
settings = "os", "arch", "compiler", "build_type"
options = {
"with_openssl": [True, False, "auto"],
}
default_options = {
"with_openssl": "auto",
}
_source_subfolder = "source_subfolder"
_cmake = None
def _minor_version(self):
return ".".join(str(self.version).split(".")[:2])
@property
def _with_openssl(self):
if self.options.with_openssl == "auto":
return self.settings.os != "Windows"
return self.options.with_openssl
def configure(self):
if self.settings.os == "Macos" and self.settings.arch == "x86":
raise ConanInvalidConfiguration("CMake does not support x86 for macOS")
minimal_cpp_standard = "11"
if self.settings.compiler.cppstd:
tools.check_min_cppstd(self, minimal_cpp_standard)
minimal_version = {
"gcc": "5",
"clang": "3.3",
"apple-clang": "9",
"Visual Studio": "14",
}
compiler = str(self.settings.compiler)
if compiler not in minimal_version:
self.output.warn(
"{} recipe lacks information about the {} compiler standard version support".format(self.name, compiler))
self.output.warn(
"{} requires a compiler that supports at least C++{}".format(self.name, minimal_cpp_standard))
return
version = tools.Version(self.settings.compiler.version)
if version < minimal_version[compiler]:
raise ConanInvalidConfiguration(
"{} requires a compiler that supports at least C++{}".format(self.name, minimal_cpp_standard))
def requirements(self):
if self._with_openssl:
self.requires("openssl/1.1.1h")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
extracted_dir = self.name + "-" + self.version
os.rename(extracted_dir, self._source_subfolder)
def _configure_cmake(self):
if not self._cmake:
self._cmake = CMake(self)
if not self.settings.compiler.cppstd:
self._cmake.definitions["CMAKE_CXX_STANDARD"] = 11
self._cmake.definitions["CMAKE_BOOTSTRAP"] = False
if self.settings.os == "Linux":
self._cmake.definitions["CMAKE_USE_OPENSSL"] = self._with_openssl
if self._with_openssl:
self._cmake.definitions["OPENSSL_USE_STATIC_LIBS"] = not self.options["openssl"].shared
self._cmake.configure(source_folder=self._source_subfolder)
return self._cmake
def build(self):
tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeLists.txt"),
"project(CMake)",
"project(CMake)\ninclude(\"{}/conanbuildinfo.cmake\")\nconan_basic_setup(NO_OUTPUT_DIRS)".format(
self.install_folder.replace("\\", "/")))
if self.settings.os == "Linux":
tools.replace_in_file(os.path.join(self._source_subfolder, "Utilities", "cmcurl", "CMakeLists.txt"),
"list(APPEND CURL_LIBS ${OPENSSL_LIBRARIES})",
"list(APPEND CURL_LIBS ${OPENSSL_LIBRARIES} ${CMAKE_DL_LIBS} pthread)")
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy("Copyright.txt", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
tools.rmdir(os.path.join(self.package_folder, "doc"))
def package_id(self):
self.info.options.with_openssl = self._with_openssl
del self.info.settings.compiler
def package_info(self):
minor = self._minor_version()
bindir = os.path.join(self.package_folder, "bin")
self.output.info("Appending PATH environment variable: {}".format(bindir))
self.env_info.PATH.append(bindir)
self.env_info.CMAKE_ROOT = self.package_folder
mod_path = os.path.join(self.package_folder, "share", "cmake-%s" % minor, "Modules")
self.env_info.CMAKE_MODULE_PATH = mod_path
if not os.path.exists(mod_path):
raise ConanException("Module path not found: %s" % mod_path)
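# Typical invocation from a local checkout of this recipe (the version is hypothetical):
#   conan create . 3.19.1@ -o cmake:with_openssl=True
# builds CMake from source and packages the binaries for use as a Conan build requirement.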
| 39.848739 | 127 | 0.615141 | ["MIT"] | LunarWatcher/conan-center-index | recipes/cmake/3.x.x/conanfile.py | 4,742 | Python
import os
import sys
filename = __file__[:-5] + '-input'
with open(filename) as f:
board = list(map(lambda s: list(map(int, list(s))), f.read().splitlines()))
max_row = len(board)
max_col = len(board[0])
def get_neighbors(row, col):
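    # Collect the in-bounds orthogonal (up/down/left/right) neighbours of (row, col).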
n = []
if(row > 0):
n.append((row-1,col))
if(row+1 < max_row):
n.append((row+1,col))
if(col > 0):
n.append((row,col-1))
if(col+1 < max_col):
n.append((row,col+1))
return n
low_points = []
basin_size = {}
for i, row in enumerate(board):
for j, val in enumerate(row):
neighbors = [board[r][c] for r,c in get_neighbors(i,j)]
if all([val < elem for elem in neighbors ]):
low_points.append((i,j))
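# Flood-fill each basin from its low point: keep expanding to neighbours below height 9
# and record the number of visited cells as that basin's size.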
for r,c in low_points:
visited = []
to_explore = [(r,c)]
while len(to_explore) > 0:
visited.append(to_explore[0])
cur_r, cur_c = to_explore.pop(0)
to_explore.extend([(r,c) for r,c in get_neighbors(cur_r, cur_c) if board[r][c] < 9 and (r,c) not in visited and (r,c) not in to_explore])
basin_size[(r,c)] = len(visited)
largest_basins = sorted(basin_size, key=basin_size.get, reverse=True)[:3]
print(basin_size[largest_basins[0]]*basin_size[largest_basins[1]]*basin_size[largest_basins[2]])
| 24.72549 | 145 | 0.610626 | ["MIT"] | lsangers/AdventOfCode | 2021/09-2.py | 1,261 | Python
import logging
import os
import shutil
import tempfile
import uuid
import re
from typing import Tuple, List, Text, Set, Union, Optional, Iterable
from rasa.nlu.training_data import loading
from rasa.utils.io import DEFAULT_ENCODING
logger = logging.getLogger(__name__)
def get_core_directory(paths: Optional[Union[Text, List[Text]]],) -> Text:
"""Recursively collects all Core training files from a list of paths.
Args:
paths: List of paths to training files or folders containing them.
Returns:
Path to temporary directory containing all found Core training files.
"""
core_files, _ = get_core_nlu_files(paths)
return _copy_files_to_new_dir(core_files)
def get_nlu_directory(paths: Optional[Union[Text, List[Text]]],) -> Text:
"""Recursively collects all NLU training files from a list of paths.
Args:
paths: List of paths to training files or folders containing them.
Returns:
Path to temporary directory containing all found NLU training files.
"""
_, nlu_files = get_core_nlu_files(paths)
return _copy_files_to_new_dir(nlu_files)
def get_core_nlu_directories(
paths: Optional[Union[Text, List[Text]]],
) -> Tuple[Text, Text]:
"""Recursively collects all training files from a list of paths.
Args:
paths: List of paths to training files or folders containing them.
Returns:
Path to directory containing the Core files and path to directory
containing the NLU training files.
"""
story_files, nlu_data_files = get_core_nlu_files(paths)
story_directory = _copy_files_to_new_dir(story_files)
nlu_directory = _copy_files_to_new_dir(nlu_data_files)
return story_directory, nlu_directory
def get_core_nlu_files(
paths: Optional[Union[Text, List[Text]]]
) -> Tuple[List[Text], List[Text]]:
"""Recursively collects all training files from a list of paths.
Args:
paths: List of paths to training files or folders containing them.
Returns:
Tuple of paths to story and NLU files.
"""
story_files = set()
nlu_data_files = set()
if paths is None:
paths = []
elif isinstance(paths, str):
paths = [paths]
for path in set(paths):
if not path:
continue
if _is_valid_filetype(path):
if is_nlu_file(path):
nlu_data_files.add(os.path.abspath(path))
elif is_story_file(path):
story_files.add(os.path.abspath(path))
else:
new_story_files, new_nlu_data_files = _find_core_nlu_files_in_directory(
path
)
story_files.update(new_story_files)
nlu_data_files.update(new_nlu_data_files)
return sorted(story_files), sorted(nlu_data_files)
def _find_core_nlu_files_in_directory(directory: Text,) -> Tuple[Set[Text], Set[Text]]:
story_files = set()
nlu_data_files = set()
for root, _, files in os.walk(directory, followlinks=True):
# we sort the files here to ensure consistent order for repeatable training results
for f in sorted(files):
full_path = os.path.join(root, f)
if not _is_valid_filetype(full_path):
continue
if is_nlu_file(full_path):
nlu_data_files.add(full_path)
elif is_story_file(full_path):
story_files.add(full_path)
return story_files, nlu_data_files
def _is_valid_filetype(path: Text) -> bool:
is_file = os.path.isfile(path)
is_datafile = path.endswith(".json") or path.endswith(".md")
return is_file and is_datafile
def is_nlu_file(file_path: Text) -> bool:
"""Checks if a file is a Rasa compatible nlu file.
Args:
file_path: Path of the file which should be checked.
Returns:
`True` if it's a nlu file, otherwise `False`.
"""
return loading.guess_format(file_path) != loading.UNK
def is_story_file(file_path: Text) -> bool:
"""Checks if a file is a Rasa story file.
Args:
file_path: Path of the file which should be checked.
Returns:
`True` if it's a story file, otherwise `False`.
"""
if not file_path.endswith(".md"):
return False
try:
with open(
file_path, encoding=DEFAULT_ENCODING, errors="surrogateescape"
) as lines:
return any(_contains_story_pattern(line) for line in lines)
except Exception as e:
# catch-all because we might be loading files we are not expecting to load
logger.error(
f"Tried to check if '{file_path}' is a story file, but failed to "
f"read it. If this file contains story data, you should "
f"investigate this error, otherwise it is probably best to "
f"move the file to a different location. "
f"Error: {e}"
)
return False
def _contains_story_pattern(text: Text) -> bool:
story_pattern = r".*##.+"
return re.match(story_pattern, text) is not None
def is_domain_file(file_path: Text) -> bool:
"""Checks whether the given file path is a Rasa domain file.
Args:
file_path: Path of the file which should be checked.
Returns:
`True` if it's a domain file, otherwise `False`.
"""
file_name = os.path.basename(file_path)
return file_name in ["domain.yml", "domain.yaml"]
def is_config_file(file_path: Text) -> bool:
"""Checks whether the given file path is a Rasa config file.
Args:
file_path: Path of the file which should be checked.
Returns:
`True` if it's a Rasa config file, otherwise `False`.
"""
file_name = os.path.basename(file_path)
return file_name in ["config.yml", "config.yaml"]
def _copy_files_to_new_dir(files: Iterable[Text]) -> Text:
directory = tempfile.mkdtemp()
for f in files:
# makes sure files do not overwrite each other, hence the prefix
unique_prefix = uuid.uuid4().hex
unique_file_name = unique_prefix + "_" + os.path.basename(f)
shutil.copy2(f, os.path.join(directory, unique_file_name))
return directory
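# Usage sketch (the path is hypothetical):
#   story_dir, nlu_dir = get_core_nlu_directories(["data/"])
# recursively splits Core and NLU training files and copies each group into its own temp directory.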
| 28.809302 | 91 | 0.658541 | ["Apache-2.0"] | Amirali-Shirkh/rasa-for-botfront | rasa/data.py | 6,194 | Python
import requests
import sys
import json
requests.packages.urllib3.disable_warnings()
from requests.packages.urllib3.exceptions import InsecureRequestWarning
SDWAN_IP = "10.10.20.90"
SDWAN_USERNAME = "admin"
SDWAN_PASSWORD = "C1sco12345"
class rest_api_lib:
def __init__(self, vmanage_ip, username, password):
self.vmanage_ip = vmanage_ip
self.session = {}
self.login(self.vmanage_ip, username, password)
def login(self, vmanage_ip, username, password):
"""Login to vmanage"""
base_url_str = 'https://%s:8443/'%vmanage_ip
login_action = 'j_security_check'
login_data = {'j_username' : username, 'j_password' : password}
login_url = base_url_str + login_action
sess = requests.session()
login_response = sess.post(url=login_url, data=login_data, verify=False)
if b'<html>' in login_response.content:
print ("Login Failed")
sys.exit(0)
self.session[vmanage_ip] = sess
def get_request(self, api):
url = "https://%s:8443/dataservice/%s"%(self.vmanage_ip, api)
response = self.session[self.vmanage_ip].get(url, verify=False)
return response
Sdwan = rest_api_lib(SDWAN_IP, SDWAN_USERNAME, SDWAN_PASSWORD)
def Wan_edge_Health():
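    # Print the vManage hardware-health summary as comma-separated counts taken from the
    # first three statusList entries of the summary API response.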
try:
resp = Sdwan.get_request(api = "device/hardwarehealth/summary?isCached=true")
data = resp.json()
string = str(data['data'][0]['statusList'][0]['count'])+','+str(data['data'][0]['statusList'][1]['count'])+','+str(data['data'][0]['statusList'][2]['count'])
print(string)
    except Exception as exc:
        print("Failed to fetch WAN edge health summary: %s" % exc)
        sys.exit(1)
Wan_edge_Health()
| 32.226415 | 165 | 0.652225 | ["MIT"] | victornguyen98/luanvan2020 | app/Http/Controllers/Dashboard/Wan_edge_Health.py | 1,708 | Python
"""
Run all TDDA tests
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from tdda.referencetest import ReferenceTestCase
from tdda.constraints.testconstraints import *
from tdda.rexpy.testrexpy import *
from tdda.referencetest.tests.alltests import *
if __name__ == '__main__':
ReferenceTestCase.main()
| 19.842105 | 48 | 0.803714 | ["MIT"] | Daniel-Mietchen/tdda | tdda/testtdda.py | 377 | Python
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar, Union
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._operations import build_deployment_operations_get_at_management_group_scope_request, build_deployment_operations_get_at_scope_request, build_deployment_operations_get_at_subscription_scope_request, build_deployment_operations_get_at_tenant_scope_request, build_deployment_operations_get_request, build_deployment_operations_list_at_management_group_scope_request, build_deployment_operations_list_at_scope_request, build_deployment_operations_list_at_subscription_scope_request, build_deployment_operations_list_at_tenant_scope_request, build_deployment_operations_list_request, build_deployments_calculate_template_hash_request, build_deployments_cancel_at_management_group_scope_request, build_deployments_cancel_at_scope_request, build_deployments_cancel_at_subscription_scope_request, build_deployments_cancel_at_tenant_scope_request, build_deployments_cancel_request, build_deployments_check_existence_at_management_group_scope_request, build_deployments_check_existence_at_scope_request, build_deployments_check_existence_at_subscription_scope_request, build_deployments_check_existence_at_tenant_scope_request, build_deployments_check_existence_request, build_deployments_create_or_update_at_management_group_scope_request_initial, build_deployments_create_or_update_at_scope_request_initial, build_deployments_create_or_update_at_subscription_scope_request_initial, build_deployments_create_or_update_at_tenant_scope_request_initial, build_deployments_create_or_update_request_initial, build_deployments_delete_at_management_group_scope_request_initial, build_deployments_delete_at_scope_request_initial, build_deployments_delete_at_subscription_scope_request_initial, build_deployments_delete_at_tenant_scope_request_initial, build_deployments_delete_request_initial, build_deployments_export_template_at_management_group_scope_request, build_deployments_export_template_at_scope_request, build_deployments_export_template_at_subscription_scope_request, build_deployments_export_template_at_tenant_scope_request, build_deployments_export_template_request, build_deployments_get_at_management_group_scope_request, build_deployments_get_at_scope_request, build_deployments_get_at_subscription_scope_request, build_deployments_get_at_tenant_scope_request, build_deployments_get_request, build_deployments_list_at_management_group_scope_request, build_deployments_list_at_scope_request, build_deployments_list_at_subscription_scope_request, build_deployments_list_at_tenant_scope_request, build_deployments_list_by_resource_group_request, build_deployments_validate_at_management_group_scope_request, build_deployments_validate_at_scope_request, build_deployments_validate_at_subscription_scope_request, build_deployments_validate_at_tenant_scope_request, build_deployments_validate_request, build_deployments_what_if_at_subscription_scope_request_initial, build_deployments_what_if_request_initial, build_operations_list_request, build_providers_get_at_tenant_scope_request, build_providers_get_request, build_providers_list_at_tenant_scope_request, build_providers_list_request, build_providers_register_request, build_providers_unregister_request, build_resource_groups_check_existence_request, build_resource_groups_create_or_update_request, build_resource_groups_delete_request_initial, build_resource_groups_export_template_request_initial, build_resource_groups_get_request, build_resource_groups_list_request, build_resource_groups_update_request, build_resources_check_existence_by_id_request, 
build_resources_check_existence_request, build_resources_create_or_update_by_id_request_initial, build_resources_create_or_update_request_initial, build_resources_delete_by_id_request_initial, build_resources_delete_request_initial, build_resources_get_by_id_request, build_resources_get_request, build_resources_list_by_resource_group_request, build_resources_list_request, build_resources_move_resources_request_initial, build_resources_update_by_id_request_initial, build_resources_update_request_initial, build_resources_validate_move_resources_request_initial, build_tags_create_or_update_request, build_tags_create_or_update_value_request, build_tags_delete_request, build_tags_delete_value_request, build_tags_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
"""Operations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.resources.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.OperationListResult"]:
"""Lists all of the available Microsoft.Resources REST API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_operations_list_request(
api_version=api_version,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_operations_list_request(
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/providers/Microsoft.Resources/operations"} # type: ignore
class DeploymentsOperations: # pylint: disable=too-many-public-methods
"""DeploymentsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.resources.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_at_scope_initial( # pylint: disable=inconsistent-return-statements
self,
scope: str,
deployment_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_delete_at_scope_request_initial(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
template_url=self._delete_at_scope_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_at_scope_initial.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def begin_delete_at_scope( # pylint: disable=inconsistent-return-statements
self,
scope: str,
deployment_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a deployment from the deployment history.
A template deployment that is currently running cannot be deleted. Deleting a template
deployment removes the associated deployment operations. This is an asynchronous operation that
returns a status of 202 until the template deployment is successfully deleted. The Location
response header contains the URI that is used to obtain the status of the process. While the
process is running, a call to the URI in the Location header returns a status of 202. When the
process finishes, the URI in the Location header returns a status of 204 on success. If the
asynchronous request failed, the URI in the Location header returns an error-level status code.
:param scope: The scope of a deployment.
:type scope: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_at_scope_initial(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_at_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def check_existence_at_scope(
self,
scope: str,
deployment_name: str,
**kwargs: Any
) -> bool:
"""Checks whether the deployment exists.
:param scope: The scope of a deployment.
:type scope: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_check_existence_at_scope_request(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.check_existence_at_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence_at_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
async def _create_or_update_at_scope_initial(
self,
scope: str,
deployment_name: str,
parameters: "_models.Deployment",
**kwargs: Any
) -> "_models.DeploymentExtended":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'Deployment')
request = build_deployments_create_or_update_at_scope_request_initial(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_at_scope_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_at_scope_initial.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def begin_create_or_update_at_scope(
self,
scope: str,
deployment_name: str,
parameters: "_models.Deployment",
**kwargs: Any
) -> AsyncLROPoller["_models.DeploymentExtended"]:
"""Deploys resources at a given scope.
You can provide the template and parameters directly in the request or link to JSON files.
:param scope: The scope of a deployment.
:type scope: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param parameters: Additional parameters supplied to the operation.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.Deployment
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DeploymentExtended or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExtended]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_at_scope_initial(
scope=scope,
deployment_name=deployment_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update_at_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
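    # Usage sketch (client and variable names are hypothetical):
    #   poller = await client.deployments.begin_create_or_update_at_scope(scope, "demo", deployment)
    #   extended = await poller.result()  # DeploymentExtended once the long-running operation completes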
@distributed_trace_async
async def get_at_scope(
self,
scope: str,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExtended":
"""Gets a deployment.
:param scope: The scope of a deployment.
:type scope: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExtended, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExtended
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_get_at_scope_request(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.get_at_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def cancel_at_scope( # pylint: disable=inconsistent-return-statements
self,
scope: str,
deployment_name: str,
**kwargs: Any
) -> None:
"""Cancels a currently running template deployment.
You can cancel a deployment only if the provisioningState is Accepted or Running. After the
deployment is canceled, the provisioningState is set to Canceled. Canceling a template
deployment stops the currently running template deployment and leaves the resources partially
deployed.
:param scope: The scope of a deployment.
:type scope: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_cancel_at_scope_request(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.cancel_at_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
cancel_at_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}/cancel"} # type: ignore
@distributed_trace_async
async def validate_at_scope(
self,
scope: str,
deployment_name: str,
parameters: "_models.Deployment",
**kwargs: Any
) -> "_models.DeploymentValidateResult":
"""Validates whether the specified template is syntactically correct and will be accepted by Azure
        Resource Manager.
:param scope: The scope of a deployment.
:type scope: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param parameters: Parameters to validate.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.Deployment
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentValidateResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentValidateResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentValidateResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'Deployment')
request = build_deployments_validate_at_scope_request(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.validate_at_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if response.status_code == 400:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
validate_at_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}/validate"} # type: ignore
@distributed_trace_async
async def export_template_at_scope(
self,
scope: str,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExportResult":
"""Exports the template used for specified deployment.
:param scope: The scope of a deployment.
:type scope: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExportResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExportResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExportResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_export_template_at_scope_request(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.export_template_at_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExportResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
export_template_at_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}/exportTemplate"} # type: ignore
@distributed_trace
def list_at_scope(
self,
scope: str,
filter: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentListResult"]:
"""Get all the deployments at the given scope.
:param scope: The scope of a deployment.
:type scope: str
:param filter: The filter to apply on the operation. For example, you can use
$filter=provisioningState eq '{state}'. Default value is None.
:type filter: str
:param top: The number of results to get. If null is passed, returns all deployments. Default
value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DeploymentListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_deployments_list_at_scope_request(
scope=scope,
api_version=api_version,
filter=filter,
top=top,
template_url=self.list_at_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_deployments_list_at_scope_request(
scope=scope,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/"} # type: ignore
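    # Usage sketch (client variable is hypothetical); the AsyncItemPaged result is consumed lazily:
    #   async for d in client.deployments.list_at_scope(scope, filter="provisioningState eq 'Failed'", top=5):
    #       print(d.name)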
async def _delete_at_tenant_scope_initial( # pylint: disable=inconsistent-return-statements
self,
deployment_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_delete_at_tenant_scope_request_initial(
deployment_name=deployment_name,
api_version=api_version,
template_url=self._delete_at_tenant_scope_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_at_tenant_scope_initial.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def begin_delete_at_tenant_scope( # pylint: disable=inconsistent-return-statements
self,
deployment_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a deployment from the deployment history.
A template deployment that is currently running cannot be deleted. Deleting a template
deployment removes the associated deployment operations. This is an asynchronous operation that
returns a status of 202 until the template deployment is successfully deleted. The Location
response header contains the URI that is used to obtain the status of the process. While the
process is running, a call to the URI in the Location header returns a status of 202. When the
process finishes, the URI in the Location header returns a status of 204 on success. If the
asynchronous request failed, the URI in the Location header returns an error-level status code.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_at_tenant_scope_initial(
deployment_name=deployment_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_at_tenant_scope.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def check_existence_at_tenant_scope(
self,
deployment_name: str,
**kwargs: Any
) -> bool:
"""Checks whether the deployment exists.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_check_existence_at_tenant_scope_request(
deployment_name=deployment_name,
api_version=api_version,
template_url=self.check_existence_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence_at_tenant_scope.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
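    # Illustrative sketch (same hypothetical ``client`` as the delete example above):
    #
    #     exists = await client.deployments.check_existence_at_tenant_scope("my-deployment")
    #     # True when the service answers 204, False when it answers 404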
async def _create_or_update_at_tenant_scope_initial(
self,
deployment_name: str,
parameters: "_models.ScopedDeployment",
**kwargs: Any
) -> "_models.DeploymentExtended":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ScopedDeployment')
request = build_deployments_create_or_update_at_tenant_scope_request_initial(
deployment_name=deployment_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_at_tenant_scope_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_at_tenant_scope_initial.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def begin_create_or_update_at_tenant_scope(
self,
deployment_name: str,
parameters: "_models.ScopedDeployment",
**kwargs: Any
) -> AsyncLROPoller["_models.DeploymentExtended"]:
"""Deploys resources at tenant scope.
You can provide the template and parameters directly in the request or link to JSON files.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param parameters: Additional parameters supplied to the operation.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.ScopedDeployment
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DeploymentExtended or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExtended]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_at_tenant_scope_initial(
deployment_name=deployment_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update_at_tenant_scope.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
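    # Illustrative sketch of a tenant-scope deployment. The model and field names
    # (ScopedDeployment, DeploymentProperties, location, mode) are assumed from the
    # v2019_08_01 models package; ``template_body`` stands in for an ARM template dict.
    #
    #     from azure.mgmt.resource.resources.v2019_08_01 import models
    #
    #     deployment = models.ScopedDeployment(
    #         location="eastus",
    #         properties=models.DeploymentProperties(mode="Incremental", template=template_body),
    #     )
    #     poller = await client.deployments.begin_create_or_update_at_tenant_scope(
    #         "my-deployment", deployment)
    #     extended = await poller.result()  # DeploymentExtended on success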
@distributed_trace_async
async def get_at_tenant_scope(
self,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExtended":
"""Gets a deployment.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExtended, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExtended
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_get_at_tenant_scope_request(
deployment_name=deployment_name,
api_version=api_version,
template_url=self.get_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_tenant_scope.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def cancel_at_tenant_scope( # pylint: disable=inconsistent-return-statements
self,
deployment_name: str,
**kwargs: Any
) -> None:
"""Cancels a currently running template deployment.
You can cancel a deployment only if the provisioningState is Accepted or Running. After the
deployment is canceled, the provisioningState is set to Canceled. Canceling a template
deployment stops the currently running template deployment and leaves the resources partially
deployed.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_cancel_at_tenant_scope_request(
deployment_name=deployment_name,
api_version=api_version,
template_url=self.cancel_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
cancel_at_tenant_scope.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}/cancel"} # type: ignore
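    # Illustrative sketch: cancelling an in-flight tenant-scope deployment
    # (same hypothetical ``client`` as above); the call returns None on a 204.
    #
    #     await client.deployments.cancel_at_tenant_scope("my-deployment")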
@distributed_trace_async
async def validate_at_tenant_scope(
self,
deployment_name: str,
parameters: "_models.ScopedDeployment",
**kwargs: Any
) -> "_models.DeploymentValidateResult":
"""Validates whether the specified template is syntactically correct and will be accepted by Azure
        Resource Manager.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param parameters: Parameters to validate.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.ScopedDeployment
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentValidateResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentValidateResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentValidateResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ScopedDeployment')
request = build_deployments_validate_at_tenant_scope_request(
deployment_name=deployment_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.validate_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if response.status_code == 400:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
validate_at_tenant_scope.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}/validate"} # type: ignore
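    # Illustrative sketch: validation takes the same ScopedDeployment payload as the
    # create call; note the wiring above deserializes a 400 response into
    # DeploymentValidateResult instead of raising.
    #
    #     result = await client.deployments.validate_at_tenant_scope("my-deployment", deployment)
    #     if result.error:
    #         print(result.error.message)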
@distributed_trace_async
async def export_template_at_tenant_scope(
self,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExportResult":
"""Exports the template used for specified deployment.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExportResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExportResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExportResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_export_template_at_tenant_scope_request(
deployment_name=deployment_name,
api_version=api_version,
template_url=self.export_template_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExportResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
export_template_at_tenant_scope.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}/exportTemplate"} # type: ignore
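    # Illustrative sketch: exporting the template used for an existing deployment.
    #
    #     export = await client.deployments.export_template_at_tenant_scope("my-deployment")
    #     template_dict = export.template  # DeploymentExportResult.template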
@distributed_trace
def list_at_tenant_scope(
self,
filter: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentListResult"]:
"""Get all the deployments at the tenant scope.
:param filter: The filter to apply on the operation. For example, you can use
$filter=provisioningState eq '{state}'. Default value is None.
:type filter: str
:param top: The number of results to get. If null is passed, returns all deployments. Default
value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either DeploymentListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_deployments_list_at_tenant_scope_request(
api_version=api_version,
filter=filter,
top=top,
template_url=self.list_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_deployments_list_at_tenant_scope_request(
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_tenant_scope.metadata = {'url': "/providers/Microsoft.Resources/deployments/"} # type: ignore
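    # Illustrative sketch: the return value is an AsyncItemPaged, so consume it with
    # ``async for``; ``filter`` and ``top`` are passed straight through to the request.
    #
    #     async for dep in client.deployments.list_at_tenant_scope(top=10):
    #         print(dep.name, dep.properties.provisioning_state)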
async def _delete_at_management_group_scope_initial( # pylint: disable=inconsistent-return-statements
self,
group_id: str,
deployment_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_delete_at_management_group_scope_request_initial(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
template_url=self._delete_at_management_group_scope_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_at_management_group_scope_initial.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def begin_delete_at_management_group_scope( # pylint: disable=inconsistent-return-statements
self,
group_id: str,
deployment_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a deployment from the deployment history.
A template deployment that is currently running cannot be deleted. Deleting a template
deployment removes the associated deployment operations. This is an asynchronous operation that
returns a status of 202 until the template deployment is successfully deleted. The Location
response header contains the URI that is used to obtain the status of the process. While the
process is running, a call to the URI in the Location header returns a status of 202. When the
process finishes, the URI in the Location header returns a status of 204 on success. If the
asynchronous request failed, the URI in the Location header returns an error-level status code.
:param group_id: The management group ID.
:type group_id: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_at_management_group_scope_initial(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_at_management_group_scope.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def check_existence_at_management_group_scope(
self,
group_id: str,
deployment_name: str,
**kwargs: Any
) -> bool:
"""Checks whether the deployment exists.
:param group_id: The management group ID.
:type group_id: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_check_existence_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.check_existence_at_management_group_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence_at_management_group_scope.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
async def _create_or_update_at_management_group_scope_initial(
self,
group_id: str,
deployment_name: str,
parameters: "_models.ScopedDeployment",
**kwargs: Any
) -> "_models.DeploymentExtended":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ScopedDeployment')
request = build_deployments_create_or_update_at_management_group_scope_request_initial(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_at_management_group_scope_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_at_management_group_scope_initial.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def begin_create_or_update_at_management_group_scope(
self,
group_id: str,
deployment_name: str,
parameters: "_models.ScopedDeployment",
**kwargs: Any
) -> AsyncLROPoller["_models.DeploymentExtended"]:
"""Deploys resources at management group scope.
You can provide the template and parameters directly in the request or link to JSON files.
:param group_id: The management group ID.
:type group_id: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param parameters: Additional parameters supplied to the operation.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.ScopedDeployment
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DeploymentExtended or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExtended]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_at_management_group_scope_initial(
group_id=group_id,
deployment_name=deployment_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update_at_management_group_scope.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
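    # Illustrative sketch: the management-group variant takes the same ScopedDeployment
    # payload as the tenant-scope call, with the management group ID first.
    #
    #     poller = await client.deployments.begin_create_or_update_at_management_group_scope(
    #         "my-management-group", "my-deployment", deployment)
    #     extended = await poller.result()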
@distributed_trace_async
async def get_at_management_group_scope(
self,
group_id: str,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExtended":
"""Gets a deployment.
:param group_id: The management group ID.
:type group_id: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExtended, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExtended
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_get_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.get_at_management_group_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_management_group_scope.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def cancel_at_management_group_scope( # pylint: disable=inconsistent-return-statements
self,
group_id: str,
deployment_name: str,
**kwargs: Any
) -> None:
"""Cancels a currently running template deployment.
You can cancel a deployment only if the provisioningState is Accepted or Running. After the
deployment is canceled, the provisioningState is set to Canceled. Canceling a template
deployment stops the currently running template deployment and leaves the resources partially
deployed.
:param group_id: The management group ID.
:type group_id: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_cancel_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.cancel_at_management_group_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
cancel_at_management_group_scope.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}/cancel"} # type: ignore
@distributed_trace_async
async def validate_at_management_group_scope(
self,
group_id: str,
deployment_name: str,
parameters: "_models.ScopedDeployment",
**kwargs: Any
) -> "_models.DeploymentValidateResult":
"""Validates whether the specified template is syntactically correct and will be accepted by Azure
        Resource Manager.
:param group_id: The management group ID.
:type group_id: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param parameters: Parameters to validate.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.ScopedDeployment
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentValidateResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentValidateResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentValidateResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ScopedDeployment')
request = build_deployments_validate_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.validate_at_management_group_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if response.status_code == 400:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
validate_at_management_group_scope.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}/validate"} # type: ignore
@distributed_trace_async
async def export_template_at_management_group_scope(
self,
group_id: str,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExportResult":
"""Exports the template used for specified deployment.
:param group_id: The management group ID.
:type group_id: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExportResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExportResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExportResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_export_template_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.export_template_at_management_group_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExportResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
export_template_at_management_group_scope.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}/exportTemplate"} # type: ignore
@distributed_trace
def list_at_management_group_scope(
self,
group_id: str,
filter: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentListResult"]:
"""Get all the deployments for a management group.
:param group_id: The management group ID.
:type group_id: str
:param filter: The filter to apply on the operation. For example, you can use
$filter=provisioningState eq '{state}'. Default value is None.
:type filter: str
:param top: The number of results to get. If null is passed, returns all deployments. Default
value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either DeploymentListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_deployments_list_at_management_group_scope_request(
group_id=group_id,
api_version=api_version,
filter=filter,
top=top,
template_url=self.list_at_management_group_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_deployments_list_at_management_group_scope_request(
group_id=group_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_management_group_scope.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/"} # type: ignore
async def _delete_at_subscription_scope_initial( # pylint: disable=inconsistent-return-statements
self,
deployment_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_delete_at_subscription_scope_request_initial(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_at_subscription_scope_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_at_subscription_scope_initial.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def begin_delete_at_subscription_scope( # pylint: disable=inconsistent-return-statements
self,
deployment_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a deployment from the deployment history.
A template deployment that is currently running cannot be deleted. Deleting a template
deployment removes the associated deployment operations. This is an asynchronous operation that
returns a status of 202 until the template deployment is successfully deleted. The Location
response header contains the URI that is used to obtain the status of the process. While the
process is running, a call to the URI in the Location header returns a status of 202. When the
process finishes, the URI in the Location header returns a status of 204 on success. If the
asynchronous request failed, the URI in the Location header returns an error-level status code.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_at_subscription_scope_initial(
deployment_name=deployment_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def check_existence_at_subscription_scope(
self,
deployment_name: str,
**kwargs: Any
) -> bool:
"""Checks whether the deployment exists.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_check_existence_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.check_existence_at_subscription_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
async def _create_or_update_at_subscription_scope_initial(
self,
deployment_name: str,
parameters: "_models.Deployment",
**kwargs: Any
) -> "_models.DeploymentExtended":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'Deployment')
request = build_deployments_create_or_update_at_subscription_scope_request_initial(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_at_subscription_scope_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_at_subscription_scope_initial.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def begin_create_or_update_at_subscription_scope(
self,
deployment_name: str,
parameters: "_models.Deployment",
**kwargs: Any
) -> AsyncLROPoller["_models.DeploymentExtended"]:
"""Deploys resources at subscription scope.
You can provide the template and parameters directly in the request or link to JSON files.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param parameters: Additional parameters supplied to the operation.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.Deployment
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DeploymentExtended or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExtended]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_at_subscription_scope_initial(
deployment_name=deployment_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
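    # Illustrative sketch: subscription-scope deployments use the plain Deployment model
    # rather than ScopedDeployment; the subscription ID comes from the client
    # configuration, so only the deployment name and payload are passed here.
    #
    #     sub_deployment = models.Deployment(
    #         properties=models.DeploymentProperties(mode="Incremental", template=template_body),
    #     )
    #     poller = await client.deployments.begin_create_or_update_at_subscription_scope(
    #         "my-deployment", sub_deployment)
    #     extended = await poller.result()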
@distributed_trace_async
async def get_at_subscription_scope(
self,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExtended":
"""Gets a deployment.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExtended, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExtended
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_get_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_at_subscription_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def cancel_at_subscription_scope( # pylint: disable=inconsistent-return-statements
self,
deployment_name: str,
**kwargs: Any
) -> None:
"""Cancels a currently running template deployment.
You can cancel a deployment only if the provisioningState is Accepted or Running. After the
deployment is canceled, the provisioningState is set to Canceled. Canceling a template
deployment stops the currently running template deployment and leaves the resources partially
deployed.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_cancel_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.cancel_at_subscription_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
cancel_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/cancel"} # type: ignore
@distributed_trace_async
async def validate_at_subscription_scope(
self,
deployment_name: str,
parameters: "_models.Deployment",
**kwargs: Any
) -> "_models.DeploymentValidateResult":
"""Validates whether the specified template is syntactically correct and will be accepted by Azure
        Resource Manager.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param parameters: Parameters to validate.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.Deployment
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentValidateResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentValidateResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentValidateResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'Deployment')
request = build_deployments_validate_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.validate_at_subscription_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if response.status_code == 400:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
validate_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/validate"} # type: ignore
async def _what_if_at_subscription_scope_initial(
self,
deployment_name: str,
parameters: "_models.DeploymentWhatIf",
**kwargs: Any
) -> Optional["_models.WhatIfOperationResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.WhatIfOperationResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'DeploymentWhatIf')
request = build_deployments_what_if_at_subscription_scope_request_initial(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._what_if_at_subscription_scope_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize('WhatIfOperationResult', pipeline_response)
if response.status_code == 202:
response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
response_headers['Retry-After']=self._deserialize('str', response.headers.get('Retry-After'))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_what_if_at_subscription_scope_initial.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/whatIf"} # type: ignore
@distributed_trace_async
async def begin_what_if_at_subscription_scope(
self,
deployment_name: str,
parameters: "_models.DeploymentWhatIf",
**kwargs: Any
) -> AsyncLROPoller["_models.WhatIfOperationResult"]:
"""Returns changes that will be made by the deployment if executed at the scope of the
subscription.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param parameters: Parameters for the What If operation.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentWhatIf
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either WhatIfOperationResult or the result
of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_08_01.models.WhatIfOperationResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.WhatIfOperationResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._what_if_at_subscription_scope_initial(
deployment_name=deployment_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('WhatIfOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_what_if_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/whatIf"} # type: ignore
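# Hedged usage sketch: driving the subscription-scope what-if LRO with the same
# assumed `client`; reading WhatIfOperationResult.changes follows the
# v2019_08_01 models.
#
#     poller = await client.deployments.begin_what_if_at_subscription_scope(
#         deployment_name="sample-deployment",
#         parameters={"location": "westus", "properties": {"mode": "Incremental", "template": {}}},
#     )
#     what_if = await poller.result()
#     for change in what_if.changes:
#         print(change.resource_id, change.change_type)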
@distributed_trace_async
async def export_template_at_subscription_scope(
self,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExportResult":
"""Exports the template used for specified deployment.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExportResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExportResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExportResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_export_template_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.export_template_at_subscription_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExportResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
export_template_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/exportTemplate"} # type: ignore
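# Hedged usage sketch: exporting the template of an existing subscription-scope
# deployment with the same assumed `client`; the deployment name is a placeholder.
#
#     exported = await client.deployments.export_template_at_subscription_scope("sample-deployment")
#     print(exported.template)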
@distributed_trace
def list_at_subscription_scope(
self,
filter: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentListResult"]:
"""Get all the deployments for a subscription.
:param filter: The filter to apply on the operation. For example, you can use
$filter=provisioningState eq '{state}'. Default value is None.
:type filter: str
:param top: The number of results to get. If null is passed, returns all deployments. Default
value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DeploymentListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_deployments_list_at_subscription_scope_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
filter=filter,
top=top,
template_url=self.list_at_subscription_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_deployments_list_at_subscription_scope_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/"} # type: ignore
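# Hedged usage sketch: paging through subscription-scope deployments with the
# assumed `client`. The method returns an AsyncItemPaged, so iteration uses
# `async for`; the filter value is a placeholder.
#
#     async for deployment in client.deployments.list_at_subscription_scope(
#         filter="provisioningState eq 'Succeeded'", top=10
#     ):
#         print(deployment.name)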
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
deployment_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_delete_request_initial(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def begin_delete( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
deployment_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a deployment from the deployment history.
A template deployment that is currently running cannot be deleted. Deleting a template
deployment removes the associated deployment operations. Deleting a template deployment does
not affect the state of the resource group. This is an asynchronous operation that returns a
status of 202 until the template deployment is successfully deleted. The Location response
header contains the URI that is used to obtain the status of the process. While the process is
running, a call to the URI in the Location header returns a status of 202. When the process
finishes, the URI in the Location header returns a status of 204 on success. If the
asynchronous request failed, the URI in the Location header returns an error-level status code.
:param resource_group_name: The name of the resource group with the deployment to delete. The
name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
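# Hedged usage sketch: deleting a deployment history entry with the assumed
# `client`; resource group and deployment names are placeholders.
#
#     poller = await client.deployments.begin_delete("sample-rg", "sample-deployment")
#     await poller.result()  # resolves to None once the service reports completion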
@distributed_trace_async
async def check_existence(
self,
resource_group_name: str,
deployment_name: str,
**kwargs: Any
) -> bool:
"""Checks whether the deployment exists.
:param resource_group_name: The name of the resource group with the deployment to check. The
name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_check_existence_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.check_existence.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
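# Hedged usage sketch: existence check with the assumed `client`; the method
# maps the 204/404 status codes to a bool.
#
#     exists = await client.deployments.check_existence("sample-rg", "sample-deployment")
#     print("deployment exists" if exists else "deployment not found")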
async def _create_or_update_initial(
self,
resource_group_name: str,
deployment_name: str,
parameters: "_models.Deployment",
**kwargs: Any
) -> "_models.DeploymentExtended":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'Deployment')
request = build_deployments_create_or_update_request_initial(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
deployment_name: str,
parameters: "_models.Deployment",
**kwargs: Any
) -> AsyncLROPoller["_models.DeploymentExtended"]:
"""Deploys resources to a resource group.
You can provide the template and parameters directly in the request or link to JSON files.
:param resource_group_name: The name of the resource group to deploy the resources to. The name
is case insensitive. The resource group must already exist.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param parameters: Additional parameters supplied to the operation.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.Deployment
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DeploymentExtended or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExtended]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
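# Hedged usage sketch: creating or updating a resource-group deployment with the
# assumed `client`. `template_body` stands in for a template dict loaded from a
# JSON file; resource group and deployment names are placeholders.
#
#     poller = await client.deployments.begin_create_or_update(
#         "sample-rg",
#         "sample-deployment",
#         {
#             "properties": {
#                 "mode": "Incremental",
#                 "template": template_body,
#                 "parameters": {},
#             }
#         },
#     )
#     deployment = await poller.result()
#     print(deployment.properties.provisioning_state)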
@distributed_trace_async
async def get(
self,
resource_group_name: str,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExtended":
"""Gets a deployment.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExtended, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExtended
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_get_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def cancel( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
deployment_name: str,
**kwargs: Any
) -> None:
"""Cancels a currently running template deployment.
You can cancel a deployment only if the provisioningState is Accepted or Running. After the
deployment is canceled, the provisioningState is set to Canceled. Canceling a template
deployment stops the currently running template deployment and leaves the resource group
partially deployed.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_cancel_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.cancel.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
cancel.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/cancel"} # type: ignore
@distributed_trace_async
async def validate(
self,
resource_group_name: str,
deployment_name: str,
parameters: "_models.Deployment",
**kwargs: Any
) -> "_models.DeploymentValidateResult":
"""Validates whether the specified template is syntactically correct and will be accepted by Azure
Resource Manager..
:param resource_group_name: The name of the resource group the template will be deployed to.
The name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param parameters: Parameters to validate.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.Deployment
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentValidateResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentValidateResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentValidateResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'Deployment')
request = build_deployments_validate_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.validate.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if response.status_code == 400:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
validate.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/validate"} # type: ignore
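# Hedged usage sketch: validating a resource-group deployment with the assumed
# `client`; unlike the subscription-scope call, no top-level location is needed.
#
#     result = await client.deployments.validate(
#         "sample-rg",
#         "sample-deployment",
#         {"properties": {"mode": "Incremental", "template": {}, "parameters": {}}},
#     )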
async def _what_if_initial(
self,
resource_group_name: str,
deployment_name: str,
parameters: "_models.DeploymentWhatIf",
**kwargs: Any
) -> Optional["_models.WhatIfOperationResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.WhatIfOperationResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'DeploymentWhatIf')
request = build_deployments_what_if_request_initial(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._what_if_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize('WhatIfOperationResult', pipeline_response)
if response.status_code == 202:
response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
response_headers['Retry-After']=self._deserialize('str', response.headers.get('Retry-After'))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_what_if_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/whatIf"} # type: ignore
@distributed_trace_async
async def begin_what_if(
self,
resource_group_name: str,
deployment_name: str,
parameters: "_models.DeploymentWhatIf",
**kwargs: Any
) -> AsyncLROPoller["_models.WhatIfOperationResult"]:
"""Returns changes that will be made by the deployment if executed at the scope of the resource
group.
:param resource_group_name: The name of the resource group the template will be deployed to.
The name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param parameters: Parameters for the What If operation.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentWhatIf
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either WhatIfOperationResult or the result
of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_08_01.models.WhatIfOperationResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.WhatIfOperationResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._what_if_initial(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('WhatIfOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_what_if.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/whatIf"} # type: ignore
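# Hedged usage sketch: resource-group what-if with the assumed `client`; the
# template body is a placeholder dict.
#
#     poller = await client.deployments.begin_what_if(
#         "sample-rg",
#         "sample-deployment",
#         {"properties": {"mode": "Incremental", "template": {}}},
#     )
#     what_if = await poller.result()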
@distributed_trace_async
async def export_template(
self,
resource_group_name: str,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExportResult":
"""Exports the template used for specified deployment.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExportResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExportResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExportResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_export_template_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.export_template.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExportResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
export_template.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/exportTemplate"} # type: ignore
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
filter: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentListResult"]:
"""Get all the deployments for a resource group.
:param resource_group_name: The name of the resource group with the deployments to get. The
name is case insensitive.
:type resource_group_name: str
:param filter: The filter to apply on the operation. For example, you can use
$filter=provisioningState eq '{state}'. Default value is None.
:type filter: str
:param top: The number of results to get. If null is passed, returns all deployments. Default
value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DeploymentListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_deployments_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
filter=filter,
top=top,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_deployments_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/"} # type: ignore
@distributed_trace_async
async def calculate_template_hash(
self,
template: Any,
**kwargs: Any
) -> "_models.TemplateHashResult":
"""Calculate the hash of the given template.
:param template: The template for which to calculate the hash.
:type template: any
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TemplateHashResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.TemplateHashResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TemplateHashResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(template, 'object')
request = build_deployments_calculate_template_hash_request(
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.calculate_template_hash.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('TemplateHashResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
calculate_template_hash.metadata = {'url': "/providers/Microsoft.Resources/calculateTemplateHash"} # type: ignore
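# Hedged usage sketch: hashing an in-memory template with the assumed `client`;
# reading TemplateHashResult.template_hash follows the v2019_08_01 models.
#
#     hash_result = await client.deployments.calculate_template_hash(
#         {"$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#",
#          "contentVersion": "1.0.0.0", "resources": []}
#     )
#     print(hash_result.template_hash)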
class ProvidersOperations:
"""ProvidersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.resources.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def unregister(
self,
resource_provider_namespace: str,
**kwargs: Any
) -> "_models.Provider":
"""Unregisters a subscription from a resource provider.
:param resource_provider_namespace: The namespace of the resource provider to unregister.
:type resource_provider_namespace: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Provider, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.Provider
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Provider"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_providers_unregister_request(
resource_provider_namespace=resource_provider_namespace,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.unregister.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Provider', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
unregister.metadata = {'url': "/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/unregister"} # type: ignore
@distributed_trace_async
async def register(
self,
resource_provider_namespace: str,
**kwargs: Any
) -> "_models.Provider":
"""Registers a subscription with a resource provider.
:param resource_provider_namespace: The namespace of the resource provider to register.
:type resource_provider_namespace: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Provider, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.Provider
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Provider"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_providers_register_request(
resource_provider_namespace=resource_provider_namespace,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.register.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Provider', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
register.metadata = {'url': "/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register"} # type: ignore
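# Hedged usage sketch: registering a provider namespace with the assumed
# `client`; the namespace is a placeholder, and registration completes
# asynchronously on the service side, so the returned registration_state may
# still read "Registering".
#
#     provider = await client.providers.register("Microsoft.Network")
#     print(provider.registration_state)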
@distributed_trace
def list(
self,
top: Optional[int] = None,
expand: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.ProviderListResult"]:
"""Gets all resource providers for a subscription.
:param top: The number of results to return. If null is passed, returns all resource providers. Default
value is None.
:type top: int
:param expand: The properties to include in the results. For example, use &$expand=metadata in
the query string to retrieve resource provider metadata. To include property aliases in
response, use $expand=resourceTypes/aliases. Default value is None.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ProviderListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.ProviderListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.ProviderListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_providers_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
top=top,
expand=expand,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_providers_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ProviderListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/subscriptions/{subscriptionId}/providers"} # type: ignore
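# Hedged usage sketch: listing providers for the subscription with the assumed
# `client`, requesting provider metadata via the expand parameter.
#
#     async for provider in client.providers.list(expand="metadata"):
#         print(provider.namespace, provider.registration_state)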
@distributed_trace
def list_at_tenant_scope(
self,
top: Optional[int] = None,
expand: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.ProviderListResult"]:
"""Gets all resource providers for the tenant.
:param top: The number of results to return. If null is passed, returns all providers. Default
value is None.
:type top: int
:param expand: The properties to include in the results. For example, use &$expand=metadata in
the query string to retrieve resource provider metadata. To include property aliases in
response, use $expand=resourceTypes/aliases. Default value is None.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ProviderListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.ProviderListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.ProviderListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_providers_list_at_tenant_scope_request(
api_version=api_version,
top=top,
expand=expand,
template_url=self.list_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_providers_list_at_tenant_scope_request(
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ProviderListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_tenant_scope.metadata = {'url': "/providers"} # type: ignore
@distributed_trace_async
async def get(
self,
resource_provider_namespace: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.Provider":
"""Gets the specified resource provider.
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param expand: The $expand query parameter. For example, to include property aliases in
response, use $expand=resourceTypes/aliases. Default value is None.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Provider, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.Provider
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Provider"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_providers_get_request(
resource_provider_namespace=resource_provider_namespace,
subscription_id=self._config.subscription_id,
api_version=api_version,
expand=expand,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Provider', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}"} # type: ignore
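# Hedged usage sketch: fetching a single provider with alias information using
# the assumed `client`; the namespace is a placeholder and the resource_types
# attribute is read per the v2019_08_01 Provider model.
#
#     provider = await client.providers.get("Microsoft.Compute", expand="resourceTypes/aliases")
#     for resource_type in provider.resource_types:
#         print(resource_type.resource_type)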
@distributed_trace_async
async def get_at_tenant_scope(
self,
resource_provider_namespace: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.Provider":
"""Gets the specified resource provider at the tenant level.
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param expand: The $expand query parameter. For example, to include property aliases in
response, use $expand=resourceTypes/aliases. Default value is None.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Provider, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.Provider
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Provider"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_providers_get_at_tenant_scope_request(
resource_provider_namespace=resource_provider_namespace,
api_version=api_version,
expand=expand,
template_url=self.get_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Provider', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_tenant_scope.metadata = {'url': "/providers/{resourceProviderNamespace}"} # type: ignore
class ResourcesOperations: # pylint: disable=too-many-public-methods
"""ResourcesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.resources.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
filter: Optional[str] = None,
expand: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.ResourceListResult"]:
"""Get all the resources for a resource group.
:param resource_group_name: The resource group with the resources to get.
:type resource_group_name: str
:param filter: The filter to apply on the operation.:code:`<br>`:code:`<br>`The properties you
can use for eq (equals) or ne (not equals) are: location, resourceType, name, resourceGroup,
identity, identity/principalId, plan, plan/publisher, plan/product, plan/name, plan/version,
and plan/promotionCode.:code:`<br>`:code:`<br>`For example, to filter by a resource type, use:
$filter=resourceType eq 'Microsoft.Network/virtualNetworks':code:`<br>`:code:`<br>`You can use
substringof(value, property) in the filter. The properties you can use for substring are: name
and resourceGroup.:code:`<br>`:code:`<br>`For example, to get all resources with 'demo'
anywhere in the name, use: $filter=substringof('demo', name):code:`<br>`:code:`<br>`You can
link more than one substringof together by adding and/or operators.:code:`<br>`:code:`<br>`You
can filter by tag names and values. For example, to filter for a tag name and value, use
$filter=tagName eq 'tag1' and tagValue eq 'Value1'. When you filter by a tag name and value,
the tags for each resource are not returned in the results.:code:`<br>`:code:`<br>`You can use
some properties together when filtering. The combinations you can use are: substringof and/or
resourceType, plan and plan/publisher and plan/name, identity and identity/principalId. Default
value is None.
:type filter: str
:param expand: Comma-separated list of additional properties to be included in the response.
Valid values include ``createdTime``\ , ``changedTime`` and ``provisioningState``. For example,
``$expand=createdTime,changedTime``. Default value is None.
:type expand: str
:param top: The number of results to return. If null is passed, returns all resources. Default
value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.ResourceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_resources_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
filter=filter,
expand=expand,
top=top,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_resources_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ResourceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/resources"} # type: ignore
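# Usage sketch (illustrative only, not part of the generated client): the pager returned by
# list_by_resource_group above is consumed with ``async for``. This assumes the usual async
# ResourceManagementClient / DefaultAzureCredential wiring from azure-identity; module paths can
# vary slightly across package versions, and the subscription ID and group name are placeholders.
#
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.resource.resources.v2019_08_01.aio import ResourceManagementClient
#
#     async def list_vnets_in_group():
#         async with DefaultAzureCredential() as credential:
#             async with ResourceManagementClient(credential, "<subscription-id>") as client:
#                 async for resource in client.resources.list_by_resource_group(
#                     "my-resource-group",
#                     filter="resourceType eq 'Microsoft.Network/virtualNetworks'",
#                     expand="createdTime,changedTime",
#                 ):
#                     print(resource.name, resource.type)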
async def _move_resources_initial( # pylint: disable=inconsistent-return-statements
self,
source_resource_group_name: str,
parameters: "_models.ResourcesMoveInfo",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ResourcesMoveInfo')
request = build_resources_move_resources_request_initial(
source_resource_group_name=source_resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._move_resources_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_move_resources_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/moveResources"} # type: ignore
@distributed_trace_async
async def begin_move_resources( # pylint: disable=inconsistent-return-statements
self,
source_resource_group_name: str,
parameters: "_models.ResourcesMoveInfo",
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Moves resources from one resource group to another resource group.
The resources to move must be in the same source resource group. The target resource group may
be in a different subscription. When moving resources, both the source group and the target
group are locked for the duration of the operation. Write and delete operations are blocked on
the groups until the move completes.
:param source_resource_group_name: The name of the resource group containing the resources to
move.
:type source_resource_group_name: str
:param parameters: Parameters for moving resources.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.ResourcesMoveInfo
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._move_resources_initial(
source_resource_group_name=source_resource_group_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_move_resources.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/moveResources"} # type: ignore
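# Usage sketch (illustrative, with placeholder names and IDs): begin_move_resources starts a
# long-running operation; awaiting the AsyncLROPoller's result() waits for the move to finish.
# ``client`` is assumed to be a ResourceManagementClient set up as in the listing example above.
#
#     from azure.mgmt.resource.resources.v2019_08_01.models import ResourcesMoveInfo
#
#     async def move_resources(client):
#         move_info = ResourcesMoveInfo(
#             resources=[
#                 "/subscriptions/<subscription-id>/resourceGroups/src-rg"
#                 "/providers/Microsoft.Storage/storageAccounts/mystorageaccount"
#             ],
#             target_resource_group="/subscriptions/<subscription-id>/resourceGroups/target-rg",
#         )
#         poller = await client.resources.begin_move_resources("src-rg", move_info)
#         await poller.result()  # raises HttpResponseError if the move fails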
async def _validate_move_resources_initial( # pylint: disable=inconsistent-return-statements
self,
source_resource_group_name: str,
parameters: "_models.ResourcesMoveInfo",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ResourcesMoveInfo')
request = build_resources_validate_move_resources_request_initial(
source_resource_group_name=source_resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._validate_move_resources_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_validate_move_resources_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/validateMoveResources"} # type: ignore
@distributed_trace_async
async def begin_validate_move_resources( # pylint: disable=inconsistent-return-statements
self,
source_resource_group_name: str,
parameters: "_models.ResourcesMoveInfo",
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Validates whether resources can be moved from one resource group to another resource group.
This operation checks whether the specified resources can be moved to the target. The resources
to move must be in the same source resource group. The target resource group may be in a
different subscription. If validation succeeds, it returns HTTP response code 204 (no content).
If validation fails, it returns HTTP response code 409 (Conflict) with an error message.
Retrieve the URL in the Location header value to check the result of the long-running
operation.
:param source_resource_group_name: The name of the resource group containing the resources to
validate for move.
:type source_resource_group_name: str
:param parameters: Parameters for moving resources.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.ResourcesMoveInfo
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._validate_move_resources_initial(
source_resource_group_name=source_resource_group_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_validate_move_resources.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/validateMoveResources"} # type: ignore
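# Usage sketch (illustrative): validation has the same shape as the move itself; a failed
# validation surfaces as an HttpResponseError (409) when the poller result is awaited.
# ``client`` and ``move_info`` are assumed to be set up as in the move example above.
#
#     from azure.core.exceptions import HttpResponseError
#
#     async def can_move(client, move_info) -> bool:
#         poller = await client.resources.begin_validate_move_resources("src-rg", move_info)
#         try:
#             await poller.result()
#             return True
#         except HttpResponseError:
#             return False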
@distributed_trace
def list(
self,
filter: Optional[str] = None,
expand: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.ResourceListResult"]:
"""Get all the resources in a subscription.
:param filter: The filter to apply on the operation.:code:`<br>`:code:`<br>`The properties you
can use for eq (equals) or ne (not equals) are: location, resourceType, name, resourceGroup,
identity, identity/principalId, plan, plan/publisher, plan/product, plan/name, plan/version,
and plan/promotionCode.:code:`<br>`:code:`<br>`For example, to filter by a resource type, use:
$filter=resourceType eq 'Microsoft.Network/virtualNetworks':code:`<br>`:code:`<br>`You can use
substringof(value, property) in the filter. The properties you can use for substring are: name
and resourceGroup.:code:`<br>`:code:`<br>`For example, to get all resources with 'demo'
anywhere in the name, use: $filter=substringof('demo', name):code:`<br>`:code:`<br>`You can
link more than one substringof together by adding and/or operators.:code:`<br>`:code:`<br>`You
can filter by tag names and values. For example, to filter for a tag name and value, use
$filter=tagName eq 'tag1' and tagValue eq 'Value1'. When you filter by a tag name and value,
the tags for each resource are not returned in the results.:code:`<br>`:code:`<br>`You can use
some properties together when filtering. The combinations you can use are: substringof and/or
resourceType, plan and plan/publisher and plan/name, identity and identity/principalId. Default
value is None.
:type filter: str
:param expand: Comma-separated list of additional properties to be included in the response.
Valid values include ``createdTime``\ , ``changedTime`` and ``provisioningState``. For example,
``$expand=createdTime,changedTime``. Default value is None.
:type expand: str
:param top: The number of results to return. If None is passed, all resources are returned. Default
value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.ResourceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_resources_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
filter=filter,
expand=expand,
top=top,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_resources_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ResourceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/subscriptions/{subscriptionId}/resources"} # type: ignore
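# Usage sketch (illustrative): the subscription-wide list supports the same $filter grammar
# documented above, for example filtering on a tag name/value pair. ``client`` is an assumed
# ResourceManagementClient instance; the tag name and value are placeholders.
#
#     async def list_tagged(client):
#         async for resource in client.resources.list(
#             filter="tagName eq 'environment' and tagValue eq 'production'",
#             top=50,
#         ):
#             print(resource.id)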
@distributed_trace_async
async def check_existence(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
**kwargs: Any
) -> bool:
"""Checks whether a resource exists.
:param resource_group_name: The name of the resource group containing the resource to check.
The name is case insensitive.
:type resource_group_name: str
:param resource_provider_namespace: The resource provider of the resource to check.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity.
:type parent_resource_path: str
:param resource_type: The resource type.
:type resource_type: str
:param resource_name: The name of the resource to check whether it exists.
:type resource_name: str
:param api_version: The API version to use for the operation.
:type api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_resources_check_existence_request(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.check_existence.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"} # type: ignore
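# Usage sketch (illustrative): check_existence returns a bool derived from the 204/404 response.
# For a top-level resource the parent_resource_path is an empty string; the names below are
# placeholders and the api_version must be one supported by the resource provider.
#
#     async def storage_account_exists(client) -> bool:
#         return await client.resources.check_existence(
#             resource_group_name="my-resource-group",
#             resource_provider_namespace="Microsoft.Storage",
#             parent_resource_path="",
#             resource_type="storageAccounts",
#             resource_name="mystorageaccount",
#             api_version="2019-06-01",
#         )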
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_resources_delete_request_initial(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"} # type: ignore
@distributed_trace_async
async def begin_delete( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a resource.
:param resource_group_name: The name of the resource group that contains the resource to
delete. The name is case insensitive.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity.
:type parent_resource_path: str
:param resource_type: The resource type.
:type resource_type: str
:param resource_name: The name of the resource to delete.
:type resource_name: str
:param api_version: The API version to use for the operation.
:type api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"} # type: ignore
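# Usage sketch (illustrative): begin_delete returns an AsyncLROPoller[None]; awaiting result()
# simply waits until the deletion completes (or raises). All names are placeholders.
#
#     async def delete_storage_account(client):
#         poller = await client.resources.begin_delete(
#             resource_group_name="my-resource-group",
#             resource_provider_namespace="Microsoft.Storage",
#             parent_resource_path="",
#             resource_type="storageAccounts",
#             resource_name="mystorageaccount",
#             api_version="2019-06-01",
#         )
#         await poller.result()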
async def _create_or_update_initial(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
parameters: "_models.GenericResource",
**kwargs: Any
) -> Optional["_models.GenericResource"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.GenericResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'GenericResource')
request = build_resources_create_or_update_request_initial(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
api_version=api_version,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GenericResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
parameters: "_models.GenericResource",
**kwargs: Any
) -> AsyncLROPoller["_models.GenericResource"]:
"""Creates a resource.
:param resource_group_name: The name of the resource group for the resource. The name is case
insensitive.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity.
:type parent_resource_path: str
:param resource_type: The resource type of the resource to create.
:type resource_type: str
:param resource_name: The name of the resource to create.
:type resource_name: str
:param api_version: The API version to use for the operation.
:type api_version: str
:param parameters: Parameters for creating or updating the resource.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.GenericResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GenericResource or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_08_01.models.GenericResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GenericResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
api_version=api_version,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"} # type: ignore
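# Usage sketch (illustrative): the request body is a GenericResource; only the fields relevant
# to the target provider need to be populated. The names, location, and SKU below are
# placeholders, and the api_version must match one supported by the provider.
#
#     from azure.mgmt.resource.resources.v2019_08_01.models import GenericResource, Sku
#
#     async def create_storage_account(client):
#         poller = await client.resources.begin_create_or_update(
#             resource_group_name="my-resource-group",
#             resource_provider_namespace="Microsoft.Storage",
#             parent_resource_path="",
#             resource_type="storageAccounts",
#             resource_name="mystorageaccount",
#             api_version="2019-06-01",
#             parameters=GenericResource(
#                 location="eastus",
#                 sku=Sku(name="Standard_LRS"),
#                 kind="StorageV2",
#                 properties={},
#             ),
#         )
#         return await poller.result()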
async def _update_initial(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
parameters: "_models.GenericResource",
**kwargs: Any
) -> Optional["_models.GenericResource"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.GenericResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'GenericResource')
request = build_resources_update_request_initial(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
api_version=api_version,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"} # type: ignore
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
parameters: "_models.GenericResource",
**kwargs: Any
) -> AsyncLROPoller["_models.GenericResource"]:
"""Updates a resource.
:param resource_group_name: The name of the resource group for the resource. The name is case
insensitive.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity.
:type parent_resource_path: str
:param resource_type: The resource type of the resource to update.
:type resource_type: str
:param resource_name: The name of the resource to update.
:type resource_name: str
:param api_version: The API version to use for the operation.
:type api_version: str
:param parameters: Parameters for updating the resource.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.GenericResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GenericResource or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_08_01.models.GenericResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GenericResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
api_version=api_version,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"} # type: ignore
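# Usage sketch (illustrative): begin_update performs a PATCH-style update, so only the fields
# being changed need to be supplied in the GenericResource body (here, tags). Placeholder names.
#
#     from azure.mgmt.resource.resources.v2019_08_01.models import GenericResource
#
#     async def retag_storage_account(client):
#         poller = await client.resources.begin_update(
#             resource_group_name="my-resource-group",
#             resource_provider_namespace="Microsoft.Storage",
#             parent_resource_path="",
#             resource_type="storageAccounts",
#             resource_name="mystorageaccount",
#             api_version="2019-06-01",
#             parameters=GenericResource(tags={"environment": "production"}),
#         )
#         return await poller.result()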
@distributed_trace_async
async def get(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
**kwargs: Any
) -> "_models.GenericResource":
"""Gets a resource.
:param resource_group_name: The name of the resource group containing the resource to get. The
name is case insensitive.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity.
:type parent_resource_path: str
:param resource_type: The resource type of the resource.
:type resource_type: str
:param resource_name: The name of the resource to get.
:type resource_name: str
:param api_version: The API version to use for the operation.
:type api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GenericResource, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.GenericResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GenericResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_resources_get_request(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"} # type: ignore
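# Usage sketch (illustrative): get returns the deserialized GenericResource directly, with no
# poller involved. Placeholder names; the api_version must be valid for the provider.
#
#     async def show_storage_account(client):
#         resource = await client.resources.get(
#             resource_group_name="my-resource-group",
#             resource_provider_namespace="Microsoft.Storage",
#             parent_resource_path="",
#             resource_type="storageAccounts",
#             resource_name="mystorageaccount",
#             api_version="2019-06-01",
#         )
#         print(resource.id, resource.location, resource.tags)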
@distributed_trace_async
async def check_existence_by_id(
self,
resource_id: str,
api_version: str,
**kwargs: Any
) -> bool:
"""Checks by ID whether a resource exists.
:param resource_id: The fully qualified ID of the resource, including the resource name and
resource type. Use the format,
/subscriptions/{guid}/resourceGroups/{resource-group-name}/{resource-provider-namespace}/{resource-type}/{resource-name}.
:type resource_id: str
:param api_version: The API version to use for the operation.
:type api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_resources_check_existence_by_id_request(
resource_id=resource_id,
api_version=api_version,
template_url=self.check_existence_by_id.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence_by_id.metadata = {'url': "/{resourceId}"} # type: ignore
async def _delete_by_id_initial( # pylint: disable=inconsistent-return-statements
self,
resource_id: str,
api_version: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_resources_delete_by_id_request_initial(
resource_id=resource_id,
api_version=api_version,
template_url=self._delete_by_id_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_by_id_initial.metadata = {'url': "/{resourceId}"} # type: ignore
@distributed_trace_async
async def begin_delete_by_id( # pylint: disable=inconsistent-return-statements
self,
resource_id: str,
api_version: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a resource by ID.
:param resource_id: The fully qualified ID of the resource, including the resource name and
resource type. Use the format,
/subscriptions/{guid}/resourceGroups/{resource-group-name}/{resource-provider-namespace}/{resource-type}/{resource-name}.
:type resource_id: str
:param api_version: The API version to use for the operation.
:type api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_by_id_initial(
resource_id=resource_id,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_by_id.metadata = {'url': "/{resourceId}"} # type: ignore
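# Usage sketch (illustrative): the *_by_id variants take the full ARM resource ID instead of
# the individual name segments; the ID below is a placeholder.
#
#     async def delete_by_id(client):
#         resource_id = (
#             "/subscriptions/<subscription-id>/resourceGroups/my-resource-group"
#             "/providers/Microsoft.Storage/storageAccounts/mystorageaccount"
#         )
#         poller = await client.resources.begin_delete_by_id(resource_id, api_version="2019-06-01")
#         await poller.result()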
async def _create_or_update_by_id_initial(
self,
resource_id: str,
api_version: str,
parameters: "_models.GenericResource",
**kwargs: Any
) -> Optional["_models.GenericResource"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.GenericResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'GenericResource')
request = build_resources_create_or_update_by_id_request_initial(
resource_id=resource_id,
content_type=content_type,
api_version=api_version,
json=_json,
template_url=self._create_or_update_by_id_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GenericResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_by_id_initial.metadata = {'url': "/{resourceId}"} # type: ignore
@distributed_trace_async
async def begin_create_or_update_by_id(
self,
resource_id: str,
api_version: str,
parameters: "_models.GenericResource",
**kwargs: Any
) -> AsyncLROPoller["_models.GenericResource"]:
"""Create a resource by ID.
:param resource_id: The fully qualified ID of the resource, including the resource name and
resource type. Use the format,
/subscriptions/{guid}/resourceGroups/{resource-group-name}/{resource-provider-namespace}/{resource-type}/{resource-name}.
:type resource_id: str
:param api_version: The API version to use for the operation.
:type api_version: str
:param parameters: Create or update resource parameters.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.GenericResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GenericResource or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_08_01.models.GenericResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GenericResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_by_id_initial(
resource_id=resource_id,
api_version=api_version,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update_by_id.metadata = {'url': "/{resourceId}"} # type: ignore
async def _update_by_id_initial(
self,
resource_id: str,
api_version: str,
parameters: "_models.GenericResource",
**kwargs: Any
) -> Optional["_models.GenericResource"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.GenericResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'GenericResource')
request = build_resources_update_by_id_request_initial(
resource_id=resource_id,
content_type=content_type,
api_version=api_version,
json=_json,
template_url=self._update_by_id_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_by_id_initial.metadata = {'url': "/{resourceId}"} # type: ignore
@distributed_trace_async
async def begin_update_by_id(
self,
resource_id: str,
api_version: str,
parameters: "_models.GenericResource",
**kwargs: Any
) -> AsyncLROPoller["_models.GenericResource"]:
"""Updates a resource by ID.
:param resource_id: The fully qualified ID of the resource, including the resource name and
resource type. Use the format,
/subscriptions/{guid}/resourceGroups/{resource-group-name}/{resource-provider-namespace}/{resource-type}/{resource-name}.
:type resource_id: str
:param api_version: The API version to use for the operation.
:type api_version: str
:param parameters: Update resource parameters.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.GenericResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GenericResource or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_08_01.models.GenericResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GenericResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_by_id_initial(
resource_id=resource_id,
api_version=api_version,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_by_id.metadata = {'url': "/{resourceId}"} # type: ignore
@distributed_trace_async
async def get_by_id(
self,
resource_id: str,
api_version: str,
**kwargs: Any
) -> "_models.GenericResource":
"""Gets a resource by ID.
:param resource_id: The fully qualified ID of the resource, including the resource name and
resource type. Use the format,
/subscriptions/{guid}/resourceGroups/{resource-group-name}/{resource-provider-namespace}/{resource-type}/{resource-name}.
:type resource_id: str
:param api_version: The API version to use for the operation.
:type api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GenericResource, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.GenericResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GenericResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_resources_get_by_id_request(
resource_id=resource_id,
api_version=api_version,
template_url=self.get_by_id.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_id.metadata = {'url': "/{resourceId}"} # type: ignore
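# Usage sketch (illustrative): get_by_id pairs naturally with IDs returned by the list
# operations above. ``client`` is an assumed ResourceManagementClient instance, and the
# api_version passed must be valid for the provider of the resource being fetched.
#
#     async def show_by_id(client, resource_id):
#         resource = await client.resources.get_by_id(resource_id, api_version="2019-06-01")
#         print(resource.name, resource.location)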
class ResourceGroupsOperations:
"""ResourceGroupsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.resources.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def check_existence(
self,
resource_group_name: str,
**kwargs: Any
) -> bool:
"""Checks whether a resource group exists.
:param resource_group_name: The name of the resource group to check. The name is case
insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_resource_groups_check_existence_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.check_existence.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"} # type: ignore
@distributed_trace_async
async def create_or_update(
self,
resource_group_name: str,
parameters: "_models.ResourceGroup",
**kwargs: Any
) -> "_models.ResourceGroup":
"""Creates or updates a resource group.
:param resource_group_name: The name of the resource group to create or update. Can include
alphanumeric characters, underscores, parentheses, hyphens, and periods (except at the end), as
well as Unicode characters that match the allowed pattern.
:type resource_group_name: str
:param parameters: Parameters supplied to the create or update resource group operation.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.ResourceGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceGroup, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.ResourceGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ResourceGroup')
request = build_resource_groups_create_or_update_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ResourceGroup', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ResourceGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"} # type: ignore
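# Usage sketch (illustrative): creating a resource group is a single, non-polling call; only
# the location is required on the ResourceGroup body. Names and tags are placeholders.
#
#     from azure.mgmt.resource.resources.v2019_08_01.models import ResourceGroup
#
#     async def ensure_group(client):
#         group = await client.resource_groups.create_or_update(
#             "my-resource-group",
#             ResourceGroup(location="eastus", tags={"environment": "production"}),
#         )
#         print(group.id)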
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_resource_groups_delete_request_initial(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"} # type: ignore
@distributed_trace_async
async def begin_delete( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a resource group.
When you delete a resource group, all of its resources are also deleted. Deleting a resource
group deletes all of its template deployments and currently stored operations.
:param resource_group_name: The name of the resource group to delete. The name is case
insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"} # type: ignore
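    # Illustrative sketch: deleting a resource group and waiting on the
    # long-running-operation poller (client as in the earlier sketch;
    # "example-rg" is an assumed name).
    #
    #     poller = await client.resource_groups.begin_delete("example-rg")
    #     await poller.wait()   # or: await poller.result()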
@distributed_trace_async
async def get(
self,
resource_group_name: str,
**kwargs: Any
) -> "_models.ResourceGroup":
"""Gets a resource group.
:param resource_group_name: The name of the resource group to get. The name is case
insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceGroup, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.ResourceGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_resource_groups_get_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ResourceGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"} # type: ignore
@distributed_trace_async
async def update(
self,
resource_group_name: str,
parameters: "_models.ResourceGroupPatchable",
**kwargs: Any
) -> "_models.ResourceGroup":
"""Updates a resource group.
Resource groups can be updated through a simple PATCH operation to a group address. The format
of the request is the same as that for creating a resource group. If a field is unspecified,
the current value is retained.
:param resource_group_name: The name of the resource group to update. The name is case
insensitive.
:type resource_group_name: str
:param parameters: Parameters supplied to update a resource group.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.ResourceGroupPatchable
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceGroup, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.ResourceGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ResourceGroupPatchable')
request = build_resource_groups_update_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ResourceGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"} # type: ignore
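    # Illustrative sketch: PATCH-style update that only touches the fields you
    # send (here the tags); unspecified fields keep their current values. The
    # tag values are assumptions for the example.
    #
    #     from azure.mgmt.resource.resources.v2019_08_01.models import ResourceGroupPatchable
    #
    #     updated = await client.resource_groups.update(
    #         "example-rg", ResourceGroupPatchable(tags={"env": "dev"})
    #     )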
async def _export_template_initial(
self,
resource_group_name: str,
parameters: "_models.ExportTemplateRequest",
**kwargs: Any
) -> Optional["_models.ResourceGroupExportResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ResourceGroupExportResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ExportTemplateRequest')
request = build_resource_groups_export_template_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._export_template_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ResourceGroupExportResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_export_template_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/exportTemplate"} # type: ignore
@distributed_trace_async
async def begin_export_template(
self,
resource_group_name: str,
parameters: "_models.ExportTemplateRequest",
**kwargs: Any
) -> AsyncLROPoller["_models.ResourceGroupExportResult"]:
"""Captures the specified resource group as a template.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param parameters: Parameters for exporting the template.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.ExportTemplateRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ResourceGroupExportResult or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_08_01.models.ResourceGroupExportResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceGroupExportResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._export_template_initial(
resource_group_name=resource_group_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ResourceGroupExportResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_export_template.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/exportTemplate"} # type: ignore
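    # Illustrative sketch: exporting the group's template; the "*" resource
    # filter exports every resource in the group (assumed example values).
    #
    #     from azure.mgmt.resource.resources.v2019_08_01.models import ExportTemplateRequest
    #
    #     poller = await client.resource_groups.begin_export_template(
    #         "example-rg", ExportTemplateRequest(resources=["*"])
    #     )
    #     export_result = await poller.result()
    #     print(export_result.template)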
@distributed_trace
def list(
self,
filter: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.ResourceGroupListResult"]:
"""Gets all the resource groups for a subscription.
:param filter: The filter to apply on the operation.:code:`<br>`:code:`<br>`You can filter by
tag names and values. For example, to filter for a tag name and value, use $filter=tagName eq
'tag1' and tagValue eq 'Value1'. Default value is None.
:type filter: str
:param top: The number of results to return. If null is passed, returns all resource groups.
Default value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceGroupListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.ResourceGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_resource_groups_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
filter=filter,
top=top,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_resource_groups_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ResourceGroupListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups"} # type: ignore
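    # Illustrative sketch: iterating the async pager, optionally filtered by
    # tag as described in the docstring (tag name/value are assumptions).
    #
    #     async for group in client.resource_groups.list(
    #         filter="tagName eq 'env' and tagValue eq 'dev'"
    #     ):
    #         print(group.name)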
class TagsOperations:
"""TagsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.resources.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def delete_value( # pylint: disable=inconsistent-return-statements
self,
tag_name: str,
tag_value: str,
**kwargs: Any
) -> None:
"""Deletes a tag value.
:param tag_name: The name of the tag.
:type tag_name: str
:param tag_value: The value of the tag to delete.
:type tag_value: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_tags_delete_value_request(
tag_name=tag_name,
tag_value=tag_value,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete_value.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_value.metadata = {'url': "/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}"} # type: ignore
@distributed_trace_async
async def create_or_update_value(
self,
tag_name: str,
tag_value: str,
**kwargs: Any
) -> "_models.TagValue":
"""Creates a tag value. The name of the tag must already exist.
:param tag_name: The name of the tag.
:type tag_name: str
:param tag_value: The value of the tag to create.
:type tag_value: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagValue, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.TagValue
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagValue"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_tags_create_or_update_value_request(
tag_name=tag_name,
tag_value=tag_value,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.create_or_update_value.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('TagValue', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('TagValue', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update_value.metadata = {'url': "/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}"} # type: ignore
@distributed_trace_async
async def create_or_update(
self,
tag_name: str,
**kwargs: Any
) -> "_models.TagDetails":
"""Creates a tag in the subscription.
The tag name can have a maximum of 512 characters and is case insensitive. Tag names created by
Azure have prefixes of microsoft, azure, or windows. You cannot create tags with one of these
prefixes.
:param tag_name: The name of the tag to create.
:type tag_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagDetails, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.TagDetails
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagDetails"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_tags_create_or_update_request(
tag_name=tag_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('TagDetails', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('TagDetails', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/tagNames/{tagName}"} # type: ignore
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self,
tag_name: str,
**kwargs: Any
) -> None:
"""Deletes a tag from the subscription.
You must remove all values from a resource tag before you can delete it.
:param tag_name: The name of the tag.
:type tag_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_tags_delete_request(
tag_name=tag_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': "/subscriptions/{subscriptionId}/tagNames/{tagName}"} # type: ignore
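    # Illustrative sketch of the ordering described above, given a
    # ResourceManagementClient as in the earlier resource-group sketches:
    # every tag value must be removed before the tag name itself can be
    # deleted ("env"/"dev" are assumed names).
    #
    #     await client.tags.create_or_update("env")
    #     await client.tags.create_or_update_value("env", "dev")
    #     await client.tags.delete_value("env", "dev")   # remove values first
    #     await client.tags.delete("env")                # then the tag name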
@distributed_trace
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.TagsListResult"]:
"""Gets the names and values of all resource tags that are defined in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either TagsListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.TagsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_tags_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_tags_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("TagsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/subscriptions/{subscriptionId}/tagNames"} # type: ignore
class DeploymentOperationsOperations:
"""DeploymentOperationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.resources.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get_at_scope(
self,
scope: str,
deployment_name: str,
operation_id: str,
**kwargs: Any
) -> "_models.DeploymentOperation":
"""Gets a deployments operation.
:param scope: The scope of a deployment.
:type scope: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param operation_id: The ID of the operation to get.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentOperation, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentOperation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployment_operations_get_at_scope_request(
scope=scope,
deployment_name=deployment_name,
operation_id=operation_id,
api_version=api_version,
template_url=self.get_at_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}/operations/{operationId}"} # type: ignore
@distributed_trace
def list_at_scope(
self,
scope: str,
deployment_name: str,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentOperationsListResult"]:
"""Gets all deployments operations for a deployment.
:param scope: The scope of a deployment.
:type scope: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param top: The number of results to return. Default value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DeploymentOperationsListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentOperationsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentOperationsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_deployment_operations_list_at_scope_request(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
top=top,
template_url=self.list_at_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_deployment_operations_list_at_scope_request(
scope=scope,
deployment_name=deployment_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentOperationsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}/operations"} # type: ignore
@distributed_trace_async
async def get_at_tenant_scope(
self,
deployment_name: str,
operation_id: str,
**kwargs: Any
) -> "_models.DeploymentOperation":
"""Gets a deployments operation.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param operation_id: The ID of the operation to get.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentOperation, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentOperation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployment_operations_get_at_tenant_scope_request(
deployment_name=deployment_name,
operation_id=operation_id,
api_version=api_version,
template_url=self.get_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_tenant_scope.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}/operations/{operationId}"} # type: ignore
@distributed_trace
def list_at_tenant_scope(
self,
deployment_name: str,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentOperationsListResult"]:
"""Gets all deployments operations for a deployment.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param top: The number of results to return. Default value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DeploymentOperationsListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentOperationsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentOperationsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_deployment_operations_list_at_tenant_scope_request(
deployment_name=deployment_name,
api_version=api_version,
top=top,
template_url=self.list_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_deployment_operations_list_at_tenant_scope_request(
deployment_name=deployment_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentOperationsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_tenant_scope.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}/operations"} # type: ignore
@distributed_trace_async
async def get_at_management_group_scope(
self,
group_id: str,
deployment_name: str,
operation_id: str,
**kwargs: Any
) -> "_models.DeploymentOperation":
"""Gets a deployments operation.
:param group_id: The management group ID.
:type group_id: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param operation_id: The ID of the operation to get.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentOperation, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentOperation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployment_operations_get_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
operation_id=operation_id,
api_version=api_version,
template_url=self.get_at_management_group_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_management_group_scope.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations/{operationId}"} # type: ignore
@distributed_trace
def list_at_management_group_scope(
self,
group_id: str,
deployment_name: str,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentOperationsListResult"]:
"""Gets all deployments operations for a deployment.
:param group_id: The management group ID.
:type group_id: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param top: The number of results to return. Default value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DeploymentOperationsListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentOperationsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentOperationsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_deployment_operations_list_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
top=top,
template_url=self.list_at_management_group_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_deployment_operations_list_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentOperationsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_management_group_scope.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations"} # type: ignore
@distributed_trace_async
async def get_at_subscription_scope(
self,
deployment_name: str,
operation_id: str,
**kwargs: Any
) -> "_models.DeploymentOperation":
"""Gets a deployments operation.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param operation_id: The ID of the operation to get.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentOperation, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentOperation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployment_operations_get_at_subscription_scope_request(
deployment_name=deployment_name,
operation_id=operation_id,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_at_subscription_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations/{operationId}"} # type: ignore
@distributed_trace
def list_at_subscription_scope(
self,
deployment_name: str,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentOperationsListResult"]:
"""Gets all deployments operations for a deployment.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param top: The number of results to return. Default value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DeploymentOperationsListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentOperationsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentOperationsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_deployment_operations_list_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
top=top,
template_url=self.list_at_subscription_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_deployment_operations_list_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentOperationsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations"} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
deployment_name: str,
operation_id: str,
**kwargs: Any
) -> "_models.DeploymentOperation":
"""Gets a deployments operation.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param operation_id: The ID of the operation to get.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentOperation, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentOperation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployment_operations_get_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
operation_id=operation_id,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}/operations/{operationId}"} # type: ignore
@distributed_trace
def list(
self,
resource_group_name: str,
deployment_name: str,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentOperationsListResult"]:
"""Gets all deployments operations for a deployment.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param top: The number of results to return. Default value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DeploymentOperationsListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentOperationsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentOperationsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_deployment_operations_list_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
top=top,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_deployment_operations_list_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentOperationsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}/operations"} # type: ignore
| 44.411035 | 4,257 | 0.6624 | [
"MIT"
] | AikoBB/azure-sdk-for-python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_08_01/aio/operations/_operations.py | 311,499 | Python |
# Project Euler 14: find the starting number under one million that
# produces the longest Collatz chain.
steps = 0          # step count accumulated by collatz()
c = {}             # memo: starting number -> total steps to reach 1
m = 1              # starting number with the longest chain seen so far
def collatz(n):
    global steps
    if n in c:
        # Reuse the cached count for a previously solved starting number.
        steps += c[n]
        return
    if n == 1:
        return
    steps += 1
    if n % 2 == 0:
        collatz(n // 2)  # integer division keeps n an int
        return
    collatz(3 * n + 1)
def main(limit):
    global steps
    global m
    for i in range(1, limit):
        collatz(i)
        c[i] = steps
        if steps > c[m]:
            m = i
        steps = 0
main(1000000)
print(m)
| 11.487179 | 27 | 0.419643 | [
"MIT"
] | Dinoosawruss/project-euler | Python/14 - Longest Collatz sequence/main.py | 448 | Python |
class Constants:
"""Storing all constants of the project."""
_author = "muhammad abdullah"
_email = "[email protected]"
_version = "0.0.4"
@property
def author(self):
return self._author
@property
def email(self):
return self._email
@property
def version(self):
return self._version
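# Illustrative usage of the read-only properties (a minimal sketch, not part
# of the original module):
if __name__ == "__main__":
    constants = Constants()
    print(constants.author, constants.email, constants.version)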
| 20 | 47 | 0.622222 | [
"MIT"
] | iAbdullahMughal/espionage | espionage/constants.py | 360 | Python |
from __future__ import absolute_import
from django.conf.urls import *
from django.views.static import serve
from ajax import views
import django
import os
JAVASCRIPT_PATH = "%s/js" % os.path.dirname(__file__)
if django.VERSION < (1, 8):
urlpatterns = patterns('ajax.views',
(r'^(?P<application>\w+)/(?P<model>\w+).json', 'endpoint_loader'),
(r'^(?P<application>\w+)/(?P<model>\w+)/(?P<method>\w+).json', 'endpoint_loader'),
(r'^(?P<application>\w+)/(?P<model>\w+)/(?P<pk>\d+)/(?P<method>\w+)/?(?P<taggit_command>(add|remove|set|clear|similar))?.json$', 'endpoint_loader'),
(r'^js/(?P<path>.*)$', serve,
{'document_root': JAVASCRIPT_PATH}),
)
else:
urlpatterns = [
url(r'^(?P<application>\w+)/(?P<model>\w+).json', views.endpoint_loader),
url(r'^(?P<application>\w+)/(?P<model>\w+)/(?P<method>\w+).json', views.endpoint_loader),
url(r'^(?P<application>\w+)/(?P<model>\w+)/(?P<pk>\d+)/(?P<method>\w+)/?(?P<taggit_command>(add|remove|set|clear|similar))?.json$', views.endpoint_loader),
url(r'^js/(?P<path>.*)$', serve,
{'document_root': JAVASCRIPT_PATH}),
]
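# Illustrative sketch (an assumed project-level urls.py, not part of this
# module): include these patterns under a prefix so that, for example,
# /ajax/<application>/<model>.json resolves to ajax.views.endpoint_loader.
#
#     from django.conf.urls import include, url
#     urlpatterns = [
#         url(r'^ajax/', include('ajax.urls')),
#     ]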
| 44.692308 | 163 | 0.598967 | [
"BSD-3-Clause"
] | joestump/django-ajax | ajax/urls.py | 1,162 | Python |
import random
def ordenamiento_por_mezcla(lista):
if len(lista) > 1:
medio = len(lista) // 2
izquierda = lista[:medio]
derecha = lista[medio:]
        # Recursive call on each half
ordenamiento_por_mezcla(izquierda)
ordenamiento_por_mezcla(derecha)
        # Iterators to walk the two sublists
i = 0
j = 0
        # Iterator for the main list
k = 0
while i < len(izquierda) and j < len(derecha):
if izquierda[i] < derecha[j]:
lista[k] = izquierda[i]
i += 1
else:
lista[k] = derecha[j]
j += 1
k += 1
while i < len(izquierda):
lista[k] = izquierda[i]
i += 1
k += 1
while j < len(derecha):
lista[k] = derecha[j]
j += 1
k += 1
return lista
if __name__ == '__main__':
    tamano_de_lista = int(input('How long should the list be? '))
lista = [random.randint(0, 100) for i in range(tamano_de_lista)]
print(lista)
print('-' * 20)
lista_ordenada = ordenamiento_por_mezcla(lista)
print(lista_ordenada) | 20.098039 | 66 | 0.612683 | [
"MIT"
] | lmonsalve22/Learning-to-Code | python/poo_algoritmos/merge_sort.py | 1,027 | Python |
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2004, 2006 Sean C. Gillies
# Copyright (c) 2007 STFC <http://www.stfc.ac.uk>
#
# Authors :
# Dominic Lowe <[email protected]>
#
# Contact email: [email protected]
# =============================================================================
from owslib.coverage.wcsBase import WCSBase, WCSCapabilitiesReader, ServiceException
from urllib.parse import urlencode
from owslib.util import openURL, testXMLValue
from owslib.etree import etree
from owslib.crs import Crs
import os
import errno
import logging
from owslib.util import log, makeString
# function to save writing out WCS namespace in full each time
def ns(tag):
return '{http://www.opengis.net/wcs}' + tag
class WebCoverageService_1_0_0(WCSBase):
"""Abstraction for OGC Web Coverage Service (WCS), version 1.0.0
Implements IWebCoverageService.
"""
def __getitem__(self, name):
''' check contents dictionary to allow dict like access to service layers'''
if name in list(self.__getattribute__('contents').keys()):
return self.__getattribute__('contents')[name]
else:
raise KeyError("No content named %s" % name)
def __init__(self, url, xml, cookies, auth=None, timeout=30):
super(WebCoverageService_1_0_0, self).__init__(auth)
self.version = '1.0.0'
self.url = url
self.cookies = cookies
self.timeout = timeout
# initialize from saved capability document or access the server
reader = WCSCapabilitiesReader(self.version, self.cookies, self.auth)
if xml:
self._capabilities = reader.readString(xml)
else:
self._capabilities = reader.read(self.url, self.timeout)
# check for exceptions
se = self._capabilities.find('ServiceException')
if se is not None:
err_message = str(se.text).strip()
raise ServiceException(err_message, xml)
self.updateSequence = self._capabilities.attrib.get('updateSequence')
# serviceIdentification metadata
subelem = self._capabilities.find(ns('Service'))
self.identification = ServiceIdentification(subelem)
# serviceProvider metadata
subelem = self._capabilities.find(ns('Service/') + ns('responsibleParty'))
self.provider = ServiceProvider(subelem)
# serviceOperations metadata
self.operations = []
for elem in self._capabilities.find(ns('Capability/') + ns('Request'))[:]:
self.operations.append(OperationMetadata(elem))
# serviceContents metadata
self.contents = {}
for elem in self._capabilities.findall(ns('ContentMetadata/') + ns('CoverageOfferingBrief')):
cm = ContentMetadata(elem, self)
self.contents[cm.id] = cm
# Some WCS servers (wrongly) advertise 'Content' OfferingBrief instead.
if self.contents == {}:
for elem in self._capabilities.findall(ns('ContentMetadata/') + ns('ContentOfferingBrief')):
cm = ContentMetadata(elem, self)
self.contents[cm.id] = cm
# exceptions
self.exceptions = [f.text for f in self._capabilities.findall('Capability/Exception/Format')]
def items(self):
'''supports dict-like items() access'''
items = []
for item in self.contents:
items.append((item, self.contents[item]))
return items
def getCoverage(self, identifier=None, bbox=None, time=None, format=None, crs=None, width=None, height=None,
resx=None, resy=None, resz=None, parameter=None, method='Get', timeout=30, **kwargs):
"""Request and return a coverage from the WCS as a file-like object
note: additional **kwargs helps with multi-version implementation
core keyword arguments should be supported cross version
example:
        cvg=wcs.getCoverage(identifier='TuMYrRQ4', time=['2792-06-01T00:00:00.0'], bbox=(-112,36,-106,41),
            format='cf-netcdf')
        is equivalent to:
        http://myhost/mywcs?SERVICE=WCS&VERSION=1.0.0&REQUEST=GetCoverage&COVERAGE=TuMYrRQ4&TIME=2792-06-01T00:00:00.0&BBOX=-112,36,-106,41&FORMAT=cf-netcdf
"""
if log.isEnabledFor(logging.DEBUG):
msg = 'WCS 1.0.0 DEBUG: Parameters passed to GetCoverage: identifier={}, bbox={}, time={}, format={}, crs={}, width={}, height={}, resx={}, resy={}, resz={}, parameter={}, method={}, other_arguments={}' # noqa
log.debug(msg.format(
identifier, bbox, time, format, crs, width, height, resx, resy, resz, parameter, method, str(kwargs)))
try:
base_url = next((m.get('url') for m in self.getOperationByName('GetCoverage').methods
if m.get('type').lower() == method.lower()))
except StopIteration:
base_url = self.url
log.debug('WCS 1.0.0 DEBUG: base url of server: %s' % base_url)
# process kwargs
request = {'version': self.version, 'request': 'GetCoverage', 'service': 'WCS'}
assert len(identifier) > 0
request['Coverage'] = identifier
# request['identifier'] = ','.join(identifier)
if bbox:
request['BBox'] = ','.join([makeString(x) for x in bbox])
else:
request['BBox'] = None
if time:
request['time'] = ','.join(time)
if crs:
request['crs'] = crs
request['format'] = format
if width:
request['width'] = width
if height:
request['height'] = height
if resx:
request['resx'] = resx
if resy:
request['resy'] = resy
if resz:
request['resz'] = resz
# anything else e.g. vendor specific parameters must go through kwargs
if kwargs:
for kw in kwargs:
request[kw] = kwargs[kw]
# encode and request
data = urlencode(request)
log.debug('WCS 1.0.0 DEBUG: Second part of URL: %s' % data)
u = openURL(base_url, data, method, self.cookies, auth=self.auth, timeout=timeout)
return u
def getOperationByName(self, name):
"""Return a named operation item."""
for item in self.operations:
if item.name == name:
return item
raise KeyError("No operation named %s" % name)
class OperationMetadata(object):
"""Abstraction for WCS metadata.
Implements IMetadata.
"""
def __init__(self, elem):
"""."""
self.name = elem.tag.split('}')[1]
# self.formatOptions = [f.text for f in elem.findall('{http://www.opengis.net/wcs/1.1/ows}Parameter/{http://www.opengis.net/wcs/1.1/ows}AllowedValues/{http://www.opengis.net/wcs/1.1/ows}Value')] # noqa
self.methods = []
for resource in elem.findall(ns('DCPType/') + ns('HTTP/') + ns('Get/') + ns('OnlineResource')):
url = resource.attrib['{http://www.w3.org/1999/xlink}href']
self.methods.append({'type': 'Get', 'url': url})
for resource in elem.findall(ns('DCPType/') + ns('HTTP/') + ns('Post/') + ns('OnlineResource')):
url = resource.attrib['{http://www.w3.org/1999/xlink}href']
self.methods.append({'type': 'Post', 'url': url})
class ServiceIdentification(object):
""" Abstraction for ServiceIdentification metadata """
def __init__(self, elem):
# properties
self.type = 'OGC:WCS'
self.version = '1.0.0'
self.service = testXMLValue(elem.find(ns('name')))
self.abstract = testXMLValue(elem.find(ns('description')))
self.title = testXMLValue(elem.find(ns('label')))
self.keywords = [f.text for f in elem.findall(ns('keywords') + '/' + ns('keyword'))]
# note: differs from 'rights' in interface
self.fees = elem.find(ns('fees')).text
self.accessConstraints = elem.find(ns('accessConstraints')).text
class ServiceProvider(object):
""" Abstraction for WCS ResponsibleParty
Implements IServiceProvider"""
def __init__(self, elem):
# it's not uncommon for the service provider info to be missing
# so handle case where None is passed in
if elem is None:
self.name = None
self.url = None
self.contact = None
else:
self.name = testXMLValue(elem.find(ns('organisationName')))
            self.url = self.name  # there is no definitive place for a URL in WCS, so repeat organisationName
self.contact = ContactMetadata(elem)
class ContactMetadata(object):
''' implements IContactMetadata'''
def __init__(self, elem):
try:
self.name = elem.find(ns('individualName')).text
except AttributeError:
self.name = None
try:
self.organization = elem.find(ns('organisationName')).text
except AttributeError:
self.organization = None
try:
self.address = elem.find(ns('contactInfo') + '/' + ns('address') + '/' + ns('deliveryPoint')).text
except AttributeError:
self.address = None
try:
self.city = elem.find(ns('contactInfo') + '/' + ns('address') + '/' + ns('city')).text
except AttributeError:
self.city = None
try:
self.region = elem.find(ns('contactInfo') + '/' + ns('address') + '/' + ns('administrativeArea')).text
except AttributeError:
self.region = None
try:
self.postcode = elem.find(ns('contactInfo') + '/' + ns('address') + '/' + ns('postalCode')).text
except AttributeError:
self.postcode = None
try:
self.country = elem.find(ns('contactInfo') + '/' + ns('address') + '/' + ns('country')).text
except AttributeError:
self.country = None
try:
self.email = elem.find(ns('contactInfo') + '/' + ns('address') + '/' + ns('electronicMailAddress')).text
except AttributeError:
self.email = None
class ContentMetadata(object):
"""
Implements IContentMetadata
"""
def __init__(self, elem, service):
"""Initialize. service is required so that describeCoverage requests may be made"""
# TODO - examine the parent for bounding box info.
# self._parent=parent
self._elem = elem
self._service = service
self.id = elem.find(ns('name')).text
self.title = testXMLValue(elem.find(ns('label')))
self.abstract = testXMLValue(elem.find(ns('description')))
self.keywords = [f.text for f in elem.findall(ns('keywords') + '/' + ns('keyword'))]
self.boundingBox = None # needed for iContentMetadata harmonisation
self.boundingBoxWGS84 = None
b = elem.find(ns('lonLatEnvelope'))
if b is not None:
gmlpositions = b.findall('{http://www.opengis.net/gml}pos')
lc = gmlpositions[0].text
uc = gmlpositions[1].text
self.boundingBoxWGS84 = (
float(lc.split()[0]), float(lc.split()[1]),
float(uc.split()[0]), float(uc.split()[1]),
)
# others not used but needed for iContentMetadata harmonisation
self.styles = None
self.crsOptions = None
self.defaulttimeposition = None
# grid is either a gml:Grid or a gml:RectifiedGrid if supplied as part of the DescribeCoverage response.
def _getGrid(self):
if not hasattr(self, 'descCov'):
self.descCov = self._service.getDescribeCoverage(self.id)
gridelem = self.descCov.find(
ns('CoverageOffering/') + ns('domainSet/') + ns('spatialDomain/') + '{http://www.opengis.net/gml}RectifiedGrid') # noqa
if gridelem is not None:
grid = RectifiedGrid(gridelem)
else:
gridelem = self.descCov.find(
ns('CoverageOffering/') + ns('domainSet/') + ns('spatialDomain/') + '{http://www.opengis.net/gml}Grid') # noqa
grid = Grid(gridelem)
return grid
grid = property(_getGrid, None)
# timelimits are the start/end times, timepositions are all timepoints.
# WCS servers can declare one or both or neither of these.
def _getTimeLimits(self):
timepoints, timelimits = [], []
b = self._elem.find(ns('lonLatEnvelope'))
if b is not None:
timepoints = b.findall('{http://www.opengis.net/gml}timePosition')
else:
# have to make a describeCoverage request...
if not hasattr(self, 'descCov'):
self.descCov = self._service.getDescribeCoverage(self.id)
for pos in self.descCov.findall(
ns('CoverageOffering/') + ns('domainSet/') + ns('temporalDomain/') + '{http://www.opengis.net/gml}timePosition'): # noqa
timepoints.append(pos)
if timepoints:
timelimits = [timepoints[0].text, timepoints[1].text]
return timelimits
timelimits = property(_getTimeLimits, None)
def _getTimePositions(self):
timepositions = []
if not hasattr(self, 'descCov'):
self.descCov = self._service.getDescribeCoverage(self.id)
for pos in self.descCov.findall(
ns('CoverageOffering/') + ns('domainSet/') + ns('temporalDomain/') + '{http://www.opengis.net/gml}timePosition'): # noqa
timepositions.append(pos.text)
return timepositions
timepositions = property(_getTimePositions, None)
def _getOtherBoundingBoxes(self):
''' incomplete, should return other bounding boxes not in WGS84
#TODO: find any other bounding boxes. Need to check for gml:EnvelopeWithTimePeriod.'''
bboxes = []
if not hasattr(self, 'descCov'):
self.descCov = self._service.getDescribeCoverage(self.id)
for envelope in self.descCov.findall(
ns('CoverageOffering/') + ns('domainSet/') + ns('spatialDomain/') + '{http://www.opengis.net/gml}Envelope'): # noqa
bbox = {}
bbox['nativeSrs'] = envelope.attrib['srsName']
gmlpositions = envelope.findall('{http://www.opengis.net/gml}pos')
lc = gmlpositions[0].text.split()
uc = gmlpositions[1].text.split()
bbox['bbox'] = (
float(lc[0]), float(lc[1]),
float(uc[0]), float(uc[1])
)
bboxes.append(bbox)
return bboxes
boundingboxes = property(_getOtherBoundingBoxes, None)
def _getSupportedCRSProperty(self):
# gets supported crs info
crss = []
for elem in self._service.getDescribeCoverage(self.id).findall(
ns('CoverageOffering/') + ns('supportedCRSs/') + ns('responseCRSs')):
for crs in elem.text.split(' '):
crss.append(Crs(crs))
for elem in self._service.getDescribeCoverage(self.id).findall(
ns('CoverageOffering/') + ns('supportedCRSs/') + ns('requestResponseCRSs')):
for crs in elem.text.split(' '):
crss.append(Crs(crs))
for elem in self._service.getDescribeCoverage(self.id).findall(
ns('CoverageOffering/') + ns('supportedCRSs/') + ns('nativeCRSs')):
for crs in elem.text.split(' '):
crss.append(Crs(crs))
return crss
supportedCRS = property(_getSupportedCRSProperty, None)
def _getSupportedFormatsProperty(self):
# gets supported formats info
frmts = []
for elem in self._service.getDescribeCoverage(self.id).findall(
ns('CoverageOffering/') + ns('supportedFormats/') + ns('formats')):
frmts.append(elem.text)
return frmts
supportedFormats = property(_getSupportedFormatsProperty, None)
def _getAxisDescriptionsProperty(self):
# gets any axis descriptions contained in the rangeset (requires a DescribeCoverage call to server).
axisDescs = []
for elem in self._service.getDescribeCoverage(self.id).findall(
ns('CoverageOffering/') + ns('rangeSet/') + ns('RangeSet/') + ns('axisDescription/') + ns('AxisDescription')): # noqa
axisDescs.append(AxisDescription(elem)) # create a 'AxisDescription' object.
return axisDescs
axisDescriptions = property(_getAxisDescriptionsProperty, None)
# Adding classes to represent gml:grid and gml:rectifiedgrid. One of these is used for the cvg.grid property
# (where cvg is a member of the contents dictionary)
# There is no simple way to convert the offset values in a rectifiedgrid grid to real values without CRS understanding,
# therefore this is beyond the current scope of owslib, so the representation here is purely to provide access
# to the information in the GML.
class Grid(object):
''' Simple grid class to provide axis and value information for a gml grid '''
def __init__(self, grid):
self.axislabels = []
self.dimension = None
self.lowlimits = []
self.highlimits = []
if grid is not None:
self.dimension = int(grid.get('dimension'))
self.lowlimits = grid.find(
'{http://www.opengis.net/gml}limits/{http://www.opengis.net/gml}GridEnvelope/{http://www.opengis.net/gml}low').text.split(' ') # noqa
self.highlimits = grid.find(
'{http://www.opengis.net/gml}limits/{http://www.opengis.net/gml}GridEnvelope/{http://www.opengis.net/gml}high').text.split(' ') # noqa
for axis in grid.findall('{http://www.opengis.net/gml}axisName'):
self.axislabels.append(axis.text)
class RectifiedGrid(Grid):
''' RectifiedGrid class, extends Grid with additional offset vector information '''
def __init__(self, rectifiedgrid):
super(RectifiedGrid, self).__init__(rectifiedgrid)
self.origin = rectifiedgrid.find(
'{http://www.opengis.net/gml}origin/{http://www.opengis.net/gml}pos').text.split()
self.offsetvectors = []
for offset in rectifiedgrid.findall('{http://www.opengis.net/gml}offsetVector'):
self.offsetvectors.append(offset.text.split())
class AxisDescription(object):
''' Class to represent the AxisDescription element optionally found as part of the RangeSet and used to
define ordinates of additional dimensions such as wavelength bands or pressure levels'''
def __init__(self, axisdescElem):
self.name = self.label = None
self.values = []
for elem in axisdescElem.getchildren():
if elem.tag == ns('name'):
self.name = elem.text
elif elem.tag == ns('label'):
self.label = elem.text
elif elem.tag == ns('values'):
for child in elem.getchildren():
self.values.append(child.text)
| 43.446469 | 222 | 0.604677 | [
"BSD-3-Clause"
] | ferreteleco/OWSLib | owslib/coverage/wcs100.py | 19,073 | Python |
import json
import os
from flask_app.config import get_config
from flask_app.logging import get_logger
import boto3
class SQSEvents:
def __init__(self, logger=None, config=None):
# logger
self.logger = logger if logger is not None else get_logger()
# configurations
self.config = config if config is not None else get_config()
# last_exception
self.exception = None
def connect(self):
connection = None
try:
endpoint_url = self.config.SQS_ENDPOINT
profile = os.environ['AWS_PROFILE'] if 'AWS_PROFILE' in os.environ else None
self.logger.info('SQSEvents - profile: {}'.format(profile))
self.logger.info('SQSEvents - endpoint_url: {}'.format(endpoint_url))
self.logger.info('SQSEvents - self.config.REGION_NAME: {}'.format(self.config.REGION_NAME))
if profile:
session = boto3.session.Session(profile_name=profile)
connection = session.resource(
'sqs',
endpoint_url=endpoint_url,
region_name=self.config.REGION_NAME
)
else:
connection = boto3.resource(
'sqs',
endpoint_url=endpoint_url,
region_name=self.config.REGION_NAME
)
self.logger.info('SQSEvents - Connected')
except Exception as err:
self.logger.error(err)
return connection
def send_message(self, message, queue_url):
sqs = self.connect()
if queue_url is None:
            raise Exception('Queue URL must be provided')
queue_name = os.path.basename(queue_url)
try:
# Get the queue
queue = sqs.get_queue_by_name(QueueName=queue_name)
# Avoid double json encode
if not isinstance(message, str):
try:
message = json.dumps(message)
except Exception as err:
self.logger.error(err)
message = str(message)
# Create a new message
response = queue.send_message(MessageBody=message)
except Exception as err:
self.logger.error(err)
self.exception = err
response = None
return response
def get_message(self, queue_url):
sqs = self.connect()
if queue_url is None:
            raise Exception('Queue URL must be provided')
queue_name = os.path.basename(queue_url)
try:
# Get the queue
queue = sqs.get_queue_by_name(QueueName=queue_name)
            # Receive up to one message from the queue
message = queue.receive_messages(
AttributeNames=[
'All'
],
MaxNumberOfMessages=1,
VisibilityTimeout=5,
WaitTimeSeconds=1
)
except Exception as err:
self.logger.error(err)
self.exception = err
message = None
return message
def create_queue(self, queue_name, attributes=None):
queue = None
if not attributes:
attributes = {'DelaySeconds': '5'}
sqs = self.connect()
try:
# Create the queue. This returns an SQS.Queue instance
queue = sqs.create_queue(QueueName=queue_name, Attributes=attributes)
except Exception as err:
self.logger.error(err)
self.exception = err
return queue
def delete_queue(self, queue_name):
result = True
sqs = self.connect()
try:
# Get the queue
queue = sqs.get_queue_by_name(QueueName=queue_name)
if queue is not None:
queue_url = queue.url
client = sqs.meta.client
client.delete_queue(QueueUrl=queue_url)
else:
                raise Exception('queue does not exist')
except Exception as err:
self.logger.error(err)
self.exception = err
result = False
return result
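# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of how SQSEvents might be driven end to end; the queue
# name 'example-queue' and the payload are assumptions made for illustration,
# while endpoint/region configuration still comes from get_config() as above.
if __name__ == '__main__':
    events = SQSEvents()
    queue = events.create_queue('example-queue')  # returns None if creation fails
    if queue is not None:
        events.send_message({'event': 'ping'}, queue.url)  # dicts are JSON-encoded
        for message in events.get_message(queue.url) or []:  # up to one message
            print(message.body)
        events.delete_queue('example-queue')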
| 29.964539 | 103 | 0.550296 | [
"MIT"
] | andersoncontreira/aws-ecs-flask-skeleton-python | flask_app/events/aws/sqs.py | 4,225 | Python |
import requests
files = {
'username': (None, 'davidwalsh'),
'password': (None, 'something'),
}
response = requests.post('http://domain.tld/post-to-me.php', files=files)
| 19.888889 | 73 | 0.648045 | [
"MIT"
] | 7alva7/curlconverter | fixtures/python/post_form.py | 179 | Python |
from uuid import uuid4
from typing import Optional, Dict, Any, List
from appgate.openapi.types import OpenApiDict, AttribType, AttributesDict, \
IGNORED_EQ_ATTRIBUTES, OpenApiParserException, InstanceMakerConfig, UUID_REFERENCE_FIELD, K8S_LOADERS_FIELD_NAME
write_only_formats = {'PEM', 'password'}
class SimpleAttribMaker:
def __init__(self, name: str, tpe: type, base_tpe: type, default: Optional[AttribType],
factory: Optional[type], definition: OpenApiDict, repr: bool = True) -> None:
self.base_tpe = base_tpe
self.name = name
self.tpe = tpe
self.default = default
self.factory = factory
self.repr = repr
self.definition = definition
@property
def metadata(self) -> Dict[str, Any]:
return self.definition.get('metadata', {})
@property
def is_password(self) -> bool:
return False
@property
def has_default(self) -> bool:
"""
Checks if attrs as a default field value
"""
return self.factory is not None or self.default is not None
def values(self, attributes: Dict[str, 'SimpleAttribMaker'], required_fields: List[str],
instance_maker_config: InstanceMakerConfig) -> AttributesDict:
required = self.name in required_fields
definition = self.definition
read_only = definition.get('readOnly', False)
format = definition.get('format')
if type(format) is not dict and format in write_only_formats:
write_only = True
else:
write_only = definition.get('writeOnly', False)
if instance_maker_config.level == 0 and self.name == 'id':
# We dont want to save id on k8s
read_only = True
attribs: AttributesDict = {}
attribs['metadata'] = {
'name': self.name,
'readOnly': read_only,
'writeOnly': write_only,
'format': format,
'base_type': self.base_tpe,
}
if 'description' in definition:
attribs['metadata']['description'] = definition['description']
if 'example' in definition:
if isinstance(definition['example'], List):
attribs['metadata']['example'] = frozenset(definition['example'])
else:
attribs['metadata']['example'] = definition['example']
if UUID_REFERENCE_FIELD in definition:
attribs['metadata'][UUID_REFERENCE_FIELD] = definition[UUID_REFERENCE_FIELD]
if self.name in IGNORED_EQ_ATTRIBUTES or write_only or read_only:
attribs['eq'] = False
# Set type
if not required or read_only or write_only:
attribs['type'] = Optional[self.tpe]
attribs['metadata']['type'] = str(Optional[self.tpe])
elif required and (read_only or write_only):
raise OpenApiParserException(f'readOnly/writeOnly attribute {self.name} '
'can not be required')
else:
attribs['type'] = self.tpe
attribs['metadata']['type'] = str(self.tpe)
if instance_maker_config.level == 0 and self.name == 'id':
attribs['factory'] = lambda: str(uuid4())
elif self.factory and not (read_only or write_only):
attribs['factory'] = self.factory
elif not required or read_only or write_only:
attribs['default'] = definition.get('default',
None if (read_only or write_only) else self.default)
attribs['repr'] = self.repr
return attribs
class DeprecatedAttribMaker(SimpleAttribMaker):
pass
class DefaultAttribMaker(SimpleAttribMaker):
def values(self, attributes: Dict[str, 'SimpleAttribMaker'], required_fields: List[str],
instance_maker_config: InstanceMakerConfig) -> AttributesDict:
vs = {
'type': Optional[self.tpe],
'eq': False,
'metadata': {
'base_type': self.tpe,
'name': self.name,
},
'repr': self.repr,
}
if self.default:
vs['default'] = self.default
vs['type'] = self.tpe
elif self.factory:
vs['factory'] = self.factory
vs['type'] = self.tpe
return vs
def create_default_attrib(name: str, attrib_value: Any) -> DefaultAttribMaker:
return DefaultAttribMaker(
tpe=type(attrib_value),
base_tpe=type(attrib_value),
name=name,
default=attrib_value,
factory=None,
definition={})
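# --- Hedged usage sketch (not part of the original module) ---
# Shows what create_default_attrib() produces; the attribute name 'api_version'
# and value 'v17' are made-up examples.  DefaultAttribMaker.values() ignores its
# arguments (see above), so placeholders are passed purely to satisfy the call.
if __name__ == '__main__':
    maker = create_default_attrib('api_version', 'v17')
    print(maker.values(attributes={}, required_fields=[], instance_maker_config=None))
    # -> {'type': <class 'str'>, 'eq': False, 'metadata': {...}, 'repr': True, 'default': 'v17'}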
| 36.598425 | 116 | 0.596386 | [
"MIT"
] | appgate/sdp-operator | appgate/openapi/attribmaker.py | 4,648 | Python |
"""Exemplo de como virar[flip] imagems."""
from PIL import Image
# Abre a imagem
im = Image.open('beijo_menor.jpg')
# Flip
im.transpose(Image.FLIP_LEFT_RIGHT).show() # Invete na horizontal
im.transpose(Image.FLIP_TOP_BOTTOM).show() # Invete na vertical
# Transposição
im.transpose(Image.ROTATE_90).show()
im.transpose(Image.ROTATE_180).show()
im.transpose(Image.ROTATE_270).show()
im.transpose(Image.TRANSPOSE).show()
im.transpose(Image.TRANSVERSE).show()
| 27.117647 | 66 | 0.761388 | [
"MIT"
] | BrunoPontesLira/live-de-python | codigo/Live176/exemplos_dos_slides/exemplo_05.py | 463 | Python |
# coding: utf-8
"""
Deep Lynx
The construction of megaprojects has consistently demonstrated challenges for project managers in regard to meeting cost, schedule, and performance requirements. Megaproject construction challenges are common place within megaprojects with many active projects in the United States failing to meet cost and schedule efforts by significant margins. Currently, engineering teams operate in siloed tools and disparate teams where connections across design, procurement, and construction systems are translated manually or over brittle point-to-point integrations. The manual nature of data exchange increases the risk of silent errors in the reactor design, with each silent error cascading across the design. These cascading errors lead to uncontrollable risk during construction, resulting in significant delays and cost overruns. Deep Lynx allows for an integrated platform during design and operations of mega projects. The Deep Lynx Core API delivers a few main features. 1. Provides a set of methods and endpoints for manipulating data in an object oriented database. This allows us to store complex datatypes as records and then to compile them into actual, modifiable objects at run-time. Users can store taxonomies or ontologies in a readable format. 2. Provides methods for storing and retrieving data in a graph database. This data is structured and validated against the aformentioned object oriented database before storage. # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.get_import_data_response import GetImportDataResponse # noqa: E501
from swagger_client.rest import ApiException
class TestGetImportDataResponse(unittest.TestCase):
"""GetImportDataResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testGetImportDataResponse(self):
"""Test GetImportDataResponse"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.get_import_data_response.GetImportDataResponse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 58.1 | 1,455 | 0.790017 | [
"MIT"
] | idaholab/Deep-Lynx-Python-Package | test/test_get_import_data_response.py | 2,324 | Python |
#!/usr/bin/env python3
import os
import sublime
import sublime_plugin
import random
import re
import sys
import math
__version__ = '0.1.0'
__authors__ = ['Ryan Grannell (@RyanGrannell)']
class BabelCommand (sublime_plugin.WindowCommand):
"""
babel loads a random file from your
currently open folders.
"""
    def run (self):
        window = self.window
        open_folders = window.folders()
        # Minimal sketch of the todo above: open a random file from the open folders.
        candidates = [os.path.join(root, name) for folder in open_folders
                      for root, _dirs, files in os.walk(folder) for name in files]
        if candidates:
            window.open_file(random.choice(candidates))
| 11.885714 | 50 | 0.704327 | [
"MIT"
] | rgrannell1/sublime-exec | sublime_exec.py | 416 | Python |
# import pandas as pd
#
# csv_data = pd.read_csv('E:\\AI_Object_Detect\\Code\\TextRecognitionDataGenerator\\idcard_file.txt', sep=',', header=0, encoding='UTF-8')
# N = 5
# csv_batch_data = csv_data.tail(N)
# print(csv_batch_data.shape)
import csv
import os
idcard_file = 'E:\\AI_Object_Detect\\Code\\TextRecognitionDataGenerator\\idcard_file.txt'
idcard_data = []
with open(idcard_file, 'r', encoding='UTF-8') as csvfile:
    csv_reader = csv.reader(csvfile)  # read the csv file with csv.reader
    birth_header = next(csv_reader)  # read the column headers from the first row
    for row in csv_reader:  # collect the id-card rows into idcard_data
tmp_str = row[10]
if 'issueAuthority' in tmp_str:
front = row[10].split(':')[1] + row[11].split(':')[1]
idcard_data.append(front.replace('"', '').replace("}",''))
elif 'address' in tmp_str:
back = row[10].split(':')[1] + row[11].split(':')[1] + row[12].split(':')[1] + row[13].split(':')[1] + row[14].split(':')[1] + row[15].split(':')[1]
idcard_data.append(back.replace('"', '').replace("}",''))
# print(str + '\r\n')
lang = 'char_std_5991'
with open(os.path.join('dicts', lang + '.txt'), 'r', encoding="utf8", errors='ignore') as d:
lang_dict = d.readlines()
lang_dict = [ch.strip('\n') for ch in lang_dict]
for text in idcard_data:
for character in text:
try:
p = lang_dict.index(character)
except ValueError:
lang_dict.append(character)
print(character)
# file=open('texts/data.txt','w+', encoding='UTF-8')
# for strgin in idcard_data:
# file.write(strgin + '\n')
# file.close()
# for cnt in idcard_data:
# print(cnt)
# print('\n')
# idcard_data = [[float(x) for x in row] for row in idcard_data]  # convert the data from strings to floats
# birth_data = np.array(birth_data)  # convert the list into an array to make the structure easier to inspect
# birth_header = np.array(birth_header)
# print(birth_data.shape)  # use .shape to inspect the structure
# print(birth_header.shape) | 33.372881 | 160 | 0.630269 | [
"MIT"
] | yuliangzhang/TextRecognitionDataGenerator | TextRecognitionDataGenerator/idcard_file_parse.py | 2,097 | Python |
from typing import Tuple, Union
import numpy as np
import pandas as pd
import tensorflow as tf
from src.models.dnn_regressor_funcs import (
_compile_model,
_create_keras_model,
_fit_model,
_to_input_list,
)
def predict(model: tf.keras.Model, X_test: pd.DataFrame, cate_cols: list) -> np.array:
"""
predict function
Args:
model: keras model fit by fit_model
X_test: Test features
cate_cols: categorical columns list
Returns: y_pred
"""
X_test_list = _to_input_list(df=X_test, cate_cols=cate_cols)
y_pred = model.predict(X_test_list)
return y_pred
def train(
X_train: pd.DataFrame,
y_train: Union[pd.Series, np.array],
X_val: pd.DataFrame,
y_val: Union[pd.Series, np.array],
layers: list,
num_classes: int,
cate_cols: list,
learning_rate: float,
epochs: int,
batch_size: int,
dropout_rate: float = 0.3,
) -> Tuple[tf.keras.callbacks.History, tf.keras.Model]:
"""
Training main function that takes dataset and parameters as input and returns the trained model with history
Args:
X_train: Train features
y_train: train labels
X_val: Validation labels
y_val: validation labels
layers: List of nodes in hidden layers
num_classes: Number of classes in target variable
cate_cols: categorical columns list
learning_rate: learning rate
epochs: number of epochs
batch_size: batch size
dropout_rate: dropout rate
Returns: history of training, trained model
"""
X_train_list = _to_input_list(df=X_train, cate_cols=cate_cols)
X_val_list = _to_input_list(df=X_val, cate_cols=cate_cols)
# if len(y_train.shape) == 1:
# y_train_categorical = tf.keras.utils.to_categorical(
# y_train, num_classes=num_classes, dtype="float32"
# )
#
# y_val_categorical = tf.keras.utils.to_categorical(
# y_val, num_classes=num_classes, dtype="float32"
# )
y_train = np.array(y_train)
y_val = np.array(y_val)
model = _create_keras_model(
X_train=X_train,
layers=layers,
num_classes=num_classes,
dropout_rate=dropout_rate,
cate_cols=cate_cols,
)
_compile_model(model=model, num_classes=num_classes, learning_rate=learning_rate)
history = _fit_model(
model=model,
X_train_list=X_train_list,
y_train=y_train,
X_val_list=X_val_list,
y_val=y_val,
epochs=epochs,
batch_size=batch_size,
)
return history, model
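# --- Hedged usage sketch (not part of the original module) ---
# Illustrates how train() and predict() above fit together.  The column names,
# the tiny synthetic frame, the layer sizes and num_classes=1 are made-up
# assumptions that only show the expected argument shapes; whether the hidden
# _create_keras_model helpers accept these exact values depends on code not shown here.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    frame = pd.DataFrame({
        'galaxy': rng.integers(0, 3, size=64),   # assumed categorical column
        'feature_a': rng.normal(size=64),
        'feature_b': rng.normal(size=64),
    })
    target = rng.normal(size=64)
    history, model = train(
        X_train=frame[:48], y_train=target[:48],
        X_val=frame[48:], y_val=target[48:],
        layers=[16, 8], num_classes=1, cate_cols=['galaxy'],
        learning_rate=1e-3, epochs=2, batch_size=16,
    )
    preds = predict(model, frame[48:], cate_cols=['galaxy'])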
| 26.612245 | 112 | 0.662577 | [
"MIT"
] | onurerkin/prohack | src/models/dnn_regressor.py | 2,608 | Python |
from pathlib import Path
from napari import Viewer
from qtpy.QtCore import Qt
from qtpy.QtWidgets import (
QErrorMessage,
QFileDialog,
QPushButton,
QVBoxLayout,
QWidget,
)
from ..animation import Animation
from .animationslider_widget import AnimationSliderWidget
from .frame_widget import FrameWidget
from .keyframelistcontrol_widget import KeyFrameListControlWidget
from .keyframeslist_widget import KeyFramesListWidget
class AnimationWidget(QWidget):
"""Widget for interatviely making animations using the napari viewer.
Parameters
----------
viewer : napari.Viewer
napari viewer.
Attributes
----------
viewer : napari.Viewer
napari viewer.
animation : napari_animation.Animation
napari-animation animation in sync with the GUI.
"""
def __init__(self, viewer: Viewer, parent=None):
super().__init__(parent=parent)
# Store reference to viewer and create animation
self.viewer = viewer
self.animation = Animation(self.viewer)
# Initialise UI
self._init_ui()
# establish key bindings and callbacks
self._add_keybind_callbacks()
self._add_callbacks()
def _init_ui(self):
"""Initialise user interface"""
self._layout = QVBoxLayout()
self.setLayout(self._layout)
self._init_keyframes_list_control_widget()
self._init_keyframes_list_widget()
self._init_frame_widget()
self._init_save_button()
self._init_animation_slider_widget()
def _add_keybind_callbacks(self):
"""Bind keys"""
self.animation.viewer.bind_key(
"Alt-f", self._capture_keyframe_callback
)
self.animation.viewer.bind_key(
"Alt-r", self._replace_keyframe_callback
)
self.animation.viewer.bind_key("Alt-d", self._delete_keyframe_callback)
self.animation.viewer.bind_key("Alt-a", self._key_adv_frame)
self.animation.viewer.bind_key("Alt-b", self._key_back_frame)
def _add_callbacks(self):
"""Establish callbacks"""
self.keyframesListControlWidget.deleteButton.clicked.connect(
self._delete_keyframe_callback
)
self.keyframesListControlWidget.captureButton.clicked.connect(
self._capture_keyframe_callback
)
self.saveButton.clicked.connect(self._save_callback)
self.animationsliderWidget.valueChanged.connect(
self._move_animationslider_callback
)
self.viewer.events.theme.connect(
lambda e: self.keyframesListWidget._update_theme(e.value)
)
def _release_callbacks(self):
"""Release keys"""
self.animation.viewer.bind_key("Alt-f", None)
self.animation.viewer.bind_key("Alt-r", None)
self.animation.viewer.bind_key("Alt-d", None)
self.animation.viewer.bind_key("Alt-a", None)
self.animation.viewer.bind_key("Alt-b", None)
def _init_frame_widget(self):
self.frameWidget = FrameWidget(parent=self)
self._layout.addWidget(self.frameWidget)
def _init_keyframes_list_control_widget(self):
self.keyframesListControlWidget = KeyFrameListControlWidget(
animation=self.animation, parent=self
)
self._layout.addWidget(self.keyframesListControlWidget)
def _init_keyframes_list_widget(self):
self.keyframesListWidget = KeyFramesListWidget(
self.animation, parent=self
)
self.keyframesListWidget._update_theme(self.viewer.theme)
self._layout.addWidget(self.keyframesListWidget)
def _init_save_button(self):
self.saveButton = QPushButton("Save Animation", parent=self)
self._layout.addWidget(self.saveButton)
def _init_animation_slider_widget(self):
self.animationsliderWidget = AnimationSliderWidget(
self.animation, orientation=Qt.Horizontal, parent=self
)
self._layout.addWidget(self.animationsliderWidget)
def _get_interpolation_steps(self):
return int(self.frameWidget.stepsSpinBox.value())
def _get_easing_function(self):
return self.frameWidget.get_easing_func()
def _capture_keyframe_callback(self, event=None):
"""Record current key-frame"""
self.animation.capture_keyframe(
steps=self._get_interpolation_steps(),
ease=self._get_easing_function(),
)
if len(self.animation.key_frames) == 1:
self.keyframesListControlWidget.deleteButton.setEnabled(True)
self.keyframesListWidget.setEnabled(True)
self.frameWidget.setEnabled(True)
self.animationsliderWidget.requires_update = True
def _update_frame_widget_from_animation(self):
self.frameWidget.update_from_animation()
def _replace_keyframe_callback(self, event=None):
"""Replace current key-frame with new view"""
self.animation.capture_keyframe(
steps=self._get_interpolation_steps(),
ease=self._get_easing_function(),
insert=False,
)
self.animationsliderWidget.requires_update = True
def _delete_keyframe_callback(self, event=None):
"""Delete current key-frame"""
if len(self.animation.key_frames) > 0:
self.animation.key_frames.pop(self.animation.frame)
if len(self.animation.key_frames) == 0:
self.keyframesListControlWidget.deleteButton.setEnabled(False)
self.keyframesListWidget.setEnabled(False)
self.frameWidget.setEnabled(False)
self.animationsliderWidget.requires_update = True
def _key_adv_frame(self, event=None):
"""Go forwards in key-frame list"""
new_frame = (self.animation.frame + 1) % len(self.animation.key_frames)
self.animation.set_to_keyframe(new_frame)
self.keyframesListWidget.setCurrentRow(new_frame)
def _key_back_frame(self, event=None):
"""Go backwards in key-frame list"""
new_frame = (self.animation.frame - 1) % len(self.animation.key_frames)
self.animation.set_to_keyframe(new_frame)
self.keyframesListWidget.setCurrentRow(new_frame)
def _save_callback(self, event=None):
if len(self.animation.key_frames) < 2:
error_dialog = QErrorMessage()
error_dialog.showMessage(
f"You need at least two key frames to generate \
an animation. Your only have {len(self.animation.key_frames)}"
)
error_dialog.exec_()
else:
filters = (
"Video files (*.mp4 *.gif *.mov *.avi *.mpg *.mpeg *.mkv *.wmv)"
";;Folder of PNGs (*)" # sep filters with ";;"
)
filename, _filter = QFileDialog.getSaveFileName(
self, "Save animation", str(Path.home()), filters
)
if filename:
self.animation.animate(filename)
def _move_animationslider_callback(self, event=None):
"""Scroll through interpolated states. Computes states if key-frames changed"""
self.animationsliderWidget.synchronise()
new_frame = self.animationsliderWidget.value()
self.animation._set_viewer_state(
self.animationsliderWidget.interpol_states[new_frame]
)
# This gets the index of the first key frame whose frame count is above new_frame
new_key_frame = (
self.animationsliderWidget.cumulative_frame_count > new_frame
).argmax()
new_key_frame -= 1 # to get the previous key frame
new_key_frame = int(new_key_frame) # to enable slicing a list with it
self.keyframesListWidget.setCurrentRowBlockingSignals(new_key_frame)
self.animation.frame = new_key_frame
def close(self):
self._release_callbacks()
super().close()
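# --- Hedged usage sketch (not part of the original module) ---
# How the widget is typically attached to a napari viewer; meant to be run
# inside a live napari session, so it is left commented out here.  The dock
# area 'right' is an arbitrary choice.
# import napari
# viewer = napari.Viewer()
# animation_widget = AnimationWidget(viewer)
# viewer.window.add_dock_widget(animation_widget, area='right')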
| 35.565022 | 89 | 0.668012 | [
"BSD-3-Clause"
] | tlambert-forks/napari-animation | napari_animation/_qt/animation_widget.py | 7,931 | Python |
from django.contrib import admin
from profiles_api import models
# Register your models here.
admin.site.register(models.UserProfile) # registers the model on the admin site
admin.site.register(models.ProfileFeedItem)
| 31.285714 | 79 | 0.826484 | [
"MIT"
] | yash-hash/Profiles-rest-api | profiles_api/admin.py | 219 | Python |
"""initial
Revision ID: 3741581c7fc4
Revises:
Create Date: 2017-10-02 09:13:51.619334
"""
# revision identifiers, used by Alembic.
revision = '3741581c7fc4'
down_revision = None
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('discussions',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('latitude', sa.Float(), nullable=True),
sa.Column('longitude', sa.Float(), nullable=True),
sa.Column('type', sa.Integer(), nullable=True),
sa.Column('title', sa.String(length=100), nullable=True),
sa.Column('created', sa.DateTime(), nullable=True),
sa.Column('identifier', sa.String(length=50), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('identifier')
)
op.create_index('disc_long_lat_idx', 'discussions', ['latitude', 'longitude'], unique=False)
op.create_index(op.f('ix_discussions_created'), 'discussions', ['created'], unique=False)
op.create_table('highlight_markers',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('latitude', sa.Float(), nullable=True),
sa.Column('longitude', sa.Float(), nullable=True),
sa.Column('created', sa.DateTime(), nullable=True),
sa.Column('type', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index('highlight_long_lat_idx', 'highlight_markers', ['latitude', 'longitude'], unique=False)
op.create_table('markers',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('latitude', sa.Float(), nullable=True),
sa.Column('longitude', sa.Float(), nullable=True),
sa.Column('type', sa.Integer(), nullable=True),
sa.Column('title', sa.String(length=100), nullable=True),
sa.Column('created', sa.DateTime(), nullable=True),
sa.Column('provider_code', sa.Integer(), nullable=False),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('subtype', sa.Integer(), nullable=True),
sa.Column('severity', sa.Integer(), nullable=True),
sa.Column('address', sa.Text(), nullable=True),
sa.Column('locationAccuracy', sa.Integer(), nullable=True),
sa.Column('roadType', sa.Integer(), nullable=True),
sa.Column('roadShape', sa.Integer(), nullable=True),
sa.Column('dayType', sa.Integer(), nullable=True),
sa.Column('unit', sa.Integer(), nullable=True),
sa.Column('mainStreet', sa.Text(), nullable=True),
sa.Column('secondaryStreet', sa.Text(), nullable=True),
sa.Column('junction', sa.Text(), nullable=True),
sa.Column('one_lane', sa.Integer(), nullable=True),
sa.Column('multi_lane', sa.Integer(), nullable=True),
sa.Column('speed_limit', sa.Integer(), nullable=True),
sa.Column('intactness', sa.Integer(), nullable=True),
sa.Column('road_width', sa.Integer(), nullable=True),
sa.Column('road_sign', sa.Integer(), nullable=True),
sa.Column('road_light', sa.Integer(), nullable=True),
sa.Column('road_control', sa.Integer(), nullable=True),
sa.Column('weather', sa.Integer(), nullable=True),
sa.Column('road_surface', sa.Integer(), nullable=True),
sa.Column('road_object', sa.Integer(), nullable=True),
sa.Column('object_distance', sa.Integer(), nullable=True),
sa.Column('didnt_cross', sa.Integer(), nullable=True),
sa.Column('cross_mode', sa.Integer(), nullable=True),
sa.Column('cross_location', sa.Integer(), nullable=True),
sa.Column('cross_direction', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id', 'provider_code')
)
op.create_index('acc_long_lat_idx', 'markers', ['latitude', 'longitude'], unique=False)
op.create_index(op.f('ix_markers_created'), 'markers', ['created'], unique=False)
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=80), nullable=True),
sa.Column('description', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('email', sa.String(length=120), nullable=True),
sa.Column('first_name', sa.String(length=50), nullable=True),
sa.Column('last_name', sa.String(length=50), nullable=True),
sa.Column('access_token', sa.String(length=100), nullable=True),
sa.Column('username', sa.String(length=50), nullable=True),
sa.Column('facebook_id', sa.String(length=50), nullable=True),
sa.Column('facebook_url', sa.String(length=100), nullable=True),
sa.Column('is_admin', sa.Boolean(), nullable=True),
sa.Column('new_features_subscription', sa.Boolean(), nullable=True),
sa.Column('password', sa.String(length=256), nullable=True),
sa.Column('active', sa.Boolean(), nullable=True),
sa.Column('confirmed_at', sa.DateTime(), nullable=True),
sa.Column('social_id', sa.String(length=64), nullable=True),
sa.Column('nickname', sa.String(length=64), nullable=True),
sa.Column('provider', sa.String(length=64), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email'),
sa.UniqueConstraint('social_id'),
sa.UniqueConstraint('username')
)
op.create_table('general_preferences',
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('minimum_displayed_severity', sa.Integer(), nullable=True),
sa.Column('resource_type', sa.String(length=64), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('user_id')
)
op.create_table('involved',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('provider_code', sa.Integer(), nullable=True),
sa.Column('accident_id', sa.Integer(), nullable=True),
sa.Column('involved_type', sa.Integer(), nullable=True),
sa.Column('license_acquiring_date', sa.Integer(), nullable=True),
sa.Column('age_group', sa.Integer(), nullable=True),
sa.Column('sex', sa.Integer(), nullable=True),
sa.Column('car_type', sa.Integer(), nullable=True),
sa.Column('safety_measures', sa.Integer(), nullable=True),
sa.Column('home_city', sa.Integer(), nullable=True),
sa.Column('injury_severity', sa.Integer(), nullable=True),
sa.Column('injured_type', sa.Integer(), nullable=True),
sa.Column('injured_position', sa.Integer(), nullable=True),
sa.Column('population_type', sa.Integer(), nullable=True),
sa.Column('home_district', sa.Integer(), nullable=True),
sa.Column('home_nafa', sa.Integer(), nullable=True),
sa.Column('home_area', sa.Integer(), nullable=True),
sa.Column('home_municipal_status', sa.Integer(), nullable=True),
sa.Column('home_residence_type', sa.Integer(), nullable=True),
sa.Column('hospital_time', sa.Integer(), nullable=True),
sa.Column('medical_type', sa.Integer(), nullable=True),
sa.Column('release_dest', sa.Integer(), nullable=True),
sa.Column('safety_measures_use', sa.Integer(), nullable=True),
sa.Column('late_deceased', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['accident_id', 'provider_code'], [u'markers.id', u'markers.provider_code'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id')
)
op.create_table('report_preferences',
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('line_number', sa.Integer(), nullable=False),
sa.Column('historical_report', sa.Boolean(), nullable=True),
sa.Column('how_many_months_back', sa.Integer(), nullable=True),
sa.Column('latitude', sa.Float(), nullable=True),
sa.Column('longitude', sa.Float(), nullable=True),
sa.Column('radius', sa.Float(), nullable=True),
sa.Column('minimum_severity', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('user_id', 'line_number')
)
op.create_table('roles_users',
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], )
)
op.create_table('vehicles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('provider_code', sa.Integer(), nullable=True),
sa.Column('accident_id', sa.Integer(), nullable=True),
sa.Column('engine_volume', sa.Integer(), nullable=True),
sa.Column('manufacturing_year', sa.Integer(), nullable=True),
sa.Column('driving_directions', sa.Integer(), nullable=True),
sa.Column('vehicle_status', sa.Integer(), nullable=True),
sa.Column('vehicle_attribution', sa.Integer(), nullable=True),
sa.Column('vehicle_type', sa.Integer(), nullable=True),
sa.Column('seats', sa.Integer(), nullable=True),
sa.Column('total_weight', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['accident_id', 'provider_code'], [u'markers.id', u'markers.provider_code'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('vehicles')
op.drop_table('roles_users')
op.drop_table('report_preferences')
op.drop_table('involved')
op.drop_table('general_preferences')
op.drop_table('users')
op.drop_table('roles')
op.drop_index(op.f('ix_markers_created'), table_name='markers')
op.drop_index('acc_long_lat_idx', table_name='markers')
op.drop_table('markers')
op.drop_index('highlight_long_lat_idx', table_name='highlight_markers')
op.drop_table('highlight_markers')
op.drop_index(op.f('ix_discussions_created'), table_name='discussions')
op.drop_index('disc_long_lat_idx', table_name='discussions')
op.drop_table('discussions')
### end Alembic commands ###
| 48.955224 | 125 | 0.683232 | [
"BSD-3-Clause"
] | ShacharOch/anyway | alembic/versions/3741581c7fc4_initial.py | 9,840 | Python |
from pydantic import BaseModel, HttpUrl
from typing import List
class RegisterHostModel(BaseModel):
name: str
url: HttpUrl
ports: List[int]
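# --- Hedged usage sketch (not part of the original module) ---
# The host name, URL and ports below are illustrative values only; pydantic
# validates the URL and coerces the port entries to integers on construction.
if __name__ == '__main__':
    host = RegisterHostModel(name='abyss-node', url='https://example.com', ports=[80, '443'])
    print(host.dict())  # {'name': 'abyss-node', 'url': ..., 'ports': [80, 443]}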
| 19.25 | 39 | 0.74026 | [
"MIT"
] | xrzhev/abysswatcher-api | src/models/HostsModel.py | 154 | Python |
## this tool provides the core functions for CNV and SNV analysis
## author: taozhou
## email: [email protected]
import matplotlib as mpl
mpl.use('Agg')
import warnings
warnings.filterwarnings("ignore")
import itertools
import seaborn as sns
import matplotlib.pylab as plt
import matplotlib.colors as mc
from genecast_package.svm_analysis import feature_select, evaluate_model
from sklearn.decomposition import PCA
from collections import OrderedDict
from collections import defaultdict
import datetime
import pandas as pd
from scipy.stats import ranksums
import os
import sh
import warnings
warnings.filterwarnings("ignore")
def z_score(data, axis):
if axis == 3:
return data
if axis == 1:
z_scored = data
else:
z_scored = data.T
z_scored = (z_scored - z_scored.mean()) / z_scored.std()
if axis == 1:
return z_scored
else:
return z_scored.T
def pheatmap(data, length, col_cluster=True, xticklabels=True, yticklabels=True, color=None, name=None, args=None):
data = z_score(data, axis=args.z_score)
if len(data.columns) > 30:
xticklabels = False
if len(data) > 80:
yticklabels = False
vmin, vmax = data.unstack().quantile([.05, .95])
if args.z_score == 3:
vmin, vmax = 0, 4
re = sns.clustermap(data, cmap=args.cmp, row_cluster=True, method=args.cluster_method, col_cluster=col_cluster, figsize=(13, 10), \
xticklabels=True, yticklabels=yticklabels, vmin=vmin, vmax=vmax, col_colors=color)
re.ax_heatmap.set_xticklabels(re.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)
re.ax_heatmap.set_yticklabels(re.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
if col_cluster == False:
for group, number in length.items():
re.ax_col_colors.text((number[0] + number[1])/2 + 1.5 - len(group)/2, 1.2, group, size=30)
re.savefig(name + "." + args.save)
else:
re.savefig(name + "_col_cluster." + args.save)
plt.close()
def make_col_color_heatmap(group_dic, args=None):
common_color = ["blue", "red", "green", "grey"]
color = {}; length = {}
temp = 0
i = 0
for name, group in group_dic.items():
length[name] = [temp, temp + len(group)]
temp += len(group)
for sample in group:
color[sample] = common_color[i]
i += 1
    if args.ac and args.bc:
        # assumption: args.ac / args.bc are user-chosen colors for the first two
        # groups; the original code referenced undefined names group1 / group2 here.
        group1, group2 = list(group_dic.keys())[:2]
        for sample in group_dic[group1]:
            color[sample] = args.ac
        for sample in group_dic[group2]:
            color[sample] = args.bc
color = pd.Series(color)
color.name = "group"
return color, length
def pca(data, group_dic, n=None, args=None):
pca = PCA(n_components=2)
group = []
length = OrderedDict()
temp = 0
for name, g in group_dic.items():
length[name] = [temp, temp + len(g)]
temp += len(g)
group += g
data = data[group]
newData = pca.fit_transform(data.T)
colors = {}
colors1 = ["blue", "red", "green", 'turquoise', "grey"]
i = 0
for name, number in length.items():
colors[name] = colors1[i]
i += 1
    if args.ac and args.bc:
        # assumption: args.ac / args.bc override the colors of the first two groups;
        # the original code referenced undefined names group1 / group2 here.
        group1, group2 = list(length.keys())[:2]
        colors[group1] = args.ac
        colors[group2] = args.bc
for name, number in length.items():
plt.scatter(newData[number[0]:number[1], 0], newData[number[0]:number[1], 1], label=name, color=colors[name])
plt.title("PCA analysis", size=20)
pc1 = 100*pca.explained_variance_ratio_[0]
pc2 = 100*pca.explained_variance_ratio_[1]
plt.xlabel("PC1(%.1f)" % pc1, size=15)
plt.ylabel("PC1(%.1f)" % pc2, size=15)
plt.legend()
plt.savefig("PCA_%s.png" % n)
plt.close()
def plot_box(data, which, outname, palette, regulation, group, args=None):
fig, ax1 = plt.subplots(figsize=(8,12))
box_data = defaultdict(list)
names = []
if which == "cnv":
how = "mean"
for name, g in group.items():
names.append(name)
box_data[name] = data[g]
else:
how = "sum"
for name, g in group.items():
names.append(name)
box_data[name] = data[g]
z, p = ranksums(box_data[names[0]], box_data[names[1]])
if p >= 0.05:
plt.close()
return
data.to_csv(outname + "_box_data_%s" % (regulation) + ".txt", sep="\t")
if args.ac and args.bc:
group1 = list(group.keys())[0]
group2 = list(group.keys())[1]
palette[group1] = args.ac
palette[group2] = args.bc
sns.boxplot(data=pd.DataFrame(box_data), ax=ax1, width=0.2, linewidth=.5, palette=palette)
ax1.set_title("Difference of %s (p = %f)" % (which, p), size=30)
ax1.set_ylabel('%s value' % (which), size=30)
fig.autofmt_xdate(ha='center', rotation=0)
plt.xticks(rotation=0, size=30)
plt.legend()
fig.savefig(r'%s_box_data_%s_%s_Boxplot.%s' % (outname, regulation, how, args.save), dpi=600, size=0.5)
plt.close()
def databox(raw, which, outname=None, group=None, args=None):
palette_up = {}; palette_down = {}
up = []; down = []
group1_data = raw[list(group.values())[0]]; group1 = list(group.keys())[0]
group2_data = raw[list(group.values())[1]]; group2 = list(group.keys())[1]
for gene in raw.index:
if group1_data.ix[gene].sum() - group2_data.ix[gene].sum() >= 0:
up.append(gene); palette_up[group1] = "red"; palette_up[group2] = "blue"
else:
down.append(gene); palette_down[group1] = "blue"; palette_down[group2] = "red"
if len(palette_up) > 0:
for i in up:
plot_box(raw.ix[i], which, i, palette_up, "up", group, args=args)
if len(palette_down) > 0:
for i in down:
plot_box(raw.ix[i], which, i, palette_down, "down", group, args=args)
def save_data_pdf(data, name, length, color, group_dic, which, args=None):
data.to_csv("%s.txt" % name, sep="\t")
length = {key.split("/")[-1]: value for key, value in length.items()}
group_dic = {key.split("/")[-1]: value for key, value in group_dic.items()}
try:
pheatmap(data, length, col_cluster=True, color=color, name=name, args=args)
pheatmap(data, length, col_cluster=False, color=color, name=name, args=args)
except MemoryError:
print("you gene need too much MemoryError and i, so pass and do next")
pca(data, group_dic, n=name, args=args)
databox(data, which, outname=name, group=group_dic, args=args)
def save_parameters(args=None):
f = open("parameters.txt", "w")
for arg in dir(args):
if not arg.startswith("_"):
f.write(arg + ": " + str(getattr(args, arg)) + "\n")
f.close()
def make_result_folder(args=None, which="cnv", fun=None):
feature_genes = []; gene_lists = {}; color_length = {}
os.chdir(args.outdir)
i = datetime.datetime.now()
# for two_group in itertools.combinations([args.group1, args.group2], 2):
two_group = [args.group1[0].split("/")[-2], args.group2[0].split("/")[-2]]
target = args.group1[0].split("/")[-2] + "_VS_" + args.group2[0].split("/")[-2] + "_%s%s%s_%s%s" % (i.year, i.month, i.day, i.hour, i.minute)
try:
os.mkdir(target)
except FileExistsError:
sh.rm("-rf",target)
os.mkdir(target)
if which == "cnv":
name = "cnv_median_" + args.data_type
gene_list, a_group, b_group = fun(args=args)
else:
if args.cal_type == "num":
name = "snv_number"
else:
name = "snv_mean"
gene_list, a_group, b_group = fun(args=args)
# feature_gene = feature_select(gene_list, a_group, b_group, pval=args.pval, method=args.feature_selection_method,\
# criterion=args.criterion, penalty=args.penalty, C=args.C, threshold=args.threshold)
feature_gene = feature_select(gene_list, a_group, b_group, args=args)
feature_genes.append(feature_gene)
gene_lists[two_group[0]] = gene_list[a_group]; gene_lists[two_group[1]] = gene_list[b_group]
os.chdir(target)
save_parameters(args=args)
group_dic = {two_group[0]: a_group, two_group[1]: b_group}
color_length[two_group[0]] = a_group; color_length[two_group[1]] = b_group
color, length = make_col_color_heatmap(group_dic, args=args)
save_data_pdf(gene_list, "host_gene_%s" % name, length, color, group_dic, which, args=args)
pd.DataFrame({"gene":feature_gene}).to_csv("feature_gene_pval%0.2f.txt" % args.pval, sep="\t", index=False)
feature_gene_cnv = gene_list.ix[feature_gene]
evaluate_model(gene_list, a_group, b_group, feature_gene, name="feature_gene_%s" % name, args=args)
save_data_pdf(feature_gene_cnv, "feature_gene_%s" % name, length, color, group_dic, which, args=args)
os.chdir(args.outdir)
# if len(args.group1 + args.group2) > 2:
# try:
# os.mkdir("intersection")
# except FileExistsError:
# pass
# os.chdir("intersection")
# color, length = make_col_color_heatmap(color_length)
# intersection_feature_gene = list(set(feature_genes[0]).intersection(*feature_genes[1:]))
# intersection_feature_gene_cnv = pd.concat([data.ix[intersection_feature_gene] for [args.group1, args.group2], data in gene_lists.items()], axis=1)
# try:
# save_data_pdf(intersection_feature_gene_cnv, "intersection", length, color, color_length)
# except Exception:
# print("no intersection\njob finish...")
# os.chdir(args.outdir)
| 39.23431 | 156 | 0.630266 | [
"Apache-2.0"
] | 861934367/genecast | genecast_package/core.py | 9,377 | Python |
#--coding: utf8--
from django import forms
from invoices.models import Customer
class InvoiceForm(forms.Form):
FORMAT_CHOICES = (
('pdf', 'PDF'),
('docx', 'MS Word'),
('html', 'HTML'),
)
number = forms.CharField(label='Invoice #')
customer = forms.ModelChoiceField(queryset=Customer.objects.all())
subject = forms.CharField()
amount = forms.DecimalField()
format = forms.ChoiceField(choices=FORMAT_CHOICES)
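# --- Hedged usage sketch (not part of the original module) ---
# Binding the form to POST-style data; the field values are made up, and
# validating the 'customer' field needs a real Customer row in the database,
# so is_valid() is only meaningful inside a configured Django project.
# form = InvoiceForm(data={'number': 'INV-001', 'customer': 1,
#                          'subject': 'Consulting', 'amount': '150.00', 'format': 'pdf'})
# if form.is_valid():
#     print(form.cleaned_data)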
| 24.368421 | 70 | 0.647948 | [
"MIT"
] | Barolina/templated-docs | example/invoices/forms.py | 463 | Python |
# Generated by Django 2.2.6 on 2019-10-29 15:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('data_driven_acquisition', '0004_acl'),
]
operations = [
migrations.AlterModelOptions(
name='file',
options={'get_latest_by': 'created_at', 'permissions': (('can_edit_content', 'Can edit file content'),)},
),
migrations.AlterModelOptions(
name='folder',
options={'get_latest_by': 'created_at', 'permissions': (('can_set_properties', 'Can set properties on folder'), ('can_propagate_properties', 'Can propagate properties to children.'), ('can_edit_child_content', 'Can edit content of children.'))},
),
migrations.AlterModelOptions(
name='packagetemplate',
options={'get_latest_by': 'created_at', 'permissions': (('can_deploy', 'Can deploy from template'),)},
),
migrations.AddField(
model_name='packagetemplate',
name='title',
field=models.CharField(default='lorem ipsum', max_length=256),
preserve_default=False,
),
]
| 36.84375 | 257 | 0.616624 | [
"CC0-1.0"
] | adam-grandt-tts/data-driven-acquisition | data_driven_acquisition/migrations/0005_auto_20191029_1531.py | 1,179 | Python |
#!/usr/bin/env python3
from __future__ import annotations
import contextlib
import json
import os
import socket
import struct
import boldui.hotrefresh
from simplexp import Expr, var, Oplist
from typing import List
class Actions:
UPDATE_SCENE = 0
HANDLER_REPLY = 1
SET_VAR = 2
WATCH_ACK = 3
def stringify_op(obj, indent=0):
result = ''
if isinstance(obj, list):
result += '['
if len(obj) != 0:
result += '\n'
for op in obj:
result += ' ' * (indent + 2) + stringify_op(op, indent + 2) + ',\n'
if len(obj) != 0:
result += ' ' * indent
result += ']'
return result
elif isinstance(obj, dict) and 'type' in obj:
if obj['type'] in ('clear', 'rect', 'rrect', 'reply', 'setVar', 'evtHnd', 'watch', 'ackWatch', 'if', 'text', 'save',
'restore', 'clipRect', 'image'):
result += 'Ops.' + obj['type'] + '('
if len(obj.keys()) != 1:
result += '\n'
for key in obj.keys():
if key == 'type':
continue
result += ' ' * (indent + 2) + f'{key}={stringify_op(obj[key], indent + 2)},\n'
result += ' ' * indent
result += ')'
return result
return repr(obj)
class Ops:
@staticmethod
def clear(color):
return {'type': 'clear', 'color': color}
@staticmethod
def rect(rect, color):
return {'type': 'rect', 'rect': rect, 'color': color}
@staticmethod
def rrect(rect, color, radius):
return {'type': 'rrect', 'rect': rect, 'color': color, 'radius': radius}
@staticmethod
def reply(ident: int, data: List[Expr | int | float | None]):
return {'type': 'reply', 'id': ident, 'data': data}
@staticmethod
def set_var(name: str, value: Expr):
return {'type': 'setVar', 'name': name, 'value': value}
@staticmethod
def event_handler(rect, events, handler, oplist):
return {
'type': 'evtHnd',
'rect': rect,
'events': events,
'handler': handler,
'oplist': oplist,
}
@staticmethod
def watch_var(id, cond, wait_for_roundtrip, handler):
return {
'type': 'watch',
'id': id,
'cond': cond,
'waitForRoundtrip': wait_for_roundtrip,
'handler': handler
}
@staticmethod
def ack_watch(id):
return {
'type': 'ackWatch',
'id': id,
}
@staticmethod
def text(text, x, y, font_size, color):
return {
'type': 'text',
'text': text,
'x': x,
'y': y,
'fontSize': font_size,
'color': color,
}
@staticmethod
def if_(cond, t, f):
return {'type': 'if', 'cond': cond, 'then': t, 'else': f}
@staticmethod
def save():
return {'type': 'save'}
@staticmethod
def restore():
return {'type': 'restore'}
@staticmethod
def clip_rect(rect):
return {'type': 'clipRect', 'rect': rect}
@staticmethod
def image(uri, rect):
return {'type': 'image', 'uri': uri, 'rect': rect}
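# --- Hedged usage sketch (not part of the original module) ---
# Builds a tiny op list with the Ops helpers above and pretty-prints it with
# stringify_op; the colors and rectangle coordinates are arbitrary example values.
def _example_scene():
    ops = [
        Ops.clear(0xFF202020),
        Ops.rect(rect=(10, 10, 110, 60), color=0xFF3070F0),
        Ops.text(text='hello', x=20, y=40, font_size=14, color=0xFFFFFFFF),
    ]
    print(stringify_op(ops))
    return ops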
class ProtocolServer:
def __init__(self, address, reply_handler=None):
self.pending_vars = {}
self.address = address
self._scene = None
self._cached_scene = None
self.reply_handler = reply_handler
if os.path.exists(address):
os.remove(address)
SYSTEMD_SOCK_FD = 3
self.server = socket.fromfd(SYSTEMD_SOCK_FD, socket.AF_UNIX, socket.SOCK_STREAM)
self.socket = None
self._is_batch = False
self._batch_scene_updated = False
self._batch_vars = None
hotrefresh.init(self)
@property
def scene(self):
if self._cached_scene is None:
if callable(self._scene):
self._cached_scene = self._scene()
else:
self._cached_scene = self._scene
return self._cached_scene
@scene.setter
def scene(self, value):
self._scene = value
self._cached_scene = None
if self._is_batch:
self._batch_scene_updated = True
else:
self._send_scene()
def refresh_scene(self):
self._cached_scene = None
if self._is_batch:
self._batch_scene_updated = True
else:
self._send_scene()
@contextlib.contextmanager
def batch_update(self):
assert not self._is_batch
self._is_batch = True
self._batch_scene_updated = False
self._batch_vars = {}
yield
if self._batch_scene_updated:
self._send_scene()
elif self._batch_vars:
self._send_remote_var([(name, val) for name, val in self._batch_vars.items()])
self._is_batch = False
self._batch_scene_updated = False
self._batch_vars = None
def serve(self):
while True:
print('Waiting for connection...')
self.server.listen(1)
self.socket, addr = self.server.accept()
print('Client connected', addr)
self.socket.send(b"BoldUI\x00\x01")
# Read header
header = self.socket.recv(8)
if header != b"BoldUI\x00\x01":
print("Invalid header, disconnecting")
break
print("Handshake complete, sending initial scene")
if self.scene:
self._send_scene()
for var in self.pending_vars:
self.set_remote_var(var, self.pending_vars[var][0], self.pending_vars[var][1])
print(f'Server PID is {os.getpid()}')
while True:
packet = b''
packet_length = self.socket.recv(4)
if not packet_length:
break
packet_length = int.from_bytes(packet_length, 'big')
while len(packet) < packet_length:
packet += self.socket.recv(packet_length - len(packet))
if not packet:
break
self._handle_packet(packet)
print('Client disconnected')
break
def _send_packet(self, packet):
# print('Sending packet:', packet)
self.socket.send(len(packet).to_bytes(4, 'big') + packet)
def _handle_packet(self, packet):
action = int.from_bytes(packet[:4], 'big')
data = packet[4:]
if action == Actions.HANDLER_REPLY:
reply_count = int.from_bytes(data[:2], 'big')
data = data[2:]
with self.batch_update():
for i in range(reply_count):
reply_len = int.from_bytes(data[:2], 'big')
reply_id = int.from_bytes(data[2:6], 'big')
reply_data = data[6:6+reply_len]
data_array = []
while reply_data:
item_type = reply_data[0]
if item_type == 0:
data_array.append(int.from_bytes(reply_data[1:9], 'big', signed=True))
reply_data = reply_data[9:]
elif item_type == 1:
data_array.append(struct.unpack('>d', reply_data[1:9])[0])
reply_data = reply_data[9:]
else:
raise ValueError(f"Unknown item type {item_type}")
if self.reply_handler:
# print(f'Reply: {hex(reply_id)} : {data_array}')
self.reply_handler(reply_id, data_array)
else:
print('[app] Unknown packet type:', packet)
def _send_scene(self):
if self.socket:
combined_scene = self.scene
if self._batch_vars is not None:
for key, value in self._batch_vars.items():
combined_scene['vars'][key]['value'] = json.dumps(Oplist(Expr.to_dict(value)).to_list())
            self._send_packet(Actions.UPDATE_SCENE.to_bytes(4, 'big') + json.dumps(combined_scene).encode())
def set_remote_var(self, name, val_type, value):
self.pending_vars[name] = (val_type, value)
if self._is_batch:
self._batch_vars[name] = value
else:
self._send_remote_var([(name, value)])
def _send_remote_var(self, set_vars):
if self.socket:
parts = []
for name, value in set_vars:
value = Oplist(Expr.to_dict(value)).to_list()
parts.append(name.encode() + b'\x00' + json.dumps(value).encode())
self._send_packet(Actions.SET_VAR.to_bytes(4, 'big') + b'\x00'.join(parts))
def send_watch_ack(self, ack_id: int):
if self.socket:
self._send_packet(Actions.WATCH_ACK.to_bytes(4, 'big') + ack_id.to_bytes(8, 'big'))
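# --- Editor's illustrative sketch (not part of the original module) ---
# _send_packet()/_handle_packet() above frame every message as a 4-byte
# big-endian length prefix followed by a 4-byte action code and the payload.
# A minimal standalone helper showing that layout; the action value 7 in the
# comment below is arbitrary and purely for illustration:
def _example_frame(action: int, payload: bytes) -> bytes:
    body = action.to_bytes(4, 'big') + payload
    return len(body).to_bytes(4, 'big') + body
# _example_frame(7, b'{}') == b'\x00\x00\x00\x06\x00\x00\x00\x07{}'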
| 31.106164 | 124 | 0.526148 | ["MIT"] | Wazzaps/boldui | boldui/__init__.py | 9,083 | Python |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for building neural network losses.
See @{$python/contrib.losses}.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.losses.python.losses import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'absolute_difference',
'add_loss',
'hinge_loss',
'compute_weighted_loss',
'cosine_distance',
'get_losses',
'get_regularization_losses',
'get_total_loss',
'log_loss',
'mean_pairwise_squared_error',
'mean_squared_error',
'sigmoid_cross_entropy',
'softmax_cross_entropy',
'sparse_softmax_cross_entropy',
]
remove_undocumented(__name__, _allowed_symbols)
| 31.145833 | 80 | 0.720401 | ["Apache-2.0"] | 18802459097/tensorflow | tensorflow/contrib/losses/__init__.py | 1,495 | Python |
from __future__ import print_function
from time import sleep
import matplotlib.pyplot as plt
import signal
def sigIntHandler(signal, frame):
raise KeyboardInterrupt
def publishPose(q, problem, t=0.0):
problem.getScene().Update(q, t)
problem.getScene().getSolver().publishFrames()
def publishTrajectory(traj, T, problem):
if len(traj) == 0:
print("Trajectory has zero elements")
        raise ValueError("Trajectory has zero elements")
signal.signal(signal.SIGINT, sigIntHandler)
print('Playing back trajectory '+str(T)+'s')
dt = float(T)/float(len(traj))
t = 0
while True:
try:
publishPose(traj[t], problem, float(t)*dt)
sleep(dt)
t = (t+1) % len(traj)
except KeyboardInterrupt:
return False
return True
def publishTimeIndexedTrajectory(traj, Ts, problem, once=False):
if len(traj) == 0:
print("Trajectory has zero elements")
        raise ValueError("Trajectory has zero elements")
signal.signal(signal.SIGINT, sigIntHandler)
print('Playing back trajectory '+str(len(Ts)) +
          ' states in '+str(Ts[len(Ts)-1])+'s')
idx = 0
while True:
try:
for i in range(1, len(Ts)-1):
publishPose(traj[i], problem, Ts[i])
sleep(Ts[i]-Ts[i-1])
if once:
break
except KeyboardInterrupt:
return False
return True
def plot(solution):
print('Plotting the solution')
plt.plot(solution, '.-')
plt.show()
| 24.847458 | 64 | 0.595498 | ["BSD-3-Clause"] | LongfeiProjects/exotica | exotica_python/src/pyexotica/publish_trajectory.py | 1,466 | Python |
__author__ = 'atzorvas'
| 12 | 23 | 0.75 | ["MIT"] | atzorvas/droughtmeteo | views/stations/__init__.py | 24 | Python |
from Utilities.CSV import csv_data_line
from Utilities import date_formating
import logging
from datetime import date
import time
import datetime
from shared_types import DateDict
logging.basicConfig(filename='../../CrawlerLogs' + 'Crawlerlog-' +
date.today().strftime("%b-%Y") + '.log',
level=logging.INFO,
format='%(asctime)s %(message)s')
def process_file(filename: str) -> DateDict:
"""
    Method that takes the path to a crawled file and outputs a date dictionary.
    The date dictionary maps dates in the format YYYY-mm-dd-hh (e.g. 2018-04-08-15)
    to an inner dictionary whose keys are devices (specified in the configuration file)
    and whose values are CSVDataLine.csv_data_line objects with device, date and occurrence.
Args:
filename: name of processed file
Returns:
None if not implemented
date_dict when implemented
"""
date_dict = {}
with open(filename, "r") as file:
YEAR_START = 1
YEAR_END = 11
for line in file:
array = line.split(";")
#pick later time
time_ = max(
array[2][1:-1],
array[3][1:-1],
key=lambda x: time.mktime(
datetime.datetime.strptime(x, "%H:%M").timetuple()))
date = date_formating.date_time_formatter(
array[14][YEAR_START:YEAR_END] + " " + time_)
name = array[10][1:-1]
if name == "":
continue
if date not in date_dict:
date_dict[date] = {}
if name in date_dict[date]:
date_dict[date][name].occurrence = int(array[12])
else:
date_dict[date][name] = csv_data_line.CSVDataLine(
name, date, int(array[12]))
return date_dict
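# --- Editor's illustrative usage sketch (not part of the original module) ---
# Walks the date dictionary documented in process_file(); 'rooms.csv' is a
# hypothetical crawled file name used only for this example.
if __name__ == '__main__':
    example_dict = process_file('rooms.csv')
    for date_key, devices in example_dict.items():
        for device_name, line in devices.items():
            print(date_key, device_name, line.occurrence)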
| 29.09375 | 96 | 0.575188 | ["MIT"] | kivzcu/heatmap.zcu | modules/crawler/DatasetProcessing/OBSAZENIMISTNOSTI_processor.py | 1,862 | Python |
# (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from datadog_checks.base.utils.models.fields import get_default_field_value
def shared_proxy(field, value):
return get_default_field_value(field, value)
def shared_service(field, value):
return get_default_field_value(field, value)
def shared_skip_proxy(field, value):
return False
def shared_timeout(field, value):
return 10
def instance_assume_role(field, value):
return get_default_field_value(field, value)
def instance_auth_token(field, value):
return get_default_field_value(field, value)
def instance_auth_type(field, value):
return 'basic'
def instance_aws_host(field, value):
return get_default_field_value(field, value)
def instance_aws_region(field, value):
return get_default_field_value(field, value)
def instance_aws_service(field, value):
return get_default_field_value(field, value)
def instance_cache_metric_wildcards(field, value):
return True
def instance_cache_shared_labels(field, value):
return True
def instance_collect_counters_with_distributions(field, value):
return False
def instance_collect_histogram_buckets(field, value):
return True
def instance_connect_timeout(field, value):
return get_default_field_value(field, value)
def instance_disable_generic_tags(field, value):
return False
def instance_empty_default_hostname(field, value):
return False
def instance_enable_health_service_check(field, value):
return True
def instance_exclude_labels(field, value):
return get_default_field_value(field, value)
def instance_exclude_metrics(field, value):
return get_default_field_value(field, value)
def instance_exclude_metrics_by_labels(field, value):
return get_default_field_value(field, value)
def instance_extra_headers(field, value):
return get_default_field_value(field, value)
def instance_extra_metrics(field, value):
return get_default_field_value(field, value)
def instance_headers(field, value):
return get_default_field_value(field, value)
def instance_histogram_buckets_as_distributions(field, value):
return False
def instance_hostname_format(field, value):
return get_default_field_value(field, value)
def instance_hostname_label(field, value):
return get_default_field_value(field, value)
def instance_ignore_tags(field, value):
return get_default_field_value(field, value)
def instance_jmx_exporter_port(field, value):
return 11001
def instance_kerberos_auth(field, value):
return 'disabled'
def instance_kerberos_cache(field, value):
return get_default_field_value(field, value)
def instance_kerberos_delegate(field, value):
return False
def instance_kerberos_force_initiate(field, value):
return False
def instance_kerberos_hostname(field, value):
return get_default_field_value(field, value)
def instance_kerberos_keytab(field, value):
return get_default_field_value(field, value)
def instance_kerberos_principal(field, value):
return get_default_field_value(field, value)
def instance_log_requests(field, value):
return False
def instance_metrics(field, value):
return get_default_field_value(field, value)
def instance_min_collection_interval(field, value):
return 15
def instance_namespace(field, value):
return get_default_field_value(field, value)
def instance_node_exporter_port(field, value):
return 11002
def instance_non_cumulative_histogram_buckets(field, value):
return False
def instance_ntlm_domain(field, value):
return get_default_field_value(field, value)
def instance_openmetrics_endpoint(field, value):
return get_default_field_value(field, value)
def instance_password(field, value):
return get_default_field_value(field, value)
def instance_persist_connections(field, value):
return False
def instance_prometheus_metrics_path(field, value):
return '/metrics'
def instance_proxy(field, value):
return get_default_field_value(field, value)
def instance_raw_line_filters(field, value):
return get_default_field_value(field, value)
def instance_raw_metric_prefix(field, value):
return get_default_field_value(field, value)
def instance_read_timeout(field, value):
return get_default_field_value(field, value)
def instance_region_name(field, value):
return get_default_field_value(field, value)
def instance_rename_labels(field, value):
return get_default_field_value(field, value)
def instance_request_size(field, value):
return 16
def instance_service(field, value):
return get_default_field_value(field, value)
def instance_share_labels(field, value):
return get_default_field_value(field, value)
def instance_skip_proxy(field, value):
return False
def instance_tags(field, value):
return get_default_field_value(field, value)
def instance_telemetry(field, value):
return False
def instance_timeout(field, value):
return 10
def instance_tls_ca_cert(field, value):
return get_default_field_value(field, value)
def instance_tls_cert(field, value):
return get_default_field_value(field, value)
def instance_tls_ignore_warning(field, value):
return False
def instance_tls_private_key(field, value):
return get_default_field_value(field, value)
def instance_tls_use_host_header(field, value):
return False
def instance_tls_verify(field, value):
return False
def instance_use_latest_spec(field, value):
return False
def instance_use_legacy_auth_encoding(field, value):
return True
def instance_use_openmetrics(field, value):
return False
def instance_username(field, value):
return get_default_field_value(field, value)
| 20.4 | 75 | 0.78483 | ["BSD-3-Clause"] | florusb/integrations-core | amazon_msk/datadog_checks/amazon_msk/config_models/defaults.py | 5,814 | Python |
# IDLE (Python 3.8.0)
# module_for_lists_of_terms
def termal_generator(lict):
length_of_termal_generator = 16
padding = length_of_termal_generator - len(lict)
count = padding
while count != 0:
lict.append([''])
count = count - 1
termal_lict = []
for first_inner in lict[0]:
for second_inner in lict[1]:
for third_inner in lict[2]:
for fourth_inner in lict[3]:
for fifth_inner in lict[4]:
for sixth_inner in lict[5]:
for seventh_inner in lict[6]:
for eighth_inner in lict[7]:
for ninth_inner in lict[8]:
for tenth_inner in lict[9]:
for eleventh_inner in lict[10]:
for twelfth_inner in lict[11]:
for thirteenth_inner in lict[12]:
for fourteenth_inner in lict [13]:
for fifteenth_inner in lict [14]:
for sixteenth_inner in lict[15]:
term = (
first_inner + second_inner +
third_inner + fourth_inner +
fifth_inner + sixth_inner +
seventh_inner + eighth_inner +
ninth_inner + tenth_inner +
eleventh_inner + twelfth_inner +
thirteenth_inner + fourteenth_inner +
fifteenth_inner + sixteenth_inner
)
termal_lict.append(term)
return termal_lict
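# --- Editor's illustrative sketch (not part of the original module) ---
# The sixteen nested loops in termal_generator() enumerate every concatenation
# of one element from each inner list; an equivalent, compact formulation with
# the standard library is shown here for comparison only:
import itertools
def termal_generator_compact(lict):
    padded = lict + [['']] * (16 - len(lict))
    return [''.join(parts) for parts in itertools.product(*padded)]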
def user_input_handling_function_second(dictionary):
print()
user_input = input('Enter: ')
print()
good_to_go = 'no'
errors = []
lict = []
while good_to_go == 'no':
for key in dictionary:
lict.append(key)
for element in user_input:
if element not in lict:
print('The form can only contain a combination of the characters that represent the lists of characters.')
errors.append('yes')
break
if len(user_input) < 2:
print('The form is too short. It can\'t be less than two-characters long.')
errors.append('yes')
if len(user_input) > 8:
print('The form is too long. It can\'t be more than eight-characters long.')
errors.append('yes')
if 'yes' in errors:
good_to_go = 'no'
errors = []
print()
user_input = input('Re-enter: ')
print()
else:
good_to_go = 'yes'
return user_input
def user_input_handling_function_third():
print()
user_input = input('Enter: ')
print()
good_to_go = 'no'
errors = []
yes_or_no = ['yes', 'no']
while good_to_go == 'no':
if user_input not in yes_or_no:
print('You have to answer yes or no.')
errors.append('yes')
if 'yes' in errors:
good_to_go = 'no'
errors = []
print()
user_input = input('Re-enter: ')
print()
else:
good_to_go = 'yes'
return user_input
def user_input_handling_function_fourth(dictionary):
print()
user_input = input('Enter: ')
print()
good_to_go = 'no'
errors = []
while good_to_go == 'no':
if user_input not in dictionary:
print('The form you entered does not match one of the forms in your termal_dictionary. Each form in your')
print('termal_dictionary is a name (key) that has an associated definition (value) that is a list of terms')
print('that all have the same form as the name (key).')
errors.append('yes')
if 'yes' in errors:
good_to_go = 'no'
errors = []
print()
user_input = input('Re-enter: ')
print()
else:
good_to_go = 'yes'
return user_input
def user_input_handling_function_eighth():
print()
user_input = input('Enter: ')
print()
good_to_go = 'no'
errors = []
digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-']
while good_to_go == 'no':
if user_input == 'None':
user_input = None
return user_input
else:
for inner in user_input:
if inner not in digits:
print('The number must be an integer that consists of digits. For example: 1, -2, etc. or the keyword:')
print('None.')
errors.append('yes')
break
if 'yes' in errors:
good_to_go = 'no'
errors = []
print()
user_input = input('Re-enter: ')
print()
else:
good_to_go = 'yes'
return int(user_input)
def user_input_handling_function_ninth():
''' a parser '''
print()
user_input = input('Enter: ')
print()
term = ''
lict = []
for element in user_input:
if element != ' ':
term = term + element
else:
lict.append(term)
term = ''
lict.append(term) # because term might not be empty....
return lict
def user_input_handling_function_tenth(dictionary):
''' a dictionary checker '''
user_input = user_input_handling_function_ninth()
good_to_go = 'no'
errors = []
while good_to_go == 'no':
string = ''
lict = []
for element in user_input:
string = string + element
for key in dictionary:
for element in dictionary[key]:
lict.append(element)
for element in string:
if element not in lict:
print('One of your unwanted characters or combination of characters does not match the characters you')
print('entered earlier.')
errors.append('yes')
break
if 'yes' in errors:
print()
user_input = input('Re-enter: ')
print()
good_to_go = 'no'
errors = []
else:
good_to_go = 'yes'
return user_input
def print_vertical_lict(lict):
for element in lict:
print(element)
def print_horizontal_lict(lict):
string = ''
for element in lict:
string = string + str(element) + ', '
print(string)
print()
def write_vertical_lict(file_name, lict): # <--
file = open(file_name, 'w')
for element in lict:
element = str(element) + '\n'
file.write(element)
file.close()
def write_horizontal_lict(file_name, lict):
if '.txt' not in file_name:
file_name = file_name + '.txt'
row = ''
for index in range(len(lict)):
lict[index] = str(lict[index]) + ', '
if len(row + lict[index]) > 100:
lict[index - 1] = lict[index - 1] + '\n'
row = lict[index]
else:
row = row + lict[index]
file = open(file_name, 'w')
for term in lict:
file.write(term)
file.close()
| 35.370536 | 124 | 0.47015 | ["MIT"] | ShawnJSavoie2/ToBeRedone | lists_of_terms/shodule_for_lists_of_terms.py | 7,923 | Python |
import logging
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.utils import timezone
from django.utils.translation import ugettext as _
from rr.forms.contact import ContactForm
from rr.models.contact import Contact
from rr.utils.serviceprovider import get_service_provider
logger = logging.getLogger(__name__)
@login_required
def contact_list(request, pk):
"""
Displays a list of :model:`rr.Contact` linked to
:model:`rr.ServiceProvider`.
Includes a ModelForm for adding :model:`rr.Contact` to
:model:`rr.ServiceProvider`.
**Context**
``object_list``
List of :model:`rr.Contact`.
``form``
ModelForm for creating a :model:`rr.Contact`
``object``
An instance of :model:`rr.ServiceProvider`.
**Template:**
:template:`rr/contact.html`
"""
sp = get_service_provider(pk, request.user)
form = ContactForm(sp=sp)
if request.method == "POST":
if "add_contact" in request.POST:
form = _add_contact(request, sp)
elif "remove_contact" in request.POST:
_remove_contacts(request, sp)
contacts = Contact.objects.filter(sp=sp, end_at=None)
return render(request, "rr/contact.html", {'object_list': contacts,
'form': form,
'object': sp})
def _add_contact(request, sp):
form = ContactForm(request.POST, sp=sp)
if form.is_valid():
contact_type = form.cleaned_data['type']
firstname = form.cleaned_data['firstname']
lastname = form.cleaned_data['lastname']
email = form.cleaned_data['email']
Contact.objects.create(sp=sp,
type=contact_type,
firstname=firstname,
lastname=lastname,
email=email)
sp.save_modified()
logger.info("Contact added for {sp} by {user}"
.format(sp=sp, user=request.user))
messages.add_message(request, messages.INFO, _('Contact added.'))
form = ContactForm(sp=sp)
return form
def _remove_contacts(request, sp):
for key, value in request.POST.dict().items():
if value == "on":
contact = Contact.objects.get(pk=key)
if contact.sp == sp:
contact.end_at = timezone.now()
contact.save()
sp.save_modified()
logger.info("Contact removed from {sp} by {user}"
.format(sp=sp, user=request.user))
                messages.add_message(request, messages.INFO, _('Contact removed.'))
| 33.301205 | 83 | 0.596599 | ["MIT"] | UniversityofHelsinki/sp-registry | rr/views/contact.py | 2,764 | Python |
'''
Test the cert_update plugin.
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import ports
Test.Summary = '''
Test cert_update plugin.
'''
Test.SkipUnless(
Condition.HasProgram("openssl", "Openssl need to be installed on system for this test to work")
)
# Set up origin server
server = Test.MakeOriginServer("server")
request_header = {
"headers": "GET / HTTP/1.1\r\nHost: doesnotmatter\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
# Set up ATS
ts = Test.MakeATSProcess("ts", command="traffic_manager", enable_tls=1)
# Set up ssl files
ts.addSSLfile("ssl/server1.pem")
ts.addSSLfile("ssl/server2.pem")
ts.addSSLfile("ssl/client1.pem")
ts.addSSLfile("ssl/client2.pem")
# reserve port, attach it to 'ts' so it is released later
ports.get_port(ts, 's_server_port')
ts.Disk.records_config.update({
'proxy.config.diags.debug.enabled': 1,
'proxy.config.diags.debug.tags': 'cert_update',
'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.client.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.client.private_key.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.url_remap.pristine_host_hdr': 1
})
ts.Disk.ssl_multicert_config.AddLine(
'dest_ip=* ssl_cert_name=server1.pem ssl_key_name=server1.pem'
)
ts.Disk.remap_config.AddLines([
'map https://bar.com http://127.0.0.1:{0}'.format(server.Variables.Port),
'map https://foo.com https://127.0.0.1:{0}'.format(ts.Variables.s_server_port)
])
ts.Disk.sni_yaml.AddLines([
'sni:',
'- fqdn: "*foo.com"',
' client_cert: "client1.pem"',
])
# Set up plugin
Test.PreparePlugin(os.path.join(Test.Variables.AtsExampleDir, 'plugins', 'c-api', '.libs', 'cert_update.so'), ts)
# Server-Cert-Pre
# curl should see that Traffic Server presents bar.com cert from alice
tr = Test.AddTestRun("Server-Cert-Pre")
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(Test.Processes.ts)
tr.Processes.Default.Command = (
'curl --verbose --insecure --ipv4 --resolve bar.com:{0}:127.0.0.1 https://bar.com:{0}'.format(ts.Variables.ssl_port)
)
tr.Processes.Default.Streams.stderr = "gold/server-cert-pre.gold"
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = server
# Server-Cert-Update
tr = Test.AddTestRun("Server-Cert-Update")
tr.Processes.Default.Env = ts.Env
tr.Processes.Default.Command = (
'{0}/traffic_ctl plugin msg cert_update.server {1}/server2.pem'.format(ts.Variables.BINDIR, ts.Variables.SSLDir)
)
ts.Streams.all = "gold/update.gold"
ts.StillRunningAfter = server
# Server-Cert-After
# after use traffic_ctl to update server cert, curl should see bar.com cert from bob
tr = Test.AddTestRun("Server-Cert-After")
tr.Processes.Default.Env = ts.Env
tr.Command = 'curl --verbose --insecure --ipv4 --resolve bar.com:{0}:127.0.0.1 https://bar.com:{0}'.format(ts.Variables.ssl_port)
tr.Processes.Default.Streams.stderr = "gold/server-cert-after.gold"
tr.Processes.Default.ReturnCode = 0
ts.StillRunningAfter = server
# Client-Cert-Pre
# s_server should see client (Traffic Server) as alice.com
tr = Test.AddTestRun("Client-Cert-Pre")
s_server = tr.Processes.Process(
"s_server", "openssl s_server -www -key {0}/server1.pem -cert {0}/server1.pem -accept {1} -Verify 1 -msg".format(ts.Variables.SSLDir, ts.Variables.s_server_port))
s_server.Ready = When.PortReady(ts.Variables.s_server_port)
tr.Command = 'curl --verbose --insecure --ipv4 --header "Host: foo.com" https://localhost:{}'.format(ts.Variables.ssl_port)
tr.Processes.Default.StartBefore(s_server)
s_server.Streams.all = "gold/client-cert-pre.gold"
tr.Processes.Default.ReturnCode = 0
ts.StillRunningAfter = server
# Client-Cert-Update
tr = Test.AddTestRun("Client-Cert-Update")
tr.Processes.Default.Env = ts.Env
tr.Processes.Default.Command = (
'mv {0}/client2.pem {0}/client1.pem && {1}/traffic_ctl plugin msg cert_update.client {0}/client1.pem'.format(
ts.Variables.SSLDir, ts.Variables.BINDIR)
)
ts.Streams.all = "gold/update.gold"
ts.StillRunningAfter = server
# Client-Cert-After
# after use traffic_ctl to update client cert, s_server should see client (Traffic Server) as bob.com
tr = Test.AddTestRun("Client-Cert-After")
s_server = tr.Processes.Process(
"s_server", "openssl s_server -www -key {0}/server1.pem -cert {0}/server1.pem -accept {1} -Verify 1 -msg".format(ts.Variables.SSLDir, ts.Variables.s_server_port))
s_server.Ready = When.PortReady(ts.Variables.s_server_port)
tr.Processes.Default.Env = ts.Env
# Move client2.pem to replace client1.pem since cert path matters in client context mapping
tr.Command = 'curl --verbose --insecure --ipv4 --header "Host: foo.com" https://localhost:{0}'.format(ts.Variables.ssl_port)
tr.Processes.Default.StartBefore(s_server)
s_server.Streams.all = "gold/client-cert-after.gold"
tr.Processes.Default.ReturnCode = 0
ts.StillRunningAfter = server
| 41.412587 | 166 | 0.738433 | ["Apache-2.0"] | zds05/trafficserver | tests/gold_tests/pluginTest/cert_update/cert_update.test.py | 5,922 | Python |
# Question 9
# List all files in a directory.
from os import listdir
print(listdir('/home/elif/Desktop'))
| 11.1 | 36 | 0.711712 | ["MIT"] | CodedLadiesInnovateTech/-python-challenge-solutions | elif_bayindir/phase_1/python_basic_1/day_6/q9.py | 111 | Python |
from django.db import models
class alumno(models.Model):
alum_id = models.AutoField(primary_key=True)
alum_nom = models.CharField(max_length=100, help_text="Nombre del alumno", unique=True)
alum_ape = models.CharField(max_length=100, help_text="Apellido del alumno", unique=True)
def __str__(self):
        return '{} {}'.format(self.alum_nom, self.alum_ape)
class Meta:
db_table = "alumno"
verbose_name_plural="alumnos"
class curso(models.Model):
cur_id = models.AutoField(primary_key=True)
cur_nom = models.CharField(max_length=100, help_text="Nombre del alumno", unique=True)
def __str__(self):
return '{}'.format(self.cur_nom)
class Meta:
db_table = "curso"
verbose_name_plural="cursos"
class alm_cur(models.Model):
almcur_id = models.AutoField(primary_key=True)
alum_id = models.ForeignKey(alumno, on_delete=models.CASCADE)
cur_id = models.ForeignKey(curso, on_delete=models.CASCADE)
def __str__(self):
return '{}'.format(self.almcur_id)
class Meta:
db_table = "alm_cur"
verbose_name_plural="alm_cursos"
class asistencia(models.Model):
asis_id = models.AutoField(primary_key=True)
    asis_fecha = models.DateField()
asis_est = models.BooleanField(default=False)
almcur_id = models.ForeignKey(alm_cur, on_delete=models.CASCADE)
def __str__(self):
        return '{} {}'.format(self.asis_fecha, self.asis_est)
class Meta:
db_table = "asistencia"
verbose_name_plural="asistencias"
| 27.083333 | 93 | 0.659692 | ["MIT"] | GuidoTorres/codigo8 | Semana10/Dia5/alumnos/alumnos1/models.py | 1,625 | Python |
import sys
from PySide2.QtWidgets import QApplication,QWidget,QMenuBar,QPushButton,QVBoxLayout,QMainWindow
from MainWindow import MainWindow
def start():
app = QApplication(sys.argv)
app.setApplicationName("My Little ERP")
mainWindow = MainWindow(app)
mainWindow.show()
sys.exit(app.exec_())
| 26.166667 | 95 | 0.761146 | ["MIT"] | Steins7/MyLittleERP | src/GUI/GUI.py | 314 | Python |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
# pylint: disable=unused-import
"""MobileNet v1 models for Keras.
MobileNet is a general architecture and can be used for multiple use cases.
Depending on the use case, it can use different input layer size and
different width factors. This allows different width models to reduce
the number of multiply-adds and thereby
reduce inference cost on mobile devices.
MobileNets support any input size greater than 32 x 32, with larger image sizes
offering better performance.
The number of parameters and number of multiply-adds
can be modified by using the `alpha` parameter,
which increases/decreases the number of filters in each layer.
By altering the image size and `alpha` parameter,
all 16 models from the paper can be built, with ImageNet weights provided.
The paper demonstrates the performance of MobileNets using `alpha` values of
1.0 (also called 100 % MobileNet), 0.75, 0.5 and 0.25.
For each of these `alpha` values, weights for 4 different input image sizes
are provided (224, 192, 160, 128).
The following table describes the size and accuracy of the 100% MobileNet
on size 224 x 224:
----------------------------------------------------------------------------
Width Multiplier (alpha) | ImageNet Acc | Multiply-Adds (M) | Params (M)
----------------------------------------------------------------------------
| 1.0 MobileNet-224 | 70.6 % | 529 | 4.2 |
| 0.75 MobileNet-224 | 68.4 % | 325 | 2.6 |
| 0.50 MobileNet-224 | 63.7 % | 149 | 1.3 |
| 0.25 MobileNet-224 | 50.6 % | 41 | 0.5 |
----------------------------------------------------------------------------
The following table describes the performance of
the 100 % MobileNet on various input sizes:
------------------------------------------------------------------------
Resolution | ImageNet Acc | Multiply-Adds (M) | Params (M)
------------------------------------------------------------------------
| 1.0 MobileNet-224 | 70.6 % | 529 | 4.2 |
| 1.0 MobileNet-192 | 69.1 % | 529 | 4.2 |
| 1.0 MobileNet-160 | 67.2 % | 529 | 4.2 |
| 1.0 MobileNet-128 | 64.4 % | 529 | 4.2 |
------------------------------------------------------------------------
The weights for all 16 models are obtained and translated
from TensorFlow checkpoints found at
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.md
# Reference
- [MobileNets: Efficient Convolutional Neural Networks for
     Mobile Vision Applications](https://arxiv.org/pdf/1704.04861.pdf)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras import constraints
from tensorflow.python.keras._impl.keras import initializers
from tensorflow.python.keras._impl.keras import regularizers
from tensorflow.python.keras._impl.keras.applications import imagenet_utils
from tensorflow.python.keras._impl.keras.applications.imagenet_utils import _obtain_input_shape
from tensorflow.python.keras._impl.keras.applications.imagenet_utils import decode_predictions
from tensorflow.python.keras._impl.keras.engine import InputSpec
from tensorflow.python.keras._impl.keras.engine.topology import get_source_inputs
from tensorflow.python.keras._impl.keras.engine.topology import shape_type_conversion
from tensorflow.python.keras._impl.keras.layers import Activation
from tensorflow.python.keras._impl.keras.layers import BatchNormalization
from tensorflow.python.keras._impl.keras.layers import Conv2D
from tensorflow.python.keras._impl.keras.layers import Dropout
from tensorflow.python.keras._impl.keras.layers import GlobalAveragePooling2D
from tensorflow.python.keras._impl.keras.layers import GlobalMaxPooling2D
from tensorflow.python.keras._impl.keras.layers import Input
from tensorflow.python.keras._impl.keras.layers import Reshape
from tensorflow.python.keras._impl.keras.models import Model
from tensorflow.python.keras._impl.keras.utils import conv_utils
from tensorflow.python.keras._impl.keras.utils.data_utils import get_file
from tensorflow.python.platform import tf_logging as logging
BASE_WEIGHT_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.6/'
def relu6(x):
return K.relu(x, max_value=6)
def preprocess_input(x):
"""Preprocesses a numpy array encoding a batch of images.
Arguments:
x: a 4D numpy array consists of RGB values within [0, 255].
Returns:
Preprocessed array.
"""
return imagenet_utils.preprocess_input(x, mode='tf')
class DepthwiseConv2D(Conv2D):
"""Depthwise separable 2D convolution.
Depthwise Separable convolutions consists in performing
just the first step in a depthwise spatial convolution
(which acts on each input channel separately).
The `depth_multiplier` argument controls how many
output channels are generated per input channel in the depthwise step.
Arguments:
kernel_size: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `'valid'` or `'same'` (case-insensitive).
depth_multiplier: The number of depthwise convolution output channels
for each input channel.
The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be 'channels_last'.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. 'linear' activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
depthwise_initializer: Initializer for the depthwise kernel matrix.
bias_initializer: Initializer for the bias vector.
depthwise_regularizer: Regularizer function applied to
the depthwise kernel matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its 'activation')..
depthwise_constraint: Constraint function applied to
the depthwise kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
4D tensor with shape:
`[batch, channels, rows, cols]` if data_format='channels_first'
or 4D tensor with shape:
`[batch, rows, cols, channels]` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`[batch, filters, new_rows, new_cols]` if data_format='channels_first'
or 4D tensor with shape:
`[batch, new_rows, new_cols, filters]` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
"""
def __init__(self,
kernel_size,
strides=(1, 1),
padding='valid',
depth_multiplier=1,
data_format=None,
activation=None,
use_bias=True,
depthwise_initializer='glorot_uniform',
bias_initializer='zeros',
depthwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
bias_constraint=None,
**kwargs):
super(DepthwiseConv2D, self).__init__(
filters=None,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
activation=activation,
use_bias=use_bias,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
bias_constraint=bias_constraint,
**kwargs)
self.depth_multiplier = depth_multiplier
self.depthwise_initializer = initializers.get(depthwise_initializer)
self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
self.depthwise_constraint = constraints.get(depthwise_constraint)
self.bias_initializer = initializers.get(bias_initializer)
@shape_type_conversion
def build(self, input_shape):
if len(input_shape) < 4:
raise ValueError('Inputs to `DepthwiseConv2D` should have rank 4. '
'Received input shape:', str(input_shape))
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = 3
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs to '
'`DepthwiseConv2D` '
'should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
depthwise_kernel_shape = (self.kernel_size[0], self.kernel_size[1],
input_dim, self.depth_multiplier)
self.depthwise_kernel = self.add_weight(
shape=depthwise_kernel_shape,
initializer=self.depthwise_initializer,
name='depthwise_kernel',
regularizer=self.depthwise_regularizer,
constraint=self.depthwise_constraint)
if self.use_bias:
self.bias = self.add_weight(
shape=(input_dim * self.depth_multiplier,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
# Set input spec.
self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
self.built = True
def call(self, inputs, training=None):
outputs = K.depthwise_conv2d(
inputs,
self.depthwise_kernel,
strides=self.strides,
padding=self.padding,
dilation_rate=self.dilation_rate,
data_format=self.data_format)
    if self.use_bias:
outputs = K.bias_add(outputs, self.bias, data_format=self.data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
@shape_type_conversion
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
rows = input_shape[2]
cols = input_shape[3]
out_filters = input_shape[1] * self.depth_multiplier
elif self.data_format == 'channels_last':
rows = input_shape[1]
cols = input_shape[2]
out_filters = input_shape[3] * self.depth_multiplier
rows = conv_utils.conv_output_length(rows, self.kernel_size[0],
self.padding, self.strides[0])
cols = conv_utils.conv_output_length(cols, self.kernel_size[1],
self.padding, self.strides[1])
if self.data_format == 'channels_first':
return (input_shape[0], out_filters, rows, cols)
elif self.data_format == 'channels_last':
return (input_shape[0], rows, cols, out_filters)
def get_config(self):
config = super(DepthwiseConv2D, self).get_config()
config.pop('filters')
config.pop('kernel_initializer')
config.pop('kernel_regularizer')
config.pop('kernel_constraint')
config['depth_multiplier'] = self.depth_multiplier
config['depthwise_initializer'] = initializers.serialize(
self.depthwise_initializer)
config['depthwise_regularizer'] = regularizers.serialize(
self.depthwise_regularizer)
config['depthwise_constraint'] = constraints.serialize(
self.depthwise_constraint)
return config
def MobileNet(input_shape=None,
alpha=1.0,
depth_multiplier=1,
dropout=1e-3,
include_top=True,
weights='imagenet',
input_tensor=None,
pooling=None,
classes=1000):
"""Instantiates the MobileNet architecture.
Note that only TensorFlow is supported for now,
therefore it only works with the data format
`image_data_format='channels_last'` in your Keras config
at `~/.keras/keras.json`.
To load a MobileNet model via `load_model`, import the custom
objects `relu6` and `DepthwiseConv2D` and pass them to the
`custom_objects` parameter.
E.g.
model = load_model('mobilenet.h5', custom_objects={
'relu6': mobilenet.relu6,
'DepthwiseConv2D': mobilenet.DepthwiseConv2D})
Arguments:
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
or (3, 224, 224) (with `channels_first` data format).
It should have exactly 3 inputs channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
depth_multiplier: depth multiplier for depthwise convolution
(also called the resolution multiplier)
dropout: dropout rate
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
Returns:
A Keras model instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
"""
if K.backend() != 'tensorflow':
raise RuntimeError('Only TensorFlow backend is currently supported, '
'as other backends do not support '
'depthwise convolution.')
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as ImageNet with `include_top` '
'as true, `classes` should be 1000')
# Determine proper input shape and default size.
if input_shape is None:
default_size = 224
else:
if K.image_data_format() == 'channels_first':
rows = input_shape[1]
cols = input_shape[2]
else:
rows = input_shape[0]
cols = input_shape[1]
if rows == cols and rows in [128, 160, 192, 224]:
default_size = rows
else:
default_size = 224
input_shape = _obtain_input_shape(
input_shape,
default_size=default_size,
min_size=32,
data_format=K.image_data_format(),
require_flatten=include_top,
weights=weights)
if K.image_data_format() == 'channels_last':
row_axis, col_axis = (0, 1)
else:
row_axis, col_axis = (1, 2)
rows = input_shape[row_axis]
cols = input_shape[col_axis]
if weights == 'imagenet':
if depth_multiplier != 1:
raise ValueError('If imagenet weights are being loaded, '
'depth multiplier must be 1')
if alpha not in [0.25, 0.50, 0.75, 1.0]:
raise ValueError('If imagenet weights are being loaded, '
'alpha can be one of'
'`0.25`, `0.50`, `0.75` or `1.0` only.')
if rows != cols or rows not in [128, 160, 192, 224]:
raise ValueError('If imagenet weights are being loaded, '
'input must have a static square shape (one of '
'(128,128), (160,160), (192,192), or (224, 224)).'
' Input shape provided = %s' % (input_shape,))
if K.image_data_format() != 'channels_last':
logging.warning('The MobileNet family of models is only available '
'for the input data format "channels_last" '
'(width, height, channels). '
'However your settings specify the default '
'data format "channels_first" (channels, width, height).'
' You should set `image_data_format="channels_last"` '
'in your Keras config located at ~/.keras/keras.json. '
'The model being returned right now will expect inputs '
'to follow the "channels_last" data format.')
K.set_image_data_format('channels_last')
old_data_format = 'channels_first'
else:
old_data_format = None
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = _conv_block(img_input, 32, alpha, strides=(2, 2))
x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1)
x = _depthwise_conv_block(
x, 128, alpha, depth_multiplier, strides=(2, 2), block_id=2)
x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3)
x = _depthwise_conv_block(
x, 256, alpha, depth_multiplier, strides=(2, 2), block_id=4)
x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5)
x = _depthwise_conv_block(
x, 512, alpha, depth_multiplier, strides=(2, 2), block_id=6)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11)
x = _depthwise_conv_block(
x, 1024, alpha, depth_multiplier, strides=(2, 2), block_id=12)
x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13)
if include_top:
if K.image_data_format() == 'channels_first':
shape = (int(1024 * alpha), 1, 1)
else:
shape = (1, 1, int(1024 * alpha))
x = GlobalAveragePooling2D()(x)
x = Reshape(shape, name='reshape_1')(x)
x = Dropout(dropout, name='dropout')(x)
x = Conv2D(classes, (1, 1), padding='same', name='conv_preds')(x)
x = Activation('softmax', name='act_softmax')(x)
x = Reshape((classes,), name='reshape_2')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='mobilenet_%0.2f_%s' % (alpha, rows))
# load weights
if weights == 'imagenet':
if K.image_data_format() == 'channels_first':
      raise ValueError('Weights for "channels_first" format '
'are not available.')
if alpha == 1.0:
alpha_text = '1_0'
elif alpha == 0.75:
alpha_text = '7_5'
elif alpha == 0.50:
alpha_text = '5_0'
else:
alpha_text = '2_5'
if include_top:
model_name = 'mobilenet_%s_%d_tf.h5' % (alpha_text, rows)
weigh_path = BASE_WEIGHT_PATH + model_name
weights_path = get_file(model_name, weigh_path, cache_subdir='models')
else:
model_name = 'mobilenet_%s_%d_tf_no_top.h5' % (alpha_text, rows)
weigh_path = BASE_WEIGHT_PATH + model_name
weights_path = get_file(model_name, weigh_path, cache_subdir='models')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
if old_data_format:
K.set_image_data_format(old_data_format)
return model
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
"""Adds an initial convolution layer (with batch normalization and relu6).
Arguments:
inputs: Input tensor of shape `(rows, cols, 3)`
(with `channels_last` data format) or
(3, rows, cols) (with `channels_first` data format).
It should have exactly 3 inputs channels,
and width and height should be no smaller than 32.
E.g. `(224, 224, 3)` would be one valid value.
filters: Integer, the dimensionality of the output space
(i.e. the number output of filters in the convolution).
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
kernel: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
Input shape:
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(samples, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to stride.
Returns:
Output tensor of block.
"""
channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
filters = int(filters * alpha)
x = Conv2D(
filters,
kernel,
padding='same',
use_bias=False,
strides=strides,
name='conv1')(
inputs)
x = BatchNormalization(axis=channel_axis, name='conv1_bn')(x)
return Activation(relu6, name='conv1_relu')(x)
def _depthwise_conv_block(inputs,
pointwise_conv_filters,
alpha,
depth_multiplier=1,
strides=(1, 1),
block_id=1):
"""Adds a depthwise convolution block.
A depthwise convolution block consists of a depthwise conv,
batch normalization, relu6, pointwise convolution,
batch normalization and relu6 activation.
Arguments:
inputs: Input tensor of shape `(rows, cols, channels)`
(with `channels_last` data format) or
(channels, rows, cols) (with `channels_first` data format).
pointwise_conv_filters: Integer, the dimensionality of the output space
(i.e. the number output of filters in the pointwise convolution).
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
depth_multiplier: The number of depthwise convolution output channels
for each input channel.
The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
block_id: Integer, a unique identification designating the block number.
Input shape:
4D tensor with shape:
`(batch, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(batch, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to stride.
Returns:
Output tensor of block.
"""
channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
pointwise_conv_filters = int(pointwise_conv_filters * alpha)
x = DepthwiseConv2D( # pylint: disable=not-callable
(3, 3),
padding='same',
depth_multiplier=depth_multiplier,
strides=strides,
use_bias=False,
name='conv_dw_%d' % block_id)(
inputs)
x = BatchNormalization(axis=channel_axis, name='conv_dw_%d_bn' % block_id)(x)
x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x)
x = Conv2D(
pointwise_conv_filters, (1, 1),
padding='same',
use_bias=False,
strides=(1, 1),
name='conv_pw_%d' % block_id)(
x)
x = BatchNormalization(axis=channel_axis, name='conv_pw_%d_bn' % block_id)(x)
return Activation(relu6, name='conv_pw_%d_relu' % block_id)(x)
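# --- Editor's illustrative sketch (not part of the original module) ---
# Builds the 0.50-width, 160x160 variant described in the module docstring.
# weights=None keeps the example self-contained (no ImageNet download).
if __name__ == '__main__':
  example_model = MobileNet(input_shape=(160, 160, 3), alpha=0.5, weights=None)
  example_model.summary()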
| 41.451095 | 95 | 0.650736 | ["Apache-2.0"] | DylanDmitri/tensorflow | tensorflow/python/keras/_impl/keras/applications/mobilenet.py | 28,394 | Python |
import pyowm
owm = pyowm.OWM('ce688b67bbf90c2a0236d4eb23d8c7bd') # You MUST provide a valid API key
# Will it be sunny tomorrow at this time in Milan (Italy) ?
#forecast = owm.daily_forecast('panama')
tomorrow = pyowm.timeutils.tomorrow()
#forecast.will_be_sunny_at(tomorrow) # Always True in Italy, right? ;-)
# Search for current weather in London (UK)
observation = owm.weather_at_place('ayangue')
w = observation.get_weather()
rete=w.get_reference_time(timeformat='date')
refe=w.get_reference_time('iso')
estatus=w.get_status()
time=w.get_sunset_time('iso')
wind=(w.get_wind()['speed'])
wind1=w.get_wind()
tempe=w.get_temperature('celsius')
tempe1=w.get_temperature('celsius')['temp_max']
l = observation.get_location()
lugar = l.get_country()
# status=Clouds>
# Weather details
#print(forecast)
print(lugar)
print(w) # <Weather - reference time=2013-12-18 09:20,
#print(rete)
#print(time)
print(estatus)# status=Clouds>
print(refe)
#print(tomorrow)  # tomorrow's date
print(wind)  # wind speed
print(wind1)
print(w.get_humidity())  # humidity
print(tempe)  # temperature
print(tempe1)
#w.get_wind() # {'speed': 4.6, 'deg': 330}
#w.get_humidity() # 87
#w.get_temperature('celsius') # {'temp_max': 10.5, 'temp': 9.7, 'temp_min': 9.0}
| 29.568182 | 87 | 0.703305 | ["MIT"] | andrew962/Python | Apis/App/api.py | 1,302 | Python |
import os, sys, subprocess
def writetofile(args):
with open(args[0], 'w') as f:
f.write(' '.join(args[1:]))
def writeenvtofile(args):
with open(args[0], 'w') as f:
f.write(os.environ[args[1]])
def writesubprocessenvtofile(args):
with open(args[0], 'w') as f:
p = subprocess.Popen([sys.executable, "-c",
"import os; print(os.environ['%s'])" % args[1]],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
assert p.returncode == 0
f.write(stdout.decode("utf-8"))
def convertasplode(arg):
try:
return int(arg)
except:
return (None if arg == "None" else arg)
def asplode(args):
arg0 = convertasplode(args[0])
sys.exit(arg0)
def asplode_return(args):
arg0 = convertasplode(args[0])
return arg0
def asplode_raise(args):
raise Exception(args[0])
def delayloadfn(args):
import delayload
| 23.692308 | 74 | 0.627706 | ["MIT"] | JeremyMarshall/pymake | tests/pycmd.py | 924 | Python |
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse
from django.views.generic import DetailView, ListView, RedirectView, UpdateView
from tiveU.articles.models import Article
from .models import User
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = 'username'
slug_url_kwarg = 'username'
def get_context_data(self, **kwargs):
context = super(UserDetailView, self).get_context_data(**kwargs)
context['articles'] = Article.objects.all()
return context
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse('users:detail',
kwargs={'username': self.request.user.username})
class UserUpdateView(LoginRequiredMixin, UpdateView):
fields = ['name', 'email', 'picture', 'job_title', 'bio', 'phone', 'gender']
model = User
# send the user back to their own page after a successful update
def get_success_url(self):
return reverse('users:detail',
kwargs={'username': self.request.user.username})
def get_object(self):
# Only get the User record for the user making the request
return User.objects.get(username=self.request.user.username)
class UserListView(LoginRequiredMixin, ListView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = 'username'
slug_url_kwarg = 'username'
| 33.723404 | 80 | 0.705363 | ["MIT"] | rds0751/newtiveu | tiveU/users/views.py | 1,585 | Python |
import os
import cv2
import shutil
import argparse
import torch
import numpy as np
import pandas as pd
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib
from .preprocess import DocScanner
import modules.detection as detection
import modules.ocr as ocr
import modules.retrieval as retrieval
import modules.correction as correction
from tool.config import Config
from tool.utils import download_pretrained_weights
CACHE_DIR = '.cache'
class Preprocess:
def __init__(
self,
find_best_rotation=True,
det_model=None,
ocr_model=None):
self.find_best_rotation = find_best_rotation
if self.find_best_rotation:
self.crop_path = os.path.join(CACHE_DIR, 'crops')
if os.path.exists(self.crop_path):
shutil.rmtree(self.crop_path)
os.mkdir(self.crop_path)
self.det_model = det_model if det_model is not None else Detection()
self.ocr_model = ocr_model if ocr_model is not None else OCR()
self.scanner = DocScanner()
def __call__(self, image, return_score=False):
output = self.scanner.scan(image)
if self.find_best_rotation:
_ = self.det_model(
output,
crop_region=True,
return_result=False,
output_path=CACHE_DIR)
orientation_scores = np.array([0.,0.,0.,0.])
num_crops = len(os.listdir(self.crop_path))
for i in range(num_crops):
single_crop_path = os.path.join(self.crop_path, f'{i}.jpg')
if not os.path.isfile(single_crop_path):
continue
img = cv2.imread(single_crop_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
orientation_scores += ocr.find_rotation_score(img, self.ocr_model)
best_orient = np.argmax(orientation_scores)
print(f"Rotate image by {best_orient*90} degrees")
# Rotate the original image
output = ocr.rotate_img(output, best_orient)
if return_score:
return output, orientation_scores
else:
return output
class Detection:
def __init__(self, config_path=None, weight_path=None, model_name=None):
if config_path is None:
config_path = 'tool/config/detection/configs.yaml'
config = Config(config_path)
self.model_name = model_name
if weight_path is None:
if self.model_name is None:
self.model_name = "pan_resnet18_default"
tmp_path = os.path.join(CACHE_DIR, f'{self.model_name}.pth')
download_pretrained_weights(self.model_name, cached=tmp_path)
weight_path = tmp_path
self.model = detection.PAN(config, model_path=weight_path)
def __call__(
self,
image,
crop_region=False,
return_result=False,
output_path=None):
"""
Input: path to image
Output: boxes (coordinates of 4 points)
"""
if output_path is None:
            assert not crop_region, "Please specify output_path"
else:
output_path = os.path.join(output_path, 'crops')
if os.path.exists(output_path):
shutil.rmtree(output_path)
os.mkdir(output_path)
# Detect and OCR for final result
_, boxes_list, _ = self.model.predict(
image,
output_path,
crop_region=crop_region)
        if return_result:
            img = detection.draw_bbox(image, boxes_list)
            return boxes_list, img
        else:
            return boxes_list
class OCR:
def __init__(self, config_path=None, weight_path=None, model_name=None):
if config_path is None:
config_path = 'tool/config/ocr/configs.yaml'
config = Config(config_path)
ocr_config = ocr.Config.load_config_from_name(config.model_name)
ocr_config['cnn']['pretrained']=False
ocr_config['device'] = 'cuda:0'
ocr_config['predictor']['beamsearch']=False
self.model_name = model_name
if weight_path is None:
if self.model_name is None:
self.model_name = "transformerocr_default_vgg"
tmp_path = os.path.join(CACHE_DIR, f'{self.model_name}.pth')
download_pretrained_weights(self.model_name, cached=tmp_path)
weight_path = tmp_path
ocr_config['weights'] = weight_path
self.model = ocr.Predictor(ocr_config)
def __call__(self, img, return_prob=False):
if isinstance(img, np.ndarray):
img = Image.fromarray(img)
return self.model.predict(img, return_prob)
def predict_folder(self, img_paths, return_probs=False):
texts = []
if return_probs:
probs = []
for i, img_path in enumerate(img_paths):
img = Image.open(img_path)
if return_probs:
text, prob = self(img, True)
texts.append(text)
probs.append(prob)
else:
text = self(img, False)
texts.append(text)
if return_probs:
return texts, probs
else:
return texts
class Retrieval:
def __init__(self, class_mapping, dictionary=None, mode="all", bert_weight=None):
assert mode in ["all", "bert", "trie", "ed"], "Mode is not supported"
self.mode = mode
self.dictionary = dictionary
self.class_mapping = class_mapping
        self.idx_mapping = {v:k for k,v in class_mapping.items()}
        # All back-ends default to off; the mode checks below switch them on
        self.use_bert = False
        self.use_trie = False
        self.use_ed = False
if self.mode == 'bert':
self.use_bert = True
if self.mode == 'trie':
self.use_trie = True
if self.mode == 'ed':
self.use_ed = True
if self.mode == 'all':
self.use_bert = True
self.use_trie = True
self.use_ed = True
if self.use_bert:
self.bert = retrieval.PhoBERT(self.idx_mapping, bert_weight)
if self.use_ed:
self.ed = retrieval.get_heuristic_retrieval('diff')
if self.use_trie:
self.trie = retrieval.get_heuristic_retrieval('trie')
if self.use_ed or self.use_trie:
if self.dictionary is None:
self.dictionary = {}
df = pd.read_csv('./modules/retrieval/heuristic/custom-dictionary.csv')
for id, row in df.iterrows():
self.dictionary[row.text.lower()] = row.lbl
def ensemble(self, df):
preds = []
probs = []
for id, row in df.iterrows():
if row["timestamp"] == 1:
preds.append("TIMESTAMP")
probs.append(5.0)
elif row["bert_labels"] == row["diff_labels"]:
preds.append(row["bert_labels"])
probs.append(row["bert_probs"] + row["diff_probs"])
elif row["bert_labels"] == row["trie_labels"]:
preds.append(row["bert_labels"])
probs.append(row["bert_probs"] + row["trie_probs"])
elif row["trie_labels"] == row["diff_labels"]:
preds.append(row["trie_labels"])
probs.append(row["trie_probs"] + row["diff_probs"])
else:
if row["diff_probs"] >= 0.4:
preds.append(row["diff_labels"])
probs.append(row["diff_probs"])
elif row["trie_probs"] >= 0.25:
preds.append(row["trie_labels"])
probs.append(row["trie_probs"])
else:
preds.append(row["bert_labels"])
probs.append(row["bert_probs"]/3)
return preds, probs
def __call__(self, query_texts):
df = pd.DataFrame()
if self.use_bert:
preds, probs = self.bert(query_texts)
df["bert_labels"] = preds
df["bert_probs"] = probs
if self.use_ed:
preds, probs = self.ed(query_texts, self.dictionary)
df["diff_labels"] = [self.idx_mapping[x] for x in preds]
df["diff_probs"] = probs
if self.use_trie:
preds, probs = self.trie(query_texts, self.dictionary)
df["trie_labels"] = [self.idx_mapping[x] for x in preds]
df["trie_probs"] = probs
timestamps = retrieval.regex_timestamp(query_texts)
df["timestamp"] = timestamps
preds, probs = self.ensemble(df)
return preds, probs
class Correction:
def __init__(self, dictionary=None, mode="ed"):
assert mode in ["trie", "ed"], "Mode is not supported"
self.mode = mode
self.dictionary = dictionary
self.use_trie = False
self.use_ed = False
if self.mode == 'trie':
self.use_trie = True
if self.mode == 'ed':
self.use_ed = True
if self.use_ed:
self.ed = correction.get_heuristic_correction('diff')
if self.use_trie:
self.trie = correction.get_heuristic_correction('trie')
if self.use_ed or self.use_trie:
if self.dictionary is None:
self.dictionary = {}
df = pd.read_csv('./modules/retrieval/heuristic/custom-dictionary.csv')
for id, row in df.iterrows():
self.dictionary[row.text.lower()] = row.lbl
def __call__(self, query_texts, return_score=False):
if self.use_ed:
preds, score = self.ed(query_texts, self.dictionary)
if self.use_trie:
preds, score = self.trie(query_texts, self.dictionary)
if return_score:
return preds, score
else:
            return preds
| 34.686411 | 87 | 0.571974 | [
"Apache-2.0"
] | kaylode/vietnamese-ocr-toolbox | modules/__init__.py | 9,955 | Python |
import json
import sys
from rsa import PrivateKey
with open(sys.argv[1], 'rb') as input:
key = PrivateKey.load_pkcs1(input.read())
d = {}
d['n'] = key.n
d['e'] = key.e
d['d'] = key.d
d['p'] = key.p
d['q'] = key.q
with open(sys.argv[2], 'w') as output:
output.write(json.dumps(d))
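# Illustrative usage (not part of the original script):
#   python extract_key.py private_key.pem private_key.json
# argv[1] is a PKCS#1 PEM private key, argv[2] is the JSON file to write.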
| 21.4 | 45 | 0.548287 | [
"MIT"
] | 425629/esp32-weather-google-sheets | scripts/extract_key.py | 321 | Python |
import os
class DefaultConfig:
""" Bot Configuration """
HOST = "0.0.0.0"
PORT = 3978
CONNECTION_NAME = os.environ.get("CONNECTION_NAME", "echo-bot")
APP_ID = os.environ.get("MICROSOFT_APP_ID", "")
APP_PASSWORD = os.environ.get("MICROSOFT_APP_PASSWORD", "")
LUIS_APP_ID = os.environ.get("LUIS_APP_ID", "")
LUIS_API_KEY = os.environ.get("LUIS_API_KEY", "")
# LUIS endpoint host name, ie "westus.api.cognitive.microsoft.com"
LUIS_API_HOST_NAME = os.environ.get(
"LUIS_API_HOST_NAME", "westeurope.api.cognitive.microsoft.com"
)
LUIS_IS_DISABLED = True if os.environ.get("LUIS_IS_DISABLED", "False") == "True" else False
# cosmos storage
COSMOS_DB_SERVICE_ENDPOINT = os.environ.get("COSMOS_DB_SERVICE_ENDPOINT", "")
COSMOS_DB_KEY = os.environ.get("COSMOS_DB_KEY", "")
COSMOS_DB_DATABASE_ID = os.environ.get("COSMOS_DB_DATABASE_ID", "")
COSMOS_DB_CONTAINER_ID = os.environ.get("COSMOS_DB_CONTAINER_ID", "")
| 35.178571 | 95 | 0.694416 | [
"MIT"
] | kwahome/delivery-bot | config.py | 985 | Python |
# Copyright (C) 2005-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import binascii
import dns.rdata
import dns.rdatatype
class TLSA(dns.rdata.Rdata):
"""TLSA record
@ivar usage: The certificate usage
@type usage: int
@ivar selector: The selector field
@type selector: int
@ivar mtype: The 'matching type' field
@type mtype: int
@ivar cert: The 'Certificate Association Data' field
@type cert: string
@see: RFC 6698"""
__slots__ = ['usage', 'selector', 'mtype', 'cert']
def __init__(self, rdclass, rdtype, usage, selector,
mtype, cert):
super(TLSA, self).__init__(rdclass, rdtype)
self.usage = usage
self.selector = selector
self.mtype = mtype
self.cert = cert
def to_text(self, origin=None, relativize=True, **kw):
return '%d %d %d %s' % (self.usage,
self.selector,
self.mtype,
dns.rdata._hexify(self.cert,
chunksize=128))
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
usage = tok.get_uint8()
selector = tok.get_uint8()
mtype = tok.get_uint8()
cert_chunks = []
while 1:
t = tok.get().unescape()
if t.is_eol_or_eof():
break
if not t.is_identifier():
raise dns.exception.SyntaxError
cert_chunks.append(t.value.encode())
cert = b''.join(cert_chunks)
cert = binascii.unhexlify(cert)
return cls(rdclass, rdtype, usage, selector, mtype, cert)
def to_wire(self, file, compress=None, origin=None):
header = struct.pack("!BBB", self.usage, self.selector, self.mtype)
file.write(header)
file.write(self.cert)
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
header = struct.unpack("!BBB", wire[current: current + 3])
current += 3
rdlen -= 3
cert = wire[current: current + rdlen].unwrap()
return cls(rdclass, rdtype, header[0], header[1], header[2], cert)
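# --- Illustrative sketch (not part of the original module) ---
# Parsing and re-emitting a TLSA rdata via the generic dnspython entry points;
# the hex digest below is a made-up placeholder.
#
# import dns.rdata, dns.rdataclass, dns.rdatatype
# rd = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TLSA,
#                          '3 1 1 ' + 'ab' * 32)
# print(rd.usage, rd.selector, rd.mtype)   # -> 3 1 1
# print(rd.to_text())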
| 35.614458 | 75 | 0.624154 | [
"Apache-2.0"
] | 10088/hue | desktop/core/ext-py/dnspython-1.15.0/dns/rdtypes/ANY/TLSA.py | 2,956 | Python |
#!/usr/bin/python3 -u
import json
import time
import os
import hashlib
import redis
from typing import NamedTuple
import requests
import psycopg2
USER=os.environ['EXODUS_USER']
PASS=os.environ['EXODUS_PASS']
RETRY_COUNT=int(os.environ['EXODUS_RETRY_COUNT'])
PSQL_DB_NAME=os.environ['EXODUS_PSQL_DB_NAME']
REDIS_HOST=os.environ['POSDA_REDIS_HOST']
class SubmitFailedError(RuntimeError): pass
class File(NamedTuple):
export_event_id: int
import_event_id: int
file_id: int
file_path: str
base_url: str
apikey: str
delete_after_transfer: int
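# Illustrative note (not part of the original script): each item popped from the
# "posda_to_posda_transfer" Redis list below is a JSON array whose element order
# matches the fields above, e.g. (all values made up):
#   [42, 7, 123456, "/posda/cache/123456.dcm",
#    "https://posda.example.org/papi", "my-api-key", 0]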
def main_loop(redis_db, psql_db):
while True:
sr = redis_db.brpop("posda_to_posda_transfer", 5)
if sr is None:
continue
_, value = sr
file = File(*json.loads(value))
try:
submit_file(file)
update_success(psql_db, file.file_id, file.export_event_id)
except SubmitFailedError as e:
# probably should put this onto a failed-file list now?
print(e)
insert_errors(psql_db, file.file_id, file.export_event_id, e)
def update_success(psql_db, file_id, export_event_id):
try:
psql_db.execute("""
update file_export set
when_transferred = now(),
transfer_status = 'success'
where export_event_id = %s
and file_id = %s
""", [export_event_id, file_id])
except Exception as e:
print(e)
def insert_errors(psql_db, file_id, export_event_id, errors):
transfer_status_id = None
try:
psql_db.execute("""
insert into transfer_status
values (default, %s)
returning transfer_status_id
""", [str(errors)])
transfer_status_id, = psql_db.fetchone()
except psycopg2.IntegrityError:
psql_db.execute("""
select transfer_status_id
from transfer_status
where transfer_status_message = %s
""", [str(errors)])
transfer_status_id, = psql_db.fetchone()
if transfer_status_id is None:
print("Unable to create or get transfer_status_id for following error")
print(str(errors))
try:
psql_db.execute("""
update file_export set
when_transferred = now(),
transfer_status = 'failed permanent',
transfer_status_id = %s
where export_event_id = %s
and file_id = %s
""", [transfer_status_id, export_event_id, file_id])
except Exception as e:
print(e)
def md5sum(filename):
md5 = hashlib.md5()
with open(filename, 'rb') as f:
for chunk in iter(lambda: f.read(128 * md5.block_size), b''):
md5.update(chunk)
return md5.hexdigest()
def submit_file(file):
try:
params = {'import_event_id': file.import_event_id,
'digest': md5sum(file.file_path)}
headers = {}
if(file.apikey):
headers['apikey'] = file.apikey
with open(file.file_path, "rb") as infile:
req = requests.put(file.base_url + "/v1/import/file",
headers=headers,
params=params,
data=infile)
if req.status_code == 200:
print(file.file_id)
if(file.delete_after_transfer):
os.remove(file.file_path)
return
else:
raise SubmitFailedError((req.status_code, req.content))
except SubmitFailedError as e:
raise SubmitFailedError(("Failed to submit the file; error details follow", file, e))
except IOError as e:
raise SubmitFailedError(("Failed to open the file; error details follow", file, e))
def main():
print("exodus, starting up...")
redis_db = redis.StrictRedis(host=REDIS_HOST, db=0)
print("connected to redis")
psql_db_conn = psycopg2.connect(dbname=PSQL_DB_NAME)
psql_db_conn.autocommit = True
psql_db_cur = psql_db_conn.cursor()
print("connected to postgres")
main_loop(redis_db, psql_db_cur)
if __name__ == "__main__":
main()
| 29.884892 | 93 | 0.613866 | [
"Apache-2.0"
] | UAMS-DBMI/PosdaTools | exodus/exodus.py | 4,154 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__copyright__ = "Copyright (C) 2021 Luciano Fiandesio"
__license__ = "GNU GPLv2"
import argparse
import os
import sys
import glob
import yaml
from config import *
from arg_parser import *
def main():
args = eval_args('Move bank csv file to processing folder')
config = init_config(args.file, args.debug)
if not os.path.isdir(config.csv.download_path):
print("folder: %s does not exist!"%(config.csv.download_path))
sys.exit(-1)
if not os.path.isdir(config.csv.target):
os.mkdir(config.csv.target)
# count number of files starting with:
file_count = len(glob.glob1(config.csv.download_path, config.csv.name + "*"))
if file_count > 1:
print("more than one file starting with %s found in %s. Can not continue."%(config.csv.name,config.csv.download_path))
sys.exit(-1)
if file_count == 0:
print("No file found in %s with name starting with: %s"%(config.csv.download_path, config.csv.name))
sys.exit(-1)
for f in os.listdir(config.csv.download_path):
if f.startswith(config.csv.name):
os.rename(config.csv.download_path + "/" + f, config.csv.target + "/" + config.csv.ref + ".csv")
print("Done :) ")
if __name__ == "__main__":
main()
| 27.479167 | 126 | 0.648976 | [
"BSD-2-Clause"
] | StefanD986/beanborg | beanborg/bb_mover.py | 1,319 | Python |
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('', include('frontend.urls')),
path('admin/', admin.site.urls),
]
| 20.875 | 39 | 0.688623 | [
"MIT"
] | AveraqeDev/django-react | app/app/urls.py | 167 | Python |
"""Pretraining on GPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, sys
import math
import json
import time
import numpy as np
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import tensorflow as tf
import data_utils
import model_utils
from gpu_utils import assign_to_gpu, average_grads_and_vars
import function_builder
# GPU config
flags.DEFINE_integer("num_hosts", default=1,
help="Number of hosts")
flags.DEFINE_integer("num_core_per_host", default=8,
help="Number of cores per host")
flags.DEFINE_bool("use_tpu", default=False,
help="Whether to use TPUs for training.")
# Experiment (data/checkpoint/directory) config
flags.DEFINE_integer("num_passes", default=1,
help="Number of passed used for training.")
flags.DEFINE_string("record_info_dir", default=None,
help="Path to local directory containing `record_info-lm.json`.")
flags.DEFINE_string("model_dir", default=None,
help="Estimator model_dir.")
flags.DEFINE_string("init_checkpoint", default=None,
help="checkpoint path for initializing the model.")
# Optimization config
flags.DEFINE_float("learning_rate", default=1e-4,
help="Maximum learning rate.")
flags.DEFINE_float("clip", default=1.0,
help="Gradient clipping value.")
# for cosine decay
flags.DEFINE_float("min_lr_ratio", default=0.001,
help="Minimum ratio learning rate.")
flags.DEFINE_integer("warmup_steps", default=0,
help="Number of steps for linear lr warmup.")
flags.DEFINE_float("adam_epsilon", default=1e-8,
help="Adam epsilon")
flags.DEFINE_string("decay_method", default="poly",
help="poly or cos")
flags.DEFINE_float("weight_decay", default=0.0,
help="weight decay")
# Training config
flags.DEFINE_integer("train_batch_size", default=16,
help="Size of train batch.")
flags.DEFINE_integer("train_steps", default=100000,
help="Total number of training steps.")
flags.DEFINE_integer("iterations", default=1000,
help="Number of iterations per repeat loop.")
flags.DEFINE_integer("save_steps", default=None,
help="number of steps for model checkpointing.")
# Data config
flags.DEFINE_integer('seq_len', default=0,
help='Sequence length for pretraining.')
flags.DEFINE_integer('reuse_len', default=0,
help="How many tokens to be reused in the next batch. "
"Could be half of seq_len")
flags.DEFINE_bool("bi_data", default=True,
help="Use bidirectional data streams, i.e., forward & backward.")
flags.DEFINE_integer("mask_alpha", default=6,
help="How many tokens to form a group.")
flags.DEFINE_integer("mask_beta", default=1,
help="How many tokens to mask within each group.")
flags.DEFINE_integer("num_predict", default=None,
help="Number of tokens to predict in partial prediction.")
flags.DEFINE_integer('perm_size', default=None,
help='perm size.')
flags.DEFINE_bool("uncased", False,
help="Use uncased inputs or not.")
flags.DEFINE_integer("n_token", 32000, help="Vocab size")
# Model config
flags.DEFINE_integer("mem_len", default=0,
help="Number of steps to cache")
flags.DEFINE_bool("same_length", default=False,
help="Same length attention")
flags.DEFINE_integer("clamp_len", default=-1,
help="Clamp length")
flags.DEFINE_integer("n_layer", default=6,
help="Number of layers.")
flags.DEFINE_integer("d_model", default=32,
help="Dimension of the model.")
flags.DEFINE_integer("d_embed", default=32,
help="Dimension of the embeddings.")
flags.DEFINE_integer("n_head", default=4,
help="Number of attention heads.")
flags.DEFINE_integer("d_head", default=8,
help="Dimension of each attention head.")
flags.DEFINE_integer("d_inner", default=32,
help="Dimension of inner hidden size in positionwise feed-forward.")
flags.DEFINE_float("dropout", default=0.0,
help="Dropout rate.")
flags.DEFINE_float("dropatt", default=0.0,
help="Attention dropout rate.")
flags.DEFINE_bool("untie_r", default=False,
help="Untie r_w_bias and r_r_bias")
flags.DEFINE_string("summary_type", default="last",
help="Method used to summarize a sequence into a compact vector.")
flags.DEFINE_string("ff_activation", default="relu",
help="Activation type used in position-wise feed-forward.")
flags.DEFINE_bool("use_bfloat16", False,
help="Whether to use bfloat16.")
# Parameter initialization
flags.DEFINE_enum("init", default="normal",
enum_values=["normal", "uniform"],
help="Initialization method.")
flags.DEFINE_float("init_std", default=0.02,
help="Initialization std when init is normal.")
flags.DEFINE_float("init_range", default=0.1,
help="Initialization std when init is uniform.")
FLAGS = flags.FLAGS
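# Illustrative launch command (not part of the original script); every path and
# hyper-parameter value below is a placeholder:
#   python train_gpu.py \
#     --record_info_dir=data/tfrecords --model_dir=checkpoints/exp1 \
#     --train_batch_size=16 --num_core_per_host=1 \
#     --seq_len=128 --reuse_len=64 --perm_size=32 --num_predict=21 \
#     --train_steps=10000 --save_steps=1000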
def get_model_fn():
def model_fn(features, labels, mems, is_training):
#### Get loss from inputs
total_loss, new_mems, monitor_dict = function_builder.get_loss(
FLAGS, features, labels, mems, is_training)
#### Check model parameters
num_params = sum([np.prod(v.shape) for v in tf.trainable_variables()])
tf.logging.info('#params: {}'.format(num_params))
# GPU
assert is_training
all_vars = tf.trainable_variables()
grads = tf.gradients(total_loss, all_vars)
grads_and_vars = list(zip(grads, all_vars))
return total_loss, new_mems, grads_and_vars
return model_fn
def single_core_graph(is_training, features, mems):
model_fn = get_model_fn()
model_ret = model_fn(
features=features,
labels=None,
mems=mems,
is_training=is_training)
return model_ret
def create_mems_tf(bsz_per_core):
mems = [tf.placeholder(dtype=tf.float32,
shape=[FLAGS.mem_len, bsz_per_core, FLAGS.d_model])
for layer in range(FLAGS.n_layer)]
return mems
def initialize_mems_np(bsz_per_core):
mems_np = [np.zeros(shape=[FLAGS.mem_len, bsz_per_core, FLAGS.d_model],
dtype=np.float32)
for layer in range(FLAGS.n_layer)]
return mems_np
def train(ps_device):
##### Get input function and model function
train_input_fn, record_info_dict = data_utils.get_input_fn(
tfrecord_dir=FLAGS.record_info_dir,
split="train",
bsz_per_host=FLAGS.train_batch_size,
seq_len=FLAGS.seq_len,
reuse_len=FLAGS.reuse_len,
bi_data=FLAGS.bi_data,
num_hosts=1,
num_core_per_host=1, # set to one no matter how many GPUs
perm_size=FLAGS.perm_size,
mask_alpha=FLAGS.mask_alpha,
mask_beta=FLAGS.mask_beta,
uncased=FLAGS.uncased,
num_passes=FLAGS.num_passes,
use_bfloat16=FLAGS.use_bfloat16,
num_predict=FLAGS.num_predict)
# for key, info in record_info_dict.items():
tf.logging.info("num of batches {}".format(record_info_dict["num_batch"]))
##### Create input tensors / placeholders
bsz_per_core = FLAGS.train_batch_size // FLAGS.num_core_per_host
params = {
"batch_size": FLAGS.train_batch_size # the whole batch
}
train_set = train_input_fn(params)
example = train_set.make_one_shot_iterator().get_next()
if FLAGS.num_core_per_host > 1:
examples = [{} for _ in range(FLAGS.num_core_per_host)]
for key in example.keys():
vals = tf.split(example[key], FLAGS.num_core_per_host, 0)
for device_id in range(FLAGS.num_core_per_host):
examples[device_id][key] = vals[device_id]
else:
examples = [example]
##### Create computational graph
tower_mems, tower_losses, tower_new_mems, tower_grads_and_vars = [], [], [], []
for i in range(FLAGS.num_core_per_host):
reuse = True if i > 0 else None
with tf.device(assign_to_gpu(i, ps_device)), \
tf.variable_scope(tf.get_variable_scope(), reuse=reuse):
# The mems for each tower is a dictionary
mems_i = {}
if FLAGS.mem_len:
mems_i["mems"] = create_mems_tf(bsz_per_core)
loss_i, new_mems_i, grads_and_vars_i = single_core_graph(
is_training=True,
features=examples[i],
mems=mems_i)
tower_mems.append(mems_i)
tower_losses.append(loss_i)
tower_new_mems.append(new_mems_i)
tower_grads_and_vars.append(grads_and_vars_i)
## average losses and gradients across towers
if len(tower_losses) > 1:
loss = tf.add_n(tower_losses) / len(tower_losses)
grads_and_vars = average_grads_and_vars(tower_grads_and_vars)
else:
loss = tower_losses[0]
grads_and_vars = tower_grads_and_vars[0]
## get train op
train_op, learning_rate, gnorm = model_utils.get_train_op(FLAGS, None,
grads_and_vars=grads_and_vars)
global_step = tf.train.get_global_step()
##### Training loop
# initialize mems
tower_mems_np = []
for i in range(FLAGS.num_core_per_host):
mems_i_np = {}
for key in tower_mems[i].keys():
mems_i_np[key] = initialize_mems_np(bsz_per_core)
tower_mems_np.append(mems_i_np)
saver = tf.train.Saver()
gpu_options = tf.GPUOptions(allow_growth=True)
model_utils.init_from_checkpoint(FLAGS, global_vars=True)
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
gpu_options=gpu_options)) as sess:
sess.run(tf.global_variables_initializer())
fetches = [loss, tower_new_mems, global_step, gnorm, learning_rate, train_op]
total_loss, prev_step = 0., -1
while True:
feed_dict = {}
for i in range(FLAGS.num_core_per_host):
for key in tower_mems_np[i].keys():
for m, m_np in zip(tower_mems[i][key], tower_mems_np[i][key]):
feed_dict[m] = m_np
fetched = sess.run(fetches, feed_dict=feed_dict)
loss_np, tower_mems_np, curr_step = fetched[:3]
total_loss += loss_np
if curr_step > 0 and curr_step % FLAGS.iterations == 0:
curr_loss = total_loss / (curr_step - prev_step)
tf.logging.info("[{}] | gnorm {:.2f} lr {:8.6f} "
"| loss {:.2f} | pplx {:>7.2f}, bpc {:>7.4f}".format(
curr_step, fetched[-3], fetched[-2],
curr_loss, math.exp(curr_loss), curr_loss / math.log(2)))
total_loss, prev_step = 0., curr_step
if curr_step > 0 and curr_step % FLAGS.save_steps == 0:
save_path = os.path.join(FLAGS.model_dir, "model.ckpt")
saver.save(sess, save_path)
tf.logging.info("Model saved in path: {}".format(save_path))
if curr_step >= FLAGS.train_steps:
break
def main(unused_argv):
del unused_argv # Unused
tf.logging.set_verbosity(tf.logging.INFO)
# Get corpus info
FLAGS.n_token = data_utils.VOCAB_SIZE
tf.logging.info("n_token {}".format(FLAGS.n_token))
if not tf.gfile.Exists(FLAGS.model_dir):
tf.gfile.MakeDirs(FLAGS.model_dir)
train("/gpu:0")
if __name__ == "__main__":
tf.app.run()
| 32.851064 | 81 | 0.70383 | [
"Apache-2.0"
] | 365andreas/xlnet | train_gpu.py | 10,808 | Python |
import os
import utils
import pytest
from utils import argo_utils
def compile_and_run_pipeline(
client,
experiment_id,
pipeline_definition,
input_params,
output_file_dir,
pipeline_name,
):
pipeline_path = os.path.join(output_file_dir, pipeline_name)
utils.run_command(
f"dsl-compile --py {pipeline_definition} --output {pipeline_path}.yaml"
)
run = client.run_pipeline(
experiment_id, pipeline_name, f"{pipeline_path}.yaml", input_params
)
return run.id
def wait_for_job_status(client, run_id, timeout, status_to_check="succeeded"):
response = None
try:
response = client.wait_for_run_completion(run_id, timeout)
except TimeoutError:
print(f"run-id: {run_id} did not stop within specified timeout")
response = client.get_run(run_id)
status = False
if response and response.run.status:
status = response.run.status.lower() == status_to_check
return status
def get_workflow_json(client, run_id):
# API not in readthedocs
# Refer: https://github.com/kubeflow/pipelines/blob/master/sdk/python/kfp/_client.py#L663
return client._get_workflow_json(run_id)
def compile_run_monitor_pipeline(
client,
experiment_id,
pipeline_definition,
input_params,
output_file_dir,
pipeline_name,
timeout,
status_to_check="succeeded",
check=True,
):
run_id = compile_and_run_pipeline(
client,
experiment_id,
pipeline_definition,
input_params,
output_file_dir,
pipeline_name,
)
status = wait_for_job_status(client, run_id, timeout, status_to_check)
workflow_json = get_workflow_json(client, run_id)
if check and not status:
argo_utils.print_workflow_logs(workflow_json["metadata"]["name"])
pytest.fail(f"Test Failed: {pipeline_name}. Run-id: {run_id}")
return run_id, status, workflow_json
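# --- Illustrative sketch (not part of the original helpers) ---
# Typical call from a test, assuming `kfp_client` and `experiment_id` come from
# fixtures; the pipeline path, params and names are hypothetical.
#
# run_id, status, workflow = compile_run_monitor_pipeline(
#     kfp_client,
#     experiment_id,
#     "resources/definition/training_pipeline.py",
#     {"region": "us-east-1"},
#     "/tmp/kfp-output",
#     "sagemaker-training-test",
#     timeout=3600,
# )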
| 26.493151 | 93 | 0.701138 | [
"Apache-2.0"
] | Hydrospheredata/pipelines | components/aws/sagemaker/tests/integration_tests/utils/kfp_client_utils.py | 1,934 | Python |
import time
def bubblesort_Alg(arr, drawData, timeSpeed):
for i in range(len(arr)-1):
for j in range(len(arr)-1):
if(arr[j] > arr[j+1]):
arr[j], arr[j+1] = arr[j+1], arr[j]
# To draw the bars
drawData(arr, ['red' if x == j or x == j +
1 else 'blue' for x in range(len(arr))])
time.sleep(timeSpeed)
drawData(arr, ['red' for i in range(len(arr))])
return arr
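# Illustrative usage (not part of the original visualizer): drawData can be any
# callable taking (array, colour_list), so a no-op lambda works off-screen.
#
# data = [5, 1, 4, 2, 8]
# bubblesort_Alg(data, lambda arr, colours: None, 0.0)
# print(data)  # -> [1, 2, 4, 5, 8]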
| 32.4 | 71 | 0.475309 | [
"CC0-1.0"
] | HarshOza36/hacktoberfest2021 | Python/Sort_Visualizer/bubbleSort.py | 486 | Python |
# coding:utf-8
__author__ = "gaunt"
import enum
# Success flag for tables in the layui front-end framework
layui_table_code = 0
class BaseEnum(enum.Enum):
pass
class ResultEnum(BaseEnum):
success = {"code": 200, "msg": "操作成功"}
error = {"code": 500, "msg": "操作失败"}
error400 = {"code": 400, "msg": "400 - 请求参数错误"}
error401 = {"code": 401, "msg": "401 - 未授权"}
error404 = {"code": 404, "msg": "404 - 未找到资源"}
error405 = {"code": 405, "msg": "405 - 没有找到请求方法"}
error422 = {"code": 422, "msg": "422 - 请求参数不完整"}
login_error = {"code": 1000, "msg": "用户名或密码失败"}
def success_result(data=None, code=None):
value = ResultEnum.success.value
return {
"code": code if code is not None else value["code"],
"msg": value["msg"],
"data": data if data is not None else ""
}
def error_result(data=None, code=None, msg=None):
value = ResultEnum.error.value
return {
"code": code if code is not None else value["code"],
"msg": msg if msg is not None else value["msg"],
"data": data if data is not None else ""
}
class Logical(BaseEnum):
AND = "and"
OR = "or"
class UserStateEnum(BaseEnum):
    NORMAL = 1  # normal
    FREEZE = 0  # frozen
if __name__ == "__main__":
print(Logical.AND.value)
| 22.428571 | 60 | 0.585987 | [
"Apache-2.0"
] | chenghao/haoAdmin | enums.py | 1,376 | Python |
from autoflow.workflow.components.regression_base import AutoFlowRegressionAlgorithm
__all__ = ["DecisionTreeRegressor"]
class DecisionTreeRegressor(AutoFlowRegressionAlgorithm):
module__ = "sklearn.tree"
class__ = "DecisionTreeRegressor"
| 27.777778 | 84 | 0.824 | [
"BSD-3-Clause"
] | auto-flow/auto-flow | autoflow/workflow/components/regression/decision_tree.py | 250 | Python |
"""Implements an async kernel client"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from traitlets import Type # type: ignore
from jupyter_client.channels import HBChannel
from jupyter_client.channels import ZMQSocketChannel
from jupyter_client.client import KernelClient
from jupyter_client.client import reqrep
def wrapped(meth, channel):
def _(self, *args, **kwargs):
reply = kwargs.pop("reply", False)
timeout = kwargs.pop("timeout", None)
msg_id = meth(self, *args, **kwargs)
if not reply:
return msg_id
return self._async_recv_reply(msg_id, timeout=timeout, channel=channel)
return _
class AsyncKernelClient(KernelClient):
"""A KernelClient with async APIs
``get_[channel]_msg()`` methods wait for and return messages on channels,
raising :exc:`queue.Empty` if no message arrives within ``timeout`` seconds.
"""
# --------------------------------------------------------------------------
# Channel proxy methods
# --------------------------------------------------------------------------
get_shell_msg = KernelClient._async_get_shell_msg
get_iopub_msg = KernelClient._async_get_iopub_msg
get_stdin_msg = KernelClient._async_get_stdin_msg
get_control_msg = KernelClient._async_get_control_msg
wait_for_ready = KernelClient._async_wait_for_ready
# The classes to use for the various channels
shell_channel_class = Type(ZMQSocketChannel)
iopub_channel_class = Type(ZMQSocketChannel)
stdin_channel_class = Type(ZMQSocketChannel)
hb_channel_class = Type(HBChannel)
control_channel_class = Type(ZMQSocketChannel)
_recv_reply = KernelClient._async_recv_reply
# replies come on the shell channel
execute = reqrep(wrapped, KernelClient.execute)
history = reqrep(wrapped, KernelClient.history)
complete = reqrep(wrapped, KernelClient.complete)
inspect = reqrep(wrapped, KernelClient.inspect)
kernel_info = reqrep(wrapped, KernelClient.kernel_info)
comm_info = reqrep(wrapped, KernelClient.comm_info)
is_alive = KernelClient._async_is_alive
execute_interactive = KernelClient._async_execute_interactive
# replies come on the control channel
shutdown = reqrep(wrapped, KernelClient.shutdown, channel="control")
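# --- Illustrative sketch (not part of the original module) ---
# Rough async usage via AsyncKernelManager (which can be configured to hand back
# this client class); details vary between jupyter_client versions.
#
# from jupyter_client.manager import AsyncKernelManager
#
# async def run_snippet():
#     km = AsyncKernelManager()
#     await km.start_kernel()
#     kc = km.client()
#     kc.start_channels()
#     await kc.wait_for_ready(timeout=60)
#     reply = await kc.execute("1 + 1", reply=True)  # reply=True waits for the shell reply
#     kc.stop_channels()
#     await km.shutdown_kernel()
#     return reply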
| 36.875 | 80 | 0.7 | [
"MIT"
] | 12mabold/AR-VR-CW1 | venv/lib/python3.8/site-packages/jupyter_client/asynchronous/client.py | 2,360 | Python |
#!/usr/bin/env python
"""
Benchmark a single Dense layer with no host/device data transfers.
The Items/sec reported at the end of the benchmark is based on wall time.
Run with -h or --help for options.
"""
import inspect
import os
import sys
import tensorflow as tf
from tensorflow.python.ipu import utils
def dense(opts, inputs):
# Add ReLU activation function if appropriate option is set
if opts.activation:
return tf.layers.dense(units=opts.size, inputs=inputs, activation=tf.nn.relu)
else:
return tf.layers.dense(units=opts.size, inputs=inputs)
def inputs(opts, index):
value = tf.cast(index, tf.float16)
return {
"inputs": tf.broadcast_to(value, [opts.batch_size, opts.size]),
}
def graph_builder(opts, inputs):
output = dense(opts, inputs["inputs"])
if opts.train:
# Loss is the mean across output matrix:
loss = tf.reduce_mean(output)
optimiser = tf.train.GradientDescentOptimizer(0.01)
with tf.variable_scope("train", reuse=tf.AUTO_REUSE):
# We need to ensure that the train op is executed as part of
# the benchmarking loop by maintaining a step variable and
# forcing a control dependency between it and the train op:
global_step = tf.get_variable(
"step_control", dtype=tf.int32, shape=[])
grads_and_vars = optimiser.compute_gradients(
loss, tf.trainable_variables())
train = optimiser.apply_gradients(grads_and_vars, global_step)
with tf.control_dependencies([train]):
global_step = tf.identity(global_step)
return global_step
return output
def initializer():
utils.move_variable_initialization_to_cpu()
return tf.global_variables_initializer()
def add_args(parser):
parser.add_argument("--batch-size", default=32, type=int,
help="Number of inputs in a mini-batch")
parser.add_argument("--size", default=1024, type=int,
help="Dense layer size")
parser.add_argument("--train", action='store_true', dest='train',
help="Compute loss and optimization pass")
parser.add_argument("--include-activation", action='store_true', dest='activation',
help="Include ReLU activation (otherwise linear/no activation")
parser.set_defaults(train=False, batches_per_step=5000, steps=5)
return parser
def iteration_report(opts, time):
return "{:5f} items/sec".format(opts.batch_size * opts.batches_per_step / time)
if __name__ == '__main__':
# Add benchmark module to path
cwd = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))
sys.path.insert(1, os.path.join(cwd, '..', '..', '..', 'utils',
'benchmarks', 'tensorflow'))
import benchmark
module = benchmark.Benchmark(
graph_builder,
inputs,
initializer,
add_args,
iteration_report
)
options = benchmark.parse_opts(module, False)
if options.shards > 0:
raise NotImplementedError(
"--shards option has not been implemented with this example")
# Log Benchmark Message
print(" Dense layer {} Synthetic benchmark.\n"
" Batch size {}.\n"
" Batches per Step {}.\n"
" Dense size {}.\n"
.format(
"Training" if options.train else "Inference",
options.batch_size,
options.batches_per_step if not options.cycle_report else "n/a",
options.size))
benchmark.run(module, options)
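# Illustrative invocation (not part of the original example); the values are
# placeholders, see add_args() above for the full option list:
#   python dense.py --size 1024 --batch-size 32 --train --include-activation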
| 33.109091 | 87 | 0.635365 | [
"MIT"
] | Splendon/examples | code_examples/tensorflow/kernel_benchmarks/dense.py | 3,642 | Python |
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import nnabla.functions as F
from function_benchmark import FunctionBenchmark, Inspec
class Case:
def __init__(self, shape, axis, rtol=1e-6):
# rtol (relative tolerance) 1e-6 is default for assert_allclose
self.shape = shape
self.axis = axis
self.rtol = rtol
# Print this message by pytest when a test fails.
def __repr__(self):
return 'Case(shape=' + str(self.shape) + \
' axes=' + str(self.axis) + \
', rtol=' + str(self.rtol) + ')'
test_cases = [
# --------------------------------
# Common use case
# --------------------------------
# Axis 0
Case((512, 512), 0),
Case((512, 1024), 0),
Case((512, 2048), 0),
Case((1024, 512), 0),
Case((1024, 1024), 0),
Case((1024, 2048), 0),
Case((2048, 512), 0),
Case((2048, 1024), 0),
Case((2048, 2048), 0),
# Axis 1
Case((512, 512), 1),
Case((512, 1024), 1),
Case((512, 2048), 1),
Case((1024, 512), 1),
Case((1024, 1024), 1),
Case((1024, 2048), 1),
Case((2048, 512), 1),
Case((2048, 1024), 1),
Case((2048, 2048), 1),
# --------------------------------
# Large cases
# --------------------------------
Case((1024*1024, 32), 1),
Case((32, 1024*1024), 0),
Case((2048, 2048), 1),
Case((2048, 2048), 0),
Case((2024*2024, 2), 0),
Case((2, 2024*2024), 1),
# Weak cases
# PyTorch uses Cub library in these cases.
Case((2024*2024, 1), 0),
Case((1, 2024*2024), 1),
]
def create_cumprod_input(rng, shape, axis, with_mask):
x = (rng.randn(*shape)).astype(np.float32)
if with_mask:
# Make zero elements with the probability of `1 / x_shape[axis]`.
# It is the probability of existence of one zero element in each scan axis.
mask = rng.rand(*shape) > (1.0 / shape[axis])
x = x * mask
return x
@pytest.mark.parametrize("seed", [123])
@pytest.mark.parametrize("test_case", test_cases)
@pytest.mark.parametrize('exclusive', [False, True])
@pytest.mark.parametrize('reverse', [False, True])
@pytest.mark.parametrize("with_mask", [True, False])
def test_cumprod(seed, test_case, exclusive, reverse, with_mask, nnabla_opts):
x_shape = test_case.shape
axis = test_case.axis
def init(shape):
rng = np.random.RandomState(seed)
return create_cumprod_input(rng, shape, axis, with_mask)
need_grad = True
inputs = [Inspec(x_shape, init, need_grad)]
func_kwargs = dict(
axis=axis,
exclusive=exclusive,
reverse=reverse,
)
fb = FunctionBenchmark(
F.cumprod, inputs, [], func_kwargs,
nnabla_opts.ext, nnabla_opts.ext_kwargs)
fb.benchmark()
fb.write(writer=nnabla_opts.function_benchmark_writer)
| 29.634783 | 83 | 0.601232 | [
"Apache-2.0"
] | Pandinosaurus/nnabla | python/benchmark/function/test_cumprod.py | 3,408 | Python |
# Copyright 2021 Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# (MIT License)
from bos.operators.utils import PROTOCOL
API_VERSION = 'v1'
SERVICE_NAME = 'cray-bos'
ENDPOINT = "%s://%s/%s" % (PROTOCOL, SERVICE_NAME, API_VERSION)
| 47.62963 | 76 | 0.773717 | [
"MIT"
] | Cray-HPE/bos | src/bos/operators/utils/clients/bos/__init__.py | 1,286 | Python |
from sujson.Csv2json import Csv2json
import unittest
import filecmp
class ConvertCsvToJson(unittest.TestCase):
def setUp(self):
self.csv_to_json = Csv2json()
def test_conversion(self):
self.csv_to_json.load("files/Netflix.csv", delimiter=";")
self.csv_to_json.convert("files/Netflix_jtest.json")
self.assertTrue(filecmp.cmp("files/Netflix_jtest.json", "files/Netflix.json"))
if __name__ == '__main__':
unittest.main()
| 27.411765 | 86 | 0.712446 | [
"MIT"
] | PotasnikM/translator-to-suJSON | tests/test_tidy_csv_to_sureal_json.py | 466 | Python |
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test transaction signing using the signrawtransaction RPC."""
from test_framework.test_framework import JdcoinTestFramework
from test_framework.util import *
class SignRawTransactionsTest(JdcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def successful_signing_test(self):
"""Create and sign a valid raw transaction with one input.
Expected results:
1) The transaction has a complete set of signatures
2) No script verification error occurred"""
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']
inputs = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'}
]
outputs = {'xwMWGTnBNUmGxMm8vfAdbL45bWXyVTYctd': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[0].signrawtransaction(rawTx, inputs, privKeys)
# 1) The transaction has a complete set of signatures
assert 'complete' in rawTxSigned
assert_equal(rawTxSigned['complete'], True)
# 2) No script verification error occurred
assert 'errors' not in rawTxSigned
def script_verification_error_test(self):
"""Create and sign a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script.
Expected results:
3) The transaction has no complete set of signatures
4) Two script verification errors occurred
5) Script verification errors have certain properties ("txid", "vout", "scriptSig", "sequence", "error")
6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)"""
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']
inputs = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7},
# Missing scriptPubKey
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 1},
]
scripts = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7,
'scriptPubKey': 'badbadbadbad'}
]
outputs = {'xwMWGTnBNUmGxMm8vfAdbL45bWXyVTYctd': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[0].signrawtransaction(rawTx, scripts, privKeys)
# 3) The transaction has no complete set of signatures
assert 'complete' in rawTxSigned
assert_equal(rawTxSigned['complete'], False)
# 4) Two script verification errors occurred
assert 'errors' in rawTxSigned
assert_equal(len(rawTxSigned['errors']), 2)
# 5) Script verification errors have certain properties
assert 'txid' in rawTxSigned['errors'][0]
assert 'vout' in rawTxSigned['errors'][0]
assert 'scriptSig' in rawTxSigned['errors'][0]
assert 'sequence' in rawTxSigned['errors'][0]
assert 'error' in rawTxSigned['errors'][0]
# 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)
assert_equal(rawTxSigned['errors'][0]['txid'], inputs[1]['txid'])
assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout'])
assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid'])
assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout'])
def run_test(self):
self.successful_signing_test()
self.script_verification_error_test()
if __name__ == '__main__':
SignRawTransactionsTest().main()
| 42.171429 | 118 | 0.676603 | [
"MIT"
] | anandsinha095/JDCION | test/functional/rpc_signrawtransaction.py | 4,428 | Python |
from accomodation_website.secrets import DB_PWD
docker_compose = """---
version: '3'
services:
web:
build: .
publish:
- 80
links:
- db:db
db:
image: mariadb
environment:
MYSQL_DATABASE: cannes_db
MYSQL_ROOT_PASSWORD: """ + DB_PWD
with open('docker-compose.yml', 'w') as f:
f.write(docker_compose)
| 31.5 | 60 | 0.349206 | [
"MIT"
] | Xogiga/CPOA_INEC_SAVIGNY_VALADE | cannes_accomodation/gen_docker_compose.py | 630 | Python |
"""Tests for Lib/fractions.py."""
from decimal import Decimal
from test.support import requires_IEEE_754
import math
import numbers
import operator
import fractions
import sys
import unittest
import warnings
from copy import copy, deepcopy
from pickle import dumps, loads
F = fractions.Fraction
gcd = fractions.gcd
class DummyFloat(object):
"""Dummy float class for testing comparisons with Fractions"""
def __init__(self, value):
if not isinstance(value, float):
raise TypeError("DummyFloat can only be initialized from float")
self.value = value
def _richcmp(self, other, op):
if isinstance(other, numbers.Rational):
return op(F.from_float(self.value), other)
elif isinstance(other, DummyFloat):
return op(self.value, other.value)
else:
return NotImplemented
def __eq__(self, other): return self._richcmp(other, operator.eq)
def __le__(self, other): return self._richcmp(other, operator.le)
def __lt__(self, other): return self._richcmp(other, operator.lt)
def __ge__(self, other): return self._richcmp(other, operator.ge)
def __gt__(self, other): return self._richcmp(other, operator.gt)
# shouldn't be calling __float__ at all when doing comparisons
def __float__(self):
assert False, "__float__ should not be invoked for comparisons"
# same goes for subtraction
def __sub__(self, other):
assert False, "__sub__ should not be invoked for comparisons"
__rsub__ = __sub__
class DummyRational(object):
"""Test comparison of Fraction with a naive rational implementation."""
def __init__(self, num, den):
g = math.gcd(num, den)
self.num = num // g
self.den = den // g
def __eq__(self, other):
if isinstance(other, fractions.Fraction):
return (self.num == other._numerator and
self.den == other._denominator)
else:
return NotImplemented
def __lt__(self, other):
return(self.num * other._denominator < self.den * other._numerator)
def __gt__(self, other):
return(self.num * other._denominator > self.den * other._numerator)
def __le__(self, other):
return(self.num * other._denominator <= self.den * other._numerator)
def __ge__(self, other):
return(self.num * other._denominator >= self.den * other._numerator)
# this class is for testing comparisons; conversion to float
# should never be used for a comparison, since it loses accuracy
def __float__(self):
assert False, "__float__ should not be invoked"
class DummyFraction(fractions.Fraction):
"""Dummy Fraction subclass for copy and deepcopy testing."""
class GcdTest(unittest.TestCase):
def testMisc(self):
# fractions.gcd() is deprecated
with self.assertWarnsRegex(DeprecationWarning, r'fractions\.gcd'):
gcd(1, 1)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'fractions\.gcd',
DeprecationWarning)
self.assertEqual(0, gcd(0, 0))
self.assertEqual(1, gcd(1, 0))
self.assertEqual(-1, gcd(-1, 0))
self.assertEqual(1, gcd(0, 1))
self.assertEqual(-1, gcd(0, -1))
self.assertEqual(1, gcd(7, 1))
self.assertEqual(-1, gcd(7, -1))
self.assertEqual(1, gcd(-23, 15))
self.assertEqual(12, gcd(120, 84))
self.assertEqual(-12, gcd(84, -120))
self.assertEqual(gcd(120.0, 84), 12.0)
self.assertEqual(gcd(120, 84.0), 12.0)
self.assertEqual(gcd(F(120), F(84)), F(12))
self.assertEqual(gcd(F(120, 77), F(84, 55)), F(12, 385))
def _components(r):
return (r.numerator, r.denominator)
class FractionTest(unittest.TestCase):
def assertTypedEquals(self, expected, actual):
"""Asserts that both the types and values are the same."""
self.assertEqual(type(expected), type(actual))
self.assertEqual(expected, actual)
def assertRaisesMessage(self, exc_type, message,
callable, *args, **kwargs):
"""Asserts that callable(*args, **kwargs) raises exc_type(message)."""
try:
callable(*args, **kwargs)
except exc_type as e:
self.assertEqual(message, str(e))
else:
self.fail("%s not raised" % exc_type.__name__)
def testInit(self):
self.assertEqual((0, 1), _components(F()))
self.assertEqual((7, 1), _components(F(7)))
self.assertEqual((7, 3), _components(F(F(7, 3))))
self.assertEqual((-1, 1), _components(F(-1, 1)))
self.assertEqual((-1, 1), _components(F(1, -1)))
self.assertEqual((1, 1), _components(F(-2, -2)))
self.assertEqual((1, 2), _components(F(5, 10)))
self.assertEqual((7, 15), _components(F(7, 15)))
self.assertEqual((10**23, 1), _components(F(10**23)))
self.assertEqual((3, 77), _components(F(F(3, 7), 11)))
self.assertEqual((-9, 5), _components(F(2, F(-10, 9))))
self.assertEqual((2486, 2485), _components(F(F(22, 7), F(355, 113))))
self.assertRaisesMessage(ZeroDivisionError, "Fraction(12, 0)",
F, 12, 0)
self.assertRaises(TypeError, F, 1.5 + 3j)
self.assertRaises(TypeError, F, "3/2", 3)
self.assertRaises(TypeError, F, 3, 0j)
self.assertRaises(TypeError, F, 3, 1j)
self.assertRaises(TypeError, F, 1, 2, 3)
@requires_IEEE_754
def testInitFromFloat(self):
self.assertEqual((5, 2), _components(F(2.5)))
self.assertEqual((0, 1), _components(F(-0.0)))
self.assertEqual((3602879701896397, 36028797018963968),
_components(F(0.1)))
# bug 16469: error types should be consistent with float -> int
self.assertRaises(ValueError, F, float('nan'))
self.assertRaises(OverflowError, F, float('inf'))
self.assertRaises(OverflowError, F, float('-inf'))
def testInitFromDecimal(self):
self.assertEqual((11, 10),
_components(F(Decimal('1.1'))))
self.assertEqual((7, 200),
_components(F(Decimal('3.5e-2'))))
self.assertEqual((0, 1),
_components(F(Decimal('.000e20'))))
# bug 16469: error types should be consistent with decimal -> int
self.assertRaises(ValueError, F, Decimal('nan'))
self.assertRaises(ValueError, F, Decimal('snan'))
self.assertRaises(OverflowError, F, Decimal('inf'))
self.assertRaises(OverflowError, F, Decimal('-inf'))
def testFromString(self):
self.assertEqual((5, 1), _components(F("5")))
self.assertEqual((3, 2), _components(F("3/2")))
self.assertEqual((3, 2), _components(F(" \n +3/2")))
self.assertEqual((-3, 2), _components(F("-3/2 ")))
self.assertEqual((13, 2), _components(F(" 013/02 \n ")))
self.assertEqual((16, 5), _components(F(" 3.2 ")))
self.assertEqual((-16, 5), _components(F(" -3.2 ")))
self.assertEqual((-3, 1), _components(F(" -3. ")))
self.assertEqual((3, 5), _components(F(" .6 ")))
self.assertEqual((1, 3125), _components(F("32.e-5")))
self.assertEqual((1000000, 1), _components(F("1E+06")))
self.assertEqual((-12300, 1), _components(F("-1.23e4")))
self.assertEqual((0, 1), _components(F(" .0e+0\t")))
self.assertEqual((0, 1), _components(F("-0.000e0")))
self.assertRaisesMessage(
ZeroDivisionError, "Fraction(3, 0)",
F, "3/0")
self.assertRaisesMessage(
ValueError, "Invalid literal for Fraction: '3/'",
F, "3/")
self.assertRaisesMessage(
ValueError, "Invalid literal for Fraction: '/2'",
F, "/2")
self.assertRaisesMessage(
ValueError, "Invalid literal for Fraction: '3 /2'",
F, "3 /2")
self.assertRaisesMessage(
# Denominators don't need a sign.
ValueError, "Invalid literal for Fraction: '3/+2'",
F, "3/+2")
self.assertRaisesMessage(
# Imitate float's parsing.
ValueError, "Invalid literal for Fraction: '+ 3/2'",
F, "+ 3/2")
self.assertRaisesMessage(
# Avoid treating '.' as a regex special character.
ValueError, "Invalid literal for Fraction: '3a2'",
F, "3a2")
self.assertRaisesMessage(
# Don't accept combinations of decimals and rationals.
ValueError, "Invalid literal for Fraction: '3/7.2'",
F, "3/7.2")
self.assertRaisesMessage(
# Don't accept combinations of decimals and rationals.
ValueError, "Invalid literal for Fraction: '3.2/7'",
F, "3.2/7")
self.assertRaisesMessage(
# Allow 3. and .3, but not .
ValueError, "Invalid literal for Fraction: '.'",
F, ".")
def testImmutable(self):
r = F(7, 3)
r.__init__(2, 15)
self.assertEqual((7, 3), _components(r))
self.assertRaises(AttributeError, setattr, r, 'numerator', 12)
self.assertRaises(AttributeError, setattr, r, 'denominator', 6)
self.assertEqual((7, 3), _components(r))
# But if you _really_ need to:
r._numerator = 4
r._denominator = 2
self.assertEqual((4, 2), _components(r))
# Which breaks some important operations:
self.assertNotEqual(F(4, 2), r)
def testFromFloat(self):
self.assertRaises(TypeError, F.from_float, 3+4j)
self.assertEqual((10, 1), _components(F.from_float(10)))
bigint = 1234567890123456789
self.assertEqual((bigint, 1), _components(F.from_float(bigint)))
self.assertEqual((0, 1), _components(F.from_float(-0.0)))
self.assertEqual((10, 1), _components(F.from_float(10.0)))
self.assertEqual((-5, 2), _components(F.from_float(-2.5)))
self.assertEqual((99999999999999991611392, 1),
_components(F.from_float(1e23)))
self.assertEqual(float(10**23), float(F.from_float(1e23)))
self.assertEqual((3602879701896397, 1125899906842624),
_components(F.from_float(3.2)))
self.assertEqual(3.2, float(F.from_float(3.2)))
inf = 1e1000
nan = inf - inf
# bug 16469: error types should be consistent with float -> int
self.assertRaisesMessage(
OverflowError, "cannot convert Infinity to integer ratio",
F.from_float, inf)
self.assertRaisesMessage(
OverflowError, "cannot convert Infinity to integer ratio",
F.from_float, -inf)
self.assertRaisesMessage(
ValueError, "cannot convert NaN to integer ratio",
F.from_float, nan)
def testFromDecimal(self):
self.assertRaises(TypeError, F.from_decimal, 3+4j)
self.assertEqual(F(10, 1), F.from_decimal(10))
self.assertEqual(F(0), F.from_decimal(Decimal("-0")))
self.assertEqual(F(5, 10), F.from_decimal(Decimal("0.5")))
self.assertEqual(F(5, 1000), F.from_decimal(Decimal("5e-3")))
self.assertEqual(F(5000), F.from_decimal(Decimal("5e3")))
self.assertEqual(1 - F(1, 10**30),
F.from_decimal(Decimal("0." + "9" * 30)))
# bug 16469: error types should be consistent with decimal -> int
self.assertRaisesMessage(
OverflowError, "cannot convert Infinity to integer ratio",
F.from_decimal, Decimal("inf"))
self.assertRaisesMessage(
OverflowError, "cannot convert Infinity to integer ratio",
F.from_decimal, Decimal("-inf"))
self.assertRaisesMessage(
ValueError, "cannot convert NaN to integer ratio",
F.from_decimal, Decimal("nan"))
self.assertRaisesMessage(
ValueError, "cannot convert NaN to integer ratio",
F.from_decimal, Decimal("snan"))
def testLimitDenominator(self):
rpi = F('3.1415926535897932')
self.assertEqual(rpi.limit_denominator(10000), F(355, 113))
self.assertEqual(-rpi.limit_denominator(10000), F(-355, 113))
self.assertEqual(rpi.limit_denominator(113), F(355, 113))
self.assertEqual(rpi.limit_denominator(112), F(333, 106))
self.assertEqual(F(201, 200).limit_denominator(100), F(1))
self.assertEqual(F(201, 200).limit_denominator(101), F(102, 101))
self.assertEqual(F(0).limit_denominator(10000), F(0))
for i in (0, -1):
self.assertRaisesMessage(
ValueError, "max_denominator should be at least 1",
F(1).limit_denominator, i)
def testConversions(self):
self.assertTypedEquals(-1, math.trunc(F(-11, 10)))
self.assertTypedEquals(1, math.trunc(F(11, 10)))
self.assertTypedEquals(-2, math.floor(F(-11, 10)))
self.assertTypedEquals(-1, math.ceil(F(-11, 10)))
self.assertTypedEquals(-1, math.ceil(F(-10, 10)))
self.assertTypedEquals(-1, int(F(-11, 10)))
self.assertTypedEquals(0, round(F(-1, 10)))
self.assertTypedEquals(0, round(F(-5, 10)))
self.assertTypedEquals(-2, round(F(-15, 10)))
self.assertTypedEquals(-1, round(F(-7, 10)))
self.assertEqual(False, bool(F(0, 1)))
self.assertEqual(True, bool(F(3, 2)))
self.assertTypedEquals(0.1, float(F(1, 10)))
# Check that __float__ isn't implemented by converting the
# numerator and denominator to float before dividing.
self.assertRaises(OverflowError, float, int('2'*400+'7'))
self.assertAlmostEqual(2.0/3,
float(F(int('2'*400+'7'), int('3'*400+'1'))))
self.assertTypedEquals(0.1+0j, complex(F(1,10)))
def testRound(self):
self.assertTypedEquals(F(-200), round(F(-150), -2))
self.assertTypedEquals(F(-200), round(F(-250), -2))
self.assertTypedEquals(F(30), round(F(26), -1))
self.assertTypedEquals(F(-2, 10), round(F(-15, 100), 1))
self.assertTypedEquals(F(-2, 10), round(F(-25, 100), 1))
def testArithmetic(self):
self.assertEqual(F(1, 2), F(1, 10) + F(2, 5))
self.assertEqual(F(-3, 10), F(1, 10) - F(2, 5))
self.assertEqual(F(1, 25), F(1, 10) * F(2, 5))
self.assertEqual(F(1, 4), F(1, 10) / F(2, 5))
self.assertTypedEquals(2, F(9, 10) // F(2, 5))
self.assertTypedEquals(10**23, F(10**23, 1) // F(1))
self.assertEqual(F(2, 3), F(-7, 3) % F(3, 2))
self.assertEqual(F(8, 27), F(2, 3) ** F(3))
self.assertEqual(F(27, 8), F(2, 3) ** F(-3))
self.assertTypedEquals(2.0, F(4) ** F(1, 2))
self.assertEqual(F(1, 1), +F(1, 1))
z = pow(F(-1), F(1, 2))
self.assertAlmostEqual(z.real, 0)
self.assertEqual(z.imag, 1)
# Regression test for #27539.
p = F(-1, 2) ** 0
self.assertEqual(p, F(1, 1))
self.assertEqual(p.numerator, 1)
self.assertEqual(p.denominator, 1)
p = F(-1, 2) ** -1
self.assertEqual(p, F(-2, 1))
self.assertEqual(p.numerator, -2)
self.assertEqual(p.denominator, 1)
p = F(-1, 2) ** -2
self.assertEqual(p, F(4, 1))
self.assertEqual(p.numerator, 4)
self.assertEqual(p.denominator, 1)
def testMixedArithmetic(self):
self.assertTypedEquals(F(11, 10), F(1, 10) + 1)
self.assertTypedEquals(1.1, F(1, 10) + 1.0)
self.assertTypedEquals(1.1 + 0j, F(1, 10) + (1.0 + 0j))
self.assertTypedEquals(F(11, 10), 1 + F(1, 10))
self.assertTypedEquals(1.1, 1.0 + F(1, 10))
self.assertTypedEquals(1.1 + 0j, (1.0 + 0j) + F(1, 10))
self.assertTypedEquals(F(-9, 10), F(1, 10) - 1)
self.assertTypedEquals(-0.9, F(1, 10) - 1.0)
self.assertTypedEquals(-0.9 + 0j, F(1, 10) - (1.0 + 0j))
self.assertTypedEquals(F(9, 10), 1 - F(1, 10))
self.assertTypedEquals(0.9, 1.0 - F(1, 10))
self.assertTypedEquals(0.9 + 0j, (1.0 + 0j) - F(1, 10))
self.assertTypedEquals(F(1, 10), F(1, 10) * 1)
self.assertTypedEquals(0.1, F(1, 10) * 1.0)
self.assertTypedEquals(0.1 + 0j, F(1, 10) * (1.0 + 0j))
self.assertTypedEquals(F(1, 10), 1 * F(1, 10))
self.assertTypedEquals(0.1, 1.0 * F(1, 10))
self.assertTypedEquals(0.1 + 0j, (1.0 + 0j) * F(1, 10))
self.assertTypedEquals(F(1, 10), F(1, 10) / 1)
self.assertTypedEquals(0.1, F(1, 10) / 1.0)
self.assertTypedEquals(0.1 + 0j, F(1, 10) / (1.0 + 0j))
self.assertTypedEquals(F(10, 1), 1 / F(1, 10))
self.assertTypedEquals(10.0, 1.0 / F(1, 10))
self.assertTypedEquals(10.0 + 0j, (1.0 + 0j) / F(1, 10))
self.assertTypedEquals(0, F(1, 10) // 1)
self.assertTypedEquals(0, F(1, 10) // 1.0)
self.assertTypedEquals(10, 1 // F(1, 10))
self.assertTypedEquals(10**23, 10**22 // F(1, 10))
self.assertTypedEquals(10, 1.0 // F(1, 10))
self.assertTypedEquals(F(1, 10), F(1, 10) % 1)
self.assertTypedEquals(0.1, F(1, 10) % 1.0)
self.assertTypedEquals(F(0, 1), 1 % F(1, 10))
self.assertTypedEquals(0.0, 1.0 % F(1, 10))
# No need for divmod since we don't override it.
# ** has more interesting conversion rules.
self.assertTypedEquals(F(100, 1), F(1, 10) ** -2)
self.assertTypedEquals(F(100, 1), F(10, 1) ** 2)
self.assertTypedEquals(0.1, F(1, 10) ** 1.0)
self.assertTypedEquals(0.1 + 0j, F(1, 10) ** (1.0 + 0j))
self.assertTypedEquals(4 , 2 ** F(2, 1))
z = pow(-1, F(1, 2))
self.assertAlmostEqual(0, z.real)
self.assertEqual(1, z.imag)
self.assertTypedEquals(F(1, 4) , 2 ** F(-2, 1))
self.assertTypedEquals(2.0 , 4 ** F(1, 2))
self.assertTypedEquals(0.25, 2.0 ** F(-2, 1))
self.assertTypedEquals(1.0 + 0j, (1.0 + 0j) ** F(1, 10))
self.assertRaises(ZeroDivisionError, operator.pow,
F(0, 1), -2)
def testMixingWithDecimal(self):
# Decimal refuses mixed arithmetic (but not mixed comparisons)
self.assertRaises(TypeError, operator.add,
F(3,11), Decimal('3.1415926'))
self.assertRaises(TypeError, operator.add,
Decimal('3.1415926'), F(3,11))
def testComparisons(self):
self.assertTrue(F(1, 2) < F(2, 3))
self.assertFalse(F(1, 2) < F(1, 2))
self.assertTrue(F(1, 2) <= F(2, 3))
self.assertTrue(F(1, 2) <= F(1, 2))
self.assertFalse(F(2, 3) <= F(1, 2))
self.assertTrue(F(1, 2) == F(1, 2))
self.assertFalse(F(1, 2) == F(1, 3))
self.assertFalse(F(1, 2) != F(1, 2))
self.assertTrue(F(1, 2) != F(1, 3))
def testComparisonsDummyRational(self):
self.assertTrue(F(1, 2) == DummyRational(1, 2))
self.assertTrue(DummyRational(1, 2) == F(1, 2))
self.assertFalse(F(1, 2) == DummyRational(3, 4))
self.assertFalse(DummyRational(3, 4) == F(1, 2))
self.assertTrue(F(1, 2) < DummyRational(3, 4))
self.assertFalse(F(1, 2) < DummyRational(1, 2))
self.assertFalse(F(1, 2) < DummyRational(1, 7))
self.assertFalse(F(1, 2) > DummyRational(3, 4))
self.assertFalse(F(1, 2) > DummyRational(1, 2))
self.assertTrue(F(1, 2) > DummyRational(1, 7))
self.assertTrue(F(1, 2) <= DummyRational(3, 4))
self.assertTrue(F(1, 2) <= DummyRational(1, 2))
self.assertFalse(F(1, 2) <= DummyRational(1, 7))
self.assertFalse(F(1, 2) >= DummyRational(3, 4))
self.assertTrue(F(1, 2) >= DummyRational(1, 2))
self.assertTrue(F(1, 2) >= DummyRational(1, 7))
self.assertTrue(DummyRational(1, 2) < F(3, 4))
self.assertFalse(DummyRational(1, 2) < F(1, 2))
self.assertFalse(DummyRational(1, 2) < F(1, 7))
self.assertFalse(DummyRational(1, 2) > F(3, 4))
self.assertFalse(DummyRational(1, 2) > F(1, 2))
self.assertTrue(DummyRational(1, 2) > F(1, 7))
self.assertTrue(DummyRational(1, 2) <= F(3, 4))
self.assertTrue(DummyRational(1, 2) <= F(1, 2))
self.assertFalse(DummyRational(1, 2) <= F(1, 7))
self.assertFalse(DummyRational(1, 2) >= F(3, 4))
self.assertTrue(DummyRational(1, 2) >= F(1, 2))
self.assertTrue(DummyRational(1, 2) >= F(1, 7))
def testComparisonsDummyFloat(self):
x = DummyFloat(1./3.)
y = F(1, 3)
self.assertTrue(x != y)
self.assertTrue(x < y or x > y)
self.assertFalse(x == y)
self.assertFalse(x <= y and x >= y)
self.assertTrue(y != x)
self.assertTrue(y < x or y > x)
self.assertFalse(y == x)
self.assertFalse(y <= x and y >= x)
def testMixedLess(self):
self.assertTrue(2 < F(5, 2))
self.assertFalse(2 < F(4, 2))
self.assertTrue(F(5, 2) < 3)
self.assertFalse(F(4, 2) < 2)
self.assertTrue(F(1, 2) < 0.6)
self.assertFalse(F(1, 2) < 0.4)
self.assertTrue(0.4 < F(1, 2))
self.assertFalse(0.5 < F(1, 2))
self.assertFalse(float('inf') < F(1, 2))
self.assertTrue(float('-inf') < F(0, 10))
self.assertFalse(float('nan') < F(-3, 7))
self.assertTrue(F(1, 2) < float('inf'))
self.assertFalse(F(17, 12) < float('-inf'))
self.assertFalse(F(144, -89) < float('nan'))
def testMixedLessEqual(self):
self.assertTrue(0.5 <= F(1, 2))
self.assertFalse(0.6 <= F(1, 2))
self.assertTrue(F(1, 2) <= 0.5)
self.assertFalse(F(1, 2) <= 0.4)
self.assertTrue(2 <= F(4, 2))
self.assertFalse(2 <= F(3, 2))
self.assertTrue(F(4, 2) <= 2)
self.assertFalse(F(5, 2) <= 2)
self.assertFalse(float('inf') <= F(1, 2))
self.assertTrue(float('-inf') <= F(0, 10))
self.assertFalse(float('nan') <= F(-3, 7))
self.assertTrue(F(1, 2) <= float('inf'))
self.assertFalse(F(17, 12) <= float('-inf'))
self.assertFalse(F(144, -89) <= float('nan'))
def testBigFloatComparisons(self):
# Because 10**23 can't be represented exactly as a float:
self.assertFalse(F(10**23) == float(10**23))
# The first test demonstrates why these are important.
self.assertFalse(1e23 < float(F(math.trunc(1e23) + 1)))
self.assertTrue(1e23 < F(math.trunc(1e23) + 1))
self.assertFalse(1e23 <= F(math.trunc(1e23) - 1))
self.assertTrue(1e23 > F(math.trunc(1e23) - 1))
self.assertFalse(1e23 >= F(math.trunc(1e23) + 1))
def testBigComplexComparisons(self):
self.assertFalse(F(10**23) == complex(10**23))
self.assertRaises(TypeError, operator.gt, F(10**23), complex(10**23))
self.assertRaises(TypeError, operator.le, F(10**23), complex(10**23))
x = F(3, 8)
z = complex(0.375, 0.0)
w = complex(0.375, 0.2)
self.assertTrue(x == z)
self.assertFalse(x != z)
self.assertFalse(x == w)
self.assertTrue(x != w)
for op in operator.lt, operator.le, operator.gt, operator.ge:
self.assertRaises(TypeError, op, x, z)
self.assertRaises(TypeError, op, z, x)
self.assertRaises(TypeError, op, x, w)
self.assertRaises(TypeError, op, w, x)
def testMixedEqual(self):
self.assertTrue(0.5 == F(1, 2))
self.assertFalse(0.6 == F(1, 2))
self.assertTrue(F(1, 2) == 0.5)
self.assertFalse(F(1, 2) == 0.4)
self.assertTrue(2 == F(4, 2))
self.assertFalse(2 == F(3, 2))
self.assertTrue(F(4, 2) == 2)
self.assertFalse(F(5, 2) == 2)
self.assertFalse(F(5, 2) == float('nan'))
self.assertFalse(float('nan') == F(3, 7))
self.assertFalse(F(5, 2) == float('inf'))
self.assertFalse(float('-inf') == F(2, 5))
def testStringification(self):
self.assertEqual("Fraction(7, 3)", repr(F(7, 3)))
self.assertEqual("Fraction(6283185307, 2000000000)",
repr(F('3.1415926535')))
self.assertEqual("Fraction(-1, 100000000000000000000)",
repr(F(1, -10**20)))
self.assertEqual("7/3", str(F(7, 3)))
self.assertEqual("7", str(F(7, 1)))
def testHash(self):
hmod = sys.hash_info.modulus
hinf = sys.hash_info.inf
self.assertEqual(hash(2.5), hash(F(5, 2)))
self.assertEqual(hash(10**50), hash(F(10**50)))
self.assertNotEqual(hash(float(10**23)), hash(F(10**23)))
self.assertEqual(hinf, hash(F(1, hmod)))
# Check that __hash__ produces the same value as hash(), for
# consistency with int and Decimal. (See issue #10356.)
self.assertEqual(hash(F(-1)), F(-1).__hash__())
def testApproximatePi(self):
# Algorithm borrowed from
# http://docs.python.org/lib/decimal-recipes.html
three = F(3)
lasts, t, s, n, na, d, da = 0, three, 3, 1, 0, 0, 24
while abs(s - lasts) > F(1, 10**9):
lasts = s
n, na = n+na, na+8
d, da = d+da, da+32
t = (t * n) / d
s += t
self.assertAlmostEqual(math.pi, s)
def testApproximateCos1(self):
# Algorithm borrowed from
# http://docs.python.org/lib/decimal-recipes.html
x = F(1)
i, lasts, s, fact, num, sign = 0, 0, F(1), 1, 1, 1
while abs(s - lasts) > F(1, 10**9):
lasts = s
i += 2
fact *= i * (i-1)
num *= x * x
sign *= -1
s += num / fact * sign
self.assertAlmostEqual(math.cos(1), s)
def test_copy_deepcopy_pickle(self):
r = F(13, 7)
dr = DummyFraction(13, 7)
self.assertEqual(r, loads(dumps(r)))
self.assertEqual(id(r), id(copy(r)))
self.assertEqual(id(r), id(deepcopy(r)))
self.assertNotEqual(id(dr), id(copy(dr)))
self.assertNotEqual(id(dr), id(deepcopy(dr)))
self.assertTypedEquals(dr, copy(dr))
self.assertTypedEquals(dr, deepcopy(dr))
def test_slots(self):
# Issue 4998
r = F(13, 7)
self.assertRaises(AttributeError, setattr, r, 'a', 10)
if __name__ == '__main__':
    unittest.main()
| 41.737421 | 78 | 0.573102 | ["Apache-2.0"] | 4nkitd/pyAutomation | Mark_attandance_py_selenium/py/App/Python/Lib/test/test_fractions.py | 26,545 | Python
# Copyright 2019-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Cirq Operation class
====================
**Module name:** :mod:`pennylane_cirq.cirq_operation`
.. currentmodule:: pennylane_cirq.cirq_operation
A helper class that wraps the native Cirq operations and provides an interface for PennyLane.
Classes
-------
.. autosummary::
CirqOperation
Code details
~~~~~~~~~~~~
"""
from collections.abc import Sequence
import cirq
import pennylane as qml
class CirqOperation:
"""A helper class that wraps the native Cirq operations and provides an
interface for parametrization and application."""
def __init__(self, parametrization):
"""Initializes the CirqOperation
Args:
            parametrization (Tuple[float] -> Union[Cirq:Gate, List[Cirq:Gate]]): Converts the
PennyLane gate parameters to an ordered list of gates that are to be applied.
"""
self.parametrization = parametrization
self.parametrized_cirq_gates = None
self.is_inverse = False
def parametrize(self, *args):
"""Parametrizes the CirqOperation.
Args:
*args (float): the parameters for the operations
"""
self.parametrized_cirq_gates = self.parametrization(*args)
if not isinstance(self.parametrized_cirq_gates, Sequence):
self.parametrized_cirq_gates = [self.parametrized_cirq_gates]
if self.is_inverse:
# Cirq automatically reverses the order if it gets an iterable
self.parametrized_cirq_gates = cirq.inverse(self.parametrized_cirq_gates)
def apply(self, *qubits):
"""Applies the CirqOperation.
Args:
*qubits (Cirq:Qid): the qubits on which the Cirq gates should be performed.
"""
if not self.parametrized_cirq_gates:
raise qml.DeviceError("CirqOperation must be parametrized before it can be applied.")
return (parametrized_gate(*qubits) for parametrized_gate in self.parametrized_cirq_gates)
def inv(self):
"""Inverses the CirqOperation."""
# We can also support inversion after parametrization, but this is not necessary for the
# PennyLane-Cirq codebase at the moment.
if self.parametrized_cirq_gates:
raise qml.DeviceError("CirqOperation can't be inverted after it was parametrized.")
self.is_inverse = not self.is_inverse
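# Minimal usage sketch, assuming ``cirq.rx`` and ``cirq.LineQubit`` from the
# installed Cirq version; the gate choice and the 0.5 angle are illustrative.
# The wrapper is (optionally) inverted and parametrized first, then applied to
# concrete qubits to yield Cirq operations.
if __name__ == "__main__":
    rotation = CirqOperation(lambda phi: [cirq.rx(phi)])
    rotation.inv()  # inversion must be requested before parametrization
    rotation.parametrize(0.5)
    qubit = cirq.LineQubit(0)
    circuit = cirq.Circuit(rotation.apply(qubit))
    print(circuit)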
| 33.235955 | 97 | 0.694388 | ["Apache-2.0"] | PennyLaneAI/pennylane-cirq | pennylane_cirq/cirq_operation.py | 2,958 | Python
import traceback
import discord
import asyncio
from discord.ext import commands, flags
from helpers import exceptions, log, utilityfunctions as util
from data import database as db
logger = log.get_logger(__name__)
command_logger = log.get_logger("commands")
class Events(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
"""The event triggered when an error is raised while invoking a command."""
if hasattr(ctx.command, "on_error"):
return
error = getattr(error, "original", error)
if isinstance(error, commands.CommandNotFound):
return
if isinstance(error, commands.MissingRequiredArgument):
return await util.send_command_help(ctx)
command_logger.error(
f'{type(error).__name__:25} > {ctx.guild} ? {ctx.author} "{ctx.message.content}" > {error}'
)
if isinstance(error, util.ErrorMessage):
return await ctx.send(str(error))
if isinstance(error, commands.MissingPermissions):
perms = ", ".join(f"`{x}`" for x in error.missing_perms)
return await ctx.send(
f":warning: You require {perms} permission to use this command!"
)
elif isinstance(error, commands.BotMissingPermissions):
perms = ", ".join(f"`{x}`" for x in error.missing_perms)
return await ctx.send(
f":warning: Cannot execute command! Bot is missing permission {perms}"
)
elif isinstance(error, commands.CommandOnCooldown):
if db.is_patron(ctx.author.id, (2, 3)):
return await ctx.reinvoke()
else:
return await ctx.send(
f":hourglass: This command is on a cooldown! (`{error.retry_after:.2f}s` remaining)"
)
elif isinstance(error, commands.DisabledCommand):
await ctx.send(f":warning: `{ctx.command}` has been disabled!")
elif isinstance(error, commands.NoPrivateMessage):
await ctx.author.send(
":warning: You cannot use this command in private messages"
)
elif isinstance(error, util.PatronCheckFailure):
await ctx.send(":no_entry: Support me on patreon to use this command! <https://patreon.com/joinemm>")
elif isinstance(error, (commands.NotOwner, commands.CheckFailure)):
await ctx.send(
":warning: Sorry, you are not authorized to use this command!"
)
elif isinstance(error, exceptions.BlacklistTrigger):
if error.blacklist_type == "command":
message = "This command has been blacklisted by the server moderators"
elif error.blacklist_type == "channel":
message = "Command usage on this channel has been blacklisted by the server moderators"
elif error.blacklist_type == "user":
message = "You have been blacklisted from using commands by the server moderators"
elif error.blacklist_type == "global":
message = "You have been blacklisted from using Miso Bot"
delete = error.do_delete
await ctx.send(
f":no_entry_sign: `{message}`", delete_after=(5 if delete else None)
)
if delete:
await asyncio.sleep(5)
await ctx.message.delete()
elif isinstance(error, (commands.BadArgument, flags._parser.ArgumentParsingError)):
await ctx.send(f"```{str(error)}```")
elif isinstance(error, discord.errors.Forbidden):
try:
await ctx.send(f"```{str(error)}```")
except discord.errors.Forbidden:
try:
await ctx.message.add_reaction("🙊")
except discord.errors.Forbidden:
logger.error(str(error))
elif isinstance(error, exceptions.LastFMError):
await ctx.send(f"```{str(error)}```")
else:
traceback.print_exception(type(error), error, error.__traceback__)
await ctx.send(f"```\n{type(error).__name__}: {str(error)}```")
def setup(bot):
bot.add_cog(Events(bot))
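# Minimal loading sketch, assuming the discord.py 1.x extension layout implied
# by this file's path (cogs/errorhandler.py); the command prefix is illustrative:
#
#     bot = commands.Bot(command_prefix="!")
#     bot.load_extension("cogs.errorhandler")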
| 37.929825 | 113 | 0.598289 | ["MIT"] | ZackHart2400/miso-bot | cogs/errorhandler.py | 4,327 | Python