blob_id (string, 40) | directory_id (string, 40) | path (string, 5-283) | content_id (string, 40) | detected_licenses (sequence, 0-41) | license_type (2 classes) | repo_name (string, 7-96) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (58 classes) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 12.7k-662M, nullable) | star_events_count (int64, 0-35.5k) | fork_events_count (int64, 0-20.6k) | gha_license_id (11 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (43 classes) | src_encoding (9 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 7-5.88M) | extension (30 classes) | content (string, 7-5.88M) | authors (sequence, 1) | author (string, 0-73) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2e3fd258d656b4aae266bf98af8ec3d8ea66dd36 | 7891b57b0f65181bbbfbf69abbd6a86d9e293459 | /programs/tic_tac_toe.py | ec5f4964fc0be3040ae71f7c99d38aa86a5cdaee | [] | no_license | NIKsaurabh/python_programs1 | 79a465c2bad8c29c21e470e035ef571ae84199a9 | 29db29763f9b70ed20f562a9b1514b5f90d685e1 | refs/heads/master | 2020-06-04T18:45:23.052348 | 2019-06-23T11:02:12 | 2019-06-23T11:02:12 | 192,149,972 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,720 | py | import numpy
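# Two-player tic-tac-toe on a 3x3 numpy array of characters; '_' marks an empty cell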
board=numpy.array([['_','_','_'],['_','_','_'],['_','_','_']])
p1s='X'
p2s='O'
def check_rows(symbol):
for r in range(3):
count=0
for c in range(3):
if board[r][c]==symbol:
count=count+1
if count==3:
print(symbol, " won")
return True
return False
def check_cols(symbol):
for c in range(3):
count=0
for r in range(3):
if board[r][c]==symbol:
count=count+1
if count==3:
print(symbol, " won")
return True
return False
def check_diagonals(symbol):
if board[0][0]==board[1][1] and board[1][1]==board[2][2] and board[1][1]==symbol:
print(symbol, " won")
return True
    if board[0][2]==board[1][1] and board[1][1]==board[2][0] and board[1][1]==symbol:
print(symbol, " won")
return True
return False
def won(symbol):
return check_rows(symbol) or check_cols(symbol) or check_diagonals(symbol)
def place(symbol):
print(numpy.matrix(board))
while(1):
row=int(input("Enter row (1 or 2 or 3) :"))
col=int(input("Enter column (1 or 2 or 3) :"))
if row>0 and row<4 and col>0 and col<4 and board[row-1][col-1]=='_':
break
else:
print("Invalid input, please enter again")
board[row-1][col-1]=symbol
def play():
    # Alternate X and O for at most nine moves; stop as soon as one player wins
    winner = None
    for turn in range(9):
        if turn%2==0:
            print("X turn")
            place(p1s)
            if won(p1s):
                winner = p1s
                break
        else:
            print("O turn")
            place(p2s)
            if won(p2s):
                winner = p2s
                break
    if winner is None:
        print("Draw")
play() | [
"[email protected]"
] | |
9c09878a0a534b65405d82807aec9244664c52db | ec8a3782ba4b625626dc8e205209ff6576cb6d26 | /gtex_analysis.py | 994738733257dd69bd3b2a3da19f9ef3d2ebf948 | [
"MIT"
] | permissive | joan-smith/covid19 | 04c3d08a36a966e430552353ad3bd8d4b09bba2d | fef22a26cb2354cbe791f674c578fc2c35630a2a | refs/heads/master | 2021-05-17T17:03:18.041609 | 2020-05-11T02:32:55 | 2020-05-11T02:32:55 | 250,886,925 | 5 | 3 | MIT | 2020-05-11T02:32:57 | 2020-03-28T20:26:38 | Python | UTF-8 | Python | false | false | 3,277 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 21 19:26:43 2020
@author: Joan Smith
"""
#%%
import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
gtex = pd.read_csv("data/raw-data/GTEX/GTEx_Analysis_2017-06-05_v8_RNASeQCv1.1.9_gene_tpm.gct", sep="\t", header=2, index_col=1)
gtex = gtex.drop('Name', axis=1).astype(float)
attributes = pd.read_csv("data/raw-data/GTEX/GTEx_Analysis_v8_Annotations_SampleAttributesDS.txt", sep='\t', index_col=0, dtype=None)
phenotypes = pd.read_csv("data/raw-data/GTEX/GTEx_Analysis_v8_Annotations_SubjectPhenotypesDS.txt", sep='\t', index_col=0)
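# Recode the numeric SEX codes to letters: 2 -> 'F', 1 -> 'M'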
phenotypes[['SEX']] = phenotypes[['SEX']].replace(2, 'F')
phenotypes[['SEX']] = phenotypes[['SEX']].replace(1, 'M')
#%%
def set_labels(ax, labels):
ax.set_xticks(np.arange(1, len(labels) + 1))
ax.set_xticklabels(labels)
#%%
def plot_by_pheno(ace2_tissue_w_pheno, col, tissue):
fig, ax = plt.subplots()
plt.title(tissue + '\nACE2 by ' + col.lower())
pheno = [(i[0], i[1].ACE2.dropna()) for i in ace2_tissue_w_pheno.groupby(col)]
labels, split = zip(*pheno)
ax.violinplot(split, showmeans=True)
set_labels(ax, labels)
ace2_tissue_w_pheno.reset_index().pivot_table(columns='SEX', values='ACE2', index='index').to_csv('gtex_' + tissue.lower() + '_sex.csv')
ace2_tissue_w_pheno.reset_index().pivot_table(columns='AGE', values='ACE2', index='index').to_csv('gtex_' + tissue.lower() + '_age.csv')
#%% LUNG
ace2 = gtex.loc['ACE2']
samples_w_lung = attributes[attributes['SMTS'] == 'Lung'].index
ace2_lung = gtex.loc['ACE2'][samples_w_lung].astype(float)
ace2_lung.index = ace2_lung.index.str[0:10]
ace2_lung_w_pheno = phenotypes.join(ace2_lung, how='inner')
plot_by_pheno(ace2_lung_w_pheno, 'AGE', 'Lung')
plot_by_pheno(ace2_lung_w_pheno, 'SEX', 'Lung')
#%% Esophogeal Mucosa
samples_w_esoph_muc = attributes[attributes['SMTSD'] == 'Esophagus - Mucosa'].index
ace2_esoph = gtex.loc['ACE2'][samples_w_esoph_muc].astype(float)
ace2_esoph.index = ace2_esoph.index.str[0:10]
ace2_esoph_w_pheno = phenotypes.join(ace2_esoph, how='inner')
plot_by_pheno(ace2_esoph_w_pheno, 'AGE', 'Esophagus - Mucosa')
plot_by_pheno(ace2_esoph_w_pheno, 'SEX', 'Esophagus - Mucosa')
#%% Salivary
samples_w_salivary = attributes[attributes['SMTS'] == 'Salivary Gland'].index
ace2_sal = gtex.loc['ACE2'][samples_w_salivary].astype(float)
ace2_sal.index = ace2_sal.index.str[0:10]
ace2_sal_w_pheno = phenotypes.join(ace2_sal, how='inner')
plot_by_pheno(ace2_sal_w_pheno, 'AGE', 'Salivary Gland')
plot_by_pheno(ace2_sal_w_pheno, 'SEX', 'Salivary Gland')
#%% All Tissue
#%% Plot All Tissue
ace2_tissue = gtex.loc[['ACE2']].T.join(attributes)
ace2_tissue['ACE2'] = np.log2(ace2_tissue['ACE2'] + 1)
fig, ax = plt.subplots()
plt.title('ACE2 by tissue')
order = ace2_tissue.groupby('SMTS')['ACE2'].apply(np.mean).sort_values()
print(order)
g = {i[0]: i[1].ACE2.dropna() for i in ace2_tissue.groupby('SMTS')}
ordered_g= [(k, g[k]) for k in order.index]
labels, split = zip(*ordered_g)
ax.violinplot(split, showmeans=True)
set_labels(ax, labels)
plt.xticks(rotation=45)
plt.show()
#%% Export Data for All Tissue
ace2_tissue.reset_index().pivot_table(columns='SMTS', values='ACE2', index='index').to_csv('ace2_by_tissue.csv')
| [
"[email protected]"
] | |
a1998a2770535081f87356b6c61202ca5dfedb68 | f70da40112a40431a579c2512f4697f9f77b9b5b | /ALDS1/ALDS1_4_C.py | ce35334aefd168ad81273518b7eba233323ee6d0 | [] | no_license | ushiko/AOJ | cc5572cf6ce8d56eeb84973d72e43dc564541a39 | 24b26e1e6094a654d152247043e7907e365a8793 | refs/heads/master | 2020-04-04T16:19:32.868570 | 2018-12-15T01:29:04 | 2018-12-15T01:29:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | # http://judge.u-aizu.ac.jp/onlinejudge/description.jsp?id=ALDS1_4_B&lang=jp
# Binary Search : python3
# 2018.12.08 yonezawa
#from collections import deque
#import sys
#input = sys.stdin.readline
#import cProfile
def main():
dic = {}
for i in range(int(input())):
l = list(map(str,input().split()))
if l[0] == 'insert':
dic[l[1]] = 'yes'
else:
try:
                print(dic[l[1]])
            except KeyError:
print ('no')
if __name__ == '__main__':
main()
# pr = cProfile.Profile()
# pr.runcall(main)
# pr.print_stats() | [
"[email protected]"
] | |
b4c87370b54e8398f6c23424e37dce01724410c3 | 31d7fb05f94decc77b388d6e0acc17666920b263 | /ximalayapy/ximalaya.py | 4e311b2ae27bacb376d2621c599ae2fc241cafb2 | [] | no_license | skygongque/login-ximalaya | 0842a93ae699f64d9e2c0a347014c32de9d0b050 | ad00a9f2177e906bf4cef2d934cb388736c84da7 | refs/heads/master | 2023-06-01T01:17:41.452836 | 2020-07-22T06:09:06 | 2020-07-22T06:09:06 | 241,030,534 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,210 | py | import requests
from get_captcha import Captcha
import execjs
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5
import base64
import hashlib
import json
import time
requests.packages.urllib3.disable_warnings()
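# Reproduce the site's JavaScript session-id generator ("xm_" + base-36 timestamp + random suffix) via execjs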
def get_sessionId():
jstext = """
function get_sessionId(){
var t, o;
var sessionId;
o = +new Date,
sessionId = "" + (t || "xm_") + o.toString(36) + Math.random().toString(36).substr(2, 6)
return sessionId
}
"""
ctx = execjs.compile(jstext)
sessionId = ctx.call('get_sessionId')
return sessionId
class Ximalaya:
def __init__(self):
self.sessionId = get_sessionId()
# self.sessionId = 'xm_k6ptqdnoapge1w'
self.nonce_url = 'https://passport.ximalaya.com/web/nonce/'
self.login_url = 'https://passport.ximalaya.com/web/login/pwd/v1'
self.headers = {
'Content-Type': 'application/json',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.106 Safari/537.36',
}
self.captcha = Captcha(self.sessionId)
self.token = None
self.session = requests.Session()
self.web_pl_url = "https://mermaid.ximalaya.com/collector/web-pl/v1"
self.login_headers = {
'Accept': '*/*',
# 'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9,en-GB;q=0.8,en;q=0.7',
# 'Connection': 'keep-alive',
# 'Content-Length': '359',
'Content-Type': 'application/json',
# 'Cookie': '_xmLog=xm_k6nhsw6tl2n3fq; x_xmly_traffic=utm_source%253A%2526utm_medium%253A%2526utm_campaign%253A%2526utm_content%253A%2526utm_term%253A%2526utm_from%253A; Hm_lvt_4a7d8ec50cfd6af753c4f8aee3425070=1581837776,1581840339,1581850690,1581905520; Hm_lpvt_4a7d8ec50cfd6af753c4f8aee3425070=1581905520; fds_otp=7400888312179453443',
'Host': 'passport.ximalaya.com',
'Origin': 'https://www.ximalaya.com',
'Referer': 'https://www.ximalaya.com/',
'Sec-Fetch-Dest': 'empty',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-site',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.106 Safari/537.36'
}
self.session.headers.update(self.login_headers)
def captcha_verify(self):
""" 获得滑动验证通过后的token,最后的登录post需要添加到cookie中 """
for i in range(10):
if self.token:
break
self.token = self.captcha.check_captcha()
time.sleep(1)
time.sleep(1)
def get_nonce(self):
        # The cookies here are optional (the request works with or without them)
cookies={'fds_otp':self.token}
# print(cookies)
response = self.session.get(self.nonce_url+Captcha.get_time(),verify=False)
return response.json()['nonce']
def encrypt_password(self,password):
""" rsa加密密码,并用base64编码 """
modules = "009585A4773ABEECB949701D49762F2DFAB9599BA19DFE1E1A2FA200E32E0444F426DA528912D9EA8669515F6F1014C454E1343B97ABF7C10FE49D520A6999C66B230E0730C3F802D136A892501FF2B13D699B5C7ECBBFEF428AC36D3D83A5BD627F18746A7FDC774C12A38DE2760A3B95C653C10D7EB7F84722976251F649556B"
rsa_public_key = RSA.construct((int(modules,16),int('10001',16)))
cipher_rsa = PKCS1_v1_5.new(rsa_public_key)
temp = cipher_rsa.encrypt(password.encode())
return base64.b64encode(temp)
def get_signature(self,account,nonce,password):
""" sha1进行签名 """
# 签名前大写upper()
raw = f"account={account}&nonce={nonce}&password={password}&WEB-V1-PRODUCT-E7768904917C4154A925FBE1A3848BC3E84E2C7770744E56AFBC9600C267891F"
return hashlib.sha1(raw.upper().encode()).hexdigest()
def get_login_data(self,account,password):
nonce = self.get_nonce()
encrypted_password = self.encrypt_password(password)
encrypted_password = str(encrypted_password,'utf-8')
post_data = {
'account': account,
'password': encrypted_password,
'nonce': nonce,
'signature': self.get_signature(account,nonce,encrypted_password),
'rememberMe': 'false',
}
return post_data
def login(self,account,password):
post_data = self.get_login_data(account,password)
print(json.dumps(post_data))
cookies={'fds_otp':self.token}
        # The core login POST request: the cookie must be included here
response = self.session.post(self.login_url,data=json.dumps(post_data),cookies=cookies,verify=False)
if response.status_code==200:
print(response.json())
else:
print(response.text)
def run(self):
account = ''
password = ''
self.captcha_verify()
self.login(account,password)
return self.session.cookies
if __name__ == "__main__":
t = Ximalaya()
login_cookie = t.run()
print(login_cookie) | [
"[email protected]"
] | |
db7606a7fccbd613232e00a03aa0db37b1915dbd | 125a328f222e5dab133c5819913728c6e2f94b3e | /core02/student.py | 3e1d1becb88c188ef8cf2aa99b82a980c4e04fa7 | [] | no_license | nbwuwei/itany-python | be1fb948cf55ef6328657cee65d07087780e22fa | 762bece0c2e7b0ecd39f78048fa43b6ae2547695 | refs/heads/master | 2021-01-25T13:10:58.284397 | 2018-03-01T02:55:59 | 2018-03-01T02:55:59 | 123,535,559 | 1 | 0 | null | 2018-03-02T05:40:27 | 2018-03-02T05:40:27 | null | UTF-8 | Python | false | false | 1,322 | py | class Student:
    # Class-level (static) attribute
some_static_attr = 1
    # The class constructor
    # Its method name is __init__(self)
    # self plays the role of the "this" object
    # The name "self" can be changed, but doing so is not recommended
    # !!! The first parameter of every instance method in a class is self
    # However, when calling a method you do not pass self yourself; the Python interpreter supplies it
    # Instance attributes in Python do not need to be declared in the class body
    # They are declared directly inside __init__ as self.<attribute name>
def __init__(self,name="",age=""):
self.id = None
self.name = name
self.age = age
self.score = None
pass
def some_method(self):
pass
@classmethod
def static_method(cls):
print(cls)
pass
Student.static_method()
# s = Student()
# # s.some_method()
# s1 = Student()
# Attributes can be added to a Python object dynamically
# s.addr = "Nanjing"
# print(s.addr)
#
# print(s1.addr)
#
# # print(s)
# In Python, always use the class name to access class (static) attributes
print(Student.some_static_attr)
s = Student()
# Looks for an instance attribute on s; none exists, so the class attribute is found >> 1
print(s.some_static_attr)
# Add an instance attribute to s with the value 2
s.some_static_attr = 2
# s now has its own instance attribute with the value 2
print(s.some_static_attr)
print(Student.some_static_attr) | [
"[email protected]"
] | |
b1319b3f4d9e59d7f6923b1bb1c3a7a3441f32f9 | eb58833180d76311876c5d1f6d086d5f4d11ad94 | /blog/migrations/0002_auto_20190410_1846.py | 9e67bf531e92799e5e13b5f1fb8d35c12b0dcfff | [] | no_license | alchupin/django_s_1 | 9ac61a02de997857f6e947ea6af5768975304a7f | 0331a4ff5cfd420a7a695b174fafcf0ac02b358d | refs/heads/master | 2020-05-14T00:44:52.484383 | 2019-04-30T13:45:55 | 2019-04-30T13:45:55 | 181,681,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | # Generated by Django 2.2 on 2019-04-10 18:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('slug', models.SlugField(unique=True)),
],
),
migrations.AddField(
model_name='post',
name='tags',
field=models.ManyToManyField(blank=True, related_name='posts', to='blog.Tag'),
),
]
| [
"[email protected]"
] | |
5ee926f717a43257bc8e159cccb1a427dae87f2a | bba4887f2a2a8e672fd4ec7ced8109a2762230ef | /app/models.py | 6e7ad97a7ffa5e438e89a4e1907ed96941910052 | [] | no_license | cadyherron/microblog | a5f621ea8a43f2de4d704ce36bdbb7578ce393c4 | 5a5727acbb4da1ecaeff0ce5c7b83c10a52d3384 | refs/heads/master | 2021-01-20T18:07:43.466141 | 2016-06-06T16:25:28 | 2016-06-06T16:25:28 | 60,217,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,272 | py | from app import db, app
from hashlib import md5
# this table only has foreign keys, so let's create it using SQLAlchemy API:
followers = db.Table('followers',
db.Column('follower_id', db.Integer, db.ForeignKey('user.id')),
db.Column('followed_id', db.Integer, db.ForeignKey('user.id'))
)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
nickname = db.Column(db.String(64), index=True, unique=True) # column type(max length), index, uniqueness
email = db.Column(db.String(120), index=True, unique=True)
posts = db.relationship('Post', backref='author', lazy='dynamic')
about_me = db.Column(db.String(140))
last_seen = db.Column(db.DateTime)
followed = db.relationship('User',
secondary=followers, # association table
primaryjoin=(followers.c.follower_id == id), # odd syntax because followers table is not a model
secondaryjoin=(followers.c.followed_id == id),
backref=db.backref('followers', lazy='dynamic'),
lazy='dynamic')
@property
def is_authenticated(self):
return True
@property
def is_active(self):
return True
@property
def is_anonymous(self):
        # Regular user accounts are never anonymous
        return False
def get_id(self):
try:
return unicode(self.id)
except NameError:
return str(self.id)
def avatar(self, size):
return 'http://www.gravatar.com/avatar/%s?d=mm&s=%d' % (md5(self.email.encode('utf-8')).hexdigest(), size)
@staticmethod
def make_unique_nickname(nickname):
if User.query.filter_by(nickname=nickname).first() is None:
return nickname
version = 2
while True:
new_nickname = nickname + str(version)
if User.query.filter_by(nickname=new_nickname).first() is None:
break
version += 1
return new_nickname
def __repr__(self):
return '<User %r>' % self.nickname # print method, for debugging
def follow(self, user):
if not self.is_following(user):
self.followed.append(user)
return self
def unfollow(self, user):
if self.is_following(user):
self.followed.remove(user)
return self
def is_following(self, user):
return self.followed.filter(followers.c.followed_id == user.id).count() > 0
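    # Posts written by the users this user follows, newest first (joined through the followers association table)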
def followed_posts(self):
return Post.query.join(followers, (followers.c.followed_id == Post.user_id))\
.filter(followers.c.follower_id == self.id)\
.order_by(Post.timestamp.desc())
import sys
if sys.version_info >= (3, 0):
enable_search = False
else:
enable_search = True
import flask_whooshalchemy as whooshalchemy
class Post(db.Model):
__searchable__ = ['body']
id = db.Column(db.Integer, primary_key=True)
body = db.Column(db.String(140))
timestamp = db.Column(db.DateTime)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
def __repr__(self):
return '<Post %r>' % (self.body)
if enable_search:
whooshalchemy.whoosh_index(app, Post) | [
"[email protected]"
] | |
2fe822f0708c0a3845e8e05bab744214f00e7eab | 72fcac20e1d55d59e50fa6a552dceea1246114e7 | /comments/comments_page/models.py | 5f93874ee1ef80655c412555a439688d212d2a79 | [] | no_license | NataliaPlatova/Comments | 45f1471191c635ebaf9b3e29f0f84599263e71a7 | 41e7a7e4f329a2a1501292459320f53faa8209fc | refs/heads/master | 2023-04-27T09:06:28.417830 | 2019-08-15T12:57:49 | 2019-08-15T12:57:49 | 202,290,088 | 0 | 0 | null | 2022-04-22T22:10:20 | 2019-08-14T06:39:22 | Python | UTF-8 | Python | false | false | 398 | py | from django.db import models
class Review(models.Model):
"""Model for book reviews"""
my_email = models.CharField(max_length=40)
books_name = models.CharField(max_length=80)
rating = models.IntegerField(default=1)
comment = models.TextField(max_length=1000)
pub_date = models.DateField('date published')
def __str__(self):
return self.books_name
| [
"[email protected]"
] | |
35725d3eedf167d9a9ccd076852853f75d9cfdcf | 7491ff06e0245ab23eab524fc241ddece4e7b2c7 | /env/bin/gunicorn | 4b2b98b8846aa58741b28f0cbd50f770fcb2c215 | [] | no_license | ekiprop/switchapi | 1694cedc7b01de07f9fc3367c3651b94918c15da | dd257be3e4655b3e1dd1ce196425aa0c7031a6c9 | refs/heads/master | 2021-09-26T11:18:50.387737 | 2020-03-21T20:10:43 | 2020-03-21T20:10:43 | 244,697,740 | 0 | 0 | null | 2021-09-22T18:46:41 | 2020-03-03T17:21:57 | Python | UTF-8 | Python | false | false | 248 | #!/home/ekiprop/kip_code/switchapi/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.wsgiapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run())
| [
"[email protected]"
] | ||
eccc81b6d956eeebf8723b42d9e32e1425e4d0c5 | c16ab81defbeb82c1edec880fe3dc48cbf5e4b68 | /databases2019/orders/views.py | 96e6d180ba74c3e2f6f5b17ba3ff0214931f7d85 | [] | no_license | korneliakli/Bazy-danych | d68a16528531343915d314c5d76d7b9671182d30 | 73f8835b33655a4ec16d21c6ab216c9ff048e79d | refs/heads/master | 2020-12-14T11:14:40.295407 | 2020-01-18T14:41:24 | 2020-01-18T14:41:24 | 234,724,375 | 1 | 1 | null | 2020-01-18T14:46:14 | 2020-01-18T11:26:28 | Python | UTF-8 | Python | false | false | 3,967 | py | from django.shortcuts import render
from django.shortcuts import get_object_or_404, render, redirect
from django.http import HttpResponse
from django.urls import reverse
from django.views import generic
from django.forms import formset_factory, inlineformset_factory
from django.db.models import Sum
from northwind.models import Orders as OrdersModel
from northwind.models import OrderDetails as OrderDetailsModel
from northwind.models import Products
import decimal
from django.contrib import messages
from .forms import make_order_form, add_products
def make_order(request):
order=OrdersModel()
if request.method == 'POST':
order_form = make_order_form(request.POST)
if order_form.is_valid():
order = order_form.save(commit=False)
order.save()
messages.success(request, f'Succesfully created new order no. {order.order_id}')
return redirect('add_order_details/' + str(order.order_id) + '/')
else:
order_form = make_order_form(instance=order)
return render(request, 'orders/add_order.html', context = {'order_form': order_form,'order': order})
def add_order_details(request, order_id):
order_details = OrderDetailsModel()
if request.method == 'POST':
order_details_form = add_products(request.POST)
if order_details_form.is_valid():
order_details = order_details_form.save(commit=False)
prd_id = order_details_form.cleaned_data.get('product_id')
ordered_product = Products.objects.filter(product_id = prd_id).first()
in_stock = ordered_product.units_in_stock
on_order = ordered_product.units_on_order
if order_details.quantity > (in_stock - on_order):
messages.warning(request, f'Provided quantity is too large. Available quantity: {in_stock - on_order}')
return redirect('/orders/add_order_details/' + str(order_id) + '/')
else:
order_details.order_id_id = order_id
order_details.product_id_id = int(order_details.product_id)
unit_price = Products.objects.filter(product_id = prd_id).first().unit_price
order_details.unit_price = unit_price
order_details.discount /= 100
order_details.save()
total_price = (unit_price * order_details.quantity * decimal.Decimal(1 - order_details.discount)).quantize(decimal.Decimal('0.01'))
messages.success(request, f'Products of total value {total_price} EUR added to order no. {order_id}')
if 'more_products' in request.POST:
return redirect('/orders/add_order_details/' + str(order_id) + '/')
else:
return redirect('/orders/' + str(order_id) + '/')
else:
order_details_form= add_products(instance=order_details)
return render(request, 'orders/add_order_details.html', context = {'order_details_form': order_details_form, 'order_id': order_id})
class OrderList(generic.ListView):
model = OrdersModel
template_name = 'orders/show_orders.html'
context_object_name = 'my_orders'
queryset = OrdersModel.objects.order_by('order_id')
class order_detail(generic.DetailView):
model = OrdersModel
template_name = 'orders/order_details.html'
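    # Order total: sum of (1 - discount) * quantity * unit_price over all order lines, plus the freight charge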
def totalPrice(self):
total = 0
for i in self.get_object().orderdetails_set.values():
total += ( 1 - decimal.Decimal(i.get('discount')) ) * i.get('quantity') * i.get('unit_price')
return total + self.get_object().freight
def order_detail_view(self, request, primary_key):
orders = OrdersModel.objects.get(order_id = primary_key)
return render(request, 'orders/order_details.html', context = {'orders': orders})
| [
"[email protected]"
] | |
03687a1d41829240c683f48a9856d079e26b5cc2 | a3abf65332e356c7ec19b7c5ca777d755951fbfc | /meiduo_mall/meiduo_mall/apps/contents/urls.py | 5caecb69e5fcd8e5b10cd46bb5e756187cbfcfd0 | [
"MIT"
] | permissive | YangJaZeng/meiduo_probject | 55cbacb05de34134f017f016fa5be289278fc840 | 7daf305e70ec9d64c896c23a53b52ad2231c85ce | refs/heads/master | 2020-05-23T10:27:17.157774 | 2019-06-02T11:26:51 | 2019-06-02T11:26:51 | 186,719,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$',views.IndexView.as_view(), name="index")
] | [
"[email protected]"
] | |
fc4949f67fbe412b180ea9333641ba6fa1ddf3ba | 2bbd384b83f1cf4bb654573622ad1b7aea305d6c | /04.py | 935f270f0d64041b229f488580e45ec148729ae3 | [] | no_license | kennygao/adventofcode2020 | f90156edf41becce51beb1eae42118af3b04f962 | d9c2a0ed59652a66ef0747cbba53b3df80c6659f | refs/heads/master | 2023-02-02T00:47:25.870120 | 2020-12-17T00:47:48 | 2020-12-17T00:47:48 | 317,445,469 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,034 | py | import re
with open("input/04.txt") as f:
passports = [
dict(field.split(":") for field in passport.split())
for passport in f.read().split("\n\n")
]
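# Part 1: a passport is valid when all seven required fields are present (cid is optional)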
def valid1(passport):
return {"byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"} <= set(passport.keys())
def valid2(passport):
return valid1(passport) and (
1920 <= int(passport["byr"]) <= 2002
and 2010 <= int(passport["iyr"]) <= 2020
and 2020 <= int(passport["eyr"]) <= 2030
and (
passport["hgt"].endswith("cm")
and 150 <= int(passport["hgt"][:-2]) <= 193
or passport["hgt"].endswith("in")
and 59 <= int(passport["hgt"][:-2]) <= 76
)
and bool(re.fullmatch(r"#[0-9a-f]{6}", passport["hcl"]))
and passport["ecl"] in {"amb", "blu", "brn", "gry", "grn", "hzl", "oth"}
and bool(re.fullmatch(r"[0-9]{9}", passport["pid"]))
)
part1 = sum(map(valid1, passports))
print(f"{part1=}")
part2 = sum(map(valid2, passports))
print(f"{part2=}")
| [
"[email protected]"
] | |
c709311188ed27d6fd515437e9b1b352de597ad1 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/infra/aeppd.py | 4983bd5d01de03dfb0cf643882482f48d1314385 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,904 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class AEpPD(Mo):
meta = ClassMeta("cobra.model.infra.AEpPD")
meta.isAbstract = True
meta.moClassName = "infraAEpPD"
meta.moClassName = "infraAEpPD"
meta.rnFormat = ""
meta.category = MoCategory.REGULAR
meta.label = "None"
meta.writeAccessMask = 0x405
meta.readAccessMask = 0x405
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.comp.RsCtrlrP")
meta.childClasses.add("cobra.model.comp.RsLocalEpCP")
meta.childNamesAndRnPrefix.append(("cobra.model.comp.RsLocalEpCP", "rslocalEpCP"))
meta.childNamesAndRnPrefix.append(("cobra.model.comp.RsCtrlrP", "rsctrlrP-"))
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.comp.Entity")
meta.superClasses.add("cobra.model.naming.NamedIdentifiedObject")
meta.superClasses.add("cobra.model.comp.AEpPD")
meta.concreteSubClasses.add("cobra.model.infra.EpPD")
meta.concreteSubClasses.add("cobra.model.infra.EpPDDef")
meta.rnPrefixes = [
]
prop = PropMeta("str", "bdDn", "bdDn", 25844, PropCategory.REGULAR)
prop.label = "BD DN"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("bdDn", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "classPref", "classPref", 23402, PropCategory.REGULAR)
prop.label = "Classification Preference"
prop.isConfig = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "encap"
prop._addConstant("encap", "packet-encapsulation", 0)
prop._addConstant("useg", "useg", 1)
meta.props.add("classPref", prop)
prop = PropMeta("str", "configFlags", "configFlags", 20925, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("add-del-task-pending", "whether-an-add-or-delete-extpol-task-is-pending-to-be-executed", 16)
prop._addConstant("none", "none", 0)
prop._addConstant("skip-encap-validation", "skip-validation-of-encapsulation-value", 1)
prop._addConstant("skip-ep-attach", "skip-ep-attach", 2)
prop._addConstant("skip-inner-pvlan", "skip-allocation-on-inner-pvlan-for-ave-eppd", 8)
prop._addConstant("skip-pg-create", "skip-pg-creation-on-vds", 4)
meta.props.add("configFlags", prop)
prop = PropMeta("str", "crtrnEnabled", "crtrnEnabled", 18594, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = False
prop.defaultValueStr = "no"
prop._addConstant("no", None, False)
prop._addConstant("yes", None, True)
meta.props.add("crtrnEnabled", prop)
prop = PropMeta("str", "deployIssues", "deployIssues", 979, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("attr-dom-not-supported", "attribute-based-epg-is-not-supported-for-the-domain-type", 131072)
prop._addConstant("ctrlr-not-deployable", "controller-not-deployable.", 32768)
prop._addConstant("ctrlr-unsupported", "unsupported-controller.", 16384)
prop._addConstant("duplicate-static-encap", "duplicate-epg-encapsulation-value", 2097152)
prop._addConstant("dvs-config-failed", "failed-to-configure-dvs.", 4096)
prop._addConstant("dvs-error", "failed-to-get-dvs-config.", 2)
prop._addConstant("dvs-prep-error", "dvs-preparation-error-for-vxlan.", 32)
prop._addConstant("dvs-version-error", "could-not-get-dvs-version.", 512)
prop._addConstant("epg-pol-enforce-missing-pri_encap", "missing-primary-encap-in-vmm-domain-relation-for-policy-enforced-epg", 8388608)
prop._addConstant("epg-pol-enforce-redhat-unsupported", "intra-epg-isolation-is-not-supported-for-redhat-domains", 34359738368)
prop._addConstant("epg-pol-enforce-unsupported", "intra-epg-isolation-is-not-supported-in-avs-vlan-mode-and-ave-vlan-mode-vmm-domains", 16777216)
prop._addConstant("epg-useg-unsupported", "microsegmentation-is-not-supported-for-the-controller", 67108864)
prop._addConstant("epp-mcast-addr-match-fabric", "epg-mcast-address-is-the-same-as-fabric-mcast-address.-domain-fabric-multicast-address-overlaps-multicast-address-pool.", 1048576)
prop._addConstant("internal-vlan-pool-full", "insufficient-vlans.-internal-vlan-pool-full-for-epgs-in-ave-domain", 8589934592)
prop._addConstant("invalid-encap", "no-valid-encapsulation-identifier-allocated-for-the-epg", 1)
prop._addConstant("invalid-encap-mode", "encap-mode-cannot-be-vxlan-when-switching-mode-is-native-for-this-vmm-domain", 17179869184)
prop._addConstant("invalid-intra-epg-isolation", "enforcing-intra-epg-isolation-is-invalid-without-primary-and-secondary-vlans-set", 274877906944)
prop._addConstant("invalid-lag-policy-association", "invalid-enhanced-lag-policy-associated-with-this-epg", 137438953472)
prop._addConstant("invalid-mcast-addr", "no-valid-mcast-address-allocated-for-the-epg", 65536)
prop._addConstant("invalid-netflow-preference", "netflow-cannot-be-enabled-for-epg-associated-to-vmm-domain-in-avs-mode", 1073741824)
prop._addConstant("invalid-static-encap", "epg-encapsulation-value-is-not-part-of-any-static-encapsulation-block-for-the-vmm-domain", 524288)
prop._addConstant("invalid-switching-mode", "vmm-domain-needs-to-be-of-type-cisco-ave-for-switching-mode-ave", 4194304)
prop._addConstant("invalid-trunk-portgroup-range", "trunk-portgroup-vlan-ranges-are-out-of-domain-vlan-pool", 134217728)
prop._addConstant("invalid-usegepg-encap", "primary-encap-property-and-encap-property-both-need-to-be-set", 33554432)
prop._addConstant("invalid-vip", "vip-has-not-been-allocated", 262144)
prop._addConstant("invalid-vlan-encap", "vlan-encap-mode-is-not-allowed-for-ave-non-local-switching-domain", 68719476736)
prop._addConstant("ipv6-dstaddr-unsupported", "netflow-exporter-dstaddr-supports-ipv6-from-dvs-version-6.0-and-higher.-please-use-an-ipv4-address-for-the-netflow-exporter-dstaddr", 2147483648)
prop._addConstant("lacp-apply-failed", "lacp-update-for-portgroup-failed.", 2048)
prop._addConstant("missing-internal-vlan-pool", "missing-internal-vlan-pools-for-epgs-in-ave-domain", 4294967296)
prop._addConstant("no-ctrlr-dn", "controller-dn-missing.", 256)
prop._addConstant("no-ctrlr-map", "controller-missing-from-internal-map.", 128)
prop._addConstant("none", "n/a", 0)
prop._addConstant("pg-api-error", "portgroup-property-get-api-failed.", 1024)
prop._addConstant("policy-not-found", "failed-to-find-policy-container-for-controller.", 8192)
prop._addConstant("prep-vxlan-error", "vxlan-cluster-preparation-error.", 8)
prop._addConstant("proc-vmm-error", "cluster-type-object-missing-from-controller.", 16)
prop._addConstant("resimedcy-unsupported", "resolution-immediacy-not-supported-for-this-domain", 536870912)
prop._addConstant("trunk-portgroup-unsupported", "trunk-portgroup-not-supported", 268435456)
prop._addConstant("unsupported-ctrlr", "invalid-controller-scope.", 64)
prop._addConstant("vdn-error", "network-scope-creation-error.", 4)
meta.props.add("deployIssues", prop)
prop = PropMeta("str", "descr", "descr", 5587, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "encap", "encap", 983, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("encap", prop)
prop = PropMeta("str", "encapAllocKey", "encapAllocKey", 23403, PropCategory.REGULAR)
prop.label = "Encapsulation allocation Key"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("encapAllocKey", prop)
prop = PropMeta("str", "encapChanged", "encapChanged", 43443, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = False
prop.defaultValueStr = "no"
prop._addConstant("no", None, False)
prop._addConstant("yes", None, True)
meta.props.add("encapChanged", prop)
prop = PropMeta("str", "encapCtx", "encapCtx", 18204, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("encapCtx", prop)
prop = PropMeta("str", "epgPKey", "epgPKey", 980, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("epgPKey", prop)
prop = PropMeta("str", "eppDn", "eppDn", 981, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("eppDn", prop)
prop = PropMeta("str", "faultDKey", "faultDKey", 15775, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("faultDKey", prop)
prop = PropMeta("str", "id", "id", 1003, PropCategory.REGULAR)
prop.label = "Id"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("id", prop)
prop = PropMeta("str", "idConsumerDn", "idConsumerDn", 23404, PropCategory.REGULAR)
prop.label = "Ident Consumer DN"
prop.isConfig = True
prop.isAdmin = True
meta.props.add("idConsumerDn", prop)
prop = PropMeta("str", "instrImedcy", "instrImedcy", 34224, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 2
prop.defaultValueStr = "lazy"
prop._addConstant("immediate", "immediate", 1)
prop._addConstant("lazy", "on-demand", 2)
meta.props.add("instrImedcy", prop)
prop = PropMeta("str", "issues", "issues", 34206, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("ds-update-fault", "failed-to-update-ds(data-store)-property", 4096)
prop._addConstant("hpnic-miss-epg-fault", "hpnic-is-attached-to-an-invalid-uplink-dvs-port-group", 32)
prop._addConstant("hv-disconnect-fault", "esx-host-is-disconnected-or-not-responding", 128)
prop._addConstant("hv-miss-dvs-fault", "esx-host-is-configured-with-invalid-dvs", 64)
prop._addConstant("hv-miss-pnic-fault", "esx-host-does-not-have-any-pnic", 1024)
prop._addConstant("hv-miss-vmknic-fault", "esx-host-does-not-have-any-vmknic", 256)
prop._addConstant("hv-update-fault", "failed-to-update-hv(esx-host)-property", 2)
prop._addConstant("lnode-property-fault", "failed-to-get-dvs-property", 1)
prop._addConstant("mgmtnic-miss-dvs-fault", "vmk-nic-is-configured-with-invalid-dvs", 512)
prop._addConstant("none", "none", 0)
prop._addConstant("opflex-channels-down-ave", "both-the-opflex-channels-are-down-for-ave.-please-check-the-connections-on-the-host.", 16384)
prop._addConstant("prep-iso-file-fault", "failed-to-prepare-or-push-iso-file-for-service-vm-deployment.-please-check-if-correct-datastore-is-selected-in-policy.", 131072)
prop._addConstant("rp-update-fault", "failed-to-update-rp(resource-pool)-property", 8192)
prop._addConstant("tagging-cat-fault", "failed-to-retrieve-all-tag-category-information", 65536)
prop._addConstant("tagging-tag-fault", "failed-to-retrieve-all-tag-information", 32768)
prop._addConstant("vm-deploy-task-fault", "deployvm-task-failed-on-vcenter", 2048)
prop._addConstant("vm-miss-hv-fault", "vm-is-attached-to-an-invalid-hv(esx-host)", 8)
prop._addConstant("vm-update-fault", "failed-to-update-vm(virtual-machine)-property", 4)
prop._addConstant("vmfolder-update-fault", "failed-to-update-vm(virtual-machine)-folder-property", 262144)
prop._addConstant("vnic-miss-epg-fault", "vnic-is-attached-to-an-invalid-dvs-port-group-or-unable-to-communicate-with-vcenter", 16)
meta.props.add("issues", prop)
prop = PropMeta("str", "lagPolicyName", "lagPolicyName", 44358, PropCategory.REGULAR)
prop.label = "Enhanced LAG Policy Name"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("lagPolicyName", prop)
prop = PropMeta("str", "lbAlgo", "lbAlgo", 985, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 2
prop.defaultValueStr = "mac-pin"
prop._addConstant("explicit-failover", "explicit-failover", 4)
prop._addConstant("ip-hash", "ip-hash", 1)
prop._addConstant("mac-pin", "mac-pin", 2)
prop._addConstant("mac-pin-nicload", "mac-pin-nicload", 3)
meta.props.add("lbAlgo", prop)
prop = PropMeta("str", "mcastAddr", "mcastAddr", 984, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("mcastAddr", prop)
prop = PropMeta("str", "name", "name", 1004, PropCategory.REGULAR)
prop.label = "Name"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "resImedcy", "resImedcy", 982, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 2
prop.defaultValueStr = "lazy"
prop._addConstant("immediate", "immediate", 1)
prop._addConstant("lazy", "on-demand", 2)
prop._addConstant("pre-provision", "pre-provision", 3)
meta.props.add("resImedcy", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "txId", "txId", 26232, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("txId", prop)
prop = PropMeta("str", "untagged", "untagged", 54963, PropCategory.REGULAR)
prop.label = "Untagged Access Port"
prop.isConfig = True
prop.isAdmin = True
prop.defaultValue = False
prop.defaultValueStr = "no"
prop._addConstant("no", None, False)
prop._addConstant("yes", None, True)
meta.props.add("untagged", prop)
prop = PropMeta("str", "updateTs", "updateTs", 18595, PropCategory.REGULAR)
prop.label = "Update Timestamp"
prop.isOper = True
meta.props.add("updateTs", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
828f6140088a8bd190c9d6b11508434f7467f36e | 99b99a4e4ff98df47badf26065f5fda22b756820 | /classes.py | 14a8e07aa5a8a49af785e8cba7f0ace52c7356d3 | [] | no_license | witcold/project-freecell | 962dbc655acd906f72a341d88245c4858c2ad299 | 80661fe5b7ffb68661d40dffb91b6ca278cab2f5 | refs/heads/master | 2021-01-10T02:21:18.582453 | 2016-01-31T13:35:56 | 2016-01-31T13:35:56 | 43,798,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,371 | py | # coding=utf-8
from enum import Enum
from random import shuffle
class Suit(Enum):
__order__ = 'SPADES CLUBS DIAMONDS HEARTS'
SPADES = '♠'
CLUBS = '♣'
DIAMONDS = '♦'
HEARTS = '♥'
class Value(Enum):
__order__ = 'ACE TWO THREE FOUR FIVE SIX SEVEN EIGHT NINE TEN JACK QUEEN KING'
ACE = 'A'
TWO = '2'
THREE = '3'
FOUR = '4'
FIVE = '5'
SIX = '6'
SEVEN = '7'
EIGHT = '8'
NINE = '9'
TEN = '10'
JACK = 'J'
QUEEN = 'Q'
KING = 'K'
class Card(object):
def __init__(self, suit, value):
"""Creates a Card.
:type suit: Suit
:type value: Value
:rtype: Card
"""
self.suit = suit
self.value = value
def __repr__(self):
return self.value.value + self.suit.value
class Stack(object):
def __init__(self):
self.cards = []
def __getitem__(self, item):
return self.cards.__getitem__(item)
def __iter__(self):
return self.cards.__iter__()
def __len__(self):
return self.cards.__len__()
def append(self, card):
self.cards.append(card)
class Deck(Stack):
def __init__(self):
self.cards = []
for suit in Suit:
for value in Value:
self.cards.append(Card(suit, value))
def shuffle(self):
shuffle(self.cards)
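# Illustrative usage (not part of the original module):
#   deck = Deck()
#   deck.shuffle()
#   print(len(deck), deck[0])   # e.g. prints: 52 7♦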
| [
"[email protected]"
] | |
b44f2d6b782d7a4ace7de749cdf7a45ce0e5a286 | 54a0b86d4c3f731487ad4470fb365907970472e6 | /P1/studentparameters/Project1_Parameters_pd.py | a43613ec4d1fccd11ba26156898bdb1308d827dc | [] | no_license | samiurrahman98/ece458-computer-security | 26aa46e174b0bf77f748e6451dd2e0e4183feebd | cf79430b98e3679ffcd687a0c96b5e979187e1e3 | refs/heads/master | 2022-11-25T01:26:36.874094 | 2020-07-31T21:24:53 | 2020-07-31T21:24:53 | 280,979,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,770 | py | # Select the file name that matches your first two letters of your last name on Learn
# Read those parameters as your ECE458 project 1 parameters
# p,q,g are DSA domain parameters, sk_i (secret keys) are used in each signature and verification
p=16158504202402426253991131950366800551482053399193655122805051657629706040252641329369229425927219006956473742476903978788728372679662561267749592756478584653187379668070077471640233053267867940899762269855538496229272646267260199331950754561826958115323964167572312112683234368745583189888499363692808195228055638616335542328241242316003188491076953028978519064222347878724668323621195651283341378845128401263313070932229612943555693076384094095923209888318983438374236756194589851339672873194326246553955090805398391550192769994438594243178242766618883803256121122147083299821412091095166213991439958926015606973543
q=13479974306915323548855049186344013292925286365246579443817723220231
g=9891663101749060596110525648800442312262047621700008710332290803354419734415239400374092972505760368555033978883727090878798786527869106102125568674515087767296064898813563305491697474743999164538645162593480340614583420272697669459439956057957775664653137969485217890077966731174553543597150973233536157598924038645446910353512441488171918287556367865699357854285249284142568915079933750257270947667792192723621634761458070065748588907955333315440434095504696037685941392628366404344728480845324408489345349308782555446303365930909965625721154544418491662738796491732039598162639642305389549083822675597763407558360
sk1=3945480033374835914527758803915536754190239131061378591380430726165
sk2=8718754536153569348573180981684560806604429048984811441243860897622
sk3=998326477562698219433839499133285944350668707227262675157007747889
| [
"[email protected]"
] | |
82c650b0ac290467d3a50aad83b7ccd4abe4d061 | 6b6cebb1708f4666f39052f5ae70bf87b27117d8 | /Exercicios/ex109_files/moeda.py | 3c8941f9e182416e66c985a1badd1643787f4cf9 | [] | no_license | claudiodornelles/CursoEmVideo-Python | 04abc7d3ab819c77be4e8f3c059af9df351beda7 | 7a80129d4ac90a9ea45720ff0ce80a0c83311cb4 | refs/heads/master | 2023-06-02T17:40:43.765928 | 2021-06-15T20:11:57 | 2021-06-15T20:11:57 | 360,858,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | def moeda(valor = 0):
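    # Format a number as Brazilian currency text, e.g. moeda(3.1) -> 'R$3.10'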
return (f'R${valor:.2f}')
def aumentar(valor = 0, taxa = 0, formatar = False):
valor *= 1 + (taxa / 100)
if formatar:
return moeda(valor)
else:
return valor
def diminuir(valor = 0, taxa = 0, formatar = False):
valor *= 1 - (taxa / 100)
if formatar:
return moeda(valor)
else:
return valor
def dobro(valor = 0, formatar = False):
valor *=2
if formatar:
return moeda(valor)
else:
return valor
def metade(valor = 0, formatar = False):
valor /= 2
if formatar:
return moeda(valor)
else:
return valor
| [
"[email protected]"
] | |
7fcff98621381339d39981dd8216bbb9fb3ad8b0 | 39d4504ec1da8975fac526d6801b94f4348b6b61 | /research/syntaxnet/dragnn/tools/model_trainer.py | fe0d0f73b9e6cdd9d9a49cc855a774796908a6d2 | [
"Apache-2.0"
] | permissive | vincentcheny/models | fe0ff5888e6ee00a0d4fa5ee14154acdbeebe7ad | afb1a59fc1bc792ac72d1a3e22e2469020529788 | refs/heads/master | 2020-07-23T21:38:24.559521 | 2019-11-15T07:50:11 | 2019-11-15T07:50:11 | 207,712,649 | 1 | 0 | Apache-2.0 | 2019-09-11T03:12:31 | 2019-09-11T03:12:31 | null | UTF-8 | Python | false | false | 8,218 | py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trainer for generic DRAGNN models.
This trainer uses a "model directory" for both input and output. When invoked,
the model directory should contain the following inputs:
<model_dir>/config.txt: A stringified dict that defines high-level
configuration parameters. Unset parameters default to False.
<model_dir>/master.pbtxt: A text-format MasterSpec proto that defines
the DRAGNN network to train.
<model_dir>/hyperparameters.pbtxt: A text-format GridPoint proto that
defines training hyper-parameters.
<model_dir>/targets.pbtxt: (Optional) A text-format TrainingGridSpec whose
"target" field defines the training targets. If missing, then default
training targets are used instead.
On success, the model directory will contain the following outputs:
<model_dir>/checkpoints/best: The best checkpoint seen during training, as
measured by accuracy on the eval corpus.
<model_dir>/tensorboard: TensorBoard log directory.
Outside of the files and subdirectories named above, the model directory should
contain any other necessary files (e.g., pretrained embeddings). See the model
builders in dragnn/examples.
"""
import ast
import collections
import os
import os.path
from absl import app
from absl import flags
import tensorflow as tf
from google.protobuf import text_format
from dragnn.protos import spec_pb2
from dragnn.python import evaluation
from dragnn.python import graph_builder
from dragnn.python import sentence_io
from dragnn.python import spec_builder
from dragnn.python import trainer_lib
from syntaxnet.ops import gen_parser_ops
from syntaxnet.util import check
FLAGS = flags.FLAGS
flags.DEFINE_string('tf_master', '',
'TensorFlow execution engine to connect to.')
flags.DEFINE_string('model_dir', None, 'Path to a prepared model directory.')
flags.DEFINE_string(
'pretrain_steps', None,
'Comma-delimited list of pre-training steps per training target.')
flags.DEFINE_string(
'pretrain_epochs', None,
'Comma-delimited list of pre-training epochs per training target.')
flags.DEFINE_string(
'train_steps', None,
'Comma-delimited list of training steps per training target.')
flags.DEFINE_string(
'train_epochs', None,
'Comma-delimited list of training epochs per training target.')
flags.DEFINE_integer('batch_size', 4, 'Batch size.')
flags.DEFINE_integer('report_every', 200,
'Report cost and training accuracy every this many steps.')
def _read_text_proto(path, proto_type):
"""Reads a text-format instance of |proto_type| from the |path|."""
proto = proto_type()
with tf.gfile.FastGFile(path) as proto_file:
text_format.Parse(proto_file.read(), proto)
return proto
def _convert_to_char_corpus(corpus):
"""Converts the word-based |corpus| into a char-based corpus."""
with tf.Session(graph=tf.Graph()) as tmp_session:
conversion_op = gen_parser_ops.segmenter_training_data_constructor(corpus)
return tmp_session.run(conversion_op)
def _get_steps(steps_flag, epochs_flag, corpus_length):
"""Converts the |steps_flag| or |epochs_flag| into a list of step counts."""
if steps_flag:
return map(int, steps_flag.split(','))
return [corpus_length * int(epochs) for epochs in epochs_flag.split(',')]
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
check.NotNone(FLAGS.model_dir, '--model_dir is required')
check.Ne(FLAGS.pretrain_steps is None, FLAGS.pretrain_epochs is None,
'Exactly one of --pretrain_steps or --pretrain_epochs is required')
check.Ne(FLAGS.train_steps is None, FLAGS.train_epochs is None,
'Exactly one of --train_steps or --train_epochs is required')
config_path = os.path.join(FLAGS.model_dir, 'config.txt')
master_path = os.path.join(FLAGS.model_dir, 'master.pbtxt')
hyperparameters_path = os.path.join(FLAGS.model_dir, 'hyperparameters.pbtxt')
targets_path = os.path.join(FLAGS.model_dir, 'targets.pbtxt')
checkpoint_path = os.path.join(FLAGS.model_dir, 'checkpoints/best')
tensorboard_dir = os.path.join(FLAGS.model_dir, 'tensorboard')
with tf.gfile.FastGFile(config_path) as config_file:
config = collections.defaultdict(bool, ast.literal_eval(config_file.read()))
train_corpus_path = config['train_corpus_path']
tune_corpus_path = config['tune_corpus_path']
projectivize_train_corpus = config['projectivize_train_corpus']
master = _read_text_proto(master_path, spec_pb2.MasterSpec)
hyperparameters = _read_text_proto(hyperparameters_path, spec_pb2.GridPoint)
targets = spec_builder.default_targets_from_spec(master)
if tf.gfile.Exists(targets_path):
targets = _read_text_proto(targets_path, spec_pb2.TrainingGridSpec).target
# Build the TensorFlow graph.
graph = tf.Graph()
with graph.as_default():
tf.set_random_seed(hyperparameters.seed)
builder = graph_builder.MasterBuilder(master, hyperparameters)
trainers = [
builder.add_training_from_config(target) for target in targets
]
annotator = builder.add_annotation()
builder.add_saver()
# Read in serialized protos from training data.
train_corpus = sentence_io.ConllSentenceReader(
train_corpus_path, projectivize=projectivize_train_corpus).corpus()
tune_corpus = sentence_io.ConllSentenceReader(
tune_corpus_path, projectivize=False).corpus()
gold_tune_corpus = tune_corpus
# Convert to char-based corpora, if requested.
if config['convert_to_char_corpora']:
# NB: Do not convert the |gold_tune_corpus|, which should remain word-based
# for segmentation evaluation purposes.
train_corpus = _convert_to_char_corpus(train_corpus)
tune_corpus = _convert_to_char_corpus(tune_corpus)
pretrain_steps = _get_steps(FLAGS.pretrain_steps, FLAGS.pretrain_epochs,
len(train_corpus))
train_steps = _get_steps(FLAGS.train_steps, FLAGS.train_epochs,
len(train_corpus))
check.Eq(len(targets), len(pretrain_steps),
'Length mismatch between training targets and --pretrain_steps')
check.Eq(len(targets), len(train_steps),
'Length mismatch between training targets and --train_steps')
# Ready to train!
tf.logging.info('Training on %d sentences.', len(train_corpus))
tf.logging.info('Tuning on %d sentences.', len(tune_corpus))
tf.logging.info('Creating TensorFlow checkpoint dir...')
summary_writer = trainer_lib.get_summary_writer(tensorboard_dir)
checkpoint_dir = os.path.dirname(checkpoint_path)
if tf.gfile.IsDirectory(checkpoint_dir):
tf.gfile.DeleteRecursively(checkpoint_dir)
elif tf.gfile.Exists(checkpoint_dir):
tf.gfile.Remove(checkpoint_dir)
tf.gfile.MakeDirs(checkpoint_dir)
with tf.Session(FLAGS.tf_master, graph=graph) as sess:
# Make sure to re-initialize all underlying state.
sess.run(tf.global_variables_initializer())
trainer_lib.run_training(sess, trainers, annotator,
evaluation.parser_summaries, pretrain_steps,
train_steps, train_corpus, tune_corpus,
gold_tune_corpus, FLAGS.batch_size, summary_writer,
FLAGS.report_every, builder.saver, checkpoint_path)
tf.logging.info('Best checkpoint written to:\n%s', checkpoint_path)
if __name__ == '__main__':
app.run(main)
| [
"[email protected]"
] | |
74824ac6e1e4b41330a99318321da4dd36121cd9 | e3cd564e2be64cfc767438bcba47aa403a15d97c | /Problem47.py | 9e4b74a7768d12ed16ca7b3f86d575f40d0f4761 | [] | no_license | NiltonGMJunior/project-euler | 1058826f6f52b7cdb7f83b283ba70399a82da15f | 91d82dbfdf1a65e0d1e9c93040c875aec4fb736d | refs/heads/master | 2020-03-25T03:12:33.349969 | 2018-11-10T22:45:12 | 2018-11-10T22:45:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | def isPrime(n):
if n < 2:
return False
elif (n == 2) or (n == 3):
return True
elif n % 2 == 0:
return False
k = 3
while k < n:
if n % k == 0:
return False
k += 2
return True
def primeDiv(n):
    # Prime factorisation by trial division: strip factors of 2 first, then try odd divisors
    div = []
    ref = n
    while ref % 2 == 0:
        ref //= 2
        div.append(2)
    p = 3
    while ref > 1:
        if ref % p == 0:
            ref //= p
            div.append(p)
        else:
            p += 2
    return div
def main():
# consec = 0
# n = 646
# while consec < 4:
print(primeDiv(14))
return
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
bbe57d763b216dbe2a08f88b6f8dae07b110d804 | 66ca8f005d89257269aafcae8a26b75d530d59b2 | /unittests/test_osrelease.py | 441955e7e98a3b89173b55e4847e45f40fa65677 | [
"MIT"
] | permissive | eccles/lnxproc | dea935a732e571a8a237f7ab96f044d5565712d2 | f9d9a75a0a233d2def76b150784108dc453ad9ad | refs/heads/master | 2023-06-08T04:16:35.018369 | 2023-05-29T09:24:48 | 2023-05-29T09:24:48 | 6,925,816 | 1 | 0 | MIT | 2023-05-29T09:24:50 | 2012-11-29T17:53:31 | Python | UTF-8 | Python | false | false | 336 | py | '''
Test OSrelease()
'''
from lnxproc import osrelease
from .basetestcase import BaseTestCase
class TestOSrelease(BaseTestCase):
'''
Test OSrelease class
'''
key = 'OSrelease'
module = osrelease
def test_osrelease(self):
'''
Test normal instantiation
'''
self.generic_test()
| [
"[email protected]"
] | |
a724e6f175e77b719ff3ed3648565d8808140b6e | 866e1e9816301baefc2e178c1eeabc2ff2942781 | /django_learnit/views/detail.py | df8b93ce5599854ea8b413d39a57e9e1764a6c51 | [] | no_license | florianpaquet/django-learnit | b89f0d24014bd7435c861b73a0d44c4ec81d183f | e7713cf11e5af23e28dadd9aff226ea7c14de36b | refs/heads/master | 2020-12-01T04:08:29.212629 | 2016-09-08T20:32:08 | 2016-09-08T20:32:08 | 67,214,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,210 | py | from django.views.generic import TemplateView
from .base import LearningModelMixin
class LearningModelDetailView(LearningModelMixin, TemplateView):
"""
LearningModel detail view
"""
def get(self, *args, **kwargs):
self.learning_model = self.get_learning_model()
return super(LearningModelDetailView, self).get(*args, **kwargs)
def get_context_data(self, **kwargs):
"""
Adds learning model context data
"""
context = super(LearningModelDetailView, self).get_context_data(**kwargs)
# Add the 10 most recently edited LabelledDocuments
context['recently_updated_labelled_documents'] = self.learning_model\
.get_labelled_documents_queryset()\
.order_by('-modified')[:10]
return context
def get_template_names(self):
"""
Returns learning model specific template name along with a default one
to easily allow template overriding
"""
return [
'django_learnit/learning_models/%(name)s_detail.html' % {
'name': self.learning_model.get_name()
},
'django_learnit/learning_models/detail.html'
]
| [
"[email protected]"
] | |
611246ab14211021ee7f0fa861fcdc92c2c37b1c | 5ddc9fc8e2b078f94e18db5cb3dc00a988470fa8 | /plotserver.py | 4566b8aa4c2c4c14ab746e085893fd4b07630256 | [
"MIT"
] | permissive | cpthackray/source-receptor-vis | 0d41418869920519c51d509bf4a4db6a2999cd84 | 2cdb905dbaf2a625e92a883b0abdcf1d19115192 | refs/heads/main | 2023-06-18T00:37:50.008747 | 2021-07-15T18:36:35 | 2021-07-15T18:36:35 | 317,356,961 | 0 | 0 | MIT | 2021-07-15T18:36:35 | 2020-11-30T22:02:11 | Python | UTF-8 | Python | false | false | 3,309 | py | """
Flask server for on-demand source-receptor deposition plots: the user picks a
deposited congener, per-source emission scalings and site coordinates, and the
app renders a deposition map plus per-site figures.
"""
import matplotlib.pyplot as plt
import numpy as np
import io
from flask import Flask, send_file, make_response, request
from flask_restful import Resource, Api
from wtforms import StringField, SubmitField, SelectField, FormField
from flask_wtf import FlaskForm
from wtforms.validators import DataRequired, Length
from flask import render_template, redirect, url_for
from myplots import legal_congeners, legal_emistypes
from myplots import plot_dep, plot_dep_point, legal_congener_names
from myconfig import SECRET_KEY
import base64
app = Flask(__name__)
#api = Api(app)
app.config['SECRET_KEY'] = SECRET_KEY
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 1
data = {}
class DataEntryDep(FlaskForm):
congener = SelectField(label="Deposited congener:",
choices=legal_congener_names)
latlon = StringField(label='Deposition sites: (lat,lon), ...',
# validators=[DataRequired()])
validators=[])
submit = SubmitField("Submit")
def get_latlonlist(self,):
latlist = []
lonlist = []
pairs = self.latlon.data.split('),')
for pair in pairs:
if pair == '':
continue
print(pair)
ll = pair.lstrip('(').rstrip(')').split(',')
latlist.append(ll[0])
lonlist.append(ll[1])
return latlist, lonlist
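    # For example (illustrative input, not from the original code):
    #   latlon.data = "(51.5,-0.1),(40.7,-74.0)"
    #   get_latlonlist() -> (['51.5', '40.7'], ['-0.1', '-74.0'])
    # Note the coordinates come back as strings, exactly as typed into the form.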
def plot2serial(bytes_obj):
return base64.b64encode(bytes_obj.getvalue()).decode()
@app.route('/dep', methods=['GET', 'POST'])
def dep():
form = DataEntryDep()
# Default values:
uniform = 1.
landfill = 1.
wwtp = 1.
hazwaste = 1.
incinerator = 1.
population = 1.
map_url, point_url = None, None
if form.validate_on_submit():
uniform = request.form['uniform']
landfill = request.form['landfill']
wwtp = request.form['wwtp']
hazwaste = request.form['hazwaste']
incinerator = request.form['incinerator']
population = request.form['population']
cong = form.congener.data.upper()
emis = {'uniform': float(uniform),
'landfill': float(landfill),
'wwtp': float(wwtp),
'hazwaste': float(hazwaste),
'incinerator': float(incinerator),
'population': float(population),
}
# lats, lons = form.latitude.data, form.longitude.data
# latlist, lonlist = lats.split(','), lons.split(',')
latlist, lonlist = form.get_latlonlist()
map_url = plot2serial(
plot_dep(cong, emis, latlist=latlist, lonlist=lonlist))
point_url = plot2serial(plot_dep_point(latlist, lonlist, cong, emis))
return render_template('dep.html', form=form,
map_url=map_url, point_url=point_url,
uniform=uniform, landfill=landfill,
wwtp=wwtp, hazwaste=hazwaste,
incinerator=incinerator, population=population)
@app.after_request
def add_header(response):
# response.cache_control.no_store = True
if 'Cache-Control' not in response.headers:
response.headers['Cache-Control'] = 'no-store'
return response
if __name__ == '__main__':
app.run(debug=True)
| [
"[email protected]"
] | |
d63386cf5630ec5d7cdb3ba25e51403aaddd35ff | c7ce0a2a6b203fc97447e272fb4426f998d96e34 | /11/WebScrapingExercise_6.py | 5db1a17fa34af320808ef32080e5210cf09864ae | [] | no_license | vasiliv/MoshHamedaniPython | 5eac1fa8a000a63590899438f14f341e27d14804 | 41dd14c7b88365ededeb142ed21a843553252066 | refs/heads/master | 2023-01-18T18:25:45.931350 | 2020-12-03T14:37:38 | 2020-12-03T14:37:38 | 316,267,305 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | #https://www.w3resource.com/python-exercises/web-scraping/index.php
#Write a Python program to extract h1 tag from example.com
import requests
from bs4 import BeautifulSoup
url = "http://example.com/"
page = requests.get(url)
#print(page.status_code)
soup = BeautifulSoup(page.content, 'html.parser')
print(soup.find_all('h1'))
| [
"[email protected]"
] | |
79699dd691887e6258b00716f2abc1b190890f0e | 6bc44b7c93c354311eb76924c987a79b6722cad5 | /a_726.py | 9b501c314d4ca70c62280f580e529a0103349d3b | [] | no_license | sun510001/leetcode_jianzhi_offer_2 | a15d032491ed711ee280e1107c9322e9b91fd213 | b1680014ce3f55ba952a1e64241c0cbb783cc436 | refs/heads/master | 2023-06-06T21:31:18.292362 | 2021-07-09T01:16:36 | 2021-07-09T01:16:36 | 351,375,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,758 | py | # -*- coding:utf-8 -*-
"""
726. Number of Atoms

Given a chemical formula (as a string), return the count of each atom.

An atom always starts with an uppercase letter, followed by zero or more
lowercase letters, which together form the atom's name.
If an atom's count is greater than 1, the name is followed by a number giving
that count; if the count is exactly 1, no number follows.
For example, H2O and H2O2 are valid, but H1O2 is not.

Two formulas written one after the other form a new formula,
e.g. H2O2He3Mg4 is also a formula.
A formula wrapped in parentheses, optionally followed by a count, is also a
formula, e.g. (H2O2) and (H2O2)3.

Given a formula, return the counts of all atoms as a string: the first atom
name (in lexicographic order) followed by its count (if greater than 1),
then the second atom name followed by its count, and so on.

Example 1:
Input: formula = "H2O"
Output: "H2O"
Explanation: the atom counts are {'H': 2, 'O': 1}.

Example 2:
Input: formula = "Mg(OH)2"
Output: "H2MgO2"
Explanation: the atom counts are {'H': 2, 'Mg': 1, 'O': 2}.

Example 3:
Input: formula = "K4(ON(SO3)2)2"
Output: "K4N2O14S4"
Explanation: the atom counts are {'K': 4, 'N': 2, 'O': 14, 'S': 4}.

Example 4:
Input: formula = "Be32"
Output: "Be32"

Constraints:
1 <= formula.length <= 1000
formula consists of English letters, digits, '(' and ')'.
formula is a valid chemical formula.
"""
import re
from collections import defaultdict


class Solution:
    def countOfAtoms(self, formula: str) -> str:
        """
        Stack-based scan: push a fresh counter on '(', and on ')' pop it,
        multiply it by the trailing number and fold it into the level below.
        """
        tokens = re.findall(r'[A-Z][a-z]*\d*|\(|\)\d*', formula)
        stack = [defaultdict(int)]
        for tok in tokens:
            if tok == '(':
                stack.append(defaultdict(int))
            elif tok.startswith(')'):
                mult = int(tok[1:] or 1)
                group = stack.pop()
                for name, cnt in group.items():
                    stack[-1][name] += cnt * mult
            else:
                name = tok.rstrip('0123456789')
                cnt = int(tok[len(name):] or 1)
                stack[-1][name] += cnt
        counts = stack[0]
        return ''.join(name + (str(counts[name]) if counts[name] > 1 else '')
                       for name in sorted(counts))
if __name__ == '__main__':
input = "H2O"
sol = Solution()
print(sol.countOfAtoms(input)) | [
"[email protected]"
] | |
d1932ac69b4c613b0286ed22ed76ab3836f4e473 | 3f470f62f7cb4bc383f44ab33557aa3e6174649b | /app.py | ff6627bd2f51f004a411569d2f50aae9b8139360 | [] | no_license | reinev/image-style-transform | ddfb014ddde332f9eeaf3614a9ff2805a14decca | c9e06ea8c32711227cdd64b2672e455bb08fb284 | refs/heads/master | 2020-04-19T16:15:46.022836 | 2019-01-30T08:48:53 | 2019-01-30T08:48:53 | 168,298,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,442 | py | from __future__ import print_function
import os
import time, datetime
import tensorflow as tf
from flask import Flask, render_template, request, send_from_directory
import model
import reader
from preprocessing import preprocessing_factory
app = Flask(__name__)
app.config['SECRET_KEY'] = '123456'
app.static_folder = 'static'
UPLOAD_FOLDER = 'static/img/uploads/'
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
tf.app.flags.DEFINE_string('loss_model', 'vgg_16', 'The name of the architecture to evaluate. '
'You can view all the support models in nets/nets_factory.py')
tf.app.flags.DEFINE_integer('image_size', 256, 'Image size to train.')
tf.app.flags.DEFINE_string("model_file", "models.ckpt", "")
tf.app.flags.DEFINE_string("image_file", "a.jpg", "")
FLAGS = tf.app.flags.FLAGS
import logging
log = logging.getLogger()
log.setLevel('INFO')
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(name)s: %(message)s"))
log.addHandler(handler)
#from cassandra.cluster import Cluster
#from cassandra import ConsistencyLevel
from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement
KEYSPACE = "mykeyspace"
def createKeySpace():
cluster = Cluster(contact_points=['127.0.0.1'],port=9042)
session = cluster.connect()
log.info("Creating keyspace...")
try:
session.execute("""
CREATE KEYSPACE %s
WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '2' }
""" % KEYSPACE)
log.info("setting keyspace...")
session.set_keyspace(KEYSPACE)
log.info("creating table...")
session.execute("""
CREATE TABLE imagerecord (
time timestamp,
style text,
input_name text,
output_name text,
PRIMARY KEY (time)
);
""")
except Exception as e:
log.error("Unable to create table")
log.error(e)
createKeySpace();
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
@app.route('/')
def index():
return render_template('index.html')
@app.route('/transform', methods=['GET', 'POST'])
def deal_image():
models_dict = {'cubist': 'cubist.ckpt-done',
'denoised_starry': 'denoised_starry.ckpt-done',
'feathers': 'feathers.ckpt-done',
'mosaic': 'mosaic.ckpt-done',
'scream': 'scream.ckpt-done',
'udnie': 'udnie.ckpt-done',
'wave': 'wave.ckpt-done',
'painting': 'painting.ckpt-done',
}
if request.method == 'POST':
file = request.files['pic']
style = request.form['style']
if file and allowed_file(file.filename):
if os.path.exists(app.config['UPLOAD_FOLDER']) is False:
os.makedirs(app.config['UPLOAD_FOLDER'])
file.save(os.path.join(app.config['UPLOAD_FOLDER'], file.filename))
model_file = 'wave.ckpt-done'
if style != '':
if models_dict[style] != '':
model_file = models_dict[style]
style_transform(style, 'models/' + model_file, os.path.join(app.config['UPLOAD_FOLDER']) + file.filename, file.filename,
style + '_res_' + file.filename)
return render_template('transformed.html', style='img/style/' + style + '.jpg',
upload='img/uploads/' + file.filename,
transformed='img/generated/' + style + '_res_' + file.filename)
return 'transform error:file format error'
return 'transform error:method not post'
@app.route('/uploads/<filename>')
def uploaded_file(filename):
return send_from_directory('static/img/generated/', filename)
def style_transform(style, model_file, img_file, img_filename, result_file):
height = 0
width = 0
with open(img_file, 'rb') as img:
img_data = img
with tf.Session().as_default() as sess:
if img_file.lower().endswith('png'):
image = sess.run(tf.image.decode_png(img.read()))
else:
image = sess.run(tf.image.decode_jpeg(img.read()))
height = image.shape[0]
width = image.shape[1]
print('Image size: %dx%d' % (width, height))
with tf.Graph().as_default():
with tf.Session().as_default() as sess:
image_preprocessing_fn, _ = preprocessing_factory.get_preprocessing(
FLAGS.loss_model,
is_training=False)
image = reader.get_image(img_file, height, width, image_preprocessing_fn)
image = tf.expand_dims(image, 0)
generated = model.net(image, training=False)
generated = tf.cast(generated, tf.uint8)
generated = tf.squeeze(generated, [0])
saver = tf.train.Saver(tf.global_variables())
sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
FLAGS.model_file = os.path.abspath(model_file)
saver.restore(sess, FLAGS.model_file)
generated_file = 'static/img/generated/' + result_file
if os.path.exists('static/img/generated') is False:
os.makedirs('static/img/generated')
with open(generated_file, 'wb') as img:
result_data = img
start_time = time.time()
img.write(sess.run(tf.image.encode_jpeg(generated)))
end_time = time.time()
now = datetime.datetime.now()
print('Elapsed time: %fs' % (end_time - start_time))
print('Done. Please check %s.' % generated_file)
cluster = Cluster(contact_points=['127.0.0.1'],port=9042)
session = cluster.connect()
log.info("setting keyspace...")
session.set_keyspace(KEYSPACE)
session.execute(
"""
INSERT INTO imagerecord (time,style,input_name,output_name)
VALUES (%s,%s,%s,%s)
""",
(now,style,img_filename,result_file)
)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=80)
| [
"[email protected]"
] | |
7c033e6ea087f5ce359b4191ed726cd49b7ac9b0 | c28a9c55042a1fe7cae3b5f210594c9de1438fe5 | /players/randomPlayer/player.py | 9a9f2c5a8236fe52b769aed90f0046aa91f68ddf | [] | no_license | mehtank/dinoparmfish | afbb2a389c652139c74104270d4a12c4e856e73c | 3a66bfb7245ec6f2bd39e835e1847aa7d0c8597b | refs/heads/master | 2020-05-18T16:29:06.210912 | 2011-06-27T23:33:41 | 2011-06-27T23:33:41 | 1,823,807 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,050 | py | import random
from ..engine import card
class Player:
def output(self, s):
if not self.debug:
return
print "Player " + repr(self.index) + ": ",
print s
def __init__(self, debug=False):
self.debug = debug
def setup(self, index, handSizes, hand):
self.index = index
self.numPlayers = len(handSizes)
self.hand = hand
s = "Initialized : "
for c in hand:
s += "\n " + repr(c)
self.output(s)
def passTo(self):
return (self.index + 2) % self.numPlayers;
def getAsk(self):
target = random.randint(0, (self.numPlayers / 2) - 1)
target = target * 2 + 1
target = (target + self.index) % self.numPlayers
myCard = random.choice(self.hand)
values = range(card.NUMVALUES)
for c in self.hand:
if c.suit == myCard.suit:
values.pop(values.index(c.value))
value = random.choice(values)
ask = card.Card(suit=myCard.suit, value=value)
self.output("asking player " + repr(target) + " for " + repr(ask))
return (target, ask)
def tellAsk(self, currentPlayer, target, card, askSuccessful):
if askSuccessful:
if (target == self.index):
self.output("Gave " + repr(card) + " to player " + repr(currentPlayer))
self.hand.pop(self.hand.index(card))
if (currentPlayer == self.index):
self.output("Got " + repr(card) + " from player " + repr(target))
self.hand.append(card)
def getDeclaration(self):
for suit in range(card.NUMSUITS):
count = 0
for c in self.hand:
if c.suit == suit:
count += 1
if count == card.NUMVALUES:
self.output("Declaring suit: " + repr(suit))
return (suit, [self.index] * card.NUMVALUES)
else:
return (None, None)
def tellDeclaration(self, currentPlayer, suit, attrib, declarationSuccessful, trueAttrib):
if declarationSuccessful is None:
return
topop = []
for c in self.hand:
if c.suit == suit:
topop.append(c)
for c in topop:
self.hand.pop(self.hand.index(c))
| [
"[email protected]"
] | |
d896bd55a2384700f3317a027172df79bff3c361 | 0fc6502b33ad94418980b15036743e672e57628f | /todo/migrations/0001_initial.py | bdeadddd0761ca4c280f022089f12e2e47e3041b | [] | no_license | laltman/pomodoro | 6bcc841ce94318a02fd92d9caee9ca6bb4290a7f | 0da13103bb2bc78a702d234e76859b03dc5a5611 | refs/heads/master | 2020-12-24T20:25:00.020631 | 2016-05-15T22:04:18 | 2016-05-15T22:04:18 | 58,744,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 996 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-10 20:37
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=250, unique=True)),
('created_date', models.DateTimeField(default=datetime.datetime.now)),
('priority', models.IntegerField(choices=[(1, 'Low'), (2, 'Normal'), (3, 'High')], default=2)),
('completed', models.BooleanField(default=False)),
('pomnumber', models.IntegerField(default=1)),
],
options={
'ordering': ['-priority', 'title'],
},
),
]
| [
"[email protected]"
] | |
ec203b96221cca0904285fe6bd03f1a909039e94 | 90a9e36d7ad6ab97a3e0a38b00942917c9168d04 | /on_demanded_service/doAugment.py | a821ff90795668b8452b3e6fbc5819f4d56dccbb | [] | no_license | oryondark/CloudAug | 6bf8b10310759e8b9fb954ae00c29fcf6e04f90e | 05d35c6057626b02e2eed91cb9e097051305361a | refs/heads/master | 2020-12-12T23:26:42.624064 | 2020-04-17T14:12:03 | 2020-04-17T14:12:03 | 234,256,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,398 | py | from generateKeyPair import PrepareKeyPairItemUsingRanges
from cacheService import PrePareCacheService
from threading import Thread
import boto3
import json
import numpy as np
import os, sys
import time
def do_augmentation(Host='aws.elasticache.endpoint:11211', Train_on=True, invocation_name="lambda", r=3):
'''
HOST : Memcached Host
Train_on : is train ?
invocation_name : lambda function name
'''
s3cli = boto3.client('s3')
cifar10_meta_name = "./cifar10.meta.s3"
Bucket = "datasetBucket"
meta_path = "cifar10/cifar10.meta.s3"
s3cli.download_file(Bucket, meta_path, cifar10_meta_name)
with open(cifar10_meta_name, 'r') as meta:
meta = meta.read()
meta = json.loads(meta)
genKeyPairRange = PrepareKeyPairItemUsingRanges(meta, 512, Train_on)
cacheStore = PrePareCacheService(meta, Host, invocation_name, Train_on)
count = 0
augment_time = time.time()
length = genKeyPairRange.__len__()
rush = r
labels_dict = {}
for j in range(rush):
for i in range(length):
setTime = time.time()
ranges, keys = genKeyPairRange[i] # prepare a set of train/test data
cacheStore.thread_run(ranges, keys)
print("augment set time : {}".format(time.time() - setTime))
print("[{}]Done makes data number {}".format(time.time() - augment_time, count))
return count
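# Example invocation (illustrative values only; the cache endpoint and Lambda
# function name below are assumptions, not part of this repository):
#     do_augmentation(Host='my-cache.example.amazonaws.com:11211',
#                     Train_on=True, invocation_name='augment-worker', r=3)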
| [
"[email protected]"
] | |
f66209a409797209c65861ee637738fb6f00d6d1 | a61ebd1507eeaa334aff44800b022ef0a258752a | /Code/CodeChef/CQ1LSTR.py | e9652c5d38e46c48dbe9296ec2be369ebacbefb0 | [
"MIT"
] | permissive | Jimut123/competitive_programming | 14ce0ab65414e6086763519f95487cddc91205a9 | b4cdebaceee719c1a256921829ebafda11c515f5 | refs/heads/master | 2023-03-05T15:42:57.194176 | 2022-04-08T08:53:26 | 2022-04-08T08:53:26 | 156,541,142 | 1 | 0 | null | 2019-05-29T17:10:28 | 2018-11-07T12:09:55 | C++ | UTF-8 | Python | false | false | 326 | py | T = int(input())
for i in range(T):
m_str = input()
s_str = input()
num = int(input())
lis_ = list(s_str)
k = 0
for item in lis_:
if item not in m_str:
k += 1
#print(item)
if int(len(s_str) - k) >=num:
print("Yes")
else:
print("No")
| [
"[email protected]"
] | |
2dd418cdd638fc975092d42d9c2aa1ea285f844d | 0b179d9d210d2ba5a3300f1c5584a5671abf1736 | /Mizhiwu/apps/ss/migrations/0002_auto_20180216_2029.py | c44a45364082b4f851fef59ad48d4d21a912a7a5 | [] | no_license | sjl421/django-practices | 72bd451b2ab182c83c8337801059743482426359 | e1ccb1cd6832cddd39d4b3f4d91db507c2e591ac | refs/heads/master | 2020-03-18T05:32:15.830284 | 2018-02-18T06:23:48 | 2018-02-18T06:23:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | # Generated by Django 2.0.1 on 2018-02-16 20:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ss', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='node',
name='show',
field=models.IntegerField(choices=[(1, '显示'), (0, '不显示')], default=1, verbose_name='是否显示'),
),
]
| [
"[email protected]"
] | |
9dde29e9014c7175238d9605c834650b0e9601fd | 6e8089c9ab0b6f233e3455b087546159a41aeaeb | /daffydav/vfs/__init__.py | 0e6a7752ad47a634c9383ca128eac1aa2c110501 | [] | no_license | mtpi/daffydav | 761d6fe824a1f43922f5514750dcde7ab42212eb | 25e5aff972d9c0131c749f350d35d569c5a3bdb8 | refs/heads/master | 2021-01-25T10:00:08.918311 | 2009-10-30T23:17:56 | 2009-10-30T23:17:56 | 39,693,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,971 | py | #!/usr/bin/env python
# encoding: utf-8
"""
Virtual File System backends
Created by Matteo Pillon on 2009-07-26.
Copyright (c) 2009 Matteo Pillon. All rights reserved.
"""
from fs.base import FSError
from webob import exc
from daffydav.lib.registry import authenticator, vfs
def check_if_forbidden(path, raise_error=True):
"""Raise an HTTPForbidden if there's any exception accessing file/dir"""
# check if there are any exception
try:
if vfs.isdir(path):
vfs.listdir(path)
else:
vfs.open(path).close()
except FSError, e:
if raise_error:
##FIXME: explanation too verbose?
raise exc.HTTPForbidden(explanation=str(e))
else:
return True
else:
return False
def path_join(path, other_path):
if path[-1]=='/' or other_path[0]=='/':
return path+other_path
else:
return path+'/'+other_path
def isdir_alone(path):
"""
Returns true when path is a directory without subdirectories
"""
if check_if_forbidden(path, raise_error=False):
return True
child_dirs = [elem for elem in vfs.listdir(path) if vfs.isdir(path_join(path, elem))]
if len(child_dirs) > 0:
return False
else:
return True
class VFSImpersonationWrapper(object):
"""
    This wrapper encapsulates any vfs object in order to impersonate
the logged-in user before any method call
"""
def __init__(self, vfs):
"""
VFSImpersonationWrapper(vfs)
vfs: any vfs instance
authenticator: any daffydav.authenticators.Authenticator instance
"""
self.vfs = vfs
def __getattr__(self, name):
##FIXME: assumed all vfs attributes used in the code are methods
def wrapper(*args, **kwargs):
function = getattr(self.vfs, name)
return authenticator.run_as_current_user(function, *args, **kwargs)
return wrapper
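# Minimal usage sketch (hypothetical objects, not part of this module): every
# attribute looked up on the wrapper is fetched from the wrapped vfs and run
# through authenticator.run_as_current_user, so the call executes as the
# logged-in user.
#
#     wrapped = VFSImpersonationWrapper(vfs)
#     wrapped.listdir('/home/alice')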
| [
"matteo.pillon@6251b506-c5a9-11de-b12b-e93134c88cf6"
] | matteo.pillon@6251b506-c5a9-11de-b12b-e93134c88cf6 |
cb5c227562ff575532899df08a9b6d72398d0922 | 17be80a5a40d6989ff11fa46947eec2cc73fcec0 | /src/0217/0217.py | 68d5964b9105d000404fa15ea2abd9319a68dd17 | [] | no_license | zhangjiahuan17/LeetCode | 61cd1997cb9805a1d5a29c97cadfc29da7bc7b0e | cbc22a6826755ffb5a8b7048b964c19682703a40 | refs/heads/master | 2020-05-14T17:28:25.159743 | 2018-12-05T13:40:18 | 2018-12-05T13:40:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | class Solution(object):
def containsDuplicate(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
nums_len = len(nums)
set_len = len(set(nums))
if nums_len == set_len:
return False
else:
return True
| [
"[email protected]"
] | |
d7dfed4644f708677c8364f5fbed726502e8096e | e904230b361c4f2e6ecfaa9212ad91622d7ce0b8 | /args.py | b335c1bcb11e60c8938ff6057c9fbaea1373b7bd | [] | no_license | nikhildoifode/NLP_project | 7d3dc2fe4883aa9c89876a26425b44375ff397e8 | c75be46f9c015739e345b220ff7bc270f42d72bd | refs/heads/master | 2023-01-25T05:09:03.054356 | 2020-12-10T04:21:49 | 2020-12-10T04:21:49 | 319,818,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,464 | py | from argparse import ArgumentParser
def get_args():
parser = ArgumentParser(description="Soccer chatbot")
parser.add_argument('--no_cuda', action='store_false', help='do not use cuda', dest='cuda')
parser.add_argument('--gpu', type=bool, default=False)
parser.add_argument('--epochs', type=int, default=60)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--dataset', type=str, default="EntityDetection")
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--seed', type=int, default=3435)
parser.add_argument('--dev_every', type=int, default=2000)
parser.add_argument('--log_every', type=int, default=1000)
parser.add_argument('--patience', type=int, default=10)
parser.add_argument('--save_path', type=str, default='saved_checkpoints')
parser.add_argument('--specify_prefix', type=str, default='id1')
parser.add_argument('--words_dim', type=int, default=300)
parser.add_argument('--num_layer', type=int, default=2)
parser.add_argument('--dropout', type=float, default=0.3)
parser.add_argument('--input_size', type=int, default=300)
parser.add_argument('--hidden_size', type=int, default=50)
parser.add_argument('--rnn_dropout', type=float, default=0.3)
parser.add_argument('--clip_gradient', type=float, default=0.6, help='gradient clipping')
parser.add_argument('--stoi', type=str, default="vocab/w2i.npy")
parser.add_argument('--vocab_glove', type=str, default="vocab/glove300.npy")
parser.add_argument('--weight_decay',type=float, default=0)
parser.add_argument('--teacher_forcing',type=int, default=4)
parser.add_argument('--fix_embed', action='store_false', dest='train_embed')
parser.add_argument('--hits', type=int, default=100)
parser.add_argument('--no_tqdm', default=False, action='store_true', help='disable tqdm progress bar')
parser.add_argument('--randseed', type=int, default=666, metavar='', help='random seed (default: 666)')
parser.add_argument('--trained_model', type=str, default='')
parser.add_argument('--data_dir', type=str, default='preproc_files/incar/')
parser.add_argument('--results_path', type=str, default='query_text')
parser.add_argument('--emb_drop', type=float, default=0.2)
parser.add_argument('--threshold', type=float, default=0.5)
parser.add_argument('--resp_len', type=int, default=20)
args = parser.parse_args()
return args
| [
"[email protected]"
] | |
1f1a20a963607910d43b55ae44763850b0c35aa9 | 6d865b4912db4daed5797279c8d14827332c3fbd | /treat/mesh/cuts.py | a0f7ebc34f0149743a142ca5cb1822b2396aeda2 | [
"MIT"
] | permissive | tjlaboss/tasty_treat | e098acfbf42321612066de539cb3edc26aa434e2 | 5a137b49c6648eda6500025de8bab9c8dcc78d45 | refs/heads/master | 2020-04-07T14:08:37.291156 | 2019-10-18T22:47:15 | 2019-10-18T22:47:15 | 158,435,237 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 729 | py | # Mesh Cuts
#
# Replicate the mesh cuts from the Serpent model
import sys; sys.path.append("..")
import numpy as np
# Height of layers (cm)
layers = np.array(
[3.486, 27, 22.68875, 8.89, 0.762, 20, 31.59375, 5, 5, 39.37375, 20, 0.762, 22.06625, 13.64, 27, 8.02675])
# Number of axial slices per layer
n_cuts = np.array([1, 5, 4, 2, 1, 4, 5, 1, 1, 7, 4, 1, 4, 3, 5, 2])
# delta-z for each layer's slices
dzs = layers/n_cuts
# Where tallies start:
#Z0 = c.struct_CoreGuide_top # cm; z-position of the bottom of the element
#Z0 = c.struct_LowerZrSpace_top - 62.8267500
Z0 = 122.99# + 3.486
n = len(layers)
heights = np.zeros(n)
for i in range(n):
heights[i] = layers[:i+1].sum()
heights += Z0
NX = 1
NY = 1
NZ = n_cuts.sum()
| [
"[email protected]"
] | |
06ea5fb7b529ff59dde0b93d3270b96300c2905c | b9475a49c9aedbc9ce93b61fb30454a9ebd95ff5 | /CRM/views/teacher.py | f40b685f79bfa386d1e667190480c07ecfea8601 | [] | no_license | lckazml/NB | 25cc88f05ed69c1f3ec3820cef7d8ac12e376e67 | be45e364ff66e07b1bc348ab6a8f63b3fa4663dd | refs/heads/master | 2020-09-01T13:11:19.478292 | 2019-11-29T08:22:58 | 2019-11-29T08:22:58 | 218,965,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,142 | py | from django.shortcuts import render, redirect, reverse, HttpResponse
from django.contrib import auth
from CRM import models
from CRM.forms import *
from utils.pagination import Pagination
from django.views import View
from django.db.models import Q
from django.db import transaction
from django.http import QueryDict
import copy
from django.utils.safestring import mark_safe
from django.conf import settings
# Class list view
class ClassList(View):
def get(self, request):
        # Fuzzy search
q = self.get_search_contion(['course', 'semester'])
all_class = models.ClassList.objects.filter(q)
        # Build the 'next' URL used for follow-up redirects
query_params = self.get_query_params()
        # Apply pagination
page = Pagination(request, len(all_class), request.GET.copy())
return render(request, 'crm/teacher/class_list.html', {
'all_class': all_class[page.start:page.end], 'query_params': query_params, 'pagination': page.show_li
})
    # Keyword search
def get_search_contion(self, fields_list):
query = self.request.GET.get('query', '')
q = Q()
q.connector = 'OR'
for i in fields_list:
q.children.append(Q(('{}__contains'.format(i), query)))
return q
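    # For example, get_search_contion(['course', 'semester']) with ?query=py
    # builds Q(course__contains='py') OR Q(semester__contains='py'), so one
    # search box matches against every listed field.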
def get_query_params(self):
        # Save the current full path
url = self.request.get_full_path()
qd = QueryDict()
qd._mutable = True
qd['next'] = url
query_params = qd.urlencode()
return query_params
def classes(request,edit_id=None):
obj=models.ClassList.objects.filter(id=edit_id).first()
form_obj=ClassForm(instance=obj)
title='编辑班级' if obj else '添加班级'
if request.method=='POST':
form_obj = ClassForm(request.POST,instance=obj)
if form_obj.is_valid():
form_obj.save()
next=request.GET.get('next')
if next:
return redirect(next)
return redirect(reverse('class_list'))
return render(request,'crm/form.html',{'title':title,'form_obj':form_obj}) | [
"[email protected]"
] | |
dbcd951d31bdb7c7e00f78dd5992a719fb268dcb | 256471532845a6957beeb49c4d621358fd4b0d79 | /interactive_plot.py | 0ad852c046b09d3da1c05dec3cde90f50a21de23 | [] | no_license | davircarvalho/audio_cluster_visualization | d6baaf06c73ce8f02ab0463514436f151814e8c7 | 9a2a9f539a2f8de2e81d025e6c1a2b897e450197 | refs/heads/main | 2023-06-14T13:33:01.534844 | 2021-07-06T18:04:31 | 2021-07-06T18:04:31 | 383,550,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,558 | py | import matplotlib.pyplot as plt
import numpy as np
from operator import itemgetter
import sounddevice as sd
class interactive_plot:
def __init__(self, x, y, data, sr):
self.x = x # List of x coordinates of clustered samples
self.y = y # List of y coordinates of clustered samples
self.audio_table = np.transpose(data) # Array with audio samples: inputsize = [number_of_data_samples x audio_length]
self.sr = sr
# Play/stop plot specs
self.currently_playing = [x[0], y[0]] # initialize variable
self.color_play = 'r' # red
self.color_idle = 'k' # black
self.fig = plt.figure(figsize=(8,8))
self.cidpress = self.fig.canvas.mpl_connect(
'button_press_event', self.on_click)
self.cidkey = self.fig.canvas.mpl_connect(
'key_press_event', self.on_key)
def PlotClusters(self):
plt.scatter(self.x, self.y, c=self.color_idle)
plt.title('click on the point to listen, press a key to stop it')
plt.axis('off')
plt.show()
### CALLBACKS ###
def on_click(self, event):
"""Check whether mouse is over us; if so, store some data."""
x_data = event.xdata
y_data = event.ydata
# Change color (make sure there's always only one point in red)
plt.scatter(self.currently_playing[0], self.currently_playing[1], c=self.color_idle)
plt.draw()
# find index that matches the clicked position and play the corresponding sound
point_dist = np.sqrt((self.x - x_data)**2 + (self.y - y_data)**2 ) # distance between ploted points and click location
idx_audio = min(enumerate(point_dist), key=itemgetter(1))[0] # find the closest point to click position
self.currently_playing = [self.x[idx_audio], self.y[idx_audio]]
# Play the audio file
data = self.audio_table[:,idx_audio]
self.playing_audio = sd.play(data, self.sr)
# Change color
plt.scatter(self.currently_playing[0], self.currently_playing[1], c=self.color_play)
plt.draw()
def on_key(self, event):
sd.stop(self.playing_audio)
plt.scatter(self.currently_playing[0], self.currently_playing[1], c=self.color_idle)
plt.draw()
def disconnect(self):
"""Disconnect all callbacks."""
self.fig.canvas.mpl_disconnect(self.cidpress)
# self.fig.canvas.mpl_disconnect(self.cidrelease)
# self.fig.canvas.mpl_disconnect(self.cidmotion)
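    # Minimal usage sketch (the names xy, clips and fs are illustrative, not
    # part of this module). clips should be shaped
    # [number_of_data_samples x audio_length], so np.transpose(clips) yields
    # one column of audio per plotted point.
    #
    #     viewer = interactive_plot(xy[:, 0], xy[:, 1], clips, fs)
    #     viewer.PlotClusters()   # click a point to play it; press any key to stop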
| [
"[email protected]"
] | |
9b95cebffda23c9711f94813b003775f3884d499 | aa4267505a69f53622adf37c9995b343d62e1493 | /test_lib.py | 1058dc2c5a00644f43b3357d0d25dd366479e59b | [] | no_license | codesavvysoftware/PyScripts_2016 | 370be41d63480f99d68dfdc780e7d25135556e54 | f854e9ee61b6d0ec33ff85aad747f87d6901c218 | refs/heads/master | 2021-01-10T13:04:49.984584 | 2016-02-01T17:28:49 | 2016-02-01T17:28:49 | 50,857,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | class SomeClass:
def __init__(self):
self.var = "A Test \\n"
def PrintIt(self) :
print(self.var)
| [
"[email protected]"
] | |
ad571803df4c1a17e6d465eb085f43593a66e67f | 5daf87eed9b4f006eb07e2ec87f3834f205cb47d | /djangoapp/form.py | 006ba831a8f1d95b7d55a51947de73d2adeae8f9 | [] | no_license | kundankumardec16/Implementing-Django | bb08fe8f400a559af5c0bfea0c3b39abc2dc8230 | c1c8872c351383bc37137f91cd01157224292e65 | refs/heads/master | 2023-04-01T17:37:31.762286 | 2021-04-04T17:28:49 | 2021-04-04T17:28:49 | 324,736,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,549 | py | from djangoapp.models import Student
from django import forms
from djangoapp.models import UserData
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class StudentForm(forms.ModelForm):
class Meta:
model=Student
fields="__all__"
class TestForm(forms.Form):
name=forms.CharField(label="Name",max_length=30,widget=forms.TextInput(attrs={
'id': 'first_name',
'required': True,
'placeholder': 'Enter your name',
'class':'name'
}))
email=forms.CharField(label="E-mail",max_length=30,widget=forms.TextInput(attrs={
'placeholder':'Enter your e-mail'
}))
class UserForm(UserCreationForm):
class Meta:
model = User
fields = ('username','first_name', 'last_name', 'email','password1','password2')
class RegForm(forms.ModelForm):
dob=forms.DateField(label='Date of Birth')
choices=[('male','Male'),('female','Female')]
gender = forms.ChoiceField(choices=choices, widget=forms.RadioSelect)
class Meta:
model=UserData
fields=('bio','gender','dob','location')
class CustomerForm(forms.Form):
cid=forms.IntegerField()
cfname=forms.CharField(label='Enter first name',max_length=30)
clname=forms.CharField(label='Enter last name',max_length=30)
file=forms.FileField(widget=forms.ClearableFileInput(attrs={'multiple': True})) | [
"[email protected]"
] | |
4734e6f9df0fd9f283d16cf3ed4a4b420d9ac1ab | 4ce4ff000a96cf76eb72260e3f7d3ba1e7708439 | /coregionalised.py | d82fc553bad6cf0b42b791989be8624ee3a0daa6 | [
"Apache-2.0"
] | permissive | thomaspinder/Doubly-Stochastic-GPs | 78e32002fece804bbd56da0aad830c95d19c0990 | 65b04d5e0a9e37b9b517a0d5c27039754b82c4ff | refs/heads/master | 2020-06-13T13:17:08.145999 | 2019-09-18T17:05:49 | 2019-09-18T17:05:49 | 194,668,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,862 | py | import numpy as np
from sklearn.model_selection import train_test_split
import pandas as pd
import tensorflow as tf
import time
import matplotlib.pyplot as plt
import gpflow
from scipy.cluster.vq import kmeans2
from sklearn.model_selection import train_test_split
import string
import random
from itertools import product
tf.logging.set_verbosity(0)
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))
np.random.seed(123)
tf.set_random_seed(123)
def tester(X, gp, y=None):
    mu, var = gp.predict_f(X)
    results = pd.DataFrame(X)
    results.columns = ['date', 'lat', 'lon', 'indicator']
    results['mu'] = np.exp(mu)
    results['var'] = var
    if y is not None:
        results['truth'] = np.exp(y[:, 0])
        results['sq_error'] = np.square(results['mu'] - results['truth'])
    return results
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
if __name__ == "__main__":
# Load data
data_name = "1week"
aurn = pd.read_csv('demos/coregional_data/aurn_{}.csv'.format(data_name))
cams = pd.read_csv('demos/coregional_data/cams_{}.csv'.format(data_name)) # Get full CAMS data.
cams = cams[['date', 'lat', 'lon', 'val']]
mind = aurn.Date.drop_duplicates().tolist()[0]
aurn = aurn[['Date', 'Latitude', 'Longitude', 'pm25_value']]
aurn.columns = ['date', 'lat', 'lon', 'val']
n_sparse = 2000
if n_sparse:
zpoints = kmeans2(cams[['date', 'lat', 'lon']].values, n_sparse, minit='points')[0]
zpoints = np.vstack((zpoints, aurn[['date', 'lat', 'lon']].values))
aurn['indicator'] = 0
cams['indicator'] = 1
# Proportion of CAMS data to be subsetted
subset_denom = None
if subset_denom:
cams = cams.sample(n = int(cams.shape[0]/subset_denom))
all_data = pd.concat([aurn, cams])
# Check data dimensions
# assert all_data.shape[0] == aurn.shape[0] + cams.shape[0], "Rows lost in concatenation"
# assert all_data.shape[1] == aurn.shape[1] == cams.shape[1], "Column count mismatch in data"
print('{} observations loaded.'.format(all_data.shape[0]))
print(all_data.head())
# Transform Data
all_data.val = np.log(all_data.val)
    # Split Data
X_aug = all_data[['date', 'lat', 'lon', 'indicator']].values
y_aug = all_data[['val', 'indicator']].values
X_train, X_test, y_train, y_test = train_test_split(X_aug,
y_aug,
test_size=0.4,
random_state=123,
shuffle=True)
# Fit GP
output_dim = 2
# Dimension of X, excluding the indicator column
base_dims = X_train.shape[1] - 1
# Reference point of the index column
coreg_dim = X_train.shape[1] - 1
# Rank of w
rank = 1
# Base Kernel
k1 = gpflow.kernels.RBF(input_dim=3, active_dims=[0, 1, 2], ARD=True)
# k3 = gpflow.kernels.RBF(input_dim = 1, active_dims =[2])
    # Coregionalised kernel
k2 = gpflow.kernels.Coregion(1, output_dim=output_dim, rank=rank, active_dims=[int(coreg_dim)])
# Initialise W
k2.W = np.random.randn(output_dim, rank)
# Combine
kern = k1 * k2 # k3
# Define Likelihoods
liks = gpflow.likelihoods.SwitchedLikelihood(
[gpflow.likelihoods.Gaussian(),
gpflow.likelihoods.Gaussian()])
# Variational GP
if n_sparse:
m = gpflow.models.SVGP(X_train, y_train, kern = kern, likelihood = liks, Z = zpoints.copy(), num_latent = 1)
else:
m = gpflow.models.VGP(X_train,
y_train,
kern=kern,
likelihood=liks,
num_latent=1)
gpflow.train.ScipyOptimizer().minimize(m, maxiter=100)
gp_params = m.as_pandas_table()
gp_params.to_csv('demos/coreg_{}_{}_gp_params.csv'.format(n_sparse, data_name))
"""# Visualise the B Matrix"""
B = k2.W.value @ k2.W.value.T + np.diag(k2.kappa.value)
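    # B = W W^T + diag(kappa) is the coregionalisation (inter-output covariance)
    # matrix of the ICM construction: B[i, j] scales the shared spatial kernel
    # between output i and output j (here the AURN and CAMS series).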
print('-'*80)
print('B =', B)
print('-'*80)
# plt.imshow(B)
"""## Predictions"""
mu, var = m.predict_f(X_test)
print('mu shape: {}'.format(mu.shape))
results = pd.DataFrame(X_test)
results.columns = ['date', 'lat', 'lon', 'indicator']
results['mu'] = np.exp(mu)
results['var'] = var
results['truth'] = np.exp(y_test[:, 0])
results['sq_error'] = np.square(results['mu'] - results['truth'])
print(results.head())
print("RMSE on {} held out data points: {}".format(
X_test.shape[0], np.sqrt(np.mean(results.sq_error))))
fname = 'demos/corregionalised_nonsep_gp_results_{}_sparse{}.csv'.format(data_name, n_sparse)
results.to_csv(fname, index=False)
saver = gpflow.saver.Saver()
try:
saver.save('models/coreg_model_{}_sparse{}.gpflow'.format(data_name, n_sparse), m)
except ValueError:
tempname = id_generator()
print("Filename coreg_model.gpflow already exists. \nSaving model as {}.gpflow".format(tempname))
saver.save('models/{}_{}.gpflow'.format(tempname, data_name), m)
##################################
# Make tests on a linear grid
##################################
# Generate test data
date_lims = np.arange(cams.date.min(), cams.date.max())
lats = np.round(np.linspace(cams.lat.min(), cams.lat.max(), num = 50)[:, None], 1)
lons = np.round(np.linspace(cams.lon.min(), cams.lon.max(), num = 50)[:, None], 1) # To make out of prediction samples: np.arange(cams.date.max() + 1, cams.date.max() + 7)
# Get all combinations of lat/lon
coord_set = list(product(lats, lons))
coords = np.vstack([np.hstack((coord_set[i][0], coord_set[i][1])) for i in range(len(coord_set))])
# Build a dates column
dates = np.repeat(date_lims, repeats=coords.shape[0])[:, None]
indicator = np.vstack((np.zeros_like(dates), np.ones_like(dates)))
coords_full = np.tile(coords, (date_lims.shape[0], 1))
test_data = np.hstack((np.tile(np.hstack((dates, coords_full)), (2, 1)), indicator))
mu, var = m.predict_f(test_data)
results = pd.DataFrame(test_data)
results.columns = ['date', 'lat', 'lon', 'indicator']
results['mu'] = np.exp(mu)
results['var'] = var
# results['truth'] = np.exp(y_test[:, 0])
# results['sq_error'] = np.square(results['mu'] - results['truth'])
print(results.head())
fname = 'demos/corregionalised_gp_nonsep_results_{}_sparse{}_linspace.csv'.format(data_name, n_sparse)
results.to_csv(fname, index=False)
# results['sq_error'].groupby(results.indicator).describe()
| [
"[email protected]"
] | |
693f92797a3c8c943e5f2b837b2ef4b60fca743e | 4f425dba6784cabe2a65de577b16fcca4462084c | /preprocessing/population_price.py | 94f8dfa819787d3c8a08454e9366c256b5192563 | [] | no_license | konrini/real_estate_project | 0b849706abe83af3893f08887c81d4a562542451 | 62e84d9fe25976437f27273c6fd82b49428b8a08 | refs/heads/main | 2023-08-06T20:36:50.032265 | 2021-10-08T04:14:05 | 2021-10-08T04:14:05 | 387,817,843 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,246 | py | # from matplotlib import font_manager
#
# for font in font_manager.fontManager.ttflist:
# print(font.name, font.fname)
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
from matplotlib import font_manager, rc
font_path = "C:/Windows/Fonts/malgunbd.ttf"
font_name = font_manager.FontProperties(fname=font_path).get_name()
rc('font', family=font_name)
plt.rcParams['axes.unicode_minus'] = False # use an ASCII minus sign so it renders with the Korean font
people = pd.read_csv('C:/Users/user/Downloads/백현동 유입인구.csv')
# people = pd.read_csv('C:/Users/user/Downloads/분당구 유입인구.csv')
people['평가평'] = people['평당 가격 평균']
for i in range(len(people['평당 가격 평균'])):
people['평가평'][i] = (people['평당 가격 평균'][i] - min(people['평당 가격 평균'])) / (max(people['평당 가격 평균']) - min(people['평당 가격 평균']))
fig = plt.figure(figsize=(8,8)) ## create the canvas
fig.set_facecolor('white') ## set the canvas background colour
ax = fig.add_subplot() ## create the figure frame (axes)
ax.plot(people['연도'], people['평가평'], label='평균 평당 가격')
# ax.plot(people['연도'], people['인구수'], label='백현동 총 인구 수')
ax.legend()
plt.show()
| [
"[email protected]"
] | |
a5797c6b65a6488b9a36676a3bf368350ea200a5 | d6b37b5ef96c188e2041d3ef84510c17ff7a6d37 | /automated_git/auto_git.py | 3e47804ac3b4de59f6b7409f349d18400d070aaf | [] | no_license | 99ashr/ashr_with_coffee | 256c9d6a4b0f2e8c01b6420fd3190334dffdfeb2 | ba8d63826beaa5c281036b2125bdb0ac89c1c218 | refs/heads/master | 2023-03-01T10:25:21.153209 | 2021-02-07T23:59:53 | 2021-02-07T23:59:53 | 266,193,047 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,303 | py | import os
import emoji
import pickle
import pyttsx3
# Create object and set voice
engine = pyttsx3.init()
voices = engine.getProperty('voices') # getting details of current voice
# changing index, changes voices. 0 for male
# engine.setProperty('voice', voices[0].id)
# changing index, changes voices. 1 for female
engine.setProperty('voice', voices[1].id)
# repo_name = "ashr_with_coffee"
all_repos = []
def talk(talk):
engine.say(talk)
engine.runAndWait()
talk("Please enter the name of your repository")
repo_name = input(
emoji.emojize(
"Enter the name of your repository here!:pensive_face:\nHere!:\t")
)
def writefun():
with open("myRepos.txt", "ab") as filehandle:
pickle.dump(repo_name, filehandle)
def readfun():
with open("myRepos.txt", "rb") as filehandle:
try:
while True:
all_repos.append(pickle.load(filehandle))
except EOFError:
pass
return all_repos[-1]
def current_dir():
return os.getcwd()
# print(current_dir())
def creating_md():
# repo_name = readfun()
print("Creating README File for you!")
print(emoji.emojize("\nPlease Wait...:slightly_smiling_face:"))
os.system("echo '# {}'>README.md".format(repo_name))
print(
emoji.emojize(
"\nCreated README.md successfully for you!:smiling_face_with_halo:"
)
)
def git_init():
talk("Initializing your repository")
print(
emoji.emojize(
"\nInitializing your repository...:smiling_face_with_smiling_eyes:"
)
)
os.system("git init")
def git_add():
talk("Adding files to commit")
print(emoji.emojize("\n Initializing Git Add Command for you...:smiling_face:"))
os.system("git add .")
def git_status():
talk("These files are added")
print(emoji.emojize("Here's the status of this REPO...:face_savoring_food:"))
os.system("git status")
def git_commit():
talk("Ready To Commit")
print(emoji.emojize("Ready To Commit Your MESS...?:expressionless_face:"))
talk("Please enter your commit message")
commit_msg = input(
emoji.emojize(
"Please enter your commit message here:drooling_face:\n message:")
)
    commit = 'git commit -m "{}"'.format(commit_msg)
os.system(commit)
user_repo_link = "https://github.com/99ashr/"
remote = "git remote add origin"
def connect_remote():
# repo_name = readfun()
talk("Connecting to your remote directory")
print(emoji.emojize("Connecting to your remote directory...:lying_face:"))
os.system("{} {}{}.git".format(remote, user_repo_link, repo_name))
def git_push():
talk("We're good to go.")
os.system("git push origin master")
if __name__ == "__main__":
current_dir()
if repo_name == "":
try:
repo_name = readfun()
except FileNotFoundError:
talk("oops something is wrong")
print("please enter the name manually!")
else:
writefun()
creating_md()
git_init()
git_add()
git_status()
git_commit()
git_status()
connect_remote()
git_push()
engine.stop()
| [
"[email protected]"
] | |
79fa2fe849f31d2c7294d65bafd0d07e87afe8d4 | a12c1fd7e29891192f295f21823b90b10dc08885 | /Backend/SwiftlyAPI/SwiftlyAPI/API/migrations/0015_orders_items.py | f41eb7335b35997161f08ac70de4f1bb4e034a93 | [] | no_license | DaVinciTachyon/Swiftly | 35bb892550c43a9f1df800680956b15451e720c6 | f815bbb75e72aad14ac73abf87eeda85aecf7ca8 | refs/heads/master | 2020-04-21T02:26:05.065838 | 2019-04-12T10:06:31 | 2019-04-12T10:06:31 | 169,254,430 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | # Generated by Django 2.1.7 on 2019-03-24 13:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('API', '0014_user_availability'),
]
operations = [
migrations.AddField(
model_name='orders',
name='items',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='API.OrderItems'),
),
]
| [
"[email protected]"
] | |
e1800351b23fa656e8bf4456cf029b65829b9017 | 5f4dc868dea01acaf073e04e3f73ee4cabb475d5 | /kubernetes/client/apis/apps_v1_api.py | bde7903fe2a3bddf873ddfe3514509d80b43aa40 | [
"Apache-2.0"
] | permissive | oyesam7/python-3 | dd77d856246e15a6daf4f1ba09d10aba82d7ac21 | 8414f258ad7d55c6b5ab6cb98add170e2b4239f7 | refs/heads/master | 2020-08-15T11:07:41.379059 | 2019-10-15T15:25:32 | 2019-10-15T15:25:32 | 215,330,850 | 1 | 0 | Apache-2.0 | 2019-10-15T15:18:22 | 2019-10-15T15:18:21 | null | UTF-8 | Python | false | false | 502,920 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class AppsV1Api(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_namespaced_controller_revision(self, namespace, body, **kwargs):
"""
create a ControllerRevision
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_controller_revision(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1ControllerRevision body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1ControllerRevision
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_controller_revision_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_controller_revision_with_http_info(namespace, body, **kwargs)
return data
def create_namespaced_controller_revision_with_http_info(self, namespace, body, **kwargs):
"""
create a ControllerRevision
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_controller_revision_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1ControllerRevision body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1ControllerRevision
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'body', 'pretty', 'dry_run', 'field_manager']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_controller_revision" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_controller_revision`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_namespaced_controller_revision`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/controllerrevisions', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ControllerRevision',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
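    # Usage sketch (illustrative only, not part of the generated client):
    #
    #     from kubernetes import client, config
    #     config.load_kube_config()
    #     api = client.AppsV1Api()
    #     rev = client.V1ControllerRevision(
    #         metadata=client.V1ObjectMeta(name="web-config-1"), revision=1)
    #     api.create_namespaced_controller_revision("default", rev)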
def create_namespaced_daemon_set(self, namespace, body, **kwargs):
"""
create a DaemonSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_daemon_set(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1DaemonSet body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1DaemonSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_daemon_set_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_daemon_set_with_http_info(namespace, body, **kwargs)
return data
def create_namespaced_daemon_set_with_http_info(self, namespace, body, **kwargs):
"""
create a DaemonSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_daemon_set_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1DaemonSet body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1DaemonSet
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'body', 'pretty', 'dry_run', 'field_manager']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_daemon_set" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_daemon_set`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_namespaced_daemon_set`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/daemonsets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1DaemonSet',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_namespaced_deployment(self, namespace, body, **kwargs):
"""
create a Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_deployment(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Deployment body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1Deployment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_deployment_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_deployment_with_http_info(namespace, body, **kwargs)
return data
def create_namespaced_deployment_with_http_info(self, namespace, body, **kwargs):
"""
create a Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_deployment_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Deployment body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1Deployment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'body', 'pretty', 'dry_run', 'field_manager']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_deployment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_deployment`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_namespaced_deployment`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/deployments', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Deployment',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
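    # Illustrative usage sketch (comment only, not executed): creating a Deployment
    # and exercising the optional dry_run/field_manager parameters documented above.
    # It assumes an AppsV1Api instance named apps_v1 from an already-configured
    # client; "web", the nginx image, and "default" are hypothetical names.
    #
    #   deployment = client.V1Deployment(
    #       metadata=client.V1ObjectMeta(name="web"),
    #       spec=client.V1DeploymentSpec(
    #           replicas=2,
    #           selector=client.V1LabelSelector(match_labels={"app": "web"}),
    #           template=client.V1PodTemplateSpec(
    #               metadata=client.V1ObjectMeta(labels={"app": "web"}),
    #               spec=client.V1PodSpec(containers=[
    #                   client.V1Container(name="web", image="nginx:1.19")]))))
    #   # dry_run="All" asks the server to validate and admit but not persist.
    #   apps_v1.create_namespaced_deployment(
    #       namespace="default", body=deployment,
    #       dry_run="All", field_manager="example-controller")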
def create_namespaced_replica_set(self, namespace, body, **kwargs):
"""
create a ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_replica_set(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1ReplicaSet body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1ReplicaSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_replica_set_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_replica_set_with_http_info(namespace, body, **kwargs)
return data
def create_namespaced_replica_set_with_http_info(self, namespace, body, **kwargs):
"""
create a ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_replica_set_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1ReplicaSet body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1ReplicaSet
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'body', 'pretty', 'dry_run', 'field_manager']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_replica_set" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_replica_set`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_namespaced_replica_set`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/replicasets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ReplicaSet',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
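    # Illustrative usage sketch (comment only): creating a ReplicaSet asynchronously
    # with async_req=True, as described in the docstring above. Assumes a configured
    # AppsV1Api instance named apps_v1; all object names and images are placeholders.
    #
    #   rs = client.V1ReplicaSet(
    #       metadata=client.V1ObjectMeta(name="frontend"),
    #       spec=client.V1ReplicaSetSpec(
    #           replicas=3,
    #           selector=client.V1LabelSelector(match_labels={"tier": "frontend"}),
    #           template=client.V1PodTemplateSpec(
    #               metadata=client.V1ObjectMeta(labels={"tier": "frontend"}),
    #               spec=client.V1PodSpec(containers=[
    #                   client.V1Container(name="redis", image="redis:6")]))))
    #   thread = apps_v1.create_namespaced_replica_set("default", rs, async_req=True)
    #   created = thread.get()  # blocks until the request completes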
def create_namespaced_stateful_set(self, namespace, body, **kwargs):
"""
create a StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_stateful_set(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1StatefulSet body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1StatefulSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_stateful_set_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_stateful_set_with_http_info(namespace, body, **kwargs)
return data
def create_namespaced_stateful_set_with_http_info(self, namespace, body, **kwargs):
"""
create a StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_stateful_set_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1StatefulSet body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1StatefulSet
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'body', 'pretty', 'dry_run', 'field_manager']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_stateful_set" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_stateful_set`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_namespaced_stateful_set`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/statefulsets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StatefulSet',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
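    # Illustrative usage sketch (comment only): creating a StatefulSet. A StatefulSet
    # spec additionally names its governing Service; "web-svc" below is a hypothetical
    # headless Service assumed to exist already, and the other names are placeholders.
    #
    #   sts = client.V1StatefulSet(
    #       metadata=client.V1ObjectMeta(name="web"),
    #       spec=client.V1StatefulSetSpec(
    #           service_name="web-svc",
    #           replicas=3,
    #           selector=client.V1LabelSelector(match_labels={"app": "web"}),
    #           template=client.V1PodTemplateSpec(
    #               metadata=client.V1ObjectMeta(labels={"app": "web"}),
    #               spec=client.V1PodSpec(containers=[
    #                   client.V1Container(name="web", image="nginx:1.19")]))))
    #   apps_v1.create_namespaced_stateful_set(namespace="default", body=sts)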
def delete_collection_namespaced_controller_revision(self, namespace, **kwargs):
"""
delete collection of ControllerRevision
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_controller_revision(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_controller_revision_with_http_info(namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_controller_revision_with_http_info(namespace, **kwargs)
return data
def delete_collection_namespaced_controller_revision_with_http_info(self, namespace, **kwargs):
"""
delete collection of ControllerRevision
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_controller_revision_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_controller_revision" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_controller_revision`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/controllerrevisions', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
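    # Illustrative usage sketch (comment only): deleting a collection of
    # ControllerRevisions filtered by label, per the label_selector parameter above.
    # Assumes a configured apps_v1 client; the selector value is a placeholder.
    #
    #   status = apps_v1.delete_collection_namespaced_controller_revision(
    #       namespace="default", label_selector="app=web")
    #   print(status.status)  # V1Status returned by the API server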
def delete_collection_namespaced_daemon_set(self, namespace, **kwargs):
"""
delete collection of DaemonSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_daemon_set(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_daemon_set_with_http_info(namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_daemon_set_with_http_info(namespace, **kwargs)
return data
def delete_collection_namespaced_daemon_set_with_http_info(self, namespace, **kwargs):
"""
delete collection of DaemonSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_daemon_set_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_daemon_set" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_daemon_set`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/daemonsets', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
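    # Illustrative usage sketch (comment only): deleting DaemonSets selected by a
    # field selector instead of a label selector. The namespace and metadata.name
    # value are hypothetical examples.
    #
    #   apps_v1.delete_collection_namespaced_daemon_set(
    #       namespace="kube-system", field_selector="metadata.name=fluentd")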
def delete_collection_namespaced_deployment(self, namespace, **kwargs):
"""
delete collection of Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_deployment(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_deployment_with_http_info(namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_deployment_with_http_info(namespace, **kwargs)
return data
def delete_collection_namespaced_deployment_with_http_info(self, namespace, **kwargs):
"""
delete collection of Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_deployment_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_deployment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_deployment`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/deployments', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
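    # Illustrative usage sketch (comment only): deleting all Deployments that carry a
    # given label, bounding the underlying list/delete call with timeout_seconds.
    # The label value and namespace are placeholders.
    #
    #   apps_v1.delete_collection_namespaced_deployment(
    #       namespace="default", label_selector="env=staging", timeout_seconds=30)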
def delete_collection_namespaced_replica_set(self, namespace, **kwargs):
"""
delete collection of ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_replica_set(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_replica_set_with_http_info(namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_replica_set_with_http_info(namespace, **kwargs)
return data
def delete_collection_namespaced_replica_set_with_http_info(self, namespace, **kwargs):
"""
delete collection of ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_replica_set_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_replica_set" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_replica_set`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/replicasets', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
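
    # --- Usage sketch (illustrative only, not part of the generated client) ---
    # A hedged example of how delete_collection_namespaced_replica_set might be
    # called from application code; the namespace "demo" and the label selector
    # "app=web" are assumptions made up for this example.
    #
    #   from kubernetes import client, config
    #   config.load_kube_config()                    # use the local kubeconfig
    #   apps = client.AppsV1Api()
    #   # Delete every ReplicaSet in "demo" carrying the app=web label.
    #   status = apps.delete_collection_namespaced_replica_set(
    #       namespace='demo', label_selector='app=web')
    #   print(status.status)                         # V1Status outcome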
def delete_collection_namespaced_stateful_set(self, namespace, **kwargs):
"""
delete collection of StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_stateful_set(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_stateful_set_with_http_info(namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_stateful_set_with_http_info(namespace, **kwargs)
return data
def delete_collection_namespaced_stateful_set_with_http_info(self, namespace, **kwargs):
"""
delete collection of StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_stateful_set_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_stateful_set" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_stateful_set`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/statefulsets', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
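
    # Usage sketch (illustrative, assumed names): a collection delete can also be
    # narrowed with a field selector rather than labels; "demo" and "db" are
    # placeholders for this example only.
    #
    #   apps = client.AppsV1Api()
    #   apps.delete_collection_namespaced_stateful_set(
    #       'demo', field_selector='metadata.name=db', timeout_seconds=30)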
def delete_namespaced_controller_revision(self, name, namespace, **kwargs):
"""
delete a ControllerRevision
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_controller_revision(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ControllerRevision (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_controller_revision_with_http_info(name, namespace, **kwargs)
else:
(data) = self.delete_namespaced_controller_revision_with_http_info(name, namespace, **kwargs)
return data
def delete_namespaced_controller_revision_with_http_info(self, name, namespace, **kwargs):
"""
delete a ControllerRevision
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_controller_revision_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ControllerRevision (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_controller_revision" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_controller_revision`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_controller_revision`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/controllerrevisions/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
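
    # Usage sketch (illustrative, assumed names): deleting one ControllerRevision
    # by name; the revision name and namespace below are made up for the example.
    #
    #   apps = client.AppsV1Api()
    #   apps.delete_namespaced_controller_revision(
    #       name='web-5d4f8c7b6d', namespace='demo')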
def delete_namespaced_daemon_set(self, name, namespace, **kwargs):
"""
delete a DaemonSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_daemon_set(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the DaemonSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_daemon_set_with_http_info(name, namespace, **kwargs)
else:
(data) = self.delete_namespaced_daemon_set_with_http_info(name, namespace, **kwargs)
return data
def delete_namespaced_daemon_set_with_http_info(self, name, namespace, **kwargs):
"""
delete a DaemonSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_daemon_set_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the DaemonSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_daemon_set" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_daemon_set`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_daemon_set`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/daemonsets/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
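
    # Usage sketch (illustrative, assumed names): grace_period_seconds bounds how
    # long the server waits before the object is deleted, per the docstring above.
    # "node-agent" and "demo" are placeholder names.
    #
    #   apps = client.AppsV1Api()
    #   apps.delete_namespaced_daemon_set(
    #       'node-agent', 'demo', grace_period_seconds=30)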
def delete_namespaced_deployment(self, name, namespace, **kwargs):
"""
delete a Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_deployment(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_deployment_with_http_info(name, namespace, **kwargs)
else:
(data) = self.delete_namespaced_deployment_with_http_info(name, namespace, **kwargs)
return data
def delete_namespaced_deployment_with_http_info(self, name, namespace, **kwargs):
"""
delete a Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_deployment_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_deployment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_deployment`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_deployment`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/deployments/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
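
    # Usage sketch (illustrative, assumed names): a cascading "Foreground" delete
    # removes the Deployment's dependents before the Deployment itself, using the
    # propagation_policy value documented above. "web" and "demo" are placeholders.
    #
    #   apps = client.AppsV1Api()
    #   opts = client.V1DeleteOptions(propagation_policy='Foreground',
    #                                 grace_period_seconds=5)
    #   apps.delete_namespaced_deployment('web', 'demo', body=opts)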
def delete_namespaced_replica_set(self, name, namespace, **kwargs):
"""
delete a ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_replica_set(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ReplicaSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_replica_set_with_http_info(name, namespace, **kwargs)
else:
(data) = self.delete_namespaced_replica_set_with_http_info(name, namespace, **kwargs)
return data
def delete_namespaced_replica_set_with_http_info(self, name, namespace, **kwargs):
"""
delete a ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_replica_set_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ReplicaSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_replica_set" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_replica_set`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_replica_set`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/replicasets/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
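
    # Usage sketch (illustrative, assumed names): propagation_policy='Orphan'
    # deletes the ReplicaSet but leaves its Pods running; the names below are
    # placeholders for the example.
    #
    #   apps = client.AppsV1Api()
    #   apps.delete_namespaced_replica_set(
    #       'web-7c9d4', 'demo', propagation_policy='Orphan')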
def delete_namespaced_stateful_set(self, name, namespace, **kwargs):
"""
delete a StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_stateful_set(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_stateful_set_with_http_info(name, namespace, **kwargs)
else:
(data) = self.delete_namespaced_stateful_set_with_http_info(name, namespace, **kwargs)
return data
def delete_namespaced_stateful_set_with_http_info(self, name, namespace, **kwargs):
"""
delete a StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_stateful_set_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_stateful_set" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_stateful_set`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_stateful_set`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/statefulsets/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
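
    # Usage sketch (illustrative, assumed names): the async_req=True form shown in
    # the docstring above returns a thread whose .get() yields the V1Status.
    #
    #   apps = client.AppsV1Api()
    #   thread = apps.delete_namespaced_stateful_set('db', 'demo', async_req=True)
    #   status = thread.get()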
def get_api_resources(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_api_resources_with_http_info(**kwargs)
else:
(data) = self.get_api_resources_with_http_info(**kwargs)
return data
def get_api_resources_with_http_info(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
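
    # Usage sketch (illustrative): enumerating the resources served under apps/v1
    # with get_api_resources; the printed attributes come from V1APIResource.
    #
    #   apps = client.AppsV1Api()
    #   for res in apps.get_api_resources().resources:
    #       print(res.name, res.kind, res.namespaced)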
def list_controller_revision_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind ControllerRevision
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_controller_revision_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ControllerRevisionList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_controller_revision_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_controller_revision_for_all_namespaces_with_http_info(**kwargs)
return data
def list_controller_revision_for_all_namespaces_with_http_info(self, **kwargs):
"""
list or watch objects of kind ControllerRevision
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_controller_revision_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ControllerRevisionList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['_continue', 'field_selector', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_controller_revision_for_all_namespaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/controllerrevisions', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ControllerRevisionList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
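
    # Illustrative usage sketch (a minimal example, not part of the generated
    # client): paging through large result sets with `limit` and the `continue`
    # token returned in `metadata._continue`, as described in the parameter docs
    # above. The kubeconfig loading and the page size of 50 are assumptions for
    # illustration only.
    #
    #     from kubernetes import client, config
    #     config.load_kube_config()          # assumes a reachable cluster/kubeconfig
    #     api = client.AppsV1Api()
    #     token = None
    #     while True:
    #         kwargs = {'limit': 50}         # assumed page size
    #         if token:
    #             kwargs['_continue'] = token
    #         page = api.list_controller_revision_for_all_namespaces(**kwargs)
    #         for item in page.items:
    #             print(item.metadata.namespace, item.metadata.name)
    #         token = page.metadata._continue  # empty when the list is exhausted
    #         if not token:
    #             break
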
def list_daemon_set_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind DaemonSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_daemon_set_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1DaemonSetList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_daemon_set_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_daemon_set_for_all_namespaces_with_http_info(**kwargs)
return data

    def list_daemon_set_for_all_namespaces_with_http_info(self, **kwargs):
"""
list or watch objects of kind DaemonSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_daemon_set_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1DaemonSetList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['_continue', 'field_selector', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_daemon_set_for_all_namespaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/daemonsets', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1DaemonSetList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
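
    # Illustrative usage sketch (a minimal example, not part of the generated
    # client): consuming the same endpoint as a watch stream via
    # kubernetes.watch.Watch, which drives the `watch`/`timeout_seconds`
    # parameters described above. The 60-second timeout is an assumption for
    # illustration only.
    #
    #     from kubernetes import client, config, watch
    #     config.load_kube_config()
    #     api = client.AppsV1Api()
    #     w = watch.Watch()
    #     for event in w.stream(api.list_daemon_set_for_all_namespaces, timeout_seconds=60):
    #         # event['type'] is ADDED, MODIFIED or DELETED
    #         print(event['type'], event['object'].metadata.name)
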
def list_deployment_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_deployment_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1DeploymentList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_deployment_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_deployment_for_all_namespaces_with_http_info(**kwargs)
return data

    def list_deployment_for_all_namespaces_with_http_info(self, **kwargs):
"""
list or watch objects of kind Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_deployment_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1DeploymentList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['_continue', 'field_selector', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_deployment_for_all_namespaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/deployments', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1DeploymentList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
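
    # Illustrative usage sketch (a minimal example, not part of the generated
    # client): narrowing the result set with `label_selector` and
    # `field_selector`. The selector values ('app=nginx', excluding kube-system)
    # are assumptions for illustration; `api` is an AppsV1Api instance as in the
    # sketches above.
    #
    #     deployments = api.list_deployment_for_all_namespaces(
    #         label_selector='app=nginx',                          # assumed label
    #         field_selector='metadata.namespace!=kube-system')    # assumed filter
    #     for d in deployments.items:
    #         print(d.metadata.namespace, d.metadata.name, d.spec.replicas)
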
def list_namespaced_controller_revision(self, namespace, **kwargs):
"""
list or watch objects of kind ControllerRevision
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_controller_revision(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ControllerRevisionList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_controller_revision_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_controller_revision_with_http_info(namespace, **kwargs)
return data

    def list_namespaced_controller_revision_with_http_info(self, namespace, **kwargs):
"""
list or watch objects of kind ControllerRevision
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_controller_revision_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ControllerRevisionList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_controller_revision" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_controller_revision`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/controllerrevisions', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ControllerRevisionList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
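
    # Illustrative usage sketch (a minimal example, not part of the generated
    # client): listing the revision history kept for a workload by filtering on
    # its labels and ordering by the ControllerRevision `revision` number. The
    # namespace and label value are assumptions; `api` is an AppsV1Api instance
    # as in the sketches above.
    #
    #     revisions = api.list_namespaced_controller_revision(
    #         'default', label_selector='app=my-daemonset')
    #     for rev in sorted(revisions.items, key=lambda r: r.revision):
    #         print(rev.revision, rev.metadata.name)
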
def list_namespaced_daemon_set(self, namespace, **kwargs):
"""
list or watch objects of kind DaemonSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_daemon_set(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1DaemonSetList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_daemon_set_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_daemon_set_with_http_info(namespace, **kwargs)
return data

    def list_namespaced_daemon_set_with_http_info(self, namespace, **kwargs):
"""
list or watch objects of kind DaemonSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_daemon_set_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1DaemonSetList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_daemon_set" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_daemon_set`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/daemonsets', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1DaemonSetList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
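
    # Illustrative usage sketch (a minimal example, not part of the generated
    # client): a cheap, possibly slightly stale list served from the API server
    # cache by passing resource_version='0', as described in the parameter docs
    # above. The 'kube-system' namespace is an assumption; `api` is an AppsV1Api
    # instance as in the sketches above.
    #
    #     ds_list = api.list_namespaced_daemon_set('kube-system', resource_version='0')
    #     for ds in ds_list.items:
    #         print(ds.metadata.name, ds.status.number_ready)
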
def list_namespaced_deployment(self, namespace, **kwargs):
"""
list or watch objects of kind Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_deployment(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1DeploymentList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_deployment_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_deployment_with_http_info(namespace, **kwargs)
return data

    def list_namespaced_deployment_with_http_info(self, namespace, **kwargs):
"""
list or watch objects of kind Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_deployment_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1DeploymentList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_deployment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_deployment`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/deployments', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1DeploymentList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
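# Usage sketch: a minimal, illustrative way to call the Deployment list methods
# above. Not part of the generated client; `load_kube_config()` assumes a local
# kubeconfig (use `load_incluster_config()` inside a pod) and the namespace is a
# placeholder.
#
#   from kubernetes import client, config
#
#   config.load_kube_config()
#   apps_v1 = client.AppsV1Api()
#
#   # synchronous call
#   deployments = apps_v1.list_namespaced_deployment(namespace="default")
#   for d in deployments.items:
#       print(d.metadata.name, d.status.ready_replicas)
#
#   # asynchronous call returns a thread-like handle; .get() blocks for the result
#   thread = apps_v1.list_namespaced_deployment(namespace="default", async_req=True)
#   deployments = thread.get()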
def list_namespaced_replica_set(self, namespace, **kwargs):
"""
list or watch objects of kind ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_replica_set(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ReplicaSetList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_replica_set_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_replica_set_with_http_info(namespace, **kwargs)
return data
def list_namespaced_replica_set_with_http_info(self, namespace, **kwargs):
"""
list or watch objects of kind ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_replica_set_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ReplicaSetList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_replica_set" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_replica_set`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/replicasets', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ReplicaSetList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
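# Usage sketch: listing ReplicaSets in a single namespace, narrowed with a label
# selector. Illustrative only; the namespace and selector are placeholders and a
# configured AppsV1Api client (`apps_v1`, as in the sketch above) is assumed.
#
#   rs_list = apps_v1.list_namespaced_replica_set(
#       namespace="default", label_selector="app=web")
#   for rs in rs_list.items:
#       print(rs.metadata.name, rs.status.replicas)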
def list_namespaced_stateful_set(self, namespace, **kwargs):
"""
list or watch objects of kind StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_stateful_set(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1StatefulSetList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_stateful_set_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_stateful_set_with_http_info(namespace, **kwargs)
return data
def list_namespaced_stateful_set_with_http_info(self, namespace, **kwargs):
"""
list or watch objects of kind StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_stateful_set_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1StatefulSetList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_stateful_set" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_stateful_set`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/statefulsets', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StatefulSetList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
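# Usage sketch: listing StatefulSets in a namespace and comparing ready replicas
# against the desired count. Illustrative only; names are placeholders and a
# configured AppsV1Api client (`apps_v1`) is assumed.
#
#   sts_list = apps_v1.list_namespaced_stateful_set(namespace="default")
#   for sts in sts_list.items:
#       print(sts.metadata.name, sts.status.ready_replicas, "of", sts.spec.replicas)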
def list_replica_set_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_replica_set_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ReplicaSetList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_replica_set_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_replica_set_for_all_namespaces_with_http_info(**kwargs)
return data
def list_replica_set_for_all_namespaces_with_http_info(self, **kwargs):
"""
list or watch objects of kind ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_replica_set_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ReplicaSetList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['_continue', 'field_selector', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_replica_set_for_all_namespaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/replicasets', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ReplicaSetList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
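# Usage sketch: paging through ReplicaSets cluster-wide with `limit` and the
# continue token described in the docstring above. Illustrative only; assumes a
# configured AppsV1Api client (`apps_v1`).
#
#   page = apps_v1.list_replica_set_for_all_namespaces(limit=100)
#   while True:
#       for rs in page.items:
#           print(rs.metadata.namespace, rs.metadata.name)
#       token = page.metadata._continue
#       if not token:
#           break
#       page = apps_v1.list_replica_set_for_all_namespaces(limit=100, _continue=token)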
def list_stateful_set_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_stateful_set_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1StatefulSetList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_stateful_set_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_stateful_set_for_all_namespaces_with_http_info(**kwargs)
return data
def list_stateful_set_for_all_namespaces_with_http_info(self, **kwargs):
"""
list or watch objects of kind StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_stateful_set_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1StatefulSetList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['_continue', 'field_selector', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_stateful_set_for_all_namespaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/statefulsets', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StatefulSetList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
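# Usage sketch: watching StatefulSets across all namespaces by driving the list
# method above through the `kubernetes.watch` helper, which sets watch=True and
# streams events. Illustrative only; assumes a configured client.
#
#   from kubernetes import watch
#
#   w = watch.Watch()
#   for event in w.stream(apps_v1.list_stateful_set_for_all_namespaces,
#                         timeout_seconds=60):
#       print(event["type"], event["object"].metadata.name)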
def patch_namespaced_controller_revision(self, name, namespace, body, **kwargs):
"""
partially update the specified ControllerRevision
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_controller_revision(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ControllerRevision (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or fewer, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. The force flag must be unset for non-apply patch requests.
:return: V1ControllerRevision
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_controller_revision_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_controller_revision_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_controller_revision_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update the specified ControllerRevision
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_controller_revision_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ControllerRevision (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or fewer, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. The force flag must be unset for non-apply patch requests.
:return: V1ControllerRevision
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager', 'force']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_controller_revision" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_controller_revision`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_controller_revision`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_controller_revision`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
if 'force' in params:
query_params.append(('force', params['force']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/controllerrevisions/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ControllerRevision',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
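# Usage sketch: patching labels on a ControllerRevision with a partial-object
# patch body, the dict-based style commonly used with this client. Illustrative
# only; the name, namespace, and label values are placeholders.
#
#   patch = {"metadata": {"labels": {"audit": "reviewed"}}}
#   apps_v1.patch_namespaced_controller_revision(
#       name="web-5d6b79f9c6", namespace="default", body=patch)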
def patch_namespaced_daemon_set(self, name, namespace, body, **kwargs):
"""
partially update the specified DaemonSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_daemon_set(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the DaemonSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or fewer, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. The force flag must be unset for non-apply patch requests.
:return: V1DaemonSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_daemon_set_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_daemon_set_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_daemon_set_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update the specified DaemonSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_daemon_set_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the DaemonSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or fewer, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. The force flag must be unset for non-apply patch requests.
:return: V1DaemonSet
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager', 'force']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_daemon_set" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_daemon_set`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_daemon_set`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_daemon_set`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
if 'force' in params:
query_params.append(('force', params['force']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/daemonsets/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1DaemonSet',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
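# Usage sketch (illustrative): patching a DaemonSet's pod template with a dict
# body. The namespace, DaemonSet name, container name and image tag below are
# hypothetical, and `api` is assumed to be an instance of this API class backed
# by a configured ApiClient; which of the accepted patch content types the
# ApiClient negotiates for a dict body depends on the client version.
#
#   body = {'spec': {'template': {'spec': {'containers': [
#       {'name': 'log-agent', 'image': 'log-agent:2.1'}]}}}}
#   ds = api.patch_namespaced_daemon_set('log-agent', 'kube-system', body)
#   print(ds.metadata.name)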
def patch_namespaced_daemon_set_status(self, name, namespace, body, **kwargs):
"""
partially update status of the specified DaemonSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_daemon_set_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the DaemonSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1DaemonSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_daemon_set_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_daemon_set_status_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_daemon_set_status_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update status of the specified DaemonSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_daemon_set_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the DaemonSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1DaemonSet
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager', 'force']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_daemon_set_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_daemon_set_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_daemon_set_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_daemon_set_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
if 'force' in params:
query_params.append(('force', params['force']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/daemonsets/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1DaemonSet',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_deployment(self, name, namespace, body, **kwargs):
"""
partially update the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_deployment(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1Deployment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_deployment_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_deployment_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_deployment_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_deployment_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1Deployment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager', 'force']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_deployment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_deployment`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_deployment`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_deployment`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
if 'force' in params:
query_params.append(('force', params['force']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/deployments/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Deployment',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
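# Usage sketch (illustrative): patching a Deployment's container image with a
# dict body. All names below are hypothetical and `api` is assumed to be an
# instance of this API class. A JSON-Patch list body (e.g.
# [{'op': 'replace', 'path': '...', 'value': '...'}]) is only valid if the
# request is sent as application/json-patch+json, so check how the configured
# ApiClient selects among the accepted patch content types before using one.
#
#   body = {'spec': {'template': {'spec': {'containers': [
#       {'name': 'web', 'image': 'registry.example.com/web:1.4.2'}]}}}}
#   dep = api.patch_namespaced_deployment('web', 'default', body, pretty='true')
#   print(dep.spec.template.spec.containers[0].image)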
def patch_namespaced_deployment_scale(self, name, namespace, body, **kwargs):
"""
partially update scale of the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_deployment_scale(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1Scale
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_deployment_scale_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_deployment_scale_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_deployment_scale_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update scale of the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_deployment_scale_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1Scale
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager', 'force']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_deployment_scale" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_deployment_scale`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_deployment_scale`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_deployment_scale`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
if 'force' in params:
query_params.append(('force', params['force']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/deployments/{name}/scale', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Scale',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
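# Usage sketch (illustrative): scaling a Deployment through the /scale
# subresource; per the docstring above the return value is a V1Scale object.
# The names and replica count are hypothetical and `api` is assumed to be an
# instance of this API class.
#
#   scale = api.patch_namespaced_deployment_scale('web', 'default',
#                                                 {'spec': {'replicas': 5}})
#   print(scale.spec.replicas, scale.status.replicas)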
def patch_namespaced_deployment_status(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_deployment_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1Deployment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_deployment_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_deployment_status_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_deployment_status_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_deployment_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1Deployment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager', 'force']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_deployment_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_deployment_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_deployment_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_deployment_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
if 'force' in params:
query_params.append(('force', params['force']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/deployments/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Deployment',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_replica_set(self, name, namespace, body, **kwargs):
"""
partially update the specified ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_replica_set(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ReplicaSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1ReplicaSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_replica_set_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_replica_set_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_replica_set_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update the specified ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_replica_set_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ReplicaSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1ReplicaSet
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager', 'force']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_replica_set" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_replica_set`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_replica_set`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_replica_set`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
if 'force' in params:
query_params.append(('force', params['force']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/replicasets/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ReplicaSet',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
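# Usage sketch (illustrative): the asynchronous call pattern from the docstring
# above. With async_req=True the method returns a thread-like object and the
# V1ReplicaSet is obtained via .get(). The ReplicaSet name, namespace and label
# are hypothetical and `api` is assumed to be an instance of this API class.
#
#   thread = api.patch_namespaced_replica_set(
#       'web-7d4f8', 'default',
#       {'metadata': {'labels': {'audited': 'true'}}},
#       async_req=True)
#   rs = thread.get()
#   print(rs.metadata.labels)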
def patch_namespaced_replica_set_scale(self, name, namespace, body, **kwargs):
"""
partially update scale of the specified ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_replica_set_scale(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1Scale
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_replica_set_scale_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_replica_set_scale_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_replica_set_scale_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update scale of the specified ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_replica_set_scale_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1Scale
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager', 'force']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_replica_set_scale" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_replica_set_scale`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_replica_set_scale`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_replica_set_scale`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
if 'force' in params:
query_params.append(('force', params['force']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/replicasets/{name}/scale', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Scale',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_replica_set_status(self, name, namespace, body, **kwargs):
"""
partially update status of the specified ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_replica_set_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ReplicaSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1ReplicaSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_replica_set_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_replica_set_status_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_replica_set_status_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update status of the specified ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_replica_set_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ReplicaSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1ReplicaSet
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager', 'force']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_replica_set_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_replica_set_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_replica_set_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_replica_set_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
if 'force' in params:
query_params.append(('force', params['force']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/replicasets/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ReplicaSet',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_stateful_set(self, name, namespace, body, **kwargs):
"""
partially update the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_stateful_set(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1StatefulSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_stateful_set_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_stateful_set_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_stateful_set_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_stateful_set_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1StatefulSet
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager', 'force']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_stateful_set" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_stateful_set`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_stateful_set`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_stateful_set`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
if 'force' in params:
query_params.append(('force', params['force']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/statefulsets/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StatefulSet',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
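# Usage sketch (illustrative): the optional field_manager parameter records
# which actor made the change in the object's managedFields. The manager name,
# StatefulSet name, namespace and patch body below are hypothetical; `api` is
# assumed to be an instance of this API class.
#
#   sts = api.patch_namespaced_stateful_set(
#       'db', 'default',
#       {'spec': {'updateStrategy': {'type': 'RollingUpdate'}}},
#       field_manager='ops-scripts')
#   print(sts.spec.update_strategy.type)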
def patch_namespaced_stateful_set_scale(self, name, namespace, body, **kwargs):
"""
partially update scale of the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_stateful_set_scale(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1Scale
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_stateful_set_scale_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_stateful_set_scale_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_stateful_set_scale_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update scale of the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_stateful_set_scale_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1Scale
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager', 'force']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_stateful_set_scale" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_stateful_set_scale`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_stateful_set_scale`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_stateful_set_scale`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
if 'force' in params:
query_params.append(('force', params['force']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/statefulsets/{name}/scale', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Scale',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
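# Usage sketch (not part of the generated client): scaling a StatefulSet by
# patching its Scale subresource. This assumes a cluster reachable through
# kubeconfig; the StatefulSet name 'web' and namespace 'default' are
# illustrative. A dict body is typically serialized as a merge/strategic-merge
# patch; pass a list of JSON Patch operations instead if your client version
# selects application/json-patch+json.
#
#   from kubernetes import client, config
#
#   config.load_kube_config()
#   apps_v1 = client.AppsV1Api()
#   scale = apps_v1.patch_namespaced_stateful_set_scale(
#       name='web', namespace='default',
#       body={'spec': {'replicas': 5}})
#   print(scale.spec.replicas, scale.status.replicas)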
def patch_namespaced_stateful_set_status(self, name, namespace, body, **kwargs):
"""
partially update status of the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_stateful_set_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1StatefulSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_stateful_set_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_stateful_set_status_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_stateful_set_status_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update status of the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_stateful_set_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1StatefulSet
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager', 'force']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_stateful_set_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_stateful_set_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_stateful_set_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_stateful_set_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
if 'force' in params:
query_params.append(('force', params['force']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/statefulsets/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StatefulSet',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
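# Usage sketch (illustrative): the status subresource is normally written by
# controllers rather than end users, but patching it directly can be useful in
# tests. It requires RBAC permission on statefulsets/status; the names below
# are assumptions and apps_v1 is an AppsV1Api instance as in the sketch above.
#
#   sts = apps_v1.patch_namespaced_stateful_set_status(
#       name='web', namespace='default',
#       body={'status': {'conditions': []}})
#   print(sts.status.conditions)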
def read_namespaced_controller_revision(self, name, namespace, **kwargs):
"""
read the specified ControllerRevision
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_controller_revision(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ControllerRevision (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user cannot specify. Deprecated. Planned for removal in 1.18.
:return: V1ControllerRevision
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_controller_revision_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_controller_revision_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_controller_revision_with_http_info(self, name, namespace, **kwargs):
"""
read the specified ControllerRevision
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_controller_revision_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ControllerRevision (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user cannot specify. Deprecated. Planned for removal in 1.18.
:return: V1ControllerRevision
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_controller_revision" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_controller_revision`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_controller_revision`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/controllerrevisions/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ControllerRevision',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
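# Usage sketch (illustrative): ControllerRevisions hold the revision history
# that StatefulSets and DaemonSets use for rollback. The revision name below
# is an assumption; apps_v1 is an AppsV1Api instance.
#
#   rev = apps_v1.read_namespaced_controller_revision(
#       name='web-6d5f8c7b9d', namespace='default')
#   print(rev.revision, [ref.name for ref in rev.metadata.owner_references or []])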
def read_namespaced_daemon_set(self, name, namespace, **kwargs):
"""
read the specified DaemonSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_daemon_set(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the DaemonSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user cannot specify. Deprecated. Planned for removal in 1.18.
:return: V1DaemonSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_daemon_set_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_daemon_set_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_daemon_set_with_http_info(self, name, namespace, **kwargs):
"""
read the specified DaemonSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_daemon_set_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the DaemonSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user cannot specify. Deprecated. Planned for removal in 1.18.
:return: V1DaemonSet
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_daemon_set" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_daemon_set`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_daemon_set`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/daemonsets/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1DaemonSet',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
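# Usage sketch (illustrative): reading a DaemonSet and comparing the desired
# pod count with the number actually ready. The DaemonSet name 'node-exporter'
# and namespace 'monitoring' are assumptions.
#
#   ds = apps_v1.read_namespaced_daemon_set(name='node-exporter',
#                                           namespace='monitoring')
#   print(ds.status.desired_number_scheduled, ds.status.number_ready)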
def read_namespaced_daemon_set_status(self, name, namespace, **kwargs):
"""
read status of the specified DaemonSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_daemon_set_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the DaemonSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1DaemonSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_daemon_set_status_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_daemon_set_status_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_daemon_set_status_with_http_info(self, name, namespace, **kwargs):
"""
read status of the specified DaemonSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_daemon_set_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the DaemonSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1DaemonSet
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_daemon_set_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_daemon_set_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_daemon_set_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/daemonsets/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1DaemonSet',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_deployment(self, name, namespace, **kwargs):
"""
read the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_deployment(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user cannot specify. Deprecated. Planned for removal in 1.18.
:return: V1Deployment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_deployment_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_deployment_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_deployment_with_http_info(self, name, namespace, **kwargs):
"""
read the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_deployment_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user cannot specify. Deprecated. Planned for removal in 1.18.
:return: V1Deployment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_deployment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_deployment`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_deployment`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/deployments/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Deployment',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
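# Usage sketch (illustrative): reading a Deployment and inspecting its desired
# replica count and container image. Assumes a Deployment named 'web' in the
# 'default' namespace.
#
#   dep = apps_v1.read_namespaced_deployment(name='web', namespace='default')
#   print(dep.spec.replicas, dep.spec.template.spec.containers[0].image)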
def read_namespaced_deployment_scale(self, name, namespace, **kwargs):
"""
read scale of the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_deployment_scale(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Scale
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_deployment_scale_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_deployment_scale_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_deployment_scale_with_http_info(self, name, namespace, **kwargs):
"""
read scale of the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_deployment_scale_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Scale
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_deployment_scale" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_deployment_scale`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_deployment_scale`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/deployments/{name}/scale', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Scale',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
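# Usage sketch (illustrative): the Scale subresource exposes only replica
# counts and the label selector, which suits autoscaler-style tooling that
# does not need the full Deployment object. Names are assumptions.
#
#   scale = apps_v1.read_namespaced_deployment_scale(name='web',
#                                                    namespace='default')
#   print(scale.spec.replicas, scale.status.selector)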
def read_namespaced_deployment_status(self, name, namespace, **kwargs):
"""
read status of the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_deployment_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Deployment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_deployment_status_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_deployment_status_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_deployment_status_with_http_info(self, name, namespace, **kwargs):
"""
read status of the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_deployment_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Deployment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_deployment_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_deployment_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_deployment_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/deployments/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Deployment',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
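# Usage sketch (illustrative): a naive rollout wait that polls the status
# subresource until the updated and available replica counts reach the desired
# count. The timeout and names are assumptions; production code should also
# check status.observed_generation and the Deployment conditions.
#
#   import time
#   for _ in range(60):
#       dep = apps_v1.read_namespaced_deployment_status(name='web',
#                                                       namespace='default')
#       desired = dep.spec.replicas or 0
#       if (dep.status.updated_replicas or 0) >= desired and \
#               (dep.status.available_replicas or 0) >= desired:
#           break
#       time.sleep(2)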
def read_namespaced_replica_set(self, name, namespace, **kwargs):
"""
read the specified ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_replica_set(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ReplicaSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user cannot specify. Deprecated. Planned for removal in 1.18.
:return: V1ReplicaSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_replica_set_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_replica_set_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_replica_set_with_http_info(self, name, namespace, **kwargs):
"""
read the specified ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_replica_set_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ReplicaSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user cannot specify. Deprecated. Planned for removal in 1.18.
:return: V1ReplicaSet
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_replica_set" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_replica_set`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_replica_set`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/replicasets/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ReplicaSet',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
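# Usage sketch (illustrative): ReplicaSets are usually owned by Deployments;
# the owner reference ties a ReplicaSet back to the Deployment managing it.
# The ReplicaSet name below (a Deployment name plus pod-template hash) is an
# assumption.
#
#   rs = apps_v1.read_namespaced_replica_set(name='web-6d5f8c7b9d',
#                                            namespace='default')
#   owners = rs.metadata.owner_references or []
#   print(rs.status.ready_replicas, [o.name for o in owners])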
def read_namespaced_replica_set_scale(self, name, namespace, **kwargs):
"""
read scale of the specified ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_replica_set_scale(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Scale
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_replica_set_scale_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_replica_set_scale_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_replica_set_scale_with_http_info(self, name, namespace, **kwargs):
"""
read scale of the specified ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_replica_set_scale_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Scale
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_replica_set_scale" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_replica_set_scale`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_replica_set_scale`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/replicasets/{name}/scale', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Scale',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_replica_set_status(self, name, namespace, **kwargs):
"""
read status of the specified ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_replica_set_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ReplicaSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1ReplicaSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_replica_set_status_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_replica_set_status_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_replica_set_status_with_http_info(self, name, namespace, **kwargs):
"""
read status of the specified ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_replica_set_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ReplicaSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1ReplicaSet
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_replica_set_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_replica_set_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_replica_set_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/replicasets/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ReplicaSet',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_stateful_set(self, name, namespace, **kwargs):
"""
read the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_stateful_set(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user cannot specify. Deprecated. Planned for removal in 1.18.
:return: V1StatefulSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_stateful_set_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_stateful_set_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_stateful_set_with_http_info(self, name, namespace, **kwargs):
"""
read the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_stateful_set_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user cannot specify. Deprecated. Planned for removal in 1.18.
:return: V1StatefulSet
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_stateful_set" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_stateful_set`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_stateful_set`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/statefulsets/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StatefulSet',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
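# Usage sketch (illustrative): reading a StatefulSet and checking its headless
# service name and how many ordinal pods are ready. Names are assumptions.
#
#   sts = apps_v1.read_namespaced_stateful_set(name='web', namespace='default')
#   print(sts.spec.service_name, sts.status.ready_replicas)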
def read_namespaced_stateful_set_scale(self, name, namespace, **kwargs):
"""
read scale of the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_stateful_set_scale(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Scale
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_stateful_set_scale_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_stateful_set_scale_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_stateful_set_scale_with_http_info(self, name, namespace, **kwargs):
"""
read scale of the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_stateful_set_scale_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Scale
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_stateful_set_scale" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_stateful_set_scale`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_stateful_set_scale`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/statefulsets/{name}/scale', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Scale',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_stateful_set_status(self, name, namespace, **kwargs):
"""
read status of the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_stateful_set_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1StatefulSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_stateful_set_status_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_stateful_set_status_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_stateful_set_status_with_http_info(self, name, namespace, **kwargs):
"""
read status of the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_stateful_set_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1StatefulSet
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_stateful_set_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_stateful_set_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_stateful_set_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/statefulsets/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StatefulSet',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
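# Usage sketch (illustrative): the async_req flag documented above returns a
# thread-like object whose .get() yields the parsed result, mirroring the
# docstring example. Names are assumptions.
#
#   thread = apps_v1.read_namespaced_stateful_set_status(
#       name='web', namespace='default', async_req=True)
#   sts = thread.get()
#   print(sts.status.current_revision, sts.status.update_revision)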
def replace_namespaced_controller_revision(self, name, namespace, body, **kwargs):
"""
replace the specified ControllerRevision
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_controller_revision(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ControllerRevision (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1ControllerRevision body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1ControllerRevision
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_controller_revision_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_controller_revision_with_http_info(name, namespace, body, **kwargs)
return data
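    # A hypothetical usage sketch (assumes `apps_v1` is a configured AppsV1Api
    # instance as in the sketch above; the revision and namespace names are
    # placeholders). ControllerRevisions are owned by their workload
    # controllers, so a replace is usually a read-modify-write of metadata only:
    #
    #   rev = apps_v1.read_namespaced_controller_revision('web-6b4f8', 'default')
    #   rev.metadata.labels = dict(rev.metadata.labels or {}, audited='true')
    #   apps_v1.replace_namespaced_controller_revision('web-6b4f8', 'default', rev)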
def replace_namespaced_controller_revision_with_http_info(self, name, namespace, body, **kwargs):
"""
replace the specified ControllerRevision
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_controller_revision_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ControllerRevision (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1ControllerRevision body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1ControllerRevision
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_controller_revision" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_controller_revision`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_controller_revision`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_controller_revision`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/controllerrevisions/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ControllerRevision',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_daemon_set(self, name, namespace, body, **kwargs):
"""
replace the specified DaemonSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_daemon_set(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the DaemonSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1DaemonSet body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1DaemonSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_daemon_set_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_daemon_set_with_http_info(name, namespace, body, **kwargs)
return data
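    # A hypothetical usage sketch (assumes `apps_v1` is a configured AppsV1Api
    # instance; DaemonSet, namespace and image names are placeholders).
    # Reading first and writing the mutated object back avoids clobbering
    # fields set by other actors:
    #
    #   ds = apps_v1.read_namespaced_daemon_set('node-exporter', 'monitoring')
    #   ds.spec.template.spec.containers[0].image = 'prom/node-exporter:v1.0.1'
    #   apps_v1.replace_namespaced_daemon_set('node-exporter', 'monitoring', ds)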
def replace_namespaced_daemon_set_with_http_info(self, name, namespace, body, **kwargs):
"""
replace the specified DaemonSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_daemon_set_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the DaemonSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1DaemonSet body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1DaemonSet
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_daemon_set" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_daemon_set`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_daemon_set`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_daemon_set`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/daemonsets/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1DaemonSet',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_daemon_set_status(self, name, namespace, body, **kwargs):
"""
replace status of the specified DaemonSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_daemon_set_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the DaemonSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1DaemonSet body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1DaemonSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_daemon_set_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_daemon_set_status_with_http_info(name, namespace, body, **kwargs)
return data
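    # A hypothetical usage sketch (names are placeholders). DaemonSet status is
    # normally written by the controller; a manual replace typically round-trips
    # the object read from the status subresource:
    #
    #   ds = apps_v1.read_namespaced_daemon_set_status('node-exporter', 'monitoring')
    #   apps_v1.replace_namespaced_daemon_set_status('node-exporter', 'monitoring', ds)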
def replace_namespaced_daemon_set_status_with_http_info(self, name, namespace, body, **kwargs):
"""
replace status of the specified DaemonSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_daemon_set_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the DaemonSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1DaemonSet body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1DaemonSet
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_daemon_set_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_daemon_set_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_daemon_set_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_daemon_set_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/daemonsets/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1DaemonSet',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_deployment(self, name, namespace, body, **kwargs):
"""
replace the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_deployment(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Deployment body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1Deployment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_deployment_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_deployment_with_http_info(name, namespace, body, **kwargs)
return data
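    # A hypothetical usage sketch (assumes `apps_v1` is a configured AppsV1Api
    # instance; Deployment, namespace and image names are placeholders):
    #
    #   dep = apps_v1.read_namespaced_deployment('web', 'default')
    #   dep.spec.template.spec.containers[0].image = 'nginx:1.19'
    #   apps_v1.replace_namespaced_deployment('web', 'default', dep)
    #
    # Passing dry_run='All' would validate the replace server-side without
    # persisting it.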
def replace_namespaced_deployment_with_http_info(self, name, namespace, body, **kwargs):
"""
replace the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_deployment_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Deployment body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1Deployment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_deployment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_deployment`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_deployment`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_deployment`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/deployments/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Deployment',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_deployment_scale(self, name, namespace, body, **kwargs):
"""
replace scale of the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_deployment_scale(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Scale body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1Scale
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_deployment_scale_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_deployment_scale_with_http_info(name, namespace, body, **kwargs)
return data
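    # A hypothetical usage sketch (names are placeholders). Reading the Scale
    # subresource first keeps metadata.resource_version intact for the replace:
    #
    #   scale = apps_v1.read_namespaced_deployment_scale('web', 'default')
    #   scale.spec.replicas = 5
    #   apps_v1.replace_namespaced_deployment_scale('web', 'default', scale)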
def replace_namespaced_deployment_scale_with_http_info(self, name, namespace, body, **kwargs):
"""
replace scale of the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_deployment_scale_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Scale body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1Scale
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_deployment_scale" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_deployment_scale`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_deployment_scale`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_deployment_scale`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/deployments/{name}/scale', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Scale',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_deployment_status(self, name, namespace, body, **kwargs):
"""
replace status of the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_deployment_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Deployment body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1Deployment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_deployment_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_deployment_status_with_http_info(name, namespace, body, **kwargs)
return data
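    # A hypothetical usage sketch (names are placeholders). Deployment status is
    # owned by the controller, so this call is mostly relevant to custom
    # controllers that read, adjust, and write back status:
    #
    #   dep = apps_v1.read_namespaced_deployment_status('web', 'default')
    #   apps_v1.replace_namespaced_deployment_status('web', 'default', dep)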
def replace_namespaced_deployment_status_with_http_info(self, name, namespace, body, **kwargs):
"""
replace status of the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_deployment_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Deployment body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1Deployment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_deployment_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_deployment_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_deployment_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_deployment_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/deployments/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Deployment',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_replica_set(self, name, namespace, body, **kwargs):
"""
replace the specified ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_replica_set(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ReplicaSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1ReplicaSet body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1ReplicaSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_replica_set_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_replica_set_with_http_info(name, namespace, body, **kwargs)
return data
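    # A hypothetical usage sketch (names are placeholders). ReplicaSets are
    # usually owned by Deployments, so direct replacement is mostly relevant
    # for standalone ReplicaSets:
    #
    #   rs = apps_v1.read_namespaced_replica_set('web-standalone', 'default')
    #   rs.spec.replicas = 3
    #   apps_v1.replace_namespaced_replica_set('web-standalone', 'default', rs)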
def replace_namespaced_replica_set_with_http_info(self, name, namespace, body, **kwargs):
"""
replace the specified ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_replica_set_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ReplicaSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1ReplicaSet body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1ReplicaSet
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_replica_set" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_replica_set`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_replica_set`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_replica_set`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/replicasets/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ReplicaSet',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_replica_set_scale(self, name, namespace, body, **kwargs):
"""
replace scale of the specified ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_replica_set_scale(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Scale body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1Scale
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_replica_set_scale_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_replica_set_scale_with_http_info(name, namespace, body, **kwargs)
return data
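    # A hypothetical usage sketch (names are placeholders), mirroring the
    # Deployment scale example above:
    #
    #   scale = apps_v1.read_namespaced_replica_set_scale('web-standalone', 'default')
    #   scale.spec.replicas = 0
    #   apps_v1.replace_namespaced_replica_set_scale('web-standalone', 'default', scale)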
def replace_namespaced_replica_set_scale_with_http_info(self, name, namespace, body, **kwargs):
"""
replace scale of the specified ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_replica_set_scale_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Scale body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1Scale
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_replica_set_scale" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_replica_set_scale`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_replica_set_scale`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_replica_set_scale`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/replicasets/{name}/scale', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Scale',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_replica_set_status(self, name, namespace, body, **kwargs):
"""
replace status of the specified ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_replica_set_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ReplicaSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1ReplicaSet body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1ReplicaSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_replica_set_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_replica_set_status_with_http_info(name, namespace, body, **kwargs)
return data
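    # A hypothetical usage sketch (names are placeholders); status is normally
    # written by the ReplicaSet controller, so a manual replace typically
    # round-trips the object read from the status subresource:
    #
    #   rs = apps_v1.read_namespaced_replica_set_status('web-standalone', 'default')
    #   apps_v1.replace_namespaced_replica_set_status('web-standalone', 'default', rs)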
def replace_namespaced_replica_set_status_with_http_info(self, name, namespace, body, **kwargs):
"""
replace status of the specified ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_replica_set_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ReplicaSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1ReplicaSet body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1ReplicaSet
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_replica_set_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_replica_set_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_replica_set_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_replica_set_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/replicasets/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ReplicaSet',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_stateful_set(self, name, namespace, body, **kwargs):
"""
replace the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_stateful_set(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1StatefulSet body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1StatefulSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_stateful_set_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_stateful_set_with_http_info(name, namespace, body, **kwargs)
return data
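    # A hypothetical usage sketch (assumes `apps_v1` is a configured AppsV1Api
    # instance; names are placeholders). Most StatefulSet spec fields other
    # than replicas, template and updateStrategy are immutable, so a replace
    # typically changes only those:
    #
    #   sts = apps_v1.read_namespaced_stateful_set('web', 'default')
    #   sts.spec.template.spec.containers[0].image = 'nginx:1.19'
    #   apps_v1.replace_namespaced_stateful_set('web', 'default', sts)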
def replace_namespaced_stateful_set_with_http_info(self, name, namespace, body, **kwargs):
"""
replace the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_stateful_set_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1StatefulSet body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1StatefulSet
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_stateful_set" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_stateful_set`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_stateful_set`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_stateful_set`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/statefulsets/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StatefulSet',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_stateful_set_scale(self, name, namespace, body, **kwargs):
"""
replace scale of the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_stateful_set_scale(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Scale body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1Scale
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_stateful_set_scale_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_stateful_set_scale_with_http_info(name, namespace, body, **kwargs)
return data
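    # A hypothetical usage sketch (names are placeholders):
    #
    #   scale = apps_v1.read_namespaced_stateful_set_scale('web', 'default')
    #   scale.spec.replicas = 4
    #   apps_v1.replace_namespaced_stateful_set_scale('web', 'default', scale)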
def replace_namespaced_stateful_set_scale_with_http_info(self, name, namespace, body, **kwargs):
"""
replace scale of the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_stateful_set_scale_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Scale body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1Scale
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_stateful_set_scale" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_stateful_set_scale`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_stateful_set_scale`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_stateful_set_scale`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/statefulsets/{name}/scale', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Scale',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_stateful_set_status(self, name, namespace, body, **kwargs):
"""
replace status of the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_stateful_set_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1StatefulSet body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1StatefulSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_stateful_set_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_stateful_set_status_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_stateful_set_status_with_http_info(self, name, namespace, body, **kwargs):
"""
replace status of the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_stateful_set_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1StatefulSet body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1StatefulSet
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_stateful_set_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_stateful_set_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_stateful_set_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_stateful_set_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1/namespaces/{namespace}/statefulsets/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StatefulSet',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| [
"[email protected]"
] | |
c63eb3925367b61baa0f103283c8a73ca70d1c11 | 3f9e0b03c86fa4f4e28b5e28bcb9bb2e737fe7e1 | /env/Lib/site-packages/pip/_internal/index/collector.py | 14d745eefbb4c0c959c6f856a8147047578851cd | [
"Apache-2.0"
] | permissive | sinha-debojyoti/Ookla-Speedtest.net-Crawler | 58c5b9d535b9f10f54eecbc656a6d62c50cc19b7 | 02e54f5679de74f732a34a37fac260d2ac34eb12 | refs/heads/master | 2022-07-18T10:27:35.020386 | 2022-07-03T03:53:11 | 2022-07-03T03:53:11 | 218,542,102 | 17 | 33 | Apache-2.0 | 2022-07-03T03:55:41 | 2019-10-30T14:08:16 | Python | UTF-8 | Python | false | false | 17,645 | py | """
The main purpose of this module is to expose LinkCollector.collect_sources().
"""
import cgi
import collections
import functools
import html
import itertools
import logging
import os
import re
import urllib.parse
import urllib.request
import xml.etree.ElementTree
from optparse import Values
from typing import (
Callable,
Iterable,
List,
MutableMapping,
NamedTuple,
Optional,
Sequence,
Union,
)
from pip._vendor import html5lib, requests
from pip._vendor.requests import Response
from pip._vendor.requests.exceptions import RetryError, SSLError
from pip._internal.exceptions import NetworkConnectionError
from pip._internal.models.link import Link
from pip._internal.models.search_scope import SearchScope
from pip._internal.network.session import PipSession
from pip._internal.network.utils import raise_for_status
from pip._internal.utils.filetypes import is_archive_file
from pip._internal.utils.misc import pairwise, redact_auth_from_url
from pip._internal.vcs import vcs
from .sources import CandidatesFromPage, LinkSource, build_source
logger = logging.getLogger(__name__)
HTMLElement = xml.etree.ElementTree.Element
ResponseHeaders = MutableMapping[str, str]
def _match_vcs_scheme(url: str) -> Optional[str]:
"""Look for VCS schemes in the URL.
Returns the matched VCS scheme, or None if there's no match.
"""
for scheme in vcs.schemes:
if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
return scheme
return None
class _NotHTML(Exception):
def __init__(self, content_type: str, request_desc: str) -> None:
super().__init__(content_type, request_desc)
self.content_type = content_type
self.request_desc = request_desc
def _ensure_html_header(response: Response) -> None:
"""Check the Content-Type header to ensure the response contains HTML.
Raises `_NotHTML` if the content type is not text/html.
"""
content_type = response.headers.get("Content-Type", "")
if not content_type.lower().startswith("text/html"):
raise _NotHTML(content_type, response.request.method)
class _NotHTTP(Exception):
pass
def _ensure_html_response(url: str, session: PipSession) -> None:
"""Send a HEAD request to the URL, and ensure the response contains HTML.
Raises `_NotHTTP` if the URL is not available for a HEAD request, or
`_NotHTML` if the content type is not text/html.
"""
scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url)
if scheme not in {'http', 'https'}:
raise _NotHTTP()
resp = session.head(url, allow_redirects=True)
raise_for_status(resp)
_ensure_html_header(resp)
def _get_html_response(url: str, session: PipSession) -> Response:
"""Access an HTML page with GET, and return the response.
This consists of three parts:
1. If the URL looks suspiciously like an archive, send a HEAD first to
check the Content-Type is HTML, to avoid downloading a large file.
Raise `_NotHTTP` if the content type cannot be determined, or
`_NotHTML` if it is not HTML.
2. Actually perform the request. Raise HTTP exceptions on network failures.
3. Check the Content-Type header to make sure we got HTML, and raise
`_NotHTML` otherwise.
"""
if is_archive_file(Link(url).filename):
_ensure_html_response(url, session=session)
logger.debug('Getting page %s', redact_auth_from_url(url))
resp = session.get(
url,
headers={
"Accept": "text/html",
# We don't want to blindly returned cached data for
# /simple/, because authors generally expecting that
# twine upload && pip install will function, but if
# they've done a pip install in the last ~10 minutes
# it won't. Thus by setting this to zero we will not
# blindly use any cached data, however the benefit of
# using max-age=0 instead of no-cache, is that we will
# still support conditional requests, so we will still
# minimize traffic sent in cases where the page hasn't
# changed at all, we will just always incur the round
# trip for the conditional GET now instead of only
# once per 10 minutes.
# For more information, please see pypa/pip#5670.
"Cache-Control": "max-age=0",
},
)
raise_for_status(resp)
# The check for archives above only works if the url ends with
# something that looks like an archive. However that is not a
# requirement of an url. Unless we issue a HEAD request on every
# url we cannot know ahead of time for sure if something is HTML
# or not. However we can check after we've downloaded it.
_ensure_html_header(resp)
return resp
def _get_encoding_from_headers(headers: ResponseHeaders) -> Optional[str]:
"""Determine if we have any encoding information in our headers.
"""
if headers and "Content-Type" in headers:
content_type, params = cgi.parse_header(headers["Content-Type"])
if "charset" in params:
return params['charset']
return None
def _determine_base_url(document: HTMLElement, page_url: str) -> str:
"""Determine the HTML document's base URL.
This looks for a ``<base>`` tag in the HTML document. If present, its href
attribute denotes the base URL of anchor tags in the document. If there is
no such tag (or if it does not have a valid href attribute), the HTML
file's URL is used as the base URL.
:param document: An HTML document representation. The current
implementation expects the result of ``html5lib.parse()``.
:param page_url: The URL of the HTML document.
"""
for base in document.findall(".//base"):
href = base.get("href")
if href is not None:
return href
return page_url
def _clean_url_path_part(part: str) -> str:
"""
Clean a "part" of a URL path (i.e. after splitting on "@" characters).
"""
# We unquote prior to quoting to make sure nothing is double quoted.
return urllib.parse.quote(urllib.parse.unquote(part))
def _clean_file_url_path(part: str) -> str:
"""
Clean the first part of a URL path that corresponds to a local
filesystem path (i.e. the first part after splitting on "@" characters).
"""
# We unquote prior to quoting to make sure nothing is double quoted.
# Also, on Windows the path part might contain a drive letter which
# should not be quoted. On Linux where drive letters do not
# exist, the colon should be quoted. We rely on urllib.request
# to do the right thing here.
return urllib.request.pathname2url(urllib.request.url2pathname(part))
# "@" and "%2F" (the percent-encoding of "/") are treated as reserved separators when cleaning URL paths
_reserved_chars_re = re.compile('(@|%2F)', re.IGNORECASE)
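# e.g. _reserved_chars_re.split("pkg.git@v1.0%2Fbeta") -> ["pkg.git", "@", "v1.0", "%2F", "beta"]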
def _clean_url_path(path: str, is_local_path: bool) -> str:
"""
Clean the path portion of a URL.
"""
if is_local_path:
clean_func = _clean_file_url_path
else:
clean_func = _clean_url_path_part
# Split on the reserved characters prior to cleaning so that
# revision strings in VCS URLs are properly preserved.
parts = _reserved_chars_re.split(path)
cleaned_parts = []
for to_clean, reserved in pairwise(itertools.chain(parts, [''])):
cleaned_parts.append(clean_func(to_clean))
# Normalize %xx escapes (e.g. %2f -> %2F)
cleaned_parts.append(reserved.upper())
return ''.join(cleaned_parts)
def _clean_link(url: str) -> str:
"""
Make sure a link is fully quoted.
For example, if ' ' occurs in the URL, it will be replaced with "%20",
and without double-quoting other characters.
"""
# Split the URL into parts according to the general structure
# `scheme://netloc/path;parameters?query#fragment`.
result = urllib.parse.urlparse(url)
# If the netloc is empty, then the URL refers to a local filesystem path.
is_local_path = not result.netloc
path = _clean_url_path(result.path, is_local_path=is_local_path)
return urllib.parse.urlunparse(result._replace(path=path))
def _create_link_from_element(
anchor: HTMLElement,
page_url: str,
base_url: str,
) -> Optional[Link]:
"""
Convert an anchor element in a simple repository page to a Link.
"""
href = anchor.get("href")
if not href:
return None
url = _clean_link(urllib.parse.urljoin(base_url, href))
pyrequire = anchor.get('data-requires-python')
pyrequire = html.unescape(pyrequire) if pyrequire else None
yanked_reason = anchor.get('data-yanked')
if yanked_reason:
yanked_reason = html.unescape(yanked_reason)
link = Link(
url,
comes_from=page_url,
requires_python=pyrequire,
yanked_reason=yanked_reason,
)
return link
class CacheablePageContent:
def __init__(self, page: "HTMLPage") -> None:
assert page.cache_link_parsing
self.page = page
def __eq__(self, other: object) -> bool:
return (isinstance(other, type(self)) and
self.page.url == other.page.url)
def __hash__(self) -> int:
return hash(self.page.url)
def with_cached_html_pages(
fn: Callable[["HTMLPage"], Iterable[Link]],
) -> Callable[["HTMLPage"], List[Link]]:
"""
Given a function that parses an Iterable[Link] from an HTMLPage, cache the
function's result (keyed by CacheablePageContent), unless the HTMLPage
`page` has `page.cache_link_parsing == False`.
"""
@functools.lru_cache(maxsize=None)
def wrapper(cacheable_page: CacheablePageContent) -> List[Link]:
return list(fn(cacheable_page.page))
@functools.wraps(fn)
def wrapper_wrapper(page: "HTMLPage") -> List[Link]:
if page.cache_link_parsing:
return wrapper(CacheablePageContent(page))
return list(fn(page))
return wrapper_wrapper
@with_cached_html_pages
def parse_links(page: "HTMLPage") -> Iterable[Link]:
"""
Parse an HTML document, and yield its anchor elements as Link objects.
"""
document = html5lib.parse(
page.content,
transport_encoding=page.encoding,
namespaceHTMLElements=False,
)
url = page.url
base_url = _determine_base_url(document, url)
for anchor in document.findall(".//a"):
link = _create_link_from_element(
anchor,
page_url=url,
base_url=base_url,
)
if link is None:
continue
yield link
class HTMLPage:
"""Represents one page, along with its URL"""
def __init__(
self,
content: bytes,
encoding: Optional[str],
url: str,
cache_link_parsing: bool = True,
) -> None:
"""
:param encoding: the encoding to decode the given content.
:param url: the URL from which the HTML was downloaded.
:param cache_link_parsing: whether links parsed from this page's url
should be cached. PyPI index urls should
have this set to False, for example.
"""
self.content = content
self.encoding = encoding
self.url = url
self.cache_link_parsing = cache_link_parsing
def __str__(self) -> str:
return redact_auth_from_url(self.url)
def _handle_get_page_fail(
link: Link,
reason: Union[str, Exception],
meth: Optional[Callable[..., None]] = None
) -> None:
if meth is None:
meth = logger.debug
meth("Could not fetch URL %s: %s - skipping", link, reason)
def _make_html_page(response: Response, cache_link_parsing: bool = True) -> HTMLPage:
encoding = _get_encoding_from_headers(response.headers)
return HTMLPage(
response.content,
encoding=encoding,
url=response.url,
cache_link_parsing=cache_link_parsing)
def _get_html_page(
link: Link, session: Optional[PipSession] = None
) -> Optional["HTMLPage"]:
if session is None:
raise TypeError(
"_get_html_page() missing 1 required keyword argument: 'session'"
)
url = link.url.split('#', 1)[0]
# Check for VCS schemes that do not support lookup as web pages.
vcs_scheme = _match_vcs_scheme(url)
if vcs_scheme:
logger.warning('Cannot look at %s URL %s because it does not support '
'lookup as web pages.', vcs_scheme, link)
return None
# Tack index.html onto file:// URLs that point to directories
scheme, _, path, _, _, _ = urllib.parse.urlparse(url)
if (scheme == 'file' and os.path.isdir(urllib.request.url2pathname(path))):
# add trailing slash if not present so urljoin doesn't trim
# final segment
if not url.endswith('/'):
url += '/'
url = urllib.parse.urljoin(url, 'index.html')
logger.debug(' file: URL is directory, getting %s', url)
try:
resp = _get_html_response(url, session=session)
except _NotHTTP:
logger.warning(
'Skipping page %s because it looks like an archive, and cannot '
'be checked by a HTTP HEAD request.', link,
)
except _NotHTML as exc:
logger.warning(
'Skipping page %s because the %s request got Content-Type: %s.'
'The only supported Content-Type is text/html',
link, exc.request_desc, exc.content_type,
)
except NetworkConnectionError as exc:
_handle_get_page_fail(link, exc)
except RetryError as exc:
_handle_get_page_fail(link, exc)
except SSLError as exc:
reason = "There was a problem confirming the ssl certificate: "
reason += str(exc)
_handle_get_page_fail(link, reason, meth=logger.info)
except requests.ConnectionError as exc:
_handle_get_page_fail(link, f"connection error: {exc}")
except requests.Timeout:
_handle_get_page_fail(link, "timed out")
else:
return _make_html_page(resp,
cache_link_parsing=link.cache_link_parsing)
return None
class CollectedSources(NamedTuple):
find_links: Sequence[Optional[LinkSource]]
index_urls: Sequence[Optional[LinkSource]]
class LinkCollector:
"""
Responsible for collecting Link objects from all configured locations,
making network requests as needed.
The class's main method is its collect_sources() method.
"""
def __init__(
self,
session: PipSession,
search_scope: SearchScope,
) -> None:
self.search_scope = search_scope
self.session = session
@classmethod
def create(
cls, session: PipSession,
options: Values,
suppress_no_index: bool = False
) -> "LinkCollector":
"""
:param session: The Session to use to make requests.
:param suppress_no_index: Whether to ignore the --no-index option
when constructing the SearchScope object.
"""
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index and not suppress_no_index:
logger.debug(
'Ignoring indexes: %s',
','.join(redact_auth_from_url(url) for url in index_urls),
)
index_urls = []
# Make sure find_links is a list before passing to create().
find_links = options.find_links or []
search_scope = SearchScope.create(
find_links=find_links, index_urls=index_urls,
)
link_collector = LinkCollector(
session=session, search_scope=search_scope,
)
return link_collector
@property
def find_links(self) -> List[str]:
return self.search_scope.find_links
def fetch_page(self, location: Link) -> Optional[HTMLPage]:
"""
Fetch an HTML page containing package links.
"""
return _get_html_page(location, session=self.session)
def collect_sources(
self,
project_name: str,
candidates_from_page: CandidatesFromPage,
) -> CollectedSources:
# The OrderedDict calls deduplicate sources by URL.
index_url_sources = collections.OrderedDict(
build_source(
loc,
candidates_from_page=candidates_from_page,
page_validator=self.session.is_secure_origin,
expand_dir=False,
cache_link_parsing=False,
)
for loc in self.search_scope.get_index_urls_locations(project_name)
).values()
find_links_sources = collections.OrderedDict(
build_source(
loc,
candidates_from_page=candidates_from_page,
page_validator=self.session.is_secure_origin,
expand_dir=True,
cache_link_parsing=True,
)
for loc in self.find_links
).values()
if logger.isEnabledFor(logging.DEBUG):
lines = [
f"* {s.link}"
for s in itertools.chain(find_links_sources, index_url_sources)
if s is not None and s.link is not None
]
lines = [
f"{len(lines)} location(s) to search "
f"for versions of {project_name}:"
] + lines
logger.debug("\n".join(lines))
return CollectedSources(
find_links=list(find_links_sources),
index_urls=list(index_url_sources),
)
| [
"[email protected]"
] | |
fe92f12313b60ba841ba3465d1b194b44aa39da8 | cfca97ea412ac367adf5807ab8391afb4ba38b81 | /products/forms.py | 3a8c438ab736a0c0e3be8b8d41a277b3aac4aaef | [] | no_license | Feitsarenko/steel_kiwi_task | 6063a050a1e1e0cdff26db0d37a2e8281fe8012e | 2fd4cbe54cfc893bff84e62172cfd61bdb874ad8 | refs/heads/master | 2020-04-17T05:03:41.634098 | 2019-02-19T01:18:53 | 2019-02-19T01:18:53 | 166,249,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | from django import forms
from .models import Comment, Product
from tinymce.widgets import TinyMCE
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ['text', ]
class ProductForm(forms.ModelForm):
description = forms.CharField(widget=TinyMCE(mce_attrs={'cols': 160, 'rows': 30}))
class Meta:
model = Product
fields = ['category', 'description', 'name', 'image', 'price', 'slug', 'in_top_list', 'grade',\
'static_out_top_list', ]
| [
"[email protected]"
] | |
f423b1a76188b4de151f95e2a00eb45fe76ebc02 | 3ab8d53f4ec6ace0f568e184ef49e5a9108e7f49 | /v1/final/main_runner.py | 68288328c6d15f530764afcab2ed9b5455bbf619 | [] | no_license | ikashilov/INF-3200 | ccfddd2bbcc28db5e92b7db4ee8b7a9c8d75c4c4 | fb0bdbc95d89893ad39fd71baa3e95753e592d4d | refs/heads/master | 2021-06-29T03:14:51.852590 | 2021-01-18T10:10:06 | 2021-01-18T10:10:06 | 211,906,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,836 | py | import os
import math
import json
import socket
import hashlib
import argparse
import threading
DEF_PORT = 5000 # default number of port
DEF_WORKERS = 8 # default number of nodes in chain
DEF_IP = socket.gethostname()
# Not actual data keys: these are the 16 hexadecimal key ids used to partition the key space for allocation.
keys = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f']
def worker(port, id):
os.system("./node.py --port " + str(port) + ' -n ' + str(id) )
def create_alloc_table(nodes_ids):
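    # Evenly partition the 16 hex key ids over the nodes; when 16 is not divisible by the
    # node count, the first (16 mod n) nodes each receive one extra key id.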
table = {}
step = len(keys) // len(nodes_ids)
j = 0 # C-style programming ;)
if (len(keys) % len(nodes_ids)) == 0:
# case 2,4,8,16
for i in range(0, len(keys)-step+1, step):
table[nodes_ids[j]] = keys[i:i+step]
j+=1
else:
bound = len(keys) - len(nodes_ids) * step
step1 = step + 1
for i in range(0, bound):
table[nodes_ids[i]] = keys[j:j+step1]
j+=step1
for i in range(bound, len(nodes_ids)):
table[nodes_ids[i]] = keys[j:j+step]
j+=step
return table
def create_dic(node, alloc_table):
successors = {}
for succ in fingers[node]:
successors[succ] = alloc_table[succ]
d = {'own': alloc_table[node], 'succ': successors}
return d
def parse_args():
parser = argparse.ArgumentParser(prog="Chord", description="Distributed storage")
parser.add_argument("-n", "--nodes_num", type=int, default=DEF_WORKERS, help="number of nodes to be created")
parser.add_argument("-p", "--port_num", type=int, default=DEF_PORT, help="initial port for the first node")
parser.add_argument("-i", "--nodes_ips", type=str, default=DEF_IP, help="ip addresses of all nodes in chord", nargs="+")
return parser.parse_args()
###############################################################################
args = parse_args()
# Get nodes list
nodes = []
# We run at one host but different ports:
if args.nodes_ips == DEF_IP:
all_ports = [str(args.port_num+i) for i in range(args.nodes_num)]
for port in all_ports:
nodes.append(DEF_IP+':'+port)
else:
    # -i was given: argparse (nargs="+") already returns a list of "host:port" strings
    nodes = list(args.nodes_ips)
    # derive each node's port; assume "host:port" entries and fall back to the default port otherwise
    all_ports = [n.split(':')[1] if ':' in n else str(DEF_PORT) for n in nodes]
# Create nodes ring
nodes_ring = nodes+nodes
alloc_table = create_alloc_table(nodes)
# m = number of finger-table entries per node; entry i will point 2**i positions ahead on the ring
m = int(math.ceil(math.sqrt(args.nodes_num)))
# Table of each node's successors
fingers = {}
for j, node in enumerate(nodes_ring[:len(nodes)]):
routes = []
for i in range(0, m):
routes.append(nodes_ring[j+2**i])
fingers[node] = routes
###############################################################################
thread_list = []
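# For every node: dump its own key range and successor table into a fingers<i> file,
# then launch a node.py process listening on that node's port.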
for i, node in enumerate(nodes):
f = open("fingers"+str(i),'w')
d = create_dic(node, alloc_table)
json.dump(d, f)
f.close()
thread = threading.Thread(target=worker, args=(all_ports[i],i,))
thread_list.append(thread)
thread.start()
#print("Whole chord has been successfully started") | [
"[email protected]"
] | |
5ff9a344e68b77e768b170d2cf0fecbcfecb9d32 | 7d09c12042280c81562f94a3148db34a10368263 | /ud120/choose_your_own/your_algorithm.py | 61a9db3606807673b19ca95606c9090e894c4000 | [] | no_license | prasannavarshan/Udacity-Machine-Learning | 3e708c4f284c1857490952786920e07a9ba84ca2 | c1d74756a5b87c3c5aa8bed7109e4d77970e4dc5 | refs/heads/master | 2020-04-13T13:37:15.197134 | 2018-06-04T16:39:08 | 2018-06-04T16:39:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,797 | py | #!/usr/bin/python
import matplotlib.pyplot as plt
from prep_terrain_data import makeTerrainData
from class_vis import prettyPicture
features_train, labels_train, features_test, labels_test = makeTerrainData()
### the training data (features_train, labels_train) have both "fast" and "slow"
### points mixed together--separate them so we can give them different colors
### in the scatterplot and identify them visually
grade_fast = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii] == 0]
bumpy_fast = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii] == 0]
grade_slow = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii] == 1]
bumpy_slow = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii] == 1]
#### initial visualization
plt.xlim(0.0, 1.0)
plt.ylim(0.0, 1.0)
plt.scatter(bumpy_fast, grade_fast, color="b", label="fast")
plt.scatter(bumpy_slow, grade_slow, color="r", label="slow")
plt.legend()
plt.xlabel("bumpiness")
plt.ylabel("grade")
plt.show()
################################################################################
### your code here! name your classifier object clf if you want the
### visualization code (prettyPicture) to show you the decision boundary
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score
clf = AdaBoostClassifier()
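# defaults: 50 decision-stump (max_depth=1) base estimators; tune n_estimators/learning_rate if accuracy is low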
clf.fit(features_train, labels_train)
pred = clf.predict(features_test)
print "accuracy_score:", accuracy_score(pred, labels_test)
scores = cross_val_score(clf, features_train, labels_train)
print "cross_val_score:", scores.mean()
try:
prettyPicture(clf, features_test, labels_test)
except NameError:
pass
| [
"[email protected]"
] | |
07cbe0bd1a2b160a107b9c7c726ccdf8a7a0b468 | 81409c89796e630edb93625340b21b881822099b | /cycles/demo_muscle.py | 9b63b06bbee4d9f7cdbcae05ba59eafedcbeafec | [
"Apache-2.0"
] | permissive | em-yu/BlenderToolbox | f8d1f829b8c224ac69e17845f107720ff61d550b | ac9da757c8c372237458467aa6663bcdbb86b803 | refs/heads/master | 2022-12-09T04:35:30.990704 | 2020-06-28T02:52:20 | 2020-06-28T02:52:20 | 291,939,154 | 0 | 0 | Apache-2.0 | 2020-09-01T08:19:51 | 2020-09-01T08:19:50 | null | UTF-8 | Python | false | false | 1,865 | py | import sys
sys.path.append('/Users/hsuehtil/Dropbox/BlenderToolbox/cycles')
from include import *
import bpy
outputPath = './results/demo_muscle.png'
# # init blender
imgRes_x = 720 # increase this for paper figures
imgRes_y = 720 # increase this for paper figures
numSamples = 50 # usually increase it to >200 for paper figures
exposure = 1.0
blenderInit(imgRes_x, imgRes_y, numSamples, exposure)
# read mesh
meshPath = '../meshes/spot.ply'
location = (-0.3, 0.6, -0.04)
rotation = (90, 0,0)
scale = (1.5,1.5,1.5)
mesh = readPLY(meshPath, location, rotation, scale)
# # set shading
bpy.ops.object.shade_smooth()
# bpy.ops.object.shade_flat()
# # subdivision
level = 2
subdivision(mesh, level)
# # set material
useless = (0,0,0,0)
meshColor = colorObj(useless, 0.5, 1.0, 1.4, 0.0, 0.0) # the color is fixed to blood red
fiberShape = [5.0,2.0,0.4] # this controls the muscle pattern (requires tuning)
bumpStrength = 0.4 # you can leave this as default
wrinkleness = 0.03 # you can leave this as default
maxBrightness = 0.85 # you can leave this as default
minBrightness = 0.3 # you can leave this as default
setMat_muscle(mesh, meshColor, fiberShape, bumpStrength, wrinkleness, maxBrightness,minBrightness)
# # set invisible plane (shadow catcher)
groundCenter = (0,0,0)
shadowDarkeness = 0.7
groundSize = 20
invisibleGround(groundCenter, groundSize, shadowDarkeness)
# # set camera
camLocation = (1.9,2,2.2)
lookAtLocation = (0,0,0.5)
focalLength = 45
cam = setCamera(camLocation, lookAtLocation, focalLength)
# # set sunlight
lightAngle = (-15,-34,-155)
strength = 2
shadowSoftness = 0.1
sun = setLight_sun(lightAngle, strength, shadowSoftness)
# # set ambient light
ambientColor = (0.2,0.2,0.2,1)
setLight_ambient(ambientColor)
# # save blender file
bpy.ops.wm.save_mainfile(filepath='./test.blend')
# # save rendering
renderImage(outputPath, cam) | [
"[email protected]"
] | |
acdd6e2c79fe6e2c0ad382632c952baaaf3223d2 | ea83e60e2be606813005081a9f1b9516de018c7d | /language/casper/evaluate/top_metrics.py | 590fcb6a4500098a18732c84de66406ebe5697bd | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | optimopium/language | 1562a1f150cf4374cf8d2e6a0b7ab4a44c5b8961 | bcc90d312aa355f507ed128e39b7f6ea4b709537 | refs/heads/master | 2022-04-03T03:51:28.831387 | 2022-03-16T21:41:17 | 2022-03-16T22:50:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,005 | py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Metrics for TOP and MTOP parses."""
from typing import Dict, List
from language.casper.utils import top_utils
def _safe_divide(x, y):
return x / y if y != 0 else 0.0
def top_metrics(targets: List[str], predictions: List[str]) -> Dict[str, float]:
"""Returns eval metrics for TOP and MTOP datasets."""
num_correct = 0
num_total = 0
num_invalid = 0
num_intent_correct = 0
num_frame_correct = 0
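  # full_accuracy: exact match of the serialized parse; intent_accuracy: top-level intent
  # only; intent_arg_accuracy: the complete intent/argument frame signature.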
for target, predicted in zip(targets, predictions):
if target == predicted:
num_correct += 1
num_total += 1
target_lf = top_utils.deserialize_top(target)
predicted_lf = top_utils.deserialize_top(predicted)
assert target_lf is not None
if not predicted_lf:
num_invalid += 1
continue
target_frame = top_utils.get_frame_top(target_lf)
predicted_frame = top_utils.get_frame_top(predicted_lf)
target_intent = target_frame.split("-")[0]
predicted_intent = predicted_frame.split("-")[0]
num_intent_correct += int(predicted_intent == target_intent)
num_frame_correct += int(predicted_frame == target_frame)
return dict(
num_total=num_total,
full_accuracy=_safe_divide(num_correct, num_total),
intent_accuracy=_safe_divide(num_intent_correct, num_total),
intent_arg_accuracy=_safe_divide(num_frame_correct, num_total),
invalid_predictions=_safe_divide(num_invalid, num_total))
| [
"[email protected]"
] | |
bbd6d443e8b1b97a890296c60a912bbcc9f365db | f6a5b5180762b25b556145d0d8e77c4f0347dce1 | /Uri/URI_1078.py | 7aeb12cc96f8f592b809e99e59fbfabae509570e | [] | no_license | DarlanNoetzold/URI_Python | 9864f3f75ce372f99e0df650c4e7450f107fa73d | a7b3201273d8f2cd9d9e4cdbfd53652bbe66d01e | refs/heads/master | 2023-08-17T03:44:09.944136 | 2021-10-02T18:00:28 | 2021-10-02T18:00:28 | 284,829,292 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | n = int(input())
for i in range(1, 11):
print('{} x {} = {}'.format(i , n , i * n)) | [
"[email protected]"
] | |
35b543f03ab298a2ea557d017de132c0badfff58 | fc1aed46f5fc2fd742ccacd8fb95b080aaf4aa60 | /src/BEA_Algorithm/BEA.py | 88b64e05e0c4593f2871d186f9cb853bb05954e4 | [] | no_license | toan207/DistributeDatabaseProject | 20b0a8c35eb3219a01e9b60fbd10a2e367839167 | 7939355f0549115289f334f79905abfdb8b600c9 | refs/heads/main | 2023-05-26T19:24:47.698295 | 2021-06-14T17:03:17 | 2021-06-14T17:03:17 | 376,887,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,477 | py | from math import inf
s = [[3,3,7],
[0,9,3],
[8,1,9]]
aff = [sum(i) for i in s]
mx = [
[1,0,1,1,1],
[1,0,0,1,1],
[1,0,0,1,1]]
fo = open("test.txt", "r")
n = int(fo.readline())
mx = []
s = []
for i in range(n):
mx.append(list(map(int, fo.readline().split())))
m = int(fo.readline())
for i in range(m):
s.append(list(map(int, fo.readline().split())))
aff = [sum(i) for i in s]
def printMx():
print('\t\tMa tran khoi tao')
for i in range(len(mx[0])):
print('\tC' + str(i + 1), end='')
for i in range(len(s[0])):
print('\tS' + str(i + 1), end='')
print('\tAff')
a = 0
for i in range(len(s)):
print('AP' + str(a + 1), end='\t')
a += 1
for x in mx[i]:
print(x,end='\t')
for x in s[i]:
print(x,end='\t')
print(aff[i])
print()
def calcAffMX():
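    # Attribute-affinity matrix: aff(Ai, Aj) sums the access frequencies of every query
    # that uses both attributes; each diagonal entry holds the sum of its row.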
mx_t = [[0] * (len(mx[0])) for j in range(len(mx[0]))]
for i in range(len(mx)):
for j in range(len(mx[i])):
for x in range(len(mx[i])):
if mx[i][x] and mx[i][j] and j != x:
mx_t[x][j] += aff[i]
for i in range(len(mx_t)):
mx_t[i][i] = sum(mx_t[i])
return mx_t
def calcBondShow(posX,posY,X,Y):
bond = 0
product = []
for i in range(len(X)):
bond += X[i] * Y[i]
product.append(X[i] * Y[i])
print("\tC" + str(posX) + " = ",X,sep='')
print("\tC" + str(posY) + " = ",Y,sep='')
print("\tproduct = ",product,sep='')
print("\tBond = ",end="")
for i in range(len(X)):
print(product[i],end='')
if i != len(X) - 1:
print(" + ",end='')
print(" = " + str(bond))
print()
return bond
def calcBond(X,Y):
bond = 0
for i in range(len(X)):
bond += X[i] * Y[i]
return bond
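# cont(Ai, Ak, Aj): net contribution of placing column Ak between Ai and Aj,
# computed as 2*bond(Ai,Ak) + 2*bond(Ak,Aj) - 2*bond(Ai,Aj)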
def calcCont(X,Y,Z):
return 2*calcBond(X,Y) + 2*calcBond(Y,Z) - 2*calcBond(X,Z)
mx_t = calcAffMX()
default = [i for i in range(len(mx_t))]
def printMatrix(mx, order = default):
for i in order:
print('\tC' + str(i + 1), end='')
print()
a = 0
for i in order:
print('C' + str(i+1), end='\t')
a += 1
for j in order:
            print(mx[i][j], end='\t')  # use the matrix passed in rather than the global
print()
def solve():
printMx()
print('\t\tMa tran hap dan')
printMatrix(mx_t)
print()
print('\tTinh Cont va Bond sap xep lai ma tran')
order = [0,1]
n = len(mx_t)
for x in range(2,n):
print("\t\tXep C" + str(x+1))
pos = -1
v0 = [0] * n
cont = calcCont(v0,mx_t[x],mx_t[order[0]])
calcBondShow(0,x+1,v0,mx_t[x])
calcBondShow(x+1,order[0] + 1,mx_t[x],mx_t[order[0]])
calcBondShow(0,order[0] + 1,v0,mx_t[order[0]])
print("Cont(" + "C0" + ", C" + str(x+1) + ", C" + str(order[0]+1) +") = 2*" + str(calcBond(v0,mx_t[x])) + " + 2*" + str(calcBond(mx_t[x],mx_t[order[0]])) + " - 2*" + str(calcBond(v0,mx_t[order[0]])) + " = " + str(cont))
print()
for i in range(len(order)-1):
temp = calcCont(mx_t[order[i]],mx_t[x],mx_t[order[i+1]])
if cont < temp:
cont = temp
pos = i
calcBondShow(order[i] + 1,x+1,mx_t[order[i]],mx_t[x])
calcBondShow(x+1,order[i+1] + 1,mx_t[x],mx_t[order[i+1]])
calcBondShow(order[i] + 1,order[i+1] + 1,mx_t[order[i]],mx_t[order[i+1]])
print("Cont(" + "C" + str(order[i]+1) + ", C" + str(x+1) + ", C" + str(order[i+1]+1) +") = 2*" + str(calcBond(mx_t[order[i]],mx_t[x])) + " + 2*" + str(calcBond(mx_t[x],mx_t[order[i+1]])) + " - 2*" + str(calcBond(mx_t[order[i]],mx_t[order[i+1]])) + " = " + str(temp))
print()
last = calcCont(mx_t[order[-1]],mx_t[x],v0)
calcBondShow(order[-1]+1,x+1,mx_t[order[-1]],mx_t[x])
calcBondShow(x+1,0,mx_t[x],v0)
calcBondShow(order[-1]+1,0,mx_t[order[-1]],v0)
print("Cont(" + "C" + str(order[-1]+1) + ", C" + str(x+1) + ", C0" +") = 2*" + str(calcBond(mx_t[order[-1]],mx_t[x])) + " + 2*" + str(calcBond(mx_t[x],v0)) + " - 2*" + str(calcBond(v0,mx_t[order[-1]])) + " = " + str(last))
if cont < last:
cont = last
pos = x-1
if pos == -1:
print("Chon Cont(C0, C" + str(x+1) + ", C" + str(order[0]+1) + ") = " + str(cont))
elif pos == x-1:
print("Chon Cont(C" + str(order[-1]+1) + ", C" + str(x+1) + ", C" + str(0) +") = " + str(cont))
else:
print("Chon Cont(C" + str(order[pos] + 1) + ", C" + str(x+1) + ", C" + str(order[pos+1] + 1) +") = " + str(cont))
order = order[:pos+1] + [x] + order[pos+1:]
print("Ma tran sau khi sap xep la:")
printMatrix(mx_t,order)
print()
mxZ = []
pos = 0
Z = -inf
print("\t\tTinh Z lan luot tu tren xuong")
for o in range(n-1):
TC = []
BC = []
BOC = []
TCW = 0
BCW = 0
BOCW = 0
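        # TCW/BCW/BOCW: total access weight of queries that touch only the top fragment,
        # only the bottom fragment, or both; split quality is z = TCW*BCW - BOCW**2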
for x in range(len(mx)):
fT = 1
fB = 1
for y in range(n):
if mx[x][y] and y not in order[:o+1]:
fT = 0
if mx[x][y] and y not in order[o+1:]:
fB = 0
TCW += aff[x]*fT
BCW += aff[x]*fB
BOCW += aff[x]*(not fT and not fB)
if fT:
TC.append([x,aff[x]])
if fB:
BC.append([x,aff[x]])
if not fB and not fT:
BOC.append([x,aff[x]])
tZ = TCW * BCW - BOCW * BOCW
mxZ.append(tZ)
print("TCW = ", end='')
for i in range(len(TC)):
print('AFF(' + str(TC[i][0] + 1) + ') ',end='')
if i != len(TC) - 1:
print(' + ', end='')
else:
print(' = ', end='')
if len(TC) > 1:
for i in range(len(TC)):
print(TC[i][1],end='')
if i != len(TC) - 1:
print(' + ', end='')
else:
print(' = ', end='')
print(TCW)
print("BCW = ", end='')
for i in range(len(BC)):
print('AFF(' + str(BC[i][0] + 1) + ') ',end='')
if i != len(BC) - 1:
print(' + ', end='')
else:
print(' = ', end='')
if len(BC) > 1:
for i in range(len(BC)):
print(BC[i][1],end='')
if i != len(BC) - 1:
print(' + ', end='')
else:
print(' = ', end='')
print(BCW)
print("BOCW = ", end='')
for i in range(len(BOC)):
print('AFF(' + str(BOC[i][0] + 1) + ') ',end='')
if i != len(BOC) - 1:
print(' + ', end='')
else:
print(' = ', end='')
if len(BOC) > 1:
for i in range(len(BOC)):
print(BOC[i][1],end='')
if i != len(BOC) - 1:
print(' + ', end='')
else:
print(' = ', end='')
print(BOCW)
print('Z' + str(order[o+1] + 1) + ' = TCW*BCW - BOCW^2 = ' + str(TCW) + '*' + str(BCW) + ' - ' + str(BOCW) + '^2 = ' + str(tZ))
print()
if tZ > Z:
Z = tZ
pos = o
print('=> Ta co Zmax = Z' + str(order[pos+1] + 1) + ' =',Z,'tai hang va cot C' + str(order[pos + 1] + 1))
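    # also evaluate the wrap-around ("inner block") split obtained by rotating the column order by one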
newOrder = order[1:] + order[:1]
inB = len(newOrder)
f = 0
print('\n\t\tMa tran sau khi chuyen InnerBlock')
printMatrix(mx_t,newOrder)
TC = []
BC = []
BOC = []
TCW = 0
BCW = 0
BOCW = 0
for x in range(len(mx)):
fT = 1
fB = 1
for y in range(n):
if mx[x][y] and y not in newOrder[:inB-2]:
fT = 0
if mx[x][y] and y not in newOrder[inB-2:]:
fB = 0
TCW += aff[x]*fT
BCW += aff[x]*fB
BOCW += aff[x]*(not fT and not fB)
if fT:
TC.append([x,aff[x]])
if fB:
BC.append([x,aff[x]])
if not fB and not fT:
BOC.append([x,aff[x]])
ibZ = TCW * BCW - BOCW * BOCW
mxZ.append(ibZ)
print("TCW = ", end='')
for i in range(len(TC)):
print('AFF(' + str(TC[i][0] + 1) + ') ',end='')
if i != len(TC) - 1:
print(' + ', end='')
else:
print(' = ', end='')
if len(TC) > 1:
for i in range(len(TC)):
print(TC[i][1],end='')
if i != len(TC) - 1:
print(' + ', end='')
else:
print(' = ', end='')
print(TCW)
print("BCW = ", end='')
for i in range(len(BC)):
print('AFF(' + str(BC[i][0] + 1) + ') ',end='')
if i != len(BC) - 1:
print(' + ', end='')
else:
print(' = ', end='')
if len(BC) > 1:
for i in range(len(BC)):
print(BC[i][1],end='')
if i != len(BC) - 1:
print(' + ', end='')
else:
print(' = ', end='')
print(BCW)
print("BOCW = ", end='')
for i in range(len(BOC)):
print('AFF(' + str(BOC[i][0] + 1) + ') ',end='')
if i != len(BOC) - 1:
print(' + ', end='')
else:
print(' = ', end='')
if len(BOC) > 1:
for i in range(len(BOC)):
print(BOC[i][1],end='')
if i != len(BOC) - 1:
print(' + ', end='')
else:
print(' = ', end='')
print(BOCW)
print('Z = TCW*BCW - BOCW^2 = ' + str(TCW) + '*' + str(BCW) + ' - ' + str(BOCW) + '^2 = ' + str(ibZ))
print()
print('Cac Z da duoc tinh = ',mxZ)
if not f and ibZ > Z:
print('=> Ta chon Z =',max(mxZ),'la innerBlock')
print('=> ', end='')
newOrder = list(map(lambda x: "C" + str(x + 1), newOrder))
print("Sau khi thêm khóa C cho vùng trên ta được kết quả hai mảnh dọc sau: ", "VF1",["C"] + newOrder[:inB-2],", VF2",["C"] + newOrder[inB-2:],sep='')
else:
print('=> Ta chon Z = ' + str(max(mxZ)) +' la Z' + str(order[pos+1] + 1))
print('=> ', end='')
order = list(map(lambda x: "C" + str(x + 1), order))
print("Sau khi thêm khóa C cho vùng trên ta được kết quả hai mảnh dọc sau: ", "VF1",["C"] + order[pos+1:],", VF2",["C"] + order[:pos+1],sep='')
solve() | [
"[email protected]"
] | |
3ec194341e228ccc5a47c0ec5c399d759e6710e2 | e543efeae918579a04b2f8f99ca5654efa4db94d | /2020/Day 5/1.py | 9b11f59e23ee4c539d8efed568c7a0f26098aed6 | [] | no_license | Matthias1590/Advent-of-Code | f5800f2090315eba06f2ed0c75503b51c3bec404 | 000ccf2bade9464c95af909b5c4d7355c43ac0cf | refs/heads/master | 2023-01-30T12:39:46.536505 | 2020-12-08T20:31:34 | 2020-12-08T20:31:34 | 318,288,777 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | with open("Day 5/input.txt", "r") as inputFile:
puzzleInput = inputFile.readlines(); seats =[]
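    # each boarding pass encodes the row in its first 7 chars (F/B = lower/upper half of 0-127)
    # and the column in its last 3 chars (L/R = lower/upper half of 0-7)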
for seat in puzzleInput:
seat = seat.strip(); row = range(128)
for char in seat[0:7]:
if char == "F":
row = row[:int(len(row)/2)]
elif char == "B":
row = row[int(len(row)/2):]
row = row[0]; column = range(8)
for char in seat[7:]:
if char == "R":
column = column[int(len(column)/2):]
elif char == "L":
column = column[:int(len(column)/2)]
column = column[0]; seats.append(row * 8 + column)
print(max(seats)) | [
"[email protected]"
] | |
da7e952a94a1d21c81cb6fafdf8a4fa0298e86c0 | 26d2584e3f51c77e2967ec48a7189413c1ffbfd9 | /apps/invoice/views/__init__.py | 77447bbf50822c65b3a12e82dbb6a78046efd700 | [] | no_license | classSaltyFish/invoiceRecognition | 9a0c1dc9ac19293c502abf92f8c61263896609a2 | fa20bc8d6c82d167189f5c80a2012b995e7c7005 | refs/heads/master | 2022-12-17T00:02:55.159891 | 2020-02-28T09:01:23 | 2020-02-28T09:01:23 | 241,006,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2020/2/19 13:51
# @Author : coldsun
# @File : __init__.py.py
# @Software: PyCharm
| [
"[email protected]"
] | |
2e89c3bb1577035563a3974bfab97259ab5d7d06 | cbaa8c30fb4352c241dc81c1c6cc5079aad2526b | /trees/getExterior.py | db607a9513b8a6267e760fc142a4b70268678399 | [] | no_license | Fatou1993/interviews-preparation | 68b7f870a4a0e93717f97ec1a4e1438bbb2a3410 | 1b733a79007362a4816a896ebd6c199b4098af36 | refs/heads/master | 2021-05-04T00:47:16.363097 | 2018-02-05T19:29:53 | 2018-02-05T19:29:53 | 120,351,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,589 | py | class TreeNode:
def __init__(self, val, left=None, right=None):
self.val = val
self.left = left
self.right = right
def printRoot(self):
if self.val:
print self.val,
if self.left :
self.left.printRoot()
if self.right:
self.right.printRoot()
def getLeaves(root):
#print(root.val if root else "")
if not root :
return []
if not root.left and not root.right :
leaves = [root]
else:
left_leaves = getLeaves(root.left)
right_leaves = getLeaves(root.right)
leaves = left_leaves+right_leaves
#print("Hi", root.val, leaves)
return leaves
def getExterior(root):
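    # exterior = left spine (top-down) + leaves (left to right) + right spine (bottom-up)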
if not root :
return None
left_side = [root]
node = root.left
while node :
left_side.append(node)
node = node.left
leaves = getLeaves(root)
right_side = []
node = root.right
while node :
right_side.append(node)
node = node.right
    # drop leaves that already appear on the left or right spine so they are not listed twice
    spine_ids = set(id(n) for n in left_side + right_side)
    middle = [leaf for leaf in leaves if id(leaf) not in spine_ids]
    return left_side + middle + right_side[::-1]
if __name__ == "__main__":
root = TreeNode("A")
root.left = TreeNode("B")
root.left.left = TreeNode("C", TreeNode("D"), TreeNode("E"))
root.left.right = TreeNode("F", None, TreeNode("G", TreeNode("H")))
root.right = TreeNode("I")
root.right.left = TreeNode("J", None, TreeNode("K", TreeNode("L", None, TreeNode("M")), TreeNode("N")))
root.right.right = TreeNode("O", None, TreeNode("P"))
root.printRoot()
print ""
ext = getExterior(root)
for e in ext :
print e.val,
| [
"[email protected]"
] | |
45bac4dab50cbe817b140adfe75e19d24aa25990 | 57e1b81bcdd545ba5f592c37834ef2c4cf475ddc | /backend/s_ash_official_app_20896/urls.py | 18c2b429f66f35c5c029dcbf04ab23010411c5d0 | [] | no_license | crowdbotics-apps/s-ash-official-app-20896 | 87bfa8565c04b944faa1445798570eb767d07c14 | f86ae73e7b2af0af7ef8acf8ef6395d0e731f124 | refs/heads/master | 2022-12-23T13:24:37.113975 | 2020-09-30T18:29:04 | 2020-09-30T18:29:04 | 300,015,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,979 | py | """s_ash_official_app_20896 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "S-ash official App"
admin.site.site_title = "S-ash official App Admin Portal"
admin.site.index_title = "S-ash official App Admin"
# swagger
api_info = openapi.Info(
title="S-ash official App API",
default_version="v1",
description="API documentation for S-ash official App App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"[email protected]"
] | |
a37917a25a315482846d92c6aab56aa177ed98eb | 991da28acff0100caded971893b9412f98be8825 | /ck_project/ck_project/urls.py | 01d07a49fb60592cb2d8444a92d6ad5c022ed031 | [] | no_license | baidai/Django-Rest-Frame-Work-Web-Crawler | 682043a6708dddc5923a2bd28f9f5f2219c3332f | 6c5e727d90a5c36d9ca6db447d328e5f3585adf2 | refs/heads/master | 2020-05-04T18:07:35.818572 | 2019-04-03T17:54:06 | 2019-04-03T17:54:06 | 179,340,434 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 821 | py | """ck_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('ck_app.urls')),
] | [
"[email protected]"
] | |
470d17a803eda4f88639ad4196594c54374cf9a7 | 3962320a58004aa7f4670e2a5e7c3ed7466b1fbc | /scripts/MiniDemo.py | 9a6d64324e81d59767b281d852092be9352b0a23 | [] | no_license | xochilt/protoboard | 6b401b5fef8163678be9d4b2bafd2d2ca249619a | 5b86872cda0f2a72e97a49e0ebc9abae39802e88 | refs/heads/master | 2021-01-16T21:54:37.657287 | 2015-09-28T21:03:46 | 2015-09-28T21:03:46 | 43,321,397 | 0 | 0 | null | 2015-09-28T19:15:26 | 2015-09-28T19:15:26 | null | UTF-8 | Python | false | false | 7,213 | py | # -*- coding: utf-8 -*-
__author__ = 'mario'
if __name__ == "__main__":
import os
from django.core.wsgi import get_wsgi_application
print "####### DJANGO SETTINGS"
os.environ['DJANGO_SETTINGS_MODULE'] = "protoboard.settings"
application = get_wsgi_application()
from activitytree.models import LearningStyleInventory, LearningActivity, Course, UserLearningActivity
from django.contrib.auth.models import User
from activitytree.interaction_handler import SimpleSequencing
LearningActivity.objects.all().delete()
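    # wipe any existing learning activities before seeding the demo course below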
Demo = LearningActivity( name = 'Protoboard 101', slug = 'Demo',
uri = "/activity/demo",
parent = None,
root = None,
flow = True,
forward_only = False,
choice = True,
choice_exit = False,
rollup_rule = "satisfied IF All satisfied",
rollup_objective = True,
rollup_progress = True,
is_container = True,
is_visible = True,
order_in_container = 0
)
Demo.save()
description= u"""
<p> Este es un curso de ejemplo para mostrar la funcionalidad de <code>protoboard</code>.
Se muestran los tipos de ejercicios y recursos que se pueden utilizar para crear cursos de programación. </p>"""
cursoDemo = Course(short_description=description, root=Demo)
cursoDemo.save()
preliminar = LearningActivity( name = 'El secuenciado simple', slug = 'Preliminar',
uri = "/activity/SecuenciadoSimple",
# lom =
parent = Demo, root = Demo,
heading="1. Secuenciado simple",
description = u"""Protoboard utiliza reglas para el secuenciado de actividades de aprendizaje. Aquí se explica de que se trata. De hecho hay una regla que estipula que no puedes visitar la actividad siguiente hasta ver esta.""",
image = "https://s3.amazonaws.com/learning-python/IntroVideo.png",
pre_condition_rule = "",
post_condition_rule = "",
rollup_rule = "",
rollup_objective = True,
rollup_progress = True,
is_container = False,
is_visible = True,
order_in_container = 0
)
preliminar.save()
recursos = LearningActivity( name = 'Recursos', slug = 'Recursos',
uri = "/activity/Recursos",
# lom =
parent = Demo, root = Demo,
heading="2. Recursos",
secondary_text = "Contenedor",
description = u"""Este es un contenedor con varias actividades, estará deshabilitado hasta que visites la actividad Secuenciado Simple.""",
pre_condition_rule = """
if get_attr('/activity/SecuenciadoSimple','objective_status') == 'satisfied':
activity['pre_condition'] = ''
else:
activity['pre_condition'] = 'disabled'
""",
post_condition_rule = "",
flow = True,
forward_only = False,
choice = True,
choice_exit = False,
rollup_rule = "satisfied IF All satisfied",
rollup_objective = True,
rollup_progress = True,
is_container = True,
is_visible = True,
order_in_container = 1
)
recursos.save()
video = LearningActivity( name = 'Video', slug = '',
uri = "/activity/video/intro",
# lom =
parent = recursos, root = Demo,
heading="Ejemplo de Video",
description = u"""Ejemplo de video, al llegar a los 15 segundos se salta a la siguiente actividad.""",
pre_condition_rule = "",
post_condition_rule = "",
is_container = False,
is_visible = True,
order_in_container = 0
)
video.save()
test = LearningActivity( name = 'Quiz', slug = '',
uri = "/test/demo",
# lom =
parent = recursos, root = Demo,
heading="Ejemplo de un Quiz",
description = u"""Máximo 4 intentos.""",
pre_condition_rule = "",
post_condition_rule = "",
is_container = False,
is_visible = True,
order_in_container = 1
)
test.save()
programas = LearningActivity( name = 'Ejercicios de Programación', slug = '',
uri = "/activity/Ejercicios",
# lom =
parent = Demo, root = Demo,
heading="3. Ejercicios",
secondary_text = "Contenedor",
description = u"""Ejemplos de los distintos lenguajes de programación, con los que se pueden hacer ejercicios""",
pre_condition_rule = "",
post_condition_rule = "",
flow = True,
forward_only = True,
choice = False,
choice_exit = False,
rollup_rule = "satisfied IF All satisfied",
rollup_objective = True,
rollup_progress = True,
is_container = True,
is_visible = True,
order_in_container = 2
)
programas.save()
csharp = LearningActivity( name = 'CSharp', slug = '',
uri = "/program/csharp/1",
# lom =
parent = programas, root = Demo,
heading="C#",
description = u"""C# es un lenguaje de programación orientado a objetos desarrollado y estandarizado por Microsoft como parte de su plataforma .NET""",
choice_exit = False,
pre_condition_rule = "",
post_condition_rule = "",
is_container = False,
is_visible = True,
order_in_container = 0
)
csharp.save()
javascript = LearningActivity( name = 'Javascript', slug = '',
uri = "/program/js/1",
# lom =
parent = programas, root = Demo,
heading="javascript",
description = u"""es un lenguaje de programación interpretado, dialecto del estándar ECMAScript. Se define como orientado a objetos,3 basado en prototipos, imperativo, débilmente tipado y dinámico.""",
choice_exit = False,
pre_condition_rule = "",
post_condition_rule = "",
is_container = False,
is_visible = True,
order_in_container = 1
)
javascript.save()
Java = LearningActivity( name = 'Java', slug = '',
uri = "/program/java/1",
# lom =
parent = programas, root = Demo,
heading="Java",
description = u"""Su intención es permitir que los desarrolladores de aplicaciones escriban el programa una vez y lo ejecuten en cualquier dispositivo""",
choice_exit = False,
pre_condition_rule = "",
post_condition_rule = "",
is_container = False,
is_visible = True,
order_in_container = 2
)
Java.save()
JQuery= LearningActivity( name = 'jQuery', slug = '',
uri = "/program/js/2",
# lom =
parent = programas, root = Demo,
heading="jQuery",
description = u"""jQuery es una biblioteca de JavaScript, creada inicialmente por John Resig, que permite simplificar la manera de interactuar con los documentos HTML, manipular el árbol DOM, manejar eventos, desarrollar animaciones y agregar interacción con la técnica AJAX a páginas web.""",
choice_exit = False,
pre_condition_rule = "",
post_condition_rule = "",
is_container = False,
is_visible = True,
order_in_container = 3
)
JQuery.save()
Py = LearningActivity( name = 'Python', slug = '',
uri = "/program/suma/3",
# lom =
parent = programas, root = Demo,
heading="Python",
description = u"""Es un lenguaje interpretado, usa tipado dinámico y es multiplataforma. Debes completar la actividad de JQuery para desbloquear esta actividad.""",
choice_exit = False,
pre_condition_rule = """
if get_attr('/program/js/2','objective_status') == 'satisfied':
activity['pre_condition'] = ''
else:
activity['pre_condition'] = 'disabled'
""",
post_condition_rule = "",
is_container = False,
is_visible = True,
order_in_container = 4
)
Py.save()
| [
"[email protected]"
] | |
d5a28461b8b1471cb66c036a2c61e6d7cb6f9e3d | 1284718203be50b23dcd1f6159746cfa42a04163 | /tensorflow_data/gdn/startgoalhardthres_highthresh_flowpenal/conf.py | ec97502a7db25b911d795bc532aa4155a7045770 | [] | no_license | febert/robustness_via_retrying | 8fe4106d7705228ff339f9643518a80c0a243d36 | 1def282dc22f24b72c51ff1ef9ea1a7a83291369 | refs/heads/master | 2020-03-31T19:33:39.664525 | 2018-11-07T21:52:56 | 2018-11-07T21:52:56 | 152,502,702 | 17 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | py | import python_visual_mpc
base_dir = python_visual_mpc.__file__
base_dir = '/'.join(str.split(base_dir, '/')[:-2])
# tf record data location:
DATA_DIR = base_dir + '/pushing_data/cartgripper_startgoal_4step/train'
import os
current_dir = os.path.dirname(os.path.realpath(__file__))
# local output directory
OUT_DIR = current_dir + '/modeldata'
configuration = {
'experiment_name': 'correction',
'data_dir': DATA_DIR, # 'directory containing data.' ,
'output_dir': OUT_DIR, #'directory for model checkpoints.' ,
'current_dir': base_dir, #'directory for writing summary.' ,
'num_iterations':50000,
'sequence_length':4,
'train_val_split':.95,
'visualize':'',
'skip_frame':1,
'batch_size': 64, #'batch size for training' ,
'learning_rate': 0.001, #'the base learning rate of the generator' ,
'normalization':'None',
'orig_size': [48,64],
'norm':'charbonnier',
'smoothcost':1e-6,
'smoothmode':'2nd',
'fwd_bwd':'',
'flow_diff_cost':1e-4,
'hard_occ_thresh':'',
'occlusion_handling':1e-4,
'occ_thres_mult':0.1,
'occ_thres_offset':0.5,
'flow_penal':1e-4,
'image_only':'',
} | [
"[email protected]"
] | |
a7e3a1788811eecdecc95edf3667ea9760163274 | 47227a083fa428f357d8dbf91e4b4c7294503ab1 | /基础篇/第8章-类与对象/8.4类关联结构/一对多关联结构.py | 4a0c2418e92c62b53ebb76099663f5f1f91e0990 | [] | no_license | kongziqing/Python-2lever | 9a7b890126711e609a681fceb5a90f1f87d60d54 | bbbd57d3685b6430392f044c6c079dc52ac96003 | refs/heads/master | 2022-12-10T06:43:37.913315 | 2020-09-14T04:04:14 | 2020-09-14T04:04:14 | 285,441,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,301 | py | """
Building on the one-to-one class association and combining it with sequence structures,
a one-to-many association can be implemented. Among class reference associations, the
one-to-many structure is a fairly common form. For example, suppose we want to describe
an association in which a department has several employees and, to make department
management easier, each department has one primary leader and one deputy leader.
"""
# coding : utf-8
class Dept:  # department class
    def __init__(self, **kwargs):  # constructor
        self.__dname = kwargs.get("dname")  # initialize the dname attribute
        self.__loc = kwargs.get("loc")  # initialize the loc attribute
        self.__emps = []  # holds multiple employees
    def get_emps(self):  # get all employee objects
        return self.__emps  # return a reference to the employee list
    def get_info(self):  # get department info
        return "【Dept类】部门名称:%s,部门位置:%s" % (self.__dname, self.__loc)
    # related setter/getter methods omitted...
class Emp:  # employee class
    def __init__(self, **kwargs):  # constructor
        self.__ename = kwargs.get("ename")  # initialize the ename attribute
        self.__sal = kwargs.get("sal")  # initialize the sal attribute
    def set_mgr(self, mgr):  # set the employee's reference to a leader
        self.__mgr = mgr  # store the instance reference
    def get_mgr(self):  # get the leader
        if "_Emp__mgr" in dir(self):  # check whether the "__mgr" attribute exists
            return self.__mgr  # return the object if present
        else:  # no leader
            return None  # return None
    def set_dept(self, dept):  # set the department the employee belongs to
        self.__dept = dept  # store the Dept instance reference
    def get_dept(self):  # get the department the employee belongs to
        return self.__dept  # return the Dept instance reference
    def get_info(self):  # get employee info
        return "【Emp类】雇员姓名:%s,月薪:%s" % (self.__ename, self.__sal)  # return object info
    # related setter/getter methods omitted...
def main():  # main function
    dept = Dept(dname="优拓教学部", loc="北京")  # instantiate a Dept object
    emp_a = Emp(ename="于顺", sal=35000.00)  # instantiate an Emp object
    emp_b = Emp(ename="陈浩东", sal=8500.00)  # instantiate an Emp object
    emp_c = Emp(ename="公孙夏丹", sal=7000.00)  # instantiate an Emp object
    emp_a.set_dept(dept)  # associate the employee with the department
    emp_b.set_dept(dept)  # associate the employee with the department
    emp_c.set_dept(dept)  # associate the employee with the department
    emp_b.set_mgr(emp_a)  # associate the employee with a leader
    emp_c.set_mgr(emp_b)  # associate the employee with a leader
    dept.get_emps().append(emp_a)  # associate the department with the employee
    dept.get_emps().append(emp_b)  # associate the department with the employee
    dept.get_emps().append(emp_c)  # associate the department with the employee
    print(dept.get_info())  # print the department info
    for emp in dept.get_emps():  # print every employee of the department
        print(emp.get_info())  # employee info
        if emp.get_mgr() != None:  # if this employee has a leader
            print("\t|- %s" % emp.get_mgr().get_info())  # print the leader's info
if __name__ == "__main__":  # only run when executed as a script
    main()  # call the main function
| [
"[email protected]"
] | |
60e40d8069fd7ee2df3209dbe6f1987e15dbb7c8 | 6def13753e8278b38eaca2a5d3c65cb2d504671b | /Length of Last Word.py | 17f5b624ba14be7673cc83c640cfb69e067284f2 | [] | no_license | rundongliu/leetcode-python | 46fad4ca426ec7825286f35d7500486166588827 | dac2699b49a7eb5a4ae705ffa0ff2245fa7435c2 | refs/heads/master | 2021-01-17T14:47:19.769829 | 2015-02-04T19:47:00 | 2015-02-04T19:47:00 | 29,038,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | class Solution:
# @param s, a string
# @return an integer
def lengthOfLastWord(self, s):
flag = 0
count = 0
for c in reversed(s):
if ord('a')<=ord(c)<=ord('z') or ord('A')<=ord(c)<=ord('Z'):
if flag==0:
flag = 1
count = 1
else:
count += 1
else:
if flag==1:
return count
if flag==1:
return count
return 0 | [
"[email protected]"
] | |
d2c0e9a8b234d337660dc36ed01f6277cad62565 | 959f7fc2ba6d07ff314b06bbbc46e42ba68b89bb | /venv/Scripts/django-admin.py | 7675565d802bf1515a9e4dad97eb8c2cc6c06fe1 | [] | no_license | fengbin311/my_blog | 9dedc4e291d0f7bd089a41463f44322e107c9ba8 | f88cd225d87d681856eee61f0edb970e7d3e5795 | refs/heads/master | 2022-10-31T23:53:58.975924 | 2018-02-23T14:43:13 | 2018-02-23T14:43:13 | 122,431,473 | 0 | 1 | null | 2022-10-02T04:55:33 | 2018-02-22T04:40:03 | Python | UTF-8 | Python | false | false | 150 | py | #!F:\Python\my_blog\venv\Scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"[email protected]"
] | |
632851cbc1a2300e2011879f89b8d669e61b09b0 | 5e1959f251f42238456be6367eadbbec108852ec | /create_corpus.py | c9ff528ee4e34ffa6d700e8d6fdba47ffd522ce5 | [] | no_license | undertheseanlp/restore_diacritics | 82613fce05b5c04180a5cbf44474eec373ce4138 | 7f2b6e1b655d023728c74034e3d9ebc93b28cd00 | refs/heads/master | 2020-06-04T19:48:47.061828 | 2019-07-07T15:49:46 | 2019-07-07T15:49:46 | 192,168,360 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,795 | py | import unidecode
from sklearn.base import TransformerMixin
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import LinearSVC
from underthesea.word_tokenize import tokenize
class WordSetVectorizer(TransformerMixin):
def __init__(self):
self.words = {}
def fit_transform(self, X):
result = []
for s in X:
tokens = s.split()
s_result = []
for token in tokens:
if token not in self.words:
self.words[token] = len(self.words)
s_result.append(self.words[token])
result.append(s_result)
return result
WINDOW_SIZE = 4
count = 0
X = []
y = []
for line in open("tmp/vietnamese_tone_prediction/train.txt"):
count += 1
text = line.strip().lower()
tokens = tokenize(text)
tokens = ["bos"] * WINDOW_SIZE + tokens + ["eos"] * WINDOW_SIZE
tokens_remove_tone = [unidecode.unidecode(token) for token in tokens]
for i, token in enumerate(tokens[WINDOW_SIZE:-WINDOW_SIZE]):
j = i + WINDOW_SIZE
yi = token
Xi = " ".join(tokens_remove_tone[j - WINDOW_SIZE:j + WINDOW_SIZE])
X.append(Xi)
y.append(yi)
if count > 100000:
break
x_encoder = WordSetVectorizer()
y_encoder = LabelEncoder()
X_train_dev = x_encoder.fit_transform(X)
y_train_dev = y_encoder.fit_transform(y)
X_train, X_dev, y_train, y_dev = train_test_split(X_train_dev, y_train_dev, test_size=0.2, random_state=1024)
print(f"Train: {len(X_train)}, Dev: {len(X_dev)}")
clf = LinearSVC()
clf.fit(X_train, y_train)
score = clf.score(X_dev, y_dev)
print(score)
| [
"[email protected]"
] | |
5df44887d2e523c9b80e9fe5e8a140381a77ea5c | fade4e5eb42c54a5fc89090740eafb8fd757a309 | /spark_coreset-master/run_k_segment/Coreset.py | 596ce2e1c66dda31ba277d202d84bdecbd800d98 | [] | no_license | vkhakham/k-segment | 6e7d8a6e49388338fcd6b1ec81f255bb93cfef40 | 0527a19e172f428381681fc9e1dd6c0aeb48d597 | refs/heads/master | 2020-04-06T05:55:45.469433 | 2017-08-18T09:30:24 | 2017-08-18T09:30:24 | 51,445,147 | 0 | 4 | null | 2019-05-17T22:57:26 | 2016-02-10T14:14:09 | Python | UTF-8 | Python | false | false | 6,166 | py | import numpy as np
import math
import utils
class OneSegCoreset:
def __init__(self, repPoints, weight, SVt):
self.repPoints = repPoints
self.weight = weight
self.SVt = SVt
class coreset:
def __init__(self, C, g, b, e):
self.C = C # 1-segment coreset
self.g = g # best line
self.b = b # coreset beginning index
self.e = e # coreset ending index
def __repr__(self):
return "OneSegmentCoreset " + str(self.b) + "-" + str(self.e) +"\n" + str(self.C.repPoints) + "\n"
def build_coreset(P, k, eps, is_coreset=False):
h = bicriteria(P, k, is_coreset)
print "bicritiria estimate:", h
# TODO verify if *100 is needed here.
b = (eps ** 2 * h) / (100 * k * np.log2(len(P)))
return BalancedPartition(P, eps, b, is_coreset)
def one_seg_cost(P, is_coreset=False):
if is_coreset:
oneSegmentCoreset = OneSegmentCorset(P, is_coreset)
return utils.best_fit_line_cost(oneSegmentCoreset.repPoints, is_coreset) * oneSegmentCoreset.weight
else:
return utils.best_fit_line_cost(P, is_coreset)
def bicriteria(P, k, is_coreset=False):
if len(P) <= (4 * k + 1):
return 0 # TODO changes
m = int(math.floor(len(P) / (4 * k)))
i = 0
j = m
# one_seg_res will hold segment starting index and result (squred distance sum)
one_seg_res = []
# partition to 4k segments and call 1-segment for each
while i < len(P):
partition_set = one_seg_cost(P[i:j], is_coreset)
one_seg_res.append((partition_set, int(i)))
i += m
j += m
# sort result
one_seg_res = sorted(one_seg_res, key=lambda res: res[0])
# res = the distances of the min k+1 segments
res = 0
# sum distances of k+1 min segments and make a list of point to delete from P to get P \ Q from the algo'
rows_to_delete = []
for i in xrange(k + 1):
res += one_seg_res[i][0]
for j in xrange(m):
rows_to_delete.append(one_seg_res[i][1] + j)
P = np.delete(P, rows_to_delete, axis=0)
return res + bicriteria(P, k, is_coreset)
def BalancedPartition(P, a, bicritiriaEst, is_coreset=False):
Q = []
D = []
points = P
# add arbitrary item to list
dimensions = points[0].C.repPoints.shape[1] if is_coreset else points.shape[1]
if is_coreset:
points.append(P[0]) # arbitrary coreset n+1
else:
points = np.vstack((points, np.zeros(dimensions))) # arbitrary point n+1
n = len(points)
for i in xrange(0, n):
Q.append(points[i])
cost = one_seg_cost(np.asarray(Q), is_coreset)
# print "bp cost:", cost, "points", Q
# if current number of points can be turned into a coreset - 3 conditions :
# 1) cost passed threshold
# 2) number of points to be packaged greater than dimensions + 1
# 3) number of points left greater then dimensions + 1 (so they could be packaged later)
if cost > bicritiriaEst and (is_coreset or (len(Q) > dimensions + 1 and dimensions + 1 <= n - 1 - i)) or i == n - 1:
if is_coreset and len(Q) == 1:
if i != n - 1:
D.append(Q[0])
Q = []
continue
T = Q[:-1]
C = OneSegmentCorset(T, is_coreset)
g = utils.calc_best_fit_line_polyfit(OneSegmentCorset(np.asarray(T), is_coreset).repPoints)
if is_coreset:
b = T[0].b
e = T[-1].e
else:
b = T[0][0] # signal index of first item in T
e = T[-1][0] # signal index of last item in T
D.append(coreset(C, g, b, e))
Q = [Q[-1]]
return D
def OneSegmentCorset(P, is_coreset=False):
# print "###########################", is_coreset, P
if len(P) < 2:
# print "edge case:", P
return P[0].C
if is_coreset:
svt_to_stack = []
for oneSegCoreset in P:
svt_to_stack.append(oneSegCoreset.C.SVt)
X = np.vstack(svt_to_stack)
else:
# add 1's to the first column
X = np.insert(P, 0, values=1, axis=1)
U, s, V = np.linalg.svd(X, full_matrices=False)
# reshape S
S = np.diag(s)
# calculate SV
SVt = np.dot(S, V)
u = SVt[:, 0] # u is leftmost column of SVt
w = (np.linalg.norm(u) ** 2) / X.shape[1]
q = np.identity(X.shape[1]) # q - temporary matrix to build an identity matrix with leftmost column - u
try:
q[:, 0] = u / np.linalg.norm(u)
except:
print "iscoreset:",is_coreset, "P", P,"u:", u, "q:", q
Q = np.linalg.qr(q)[0] # QR decomposition returns in Q what is requested
if np.allclose(Q[:, 0], -q[:, 0]):
Q = -Q
# assert matrix is as expected
assert (np.allclose(Q[:, 0], q[:, 0]))
# calculate Y
y = np.identity(X.shape[1]) # y - temporary matrix to build an identity matrix with leftmost column
yLeftCol = math.sqrt(w) / np.linalg.norm(u)
y[:, 0] = yLeftCol # set y's first column to be sqrt of w divided by u's normal
# compute Y with QR decompression - first column will not change - it is already normalized
Y = np.linalg.qr(y)[0]
if np.allclose(Y[:, 0], -y[:, 0]):
Y = -Y
# assert matrix is as expected
assert (np.allclose(Y[:, 0], y[:, 0]))
YQtSVt = np.dot(np.dot(Y, Q.T), SVt)
YQtSVt /= math.sqrt(w)
# set B to the d+1 rightmost columns
B = YQtSVt[:, 1:]
# return [B, w, SVt]
return OneSegCoreset(repPoints=B, weight=w, SVt=SVt)
def PiecewiseCoreset(n, eps):
def s(index, points_number):
return max(4.0 / float(index), 4.0 / (points_number - index + 1))
eps = eps / np.log2(n)
s_arr = [s(i, n) for i in xrange(1, n + 1)]
t = sum(s_arr)
B = []
b_list = []
W = np.zeros(n)
for i in xrange(1, n + 1):
b = math.ceil(sum(s_arr[0:i]) / (t * eps))
if b not in b_list:
B.append(i)
b_list.append(b)
for j in B:
I = [i + 1 for i, b in enumerate(b_list) if b == b_list[j - 1]]
W[j - 1] = (1. / s_arr[j - 1]) * sum([s_arr[i - 1] for i in I])
return W
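# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module).
# It assumes the companion `utils` module from this repository is importable
# and follows the convention used above that the first column of P holds the
# sample index within the signal.
#
#   import numpy as np
#   n = 200
#   signal = np.sin(np.linspace(0, 6, n)) + 0.05 * np.random.randn(n)
#   P = np.column_stack((np.arange(n), signal))
#   pieces = build_coreset(P, k=3, eps=0.5)   # list of per-segment `coreset` objects
#   weights = PiecewiseCoreset(n, 0.5)        # per-point weights, mostly zero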
| [
"[email protected]"
] | |
fb76b3246a5781390679ffa00f39d53a3e825249 | 2ca26905ed33823f8dd5ce41404ace463a744b02 | /functional_tests/base.py | b379ba24365aa3570d86ea3f9a491804f36b41fa | [] | no_license | calimat/tourngen20 | 08a958b9a5e9415300fc98f0c9ca37e1defd2cb8 | ecf6e43fd0371fbb79143043beae329c651bb1c0 | refs/heads/master | 2021-01-22T11:58:24.160881 | 2015-07-08T03:11:47 | 2015-07-08T03:11:47 | 33,584,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,170 | py | from selenium import webdriver
import unittest
from unittest import skip
from selenium.webdriver.common.keys import Keys
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
import sys
class FunctionalTest(StaticLiveServerTestCase):
@classmethod
def setUpClass(cls): #1
for arg in sys.argv: #2
if 'liveserver' in arg: #3
cls.server_url = 'http://' + arg.split('=')[1] #4
return #5
super().setUpClass() #6
cls.server_url = cls.live_server_url
@classmethod
def tearDownClass(cls):
if cls.server_url == cls.live_server_url:
super().tearDownClass()
def setUp(self):
self.browser = webdriver.Firefox()
self.browser.implicitly_wait(3)
def tearDown(self):
self.browser.quit()
def check_for_row_in_team_table(self, row_text):
team_table = self.browser.find_element_by_id('id_team_table')
rows = team_table.find_elements_by_tag_name('tr')
self.assertIn(row_text, [row.text for row in rows])
def get_team_input_box(self):
return self.browser.find_element_by_id('id_name')
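# Usage note (illustrative, the host name is hypothetical): setUpClass above switches the
# tests to an already-running server when a 'liveserver' argument is supplied, e.g.
#   python manage.py test functional_tests --liveserver=staging.example.com
# otherwise Django's own StaticLiveServerTestCase server is used.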
| [
"[email protected]"
] | |
00993f7fd4364f888367ce596cfb6fabf4940944 | 36032e368a48369cc75103448e71159057de6b2f | /bubble_sort.py | cc0c41c975f0fdc169ed906d9268f6fb52340cb0 | [] | no_license | payalgupta1204/Data_Structure_Algorithms | 8cbb838241c4495707a7b61d89de5bc4cef8229b | 3bf97a22ae8a6dc6fec68189607b681aceaf8485 | refs/heads/master | 2020-04-05T21:36:58.005197 | 2018-11-12T14:26:29 | 2018-11-12T14:26:29 | 157,226,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | import numpy as np
def inputnumber(message):
while True:
try:
userInput = int(input(message))
except ValueError:
print("Not an integer, enter int value")
continue
else:
return userInput
n = inputnumber("enter number of elements in array:")
arr = []
for i in range(n):
a = inputnumber("enter element of array")
arr.append(a)
len_arr = len(arr)
while True:
flag = 0
for i in range(len_arr):
if i == len_arr - 1:
break
else:
if arr[i] > arr[i + 1]:
temp = arr[i]
arr[i] = arr[i + 1]
arr[i + 1] = temp
flag = 1
if flag == 0:
break
print("The sorted array is {}".format(arr))
| [
"[email protected]"
] | |
f97077ffdde8834fd332eb04ca18f4e4c80941e4 | d9adfb4e45c5cb38f5c2a079363067aae7656311 | /test/DBSCAN.py | 93c1d4c11c04245f908fe93da2a3d3b51cd03051 | [] | no_license | jiashuowang/airCustomer | 0ecf62b0470e2abc607600a1ea9c6764874ea247 | b1d981bba2d3b3c14325117d4c6326657060633f | refs/heads/master | 2022-09-22T06:57:43.852651 | 2020-06-01T06:33:08 | 2020-06-01T06:33:58 | 265,795,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,811 | py |
import pandas as pd
import os
import numpy as np  # data structures
import sklearn.cluster as skc  # density-based clustering
from sklearn import metrics  # model evaluation
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
excelPath = os.getcwd() + '/out' + os.sep + 'std_result.xls'
excelFile = pd.ExcelFile(excelPath)
data = excelFile.parse('Sheet1')
db = skc.DBSCAN(eps=0.615, min_samples=9).fit(data)
label_pred = db.labels_
print(label_pred)
n_clusters_ = len(set(label_pred)) - (1 if -1 in label_pred else 0)
print("n_clusters = " + str(n_clusters_) + "==========")
print(metrics.silhouette_score(data, label_pred))
# dimensionality reduction with PCA
pca = PCA(n_components=2)
data = pd.DataFrame(pca.fit_transform(data))
# plot the clustering results
x0 = data[label_pred == 0]
x1 = data[label_pred == 1]
x2 = data[label_pred == 2]
x3 = data[label_pred == 3]
x4 = data[label_pred == 4]
plt.scatter(x0.iloc[:, 0], x0.iloc[:, 1], c="red", marker='o', label='label0')
plt.scatter(x1.iloc[:, 0], x1.iloc[:, 1], c="green", marker='*', label='label1')
plt.scatter(x2.iloc[:, 0], x2.iloc[:, 1], c="blue", marker='+', label='label2')
plt.scatter(x3.iloc[:, 0], x3.iloc[:, 1], c="yellow", marker='+', label='label3')
plt.scatter(x4.iloc[:, 0], x4.iloc[:, 1], c="grey", marker='+', label='label4')
plt.legend(loc=2)
plt.show()
# data['cluster_db'] = label_pred # append the DBSCAN cluster labels as the last column of the dataset
# data.to_excel('./out/hhh.xls', index=False)
# # print(data['cluster_db'])
# # data.sort_values('cluster_db')
# # # plt.show(pd.plotting.scatter_matrix(data, c=data.cluster_db, figsize=(10,10),marker='1',alpha=.8,s=100))
# # plt.show(pd.plotting.scatter_matrix(data, c=data.cluster_db.tolist(), figsize=(10,10), s=100))
# plt.show(pd.plotting.scatter_matrix(data, alpha=0.7, figsize=(14,8), diagonal='kde'))
| [
"[email protected]"
] | |
5dba968a269457eb7dd0457a9c86c419e3247d63 | 7be3faf7b3149f111c8dd3c5af7bd3679acd0552 | /quarters/quarters/scm.py | 473a42399847398fca4752928754bd59ba062961 | [] | no_license | ChristianSP/sandbox | f01ccfd54ee87773a22378507d313375bd599de8 | 631ebfccb17bcc1fd1509ded910d38de7a434ddf | refs/heads/master | 2021-01-24T14:18:24.478757 | 2013-08-09T17:26:41 | 2013-08-09T17:26:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,725 | py | '''
Reads the upstream scm.
'''
import os
import sys
import subprocess
class ArchSVN:
'''
Reads SVN repos.
'''
def __init__(self, opts):
'''
Pass the options structure, we want those in here.
'''
self.opts = opts
self.svn_repos = self.__gen_svn_repos()
        # default upstream repos (now assembled in __gen_svn_repos):
        #   'packages':  'svn://svn.archlinux.org/packages'
        #   'community': 'svn://svn.archlinux.org/community'
def __gen_svn_repos(self):
'''
Generate the svn repos
'''
svn_repos = {}
if self.opts['core_pkg']:
svn_repos['packages'] = 'svn://svn.archlinux.org/packages'
if self.opts['community_pkg']:
svn_repos['community'] = 'svn://svn.archlinux.org/community'
if self.opts['svn_repos']:
for key in self.opts['svn_repos']:
svn_repos[key] = self.opts['svn_repos'][key]
return svn_repos
def _update_repos(self):
'''
Check the state of the svn repositories
'''
up = set()
co = set()
fresh = {}
if not os.path.isdir(self.opts['svn_root']):
os.makedirs(self.opts['svn_root'])
for key in self.svn_repos:
if os.path.isdir(os.path.join(self.opts['svn_root'], key)):
up.add(key)
else:
co.add(key)
cwd = os.getcwd()
os.chdir(self.opts['svn_root'])
for key in up:
os.chdir(os.path.join(self.opts['svn_root'], key))
            subprocess.getoutput('svn up')  # getoutput expects a command string, not a list
os.chdir(self.opts['svn_root'])
for key in co:
subprocess.getoutput('svn co ' + self.svn_repos[key])
os.chdir(cwd)
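# Illustrative options structure (key names taken from the code above; values are
# hypothetical):
#
#   opts = {
#       'core_pkg': True,          # mirror svn://svn.archlinux.org/packages
#       'community_pkg': False,    # skip svn://svn.archlinux.org/community
#       'svn_repos': {},           # extra {name: url} repos, optional
#       'svn_root': '/srv/quarters/svn',
#   }
#   scm = ArchSVN(opts)
#   scm._update_repos()            # checks out or updates each repo under svn_root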
| [
"[email protected]"
] | |
93e361c40527c88fb15b682de99f4dd25e0494c1 | 23345cb27e6a8310001dd367c87b6c59af0b5931 | /Practice2/StringIntDividation.py | 9026d6f3ddae5a1f38a3f16ee20fa6042ab09b26 | [] | no_license | ccsandhanshive/Practics-Code | 20840423385e7cd26672236cfbb565ff432af4cb | 048f2d2cd4bd76391e4578c18f20467f4526d137 | refs/heads/master | 2021-04-23T17:44:15.003384 | 2020-04-16T07:06:54 | 2020-04-16T07:06:54 | 249,952,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 716 | py | upper=[]
lower=[]
even=[]
odd=[]
def divide(InputString):
#print("0")
if InputString.isalpha():
#print("00")
if InputString.isupper():
#print("000")
upper.append(InputString)
else:
#print("001")
lower.append(InputString)
else:
#print("01")
if int(InputString)%2==0:
#print("010")
even.append(InputString)
else:
#print("011")
odd.append(InputString)
return
for i in list(input()):
divide(i)
upper.sort()
lower.sort()
even.sort()
odd.sort()
output=lower+upper+odd+even
for i in output:
print(i,end="")
| [
"[email protected]"
] | |
2c4a4f9f3bee1e1db55af2d6b5b8128094f1e187 | 0e10fb66cc795d888ac2c18eebd05cd3963f1389 | /python/max_subarray_sum.py | d6756b2bf92da5122d9b932a410e2655030327f0 | [
"MIT"
] | permissive | mohamedabdelbary/algo-exercises | f12bf721ff584de14a11cecca424f7535eb54938 | 3b2b700453c010f61c0d4099762727e988e2b124 | refs/heads/master | 2023-04-14T22:14:03.648514 | 2021-05-01T14:56:05 | 2021-05-01T14:56:05 | 326,254,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,416 | py | def max_subarray_sum(a):
"""
Given an array of integers a, find the consecutive subarray with the maximum sum in a
e.g. a = [13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12, -5, -22, 15, -4, 7]
should return a sum of 43 ([18, 20, -7, 12])
e.g., given the array [34, -50, 42, 14, -5, 86], the maximum sum would be 137, since we would take elements
42, 14, -5, and 86.
Given the array [-5, -1, -8, -9], the maximum sum would be 0, since we would not take any elements.
"""
max_idx_sum = {}
for idx, e in enumerate(a):
if idx == 0:
max_idx_sum[idx] = e
else:
sub = a[0: idx]
sum_ = sum(sub)
max_ = sum_
start = 0
while start < idx:
if sum_ > max_:
max_ = sum_
sum_ -= a[start]
start += 1
max_with_curr = max_ + e
max_idx_sum[idx] = e if e > max_with_curr else max_with_curr
max_sum = max(max_idx_sum.values())
return max_sum if max_sum > 0 else 0
if __name__ == "__main__":
assert max_subarray_sum([13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12, -5, -22, 15, -4, 7]) == 43
assert max_subarray_sum(range(1, 5)) == 10
assert max_subarray_sum([1, 2, 3, 4, -1]) == 10
assert max_subarray_sum([34, -50, 42, 14, -5, 86]) == 137
assert max_subarray_sum([-5, -1, -8, -9]) == 0
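# Alternative sketch (added for illustration, not part of the original exercise):
# Kadane's algorithm gives the same "maximum sum, empty subarray allowed" answer
# in O(n) instead of the O(n^2) scan above.
def max_subarray_sum_kadane(a):
    best = 0          # the empty subarray is allowed, so the floor is 0
    current = 0       # best sum of a subarray ending at the previous element
    for x in a:
        current = max(0, current + x)  # drop the running subarray once it goes negative
        best = max(best, current)
    return best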
| [
"[email protected]"
] | |
7413beb29c82e7d9e80a34670df812e8442a6d6b | b7d4fea220a65e56b20a2073d719faeffb1f06e2 | /xavier_vendor/ee/profile/profile.py | 1b36edd643a968ab2fb92cf32248fb45c9c3ebdd | [] | no_license | CaiJianLee/Xavie_V5 | aad59118711de0f2823d5ac7bcd3ac4e683bfc06 | 68f1a95a38f4629b9f5fb01d2f3e033aab7dffab | refs/heads/master | 2020-03-27T08:30:27.844829 | 2018-08-27T07:46:50 | 2018-08-27T07:46:50 | 146,261,964 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,648 | py | __author__='Mingcheng'
from collections import OrderedDict
from ee.eedispatcher import EEDispatcher
class Profile():
_base_board_name=None
'''
    example of _extendio
{
"base_board_id":{
"cp1": {"partno": "CAT9555", "bus": "/dev/i2c-0", "addr": 0x00, "switch_channel": "none"},
"cp2": {"partno": "CAT9555", "bus": "/dev/i2c-0", "addr": 0x01, "switch_channel": "none"},
"bit1": {"pin": 1, "chip": "cp1"},
"bit2": {"pin": 2, "chip": "cp1"},
"bit3": {"pin": 3, "chip": "cp1"},
"bit17": {"pin": 1, "chip": "cp2"}
},
"dmm":{
"cp3": {"partno": "CAT9555", "bus": "/dev/i2c-0", "addr": 0x02, "switch_channel": "none"},
"bit33": {"pin": 1, "chip": "cp3"},
"bit34": {"pin": 2, "chip": "cp3"},
"bit35": {"pin": 3, "chip": "cp3"}
}
}
'''
_extendio = OrderedDict()
'''
the example of _busswitch profile
key: the busswitch chip id or channel id
{
"tca9548-0": {"partno": "tca9548a", "addr": "0x70", "bus": "/dev/i2c-0", "switch_channel": "none"},
"back-light": {"chip": "tca9548-0", "channel": 2}
}
'''
_busswitch = OrderedDict()
'''
the example of _eeprom
key: the eeprom id
value: the eeprom profile
{
"dmm": {"partno": "AT24C08ZI", "bus": "/dev/AXI4_I2C_1", "addr": 0x50, "switch_channel": "dmm_eeprom"},
"datalogger": {"partno": "AT24C08ZI", "bus": "/dev/i2c-1", "addr": 0x50, "switch_channel": "none"}
}
'''
_eeprom = OrderedDict()
'''
the example of _chips which including the base board chip but doesn't include the eeprom
{
"TCA9548_1":{"partno": "TCA9548", "addr": 0x71, "bus": "/dev/AXI4_EEPROM_IIC", "switch_channel": "none"},
"psu_DAC_1":{"partno": "AD5667","addr": 0x00, "bus": "/dev/AXI4_INSTRUMENT_IIC_1", "switch_channel": "none", "vref": "2500mv"},
"cp1":{"partno": "CAT9555","addr": 0x00, "bus": "/dev/i2c-0", "switch_channel": "none"}
}
'''
_chips = OrderedDict()
'''
the example of _boards
{
"datalogger":{},
"dmm":{}
}
'''
_boards = OrderedDict()
'''
the example of _buses
{
'ps_i2c_0': {'path': '/dev/i2c-0', 'rate': '100000', 'type': 'i2c'},
'UUT_UART': {'ipcore': 'Axi4Uart', 'path': '/dev/AXI4_UUT_UART', 'baudrate': '115200', 'type': 'uart'},
'ELOAD_IIC' :{'ipcore': 'Axi4I2c', 'path': '/dev/AXI4_ELOAD_IIC', 'rate': '400000', 'type': 'i2c'}
}
'''
_buses = OrderedDict()
'''
the example of _initconfig
{
"extendio":{
"base_board_id":{
"cp1":{"dir":[0x1f,0x0],"value":[0x0,0x0]},
"cp2":{"dir":[0x0,0x0],"value":[0x0,0x0]}
},
"dmm":{
"cp3":{"dir":[0x0,0x0],"value":[0x0,0x0]}
}
},
"uart":{
"uart_1":{"baudrate":"115200","databits":"8","stopbits":1,"parity":"none"},
"axi4_uart_2":{"baudrate":"115200","databits":"8","stopbits":1,"parity":"none","timestamp":"none"}
},
"netconfig":{}
}
'''
_initconfig = OrderedDict()
'''
    the example of _ioes, which includes the digital io
{
"digital_io":[]
}
'''
_ioes = OrderedDict()
@classmethod
def set_base_board_name(cls, name):
""" set the base board name
Args:
name(str) : the base board name
"""
cls._base_board_name=name
@classmethod
def get_base_board_name(cls):
""" get the base board name
Returns:
on success,the string of base board name will be return
on error,will be return None
"""
return cls._base_board_name
@classmethod
def get_extendio(cls):
'''get all the extendio profile
Returns:
a dict of all extendio profile
'''
return cls._extendio
@classmethod
def get_extendio_by_name(cls, board_name=None):
'''get the board extendio by board id
Args:
board_name(str) : the board id,default is the base board
Returns:
a dict of the board extendio profile
if board_name is None,return the base board extendio profile
'''
if board_name == None:
return cls._extendio[cls._base_board_name]
else:
return cls._extendio[board_name]
@classmethod
def get_extendio_chip_name(cls, bitnum, board_name=None):
'''get the extendio chip id by bitnum id
Args:
bitnum(str): bitnum id
board_name(str): the board id ,default is the base board
Returns:
a string of the extendio chip id
'''
if board_name == None:
return cls._extendio[cls._base_board_name][bitnum]['chip']
else:
return cls._extendio[board_name][bitnum]['chip']
@classmethod
def get_busswitch(cls):
'''get all the busswitch profile
Returns:
a dict of all busswitch profile
'''
return cls._busswitch
@classmethod
def get_busswitch_by_name(cls, name):
''' get the busswitch by chip id or channel id
Args:
name(str): the chip id or channel id
Returns:
if name is the chip id,return the busswitch chip profile
if name is the channel id, return the channel profile
'''
return cls._busswitch[name]
@classmethod
def get_eeprom(cls):
'''get the all eeprom profile
Returns:
a dict of all eeprom profile
'''
return cls._eeprom
@classmethod
def get_eeprom_by_name(cls, name):
'''get the eeprom profile by eeprom chip id
Args:
name(str): the eeprom chip id
Returns:
a dict of the eeprom chip profile
'''
return cls._eeprom[name]
@classmethod
def get_boards(cls):
'''get all the boards profile
Returns:
a dict of all boards profile
'''
return cls._boards
@classmethod
def get_board_by_name(cls, name):
'''get the board profile by board id
Args:
name(str): the board id
Returns:
a dict of the board profile
'''
return cls._boards[name]
@classmethod
def get_buses(cls):
'''get all the buses profile
Returns:
a dict of all buses profile
'''
return cls._buses
@classmethod
def get_bus_by_name(cls, name):
'''get the bus profile by bus id
Args:
name(str): the bus id
Returns:
the dict of the bus profile
'''
return cls._buses[name]
@classmethod
def get_bus_path(cls, name):
'''get the bus device path by the bus id
Args:
name(str): the bus id
Returns:
a string of the bus device path
'''
return cls._buses[name]['path']
@classmethod
def get_bus_by_path(cls, path):
'''get the bus profile by bus path
Args:
name(str): the bus id
Returns:
the dict of the bus profile
'''
if hasattr(cls,"__path_buses") is False:
cls.__path_buses=OrderedDict()
for name,bus in cls._buses.iteritems():
cls.__path_buses[bus['path']]=bus
return cls.__path_buses[path]
@classmethod
def get_chips(cls):
'''get all the base board chips profile
Returns:
a dict of all the base board chips profile
'''
return cls._chips
@classmethod
def get_chip_by_name(cls, name):
'''get the base board chip profile by chip id
Args:
name(str): the chip id
Returns:
a dict of the base board chip profile
'''
return cls._chips[name]
@classmethod
def get_initconfig(cls):
'''get the initconfig profile
Returns:
a dict of the initconfig
'''
return cls._initconfig
@classmethod
def get_ioes(cls):
'''get all the ioes profile
Returns:
a dict of all ioes profile
'''
return cls._ioes
EEDispatcher.register_method(Profile.get_extendio)
EEDispatcher.register_method(Profile.get_extendio_by_name)
EEDispatcher.register_method(Profile.get_extendio_chip_name)
EEDispatcher.register_method(Profile.get_busswitch)
EEDispatcher.register_method(Profile.get_busswitch_by_name)
EEDispatcher.register_method(Profile.get_eeprom)
EEDispatcher.register_method(Profile.get_eeprom_by_name)
EEDispatcher.register_method(Profile.get_boards)
EEDispatcher.register_method(Profile.get_board_by_name)
EEDispatcher.register_method(Profile.get_buses)
EEDispatcher.register_method(Profile.get_bus_by_name)
EEDispatcher.register_method(Profile.get_bus_path)
EEDispatcher.register_method(Profile.get_chips)
EEDispatcher.register_method(Profile.get_chip_by_name)
EEDispatcher.register_method(Profile.get_initconfig)
EEDispatcher.register_method(Profile.get_ioes)
EEDispatcher.register_method(Profile.get_base_board_name)
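# Illustrative usage sketch (not part of the original module). It assumes the class-level
# dictionaries have been populated elsewhere, e.g. by a profile/config loader:
#
#   Profile.set_base_board_name('base_board_id')
#   Profile._buses['ps_i2c_0'] = {'path': '/dev/i2c-0', 'rate': '100000', 'type': 'i2c'}
#   Profile.get_bus_path('ps_i2c_0')       # -> '/dev/i2c-0'
#   Profile.get_bus_by_path('/dev/i2c-0')  # -> the same bus profile dict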
| [
"[email protected]"
] | |
2872eb328a95f0324b95c797601043cfaf190efa | 1391e4ae9ac5cebb0ecaaa8485ab98c39a8a1b35 | /tests/test__replacing.py | 1184ed1b2ce4f8b447f4d7dc0fa91de3521ec1b9 | [] | no_license | sagivmalihi/pyforge | bff9859cfbee53ef14d8ace860e870d950f7b85b | 9830bf090d8c828d06292cc852150d1cf6c65b11 | refs/heads/master | 2021-01-16T17:48:27.757949 | 2010-12-13T09:30:40 | 2010-12-13T09:30:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,725 | py | import types
from ut_utils import ForgeTestCase
from forge.stub import FunctionStub
from forge.class_mock import ClassMockObject
import time
orig_time_sleep = time.sleep
import os
orig_os_path_join = os.path.join
class NewStyleClass(object):
def method(self, a, b, c):
raise NotImplementedError()
@property
def some_property(self):
return 2
orig_newstyle_method = NewStyleClass.method
orig_newstyle_property = NewStyleClass.some_property
class OldStyleClass:
def method(self, a, b, c):
raise NotImplementedError()
@property
def some_property(self):
return 2
orig_oldstyle_method = OldStyleClass.method
orig_oldstyle_property = OldStyleClass.some_property
class StubbingObjectsTest(ForgeTestCase):
def _test__stubbing_object(self, obj):
expected = obj.method
returned = self.forge.replace(obj, 'method')
self.assertIsInstance(obj.method, FunctionStub)
self.assertIs(returned, obj.method)
self.assertIs(obj.method.__forge__.original.im_func, expected.im_func)
self.assertIs(obj.method.__forge__.signature.func.im_func, expected.im_func)
self.assertTrue(obj.method.__forge__.is_bound())
self.forge.restore_all_replacements()
self.assertIs(obj.method.im_func, expected.im_func)
def test__stubbing_new_style_objects(self):
self._test__stubbing_object(NewStyleClass())
def test__stubbing_old_style_objects(self):
self._test__stubbing_object(OldStyleClass())
class StubbedNewStyleClass(object):
@classmethod
def class_method(cls, a, b, c):
raise NotImplementedError()
@staticmethod
def static_method(a, b, c):
raise NotImplementedError()
assert 'class_method' in dir(StubbedNewStyleClass)
class StubbedOldStyleClass:
@classmethod
def class_method(cls, a, b, c):
raise NotImplementedError()
@staticmethod
def static_method(a, b, c):
raise NotImplementedError()
class StubbingClassMethodTest(ForgeTestCase):
def test__stubbing_class_methods(self):
for cls in (StubbedNewStyleClass, StubbedOldStyleClass):
self._test__stubbing_class_methods(cls, 'class_method', False)
def test__stubbing_static_methods(self):
for cls in (StubbedNewStyleClass, StubbedOldStyleClass):
self._test__stubbing_class_methods(cls, 'static_method', True)
def _test__stubbing_class_methods(self, cls, name, is_static):
orig = getattr(cls, name)
self.forge.replace(cls, name)
func = getattr(cls, name)
self.assertIsInstance(func, FunctionStub)
func(1, 2, 3)
self.forge.replay()
func = getattr(cls, name)
func(1, 2, 3)
self.forge.verify()
self.forge.reset()
self.forge.restore_all_replacements()
func = getattr(cls, name)
if is_static:
self.assertIsInstance(func, types.FunctionType)
self.assertIsInstance(cls.__dict__[name], staticmethod)
self.assertIs(func, orig)
else:
self.assertIsInstance(cls.class_method, types.MethodType)
self.assertIsInstance(cls.__dict__[name], classmethod)
#classmethods are re-computed on every fetch
self.assertIsNot(func, orig)
self.assertIs(cls.class_method.im_self, cls)
self.assertIs(cls.class_method.im_func, orig.im_func)
class StubbingModulesTest(ForgeTestCase):
def test__stub_c_function(self):
self.forge.replace(time, "sleep")
self.assertIsInstance(time.sleep, FunctionStub)
expected_result = 666
time.sleep(10).and_return(expected_result)
self.forge.replay()
self.assertEquals(time.sleep(10), expected_result)
self.forge.restore_all_replacements()
self.assertIs(time.sleep, orig_time_sleep)
def test__stub_module_functions(self):
self.forge.replace(os.path, "join")
self.assertIsInstance(os.path.join, FunctionStub)
self.assertFalse(os.path.join.__forge__.signature.has_variable_kwargs())
self.assertTrue(os.path.join.__forge__.signature.has_variable_args())
return_path = "return_path"
os.path.join("a", "b", "c").and_return(return_path)
self.forge.replay()
self.assertEquals(return_path, os.path.join("a", "b", "c"))
self.forge.verify()
self.forge.restore_all_replacements()
self.assertIs(os.path.join, orig_os_path_join)
class ReplacingTest(ForgeTestCase):
def test__replacing_simple_attributes(self):
s = self.forge.create_sentinel()
s.a = 2
self.forge.replace_with(s, "a", 3)
self.assertEquals(s.a, 3)
self.forge.restore_all_replacements()
self.assertEquals(s.a, 2)
def test__replacing_properties__new_style(self):
self._test__replacing_properties(NewStyleClass, orig_newstyle_property)
def test__replacing_properties__old_style(self):
self._test__replacing_properties(OldStyleClass, orig_oldstyle_property)
def _test__replacing_properties(self, cls, orig):
self.forge.replace_with(cls, "some_property", 3)
self.assertEquals(cls.some_property, 3)
self.assertEquals(cls().some_property, 3)
self.forge.restore_all_replacements()
self.assertIs(cls.some_property, orig)
self.assertIs(cls().some_property, 2)
class NonFunctionStubbingTest(ForgeTestCase):
def setUp(self):
super(NonFunctionStubbingTest, self).setUp()
self.x = self.forge.create_sentinel()
def test__replacing_new_style_class_objects(self):
class MyClass(object):
pass
self._test__replacing_objects(MyClass(), MyClass)
def test__replacing_old_style_class_objects(self):
class MyClass:
pass
self._test__replacing_objects(MyClass(), MyClass)
def test__replacing_builtin_objects(self):
from cStringIO import StringIO
self._test__replacing_objects(StringIO(), type(StringIO()))
def _test__replacing_objects(self, obj, cls):
orig = self.x.obj = obj
self.forge.replace(self.x, 'obj')
self.assertIsInstance(self.x.obj, ClassMockObject)
self.assertTrue(self.x.obj.__forge__.behaves_as_instance)
self.assertIs(self.x.obj.__forge__.mocked_class, cls)
self.forge.restore_all_replacements()
self.assertIs(self.x.obj, orig)
def test__replacing_new_style_classes(self):
class MyClass(object):
pass
self._test__replacing_classes(MyClass)
def test__stubbing_old_new_style_classes(self):
class MyClass:
pass
self._test__replacing_classes(MyClass)
def _test__replacing_classes(self, cls):
self.x.cls = cls
self.forge.replace(self.x, 'cls')
self.assertIsInstance(self.x.cls, ClassMockObject)
self.assertIs(self.x.cls.__forge__.mocked_class, cls)
self.assertFalse(self.x.cls.__forge__.behaves_as_instance)
self.forge.restore_all_replacements()
self.assertIs(self.x.cls, cls)
class MultipleStubbingTest(ForgeTestCase):
def test__multiple_stubbing(self):
self.forge.replace(self.forge, "replace")
some_object = self.forge.create_sentinel()
expected_results = [
self.forge.replace(some_object, x).and_return(object())
for x in ["a", "b", "c"]
]
self.forge.replay()
returned = self.forge.replace_many(some_object, "a", "b", "c")
self.assertEquals(returned, expected_results)
self.forge.restore_all_replacements()
self.forge.verify()
self.assertNoMoreCalls()
self.forge.reset()
| [
"[email protected]"
] | |
5d69865ad17d1b1417deb0ffe1502367ba84927e | 9329b45dab94acc5a9e022c821251526cd5a89d8 | /Inicial/miprograma1.py | 3db08a81ad8c63a52a6397276b222e8f909a7e7b | [] | no_license | CamilaBernales/DataScienceInitial | 832428d5a74a7ad2840a986c6d85f54cd7393860 | 8c6d4966f11e5f9cfaea93d6d5c642e0854a4121 | refs/heads/master | 2023-01-19T06:27:14.195532 | 2020-11-24T08:16:28 | 2020-11-24T08:16:28 | 286,147,236 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | import modulo1
coche1 = modulo1.Coche("fiat", "blanco", "diesel", "1.5")
print(coche1.mostrar())
media = modulo1.media(4, 8, 9)
print(f"nuestra nota media es {media}") | [
"[email protected]"
] | |
7dbf6cea64115083ef6ee401aaddf28d95f0f443 | c3bf3e8c3a4205dc23fb5f723a29b74c83f6047a | /pyk8055/k8055testm.py | a20dc303ff7ec0f133f4540291ebdba32b52b8c7 | [] | no_license | jeremyz/k8055 | 807d90986d6fd77cf2566ad22191055cf81da9be | 46cf3ce29caeb7efe865ce684b629299ab6c51e6 | refs/heads/master | 2020-06-01T07:21:30.717005 | 2020-04-20T13:32:49 | 2020-04-20T13:32:49 | 2,293,405 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,370 | py | #!/usr/bin/env python
# $Id: k8055testm.py,v 1.1 2007/03/28 10:10:32 pjetur Exp $
#
# Sample pyton test program for pyk8055 wrapper
# Scanning and testing multiple boards
#
from time import sleep
from pyk8055 import *
try:
K8055_devices =[]
BMask = SearchDevices()
if not BMask:
print "No K8055 devices found"
        exit()  # bare 'exit' is a no-op; call it to actually stop
if BMask & 0x01 : K8055_devices.append(k8055(0))
if BMask & 0x02 : K8055_devices.append(k8055(1))
if BMask & 0x04 : K8055_devices.append(k8055(2))
if BMask & 0x08 : K8055_devices.append(k8055(3))
for k in K8055_devices:
k.SetCurrentDevice()
# set analog output channel 1 to 200/255*5v = 3.9V
k.OutputAnalogChannel(1,200)
sleep(1)
# set analog output channel low again = 0V
k.OutputAnalogChannel(1,0)
# read both analog inputs
# note: this returns a list
res = k.ReadAllAnalog()
print "Analog status, board #",k.DeviceAddress(),res[1:]
# Test class string function
print "Status, board #",k.DeviceAddress(),str(k)
# Set debounce time on counter 1
k.SetCounterDebounceTime(1, 100)
# reset counter 1
k.ResetCounter(1)
# Loop creating a rotating display of digital outputs
# Loop 3 times
for i in range(1,3):
for k in K8055_devices:
k.SetCurrentDevice()
d = 1
k.WriteAllDigital(1)
while d <= 128: # While this running
if k.ReadDigitalChannel(1):
print "Hello world (from board #%d)" %(k.DeviceAddress(),)
sleep(0.2) # wait .2 sec
d *=2 # and continue rotating digital outputs
k.WriteAllDigital(d)
for k in K8055_devices:
k.SetCurrentDevice()
# read/print the counter on input 1
print "Board # %d Counter #1=%d" % (k.DeviceAddress(),k.ReadCounter(1))
# read/print the counter on input 2
print "Board # %d Counter #2=%d" % (k.DeviceAddress(),k.ReadCounter(2))
for k in K8055_devices:
k.SetCurrentDevice()
# set even bits on digital outputs
k.WriteAllDigital(170)
sleep(1)
for k in K8055_devices:
k.SetCurrentDevice()
# set odd bits on digital outputs
k.WriteAllDigital(85)
sleep(1)
for k in K8055_devices:
k.SetCurrentDevice()
# Clear all digital outputs
k.WriteAllDigital(0)
# and close
for k in K8055_devices:
k.SetCurrentDevice()
k.CloseDevice()
except KeyboardInterrupt:
for k in K8055_devices:
k.SetCurrentDevice()
k.WriteAllDigital(0)
k.CloseDevice()
    exit()
except IOError:
print "Could not open Device"
| [
"[email protected]"
] | |
506609114cac1e89bffb81bc8fe5b9e533ddfd02 | 88cc9b43de3becfef313a965acd4950423a85999 | /textGeneration.py | e456f5e1203674764e4512b492c46a2bdb1428bd | [] | no_license | landjbs/Translation | 15e31c90e41080ddc302873f641d4103a9c86f5d | f19f22a60fad52dd74025bdbd4c08d65327dee1d | refs/heads/master | 2020-07-09T20:57:52.697171 | 2019-10-14T01:43:20 | 2019-10-14T01:43:20 | 204,082,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,392 | py | import keras
import numpy as np
from tqdm import tqdm
from nltk.tokenize import word_tokenize
from bert_serving.client import BertClient
SEQ_SIZE = 100
BERT_PATH = '/Users/landonsmith/Desktop/shortBert'
TEXT_PATH = 'gatsby.txt'
# configure BERT client
bc = BertClient(check_length=True)
bertLaunch = f'bert-serving-start -model_dir {BERT_PATH} -num_worker=3 -max_seq_len={SEQ_SIZE + 2} -pooling_strategy=NONE'
# bert-serving-start -model_dir /Users/landonsmith/Desktop/shortBert -num_worker=3 -max_seq_len=102 -pooling_strategy=NONE
def text_to_word_ids(textPath):
"""
Builds list of word ids for each word in text, encoded with wordIdx and
decipherable with reverseIdx
"""
lenList = []
wordIdx = dict()
with open(textPath, 'r') as textFile:
rawText = textFile.read()
cleanText = rawText.lower()
textWords = word_tokenize(cleanText, language='english')
wordIdx = {word : i for i, word in tqdm(enumerate(textWords))}
reverseIdx = {i : word for word, i in tqdm(wordIdx.items())}
word_to_id = lambda word : wordIdx[word]
textIds = [word_to_id(word) for word in tqdm(textWords)]
    vocabSize = max(reverseIdx) + 1  # ids run from 0 to max, so one-hot vectors need max+1 slots
return textIds, wordIdx, reverseIdx, vocabSize
def one_hot_encode(wordId, vocabSize):
oneHotVec = np.zeros(shape=(vocabSize, ))
oneHotVec[wordId] = 1
return oneHotVec
def vectorize_context_seq(idList, reverseIdx, vocabSize):
"""
Vectorizes first seqSize tokens of idList using contextual attention
and outputs feature matrix of token embeddings and one hot vector encoding
target word id
"""
assert (len(idList) == (SEQ_SIZE + 1)), f'Invalid idList length of {len(idList)}'
seqWords = [reverseIdx[wordId] for wordId in idList[:-1]]
vectorMatrix = bc.encode([seqWords], is_tokenized=True)[0]
# remove cls and start
filteredMatrix = vectorMatrix[1:-1]
assert (len(filteredMatrix) == SEQ_SIZE), f'Invalid vector matrix shape of {vectorMatrix.shape}'
targetId = idList[-1]
targetVec = one_hot_encode(targetId, vocabSize)
return filteredMatrix, targetVec
def generate_train_data(textIds, batchSize, reverseIdx, vocabSize):
""" Generates batch for fitting """
    textLength = len(textIds)
    chunkSize = SEQ_SIZE + 1
    while True:  # fit_generator expects the generator to yield batches indefinitely
        # pick a random start point, leaving room for a full batch of complete windows
        startLoc = np.random.randint(0, (textLength - SEQ_SIZE - batchSize))
        endLoc = startLoc + batchSize
        batchFeatures = []
        batchTargets = []
        for i in range(startLoc, endLoc):
            currentIds = textIds[i : (i + chunkSize)]
            filteredMatrix, targetVec = vectorize_context_seq(currentIds, reverseIdx, vocabSize)
            batchFeatures.append(filteredMatrix)
            batchTargets.append(targetVec)
        featureArray = np.array(batchFeatures)
        targetArray = np.array(batchTargets)
        yield (featureArray, targetArray)
textIds, wordIdx, reverseIdx, vocabSize = text_to_word_ids(TEXT_PATH)
batchSize = 10
# model
inputs = keras.layers.Input(shape=(SEQ_SIZE, 768))
lstm = keras.layers.LSTM(units=768, return_sequences=True)(inputs)
dense = keras.layers.Dense(units=vocabSize, activation='softmax')(lstm)
model = keras.models.Model(inputs=inputs, outputs=dense)
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.fit_generator(generate_train_data(textIds, batchSize, reverseIdx, vocabSize), steps_per_epoch=10000, epochs=10)
| [
"[email protected]"
] | |
9a111f06491634bb0ef150b9279b992270319ca3 | 3010e6db2bb39a904ea288fe9135c787a08c5f64 | /read_image_createcsv.py | b139d7754ccf19d0a56a8ed83a88909705160dd8 | [] | no_license | Aayushktyagi/Utility | cc270151e56e3698fb6975b0d6306d3c5525aea6 | 3a49a5686422f13495235b1aaf4f97ea643de55f | refs/heads/master | 2023-02-27T04:46:13.648594 | 2021-01-29T11:15:27 | 2021-01-29T11:15:27 | 256,270,123 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,361 | py | '''
Read images from multiple sub-folders(ex:0,1,2,3,....)
create a csv file with format filename , label
filenmae being name of image and label being folder name
move images to folder names as destination folder
max 200 images from each folder can be added
images added will me moved to destination folder specified with destination path
'''
import pandas as pd
import os
import sys
import shutil
train_path = 'path to dataset'
destination_path = 'path to destination folder'
def getcsv(datapath , train=True):
filename_list = []
label_list = []
label_counter_dict = {}
for root, dirs, files in os.walk(datapath):
for f in files:
if f.endswith(".jpg"):
label = os.path.basename(root)
if label not in label_counter_dict.keys() :
label_counter_dict[label] = 0
# print(label_counter_dict)
filename_list.append(f)
label_list.append(label)
image_path = os.path.join(root,f)
path = shutil.copy(image_path,destination_path)
if os.path.isfile(path):
pass
# print("")
else:
print("file does not exist")
break
else:
if label_counter_dict[label] <=200:
label_counter_dict[label] = label_counter_dict[label] + 1
filename_list.append(f)
label_list.append(label)
image_path = os.path.join(root,f)
path = shutil.copy(image_path,destination_path)
if os.path.isfile(path):
pass
# print("")
else:
print("file does not exist")
break
print(label_counter_dict)
df = pd.DataFrame(list(zip(filename_list, label_list)),
columns =['filename', 'label'])
print(df.head())
if train:
print('saving train csv file')
df.to_csv('train.csv',index=False)
else:
print("savig test csv file")
df.to_csv('test.csv',index=False)
getcsv(train_path)  # the original called getcsv(combined_cam2), an undefined name
| [
"[email protected]"
] | |
396cce06d609f118e60e05c2de9bb1b16030805f | 95eb485d9430cad805ad17a0424c13024c68fcc8 | /lesson_3/random_example.py | 0a26032c19847b8923360e275076e5efc1ee2ca7 | [] | no_license | antonplkv/itea_base_april | 54dbe7f4d71ac3ef5afe6e20c0369ed28f6fa2c2 | 6ef15c5e4e625f8f222f51e8b2c22667afda4075 | refs/heads/master | 2022-09-01T04:10:51.653031 | 2020-05-27T20:11:38 | 2020-05-27T20:11:38 | 260,017,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | import random
a = [1, 100, 3]
my_random = random.randint(50, 100)
print(my_random)
random_number = random.choice(a)
print(random_number)
| [
"[email protected]"
] | |
b6bd7ca966221d80004fc733a00f569eac83fd91 | c61984aa217723621a0c65e2c4e673dd4d54d518 | /home/utils.py | a052f95a2830e17642be904dd0c05b9dce43c9e0 | [
"MIT"
] | permissive | twitterdev/tweet-search | 19aebd23cdbbd7f64b46de5397b042bbe2d95582 | 983de3165f6fc09bb1d8f348db1a1cb06b824a12 | refs/heads/master | 2021-06-28T16:13:55.469187 | 2019-07-01T16:49:06 | 2019-07-01T16:49:06 | 31,343,706 | 55 | 22 | MIT | 2021-06-10T17:45:04 | 2015-02-26T00:49:51 | JavaScript | UTF-8 | Python | false | false | 668 | py | import json
from django.http import HttpResponse
from django.conf import settings
from gnip_search.gnip_search_api import GnipSearchAPI
def get_gnip(paged=False):
"""
Returns Gnip Search API
"""
g = GnipSearchAPI(settings.GNIP_USERNAME,
settings.GNIP_PASSWORD,
settings.GNIP_SEARCH_ENDPOINT,
paged=paged)
return g
def handleQueryError(e):
response_data = {}
response_data['error'] = e.message
response_data['response'] = e.response
response_data['payload'] = e.payload
print response_data
return HttpResponse(json.dumps(response_data), status=400, content_type="application/json")
| [
"[email protected]"
] | |
7309533f6e324077857a089c0c3d29bc43d1d7a8 | 0e39c0fc70b529e8d1d86f9b076d08b0cea28c7e | /NTC_503_3950.py | cba3c3f954ada852c563bd7805c9fb294f765a5c | [] | no_license | russot/ADS | da1fe9566efad17a703ba69eeee58e36ab1a0fea | 0e26a9e3fdadda581001196b39ba16b82c9e9f03 | refs/heads/master | 2021-01-16T18:57:04.783684 | 2015-05-14T22:39:24 | 2015-05-14T22:39:24 | 21,374,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,956 | py | NTC_503_3950 =(
(-55,5443.210),
(-54,4953.700),
(-53,4517.540),
(-52,4127.930),
(-51,3779.060),
(-50,3465.930),
(-49,3184.230),
(-48,2930.270),
(-47,2700.830),
(-46,2493.110),
(-45,2304.710),
(-44,2133.490),
(-43,1977.610),
(-42,1835.440),
(-41,1705.570),
(-40,1586.730),
(-39,1477.820),
(-38,1377.860),
(-37,1285.970),
(-36,1201.390),
(-35,1123.430),
(-34,1051.480 ),
(-33,984.995 ),
(-32,923.478 ),
(-31,866.495 ),
(-30,813.653 ),
(-29,764.597 ),
(-28,719.007 ),
(-27,676.598 ),
(-26,637.107 ),
(-25,600.300 ),
(-24,565.962 ),
(-23,533.901 ),
(-22,503.941 ),
(-21,475.920 ),
(-20,449.692 ),
(-19,425.125 ),
(-18,402.096 ),
(-17,380.494 ),
(-16,360.216 ),
(-15,341.169 ),
(-14,323.267 ),
(-13,306.431 ),
(-12,290.587 ),
(-11,275.669 ),
(-10,261.615 ),
(-9,248.368 ),
(-8,235.875 ),
(-7,224.087 ),
(-6,212.960 ),
(-5,202.450 ),
(-4,192.520 ),
(-3,183.133 ),
(-2,174.256 ),
(-1,165.858 ),
(0,157.910 ),
(1,150.384 ),
(2,143.256 ),
(3,136.502 ),
(4,130.100 ),
(5,124.030 ),
(6,118.272 ),
(7,112.810 ),
(8,107.625 ),
(9,102.703 ),
(10,98.034 ),
(11,93.589 ),
(12,89.370 ),
(13,85.359 ),
(14,81.547 ),
(15,77.921 ),
(16,74.472 ),
(17,71.191 ),
(18,68.068 ),
(19,65.095 ),
(20,62.264 ),
(21,59.568 ),
(22,57.001 ),
(23,54.554 ),
(24,52.222 ),
(25,50.000 ),
(26,47.880 ),
(27,45.860 ),
(28,43.932 ),
(29,42.094 ),
(30,40.339 ),
(31,38.665 ),
(32,37.067 ),
(33,35.541 ),
(34,34.084 ),
(35,32.692 ),
(36,31.363 ),
(37,30.093 ),
(38,28.879 ),
(39,27.720 ),
(40,26.611 ),
(41,25.551 ),
(42,24.537 ),
(43,23.567 ),
(44,22.640 ),
(45,21.753 ),
(46,20.904 ),
(47,20.091 ),
(48,19.314 ),
(49,18.569 ),
(50,17.940 ),
(51,17.173 ),
(52,16.519 ),
(53,15.893 ),
(54,15.293 ),
(55,14.718 ),
(56,14.167 ),
(57,13.638 ),
(58,13.132 ),
(59,12.646 ),
(60,12.180 ),
(61,11.734 ),
(62,11.305 ),
(63,10.894 ),
(64,10.500 ),
(65,10.121 ),
(66,9.758 ),
(67,9.409 ),
(68,9.074 ),
(69,8.752 ),
(70,8.443 ),
(71,8.147 ),
(72,7.862 ),
(73,7.588 ),
(74,7.325 ),
(75,7.072 ),
(76,6.829 ),
(77,6.595 ),
(78,6.370 ),
(79,6.154 ),
(80,5.946 ),
(81,5.746 ),
(82,5.554 ),
(83,5.369 ),
(84,5.191 ),
(85,5.020 ),
(86,4.854 ),
(87,4.696 ),
(88,4.543 ),
(89,4.395 ),
(90,4.253 ),
(91,4.117 ),
(92,3.985 ),
(93,3.858 ),
(94,3.736 ),
(95,3.618 ),
(96,3.505 ),
(97,3.395 ),
(98,3.290 ),
(99,3.188 ),
(100,3.090 ),
(101,2.995 ),
(102,2.903 ),
(103,2.815 ),
(104,2.730 ),
(105,2.648 ),
(106,2.569 ),
(107,2.492 ),
(108,2.418 ),
(109,2.347 ),
(110,2.278 ),
(111,2.211 ),
(112,2.147 ),
(113,2.085 ),
(114,2.025 ),
(115,1.967 ),
(116,1.911 ),
(117,1.857 ),
(118,1.804 ),
(119,1.754 ),
(120,1.705 ),
(121,1.657 ),
(122,1.611 ),
(123,1.567 ),
(124,1.524 ),
(125,1.482 ),
)
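# Hypothetical helper (not part of the original table): convert a measured resistance
# (same unit as the table values) back to a temperature in degC by linear interpolation.
def resistance_to_temp(r, table=NTC_503_3950):
    # resistance falls monotonically as temperature rises
    if r >= table[0][1]:
        return table[0][0]
    if r <= table[-1][1]:
        return table[-1][0]
    for (t_lo, r_hi), (t_hi, r_lo) in zip(table, table[1:]):
        if r_lo <= r <= r_hi:
            frac = (r_hi - r) / (r_hi - r_lo)
            return t_lo + frac * (t_hi - t_lo)

# e.g. resistance_to_temp(50.000) -> 25.0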
| [
"[email protected]"
] | |
5b4558e27e94f093fd4109b06de784cbda195d8a | 65db19a5914e9e10889da4d750280c826f7dd758 | /app.py | 60bea856b818967c9d0cbfd4998775df1aa7b037 | [
"Apache-2.0"
] | permissive | shrikrishnaholla/delta-server | 02cc7789b66947b9ecd14b07917689a21708f355 | eccbc32864d3b7bd4b577b4288043dc9b1e25c61 | refs/heads/master | 2021-01-23T18:12:20.573461 | 2014-10-11T21:55:08 | 2014-10-11T21:55:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,944 | py | from flask import Flask, request
from flask.ext import restful
from flask.ext.restful import reqparse
import redis
import json
import ast
app = Flask(__name__)
api = restful.Api(app)
def json_type(json_str):
try:
s = json.loads(json_str)
return True
except Exception, e:
raise Exception("Improper data. Use JSON strings")
class User(restful.Resource):
def get(self, email):
r = redis.StrictRedis(host = 'localhost', port = 6379, db=0)
if r.exists(email):
return {'email': email, 'data': r.get(email)}
else:
return {}, 404
class UserList(restful.Resource):
def post(self, email = None):
if email is not None:
restful.abort()
parser = reqparse.RequestParser()
parser.add_argument('email', type = str, required = True, location = 'form')
parser.add_argument('data', type = json_type, required = True, location = 'form')
args = parser.parse_args()
user_email = args.get('email')
user_data = args.get('data')
r = redis.StrictRedis(host = 'localhost', port = 6379, db=0)
if r.exists(user_email):
return {'message':'User already exists'}, 200
else:
r.set(user_email, user_data)
return {'email': user_email, 'data': r.get(user_email)}, 201
class RandReco(restful.Resource):
def get(self, email):
r = redis.StrictRedis(host = 'localhost', port = 6379, db = 0)
if r.exists(email):
rand_user = r.randomkey()
while (rand_user == email):
rand_user = r.randomkey()
return {'email': rand_user, 'data': r.get(rand_user)}, 200
else:
return {'message': 'User does not exist'}, 400
class Reco(restful.Resource):
def algorithm(self, user_email):
try:
# get data
r = redis.StrictRedis(host = 'localhost', port = 6379, db = 0)
user_data = r.get(user_email)
print user_data
print ast.literal_eval(user_data)
# convert data to json
user_data_json = json.loads(user_data)
print 'created user_data_json'
# get another random person
for x in range(1, 30):
rand_user_email = r.randomkey()
print 'got rand_user_email' + str(rand_user_email)
if rand_user_email == user_email:
continue
print r.get(rand_user_email)
rand_user_data = r.get(rand_user_email)
rand_user_data_json = json.loads(r.get(rand_user_email))
# if user and reco_user belong to the same bucket, do another rand
if rand_user_data_json.get('bucket') is None:
continue
if user_data_json.get('bucket') is None:
continue
if rand_user_data_json['bucket'] == user_data_json['bucket']:
continue
# if user and reco_user are connected, continue
# TODO
return rand_user_email
return None
except Exception, e:
raise
print e
print user_email
return None
def get(self, email):
r = redis.StrictRedis(host = 'localhost', port = 6379, db = 0)
if r.exists(email):
reco_user_email = self.algorithm(email)
if reco_user_email is None:
return 500
return {'reco_user_email': reco_user_email, 'data': r.get(reco_user_email)}, 200
else:
return {'message': 'User does not exist'}, 400
api.add_resource(User, '/user/<string:email>')
api.add_resource(UserList, '/user')
api.add_resource(RandReco, '/user/<string:email>/rand')
api.add_resource(Reco, '/user/<string:email>/reco')
if __name__ == '__main__':
app.run(debug=True)
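# Example requests (illustrative; assumes the server and a local Redis are running):
#   curl -X POST -d 'email=alice@example.com' -d 'data={"bucket": 1}' http://localhost:5000/user
#   curl http://localhost:5000/user/alice@example.com
#   curl http://localhost:5000/user/alice@example.com/rand
#   curl http://localhost:5000/user/alice@example.com/reco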
| [
"[email protected]"
] | |
5366c3e95f92aeaefc37b6796093fd3244d74dc9 | b2c3e3ca64502b32fdc9073633da25fc1f418c4c | /left_rotation.py | a94338d319ce0a61fddbe91f9796e6e34b590a9b | [] | no_license | LipsaMishra19/Hackerrank-Challenges | 5e3dca6cffefc77738bc124236c2a7cfd59ee190 | 4d69a722577e69c53d0e1ff44839ea451b5b3166 | refs/heads/master | 2020-12-06T13:11:18.431062 | 2020-01-08T05:07:12 | 2020-01-08T05:07:12 | 232,471,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the rotLeft function below.
def rotLeft(a, d):
return(a[d:]+a[:d])
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
nd = input().split()
n = int(nd[0])
d = int(nd[1])
a = list(map(int, input().rstrip().split()))
result = rotLeft(a, d)
fptr.write(' '.join(map(str, result)))
fptr.write('\n')
fptr.close()
| [
"[email protected]"
] | |
b1f8418acbaad694286eb2a9bc612d3eb52e30f5 | cb5411e97d2d94f3f95c4df568addfa203ffd7ed | /test_screen_shot.py | 57de65d4a324a6ff25ef96a7bc2bb249a8e54f1b | [] | no_license | Remoterwls/pytest_exercise | 886dbfd318fbceaed10721e1ceb6b0e72f37fa34 | ed75e70934fe38a068e3c5b7c38d1541fba2fe86 | refs/heads/master | 2023-01-03T11:46:27.575795 | 2020-10-24T13:41:33 | 2020-10-24T13:41:33 | 304,717,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,067 | py | #!usr/bin/env python
# encoding:utf-8
'''
__Author__: Jack Wu
__function__:
'''
from selenium.webdriver import Chrome
# with Chrome() as driver:
# driver.get('http://www.baidu.com')
# ele = driver.find_elements_by_css_selector('[title="点击一下,了解更多"]')
# ele.screenshot('./image.png')
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
def test_dd():
# from selenium import webdriver
# from selenium.webdriver.chrome.options import Options
# driver =webdriver.Chrome()
from selenium import webdriver
driver = webdriver.Chrome()
# Navigate to url
driver.get("http://www.google.com")
# Store 'google search' button web element
searchBtn = driver.find_element(By.LINK_TEXT, "Sign in")
# Perform double-click action on the element
webdriver.ActionChains(driver).double_click(searchBtn).perform()
| [
"10520426632qq.com"
] | 10520426632qq.com |
998f68f20166111794e3b7c2074b0d04adebef4c | 54b7b01d861280690111df7f3c66ec88f61914a3 | /002_even.py | 4641d89e2973f7432de61905ebb4d44c2ab63c70 | [] | no_license | anjan111/001_Pallavi | 7cbd27cd05dac8f0b1b8f0b48a93b9a100a7a8d9 | 01a2a9bd26180b6aab34ef19be7409fd20014124 | refs/heads/master | 2022-11-26T03:34:44.347613 | 2020-08-05T02:20:19 | 2020-08-05T02:20:19 | 263,794,844 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | #wap find the given number even or not
num = input("enter num : ")
rem = num % 2
print num," is even : ",rem == 0
| [
"[email protected]"
] | |
23c490c1528adcc673b757d246e358d60a407abc | 7cf06162efb87c19db2d04f23518e33b14015876 | /04/task/venv/bin/pip3 | fc7500fdc037a5a7aad7f7f7c2d8f95087d3ff8a | [] | no_license | ivanmolchanov1988/client-server | d5fdfadcd6d6463f5787db5282b825d7b95b1127 | 58bf488df6e5a758adffffd6065eb480a336f15f | refs/heads/main | 2023-06-03T12:05:14.246932 | 2021-05-26T09:06:22 | 2021-05-26T09:06:22 | 362,454,753 | 0 | 0 | null | 2021-05-27T10:28:21 | 2021-04-28T12:12:01 | Python | UTF-8 | Python | false | false | 281 | #!/home/ivan/py/geekbrains/client-server/client-server/04/task/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
4660826660c29044651dd56c2532aeee21028b81 | 63c1e51a5ddbcac2643a3dd2b539e3228b0b57fe | /hashTable.py | 108edade73d4dd2d316c70f38d04eeb754e1303c | [] | no_license | seanjedi/Practice_Problems | efd139440bae19bed305d02b90e3cd1f8cd42d5d | c4c27a21d50c529d7f71a377459526dad89222e4 | refs/heads/master | 2020-05-02T11:11:34.985558 | 2019-06-14T06:04:58 | 2019-06-14T06:04:58 | 177,921,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,830 | py | import random
import copy
import time
random.seed(200)
class Node:
"This is a data node for the linkedList"
def __init__(self, Key = None, Data = None):
self.__data = Data
self.__key = Key
self.__next = None
def setNext(self, Next):
self.__next = Next
def getNext(self):
return self.__next
def getData(self):
return self.__data
def getKey(self):
return self.__key
def __del__(self):
if(self.__next):
del self.__next
class hashTable:
"This is my implementation of a hashTable with a Linked List"
def __init__(self, Size = 1, Limit = 0.8):
if Size <= 0:
Size = 1
self.__maxSize = Size
self.__curSize = 0
self.__limit = Limit
self.table = [None] * Size
# self.hashValue = int(random.random() * 1000)
self.hashValue = Size
# Hash Function
def hash(self, key):
hash = key % self.hashValue
return hash
#Rehash Function
def reHash(self):
self.__maxSize *= 2
        if self.hashValue < self.__maxSize:
            self.hashValue = self.__maxSize
newTable = hashTable(self.__maxSize)
for cur in self.table:
while cur is not None:
newTable.insert(cur.getKey(), cur.getData())
cur = cur.getNext()
self.table = copy.deepcopy(newTable.table)
del newTable
# Insert Function
def insert(self, key, data):
if(self.__curSize >= int(self.__maxSize * self.__limit)):
self.reHash()
pos = hash(key)%self.__maxSize
cur = self.table[pos]
self.__curSize += 1
if cur is None:
self.table[pos] = Node(key, data)
return
while cur is not None:
prev = cur
cur = cur.getNext()
prev.setNext(Node(key, data))
# Insert No Re-Hash Function
#A function to test my linked list implementation by checking what happens if it never rehashes
def insertNoHash(self, key, data):
pos = hash(key)%self.__maxSize
cur = self.table[pos]
self.__curSize += 1
if cur is None:
self.table[pos] = Node(key, data)
return
while cur is not None:
prev = cur
cur = cur.getNext()
prev.setNext(Node(key, data))
# Search Function
def search(self, key):
pos = hash(key)%self.__maxSize
if self.table[pos] is None:
return False
cur = self.table[pos]
while cur is not None:
if cur.getKey() == key:
return cur.getData()
else:
cur = cur.getNext()
return False
# Delete Function
def __del__(self):
for i in self.table:
if i is not None:
del i
Table = hashTable()
Table.insert(1, 2)
Table.insert(2, 2)
Table.insert(3, 3)
Table.insert(3, 3)
Table.insert(4, 4)
Table.insert(10, 123)
Table.insert(200, 321)
Table.insert(21, 83)
start = time.time()
for i in range (20000):
data = int(random.random() * 1000000)
key = int(random.random() * 1000000)
Table.insert(key, data)
end = time.time()
print("Time for normal execution:", end - start)
#Test Normal hash function
# print(Table.search(4))
# print(Table.search(2))
# print(Table.search(200))
# print(Table.search(123))
#Test No Hash Functionality
Table2 = hashTable()
Table2.insert(1,2)
Table2.insert(10,123)
start = time.time()
for i in range (20000):
data = int(random.random() * 1000000)
key = int(random.random() * 1000000)
Table2.insertNoHash(key, data)
end = time.time()
print("Time for no rehash execution:", end - start)
# print(Table2.search(1))
# print(Table2.search(10))
# print(Table2.search(123))
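# Quick sanity check of the search API (illustrative addition): a key that was
# inserted returns its stored data, an absent key returns False.
print("Table2.search(10) ->", Table2.search(10))   # 10 was inserted above with data 123
print("Table2.search(-1) ->", Table2.search(-1))   # never inserted, so False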
del Table | [
"[email protected]"
] | |
9c4263c2823396be4e6cb0c384d34eb4fc0afe80 | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/EightTeV/TprimeTprime/TprimeTprimeToTHTHinc_M_1000_TuneZ2star_8TeV-madgraph_cff.py | ab60a5b19dbddf127b5071564d6d82d1a104780f | [] | no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 5,117 | py | import FWCore.ParameterSet.Config as cms
#from Configuration.Generator.PythiaUEZ2Settings_cfi import *
from Configuration.Generator.PythiaUEZ2starSettings_cfi import *
generator = cms.EDFilter("Pythia6HadronizerFilter",
pythiaHepMCVerbosity = cms.untracked.bool(False),
maxEventsToPrint = cms.untracked.int32(0),
pythiaPylistVerbosity = cms.untracked.int32(0),
comEnergy = cms.double(8000.0),
PythiaParameters = cms.PSet(
pythiaUESettingsBlock,
processParameters = cms.vstring(
'PMAS(25,1)=125.00D0 !mass of Higgs',
'MSTP(1) = 4',
'MSEL=8 ! fourth generation (t4) fermions',
'MWID(8)=2',
'MSTJ(1)=1 ! Fragmentation/hadronization on or off',
'MSTP(61)=1 ! Parton showering on or off',
'PMAS(5,1)=4.8 ! b quark mass', #from Spring11 4000040
'PMAS(6,1)=172.5 ! t quark mass', #from Spring11 4000040
'PMAS(8,1) = 1000.0D0 ! tprime quarks mass',
'PMAS(8,2) = 10.0D0',
'PMAS(8,3) = 100.0D0',
'VCKM(1,1) = 0.97414000D0',
'VCKM(1,2) = 0.22450000D0',
'VCKM(1,3) = 0.00420000D0',
'VCKM(1,4) = 0.02500000D0',
'VCKM(2,1) = 0.22560000D0',
'VCKM(2,2) = 0.97170000D0',
'VCKM(2,3) = 0.04109000D0',
'VCKM(2,4) = 0.05700000D0',
'VCKM(3,1) = 0.00100000D0',
'VCKM(3,2) = 0.06200000D0',
'VCKM(3,3) = 0.91000000D0',
'VCKM(3,4) = 0.41000000D0',
'VCKM(4,1) = 0.01300000D0',
'VCKM(4,2) = 0.04000000D0',
'VCKM(4,3) = 0.41000000D0',
'VCKM(4,4) = 0.91000000D0',
'MDME(66,1)=0 ! g t4',
'MDME(67,1)=0 ! gamma t4',
'MDME(68,1)=0 ! Z0 t (2 : on for particle, off for anti-particle) ',
'MDME(69,1)=0 ! W d',
'MDME(70,1)=0 ! W s',
'MDME(71,1)=0 ! W b (3 : off for particle, on for particle) ',
'MDME(72,1)=0 ! W b4',
'KFDP(73,2)=6 ! defines H0 t',
'MDME(73,1)=1 ! h0 t4',
'MDME(74,1)=-1 ! H+ b',
'MDME(75,1)=-1 ! H+ b4',
'BRAT(66) = 0.0D0',
'BRAT(67) = 0.0D0',
'BRAT(68) = 0.0D0',
'BRAT(69) = 0.0D0',
'BRAT(70) = 0.0D0',
'BRAT(71) = 0.0D0',
'BRAT(72) = 0.0D0',
'BRAT(73) = 1.0D0',
'BRAT(74) = 0.0D0',
'BRAT(75) = 0.0D0',
'MDME(174,1)=1 !Z decay into d dbar',
'MDME(175,1)=1 !Z decay into u ubar',
'MDME(176,1)=1 !Z decay into s sbar',
'MDME(177,1)=1 !Z decay into c cbar',
'MDME(178,1)=1 !Z decay into b bbar',
'MDME(179,1)=1 !Z decay into t tbar',
'MDME(180,1)=-1 !Z decay into b4 b4bar',
'MDME(181,1)=-1 !Z decay into t4 t4bar',
'MDME(182,1)=1 !Z decay into e- e+',
'MDME(183,1)=1 !Z decay into nu_e nu_ebar',
'MDME(184,1)=1 !Z decay into mu- mu+',
'MDME(185,1)=1 !Z decay into nu_mu nu_mubar',
'MDME(186,1)=1 !Z decay into tau- tau+',
'MDME(187,1)=1 !Z decay into nu_tau nu_taubar',
'MDME(188,1)=-1 !Z decay into tau4 tau4bar',
'MDME(189,1)=-1 !Z decay into nu_tau4 nu_tau4bar',
'MDME(190,1)=1 !W decay into u dbar',
'MDME(191,1)=1 !W decay into c dbar',
'MDME(192,1)=1 !W decay into t dbar',
'MDME(193,1)=-1 !W decay into t4 dbar',
'MDME(194,1)=1 !W decay into u sbar',
'MDME(195,1)=1 !W decay into c sbar',
'MDME(196,1)=1 !W decay into t sbar',
'MDME(197,1)=-1 !W decay into t4 sbar',
'MDME(198,1)=1 !W decay into u bbar',
'MDME(199,1)=1 !W decay into c bbar',
'MDME(200,1)=1 !W decay into t bbar',
'MDME(201,1)=-1 !W decay into t4 bbar',
'MDME(202,1)=-1 !W decay into u b4bar',
'MDME(203,1)=-1 !W decay into c b4bar',
'MDME(204,1)=-1 !W decay into t b4bar',
'MDME(205,1)=-1 !W decay into t4 b4bar',
'MDME(206,1)=1 !W decay into e- nu_e',
'MDME(207,1)=1 !W decay into mu nu_mu',
'MDME(208,1)=1 !W decay into tau nu_tau',
'MDME(209,1)=-1 !W decay into tau4 nu_tau4'),
# This is a vector of ParameterSet names to be read, in this order
parameterSets = cms.vstring('pythiaUESettings',
'processParameters')
),
jetMatching = cms.untracked.PSet(
scheme = cms.string("Madgraph"),
mode = cms.string("auto"), # soup, or "inclusive" / "exclusive"
MEMAIN_etaclmax = cms.double(5.0),
MEMAIN_qcut = cms.double(-1),
MEMAIN_nqmatch = cms.int32(-1),
MEMAIN_minjets = cms.int32(-1),
MEMAIN_maxjets = cms.int32(-1),
MEMAIN_showerkt = cms.double(0),
MEMAIN_excres = cms.string(''),
outTree_flag = cms.int32(0)
)
)
ProductionFilterSequence = cms.Sequence(generator)
| [
"[email protected]"
] | |
ef0f5fbc2cd53cd2b199a530b0d1f3a4b1ad1db0 | 4ec239aba047d0ff36348bea2e59595b9fb845ac | /Intro_to_encryption_safari/py3/symmetric.py | 22d632b57ffb25a9cbdb22168cc27a10a1556122 | [
"Apache-2.0"
] | permissive | Dmdv/PythonPlayground | eff8e6fa374ec04a0f4dcff4334a855428bd3f21 | f730c43dad69c746ce1c4ff7374af4ec933aef19 | refs/heads/master | 2018-11-16T17:26:07.479095 | 2018-09-02T18:26:36 | 2018-09-02T18:26:36 | 107,284,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,947 | py | """ Symmetric Cryptography Exercise
In this exercise you will use AES to encrypt and decrypt some messages. AES can be
applied in a number of modes. These include Electronic Code Book (ECB), cipher block
chaining (CBC) and counter mode (CTR).
Tasks:
1) Use AES-CBC to encrypt then decrypt a single block of data
2) Use AES-CTR to decrypt a longer message for which you already have the ciphertext.
ECB mode has been implemented for you, your task is to finish off CBC and CTR modes!
To complete this exercise, read through the document filling in the necessary missing
code. If you need a code reference, you will find it here:
https://cryptography.io/en/latest/hazmat/primitives/symmetric-encryption/#cryptography.hazmat.primitives.ciphers.Cipher
"""
import os
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
backend = default_backend()
# Initialise a new 128 bit key
key = os.urandom(16)
# This is a one block message we can encrypt. One AES block is 128 bits.
message = b"a secret message"
# Initialise the cipher in ECB mode, and use the encryptor and decryptor interfaces
# to encrypt and decrypt the ciphertext
cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=backend)
# Encryption
encryptor = cipher.encryptor()
ciphertext = encryptor.update(message) + encryptor.finalize()
# Decryption
decryptor = cipher.decryptor()
plaintext = decryptor.update(ciphertext) + decryptor.finalize()
print ("-- ECB --")
print ("Ciphertext is:", ciphertext)
print ("Plaintext is:", plaintext)
### Task 1 ###
# Now it's your turn! CBC uses a similar interface to ECB, except that it requires both a key, and an iv
# Initialise these randomly now. Make the key 32 bytes and the IV 16 bytes.
key = None
iv = None
# Now fill in the code here to encrypt the same message as ECB, remember to use the CBC.
cipher = None
encryptor = None
ciphertext = None
decryptor = None
plaintext = None
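# One possible completion (sketch, kept separate from the exercise placeholders
# above): a 32-byte key, a 16-byte IV and an AES-CBC cipher, mirroring the ECB
# example. The message is exactly one block, so no padding is needed here.
key = os.urandom(32)
iv = os.urandom(16)
cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend)
encryptor = cipher.encryptor()
ciphertext = encryptor.update(message) + encryptor.finalize()
decryptor = cipher.decryptor()
plaintext = decryptor.update(ciphertext) + decryptor.finalize()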
print ("-- CBC --")
print ("Ciphertext is:", ciphertext)
print ("Plaintext is:", plaintext)
### Task 2 ###
# Last we'll look at CTR mode. This mode converts a block cipher into
# a stream cipher. This means that CTR mode neatly handles messages that
# are not a multiple of the block length without needing padding.
# Here is just such a message, that's already been encrypted:
ciphertext = b'\xb8\xbf\xa0$~\xbe\x87*\x86\x18\xa4g' \
b'\xd4=MAt\xd8X\x95<?>\xa2r\x04;{@\x8c' \
b'\xab!\rC\xb3\x0e\x10\xa9\t;\x83\xce|'
key = b'\xfa\t\xc6\xdd\xac\xb0a\x99\xef]{`\x07\xe7\xbf\xee'
iv = b'P\xbe\xd9\x04\xd00;4\xf9\xeb^\x0f3\x16\xfb\xa3'
# Create a cipher here to decrypt the ciphertext using CTR mode.
# No partially completed cipher code this time!
plaintext = None
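# One possible completion (sketch): CTR mode turns AES into a stream cipher, so
# the ciphertext above decrypts directly without padding. The 16-byte value named
# iv is used as the initial counter block for modes.CTR.
cipher = Cipher(algorithms.AES(key), modes.CTR(iv), backend=backend)
decryptor = cipher.decryptor()
plaintext = decryptor.update(ciphertext) + decryptor.finalize()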
print ("-- CTR --")
print ("Ciphertext is:", ciphertext)
print ("Plaintext is:", plaintext)
| [
"[email protected]"
] | |
9104bdacbb3e1bed271093270648f77d93e87665 | 4e5b20fdcca20f458322f0a8cd11bbdacb6fb3e5 | /app.py | 651d1f8222de485005a20c3c1f65be3a7ef0af9e | [] | no_license | shijingyu/sunningAPI | 241f33b0660dc84635ce39688fed499f5c57a5da | 4a3b2ef7f9bdc4707d1eaff185bc7eb636fe90d5 | refs/heads/master | 2020-04-24T22:15:11.584028 | 2019-02-24T06:41:20 | 2019-02-24T06:41:20 | 172,305,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | from flask import Flask
from socket import *
app = Flask(__name__)
import suning.api
@app.route('/')
def hello_world():
return 'Hello World!'
@app.route('/suning/swap', methods=['GET'])
def sn_swap():
request = suning.api.CustompromotionurlQueryRequest()
request.adBookId = "195296"
request.visitUrl = "https://product.suning.com/0000000000/690105206.html"
domain = "https://open.suning.com"
appKey = "*"
appSecret = "*"
request.setDomainInfo(domain, "80")
request.setAppInfo(appKey, appSecret)
try:
result = request.getResponse()
return(result)
except Exception as e:
        return str(e)
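# With the app running (see the __main__ block below), the endpoint can be
# exercised with e.g.:  curl http://localhost:5000/suning/swap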
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
| [
"[email protected]"
] | |
b9bc390702534d4d5fa1c3abcbd877f62252a082 | 190463e2cdab595b530661889b7f65a66943ee7d | /interventions.py | 6dbcca36e53245a8efc1f3e5e5def0947a3dfc20 | [] | no_license | xiscosc/migrator_mundiagua | a6f8f57541cee7da6d2b5cdb3cc7667013efce31 | 98c630bc1e6e05dcd31c913cb914cd626a444646 | refs/heads/master | 2021-01-19T00:17:17.975610 | 2016-08-04T23:03:53 | 2016-08-04T23:03:53 | 64,950,444 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,673 | py | from utils import make_connection_postgres, make_connection_mysql, close_connection_postgres, close_connection_mysql
def migrate_interventions():
print("MIGRATING INTERVENTIONS...")
cnx_m, cursor_m = make_connection_mysql()
cnx_p, cursor_p = make_connection_postgres()
query = "SELECT a_id, a_d_id, a_z_id, a_e_id, a_crea_id, a_asignado_id, a_nota, a_fecha, a_notaoperario from aviso order by a_id asc"
cursor_m.execute(query)
last_id = 0
for (a_id, a_d_id, a_z_id, a_e_id, a_crea_id, a_asignado_id, a_nota, a_fecha, a_notaoperario) in cursor_m:
cursor_p.execute(
'INSERT INTO intervention_intervention (id, address_id, zone_id, status_id, created_by_id, assigned_id, description, date, note, starred) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, FALSE)',
(a_id, a_d_id, a_z_id, a_e_id, a_crea_id, a_asignado_id, a_nota, a_fecha, a_notaoperario))
last_id = a_id
last_id += 1
cursor_p.execute('ALTER SEQUENCE intervention_intervention_id_seq RESTART WITH ' + str(last_id))
print("\tUPDATED LAST ID INTERVENTION " + str(last_id))
close_connection_mysql(cnx_m, cursor_m)
close_connection_postgres(cnx_p, cursor_p)
print("MIGRATING INTERVENTIONS-LOGS...")
migrate_intervention_logs()
print("MIGRATING INTERVENTIONS-MODIFICATIONS...")
migrate_interventions_modifications()
def migrate_interventions_modifications():
cnx_m, cursor_m = make_connection_mysql()
cnx_p, cursor_p = make_connection_postgres()
query = "SELECT m_a_id, m_u_id, m_modificacion, m_fecha from modificacion_aviso"
cursor_m.execute(query)
for (m_a_id, m_u_id, m_modificacion, m_fecha) in cursor_m:
cursor_p.execute(
'INSERT INTO intervention_interventionmodification (intervention_id, created_by_id, note, date) VALUES (%s, %s, %s, %s)',
(m_a_id, m_u_id, m_modificacion, m_fecha))
close_connection_mysql(cnx_m, cursor_m)
close_connection_postgres(cnx_p, cursor_p)
def migrate_intervention_logs():
cnx_m, cursor_m = make_connection_mysql()
cnx_p, cursor_p = make_connection_postgres()
query = "SELECT lav_a_id, lav_e_id, lav_asignado_id, lav_u_id, lav_fecha from log_aviso"
cursor_m.execute(query)
for (lav_a_id, lav_e_id, lav_asignado_id, lav_u_id, lav_fecha) in cursor_m:
cursor_p.execute(
'INSERT INTO intervention_interventionlog (intervention_id, status_id, assigned_id, created_by_id, date) VALUES (%s, %s, %s, %s, %s)',
(lav_a_id, lav_e_id, lav_asignado_id, lav_u_id, lav_fecha))
close_connection_mysql(cnx_m, cursor_m)
close_connection_postgres(cnx_p, cursor_p) | [
"[email protected]"
] | |
3ac6b5be460582651d6ba14c649f93eb8b63fdf4 | 03638440d9b6a11fd5adcc90abd826108c514d7e | /adversarialnlp/commands/test_install.py | 823cbfc127d099fb7d631f802477f6fd7f68b892 | [] | no_license | greydoubt/adversarialnlp | 843a00df0539eaff6e59da3f28b043df2968af04 | 543c02111c57bf245f2aa145c0e5a4879d151001 | refs/heads/master | 2023-05-23T01:10:27.765430 | 2018-10-26T09:24:50 | 2018-10-26T09:24:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,459 | py | """
The ``test-install`` subcommand verifies
an installation by running the unit tests.
.. code-block:: bash
$ adversarialnlp test-install --help
usage: adversarialnlp test-install [-h] [--run-all]
[--include-package INCLUDE_PACKAGE]
Test that installation works by running the unit tests.
optional arguments:
-h, --help show this help message and exit
--run-all By default, we skip tests that are slow or download
large files. This flag will run all tests.
--include-package INCLUDE_PACKAGE
additional packages to include
"""
import argparse
import logging
import os
import pathlib
import pytest
from allennlp.commands.subcommand import Subcommand
import adversarialnlp
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class TestInstall(Subcommand):
def add_subparser(self, name: str, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
# pylint: disable=protected-access
description = '''Test that installation works by running the unit tests.'''
subparser = parser.add_parser(
name, description=description, help='Run the unit tests.')
subparser.add_argument('--run-all', action="store_true",
help="By default, we skip tests that are slow "
"or download large files. This flag will run all tests.")
subparser.set_defaults(func=_run_test)
return subparser
def _get_module_root():
return pathlib.Path(adversarialnlp.__file__).parent
def _run_test(args: argparse.Namespace):
initial_working_dir = os.getcwd()
module_parent = _get_module_root().parent
logger.info("Changing directory to %s", module_parent)
os.chdir(module_parent)
test_dir = os.path.join(module_parent, "adversarialnlp")
logger.info("Running tests at %s", test_dir)
if args.run_all:
# TODO(nfliu): remove this when notebooks have been rewritten as markdown.
exit_code = pytest.main([test_dir, '--color=no', '-k', 'not notebooks_test'])
else:
exit_code = pytest.main([test_dir, '--color=no', '-k', 'not sniff_test and not notebooks_test',
'-m', 'not java'])
# Change back to original working directory after running tests
os.chdir(initial_working_dir)
exit(exit_code)
| [
"[email protected]"
] | |
bf9f6c7fa8cce067d73d8950c1e6e677c288ade0 | 643ffef18a6d026648a10bd79bda2fa52f342512 | /SpiderRequests/02_Spider_Jiexi/02_xpath_methods.py | 6c44f0603df4001b53fd2cef05eecd2546b64125 | [] | no_license | ZhaoJiePG/PyScrapy | ba5c4199c867a2e879cb9f5aa1dd8a866d0f4e26 | a013bb6c46a3f0dff1072e662f028b7e17d0289d | refs/heads/master | 2022-12-10T05:27:37.054110 | 2021-03-15T05:50:39 | 2021-03-15T05:50:39 | 181,261,266 | 0 | 0 | null | 2022-12-08T02:35:48 | 2019-04-14T04:43:51 | HTML | UTF-8 | Python | false | false | 1,448 | py | # Author:Aliex ZJ
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import re
import requests
from lxml import etree
url = 'http://news.baidu.com/'
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'
}
res = requests.get(url=url,headers=headers)
data = res.content.decode("utf-8")
# 1. Convert the decoded page into an element tree that xpath can query
xpath_data = etree.HTML(data)
'''
xpath syntax:
1. child node: /
2. any descendant node: //
3. exact attribute match: //a[@attribute="attribute value"]
4. text wrapped by a tag: text()
5. attribute value: @href
6. siblings can be selected with an index
7. fuzzy match: [contains(@class,"a")]
8. next sibling node (same level): following-sibling::*[index]
'''
# 2. Call the xpath method with these selectors
result = xpath_data.xpath('/html/head/title/text()')
result = xpath_data.xpath('//a/text()')
result = xpath_data.xpath('//a[@mon="c=civilnews&ct=0&a=27&col=8&locname=%E5%8D%97%E4%BA%AC&locid=2494"]/text()')
result = xpath_data.xpath('//a[@mon="ct=0&a=2&c=civilnews&pn=1"]/@href')
result1 = xpath_data.xpath('//li/a/text()')
result2 = xpath_data.xpath('//li/a/@href')
result2 = xpath_data.xpath('//div[contains(@class,"a")]')
result_list = xpath_data.xpath('//div[contains(@class,"link-primary")]')
result_list = xpath_data.xpath('//head/following-sibling::*[1]')
print(len(result1))
print(len(result2))
res_dict = {result1[x]:result2[x] for x in range(0,len(result2))}
print(res_dict) | [
"[email protected]"
] | |
5e1f2af91e94223d6648d955153a9cc44fc7f112 | 6c22354f6b977762b02cb12752614dddcdef67d2 | /reconcile/test/test_terraform_repo.py | a32593823c199255df8fd91ee3f6bea8b89b9a02 | [
"Apache-2.0"
] | permissive | app-sre/qontract-reconcile | 266a284b96fd4876305b3c76887e982f40a065bd | 91734756b84d646ac1e4b5c4d8de2cc812ea6e46 | refs/heads/master | 2023-08-31T11:30:35.103253 | 2023-08-30T15:05:45 | 2023-08-30T15:05:45 | 157,718,650 | 33 | 72 | Apache-2.0 | 2023-09-14T12:00:01 | 2018-11-15T13:47:25 | Python | UTF-8 | Python | false | false | 7,014 | py | from unittest.mock import MagicMock
import pytest
from reconcile.gql_definitions.fragments.vault_secret import VaultSecret
from reconcile.gql_definitions.terraform_repo.terraform_repo import (
AWSAccountV1,
TerraformRepoV1,
)
from reconcile.terraform_repo import (
TerraformRepoIntegration,
TerraformRepoIntegrationParams,
)
from reconcile.utils.exceptions import ParameterError
from reconcile.utils.state import State
A_REPO = "https://git-example/tf-repo-example"
A_REPO_SHA = "a390f5cb20322c90861d6d80e9b70c6a579be1d0"
B_REPO = "https://git-example/tf-repo-example2"
B_REPO_SHA = "94edb90815e502b387c25358f5ec602e52d0bfbb"
AWS_UID = "000000000000"
AUTOMATION_TOKEN_PATH = "aws-secrets/terraform/foo"
@pytest.fixture
def existing_repo(aws_account) -> TerraformRepoV1:
return TerraformRepoV1(
name="a_repo",
repository=A_REPO,
ref=A_REPO_SHA,
account=aws_account,
projectPath="tf",
delete=False,
)
@pytest.fixture
def new_repo(aws_account) -> TerraformRepoV1:
return TerraformRepoV1(
name="b_repo",
repository=B_REPO,
ref=B_REPO_SHA,
account=aws_account,
projectPath="tf",
delete=False,
)
@pytest.fixture()
def automation_token() -> VaultSecret:
return VaultSecret(path=AUTOMATION_TOKEN_PATH, version=1, field="all", format=None)
@pytest.fixture
def aws_account(automation_token) -> AWSAccountV1:
return AWSAccountV1(
name="foo",
uid="000000000000",
automationToken=automation_token,
)
@pytest.fixture
def int_params() -> TerraformRepoIntegrationParams:
return TerraformRepoIntegrationParams(print_to_file=None, validate_git=False)
@pytest.fixture()
def state_mock() -> MagicMock:
return MagicMock(spec=State)
def test_addition_to_existing_repo(existing_repo, new_repo, int_params, state_mock):
existing = [existing_repo]
desired = [existing_repo, new_repo]
integration = TerraformRepoIntegration(params=int_params)
diff = integration.calculate_diff(
existing_state=existing, desired_state=desired, dry_run=False, state=state_mock
)
assert diff == [new_repo]
# ensure that the state is saved for the new repo
state_mock.add.assert_called_once_with(
new_repo.name, new_repo.dict(by_alias=True), force=True
)
def test_updating_repo_ref(existing_repo, int_params, state_mock):
existing = [existing_repo]
updated_repo = TerraformRepoV1.copy(existing_repo)
updated_repo.ref = B_REPO_SHA
integration = TerraformRepoIntegration(params=int_params)
diff = integration.calculate_diff(
existing_state=existing,
desired_state=[updated_repo],
dry_run=False,
state=state_mock,
)
assert diff == [updated_repo]
state_mock.add.assert_called_once_with(
updated_repo.name, updated_repo.dict(by_alias=True), force=True
)
def test_fail_on_update_invalid_repo_params(existing_repo, int_params):
existing = [existing_repo]
updated_repo = TerraformRepoV1.copy(existing_repo)
updated_repo.name = "c_repo"
updated_repo.project_path = "c_repo"
updated_repo.repository = B_REPO
updated_repo.ref = B_REPO_SHA
updated_repo.delete = True
integration = TerraformRepoIntegration(params=int_params)
with pytest.raises(ParameterError):
integration.calculate_diff(
existing_state=existing,
desired_state=[updated_repo],
dry_run=True,
state=None,
)
def test_delete_repo(existing_repo, int_params, state_mock):
existing = [existing_repo]
updated_repo = TerraformRepoV1.copy(existing_repo)
updated_repo.delete = True
integration = TerraformRepoIntegration(params=int_params)
diff = integration.calculate_diff(
existing_state=existing,
desired_state=[updated_repo],
dry_run=False,
state=state_mock,
)
assert diff == [updated_repo]
state_mock.rm.assert_called_once_with(updated_repo.name)
def test_delete_repo_without_flag(existing_repo, int_params):
existing = [existing_repo]
integration = TerraformRepoIntegration(params=int_params)
with pytest.raises(ParameterError):
integration.calculate_diff(
existing_state=existing, desired_state=[], dry_run=True, state=None
)
def test_get_repo_state(s3_state_builder, int_params, existing_repo):
state = s3_state_builder(
{
"ls": [
"/a_repo",
],
"get": {
"a_repo": {
"name": "a_repo",
"repository": A_REPO,
"ref": A_REPO_SHA,
"projectPath": "tf",
"delete": False,
"account": {
"name": "foo",
"uid": AWS_UID,
"automationToken": {
"path": AUTOMATION_TOKEN_PATH,
"field": "all",
"version": 1,
"format": None,
},
},
}
},
}
)
integration = TerraformRepoIntegration(params=int_params)
existing_state = integration.get_existing_state(state=state)
assert existing_state == [existing_repo]
def test_update_repo_state(int_params, existing_repo, state_mock):
integration = TerraformRepoIntegration(params=int_params)
existing_state: list = []
desired_state = [existing_repo]
integration.calculate_diff(
existing_state=existing_state,
desired_state=desired_state,
dry_run=False,
state=state_mock,
)
state_mock.add.assert_called_once_with(
existing_repo.name, existing_repo.dict(by_alias=True), force=True
)
def test_fail_on_multiple_repos_dry_run(int_params, existing_repo, new_repo):
integration = TerraformRepoIntegration(params=int_params)
desired_state = [existing_repo, new_repo]
with pytest.raises(Exception):
integration.calculate_diff(
existing_state=[], desired_state=desired_state, dry_run=True, state=None
)
def test_succeed_on_multiple_repos_non_dry_run(int_params, existing_repo, new_repo):
integration = TerraformRepoIntegration(params=int_params)
desired_state = [existing_repo, new_repo]
diff = integration.calculate_diff(
existing_state=[], desired_state=desired_state, dry_run=False, state=None
)
assert diff
if diff:
assert diff.sort(key=lambda r: r.name) == desired_state.sort(
key=lambda r: r.name
)
def test_no_op_succeeds(int_params, existing_repo):
integration = TerraformRepoIntegration(params=int_params)
state = [existing_repo]
diff = integration.calculate_diff(
existing_state=state, desired_state=state, dry_run=True, state=None
)
assert diff is None
| [
"[email protected]"
] | |
beda0013e67c007b612cb17d4abdc746145d1673 | dde6b373a87501c04b203708a45f080c949aab16 | /tests/test_iters.py | 4311101a7866cf383f5c01d8642ae6b0f31b3d6f | [
"MIT"
] | permissive | alexandershov/mess | 7c7f1cb8f85ff0aefe98878bf180dff8c3e190c6 | 7b0d956c1fd39cca2e4adcd5dc35952ec3ed3fd5 | refs/heads/master | 2021-01-21T23:37:56.397941 | 2015-05-03T14:12:38 | 2015-05-03T14:12:38 | 34,716,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 783 | py | from mess import iters
def test_pairs():
assert list(iters.pairs([1, 2, 3, 4])) == [(1, 2), (2, 3), (3, 4)]
def test_pairs_empty():
assert list(iters.pairs([])) == []
def test_pairs_not_enough_items():
assert list(iters.pairs([1])) == []
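# For reference, the pairs() behaviour exercised above matches this sketch
# (the real mess.iters implementation is not part of this test module):
#
#     def pairs(iterable):
#         items = list(iterable)
#         return zip(items, items[1:])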
def test_lines():
assert list(iters.lines(['a\n', 'b\n', 'c'])) == ['a', 'b', 'c']
def test_length():
assert iters.length(iter([0, 1])) == 2
def test_length_empty():
assert iters.length(iter([])) == 0
def test_length_list():
assert iters.length([0, 1]) == 2
def test_groupby():
assert (dict(iters.groupby([0, 1, 2, 3], key=lambda n: n % 2 == 0))
== {True: [0, 2], False: [1, 3]})
def test_groupby_no_key():
assert dict(iters.groupby([0, 1, 1, 0, 1])) == {0: [0, 0], 1: [1, 1, 1]} | [
"[email protected]"
] | |
36147e1d160595d505898d0c444e90bddf155132 | 7cbc9c284b2dd4569a59fe1c8db681a5810d739a | /test/constraint_test.py | 91a2ae59dd34c796a1530e85e1c4397dfbe32f53 | [] | no_license | emkor/n-queen-problem | 755e8040339d9f2110aae8df855041a2dc2f5253 | e6f9de66a13a563f2cb87924fcf912ddb6d6b0a7 | refs/heads/master | 2021-05-01T17:40:17.353498 | 2016-11-18T14:53:52 | 2016-11-18T14:53:52 | 73,830,248 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,590 | py | import unittest
from constraint import SameColumnConstraint, SameRowConstraint, SameDiagonalConstraint
from model import Chessboard, Queen
class ConstraintTest(unittest.TestCase):
def setUp(self):
self.same_column_constraint = SameColumnConstraint()
self.same_row_constraint = SameRowConstraint()
self.same_diagonal_constraint = SameDiagonalConstraint()
def test_should_same_column_constraint_fail(self):
chessboard = Chessboard(n=4)
chessboard.add_queen(Queen(0, 0))
self.assertFalse(self.same_column_constraint.is_broken(chessboard))
self.assertFalse(self.same_row_constraint.is_broken(chessboard))
self.assertFalse(self.same_diagonal_constraint.is_broken(chessboard))
chessboard.add_queen(Queen(0, 2))
self.assertTrue(self.same_column_constraint.is_broken(chessboard))
self.assertFalse(self.same_row_constraint.is_broken(chessboard))
self.assertFalse(self.same_diagonal_constraint.is_broken(chessboard))
def test_should_same_row_constraint_fail(self):
chessboard = Chessboard(n=4)
chessboard.add_queen(Queen(1, 3))
self.assertFalse(self.same_column_constraint.is_broken(chessboard))
self.assertFalse(self.same_row_constraint.is_broken(chessboard))
self.assertFalse(self.same_diagonal_constraint.is_broken(chessboard))
chessboard.add_queen(Queen(0, 3))
self.assertFalse(self.same_column_constraint.is_broken(chessboard))
self.assertTrue(self.same_row_constraint.is_broken(chessboard))
self.assertFalse(self.same_diagonal_constraint.is_broken(chessboard))
def test_should_same_diagonal_constraint_fail(self):
chessboard = Chessboard(n=4)
chessboard.add_queen(Queen(1, 1))
self.assertFalse(self.same_column_constraint.is_broken(chessboard))
self.assertFalse(self.same_row_constraint.is_broken(chessboard))
self.assertFalse(self.same_diagonal_constraint.is_broken(chessboard))
chessboard.add_queen(Queen(3, 3))
self.assertFalse(self.same_column_constraint.is_broken(chessboard))
self.assertFalse(self.same_row_constraint.is_broken(chessboard))
self.assertTrue(self.same_diagonal_constraint.is_broken(chessboard))
def test_should_secondary_diagonal_constraint_fail(self):
chessboard = Chessboard(n=4)
chessboard.add_queen(Queen(2, 0))
self.assertFalse(self.same_column_constraint.is_broken(chessboard))
self.assertFalse(self.same_row_constraint.is_broken(chessboard))
self.assertFalse(self.same_diagonal_constraint.is_broken(chessboard))
chessboard.add_queen(Queen(3, 1))
self.assertFalse(self.same_column_constraint.is_broken(chessboard))
self.assertFalse(self.same_row_constraint.is_broken(chessboard))
self.assertTrue(self.same_diagonal_constraint.is_broken(chessboard))
def test_should_none_of_constraints_fail(self):
chessboard = Chessboard(n=4)
chessboard.add_queen(Queen(1, 0))
chessboard.add_queen(Queen(0, 2))
self.assertFalse(self.same_column_constraint.is_broken(chessboard))
self.assertFalse(self.same_row_constraint.is_broken(chessboard))
self.assertFalse(self.same_diagonal_constraint.is_broken(chessboard))
chessboard.add_queen(Queen(3, 1))
self.assertFalse(self.same_column_constraint.is_broken(chessboard))
self.assertFalse(self.same_row_constraint.is_broken(chessboard))
self.assertFalse(self.same_diagonal_constraint.is_broken(chessboard))
| [
"[email protected]"
] | |
3ac1f4ec3427ddd692f5ddd077650da8bf334f4f | 36add5afc63ec09d63b8a877c29c17391938ee5c | /.history/speech_sentiment_20201118101212.py | 3a31838199995c72518c7341918baad7cc2681f7 | [] | no_license | E-STAT/sentiment_api | e84eb04a9f21c7368ca20bdb97436ffea9f65f25 | bd9ee0d78d9eac8b6448b96c2560611a64f7b79d | refs/heads/master | 2023-01-12T13:06:14.654883 | 2020-11-20T11:30:22 | 2020-11-20T11:30:22 | 314,534,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | from speech_helpers import convert_to_wav, show_pydub_stats, transcribe_audio
from nltk.sentiment.vader import SentimentIntensityAnalyzer
sid = SentimentIntensityAnalyzer() | [
"[email protected]"
] | |
a5ac43b495073b76d4593f738e88ffc420fd3db4 | 44791db4d9825835a120538e166e5d83d00344a0 | /commands/modlitwa.py | 5ea292ee5b0b4131ec576cdc7419f844d4f1cb9f | [] | no_license | asdfMaciej/fb-message-bot | 5245c7938ffd34c340ab36a2d996aa6f03855b52 | cf7b7260a102656a4c567a366853eac6f79f5280 | refs/heads/master | 2020-12-02T23:58:36.796423 | 2017-07-04T04:59:49 | 2017-07-04T04:59:49 | 95,969,034 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | import requests
from bs4 import BeautifulSoup
from random import randint
class Command:
command_names = [
'.modlitwa', '.deusvult', '.avemaria', '.maria',
'.jezus', '.jesus', '.psalm', '.psalmy', '.biblia'
]
admin_only = False
description = "Losowa modlitwa."
@staticmethod
def run(self, params_d):
url = "http://www.biblia.deon.pl/otworz.php?skrot=Ps "
cyferka = str(randint(1, 150))
r = requests.get(url+cyferka)
r.encoding = 'iso-8859-2'
soup = BeautifulSoup(r.text)
div = soup.find("div", {"class": "tresc"})
params_d['functions_holder']._send_line(
div.text, params_d['thread_id'], params_d['thread_type']
)
| [
"[email protected]"
] | |
3a7f9e20b8446ba5bc8290e1f3ba4b7f4a7b16ce | c7badb87d60a543de6464a843e8c119edf312a04 | /natas26.py | e14992867a1b2e227c394a386bc0262ebc3e812e | [] | no_license | mikelty/otw-natas-python3 | ec4915c0c7a676b2518b7a9387e9bf9bf409505b | d1f448f4f43c9ab0bd1180a8dc76c5e2a22c9f1f | refs/heads/master | 2020-12-07T14:56:48.470448 | 2020-01-09T06:59:35 | 2020-01-09T06:59:35 | 232,741,989 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | import requests, re
import base64
import urllib
u='natas26'
p='oGgWAJ7zcGT28vYazGo4rkhOPDhBu34T'
l=f'http://{u}.natas.labs.overthewire.org/'
sess=requests.Session()
resp=sess.get(l,auth=(u, p))
#print(base64.b64decode(requests.utils.unquote(drawing)))
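# The 'drawing' cookie set below is a base64-encoded, PHP-serialized Logger object:
# its logFile points at img/mikey2520.php and its exitMsg carries a small PHP
# payload that reads /etc/natas_webpass/natas27, so requesting that image path
# afterwards prints the next level's password.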
sess.cookies['drawing']='Tzo2OiJMb2dnZXIiOjM6e3M6MTU6IgBMb2dnZXIAbG9nRmlsZSI7czoxNzoiaW1nL21pa2V5MjUyMC5waHAiO3M6MTU6IgBMb2dnZXIAaW5pdE1zZyI7czoxMToia25vY2trbm9jawoiO3M6MTU6IgBMb2dnZXIAZXhpdE1zZyI7czo1MDoiPD9waHAgc3lzdGVtKCdjYXQgL2V0Yy9uYXRhc193ZWJwYXNzL25hdGFzMjcnKTsgPz4iO30='
resp=sess.get(l+'?x1=0&y1=0&x2=500&y2=500',auth=(u, p))
resp=sess.get(l+'img/mikey2520.php',auth=(u, p))
print(resp.text)
| [
"[email protected]"
] | |
10c2d2784238c53c2afed1b904c7e067ec9cdd2b | c9abbbfd1ccdbdf75c5b35b28c841b6fc8165ee6 | /AtCoder/ABC B/141.py | d46b6287572a8ee7ee04cc3197cd653e9fb7af55 | [] | no_license | kanade9/kyopro | 8d9b12a4da2353595836cf68066e2b425379990b | 3b5d998b99603c4473dc22e62252a2dd5ba143c2 | refs/heads/master | 2023-04-20T15:18:37.199790 | 2021-05-19T17:49:16 | 2021-05-19T17:49:16 | 175,988,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | s = input()
odd = ['R', 'U', 'D']
even = ['L', 'U', 'D']
for i in range(len(s)):
# print(s[i])
if (i+1) % 2 == 0:
if s[i] not in even:
print('No')
exit()
else:
if s[i] not in odd:
print('No')
exit()
print('Yes')
| [
"[email protected]"
] | |
970b56b389bc16aa5b0aa25d8f0b0c21fe60b45a | 4af090efabd08ef73c411a00ce4972a1c6f30a22 | /python_100days/9day/game_01.py | a0a84bc845979886d6284387f96bae8696de9d51 | [] | no_license | predatory123/byhytest | e52bca664f9461c9309aaa9bf779c02368ed937c | 578206c9ec9253d0d9325e72cdc13dde6eeb2fc1 | refs/heads/master | 2023-04-26T13:33:14.462408 | 2021-05-20T13:33:37 | 2021-05-20T14:26:22 | 369,213,148 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,189 | py | # 扑克游戏
import random
class Card(object):
"""一张牌"""
    def __init__(self, suite, face):
        self._suite = suite
        self._face = face
    @property
    def face(self):
        return self._face

    @property
    def suite(self):
        return self._suite
def __str__(self):
if self._face == 1:
face_str = 'A'
elif self._face == 11:
face_str = 'J'
elif self._face == 12:
face_str = 'Q'
elif self._face == 13:
face_str = 'K'
else:
face_str = str(self._face)
        return '%s%s' % (self._suite, face_str)
def __repr__(self):
return self.__str__()
class Poker(object):
"""一副牌"""
def __init__(self):
        self._cards = [Card(suite, face)
                       for suite in '♠♥♣♦'
                       for face in range(1, 14)]
self._current = 0
@property
def cards(self):
return self._cards
def shuffle(self):
"""洗牌(随机乱序)"""
self._current = 0
random.shuffle(self._cards)
@property
def next(self):
"""发牌"""
card = self._cards[self._current]
self._current += 1
return card
@property
def has_next(self):
"""还有没有牌"""
return self._current < len(self._cards)
class Player(object):
"""玩家"""
def __init__(self, name):
self._name = name
self._cards_on_hand = []
@property
def name(self):
return self._name
@property
def cards_on_hand(self):
return self._cards_on_hand
def get(self, card):
"""摸牌"""
self._cards_on_hand.append(card)
def arrange(self, card_key):
"""玩家整理手上的牌"""
self._cards_on_hand.sort(key=card_key)
# 排序规则-先根据花色再根据点数排序
def get_key(card):
return (card.suite, card.face)
def main():
p = Poker()
p.shuffle()
players = [Player('东邪'), Player('西毒'), Player('南帝'), Player('北丐')]
for _ in range(13):
for player in players:
player.get(p.next)
for player in players:
print(player.name + ':', end=' ')
player.arrange(get_key)
print(player.cards_on_hand)
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
082ea7fa18bb8e8f00914925ed266d3cc7310486 | 0db1fbcb86deacc3b59a2f0b9c1a4ae619a2c17a | /periodic_table.py | 2f59a7738cac5882df29b9e1449fe4fd4ca6b745 | [
"BSD-3-Clause"
] | permissive | jmwoll/isocalc | 5c79888974925500fbff87ced7815020ed39250b | cf2cdeefbac5d7ace7c8be14067dbb2f2d8e7ec8 | refs/heads/master | 2021-04-28T18:13:06.567865 | 2018-02-17T16:22:08 | 2018-02-17T16:22:08 | 121,868,115 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 34,578 | py | # This file is licensed under the BSD 3-Clause License.
#
# Copyright (c) 2018, Jan Wollschläger
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class PeriodicTable(object):
def __init__(self):
self.elements = {}
for elt in _ELEMENTS:
self.elements[elt.symbol] = elt
class Element(object):
def __init__(self, z, symbol, name, **entries):
self.__dict__.update(entries)
self.z = z
self.symbol = symbol
self.name = name
class Isotope(object):
def __init__(self, mass, prob, num_neutrons):
self.mass = mass
self.prob = prob
self.num_neutrons = num_neutrons
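# Usage sketch (added for illustration; the helper name is not part of the original
# module): once the _ELEMENTS data below is defined, an element's average atomic
# mass can be recomputed from its isotope distribution and compared to .mass.
def _average_isotopic_mass(symbol):
    elt = PeriodicTable().elements[symbol]
    return sum(iso.mass * iso.prob for iso in elt.isotopes.values())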
_ELEMENTS = [
Element(
1, 'H', 'Hydrogen',
mass=1.00794,
isotopes={1: Isotope(1.0078250321, 0.999885, 1),
2: Isotope(2.014101778, 0.000115, 2)}),
Element(
1, 'D', 'Deuterium',
mass = 2.014101778,
isotopes={2: Isotope(2.014101778, 0.000115, 2)}),
Element(
2, 'He', 'Helium',
mass=4.002602,
isotopes={3: Isotope(3.0160293097, 1.37e-06, 3),
4: Isotope(4.0026032497, 0.99999863, 4)}),
Element(
3, 'Li', 'Lithium',
mass=6.941,
isotopes={6: Isotope(6.0151223, 0.0759, 6),
7: Isotope(7.016004, 0.9241, 7)}),
Element(
4, 'Be', 'Beryllium',
mass=9.012182,
isotopes={9: Isotope(9.0121821, 1.0, 9)}),
Element(
5, 'B', 'Boron',
mass=10.811,
isotopes={10: Isotope(10.012937, 0.199, 10),
11: Isotope(11.0093055, 0.801, 11)}),
Element(
6, 'C', 'Carbon',
mass=12.0107,
isotopes={12: Isotope(12.0, 0.9893, 12),
13: Isotope(13.0033548378, 0.0107, 13)}),
Element(
7, 'N', 'Nitrogen',
mass=14.0067,
isotopes={14: Isotope(14.0030740052, 0.99632, 14),
15: Isotope(15.0001088984, 0.00368, 15)}),
Element(
8, 'O', 'Oxygen',
mass=15.9994,
isotopes={16: Isotope(15.9949146221, 0.99757, 16),
17: Isotope(16.9991315, 0.00038, 17),
18: Isotope(17.9991604, 0.00205, 18)}),
Element(
9, 'F', 'Fluorine',
mass=18.9984032,
isotopes={19: Isotope(18.9984032, 1.0, 19)}),
Element(
10, 'Ne', 'Neon',
mass=20.1797,
isotopes={20: Isotope(19.9924401759, 0.9048, 20),
21: Isotope(20.99384674, 0.0027, 21),
22: Isotope(21.99138551, 0.0925, 22)}),
Element(
11, 'Na', 'Sodium',
mass=22.98977,
isotopes={23: Isotope(22.98976967, 1.0, 23)}),
Element(
12, 'Mg', 'Magnesium',
mass=24.305,
isotopes={24: Isotope(23.9850419, 0.7899, 24),
25: Isotope(24.98583702, 0.1, 25),
26: Isotope(25.98259304, 0.1101, 26)}),
Element(
13, 'Al', 'Aluminium',
mass=26.981538,
isotopes={27: Isotope(26.98153844, 1.0, 27)}),
Element(
14, 'Si', 'Silicon',
mass=28.0855,
isotopes={28: Isotope(27.9769265327, 0.922297, 28),
29: Isotope(28.97649472, 0.046832, 29),
30: Isotope(29.97377022, 0.030871, 30)}),
Element(
15, 'P', 'Phosphorus',
mass=30.973761,
isotopes={31: Isotope(30.97376151, 1.0, 31)}),
Element(
16, 'S', 'Sulfur',
mass=32.065,
isotopes={32: Isotope(31.97207069, 0.9493, 32),
33: Isotope(32.9714585, 0.0076, 33),
34: Isotope(33.96786683, 0.0429, 34),
36: Isotope(35.96708088, 0.0002, 36)}),
Element(
17, 'Cl', 'Chlorine',
mass=35.453,
isotopes={35: Isotope(34.96885271, 0.7578, 35),
37: Isotope(36.9659026, 0.2422, 37)}),
Element(
18, 'Ar', 'Argon',
mass=39.948,
isotopes={36: Isotope(35.96754628, 0.003365, 36),
38: Isotope(37.9627322, 0.000632, 38),
40: Isotope(39.962383123, 0.996003, 40)}),
Element(
19, 'K', 'Potassium',
mass=39.0983,
isotopes={39: Isotope(38.9637069, 0.932581, 39),
40: Isotope(39.96399867, 0.000117, 40),
41: Isotope(40.96182597, 0.067302, 41)}),
Element(
20, 'Ca', 'Calcium',
mass=40.078,
isotopes={40: Isotope(39.9625912, 0.96941, 40),
42: Isotope(41.9586183, 0.00647, 42),
43: Isotope(42.9587668, 0.00135, 43),
44: Isotope(43.9554811, 0.02086, 44),
46: Isotope(45.9536928, 4e-05, 46),
48: Isotope(47.952534, 0.00187, 48)}),
Element(
21, 'Sc', 'Scandium',
mass=44.95591,
isotopes={45: Isotope(44.9559102, 1.0, 45)}),
Element(
22, 'Ti', 'Titanium',
mass=47.867,
isotopes={46: Isotope(45.9526295, 0.0825, 46),
47: Isotope(46.9517638, 0.0744, 47),
48: Isotope(47.9479471, 0.7372, 48),
49: Isotope(48.9478708, 0.0541, 49),
50: Isotope(49.9447921, 0.0518, 50)}),
Element(
23, 'V', 'Vanadium',
mass=50.9415,
isotopes={50: Isotope(49.9471628, 0.0025, 50),
51: Isotope(50.9439637, 0.9975, 51)}),
Element(
24, 'Cr', 'Chromium',
mass=51.9961,
isotopes={50: Isotope(49.9460496, 0.04345, 50),
52: Isotope(51.9405119, 0.83789, 52),
53: Isotope(52.9406538, 0.09501, 53),
54: Isotope(53.9388849, 0.02365, 54)}),
Element(
25, 'Mn', 'Manganese',
mass=54.938049,
isotopes={55: Isotope(54.9380496, 1.0, 55)}),
Element(
26, 'Fe', 'Iron',
mass=55.845,
isotopes={54: Isotope(53.9396148, 0.05845, 54),
56: Isotope(55.9349421, 0.91754, 56),
57: Isotope(56.9353987, 0.02119, 57),
58: Isotope(57.9332805, 0.00282, 58)}),
Element(
27, 'Co', 'Cobalt',
mass=58.9332,
isotopes={59: Isotope(58.9332002, 1.0, 59)}),
Element(
28, 'Ni', 'Nickel',
mass=58.6934,
isotopes={58: Isotope(57.9353479, 0.680769, 58),
60: Isotope(59.9307906, 0.262231, 60),
61: Isotope(60.9310604, 0.011399, 61),
62: Isotope(61.9283488, 0.036345, 62),
64: Isotope(63.9279696, 0.009256, 64)}),
Element(
29, 'Cu', 'Copper',
mass=63.546,
isotopes={63: Isotope(62.9296011, 0.6917, 63),
65: Isotope(64.9277937, 0.3083, 65)}),
Element(
30, 'Zn', 'Zinc',
mass=65.409,
isotopes={64: Isotope(63.9291466, 0.4863, 64),
66: Isotope(65.9260368, 0.279, 66),
67: Isotope(66.9271309, 0.041, 67),
68: Isotope(67.9248476, 0.1875, 68),
70: Isotope(69.925325, 0.0062, 70)}),
Element(
31, 'Ga', 'Gallium',
group=13, period=4, block='p', series=7,
mass=69.723, eleneg=1.81, eleaffin=0.41,
covrad=1.26, atmrad=1.81, vdwrad=1.87,
tboil=2478.0, tmelt=302.92, density=5.91,
eleconfig='[Ar] 3d10 4s2 4p',
oxistates='3*',
ionenergy=(5.9993, 20.51, 30.71, 64.0, ),
isotopes={69: Isotope(68.925581, 0.60108, 69),
71: Isotope(70.924705, 0.39892, 71)}),
Element(
32, 'Ge', 'Germanium',
group=14, period=4, block='p', series=5,
mass=72.64, eleneg=2.01, eleaffin=1.232712,
covrad=1.22, atmrad=1.52, vdwrad=0.0,
tboil=3107.0, tmelt=1211.5, density=5.32,
eleconfig='[Ar] 3d10 4s2 4p2',
oxistates='4*',
ionenergy=(7.8994, 15.934, 34.22, 45.71, 93.5, ),
isotopes={70: Isotope(69.9242504, 0.2084, 70),
72: Isotope(71.9220762, 0.2754, 72),
73: Isotope(72.9234594, 0.0773, 73),
74: Isotope(73.9211782, 0.3628, 74),
76: Isotope(75.9214027, 0.0761, 76)}),
Element(
33, 'As', 'Arsenic',
group=15, period=4, block='p', series=5,
mass=74.9216, eleneg=2.18, eleaffin=0.814,
covrad=1.2, atmrad=1.33, vdwrad=1.85,
tboil=876.0, tmelt=1090.0, density=5.72,
eleconfig='[Ar] 3d10 4s2 4p3',
oxistates='5, 3*, -3',
ionenergy=(9.7886, 18.633, 28.351, 50.13, 62.63,
127.6, ),
isotopes={75: Isotope(74.9215964, 1.0, 75)}),
Element(
34, 'Se', 'Selenium',
group=16, period=4, block='p', series=1,
mass=78.96, eleneg=2.55, eleaffin=2.02067,
covrad=1.16, atmrad=1.22, vdwrad=1.9,
tboil=958.0, tmelt=494.0, density=4.82,
eleconfig='[Ar] 3d10 4s2 4p4',
oxistates='6, 4*, -2',
ionenergy=(9.7524, 21.9, 30.82, 42.944, 68.3,
81.7, 155.4, ),
isotopes={74: Isotope(73.9224766, 0.0089, 74),
76: Isotope(75.9192141, 0.0937, 76),
77: Isotope(76.9199146, 0.0763, 77),
78: Isotope(77.9173095, 0.2377, 78),
80: Isotope(79.9165218, 0.4961, 80),
82: Isotope(81.9167, 0.0873, 82)}),
Element(
35, 'Br', 'Bromine',
group=17, period=4, block='p', series=6,
mass=79.904, eleneg=2.96, eleaffin=3.363588,
covrad=1.14, atmrad=1.12, vdwrad=1.85,
tboil=331.85, tmelt=265.95, density=3.14,
eleconfig='[Ar] 3d10 4s2 4p5',
oxistates='7, 5, 3, 1, -1*',
ionenergy=(11.8138, 21.8, 36.0, 47.3, 59.7,
88.6, 103.0, 192.8, ),
isotopes={79: Isotope(78.9183376, 0.5069, 79),
81: Isotope(80.916291, 0.4931, 81)}),
Element(
36, 'Kr', 'Krypton',
group=18, period=4, block='p', series=2,
mass=83.798, eleneg=0.0, eleaffin=0.0,
covrad=1.12, atmrad=1.03, vdwrad=2.02,
tboil=120.85, tmelt=116.0, density=4.48,
eleconfig='[Ar] 3d10 4s2 4p6',
oxistates='2*',
ionenergy=(13.9996, 24.359, 36.95, 52.5, 64.7,
78.5, 110.0, 126.0, 230.39, ),
isotopes={78: Isotope(77.920386, 0.0035, 78),
80: Isotope(79.916378, 0.0228, 80),
82: Isotope(81.9134846, 0.1158, 82),
83: Isotope(82.914136, 0.1149, 83),
84: Isotope(83.911507, 0.57, 84),
86: Isotope(85.9106103, 0.173, 86)}),
Element(
37, 'Rb', 'Rubidium',
group=1, period=5, block='s', series=3,
mass=85.4678, eleneg=0.82, eleaffin=0.485916,
covrad=2.16, atmrad=2.98, vdwrad=0.0,
tboil=961.0, tmelt=312.63, density=1.53,
eleconfig='[Kr] 5s',
oxistates='1*',
ionenergy=(4.1771, 27.28, 40.0, 52.6, 71.0,
84.4, 99.2, 136.0, 150.0, 277.1, ),
isotopes={85: Isotope(84.9117893, 0.7217, 85),
87: Isotope(86.9091835, 0.2783, 87)}),
Element(
38, 'Sr', 'Strontium',
group=2, period=5, block='s', series=4,
mass=87.62, eleneg=0.95, eleaffin=0.05206,
covrad=1.91, atmrad=2.45, vdwrad=0.0,
tboil=1655.0, tmelt=1042.0, density=2.63,
eleconfig='[Kr] 5s2',
oxistates='2*',
ionenergy=(5.6949, 11.03, 43.6, 57.0, 71.6,
90.8, 106.0, 122.3, 162.0, 177.0,
324.1, ),
isotopes={84: Isotope(83.913425, 0.0056, 84),
86: Isotope(85.9092624, 0.0986, 86),
87: Isotope(86.9088793, 0.07, 87),
88: Isotope(87.9056143, 0.8258, 88)}),
Element(
39, 'Y', 'Yttrium',
group=3, period=5, block='d', series=8,
mass=88.90585, eleneg=1.22, eleaffin=0.307,
covrad=1.62, atmrad=2.27, vdwrad=0.0,
tboil=3611.0, tmelt=1795.0, density=4.47,
eleconfig='[Kr] 4d 5s2',
oxistates='3*',
ionenergy=(6.2173, 12.24, 20.52, 61.8, 77.0,
93.0, 116.0, 129.0, 146.52, 191.0,
206.0, 374.0, ),
isotopes={89: Isotope(88.9058479, 1.0, 89)}),
Element(
40, 'Zr', 'Zirconium',
group=4, period=5, block='d', series=8,
mass=91.224, eleneg=1.33, eleaffin=0.426,
covrad=1.45, atmrad=2.16, vdwrad=0.0,
tboil=4682.0, tmelt=2128.0, density=6.51,
eleconfig='[Kr] 4d2 5s2',
oxistates='4*',
ionenergy=(6.6339, 13.13, 22.99, 34.34, 81.5, ),
isotopes={90: Isotope(89.9047037, 0.5145, 90),
91: Isotope(90.905645, 0.1122, 91),
92: Isotope(91.9050401, 0.1715, 92),
94: Isotope(93.9063158, 0.1738, 94),
96: Isotope(95.908276, 0.028, 96)}),
Element(
41, 'Nb', 'Niobium',
group=5, period=5, block='d', series=8,
mass=92.90638, eleneg=1.6, eleaffin=0.893,
covrad=1.34, atmrad=2.08, vdwrad=0.0,
tboil=5015.0, tmelt=2742.0, density=8.58,
eleconfig='[Kr] 4d4 5s',
oxistates='5*, 3',
ionenergy=(6.7589, 14.32, 25.04, 38.3, 50.55,
102.6, 125.0, ),
isotopes={93: Isotope(92.9063775, 1.0, 93)}),
Element(
42, 'Mo', 'Molybdenum',
group=6, period=5, block='d', series=8,
mass=95.94, eleneg=2.16, eleaffin=0.7472,
covrad=1.3, atmrad=2.01, vdwrad=0.0,
tboil=4912.0, tmelt=2896.0, density=10.28,
eleconfig='[Kr] 4d5 5s',
oxistates='6*, 5, 4, 3, 2, 0',
ionenergy=(7.0924, 16.15, 27.16, 46.4, 61.2,
68.0, 126.8, 153.0, ),
isotopes={92: Isotope(91.90681, 0.1484, 92),
94: Isotope(93.9050876, 0.0925, 94),
95: Isotope(94.9058415, 0.1592, 95),
96: Isotope(95.9046789, 0.1668, 96),
97: Isotope(96.906021, 0.0955, 97),
98: Isotope(97.9054078, 0.2413, 98),
100: Isotope(99.907477, 0.0963, 100)}),
Element(
43, 'Tc', 'Technetium',
group=7, period=5, block='d', series=8,
mass=97.907216, eleneg=1.9, eleaffin=0.55,
covrad=1.27, atmrad=1.95, vdwrad=0.0,
tboil=4538.0, tmelt=2477.0, density=11.49,
eleconfig='[Kr] 4d5 5s2',
oxistates='7*',
ionenergy=(7.28, 15.26, 29.54, ),
isotopes={98: Isotope(97.907216, 1.0, 98)}),
Element(
44, 'Ru', 'Ruthenium',
group=8, period=5, block='d', series=8,
mass=101.07, eleneg=2.2, eleaffin=1.04638,
covrad=1.25, atmrad=1.89, vdwrad=0.0,
tboil=4425.0, tmelt=2610.0, density=12.45,
eleconfig='[Kr] 4d7 5s',
oxistates='8, 6, 4*, 3*, 2, 0, -2',
ionenergy=(7.3605, 16.76, 28.47, ),
isotopes={96: Isotope(95.907598, 0.0554, 96),
98: Isotope(97.905287, 0.0187, 98),
99: Isotope(98.9059393, 0.1276, 99),
100: Isotope(99.9042197, 0.126, 100),
101: Isotope(100.9055822, 0.1706, 101),
102: Isotope(101.9043495, 0.3155, 102),
104: Isotope(103.90543, 0.1862, 104)}),
Element(
45, 'Rh', 'Rhodium',
group=9, period=5, block='d', series=8,
mass=102.9055, eleneg=2.28, eleaffin=1.14289,
covrad=1.25, atmrad=1.83, vdwrad=0.0,
tboil=3970.0, tmelt=2236.0, density=12.41,
eleconfig='[Kr] 4d8 5s',
oxistates='5, 4, 3*, 1*, 2, 0',
ionenergy=(7.4589, 18.08, 31.06, ),
isotopes={103: Isotope(102.905504, 1.0, 103)}),
Element(
46, 'Pd', 'Palladium',
group=10, period=5, block='d', series=8,
mass=106.42, eleneg=2.2, eleaffin=0.56214,
covrad=1.28, atmrad=1.79, vdwrad=1.63,
tboil=3240.0, tmelt=1825.0, density=12.02,
eleconfig='[Kr] 4d10',
oxistates='4, 2*, 0',
ionenergy=(8.3369, 19.43, 32.93, ),
isotopes={102: Isotope(101.905608, 0.0102, 102),
104: Isotope(103.904035, 0.1114, 104),
105: Isotope(104.905084, 0.2233, 105),
106: Isotope(105.903483, 0.2733, 106),
108: Isotope(107.903894, 0.2646, 108),
110: Isotope(109.905152, 0.1172, 110)}),
Element(
47, 'Ag', 'Silver',
group=11, period=5, block='d', series=8,
mass=107.8682, eleneg=1.93, eleaffin=1.30447,
covrad=1.34, atmrad=1.75, vdwrad=1.72,
tboil=2436.0, tmelt=1235.1, density=10.49,
eleconfig='[Kr] 4d10 5s',
oxistates='2, 1*',
ionenergy=(7.5762, 21.49, 34.83, ),
isotopes={107: Isotope(106.905093, 0.51839, 107),
109: Isotope(108.904756, 0.48161, 109)}),
Element(
48, 'Cd', 'Cadmium',
group=12, period=5, block='d', series=8,
mass=112.411, eleneg=1.69, eleaffin=0.0,
covrad=1.48, atmrad=1.71, vdwrad=1.58,
tboil=1040.0, tmelt=594.26, density=8.64,
eleconfig='[Kr] 4d10 5s2',
oxistates='2*',
ionenergy=(8.9938, 16.908, 37.48, ),
isotopes={106: Isotope(105.906458, 0.0125, 106),
108: Isotope(107.904183, 0.0089, 108),
110: Isotope(109.903006, 0.1249, 110),
111: Isotope(110.904182, 0.128, 111),
112: Isotope(111.9027572, 0.2413, 112),
113: Isotope(112.9044009, 0.1222, 113),
114: Isotope(113.9033581, 0.2873, 114),
116: Isotope(115.904755, 0.0749, 116)}),
Element(
49, 'In', 'Indium',
group=13, period=5, block='p', series=7,
mass=114.818, eleneg=1.78, eleaffin=0.404,
covrad=1.44, atmrad=2.0, vdwrad=1.93,
tboil=2350.0, tmelt=429.78, density=7.31,
eleconfig='[Kr] 4d10 5s2 5p',
oxistates='3*',
ionenergy=(5.7864, 18.869, 28.03, 28.03, ),
isotopes={113: Isotope(112.904061, 0.0429, 113),
115: Isotope(114.903878, 0.9571, 115)}),
Element(
50, 'Sn', 'Tin',
group=14, period=5, block='p', series=7,
mass=118.71, eleneg=1.96, eleaffin=1.112066,
covrad=1.41, atmrad=1.72, vdwrad=2.17,
tboil=2876.0, tmelt=505.12, density=7.29,
eleconfig='[Kr] 4d10 5s2 5p2',
oxistates='4*, 2*',
ionenergy=(7.3439, 14.632, 30.502, 40.734, 72.28, ),
isotopes={112: Isotope(111.904821, 0.0097, 112),
114: Isotope(113.902782, 0.0066, 114),
115: Isotope(114.903346, 0.0034, 115),
116: Isotope(115.901744, 0.1454, 116),
117: Isotope(116.902954, 0.0768, 117),
118: Isotope(117.901606, 0.2422, 118),
119: Isotope(118.903309, 0.0859, 119),
120: Isotope(119.9021966, 0.3258, 120),
122: Isotope(121.9034401, 0.0463, 122),
124: Isotope(123.9052746, 0.0579, 124)}),
Element(
51, 'Sb', 'Antimony',
group=15, period=5, block='p', series=5,
mass=121.76, eleneg=2.05, eleaffin=1.047401,
covrad=1.4, atmrad=1.53, vdwrad=0.0,
tboil=1860.0, tmelt=903.91, density=6.69,
eleconfig='[Kr] 4d10 5s2 5p3',
oxistates='5, 3*, -3',
ionenergy=(8.6084, 16.53, 25.3, 44.2, 56.0,
108.0, ),
isotopes={121: Isotope(120.903818, 0.5721, 121),
123: Isotope(122.9042157, 0.4279, 123)}),
Element(
52, 'Te', 'Tellurium',
group=16, period=5, block='p', series=5,
mass=127.6, eleneg=2.1, eleaffin=1.970875,
covrad=1.36, atmrad=1.42, vdwrad=2.06,
tboil=1261.0, tmelt=722.72, density=6.25,
eleconfig='[Kr] 4d10 5s2 5p4',
oxistates='6, 4*, -2',
ionenergy=(9.0096, 18.6, 27.96, 37.41, 58.75,
70.7, 137.0, ),
isotopes={120: Isotope(119.90402, 0.0009, 120),
122: Isotope(121.9030471, 0.0255, 122),
123: Isotope(122.904273, 0.0089, 123),
124: Isotope(123.9028195, 0.0474, 124),
125: Isotope(124.9044247, 0.0707, 125),
126: Isotope(125.9033055, 0.1884, 126),
128: Isotope(127.9044614, 0.3174, 128),
130: Isotope(129.9062228, 0.3408, 130)}),
Element(
53, 'I', 'Iodine',
group=17, period=5, block='p', series=6,
mass=126.90447, eleneg=2.66, eleaffin=3.059038,
covrad=1.33, atmrad=1.32, vdwrad=1.98,
tboil=457.5, tmelt=386.7, density=4.94,
eleconfig='[Kr] 4d10 5s2 5p5',
oxistates='7, 5, 1, -1*',
ionenergy=(10.4513, 19.131, 33.0, ),
isotopes={127: Isotope(126.904468, 1.0, 127)}),
Element(
54, 'Xe', 'Xenon',
group=18, period=5, block='p', series=2,
mass=131.293, eleneg=0.0, eleaffin=0.0,
covrad=1.31, atmrad=1.24, vdwrad=2.16,
tboil=165.1, tmelt=161.39, density=4.49,
eleconfig='[Kr] 4d10 5s2 5p6',
oxistates='2, 4, 6',
ionenergy=(12.1298, 21.21, 32.1, ),
isotopes={124: Isotope(123.9058958, 0.0009, 124),
126: Isotope(125.904269, 0.0009, 126),
128: Isotope(127.9035304, 0.0192, 128),
129: Isotope(128.9047795, 0.2644, 129),
130: Isotope(129.9035079, 0.0408, 130),
131: Isotope(130.9050819, 0.2118, 131),
132: Isotope(131.9041545, 0.2689, 132),
134: Isotope(133.9053945, 0.1044, 134),
136: Isotope(135.90722, 0.0887, 136)}),
Element(
55, 'Cs', 'Caesium',
mass=132.90545,
isotopes={133: Isotope(132.905447, 1.0, 133)}),
Element(
56, 'Ba', 'Barium',
mass=137.327,
isotopes={130: Isotope(129.90631, 0.00106, 130),
132: Isotope(131.905056, 0.00101, 132),
134: Isotope(133.904503, 0.02417, 134),
135: Isotope(134.905683, 0.06592, 135),
136: Isotope(135.90457, 0.07854, 136),
137: Isotope(136.905821, 0.11232, 137),
138: Isotope(137.905241, 0.71698, 138)}),
Element(
57, 'La', 'Lanthanum',
mass=138.9055,
isotopes={138: Isotope(137.907107, 0.0009, 138),
139: Isotope(138.906348, 0.9991, 139)}),
Element(
58, 'Ce', 'Cerium',
mass=140.116,
isotopes={136: Isotope(135.90714, 0.00185, 136),
138: Isotope(137.905986, 0.00251, 138),
140: Isotope(139.905434, 0.8845, 140),
142: Isotope(141.90924, 0.11114, 142)}),
Element(
59, 'Pr', 'Praseodymium',
mass=140.90765,
isotopes={141: Isotope(140.907648, 1.0, 141)}),
Element(
60, 'Nd', 'Neodymium',
mass=144.24,
isotopes={142: Isotope(141.907719, 0.272, 142),
143: Isotope(142.90981, 0.122, 143),
144: Isotope(143.910083, 0.238, 144),
145: Isotope(144.912569, 0.083, 145),
146: Isotope(145.913112, 0.172, 146),
148: Isotope(147.916889, 0.057, 148),
150: Isotope(149.920887, 0.056, 150)}),
Element(
61, 'Pm', 'Promethium',
mass=144.912744,
isotopes={145: Isotope(144.912744, 1.0, 145)}),
Element(
62, 'Sm', 'Samarium',
mass=150.36,
isotopes={144: Isotope(143.911995, 0.0307, 144),
147: Isotope(146.914893, 0.1499, 147),
148: Isotope(147.914818, 0.1124, 148),
149: Isotope(148.91718, 0.1382, 149),
150: Isotope(149.917271, 0.0738, 150),
152: Isotope(151.919728, 0.2675, 152),
154: Isotope(153.922205, 0.2275, 154)}),
Element(
63, 'Eu', 'Europium',
mass=151.964,
isotopes={151: Isotope(150.919846, 0.4781, 151),
153: Isotope(152.921226, 0.5219, 153)}),
Element(
64, 'Gd', 'Gadolinium',
mass=157.25,
isotopes={152: Isotope(151.919788, 0.002, 152),
154: Isotope(153.920862, 0.0218, 154),
155: Isotope(154.922619, 0.148, 155),
156: Isotope(155.92212, 0.2047, 156),
157: Isotope(156.923957, 0.1565, 157),
158: Isotope(157.924101, 0.2484, 158),
160: Isotope(159.927051, 0.2186, 160)}),
Element(
65, 'Tb', 'Terbium',
mass=158.92534,
isotopes={159: Isotope(158.925343, 1.0, 159)}),
Element(
66, 'Dy', 'Dysprosium',
mass=162.5,
isotopes={156: Isotope(155.924278, 0.0006, 156),
158: Isotope(157.924405, 0.001, 158),
160: Isotope(159.925194, 0.0234, 160),
161: Isotope(160.92693, 0.1891, 161),
162: Isotope(161.926795, 0.2551, 162),
163: Isotope(162.928728, 0.249, 163),
164: Isotope(163.929171, 0.2818, 164)}),
Element(
67, 'Ho', 'Holmium',
mass=164.93032,
isotopes={165: Isotope(164.930319, 1.0, 165)}),
Element(
68, 'Er', 'Erbium',
mass=167.259,
isotopes={162: Isotope(161.928775, 0.0014, 162),
164: Isotope(163.929197, 0.0161, 164),
166: Isotope(165.93029, 0.3361, 166),
167: Isotope(166.932045, 0.2293, 167),
168: Isotope(167.932368, 0.2678, 168),
170: Isotope(169.93546, 0.1493, 170)}),
Element(
69, 'Tm', 'Thulium',
mass=168.93421,
isotopes={169: Isotope(168.934211, 1.0, 169)}),
Element(
70, 'Yb', 'Ytterbium',
mass=173.04,
isotopes={168: Isotope(167.933894, 0.0013, 168),
170: Isotope(169.934759, 0.0304, 170),
171: Isotope(170.936322, 0.1428, 171),
172: Isotope(171.9363777, 0.2183, 172),
173: Isotope(172.9382068, 0.1613, 173),
174: Isotope(173.9388581, 0.3183, 174),
176: Isotope(175.942568, 0.1276, 176)}),
Element(
71, 'Lu', 'Lutetium',
mass=174.967,
isotopes={175: Isotope(174.9407679, 0.9741, 175),
176: Isotope(175.9426824, 0.0259, 176)}),
Element(
72, 'Hf', 'Hafnium',
mass=178.49,
isotopes={174: Isotope(173.94004, 0.0016, 174),
176: Isotope(175.9414018, 0.0526, 176),
177: Isotope(176.94322, 0.186, 177),
178: Isotope(177.9436977, 0.2728, 178),
179: Isotope(178.9458151, 0.1362, 179),
180: Isotope(179.9465488, 0.3508, 180)}),
Element(
73, 'Ta', 'Tantalum',
mass=180.9479,
isotopes={180: Isotope(179.947466, 0.00012, 180),
181: Isotope(180.947996, 0.99988, 181)}),
Element(
74, 'W', 'Tungsten',
mass=183.84,
isotopes={180: Isotope(179.946706, 0.0012, 180),
182: Isotope(181.948206, 0.265, 182),
183: Isotope(182.9502245, 0.1431, 183),
184: Isotope(183.9509326, 0.3064, 184),
186: Isotope(185.954362, 0.2843, 186)}),
Element(
75, 'Re', 'Rhenium',
mass=186.207,
isotopes={185: Isotope(184.9529557, 0.374, 185),
187: Isotope(186.9557508, 0.626, 187)}),
Element(
76, 'Os', 'Osmium',
mass=190.23,
isotopes={184: Isotope(183.952491, 0.0002, 184),
186: Isotope(185.953838, 0.0159, 186),
187: Isotope(186.9557479, 0.0196, 187),
188: Isotope(187.955836, 0.1324, 188),
189: Isotope(188.9581449, 0.1615, 189),
190: Isotope(189.958445, 0.2626, 190),
192: Isotope(191.961479, 0.4078, 192)}),
Element(
77, 'Ir', 'Iridium',
mass=192.217,
isotopes={191: Isotope(190.960591, 0.373, 191),
193: Isotope(192.962924, 0.627, 193)}),
Element(
78, 'Pt', 'Platinum',
mass=195.078,
isotopes={190: Isotope(189.95993, 0.00014, 190),
192: Isotope(191.961035, 0.00782, 192),
194: Isotope(193.962664, 0.32967, 194),
195: Isotope(194.964774, 0.33832, 195),
196: Isotope(195.964935, 0.25242, 196),
198: Isotope(197.967876, 0.07163, 198)}),
Element(
79, 'Au', 'Gold',
mass=196.96655,
isotopes={197: Isotope(196.966552, 1.0, 197)}),
Element(
80, 'Hg', 'Mercury',
mass=200.59,
isotopes={196: Isotope(195.965815, 0.0015, 196),
198: Isotope(197.966752, 0.0997, 198),
199: Isotope(198.968262, 0.1687, 199),
200: Isotope(199.968309, 0.231, 200),
201: Isotope(200.970285, 0.1318, 201),
202: Isotope(201.970626, 0.2986, 202),
204: Isotope(203.973476, 0.0687, 204)}),
Element(
81, 'Tl', 'Thallium',
mass=204.3833,
isotopes={203: Isotope(202.972329, 0.29524, 203),
205: Isotope(204.974412, 0.70476, 205)}),
Element(
82, 'Pb', 'Lead',
mass=207.2,
isotopes={204: Isotope(203.973029, 0.014, 204),
206: Isotope(205.974449, 0.241, 206),
207: Isotope(206.975881, 0.221, 207),
208: Isotope(207.976636, 0.524, 208)}),
Element(
83, 'Bi', 'Bismuth',
mass=208.98038,
isotopes={209: Isotope(208.980383, 1.0, 209)}),
Element(
84, 'Po', 'Polonium',
mass=208.982416,
isotopes={209: Isotope(208.982416, 1.0, 209)}),
Element(
85, 'At', 'Astatine',
mass=209.9871,
isotopes={210: Isotope(209.987131, 1.0, 210)}),
Element(
86, 'Rn', 'Radon',
mass=222.0176,
isotopes={222: Isotope(222.0175705, 1.0, 222)}),
Element(
87, 'Fr', 'Francium',
mass=223.0197307,
isotopes={223: Isotope(223.0197307, 1.0, 223)}),
Element(
88, 'Ra', 'Radium',
mass=226.025403,
isotopes={226: Isotope(226.0254026, 1.0, 226)}),
Element(
89, 'Ac', 'Actinium',
mass=227.027747,
isotopes={227: Isotope(227.027747, 1.0, 227)}),
Element(
90, 'Th', 'Thorium',
mass=232.0381,
isotopes={232: Isotope(232.0380504, 1.0, 232)}),
Element(
91, 'Pa', 'Protactinium',
mass=231.03588,
isotopes={231: Isotope(231.0358789, 1.0, 231)}),
Element(
92, 'U', 'Uranium',
mass=238.02891,
isotopes={234: Isotope(234.0409456, 5.5e-05, 234),
235: Isotope(235.0439231, 0.0072, 235),
238: Isotope(238.0507826, 0.992745, 238)}),
Element(
93, 'Np', 'Neptunium',
mass=237.048167,
isotopes={237: Isotope(237.0481673, 1.0, 237)}),
Element(
94, 'Pu', 'Plutonium',
mass=244.064198,
isotopes={244: Isotope(244.064198, 1.0, 244)}),
Element(
95, 'Am', 'Americium',
mass=243.061373,
isotopes={243: Isotope(243.0613727, 1.0, 243)}),
Element(
96, 'Cm', 'Curium',
mass=247.070347,
isotopes={247: Isotope(247.070347, 1.0, 247)}),
Element(
97, 'Bk', 'Berkelium',
mass=247.070299,
isotopes={247: Isotope(247.070299, 1.0, 247)}),
Element(
98, 'Cf', 'Californium',
mass=251.07958,
isotopes={251: Isotope(251.07958, 1.0, 251)}),
Element(
99, 'Es', 'Einsteinium',
mass=252.08297,
isotopes={252: Isotope(252.08297, 1.0, 252)}),
Element(
100, 'Fm', 'Fermium',
mass=257.095099,
isotopes={257: Isotope(257.095099, 1.0, 257)}),
Element(
101, 'Md', 'Mendelevium',
mass=258.098425,
isotopes={258: Isotope(258.098425, 1.0, 258)}),
Element(
102, 'No', 'Nobelium',
mass=259.10102,
isotopes={259: Isotope(259.10102, 1.0, 259)}),
Element(
103, 'Lr', 'Lawrencium',
mass=262.10969,
isotopes={262: Isotope(262.10969, 1.0, 262)}),
Element(
104, 'Rf', 'Rutherfordium',
mass=261.10875,
isotopes={261: Isotope(261.10875, 1.0, 261)}),
Element(
105, 'Db', 'Dubnium',
mass=262.11415,
isotopes={262: Isotope(262.11415, 1.0, 262)}),
Element(
106, 'Sg', 'Seaborgium',
mass=266.12193,
isotopes={266: Isotope(266.12193, 1.0, 266)}),
Element(
107, 'Bh', 'Bohrium',
mass=264.12473,
isotopes={264: Isotope(264.12473, 1.0, 264)}),
Element(
108, 'Hs', 'Hassium',
mass=269.13411,
isotopes={269: Isotope(269.13411, 1.0, 269)}),
Element(
109, 'Mt', 'Meitnerium',
mass=268.13882,
isotopes={268: Isotope(268.13882, 1.0, 268)})]
| [
"[email protected]"
] | |
8e6a009c884e11c31cb6f6416fe15a0a4a1b9a55 | 70450f0c551adf47b450468e424f4f90bebfb58d | /dataclasses/resources/test/test_I3FlasherInfo.py | 5b4c4106c5f255593ca07e39f455cbf31358221e | [
"MIT"
] | permissive | hschwane/offline_production | ebd878c5ac45221b0631a78d9e996dea3909bacb | e14a6493782f613b8bbe64217559765d5213dc1e | refs/heads/master | 2023-03-23T11:22:43.118222 | 2021-03-16T13:11:22 | 2021-03-16T13:11:22 | 280,381,714 | 0 | 0 | MIT | 2020-07-17T09:20:29 | 2020-07-17T09:20:29 | null | UTF-8 | Python | false | false | 1,673 | py | #!/usr/bin/env python
import unittest
from icecube import dataclasses
from icecube import icetray
from icecube.icetray import OMKey
class TestI3FlasherInfo(unittest.TestCase):
def test_I3FlasherInfo_equality(self):
fi1 = dataclasses.I3FlasherInfo()
fi2 = dataclasses.I3FlasherInfo()
fi1.flashing_om = OMKey(2,1,0)
fi2.flashing_om = OMKey(2,1,0)
fi1.flash_time = 1.0
fi2.flash_time = 1.0
fi1.atwd_bin_size = 1.0
fi2.atwd_bin_size = 1.0
fi1.led_brightness = 1
fi2.led_brightness = 1
fi1.mask = 1
fi2.mask = 1
fi1.width = 1
fi2.width = 1
fi1.rate = 1
fi2.rate = 1
fi1.raw_atwd3 = [ 1, 2, 3 ]
fi2.raw_atwd3 = [ 1, 2, 3 ]
self.assertTrue(fi1==fi2, "this should be true.")
def test_I3FlasherInfo_inequality(self):
fi1 = dataclasses.I3FlasherInfo()
fi2 = dataclasses.I3FlasherInfo()
fi1.flashing_om = OMKey(3,1,0)
fi2.flashing_om = OMKey(2,1,0)
fi1.flash_time = 1.0
fi2.flash_time = 1.0
fi1.atwd_bin_size = 1.0
fi2.atwd_bin_size = 1.0
fi1.led_brightness = 1
fi2.led_brightness = 1
fi1.mask = 1
fi2.mask = 1
fi1.width = 1
fi2.width = 1
fi1.rate = 1
fi2.rate = 1
fi1.raw_atwd3 = [ 1, 2, 3 ]
fi2.raw_atwd3 = [ 1, 2, 3 ]
self.assertFalse(fi1==fi2, "this should be false.")
unittest.main()
| [
"[email protected]"
] | |
ac87b06fcf8526dff1c8c6cea683f676c0773626 | 725f172aae1d97ee6b33734843bbe670644dec65 | /train_mtcnn_LPR/24train_wm_lpr/gen_data_shuffle宽高非比例.py | a6e350acd8894d689c5311ebee8efadd54a599a0 | [] | no_license | xingguoliang/License-Plate-Detect-Recognition-via-Deep-Neural-Networks-accuracy-up-to-99.9 | 377004c0129d8dad3f70a36c2cda05ce878d83ef | a2cf438a8d2df7b3a55f869fb01f5410741aae5e | refs/heads/master | 2023-07-07T19:18:15.437859 | 2020-06-30T04:45:18 | 2020-06-30T04:45:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,228 | py | # -*- coding: utf-8 -*-
"""
description: generate MTCNN training data from source images and convert them into an lmdb database
author: Aliang 2018-01-12
"""
import numpy as np
import cv2
import lmdb
import numpy.random as npr
import data_tran_tool
import caffe
from caffe.proto import caffe_pb2
from utils import IoU
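# utils.IoU(box, boxes) is assumed to return the intersection-over-union of `box` against
# every row of `boxes` as an array, which is how it is used below (compared element-wise
# against the thresholds and reduced with np.max for the negative samples).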
anno_file = './label.txt'
with open(anno_file, 'r') as f:
annotations = f.readlines()
num = len(annotations)
print "total num of images: %d" % num
lmdb_id = 2
dir_prefix = ''
p_idx = 0 # positive
n_idx = 0 # negative
d_idx = 0 # dont care
item_id = 0 # running record id, also used as the lmdb key
batch_size = 1000 # number of samples buffered before each commit, so the write cache is not exhausted
# create the lmdb file
# map_size is the maximum capacity of the database; set it according to your needs
if(lmdb_id == 0):
lmdb_env_12 = lmdb.open(dir_prefix + 'mtcnn_train_12_test', map_size=5000000000)
lmdb_txn_12 = lmdb_env_12.begin(write=True)
elif(lmdb_id == 1):
lmdb_env_24 = lmdb.open(dir_prefix + 'mtcnn_train_24_test', map_size=5000000000)
lmdb_txn_24 = lmdb_env_24.begin(write=True)
else:
lmdb_env_48 = lmdb.open(dir_prefix + 'mtcnn_train_48_test', map_size=10000000000)
lmdb_txn_48 = lmdb_env_48.begin(write=True)
# caffe usually stores samples in a Datum-like structure, so one MTCNNDatum protobuf is reused for every record
mtcnn_datum = caffe_pb2.MTCNNDatum()
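# data_tran_tool.array_to_mtcnndatum(image, label, roi) is assumed to pack the crop's
# pixels, its class label and the four normalized offsets into one MTCNNDatum message;
# the exact field layout is whatever this caffe fork defines in caffe.proto.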
for line_idx,annotation in enumerate(annotations):
    annotation = annotation.strip().split(' ') # fields on each line are separated by whitespace
    im_path = annotation[0] # path of the image
    bbox = map(float, annotation[1:])
    if np.size(bbox) % 4 != 0: # the annotation data is malformed
print "the annotation data in line %d is invalid, please check file %s !" % (line_idx + 1, anno_file)
exit(-1);
elif np.size(bbox) == 0:
continue;
boxes = np.array(bbox, dtype=np.float32).reshape(-1, 4)
boxes_num = boxes.shape[0]
    img = cv2.imread(im_path) # read the image
    if img is None: # skip entries whose image file is missing or unreadable
        print "cannot read image %s, skipping" % im_path
        continue
    cv2.namedWindow("input",1)
    cv2.imshow("input",img)
    # cv2.waitKey(0)
if (line_idx+1) % 10 ==0:
print line_idx + 1, "images done"
height, width, channel = img.shape
if width < 200 or height < 200:
continue;
pos_num = 0
part_num = 0
neg_num = 0
num_for_each = 5
while(pos_num < boxes_num * num_for_each and part_num < boxes_num * num_for_each and neg_num < boxes_num * num_for_each * 3):
# print "%d images done, pos: %d part: %d neg: %d" % (line_idx + 1, p_idx, d_idx, n_idx)
choose = npr.randint(0,100)
#pos
if(choose < 20):
max_loop = 0
while(1):
max_loop += 1
if(max_loop > boxes_num * 10):
break;
box_ch = npr.randint(0,boxes_num)
box = boxes[box_ch]
x1, y1, x2, y2 = box
w = x2 - x1 + 1
h = y2 - y1 + 1
w_h = float(w)/float(h)
print w_h
                cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), (55, 255, 155), 2)
cv2.namedWindow("input", 1)
cv2.imshow("input", img)
# cv2.waitKey(0)
if max(w, h) < 10 or min(w ,h) < 5 or x1 < 0 or y1 < 0:
continue;
# size = npr.randint(int(min(w, h)*0.8), np.ceil(1.25 * max(w, h)))
                size_w = npr.randint(int(w * 0.8), int(np.ceil(1.25 * w)))
                size_h = float(size_w) / float(w_h) # keep the ground-truth box's aspect ratio
                delta_x = npr.randint(-w * 0.2, w * 0.2)
                delta_y = npr.randint(-h * 0.2, h * 0.2)
                nx1 = max(x1 + w / 2 + delta_x - size_w / 2, 0)
                ny1 = max(y1 + h / 2 + delta_y - size_h / 2, 0)
nx2 = nx1 + size_w
ny2 = ny1 + size_h
print nx1,ny1,nx2-nx1,ny2-ny1
if nx2 > width or ny2 > height:
continue
crop_box = np.array([nx1, ny1, nx2, ny2])
offset_x1 = (x1 - nx1) / float(size_w)
offset_y1 = (y1 - ny1) / float(size_h)
offset_x2 = (x2 - nx1) / float(size_w)
offset_y2 = (y2 - ny1) / float(size_h)
cropped_im = img[int(ny1) : int(ny2), int(nx1) : int(nx2), :]
cv2.rectangle(img, (int(nx1), int(ny1)), (int(nx2), int(ny2)), (55, 55, 155), 2)
cv2.namedWindow("input_cropped_im", 1)
cv2.imshow("input_cropped_im", img)
cv2.namedWindow("cropped_im", 1)
cv2.imshow("cropped_im", cropped_im)
cv2.waitKey(0)
if(lmdb_id == 0):
resized_im12 = cv2.resize(cropped_im, (12, 12), interpolation=cv2.INTER_LINEAR)
elif(lmdb_id == 1):
resized_im24 = cv2.resize(cropped_im, (24, 24), interpolation=cv2.INTER_LINEAR)
else:
resized_im48 = cv2.resize(cropped_im, (48, 48), interpolation=cv2.INTER_LINEAR)
box_ = box.reshape(1, -1)
if IoU(crop_box, box_) >= 0.65:
#save_file = os.path.join(pos_save_dir, "%s.jpg"%p_idx)
#cv2.imwrite(save_file, resized_im)
#f1.write(str(stdsize)+"/positive/%s"%p_idx + ' 1 %f %f %f %f\n'%(offset_x1, offset_y1, offset_x2, offset_y2))
                    '''positive samples get class label 1'''
item_id += 1
''' size 12'''
if(lmdb_id == 0):
mtcnn_datum = data_tran_tool.array_to_mtcnndatum(resized_im12, 1, [offset_x1, offset_y1, offset_x2, offset_y2])
keystr = '{:0>8d}'.format(item_id)
lmdb_txn_12.put(keystr, mtcnn_datum.SerializeToString())
elif(lmdb_id == 1):
mtcnn_datum = data_tran_tool.array_to_mtcnndatum(resized_im24, 1, [offset_x1, offset_y1, offset_x2, offset_y2])
keystr = '{:0>8d}'.format(item_id)
lmdb_txn_24.put(keystr, mtcnn_datum.SerializeToString())
else:
mtcnn_datum = data_tran_tool.array_to_mtcnndatum(resized_im48, 1, [offset_x1, offset_y1, offset_x2, offset_y2])
keystr = '{:0>8d}'.format(item_id)
lmdb_txn_48.put(keystr, mtcnn_datum.SerializeToString())
# print("finally")
# write batch
if(item_id) % batch_size == 0:
if(lmdb_id == 0):
lmdb_txn_12.commit()
lmdb_txn_12 = lmdb_env_12.begin(write=True)
elif(lmdb_id == 1):
lmdb_txn_24.commit()
lmdb_txn_24 = lmdb_env_24.begin(write=True)
elif(lmdb_id == 2):
lmdb_txn_48.commit()
lmdb_txn_48 = lmdb_env_48.begin(write=True)
#print (item_id + 1)
p_idx += 1
pos_num += 1
break
#part
elif(choose < 40):
max_loop = 0
while(1):
max_loop += 1
if(max_loop > boxes_num * 10):
break;
box_ch = npr.randint(0,boxes_num)
box = boxes[box_ch]
x1, y1, x2, y2 = box
w = x2 - x1 + 1
h = y2 - y1 + 1
if max(w, h) < 10 or min(w ,h) < 5 or x1 < 0 or y1 < 0:
continue;
size = npr.randint(int(min(w, h)*0.8), np.ceil(1.25 * max(w, h)))
delta_x = npr.randint(-w * 0.2, w * 0.2)
delta_y = npr.randint(-h * 0.2, h * 0.2)
nx1 = max(x1 + w / 2 + delta_x - size / 2, 0)
ny1 = max(y1 + h / 2 + delta_y - size / 2, 0)
nx2 = nx1 + size
ny2 = ny1 + size
if nx2 > width or ny2 > height:
continue
crop_box = np.array([nx1, ny1, nx2, ny2])
offset_x1 = (x1 - nx1) / float(size)
offset_y1 = (y1 - ny1) / float(size)
offset_x2 = (x2 - nx1) / float(size)
offset_y2 = (y2 - ny1) / float(size)
cropped_im = img[int(ny1) : int(ny2), int(nx1) : int(nx2), :]
if(lmdb_id == 0):
resized_im12 = cv2.resize(cropped_im, (12, 12), interpolation=cv2.INTER_LINEAR)
elif(lmdb_id == 1):
resized_im24 = cv2.resize(cropped_im, (24, 24), interpolation=cv2.INTER_LINEAR)
else:
resized_im48 = cv2.resize(cropped_im, (48, 48), interpolation=cv2.INTER_LINEAR)
box_ = box.reshape(1, -1)
if IoU(crop_box, box_) >= 0.4 and IoU(crop_box, box_) < 0.65 :
#save_file = os.path.join(part_save_dir, "%s.jpg"%d_idx)
#f3.write(str(stdsize)+"/part/%s"%d_idx + ' -1 %f %f %f %f\n'%(offset_x1, offset_y1, offset_x2, offset_y2))
#cv2.imwrite(save_file, resized_im)
                    '''part samples get class label -1'''
item_id += 1
''' size 12'''
if(lmdb_id == 0):
mtcnn_datum = data_tran_tool.array_to_mtcnndatum(resized_im12, -1, [offset_x1, offset_y1, offset_x2, offset_y2])
keystr = '{:0>8d}'.format(item_id)
lmdb_txn_12.put(keystr, mtcnn_datum.SerializeToString())
elif(lmdb_id == 1):
mtcnn_datum = data_tran_tool.array_to_mtcnndatum(resized_im24, -1, [offset_x1, offset_y1, offset_x2, offset_y2])
keystr = '{:0>8d}'.format(item_id)
lmdb_txn_24.put(keystr, mtcnn_datum.SerializeToString())
else:
mtcnn_datum = data_tran_tool.array_to_mtcnndatum(resized_im48, -1, [offset_x1, offset_y1, offset_x2, offset_y2])
keystr = '{:0>8d}'.format(item_id)
lmdb_txn_48.put(keystr, mtcnn_datum.SerializeToString())
# write batch
if(item_id) % batch_size == 0:
if(lmdb_id == 0):
lmdb_txn_12.commit()
lmdb_txn_12 = lmdb_env_12.begin(write=True)
elif(lmdb_id == 1):
lmdb_txn_24.commit()
lmdb_txn_24 = lmdb_env_24.begin(write=True)
elif(lmdb_id == 2):
lmdb_txn_48.commit()
lmdb_txn_48 = lmdb_env_48.begin(write=True)
d_idx += 1
part_num += 1
break;
#neg
else:
while(1):
size = npr.randint(40, min(width, height) / 2)
nx = npr.randint(0, width - size)
ny = npr.randint(0, height - size)
crop_box = np.array([nx, ny, nx + size, ny + size])
Iou = IoU(crop_box, boxes)
cropped_im = img[ny : ny + size, nx : nx + size, :]
if(lmdb_id == 0):
resized_im12 = cv2.resize(cropped_im, (12, 12), interpolation=cv2.INTER_LINEAR)
elif(lmdb_id == 1):
resized_im24 = cv2.resize(cropped_im, (24, 24), interpolation=cv2.INTER_LINEAR)
else:
resized_im48 = cv2.resize(cropped_im, (48, 48), interpolation=cv2.INTER_LINEAR)
if np.max(Iou) < 0.3:
                    # IoU with all ground-truth boxes must be below 0.3
                    '''negative samples get class label 0'''
item_id += 1
''' size 12'''
if(lmdb_id == 0):
mtcnn_datum = data_tran_tool.array_to_mtcnndatum(resized_im12, 0, [-1.0, -1.0, -1.0, -1.0])
keystr = '{:0>8d}'.format(item_id)
lmdb_txn_12.put(keystr, mtcnn_datum.SerializeToString())
elif(lmdb_id == 1):
mtcnn_datum = data_tran_tool.array_to_mtcnndatum(resized_im24, 0, [-1.0, -1.0, -1.0, -1.0])
keystr = '{:0>8d}'.format(item_id)
lmdb_txn_24.put(keystr, mtcnn_datum.SerializeToString())
else:
mtcnn_datum = data_tran_tool.array_to_mtcnndatum(resized_im48, 0, [-1.0, -1.0, -1.0, -1.0])
keystr = '{:0>8d}'.format(item_id)
lmdb_txn_48.put(keystr, mtcnn_datum.SerializeToString())
# write batch
if(item_id) % batch_size == 0:
if(lmdb_id == 0):
lmdb_txn_12.commit()
lmdb_txn_12 = lmdb_env_12.begin(write=True)
elif(lmdb_id == 1):
lmdb_txn_24.commit()
lmdb_txn_24 = lmdb_env_24.begin(write=True)
elif(lmdb_id == 2):
lmdb_txn_48.commit()
lmdb_txn_48 = lmdb_env_48.begin(write=True)
n_idx += 1
neg_num += 1
break;
if item_id % batch_size != 0: # flush any records not yet committed by the in-loop batching above
if(lmdb_id == 0):
lmdb_txn_12.commit()
lmdb_env_12.close()
elif(lmdb_id == 1):
lmdb_txn_24.commit()
lmdb_env_24.close()
elif(lmdb_id == 2):
lmdb_txn_48.commit()
lmdb_env_48.close()
print 'last batch'
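# Optional sanity check (a minimal sketch, not called anywhere in this script): count the
# records actually stored in a generated database. Only generic py-lmdb calls are used, so
# it does not depend on the MTCNNDatum field layout.
def count_lmdb_entries(lmdb_path):
    """Return the number of records in the lmdb database at lmdb_path."""
    env = lmdb.open(lmdb_path, readonly=True)
    with env.begin() as txn:
        n = txn.stat()['entries']
    env.close()
    return n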
print "There are %d images in total" % item_id | [
"[email protected]"
] |