blob_id (string, 40-40) | directory_id (string, 40-40) | path (string, 3-616) | content_id (string, 40-40) | detected_licenses (sequence, 0-112) | license_type (string, 2 classes) | repo_name (string, 5-115) | snapshot_id (string, 40-40) | revision_id (string, 40-40) | branch_name (string, 777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (string, 149 classes) | src_encoding (string, 26 classes) | language (string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 3 to 10.2M) | extension (string, 188 classes) | content (string, 3 to 10.2M) | authors (sequence, 1-1) | author_id (string, 1-132)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
efec59e8370e4f56814a2820c890bc9dc2fff0bd | 659836ef3a9ac558538b016dbf4e128aa975ae7c | /backend/ingredient/migrations/0001_initial.py | 4f51e388822ee86c8a34ae068419dd993474fd70 | [] | no_license | zzerii/save_your_ingredients | fda1c769d158bca9dfd3c28ac9ff34ed7ae4e6a3 | 5ebde82255c1a6edf0c19d9032015d05c9d0abc9 | refs/heads/master | 2023-02-21T22:19:28.954594 | 2021-01-22T11:39:16 | 2021-01-22T11:39:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | # Generated by Django 3.1.3 on 2020-11-13 05:52
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('info', models.CharField(max_length=255)),
('trim', models.CharField(max_length=255)),
],
),
]
| [
"[email protected]"
] | |
c97da6f4cbe2fab9d85310007193f7f9c0e31396 | add74ecbd87c711f1e10898f87ffd31bb39cc5d6 | /xcp2k/classes/_guess_vectors1.py | dc797ddfb00b810b4621cc8d73f069c7cf32de02 | [] | no_license | superstar54/xcp2k | 82071e29613ccf58fc14e684154bb9392d00458b | e8afae2ccb4b777ddd3731fe99f451b56d416a83 | refs/heads/master | 2021-11-11T21:17:30.292500 | 2021-11-06T06:31:20 | 2021-11-06T06:31:20 | 62,589,715 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | from xcp2k.inputsection import InputSection
from xcp2k.classes._each591 import _each591
class _guess_vectors1(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Section_parameters = None
self.Add_last = None
self.Common_iteration_levels = None
self.Filename = None
self.Log_print_key = None
self.EACH = _each591()
self._name = "GUESS_VECTORS"
self._keywords = {'Add_last': 'ADD_LAST', 'Common_iteration_levels': 'COMMON_ITERATION_LEVELS', 'Filename': 'FILENAME', 'Log_print_key': 'LOG_PRINT_KEY'}
self._subsections = {'EACH': 'EACH'}
self._attributes = ['Section_parameters']
| [
"[email protected]"
] | |
6ea8d87f4043296a4cc62e53fd9effbbff89bc02 | 23307f8e889f232724756bb26b1def1f0ba3323b | /fairseq/tasks/translation.py | eb8f1152748737cafa5b8b528dcc64d926689108 | [] | no_license | krisjeong/fairseq_data | 9395cb574d91147c95b6f08eecd814e4cb2fdad8 | f29e7dae3c2be3a908e795bfc952cc845b80280d | refs/heads/master | 2023-07-12T22:21:22.349970 | 2021-08-18T06:20:11 | 2021-08-18T06:20:11 | 397,152,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,421 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import json
import logging
import os
from argparse import Namespace
import numpy as np
from fairseq import metrics, options, utils
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
LanguagePairDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
data_utils,
encoders,
indexed_dataset,
)
from fairseq.tasks import LegacyFairseqTask, register_task
EVAL_BLEU_ORDER = 4
logger = logging.getLogger(__name__)
def load_langpair_dataset(
data_path,
split,
src,
src_dict,
tgt,
tgt_dict,
combine,
dataset_impl,
upsample_primary,
left_pad_source,
left_pad_target,
max_source_positions,
max_target_positions,
prepend_bos=False,
load_alignments=False,
truncate_source=False,
append_source_id=False,
num_buckets=0,
shuffle=True,
pad_to_multiple=1,
):
def split_exists(split, src, tgt, lang, data_path):
filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
src_datasets = []
tgt_datasets = []
for k in itertools.count():
split_k = split + (str(k) if k > 0 else "")
# infer langcode
if split_exists(split_k, src, tgt, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt))
elif split_exists(split_k, tgt, src, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src))
else:
if k > 0:
break
else:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, data_path)
)
src_dataset = data_utils.load_indexed_dataset(
prefix + src, src_dict, dataset_impl
)
if truncate_source:
src_dataset = AppendTokenDataset(
TruncateDataset(
StripTokenDataset(src_dataset, src_dict.eos()),
max_source_positions - 1,
),
src_dict.eos(),
)
src_datasets.append(src_dataset)
tgt_dataset = data_utils.load_indexed_dataset(
prefix + tgt, tgt_dict, dataset_impl
)
if tgt_dataset is not None:
tgt_datasets.append(tgt_dataset)
logger.info(
"{} {} {}-{} {} examples".format(
data_path, split_k, src, tgt, len(src_datasets[-1])
)
)
if not combine:
break
assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
if len(src_datasets) == 1:
src_dataset = src_datasets[0]
tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
else:
sample_ratios = [1] * len(src_datasets)
sample_ratios[0] = upsample_primary
src_dataset = ConcatDataset(src_datasets, sample_ratios)
if len(tgt_datasets) > 0:
tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
else:
tgt_dataset = None
if prepend_bos:
assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
if tgt_dataset is not None:
tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
eos = None
if append_source_id:
src_dataset = AppendTokenDataset(
src_dataset, src_dict.index("[{}]".format(src))
)
if tgt_dataset is not None:
tgt_dataset = AppendTokenDataset(
tgt_dataset, tgt_dict.index("[{}]".format(tgt))
)
eos = tgt_dict.index("[{}]".format(tgt))
align_dataset = None
if load_alignments:
align_path = os.path.join(data_path, "{}.align.{}-{}".format(split, src, tgt))
if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
align_dataset = data_utils.load_indexed_dataset(
align_path, None, dataset_impl
)
tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
return LanguagePairDataset(
src_dataset,
src_dataset.sizes,
src_dict,
tgt_dataset,
tgt_dataset_sizes,
tgt_dict,
left_pad_source=left_pad_source,
left_pad_target=left_pad_target,
align_dataset=align_dataset,
eos=eos,
num_buckets=num_buckets,
shuffle=shuffle,
pad_to_multiple=pad_to_multiple,
)
@register_task("translation")
class TranslationTask(LegacyFairseqTask):
"""
Translate from one (source) language to another (target) language.
Args:
src_dict (~fairseq.data.Dictionary): dictionary for the source language
tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
parser.add_argument('data', help='colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner; \
however, valid and test data are always in the first directory to \
avoid the need for repeating them in all directories')
parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
help='source language')
parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
help='target language')
parser.add_argument('--load-alignments', action='store_true',
help='load the binarized alignments')
parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
help='pad the source on the left')
parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
help='pad the target on the left')
parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the source sequence')
parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence')
parser.add_argument('--upsample-primary', default=1, type=int,
help='amount to upsample primary dataset')
parser.add_argument('--truncate-source', action='store_true', default=False,
help='truncate source to max-source-positions')
parser.add_argument('--num-batch-buckets', default=0, type=int, metavar='N',
help='if >0, then bucket source and target lengths into N '
'buckets and pad accordingly; this is useful on TPUs '
'to minimize the number of compilations')
# options for reporting BLEU during validation
parser.add_argument('--eval-bleu', action='store_true',
help='evaluation with BLEU scores')
parser.add_argument('--eval-bleu-detok', type=str, default="space",
help='detokenize before computing BLEU (e.g., "moses"); '
'required if using --eval-bleu; use "space" to '
'disable detokenization; see fairseq.data.encoders '
'for other options')
parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON',
help='args for building the tokenizer, if needed')
parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False,
help='compute tokenized BLEU instead of sacrebleu')
parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None,
help='remove BPE before computing BLEU')
parser.add_argument('--eval-bleu-args', type=str, metavar='JSON',
                            help='generation args for BLEU scoring, '
'e.g., \'{"beam": 4, "lenpen": 0.6}\'')
parser.add_argument('--eval-bleu-print-samples', action='store_true',
help='print sample generations during validation')
# fmt: on
def __init__(self, args, src_dict, tgt_dict):
super().__init__(args)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
args.left_pad_source = utils.eval_bool(args.left_pad_source)
args.left_pad_target = utils.eval_bool(args.left_pad_target)
paths = utils.split_paths(args.data)
assert len(paths) > 0
# find language pair automatically
if args.source_lang is None or args.target_lang is None:
args.source_lang, args.target_lang = data_utils.infer_language_pair(
paths[0]
)
if args.source_lang is None or args.target_lang is None:
raise Exception(
"Could not infer language pair, please provide it explicitly"
)
# load dictionaries
src_dict = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(args.source_lang))
)
tgt_dict = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(args.target_lang))
)
assert src_dict.pad() == tgt_dict.pad()
assert src_dict.eos() == tgt_dict.eos()
assert src_dict.unk() == tgt_dict.unk()
logger.info("[{}] dictionary: {} types".format(args.source_lang, len(src_dict)))
logger.info("[{}] dictionary: {} types".format(args.target_lang, len(tgt_dict)))
return cls(args, src_dict, tgt_dict)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
if split != getattr(self.args, "train_subset", None):
# if not training data set, use the first shard for valid and test
paths = paths[:1]
data_path = paths[(epoch - 1) % len(paths)]
# infer langcode
src, tgt = self.args.source_lang, self.args.target_lang
self.datasets[split] = load_langpair_dataset(
data_path,
split,
src,
self.src_dict,
tgt,
self.tgt_dict,
combine=combine,
dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=self.args.max_source_positions,
max_target_positions=self.args.max_target_positions,
load_alignments=self.args.load_alignments,
truncate_source=self.args.truncate_source,
num_buckets=self.args.num_batch_buckets,
shuffle=(split != "test"),
pad_to_multiple=self.args.required_seq_len_multiple,
)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
return LanguagePairDataset(
src_tokens,
src_lengths,
self.source_dictionary,
tgt_dict=self.target_dictionary,
constraints=constraints,
)
def build_model(self, args):
model = super().build_model(args)
if getattr(args, "eval_bleu", False):
assert getattr(args, "eval_bleu_detok", None) is not None, (
"--eval-bleu-detok is required if using --eval-bleu; "
"try --eval-bleu-detok=moses (or --eval-bleu-detok=space "
"to disable detokenization, e.g., when using sentencepiece)"
)
detok_args = json.loads(getattr(args, "eval_bleu_detok_args", "{}") or "{}")
self.tokenizer = encoders.build_tokenizer(
Namespace(
tokenizer=getattr(args, "eval_bleu_detok", None), **detok_args
)
)
gen_args = json.loads(getattr(args, "eval_bleu_args", "{}") or "{}")
self.sequence_generator = self.build_generator(
[model], Namespace(**gen_args)
)
return model
def valid_step(self, sample, model, criterion):
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
if self.args.eval_bleu:
bleu = self._inference_with_bleu(self.sequence_generator, sample, model)
logging_output["_bleu_sys_len"] = bleu.sys_len
logging_output["_bleu_ref_len"] = bleu.ref_len
# we split counts into separate entries so that they can be
# summed efficiently across workers using fast-stat-sync
assert len(bleu.counts) == EVAL_BLEU_ORDER
for i in range(EVAL_BLEU_ORDER):
logging_output["_bleu_counts_" + str(i)] = bleu.counts[i]
logging_output["_bleu_totals_" + str(i)] = bleu.totals[i]
return loss, sample_size, logging_output
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
if self.args.eval_bleu:
def sum_logs(key):
return sum(log.get(key, 0) for log in logging_outputs)
counts, totals = [], []
for i in range(EVAL_BLEU_ORDER):
counts.append(sum_logs("_bleu_counts_" + str(i)))
totals.append(sum_logs("_bleu_totals_" + str(i)))
if max(totals) > 0:
# log counts as numpy arrays -- log_scalar will sum them correctly
metrics.log_scalar("_bleu_counts", np.array(counts))
metrics.log_scalar("_bleu_totals", np.array(totals))
metrics.log_scalar("_bleu_sys_len", sum_logs("_bleu_sys_len"))
metrics.log_scalar("_bleu_ref_len", sum_logs("_bleu_ref_len"))
def compute_bleu(meters):
import inspect
import sacrebleu
fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0]
if "smooth_method" in fn_sig:
smooth = {"smooth_method": "exp"}
else:
smooth = {"smooth": "exp"}
bleu = sacrebleu.compute_bleu(
correct=meters["_bleu_counts"].sum,
total=meters["_bleu_totals"].sum,
sys_len=meters["_bleu_sys_len"].sum,
ref_len=meters["_bleu_ref_len"].sum,
**smooth
)
return round(bleu.score, 2)
metrics.log_derived("bleu", compute_bleu)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary`."""
return self.src_dict
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary`."""
return self.tgt_dict
def _inference_with_bleu(self, generator, sample, model):
import sacrebleu
def decode(toks, escape_unk=False):
s = self.tgt_dict.string(
toks.int().cpu(),
self.args.eval_bleu_remove_bpe,
# The default unknown string in fairseq is `<unk>`, but
# this is tokenized by sacrebleu as `< unk >`, inflating
# BLEU scores. Instead, we use a somewhat more verbose
# alternative that is unlikely to appear in the real
# reference, but doesn't get split into multiple tokens.
unk_string=("UNKNOWNTOKENINREF" if escape_unk else "UNKNOWNTOKENINHYP"),
)
if self.tokenizer:
s = self.tokenizer.decode(s)
return s
gen_out = self.inference_step(generator, [model], sample, prefix_tokens=None)
hyps, refs = [], []
for i in range(len(gen_out)):
hyps.append(decode(gen_out[i][0]["tokens"]))
refs.append(
decode(
utils.strip_pad(sample["target"][i], self.tgt_dict.pad()),
escape_unk=True, # don't count <unk> as matches to the hypo
)
)
if self.args.eval_bleu_print_samples:
logger.info("example hypothesis: " + hyps[0])
logger.info("example reference: " + refs[0])
if self.args.eval_tokenized_bleu:
return sacrebleu.corpus_bleu(hyps, [refs], tokenize="none")
else:
return sacrebleu.corpus_bleu(hyps, [refs])
| [
"[email protected]"
] | |
65b28ee3513376fdfc19b0ce3e8b4f8543856013 | d737fa49e2a7af29bdbe5a892bce2bc7807a567c | /software/qt_examples/src/pyqt-official/webkit/formextractor/ui_formextractor.py | 518bb9cef3ef753de6499455d980501f40903ab0 | [
"GPL-3.0-only",
"MIT",
"CC-BY-NC-SA-4.0",
"GPL-1.0-or-later"
] | permissive | TG-Techie/CASPER | ec47dfbfd6c3a668739ff4d707572e0b853518b4 | 2575d3d35e7dbbd7f78110864e659e582c6f3c2e | refs/heads/master | 2020-12-19T12:43:53.825964 | 2020-01-23T17:24:04 | 2020-01-23T17:24:04 | 235,736,872 | 0 | 1 | MIT | 2020-01-23T17:09:19 | 2020-01-23T06:29:10 | Python | UTF-8 | Python | false | false | 4,813 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'formextractor.ui'
#
# Created: Tue May 14 17:59:08 2013
# by: PyQt5 UI code generator 5.0-snapshot-b0831183bf83
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(680, 218)
self.horizontalLayout = QtWidgets.QHBoxLayout(Form)
self.horizontalLayout.setObjectName("horizontalLayout")
self.webFormGroupBox = QtWidgets.QGroupBox(Form)
self.webFormGroupBox.setObjectName("webFormGroupBox")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.webFormGroupBox)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.webView = QtWebKitWidgets.QWebView(self.webFormGroupBox)
self.webView.setMinimumSize(QtCore.QSize(200, 150))
self.webView.setMaximumSize(QtCore.QSize(400, 16777215))
self.webView.setUrl(QtCore.QUrl("about:blank"))
self.webView.setObjectName("webView")
self.verticalLayout.addWidget(self.webView)
self.verticalLayout_2.addLayout(self.verticalLayout)
self.horizontalLayout.addWidget(self.webFormGroupBox)
spacerItem = QtWidgets.QSpacerItem(28, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.dataGroupBox = QtWidgets.QGroupBox(Form)
self.dataGroupBox.setObjectName("dataGroupBox")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.dataGroupBox)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.formLayout = QtWidgets.QFormLayout()
self.formLayout.setFieldGrowthPolicy(QtWidgets.QFormLayout.FieldsStayAtSizeHint)
self.formLayout.setObjectName("formLayout")
self.firstNameLabel = QtWidgets.QLabel(self.dataGroupBox)
self.firstNameLabel.setObjectName("firstNameLabel")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.firstNameLabel)
self.firstNameEdit = QtWidgets.QLineEdit(self.dataGroupBox)
self.firstNameEdit.setReadOnly(True)
self.firstNameEdit.setObjectName("firstNameEdit")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.firstNameEdit)
self.lastNameLabel = QtWidgets.QLabel(self.dataGroupBox)
self.lastNameLabel.setObjectName("lastNameLabel")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.lastNameLabel)
self.lastNameEdit = QtWidgets.QLineEdit(self.dataGroupBox)
self.lastNameEdit.setReadOnly(True)
self.lastNameEdit.setObjectName("lastNameEdit")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.lastNameEdit)
self.genderLabel = QtWidgets.QLabel(self.dataGroupBox)
self.genderLabel.setObjectName("genderLabel")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.genderLabel)
self.genderEdit = QtWidgets.QLineEdit(self.dataGroupBox)
self.genderEdit.setReadOnly(True)
self.genderEdit.setObjectName("genderEdit")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.genderEdit)
self.updatesLabel = QtWidgets.QLabel(self.dataGroupBox)
self.updatesLabel.setObjectName("updatesLabel")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.updatesLabel)
self.updatesEdit = QtWidgets.QLineEdit(self.dataGroupBox)
self.updatesEdit.setReadOnly(True)
self.updatesEdit.setObjectName("updatesEdit")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.updatesEdit)
self.verticalLayout_3.addLayout(self.formLayout)
spacerItem1 = QtWidgets.QSpacerItem(20, 24, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_3.addItem(spacerItem1)
self.horizontalLayout.addWidget(self.dataGroupBox)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.webFormGroupBox.setTitle(_translate("Form", "Web Form"))
self.dataGroupBox.setTitle(_translate("Form", "Extracted Data"))
self.firstNameLabel.setText(_translate("Form", "First Name"))
self.lastNameLabel.setText(_translate("Form", "Last Name"))
self.genderLabel.setText(_translate("Form", "Gender"))
self.updatesLabel.setText(_translate("Form", "Receive Updates"))
from PyQt5 import QtWebKitWidgets
| [
"[email protected]"
] | |
a151f15578260c6246fa532d91a39e1ae25d102d | f7bc9ff51518d11d0d21249e57cdbd7277091e18 | /0x02-python-import_modules/2-args.py | 7ed9bebdf1babd5e74b51714354869b8faf4b092 | [] | no_license | veeteeran/holbertonschool-low_level_programming | 578fd521de625e47406b9141920a531c0483f042 | ff2f79942eb282ae485deda7d9598eda50723c3f | refs/heads/master | 2020-12-29T04:41:46.509414 | 2020-12-27T14:49:57 | 2020-12-27T14:49:57 | 238,458,078 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | #!/usr/bin/python3
if __name__ == "__main__":
from sys import argv
length = len(argv)
if length == 1:
print("{:d} arguments.".format(length - 1))
elif length == 2:
print("{:d} argument:".format(length - 1))
else:
print("{:d} arguments:".format(length - 1))
for i in range(1, length):
print("{:d}: {}".format(i, argv[i]))
| [
"[email protected]"
] | |
25c20fdddfb3b6a9a8654381f8f2a0f5d7f1be1a | 0e1cbcf4f05e6ddd498f6b334014106f5734bfa4 | /backend/bin/elbadmin | ebaace8a1f684454e7e59b8e022e57f4975abdf8 | [
"MIT"
] | permissive | rcmiskin10/mem_landing_page | 33e9b448dcc2ff7de566044d6f81adf465f0c561 | 892e5f57a6359f39e4edd13de848cf4d0ec6344a | refs/heads/master | 2020-04-01T00:04:35.525714 | 2018-10-12T02:12:58 | 2018-10-12T02:12:58 | 152,680,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,721 | #!/home/rcmiskin/Desktop/mem/backend/bin/python3
# Copyright (c) 2009 Chris Moyer http://coredumped.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Elastic Load Balancer Tool
#
VERSION = "0.2"
usage = """%prog [options] [command]
Commands:
list|ls List all Elastic Load Balancers
delete <name> Delete ELB <name>
get <name> Get all instances associated with <name>
create <name> Create an ELB; -z and -l are required
add <name> <instances> Add <instances> in ELB <name>
remove|rm <name> <instances> Remove <instances> from ELB <name>
reap <name> Remove terminated instances from ELB <name>
enable|en <name> <zone> Enable Zone <zone> for ELB <name>
disable <name> <zone> Disable Zone <zone> for ELB <name>
addl <name> Add listeners (specified by -l) to the ELB
<name>
rml <name> <port> Remove Listener(s) specified by the port on
the ELB <name>
"""
def find_elb(elb, name):
try:
elbs = elb.get_all_load_balancers(name)
except boto.exception.BotoServerError as se:
if se.code == 'LoadBalancerNotFound':
elbs = []
else:
raise
if len(elbs) < 1:
print "No load balancer by the name of %s found" % name
return None
elif len(elbs) > 1:
print "More than one elb matches %s?" % name
return None
# Should not happen
if name not in elbs[0].name:
print "No load balancer by the name of %s found" % name
return None
return elbs[0]
def list(elb):
"""List all ELBs"""
print "%-20s %s" % ("Name", "DNS Name")
print "-" * 80
for b in elb.get_all_load_balancers():
print "%-20s %s" % (b.name, b.dns_name)
def check_valid_region(conn, region):
if conn is None:
print 'Invalid region (%s)' % region
sys.exit(1)
def get(elb, name):
"""Get details about ELB <name>"""
b = find_elb(elb, name)
if b:
print "=" * 80
print "Name: %s" % b.name
print "DNS Name: %s" % b.dns_name
if b.canonical_hosted_zone_name:
chzn = b.canonical_hosted_zone_name
print "Canonical hosted zone name: %s" % chzn
if b.canonical_hosted_zone_name_id:
chznid = b.canonical_hosted_zone_name_id
print "Canonical hosted zone name id: %s" % chznid
print
print "Health Check: %s" % b.health_check
print
print "Listeners"
print "---------"
print "%-8s %-8s %s" % ("IN", "OUT", "PROTO")
for l in b.listeners:
print "%-8s %-8s %s" % (l[0], l[1], l[2])
print
print " Zones "
print "---------"
for z in b.availability_zones:
print z
print
# Make map of all instance Id's to Name tags
import boto
from boto.compat.six import iteritems
if not options.region:
ec2 = boto.connect_ec2()
else:
ec2 = boto.ec2.connect_to_region(options.region)
check_valid_region(ec2, options.region)
instance_health = b.get_instance_health()
instances = [state.instance_id for state in instance_health]
names = dict((k,'') for k in instances)
for i in ec2.get_only_instances():
if i.id in instances:
names[i.id] = i.tags.get('Name', '')
name_column_width = max([4] + [len(v) for k,v in iteritems(names)]) + 2
print "Instances"
print "---------"
print "%-12s %-15s %-*s %s" % ("ID",
"STATE",
name_column_width, "NAME",
"DESCRIPTION")
for state in instance_health:
print "%-12s %-15s %-*s %s" % (state.instance_id,
state.state,
name_column_width, names[state.instance_id],
state.description)
print
def create(elb, name, zones, listeners):
"""Create an ELB named <name>"""
l_list = []
for l in listeners:
l = l.split(",")
if l[2] == 'HTTPS':
l_list.append((int(l[0]), int(l[1]), l[2], l[3]))
else:
l_list.append((int(l[0]), int(l[1]), l[2]))
b = elb.create_load_balancer(name, zones, l_list)
return get(elb, name)
def delete(elb, name):
"""Delete this ELB"""
b = find_elb(elb, name)
if b:
b.delete()
print "Load Balancer %s deleted" % name
def add_instances(elb, name, instances):
"""Add <instance> to ELB <name>"""
b = find_elb(elb, name)
if b:
b.register_instances(instances)
return get(elb, name)
def remove_instances(elb, name, instances):
"""Remove instance from elb <name>"""
b = find_elb(elb, name)
if b:
b.deregister_instances(instances)
return get(elb, name)
def reap_instances(elb, name):
"""Remove terminated instances from elb <name>"""
b = find_elb(elb, name)
if b:
for state in b.get_instance_health():
if (state.state == 'OutOfService' and
state.description == 'Instance is in terminated state.'):
b.deregister_instances([state.instance_id])
return get(elb, name)
def enable_zone(elb, name, zone):
"""Enable <zone> for elb"""
b = find_elb(elb, name)
if b:
b.enable_zones([zone])
return get(elb, name)
def disable_zone(elb, name, zone):
"""Disable <zone> for elb"""
b = find_elb(elb, name)
if b:
b.disable_zones([zone])
return get(elb, name)
def add_listener(elb, name, listeners):
"""Add listeners to a given load balancer"""
l_list = []
for l in listeners:
l = l.split(",")
l_list.append((int(l[0]), int(l[1]), l[2]))
b = find_elb(elb, name)
if b:
b.create_listeners(l_list)
return get(elb, name)
def rm_listener(elb, name, ports):
"""Remove listeners from a given load balancer"""
b = find_elb(elb, name)
if b:
b.delete_listeners(ports)
return get(elb, name)
if __name__ == "__main__":
try:
import readline
except ImportError:
pass
import boto
import sys
from optparse import OptionParser
from boto.mashups.iobject import IObject
parser = OptionParser(version=VERSION, usage=usage)
parser.add_option("-z", "--zone",
help="Operate on zone",
action="append", default=[], dest="zones")
parser.add_option("-l", "--listener",
help="Specify Listener in,out,proto",
action="append", default=[], dest="listeners")
parser.add_option("-r", "--region",
help="Region to connect to",
action="store", dest="region")
(options, args) = parser.parse_args()
if len(args) < 1:
parser.print_help()
sys.exit(1)
if not options.region:
elb = boto.connect_elb()
else:
import boto.ec2.elb
elb = boto.ec2.elb.connect_to_region(options.region)
check_valid_region(elb, options.region)
print "%s" % (elb.region.endpoint)
command = args[0].lower()
if command in ("ls", "list"):
list(elb)
elif command == "get":
get(elb, args[1])
elif command == "create":
if not options.listeners:
print "-l option required for command create"
sys.exit(1)
if not options.zones:
print "-z option required for command create"
sys.exit(1)
create(elb, args[1], options.zones, options.listeners)
elif command == "delete":
delete(elb, args[1])
elif command in ("add", "put"):
add_instances(elb, args[1], args[2:])
elif command in ("rm", "remove"):
remove_instances(elb, args[1], args[2:])
elif command == "reap":
reap_instances(elb, args[1])
elif command in ("en", "enable"):
enable_zone(elb, args[1], args[2])
elif command == "disable":
disable_zone(elb, args[1], args[2])
elif command == "addl":
if not options.listeners:
print "-l option required for command addl"
sys.exit(1)
add_listener(elb, args[1], options.listeners)
elif command == "rml":
if not args[2:]:
print "port required"
sys.exit(2)
rm_listener(elb, args[1], args[2:])
| [
"[email protected]"
] | ||
bdb8f13e9fbc077dbe3d7433f38846a7e81f3246 | 12abe02e205d3e8dabe78fb5a93ccca89e2c42c4 | /otp/level/LevelSpec.py | 921ec08b942508c5f2208dfbb93a095d690be079 | [] | no_license | nate97/toontown-src-py3.0 | 55092b2973b76e6b6d566887f44c52822684394c | f76c515801ae08c40b264b48365211fd44b137eb | refs/heads/master | 2022-07-07T05:23:22.071185 | 2022-06-22T16:36:10 | 2022-06-22T16:36:10 | 187,682,471 | 15 | 8 | null | null | null | null | UTF-8 | Python | false | false | 15,137 | py | from pandac import PandaModules as PM
from direct.directnotify import DirectNotifyGlobal
from direct.showbase.PythonUtil import list2dict, uniqueElements
import string
from . import LevelConstants
import types
import importlib
if __dev__:
import os
class LevelSpec:
notify = DirectNotifyGlobal.directNotify.newCategory('LevelSpec')
SystemEntIds = (LevelConstants.UberZoneEntId, LevelConstants.LevelMgrEntId, LevelConstants.EditMgrEntId)
def __init__(self, spec = None, scenario = 0):
newSpec = 0
if type(spec) is types.ModuleType:
if __dev__:
importlib.reload(spec)
self.specDict = spec.levelSpec
if __dev__:
self.setFilename(spec.__file__)
elif type(spec) is dict:
self.specDict = spec
elif spec is None:
if __dev__:
newSpec = 1
self.specDict = {'globalEntities': {},
'scenarios': [{}]}
self.entId2specDict = {}
self.entId2specDict.update(list2dict(self.getGlobalEntIds(), value=self.privGetGlobalEntityDict()))
for i in range(self.getNumScenarios()):
self.entId2specDict.update(list2dict(self.getScenarioEntIds(i), value=self.privGetScenarioEntityDict(i)))
self.setScenario(scenario)
if __dev__:
if newSpec:
from . import EntityTypes
from . import EntityTypeRegistry
etr = EntityTypeRegistry.EntityTypeRegistry(EntityTypes)
self.setEntityTypeReg(etr)
entId = LevelConstants.UberZoneEntId
self.insertEntity(entId, 'zone')
self.doSetAttrib(entId, 'name', 'UberZone')
entId = LevelConstants.LevelMgrEntId
self.insertEntity(entId, 'levelMgr')
self.doSetAttrib(entId, 'name', 'LevelMgr')
entId = LevelConstants.EditMgrEntId
self.insertEntity(entId, 'editMgr')
self.doSetAttrib(entId, 'name', 'EditMgr')
return
def destroy(self):
del self.specDict
del self.entId2specDict
del self.scenario
if hasattr(self, 'level'):
del self.level
if hasattr(self, 'entTypeReg'):
del self.entTypeReg
def getNumScenarios(self):
return len(self.specDict['scenarios'])
def setScenario(self, scenario):
self.scenario = scenario
def getScenario(self):
return self.scenario
def getGlobalEntIds(self):
return list(self.privGetGlobalEntityDict().keys())
def getScenarioEntIds(self, scenario = None):
if scenario is None:
scenario = self.scenario
return list(self.privGetScenarioEntityDict(scenario).keys())
def getAllEntIds(self):
return self.getGlobalEntIds() + self.getScenarioEntIds()
def getAllEntIdsFromAllScenarios(self):
entIds = self.getGlobalEntIds()
for scenario in range(self.getNumScenarios()):
entIds.extend(self.getScenarioEntIds(scenario))
return entIds
def getEntitySpec(self, entId):
specDict = self.entId2specDict[entId]
return specDict[entId]
def getCopyOfSpec(self, spec):
return __import__(self.getSpecImportsModuleName(), fromlist=['*']).__dict__
def getEntitySpecCopy(self, entId):
specDict = self.entId2specDict[entId]
return self.getCopyOfSpec(specDict[entId])
def getEntityType(self, entId):
return self.getEntitySpec(entId)['type']
def getEntityZoneEntId(self, entId):
spec = self.getEntitySpec(entId)
type = spec['type']
if type == 'zone':
return entId
return self.getEntityZoneEntId(spec['parentEntId'])
def getEntType2ids(self, entIds):
entType2ids = {}
for entId in entIds:
type = self.getEntityType(entId)
entType2ids.setdefault(type, [])
entType2ids[type].append(entId)
return entType2ids
def privGetGlobalEntityDict(self):
return self.specDict['globalEntities']
def privGetScenarioEntityDict(self, scenario):
return self.specDict['scenarios'][scenario]
def printZones(self):
allIds = self.getAllEntIds()
type2id = self.getEntType2ids(allIds)
zoneIds = type2id['zone']
if 0 in zoneIds:
zoneIds.remove(0)
zoneIds.sort()
for zoneNum in zoneIds:
spec = self.getEntitySpec(zoneNum)
print('zone %s: %s' % (zoneNum, spec['name']))
if __dev__:
def setLevel(self, level):
self.level = level
def hasLevel(self):
return hasattr(self, 'level')
def setEntityTypeReg(self, entTypeReg):
self.entTypeReg = entTypeReg
for entId in self.getAllEntIds():
spec = self.getEntitySpec(entId)
type = self.getEntityType(entId)
typeDesc = self.entTypeReg.getTypeDesc(type)
attribDescDict = typeDesc.getAttribDescDict()
for attribName, desc in attribDescDict.items():
if attribName not in spec:
spec[attribName] = desc.getDefaultValue()
self.checkSpecIntegrity()
def hasEntityTypeReg(self):
return hasattr(self, 'entTypeReg')
def setFilename(self, filename):
self.filename = filename
def doSetAttrib(self, entId, attrib, value):
specDict = self.entId2specDict[entId]
specDict[entId][attrib] = value
def setAttribChange(self, entId, attrib, value, username):
LevelSpec.notify.info('setAttribChange(%s): %s, %s = %s' % (username,
entId,
attrib,
repr(value)))
self.doSetAttrib(entId, attrib, value)
if self.hasLevel():
self.level.handleAttribChange(entId, attrib, value, username)
def insertEntity(self, entId, entType, parentEntId = 'unspecified'):
LevelSpec.notify.info('inserting entity %s (%s)' % (entId, entType))
globalEnts = self.privGetGlobalEntityDict()
self.entId2specDict[entId] = globalEnts
globalEnts[entId] = {}
spec = globalEnts[entId]
attribDescs = self.entTypeReg.getTypeDesc(entType).getAttribDescDict()
for name, desc in list(attribDescs.items()):
spec[name] = desc.getDefaultValue()
spec['type'] = entType
if parentEntId != 'unspecified':
spec['parentEntId'] = parentEntId
if self.hasLevel():
self.level.handleEntityInsert(entId)
else:
LevelSpec.notify.warning('no level to be notified of insertion')
def removeEntity(self, entId):
LevelSpec.notify.info('removing entity %s' % entId)
if self.hasLevel():
self.level.handleEntityRemove(entId)
else:
LevelSpec.notify.warning('no level to be notified of removal')
dict = self.entId2specDict[entId]
del dict[entId]
del self.entId2specDict[entId]
def removeZoneReferences(self, removedZoneNums):
type2ids = self.getEntType2ids(self.getAllEntIdsFromAllScenarios())
for type in type2ids:
typeDesc = self.entTypeReg.getTypeDesc(type)
visZoneListAttribs = typeDesc.getAttribsOfType('visZoneList')
if len(visZoneListAttribs) > 0:
for entId in type2ids[type]:
spec = self.getEntitySpec(entId)
for attribName in visZoneListAttribs:
for zoneNum in removedZoneNums:
while zoneNum in spec[attribName]:
spec[attribName].remove(zoneNum)
def getSpecImportsModuleName(self):
return 'toontown.coghq.SpecImports'
def getFilename(self):
return self.filename
def privGetBackupFilename(self, filename):
return '%s.bak' % filename
def saveToDisk(self, filename = None, makeBackup = 1):
if filename is None:
filename = self.filename
if filename.endswith('.pyc'):
filename = filename.replace('.pyc', '.py')
if makeBackup and self.privFileExists(filename):
try:
backupFilename = self.privGetBackupFilename(filename)
self.privRemoveFile(backupFilename)
os.rename(filename, backupFilename)
except OSError as e:
LevelSpec.notify.warning('error during backup: %s' % str(e))
LevelSpec.notify.info("writing to '%s'" % filename)
self.privRemoveFile(filename)
self.privSaveToDisk(filename)
return
def privSaveToDisk(self, filename):
retval = 1
    f = open(filename, 'w')
try:
f.write(self.getPrettyString())
except IOError:
retval = 0
f.close()
return retval
def privFileExists(self, filename):
try:
os.stat(filename)
return 1
except OSError:
return 0
def privRemoveFile(self, filename):
try:
os.remove(filename)
return 1
except OSError:
return 0
def getPrettyString(self):
import pprint
tabWidth = 4
tab = ' ' * tabWidth
globalEntitiesName = 'GlobalEntities'
scenarioEntitiesName = 'Scenario%s'
topLevelName = 'levelSpec'
def getPrettyEntityDictStr(name, dict, tabs = 0):
def t(n):
return (tabs + n) * tab
def sortList(lst, firstElements = []):
elements = list(lst)
result = []
for el in firstElements:
if el in elements:
result.append(el)
elements.remove(el)
elements.sort()
result.extend(elements)
return result
firstTypes = ('levelMgr', 'editMgr', 'zone')
firstAttribs = ('type', 'name', 'comment', 'parentEntId', 'pos', 'x', 'y', 'z', 'hpr', 'h', 'p', 'r', 'scale', 'sx', 'sy', 'sz', 'color', 'model')
str = t(0) + '%s = {\n' % name
entIds = list(dict.keys())
entType2ids = self.getEntType2ids(entIds)
types = sortList(list(entType2ids.keys()), firstTypes)
for type in types:
str += t(1) + '# %s\n' % type.upper()
entIds = entType2ids[type]
entIds.sort()
for entId in entIds:
str += t(1) + '%s: {\n' % entId
spec = dict[entId]
attribs = sortList(list(spec.keys()), firstAttribs)
for attrib in attribs:
str += t(2) + "'%s': %s,\n" % (attrib, repr(spec[attrib]))
str += t(2) + '}, # end entity %s\n' % entId
str += t(1) + '}\n'
return str
def getPrettyTopLevelDictStr(tabs = 0):
def t(n):
return (tabs + n) * tab
str = t(0) + '%s = {\n' % topLevelName
str += t(1) + "'globalEntities': %s,\n" % globalEntitiesName
str += t(1) + "'scenarios': [\n"
for i in range(self.getNumScenarios()):
str += t(2) + '%s,\n' % (scenarioEntitiesName % i)
str += t(2) + '],\n'
str += t(1) + '}\n'
return str
str = 'from %s import *\n' % self.getSpecImportsModuleName()
str += '\n'
str += getPrettyEntityDictStr('GlobalEntities', self.privGetGlobalEntityDict())
str += '\n'
numScenarios = self.getNumScenarios()
for i in range(numScenarios):
str += getPrettyEntityDictStr('Scenario%s' % i, self.privGetScenarioEntityDict(i))
str += '\n'
str += getPrettyTopLevelDictStr()
self.testPrettyString(prettyString=str)
return str
def _recurKeyTest(self, dict1, dict2):
s = ''
errorCount = 0
if set(dict1.keys()) != set(dict2.keys()):
return 0
for key in dict1:
if type(dict1[key]) == type({}) and type(dict2[key]) == type({}):
if not self._recurKeyTest(dict1[key], dict2[key]):
return 0
else:
strd1 = repr(dict1[key])
strd2 = repr(dict2[key])
if strd1 != strd2:
s += '\nBAD VALUE(%s): %s != %s\n' % (key, strd1, strd2)
errorCount += 1
print(s)
if errorCount == 0:
return 1
else:
return 0
def testPrettyString(self, prettyString=None):
pass
def checkSpecIntegrity(self):
entIds = self.getGlobalEntIds()
entIds = list2dict(entIds)
for i in range(self.getNumScenarios()):
for id in self.getScenarioEntIds(i):
entIds[id] = None
if self.entTypeReg is not None:
allEntIds = entIds
for entId in allEntIds:
spec = self.getEntitySpec(entId)
entType = spec['type']
typeDesc = self.entTypeReg.getTypeDesc(entType)
attribNames = typeDesc.getAttribNames()
attribDescs = typeDesc.getAttribDescDict()
for attrib in list(spec.keys()):
if attrib not in attribNames:
LevelSpec.notify.warning("entId %s (%s): unknown attrib '%s', omitting" % (entId, spec['type'], attrib))
del spec[attrib]
for attribName in attribNames:
if attribName not in spec:
LevelSpec.notify.warning("entId %s (%s): missing attrib '%s'" % (entId, spec['type'], attribName))
return
def stringHash(self):
h = PM.HashVal()
h.hashString(repr(self))
return h.asHex()
def __hash__(self):
return hash(repr(self))
def __str__(self):
return 'LevelSpec'
def __repr__(self):
return 'LevelSpec(%s, scenario=%s)' % (repeatableRepr(self.specDict), repeatableRepr(self.scenario))
| [
"[email protected]"
] | |
6dd98b7831f5feee90d9df6b5301c0257eb2e665 | b972faf032590c9722dc240c45fc60157d5a1bee | /(구현)주사위네개.py | a7b250ef8517e21ed6b5335a12dac1288a5381af | [] | no_license | kih1024/codingStudy | 3a91b628bc301d1777d954595e93bf1f9246aca3 | 3e8a6fe86d3861613a85d3e75991f4bc7cd1e716 | refs/heads/master | 2022-12-09T04:58:55.264433 | 2020-09-22T07:29:44 | 2020-09-22T07:29:44 | 269,874,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | # https://www.acmicpc.net/problem/2484
def dice():
    # Read one roll of four dice and sort so equal values are adjacent.
    li = sorted(list(map(int, input().split())))
    temp = set(li)
    if len(temp) == 1:
        # Four of a kind: 50000 + value * 5000
        return li[0] * 5000 + 50000
    elif len(temp) == 2 and li[1] == li[2]:
        # Three of a kind (sorted, so the middle value is the tripled one):
        # 10000 + value * 1000
        return li[1] * 1000 + 10000
    elif len(temp) == 2 and li[1] != li[2]:
        # Two pairs: 2000 + 500 * each pair value
        return (li[1] * 500) + (li[2] * 500) + 2000
    elif len(temp) == 3:
        # One pair: 1000 + pair value * 100
        for i in range(3):
            if li[i] == li[i + 1]:
                return li[i] * 100 + 1000
    else:
        # All four dice different: highest die * 100
        return li[-1] * 100
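# Illustrative run (stdin assumed): for input "2\n3 3 3 3\n2 2 6 6",
# dice() yields 65000 (50000 + 3*5000) and 6000 (2*500 + 6*500 + 2000),
# so the script prints 65000.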
n = int(input())
# money = []
# for i in range(n):
# li = sorted(list(map(int, input().split())))
# money.append(dice())
# print(max(money))
print(max(dice() for i in range(n))) | [
"[email protected]"
] | |
a8140ac64131df0e94f7c4b4e3daa8d4fbc87dbf | 6d60ac89ee9c14bfc62342f7b33da3932f4eb564 | /mini_build.py | 8b0881434026fcc7a4ba9e7ec6986b0b8a269d1d | [
"MIT"
] | permissive | samuelcolvin/donkey-simple-old | dda5b3c41387231b755965fa982bbb4c845e24c1 | 765810076c01d3677819e4f5a03aefd05300fbda | refs/heads/master | 2021-05-28T19:49:45.269978 | 2014-05-30T17:04:33 | 2014-05-30T17:04:33 | 14,808,774 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,039 | py | from DonkeySimple.DS.download import download_libraries
import os, re
lib_static_dir = os.path.join('DonkeySimple', 'WebInterface', 'static', 'libs')
libs_json_path = 'static_libraries.json'
try:
download_libraries(libs_json_path, lib_static_dir)
except Exception, e:
print 'ERROR: %s' % str(e)
print 'Problem downloading libraries, you may have problems with internet connection.\n\n'
print 'generating long_descriptions docs for PyPi...'
import pandoc
pandoc.core.PANDOC_PATH = '/usr/bin/pandoc'
doc = pandoc.Document()
readme_file = 'README.md'
doc.markdown = open(readme_file, 'r').read()
docs_file = 'DonkeySimple/docs.txt'
open(docs_file,'w').write(doc.rst)
print '%s converted to rst and written to %s' % (readme_file, docs_file)
print 'changing version number'
setup_text = open('setup.py','r').read()
s=re.search("version='(.+?)'", setup_text)
v = s.groups()[0]
print 'setting version to: %s' % v
init_file = 'DonkeySimple/__init__.py'
init_text = "__version__ = 'v%s'\n" % v
open(init_file,'w').write(init_text) | [
"[email protected]"
] | |
a40ab2dbce7300afcd86166710c810b3a538e662 | f63c4eb29ce57319441f5469d1d049b63bc220de | /swu_cycle_variance/run909.py | 93ff56deac66a626022dba5caebd6cc9b16e0c7d | [] | no_license | a-co/diversion_models | 0237642153668b16035699e9e734ff0538568582 | 69eed2687b1cd2b48f5717d15919eccd24a0eabc | refs/heads/main | 2023-05-02T19:04:26.333677 | 2020-06-18T20:50:18 | 2020-06-18T20:50:18 | 216,904,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,241 | py | SIMULATION = {'simulation': {'agent': [{'name': 'deployer_civilian', 'prototype': 'civilian_deployer'}, {'name': 'deployer_shared', 'prototype': 'shared_deployer'}], 'archetypes': {'spec': [{'lib': 'cycamore', 'name': 'DeployInst'}, {'lib': 'cycamore', 'name': 'Source'}, {'lib': 'cycamore', 'name': 'Sink'}, {'lib': 'cycamore', 'name': 'Storage'}, {'lib': 'cycamore', 'name': 'Reactor'}, {'lib': 'cycamore', 'name': 'Separations'}, {'lib': 'cycamore', 'name': 'Enrichment'}]}, 'control': {'duration': '144', 'explicit_inventory': 'true', 'startmonth': '1', 'startyear': '2020'}, 'prototype': [{'config': {'Source': {'inventory_size': '1e30', 'outcommod': 'u_ore', 'outrecipe': 'r_u_ore', 'throughput': '1e10'}}, 'name': 'mine'}, {'config': {'Separations': {'feed_commod_prefs': {'val': ['1.0', '10.0', '100.0']}, 'feed_commods': {'val': ['u_ore', 'u_ore1', 'u_ore2']}, 'feedbuf_size': '2e8', 'leftover_commod': 'waste', 'streams': {'item': {'commod': 'u_nat', 'info': {'buf_size': '150000', 'efficiencies': {'item': [{'comp': 'U', 'eff': '.99'}, {'comp': 'O', 'eff': '.99'}]}}}}, 'throughput': '2e8'}}, 'name': 'milling'}, {'config': {'Separations': {'feed_commod_prefs': {'val': '1.0'}, 'feed_commods': {'val': 'u_nat'}, 'feedbuf_size': '200000', 'leftover_commod': 'waste', 'streams': {'item': {'commod': 'uf6', 'info': {'buf_size': '200000', 'efficiencies': {'item': {'comp': 'U', 'eff': '.99'}}}}}, 'throughput': '200000'}}, 'name': 'conversion'}, {'config': {'Enrichment': {'feed_commod_prefs': {'val': ['1', '20']}, 'feed_commods': {'val': ['uf6', 'mil_uf6']}, 'feed_recipe': 'r_natl_u', 'max_feed_inventory': '100000', 'product_commod': 'civ_leu', 'swu_capacity': '27652.62531959284', 'tails_assay': '0.003', 'tails_commod': 'u_dep'}}, 'name': 'civ_enrichment'}, {'config': {'Storage': {'in_commods': {'val': 'u_dep'}, 'out_commods': {'val': 'u_dep_str'}, 'residence_time': '0'}}, 'name': 'civ_str_u_dep'}, {'config': {'Storage': {'in_commod_prefs': {'val': '1000'}, 'in_commods': {'val': 'civ_leu'}, 'in_recipe': 'r_uox', 'max_inv_size': '30000', 'out_commods': {'val': 'uox'}, 'residence_time': '1'}}, 'name': 'civ_fabrication'}, {'config': {'Reactor': {'assem_size': '29565', 'cycle_time': '30', 'fuel_incommods': {'val': 'uox'}, 'fuel_inrecipes': {'val': 'r_uox'}, 'fuel_outcommods': {'val': 'uox_spent'}, 'fuel_outrecipes': {'val': 'r_uox_spent'}, 'n_assem_batch': '1', 'n_assem_core': '3', 'power_cap': '900', 'refuel_time': '0'}}, 'lifetime': '960', 'name': 'civ_lwr'}, {'config': {'Storage': {'in_commods': {'val': 'uox_spent'}, 'out_commods': {'val': 'uox_spent_str'}, 'residence_time': '60'}}, 'name': 'civ_str_uox_spent'}, {'config': {'DeployInst': {'build_times': {'val': ['121', '121', '121', '145', '157', '169']}, 'n_build': {'val': ['1', '1', '1', '1', '1', '1']}, 'prototypes': {'val': ['civ_enrichment', 'civ_str_u_dep', 'civ_fabrication', 'civ_lwr', 'civ_str_uox_spent', 'civ_lwr']}}}, 'name': 'civilian_deployer'}, {'config': {'DeployInst': {'build_times': {'val': ['1', '1', '1']}, 'n_build': {'val': ['1', '1', '1']}, 'prototypes': {'val': ['mine', 
'milling', 'conversion']}}}, 'name': 'shared_deployer'}], 'recipe': [{'basis': 'mass', 'name': 'r_u_ore', 'nuclide': [{'comp': '0.0071', 'id': '922350000'}, {'comp': '0.9929', 'id': '922380000'}, {'comp': '999', 'id': '120240000'}]}, {'basis': 'mass', 'name': 'r_natl_u', 'nuclide': [{'comp': '0.0071', 'id': '922350000'}, {'comp': '0.9929', 'id': '922380000'}]}, {'basis': 'mass', 'name': 'r_uox', 'nuclide': [{'comp': '0.05', 'id': '922350000'}, {'comp': '0.95', 'id': '922380000'}]}, {'basis': 'mass', 'name': 'r_uox_spent', 'nuclide': [{'comp': '0.01', 'id': '922350000'}, {'comp': '0.94', 'id': '922380000'}, {'comp': '0.01', 'id': '942390000'}, {'comp': '0.001', 'id': '952410000'}, {'comp': '0.03', 'id': '551350000'}]}, {'basis': 'mass', 'name': 'r_mil_uox', 'nuclide': [{'comp': '0.0071', 'id': '922350000'}, {'comp': '0.9929', 'id': '922380000'}]}, {'basis': 'mass', 'name': 'r_mil_uox_spent', 'nuclide': [{'comp': '0.0071', 'id': '922350000'}, {'comp': '0.9919', 'id': '922380000'}, {'comp': '0.001', 'id': '942390000'}]}, {'basis': 'mass', 'name': 'r_mil_heu', 'nuclide': [{'comp': '0.90', 'id': '922350000'}, {'comp': '0.10', 'id': '922380000'}]}]}} | [
"[email protected]"
] | |
3b1d6dec03293efd9bdbed97ea34210432f2cbb3 | 228ebc9fb20f25dd3ed2a6959aac41fd31314e64 | /schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py | aac9e2bc91fc75d16de26dfd01bb98512f260f25 | [
"Apache-2.0"
] | permissive | orionnye/python-aiplatform | 746e3df0c75025582af38223829faeb2656dc653 | e3ea683bf754832340853a15bdb0a0662500a70f | refs/heads/main | 2023-08-03T06:14:50.689185 | 2021-09-24T03:24:14 | 2021-09-24T03:24:14 | 410,091,957 | 1 | 0 | Apache-2.0 | 2021-09-24T20:21:01 | 2021-09-24T20:21:00 | null | UTF-8 | Python | false | false | 1,593 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.cloud.aiplatform.v1beta1.schema.predict.instance',
manifest={
'ImageClassificationPredictionInstance',
},
)
class ImageClassificationPredictionInstance(proto.Message):
r"""Prediction input format for Image Classification.
Attributes:
content (str):
The image bytes or Cloud Storage URI to make
the prediction on.
mime_type (str):
The MIME type of the content of the image.
Only the images in below listed MIME types are
supported. - image/jpeg
- image/gif
- image/png
- image/webp
- image/bmp
- image/tiff
- image/vnd.microsoft.icon
"""
content = proto.Field(
proto.STRING,
number=1,
)
mime_type = proto.Field(
proto.STRING,
number=2,
)
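    # Illustrative construction (bucket path and image are placeholders):
    #
    #   instance = ImageClassificationPredictionInstance(
    #       content='gs://my-bucket/images/example.jpeg',
    #       mime_type='image/jpeg',
    #   )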
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"[email protected]"
] | |
93576199922965b76e6aae451eccce45bfffccf8 | 47175228ce25812549eb5203fc8b86b76fec6eb9 | /API_scripts/dfp/dfp_python3/v201408/custom_targeting_service/update_custom_targeting_values.py | ff879dd7d5d7d8f3c1653d851a0c2fe2b2a727d5 | [] | no_license | noelleli/documentation | c1efe9c2bdb169baa771e9c23d8f4e2683c2fe20 | a375698b4cf0776d52d3a9d3c17d20143bd252e1 | refs/heads/master | 2021-01-10T05:41:30.648343 | 2016-02-13T05:46:31 | 2016-02-13T05:46:31 | 51,477,460 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,604 | py | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example updates the display name of the custom targeting values
belonging to a custom targeting key.
To determine which custom targeting keys exist, run
get_all_custom_targeting_keys_and_values.py."""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
CUSTOM_TARGETING_KEY_ID = 'INSERT_CUSTOM_TARGETING_KEY_ID_HERE'
def main(client, key_id):
# Initialize appropriate service.
custom_targeting_service = client.GetService(
'CustomTargetingService', version='v201408')
values = [{
'key': 'keyId',
'value': {
'xsi_type': 'NumberValue',
'value': key_id
}
}]
query = 'WHERE customTargetingKeyId = :keyId'
statement = dfp.FilterStatement(query, values)
while True:
# Get custom targeting values by statement.
response = custom_targeting_service.getCustomTargetingValuesByStatement(
statement.ToStatement())
# Update each local custom targeting value object by changing its name.
if 'results' in response:
updated_values = []
for value in response['results']:
if not value['displayName']:
value['displayName'] = value['name']
value['displayName'] += ' (Deprecated)'
updated_values.append(value)
values = custom_targeting_service.updateCustomTargetingValues(
updated_values)
# Display results.
for value in values:
print(('Custom targeting value with id \'%s\', name \'%s\', and display'
' name \'%s\' was updated.'
% (value['id'], value['name'], value['displayName'])))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
if response['totalResultSetSize'] == 0:
print('No custom targeting values were updated.')
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, CUSTOM_TARGETING_KEY_ID)
| [
"[email protected]"
] | |
dc297c0b5e6036c2ea15e34e99425db785dd32f1 | f4434c85e3814b6347f8f8099c081ed4af5678a5 | /sdk/databoxedge/azure-mgmt-databoxedge/azure/mgmt/databoxedge/v2019_03_01/aio/operations/_operations.py | 00d071f258156e1def7090b7a8e641437310f9b5 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | yunhaoling/azure-sdk-for-python | 5da12a174a37672ac6ed8e3c1f863cb77010a506 | c4eb0ca1aadb76ad892114230473034830116362 | refs/heads/master | 2022-06-11T01:17:39.636461 | 2020-12-08T17:42:08 | 2020-12-08T17:42:08 | 177,675,796 | 1 | 0 | MIT | 2020-03-31T20:35:17 | 2019-03-25T22:43:40 | Python | UTF-8 | Python | false | false | 4,665 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
"""Operations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.databoxedge.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs
) -> AsyncIterable["_models.OperationsList"]:
"""List all the supported operations.
List all the supported operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationsList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.databoxedge.models.OperationsList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationsList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('OperationsList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.DataBoxEdge/operations'} # type: ignore
| [
"[email protected]"
] | |
4e68d6993b53a2ddd25f70df5669610a0df20cf0 | 952762035d7ffa7c46a2c7dc85063f08b1c4a975 | /2019/23/intcode.py | be46113b6662b2f5f65c6203efea2182ca7f225a | [] | no_license | pjot/advent-of-code | c8a59df25b3c1afa0e14fd22139b9ac3b789ff4d | 4b8b5c55c44dc8325caa2aeea7aa064a98738fd7 | refs/heads/master | 2023-03-10T20:22:44.724300 | 2023-03-04T21:01:02 | 2023-03-04T21:01:02 | 225,183,593 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,708 | py | def parse_file(filename):
with open(filename) as f:
code = f.readline()
ints = code.split(',')
program = [int(i) for i in ints]
return program
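# An Intcode instruction packs its opcode into the two low decimal digits and
# the per-parameter modes into the higher digits: 1002 -> opcode 2 with modes
# 0, 1, 0 for parameters 1-3 (read right to left).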
def parse_instruction(instruction):
full_instruction = '{:05d}'.format(instruction)
return (
instruction % 100,
int(full_instruction[2]),
int(full_instruction[1]),
int(full_instruction[0]),
)
class Computer:
def __init__(self, program, inputs):
self.position = 0
self.base = 0
self.inputs = inputs
self.tape = {k: v for k, v in enumerate(program)}
self.output = None
def iterate_once(self=None):
self.iterate()
return self.output
def run_to_output(self):
done = False
outputs = []
while not done:
done = self.iterate()
if not done:
outputs.append(self.output)
return outputs
def read(self, delta=0, mode=1):
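        # Parameter modes: 0 = position (dereference the pointed-to address),
        # 1 = immediate (the value itself), 2 = relative to self.base.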
if mode == 2:
return self.tape.get(
self.base + self.tape.get(self.position + delta, 0),
0
)
if mode == 1:
return self.tape.get(self.position + delta, 0)
if mode == 0:
return self.tape.get(
self.tape.get(self.position + delta, 0),
0
)
def write(self, delta, value, mode):
if mode == 2:
key = self.base + self.tape[self.position + delta]
if mode == 1:
key = self.position + delta
if mode == 0:
key = self.tape[self.position + delta]
self.tape[key] = value
def iterate(self):
while True:
instruction = self.read()
op_code, mode_a, mode_b, mode_c = parse_instruction(instruction)
if op_code == 99:
return True
if op_code == 1:
a = self.read(1, mode_a)
b = self.read(2, mode_b)
self.write(3, a + b, mode_c)
self.position += 4
if op_code == 2:
a = self.read(1, mode_a)
b = self.read(2, mode_b)
self.write(3, a * b, mode_c)
self.position += 4
if op_code == 3:
if self.inputs:
self.write(1, self.inputs.pop(0), mode_a)
self.position += 2
else:
return False
if op_code == 4:
self.output = self.read(1, mode_a)
self.position += 2
return False
if op_code == 5:
a = self.read(1, mode_a)
b = self.read(2, mode_b)
if a != 0:
self.position = b
else:
self.position += 3
if op_code == 6:
a = self.read(1, mode_a)
b = self.read(2, mode_b)
if a == 0:
self.position = b
else:
self.position += 3
if op_code == 7:
a = self.read(1, mode_a)
b = self.read(2, mode_b)
value = 1 if a < b else 0
self.write(3, value, mode_c)
self.position += 4
if op_code == 8:
a = self.read(1, mode_a)
b = self.read(2, mode_b)
value = 1 if a == b else 0
self.write(3, value, mode_c)
self.position += 4
if op_code == 9:
a = self.read(1, mode_a)
self.base += a
self.position += 2
| [
"[email protected]"
] | |
3f263a2f1abc45d83649b00dfc604ebb900b4cbd | fa7deca280e1443d5ca79e9910f295a668be14b0 | /compile.py | bc770262d10363c06f9b6fdd5e15420b7d681620 | [
"Apache-2.0"
] | permissive | tomas-cliqz/ichnaea | e210ba419eb2a69553594e3dd4dba2c56c88753a | 1e49cc694b1e9c850417ac093e81849b1886b19e | refs/heads/master | 2021-01-12T20:00:27.455686 | 2016-01-27T14:00:22 | 2016-01-27T14:00:22 | 49,965,385 | 0 | 0 | null | 2016-01-19T16:17:23 | 2016-01-19T16:17:22 | null | UTF-8 | Python | false | false | 1,686 | py | """
This script is used as part of the "make release" command used as part of
building an rpm of this entire virtualenv.
The rpm building process compiles all *.py files found anywhere in the
source tree, independent of whether or not these would actually be used.
It finds some Python files which aren't meant for the specific Python
version being build this way and would abort the build process.
We therefor specifically remove files from our site-packages directory,
which aren't meant for the current Python version and include incompatible
Python syntax.
"""
from compileall import compile_dir
from distutils.sysconfig import get_python_lib
import os
import os.path
import sys
# files excluded when run under Python 2.x
PYTHON_2_INCOMPATIBLE = [
'gunicorn/workers/_gaiohttp.py',
'linecache2/tests/inspect_fodder2.py',
]
# files excluded when run under Python 3.x
PYTHON_3_INCOMPATIBLE = [
'gevent/_util_py2.py',
]
def compile_files(path):
return compile_dir(path, maxlevels=50, quiet=True)
def remove_incompatible_files(path):
excludes = []
if sys.version_info < (3, 0):
excludes.extend(PYTHON_2_INCOMPATIBLE)
if sys.version_info >= (3, 0):
excludes.extend(PYTHON_3_INCOMPATIBLE)
for e in excludes:
fp = os.path.join(path, e)
for extension in ('', 'c', 'o'):
name = fp + extension
if os.path.exists(name):
print('Removing file %s with incompatible syntax.' % name)
os.remove(name)
def main():
sp = get_python_lib()
remove_incompatible_files(sp)
status = compile_files(sp)
sys.exit(not status)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
f745deca3f8bab4ea141b85d67a953beab121496 | 025fa245d4cbffdaa422287ed2f31c4d0442ee28 | /orders/api/serializers.py | 15a4a1cf958d28696d037af645fde921fd646007 | [
"MIT"
] | permissive | elcolie/zero-to-deploy | 01f346ca50b8ccb271faef23934abe6a487baca6 | 6191a33ef55af7c550c0e529a4e373bfe40bc014 | refs/heads/master | 2022-02-08T23:22:17.008555 | 2018-06-15T19:39:06 | 2018-06-15T19:39:06 | 137,083,690 | 0 | 0 | MIT | 2022-01-21T19:35:33 | 2018-06-12T14:28:01 | Python | UTF-8 | Python | false | false | 1,228 | py | from rest_framework import serializers
from menus.models import Menu
from order_items.api.serializers import ShortItemSerializer
from order_items.models import OrderItem
from orders.models import Order
class OrderSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name='api:order-detail')
customer = serializers.CurrentUserDefault()
order_items = ShortItemSerializer(read_only=True, many=True)
menus = serializers.PrimaryKeyRelatedField(queryset=Menu.objects.all(), many=True, write_only=True)
class Meta:
model = Order
fields = [
'url',
'customer',
'order_items',
'menus',
'sum',
'created_at',
'updated_at',
]
extra_kwargs = {
'created_at': {'read_only': True},
'updated_at': {'read_only': True},
}
def create(self, validated_data):
menus = validated_data.pop('menus')
order = Order.objects.create(customer=validated_data.get('customer'))
for item in menus:
OrderItem.objects.bulk_create([
OrderItem(order=order, menu=item)
])
return order
| [
"[email protected]"
] | |
fa003de4ee3de4e7bedcf10266cdb6a8c9a3f183 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02576/s945264148.py | c54a359be369f17e4f7c755d919335e3b79d3522 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 81 | py | n, x, t = map(int, input().split())
ans = t * (n//x)
print(ans+t if n%x else ans) | [
"[email protected]"
] | |
33d9a5f14e08e91b0a36cea7796270daf9f6d3fc | 71f19c14de97846193972830fbc9a4ec972d0ca9 | /website/management/commands/tweet_something.py | 9c69a942adf0593825c1b90f2c1043deb803ba42 | [] | no_license | wbt/govtrack.us-web | 87790050aaba83ca6cca16e26ec796603317e7da | c94c0dfdc809ad506a54108802a2328cc26ca97d | refs/heads/master | 2021-01-25T06:25:24.943845 | 2017-07-26T13:55:01 | 2017-07-26T13:55:01 | 93,572,164 | 0 | 0 | null | 2017-09-24T03:54:30 | 2017-06-06T23:15:59 | Python | UTF-8 | Python | false | false | 7,400 | py | #;encoding=utf8
from django.db.models import F
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.utils import timezone
from django.template.defaultfilters import truncatechars
from collections import defaultdict
import json, os, sys
from datetime import timedelta
class OkITweetedSomething(Exception):
pass
class Command(BaseCommand):
help = 'Tweets something interesting as @GovTrack.'
tweets_storage_fn = 'data/misc/tweets.json'
def handle(self, *args, **options):
# Construct client.
import twitter
self.twitter = twitter.Api(consumer_key=settings.TWITTER_OAUTH_TOKEN, consumer_secret=settings.TWITTER_OAUTH_TOKEN_SECRET,
access_token_key=settings.TWITTER_ACCESS_TOKEN, access_token_secret=settings.TWITTER_ACCESS_TOKEN_SECRET)
# What have we tweeted about before? Let's not tweet
# it again.
self.load_previous_tweets()
try:
# Send out a tweet.
self.tweet_something()
except OkITweetedSomething:
pass
finally:
# Save the updated cache of previous tweets for next time.
self.save_previous_tweets()
def load_previous_tweets(self):
if not os.path.exists(self.tweets_storage_fn):
self.previous_tweets = { }
else:
self.previous_tweets = json.loads(open(self.tweets_storage_fn).read())
def save_previous_tweets(self):
with open(self.tweets_storage_fn, 'w') as output:
json.dump(self.previous_tweets, output, sort_keys=True, indent=2)
###
def tweet_something(self):
# Find something interesting to tweet!
self.tweet_new_signed_laws_yday()
self.tweet_votes_yday(True)
self.tweet_new_bills_yday()
self.tweet_coming_up()
self.tweet_a_bill_action()
self.tweet_votes_yday(False)
###
def post_tweet(self, key, text, url):
if key in self.previous_tweets:
return
text = truncatechars(text, 140-1-23-3) + " " + url
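        # 140-char budget minus 1 (space), 23 (Twitter wraps URLs in t.co
        # links counted as 23 chars), and 3 for the emoji suffix added below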
text += u" 🏛️" # there's a civics building emoji there indicating to followers this is an automated tweet? the emoji is two characters (plus a space before it) as Twitter sees it
if "TEST" in os.environ:
# Don't tweet. Just print and exit.
print key, text
sys.exit(1)
tweet = self.twitter.PostUpdate(text, verify_status_length=False) # it does not do link shortening test correctly
self.previous_tweets[key] = {
"text": text,
"when": timezone.now().isoformat(),
"tweet": tweet.AsDict(),
}
#print(json.dumps(self.previous_tweets[key], indent=2))
raise OkITweetedSomething()
###
def tweet_new_signed_laws_yday(self):
# Because of possible data delays, don't tweet until the afternoon.
if timezone.now().hour < 12: return
# Tweet count of new laws enacted yesterday.
from bill.models import Bill, BillStatus
count = Bill.objects.filter(
current_status_date__gte=timezone.now().date()-timedelta(days=1),
current_status_date__lt=timezone.now().date(),
current_status=BillStatus.enacted_signed,
).count()
if count == 0: return
self.post_tweet(
"%s:newlaws" % timezone.now().date().isoformat(),
"%d new law%s signed by the President yesterday." % (
count,
"s were" if count != 1 else " was",
),
"https://www.govtrack.us/congress/bills/browse#current_status[]=28&sort=-current_status_date")
def tweet_votes_yday(self, if_major):
# Tweet count of votes yesterday, by vote type if there were any major votes.
from vote.models import Vote, VoteCategory
votes = Vote.objects.filter(
created__gte=timezone.now().date()-timedelta(days=1),
created__lt=timezone.now().date(),
)
if votes.count() == 0: return
has_major = len([v for v in votes if v.is_major]) > 0
if not has_major and if_major: return
if not has_major:
count = votes.count()
msg = "%d minor vote%s held by Congress yesterday." % (
count,
"s were" if count != 1 else " was",
)
else:
counts = defaultdict(lambda : 0)
for v in votes:
counts[v.category] += 1
counts = list(counts.items())
counts.sort(key = lambda kv : (VoteCategory.by_value(kv[0]).importance, -kv[1]))
msg = "Votes held by Congress yesterday: " + ", ".join(
str(value) + " on " + VoteCategory.by_value(key).label
for key, value in counts
)
self.post_tweet(
"%s:votes" % timezone.now().date().isoformat(),
msg,
"https://www.govtrack.us/congress/votes")
def tweet_new_bills_yday(self):
# Because of possible data delays, don't tweet until the afternoon.
if timezone.now().hour < 12: return
# Tweet count of new bills introduced yesterday.
from bill.models import Bill, BillStatus
count = Bill.objects.filter(
introduced_date__gte=timezone.now().date()-timedelta(days=1),
introduced_date__lt=timezone.now().date(),
).count()
if count == 0: return
self.post_tweet(
"%s:newbills" % timezone.now().date().isoformat(),
"%d bill%s introduced in Congress yesterday." % (
count,
"s were" if count != 1 else " was",
),
"https://www.govtrack.us/congress/bills/browse#sort=-introduced_date")
def tweet_coming_up(self):
# legislation posted as coming up within the last day
from bill.models import Bill
dhg_bills = Bill.objects.filter(docs_house_gov_postdate__gt=timezone.now().date()-timedelta(days=1)).filter(docs_house_gov_postdate__gt=F('current_status_date'))
sfs_bills = Bill.objects.filter(senate_floor_schedule_postdate__gt=timezone.now().date()-timedelta(days=1)).filter(senate_floor_schedule_postdate__gt=F('current_status_date'))
coming_up = list(dhg_bills | sfs_bills)
coming_up.sort(key = lambda b : b.docs_house_gov_postdate if (b.docs_house_gov_postdate and (not b.senate_floor_schedule_postdate or b.senate_floor_schedule_postdate < b.docs_house_gov_postdate)) else b.senate_floor_schedule_postdate)
for bill in coming_up:
text = "Coming up: " + bill.display_number
if bill.sponsor and bill.sponsor.twitterid: text += " by @" + bill.sponsor.twitterid
text += ": " + bill.title_no_number
self.post_tweet(
"%s:comingup:%s" % (timezone.now().date().isoformat(), bill.congressproject_id),
text,
"https://www.govtrack.us" + bill.get_absolute_url())
def tweet_a_bill_action(self):
# Tweet an interesting action on a bill.
from bill.models import Bill, BillStatus
from bill.status import get_bill_really_short_status_string
bills = list(Bill.objects.filter(
current_status_date__gte=timezone.now().date()-timedelta(days=2),
current_status_date__lt=timezone.now().date(),
).exclude(
current_status=BillStatus.introduced,
))
if len(bills) == 0: return
# Choose bill with the most salient status, breaking ties with the highest proscore.
bills.sort(key = lambda b : (BillStatus.by_value(b.current_status).sort_order, b.proscore()), reverse=True)
for bill in bills:
status = BillStatus.by_value(bill.current_status).xml_code
if "Providing for consideration" in bill.title: continue
text = get_bill_really_short_status_string(status)
if text == "": continue
bill_number = bill.display_number
if bill.sponsor and bill.sponsor.twitterid: bill_number += " by @" + bill.sponsor.twitterid
text = text % (bill_number, u"y’day")
text += " " + bill.title_no_number
self.post_tweet(
bill.current_status_date.isoformat() + ":bill:%s:status:%s" % (bill.congressproject_id, status),
text,
"https://www.govtrack.us" + bill.get_absolute_url())
| [
"[email protected]"
] | |
4a34314cf3ab3799f0e9db22f7bf9934c45a1f33 | d0ff9af885dc01de43ae7bdd2d26d6370c7b7ab5 | /unsup_vvs/neural_fit/brainscore_mask/compute_rdms_from_activations.py | 6d62b4160899f47a41da16e2251033e1f396d3d0 | [] | no_license | augix/unsup_vvs | a09f89c7d002006f59ffbe223c9469e959949e04 | 168ed0d068d27b7a7ca1dd5c1ebc28fbe84f8c7c | refs/heads/master | 2023-07-17T05:55:27.630844 | 2021-06-24T01:27:28 | 2021-06-24T01:27:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,860 | py | import pickle
import argparse
import os
import sys
import pdb
from tqdm import tqdm
import numpy as np
RESULT_CACHING_DIR = '/mnt/fs4/chengxuz/v4it_temp_results/.result_caching'
DEFAULT_SAVE_DIR = os.path.join(RESULT_CACHING_DIR, 'computed_rdms')
ACTIVATION_DIR = os.path.join(
RESULT_CACHING_DIR,
'model_tools.activations.core.ActivationsExtractorHelper._from_paths_stored')
ACTIVATION_PATTERN = 'activations'
def get_parser():
parser = argparse.ArgumentParser(
description='The script to compute RDMs from activations')
parser.add_argument(
'--save_dir', type=str,
default=DEFAULT_SAVE_DIR,
action='store',
help='Directory for saving rdm results')
return parser
def get_activation_pkls():
all_pkls = os.listdir(ACTIVATION_DIR)
all_pkls = list(filter(lambda name: ACTIVATION_PATTERN in name, all_pkls))
all_pkls = sorted(all_pkls)
all_pkls = [os.path.join(ACTIVATION_DIR, each_pkl) for each_pkl in all_pkls]
return all_pkls
def main():
parser = get_parser()
args = parser.parse_args()
all_pkls = get_activation_pkls()
os.system('mkdir -p ' + args.save_dir)
for each_pkl in tqdm(all_pkls):
save_path = os.path.join(
args.save_dir,
os.path.basename(each_pkl))
if os.path.exists(save_path):
continue
activations = pickle.load(open(each_pkl, 'rb'))['data']
all_layers = np.unique(activations.layer)
act_arr = np.asarray(activations)
layer_names = np.asarray(activations.layer)
_rdms = {}
for each_layer in all_layers:
_resp = act_arr[:, layer_names == each_layer]
_rdms[each_layer] = np.corrcoef(_resp)
pickle.dump(_rdms, open(save_path, 'wb'))
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
8a31fb2658e068c346166523f70684bc6035c3fc | 410f0d66049ca881dfdeb9b7f784bd70b1c3f6e7 | /bootea/bootea/pipelines.py | 1bcd7524443c973b2ed7217b2a7ddcccda6d3f42 | [] | no_license | ans2human/Scrappers | 1bdf5a1a4a34752c58fb18d45ac01e3cb54b5fe1 | cb2183d25a8af08284f2a6c1311b8da24c720b96 | refs/heads/master | 2020-03-19T20:41:01.244911 | 2018-06-11T11:27:17 | 2018-06-11T11:27:17 | 136,911,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,934 | py | from scrapy import log
from twisted.enterprise import adbapi
import time
import sys; sys.path.append("/users/user/appdata/local/programs/python/python36-32/lib/site-packages")
import MySQLdb.cursors
from bootea.items import test
class MySQLStorePipeline(object):
def __init__(self):
print ('init')
self.dbpool = adbapi.ConnectionPool('MySQLdb', db = 'usalogic_testdb', user='root', passwd='1234', cursorclass=MySQLdb.cursors.DictCursor, charset='utf8', use_unicode=True)
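        # adbapi.ConnectionPool runs each runInteraction() on Twisted's thread
        # pool, so blocking MySQLdb calls don't stall the crawler's reactor.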
def process_item(self, item, spider):
print('process')
query = self.dbpool.runInteraction(self._conditional_insert, item) #("""INSERT INTO Example_Movie (title, url, gross, release) VALUES (%s, %s, %s, %s)""", (item['title'].endcode('utf-8'), item['url'].encode('utf-8'), item['gross'].encode('utf-8'), item['release'].encode('utf-8')))
query.addErrback(self.handle_error)#self.conn.commit()
return item
def _conditional_insert(self, tx, item):
print ('conditional insert')
#Create record if doesn't exist
#all this block run on it's own thread
tx.execute("select * from test where producturl = %s", (item['producturl'], ))
result = tx.fetchone()
if result:
log.msg("Item already stored in db: %s" % item, level = log.DEBUG)
else:
tx.execute("insert into test (producturl, prodprice, prodname) values (%s, %s, %s)", [item['producturl'], item['prodprice'], item['prodname']])
log.msg("Item stored in db: %s" % item, level=log.DEBUG)
def handle_error(self, e):
print ('handle_error')
log.err(e)
# from scrapy import log
# from twisted.enterprise import adbapi
# import time
# import sys; sys.path.append("/users/user/appdata/local/programs/python/python36-32/lib/site-packages")
# import MySQLdb.cursors
# from bootea.items import test
# class BooteaPipeline(object):
# def __init__(self):
# self.dbpool = adbapi.ConnectionPool('MySQLdb', db='usalogic_testdb',
# user='root', passwd='1234', cursorclass=MySQLdb.cursors.DictCursor,
# charset='utf8', use_unicode=True)
# def process_item(self, item, spider):
# query = self.dbpool.runInteraction(self._conditional_insert, item)
# query.addErrback(self.handle_error)
# return item
# def _conditional_insert(self, tx, item):
# tx.execute("select * from test where producturl = %s", (item['producturl'], ))
# result = tx.fetchone()
# if result:
# log.msg("Item already stored in db: %s" % item, level=log.DEBUG)
# else:
# tx.execute("insert into test (producturl, prodname, prodprice) values (%s, %s, %s,)", [item['producturl'], item['prodname'], item['prodprice']])
# log.msg("Item stored in db: %s" % item, level=log.DEBUG)
# def handle_error(self, e):
# log.err(e)
| [
"[email protected]"
] | |
ecbbe380ee06e59502cd568e0d8911e8ee387e8b | ef72a7df3c39c215dd90ac5e72b164eb9d7da892 | /rpg/heroes/exp.py | 8580678d1bd1cda8e1e24f9779c8fce251493dcc | [] | no_license | thebmo/messing_around | d49a87fc1ff722428ea67bc710ca99ad287098bd | 4cb12e0b224cf7d1f93cb4ae6ff7603619fb7aa9 | refs/heads/master | 2021-01-13T02:18:50.799898 | 2015-04-08T01:12:41 | 2015-04-08T01:12:41 | 28,570,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,220 | py | exp_to_next_level = [
0,
29,
87,
174,
304,
499,
792,
1232,
1891,
2880,
4364,
6218,
8534,
11428,
15045,
19114,
23690,
28837,
34627,
41141,
48468,
56711,
65983,
76413,
88147,
101347,
116196,
132901,
151694,
172836,
196621,
223378,
253480,
287344,
325440,
368298,
416512,
470752,
531771,
600417,
677644,
764524,
862263,
960002,
1057741,
1155480,
1253219,
1350958,
1448697,
1546436,
1644175,
1741914,
1839653,
1937392,
2035131,
2132870,
2230609,
2328348,
2426087,
2523826,
2621565,
2719304,
2817043,
2914782,
3012521,
3110260,
3207999,
3305738,
3403477,
3501216,
3598955,
3696694,
3794433,
3892172,
3989911,
4087650,
4185389,
4283128,
4380867,
4478606,
4576345,
4674084,
4771823,
4869562,
4967301,
5065040,
5162779,
5260518,
5358257,
5455996,
5553735,
5651474,
5749213,
5846952,
5944691,
6042430,
6140169,
6237908,
6335647
] | [
"[email protected]"
] | |
3c415b6254a6a1dfd67e5c564bbeeab602bbbac5 | c3432a248c8a7a43425c0fe1691557c0936ab380 | /Greedy/1744_수묶기*.py | cb76fdcd5d14a3364ec30819632212850e7c8292 | [] | no_license | Parkyunhwan/BaekJoon | 13cb3af1f45212d7c418ecc4b927f42615b14a74 | 9a882c568f991c9fed3df45277f091626fcc2c94 | refs/heads/master | 2022-12-24T21:47:47.052967 | 2022-12-20T16:16:59 | 2022-12-20T16:16:59 | 232,264,447 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | import heapq
n = int(input())
sm = 0
plus = []
minus = []
zero = 0
one = 0
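# Greedy pairing for a maximum sum: multiply positives (>1) in descending
# pairs (padding an odd one out with 1), multiply negatives in ascending
# pairs (padding with 0 when available, else 1); standalone 1s are added.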
for _ in range(n):
val = int(input())
if val == 1:
one += 1
elif val > 0:
heapq.heappush(plus, -val)
elif val == 0:
zero += 1
else:
heapq.heappush(minus, val)
if len(plus) % 2:
heapq.heappush(plus, -1)
if len(minus) % 2:
if zero > 0:
heapq.heappush(minus, 0)
else:
heapq.heappush(minus, 1)
while plus:
val1 = heapq.heappop(plus)
val2 = heapq.heappop(plus)
sm += (val1 * val2)
while minus:
val1 = heapq.heappop(minus)
val2 = heapq.heappop(minus)
sm += (val1 * val2)
sm += one
print(sm)
| [
"[email protected]"
] | |
3042f2d03444d4ec8ed65c9bcad199f8c3f11f73 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2536/60761/235389.py | 3dece7a79508610db4ccd9b65cff8208cc2b29f7 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | airlines=input("")
airlines=list(map(str,airlines[2:-2].split("],[")))
result=[]
result.append("JFK")
i=0
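# Greedy itinerary reconstruction from "JFK": always fly to the
# lexicographically smallest unused destination. (Note: this sketch assumes
# the greedy choice never strands the traveller; a complete solution would
# backtrack or use Hierholzer's algorithm.)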
while(0<len(airlines)):
destination=[]
for airline in airlines:
if(airline.startswith('"'+result[i])):
destination.append(airline[7:-1])
destination.sort()
airlines.remove('"'+result[i]+'","'+destination[0]+'"')
result.append(destination[0])
i=i+1
print(result)
| [
"[email protected]"
] | |
047f80690a5099e9f1505b2dd7da347d7bd2adc1 | 04ae1836b9bc9d73d244f91b8f7fbf1bbc58ff29 | /378/Solution.py | 2933a83513c07b54670ecbf4838330642d1675d9 | [] | no_license | zhangruochi/leetcode | 6f739fde222c298bae1c68236d980bd29c33b1c6 | cefa2f08667de4d2973274de3ff29a31a7d25eda | refs/heads/master | 2022-07-16T23:40:20.458105 | 2022-06-02T18:25:35 | 2022-06-02T18:25:35 | 78,989,941 | 14 | 6 | null | null | null | null | UTF-8 | Python | false | false | 2,165 | py | """
Given a n x n matrix where each of the rows and columns are sorted in ascending order, find the kth smallest element in the matrix.
Note that it is the kth smallest element in the sorted order, not the kth distinct element.
Example:
matrix = [
[ 1, 5, 9],
[10, 11, 13],
[12, 13, 15]
],
k = 8,
return 13.
Note:
You may assume k is always valid, 1 ≤ k ≤ n^2.
"""
import heapq
class Solution:
    # time complexity O(n^2 log n) and space complexity O(n^2): flatten and sort
def kthSmallest(self, matrix, k):
"""
:type matrix: List[List[int]]
:type k: int
:rtype: int
"""
lists_ = []
for row in matrix:
lists_ += row
lists_.sort()
return lists_[k-1]
    # time complexity O(nk) and space complexity O(n): k rounds of a linear scan
def kthSmallest2(self, matrix, k):
"""
:type matrix: List[List[int]]
:type k: int
:rtype: int
"""
n = len(matrix)
pointers = [0] * n
count = 0
while True:
min_,min_index = float("inf"),-1
for index,point in enumerate(pointers):
if point < n:
tmp_min = matrix[index][point]
if tmp_min < min_:
min_ = tmp_min
min_index = index
pointers[min_index] += 1
count += 1
if count == k:
return min_
import heapq
class MyItem:
def __init__(self,num,row,column):
self.num = num
self.row = row
self.column = column
def __lt__(self,item):
return self.num < item.num
def __repr__(self):
return "{}".format(self.num)
class Solution:
def kthSmallest(self, matrix, k):
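        # n-way merge: seed a min-heap with the first row, pop k times, and
        # after each pop push the element directly below the popped cell.
        # Time O(n + k log n), space O(n).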
n = len(matrix)
heap = [ MyItem(matrix[0][j],0,j) for j in range(n)]
heapq.heapify(heap)
for i in range(k):
item = heapq.heappop(heap)
num, row, column = item.num, item.row, item.column
if row+1 < n:
heapq.heappush(heap,MyItem(matrix[row+1][column],row+1,column))
return num
| [
"[email protected]"
] | |
cabf56396409a2b8bbe2bba525dd0a4347f411d5 | 4af8e73d47535494a25e06d5ac693fdc60fc95f5 | /NLP_pytorch/04_pytorch_ignite/main.py | 3c143464dd83f6b08e51033562e87b446747ba97 | [] | no_license | woosa7/nbcc_projects | 4de2f846068bacdc73a6877d26dba93c483b1be3 | 0e7a30f58e554125d02451ab407f2a19c8c7b5f4 | refs/heads/main | 2023-06-16T16:19:51.975216 | 2021-07-16T05:18:53 | 2021-07-16T05:18:53 | 306,594,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,390 | py | import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from model import ImageClassifier
from trainer import Trainer
from data_loader import get_loaders
def define_argparser():
p = argparse.ArgumentParser()
p.add_argument('--model_fn', required=True)
p.add_argument('--gpu_id', type=int, default=0 if torch.cuda.is_available() else -1)
p.add_argument('--train_ratio', type=float, default=.8)
p.add_argument('--batch_size', type=int, default=256)
p.add_argument('--n_epochs', type=int, default=10)
p.add_argument('--verbose', type=int, default=2)
config = p.parse_args()
return config
def main(config):
# Set device based on user defined configuration.
device = torch.device('cpu') if config.gpu_id < 0 else torch.device('cuda:%d' % config.gpu_id)
train_loader, valid_loader, test_loader = get_loaders(config) # custom dataset & dataloader
print("Train:", len(train_loader.dataset))
print("Valid:", len(valid_loader.dataset))
print("Test:", len(test_loader.dataset))
model = ImageClassifier(28**2, 10).to(device)
optimizer = optim.Adam(model.parameters())
crit = nn.CrossEntropyLoss()
# Using Ignite
trainer = Trainer(config)
trainer.train(model, crit, optimizer, train_loader, valid_loader)
if __name__ == '__main__':
config = define_argparser()
main(config)
| [
"[email protected]"
] | |
71a26a87ecee866c893700324d40a1a2572e4a99 | 6eb58e32b469c37428185ab4456184905a5b4fb5 | /analysis_code/parse_ICD.py | c9e4102115862dbdf62d0a5bc4cdcd1bf75b3e03 | [] | no_license | rchenmit/mht_analysis | 0b8bfff7730df835975c7c41d65f007ad269e3a9 | 678d4419bdaed9ed9d0041df3a2cd8638074590f | refs/heads/master | 2020-04-06T03:40:41.577209 | 2015-01-12T00:14:48 | 2015-01-12T00:14:48 | 19,548,658 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,113 | py | ## Robert Chen
## Monday 5/12/2014
##
## read and process BMI file
##
##
import datetime as dt
import scipy as s
import pandas as pd
import numpy as np
## options
input_folder = '../../data/new_data_20140416/Data_20140409/'
output_dir = '../../data/new_data_20140416/Data_curated_RC/'
pickle_dir = '../analysis_output/pickle/'
bool_make_count_matrix_ALL = 0
bool_make_count_matrix_JD_CODE = 0
bool_make_count_matrix_JD_X_RANGE = 0
## Prepare data, read data
filename = input_folder + 'ICD_9_04082014.csv'
pd.set_option('display.line_width', 300)
df_ICD = pd.read_csv(filename, sep=',')
df_ICD['EVENT_DATE'] = pd.to_datetime(df_ICD['EVENT_DATE'])
## read in the ICD excel PheWAS file
pheWAS_xls_file = input_folder + 'ICD9-2-PheWAS.xls'
xls = pd.ExcelFile(pheWAS_xls_file)
df_pheWAS = xls.parse(xls.sheet_names[0])
## add columns to df_ICD for pheWAS: JD_CODE and JD_X_RANGE
df_ICD = df_ICD.merge(df_pheWAS, left_on = 'ICD_9_CODE', right_on='ICD9_CODE', how = 'left' )
## make a counts matrix
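## Strategy: for each unique code, count its occurrences per RUID, then
## outer-merge the per-code count columns into one wide RUID x code matrix
## (an incremental pivot; pandas crosstab/pivot_table would be an alternative).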
if bool_make_count_matrix_ALL:
unique_ICD_values = df_ICD.ICD_9_CODE.unique() #ARRAY with unique ICD codes as STRINGS
df_ICD_counts = pd.DataFrame(columns=['RUID'])
for icd in unique_ICD_values:
        if isinstance(icd, str) or isinstance(icd, unicode):
if s.mod(len(df_ICD_counts.columns), 100) == 0:
print len(df_ICD_counts.columns)
df_this_icd = df_ICD[df_ICD.ICD_9_CODE==icd][['RUID', 'ICD_9_CODE']]
df_this_icd[icd] = df_this_icd.groupby('RUID').transform('count')
df_this_icd = df_this_icd.drop( 'ICD_9_CODE', 1)
df_this_icd = df_this_icd.drop_duplicates()
            df_this_icd = df_this_icd.replace(np.nan, 0)
if len(df_ICD_counts) == 0:
df_ICD_counts = df_this_icd.copy()
else:
df_ICD_counts = pd.merge(df_ICD_counts, df_this_icd, left_on='RUID', right_on='RUID', how='outer')
df_ICD_counts.to_csv( output_dir + 'df_ICD_counts.csv', index = False)
if bool_make_count_matrix_JD_CODE:
unique_JD_values = df_ICD.JD_CODE.unique() #ARRAY with unique ICD codes as STRINGS
df_JD_counts = pd.DataFrame(columns=['RUID'])
print "JD_Counts, n= " + str(len(unique_JD_values))
for jd in unique_JD_values:
if isinstance(jd, str) or isinstance(jd, unicode):
if s.mod(len(df_JD_counts.columns), 100) == 0:
print len(df_JD_counts.columns)
df_this_jd = df_ICD[df_ICD.JD_CODE==jd][['RUID', 'JD_CODE']]
df_this_jd[jd] = df_this_jd.groupby('RUID').transform('count')
df_this_jd = df_this_jd.drop( 'JD_CODE', 1)
df_this_jd = df_this_jd.drop_duplicates()
            df_this_jd = df_this_jd.replace(np.nan, 0)
if len(df_JD_counts) == 0: #base case
df_JD_counts = df_this_jd.copy()
else:
df_JD_counts = pd.merge(df_JD_counts, df_this_jd, left_on='RUID', right_on='RUID', how='outer')
df_JD_counts.to_csv( output_dir + 'df_JD_counts.csv', index = False)
if bool_make_count_matrix_JD_X_RANGE:
unique_JD_X_RANGE_values = df_ICD.JD_X_RANGE.unique() #ARRAY with unique ICD codes as STRINGS
df_JD_RANGE_counts = pd.DataFrame(columns=['RUID'])
print "JD_X_RANGE Counts, n= " + str(len(unique_JD_X_RANGE_values))
for jd in unique_JD_X_RANGE_values:
if isinstance(jd, str) or isinstance(jd, unicode):
if s.mod(len(df_JD_RANGE_counts.columns), 100) == 0:
print len(df_JD_RANGE_counts.columns)
df_this_jd = df_ICD[df_ICD.JD_X_RANGE==jd][['RUID', 'JD_X_RANGE']]
df_this_jd[jd] = df_this_jd.groupby('RUID').transform('count')
df_this_jd = df_this_jd.drop( 'JD_X_RANGE', 1)
df_this_jd = df_this_jd.drop_duplicates()
            df_this_jd = df_this_jd.replace(np.nan, 0)
if len(df_JD_RANGE_counts) == 0: #base case
df_JD_RANGE_counts = df_this_jd.copy()
else:
df_JD_RANGE_counts = pd.merge(df_JD_RANGE_counts, df_this_jd, left_on='RUID', right_on='RUID', how='outer')
df_JD_RANGE_counts.to_csv( output_dir + 'df_JD_RANGE_counts.csv', index = False)
| [
"[email protected]"
] | |
581ff3c3ff8eac75bf2c32be001fe3da752ff030 | 87bbeac699af7fa4dc76592acecef52dead1f436 | /Adelphi Academic Calendar/skill/skill_env/Lib/site-packages/docutils/parsers/rst/languages/it.py | ad6085c6d6d78353bca04e8d50786cae5b9e5628 | [
"MIT",
"OpenSSL",
"bzip2-1.0.6",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"Python-2.0",
"TCL",
"LicenseRef-scancode-newlib-historical"
] | permissive | EnriqueGambra/Amazon-Alexa-Skill | 79ec8848605a0e1e13092a1f6163dd63573322ec | 198ed51bef555eee006041fef0bcbf5c955142d5 | refs/heads/master | 2022-12-02T01:01:48.419524 | 2019-10-23T21:45:49 | 2019-10-23T21:45:49 | 214,226,014 | 0 | 2 | MIT | 2022-11-27T06:15:00 | 2019-10-10T15:58:16 | Python | UTF-8 | Python | false | false | 3,267 | py | # $Id: it.py 7119 2011-09-02 13:00:23Z milde $
# Authors: Nicola Larosa <[email protected]>;
# Lele Gaifax <[email protected]>
# Copyright: This module has been placed in the public domain.
# Beware: the italian translation of the reStructuredText documentation
# at http://docit.bice.dyndns.org/static/ReST, in particular
# http://docit.bice.dyndns.org/static/ReST/ref/rst/directives.html, needs
# to be synced with the content of this file.
"""
Italian-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
'attenzione': 'attention',
'cautela': 'caution',
'code (translation required)': 'code',
'pericolo': 'danger',
'errore': 'error',
'suggerimento': 'hint',
'importante': 'important',
'nota': 'note',
'consiglio': 'tip',
'avvertenza': 'warning',
'ammonizione': 'admonition',
'riquadro': 'sidebar',
'argomento': 'topic',
'blocco-di-righe': 'line-block',
'blocco-interpretato': 'parsed-literal',
'rubrica': 'rubric',
'epigrafe': 'epigraph',
'punti-salienti': 'highlights',
'estratto-evidenziato': 'pull-quote',
'composito': 'compound',
'container (translation required)': 'container',
#'questions': 'questions',
#'qa': 'questions',
#'faq': 'questions',
'tabella': 'table',
'tabella-csv': 'csv-table',
'tabella-elenco': 'list-table',
'meta': 'meta',
'math (translation required)': 'math',
#'imagemap': 'imagemap',
'immagine': 'image',
'figura': 'figure',
'includi': 'include',
'grezzo': 'raw',
'sostituisci': 'replace',
'unicode': 'unicode',
    'data': 'date',
'classe': 'class',
'ruolo': 'role',
'ruolo-predefinito': 'default-role',
'titolo': 'title',
'indice': 'contents',
'contenuti': 'contents',
'seznum': 'sectnum',
'sezioni-autonumerate': 'sectnum',
'annota-riferimenti-esterni': 'target-notes',
'intestazione': 'header',
'piede-pagina': 'footer',
#'footnotes': 'footnotes',
#'citations': 'citations',
'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Italian name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
'abbreviazione': 'abbreviation',
'acronimo': 'acronym',
'code (translation required)': 'code',
'indice': 'index',
'deponente': 'subscript',
'esponente': 'superscript',
'riferimento-titolo': 'title-reference',
'riferimento-pep': 'pep-reference',
'riferimento-rfc': 'rfc-reference',
'enfasi': 'emphasis',
'forte': 'strong',
'letterale': 'literal',
'math (translation required)': 'math',
'riferimento-con-nome': 'named-reference',
'riferimento-anonimo': 'anonymous-reference',
'riferimento-nota': 'footnote-reference',
'riferimento-citazione': 'citation-reference',
'riferimento-sostituzione': 'substitution-reference',
'destinazione': 'target',
'riferimento-uri': 'uri-reference',
'grezzo': 'raw',}
"""Mapping of Italian role names to canonical role names for interpreted text.
"""
| [
"[email protected]"
] | |
3ec541908b733c963a38d71f7f5949c8f8a7327d | 465efab6e7b419d4493d09786a4b2d7a976f7a31 | /src/Universe/LevelProps/Decorator.py | d036547a63ad9d0c74b8c3f991b1315b75e5aedb | [] | no_license | dzz/kthuune | aa2cadcdfaed9a06b6384516be429575640a7896 | 2a8be25ec5303586e5a7e067c024d6e6ca171efa | refs/heads/master | 2021-01-24T07:42:41.426973 | 2018-07-29T06:21:34 | 2018-07-29T06:21:34 | 93,354,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,089 | py | from Newfoundland.Object import Object
from Beagle import API as BGL
class Decorator(Object):
textures = BGL.assets.get('KT-forest/animation/decorators')
def parse(od,df):
x1 = float(od["x"])
y1 = float(od["y"])
x2 = float(od["w"])
y2 = float(od["h"])
cx = (x1+x2) / 2.0
cy = (y1+y2) / 2.0
w = (x2-x1)*0.5
h = (y2-y1)*0.5
if "decorator_id" in od["meta"]:
texture = Decorator.textures[od["meta"]["decorator_id"]]
else:
texture = Decorator.textures[0]
if "self_lit" in od["meta"]:
if od["meta"]["self_lit"]:
light_type = Object.LightTypes.DYNAMIC_TEXTURE_OVERLAY
else:
light_type = Object.LightTypes.NONE
return Decorator(
p=[cx,cy],
size=[w,h],
scale_uv=[1.0,1.0],
tick_type = Object.TickTypes.STATIC,
visible = True,
buftarget = "floor",
texture = texture,
light_type = light_type
)
| [
"[email protected]"
] | |
c618214707b9fb6a6ca9236d529434d841c267c9 | 50afc0db7ccfc6c80e1d3877fc61fb67a2ba6eb7 | /challenge17(backOnTime)/solutions/FelipeAg.py | 87790a5c157315c30659f1835cb2ca4e79b5036b | [
"MIT"
] | permissive | banana-galaxy/challenges | 792caa05e7b8aa10aad8e04369fc06aaf05ff398 | 8655c14828607535a677e2bb18689681ee6312fa | refs/heads/master | 2022-12-26T23:58:12.660152 | 2020-10-06T13:38:04 | 2020-10-06T13:38:04 | 268,851,516 | 11 | 8 | MIT | 2020-09-22T21:21:30 | 2020-06-02T16:24:41 | Python | UTF-8 | Python | false | false | 190 | py | def solution(steps):
if len(steps) > 10:
return False
elif steps.count('n') == steps.count('s') and steps.count('e') == steps.count('w'):
return True
return False | [
"[email protected]"
] | |
5b2207888d5917f774beaa8e83fa97856d693717 | f6c69a7f7f1bbae5fd5473dfaac5ef5fad840d58 | /lib/datatools/dataclass/datacatalog.py | 61a0c4aa163e163193588df29a4266dfac7aa5a3 | [
"Apache-2.0"
] | permissive | JokerWDL/PyAnomaly | 8c5ca4ca705a1251c70ff1f36c908c8f6f75e7d8 | cf93437e5d7ae87fa916141cf4b5cc2e929b8199 | refs/heads/master | 2022-11-05T11:31:42.345422 | 2020-06-22T17:21:20 | 2020-06-22T17:21:20 | 274,295,638 | 1 | 0 | Apache-2.0 | 2020-06-23T03:04:32 | 2020-06-23T03:04:31 | null | UTF-8 | Python | false | false | 2,336 | py | '''
Refer to the detectron2's DatasetCatalog
'''
from typing import List
class DatasetCatalog(object):
"""
A catalog that stores information about the datasets and how to obtain them.
It contains a mapping from strings
(which are names that identify a dataset, e.g. "coco_2014_train")
to a function which parses the dataset and returns the samples in the
format of `list[dict]`.
The returned dicts should be in Detectron2 Dataset format (See DATASETS.md for details)
if used with the data loader functionalities in `data/build.py,data/detection_transform.py`.
The purpose of having this catalog is to make it easy to choose
different datasets, by just using the strings in the config.
"""
_REGISTERED = {}
@staticmethod
def register(name, func):
"""
Args:
name (str): the name that identifies a dataset, e.g. "coco_2014_train".
func (callable): a callable which takes no arguments and returns a list of dicts.
"""
assert callable(func), "You must register a function with `DatasetCatalog.register`!"
assert name not in DatasetCatalog._REGISTERED, "Dataset '{}' is already registered!".format(
name
)
DatasetCatalog._REGISTERED[name] = func
@staticmethod
def get(name, cfg, flag, aug):
"""
Call the registered function and return its results.
Args:
name (str): the name that identifies a dataset, e.g. "coco_2014_train".
Returns:
            list[dict]: dataset annotations.
"""
try:
f = DatasetCatalog._REGISTERED[name]
except KeyError:
raise KeyError(
"Dataset '{}' is not registered! Available datasets are: {}".format(
name, ", ".join(DatasetCatalog._REGISTERED.keys())
)
)
return f(cfg, flag, aug)
@staticmethod
def list() -> List[str]:
"""
List all registered datasets.
Returns:
list[str]
"""
return list(DatasetCatalog._REGISTERED.keys())
@staticmethod
def clear():
"""
Remove all registered dataset.
"""
DatasetCatalog._REGISTERED.clear() | [
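# Minimal usage sketch (the dataset name and loader here are hypothetical):
#   DatasetCatalog.register("my_dataset_train", my_loader_func)
#   samples = DatasetCatalog.get("my_dataset_train", cfg, flag, aug)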
"[email protected]"
] | |
b5ead447cfefeb3618026a45f0fd21cea7995513 | 73a0f661f1423d63e86489d4b2673f0103698aab | /python/oneflow/test/expensive/_internally_replaced_utils.py | 8276fa0ab10c37bd4effe408fa3f5694ab102d26 | [
"Apache-2.0"
] | permissive | Oneflow-Inc/oneflow | 4fc3e081e45db0242a465c4330d8bcc8b21ee924 | 0aab78ea24d4b1c784c30c57d33ec69fe5605e4a | refs/heads/master | 2023-08-25T16:58:30.576596 | 2023-08-22T14:15:46 | 2023-08-22T14:15:46 | 81,634,683 | 5,495 | 786 | Apache-2.0 | 2023-09-14T09:44:31 | 2017-02-11T06:09:53 | C++ | UTF-8 | Python | false | false | 2,197 | py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import importlib.machinery
def _download_file_from_remote_location(fpath: str, url: str) -> None:
pass
def _is_remote_location_available() -> bool:
return False
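# torch.hub.load_state_dict_from_url superseded torch.utils.model_zoo.load_url;
# fall back so older PyTorch releases keep working.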
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
def _get_extension_path(lib_name):
lib_dir = os.path.dirname(__file__)
if os.name == "nt":
# Register the main torchvision library location on the default DLL path
import ctypes
import sys
kernel32 = ctypes.WinDLL("kernel32.dll", use_last_error=True)
with_load_library_flags = hasattr(kernel32, "AddDllDirectory")
prev_error_mode = kernel32.SetErrorMode(0x0001)
if with_load_library_flags:
kernel32.AddDllDirectory.restype = ctypes.c_void_p
if sys.version_info >= (3, 8):
os.add_dll_directory(lib_dir)
elif with_load_library_flags:
res = kernel32.AddDllDirectory(lib_dir)
if res is None:
err = ctypes.WinError(ctypes.get_last_error())
err.strerror += f' Error adding "{lib_dir}" to the DLL directories.'
raise err
kernel32.SetErrorMode(prev_error_mode)
loader_details = (
importlib.machinery.ExtensionFileLoader,
importlib.machinery.EXTENSION_SUFFIXES,
)
extfinder = importlib.machinery.FileFinder(lib_dir, loader_details)
ext_specs = extfinder.find_spec(lib_name)
if ext_specs is None:
raise ImportError
return ext_specs.origin
| [
"[email protected]"
] | |
f9f83f196d52a47793819cfbd6af460d5fdce595 | 90c6262664d013d47e9a3a9194aa7a366d1cabc4 | /tests/storage/cases/test_KT1UCoFzRwpQhRg9BWz2QMNwzTud56fCdjSP_babylon.py | f8d8b822bf224b7b14e0e62f9d3dfc07c4a33180 | [
"MIT"
] | permissive | tqtezos/pytezos | 3942fdab7aa7851e9ea81350fa360180229ec082 | a4ac0b022d35d4c9f3062609d8ce09d584b5faa8 | refs/heads/master | 2021-07-10T12:24:24.069256 | 2020-04-04T12:46:24 | 2020-04-04T12:46:24 | 227,664,211 | 1 | 0 | MIT | 2020-12-30T16:44:56 | 2019-12-12T17:47:53 | Python | UTF-8 | Python | false | false | 1,170 | py | from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1UCoFzRwpQhRg9BWz2QMNwzTud56fCdjSP_babylon(TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.contract = get_data('storage/mainnet/KT1UCoFzRwpQhRg9BWz2QMNwzTud56fCdjSP_babylon.json')
def test_storage_encoding_KT1UCoFzRwpQhRg9BWz2QMNwzTud56fCdjSP_babylon(self):
type_expr = self.contract['script']['code'][1]
val_expr = self.contract['script']['storage']
schema = build_schema(type_expr)
decoded = decode_micheline(val_expr, type_expr, schema)
actual = encode_micheline(decoded, schema)
self.assertEqual(val_expr, actual)
def test_storage_schema_KT1UCoFzRwpQhRg9BWz2QMNwzTud56fCdjSP_babylon(self):
_ = build_schema(self.contract['script']['code'][0])
def test_storage_format_KT1UCoFzRwpQhRg9BWz2QMNwzTud56fCdjSP_babylon(self):
_ = micheline_to_michelson(self.contract['script']['code'])
_ = micheline_to_michelson(self.contract['script']['storage'])
| [
"[email protected]"
] | |
c5ff1437f0d351d49af69153cb81a3ca68b48a2c | f4475acdf01fa80ae3a638c19df30773cfd379dc | /listenclosely/admin.py | b7740fd874d120412af39807149d22c7247f7d97 | [
"BSD-3-Clause"
] | permissive | jlmadurga/listenclosely | ea48e7ea05f971ca4fc979f5f52d5a07ec74dcbb | d6df9110c3ed6fd337e0236cccbe4d931bf217b0 | refs/heads/master | 2023-01-07T13:27:30.024214 | 2016-03-12T12:40:29 | 2016-03-12T12:40:29 | 49,677,476 | 7 | 3 | BSD-3-Clause | 2022-12-26T20:23:37 | 2016-01-14T21:49:08 | Python | UTF-8 | Python | false | false | 203 | py | from django.contrib import admin
from listenclosely.models import Message, Chat, Agent, Asker
admin.site.register(Message)
admin.site.register(Chat)
admin.site.register(Agent)
admin.site.register(Asker) | [
"[email protected]"
] | |
c383989311b6a33537436038b4dedd0a24e43d79 | 658773cf775fd97c3cec3aca5f559500dec021bc | /controllers/asset.py | c93abf8e08e4c373f5688d022a96348e5dd4d844 | [
"MIT"
] | permissive | smeissner/ifrc | f3795474219d20fba5c68192f5d9b90006288e3e | 505eb6ffbb8fc32fdbbe63fdab4c19d87e53ca86 | refs/heads/master | 2021-01-18T10:43:55.847965 | 2012-10-07T22:43:15 | 2012-10-07T22:43:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,079 | py | # -*- coding: utf-8 -*-
"""
Asset Management Functionality
http://eden.sahanafoundation.org/wiki/BluePrint/Assets
"""
module = request.controller
resourcename = request.function
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
# -----------------------------------------------------------------------------
def index():
""" Module Home Page """
module_name = settings.modules[module].name_nice
response.title = module_name
return dict(module_name=module_name)
# -----------------------------------------------------------------------------
def create():
""" Redirect to asset/create """
redirect(URL(f="asset", args="create"))
# -----------------------------------------------------------------------------
def asset():
""" RESTful CRUD controller """
# Use the item() controller in this module to set options correctly
s3db.asset_asset.item_id.comment = S3AddResourceLink(f="item",
label=T("Add New Item"),
title=T("Item"),
tooltip=T("Type the name of an existing catalog item OR Click 'Add New Item' to add an item which is not in the catalog."))
# Defined in Model for use from Multiple Controllers for unified menus
return s3db.asset_controller()
# =============================================================================
def item():
""" RESTful CRUD controller """
# Filter to just Assets
table = s3db.supply_item
ctable = s3db.supply_item_category
s3.filter = (table.item_category_id == ctable.id) & \
(ctable.can_be_asset == True)
# Limit the Categories to just those with vehicles in
# - make category mandatory so that filter works
field = s3db.supply_item.item_category_id
field.requires = IS_ONE_OF(db,
"supply_item_category.id",
s3db.supply_item_category_represent,
sort=True,
filterby = "can_be_asset",
filter_opts = [True]
)
field.comment = S3AddResourceLink(f="item_category",
label=T("Add Item Category"),
title=T("Item Category"),
tooltip=T("Only Categories of type 'Vehicle' will be seen in the dropdown."))
# Defined in the Model for use from Multiple Controllers for unified menus
return s3db.supply_item_controller()
# =============================================================================
def item_category():
""" RESTful CRUD controller """
table = s3db.supply_item_category
# Filter to just Assets
s3.filter = (table.can_be_asset == True)
# Default to Assets
field = table.can_be_asset
field.readable = field.writable = False
field.default = True
return s3_rest_controller("supply", "item_category")
# END =========================================================================
| [
"[email protected]"
] | |
f7cb7a5de948cd36e56b54eee4c79406be49b77a | dffd7156da8b71f4a743ec77d05c8ba031988508 | /ac/abc109/abc109_b/11416688.py | a108bbb8ac35b9102f2c74dfe3e7b4513b27c333 | [] | no_license | e1810/kyopro | a3a9a2ee63bc178dfa110788745a208dead37da6 | 15cf27d9ecc70cf6d82212ca0c788e327371b2dd | refs/heads/master | 2021-11-10T16:53:23.246374 | 2021-02-06T16:29:09 | 2021-10-31T06:20:50 | 252,388,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py |
st = set()
cant = False
prev = "-"
for i in range(int(input())):
s = input()
if (prev!="-" and prev!=s[0]) or s in st: cant = True
prev = s[-1]
st.add(s)
print("YNeos"[cant::2])
| [
"[email protected]"
] | |
72a7cb385ba562e3c44eeb5951a6ad27b05b8072 | 992f6a7436a9755d13bfbf0e3e0d98daa7541f1a | /coresite/views.py | 3956a3f993453816a94d377858f59f39293cc9de | [
"MIT"
] | permissive | Klim314/argent_app | 838c3f6b2d15666670ea1e90ac0c23bdc0df50aa | 767a0a11646fc08fb7197a191348466c913fe360 | refs/heads/master | 2021-01-22T05:32:44.626421 | 2017-09-17T15:23:54 | 2017-09-17T15:23:54 | 102,282,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,300 | py | from django.shortcuts import render, reverse, HttpResponseRedirect, HttpResponse
from django.views import View
from argent_app.models import Room, InRoom
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class Register(View):
def post(self, request):
print(request.POST)
username, password = request.POST["username"], request.POST["password1"]
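        # Try to authenticate first; only create a new account when the
        # username is still free, otherwise bounce back to the register form.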
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
return HttpResponseRedirect(reverse("room_manage"))
else:
# if user does not exist, create
if User.objects.filter(username=username).exists():
return HttpResponseRedirect(reverse("register"),
context={"user_exists": True})
else:
user = User.objects.create_user(username, password=password)
login(request, user)
return HttpResponseRedirect(reverse("room_manage"))
def get(self, request):
return render(request, "registration/register.html", {"form": UserCreationForm()})
| [
"[email protected]"
] | |
4fe09e4033ab5248274e1eb4eca7d375acc4598d | b1ba5707a5cbe918d33bc2082b3eb4ff1378c060 | /SDPython/tests/test_sd/test_package.py | 21b1b208b6690f20449ad6c4d37e68781b3b36f3 | [] | no_license | qq781217732/SubstanceDev | 2eb1d9ed48d477cf70c7bfdac2103bb884e9204c | b9ffab0a1b8f3c01783259074940b2712a8142b8 | refs/heads/master | 2023-03-26T00:43:35.047305 | 2021-03-01T04:12:28 | 2021-03-01T04:12:28 | 342,539,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,105 | py | # ADOBE CONFIDENTIAL
#
# Copyright 2019 Adobe
# All Rights Reserved.
#
# NOTICE: Adobe permits you to use, modify, and distribute this file in
# accordance with the terms of the Adobe license agreement accompanying it.
# If you have received this file from a source other than Adobe,
# then your use, modification, or distribution of it requires the prior
# written permission of Adobe.
#
import unittest
import sd
from tests import tools
class TestPackage(unittest.TestCase):
@classmethod
def setUpClass(cls):
context = sd.getContext()
# Load the reference package
cls.sdPackage = tools.loadSDPackage(context, 'test_sdpackage.sbs')
# Load some other packages
cls.sdPackageTestNewContent = tools.loadSDPackage(context, 'test_write_content.sbs')
def testPackagesLoaded(self):
self.assertTrue(self.sdPackage, 'Fail to load package')
self.assertTrue(self.sdPackageTestNewContent, 'Fail to load package')
def test_SDPackage_getChildrenResources(self):
# Check Non Recursive mode
sbsResourceArray = self.sdPackage.getChildrenResources(False)
self.assertEqual(len(sbsResourceArray), 3)
# Check Recursive Mode
sbsResourceArray = self.sdPackage.getChildrenResources(True)
self.assertEqual(len(sbsResourceArray), 5)
def test_SDPackage_findResourceFromUrl(self):
# Check that a resource of the reference package can be retrieved
sbMDLSubGraph = self.sdPackage.findResourceFromUrl('folder0/mdl_sub_graph')
self.assertTrue(sbMDLSubGraph)
# Check that a resource in another can't be found in the reference package
sbPBRGraph = self.sdPackage.findResourceFromUrl('pbr_graph')
self.assertFalse(sbPBRGraph)
def test_SDPackage_getDependencies(self):
pkgDeps = self.sdPackage.getDependencies()
self.assertEqual(len(pkgDeps), 1)
firstPkgDep = pkgDeps[0]
self.assertTrue(len(firstPkgDep.getFilePath())>0)
self.assertTrue(firstPkgDep.getPackage())
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
e4944b1b43ee05a56b2e875b6344b52e18ed8f74 | 08ff71fec39604302c36d0a016147f0a57820765 | /zentral/contrib/monolith/views.py | 7d825ad49521e9bea382df96fe07f47f223e397b | [
"Apache-2.0"
] | permissive | secdragon/zentral | a20134cd5288b6689bfdd6528836e93229e48a03 | a000fb314ba514488df49dcc3d2106001948ea3b | refs/heads/master | 2020-03-25T11:11:50.871327 | 2018-07-09T06:52:10 | 2018-07-09T06:52:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52,388 | py | from itertools import chain
import json
import logging
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.exceptions import PermissionDenied, SuspiciousOperation
from django.core.urlresolvers import reverse_lazy
from django.http import (FileResponse,
Http404,
HttpResponse, HttpResponseForbidden, HttpResponseNotFound, HttpResponseRedirect,
JsonResponse)
from django.shortcuts import get_object_or_404, render
from django.utils.crypto import get_random_string
from django.urls import reverse
from django.views.generic import DetailView, ListView, TemplateView, View
from django.views.generic.edit import CreateView, DeleteView, FormView, UpdateView
from zentral.contrib.inventory.exceptions import EnrollmentSecretVerificationFailed
from zentral.contrib.inventory.forms import EnrollmentSecretForm
from zentral.contrib.inventory.models import EnrollmentSecret, MachineTag, MetaMachine
from zentral.contrib.inventory.utils import verify_enrollment_secret
from zentral.utils.api_views import (APIAuthError, make_secret, verify_secret,
SignedRequestHeaderJSONPostAPIView)
from zentral.utils.http import user_agent_and_ip_address_from_request
from .conf import monolith_conf
from .events import (post_monolith_cache_server_update_request,
post_monolith_enrollment_event,
post_monolith_munki_request, post_monolith_repository_updates,
post_monolith_sync_catalogs_request)
from .forms import (AddManifestCatalogForm, DeleteManifestCatalogForm,
AddManifestEnrollmentPackageForm,
AddManifestSubManifestForm,
CacheServersPostForm,
ConfigurationForm,
ConfigureCacheServerForm,
DeleteManifestSubManifestForm,
EnrollmentForm,
ManifestForm, ManifestPrinterForm, ManifestSearchForm,
PkgInfoSearchForm, UpdatePkgInfoCatalogForm,
SubManifestForm, SubManifestSearchForm,
SubManifestPkgInfoForm, SubManifestAttachmentForm, SubManifestScriptForm,
UploadPPDForm)
from .models import (MunkiNameError, parse_munki_name,
Catalog, CacheServer,
Configuration, EnrolledMachine, Enrollment,
Manifest, ManifestEnrollmentPackage, PkgInfo, PkgInfoName,
Printer, PrinterPPD,
SUB_MANIFEST_PKG_INFO_KEY_CHOICES, SubManifest, SubManifestAttachment, SubManifestPkgInfo)
from .osx_package.builder import MonolithZentralEnrollPkgBuilder
logger = logging.getLogger('zentral.contrib.monolith.views')
# repository sync configuration
class WebHookView(LoginRequiredMixin, TemplateView):
template_name = "monolith/webhook.html"
def get_context_data(self, **kwargs):
context = super(WebHookView, self).get_context_data(**kwargs)
context['monolith'] = True
context['api_host'] = self.request.get_host()
context['api_secret'] = make_secret('zentral.contrib.monolith')
return context
# configuration
class ConfigurationListView(LoginRequiredMixin, ListView):
model = Configuration
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['monolith'] = True
ctx["configurations_count"] = ctx["object_list"].count()
return ctx
class CreateConfigurationView(LoginRequiredMixin, CreateView):
model = Configuration
form_class = ConfigurationForm
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['monolith'] = True
return ctx
class ConfigurationView(LoginRequiredMixin, DetailView):
model = Configuration
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['monolith'] = True
enrollments = list(self.object.enrollment_set.select_related("secret").all().order_by("id"))
ctx["enrollments"] = enrollments
ctx["enrollments_count"] = len(enrollments)
return ctx
class UpdateConfigurationView(LoginRequiredMixin, UpdateView):
model = Configuration
form_class = ConfigurationForm
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['monolith'] = True
return ctx
# enrollment endpoint called by the postinstall script
class EnrollView(View):
def post(self, request, *args, **kwargs):
user_agent, ip = user_agent_and_ip_address_from_request(request)
try:
request_json = json.loads(request.body.decode("utf-8"))
secret = request_json["secret"]
serial_number = request_json["serial_number"]
uuid = request_json["uuid"]
es_request = verify_enrollment_secret(
"monolith_enrollment", secret,
user_agent, ip, serial_number, uuid
)
except (KeyError, ValueError, EnrollmentSecretVerificationFailed):
raise SuspiciousOperation
else:
# get or create enrolled machine
enrolled_machine, enrolled_machine_created = EnrolledMachine.objects.get_or_create(
enrollment=es_request.enrollment_secret.monolith_enrollment,
serial_number=serial_number,
defaults={"token": get_random_string(64)}
)
# apply enrollment secret tags
for tag in es_request.enrollment_secret.tags.all():
MachineTag.objects.get_or_create(serial_number=serial_number, tag=tag)
# post event
post_monolith_enrollment_event(serial_number, user_agent, ip,
{'action': "enrollment" if enrolled_machine_created else "re-enrollment"})
return JsonResponse({"token": enrolled_machine.token})
# pkg infos
class PkgInfosView(LoginRequiredMixin, TemplateView):
template_name = "monolith/pkg_info_list.html"
def get_context_data(self, **kwargs):
ctx = super(PkgInfosView, self).get_context_data(**kwargs)
form = PkgInfoSearchForm(self.request.GET)
form.is_valid()
ctx['form'] = form
ctx['name_number'], ctx['info_number'], ctx['pkg_names'] = PkgInfo.objects.alles(**form.cleaned_data)
if not form.is_initial():
bc = [(reverse("monolith:pkg_infos"), "Monolith pkg infos"),
(None, "Search")]
else:
bc = [(None, "Monolith pkg infos")]
ctx["breadcrumbs"] = bc
return ctx
class UpdatePkgInfoCatalogView(LoginRequiredMixin, UpdateView):
model = PkgInfo
form_class = UpdatePkgInfoCatalogForm
def form_valid(self, form):
old_catalogs = set(self.model.objects.get(pk=self.object.pk).catalogs.all())
response = super().form_valid(form)
new_catalogs = set(self.object.catalogs.all())
if old_catalogs != new_catalogs:
attr_diff = {}
removed = old_catalogs - new_catalogs
if removed:
attr_diff["removed"] = sorted(str(c) for c in removed)
added = new_catalogs - old_catalogs
if added:
attr_diff["added"] = sorted(str(c) for c in added)
post_monolith_repository_updates(monolith_conf.repository,
[{"pkg_info": {"name": self.object.name.name,
"version": self.object.version,
"diff": {"catalogs": attr_diff}},
"type": "pkg_info",
"action": "updated"}],
self.request)
return response
class PkgInfoNameView(LoginRequiredMixin, DetailView):
model = PkgInfoName
template_name = "monolith/pkg_info_name.html"
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
pkg_info_name = ctx["object"]
# sub manifests
sub_manifests = []
for smpi in pkg_info_name.submanifestpkginfo_set.select_related("sub_manifest").order_by("sub_manifest__name"):
sub_manifests.append((smpi.sub_manifest, smpi.get_key_display()))
ctx["sub_manifests"] = sub_manifests
# pkg infos
ctx["pkg_infos"] = list(pkg_info_name.pkginfo_set.select_related("name")
.prefetch_related("catalogs")
.filter(archived_at__isnull=True))
# to display update catalog links or not
ctx["manual_catalog_management"] = monolith_conf.repository.manual_catalog_management
return ctx
# PPDs
class PPDsView(LoginRequiredMixin, ListView):
model = PrinterPPD
class UploadPPDView(LoginRequiredMixin, CreateView):
model = PrinterPPD
form_class = UploadPPDForm
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["title"] = "Upload PPD file"
return ctx
class PPDView(LoginRequiredMixin, DetailView):
model = PrinterPPD
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["printers"] = list(ctx["object"].printer_set.filter(trashed_at__isnull=True))
return ctx
# catalogs
class CatalogsView(LoginRequiredMixin, ListView):
model = Catalog
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["manual_catalog_management"] = monolith_conf.repository.manual_catalog_management
if monolith_conf.repository.manual_catalog_management:
ctx["edit_catalog_view"] = "monolith:update_catalog"
else:
ctx["edit_catalog_view"] = "monolith:update_catalog_priority"
return ctx
class CatalogView(LoginRequiredMixin, DetailView):
model = Catalog
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
catalog = ctx["object"]
# edit view
if monolith_conf.repository.manual_catalog_management:
ctx["edit_catalog_view"] = "monolith:update_catalog"
else:
ctx["edit_catalog_view"] = "monolith:update_catalog_priority"
# manifests
manifests = []
for mc in (catalog.manifestcatalog_set.select_related("manifest__meta_business_unit")
.prefetch_related("tags")
.all()
.order_by("manifest__meta_business_unit__name")):
manifests.append((mc.manifest, mc.tags.all()))
ctx["manifests"] = manifests
# pkg infos
ctx["pkg_infos"] = list(catalog.pkginfo_set.filter(archived_at__isnull=True))
return ctx
class ManualCatalogManagementRequiredMixin(LoginRequiredMixin):
def dispatch(self, request, *args, **kwargs):
self.manual_catalog_management = monolith_conf.repository.manual_catalog_management
if not self.manual_catalog_management:
return HttpResponseForbidden("Automatic catalog management. "
"See configuration. "
"You can't create catalogs.")
return super().dispatch(request, *args, **kwargs)
class CreateCatalogView(ManualCatalogManagementRequiredMixin, CreateView):
model = Catalog
fields = ['name', 'priority']
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['title'] = "Create catalog"
return ctx
def form_valid(self, form):
response = super().form_valid(form)
post_monolith_repository_updates(monolith_conf.repository,
[{"catalog": {"name": self.object.name,
"id": self.object.id,
"priority": self.object.priority},
"type": "catalog",
"action": "added"}],
self.request)
return response
class UpdateCatalogMixin(object):
def form_valid(self, form):
before_object = self.model.objects.get(pk=self.object.pk)
before = {f: getattr(before_object, f) for f in self.fields}
response = super().form_valid(form)
diff = {}
for f in self.fields:
before_val = before[f]
after_val = getattr(self.object, f)
if after_val != before_val:
diff[f] = {"removed": before_val,
"added": after_val}
if diff:
post_monolith_repository_updates(monolith_conf.repository,
[{"catalog": {"name": self.object.name,
"id": self.object.id,
"diff": diff},
"type": "catalog",
"action": "updated"}],
self.request)
return response
class UpdateCatalogView(ManualCatalogManagementRequiredMixin, UpdateCatalogMixin, UpdateView):
model = Catalog
fields = ['name', 'priority']
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['title'] = "Update catalog {}".format(ctx["object"])
return ctx
class UpdateCatalogPriorityView(LoginRequiredMixin, UpdateCatalogMixin, UpdateView):
model = Catalog
fields = ['priority']
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['title'] = "Update catalog {} priority".format(ctx["object"])
return ctx
class DeleteCatalogView(LoginRequiredMixin, DeleteView):
model = Catalog
success_url = reverse_lazy("monolith:catalogs")
def get_object(self, queryset=None):
obj = super().get_object(queryset)
if not obj.can_be_deleted():
raise Http404("Catalog {} can't be deleted".format(obj))
return obj
def delete(self, request, *args, **kwargs):
response = super().delete(request, *args, **kwargs)
post_monolith_repository_updates(monolith_conf.repository,
[{"catalog": {"name": self.object.name},
"type": "catalog",
"action": "deleted"}],
request)
return response
# sub manifests
class SubManifestsView(LoginRequiredMixin, ListView):
model = SubManifest
template_name = "monolith/sub_manifest_list.html"
paginate_by = 10
def get(self, request, *args, **kwargs):
self.form = SubManifestSearchForm(request.GET)
self.form.is_valid()
return super().get(request, *args, **kwargs)
def get_queryset(self):
return self.form.get_queryset()
def get_context_data(self, **kwargs):
context = super(SubManifestsView, self).get_context_data(**kwargs)
context['monolith'] = True
context['form'] = self.form
# pagination
page = context['page_obj']
if page.has_next():
qd = self.request.GET.copy()
qd['page'] = page.next_page_number()
context['next_url'] = "?{}".format(qd.urlencode())
if page.has_previous():
qd = self.request.GET.copy()
qd['page'] = page.previous_page_number()
context['previous_url'] = "?{}".format(qd.urlencode())
return context
class CreateSubManifestView(LoginRequiredMixin, CreateView):
model = SubManifest
form_class = SubManifestForm
template_name = "monolith/edit_sub_manifest.html"
def get_context_data(self, **kwargs):
context = super(CreateSubManifestView, self).get_context_data(**kwargs)
context['monolith'] = True
return context
class SubManifestView(LoginRequiredMixin, DetailView):
model = SubManifest
template_name = "monolith/sub_manifest.html"
def get_context_data(self, **kwargs):
context = super(SubManifestView, self).get_context_data(**kwargs)
sub_manifest = context['object']
context['monolith'] = True
pkg_info_dict = sub_manifest.pkg_info_dict()
keys = pkg_info_dict.pop("keys")
sorted_keys = []
for key, _ in SUB_MANIFEST_PKG_INFO_KEY_CHOICES:
value = keys.get(key, None)
if value:
sorted_keys.append((value['key_display'], value['key_list']))
context["keys"] = sorted_keys
context.update(pkg_info_dict)
context['manifests'] = sub_manifest.manifests_with_tags()
return context
class UpdateSubManifestView(LoginRequiredMixin, UpdateView):
model = SubManifest
form_class = SubManifestForm
template_name = 'monolith/edit_sub_manifest.html'
def get_context_data(self, **kwargs):
context = super(UpdateSubManifestView, self).get_context_data(**kwargs)
context['monolith'] = True
return context
class DeleteSubManifestView(LoginRequiredMixin, DeleteView):
model = SubManifest
success_url = reverse_lazy("monolith:sub_manifests")
class SubManifestAddPkgInfoView(LoginRequiredMixin, FormView):
form_class = SubManifestPkgInfoForm
template_name = 'monolith/edit_sub_manifest_pkg_info.html'
def dispatch(self, request, *args, **kwargs):
self.sub_manifest = SubManifest.objects.get(pk=kwargs['pk'])
return super(SubManifestAddPkgInfoView, self).dispatch(request, *args, **kwargs)
def get_form_kwargs(self):
kwargs = super(SubManifestAddPkgInfoView, self).get_form_kwargs()
kwargs['sub_manifest'] = self.sub_manifest
return kwargs
def get_context_data(self, **kwargs):
context = super(SubManifestAddPkgInfoView, self).get_context_data(**kwargs)
context['monolith'] = True
context['sub_manifest'] = self.sub_manifest
return context
def form_valid(self, form):
smpi = form.save(commit=False)
smpi.sub_manifest = self.sub_manifest
smpi.save()
return HttpResponseRedirect(self.get_success_url())
def get_success_url(self):
return self.sub_manifest.get_absolute_url()
class DeleteSubManifestPkgInfoView(LoginRequiredMixin, DeleteView):
model = SubManifestPkgInfo
template_name = "monolith/delete_sub_manifest_pkg_info.html"
def get_context_data(self, **kwargs):
context = super(DeleteSubManifestPkgInfoView, self).get_context_data(**kwargs)
context['monolith'] = True
return context
def get_success_url(self):
return self.object.sub_manifest.get_absolute_url()
class SubManifestAddAttachmentView(LoginRequiredMixin, FormView):
form_class = SubManifestAttachmentForm
template_name = 'monolith/edit_sub_manifest_attachment.html'
def dispatch(self, request, *args, **kwargs):
self.sub_manifest = SubManifest.objects.get(pk=kwargs['pk'])
return super().dispatch(request, *args, **kwargs)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['sub_manifest'] = self.sub_manifest
return kwargs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['monolith'] = True
context['sub_manifest'] = self.sub_manifest
return context
def form_valid(self, form):
smpi = form.save(commit=False)
smpi.sub_manifest = self.sub_manifest
smpi.save()
return HttpResponseRedirect(self.get_success_url())
def get_success_url(self):
return self.sub_manifest.get_absolute_url()
class SubManifestAddScriptView(LoginRequiredMixin, FormView):
form_class = SubManifestScriptForm
template_name = 'monolith/edit_sub_manifest_script.html'
def dispatch(self, request, *args, **kwargs):
self.sub_manifest = SubManifest.objects.get(pk=kwargs['pk'])
return super().dispatch(request, *args, **kwargs)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['sub_manifest'] = self.sub_manifest
return kwargs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['monolith'] = True
context['sub_manifest'] = self.sub_manifest
return context
def form_valid(self, form):
smpi = form.save(commit=False)
smpi.sub_manifest = self.sub_manifest
smpi.save()
return HttpResponseRedirect(self.get_success_url())
def get_success_url(self):
return self.sub_manifest.get_absolute_url()
class SubManifestUpdateScriptView(LoginRequiredMixin, FormView):
form_class = SubManifestScriptForm
template_name = 'monolith/edit_sub_manifest_script.html'
def dispatch(self, request, *args, **kwargs):
self.sub_manifest = SubManifest.objects.get(pk=kwargs['sm_pk'])
self.script = SubManifestAttachment.objects.get(sub_manifest=self.sub_manifest, pk=kwargs["pk"])
return super().dispatch(request, *args, **kwargs)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['sub_manifest'] = self.sub_manifest
kwargs['script'] = self.script
kwargs['initial'] = {'name': self.script.name,
'key': self.script.key}
for attr in ('description', 'installcheck_script',
'postinstall_script', 'uninstall_script'):
kwargs['initial'][attr] = self.script.pkg_info.get(attr, "")
return kwargs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['monolith'] = True
context['sub_manifest'] = self.sub_manifest
context['script'] = self.script
return context
def form_valid(self, form):
smpi = form.save(commit=False)
smpi.sub_manifest = self.sub_manifest
smpi.save()
return HttpResponseRedirect(self.get_success_url())
def get_success_url(self):
return self.sub_manifest.get_absolute_url()
class DeleteSubManifestAttachmentView(LoginRequiredMixin, DeleteView):
model = SubManifestAttachment
template_name = "monolith/delete_sub_manifest_attachment.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['monolith'] = True
return context
def get_success_url(self):
return self.object.sub_manifest.get_absolute_url()
def delete(self, request, *args, **kwargs):
# TODO we can't just use the DeleteView delete method, but can we do better than that ?
self.object = self.get_object()
success_url = self.get_success_url()
SubManifestAttachment.objects.trash(self.object.sub_manifest, self.object.name)
return HttpResponseRedirect(success_url)
# manifests
class ManifestsView(LoginRequiredMixin, ListView):
model = Manifest
template_name = "monolith/manifest_list.html"
paginate_by = 10
def get(self, request, *args, **kwargs):
self.form = ManifestSearchForm(request.GET)
self.form.is_valid()
return super().get(request, *args, **kwargs)
def get_queryset(self):
return self.form.get_queryset()
def get_context_data(self, **kwargs):
context = super(ManifestsView, self).get_context_data(**kwargs)
context['monolith'] = True
context['form'] = self.form
# pagination
page = context['page_obj']
if page.has_next():
qd = self.request.GET.copy()
qd['page'] = page.next_page_number()
context['next_url'] = "?{}".format(qd.urlencode())
if page.has_previous():
qd = self.request.GET.copy()
qd['page'] = page.previous_page_number()
context['previous_url'] = "?{}".format(qd.urlencode())
return context
class CreateManifestView(LoginRequiredMixin, CreateView):
model = Manifest
form_class = ManifestForm
template_name = "monolith/edit_manifest.html"
def get_context_data(self, **kwargs):
context = super(CreateManifestView, self).get_context_data(**kwargs)
context['monolith'] = True
return context
class ManifestView(LoginRequiredMixin, DetailView):
model = Manifest
template_name = "monolith/manifest.html"
def get_context_data(self, **kwargs):
context = super(ManifestView, self).get_context_data(**kwargs)
manifest = context["object"]
context['monolith'] = True
context['enrollments'] = list(manifest.enrollment_set.all())
context['manifest_enrollment_packages'] = list(manifest.manifestenrollmentpackage_set.all())
context['manifest_enrollment_packages'].sort(key=lambda mep: (mep.get_name(), mep.id))
context['manifest_cache_servers'] = list(manifest.cacheserver_set.all().order_by("name"))
context['manifest_catalogs'] = list(manifest.manifestcatalog_set
.prefetch_related("tags")
.select_related("catalog").all())
context['manifest_printers'] = list(manifest.printer_set
.prefetch_related("tags")
.select_related("ppd")
.filter(trashed_at__isnull=True))
context['manifest_sub_manifests'] = list(manifest.manifestsubmanifest_set
.prefetch_related("tags")
.select_related("sub_manifest").all())
add_enrollment_package_path = reverse("monolith:add_manifest_enrollment_package", args=(manifest.id,))
context['add_enrollment_package_links'] = [
("{}?builder={}".format(add_enrollment_package_path, k),
v["class"].name) for k, v in monolith_conf.enrollment_package_builders.items()
]
context['add_enrollment_package_links'].sort(key=lambda t: t[1])
return context
class AddManifestEnrollmentView(LoginRequiredMixin, TemplateView):
template_name = "monolith/enrollment_form.html"
def dispatch(self, request, *args, **kwargs):
self.manifest = get_object_or_404(Manifest, pk=kwargs["pk"])
return super().dispatch(request, *args, **kwargs)
def get_forms(self):
secret_form_kwargs = {"prefix": "secret",
"meta_business_unit": self.manifest.meta_business_unit,
"initial": {"meta_business_unit": self.manifest.meta_business_unit}}
enrollment_form_kwargs = {"manifest": self.manifest,
"initial": {"manifest": self.manifest}}
if self.request.method == "POST":
secret_form_kwargs["data"] = self.request.POST
enrollment_form_kwargs["data"] = self.request.POST
return (EnrollmentSecretForm(**secret_form_kwargs),
EnrollmentForm(**enrollment_form_kwargs))
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['monolith'] = True
context["manifest"] = self.manifest
if "secret_form" not in kwargs or "enrollment_form" not in kwargs:
context["secret_form"], context["enrollment_form"] = self.get_forms()
return context
def forms_invalid(self, secret_form, enrollment_form):
return self.render_to_response(self.get_context_data(secret_form=secret_form,
enrollment_form=enrollment_form))
def forms_valid(self, secret_form, enrollment_form):
secret = secret_form.save()
enrollment = enrollment_form.save(commit=False)
enrollment.secret = secret
enrollment.manifest = self.manifest
enrollment.save()
return HttpResponseRedirect(enrollment.get_absolute_url())
def post(self, request, *args, **kwargs):
secret_form, enrollment_form = self.get_forms()
if secret_form.is_valid() and enrollment_form.is_valid():
return self.forms_valid(secret_form, enrollment_form)
else:
return self.forms_invalid(secret_form, enrollment_form)
class ManifestEnrollmentPackageView(LoginRequiredMixin, View):
def get(self, request, *args, **kwargs):
enrollment = get_object_or_404(Enrollment, pk=kwargs["pk"], manifest__pk=kwargs["manifest_pk"])
builder = MonolithZentralEnrollPkgBuilder(enrollment)
return builder.build_and_make_response()
# manifest catalogs
class BaseManifestM2MView(LoginRequiredMixin, FormView):
m2m_model = None
def dispatch(self, request, *args, **kwargs):
self.manifest = Manifest.objects.get(pk=kwargs['pk'])
if self.m2m_model and 'm2m_pk' in kwargs:
self.m2m_object = self.m2m_model.objects.get(pk=kwargs['m2m_pk'])
else:
self.m2m_object = None
return super(BaseManifestM2MView, self).dispatch(request, *args, **kwargs)
def get_form_kwargs(self):
kwargs = super(BaseManifestM2MView, self).get_form_kwargs()
kwargs['manifest'] = self.manifest
return kwargs
def get_context_data(self, **kwargs):
context = super(BaseManifestM2MView, self).get_context_data(**kwargs)
context['monolith'] = True
context['manifest'] = self.manifest
context['m2m_object'] = self.m2m_object
return context
def get_success_url(self):
return self.manifest.get_absolute_url()
def form_valid(self, form):
form.save()
return HttpResponseRedirect(self.get_success_url())
class AddManifestCatalogView(BaseManifestM2MView):
form_class = AddManifestCatalogForm
template_name = "monolith/add_manifest_catalog.html"
class DeleteManifestCatalogView(BaseManifestM2MView):
form_class = DeleteManifestCatalogForm
template_name = "monolith/delete_manifest_catalog.html"
m2m_model = Catalog
def get_initial(self):
return {'catalog': self.m2m_object}
# manifest enrollment packages
class BaseEditManifestEnrollmentPackageView(LoginRequiredMixin, TemplateView):
template_name = "monolith/manifest_enrollment_package_forms.html"
def dispatch(self, request, *args, **kwargs):
self.manifest = get_object_or_404(Manifest, pk=kwargs["pk"])
if "mep_pk" in kwargs:
self.manifest_enrollment_package = get_object_or_404(ManifestEnrollmentPackage,
manifest=self.manifest,
pk=kwargs["mep_pk"])
builder = self.manifest_enrollment_package.builder
self.builder_config = monolith_conf.enrollment_package_builders[builder]
self.builder_class = self.manifest_enrollment_package.builder_class
else:
self.manifest_enrollment_package = None
try:
self.builder = request.GET["builder"]
self.builder_config = monolith_conf.enrollment_package_builders[self.builder]
self.builder_class = self.builder_config["class"]
except KeyError:
raise Http404
return super().dispatch(request, *args, **kwargs)
def get_forms(self):
builder_form_kwargs = {
"prefix": "builder",
"update_for": self.builder_config["update_for"]
}
mep_form_kwargs = {
"prefix": "mep",
"manifest": self.manifest
}
if self.request.method == "POST":
for kwargs in (builder_form_kwargs, mep_form_kwargs):
kwargs["data"] = self.request.POST
if self.manifest_enrollment_package:
builder_form_kwargs["instance"] = self.manifest_enrollment_package.get_enrollment()
mep_form_kwargs["initial"] = {"tags": self.manifest_enrollment_package.tags.all()}
return (self.builder_class.form(**builder_form_kwargs),
AddManifestEnrollmentPackageForm(**mep_form_kwargs))
def forms_invalid(self, builder_form, mep_form):
return self.render_to_response(self.get_context_data(builder_form=builder_form,
mep_form=mep_form))
def get_context_data(self, **kwargs):
kwargs["manifest"] = self.manifest
if hasattr(self, "manifest_enrollment_package"):
kwargs["manifest_enrollment_package"] = self.manifest_enrollment_package
kwargs["builder_name"] = self.builder_class.name
if "builder_form" not in kwargs or "mep_form" not in kwargs:
kwargs["builder_form"], kwargs["mep_form"] = self.get_forms()
return super().get_context_data(**kwargs)
def post(self, request, *args, **kwargs):
builder_form, mep_form = self.get_forms()
if builder_form.is_valid() and mep_form.is_valid():
return self.forms_valid(builder_form, mep_form)
else:
return self.forms_invalid(builder_form, mep_form)
class AddManifestEnrollmentPackageView(BaseEditManifestEnrollmentPackageView):
def forms_valid(self, builder_form, mep_form):
# enrollment secret
enrollment_secret = EnrollmentSecret.objects.create(meta_business_unit=self.manifest.meta_business_unit)
# enrollment
enrollment = builder_form.save(commit=False)
enrollment.version = 0 # will be saved one extra time, and start at 1
enrollment.secret = enrollment_secret
enrollment.save()
# manifest enrollment package
mep = ManifestEnrollmentPackage.objects.create(
manifest=self.manifest,
builder=self.builder,
enrollment_pk=enrollment.pk,
version=0 # will be updated by the callback call in enrollment.save()
)
mep.tags = mep_form.cleaned_data["tags"]
# link from enrollment to manifest enrollment package, for config update propagation
enrollment.distributor = mep
enrollment.save() # bump mep version and build package via callback call
return HttpResponseRedirect(self.manifest.get_absolute_url())
class UpdateManifestEnrollmentPackageView(BaseEditManifestEnrollmentPackageView):
def forms_valid(self, builder_form, mep_form):
self.manifest_enrollment_package.tags = mep_form.cleaned_data["tags"]
self.manifest_enrollment_package.save()
builder_form.save() # bump mep version and build package via callback call
return HttpResponseRedirect(self.manifest.get_absolute_url())
class DeleteManifestEnrollmentPackageView(LoginRequiredMixin, TemplateView):
template_name = "monolith/delete_manifest_enrollment_package.html"
def dispatch(self, request, *args, **kwargs):
self.manifest_enrollment_package = get_object_or_404(
ManifestEnrollmentPackage,
manifest__id=kwargs["pk"], pk=kwargs["mep_pk"]
)
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['manifest_enrollment_package'] = self.manifest_enrollment_package
context['manifest'] = self.manifest_enrollment_package.manifest
return context
def post(self, request, *args, **kwargs):
redirect_url = self.manifest_enrollment_package.manifest.get_absolute_url()
self.manifest_enrollment_package.delete()
return HttpResponseRedirect(redirect_url)
# manifest printers
class AddManifestPrinterView(LoginRequiredMixin, CreateView):
model = Printer
form_class = ManifestPrinterForm
template_name = "monolith/manifest_printer_form.html"
def dispatch(self, request, *args, **kwargs):
self.manifest = get_object_or_404(Manifest, pk=kwargs["m_pk"])
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["manifest"] = self.manifest
return ctx
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['manifest'] = self.manifest
return kwargs
def form_valid(self, form):
printer = form.save(commit=False)
printer.manifest = self.manifest
printer.save()
form.save_m2m()
return HttpResponseRedirect("{}#printers".format(self.manifest.get_absolute_url()))
class UpdateManifestPrinterView(LoginRequiredMixin, UpdateView):
model = Printer
form_class = ManifestPrinterForm
template_name = "monolith/manifest_printer_form.html"
def dispatch(self, request, *args, **kwargs):
self.manifest = get_object_or_404(Manifest, pk=kwargs["m_pk"])
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["manifest"] = self.manifest
return ctx
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['manifest'] = self.manifest
return kwargs
def get_success_url(self):
return "{}#printers".format(self.manifest.get_absolute_url())
class DeleteManifestPrinterView(LoginRequiredMixin, DeleteView):
model = Printer
template_name = "monolith/delete_manifest_printer.html"
def dispatch(self, request, *args, **kwargs):
self.manifest = get_object_or_404(Manifest, pk=kwargs["m_pk"])
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["manifest"] = self.manifest
return ctx
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
self.object.mark_as_trashed()
return HttpResponseRedirect("{}#printers".format(self.manifest.get_absolute_url()))
# manifest sub manifests
class AddManifestSubManifestView(BaseManifestM2MView):
form_class = AddManifestSubManifestForm
template_name = "monolith/add_manifest_sub_manifest.html"
class DeleteManifestSubManifestView(BaseManifestM2MView):
form_class = DeleteManifestSubManifestForm
template_name = "monolith/delete_manifest_sub_manifest.html"
m2m_model = SubManifest
def get_initial(self):
return {'sub_manifest': self.m2m_object}
class ConfigureManifestCacheServerView(LoginRequiredMixin, FormView):
form_class = ConfigureCacheServerForm
template_name = "monolith/configure_manifest_cache_server.html"
def dispatch(self, request, *args, **kwargs):
self.manifest = get_object_or_404(Manifest, pk=kwargs["pk"])
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["monolith"] = True
ctx["manifest"] = self.manifest
return ctx
def form_valid(self, form):
ctx = self.get_context_data()
ctx["curl_command"] = form.build_curl_command(self.manifest)
return render(self.request, 'monolith/manifest_cache_server_setup.html', ctx)
class DeleteManifestCacheServerView(LoginRequiredMixin, View):
def post(self, request, *args, **kwargs):
cache_server = get_object_or_404(CacheServer, pk=kwargs["cs_pk"], manifest__pk=kwargs["pk"])
manifest = cache_server.manifest
cache_server.delete()
return HttpResponseRedirect("{}#cache-servers".format(manifest.get_absolute_url()))
# API
class SyncCatalogsView(SignedRequestHeaderJSONPostAPIView):
verify_module = "zentral.contrib.monolith"
def do_post(self, data):
post_monolith_sync_catalogs_request(self.user_agent, self.ip)
monolith_conf.repository.sync_catalogs()
return {'status': 0}
class CacheServersView(SignedRequestHeaderJSONPostAPIView):
verify_module = "zentral.contrib.monolith"
def do_post(self, data):
form = CacheServersPostForm(data)
if form.is_valid():
manifest = get_object_or_404(Manifest, meta_business_unit=self.business_unit.meta_business_unit)
cache_server = form.save(manifest, self.ip)
post_monolith_cache_server_update_request(self.user_agent, self.ip, cache_server=cache_server)
return {'status': 0}
else:
post_monolith_cache_server_update_request(self.user_agent, self.ip, errors=form.errors)
# TODO: JSON response with error code and form.errors.as_json()
raise SuspiciousOperation("Posted json data invalid")
class DownloadPrinterPPDView(View):
def get(self, request, *args, **kwargs):
try:
printer_ppd = PrinterPPD.objects.get_with_token(kwargs["token"])
except ValueError:
logger.error("Invalid token %s", kwargs["token"])
raise Http404
except PrinterPPD.DoesNotExist:
logger.warning("Could not find printer PPD with token %s", kwargs["token"])
raise Http404
else:
return FileResponse(printer_ppd.file)
# managedsoftwareupdate API
class MRBaseView(View):
def post_monolith_munki_request(self, **payload):
payload["manifest"] = {"id": self.manifest.id,
"name": str(self.manifest)}
post_monolith_munki_request(self.machine_serial_number, self.user_agent, self.ip, **payload)
def get_token(self, request):
try:
return request.META['HTTP_X_MONOLITH_TOKEN'].strip()
except (KeyError, AttributeError):
raise PermissionDenied("Could not read token header")
def verify_enrolled_machine_token(self, token):
"""Find the corresponding enrolled machine"""
try:
enrolled_machine = (EnrolledMachine.objects.select_related("enrollment__manifest__meta_business_unit")
.get(token=token))
except EnrolledMachine.DoesNotExist:
raise PermissionDenied("Enrolled machine does not exist")
else:
self.token_machine_serial_number = enrolled_machine.serial_number
self.manifest = enrolled_machine.enrollment.manifest
self.meta_business_unit = self.manifest.meta_business_unit
def verify_signed_token(self, token):
"""Verify the token signature"""
# TODO: deprecate and remove
try:
api_data = verify_secret(token, 'zentral.contrib.monolith')
except APIAuthError:
raise PermissionDenied("Invalid API secret")
else:
self.token_machine_serial_number = api_data.get("machine_serial_number")
self.meta_business_unit = api_data['business_unit'].meta_business_unit
self.manifest = get_object_or_404(Manifest, meta_business_unit=self.meta_business_unit)
def set_machine(self, request):
header_machine_serial_number = request.META.get("HTTP_X_ZENTRAL_SERIAL_NUMBER")
if header_machine_serial_number and \
self.token_machine_serial_number and \
header_machine_serial_number != self.token_machine_serial_number:
logger.warning("Serial number mismatch. header: %s, token: %s",
header_machine_serial_number,
self.token_machine_serial_number)
self.machine_serial_number = header_machine_serial_number or self.token_machine_serial_number
if not self.machine_serial_number:
raise PermissionDenied("Unknown machine serial number")
# machine extra infos
self.machine = MetaMachine(self.machine_serial_number)
self.tags = self.machine.tags
def authenticate(self, request):
        # initialise the attribute that set_machine() reads; the verify_*
        # methods below overwrite it
        self.token_machine_serial_number = None
token = self.get_token(request)
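        # Enrolled-machine tokens are plain random strings (see EnrollView),
        # whereas legacy signed secrets contain ':' separators, so the shape
        # of the token decides which verification path applies.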
if ":" not in token:
self.verify_enrolled_machine_token(token)
else:
self.verify_signed_token(token)
self.set_machine(request)
self.user_agent, self.ip = user_agent_and_ip_address_from_request(request)
def dispatch(self, request, *args, **kwargs):
self.authenticate(request)
return super().dispatch(request, *args, **kwargs)
class MRNameView(MRBaseView):
def get_request_args(self, name):
try:
model, key = parse_munki_name(name)
except MunkiNameError:
model = key = None
return model, key
def get(self, request, *args, **kwargs):
event_payload = {"type": self.event_payload_type}
model, key = self.get_request_args(kwargs["name"])
if model is None or key is None:
error = True
response = HttpResponseForbidden("No no no!")
else:
event_payload["subtype"] = model
response = self.do_get(model, key, event_payload)
if not response:
error = True
response = HttpResponseNotFound("Not found!")
else:
error = False
event_payload["error"] = error
self.post_monolith_munki_request(**event_payload)
return response
class MRCatalogView(MRNameView):
event_payload_type = "catalog"
def do_get(self, model, key, event_payload):
catalog_data = None
if model == "enrollment_catalog":
# intercept calls for special enrollment catalog
mbu_id = int(key)
if mbu_id == self.meta_business_unit.id:
catalog_data = self.manifest.serialize_enrollment_catalog(self.tags)
elif model == "printer_catalog":
# intercept calls for special printer catalog
mbu_id = int(key)
if mbu_id == self.meta_business_unit.id:
# do not filter with tags. need all the possible installs for autoremove.
catalog_data = self.manifest.serialize_printer_catalog()
elif model == "sub_manifest_catalog":
# intercept calls for sub manifest catalog
sm_id = int(key)
event_payload["sub_manifest"] = {"id": sm_id}
# verify machine access to sub manifest and respond
sub_manifest = self.manifest.sub_manifest(sm_id, self.tags)
if sub_manifest:
catalog_data = sub_manifest.serialize_catalog()
event_payload["sub_manifest"]["name"] = sub_manifest.name
elif model == "catalog":
# intercept calls for manifest catalog
c_id = int(key)
event_payload["catalog"] = {"id": c_id}
# verify machine access to catalog and respond
catalog = self.manifest.catalog(c_id, self.tags)
if catalog:
catalog_data = catalog.serialize()
event_payload["catalog"].update({"name": catalog.name,
"priority": catalog.priority})
if catalog_data:
return HttpResponse(catalog_data, content_type="application/xml")
class MRManifestView(MRNameView):
event_payload_type = "manifest"
def get_request_args(self, name):
model, key = super().get_request_args(name)
if model is None or key is None:
# Not a valid munki name.
# It is the first request for the main manifest.
model = "manifest"
key = self.manifest.id
return model, key
def do_get(self, model, key, event_payload):
manifest_data = None
if model == "manifest":
manifest_data = self.manifest.serialize(self.tags)
elif model == "sub_manifest":
sm_id = int(key)
# verify machine access to sub manifest and respond
sub_manifest = self.manifest.sub_manifest(sm_id, self.tags)
event_payload["sub_manifest"] = {"id": sm_id}
if sub_manifest:
event_payload["sub_manifest"]["name"] = sub_manifest.name
manifest_data = sub_manifest.serialize()
if manifest_data:
return HttpResponse(manifest_data, content_type="application/xml")
class MRPackageView(MRNameView):
event_payload_type = "package"
def do_get(self, model, key, event_payload):
if model == "enrollment_pkg":
# intercept calls for mbu enrollment packages
mep_id = int(key)
event_payload["manifest_enrollment_package"] = {"id": mep_id}
try:
mep = ManifestEnrollmentPackage.objects.get(manifest=self.manifest, pk=mep_id)
except ManifestEnrollmentPackage.DoesNotExist:
return
event_payload["manifest_enrollment_package"]["filename"] = mep.file.name
return FileResponse(mep.file)
elif model == "sub_manifest_attachment":
# intercept calls for sub manifest attachments
# the sma key is sub_manifest, name, version, but we encoded only sub_manifest id and sma id
# we need to recover the name before we can look for an active version.
sm_id, sma_id = key
event_payload["sub_manifest"] = {"id": sm_id}
event_payload["sub_manifest_attachment"] = {"req_id": sma_id}
try:
req_sma = SubManifestAttachment.objects.get(sub_manifest__id=sm_id, pk=sma_id)
except SubManifestAttachment.DoesNotExist:
return
event_payload["sub_manifest_attachment"]["name"] = req_sma.name
sub_manifest = self.manifest.sub_manifest(sm_id, self.tags)
if sub_manifest:
event_payload["sub_manifest"]["name"] = sub_manifest.name
try:
sma = SubManifestAttachment.objects.active().get(sub_manifest=sub_manifest,
name=req_sma.name)
except SubManifestAttachment.DoesNotExist:
pass
else:
event_payload["sub_manifest_attachment"].update({"id": sma.id,
"filename": sma.file.name})
return FileResponse(sma.file)
else:
return
elif model == "repository_package":
pk = int(key)
event_payload["repository_package"] = {"id": pk}
# TODO: cache
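            # serve the package only if its pkginfo is reachable for this
            # machine through the manifest: catalogs, enrollment packages,
            # printers or default managed installs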
for pkginfo in chain(self.manifest.pkginfos_with_deps_and_updates(self.tags),
self.manifest.enrollment_packages_pkginfo_deps(self.tags),
self.manifest.printers_pkginfo_deps(self.tags),
self.manifest.default_managed_installs_deps(self.tags)):
if pkginfo.pk == pk:
event_payload["repository_package"].update({"name": pkginfo.name.name,
"version": pkginfo.version})
cache_server = CacheServer.objects.get_current_for_manifest_and_ip(self.manifest, self.ip)
return monolith_conf.repository.make_munki_repository_response(
"pkgs", pkginfo.data["installer_item_location"],
cache_server=cache_server
)
class MRRedirectView(MRBaseView):
section = None
def get(self, request, *args, **kwargs):
name = kwargs["name"]
self.post_monolith_munki_request(type=self.section, name=name)
return monolith_conf.repository.make_munki_repository_response(self.section, name)
# === File: src/ta_lib/_vendor/tigerml/pyspark/eda/eda.py (repo: Seemant-tiger/housing-price-prediction) ===
"""Functions to carry out the EDA in spark framework."""
import gc
import logging # noqa
from collections import Counter
import holoviews as hv
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from hvplot import hvPlot
from pyspark.ml import Pipeline
from pyspark.ml.feature import StringIndexer, VectorAssembler
from pyspark.ml.stat import Correlation
from pyspark.sql import functions as F
from pyspark_dist_explore import distplot
from tigerml.core.plots.bokeh import add_to_secondary, finalize_axes_right
from tigerml.core.reports import create_report
from tigerml.pyspark.core import dp
from tigerml.pyspark.core.utils import (
append_file_to_path,
flatten_list,
time_now_readable,
)
_LOGGER = logging.getLogger(__name__)
def setanalyse(df1, df2, col, simplify=True, exceptions_only=False):
"""
Given two spark dataframes, returns a dictionary of set analysis.
A-B: set(A) - set(B)
B-A: set(B) - set(A)
AuB: A union B
A^B: A intersection B
    Parameters
    ----------
    df1, df2: pyspark.sql.DataFrame
        dataframes to be compared
    col: str
        name of the column whose values are compared
    simplify: bool, default True
        if True, returns the size of each set instead of the set itself
    exceptions_only: bool, default False
        if True, computes only A-B and B-A; if False, all four sets.
        True is more efficient for large sets or when only the
        exceptions are of interest.
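    Examples
    --------
    A minimal usage sketch; ``train_df``, ``test_df`` and the column name
    are assumptions for illustration:
    >>> res = setanalyse(train_df, test_df, "customer_id")
    >>> res["A-B"]  # count of values present only in train_df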
"""
    # extract the raw column values (r[0]) rather than Row objects so the
    # set operations compare values directly
    A = set(df1.select(col).rdd.map(lambda r: r[0]).collect())
    B = set(df2.select(col).rdd.map(lambda r: r[0]).collect())
    output = {"A-B": A - B, "B-A": B - A}
    # `~exceptions_only` is a bitwise NOT, which is truthy for both True and
    # False; `not` is the correct boolean negation here
    if not exceptions_only:
        output["AuB"] = A.union(B)
        output["A^B"] = A.intersection(B)
if simplify:
for key, value in output.items():
output[key] = len(value)
return output
# ---------------
# Health Report
# ---------------
def column_values_summary(data):
"""Summarise the column types, number of unique values, and percentage of unique values per column.
Parameters
----------
    data: pyspark.sql.DataFrame
    Returns
    -------
    df: pd.DataFrame
        one row each for data type, distinct count and distinct count (%),
        with one column per input column
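    Examples
    --------
    A hypothetical call, assuming ``df`` is a spark dataframe:
    >>> summary = column_values_summary(df)
    >>> summary.loc["Distinct count(%)"]  # cardinality share per column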
"""
# datatypes
a = pd.DataFrame({x.name: [x.dataType] for x in list(data.schema)})
# countDistinct
b = data.agg(*(F.countDistinct(F.col(c)).alias(c) for c in data.columns)).toPandas()
# percent of countDistinct over the entire len
c = round(b * 100 / data.count(), 2)
df = a.append(b)
df = df.append(c)
df.index = ["Data type", "Distinct count", "Distinct count(%)"]
return df
def get_datatypes(data):
"""List the numerical columns and non_numerical columns.
Parameters
----------
data: pyspark.sql.DataFrame
Returns
-------
numerical_columns: list(str)
list of numerical columns
non_numerical_columns: list(str)
list of non numerical columns
"""
numerical_columns = dp.list_numerical_columns(data)
    # cast to a list so the return type matches the documented signature
    non_numerical_columns = list(set(data.columns) - set(numerical_columns))
return numerical_columns, non_numerical_columns
def get_missing_values_summary(data):
"""Get a summary of the missing values.
Parameters
----------
data: pyspark.sql.DataFrame
Returns
-------
    df_missing: pd.DataFrame
        pandas dataframe (transposed) that contains the count of missing
        values per column
"""
df_missing = dp.identify_missing_values(data)
df_missing = df_missing.toPandas().T
return df_missing
def _missing_values(data):
"""Get a pandas dataframe with the information about missing values in the dataset.
Parameters
----------
data: pyspark.sql.DataFrame
Returns
-------
    df_missing: pd.DataFrame
        pandas dataframe with one row per variable: name, missing count and
        missing percentage
"""
no_of_rows = data.count()
    df = get_missing_values_summary(data)
df = df.reset_index()
df = df.rename(
columns=dict(zip(list(df.columns), ["Variable Name", "No of Missing"],)) # noqa
)
df["Percentage Missing"] = df["No of Missing"] / float(no_of_rows) * 100
return df
def get_health_analysis(
data, missing=True, data_types=True, duplicate_values=True, duplicate_columns=True
):
"""Get the summary of health analysis.
Parameters
----------
data: pyspark.sql.DataFrame
missing: bool, default is True.
data_types: bool, default is True.
    duplicate_values: bool, default is True.
    duplicate_columns: bool, default is True.
Returns
-------
dict_summary: dict
dictionary containing the summary of health analysis
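    Examples
    --------
    A hypothetical call, assuming ``df`` is a spark dataframe:
    >>> get_health_analysis(df)["missing"]
    {'Available': 98.2, 'Missing': 1.8}  # illustrative values only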
"""
dict_summary = {
"data_types": {},
"missing": {},
"duplicate_values": {},
"duplicate_columns": {},
}
row_count = data.count()
numerical_columns, non_numerical_columns = get_datatypes(data)
    # express the shares as percentages; note that `[a, b] * 100` would
    # repeat the list 100 times rather than scale its values
    numeric_list = [
        len(numerical_columns) / len(data.columns) * 100,
        len(non_numerical_columns) / len(data.columns) * 100,
    ]
    df_missing = get_missing_values_summary(data)
    # share of missing cells over all cells (rows x columns), not just rows
    per_ = df_missing.sum()[0] / (row_count * len(data.columns))
    missing_values = [(1 - per_) * 100, per_ * 100]
dict_summary["missing"] = {
"Available": missing_values[0],
"Missing": missing_values[1],
}
dict_summary["data_types"] = {"Numeric": numeric_list[0], "Others": numeric_list[1]}
duplicate_rows = (row_count - data.dropDuplicates().count()) / row_count
    duplicate_rows_list = [(1 - duplicate_rows) * 100, duplicate_rows * 100]
dict_summary["duplicate_values"] = {
"Unique": duplicate_rows_list[0],
"Duplicated": duplicate_rows_list[1],
}
# TBD duplicate columns
return dict_summary
def plot_health(
data, missing=True, data_types=True, duplicate_values=True, duplicate_columns=True
):
"""Get the health analysis plots.
Parameters
----------
data: pyspark.sql.DataFrame
missing: bool, default is True.
data_types: bool, default is True.
    duplicate_values: bool, default is True.
    duplicate_columns: bool, default is True.
Returns
-------
final_plot: HvPlot
HvPlot containing all the health plots of variables
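    Examples
    --------
    A hypothetical call, assuming ``df`` is a spark dataframe:
    >>> plot_health(df)  # one stacked bar each for dtypes, missing and duplicates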
"""
data_dict = get_health_analysis(
data, missing, data_types, duplicate_values, duplicate_columns
)
df_dict = {
"type": flatten_list([[x] * len(data_dict[x]) for x in data_dict.keys()]),
"labels": list(data_dict["data_types"].keys())
+ list(data_dict["missing"].keys())
+ list(data_dict["duplicate_values"].keys()),
"values": list(data_dict["data_types"].values())
+ list(data_dict["missing"].values())
+ list(data_dict["duplicate_values"].values()),
}
df = pd.DataFrame(df_dict)
df = df.set_index(["type", "labels"])
    # build one horizontal stacked bar per health metric and stack the
    # resulting plots vertically into a single layout
final_plot = None
for metric in df.index.get_level_values(0).unique():
plot = (
hvPlot(df.loc[metric].T)
.bar(stacked=True, title=metric, height=100, invert=True)
.opts(xticks=list([i for i in range(df.shape[1])]))
)
if final_plot:
final_plot += plot
else:
final_plot = plot
return final_plot.cols(1)
def missing_plot(data):
"""Get the Missing Variable plot.
    The function returns a bar plot counting the variables that fall in each
    missing-value bucket (No Missing, 0-5%, 5-10%, 10-20%, ..., 50-100%).
Parameters
----------
data: pyspark.sql.DataFrame
Returns
-------
f: `hvplot`
missing_plot returns a bar plot with the following axis:
X.axis - % of missing observation bucket
Y.axis - Number of variables
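    Examples
    --------
    A hypothetical call, assuming ``df`` is a spark dataframe:
    >>> missing_plot(df)  # bar chart of variable counts per missing bucket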
"""
# plt.close('all')
df = data
missing_values = _missing_values(data)
break_value = [0, 5, 10, 20, 30, 40, 50, 100]
lab_value = ["0-5%", "5-10%", "10-20%", "20-30%", "30-40%", "40-50%", "50-100%"]
cuts = pd.cut(
missing_values["Percentage Missing"],
bins=break_value,
labels=lab_value,
right=True,
)
cuts = cuts.value_counts().reindex(lab_value)
remaining_cols = len(df.columns) - cuts.sum()
cuts = pd.concat([pd.Series([remaining_cols], index=["No Missing"]), cuts])
plot = hvPlot(cuts).bar(
rot=0,
title="Missing Variables",
xlabel="# of missing observations",
ylabel="# of variables",
)
return plot
def missing_value_summary(data):
"""Get the summary of missing values computed from `missing_values` function.
This function describes the share of missing values for each variable
in the dataset. If there are no missing values, "No Missing Values"
message is displayed, else a table containing the percentage of
missing for all variables with missing values are displayed
Parameters
----------
data: pyspark.sql.DataFrame
Returns
-------
    df: pandas.DataFrame or str
        table of variables with missing values, or the message
        "No Missing Values" when the data is complete
"""
df = _missing_values(data)
df = df.loc[df["No of Missing"] != 0].reset_index()
if df.empty:
return "No Missing Values"
else:
return df
def get_outliers(data, cols=None):
"""To get the summary of outliers.
Parameters
----------
data: pyspark.sql.DataFrame
cols: list(str)
list of numerical columns
Returns
-------
    outliers_dict_iqr: dict
        per-column (lower, upper) counts of values outside the IQR bounds
    outliers_dict_mean: dict
        per-column (lower, upper) counts of values outside the
        mean +/- 3*std bounds
"""
iqr_bounds = dp._calculate_outlier_bounds_iqr(data, cols)
mean_bounds = dp._calculate_outlier_bounds_sdv(data, cols)
outliers_dict_iqr = {}
outliers_dict_mean = {}
df = data
for col_ in cols:
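        # the four flag columns are overwritten on every iteration, so each
        # pass below aggregates the outlier counts for the current column only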
df = df.withColumn(
"lower_bound_iqr",
F.when(F.col(col_) < iqr_bounds[col_]["min_b"], 1).otherwise(0),
)
df = df.withColumn(
"upper_bound_iqr",
F.when(F.col(col_) > iqr_bounds[col_]["max_b"], 1).otherwise(0),
)
df = df.withColumn(
"lower_bound_mean",
F.when(F.col(col_) < mean_bounds[col_]["min_b"], 1).otherwise(0),
)
df = df.withColumn(
"upper_bound_mean",
F.when(F.col(col_) > mean_bounds[col_]["max_b"], 1).otherwise(0),
)
agg_df = (
df.select(
"lower_bound_iqr",
"upper_bound_iqr",
"lower_bound_mean",
"upper_bound_mean",
)
.groupBy()
.sum()
.collect()
)
outliers_dict_iqr[col_] = (agg_df[0][0], agg_df[0][1])
outliers_dict_mean[col_] = (agg_df[0][2], agg_df[0][3])
return outliers_dict_iqr, outliers_dict_mean
def get_outliers_table(data):
"""To get a dataframe with outlier analysis table.
Parameters
----------
data: pyspark.sql.DataFrame
Returns
-------
    outliers_df: pd.DataFrame
        one row per feature that has outliers, with counts for each bound
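    Examples
    --------
    A hypothetical call, assuming ``df`` is a spark dataframe:
    >>> get_outliers_table(df)  # features with 3-sigma and IQR outlier counts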
"""
if data is None:
raise ValueError("Data is not provided")
outlier_col_labels = [
"< (mean-3*std)",
"> (mean+3*std)",
"< (1stQ - 1.5 * IQR)",
"> (3rdQ + 1.5 * IQR)",
]
numerical_columns = dp.list_numerical_columns(data)
outliers_dict_iqr, outliers_dict_mean = get_outliers(data, numerical_columns)
outliers_df = pd.DataFrame.from_dict(outliers_dict_mean)
outliers_df = pd.concat([outliers_df, pd.DataFrame.from_dict(outliers_dict_iqr)])
outliers_df = outliers_df.reset_index(drop=True).T
outliers_df.rename(
columns=dict(zip(list(outliers_df.columns), outlier_col_labels)), inplace=True
)
outliers_sum = outliers_df.sum(axis=1)
outliers_df = outliers_df[outliers_sum > 0]
outliers_df.index.name = "feature"
return outliers_df
def health_analysis(data, save_as=None, save_path=""):
"""Data health report.
    Compiles outputs from plot_health, missing_plot, missing_value_summary
    and get_outliers_table into a report.
    Parameters
    ----------
    data : pyspark.sql.DataFrame
        data to be profiled
    save_as : str, default=None
        give ".html" for saving the report
    save_path : str, default=''
        location where the report is saved; defaults to the working directory
Examples
--------
>>> from tigerml.pyspark import health_analysis
>>> df = spark.read.parquet("train.parquet")
>>> health_analysis(df, save_as=".html", save_path="PySpark_Reports/")
"""
# data_shape = str(data.count())
health_analysis_report = {}
health_analysis_report.update({"health_plot": plot_health(data)})
health_analysis_report.update({"missing_plot": missing_plot(data)})
health_analysis_report.update(
{"missing_value_summary": missing_value_summary(data)}
)
health_analysis_report.update({"outliers_in_features": get_outliers_table(data)})
if save_as:
default_report_name = "health_analysis_report_at_{}".format(time_now_readable())
save_path = append_file_to_path(save_path, default_report_name + save_as)
create_report(
health_analysis_report, path=save_path, format=save_as,
)
return health_analysis_report
# ----------------
# Feature Analysis
# ----------------
def describe_data(data, columns):
"""Obtain the basic stats results and percentiles of numerical data.
Parameters
----------
data: pyspark.sql.DataFrame
    columns: list(str)
        the column names of the numerical variables
    Returns
    -------
    new_df: pd.DataFrame
        descriptive statistics (count, mean, std, min, max, samples,
        nunique and percentiles) of the numerical columns
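    Examples
    --------
    A hypothetical call; ``df`` and the column names are assumptions:
    >>> describe_data(df, ["price", "area"]).loc["price"]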
"""
percentiles = [25, 50, 75]
# array_list=[np.array([row[f"{x}"] for row in data.select(x).collect()],dtype='float') for x in columns]
temp = data.select(columns).toPandas()
array_list = [np.array(temp[f"{x}"], dtype="float") for x in columns]
array_list_samples = [
list(np.unique(np.unique(row[~np.isnan(row)])[-5:])) for row in array_list
]
array_list_unique = [len(np.unique(row[~np.isnan(row)])) for row in array_list]
percs = [np.nanpercentile(row, percentiles) for row in array_list]
percs = np.transpose(percs)
percs = pd.DataFrame(percs, columns=columns)
samples = pd.DataFrame([array_list_samples], columns=columns)
percs = pd.DataFrame([array_list_unique], columns=columns).append(percs)
percs = samples.append(percs)
percs["summary"] = ["samples", "nunique"] + [str(p) + "%" for p in percentiles]
spark_describe = data.describe().toPandas()
drop_cols = list(set(spark_describe.columns) - set(percs.columns))
spark_describe.drop(drop_cols, axis=1, inplace=True)
new_df = pd.concat([spark_describe, percs], ignore_index=True)
new_df = new_df.round(2)
new_df = new_df.T
new_df.columns = list(np.concatenate(new_df.loc[new_df.index == "summary"].values))
new_df.drop("summary", inplace=True)
return new_df
def describe_categoricaldata(data, cat_cols):
"""Obtain basic stats results and percentiles of categorical data.
Parameters
----------
data: pyspark.sql.DataFrame
    cat_cols: list(str)
        the column names of the categorical variables
    Returns
    -------
    samples: pd.DataFrame
        per-column summary with nunique, sample values, mode and mode frequency
"""
na_list = ["nan", "NA"]
# array_list=[np.array([row[f"{x}"] for row in data.select(x).collect()],dtype='str') for x in cat_cols]
temp = data.select(cat_cols).toPandas()
array_list = [np.array(temp[f"{x}"], dtype="str") for x in cat_cols]
array_list_samples = [
list(np.unique(np.unique([val_ for val_ in row if val_ not in na_list])[-5:]))
for row in array_list
]
array_list_unique = [
len(np.unique([val_ for val_ in row if val_ not in na_list]))
for row in array_list
]
samples = pd.DataFrame([array_list_samples], columns=cat_cols)
unique_df = pd.DataFrame([array_list_unique], columns=cat_cols)
samples = unique_df.append(samples)
mode_list = [
max(dict(Counter(row)), key=dict(Counter(row)).get) for row in array_list
]
samples = samples.append(pd.DataFrame([mode_list], columns=cat_cols))
mode_freq = [
dict(Counter(row)).get(mode_) for row, mode_ in zip(array_list, mode_list)
]
samples = samples.append(pd.DataFrame([mode_freq], columns=cat_cols))
samples["summary"] = ["nunique", "samples", "mode", "mode_freq"]
samples = samples.T
samples.columns = list(
np.concatenate(samples.loc[samples.index == "summary"].values)
)
samples.drop("summary", inplace=True)
return samples
def feature_analysis_table(data):
"""Get the descriptive statistics of the data.
Parameters
----------
data: pyspark.sql.DataFrame
Returns
-------
numerical_description: pd.DataFrame
contains the descriptive statistics of numerical variables.
categorical_description: pd.DataFrame
contains the descriptive statistics of categorical variables.
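    Examples
    --------
    A hypothetical call, assuming ``df`` is a spark dataframe:
    >>> num_desc, cat_desc = feature_analysis_table(df)
    >>> num_desc.head()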
"""
numerical_columns = dp.list_numerical_columns(data)
numerical_description = describe_data(data, numerical_columns)
categorical_columns = dp.list_categorical_columns(data)
categorical_description = describe_categoricaldata(data, categorical_columns)
return numerical_description, categorical_description
def density_plots_numerical(data):
"""Get the densisty plots of the numerical variables.
Parameters
----------
data: pyspark.sql.DataFrame
"""
num_cols = dp.list_numerical_columns(data)
fig, axes = plt.subplots(nrows=int(np.ceil(len(num_cols) / 2)), ncols=2)
fig.set_size_inches(20, 20)
    # flatten works for both 1-D (single row) and 2-D axes arrays, unlike
    # np.concatenate, which fails when subplots returns a single row
    axes = np.array(axes).flatten()
plots_dict = {}
for index_, col_ in enumerate(num_cols):
plot_ = distplot(axes[index_], [data.select(col_)], bins=40) # noqa
axes[index_].set_title(f"distribution of {col_}")
axes[index_].legend()
plots_dict.update({col_: plot_})
return plots_dict
def non_numeric_frequency_plots(data, cols):
"""Get a dictionary of interactive frequency plots and summary table for non-numeric cols.
Parameters
----------
data: pyspark.sql.DataFrame
    cols: list(str)
        non-numeric columns to plot
    Returns
    -------
    plots_dict: dict
        mapping of column name to a layout of its frequency plot and summary table
"""
plots_dict = {}
for col_ in sorted(cols):
series = data.select(col_)
summary_df = series.describe().toPandas().T.round(2)
summary_df.columns = list(
np.concatenate(summary_df.loc[summary_df.index == "summary"].values)
)
summary_df.drop("summary", inplace=True)
summary_table = hvPlot(summary_df).table(
columns=list(summary_df.columns), height=60, width=600
)
freq_plot = hvPlot(
series.toPandas()[col_].value_counts().head(20).sort_values(ascending=True)
).bar(title="Frequency Plot for {}".format(col_), invert=True, width=600)
plots_dict.update({col_: (freq_plot + summary_table).cols(1)})
return plots_dict
def density_plots(data, cols):
"""Get a dict of interactive density plots and numeric summary.
Parameters
----------
data: pyspark.sql.DataFrame
cols: list
default: empty, takes the requested columns in the given dataframe.
Returns
-------
plots_dict: dict
for all the requested numeric columns defined in the list if it is not empty else all non-numeric from the given data.
"""
plots_dict = {}
for col_ in sorted(cols):
series = data.select(col_)
summary_df = series.describe().toPandas().T.round(2)
summary_df.columns = list(
np.concatenate(summary_df.loc[summary_df.index == "summary"].values)
)
summary_df.drop("summary", inplace=True)
summary_table = hvPlot(summary_df).table(
columns=list(summary_df.columns), height=60, width=600
)
try:
hist_plot = hv.Histogram(np.histogram(series.toPandas(), bins=20))
density_plot = hvPlot(series.toPandas()).kde(
title="Density Plot for {}".format(col_), width=600
)
hooks = [add_to_secondary, finalize_axes_right]
complete_plot = hist_plot.options(
color="#00fff0", xlabel=col_
) * density_plot.options(hooks=hooks)
plots_dict[col_] = (complete_plot + summary_table).cols(1)
except Exception as e:
plots_dict[col_] = f"Could not generate. Error - {e}"
return plots_dict
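# Usage sketch (hypothetical columns): one histogram + KDE per column on a
# twin axis, each followed by a one-row summary table.
#   plots = density_plots(df, ["age", "income"])
#   plots["age"]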
def non_numeric_frequency_plots_v2(data, cols):
"""Get a holoviews layout of interactive frequency plots and summary table for non-numeric cols.
Parameters
----------
data: pyspark.sql.DataFrame
cols: list(str)
default: empty, takes the requested columns in the given dataframe.
Returns
-------
plot: holoviews.core.layout.Layout
for all the non-numeric required columns in the list if it is not empty else all non-numeric from the given data.
"""
series = data.select(cols)
summary_df = series.describe().toPandas().T.round(2)
summary_df.columns = list(
np.concatenate(summary_df.loc[summary_df.index == "summary"].values)
)
summary_df.drop("summary", inplace=True)
summary_table = hvPlot(summary_df).table(
columns=list(summary_df.columns), height=60, width=600
)
freq_plot = hvPlot(
series.toPandas()
.apply(lambda x: x.value_counts().head(20).sort_values(ascending=True))
.T.reset_index()
.melt(id_vars="index")
.dropna()
.reset_index()
.drop(columns=["level_0"])
).bar(
invert=True,
width=600,
x="variable",
y=["value"],
subplots=True,
by="index",
shared_axes=False,
)
plots = (freq_plot + summary_table).cols(1)
return plots
def density_plots_v2(data, cols):
"""Get a holoviews layout of interactive density plots.
A numeric summary for the given columns or all numeric columns for the given data is also provided.
Parameters
----------
data: pyspark.sql.DataFrame
cols: list
default: empty, takes the requested columns in the given dataframe.
Returns
-------
plots: holoviews.core.layout.Layout
for all the requested numeric columns defined in the list if it is not empty else all non-numeric from the given data.
"""
series = data.select(cols)
summary_df = series.describe().toPandas().T.round(2)
summary_df.columns = list(
np.concatenate(summary_df.loc[summary_df.index == "summary"].values)
)
summary_df.drop("summary", inplace=True)
summary_table = hvPlot(summary_df.loc[cols]).table(
columns=list(summary_df.columns),
height=60,
width=600,
subplots=True,
shared_axes=False,
)
density_plot = hvPlot(series.toPandas()[cols]).kde(
width=600, subplots=True, shared_axes=False
)
plots = (density_plot + summary_table).cols(1)
return plots
def feature_density_plots(data, num_cols=[], cat_cols=[]):
"""Get density plots and bar plots for numerical and categorical columns respectively.
Parameters
----------
data: pyspark.sql.DataFrame
Returns
-------
numerical_plots: dict
dict containing the density plots of numerical variables.
categorical_plots:
dict containing the bar plots of categorical variables
"""
    categorical_cols = dp.list_categorical_columns(data)
    if not cat_cols:
        cat_cols = categorical_cols
    else:
        for col in cat_cols:
            assert (  # noqa
                col in categorical_cols
            ), "{0} is not a valid categorical column in the input data".format(col)
    numerical_cols = dp.list_numerical_columns(data)
    if not num_cols:
        num_cols = numerical_cols
    else:
        for col in num_cols:
            assert (  # noqa
                col in numerical_cols
            ), "{0} is not a valid numerical column in the input data".format(col)
    numerical_plots = density_plots(data, num_cols)
    categorical_plots = non_numeric_frequency_plots(data, cat_cols)
    return numerical_plots, categorical_plots
def feature_analysis(data, save_as=None, save_path=""):
"""Univariate analysis for the columns.
Generate summary_stats, distributions and normality tests for columns.
Parameters
----------
data: pyspark.sql.DataFrame
save_as : str, default=None
Name of the report. By default name is auto generated from system timestamp.
save_path : str, default=''
Location where report to be saved. By default report saved in working directory.
Examples
--------
>>> from tigerml.pyspark.eda import feature_analysis
>>> df = spark.read.parquet("train.parquet")
>>> feature_analysis(df, save_as=".html", save_path="PySpark_Reports/")
"""
report = {}
numeric_variables, non_numeric_summary = feature_analysis_table(data)
report["summary_stats"] = {}
# report['summary_stats']['variable_summary'] = self.variable_summary()
report["summary_stats"]["numeric_variables"] = [numeric_variables]
report["summary_stats"]["non_numeric_variables"] = [non_numeric_summary]
report["distributions"] = {}
numeric_density, non_numeric_frequency = feature_density_plots(data)
report["distributions"]["numeric_variables"] = numeric_density
report["distributions"]["non_numeric_variables"] = non_numeric_frequency
if save_as:
default_report_name = "feature_analysis_report_at_{}".format(
time_now_readable()
)
save_path = append_file_to_path(save_path, default_report_name + save_as)
create_report(
report, path=save_path, format=save_as,
)
return report
# Feature Interactions
def correlation_table(data, plot="table"):
"""Get feature interaction plot or table.
Parameters
----------
data: pyspark.sql.DataFrame
plot: str
if table, then correlation table is obtained else correlation plot
Returns
-------
c_df: pd.DataFrame
correlation table
heatmap: hvplot
correlation plot
"""
cat_cols = dp.list_categorical_columns(data)
if len(cat_cols):
data = label_encode(data, cat_cols)
cols = dp.list_numerical_columns(data)
assembler = VectorAssembler(
inputCols=cols, outputCol="features", handleInvalid="skip"
)
df_vector = assembler.transform(data).select("features")
corr_mat = Correlation.corr(df_vector, "features", method="pearson")
corr_mat = corr_mat.collect()[0].asDict()["pearson(features)"]
corr_df = pd.DataFrame(corr_mat.toArray())
corr_df.index, corr_df.columns = cols, cols
if plot == "table":
        corr_df = corr_df.where(np.triu(np.ones(corr_df.shape)).astype(bool))
c_df = corr_df.stack().reset_index()
c_df = c_df.rename(
columns=dict(zip(list(c_df.columns), ["var1", "var2", "corr_coef"]))
)
return c_df
else:
heatmap = hvPlot(corr_df).heatmap(rot=45, height=450)
return heatmap
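# Usage sketch:
#   corr_pairs = correlation_table(df)          # tidy var1/var2/corr_coef frame
#   corr_heat = correlation_table(df, plot="")  # anything but "table" returns the heatmap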
def feature_interactions(data, save_as=None, save_path=""):
"""Feature interactions report.
Compiles outputs from correlation_table, correlation_heatmap, covariance_heatmap and bivariate_plots as a report.
Parameters
----------
data: pyspark.sql.DataFrame
save_as : str, default=None
Name of the report. By default name is auto generated from system timestamp.
save_path : str, default=''
Location where report to be saved. By default report saved in working directory.
Examples
--------
>>> from tigerml.pyspark.eda import feature_interactions
>>> df = spark.read.parquet("train.parquet")
>>> feature_interactions(df, save_as=".html", save_path="PySpark_Reports/")
"""
feature_interactions_report = {}
feature_interactions_report["correlation_table"] = [correlation_table(data)]
feature_interactions_report["correlation_heatmap"] = [correlation_table(data, "")]
if save_as:
default_report_name = "feature_interactions_report_at_{}".format(
time_now_readable()
)
save_path = append_file_to_path(save_path, default_report_name + save_as)
create_report(
feature_interactions_report, path=save_path, format=save_as,
)
return feature_interactions_report
# ------------
# Key Features
# ------------
def correlation_with_target(data, target_var, cols=None):
"""Get a barplot with correlation with target variable.
Parameters
----------
data: pyspark.sql.DataFrame
target_var: str
target variable of the data
cols: list
List of numerical columns.
        default - considers all the numerical features in the data
Returns
-------
plot: hvplot of bars related to the correlation with target variable
"""
if not cols:
cat_cols = dp.list_categorical_columns(data)
cat_cols = [i for i in cat_cols if i != target_var]
if len(cat_cols):
data = label_encode(data, cat_cols)
cols = dp.list_numerical_columns(data)
assembler = VectorAssembler(
inputCols=cols, outputCol="features", handleInvalid="keep"
)
df_vector = assembler.transform(data).select("features")
corr_mat = Correlation.corr(df_vector, "features", method="pearson")
corr_mat = corr_mat.collect()[0].asDict()["pearson(features)"]
corr_df = pd.DataFrame(corr_mat.toArray())
corr_df.index, corr_df.columns = cols, cols
corr_df = corr_df[[target_var]]
corr_df = corr_df[corr_df.index != target_var]
corr_df = corr_df[~corr_df[target_var].isna()]
corr_df.rename(
columns={target_var: "Pearson_correlation_with_Target"}, inplace=True
)
corr_df.sort_values(by="Pearson_correlation_with_Target", inplace=True)
plot = hvPlot(corr_df).bar(
invert=True, title="Feature Correlation with Target Function"
)
return plot
def label_encode(data, cat_cols):
"""Obtain label encoding of categorical variables.
Parameters
----------
data: pyspark.sql.DataFrame
cat_cols: list(str)
list of categorical column names
Returns
-------
data: pyspark.sql.DataFrame
dataframe with label encoding of categorical columns
"""
indexers = []
for cat_ in cat_cols:
stringIndexer = StringIndexer(
inputCol=cat_, outputCol=f"label_encoded_{cat_}", handleInvalid="keep"
)
indexers += [stringIndexer]
pipeline = Pipeline(stages=indexers)
data = pipeline.fit(data).transform(data)
return data
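# Usage sketch: every categorical column gains a numeric twin, e.g. 'city'
# yields 'label_encoded_city' via Spark's StringIndexer.
#   encoded = label_encode(df, ["city", "channel"])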
def feature_importance(data, target_var, classification=False):
"""Get feature importance based on RandomForestRegressor or RandomForestClassifier.
Parameters
----------
data: pyspark.sql.DataFrame
target_var: str
target variable of the data
classification: bool, default is False.
        chosen based on whether the problem is regression or classification.
Returns
-------
plot: hvplot
feature importance plot based on Random Forests
"""
cat_cols = dp.list_categorical_columns(data)
if len(cat_cols):
data = label_encode(data, cat_cols)
num_cols = dp.list_numerical_columns(data)
if classification:
from pyspark.ml.classification import RandomForestClassifier
if target_var in cat_cols:
data = data.withColumnRenamed(f"label_encoded_{target_var}", "target")
        else:
            data = data.withColumnRenamed(f"{target_var}", "target")
rf = RandomForestClassifier(
numTrees=3, maxDepth=20, labelCol="target", maxBins=100, seed=42
)
else:
from pyspark.ml.regression import RandomForestRegressor
data = data.withColumnRenamed(f"{target_var}", "target")
# Load model
rf = RandomForestRegressor(
numTrees=3, maxDepth=20, labelCol="target", maxBins=100, seed=42
)
if target_var in num_cols:
num_cols.remove(target_var)
elif f"label_encoded_{target_var}" in num_cols:
num_cols.remove(f"label_encoded_{target_var}")
assembler = VectorAssembler(
inputCols=num_cols, outputCol="features", handleInvalid="skip"
)
model_data = assembler.transform(data)
model_data = model_data.select(["features", "target"])
# BUILD THE MODEL
model = rf.fit(model_data)
# FEATURE IMPORTANCES
feature_importance = pd.DataFrame.from_dict(
dict(zip(num_cols, model.featureImportances.toArray())), orient="index"
).rename(columns={0: "Feature Importance"})
feature_importance.sort_values(by="Feature Importance", inplace=True)
plot = hvPlot(feature_importance).bar(
invert=True, title="Feature Importances from RF"
)
return plot
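# Usage sketch (hypothetical target column):
#   imp_plot = feature_importance(df, "churned", classification=True)
# The bars reflect split-based importances from the small random forest above.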
def feature_analysis_pca(data, target_var):
"""Get feature importance based on RandomForestRegressor or RandomForestClassifier.
Parameters
----------
data: pyspark.sql.DataFrame
target_var: str
target variable of the data
Returns
-------
transformed: pyspark.sql.DataFrame
modified dataframe with pca column
"""
from pyspark.ml.feature import PCA as PCAml
cat_cols = dp.list_categorical_columns(data)
cat_cols = [i for i in cat_cols if i != target_var]
if len(cat_cols):
data = label_encode(data, cat_cols)
num_cols = dp.list_numerical_columns(data)
num_cols = [i for i in num_cols if i != target_var]
assembler = VectorAssembler(
inputCols=num_cols, outputCol="features", handleInvalid="skip"
)
model_data = assembler.transform(data)
pca = PCAml(k=2, inputCol="features", outputCol="pca")
model = pca.fit(model_data)
transformed = model.transform(model_data)
return transformed
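# Usage sketch: the returned frame carries a 2-component 'pca' vector column.
#   pca_df = feature_analysis_pca(df, "target")
#   pca_df.select("pca").show(5)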
class EDAReportPyspark:
"""EDA toolkit for classification and regression models.
    To evaluate and generate reports summarizing data health, univariate & bivariate analysis, interactions and key drivers.
Parameters
----------
data: pyspark.sql.DataFrame
y : string, default=None
Name of the target column
is_classification : bool, default=None
        Set to True for a classification target
Examples
--------
>>> from tigerml.pyspark.eda import EDAReportPyspark
>>> anp = EDAReportPyspark(data=train_df, is_classification=True, y="target_var")
>>> anp.get_report(y="target_var",save_path="PySpark_Reports/")
"""
def __init__(self, data, is_classification, y=None):
self.data = data
self.y = y
self.is_classification = is_classification
def _set_y_cols(self, y=None):
if isinstance(y, str):
y = [y]
if y:
return y
else:
return []
def _get_x_cols(self, y=None):
data = self.data
if y is not None:
y = [y] if isinstance(y, str) else y
else:
y = self.y_cols
if self.y_cols:
return [col for col in data.columns if col not in y]
else:
return list(data.columns)
def _set_xy_cols(self, y):
self.y_cols = self._set_y_cols(y=y)
self.x_cols = self._get_x_cols()
def key_drivers(
self, y=None, features=None, quick=True, save_as=None, save_path=""
):
"""Univariate analysis for the columns.
Generate summary_stats, distributions and normality tests for columns.
Parameters
----------
y : Target column name (String)
features : list, default=[]
list of columns in the dataframe for analysis. By default all are used.
save_as : str, default=None
Name of the report. By default name is auto generated from system timestamp.
save_path : str, default=''
Location where report to be saved. By default report saved in working directory.
Examples
--------
        >>> from tigerml.pyspark.eda import EDAReportPyspark
>>> anp = EDAReportPyspark(data=train_df, is_classification=True, y="target_var")
>>> anp.key_drivers(y="target_var", save_as=".html", save_path="PySpark_Reports/")
"""
if y:
assert isinstance(y, str) or isinstance(y, list)
ys = y if isinstance(y, list) else [y]
self._set_xy_cols(ys)
else:
raise Exception("dependent variable name needs to be passed")
if features:
features = list(set(features) & set(self.x_cols))
else:
features = self.x_cols
key_drivers = {}
for y in ys:
key_drivers[y] = {}
key_drivers[y]["feature_scores"] = correlation_with_target(self.data, y)
key_drivers[y]["feature_importances"] = feature_importance(
self.data, y, classification=self.is_classification
)
# key_drivers[y]["pca_analysis"] = self.get_pca_analysis(features=features)
# key_drivers[y]['tsne_projection'] = self.get_tsne_projection()
if save_as:
default_report_name = "key_drivers_report_at_{}".format(time_now_readable())
save_path = append_file_to_path(save_path, default_report_name + save_as)
create_report(
key_drivers, path=save_path, format=save_as, split_sheets=True,
)
self.key_drivers_report = key_drivers
return key_drivers
def _create_report(self, y=None, quick=True, corr_threshold=None):
if y:
self._set_xy_cols(y)
self.report = {}
self.report["data_preview"] = {"head": [self.data.limit(5).toPandas()]}
self.report["health_analysis"] = health_analysis(self.data)
# self.report['data_preview']['pre_processing'] = self._prepare_data(corr_threshold)
self.report["feature_analysis"] = feature_analysis(self.data)
self.report["feature_interactions"] = feature_interactions(self.data)
if self.y_cols:
self.report["key_drivers"] = self.key_drivers(quick=quick, y=self.y_cols)
else:
_LOGGER.info(
"Could not generate key drivers report as dependent variable is not defined"
)
def _save_report(self, format=".html", name="", save_path="", tiger_template=False):
if not name:
name = "data_exploration_report_at_{}".format(time_now_readable())
create_report(
self.report,
name=name,
path=save_path,
format=format,
split_sheets=True,
tiger_template=tiger_template,
)
del self.report
gc.collect()
def get_report(
self,
format=".html",
name="",
y=None,
corr_threshold=None,
quick=True,
save_path="",
tiger_template=False,
):
"""Create consolidated report on data preview,feature analysis,feature interaction and health analysis.
The consolidated report also includes key driver report if y(target dataframe) is passed while
calling create_report.
Parameters
----------
y : str, default = None
format : str, default='.html'
format of report to be generated. possible values '.xlsx', '.html'
name : str, default=None
Name of the report. By default name is auto generated from system timestamp.
save_path : str, default=''
location with filename where report to be saved. By default is auto generated from system timestamp and saved in working directory.
quick : boolean, default=True
            If true, calculate SHAP values and create bivariate plots
corr_threshold : float, default=None
To specify correlation threshold
"""
self._create_report(y=y, quick=quick, corr_threshold=corr_threshold)
return self._save_report(
format=format, name=name, save_path=save_path, tiger_template=tiger_template
)
| [
"[email protected]"
] | |
ed3353e5133a00ddd17176040ec7d8f1a619806c | 1d7eec692553afc411ec1e7325634f71a2aed291 | /backend/curriculum_tracking/management/project_wrangle_helpers.py | 8c347fb6739812587fcbd8c3727942620ad8dd97 | [] | no_license | Andy-Nkumane/Tilde | a41a2a65b3901b92263ae94d527de403f59a5caf | 80de97edaf99f4831ca8cb989b93e3be5e09fdd6 | refs/heads/develop | 2023-05-09T10:02:41.240517 | 2021-05-28T09:20:51 | 2021-05-28T09:20:51 | 299,501,586 | 0 | 0 | null | 2020-10-25T22:37:30 | 2020-09-29T04:10:48 | Python | UTF-8 | Python | false | false | 10,188 | py | """ These helper functions were leveraged heavily in preparing the recruit projects for flavoursome cards.
There were a lot of different messes to be cleaned up
"""
from taggit.models import Tag
import csv
from core.models import User, Team
from curriculum_tracking.models import (
    RecruitProject,
    ContentItem,
    AgileCard,
    CurriculumContentRequirement,
)
# Assumed import path for the cohort/membership models referenced below; they
# were used but never imported in this scratch helper file.
from core.models import Cohort, RecruitCohort, TeamMembership
javascript = Tag.objects.get_or_create(name="javascript")[0]
python = Tag.objects.get_or_create(name="python")[0]
java = Tag.objects.get_or_create(name="java")[0]
kotlin = Tag.objects.get_or_create(name="kotlin")[0]
swift = Tag.objects.get_or_create(name="swift")[0]
typescript = Tag.objects.get_or_create(name="typescript")[0]
none = Tag.objects.get_or_create(name="none")[0]
def get_project_info(content_item_id, user_id):
user = User.objects.get(pk=user_id)
projects = RecruitProject.objects.filter(
content_item_id=content_item_id, recruit_users__in=[user]
)
groups = Team.objects.filter(users__in=[user])
cohorts = [o.cohort for o in RecruitCohort.objects.filter(user=user)]
content_item = ContentItem.objects.get(pk=content_item_id)
print(f"user = {user}")
print(f"groups = {groups}")
print(f"cohorts = {cohorts}")
print(f"content_item = {content_item}")
print(f"{projects.count()} matching projects:")
for project in projects:
print(f"Project: id={project.id} {project}")
print(f"\trepo: {project.repository.ssh_url}")
try:
print(f"\tcard: id={project.agile_card.id} {project.agile_card}")
except AgileCard.DoesNotExist:
print("\tno card")
print()
def export_projects_without_flavours():
with open("gitignore/projects_needing_flavours.csv", "w") as f:
writer = csv.writer(f)
for project in RecruitProject.objects.all():
if project.flavours.count() == 0:
all_groups = []
for user in project.recruit_users.all():
all_groups.extend(
[
f"group {o.id} {o.name}"
for o in Team.objects.filter(users__in=[user])
]
)
all_groups.extend(
[
f"c {o.id} {o.cohort.label}"
for o in RecruitCohort.objects.filter(user=user)
]
)
writer.writerow(
[
project.id,
str(project),
set(all_groups),
project.repository.ssh_url if project.repository else "",
[o.name for o in project.content_item.flavours.all()],
]
)
def assign_flavours_to_cohort(cohort_id, default_flavour):
cohort = Cohort.objects.get(pk=cohort_id)
users = [o.user for o in RecruitCohort.objects.filter(cohort=cohort)]
for user in users:
assign_flavours_to_user(user, default_flavour)
def assign_flavours_to_group(group_id, default_flavour):
group = Team.objects.get(pk=group_id)
users = [o.user for o in TeamMembership.objects.filter(group=group)]
for user in users:
assign_flavours_to_user(user, default_flavour)
def assign_flavours_to_user(user, default_flavour):
for project in RecruitProject.objects.filter(recruit_users__in=[user]):
if project.flavours.count() > 0:
continue
flavours = project.content_item.flavours.all()
if default_flavour in flavours:
print(f"project: {project.id} {project}")
project.flavours.add(default_flavour)
def assign_flavours_to_user_by_email(email, default_flavour):
user = User.objects.get(email=email)
assign_flavours_to_user(user, default_flavour)
# curriculum_tracking//.py
# from curriculum_tracking.management.project_wragle_helpers import assign_flavours
# for o in l:
# if 'web' in o.cohort_curriculum.name:
# print(o)
# assign_flavours(o.id, javascript)
# if 'data' in o.cohort_curriculum.name:
# print(o)
# assign_flavours(o.id, python)
def remove_flavours(cohort_id):
cohort = Cohort.objects.get(pk=cohort_id)
users = [o.user for o in RecruitCohort.objects.filter(cohort=cohort)]
for user in users:
for project in RecruitProject.objects.filter(recruit_users__in=[user]):
project.flavours.clear()
print(project)
print(project.flavours)
def export_project_flavours(cohort_id):
cohort = Cohort.objects.get(pk=cohort_id)
users = [o.user for o in RecruitCohort.objects.filter(cohort=cohort)]
all_projects = []
for user in users:
all_projects.extend(RecruitProject.objects.filter(recruit_users__in=[user]))
all_projects.sort(
key=lambda project: (
[o.id for o in project.recruit_users.all()],
project.content_item_id,
)
)
with open(f"gitignore/cohort_projects_{cohort_id}_{cohort.label}.csv", "w") as f:
writer = csv.writer(f)
writer.writerow(
[
"project.id",
"content_item",
"repository.full_name",
"project.flavours",
"content_item.flavours",
]
)
for project in all_projects:
writer.writerow(
[
project.id,
project.content_item,
project.repository.full_name,
[o.name for o in project.flavours.all()],
[o.name for o in project.content_item.flavours.all()],
]
)
def if_one_flavour_available_then_assign(cohort_id=None):
cohort = Cohort.objects.get(pk=cohort_id)
users = [o.user for o in RecruitCohort.objects.filter(cohort=cohort)]
for user in users:
for project in RecruitProject.objects.filter(recruit_users__in=[user]):
if project.flavours.count() > 0:
continue
if project.content_item.flavours.count() == 1:
flavour = project.content_item.flavours.first()
if flavour != none:
print(f"adding {flavour.name} to {project}")
project.flavours.add(flavour)
def change_project_flavour(project_id, to):
project = RecruitProject.objects.get(pk=project_id)
project.flavours.clear()
project.flavours.add(to)
def export_nosubmit_projects():
with open("gitignore/nosubmit_projects.csv", "w") as f:
writer = csv.writer(f)
writer.writerow(
["project.id", "project.content_item_id", "project.content_item.title"]
)
for project in RecruitProject.objects.filter(
content_item__project_submission_type=ContentItem.NO_SUBMIT
).order_by("content_item_id"):
writer.writerow(
[project.id, project.content_item_id, project.content_item.title]
)
def change_project_content_item_id(from_id, to_id):
print(f"from {ContentItem.objects.get(pk=from_id)}")
print(f"to {ContentItem.objects.get(pk=to_id)}")
for project in RecruitProject.objects.filter(content_item_id=from_id):
print(project.id)
project.content_item_id = to_id
project.save()
print()
def get_project_info(content_item_id, user_id):
user = User.objects.get(pk=user_id)
projects = RecruitProject.objects.filter(
content_item_id=content_item_id, recruit_users__in=[user]
)
if projects.count() < 2:
return
flavours = [
sorted([o.name for o in project.flavours.all()]) for project in projects
]
flavours = [",".join(l) for l in flavours]
if len(set(flavours)) == projects.count():
return
groups = Team.objects.filter(users__in=[user])
cohorts = [o.cohort for o in RecruitCohort.objects.filter(user=user)]
content_item = ContentItem.objects.get(pk=content_item_id)
print(f"user = {user}")
print(f"groups = {groups}")
print(f"cohorts = {cohorts}")
print(f"content_item = {content_item}")
print(f"{projects.count()} matching projects:")
for project in projects:
print(f"Project: id={project.id} {project}")
print(f"\trepo: {project.repository.ssh_url}")
print(f"\tflavours: {[o.name for o in project.flavours.all()]}")
try:
print(f"\tcard: id={project.agile_card.id} {project.agile_card}")
except AgileCard.DoesNotExist:
print("\tno card")
print()
SQL_QUERY_TO_FETCH_POTENTIAL_DUPLICATE_PROJECTS = """
select count(*) ,curriculum_tracking_recruitproject.content_item_id,curriculum_tracking_recruitproject_recruit_users.user_id
into TEMPORARY temp
from curriculum_tracking_recruitproject, curriculum_tracking_recruitproject_recruit_users where curriculum_tracking_recruitproject_recruit_users.recruitproject_id = curriculum_tracking_recruitproject.id group by user_id,content_item_id;
select * from temp where count>1;
"""
def change_content_id(project_id, cid, flavour):
o = RecruitProject.objects.get(pk=project_id)
o.content_item_id = cid
o.save()
o.flavours.add(flavour)
def pproject(id):
proj = RecruitProject.objects.get(pk=id)
print(proj)
print(proj.repository)
def delete_nosubmit_instances():
AgileCard.objects.filter(
content_item__project_submission_type=ContentItem.NO_SUBMIT
).delete()
CurriculumContentRequirement.objects.filter(
content_item__project_submission_type=ContentItem.NO_SUBMIT
).delete()
RecruitProject.objects.filter(
content_item__project_submission_type=ContentItem.NO_SUBMIT
).delete()
def get_all_recruits_in_cohorts_with_matching_curriculum(curriculum_id):
ds_users = []
    for cohort in Cohort.objects.filter(cohort_curriculum_id=curriculum_id, active=True):
if cohort.cohort_number == 22:
continue
for o in RecruitCohort.objects.filter(cohort=cohort, user__active=True):
ds_users.append(o.user)
return ds_users
| [
"[email protected]"
] | |
cc25b49cba139ab957706257cc9e9cdd5119b7a6 | 382c8cfb29f420297462d122c571995b62e10a6b | /temp.py | ab5d445e6bd49d2df01744e32851e61b3e05f170 | [] | no_license | innovation-labs/Vader | c1a9592cc02f85cc5a28b3116fc41b35df5baf04 | aa9091f90c41fe2b1ae6e488670bf89bcbbde5c9 | refs/heads/master | 2021-08-23T21:18:46.609875 | 2017-12-06T15:50:56 | 2017-12-06T15:50:56 | 103,031,449 | 0 | 0 | null | 2017-09-10T13:28:51 | 2017-09-10T13:28:50 | null | UTF-8 | Python | false | false | 1,914 | py | #
from django.conf import settings
from geoip2 import database
from geoip2.errors import AddressNotFoundError
from apps.warehouse.models import IPStore
reader = database.Reader(settings.MAXMIND_CITY_DB)
ips = ['99.248.9.54',
'173.34.75.225',
'70.54.130.204',
'67.58.222.87',
'70.55.50.230',
'76.71.67.164',
'70.24.105.229',
'64.231.136.194',
'135.0.4.175',
'173.34.222.226',
'174.92.74.247',
'99.231.160.194',
'184.151.178.201',
'70.49.149.23',
'66.49.185.244',
'70.53.51.197',
'174.112.43.253',
'173.34.125.63',
'64.231.148.82',
'66.49.190.181',
'173.32.111.198',
'70.50.213.134',
'50.100.149.203',
'99.230.228.92',
'184.151.190.55',
'24.114.51.122',
'174.118.26.209',
'73.201.179.235',
'99.237.95.19',
'76.71.112.4',
'76.71.4.24',
'76.68.126.170',
'174.115.124.199',
'99.243.22.198',
'69.157.66.143',
'99.226.8.59',
'70.26.57.62',
'184.147.122.233',
'216.165.217.88',
'99.233.178.15',
'72.15.61.181', ]
def update_geocode(ip, location):
    from googlemaps import Client
    # from django.conf import settings
    import json
    ip.latitude = location['latitude']
    ip.longitude = location['longitude']
    gmaps = Client(key=settings.GOOGLE_GEOCODE_KEY)
    result = gmaps.reverse_geocode((location['latitude'], location['longitude']))
    ip.geocode = json.dumps(result)
    print result
    ip.save()
for ip in ips:
try:
ip2geo = reader.city(ip).raw
location = ip2geo['location']
store, created = IPStore.objects.get_or_create(ip=ip)
if created:
            update_geocode(store, location)
except AddressNotFoundError as e:
print e
| [
"[email protected]"
] | |
1bff2ac0967e16eaba266de1b1749c487521c995 | 08778088da558a8bc79326754bb86f61c4cf082b | /eisitirio/database/waiting.py | 5d06fb161572f0a5a5f73faf4ed6d38b4a5a6dad | [] | no_license | toastwaffle/Eisitirio | b5a7c48fc015857bfccdbe3f702e4c12c2e8277c | 64ff15704b6b62d6ed385f7add59e7face88a95c | refs/heads/master | 2020-05-24T15:51:12.023834 | 2019-09-12T19:09:23 | 2019-09-12T19:09:23 | 187,341,863 | 0 | 0 | null | 2019-05-18T09:59:35 | 2019-05-18T09:59:35 | null | UTF-8 | Python | false | false | 954 | py | # coding: utf-8
"""Database model for entries on the waiting list."""
from __future__ import unicode_literals
import datetime
from eisitirio.database import db
DB = db.DB
class Waiting(DB.Model):
"""Model for entries on the waiting list."""
__tablename__ = "waiting"
waiting_since = DB.Column(DB.DateTime(), nullable=False)
waiting_for = DB.Column(DB.Integer(), nullable=False)
user_id = DB.Column(DB.Integer, DB.ForeignKey("user.object_id"), nullable=False)
user = DB.relationship(
"User", backref=DB.backref("waiting", lazy="dynamic"), foreign_keys=[user_id]
)
def __init__(self, user, waiting_for):
self.user = user
self.waiting_for = waiting_for
self.waiting_since = datetime.datetime.utcnow()
def __repr__(self):
return "<Waiting: {0} for {1} ticket{2}>".format(
self.user.full_name, self.waiting_for, "" if self.waiting_for == 1 else "s"
)
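# Usage sketch (hypothetical `some_user` User instance):
#   entry = Waiting(user=some_user, waiting_for=2)
#   DB.session.add(entry)
#   DB.session.commit()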
| [
"[email protected]"
] | |
7aba7f32ed96c5e577fdeed1ddb2f5bf167c0d91 | 0d32e3819606c3fb6820d0cd5f5097db3b0d3dd4 | /HW3/q_learning_no_epsilon_decay_mountain_car.py | 6afd1f1d11cfe24eb60909d28e8ea03a2aa4b912 | [] | no_license | IanCBrown/COMP5600 | e8e06b2a8e3bde0acc6897adb2396a57a2811f0a | ef454c009d6fd5eec50ceec5a8283a7c6d81d097 | refs/heads/master | 2020-08-02T13:20:41.024681 | 2019-12-09T03:53:37 | 2019-12-09T03:53:37 | 211,366,293 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,059 | py | import math
import numpy as np
import matplotlib
matplotlib.use("TkAgg")
from matplotlib import pyplot as plt
import gym
from gym import spaces
from gym.utils import seeding
# Resources:
# https://en.wikipedia.org/wiki/Mountain_car_problem
# https://towardsdatascience.com/getting-started-with-reinforcement-learning-and-open-ai-gym-c289aca874f
# https://towardsdatascience.com/reinforcement-learning-temporal-difference-sarsa-q-learning-expected-sarsa-on-python-9fecfda7467e
def epsilon_greedy(Q, state, action_space, epsilon):
# if in epsilon range use it
if np.random.rand() < 1 - epsilon:
action = np.argmax(Q[state[0], state[1]])
# else take random action
else:
action = np.random.randint(0, action_space)
return action
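# Quick demonstration with a toy Q-table (hypothetical shapes): with
# epsilon=0.2 the greedy action is returned roughly 80% of the time.
#   demo_Q = np.zeros((19, 15, 3))
#   demo_Q[5, 7, 2] = 1.0
#   epsilon_greedy(demo_Q, (5, 7), 3, 0.2)   # usually 2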
def q_learning(learning_rate, discount, epsilon, episodes):
# initialize environment
env = gym.make("MountainCar-v0")
env.reset()
states = (env.observation_space.high - env.observation_space.low)*np.array([10,100])
states = np.round(states, 0).astype(int) + 1
# Q(s,a)
Q_table = np.random.uniform(low = -1, high = 1, size = (states[0], states[1], env.action_space.n))
reward_list = []
var_list = []
avg_reward_list = []
# Q learning main loop
for i in range(episodes):
finished = False
total_reward = 0
reward = 0
state = env.reset()
state_adj = (state - env.observation_space.low)*np.array([10,100])
state_adj = np.round(state_adj, 0).astype(int)
while not finished:
# render last N episodes
# comment out to see plots
# if i >= episodes - 1:
# env.render()
# action = epsilon_greedy(Q_table, state_adj, env.action_space.n, epsilon)
            # pick action greedily without randomness
action = np.argmax(Q_table[state_adj[0], state_adj[1]])
next_state, reward, finished, info = env.step(action)
# Discretize
next_state_adj = (next_state - env.observation_space.low)*np.array([10,100])
next_state_adj = np.round(next_state_adj, 0).astype(int)
if finished and next_state[0] >= 0.5: # and ... condition
Q_table[state_adj[0], state_adj[1], action] = reward
else:
update = learning_rate * (reward + discount * np.max(Q_table[next_state_adj[0],next_state_adj[1]])
- Q_table[state_adj[0], state_adj[1], action])
# update Q table
Q_table[state_adj[0], state_adj[1], action] += update
total_reward += reward
state_adj = next_state_adj
reward_list.append(total_reward)
# choose how often to record data
# recording every data point will make the plots crowded
# 10 and 100 work well.
recording_interval = 100
if i % recording_interval == 0:
avg_reward = np.mean(reward_list)
var = np.var(reward_list)
var_list.append(var)
avg_reward_list.append(avg_reward)
reward_list = []
env.close()
return (avg_reward_list, var_list)
# Adjust these parameters as needed
number_of_episodes = 2500
learning_rate = 0.1
gamma = 0.9
epsilon = 0.8
def single_run():
"""
Run the algorithm once
"""
rewards_and_var = q_learning(learning_rate, gamma, epsilon, number_of_episodes)
avg_reward = rewards_and_var[0]
var = rewards_and_var[1]
episodes1 = 100*(np.arange(len(avg_reward)) + 1)
episodes2 = 100*(np.arange(len(var)) + 1)
plt.figure("Average Reward vs. Episodes")
plt.title("Average Reward vs. Episodes")
plt.xlabel("Episodes")
plt.ylabel("Average Reward")
plt.plot(episodes1, avg_reward, color='blue')
plt.figure("Variance vs. Episodes")
plt.title("Variance vs. Episodes")
plt.xlabel("Episodes")
plt.ylabel("Variance")
plt.plot(episodes2, var, color='orange')
plt.figure("Average Reward w/ Variance vs. Episodes")
plt.title("Average Reward w/ Variance vs. Episodes")
plt.xlabel("Episodes")
plt.ylabel("Average Reward w/ Variance")
plt.errorbar(episodes1, avg_reward, var, linestyle='None', marker='^', ecolor="orange")
plt.show()
def multi_run(N):
"""
Run the algorithm N times
@param N - number of times to test (e.g. 20)
"""
rewards = []
vars = []
for _ in range(N):
rewards_and_var = q_learning(learning_rate, gamma, epsilon, number_of_episodes)
avg_reward = rewards_and_var[0]
var = rewards_and_var[1]
rewards.append(avg_reward)
vars.append(var)
rewards = list(zip(*rewards))
vars = list(zip(*vars))
reward_to_plot = []
for sublist in rewards:
reward_to_plot.append(np.mean(sublist))
var_to_plot = []
for sublist in vars:
var_to_plot.append(np.mean(sublist))
episodes1 = 100*(np.arange(len(avg_reward)) + 1)
episodes2 = 100*(np.arange(len(var)) + 1)
plt.figure("Average Reward vs. Episodes")
plt.title("Average Reward vs. Episodes")
plt.xlabel("Episodes")
plt.ylabel("Average Reward")
plt.plot(episodes1, reward_to_plot, color='blue')
plt.savefig("q_learning_no_epsilon_results/Average_Reward_vs_Episodes.png")
plt.figure("Variance vs. Episodes")
plt.title("Variance vs. Episodes")
plt.xlabel("Episodes")
plt.ylabel("Variance")
plt.plot(episodes2, var_to_plot, color='orange')
plt.savefig("q_learning_no_epsilon_results/Variance_vs_Episodes.png")
plt.figure("Average Reward w/ Variance vs. Episodes")
plt.title("Average Reward w/ Variance vs. Episodes")
plt.xlabel("Episodes")
plt.ylabel("Average Reward w/ Variance")
plt.errorbar(episodes1, reward_to_plot, var_to_plot, linestyle='None', marker='^', ecolor="orange")
plt.savefig("q_learning_no_epsilon_results/Average_Reward_and_Variance_vs_Episodes.png")
# choose multi or single run
# single_run()
multi_run(20) | [
"[email protected]"
] | |
04495c8e1849b8df9b7b60f749e3172809966b93 | 7e419f7046386c20b9a6ed121c377fbcc8ff6885 | /mobject/region.py | d5ba70ff04615c627dc85bf3d0a85a9d4bddec0d | [] | no_license | ralusek/manim | 47ebabb4d42accdda42a0da32ccdef6b129205b8 | d53412d738e3b9d306b7d7d2ad39cac702ff3d18 | refs/heads/master | 2021-04-29T00:47:40.990006 | 2018-02-16T20:16:59 | 2018-02-16T20:16:59 | 121,837,012 | 1 | 0 | null | 2018-02-17T07:44:48 | 2018-02-17T07:44:47 | null | UTF-8 | Python | false | false | 3,206 | py | import numpy as np
import itertools as it
from PIL import Image
from copy import deepcopy
from mobject import Mobject
from helpers import *
#TODO, this whole class should be something vectorized.
class Region(Mobject):
CONFIG = {
"display_mode" : "region"
}
def __init__(self, condition = (lambda x, y : True), **kwargs):
"""
Condition must be a function which takes in two real
arrays (representing x and y values of space respectively)
and return a boolean array. This can essentially look like
a function from R^2 to {True, False}, but & and | must be
used in place of "and" and "or"
"""
Mobject.__init__(self, **kwargs)
self.condition = condition
def _combine(self, region, op):
self.condition = lambda x, y : op(
self.condition(x, y),
region.condition(x, y)
)
def union(self, region):
self._combine(region, lambda bg1, bg2 : bg1 | bg2)
return self
def intersect(self, region):
self._combine(region, lambda bg1, bg2 : bg1 & bg2)
return self
    def complement(self):
        # Negate the stored condition; capture the old callable first so the
        # new lambda does not recurse into itself.
        condition = self.condition
        self.condition = lambda x, y: np.logical_not(condition(x, y))
        return self
class HalfPlane(Region):
def __init__(self, point_pair, upper_left = True, *args, **kwargs):
"""
point_pair of the form [(x_0, y_0,...), (x_1, y_1,...)]
Pf upper_left is True, the side of the region will be
everything on the upper left side of the line through
the point pair
"""
if not upper_left:
point_pair = list(point_pair)
point_pair.reverse()
(x0, y0), (x1, y1) = point_pair[0][:2], point_pair[1][:2]
def condition(x, y):
return (x1 - x0)*(y - y0) > (y1 - y0)*(x - x0)
Region.__init__(self, condition, *args, **kwargs)
def region_from_line_boundary(*lines, **kwargs):
reg = Region(**kwargs)
for line in lines:
reg.intersect(HalfPlane(line, **kwargs))
return reg
def region_from_polygon_vertices(*vertices, **kwargs):
return region_from_line_boundary(*adjacent_pairs(vertices), **kwargs)
def plane_partition(*lines, **kwargs):
"""
A 'line' is a pair of points [(x0, y0,...), (x1, y1,...)]
Returns the list of regions of the plane cut out by
these lines
"""
result = []
half_planes = [HalfPlane(line, **kwargs) for line in lines]
complements = [deepcopy(hp).complement() for hp in half_planes]
num_lines = len(lines)
for bool_list in it.product(*[[True, False]]*num_lines):
reg = Region(**kwargs)
for i in range(num_lines):
if bool_list[i]:
reg.intersect(half_planes[i])
else:
reg.intersect(complements[i])
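        # bool_grid is presumably rasterized from `condition` elsewhere (see
        # the vectorization TODO at the top of this file); empty regions are
        # dropped here.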
if reg.bool_grid.any():
result.append(reg)
return result
def plane_partition_from_points(*points, **kwargs):
"""
Returns list of regions cut out by the complete graph
with points from the argument as vertices.
Each point comes in the form (x, y)
"""
lines = [[p1, p2] for (p1, p2) in it.combinations(points, 2)]
return plane_partition(*lines, **kwargs)
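# Usage sketch: the regions cut out by the complete graph on a triangle's
# vertices (its interior plus the unbounded pieces).
#   regions = plane_partition_from_points((0, 0), (4, 0), (2, 3))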
| [
"[email protected]"
] | |
3d44285b7f4667f8c29bbb1bf7a019e76508ecb4 | 61673ab9a42f7151de7337608c442fa6247f13bb | /pillow/change-pixels-numpy-array-PixelAccess/main-PixelAccess.py | 753aed3b65ccfc089b9e2661eb5527fc4b576b6e | [
"MIT"
] | permissive | furas/python-examples | 22d101670ecd667a29376d7c7d7d86f8ec71f6cf | 95cb53b664f312e0830f010c0c96be94d4a4db90 | refs/heads/master | 2022-08-23T23:55:08.313936 | 2022-08-01T14:48:33 | 2022-08-01T14:48:33 | 45,575,296 | 176 | 91 | MIT | 2021-02-17T23:33:37 | 2015-11-04T23:54:32 | Python | UTF-8 | Python | false | false | 449 | py | #!/usr/bin/env python3
# date: 2019.09.29
# `PixelAccess` changes pixels in original `img`
# so there is no need to convert it back to `Image`
# BTW: Image uses [col,row] (array uses [row,col])
from PIL import Image
img = Image.open('image.jpg')
pixels = img.load()
width, height = img.size
for col in range(width):
for row in range(height):
if pixels[col,row] == (0, 0, 0):
pixels[col,row] = (255, 0 ,0)
img.show()
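# For comparison, a vectorized NumPy sketch of the same replacement
# (note: arrays index as [row, col], unlike Image's [col, row]):
#   import numpy as np
#   arr = np.array(img)
#   mask = (arr == (0, 0, 0)).all(axis=-1)
#   arr[mask] = (255, 0, 0)
#   img = Image.fromarray(arr)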
| [
"[email protected]"
] | |
ad2df6b8a4b5b1a74f780a295c9eebfbadacb6db | 131f9efacc5944e1f1f156b82ae0f59fe1d39dfa | /django_quickblocks/migrations/0002_auto__add_field_quickblocktype_has_rich_text__add_field_quickblocktype.py | 752a8a7bd6459d2f40fd1660969c53e1f12a0d44 | [] | no_license | nyaruka/django-quickblocks | fcaeb5c45c968c96e47f34ca7c641b674f2194f6 | 2ac419caff11dad1570f0a56b4576e5e40cfa758 | refs/heads/master | 2022-11-12T18:32:27.666397 | 2014-10-29T17:04:13 | 2014-10-29T17:04:13 | 1,535,809 | 6 | 4 | null | 2022-11-03T10:37:39 | 2011-03-28T08:47:23 | JavaScript | UTF-8 | Python | false | false | 9,029 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'QuickBlockType.has_rich_text'
db.add_column('django_quickblocks_quickblocktype', 'has_rich_text',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
# Adding field 'QuickBlockType.has_summary'
db.add_column('django_quickblocks_quickblocktype', 'has_summary',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'QuickBlockType.has_gallery'
db.add_column('django_quickblocks_quickblocktype', 'has_gallery',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'QuickBlockType.has_video'
db.add_column('django_quickblocks_quickblocktype', 'has_video',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'QuickBlock.summary'
db.add_column('django_quickblocks_quickblock', 'summary',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
# Adding field 'QuickBlock.video_id'
db.add_column('django_quickblocks_quickblock', 'video_id',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'QuickBlockType.has_rich_text'
db.delete_column('django_quickblocks_quickblocktype', 'has_rich_text')
# Deleting field 'QuickBlockType.has_summary'
db.delete_column('django_quickblocks_quickblocktype', 'has_summary')
# Deleting field 'QuickBlockType.has_gallery'
db.delete_column('django_quickblocks_quickblocktype', 'has_gallery')
# Deleting field 'QuickBlockType.has_video'
db.delete_column('django_quickblocks_quickblocktype', 'has_video')
# Deleting field 'QuickBlock.summary'
db.delete_column('django_quickblocks_quickblock', 'summary')
# Deleting field 'QuickBlock.video_id'
db.delete_column('django_quickblocks_quickblock', 'video_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'django_quickblocks.quickblock': {
'Meta': {'object_name': 'QuickBlock'},
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'quickblock_creations'", 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'quickblock_modifications'", 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'quickblock_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['django_quickblocks.QuickBlockType']"}),
'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'video_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'django_quickblocks.quickblocktype': {
'Meta': {'object_name': 'QuickBlockType'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'quickblocktype_creations'", 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'has_gallery': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'has_rich_text': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'has_summary': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'has_video': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'quickblocktype_modifications'", 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
}
}
complete_apps = ['django_quickblocks']
| [
"[email protected]"
] | |
8af072ca10d2818422727df5fbd11c71012690ef | 5da80918ac50156f351966f96e2131123222d483 | /hack/coalesce.py | ddb07665af6933731c1eef0253ccadbd9605a08e | [
"Apache-2.0"
] | permissive | dinomiteX/cluster-api-provider-aws | 43045e2c2d1836f1722795e5d4afeef2a3b407ea | bc4496ff9235a64f81dd9e6f2c97e368b6099431 | refs/heads/master | 2020-07-07T14:50:26.244166 | 2019-08-20T13:37:01 | 2019-08-20T13:37:01 | 203,380,287 | 0 | 0 | Apache-2.0 | 2019-08-20T13:18:24 | 2019-08-20T13:18:23 | null | UTF-8 | Python | false | false | 3,191 | py | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Stolen from https://github.com/kubernetes/test-infra/blob/master/hack/coalesce.py
"""Coalesces bazel test results into one file."""
import argparse
import os
import re
import xml.etree.ElementTree as ET
BAZEL_FAILURE_HEADER = '''exec ${PAGER:-/usr/bin/less} "$0" || exit 1
-----------------------------------------------------------------------------
'''
# from https://www.w3.org/TR/xml11/#charsets
# RestrictedChar ::= [#x1-#x8]|[#xB-#xC]|[#xE-#x1F]|[#x7F-#x84]|[#x86-#x9F]
RESTRICTED_XML_CHARS_RE = re.compile(r'[\x00-\x08\x0B\x0C\x0E-\x1F\x7F-\x84\x86-\x9F]')
ANSI_ESCAPE_CODES_RE = re.compile(r'\033\[[\d;]*[@-~]')
def test_packages(root):
"""Yields test package directories under root."""
for package, _, files in os.walk(root):
if 'test.xml' in files and 'test.log' in files:
yield package
def sanitize(text):
if text.startswith(BAZEL_FAILURE_HEADER):
text = text[len(BAZEL_FAILURE_HEADER):]
# ANSI escape sequences should be removed.
text = ANSI_ESCAPE_CODES_RE.sub('', text)
# And any other badness that slips through.
text = RESTRICTED_XML_CHARS_RE.sub('', text)
return text
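# Example: sanitize('\033[31mFAILED\033[0m\x07') -> 'FAILED'
# (both the ANSI colour escape and the restricted BEL byte are stripped).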
def result(pkg):
"""Given a directory, create a testcase element describing it."""
elem = ET.Element('testcase')
elem.set('classname', 'go_test')
pkg_parts = pkg.split('/')
elem.set('name', '//%s:%s' % ('/'.join(pkg_parts[1:-1]), pkg_parts[-1]))
elem.set('time', '0')
suites = ET.parse(pkg + '/test.xml').getroot()
for suite in suites:
for case in suite:
for status in case:
if status.tag == 'error' or status.tag == 'failure':
failure = ET.Element('failure')
with open(pkg + '/test.log') as fp:
text = fp.read().decode('UTF-8', 'ignore')
failure.text = sanitize(text)
elem.append(failure)
return elem
def main():
root = ET.Element('testsuite')
root.set('time', '0')
for package in sorted(test_packages('bazel-testlogs')):
root.append(result(package))
artifacts_dir = os.environ['ARTIFACTS']
try:
os.mkdir(artifacts_dir)
except OSError:
pass
with open(os.path.join(artifacts_dir, 'junit_bazel.xml'), 'w') as fp:
fp.write(ET.tostring(root, 'UTF-8'))
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Coalesce JUnit results.')
PARSER.add_argument('--repo_root', default='.')
ARGS = PARSER.parse_args()
os.chdir(ARGS.repo_root)
main()
| [
"[email protected]"
] | |
92785644c971f51c1aff7d47c833a14c473328a3 | dea5dfdc661309fa26fc93d4884fbcbb75e4d8a0 | /Back_ground/control/classcontrol.py | be904879e1ec0441ee72a9fc24b89447004fa001 | [
"Apache-2.0"
] | permissive | sherwel/Behavior_culture | 0583ba061d7a2feba8f45c2b574005fcc76e4cfd | 43b68e50aed369aacefc5435ec4f9b70960d2344 | refs/heads/master | 2021-01-10T03:47:20.328109 | 2016-02-29T04:42:49 | 2016-02-29T04:42:49 | 50,472,281 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,199 | py | #!/usr/bin/python
#coding:utf-8
from tool import SQLTool ,config
from Back_ground.model import takeclass,Class
# Assumed import path for the school model used by loadclass()/classadd().
from Back_ground.model import school
limitpage=15
localconfig=config.Config()
def haveclassshow(schoolid='',classid='',teacherid='',page='0'):
validresult=False
request_params=[]
values_params=[]
if schoolid!='':
request_params.append('schoolId')
values_params.append(SQLTool.formatstring(schoolid))
if classid!='':
request_params.append('t_classes.classId')
values_params.append(SQLTool.formatstring(classid))
if teacherid!='':
request_params.append('teacherId')
values_params.append(SQLTool.formatstring(teacherid))
request_params.append('t_teach.teacherId')
values_params.append('t_teachers.teacherId')
DBhelp=SQLTool.DBmanager()
DBhelp.connectdb()
table=localconfig.teachertable
result,content,count,col=DBhelp.searchtableinfo_byparams([table,localconfig.teachtable], ['t_teachers.teacherId','masterId','time','schoolId','teacherName','classId'], request_params, values_params)
if count == 0:
pagecount = 0;
elif count %limitpage> 0:
# pagecount = math.ceil(count / limitpage)
pagecount=int((count+limitpage-1)/limitpage)
else:
pagecount = count / limitpage
# print pagecount
if pagecount>0:
limit=' limit '+str(int(page)*limitpage)+','+str(limitpage)
result,content,count,col=DBhelp.searchtableinfo_byparams([table,localconfig.teachtable], ['t_teachers.teacherId','masterId','time','schoolId','teacherName','classId'], request_params, values_params,limit,order='time desc')
DBhelp.closedb()
classes=[]
if count>0:
validresult=True
for temp in result :
aclass=takeclass.Takeclass(teacherid=temp['teacherId'],schoolid=temp['schoolId'],masterid=temp['masterId'],time=temp['time'],teachername=temp['teacherName'],classid=temp['classId'])
classes.append(aclass)
return classes,count,pagecount
return [],0,pagecount
def classshow(schoolname='',schoolid='',gradeid='',classid='',classname='',page='0'):
validresult=False
request_params=[]
values_params=[]
if schoolname!='':
request_params.append('schoolName')
values_params.append(SQLTool.formatstring(schoolname))
if gradeid!='':
request_params.append('t_classes.gradeId')
values_params.append(SQLTool.formatstring(gradeid))
if classid!='':
request_params.append('t_classes.classId')
values_params.append(SQLTool.formatstring(classid))
if classname!='':
request_params.append('t_class_name.className')
values_params.append(SQLTool.formatstring(classname))
if schoolid!='':
request_params.append('t_classes.schoolId')
values_params.append(SQLTool.formatstring(schoolid))
request_params.append('t_school.schoolId')
values_params.append('t_classes.schoolId')
request_params.append('t_classes.classId')
values_params.append('t_class_name.classId')
DBhelp=SQLTool.DBmanager()
DBhelp.connectdb()
table=localconfig.schooltable
result,content,count,col=DBhelp.searchtableinfo_byparams([table,localconfig.classtable,localconfig.classnametable], ['schoolName','t_classes.schoolId','t_classes.gradeId','cId','t_class_name.className','t_classes.classId'], request_params, values_params)
if count == 0:
pagecount = 0;
elif count %limitpage> 0:
# pagecount = math.ceil(count / limitpage)
pagecount=int((count+limitpage-1)/limitpage)
else:
pagecount = count / limitpage
# print pagecount
if pagecount>0:
limit=' limit '+str(int(page)*limitpage)+','+str(limitpage)
result,content,count,col=DBhelp.searchtableinfo_byparams([table,localconfig.classtable,localconfig.classnametable], ['schoolName','t_classes.schoolId','t_classes.gradeId','cId','t_class_name.className','t_classes.classId'], request_params, values_params,limit,order='schoolId desc')
DBhelp.closedb()
classes=[]
if count>0:
validresult=True
for temp in result :
aclass=Class.Class(schoolname=temp['schoolName'],schoolid=temp['schoolId'],gradeid=temp['gradeId'],cid=temp['cId'],classname=temp['className'],classid=temp['classId'])
classes.append(aclass)
return classes,count,pagecount
return [],0,pagecount
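# Usage sketch (hypothetical ids): first page of classes for one school.
#   classes, count, pagecount = classshow(schoolid='1001', page='0')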
## count is the number of rows in the result, col is the number of columns; count and pagecount are both ints
def loadclass(request,username=''):
schoolname=request.POST.get('schoolname','')
schoolid=request.POST.get('schoolid','')
province=request.POST.get('province','')
city=request.POST.get('city','')
starttime=request.POST.get('starttime','')
tempschool=None
if schoolid=='' or schoolname=='':
return tempschool,False
    # starttime is collected above; pass it through (assumes the School
    # constructor accepts a starttime kwarg, cf. getStarttime() in classadd).
    tempschool=school.School(schoolname=schoolname,schoolid=schoolid,province=province,city=city,starttime=starttime)
return tempschool,True
def classadd(school):
schoolname=school.getSchoolname()
schoolid=school.getSchoolid()
province=school.getProvince()
city=school.getCity()
starttime=school.getStarttime()
request_params=[]
values_params=[]
if schoolname!='':
request_params.append('schoolName')
values_params.append(SQLTool.formatstring(schoolname))
if schoolid!='':
request_params.append('schoolId')
values_params.append(SQLTool.formatstring(schoolid))
if province!='':
request_params.append('province')
values_params.append(SQLTool.formatstring(province))
if city!='':
request_params.append('city')
values_params.append(SQLTool.formatstring(city))
if starttime!='':
request_params.append('starttime')
values_params.append(SQLTool.formatstring(starttime))
table=localconfig.schooltable
DBhelp=SQLTool.DBmanager()
DBhelp.connectdb()
tempresult=DBhelp.inserttableinfo_byparams(table=table, select_params=request_params,insert_values= [tuple(values_params)])
DBhelp.closedb()
return tempresult
def classupdate(schoolname='',schoolid='',province='',city='',starttime=''):
request_params=[]
values_params=[]
wset_params=[]
wand_params=[]
if schoolname!='':
request_params.append('schoolName')
values_params.append(SQLTool.formatstring(schoolname))
if schoolid!='':
request_params.append('schoolId')
values_params.append(SQLTool.formatstring(schoolid))
if province!='':
request_params.append('province')
values_params.append(SQLTool.formatstring(province))
if city!='':
request_params.append('city')
values_params.append(SQLTool.formatstring(city))
if starttime!='':
request_params.append('starttime')
values_params.append(SQLTool.formatstring(starttime))
table=localconfig.schooltable
DBhelp=SQLTool.DBmanager()
DBhelp.connectdb()
tempresult=DBhelp.updatetableinfo_byparams([table],request_params,values_params,wset_params,wand_params)
DBhelp.closedb()
return tempresult
# ==== mimcomp/caseke - zarc/old/views_2017-05-20-19:22:30.py (MIT) ====
# coding: utf-8
# AUTOGENERATED BY gen_script.sh from kpony3.py
# Copyright (C) Nyimbi Odero, Sat May 20 19:21:41 EAT 2017
import calendar
from flask import redirect, flash, url_for, Markup
from flask import render_template
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.views import ModelView, BaseView, MasterDetailView, MultipleView, RestCRUDView, CompactCRUDMixin
from flask_appbuilder import action, expose, has_access
from flask_appbuilder.charts.views import ChartView, TimeChartView, GroupByChartView
from flask_appbuilder.models.group import aggregate_count
from flask_appbuilder.widgets import ListThumbnail, ListWidget
from flask_appbuilder.widgets import FormVerticalWidget, FormInlineWidget, FormHorizontalWidget, ShowBlockWidget
from flask_appbuilder.models.sqla.filters import FilterStartsWith, FilterEqualFunction as FA
from app import appbuilder, db
from .models import *
# Basic Lists
hide_list = ['created_by', 'changed_by', 'created_on', 'changed_on']
#To pretty Print from PersonMixin
def pretty_month_year(value):
return calendar.month_name[value.month] + ' ' + str(value.year)
def pretty_year(value):
return str(value.year)
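# For example, pretty_month_year(datetime.date(2017, 5, 20)) returns 'May 2017'
# and pretty_year(...) returns '2017'; both are used as GroupByChartView
# 'formatter' callables in the chart views below.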
class AttorneyView(ModelView):  # MasterDetailView, MultipleView
    datamodel = SQLAInterface(Attorney, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Multi-select delete action for the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
class BailView(ModelView):  # MasterDetailView, MultipleView
    datamodel = SQLAInterface(Bail, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Multi-select delete action for the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
class CaseView(ModelView):  # MasterDetailView, MultipleView
    datamodel = SQLAInterface(Case, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Multi-select delete action for the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
class CauseofactionView(CompactCRUDMixin, ModelView):  # MasterDetailView, MultipleView
    datamodel = SQLAInterface(Causeofaction, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Multi-select delete action for the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
class ConstituencyView(ModelView):  # MasterDetailView, MultipleView
    datamodel = SQLAInterface(Constituency, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Multi-select delete action for the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
class CountyView(ModelView):  # MasterDetailView, MultipleView
    datamodel = SQLAInterface(County, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Multi-select delete action for the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
class CourtView(ModelView):  # MasterDetailView, MultipleView
    datamodel = SQLAInterface(Court, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Multi-select delete action for the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
class CourtlevelView(ModelView):  # MasterDetailView, MultipleView
    datamodel = SQLAInterface(Courtlevel, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Multi-select delete action for the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
class DefendantView(ModelView):  # MasterDetailView, MultipleView
    datamodel = SQLAInterface(Defendant, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Multi-select delete action for the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
class DoctemplateView(ModelView):  # MasterDetailView, MultipleView
    datamodel = SQLAInterface(Doctemplate, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Multi-select delete action for the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
class FilingView(ModelView):  # MasterDetailView, MultipleView
    datamodel = SQLAInterface(Filing, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Multi-select delete action for the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
class FilingtypeView(ModelView):  # MasterDetailView, MultipleView
    datamodel = SQLAInterface(Filingtype, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Multi-select delete action for the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
class HearingView(ModelView):  # MasterDetailView, MultipleView
    datamodel = SQLAInterface(Hearing, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Multi-select delete action for the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
class HearingtypeView(ModelView):  # MasterDetailView, MultipleView
    datamodel = SQLAInterface(Hearingtype, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Multi-select delete action for the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
class InvestigationView(ModelView):  # MasterDetailView, MultipleView
    datamodel = SQLAInterface(Investigation, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Multi-select delete action for the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
class JudgeView(ModelView):  # MasterDetailView, MultipleView
    datamodel = SQLAInterface(Judge, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Multi-select delete action for the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
class LawfirmView(ModelView):  # MasterDetailView, MultipleView
    datamodel = SQLAInterface(Lawfirm, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Multi-select delete action for the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
class NatureofsuitView(ModelView):  # MasterDetailView, MultipleView
    datamodel = SQLAInterface(Natureofsuit, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Multi-select delete action for the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
class ObserverView(ModelView):  # MasterDetailView, MultipleView
    datamodel = SQLAInterface(Observer, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Multi-select delete action for the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
class PlaintiffView(ModelView):  # MasterDetailView, MultipleView
    datamodel = SQLAInterface(Plaintiff, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Multi-select delete action for the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
class PolicemanView(ModelView):  # MasterDetailView, MultipleView
    datamodel = SQLAInterface(Policeman, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Multi-select delete action for the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
class PoliceroleView(ModelView):  # MasterDetailView, MultipleView
    datamodel = SQLAInterface(Policerole, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Multi-select delete action for the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
class PolicestationView(ModelView):  # MasterDetailView, MultipleView
    datamodel = SQLAInterface(Policestation, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Multi-select delete action for the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
class PrisonView(ModelView):  # MasterDetailView, MultipleView
    datamodel = SQLAInterface(Prison, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Multi-select delete action for the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
class PrisonremandView(ModelView):  # MasterDetailView, MultipleView
    datamodel = SQLAInterface(Prisonremand, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Multi-select delete action for the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
class ProsecutorView(ModelView):  # MasterDetailView, MultipleView
    datamodel = SQLAInterface(Prosecutor, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Multi-select delete action for the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
class ProsecutorteamView(ModelView):  # MasterDetailView, MultipleView
    datamodel = SQLAInterface(Prosecutorteam, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Multi-select delete action for the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
class SubcountyView(ModelView):  # MasterDetailView, MultipleView
    datamodel = SQLAInterface(Subcounty, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Multi-select delete action for the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
class SuretyView(ModelView):  # MasterDetailView, MultipleView
    datamodel = SQLAInterface(Surety, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Multi-select delete action for the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
class TownView(ModelView):  # MasterDetailView, MultipleView
    datamodel = SQLAInterface(Town, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Multi-select delete action for the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
class AttorneyChartView(GroupByChartView):
datamodel = SQLAInterface(Attorney , db.session)
chart_title = 'Grouped Attorney by Birth'
label_columns = AttorneyView.label_columns
chart_type = 'PieChart'
definitions = [
{
'group' : 'age_today',
"series" : [(aggregate_count,"age_today")]
},
{
'group' : 'gender',
"series" : [(aggregate_count,"age_today")]
}
]
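    # Each dict in `definitions` is one selectable grouping on the chart page:
    # rows are grouped by the 'group' column and every (function, column) pair in
    # 'series' is aggregated per group; here a plain row count per age or gender.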
class AttorneyTimeChartView(GroupByChartView):
datamodel = SQLAInterface(Attorney , db.session)
chart_title = 'Grouped Birth Attorney'
chart_type = 'AreaChart'
label_columns = AttorneyView.label_columns
definitions = [
{
'group' : 'age_today',
'formatter': pretty_month_year,
"series" : [(aggregate_count,"age_today")]
},
{
'group': 'age_today',
'formatter': pretty_year,
"series" : [(aggregate_count,"age_today")]
}
]
class PlaintiffChartView(GroupByChartView):
datamodel = SQLAInterface(Plaintiff , db.session)
chart_title = 'Grouped Plaintiff by Birth'
label_columns = PlaintiffView.label_columns
chart_type = 'PieChart'
definitions = [
{
'group' : 'age_today',
"series" : [(aggregate_count,"age_today")]
},
{
'group' : 'gender',
"series" : [(aggregate_count,"age_today")]
}
]
class PlaintiffTimeChartView(GroupByChartView):
datamodel = SQLAInterface(Plaintiff , db.session)
chart_title = 'Grouped Birth Plaintiff'
chart_type = 'AreaChart'
label_columns = PlaintiffView.label_columns
definitions = [
{
'group' : 'age_today',
'formatter': pretty_month_year,
"series" : [(aggregate_count,"age_today")]
},
{
'group': 'age_today',
'formatter': pretty_year,
"series" : [(aggregate_count,"age_today")]
}
]
class ObserverChartView(GroupByChartView):
datamodel = SQLAInterface(Observer , db.session)
chart_title = 'Grouped Observer by Birth'
label_columns = ObserverView.label_columns
chart_type = 'PieChart'
definitions = [
{
'group' : 'age_today',
"series" : [(aggregate_count,"age_today")]
},
{
'group' : 'gender',
"series" : [(aggregate_count,"age_today")]
}
]
class ObserverTimeChartView(GroupByChartView):
datamodel = SQLAInterface(Observer , db.session)
chart_title = 'Grouped Birth Observer'
chart_type = 'AreaChart'
label_columns = ObserverView.label_columns
definitions = [
{
'group' : 'age_today',
'formatter': pretty_month_year,
"series" : [(aggregate_count,"age_today")]
},
{
'group': 'age_today',
'formatter': pretty_year,
"series" : [(aggregate_count,"age_today")]
}
]
class SuretyChartView(GroupByChartView):
datamodel = SQLAInterface(Surety , db.session)
chart_title = 'Grouped Surety by Birth'
label_columns = SuretyView.label_columns
chart_type = 'PieChart'
definitions = [
{
'group' : 'age_today',
"series" : [(aggregate_count,"age_today")]
},
{
'group' : 'gender',
"series" : [(aggregate_count,"age_today")]
}
]
class SuretyTimeChartView(GroupByChartView):
datamodel = SQLAInterface(Surety , db.session)
chart_title = 'Grouped Birth Surety'
chart_type = 'AreaChart'
label_columns = SuretyView.label_columns
definitions = [
{
'group' : 'age_today',
'formatter': pretty_month_year,
"series" : [(aggregate_count,"age_today")]
},
{
'group': 'age_today',
'formatter': pretty_year,
"series" : [(aggregate_count,"age_today")]
}
]
class ProsecutorChartView(GroupByChartView):
datamodel = SQLAInterface(Prosecutor , db.session)
chart_title = 'Grouped Prosecutor by Birth'
label_columns = ProsecutorView.label_columns
chart_type = 'PieChart'
definitions = [
{
'group' : 'age_today',
"series" : [(aggregate_count,"age_today")]
},
{
'group' : 'gender',
"series" : [(aggregate_count,"age_today")]
}
]
class ProsecutorTimeChartView(GroupByChartView):
datamodel = SQLAInterface(Prosecutor , db.session)
chart_title = 'Grouped Birth Prosecutor'
chart_type = 'AreaChart'
label_columns = ProsecutorView.label_columns
definitions = [
{
'group' : 'age_today',
'formatter': pretty_month_year,
"series" : [(aggregate_count,"age_today")]
},
{
'group': 'age_today',
'formatter': pretty_year,
"series" : [(aggregate_count,"age_today")]
}
]
class PolicemanChartView(GroupByChartView):
datamodel = SQLAInterface(Policeman , db.session)
chart_title = 'Grouped Policeman by Birth'
label_columns = PolicemanView.label_columns
chart_type = 'PieChart'
definitions = [
{
'group' : 'age_today',
"series" : [(aggregate_count,"age_today")]
},
{
'group' : 'gender',
"series" : [(aggregate_count,"age_today")]
}
]
class PolicemanTimeChartView(GroupByChartView):
datamodel = SQLAInterface(Policeman , db.session)
chart_title = 'Grouped Birth Policeman'
chart_type = 'AreaChart'
label_columns = PolicemanView.label_columns
definitions = [
{
'group' : 'age_today',
'formatter': pretty_month_year,
"series" : [(aggregate_count,"age_today")]
},
{
'group': 'age_today',
'formatter': pretty_year,
"series" : [(aggregate_count,"age_today")]
}
]
class JudgeChartView(GroupByChartView):
datamodel = SQLAInterface(Judge , db.session)
chart_title = 'Grouped Judge by Birth'
label_columns = JudgeView.label_columns
chart_type = 'PieChart'
definitions = [
{
'group' : 'age_today',
"series" : [(aggregate_count,"age_today")]
},
{
'group' : 'gender',
"series" : [(aggregate_count,"age_today")]
}
]
class JudgeTimeChartView(GroupByChartView):
datamodel = SQLAInterface(Judge , db.session)
chart_title = 'Grouped Birth Judge'
chart_type = 'AreaChart'
label_columns = JudgeView.label_columns
definitions = [
{
'group' : 'age_today',
'formatter': pretty_month_year,
"series" : [(aggregate_count,"age_today")]
},
{
'group': 'age_today',
'formatter': pretty_year,
"series" : [(aggregate_count,"age_today")]
}
]
class DefendantChartView(GroupByChartView):
datamodel = SQLAInterface(Defendant , db.session)
chart_title = 'Grouped Defendant by Birth'
label_columns = DefendantView.label_columns
chart_type = 'PieChart'
definitions = [
{
'group' : 'age_today',
"series" : [(aggregate_count,"age_today")]
},
{
'group' : 'gender',
"series" : [(aggregate_count,"age_today")]
}
]
class DefendantTimeChartView(GroupByChartView):
datamodel = SQLAInterface(Defendant , db.session)
chart_title = 'Grouped Birth Defendant'
chart_type = 'AreaChart'
label_columns = DefendantView.label_columns
definitions = [
{
'group' : 'age_today',
'formatter': pretty_month_year,
"series" : [(aggregate_count,"age_today")]
},
{
'group': 'age_today',
'formatter': pretty_year,
"series" : [(aggregate_count,"age_today")]
}
]
# How to create a MasterDetailView
#class DetailView(ModelView):
# datamodel = SQLAInterface(DetailTable, db.session)
#class MasterView(MasterDetailView):
# datamodel = SQLAInterface(MasterTable, db.session)
# related_views = [DetailView]
# How to create a MultipleView
#class MultipleViewsExp(MultipleView):
# views = [GroupModelView, ContactModelView]
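# A concrete sketch pairing two of the views defined above; the particular
# pairing is an illustrative assumption, and the class is deliberately left
# unregistered:
class CaseHearingMultipleView(MultipleView):
    views = [CaseView, HearingView]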
#View Registration
db.create_all()
appbuilder.add_view(AttorneyView(), "Attorneys", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(BailView(), "Bails", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(CaseView(), "Cases", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(CauseofactionView(), "Causeofactions", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(ConstituencyView(), "Constituencies", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(CountyView(), "Counties", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(CourtView(), "Courts", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(CourtlevelView(), "Courtlevels", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(DefendantView(), "Defendants", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(DoctemplateView(), "Doctemplates", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(FilingView(), "Filings", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(FilingtypeView(), "Filingtypes", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(HearingView(), "Hearings", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(HearingtypeView(), "Hearingtypes", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(InvestigationView(), "Investigations", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(JudgeView(), "Judges", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(LawfirmView(), "Lawfirms", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(NatureofsuitView(), "Natureofsuits", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(ObserverView(), "Observers", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(PlaintiffView(), "Plaintiffs", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(PolicemanView(), "Policemen", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(PoliceroleView(), "Policeroles", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(PolicestationView(), "Policestations", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(PrisonView(), "Prisons", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(PrisonremandView(), "Prisonremands", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(ProsecutorView(), "Prosecutors", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(ProsecutorteamView(), "Prosecutorteams", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(SubcountyView(), "Subcounties", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(SuretyView(), "Sureties", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(TownView(), "Towns", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(AttorneyChartView(), 'Attorney Age Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(AttorneyTimeChartView(), 'Attorney Time Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(PlaintiffChartView(), 'Plaintiff Age Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(PlaintiffTimeChartView(), 'Plaintiff Time Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(ObserverChartView(), 'Observer Age Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(ObserverTimeChartView(), 'Observer Time Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(SuretyChartView(), 'Surety Age Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(SuretyTimeChartView(), 'Surety Time Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(ProsecutorChartView(), 'Prosecutor Age Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(ProsecutorTimeChartView(), 'Prosecutor Time Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(PolicemanChartView(), 'Policeman Age Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(PolicemanTimeChartView(), 'Policeman Time Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(JudgeChartView(), 'Judge Age Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(JudgeTimeChartView(), 'Judge Time Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(DefendantChartView(), 'Defendant Age Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(DefendantTimeChartView(), 'Defendant Time Chart', icon='fa-dashboard', category='Reports')
#appbuilder.add_separator("Setup")
#appbuilder.add_separator("My Views")
#appbuilder.add_link(name, href, icon='', label='', category='', category_icon='', category_label='', baseview=None)
# ==== luisfdez/koku - koku/api/resource_types/azure_regions/view.py (Apache-2.0) ====
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""View for Azure Region locations."""
from django.db.models import F
from django.utils.decorators import method_decorator
from django.views.decorators.vary import vary_on_headers
from rest_framework import filters
from rest_framework import generics
from api.common import CACHE_RH_IDENTITY_HEADER
from api.common.permissions.azure_access import AzureAccessPermission
from api.resource_types.serializers import ResourceTypeSerializer
from reporting.provider.azure.models import AzureCostSummaryByLocation
from reporting.provider.azure.openshift.models import OCPAzureCostSummaryByLocation
class AzureRegionView(generics.ListAPIView):
"""API GET list view for Azure Region locations."""
queryset = (
AzureCostSummaryByLocation.objects.annotate(**{"value": F("resource_location")})
.values("value")
.distinct()
.filter(resource_location__isnull=False)
)
serializer_class = ResourceTypeSerializer
permission_classes = [AzureAccessPermission]
filter_backends = [filters.OrderingFilter, filters.SearchFilter]
ordering = ["value"]
search_fields = ["$value"]
@method_decorator(vary_on_headers(CACHE_RH_IDENTITY_HEADER))
def list(self, request):
        # Read the user's Azure subscription GUID access and return only the regions that user may view.
user_access = []
openshift = self.request.query_params.get("openshift")
if openshift == "true":
self.queryset = (
OCPAzureCostSummaryByLocation.objects.annotate(**{"value": F("resource_location")})
.values("value")
.distinct()
.filter(resource_location__isnull=False)
)
if request.user.admin:
return super().list(request)
elif request.user.access:
user_access = request.user.access.get("azure.subscription_guid", {}).get("read", [])
self.queryset = self.queryset.values("value").filter(subscription_guid__in=user_access)
return super().list(request)
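# Illustrative request (the URL route is an assumption for illustration; routing
# is registered elsewhere in koku, not in this module):
#   GET .../resource-types/azure-regions/?openshift=true
# returns only regions from OpenShift-on-Azure cost summaries, filtered to the
# subscription GUIDs the caller is allowed to read.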
# ==== x2ever/Snake-Game - gym_SnakeGame/envs/__init__.py ====
from gym_SnakeGame.envs.SnakeGame import SnakeGameEnv
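# Re-exported so a Gym `register()` entry point (conventionally placed in the
# package's top-level __init__.py, which is not part of this file) can refer to
# the environment as "gym_SnakeGame.envs:SnakeGameEnv".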
# ==== napalm-automation/napalm-yang - napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/use_multiple_paths/ebgp/state/__init__.py (Apache-2.0) ====
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/peer-groups/peer-group/use-multiple-paths/ebgp/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State information relating to eBGP multipath
"""
__slots__ = (
"_path_helper", "_extmethods", "__allow_multiple_as", "__maximum_paths"
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__allow_multiple_as = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="allow-multiple-as",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
self.__maximum_paths = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
default=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
)(
1
),
is_leaf=True,
yang_name="maximum-paths",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"peer-groups",
"peer-group",
"use-multiple-paths",
"ebgp",
"state",
]
def _get_allow_multiple_as(self):
"""
Getter method for allow_multiple_as, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/use_multiple_paths/ebgp/state/allow_multiple_as (boolean)
YANG Description: Allow multipath to use paths from different neighbouring
ASes. The default is to only consider multiple paths from
the same neighbouring AS.
"""
return self.__allow_multiple_as
def _set_allow_multiple_as(self, v, load=False):
"""
Setter method for allow_multiple_as, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/use_multiple_paths/ebgp/state/allow_multiple_as (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_allow_multiple_as is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_allow_multiple_as() directly.
YANG Description: Allow multipath to use paths from different neighbouring
ASes. The default is to only consider multiple paths from
the same neighbouring AS.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="allow-multiple-as",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """allow_multiple_as must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="allow-multiple-as", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__allow_multiple_as = t
if hasattr(self, "_set"):
self._set()
def _unset_allow_multiple_as(self):
self.__allow_multiple_as = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="allow-multiple-as",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
def _get_maximum_paths(self):
"""
Getter method for maximum_paths, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/use_multiple_paths/ebgp/state/maximum_paths (uint32)
YANG Description: Maximum number of parallel paths to consider when using
BGP multipath. The default is use a single path.
"""
return self.__maximum_paths
def _set_maximum_paths(self, v, load=False):
"""
Setter method for maximum_paths, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/use_multiple_paths/ebgp/state/maximum_paths (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_maximum_paths is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_maximum_paths() directly.
YANG Description: Maximum number of parallel paths to consider when using
BGP multipath. The default is use a single path.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
default=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
)(
1
),
is_leaf=True,
yang_name="maximum-paths",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """maximum_paths must be of a type compatible with uint32""",
"defined-type": "uint32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="maximum-paths", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
}
)
self.__maximum_paths = t
if hasattr(self, "_set"):
self._set()
def _unset_maximum_paths(self):
self.__maximum_paths = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
default=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
)(
1
),
is_leaf=True,
yang_name="maximum-paths",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
allow_multiple_as = __builtin__.property(_get_allow_multiple_as)
maximum_paths = __builtin__.property(_get_maximum_paths)
_pyangbind_elements = OrderedDict(
[("allow_multiple_as", allow_multiple_as), ("maximum_paths", maximum_paths)]
)
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/peer-groups/peer-group/use-multiple-paths/ebgp/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State information relating to eBGP multipath
"""
__slots__ = (
"_path_helper", "_extmethods", "__allow_multiple_as", "__maximum_paths"
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__allow_multiple_as = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="allow-multiple-as",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
self.__maximum_paths = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
default=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
)(
1
),
is_leaf=True,
yang_name="maximum-paths",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"peer-groups",
"peer-group",
"use-multiple-paths",
"ebgp",
"state",
]
def _get_allow_multiple_as(self):
"""
Getter method for allow_multiple_as, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/use_multiple_paths/ebgp/state/allow_multiple_as (boolean)
YANG Description: Allow multipath to use paths from different neighbouring
ASes. The default is to only consider multiple paths from
the same neighbouring AS.
"""
return self.__allow_multiple_as
def _set_allow_multiple_as(self, v, load=False):
"""
Setter method for allow_multiple_as, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/use_multiple_paths/ebgp/state/allow_multiple_as (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_allow_multiple_as is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_allow_multiple_as() directly.
YANG Description: Allow multipath to use paths from different neighbouring
ASes. The default is to only consider multiple paths from
the same neighbouring AS.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="allow-multiple-as",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """allow_multiple_as must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="allow-multiple-as", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__allow_multiple_as = t
if hasattr(self, "_set"):
self._set()
def _unset_allow_multiple_as(self):
self.__allow_multiple_as = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="allow-multiple-as",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
def _get_maximum_paths(self):
"""
Getter method for maximum_paths, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/use_multiple_paths/ebgp/state/maximum_paths (uint32)
YANG Description: Maximum number of parallel paths to consider when using
BGP multipath. The default is use a single path.
"""
return self.__maximum_paths
def _set_maximum_paths(self, v, load=False):
"""
Setter method for maximum_paths, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/use_multiple_paths/ebgp/state/maximum_paths (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_maximum_paths is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_maximum_paths() directly.
YANG Description: Maximum number of parallel paths to consider when using
BGP multipath. The default is use a single path.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
default=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
)(
1
),
is_leaf=True,
yang_name="maximum-paths",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """maximum_paths must be of a type compatible with uint32""",
"defined-type": "uint32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="maximum-paths", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
}
)
self.__maximum_paths = t
if hasattr(self, "_set"):
self._set()
def _unset_maximum_paths(self):
self.__maximum_paths = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
default=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
)(
1
),
is_leaf=True,
yang_name="maximum-paths",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
allow_multiple_as = __builtin__.property(_get_allow_multiple_as)
maximum_paths = __builtin__.property(_get_maximum_paths)
_pyangbind_elements = OrderedDict(
[("allow_multiple_as", allow_multiple_as), ("maximum_paths", maximum_paths)]
)
# ===== file: /Codewars/Python/6kyu/6kyu_Sort the odd.py (repo: KonradMarzec1991/Codewars-LeetCode, no license stated) =====
def sort_array(source_array):
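    """Return the array with the odd numbers sorted ascending and evens left in place."""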
sorted_odds = sorted([d for d in source_array if d % 2 == 1])
startpoint = 0
for d in range(len(source_array)):
if source_array[d] % 2 == 1:
source_array[d] = sorted_odds[startpoint]
startpoint += 1
return source_array
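# Example: sort_array([5, 3, 2, 8, 1, 4]) -> [1, 3, 2, 8, 5, 4]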
# ===== file: /pc1/models.py (repo: abbasgis/ferrp, no license stated) =====
from django.db import models
# Create your models here.
class AdpDraft201718Vw(models.Model):
gs_no = models.TextField(primary_key=True) # This field type is a guess.
s_name = models.TextField(blank=True, null=True) # This field type is a guess.
district = models.TextField(blank=True, null=True) # This field type is a guess.
tehsil = models.TextField(blank=True, null=True) # This field type is a guess.
s_type = models.TextField(blank=True, null=True) # This field type is a guess.
sec_name = models.TextField(blank=True, null=True) # This field type is a guess.
sec_id = models.TextField(blank=True, null=True) # This field type is a guess.
approval_date = models.TextField(blank=True, null=True) # This field type is a guess.
cost_total = models.TextField(blank=True, null=True) # This field type is a guess.
foreign_aid = models.TextField(blank=True, null=True) # This field type is a guess.
local_capital = models.TextField(blank=True, null=True) # This field type is a guess.
local_revenue = models.TextField(blank=True, null=True) # This field type is a guess.
capital_total = models.TextField(blank=True, null=True) # This field type is a guess.
revenue_total = models.TextField(blank=True, null=True) # This field type is a guess.
foreign_capital = models.TextField(blank=True, null=True) # This field type is a guess.
foreign_revenue = models.TextField(blank=True, null=True) # This field type is a guess.
allocation = models.TextField(blank=True, null=True) # This field type is a guess.
exp_upto_june = models.TextField(blank=True, null=True) # This field type is a guess.
projection_2017_18 = models.TextField(db_column='projection_2017-18', blank=True,
null=True) # Field renamed to remove unsuitable characters. This field type is a guess.
projection_2018_19 = models.TextField(db_column='projection_2018-19', blank=True,
null=True) # Field renamed to remove unsuitable characters. This field type is a guess.
throw_forward = models.TextField(blank=True, null=True) # This field type is a guess.
monitoring = models.TextField(blank=True, null=True) # This field type is a guess.
start_date = models.TextField(blank=True, null=True)
end_date = models.TextField(blank=True, null=True)
cost_total_adp_origional = models.TextField(blank=True, null=True)
class Meta:
managed = False
db_table = 'adp_draft_201718_vw'
class TblSchemesHistory(models.Model):
id = models.AutoField(primary_key=True)
gs_no = models.IntegerField(blank=True, null=True)
project_location = models.TextField(blank=True, null=True)
authorities_responsible = models.TextField(blank=True, null=True)
plan_provision = models.TextField(blank=True, null=True)
project_objectives = models.TextField(blank=True, null=True)
annual_operating_cost = models.TextField(blank=True, null=True)
capital_cost_estimates = models.TextField(blank=True, null=True)
physical_plan = models.TextField(blank=True, null=True)
financial_plan = models.TextField(blank=True, null=True)
financial_plan_text = models.TextField(blank=True, null=True)
gantt_chart = models.TextField(blank=True, null=True)
demand_and_supply_analysis = models.TextField(blank=True, null=True)
benefits_of_the_projects_analysis = models.TextField(blank=True, null=True)
implementation_schedule = models.TextField(blank=True, null=True)
ms_and_mp = models.TextField(blank=True, null=True)
additional_projects_decisions_required = models.TextField(blank=True, null=True)
certified = models.TextField(blank=True, null=True)
class Meta:
managed = False
db_table = 'tbl_schemes_history'
class TblSchemesAnnexure(models.Model):
id = models.TextField(primary_key=True)
gs_no = models.TextField(blank=True, null=True)
annexure_title = models.TextField(blank=True, null=True)
annexure_data = models.TextField(blank=True, null=True)
class Meta:
managed = False
db_table = 'tbl_scheme_annexure'
class TblHelp(models.Model):
id = models.IntegerField(primary_key=True) # AutoField?
section_name = models.TextField(blank=True, null=True)
info_name = models.TextField(blank=True, null=True) # This field type is a guess.
help_image = models.BinaryField(blank=True, null=True)
class Meta:
managed = False
db_table = 'tbl_help'
# ===== file: /Incident-Response/Tools/cyphon/cyphon/alerts/migrations/0011_auto_20170815_1432.py (repo: foss2cyber/Incident-Playbook, licenses: mixed GPL/MIT, permissive) =====
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-15 18:32
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('alerts', '0010_remove_alert_tags'),
]
operations = [
migrations.AlterField(
model_name='alert',
name='distillery',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='alerts', related_query_name='alerts', to='distilleries.Distillery'),
),
]
# ===== file: /ScientificComputing/ch14/filter_equalizer.py (repo: socrates77-sh/learn, no license stated) =====
# -*- coding: utf-8 -*-
import scipy.signal as signal
import pylab as pl
import math
import numpy as np
def design_equalizer(freq, Q, gain, Fs):
    '''Design the coefficients of a biquad (second-order) peaking equalizer.'''
A = 10**(gain/40.0)
w0 = 2*math.pi*freq/Fs
alpha = math.sin(w0) / 2 / Q
b0 = 1 + alpha * A
b1 = -2*math.cos(w0)
b2 = 1 - alpha * A
a0 = 1 + alpha / A
a1 = -2*math.cos(w0)
a2 = 1 - alpha / A
return [b0/a0, b1/a0, b2/a0], [1.0, a1/a0, a2/a0]
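# Parameters: freq = center frequency in Hz, Q = quality factor (bandwidth),
# gain = boost/cut in dB, Fs = sampling rate in Hz; returns (b, a) coefficient
# lists usable with scipy.signal.freqz / lfilter.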
pl.figure(figsize=(8, 4))
for freq in [1000, 2000, 4000]:
for q in [0.5, 1.0]:
for p in [5, -5, -10]:
b, a = design_equalizer(freq, q, p, 44100)
w, h = signal.freqz(b, a)
            pl.semilogx(w/(2*np.pi)*44100, 20*np.log10(np.abs(h)))  # map rad/sample to Hz
pl.xlim(100, 22050)
pl.xlabel(u"频率(Hz)")
pl.ylabel(u"振幅(dB)")
pl.subplots_adjust(bottom=0.15)
pl.show()
# ===== file: /4_py_libro_1_pydroid/venv/4_py_libro_1_pydroid/COLECCIONES/py_1_collecciones.py (repo: JAreina/python, no license stated) =====
import collections
c = collections
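# Counter tallies hashable items; it accepts an iterable, a string, or a mapping.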
# JAreina
a = c.Counter([2,2,8,5,1,2,3,4,5,8])
print(a)
b = 'en un lugar de la mancha'
print( c.Counter(b))
d = ['a',"b",'c',"c",'z']
print(c.Counter(d))
print("\n ::::::::: update counter::::::::::::\n")
e = c.Counter()
print(e)
e.update("hola hola")
print(e)
#JAreina
e.update( {"a": 100} )
print(e)
# ===== file: /psou/pro1/pack3/class_ex9.py (repo: gom4851/hcjeon, no license stated) =====
'''
Created on 2018. 11. 8.
Inheritance demo
'''
class Person:
    say = "I am a person~"
    nai = '20'
    __kor = 59  # a leading __ name-mangles the attribute, making it effectively private
    def __init__(self, nai):
        print('Person constructor')
        self.nai = nai
    def printInfo(self):
        print('age:{}, saying:{}'.format(self.nai, self.say))
    def hello(self):
        print('hello')
        print('kor inside hello : ', self.__kor)
    @staticmethod
    def sbs(tel):
        print('sbs_tel : ', tel)  # runs independently of any instance members
        # self.hello  # not available here; use @staticmethod for work that needs no self
    @classmethod  # receives the class itself, so class attributes are reachable
    def mbc(cls):
        print('mbc_', cls.say, cls.nai, cls.__kor)
p = Person('22')
p.printInfo()
p.hello()
print('**' * 20)
class Employee(Person):
    say = "a working animal"  # attribute lookup checks the object's own class first
    subject = "extra"
    def __init__(self):
        print('Employee constructor')
    def EprintInfo(self):
        # a bare printInfo() here would look up a module-level function instead
        self.printInfo()
        super().printInfo()
e = Employee()
print(e.say, e.nai, e.subject)
e.printInfo()
e.EprintInfo()
print("**" * 20)
class Worker(Person):
def __init__(self, nai):
        print('Worker constructor')
self.nai = nai
def WprintInfo(self):
super().printInfo()
w = Worker('30')
w.WprintInfo()
print("~~" * 20)
class Programmer(Worker):
    def __init__(self, nai):
        print("Programmer constructor")
        #super().__init__(nai)  # bound call
        Worker.__init__(self, nai)  # unbound call
    def printInfo(self):
        print("overridden method")
    def WprintInfo(self):
        self.printInfo()  # lookup would continue up to Person if not overridden
        #print('kor from Worker : ', self._kor)
pr = Programmer(36)
pr.WprintInfo()
print()
a = 5
print(type(a))
print(Person.__bases__) # inspect the parent class(es) of a class
print(Programmer.__bases__)
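# The full method-resolution order can also be inspected directly:
print(Programmer.__mro__)  # Programmer -> Worker -> Person -> object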
pr.sbs('111-1111')
Person.sbs('222-2222')
pr.mbc()
Person.mbc()
# ===== file: /Python_codes/p02379/s325687867.py (repo: Aasthaengg/IBMdataset, no license stated) =====
import math
x1, y1, x2, y2 = map(float,input().split())
a = abs(x1-x2)
b = abs(y1-y2)
print(math.sqrt(a*a+b*b))
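# Equivalent one-liner: print(math.hypot(x1 - x2, y1 - y2))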
# ===== file: /setup.py (repo: r-swilderd/mi-instrument, license: BSD-2-Clause) =====
#!/usr/bin/env python
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
import os
import sys
# Add /usr/local/include to the path for macs, fixes easy_install for several packages (like gevent and pyyaml)
if sys.platform == 'darwin':
os.environ['C_INCLUDE_PATH'] = '/usr/local/include'
version = '0.2.1'
setup( name = 'marine-integrations',
version = version,
description = 'OOINet Marine Integrations',
url = 'https://github.com/ooici/marine-integrations',
download_url = 'http://sddevrepo.oceanobservatories.org/releases/',
license = 'BSD',
author = 'Ocean Observatories Initiative',
author_email = '[email protected]',
keywords = ['ooici'],
packages = find_packages(),
dependency_links = [
'http://sddevrepo.oceanobservatories.org/releases/',
'https://github.com/ooici/pyon/tarball/master#egg=pyon',
#'https://github.com/ooici/utilities/tarball/v2012.12.12#egg=utilities-2012.12.12',
],
test_suite = 'pyon',
entry_points = {
'console_scripts' : [
'package_driver=ion.idk.scripts.package_driver:run',
'start_driver=ion.idk.scripts.start_driver:run',
'test_driver=ion.idk.scripts.test_driver:run',
],
},
install_requires = [
'gitpy==0.6.0',
'snakefood==1.4',
'ntplib>=0.1.9',
'apscheduler==2.1.0',
#'utilities',
],
)
# ===== file: /controls/Control_Toolbox/steady_state_error.py (repo: cmontalvo251/Python, no license stated) =====
### Steady-state error demo: compare the open-loop and closed-loop
### step responses of G(s) = 3/(s+3) with an integral controller C(s) = 2/s.
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as I
import control as ctl
import scipy.signal as S
import scipy.linalg as slin
plt.close("all")
####Zeros Poles and Gains
###Zeros are the roots of the numerator
####Poles are the roots of the denominator
####Gain is the leading coefficient of numerator/leading coefficient of the denominator
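#### e.g. zeros=[], poles=[-3.0], gain=3.0 below gives G(s) = 3/(s+3)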
zeros = []
poles = [-3.0]
gain = 3.0
[N,D] = S.zpk2tf(zeros,poles,gain)
G = ctl.tf(N,D)
tout = np.linspace(0,5,1000)
tout,yout = ctl.step_response(G,tout)
zeros = []
poles = [0.0]
gain = 2.0
[N,D] = S.zpk2tf(zeros,poles,gain)
C = ctl.tf(N,D)
sys_closed_loop = C*G/(1+C*G)
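# Equivalent, and avoids the non-minimal realization: sys_closed_loop = ctl.feedback(C*G, 1)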
toutc,youtc = ctl.step_response(sys_closed_loop,tout)
plt.plot(tout,yout,'r-',label='Open Loop')
plt.plot(tout,youtc,'g-',label='Closed Loop')
plt.xlabel('Time (sec)')
plt.ylabel('State')
plt.legend()
plt.grid()
plt.show()
# ===== file: /11-dababase/sqlite/createTable.py (repo: meloLeeAnthony/PythonLearn, no license stated) =====
# coding=utf-8
'''
1. Import the sqlite3 module
2. Create a connection with sqlite3.connect()
3. Create a cursor object
4. Write the CREATE TABLE SQL statement
5. Execute the SQL
6. Close the connection
'''
import sqlite3
conn = None
# Create the connection
try:
conn = sqlite3.connect('e:/sqlite3Demo/demo.db')
    print('Connected to the sqlite database')
except Exception as e:
    print('Failed to connect to the sqlite database:', e)
if conn is not None:
    # Create the cursor object
    cur = conn.cursor()
    # Write the CREATE TABLE SQL statement
sql = '''create table t_person(
pno INTEGER primary key autoincrement,
pname VARCHAR not null,
age INTEGER
)'''
try:
        # Execute the SQL statement
        cur.execute(sql)
        print('Table created successfully')
except Exception as e:
print(e)
        print('Failed to create the table:', e)
finally:
        # Close the cursor
        cur.close()
        # Close the connection
        conn.close()
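# Alternative sketch: a sqlite3 connection is also a context manager;
#   with sqlite3.connect('demo.db') as conn:
#       conn.execute(sql)
# commits on success and rolls back on error (it does not close the connection).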
| [
"[email protected]"
] | |
1eb5b10cb25082008a50ed8a90198526794f3f9b | 531c47c15b97cbcb263ec86821d7f258c81c0aaf | /sdk/containerinstance/azure-mgmt-containerinstance/azure/mgmt/containerinstance/models/container_http_get_py3.py | d9ca73f638183794cf20b5a1d7c59793e37d8b7d | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | YijunXieMS/azure-sdk-for-python | be364d3b88204fd3c7d223df23756386ff7a3361 | f779de8e53dbec033f98f976284e6d9491fd60b3 | refs/heads/master | 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 | MIT | 2020-06-16T16:38:15 | 2019-08-30T21:08:55 | Python | UTF-8 | Python | false | false | 1,433 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ContainerHttpGet(Model):
"""The container Http Get settings, for liveness or readiness probe.
All required parameters must be populated in order to send to Azure.
:param path: The path to probe.
:type path: str
:param port: Required. The port number to probe.
:type port: int
:param scheme: The scheme. Possible values include: 'http', 'https'
:type scheme: str or ~azure.mgmt.containerinstance.models.enum
"""
_validation = {
'port': {'required': True},
}
_attribute_map = {
'path': {'key': 'path', 'type': 'str'},
'port': {'key': 'port', 'type': 'int'},
'scheme': {'key': 'scheme', 'type': 'str'},
}
def __init__(self, *, port: int, path: str=None, scheme=None, **kwargs) -> None:
super(ContainerHttpGet, self).__init__(**kwargs)
self.path = path
self.port = port
self.scheme = scheme
| [
"[email protected]"
] | |
06d91053430128ffbd2e417b7711666eb0bcf41a | 5dd47abf7061201d9378e73e51f08fbb314ba2fd | /envdsys/envdaq/migrations_old/0008_auto_20190215_2305.py | aaa12d1357f80fc2772ac6d5b38aa0b5464dee40 | [
"Unlicense"
] | permissive | NOAA-PMEL/envDataSystem | 4d264ae5209015e4faee648f37608d68a4461d0a | 4db4a3569d2329658799a3eef06ce36dd5c0597d | refs/heads/master | 2023-02-23T22:33:14.334737 | 2021-07-22T01:09:16 | 2021-07-22T01:09:16 | 191,809,007 | 1 | 0 | Unlicense | 2023-02-08T00:45:54 | 2019-06-13T17:50:03 | Python | UTF-8 | Python | false | false | 448 | py | # Generated by Django 2.1.5 on 2019-02-15 23:05
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('envdaq', '0007_auto_20190215_2300'),
]
operations = [
migrations.RemoveField(
model_name='devicedef',
name='contacts',
),
migrations.RemoveField(
model_name='instrumentdef',
name='measurements',
),
]
| [
"[email protected]"
] | |
7e94fa976fa69e30a6a8fecb112d0151ab64e36a | 14d66cef63d9e540f4a7dd76e8810b7f39c9e536 | /viewer.py | 81b3111c4649653ed33f54c8cbfa293e9a3acd44 | [
"MIT"
] | permissive | YarinAVI/GraphLayout | 5327daebdb45f8273cf75c53341bc02f42d293c7 | 187ea93b06730186ec25dd71a28dd2ccdfaa4a7c | refs/heads/master | 2021-06-17T04:21:02.746604 | 2017-05-31T16:23:20 | 2017-05-31T16:23:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,692 | py | import layout
import render
import wx
TESTS = [
[('t', 'te'), ('t', 'ti'), ('t', 'to'), ('te', 'tea'), ('te', 'ten'), ('tea', 'team'), ('ti', 'tin'), ('tin', 'tine'), ('to', 'ton'), ('ton', 'tone')],
[(5, 11), (11, 10), (11, 2), (3, 10), (3, 8), (8, 9), (11, 9), (7, 8), (7, 11)],
[(1, 2), (1, 5), (2, 5), (2, 3), (3, 4), (4, 5), (4, 6)],
[(0, 1), (0, 2), (0, 3), (0, 4), (1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)],
[(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)],
[(0, 1), (1, 2), (2, 0)],
[(1, 2), (1, 5), (1, 8), (5, 6), (2, 3), (3, 4), (4, 2), (6, 7), (6, 8), (6, 3)],
[(1, 2), (1, 3), (1, 4), (2, 4), (2, 5), (3, 6), (4, 3), (4, 6), (4, 7), (5, 4), (5, 7), (7, 6)],
[(1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 1), (1, 4), (2, 5), (3, 6)],
[(1, 3), (3, 2), (2, 1), (3, 5), (4, 1), (4, 2), (4, 12), (4, 13), (5, 6), (5, 8), (6, 7), (6, 8), (6, 10), (7, 10), (8, 9), (8, 10), (9, 5), (9, 11), (10, 9), (10, 11), (10, 14), (11, 12), (11, 14), (12, 13), (13, 11), (13, 15), (14, 13), (15, 14)],
[(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9)],
[(0, 1), (0, 3), (1, 4), (1, 2), (2, 5), (3, 4), (3, 6), (4, 5), (4, 7), (5, 8), (6, 7), (7, 8)],
]
class View(wx.Panel):
def __init__(self, parent):
super(View, self).__init__(parent)
self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
self.Bind(wx.EVT_SIZE, self.on_size)
self.Bind(wx.EVT_PAINT, self.on_paint)
self.Bind(wx.EVT_CHAR_HOOK, self.on_char)
self.index = -1
self.weights = {}
self.model = None
self.bitmap = None
wx.CallAfter(self.next)
def next(self):
self.index = (self.index + 1) % len(TESTS)
self.compute()
def compute(self):
edges = TESTS[self.index]
nodes = layout.layout(edges, self.weights)
self.set_model((edges, nodes))
def update(self):
if self.model is None:
return
cw, ch = self.GetClientSize()
bitmap = render.render(cw, ch, *self.model)
self.set_bitmap(bitmap)
def set_model(self, model):
self.model = model
self.update()
def set_weights(self, weights):
self.weights = weights
self.compute()
def set_bitmap(self, bitmap):
self.bitmap = bitmap
self.Refresh()
self.Update()
def on_char(self, event):
event.Skip()
if event.GetKeyCode() == wx.WXK_ESCAPE:
self.GetParent().Close()
elif event.GetKeyCode() == wx.WXK_SPACE:
self.next()
def on_size(self, event):
event.Skip()
self.update()
def on_paint(self, event):
dc = wx.AutoBufferedPaintDC(self)
dc.SetBackground(wx.Brush(render.BACKGROUND))
dc.Clear()
if self.bitmap is None:
return
cw, ch = self.GetClientSize()
bw, bh = self.bitmap.GetSize()
x = cw / 2 - bw / 2
y = ch / 2 - bh / 2
dc.DrawBitmap(self.bitmap, x, y)
dc.DrawText(str(self.index), 10, 10)
class Frame(wx.Frame):
def __init__(self):
super(Frame, self).__init__(None)
self.create_controls(self)
self.SetTitle('GraphLayout')
self.SetClientSize((800, 600))
self.Center()
def create_controls(self, parent):
panel = wx.Panel(parent)
self.view = self.create_view(panel)
sidebar = self.create_sidebar(panel)
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(self.view, 1, wx.EXPAND)
sizer.Add(sidebar, 0, wx.EXPAND | wx.ALL, 10)
panel.SetSizer(sizer)
return panel
def create_view(self, parent):
return View(parent)
def create_sidebar(self, parent):
names = [
'edge_edge',
'rank',
'length',
'area',
]
sizer = wx.BoxSizer(wx.VERTICAL)
self.sliders = []
for name in names:
value = int(layout.WEIGHTS[name] * 10)
text = wx.StaticText(parent, -1, name)
slider = wx.Slider(parent, -1, value, 0, 100)
slider.name = name
slider.Bind(wx.EVT_SCROLL_THUMBRELEASE, self.on_slider)
self.sliders.append(slider)
sizer.Add(text)
sizer.Add(slider, 0, wx.EXPAND)
sizer.AddSpacer(10)
return sizer
def on_slider(self, event):
weights = {}
for slider in self.sliders:
weights[slider.name] = slider.GetValue() / 10.0
self.view.set_weights(weights)
def main():
app = wx.App(None)
frame = Frame()
frame.Show()
app.MainLoop()
if __name__ == '__main__':
main()
# ===== file: /src/scratch.py (repo: bgshin/bert, license: Apache-2.0) =====
import os
import numpy as np
import tensorflow as tf
import collections
import tokenization
from run_classifier import SstProcessor, file_based_input_fn_builder, model_fn_builder, file_based_convert_examples_to_features
import modeling
import cPickle
def get_model_size(ckpt_fpath):
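    """Print checkpoint variables (skipping Momentum/global_step) and the total parameter count."""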
# Open TensorFlow ckpt
reader = tf.train.NewCheckpointReader(ckpt_fpath)
print('\nCount the number of parameters in ckpt file(%s)' % ckpt_fpath)
param_map = reader.get_variable_to_shape_map()
total_count = 0
for k, v in param_map.items():
if 'Momentum' not in k and 'global_step' not in k:
temp = np.prod(v)
total_count += temp
print('%s: %s => %d' % (k, str(v), temp))
print('Total Param Count: %d' % total_count)
# 324,935,430 ~324M
# tokenizer = tokenization.BasicTokenizer(do_lower_case=True)
# result = tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? ")
# print(result)
# print(["hello", "!", "how", "are", "you", "?"])
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
TRAINED_MODEL_DIR='../model/sst2/'
data_dir = '../data/glue_data/SST-2'
output_dir = '../model/sst2/eval/'
max_seq_length = 128
vocab_file='../data/cased_L-12_H-768_A-12/vocab.txt'
do_lower_case = True
eval_batch_size = 8
learning_rate=2e-5
# init_checkpoint='./data/cased_L-12_H-768_A-12/bert_model.ckpt'
init_checkpoint='../model/sst2/eval/model.ckpt-6313'
# init_checkpoint='../model/sst2/eval/checkpoint'
bert_config_file='../data/cased_L-12_H-768_A-12/bert_config.json'
get_model_size(init_checkpoint)
exit()
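# NOTE: the early exit() above means everything below is currently skipped.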
processor = SstProcessor()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
vocab_file=vocab_file, do_lower_case=do_lower_case)
train_examples = processor.get_train_examples(data_dir)
trn_file = os.path.join(output_dir, "trn.tf_record")
file_based_convert_examples_to_features(
train_examples, label_list, max_seq_length, tokenizer, trn_file)
eval_examples = processor.get_dev_examples(data_dir)
eval_file = os.path.join(output_dir, "eval.tf_record")
file_based_convert_examples_to_features(
eval_examples, label_list, max_seq_length, tokenizer, eval_file)
tst_examples = processor.get_test_examples(data_dir)
tst_file = os.path.join(output_dir, "tst.tf_record")
file_based_convert_examples_to_features(tst_examples, label_list,
max_seq_length, tokenizer,
tst_file)
bert_config = modeling.BertConfig.from_json_file(bert_config_file)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=None,
master=None,
model_dir=output_dir,
save_checkpoints_steps=1000,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=1000,
num_shards=8,
per_host_input_for_training=is_per_host))
model_fn = model_fn_builder(
bert_config=bert_config,
num_labels=len(label_list),
init_checkpoint=init_checkpoint,
learning_rate=learning_rate,
num_train_steps=10,
num_warmup_steps=10,
use_tpu=False,
use_one_hot_embeddings=False)
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=False,
model_fn=model_fn,
config=run_config,
train_batch_size=32,
eval_batch_size=8,
predict_batch_size=8)
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Num examples = %d", len(eval_examples))
tf.logging.info(" Batch size = %d", eval_batch_size)
# This tells the estimator to run through the entire set.
eval_steps = None
eval_drop_remainder = False
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
seq_length=max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder)
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
embeddings = estimator.get_variable_value(estimator.get_variable_names()[12])
with open(output_dir+'embedding.cpkl', 'wb') as handle:
cPickle.dump(embeddings, handle)
result_predict = [val for val in estimator.predict(eval_input_fn)]
output_eval_file = os.path.join(output_dir, "eval_results.txt")
with tf.gfile.GFile(output_eval_file, "w") as writer:
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
        writer.write("%s = %s\n" % (key, str(result[key])))
# ===== file: /Python/Core/Lib/encodings/utf_32_be.py (repo: FingerLeakers/DanderSpritz_docs, no license stated) =====
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.10 (default, Feb 6 2017, 23:53:20)
# [GCC 4.2.1 Compatible Apple LLVM 8.0.0 (clang-800.0.34)]
# Embedded file name: utf_32_be.py
"""
Python 'utf-32-be' Codec
"""
import codecs
encode = codecs.utf_32_be_encode
def decode(input, errors='strict'):
return codecs.utf_32_be_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.utf_32_be_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
_buffer_decode = codecs.utf_32_be_decode
class StreamWriter(codecs.StreamWriter):
encode = codecs.utf_32_be_encode
class StreamReader(codecs.StreamReader):
decode = codecs.utf_32_be_decode
def getregentry():
    return codecs.CodecInfo(name='utf-32-be', encode=encode, decode=decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter)
# ===== file: /examples/Extended Application/matplotlib/examples/userdemo/annotate_simple_coord02.py (repo: kantel/nodebox-pyobjc, license: MIT) =====
"""
=======================
Annotate Simple Coord02
=======================
"""
import matplotlib.pyplot as plt
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
fig, ax = plt.subplots(figsize=(3, 2))
an1 = ax.annotate("Test 1", xy=(0.5, 0.5), xycoords="data",
va="center", ha="center",
bbox=dict(boxstyle="round", fc="w"))
an2 = ax.annotate("Test 2", xy=(0.5, 1.), xycoords=an1,
xytext=(0.5, 1.1), textcoords=(an1, "axes fraction"),
va="bottom", ha="center",
bbox=dict(boxstyle="round", fc="w"),
arrowprops=dict(arrowstyle="->"))
fig.subplots_adjust(top=0.83)
pltshow(plt)
# ===== file: /deps/libgdal/gyp-formats/tsx.gyp (repo: naturalatlas/node-gdal, license: Apache-2.0) =====
{
"includes": [
"../common.gypi"
],
"targets": [
{
"target_name": "libgdal_tsx_frmt",
"type": "static_library",
"sources": [
"../gdal/frmts/tsx/tsxdataset.cpp"
],
"include_dirs": [
"../gdal/frmts/tsx"
]
}
]
}
# ===== file: /gpu-example.py (repo: MZ195/Cupy-GPU-Example, no license stated) =====
import numpy as np
from timeit import default_timer as timer
from numba import vectorize
NUM_ELEMENTS = 100000000
# This is the CPU version.
def vector_add_cpu(a, b):
c = np.zeros(NUM_ELEMENTS, dtype=np.float32)
for i in range(NUM_ELEMENTS):
c[i] = a[i] + b[i]
return c
# This is the GPU version. Note the @vectorize decorator. This tells
# numba to turn this into a GPU vectorized function.
@vectorize(["float32(float32, float32)"], target='cuda')
def vector_add_gpu(a, b):
    return a + b
def main():
a_source = np.float32(np.random.rand(NUM_ELEMENTS))
b_source = np.float32(np.random.rand(NUM_ELEMENTS))
# Time the CPU function
start = timer()
vector_add_cpu(a_source, b_source)
vector_add_cpu_time = timer() - start
# Time the GPU function
start = timer()
vector_add_gpu(a_source, b_source)
vector_add_gpu_time = timer() - start
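    # Caveat: this first GPU call also pays the one-time CUDA JIT-compilation
    # cost; warm the function up once before timing for a fairer comparison.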
# Report times
print("CPU function took %f seconds." % vector_add_cpu_time)
print("GPU function took %f seconds." % vector_add_gpu_time)
return 0
if __name__ == "__main__":
    main()
# ===== file: /apps/django-web/learning_log/ll_env/Scripts/django-admin.py (repo: romanticair/python, no license stated) =====
#!l:\mypythonprogr\somepythonprojects\learning_log\ll_env\scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"[email protected]"
] | |
f32ae2236fb684777b438596d850abf6885dc83b | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /qLMZ2hEvrhRSSSnQw_20.py | dba82c5c1e6082f3f6e2d972820ab53ab7f449e3 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | """
Graded lexicographic order (grlex order for short) is a way of ordering words
that:
1. First orders words by length.
2. Then orders words of the same size by their dictionary order.
For example, in grlex order:
* "tray" < "trapped" since "tray" has length 4 while "trapped" has length 7.
* "trap" < "tray" since both have length 4, but "trap" comes before "tray" in the dictionary.
Given a list of words, return that list in grlex order.
### Examples
make_grlex(["small", "big"]) ➞ ["big", "small"]
make_grlex(["cat", "ran", "for", "the", "rat"]) ➞ ["cat", "for", "ran", "rat", "the"]
make_grlex(["this", "is", "a", "small", "test"]) ➞ ["a", "is", "test", "this", "small"]
### Notes
N/A
"""
def make_grlex(lst):
lst.sort()
lst.sort(key=len)
return lst
| [
"[email protected]"
] | |
1c54e78c3a9fa11c3884d713d206b69fd669a135 | a08d885cb9150d7e84f5ffbf0c9734893105a898 | /2021/Day 04/giant_squid.py | 113e0335b32c0ec58d8789010615a252f8d92b66 | [] | no_license | vhsw/Advent-of-Code | ab422c389340a1caf2ec17c5db4981add6433fbe | 3c1dac27667472202ab15098c48efaac19348edf | refs/heads/master | 2022-12-29T03:56:59.648395 | 2022-12-26T11:01:45 | 2022-12-26T11:01:45 | 162,491,163 | 0 | 0 | null | 2022-05-10T08:43:32 | 2018-12-19T21:10:26 | Python | UTF-8 | Python | false | false | 1,821 | py | """Day 4: Giant Squid"""
from typing import Iterable
with open("2021/Day 04/input.txt", encoding="utf-8") as fp:
DATA = fp.read().strip()
def part1(data: str):
"""Part 1 solution"""
nums, boards = parse(data)
for num in nums:
for board in boards:
for line in board:
replace(line, num)
if check(board):
return num * score(board)
raise ValueError(data)
def part2(data):
"""Part 2 solution"""
nums, boards = parse(data)
board_finished = [False] * len(boards)
for num in nums:
for idx, board in enumerate(boards):
if board_finished[idx]:
continue
for line in board:
replace(line, num)
if check(board):
board_finished[idx] = True
if all(board_finished):
return num * score(board)
raise ValueError(data)
def parse(data: str):
nums, *boards = data.split("\n\n")
return parse_nums(nums), parse_boards(boards)
def parse_nums(nums: str):
return list(map(int, nums.split(",")))
def parse_boards(boards: list[str]):
return [
[list(map(int, line.split())) for line in board.splitlines()]
for board in boards
]
def replace(line: list[int | None], value: int):
for idx, _ in enumerate(line):
if line[idx] == value:
line[idx] = None
def check(board: list[list[str]]):
return check_lines(board) or check_lines(zip(*board))
def check_lines(board: Iterable[Iterable[str]]):
return any(all(num is None for num in line) for line in board)
def score(board):
return sum(num for line in board for num in line if num)
if __name__ == "__main__":
print(f"Part 1: { part1(DATA) }")
print(f"Part 2: { part2(DATA) }")
| [
"[email protected]"
] | |
861709efdf3acd7f6154b69f466c98536af3d22c | 98e1716c1c3d071b2fedef0ac029eb410f55762c | /part13-introduction-data-visualization/No33-Multiple-time-series-on-common-axes.py | d1207e9079b0d4e197a6b8b3812ed67aab364a3e | [] | no_license | iamashu/Data-Camp-exercise-PythonTrack | 564531bcf1dff119949cbb75e1fd63d89cb2779f | c72a4e806494f0e263ced9594597dc8882c2131c | refs/heads/master | 2020-07-22T00:23:12.024386 | 2019-04-12T09:24:42 | 2019-04-12T09:24:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,261 | py | #Multiple time series on common axes
'''
For this exercise, you will construct a plot showing four time series stocks on the same axes. The time series in question are represented in the session using the identifiers aapl, ibm, csco, and msft. You'll generate a single plot showing all the time series on common axes with a legend.
Instructions
Plot the aapl time series in blue with a label of 'AAPL'.
Plot the ibm time series in green with a label of 'IBM'.
Plot the csco time series in red with a label of 'CSCO'.
Plot the msft time series in magenta with a label of 'MSFT'.
Specify a rotation of 60 for the xticks with plt.xticks().
Add a legend in the 'upper left' corner of the plot.
'''
# Code
# Import matplotlib.pyplot
import matplotlib.pyplot as plt
# Plot the aapl time series in blue
plt.plot(aapl, color='blue', label='AAPL')
# Plot the ibm time series in green
plt.plot(ibm, color='green', label='IBM')
# Plot the csco time series in red
plt.plot(csco, color='red', label='CSCO')
# Plot the msft time series in magenta
plt.plot(msft, color='magenta', label='MSFT')
# Add a legend in the top left corner of the plot
plt.legend(loc='upper left')
# Specify the orientation of the xticks
plt.xticks(rotation=60)
# Display the plot
plt.show()
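# (Per the docstring, aapl, ibm, csco and msft are time series pre-loaded by the exercise session.)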
# ===== file: /python/paddle/fluid/tests/unittests/test_allclose_op.py (repo: sfraczek/Paddle, license: Apache-2.0) =====
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from eager_op_test import OpTest
import paddle
import paddle.fluid.core as core
class TestAllcloseOp(OpTest):
def set_args(self):
self.input = np.array([10000.0, 1e-07]).astype("float32")
self.other = np.array([10000.1, 1e-08]).astype("float32")
self.rtol = np.array([1e-05]).astype("float64")
self.atol = np.array([1e-08]).astype("float64")
self.equal_nan = False
def setUp(self):
self.set_args()
self.op_type = "allclose"
self.python_api = paddle.allclose
self.inputs = {
'Input': self.input,
'Other': self.other,
"Rtol": self.rtol,
"Atol": self.atol,
}
self.attrs = {'equal_nan': self.equal_nan}
self.outputs = {
'Out': np.array(
[
np.allclose(
self.inputs['Input'],
self.inputs['Other'],
rtol=self.rtol,
atol=self.atol,
equal_nan=self.equal_nan,
)
]
)
}
def test_check_output(self):
self.check_output()
class TestAllcloseOpException(TestAllcloseOp):
def test_check_output(self):
def test_rtol_num():
self.inputs['Rtol'] = np.array([1e-05, 1e-05]).astype("float64")
self.inputs['Atol'] = np.array([1e-08]).astype("float64")
self.check_output()
self.assertRaises(ValueError, test_rtol_num)
def test_rtol_type():
self.inputs['Rtol'] = np.array([5]).astype("int32")
self.inputs['Atol'] = np.array([1e-08]).astype("float64")
self.check_output()
self.assertRaises(ValueError, test_rtol_type)
def test_atol_num():
self.inputs['Rtol'] = np.array([1e-05]).astype("float64")
self.inputs['Atol'] = np.array([1e-08, 1e-08]).astype("float64")
self.check_output()
self.assertRaises(ValueError, test_atol_num)
def test_atol_type():
self.inputs['Rtol'] = np.array([1e-05]).astype("float64")
self.inputs['Atol'] = np.array([8]).astype("int32")
self.check_output()
self.assertRaises(ValueError, test_atol_type)
class TestAllcloseOpSmallNum(TestAllcloseOp):
def set_args(self):
self.input = np.array([10000.0, 1e-08]).astype("float32")
self.other = np.array([10000.1, 1e-09]).astype("float32")
self.rtol = np.array([1e-05]).astype("float64")
self.atol = np.array([1e-08]).astype("float64")
self.equal_nan = False
class TestAllcloseOpNanFalse(TestAllcloseOp):
def set_args(self):
self.input = np.array([1.0, float('nan')]).astype("float32")
self.other = np.array([1.0, float('nan')]).astype("float32")
self.rtol = np.array([1e-05]).astype("float64")
self.atol = np.array([1e-08]).astype("float64")
self.equal_nan = False
class TestAllcloseOpNanTrue(TestAllcloseOp):
def set_args(self):
self.input = np.array([1.0, float('nan')]).astype("float32")
self.other = np.array([1.0, float('nan')]).astype("float32")
self.rtol = np.array([1e-05]).astype("float64")
self.atol = np.array([1e-08]).astype("float64")
self.equal_nan = True
class TestAllcloseDygraph(unittest.TestCase):
def test_api_case(self):
paddle.disable_static()
x_data = np.random.rand(10, 10)
y_data = np.random.rand(10, 10)
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
out = paddle.allclose(x, y, rtol=1e-05, atol=1e-08)
expected_out = np.allclose(x_data, y_data, rtol=1e-05, atol=1e-08)
self.assertTrue((out.numpy() == expected_out).all(), True)
paddle.enable_static()
class TestAllcloseError(unittest.TestCase):
def test_input_dtype(self):
def test_x_dtype():
with paddle.static.program_guard(
paddle.static.Program(), paddle.static.Program()
):
x = paddle.fluid.data(name='x', shape=[10, 10], dtype='int32')
y = paddle.fluid.data(name='y', shape=[10, 10], dtype='float64')
result = paddle.allclose(x, y)
self.assertRaises(TypeError, test_x_dtype)
def test_y_dtype():
with paddle.static.program_guard(
paddle.static.Program(), paddle.static.Program()
):
x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
y = paddle.fluid.data(name='y', shape=[10, 10], dtype='int32')
result = paddle.allclose(x, y)
self.assertRaises(TypeError, test_y_dtype)
def test_attr(self):
x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
y = paddle.fluid.data(name='y', shape=[10, 10], dtype='float64')
def test_rtol():
result = paddle.allclose(x, y, rtol=True)
self.assertRaises(TypeError, test_rtol)
def test_atol():
            result = paddle.allclose(x, y, atol=True)
self.assertRaises(TypeError, test_atol)
def test_equal_nan():
result = paddle.allclose(x, y, equal_nan=1)
self.assertRaises(TypeError, test_equal_nan)
class TestAllcloseOpFp16(unittest.TestCase):
def test_fp16(self):
x_data = np.random.rand(10, 10).astype('float16')
y_data = np.random.rand(10, 10).astype('float16')
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.static.data(shape=[10, 10], name='x', dtype='float16')
            y = paddle.static.data(shape=[10, 10], name='y', dtype='float16')
out = paddle.allclose(x, y, rtol=1e-05, atol=1e-08)
if core.is_compiled_with_cuda():
place = paddle.CUDAPlace(0)
exe = paddle.static.Executor(place)
exe.run(paddle.static.default_startup_program())
out = exe.run(feed={'x': x_data, 'y': y_data}, fetch_list=[out])
class TestAllcloseOpFloat16(TestAllcloseOp):
def set_args(self):
self.input = np.array([10.1]).astype("float16")
self.other = np.array([10]).astype("float16")
self.rtol = np.array([0.01]).astype("float64")
self.atol = np.array([0]).astype("float64")
self.equal_nan = False
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place)
class TestAllcloseOpFloat32(TestAllcloseOp):
def set_args(self):
self.input = np.array([10.1]).astype("float32")
self.other = np.array([10]).astype("float32")
self.rtol = np.array([0.01]).astype("float64")
self.atol = np.array([0]).astype("float64")
self.equal_nan = False
class TestAllcloseOpFloat64(TestAllcloseOp):
def set_args(self):
self.input = np.array([10.1]).astype("float64")
self.other = np.array([10]).astype("float64")
self.rtol = np.array([0.01]).astype("float64")
self.atol = np.array([0]).astype("float64")
self.equal_nan = False
class TestAllcloseOpLargeDimInput(TestAllcloseOp):
def set_args(self):
self.input = np.array(np.zeros([2048, 1024])).astype("float64")
self.other = np.array(np.zeros([2048, 1024])).astype("float64")
self.input[-1][-1] = 100
self.rtol = np.array([1e-05]).astype("float64")
self.atol = np.array([1e-08]).astype("float64")
self.equal_nan = False
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
e7bb5f16a923ff9ad17aaa6eb1e32af386aebe07 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa1/sample/def_func_global-20.py | dc6dfd6544ed1c4f088d8b9645ac5760817cfc2e | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 72 | py | z:int = 0
def foo(x:int) -> $ID:
global z
return x > z
foo(1)
| [
"[email protected]"
] | |
8718fb18f40bed47885972a98c7d3c06d6a3ca6c | 9db8e7bbd09eb07126a7f0c14e2f3af86bf6e8d9 | /datasets/centralia/convert.py | 51efa94fe2198d2ccf0de3656af8e6b03e2c11b4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jayschauer/TCPD | ddeb193eca2fa9ea04b791025fe529dfd3ee4686 | f99ab1decc165707115fd60e991ac1c12091f334 | refs/heads/master | 2022-12-05T02:21:16.517774 | 2020-09-02T18:56:49 | 2020-09-02T18:56:49 | 292,368,325 | 0 | 0 | MIT | 2020-09-02T18:52:00 | 2020-09-02T18:52:00 | null | UTF-8 | Python | false | false | 1,365 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dataset conversion script
Author: Gertjan van den Burg
"""
import json
import argparse
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-s",
"--subsample",
help="Number of observations to skip during subsampling",
type=int,
)
parser.add_argument("input_file", help="File to convert")
parser.add_argument("output_file", help="File to write to")
return parser.parse_args()
def main():
args = parse_args()
with open(args.input_file, "r") as fp:
rows = [l.strip().split("\t") for l in fp]
time = []
values = []
for year, pop in rows:
time.append(year)
values.append(int(pop))
name = "centralia"
longname = "Centralia Pennsylvania Population"
time_fmt = "%Y"
series = [{"label": "Population", "type": "int", "raw": values}]
data = {
"name": name,
"longname": longname,
"n_obs": len(time),
"n_dim": len(series),
"time": {
"type": "string",
"format": time_fmt,
"index": list(range(len(time))),
"raw": time,
},
"series": series,
}
with open(args.output_file, "w") as fp:
json.dump(data, fp, indent="\t")
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
ac415525282185598f7818ce48ba05afe6778b4c | 78efa54b2b253f99ea7e073f783e6121c20cdb52 | /Codechef/Substraction Game 1.py | 70eba9c0690d035feb1d0ea1b9eec2fb43d7f3d6 | [] | no_license | NishchaySharma/Competitve-Programming | 32a93581ab17f05d20129471f7450f34ec68cc53 | 1ec44324d64c116098eb0beb74baac7f1c3395bb | refs/heads/master | 2020-04-08T04:02:46.599398 | 2020-01-01T15:51:39 | 2020-01-01T15:51:39 | 159,000,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | def gcd(a:int,b:int)->int:
if a==0 or b==0: return a+b
else: return gcd(b%a,a)
for _ in range(int(input())):
n=int(input())
arr=list(map(int,input().split()))
res=arr[0]
for i in arr[1:]:
res=gcd(res,i)
if res==1: break
print(res)
| [
"[email protected]"
] | |
20906ab3c18a8740b0c15f21a626b89b87f75c8d | 10fbe5526e5f0b8588b65f70f088cd86b6e9afbe | /qqpppzas/migrations/0015_auto_20150218_1630.py | 8c6435040612046c9d19f24e8b395845cbb10c31 | [] | no_license | MarkusH/django-migrations-benchmark | eb4b2312bb30a5a5d2abf25e95eca8f714162056 | e2bd24755389668b34b87d254ec8ac63725dc56e | refs/heads/master | 2016-09-05T15:36:45.250134 | 2015-03-31T23:44:28 | 2015-03-31T23:44:28 | 31,168,231 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,078 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cohutfvb', '0014_auto_20150218_1630'),
('qqpppzas', '0014_auto_20150218_1628'),
]
run_before = [
('ysgxuyu', '0012_delete_bmovnbnmed'),
]
operations = [
migrations.CreateModel(
name='Uxswpekqlt',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('febtep', models.OneToOneField(null=True, related_name='+', to='cohutfvb.Ecgjvad')),
],
),
migrations.RemoveField(
model_name='shtlozkm',
name='wjznogs',
),
migrations.RemoveField(
model_name='vdscpy',
name='efspwnch',
),
migrations.AddField(
model_name='vdscpy',
name='tcyjunatyh',
field=models.CharField(default='', max_length=163),
),
]
| [
"[email protected]"
] | |
339fbbf64e0beffc337a085e31729f800c8127dd | d03582a9f9a853a07eeb36f746530dee29f7a258 | /build/catkin_generated/generate_cached_setup.py | b68e8b3be2e20e0a06efc00fe0407cd24717e837 | [] | no_license | tanmayshankar/decision_making | 6df835c6c26e9cb2f0fdfbec509f5a46bf3c98ae | fdd3b1ea127a89fb03181cb8f766e86e6d933eed | refs/heads/master | 2021-01-21T04:48:16.259813 | 2016-07-15T17:43:06 | 2016-07-15T17:43:06 | 44,739,541 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,339 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/indigo/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/indigo/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/opt/ros/indigo".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/tanmay/indigo_workspace/sandbox/decision_making/build/devel/env.sh')
output_filename = '/home/tanmay/indigo_workspace/sandbox/decision_making/build/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"[email protected]"
] | |
efce919b3664c741e5e5cdd3efddf076345b6093 | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /MY_REPOS/Lambda-Resource-Static-Assets/2-resources/_External-learning-resources/02-pyth/PyPattyrn-master/pypattyrn/structural/decorator.py | 947743818a1dceeb4c5e662540bafd0242dc5234 | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 2,788 | py | from functools import partial
from abc import ABCMeta, abstractmethod
class Decorator(object, metaclass=ABCMeta):
"""
Base Decorator class that all decorator classes inherit from.
- External Usage Documentation: U{https://github.com/tylerlaberge/PyPattyrn#decorator-pattern}
- External Decorator Pattern documentation: U{https://en.wikipedia.org/wiki/Decorator_pattern}
"""
def __get__(self, instance, owner):
"""
Override __get__ in order to get the instance of a bound of method call.
"""
return partial(self.__call__, instance)
@abstractmethod
def __call__(self, *args, **kwargs):
"""
All decorators must implement a __call__ method.
"""
pass
class DecoratorSimple(Decorator, metaclass=ABCMeta):
"""
A Base Decorator class for decorators with no arguments.
- External Usage Documentation: U{https://github.com/tylerlaberge/PyPattyrn#decorator-pattern}
- External Decorator Pattern documentation: U{https://en.wikipedia.org/wiki/Decorator_pattern}
"""
def __init__(self, func):
"""
Initialize a new DecoratorSimple instance.
@param func: The function being decorated.
"""
self.func = func
class DecoratorComplex(Decorator, metaclass=ABCMeta):
"""
A Base Decorator class for decorators with arguments.
- External Usage Documentation: U{https://github.com/tylerlaberge/PyPattyrn#decorator-pattern}
- External Decorator Pattern documentation: U{https://en.wikipedia.org/wiki/Decorator_pattern}
"""
@abstractmethod
def __init__(self, *args, **kwargs):
"""
Initialize a new DecoratorComplex instance.
@param args: Args for the decorator.
@param kwargs: Keyword args for the decorator.
"""
pass
@abstractmethod
def __call__(self, func, *args, **kwargs):
"""
Concrete DecoratorComplex instances must override the __call__ method.
@param func: The function being decorated.
@param args: Arguments for the decorated function.
@param kwargs: Keyword arguments for the decorated function.
@return:
"""
pass
class CallWrapper(DecoratorSimple):
"""
A Decorator for wrapping DecoratorComplex __call__ methods.
- External Usage Documentation: U{https://github.com/tylerlaberge/PyPattyrn#decorator-pattern}
- External Decorator Pattern documentation: U{https://en.wikipedia.org/wiki/Decorator_pattern}
"""
def __call__(self, instance, func):
"""
Wrap a concrete DecoratorComplex __call__ method.
"""
def wrapped(*args, **kwargs):
return self.func(instance, func, *args, **kwargs)
return wrapped
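# --- Minimal usage sketch (illustrative, not part of the original library) ---
# A DecoratorSimple subclass receives the wrapped callable as self.func;
# the hypothetical LogCall below just announces each call before delegating.
class LogCall(DecoratorSimple):
    """Example decorator that logs every invocation."""
    def __call__(self, *args, **kwargs):
        print('calling %s' % self.func.__name__)
        return self.func(*args, **kwargs)
@LogCall
def add(a, b):
    return a + b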
| [
"[email protected]"
] | |
ef0eff35233191513b76fde38c78e1f3e0f13ea5 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Games/Attack/Attack.spec | c15c4364b14dd93124576beac54254b27f0aea6d | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 975 | spec | # -*- mode: python ; coding: utf-8 -*-
block_cipher = None
a = Analysis(['Attack.py'],
pathex=['D:\\Users\\dhhyey\\Code'],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
[],
exclude_binaries=True,
name='Attack',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
console=True )
coll = COLLECT(exe,
a.binaries,
a.zipfiles,
a.datas,
strip=False,
upx=True,
upx_exclude=[],
name='Attack')
| [
"[email protected]"
] | |
9099b68c27e3fbffcb996837b243735fb81db3c4 | cc5eb8eb50d64ffbca780c42a908053ec549f295 | /Python Course CSe11309x/quiz3_prog3.py | 2b9ed7f99272eb84762ebb8b2b6185efa3fb98e4 | [] | no_license | bemagee/LearnPython | 328b1f7a9d5046fe1503aece8a5134a7dd2727d2 | a42565f8fb45f9e2ebbcdcf359ebb9092bf837c2 | refs/heads/master | 2020-12-13T02:45:30.308604 | 2016-10-24T03:09:12 | 2016-10-24T03:09:12 | 10,793,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | #!/usr/bin/env python3
# Write a program that asks the user for a positive number 'n' as input. Assume that the user
# enters a number greater than or equal to 3 and print a triangle as described below.
# For example if the user enters 6 then the output should be:
#*
#**
#***
#****
#*****
#******
#*****
#****
#***
#**
#*
my_int = int(input("enter a positive int: "))
# start at 1 so the first row has exactly one star (range(0, ...) would print a blank line first)
for going_up in range(1, my_int) :
    print(going_up * "*")
while my_int > 0 :
print(my_int * "*")
my_int -= 1
| [
"[email protected]"
] | |
d58ebf4ac0db230cddebff1dad95bb3e7734694c | 6ae1ba7d2ad2a97725699e1467171cc2deebb0d4 | /文件、异常/02-文件的路径.py | 3e1488362ed58e9b33d2e32db13dabf3db3ec841 | [] | no_license | weizt/python_studying | 6a95edcf781bd91d8b5f2684f1c6046a40a6d432 | 6dace3a6b73032de220c4b3ea4b3c37d47880ff1 | refs/heads/master | 2023-01-18T19:45:48.291898 | 2020-12-02T00:59:01 | 2020-12-02T00:59:01 | 293,965,283 | 2 | 0 | null | 2020-09-13T01:15:50 | 2020-09-09T01:04:03 | Python | UTF-8 | Python | false | false | 1,156 | py | # 路径:绝对路径 和 相对路径
# Absolute path: starts from the drive letter; the full, detailed path
# Relative path: starts from the current directory; the short form (recommended)
# Tip: right-click the file name and choose "Show in Explorer" to open the file's location directly
import os
print(os.sep)  # On Windows, folders are separated with \ ;
# but in Python \ marks a string escape, so the two conflict
# Three ways to resolve the \ conflict:
# 1. Use two backslashes \\ to stand for one \
# 2. Prefix the string with r (a raw string)
# 3. On non-Windows systems use / -- this one is recommended
# file = open('D:\\桌面备份文件\\207操作手册.txt', encoding='gbk')
# file = open(r'D:\桌面备份文件\207操作手册.txt', encoding='gbk')
file = open('D:/桌面备份文件/207操作手册.txt', encoding='gbk')
print(file.read())
# Relative path: starts from the folder containing the current file; can be opened directly
file2 = open('01-文件的打开和关闭.py', encoding='utf8')
print(file2.read())
# ../ means go up one directory level
# ./ means the current directory and may be omitted
file3 = open('./../requirements.txt', encoding='utf8')
print(file3.read())
file.close()
file2.close()
file3.close() | [
"[email protected]"
] | |
0d03883d7784067097556669a9d66f4dde920729 | de01cb554c2292b0fbb79b4d5413a2f6414ea472 | /algorithms/Medium/360.sort-transformed-array.py | 0f69da25ba1935966ee90c3a22fa9f0d71097b1f | [] | no_license | h4hany/yeet-the-leet | 98292017eadd3dde98a079aafcd7648aa98701b4 | 563d779467ef5a7cc85cbe954eeaf3c1f5463313 | refs/heads/master | 2022-12-10T08:35:39.830260 | 2020-09-02T23:12:15 | 2020-09-02T23:12:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 852 | py | #
# @lc app=leetcode id=360 lang=python3
#
# [360] Sort Transformed Array
#
# https://leetcode.com/problems/sort-transformed-array/description/
#
# algorithms
# Medium (48.92%)
# Total Accepted: 35.5K
# Total Submissions: 72.6K
# Testcase Example: '[-4,-2,2,4]\n1\n3\n5'
#
# Given a sorted array of integers nums and integer values a, b and c. Apply a
# quadratic function of the form f(x) = ax^2 + bx + c to each element x in the
# array.
#
# The returned array must be in sorted order.
#
# Expected time complexity: O(n)
#
#
# Example 1:
#
#
# Input: nums = [-4,-2,2,4], a = 1, b = 3, c = 5
# Output: [3,9,15,33]
#
#
#
# Example 2:
#
#
# Input: nums = [-4,-2,2,4], a = -1, b = 3, c = 5
# Output: [-23,-5,1,7]
#
#
#
#
from typing import List
class Solution:
    def sortTransformedArray(self, nums: List[int], a: int, b: int, c: int) -> List[int]:
        # One standard O(n) two-pointer approach: f is a parabola, so its
        # extreme values over a sorted input always sit at the two ends.
        f = lambda x: a * x * x + b * x + c
        n = len(nums)
        res = [0] * n
        lo, hi = 0, n - 1
        # a >= 0: largest values at the ends -> fill res back to front.
        # a <  0: smallest values at the ends -> fill res front to back.
        idx = n - 1 if a >= 0 else 0
        while lo <= hi:
            left, right = f(nums[lo]), f(nums[hi])
            if a >= 0:
                if left >= right:
                    res[idx], lo = left, lo + 1
                else:
                    res[idx], hi = right, hi - 1
                idx -= 1
            else:
                if left <= right:
                    res[idx], lo = left, lo + 1
                else:
                    res[idx], hi = right, hi - 1
                idx += 1
        return res
| [
"[email protected]"
] | |
df255a93aa6826829f42b9c50283a11d91b9f72d | 25d2afe5d12fe58a97da7b51e23fdc55929e38f5 | /anonymise_dataset_folder.py | 18e10508f9b9043df95dc7985d4acfe30a7b2071 | [] | no_license | apmoore1/tdsa_comparisons | 071396efe0c5e0bad297119d2ce48bf0c1cbb42f | ba613afece15239e6a38f277c455a035739f0b2d | refs/heads/master | 2021-06-23T16:00:49.803589 | 2021-05-25T09:32:53 | 2021-05-25T09:32:53 | 225,565,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,574 | py | import argparse
from pathlib import Path
from target_extraction.data_types import TargetTextCollection
def parse_path(path_string: str) -> Path:
path_string = Path(path_string).resolve()
return path_string
if __name__ == '__main__':
save_dir_help = ('File Path to directory where the anonymised results '
'will be saved.')
results_dir_help = ('File path to the directory that currently stores all '
'results')
parser = argparse.ArgumentParser()
parser.add_argument("results_dir", type=parse_path, help=results_dir_help)
parser.add_argument("save_dir", type=parse_path, help=save_dir_help)
args = parser.parse_args()
save_dir = args.save_dir
results_dir = args.results_dir
save_dir.mkdir(parents=True, exist_ok=True)
dataset_names = ['election', 'laptop', 'restaurant']
split_names = ['train', 'val', 'test']
for dataset_name in dataset_names:
dataset_result_folder = Path(results_dir, f'{dataset_name}_dataset')
save_dataset_folder = Path(save_dir, f'{dataset_name}_dataset')
save_dataset_folder.mkdir(parents=True, exist_ok=True)
for split_name in split_names:
split_fp = Path(dataset_result_folder, f'{split_name}.json')
split_dataset = TargetTextCollection.load_json(split_fp)
split_dataset: TargetTextCollection
split_dataset.anonymised = True
save_fp = Path(save_dataset_folder, f'{split_name}.json')
split_dataset.to_json_file(save_fp, include_metadata=True) | [
"[email protected]"
] | |
b223c25deb92c2584942abe587611364fd6452fb | 47c39800fa6f928e0d13f26727ba52bda2aa6ff0 | /venv/Lib/site-packages/aliyunsdkrds/request/v20140815/DescribeDBInstanceNetInfoRequest.py | aaaad8b6f364419423cb88b61323a126dae4413a | [
"MIT"
] | permissive | dddluke/zhihuipingtai | 952ed5f9a4011cb4fb2765a0571c978af784d708 | 4e46e01440f8c270c05259ac0f38bd56dd04016c | refs/heads/master | 2023-03-09T03:32:47.807760 | 2021-02-26T02:36:10 | 2021-02-26T02:36:10 | 341,816,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,765 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkrds.endpoint import endpoint_data
class DescribeDBInstanceNetInfoRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Rds', '2014-08-15', 'DescribeDBInstanceNetInfo','rds')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_Flag(self):
return self.get_query_params().get('Flag')
def set_Flag(self,Flag):
self.add_query_param('Flag',Flag)
def get_ClientToken(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def get_DBInstanceId(self):
return self.get_query_params().get('DBInstanceId')
def set_DBInstanceId(self,DBInstanceId):
self.add_query_param('DBInstanceId',DBInstanceId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_DBInstanceNetRWSplitType(self):
return self.get_query_params().get('DBInstanceNetRWSplitType')
def set_DBInstanceNetRWSplitType(self,DBInstanceNetRWSplitType):
self.add_query_param('DBInstanceNetRWSplitType',DBInstanceNetRWSplitType) | [
"[email protected]"
] | |
cfac7879161065f706def9686ddfbc28119b790b | 48832d27da16256ee62c364add45f21b968ee669 | /res_bw/scripts/common/lib/idlelib/keybindingdialog.py | de38be02b11f8caf736a506da269f98fbd744c30 | [] | no_license | webiumsk/WOT-0.9.15.1 | 0752d5bbd7c6fafdd7f714af939ae7bcf654faf7 | 17ca3550fef25e430534d079876a14fbbcccb9b4 | refs/heads/master | 2021-01-20T18:24:10.349144 | 2016-08-04T18:08:34 | 2016-08-04T18:08:34 | 64,955,694 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 11,956 | py | # 2016.08.04 19:59:37 Střední Evropa (letní čas)
# Embedded file name: scripts/common/Lib/idlelib/keybindingDialog.py
"""
Dialog for building Tkinter accelerator key bindings
"""
from Tkinter import *
import tkMessageBox
import string
import sys
class GetKeysDialog(Toplevel):
def __init__(self, parent, title, action, currentKeySequences):
"""
action - string, the name of the virtual event these keys will be
mapped to
currentKeys - list, a list of all key sequence lists currently mapped
to virtual events, for overlap checking
"""
Toplevel.__init__(self, parent)
self.configure(borderwidth=5)
self.resizable(height=FALSE, width=FALSE)
self.title(title)
self.transient(parent)
self.grab_set()
self.protocol('WM_DELETE_WINDOW', self.Cancel)
self.parent = parent
self.action = action
self.currentKeySequences = currentKeySequences
self.result = ''
self.keyString = StringVar(self)
self.keyString.set('')
self.SetModifiersForPlatform()
self.modifier_vars = []
for modifier in self.modifiers:
variable = StringVar(self)
variable.set('')
self.modifier_vars.append(variable)
self.advanced = False
self.CreateWidgets()
self.LoadFinalKeyList()
self.withdraw()
self.update_idletasks()
self.geometry('+%d+%d' % (parent.winfo_rootx() + (parent.winfo_width() / 2 - self.winfo_reqwidth() / 2), parent.winfo_rooty() + (parent.winfo_height() / 2 - self.winfo_reqheight() / 2)))
self.deiconify()
self.wait_window()
def CreateWidgets(self):
frameMain = Frame(self, borderwidth=2, relief=SUNKEN)
frameMain.pack(side=TOP, expand=TRUE, fill=BOTH)
frameButtons = Frame(self)
frameButtons.pack(side=BOTTOM, fill=X)
self.buttonOK = Button(frameButtons, text='OK', width=8, command=self.OK)
self.buttonOK.grid(row=0, column=0, padx=5, pady=5)
self.buttonCancel = Button(frameButtons, text='Cancel', width=8, command=self.Cancel)
self.buttonCancel.grid(row=0, column=1, padx=5, pady=5)
self.frameKeySeqBasic = Frame(frameMain)
self.frameKeySeqAdvanced = Frame(frameMain)
self.frameControlsBasic = Frame(frameMain)
self.frameHelpAdvanced = Frame(frameMain)
self.frameKeySeqAdvanced.grid(row=0, column=0, sticky=NSEW, padx=5, pady=5)
self.frameKeySeqBasic.grid(row=0, column=0, sticky=NSEW, padx=5, pady=5)
self.frameKeySeqBasic.lift()
self.frameHelpAdvanced.grid(row=1, column=0, sticky=NSEW, padx=5)
self.frameControlsBasic.grid(row=1, column=0, sticky=NSEW, padx=5)
self.frameControlsBasic.lift()
self.buttonLevel = Button(frameMain, command=self.ToggleLevel, text='Advanced Key Binding Entry >>')
self.buttonLevel.grid(row=2, column=0, stick=EW, padx=5, pady=5)
labelTitleBasic = Label(self.frameKeySeqBasic, text="New keys for '" + self.action + "' :")
labelTitleBasic.pack(anchor=W)
labelKeysBasic = Label(self.frameKeySeqBasic, justify=LEFT, textvariable=self.keyString, relief=GROOVE, borderwidth=2)
labelKeysBasic.pack(ipadx=5, ipady=5, fill=X)
self.modifier_checkbuttons = {}
column = 0
for modifier, variable in zip(self.modifiers, self.modifier_vars):
label = self.modifier_label.get(modifier, modifier)
check = Checkbutton(self.frameControlsBasic, command=self.BuildKeyString, text=label, variable=variable, onvalue=modifier, offvalue='')
check.grid(row=0, column=column, padx=2, sticky=W)
self.modifier_checkbuttons[modifier] = check
column += 1
labelFnAdvice = Label(self.frameControlsBasic, justify=LEFT, text='Select the desired modifier keys\n' + 'above, and the final key from the\n' + 'list on the right.\n\n' + 'Use upper case Symbols when using\n' + 'the Shift modifier. (Letters will be\n' + 'converted automatically.)')
labelFnAdvice.grid(row=1, column=0, columnspan=4, padx=2, sticky=W)
self.listKeysFinal = Listbox(self.frameControlsBasic, width=15, height=10, selectmode=SINGLE)
self.listKeysFinal.bind('<ButtonRelease-1>', self.FinalKeySelected)
self.listKeysFinal.grid(row=0, column=4, rowspan=4, sticky=NS)
scrollKeysFinal = Scrollbar(self.frameControlsBasic, orient=VERTICAL, command=self.listKeysFinal.yview)
self.listKeysFinal.config(yscrollcommand=scrollKeysFinal.set)
scrollKeysFinal.grid(row=0, column=5, rowspan=4, sticky=NS)
self.buttonClear = Button(self.frameControlsBasic, text='Clear Keys', command=self.ClearKeySeq)
self.buttonClear.grid(row=2, column=0, columnspan=4)
labelTitleAdvanced = Label(self.frameKeySeqAdvanced, justify=LEFT, text="Enter new binding(s) for '" + self.action + "' :\n" + '(These bindings will not be checked for validity!)')
labelTitleAdvanced.pack(anchor=W)
self.entryKeysAdvanced = Entry(self.frameKeySeqAdvanced, textvariable=self.keyString)
self.entryKeysAdvanced.pack(fill=X)
labelHelpAdvanced = Label(self.frameHelpAdvanced, justify=LEFT, text='Key bindings are specified using Tkinter keysyms as\n' + 'in these samples: <Control-f>, <Shift-F2>, <F12>,\n<Control-space>, <Meta-less>, <Control-Alt-Shift-X>.\nUpper case is used when the Shift modifier is present!\n\n' + "'Emacs style' multi-keystroke bindings are specified as\n" + 'follows: <Control-x><Control-y>, where the first key\n' + "is the 'do-nothing' keybinding.\n\n" + 'Multiple separate bindings for one action should be\n' + 'separated by a space, eg., <Alt-v> <Meta-v>.')
labelHelpAdvanced.grid(row=0, column=0, sticky=NSEW)
def SetModifiersForPlatform(self):
"""Determine list of names of key modifiers for this platform.
The names are used to build Tk bindings -- it doesn't matter if the
keyboard has these keys, it matters if Tk understands them. The
order is also important: key binding equality depends on it, so
config-keys.def must use the same ordering.
"""
if sys.platform == 'darwin':
self.modifiers = ['Shift',
'Control',
'Option',
'Command']
else:
self.modifiers = ['Control', 'Alt', 'Shift']
self.modifier_label = {'Control': 'Ctrl'}
def ToggleLevel(self):
if self.buttonLevel.cget('text')[:8] == 'Advanced':
self.ClearKeySeq()
self.buttonLevel.config(text='<< Basic Key Binding Entry')
self.frameKeySeqAdvanced.lift()
self.frameHelpAdvanced.lift()
self.entryKeysAdvanced.focus_set()
self.advanced = True
else:
self.ClearKeySeq()
self.buttonLevel.config(text='Advanced Key Binding Entry >>')
self.frameKeySeqBasic.lift()
self.frameControlsBasic.lift()
self.advanced = False
def FinalKeySelected(self, event):
self.BuildKeyString()
def BuildKeyString(self):
keyList = modifiers = self.GetModifiers()
finalKey = self.listKeysFinal.get(ANCHOR)
if finalKey:
finalKey = self.TranslateKey(finalKey, modifiers)
keyList.append(finalKey)
self.keyString.set('<' + string.join(keyList, '-') + '>')
def GetModifiers(self):
modList = [ variable.get() for variable in self.modifier_vars ]
return [ mod for mod in modList if mod ]
def ClearKeySeq(self):
self.listKeysFinal.select_clear(0, END)
self.listKeysFinal.yview(MOVETO, '0.0')
for variable in self.modifier_vars:
variable.set('')
self.keyString.set('')
def LoadFinalKeyList(self):
        self.functionKeys = ('F1', 'F2', 'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'F9', 'F10', 'F11', 'F12')
self.alphanumKeys = tuple(string.ascii_lowercase + string.digits)
self.punctuationKeys = tuple('~!@#%^&*()_-+={}[]|;:,.<>/?')
self.whitespaceKeys = ('Tab', 'Space', 'Return')
self.editKeys = ('BackSpace', 'Delete', 'Insert')
self.moveKeys = ('Home', 'End', 'Page Up', 'Page Down', 'Left Arrow', 'Right Arrow', 'Up Arrow', 'Down Arrow')
keys = self.alphanumKeys + self.punctuationKeys + self.functionKeys + self.whitespaceKeys + self.editKeys + self.moveKeys
self.listKeysFinal.insert(END, *keys)
def TranslateKey(self, key, modifiers):
"""Translate from keycap symbol to the Tkinter keysym"""
translateDict = {'Space': 'space',
'~': 'asciitilde',
'!': 'exclam',
'@': 'at',
'#': 'numbersign',
'%': 'percent',
'^': 'asciicircum',
'&': 'ampersand',
'*': 'asterisk',
'(': 'parenleft',
')': 'parenright',
'_': 'underscore',
'-': 'minus',
'+': 'plus',
'=': 'equal',
'{': 'braceleft',
'}': 'braceright',
'[': 'bracketleft',
']': 'bracketright',
'|': 'bar',
';': 'semicolon',
':': 'colon',
',': 'comma',
'.': 'period',
'<': 'less',
'>': 'greater',
'/': 'slash',
'?': 'question',
'Page Up': 'Prior',
'Page Down': 'Next',
'Left Arrow': 'Left',
'Right Arrow': 'Right',
'Up Arrow': 'Up',
'Down Arrow': 'Down',
'Tab': 'Tab'}
if key in translateDict.keys():
key = translateDict[key]
if 'Shift' in modifiers and key in string.ascii_lowercase:
key = key.upper()
key = 'Key-' + key
return key
def OK(self, event = None):
if self.advanced or self.KeysOK():
self.result = self.keyString.get()
self.destroy()
def Cancel(self, event = None):
self.result = ''
self.destroy()
def KeysOK(self):
"""Validity check on user's 'basic' keybinding selection.
Doesn't check the string produced by the advanced dialog because
'modifiers' isn't set.
"""
keys = self.keyString.get()
keys.strip()
finalKey = self.listKeysFinal.get(ANCHOR)
modifiers = self.GetModifiers()
keySequence = keys.split()
keysOK = False
title = 'Key Sequence Error'
if not keys:
tkMessageBox.showerror(title=title, parent=self, message='No keys specified.')
elif not keys.endswith('>'):
tkMessageBox.showerror(title=title, parent=self, message='Missing the final Key')
elif not modifiers and finalKey not in self.functionKeys + self.moveKeys:
tkMessageBox.showerror(title=title, parent=self, message='No modifier key(s) specified.')
elif modifiers == ['Shift'] and finalKey not in self.functionKeys + self.moveKeys + ('Tab', 'Space'):
msg = 'The shift modifier by itself may not be used with this key symbol.'
tkMessageBox.showerror(title=title, parent=self, message=msg)
elif keySequence in self.currentKeySequences:
msg = 'This key combination is already in use.'
tkMessageBox.showerror(title=title, parent=self, message=msg)
else:
keysOK = True
return keysOK
if __name__ == '__main__':
root = Tk()
def run():
keySeq = ''
dlg = GetKeysDialog(root, 'Get Keys', 'find-again', [])
print dlg.result
Button(root, text='Dialog', command=run).pack()
root.mainloop()
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\idlelib\keybindingdialog.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.08.04 19:59:38 Střední Evropa (letní čas)
| [
"[email protected]"
] | |
a0ed35e6fc27549ff2800fb20c22fb6efdff37bf | ce3499f5d09396e72151f4d742a5562ebdb127c3 | /godap.py | 367a6d0ef7f1af07ba0250eebd821485442e3696 | [] | no_license | jscouet/godap | 08fb0e020b95e00e92f3a94df3761cc965dda52d | e0dda750aba60ad59dfb7183e9beead62712dbe4 | refs/heads/master | 2021-01-01T04:06:41.399768 | 2016-05-24T15:35:37 | 2016-05-24T15:35:37 | 59,554,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,244 | py | #!/usr/bin/python3
"""
script for sending users to LDAP
"""
VERSION = 0.2
import logging
logging.basicConfig(filename='client_application.log', level=logging.DEBUG)
from ldap3 import Server, Connection, SUBTREE
from ldap3.utils.log import set_library_log_detail_level, OFF, BASIC, NETWORK, EXTENDED, PROTOCOL
import argparse
import mods.ldap
set_library_log_detail_level(BASIC)
server='ldap-130a'
dn_connect='cn=admin,dc=edelia,dc=net'
dn_pass='M0j@ve3'
con=''
users={}
#pp.get_users()
#exit()
LDAP_USER_BRANCH='ou=people,dc=edelia,dc=net'
LDAP_GROUP_BRANCH='ou=groups,dc=edelia,dc=net'
LDAP_PTF_GROUP_BRANCH='ou=ptf,ou=groups,dc=edelia,dc=net'
GROUPS = { "dev" : "4000" ,
"dep" : "900" ,
"recette" : "2000",
"int" : "2500"
}
def read_config_file() :
config_file = 'godap.ini'
def usage() :
print ("godap -u user -G primary_group -g grp1,grp2 -m home_directory")
def connexion():
global con
print ("methode de connexion")
con=Connection(server, user=dn_connect , password=dn_pass)
if not con.bind():
print("error de connexion a {}".format(server) )
def get_user():
global con
groups_ids = []
print ("methode de get des users")
con.search(search_base='dc=edelia,dc=net',
search_filter = '(objectClass=person)',
search_scope = SUBTREE ,
attributes= ['*'] )
#for entry in con.response:
# print(entry['dn'], entry['attributes'])
#for entry in con.response:
#print(entry['dn'], entry['attributes'])
users = con.response
#print ( att )
#print (users)
return users
for i in users :
#print ( i )
print ("sn est : {}".format( i ) )
#for j in i.keys() :
#print ("j: {}".format(j) )
dn = i["dn"]
print ( dn )
def get_group_dn(group) :
global con
filter = ""
filter = '(cn=' + group +')'
print(filter)
con.search(search_base='ou=groups,dc=edelia,dc=net',
search_filter = filter,
search_scope = SUBTREE )
#attributes= ['*'] )
list_of_groups_dn = con.response
print (con.response[0]["dn"])
#def send_ldap_user_group():
#def send_ldap_request() :
if __name__ == "__main__" :
"""
    argument parsing
"""
user_name=""
primary_group=""
groups=[]
    parser = argparse.ArgumentParser(description='command line for adding a user in LDAP')
parser.add_argument('-u', action='store', default="none" , dest='user',help='name of the user to add')
parser.add_argument('-n', action='store', default="none" , dest='name',help='name of the user to add')
parser.add_argument('-G', action='store', default="none", dest='primary_group',help='name of the primary group of the user')
parser.add_argument('-g', action='append', default=[], dest='groups',help='group to add to the user')
#parser.add_argument('count', action='store')
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
results = parser.parse_args()
print ("options : user : {} {}, group primary : {} , groups : {}".format(results.user , results.name , results.primary_group , results.groups ))
#connexion()
ls = mods.ldap.ldap(server, dn_connect, dn_pass)
#
get_group_dn("ptf24")
exit()
send_ldap_user_creation( results.user , results.primary_group , results.groups , results.name )
exit()
users = get_user()
get_last_user_uid_from_group(users,"dev")
| [
"[email protected]"
] | |
5f5c2d5fd25e62e92717b420b164f974576182c3 | 3539d0e3ddd7849a14876e95f0332428ec28ebf7 | /Data Scientist Career Path/3. Python Fundamentals/6. Python Loop/2. List Comprehension Code Challenge/18. ages.py | e5fdc328f0eaafe3e447ed0b4cf91a74ee9f7ef5 | [
"MIT"
] | permissive | DincerDogan/Data-Science-Learning-Path | ff146de2cf4ebc5fedfa9377babf959208dfe7e6 | 2ba0f104bc67ab6ef0f8fb869aa12aa02f5f1efb | refs/heads/main | 2023-05-08T10:53:47.449974 | 2021-06-06T21:27:31 | 2021-06-06T21:27:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | names = ["Shilah", "Arya", "Kele"]
ages = [14, 9, 35]
users = ["Name: " + x[0] + ", Age: " + str(x[1]) for x in zip(names, ages)] | [
"[email protected]"
] | |
1347a718ac0fe7a1d32610c9e403b982b3bd4506 | c5f58af61e3577ded52acda210f4f664651b598c | /template/mmdetection/configs/vfnet/vfnet_x101_32x4d_fpn_mstrain_2x_coco.py | 9f445458c8365c10abb325f59a583923ce416a73 | [
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | hojihun5516/object-detection-level2-cv-02 | 0a4ee5cea9a77ef5d43fb61a4b37fe3a87cb0eac | bc8a08286935b31b8e7e597c4b1ca2cbbaeb9109 | refs/heads/master | 2023-08-31T09:50:59.150971 | 2021-10-16T15:00:19 | 2021-10-16T15:00:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | _base_ = "./vfnet_r50_fpn_mstrain_2x_coco.py"
model = dict(
backbone=dict(
type="ResNeXt",
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type="BN", requires_grad=True),
norm_eval=True,
style="pytorch",
init_cfg=dict(type="Pretrained", checkpoint="open-mmlab://resnext101_32x4d"),
)
)
| [
"[email protected]"
] | |
3a68029f539876708281d23051be8c94c328cd85 | 3171b42757e60a61c2a71474cb07f81b9219ee50 | /tests/test_upload.py | ebf1b3cf99f8570e73ec3366ba494fdd4c1da8d1 | [] | no_license | yoophi/flaskygram | cde431d1a177e5bf54558ab83bb07886b93bc9ed | 363e29f393ab3e1b8d40102add09b1f9bff65670 | refs/heads/main | 2023-05-14T15:11:05.307578 | 2021-06-05T06:55:53 | 2021-06-05T06:55:53 | 57,872,879 | 0 | 0 | null | 2023-05-02T20:26:02 | 2016-05-02T07:40:40 | Python | UTF-8 | Python | false | false | 695 | py | """
TestCase migration with Flask-Testing
"""
from flask import url_for
from cStringIO import StringIO
from flaskygram.models import Media
from tests import BaseTestCase
class MediaTest(BaseTestCase):
def test_upload(self):
res = self.client.post(url_for('api.media_upload'),
data=dict(
file=(StringIO("123456789 " * 1000), 'test.png'),
),
headers={
'Authorization': 'Bearer %s' % self.get_oauth2_token()
})
self.assert200(res)
self.assertEqual(1, Media.query.count())
| [
"[email protected]"
] | |
1bdad74646ded2dbe5d41d72449d186dbcc5986a | fa9bae32c203323dfb345d9a415d4eaecb27a931 | /492. Construct the Rectangle.py | 223ca17420f3b701265528b09aa68d0b6bc71233 | [] | no_license | IUIUN/The-Best-Time-Is-Now | 48a0c2e9d449aa2f4b6e565868a227b6d555bf29 | fab660f98bd36715d1ee613c4de5c7fd2b69369e | refs/heads/master | 2020-09-14T12:06:24.074973 | 2020-02-15T06:55:08 | 2020-02-15T06:55:08 | 223,123,743 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | class Solution:
def constructRectangle(self, area: int) -> List[int]:
mid = int(math.sqrt(area))
while mid > 0:
if area % mid == 0:
return [int(area // mid), int(mid)]
mid -= 1
| [
"[email protected]"
] | |
3d276284f2a17ce3cb97170644da2537cb159809 | 1c390cd4fd3605046914767485b49a929198b470 | /leetcode/find-the-kth-largest-integer-in-the-array.py | 0b79c89ea7dd853e247daaeaeb9b7d66593bed4d | [] | no_license | wwwwodddd/Zukunft | f87fe736b53506f69ab18db674311dd60de04a43 | 03ffffee9a76e99f6e00bba6dbae91abc6994a34 | refs/heads/master | 2023-01-24T06:14:35.691292 | 2023-01-21T15:42:32 | 2023-01-21T15:42:32 | 163,685,977 | 7 | 8 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | class Solution:
def kthLargestNumber(self, a: List[str], k: int) -> str:
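        # key=int sorts numerically (a plain string sort would be lexicographic);
        # indexing [-k] then picks the k-th largest value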
return sorted(a,key=int)[-k] | [
"[email protected]"
] | |
3a335ae6cc7bab264be2c947c01ec8df7a6e941e | abad82a1f487c5ff2fb6a84059a665aa178275cb | /Codewars/8kyu/age-range-compatibility-equation/Python/solution1.py | 3657a02d2a92ac4c93199e60e1489bc9fdc04b1a | [
"MIT"
] | permissive | RevansChen/online-judge | 8ae55f136739a54f9c9640a967ec931425379507 | ad1b07fee7bd3c49418becccda904e17505f3018 | refs/heads/master | 2021-01-19T23:02:58.273081 | 2019-07-05T09:42:40 | 2019-07-05T09:42:40 | 88,911,035 | 9 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | # Python - 3.4.3
from math import floor
def dating_range(age):
minage = lambda a: (a / 2) + 7 if a > 14 else 0.9 * a
maxage = lambda a: (a - 7) * 2 if a > 14 else 1.1 * a
return '%d-%d' % (floor(minage(age)), floor(maxage(age)))
| [
"[email protected]"
] | |
d55ce02404f32b0877e1a295d5b671b302e5db52 | 0466559817d3a1be9409da2c83db99c4db3bacfe | /hubcheck/pageobjects/widgets/members_profile_mailpreference.py | cdbdc54f4a0ca509c4b8a5bcbc0be220497cb01e | [
"MIT"
] | permissive | ken2190/hubcheck | 955cf9b75a1ee77e28256dfd3a780cfbc17de961 | 2ff506eb56ba00f035300862f8848e4168452a17 | refs/heads/master | 2023-03-20T15:17:12.949715 | 2015-09-29T16:11:18 | 2015-09-29T16:11:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,363 | py | from hubcheck.pageobjects.basepageelement import Radio
from hubcheck.pageobjects.basepageelement import Select
from hubcheck.pageobjects.widgets.members_profile_element import MembersProfileElement
class MembersProfileMailPreference1(MembersProfileElement):
def __init__(self, owner, locatordict={}):
super(MembersProfileMailPreference1,self).__init__(owner,locatordict)
# load hub's classes
MembersProfileMailPreference_Locators = self.load_class('MembersProfileMailPreference_Locators')
# update this object's locator
self.locators.update(MembersProfileMailPreference_Locators.locators)
# update the locators with those from the owner
self.update_locators_from_owner()
# setup page object's components
self.mailpreference = Radio(self,{'Yes':'mail_yes','No':'mail_no'})
self.access = Select(self,{'base':'access'})
# update the component's locators with this objects overrides
self._updateLocators()
def value(self):
"""return a dictionary of the mailpreference and access values"""
return {'mailpreference' : self.mailpreference.value(),
'access' : self.access.value()}
def update(self,mailpreference=None,access=None):
"""update the mailpreference and access values"""
if mailpreference != None:
self.mailpreference.value = mailpreference
if access != None:
self.access.value = access
self.save.click()
class MembersProfileMailPreference1_Locators_Base(object):
"""locators for MembersProfileMailPreference2 object"""
locators = {
'base' : "css=.profile-optin",
'mail_yes' : "css=#mailPreferenceOptionYes",
'mail_no' : "css=#mailPreferenceOptionNo",
'access' : "css=.profile-optin select[name='access[optin]']",
'sectionkey' : "css=.profile-optin .key",
'sectionvalue' : "css=.profile-optin .value",
'open' : "css=.profile-optin .edit-profile-section",
'close' : "css=.profile-optin .edit-profile-section",
'save' : "css=.profile-optin .section-edit-submit",
'cancel' : "css=.profile-optin .section-edit-cancel",
}
class MembersProfileMailPreference2(MembersProfileElement):
def __init__(self, owner, locatordict={}):
super(MembersProfileMailPreference2,self).__init__(owner,locatordict)
# load hub's classes
MembersProfileMailPreference_Locators = self.load_class('MembersProfileMailPreference_Locators')
# update this object's locator
self.locators.update(MembersProfileMailPreference_Locators.locators)
# update the locators with those from the owner
self.update_locators_from_owner()
# setup page object's components
self.mailpreference = Select(self,{'base':'mailpref'})
self.access = Select(self,{'base':'access'})
# update the component's locators with this objects overrides
self._updateLocators()
def value(self):
"""return a dictionary of the mailpreference and access values"""
return {'mailpreference' : self.mailpreference.value(),
'access' : self.access.value()}
def update(self,mailpreference=None,access=None):
"""update the mailpreference and access values"""
if mailpreference != None:
self.mailpreference.value = mailpreference
if access != None:
self.access.value = access
self.save.click()
class MembersProfileMailPreference2_Locators_Base(object):
"""locators for MembersProfileMailPreference2 object"""
locators = {
'base' : "css=.profile-optin",
'mailpref' : "css=.profile-optin select[name='mailPreferenceOption']",
'access' : "css=.profile-optin select[name='access[optin]']",
'sectionkey' : "css=.profile-optin .key",
'sectionvalue' : "css=.profile-optin .value",
'open' : "css=.profile-optin .edit-profile-section",
'close' : "css=.profile-optin .edit-profile-section",
'save' : "css=.profile-optin .section-edit-submit",
'cancel' : "css=.profile-optin .section-edit-cancel",
}
| [
"[email protected]"
] | |
a9a1d3f8224ac0897447a887e426b0391bef5786 | 788f1d32045560ffafff468476c9c9897dabb31c | /Curso em Vídeo/Mundo 1 Fundamentos/Desafios/desafio019.py | 9a32db158cf6b88cdf869f23df5dd89ebfe57efa | [
"MIT"
] | permissive | henriqueumeda/-Python-study | 7f8d911e9e724aa2f183e652e6a7ae31b742b90e | 28e93a377afa4732037a29eb74d4bc7c9e24b62f | refs/heads/main | 2023-08-10T21:10:32.360808 | 2021-09-21T02:37:16 | 2021-09-21T02:37:16 | 330,294,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | from random import choice
primeiroAluno = input('First student: ')
segundoAluno = input('Second student: ')
terceiroAluno = input('Third student: ')
quartoAluno = input('Fourth student: ')
choiceList = [primeiroAluno, segundoAluno, terceiroAluno, quartoAluno]
print('The chosen student was {}'.format(choice(choiceList))) | [
"[email protected]"
] | |
de4d019cf164022898f2466c7ffb7d80085142ed | bb150497a05203a718fb3630941231be9e3b6a32 | /models/Paddle2ONNX/ocr_v2/main_test.py | 5e669c2b5e142253e0524cf4ddc5edf09bee4f17 | [] | no_license | PaddlePaddle/PaddleTest | 4fb3dec677f0f13f7f1003fd30df748bf0b5940d | bd3790ce72a2a26611b5eda3901651b5a809348f | refs/heads/develop | 2023-09-06T04:23:39.181903 | 2023-09-04T11:17:50 | 2023-09-04T11:17:50 | 383,138,186 | 42 | 312 | null | 2023-09-13T11:13:35 | 2021-07-05T12:44:59 | Python | UTF-8 | Python | false | false | 4,519 | py | #!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
"""
main test
"""
import os
import shutil
import platform
class OcrV2Test(object):
"""
test Ocr to onnx tipc
"""
def __init__(self):
if os.path.exists("tipc_models_url_PaddleOCR_latest.txt"):
os.remove("tipc_models_url_PaddleOCR_latest.txt")
self.txt_url = (
"https://paddle-qa.bj.bcebos.com/fullchain_ce_test/"
"model_download_link/tipc_models_url_PaddleOCR_latest.txt"
)
os.system("wget -q --no-proxy {}".format(self.txt_url))
self.model_url_list = []
for line in open("tipc_models_url_PaddleOCR_latest.txt"):
self.model_url_list.append(line)
self.opset_v_list = [11, 12]
self.ignore_model = [
"ch_PP-OCRv3_rec_PACT",
"rec_mtb_nrtr",
"rec_mv3_tps_bilstm_att_v2.0",
"rec_mv3_tps_bilstm_ctc_v2.0",
"rec_r34_vd_tps_bilstm_att_v2.0",
"rec_r34_vd_tps_bilstm_ctc_v2.0",
]
def prepare_resource(self, tgz_url):
"""
prepare resource and pytest code
"""
tgz = tgz_url[tgz_url.rfind("/") + 1 : -1]
time_stamp = tgz[0 : tgz.find("^")]
tmp = tgz.replace(time_stamp + "^", "")
repo = tmp[0 : tmp.find("^")]
tmp = tgz.replace(time_stamp + "^" + repo + "^", "")
model_name = tmp[0 : tmp.find("^")]
model_path = model_name + "_upload"
tmp = tgz.replace(time_stamp + "^" + repo + "^" + model_name + "^", "")
paddle_commit = tmp[0 : tmp.find("^")]
tmp = tgz.replace(time_stamp + "^" + repo + "^" + model_name + "^" + paddle_commit + "^", "")
repo_commit = tmp[0 : tmp.find(".")]
str_all = ""
for opset_v in self.opset_v_list:
tmp = (
"def test_opt_v{}():\n"
' """test {} opt version {}"""\n'
" logging.info('time stamp: {} !!!')\n"
" logging.info('model name: {} !!!')\n"
" logging.info('paddle commit: {} !!!')\n"
" logging.info('repo commit: {} !!!')\n"
" unit_exit_code = os.system(\n"
' "paddle2onnx --model_dir={} "\n'
' "--model_filename=inference.pdmodel "\n'
' "--params_filename=inference.pdiparams "\n'
' "--save_file={} "\n'
' "--opset_version={} --enable_onnx_checker=True"\n'
" )\n"
" assert unit_exit_code == 0\n"
"\n"
"\n".format(
opset_v,
model_name,
opset_v,
time_stamp,
model_name,
paddle_commit,
repo_commit,
model_path,
os.path.join(model_path, "inference.onnx"),
opset_v,
)
)
str_all += tmp
case_name = model_name.replace(".", "_")
case_name = case_name.replace("-", "_")
with open("test_{}.py".format(case_name), "w") as f:
f.write(
"#!/bin/env python\n"
"# -*- coding: utf-8 -*-\n"
"# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python\n"
'"""\n'
"test {} to onnx\n"
'"""\n'
"import os\n"
"import logging\n"
"\n"
"\n".format(model_name)
)
f.write(str_all)
os.system("wget -q --no-proxy {}".format(tgz_url))
os.system("tar -xzf {}".format(tgz))
return tgz, case_name, model_path, model_name
def run(self):
"""
run test
"""
for tgz_url in self.model_url_list:
tgz, case_name, model_path, model_name = self.prepare_resource(tgz_url)
if model_name not in self.ignore_model:
if platform.system() == "Windows":
os.system("python.exe -m pytest {} --alluredir=report".format("test_" + case_name + ".py"))
else:
os.system("python -m pytest {} --alluredir=report".format("test_" + case_name + ".py"))
os.remove(tgz)
shutil.rmtree(model_path)
if __name__ == "__main__":
test = OcrV2Test()
test.run()
| [
"[email protected]"
] | |
ed2aff3a66672c8a682fc5200fdd435cdc11b20a | 7386480f7e09101b0518cf1f92b5902726c8816e | /RTMinuit/minuit_html.py | 46e3db8756e812009af255ede952105be3242ba4 | [] | no_license | mattbellis/RTMinuit | 51458bc9fd5a83895eba959c44895826e070741b | d67c35b9aaf904bed278d4ce237cc2e6cf9485dc | refs/heads/master | 2020-12-25T03:00:43.769380 | 2012-05-21T19:09:56 | 2012-05-21T19:09:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,872 | py |
class MinuitHTMLResult:
def __init__(self,m):
"""
:param m:
        :type m: Minuit
"""
self.varnames = m.varname
self.values = m.values
self.errors = m.errors
self.mnerrors = m.minos_errors()
def _repr_html_(self):
tmp = []
header = u"""<tr>
<td></td>
<td>Parameter</td>
<td>Value</td>
<td>Parab Error</td>
<td>Minos Error-</td>
<td>Minos Error+</td>
</tr>"""
keys = sorted(self.values)
for i,k in enumerate(self.varnames):
val = self.values[k]
err = self.errors[k]
mnp = self.mnerrors[k].eplus
mnm = self.mnerrors[k].eminus
varno = i+1
line = u"""<tr>
<td align="right">{varno:d}</td>
<td align="left">{k}</td>
<td align="right">{val:e}</td>
<td align="left"> ±{err:e}</td>
<td align="left">{mnm:e}</td>
<td align="left">+{mnp:e}</td>
</tr>""".format(**locals())
tmp.append(line)
ret = '<table>%s\n%s\n</table>'%(header,'\n'.join(tmp))
return ret
class MinuitCorrelationMatrixHTML:
def __init__(self,m):
self.matrix = m.error_matrix(True)
self.params = m.list_of_vary_param()
self.nparams = len(self.params)
assert(self.matrix.shape==(self.nparams,self.nparams))
def style(self,val):
return 'background-color:%s'%Gradient.rgb_color_for(val)
def _repr_html_(self):
header = ''
for i in range(self.nparams):
header+='<td style="text-align:left"><div style="-webkit-writing-mode:vertical-rl;">%s</div></td>\n'%self.params[i]
header = '<tr><td></td>\n%s</tr>\n'%header
lines = list()
for i in range(self.nparams):
line = '<td>%s</td>'%self.params[i]
for j in range(self.nparams):
style = self.style(self.matrix[i,j])
line+='<td style="%s">%4.2f</td>\n'%(style,self.matrix[i,j])
line = '<tr>\n%s</tr>\n'%line
lines.append(line)
ret = '<table>\n%s%s</table>\n'%(header,''.join(lines))
return ret
class Gradient:
#A3FEBA pastel green
#FF7575 pastel red
#from http://code.activestate.com/recipes/266466-html-colors-tofrom-rgb-tuples/
@classmethod
def color_for(cls,v,min=0.,max=1.,startcolor=(163,254,186),stopcolor=(255,117,117)):
c = [0]*3
for i,sc in enumerate(startcolor):
c[i] = round(startcolor[i] + 1.0*(v-min)*(stopcolor[i]-startcolor[i])/(max-min))
return tuple(c)
@classmethod
def rgb_color_for(cls,v):
c = cls.color_for(abs(v))
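        # e.g. rgb_color_for(0.0) -> 'rgb(163,254,186)' (pastel green);
        # magnitudes near 1 shade toward 'rgb(255,117,117)' (pastel red)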
return 'rgb(%d,%d,%d)'%c | [
"[email protected]"
] | |
f1c9b9076961020c81cbaf3646daba66e73e364d | 8f6cc0e8bd15067f1d9161a4b178383e62377bc7 | /Video_future_frame_prediction/fc_lstm_n202_ubu/old_code/old_code_v0002/models/lstmcell_simple_encoder_decoder.py | 95a2bc4e593826921cf1006deb6946f36735942c | [] | no_license | humorbeing/python_github | 9c4dfc61a3cefbb266fefff335f6b28d05797e5e | e4b4b49bee7e7e3843c6874717779ce8d619bd02 | refs/heads/master | 2023-01-22T21:51:20.193131 | 2020-01-26T21:47:23 | 2020-01-26T21:47:23 | 163,707,778 | 0 | 0 | null | 2022-12-27T15:37:48 | 2019-01-01T01:58:18 | Python | UTF-8 | Python | false | false | 1,400 | py |
# encoder decoder, only 1 layer, flatten style
# lstmcell
# v0004
import torch
import torch.nn as nn
import numpy as np
class FC_LSTM(nn.Module):
def __init__(self):
super(FC_LSTM, self).__init__()
self.encoder_lstmcell = nn.LSTMCell(4096, 4096)
self.decoder_lstmcell = nn.LSTMCell(4096, 4096)
def forward(self, x, future_step=10):
# x in is [seq=10, batch, 64, 64]
device = next(self.parameters()).device
seq_size = x.shape[0]
batch_size = x.shape[1]
h_e = torch.zeros((batch_size, 4096)).to(device)
c_e = torch.zeros((batch_size, 4096)).to(device)
x = x.reshape((seq_size, batch_size, -1))
# print(x.shape)
for seq in range(seq_size):
h_e, c_e = self.encoder_lstmcell(x[seq], (h_e, c_e))
# print(h_e.shape)
h_d = h_e
c_d = torch.zeros((batch_size, 4096)).to(device)
zero_input = torch.zeros((batch_size, 4096)).to(device)
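        # unconditioned decoder: every step receives a zero vector, so only the
        # recurrent state (seeded from the encoder) carries information forward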
outputs = []
for seq in range(future_step):
h_d, c_d = self.decoder_lstmcell(zero_input, (h_d, c_d))
outputs.append(h_d)
outputs = torch.stack(outputs)
        outputs = torch.reshape(outputs, (future_step, batch_size, 64, 64))  # one 64x64 frame per decoded step
return outputs
if __name__ == "__main__":
model = FC_LSTM()
x = torch.randn((10, 100, 64, 64))
x = model(x)
print(x.shape) | [
"[email protected]"
] |