Dataset schema (columns appear in the data rows below in this order):

| Column | Type | Range / values |
|---|---|---|
| blob_id | stringlengths | 40 to 40 |
| directory_id | stringlengths | 40 to 40 |
| path | stringlengths | 3 to 616 |
| content_id | stringlengths | 40 to 40 |
| detected_licenses | sequencelengths | 0 to 112 |
| license_type | stringclasses | 2 values |
| repo_name | stringlengths | 5 to 115 |
| snapshot_id | stringlengths | 40 to 40 |
| revision_id | stringlengths | 40 to 40 |
| branch_name | stringclasses | 777 values |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | stringclasses | 22 values |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | stringclasses | 149 values |
| src_encoding | stringclasses | 26 values |
| language | stringclasses | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | stringclasses | 188 values |
| content | stringlengths | 3 to 10.2M |
| authors | sequencelengths | 1 to 1 |
| author_id | stringlengths | 1 to 132 |
15fb71f19b34ff9cebea5eb8b88bc3e5eb0da713 | 393b32e6b2d302cb20e4267d5fafe639114db816 | /ohri/tool/duckling/tests/test_duckling_tool.py | 85b8b6ea08dae6643f29568933b0a1fc0f3d54ce | [] | no_license | proximiant/ohri | 9b2ec0e2477ada191c1d8b335df5bfd58212274a | 72dc192b6c8e26f5c29f079efa4032b937123cad | refs/heads/master | 2020-12-02T09:50:25.106623 | 2019-12-31T02:47:12 | 2019-12-31T02:47:12 | 230,972,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,691 | py | import logging
import os
from datetime import time
from functools import reduce
from pprint import pprint
from unittest import TestCase
from future.utils import lmap
from ohri.hub.logger.duckling_logger import DucklingLogger
from ohri.tool.collection.collection_tool import luniq
from ohri.tool.duckling.duckling_tool import DucklingTool
from ohri.tool.testing.testing_tool import TestingTool
FILE_PATH = os.path.realpath(__file__)
FILE_DIR = os.path.dirname(FILE_PATH)
FILE_NAME = os.path.basename(FILE_PATH)
DucklingLogger.attach_stderr2loggers(logging.DEBUG)
logger = DucklingLogger.filename_level2logger(FILE_NAME, logging.DEBUG)
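# Each test below sends a single phrase through Duckling for one dimension
# (time, timezone, temperature, number, ordinal) and compares the normalized
# parse against a hand-written reference; phrasings Duckling does not handle
# are marked as expected failures.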
def hyp2norm_time_list(hyp):
return lmap(DucklingTool.parse2norm_time_list, hyp)
class TestDucklingTool(TestCase):
""" time """
def test_01(self):
d = DucklingTool.duckling()
hyp = DucklingTool.str_dim2parse(d,
u'Let\'s meet at 11:45am',
DucklingTool.Dim.TIME,
)
ref = [{'dim': 'time',
'end': 21,
'start': 11,
'text': 'at 11:45am',
'value': ['11:45:00']}]
# pprint(hyp2norm_time_list(hyp))
self.assertEqual(hyp2norm_time_list(hyp), ref)
def test_02(self):
d = DucklingTool.duckling()
hyp = DucklingTool.str_dim2parse(d,
u'at two',
DucklingTool.Dim.TIME,
)
ref = [{'dim': 'time',
'end': 6,
'start': 0,
'text': 'at two',
'value': ['02:00:00', '14:00:00']}]
# pprint(hyp2norm_time_list(hyp))
self.assertEqual(hyp2norm_time_list(hyp), ref)
@TestingTool.expected_failure_deco(reason="Alexa script variation support not expected")
def test_03(self):
d = DucklingTool.duckling()
hyp = DucklingTool.str_dim2parse(d,
u'1010',
DucklingTool.Dim.TIME,
)
ref = [
{
"dim": "time",
"text": "1010",
"start": 0,
"end": 4,
"value": ['10:10:00', '22:10:00']}]
# pprint(hyp2norm_time_list(hyp))
self.assertEqual(hyp2norm_time_list(hyp), ref)
def test_04(self):
d = DucklingTool.duckling()
hyp = DucklingTool.str_dim2parse(d,
u'10 pm',
DucklingTool.Dim.TIME,
)
ref = [{'dim': 'time', 'text': '10 pm', 'start': 0, 'end': 5, 'value': ['22:00:00']}]
# pprint(hyp2norm_time_list(hyp))
self.assertEqual(hyp2norm_time_list(hyp), ref)
@TestingTool.expected_failure_deco(reason="'two thirty' type not supported")
def test_05(self):
d = DucklingTool.duckling()
hyp = DucklingTool.str_dim2parse(d,
u'two thirty',
DucklingTool.Dim.TIME,
)
ref = [{'dim': 'time', 'text': 'two thirty', 'start': 0, 'end': 10, 'value': ['2:30:00', '14:30:00']}]
# pprint(hyp2norm_time_list(hyp))
self.assertEqual(hyp2norm_time_list(hyp), ref)
def test_06(self):
d = DucklingTool.duckling()
hyp = DucklingTool.str_dim2parse(d,
u'ten to two',
DucklingTool.Dim.TIME,
)
ref = [{'dim': 'time',
'end': 10,
'start': 0,
'text': 'ten to two',
'value': ['01:50:00', '13:50:00']}]
# pprint(hyp2norm_time_list(hyp))
self.assertEqual(hyp2norm_time_list(hyp), ref)
def test_07(self):
d = DucklingTool.duckling()
hyp = DucklingTool.str_dim2parse(d,
u'five past ten',
DucklingTool.Dim.TIME,
)
ref = [{'dim': 'time',
'end': 13,
'start': 0,
'text': 'five past ten',
'value': ['10:05:00', '22:05:00',]}]
# pprint(hyp2norm_time_list(hyp))
self.assertEqual(hyp2norm_time_list(hyp), ref)
""" timezone """
def test_11(self):
d = DucklingTool.duckling()
hyp = DucklingTool.str_dim2parse(d,
u'pst',
DucklingTool.Dim.TIMEZONE,
)
ref = [{'dim': 'timezone',
'end': 3,
'start': 0,
'text': 'pst',
'value': {'value': 'PST'}}]
# pprint(hyp)
self.assertEqual(hyp, ref)
@TestingTool.expected_failure_deco(reason="'Asia/Seoul' type timezone not supported")
def test_12(self):
d = DucklingTool.duckling()
hyp = DucklingTool.str_dim2parse(d,
u'Asia/Seoul',
DucklingTool.Dim.TIMEZONE,
)
ref = [{'dim': 'timezone',
'end': 3,
'start': 0,
'text': 'pst',
'value': {'value': 'Asia/Seoul'}}]
# pprint(hyp)
self.assertEqual(hyp, ref)
def test_13(self):
d = DucklingTool.duckling()
hyp = DucklingTool.str_dim2parse(d,
u'pt',
DucklingTool.Dim.TIMEZONE,
)
ref = [{'dim': 'timezone',
'end': 2,
'start': 0,
'text': 'pt',
'value': {'value': 'PT'}}]
# pprint(hyp)
self.assertEqual(hyp, ref)
@TestingTool.expected_failure_deco(reason="'pacific time' type timezone not supported")
def test_14(self):
d = DucklingTool.duckling()
hyp = DucklingTool.str_dim2parse(d,
u'pacific time',
DucklingTool.Dim.TIMEZONE,
)
ref = [{'dim': 'timezone',
'end': 12,
'start': 0,
'text': 'pacific time',
'value': {'value': 'PT'}}]
# pprint(hyp)
self.assertEqual(hyp, ref)
""" temperature """
def test_21(self):
d = DucklingTool.duckling()
hyp = DucklingTool.str_dim2parse(d,
u'Let\'s change the temperatur from thirty two celsius to 65 degrees',
DucklingTool.Dim.TEMPERATURE,)
ref = [{u'dim': u'temperature',
u'end': 65,
u'start': 55,
u'value': {u'unit': u'degree', u'value': 65.0},
u'text': u'65 degrees',
},
{u'dim': u'temperature',
u'end': 51,
u'start': 33,
u'value': {u'unit': u'celsius', u'value': 32.0},
u'text': u'thirty two celsius'}
]
# pprint(hyp)
self.assertEqual(hyp, ref)
def test_22(self):
d = DucklingTool.duckling()
hyp = DucklingTool.str_dim2parse(d,
u'forty two degrees',
DucklingTool.Dim.TEMPERATURE,)
ref = [{'dim': 'temperature',
'end': 17,
'start': 0,
'text': 'forty two degrees',
'value': {'unit': 'degree', 'value': 42.0}}]
# pprint(hyp)
self.assertEqual(hyp, ref)
""" number """
def test_31(self):
d = DucklingTool.duckling()
hyp = DucklingTool.str_dim2parse(d,
"thirty two",
DucklingTool.Dim.NUMBER, )
ref = [{'dim': 'number',
'end': 10,
'start': 0,
'text': 'thirty two',
'value': {'value': 32.0}}]
# pprint(hyp)
self.assertEqual(hyp, ref)
def test_32(self):
d = DucklingTool.duckling()
hyp = DucklingTool.str_dim2parse(d,
"4,320", # comma supported !
DucklingTool.Dim.NUMBER, )
ref = [{'dim': 'number',
'end': 5,
'start': 0,
'text': '4,320',
'value': {'value': 4320.0}}]
# pprint(hyp)
self.assertEqual(hyp, ref)
@TestingTool.expected_failure_deco(reason="'two and a half' not supported. required for age for clothes")
def test_33(self):
d = DucklingTool.duckling()
hyp = DucklingTool.str_dim2parse(d,
"two and a half",
DucklingTool.Dim.NUMBER, )
ref = [{'dim': 'number',
'end': 14,
'start': 0,
'text': 'two and a half',
'value': {'value': 2.5}}]
# pprint(hyp)
self.assertEqual(hyp, ref)
@TestingTool.expected_failure_deco(reason="'three quarters' not supported. required for time.")
def test_34(self):
d = DucklingTool.duckling()
hyp = DucklingTool.str_dim2parse(d,
"three quarters",
DucklingTool.Dim.NUMBER, )
ref = [{'dim': 'number',
'end': 14,
'start': 0,
'text': 'three quarters',
'value': {'value': 0.75}}]
# pprint(hyp)
self.assertEqual(hyp, ref)
@TestingTool.expected_failure_deco(reason="'second one'. 'one' as number")
def test_35(self):
d = DucklingTool.duckling()
hyp = DucklingTool.str_dim2parse(d,
"second one",
DucklingTool.Dim.NUMBER, )
ref = []
# pprint(hyp)
self.assertEqual(hyp, ref)
""" ordinal """
def test_41(self):
d = DucklingTool.duckling()
hyp = DucklingTool.str_dim2parse(d,
"third",
DucklingTool.Dim.ORDINAL, )
ref = [{'dim': 'ordinal',
'end': 5,
'start': 0,
'text': 'third',
'value': {'value': 3}}]
# pprint(hyp)
self.assertEqual(hyp, ref)
@TestingTool.expected_failure_deco(reason="'one second'. 'second' is not ordinal")
def test_42(self):
d = DucklingTool.duckling()
hyp = DucklingTool.str_dim2parse(d,
"one second",
DucklingTool.Dim.ORDINAL, )
ref = []
# pprint(hyp)
self.assertEqual(hyp, ref)
""" distance """
""" volume """
""" money """
""" duration """
""" email """
""" url """
""" phone_number """
""" level_product """
""" leven_unit """
""" quantity """
""" cycle """
""" unit """
""" unit_of_duration """
| [
"[email protected]"
] | |
39fd9bbe1207cc93d1f22eb3470fc29753fa9cbb | 605b5e612f8837a4962f444de0bd157f782c0504 | /exp/063.py | cd7d9465a5a33dba45372c3c746f0c6fd23113ef | [] | no_license | osuossu8/CommonLitReadabilityPrize | a32db7608c3e975cd9366cb224e33aca9a4f9a7e | c555581a020d6338786dd4a938e090f20b4d1c88 | refs/heads/main | 2023-07-01T23:59:08.834163 | 2021-08-03T10:47:28 | 2021-08-03T10:47:28 | 372,928,954 | 12 | 1 | null | null | null | null | UTF-8 | Python | false | false | 23,275 | py | import gc
import os
import math
import random
import time
import warnings
import sys
sys.path.append("/root/workspace/CommonLitReadabilityPrize")
import numpy as np
import pandas as pd
import transformers
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as torchdata
from pathlib import Path
from typing import List
from sklearn import model_selection
from sklearn import metrics
from tqdm import tqdm
from transformers import RobertaConfig, RobertaModel, RobertaTokenizer
from apex import amp
class CFG:
######################
# Globals #
######################
EXP_ID = '063'
seed = 71
epochs = 5
folds = [0, 1, 2, 3, 4]
N_FOLDS = 5
LR = 2e-5
max_len = 256
train_bs = 8 * 2
valid_bs = 16 * 2
log_interval = 10
model_name = 'roberta-large'
itpt_path = 'itpt/roberta_large_2/'
numerical_cols = [
'excerpt_num_chars', 'excerpt_num_capitals', 'excerpt_caps_vs_length',
'excerpt_num_exclamation_marks', 'excerpt_num_question_marks',
'excerpt_num_punctuation', 'excerpt_num_symbols', 'excerpt_num_words',
'excerpt_num_unique_words', 'excerpt_words_vs_unique'
]
def set_seed(seed=42):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def get_device() -> torch.device:
return torch.device("cuda" if torch.cuda.is_available() else "cpu")
def init_logger(log_file='train.log'):
from logging import getLogger, INFO, FileHandler, Formatter, StreamHandler
logger = getLogger(__name__)
logger.setLevel(INFO)
handler1 = StreamHandler()
handler1.setFormatter(Formatter("%(message)s"))
handler2 = FileHandler(filename=log_file)
handler2.setFormatter(Formatter("%(message)s"))
logger.addHandler(handler1)
logger.addHandler(handler2)
return logger
def calc_loss(y_true, y_pred):
return np.sqrt(metrics.mean_squared_error(y_true, y_pred))
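# Head-and-tail truncation: if the tokenized text is longer than max_len, keep
# the first max_len//2 and the last max_len//2 input ids (and attention-mask
# values); otherwise pad the input ids with 1 (the RoBERTa pad token id) and
# the mask with 0 up to max_len.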
def convert_examples_to_head_and_tail_features(data, tokenizer, max_len):
head_len = int(max_len//2)
tail_len = head_len
data = data.replace('\n', '')
len_tok = len(tokenizer.tokenize(data))
tok = tokenizer.encode_plus(
data,
max_length=max_len,
truncation=True,
return_attention_mask=True,
return_token_type_ids=True
)
curr_sent = {}
if len_tok > max_len:
head_ids = tok['input_ids'][:head_len]
tail_ids = tok['input_ids'][-tail_len:]
head_mask = tok['attention_mask'][:head_len]
tail_mask = tok['attention_mask'][-tail_len:]
curr_sent['input_ids'] = head_ids + tail_ids
curr_sent['attention_mask'] = head_mask + tail_mask
else:
padding_length = max_len - len(tok['input_ids'])
curr_sent['input_ids'] = tok['input_ids'] + ([1] * padding_length)
curr_sent['attention_mask'] = tok['attention_mask'] + ([0] * padding_length)
return curr_sent
class CommonLitDataset:
def __init__(self, df, excerpt, tokenizer, max_len, numerical_features, tfidf):
self.excerpt = excerpt
self.tokenizer = tokenizer
self.max_len = max_len
self.df = df
self.numerical_features = numerical_features
self.tfidf_df = tfidf
def __len__(self):
return len(self.excerpt)
def __getitem__(self, item):
text = str(self.excerpt[item])
inputs = self.tokenizer(
text,
max_length=self.max_len,
padding="max_length",
truncation=True
)
# inputs = convert_examples_to_head_and_tail_features(text, tokenizer, self.max_len)
ids = inputs["input_ids"]
mask = inputs["attention_mask"]
targets = self.df["target"].values[item]
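# aux_target is the target rounded to the nearest integer; shifting it by +4
# gives a non-negative index used to one-hot encode a 7-class auxiliary label.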
aux = self.df["aux_target"].values[item] + 4
aux_targets = np.zeros(7, dtype=float)
aux_targets[aux] = 1.0
numerical_features = self.numerical_features[item]
tfidf = self.tfidf_df.values[item]
return {
"input_ids": torch.tensor(ids, dtype=torch.long),
"attention_mask": torch.tensor(mask, dtype=torch.long),
"targets" : torch.tensor(targets, dtype=torch.float32),
"aux_targets" : torch.tensor(aux_targets, dtype=torch.float32),
"numerical_features" : torch.tensor(numerical_features, dtype=torch.float32),
"tfidf" : torch.tensor(tfidf, dtype=torch.float32),
}
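# Attention pooling head: scores each token's hidden state with a small
# tanh/linear network, softmaxes the scores over the sequence, and returns the
# attention-weighted sum as a single context vector per example.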
class AttentionHead(nn.Module):
def __init__(self, in_features, hidden_dim, num_targets):
super().__init__()
self.in_features = in_features
self.middle_features = hidden_dim
self.W = nn.Linear(in_features, hidden_dim)
self.V = nn.Linear(hidden_dim, 1)
self.out_features = hidden_dim
def forward(self, features):
att = torch.tanh(self.W(features))
score = self.V(att)
attention_weights = torch.softmax(score, dim=1)
context_vector = attention_weights * features
context_vector = torch.sum(context_vector, dim=1)
return context_vector
class RoBERTaLarge(nn.Module):
def __init__(self, model_path):
super(RoBERTaLarge, self).__init__()
self.in_features = 1024
self.roberta = RobertaModel.from_pretrained(model_path)
self.head = AttentionHead(self.in_features,self.in_features,1)
self.dropout = nn.Dropout(0.1)
self.process_tfidf = nn.Sequential(
nn.Linear(100, 32),
nn.BatchNorm1d(32),
nn.PReLU(),
nn.Dropout(0.1),
)
self.l0 = nn.Linear(self.in_features + 32, 1)
self.l1 = nn.Linear(self.in_features + 32, 7)
def forward(self, ids, mask, numerical_features, tfidf):
roberta_outputs = self.roberta(
ids,
attention_mask=mask
)
x1 = self.head(roberta_outputs[0]) # bs, 1024
x2 = self.process_tfidf(tfidf) # bs, 32
x = torch.cat([x1, x2], 1) # bs, 1024 + 32
logits = self.l0(self.dropout(x))
aux_logits = torch.sigmoid(self.l1(self.dropout(x)))
return logits.squeeze(-1), aux_logits
# ====================================================
# Training helper functions
# ====================================================
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class MetricMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.y_true = []
self.y_pred = []
def update(self, y_true, y_pred):
self.y_true.extend(y_true.cpu().detach().numpy().tolist())
self.y_pred.extend(y_pred.cpu().detach().numpy().tolist())
@property
def avg(self):
self.rmse = calc_loss(self.y_true, self.y_pred)
return {
"RMSE" : self.rmse,
}
class RMSELoss(torch.nn.Module):
def __init__(self):
super(RMSELoss,self).__init__()
def forward(self,x,y):
criterion = nn.MSELoss()
loss = torch.sqrt(criterion(x, y))
return loss
def loss_fn(logits, targets):
loss_fct = RMSELoss()
loss = loss_fct(logits, targets)
return loss
def aux_loss_fn(logits, targets):
loss_fct = nn.BCEWithLogitsLoss()
loss = loss_fct(logits, targets)
return loss
def train_fn(epoch, model, train_data_loader, valid_data_loader, device, optimizer, scheduler, best_score):
model.train()
losses = AverageMeter()
scores = MetricMeter()
tk0 = tqdm(train_data_loader, total=len(train_data_loader))
for batch_idx, data in enumerate(tk0):
optimizer.zero_grad()
inputs = data['input_ids'].to(device)
masks = data['attention_mask'].to(device)
targets = data['targets'].to(device)
aux_targets = data['aux_targets'].to(device)
numerical_features = data['numerical_features'].to(device)
tfidf = data['tfidf'].to(device)
outputs, aux_outs = model(inputs, masks, numerical_features, tfidf)
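# Total loss: equal-weight (0.5 / 0.5) blend of RMSE on the regression target
# and BCE on the 7-class auxiliary label.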
loss = loss_fn(outputs, targets) * 0.5 + aux_loss_fn(aux_outs, aux_targets) * 0.5
loss.backward()
optimizer.step()
scheduler.step()
losses.update(loss.item(), inputs.size(0))
scores.update(targets, outputs)
tk0.set_postfix(loss=losses.avg)
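# Every CFG.log_interval training batches, run a full validation pass and
# checkpoint the model whenever the fold's best validation RMSE improves.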
if (batch_idx > 0) and (batch_idx % CFG.log_interval == 0):
valid_avg, valid_loss = valid_fn(model, valid_data_loader, device)
logger.info(f"Epoch {epoch+1}, Step {batch_idx} - valid_rmse:{valid_avg['RMSE']:0.5f}")
if valid_avg['RMSE'] < best_score:
logger.info(f">>>>>>>> Model Improved From {best_score} ----> {valid_avg['RMSE']}")
torch.save(model.state_dict(), OUTPUT_DIR+f'fold-{fold}.bin')
best_score = valid_avg['RMSE']
return scores.avg, losses.avg, valid_avg, valid_loss, best_score
def valid_fn(model, data_loader, device):
model.eval()
losses = AverageMeter()
scores = MetricMeter()
tk0 = tqdm(data_loader, total=len(data_loader))
with torch.no_grad():
for data in tk0:
inputs = data['input_ids'].to(device)
masks = data['attention_mask'].to(device)
targets = data['targets'].to(device)
aux_targets = data['aux_targets'].to(device)
numerical_features = data['numerical_features'].to(device)
tfidf = data['tfidf'].to(device)
outputs, aux_outs = model(inputs, masks, numerical_features, tfidf)
loss = loss_fn(outputs, targets) * 0.5 + aux_loss_fn(aux_outs, aux_targets) * 0.5
losses.update(loss.item(), inputs.size(0))
scores.update(targets, outputs)
tk0.set_postfix(loss=losses.avg)
return scores.avg, losses.avg
def calc_cv(model_paths):
models = []
for p in model_paths:
if CFG.itpt_path:
model = RoBERTaLarge(CFG.itpt_path)
logger.info('load itpt model')
else:
model = RoBERTaLarge(CFG.model_name)
model.to("cuda")
model.load_state_dict(torch.load(p))
model.eval()
models.append(model)
tokenizer = RobertaTokenizer.from_pretrained(CFG.model_name)
df = pd.read_csv("inputs/train_folds.csv")
df['aux_target'] = np.round(df['target'], 0).astype(np.int8) # 7 classes
df = get_sentence_features(df, 'excerpt')
TP = TextPreprocessor()
preprocessed_text = TP.preprocess(df['excerpt'])
pipeline = make_pipeline(
TfidfVectorizer(max_features=100000),
make_union(
TruncatedSVD(n_components=50, random_state=42),
make_pipeline(
BM25Transformer(use_idf=True, k1=2.0, b=0.75),
TruncatedSVD(n_components=50, random_state=42)
),
n_jobs=1,
),
)
z = pipeline.fit_transform(preprocessed_text)
tfidf_df = pd.DataFrame(z, columns=[f'cleaned_excerpt_tf_idf_svd_{i}' for i in range(50*2)])
y_true = []
y_pred = []
for fold, model in enumerate(models):
val_df = df[df.kfold == fold].reset_index(drop=True)
dataset = CommonLitDataset(df=val_df, excerpt=val_df.excerpt.values, tokenizer=tokenizer, max_len=CFG.max_len, numerical_features=df[CFG.numerical_cols].values, tfidf=tfidf_df)
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=CFG.valid_bs, num_workers=0, pin_memory=True, shuffle=False
)
final_output = []
for b_idx, data in tqdm(enumerate(data_loader)):
with torch.no_grad():
inputs = data['input_ids'].to(device)
masks = data['attention_mask'].to(device)
numerical_features = data['numerical_features'].to(device)
tfidf = data['tfidf'].to(device)
output, _ = model(inputs, masks, numerical_features, tfidf)
output = output.detach().cpu().numpy().tolist()
final_output.extend(output)
logger.info(calc_loss(np.array(final_output), val_df['target'].values))
y_pred.append(np.array(final_output))
y_true.append(val_df['target'].values)
torch.cuda.empty_cache()
y_pred = np.concatenate(y_pred)
y_true = np.concatenate(y_true)
overall_cv_score = calc_loss(y_true, y_pred)
logger.info(f'cv score {overall_cv_score}')
return overall_cv_score
import nltk
import re
import scipy as sp
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted
from sklearn.feature_extraction.text import _document_frequency
from sklearn.pipeline import make_pipeline, make_union
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
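# BM25 term weighting as a scikit-learn transformer: fit() stores a smoothed
# idf vector and the average document length; transform() rescales each raw
# term count tf to tf * (k1 + 1) / (tf + k1 * (1 - b + b * doc_len / avg_doc_len))
# and, if use_idf is set, multiplies by the idf diagonal.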
class BM25Transformer(BaseEstimator, TransformerMixin):
def __init__(self, use_idf=True, k1=2.0, b=0.75):
self.use_idf = use_idf
self.k1 = k1
self.b = b
def fit(self, X):
if not sp.sparse.issparse(X):
X = sp.sparse.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
idf = np.log((n_samples - df + 0.5) / (df + 0.5))
self._idf_diag = sp.sparse.spdiags(idf, diags=0, m=n_features, n=n_features)
doc_len = X.sum(axis=1)
self._average_document_len = np.average(doc_len)
return self
def transform(self, X, copy=True):
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
X = sp.sparse.csr_matrix(X, copy=copy)
else:
X = sp.sparse.csr_matrix(X, dtype=np.float, copy=copy)
n_samples, n_features = X.shape
doc_len = X.sum(axis=1)
sz = X.indptr[1:] - X.indptr[0:-1]
rep = np.repeat(np.asarray(doc_len), sz)
nom = self.k1 + 1
denom = X.data + self.k1 * (1 - self.b + self.b * rep / self._average_document_len)
data = X.data * nom / denom
X = sp.sparse.csr_matrix((data, X.indices, X.indptr), shape=X.shape)
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
X = X * self._idf_diag
return X
class TextPreprocessor(object):
def __init__(self):
self.puncts = [',', '.', '"', ':', ')', '(', '-', '!', '?', '|', ';', "'", '$', '&', '/', '[', ']', '>', '%', '=', '#', '*', '+', '\\', '•', '~', '@', '£',
'·', '_', '{', '}', '©', '^', '®', '`', '<', '→', '°', '€', '™', '›', '♥', '←', '×', '§', '″', '′', 'Â', '█', '½', 'à', '…',
'“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑', '±', '¿', '▾', '═', '¦', '║', '―', '¥', '▓', '—', '‹', '─',
'▒', ':', '¼', '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲', 'è', '¸', '¾', 'Ã', '⋅', '‘', '∞', '«',
'∙', ')', '↓', '、', '│', '(', '»', ',', '♪', '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹', '≤', '‡', '√', '(', ')', '~',
'➡', '%', '⇒', '▶', '「', '➄', '➆', '➊', '➋', '➌', '➍', '⓪', '①', '②', '③', '④', '⑤', '⑰', '❶', '❷', '❸', '❹', '❺', '❻', '❼', '❽',
'=', '※', '㈱', '、', '△', '℮', 'ⅼ', '‐', '」', '┝', '↳', '◉', '/', '+', '○',
'【', '】', '✅', '☑', '➤', '゙', '↳', '〶', '☛', '「', '⁺', '『', '≫',
]
self.numbers = ["0","1","2","3","4","5","6","7","8","9","0","1","2","3","4","5","6","7","8","9"]
self.stopwords = nltk.corpus.stopwords.words('english')
def _pre_preprocess(self, x):
return str(x).lower()
def rm_num(self, x, use_num=True):
x = re.sub('[0-9]{5,}', '', x)
x = re.sub('[0-9]{4}', '', x)
x = re.sub('[0-9]{3}', '', x)
x = re.sub('[0-9]{2}', '', x)
for i in self.numbers:
x = x.replace(str(i), '')
return x
def clean_puncts(self, x):
for punct in self.puncts:
x = x.replace(punct, '')
return x
def clean_stopwords(self, x):
list_x = x.split()
res = []
for w in list_x:
if w not in self.stopwords:
res.append(w)
return ' '.join(res)
def preprocess(self, sentence):
sentence = sentence.fillna(" ")
sentence = sentence.map(lambda x: self._pre_preprocess(x))
sentence = sentence.map(lambda x: self.clean_puncts(x))
sentence = sentence.map(lambda x: self.clean_stopwords(x))
sentence = sentence.map(lambda x: self.rm_num(x))
return sentence
def get_sentence_features(train, col):
train[col + '_num_chars'] = train[col].apply(len)
train[col + '_num_capitals'] = train[col].apply(lambda x: sum(1 for c in x if c.isupper()))
train[col + '_caps_vs_length'] = train.apply(lambda row: row[col + '_num_chars'] / (row[col + '_num_capitals']+1e-5), axis=1)
train[col + '_num_exclamation_marks'] = train[col].apply(lambda x: x.count('!'))
train[col + '_num_question_marks'] = train[col].apply(lambda x: x.count('?'))
train[col + '_num_punctuation'] = train[col].apply(lambda x: sum(x.count(w) for w in '.,;:'))
train[col + '_num_symbols'] = train[col].apply(lambda x: sum(x.count(w) for w in '*&$%'))
train[col + '_num_words'] = train[col].apply(lambda x: len(x.split()))
train[col + '_num_unique_words'] = train[col].apply(lambda comment: len(set(w for w in comment.split())))
train[col + '_words_vs_unique'] = train[col + '_num_unique_words'] / train[col + '_num_words']
return train
OUTPUT_DIR = f'outputs/{CFG.EXP_ID}/'
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
warnings.filterwarnings("ignore")
logger = init_logger(log_file=Path("logs") / f"{CFG.EXP_ID}.log")
# environment
set_seed(CFG.seed)
device = get_device()
# data
train = pd.read_csv("inputs/train_folds.csv")
train['aux_target'] = np.round(train['target'], 0).astype(np.int8) # 7 classes
train = get_sentence_features(train, 'excerpt')
TP = TextPreprocessor()
preprocessed_text = TP.preprocess(train['excerpt'])
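# Text-feature pipeline: TF-IDF over the cleaned excerpts, then two parallel
# 50-component TruncatedSVD reductions (one on the raw TF-IDF matrix, one after
# BM25 re-weighting), concatenated into a 100-dim vector per excerpt.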
pipeline = make_pipeline(
TfidfVectorizer(max_features=100000),
make_union(
TruncatedSVD(n_components=50, random_state=42),
make_pipeline(
BM25Transformer(use_idf=True, k1=2.0, b=0.75),
TruncatedSVD(n_components=50, random_state=42)
),
n_jobs=1,
),
)
z = pipeline.fit_transform(preprocessed_text)
tfidf_df = pd.DataFrame(z, columns=[f'cleaned_excerpt_tf_idf_svd_{i}' for i in range(50*2)])
print(train.shape)
train.head()
# main loop
for fold in range(5):
if fold not in CFG.folds:
continue
logger.info("=" * 120)
logger.info(f"Fold {fold} Training")
logger.info("=" * 120)
trn_df = train[train.kfold != fold].reset_index(drop=True)
val_df = train[train.kfold == fold].reset_index(drop=True)
if CFG.itpt_path:
model = RoBERTaLarge(CFG.itpt_path)
logger.info('load itpt model')
else:
model = RoBERTaLarge(CFG.model_name)
tokenizer = RobertaTokenizer.from_pretrained(CFG.model_name)
train_dataset = CommonLitDataset(df=trn_df, excerpt=trn_df.excerpt.values, tokenizer=tokenizer, max_len=CFG.max_len, numerical_features=trn_df[CFG.numerical_cols].values, tfidf=tfidf_df)
train_dataloader = torch.utils.data.DataLoader(
train_dataset, batch_size=CFG.train_bs, num_workers=0, pin_memory=True, shuffle=True
)
valid_dataset = CommonLitDataset(df=val_df, excerpt=val_df.excerpt.values, tokenizer=tokenizer, max_len=CFG.max_len, numerical_features=val_df[CFG.numerical_cols].values, tfidf=tfidf_df)
valid_dataloader = torch.utils.data.DataLoader(
valid_dataset, batch_size=CFG.valid_bs, num_workers=0, pin_memory=True, shuffle=False
)
param_optimizer = list(model.named_parameters())
no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
optimizer_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.001},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
]
num_train_steps = int(len(trn_df) / CFG.train_bs * CFG.epochs)
optimizer = transformers.AdamW(optimizer_parameters, lr=CFG.LR)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, eta_min=1e-5, T_max=CFG.epochs)
model = model.to(device)
min_loss = 999
best_score = np.inf
for epoch in range(CFG.epochs):
logger.info("Starting {} epoch...".format(epoch+1))
start_time = time.time()
train_avg, train_loss, valid_avg, valid_loss, best_score = train_fn(epoch, model, train_dataloader, valid_dataloader, device, optimizer, scheduler, best_score)
scheduler.step()
elapsed = time.time() - start_time
logger.info(f'Epoch {epoch+1} - avg_train_loss: {train_loss:.5f} avg_val_loss: {valid_loss:.5f} time: {elapsed:.0f}s')
logger.info(f"Epoch {epoch+1} - train_rmse:{train_avg['RMSE']:0.5f} valid_rmse:{valid_avg['RMSE']:0.5f}")
if valid_avg['RMSE'] < best_score:
logger.info(f">>>>>>>> Model Improved From {best_score} ----> {valid_avg['RMSE']}")
torch.save(model.state_dict(), OUTPUT_DIR+f'fold-{fold}.bin')
best_score = valid_avg['RMSE']
if len(CFG.folds) == 1:
pass
else:
model_paths = [
f'outputs/{CFG.EXP_ID}/fold-0.bin',
f'outputs/{CFG.EXP_ID}/fold-1.bin',
f'outputs/{CFG.EXP_ID}/fold-2.bin',
f'outputs/{CFG.EXP_ID}/fold-3.bin',
f'outputs/{CFG.EXP_ID}/fold-4.bin',
]
overall_cv_score = calc_cv(model_paths)
print()
| [
"[email protected]"
] | |
782421c6cfd319b5bc114598b6cfb8469740d481 | ad2704933de4502ae9de91e6d915f9dbe010b446 | /kurosawa/chapter02/knock18.py | cd6c994ed7a88d77e47aa5bf1eadbd204aeae0e7 | [] | no_license | tmu-nlp/100knock2017 | 266e68917d8d5a7f5d0c064f1bc2da5fa402a253 | 629bd1155d0fe78cd9302ae9a7cdf0922b778fe7 | refs/heads/master | 2021-01-19T17:36:53.328997 | 2017-07-24T07:09:54 | 2017-07-24T07:09:54 | 88,334,932 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | with open('hightemp.txt','r') as f:
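# Read hightemp.txt, split each line into fields, and print the rows sorted by
# the third column, mirroring `sort -k3 hightemp.txt`.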
col = []
for list1 in f:
list1 = list1.split()
col.append(list1)
for i in sorted(col, key=lambda temp: temp[2]):
print("%s\t%s\t%s\t%s" %(i[0],i[1],i[2],i[3]))
# sort -k3 hightemp.txt
| [
"[email protected]"
] | |
c14f97bc5c0477ff54423437c819c182fbe525dc | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc043/A/4875921.py | ff12ba55782657e025469a48435adbb6fbb4dce0 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 41 | py | N = int(input())
print(N * (N + 1) // 2) | [
"[email protected]"
] | |
8337e6a7dc81a4c23be3efe3bae20a6cb4c729ed | 84ef8aba277c90cc483ba6382044d0246049a5ea | /hgail/critic/critic.py | 0727bd98106c57a053ce048c5c0e8d183ea06f06 | [
"MIT"
] | permissive | intelligent-control-lab/Autoenv | 1c8c7085ce8de394525d6bdf0da471a67fd84ec0 | 8d7697421ca14e317ea7de24b6acb694ecae5148 | refs/heads/master | 2023-04-06T12:21:31.998120 | 2021-03-06T05:12:12 | 2021-03-06T05:12:12 | 206,652,547 | 5 | 1 | MIT | 2023-03-15T23:16:19 | 2019-09-05T20:33:23 | Python | UTF-8 | Python | false | false | 9,807 | py |
import numpy as np
import tensorflow as tf
import hgail.misc.utils
import hgail.misc.tf_utils
class Critic(object):
"""
Critic base class
"""
def __init__(
self,
network,
dataset,
obs_dim,
act_dim,
optimizer=tf.train.RMSPropOptimizer(0.0001),
n_train_epochs=5,
grad_norm_rescale=10000.,
grad_norm_clip=10000.,
summary_writer=None,
debug_nan=False,
verbose=0):
self.network = network
self.dataset = dataset
self.obs_dim = obs_dim
self.act_dim = act_dim
self.optimizer = optimizer
self.n_train_epochs = n_train_epochs
self.grad_norm_rescale = grad_norm_rescale
self.grad_norm_clip = grad_norm_clip
self.summary_writer = summary_writer
self.debug_nan = debug_nan
self.verbose = verbose
def critique(self, itr, paths):
"""
Compute and return rewards based on the (obs, action) pairs in paths.
The result is a list of numpy arrays, each the same length as the
rewards of the corresponding path.
Args:
itr: iteration count
paths: list of dictionaries
"""
# convert to batch and use network to critique
obs = np.concatenate([d['observations'] for d in paths], axis=0)
acts = np.concatenate([d['actions'] for d in paths], axis=0)
# normalize
if self.dataset.observation_normalizer:
obs = self.dataset.observation_normalizer(obs)
if self.dataset.action_normalizer:
acts = self.dataset.action_normalizer(acts)
# compute rewards
rewards = self.network.forward(obs, acts, deterministic=True)
if np.any(np.isnan(rewards)) and self.debug_nan:
import ipdb
ipdb.set_trace()
# output as a list of numpy arrays, each of len equal to the rewards of
# the corresponding trajectory
path_lengths = [len(d['rewards']) for d in paths]
path_rewards = hgail.misc.utils.batch_to_path_rewards(rewards, path_lengths)
self._log_critique(itr, paths, rewards)
return path_rewards
def _log_critique(self, itr, paths, critic_rewards):
"""
Log information about the critique and paths
Args:
itr: algorithm batch iteration
paths: list of dictionaries containing trajectory information
critic_rewards: critic rewards
"""
# only write summaries if have a summary writer
if self.summary_writer:
env_rewards = np.concatenate([d['rewards'] for d in paths], axis=0)
summary = tf.Summary(value=[
tf.Summary.Value(tag="critic/mean_critique_reward", simple_value=np.mean(critic_rewards)),
tf.Summary.Value(tag="critic/max_critique_reward", simple_value=np.max(critic_rewards)),
tf.Summary.Value(tag="critic/std_dev_critique_reward", simple_value=np.std(critic_rewards)),
tf.Summary.Value(tag="critic/mean_env_reward", simple_value=np.mean(env_rewards)),
tf.Summary.Value(tag="critic/mean_path_len", simple_value=len(env_rewards) / float(len(paths))),
])
self.summary_writer.add_summary(summary, itr)
self.summary_writer.flush()
def train(self, itr, samples_data):
"""
Train the critic using real and sampled data
Args:
itr: iteration count
samples_data: dictionary containing generated data
"""
for train_itr in range(self.n_train_epochs):
for batch in self.dataset.batches(samples_data, store=train_itr == 0):
self._train_batch(batch)
def _train_batch(self, batch):
"""
Runs a single training batch
Args:
batch: dictionary with values needed for training network class member
"""
raise NotImplementedError()
def _build_summaries(
self,
loss,
real_loss,
gen_loss,
gradients,
clipped_gradients,
gradient_penalty=None,
batch_size=None):
summaries = []
summaries += [tf.summary.scalar('critic/loss', loss)]
summaries += [tf.summary.scalar('critic/w_dist', -(real_loss + gen_loss))]
summaries += [tf.summary.scalar('critic/real_loss', real_loss)]
summaries += [tf.summary.scalar('critic/gen_loss', gen_loss)]
summaries += [tf.summary.scalar('critic/global_grad_norm', tf.global_norm(gradients))]
summaries += [tf.summary.scalar('critic/global_clipped_grad_norm', tf.global_norm(clipped_gradients))]
summaries += [tf.summary.scalar('critic/global_var_norm', tf.global_norm(self.network.var_list))]
if gradient_penalty is not None:
summaries += [tf.summary.scalar('critic/gradient_penalty', gradient_penalty)]
if batch_size is not None:
summaries += [tf.summary.scalar('critic/batch_size', batch_size)]
return summaries
def _build_input_summaries(self, rx, ra, gx, ga):
summaries = []
summaries += [tf.summary.image('critic/real_obs', tf.reshape(rx[0], (-1, self.obs_dim, 1, 1)))]
summaries += [tf.summary.image('critic/real_act', tf.reshape(ra[0], (-1, self.act_dim, 1, 1)))]
summaries += [tf.summary.image('critic/gen_obs', tf.reshape(gx[0], (-1, self.obs_dim, 1, 1)))]
summaries += [tf.summary.image('critic/gen_act', tf.reshape(ga[0], (-1, self.act_dim, 1, 1)))]
return summaries
class WassersteinCritic(Critic):
def __init__(
self,
gradient_penalty=10.,
**kwargs):
super(WassersteinCritic, self).__init__(**kwargs)
self.gradient_penalty = gradient_penalty
self._build_placeholders()
self._build_model()
def _build_placeholders(self):
# placeholders for input
self.rx = tf.placeholder(tf.float32, shape=(None, self.obs_dim), name='rx')
self.ra = tf.placeholder(tf.float32, shape=(None, self.act_dim), name='ra')
self.gx = tf.placeholder(tf.float32, shape=(None, self.obs_dim), name='gx')
self.ga = tf.placeholder(tf.float32, shape=(None, self.act_dim), name='ga')
self.eps = tf.placeholder(tf.float32, shape=(None, 1), name='eps')
def _build_model(self):
# unpack placeholders
rx, ra, gx, ga, eps = self.rx, self.ra, self.gx, self.ga, self.eps
# gradient penalty
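# WGAN-GP: evaluate the critic on random interpolations between real and
# generated (obs, act) pairs and penalize the squared deviation of the
# critic's gradient norm from 1 at those points.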
self.xhat = xhat = eps * rx + (1 - eps) * gx
self.ahat = ahat = eps * ra + (1 - eps) * ga
xhat_gradients, ahat_gradients = tf.gradients(self.network(xhat, ahat), [xhat, ahat])
self.hat_gradients = hat_gradients = tf.concat([xhat_gradients, ahat_gradients], axis=1)
slopes = tf.sqrt(tf.reduce_sum(hat_gradients ** 2, reduction_indices=[1]) + 1e-8)
self.gp_loss = gp_loss = self.gradient_penalty * tf.reduce_mean((slopes - 1) ** 2)
# loss and train op
self.real_loss = real_loss = -tf.reduce_mean(self.network(rx, ra))
self.gen_loss = gen_loss = tf.reduce_mean(self.network(gx, ga))
self.loss = loss = real_loss + gen_loss + gp_loss
if self.verbose >= 2:
loss = tf.Print(loss, [real_loss, gen_loss, gp_loss, loss],
message='real, gen, gp, total loss: ')
self.gradients = gradients = tf.gradients(loss, self.network.var_list)
clipped_gradients = hgail.misc.tf_utils.clip_gradients(
gradients, self.grad_norm_rescale, self.grad_norm_clip)
self.global_step = tf.Variable(0, name='critic/global_step', trainable=False)
self.train_op = self.optimizer.apply_gradients([(g,v)
for (g,v) in zip(clipped_gradients, self.network.var_list)],
global_step=self.global_step)
# summaries
summaries = self._build_summaries(loss, real_loss, gen_loss, gradients, clipped_gradients, gp_loss)
summaries += self._build_input_summaries(rx, ra, gx, ga)
self.summary_op = tf.summary.merge(summaries)
# debug_nan
self.gp_gradients = tf.gradients(self.gp_loss, self.network.var_list)[:-1]
def _train_batch(self, batch):
feed_dict = {
self.rx: batch['rx'],
self.ra: batch['ra'],
self.gx: batch['gx'],
self.ga: batch['ga'],
self.eps: np.random.uniform(0, 1, len(batch['rx'])).reshape(-1, 1)
}
outputs_list = [self.train_op, self.summary_op, self.global_step]
if self.debug_nan:
outputs_list += [
self.gradients,
self.xhat,
self.ahat,
self.hat_gradients,
self.gp_gradients,
self.gp_loss,
self.real_loss,
self.gen_loss
]
session = tf.get_default_session()
fetched = session.run(outputs_list, feed_dict=feed_dict)
summary, step = fetched[1], fetched[2]
if self.debug_nan:
grads, xhat, ahat, hat_grads, gp_grads, gp_loss, real_loss, gen_loss = fetched[3:]
grads_nan = np.any([np.any(np.isnan(g)) for g in grads])
if grads_nan or np.isnan(gp_loss) or np.isnan(real_loss) or np.isnan(gen_loss):
import ipdb
ipdb.set_trace()
if self.summary_writer:
self.summary_writer.add_summary(tf.Summary.FromString(summary), step)
self.summary_writer.flush()
| [
"[email protected]"
] | |
859c6751bcaac2d3846b424c0d80a24f60795267 | 81f2825e5bc73bcdaadb00570d8a8607974af3af | /scratch_42.py | efa6e3f7b33d10a97f8b038a9064310cf9de8fbb | [] | no_license | PrakharBansal24/Assignment-1 | 26fc316fe4bd8d2482ef34f147ba241fc2b20d80 | 927239e57309ca3ca794631eb61faf16784b9bf2 | refs/heads/master | 2022-11-23T20:28:30.628768 | 2020-07-27T05:39:30 | 2020-07-27T05:39:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | def printFrequency(strr):
M = {}
word = ""
for i in range(len(strr)):
if (strr[i] == ' '):
if (word not in M):
M[word] = 1
word = ""
else:
M[word] += 1
word = ""
else:
word += strr[i]
if (word not in M):
M[word] = 1
else:
M[word] += 1
for it in M:
print(it, "-", M[it])
strr = "Apple Apple boy boy boy boy token token frequency"
printFrequency(strr) | [
"[email protected]"
] | |
996906dd39fb3529cc39d2ec310d939fa819d3ed | e18222344f78f65e5a52480fa24b4720a1d4e36b | /tests/test_appsync.py | 8281801fa43e093c266aa15a27d2af81b3a9f707 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | vllrsatish/troposphere | 177c34fac39f668eda8c2baaed19ae1a6a05964b | 5ec03f329f2a91d3bb970ef0df7cf6232dccde16 | refs/heads/master | 2023-03-24T23:18:40.304921 | 2021-03-21T12:06:08 | 2021-03-21T16:31:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,673 | py | import unittest
from troposphere.appsync import Resolver, PipelineConfig
class TestAppsyncResolver(unittest.TestCase):
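# Resolver validation: an unrecognized Kind should raise ValueError, while
# UNIT and PIPELINE (the latter with a PipelineConfig) should construct cleanly.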
def test_resolver_kind_bad_value(self):
with self.assertRaisesRegex(ValueError, 'Kind must be one of'):
Resolver(
'MutationField',
DataSourceName='SomeDatasource',
FieldName='Field',
TypeName='Mutation',
ApiId='some_api_id',
Kind='SOME_KIND',
PipelineConfig=PipelineConfig(
Functions=['FunctionId1', 'FunctionId']
),
RequestMappingTemplateS3Location=('s3://bucket/key.req.vtl'),
ResponseMappingTemplateS3Location=('s3://bucket/key.res.vtl')
)
def test_resolver(self):
Resolver(
'MutationField',
DataSourceName='SomeDatasource',
FieldName='Field',
TypeName='Mutation',
ApiId='some_api_id',
Kind='PIPELINE',
PipelineConfig=PipelineConfig(
Functions=['FunctionId1', 'FunctionId']
),
RequestMappingTemplateS3Location=('s3://bucket/key.req.vtl'),
ResponseMappingTemplateS3Location=('s3://bucket/key.res.vtl')
)
Resolver(
'MutationField',
DataSourceName='SomeDatasource',
FieldName='Field',
TypeName='Mutation',
ApiId='some_api_id',
Kind='UNIT',
RequestMappingTemplateS3Location=('s3://bucket/key.req.vtl'),
ResponseMappingTemplateS3Location=('s3://bucket/key.res.vtl')
)
| [
"[email protected]"
] | |
675e595f5196864d3257979b47697dfddbd5e4e4 | 9e1ee20e89229869b42cd5deceeb24ce7790b721 | /aliyun-python-sdk-rds/aliyunsdkrds/request/v20140815/PreCheckCreateOrderForTempUpgradeRequest.py | 9f2b126d63cd827d8382f32125666a242dfb382c | [
"Apache-2.0"
] | permissive | guwenbo/aliyun-openapi-python-sdk | 7503ed8f50897ea1ad7bdb390e140a2e570e30b8 | ef4f34e7e703ef2ddfdcb1f57573b9b14be77e0d | refs/heads/master | 2020-09-23T04:44:06.134661 | 2019-12-02T12:52:51 | 2019-12-02T12:52:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,079 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkrds.endpoint import endpoint_data
class PreCheckCreateOrderForTempUpgradeRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Rds', '2014-08-15', 'PreCheckCreateOrderForTempUpgrade','rds')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_DBInstanceStorage(self):
return self.get_query_params().get('DBInstanceStorage')
def set_DBInstanceStorage(self,DBInstanceStorage):
self.add_query_param('DBInstanceStorage',DBInstanceStorage)
def get_NodeType(self):
return self.get_query_params().get('NodeType')
def set_NodeType(self,NodeType):
self.add_query_param('NodeType',NodeType)
def get_ClientToken(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def get_EffectiveTime(self):
return self.get_query_params().get('EffectiveTime')
def set_EffectiveTime(self,EffectiveTime):
self.add_query_param('EffectiveTime',EffectiveTime)
def get_DBInstanceId(self):
return self.get_query_params().get('DBInstanceId')
def set_DBInstanceId(self,DBInstanceId):
self.add_query_param('DBInstanceId',DBInstanceId)
def get_DBInstanceStorageType(self):
return self.get_query_params().get('DBInstanceStorageType')
def set_DBInstanceStorageType(self,DBInstanceStorageType):
self.add_query_param('DBInstanceStorageType',DBInstanceStorageType)
def get_BusinessInfo(self):
return self.get_query_params().get('BusinessInfo')
def set_BusinessInfo(self,BusinessInfo):
self.add_query_param('BusinessInfo',BusinessInfo)
def get_AutoPay(self):
return self.get_query_params().get('AutoPay')
def set_AutoPay(self,AutoPay):
self.add_query_param('AutoPay',AutoPay)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_Resource(self):
return self.get_query_params().get('Resource')
def set_Resource(self,Resource):
self.add_query_param('Resource',Resource)
def get_CommodityCode(self):
return self.get_query_params().get('CommodityCode')
def set_CommodityCode(self,CommodityCode):
self.add_query_param('CommodityCode',CommodityCode)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_UsedTime(self):
return self.get_query_params().get('UsedTime')
def set_UsedTime(self,UsedTime):
self.add_query_param('UsedTime',UsedTime)
def get_DBInstanceClass(self):
return self.get_query_params().get('DBInstanceClass')
def set_DBInstanceClass(self,DBInstanceClass):
self.add_query_param('DBInstanceClass',DBInstanceClass) | [
"[email protected]"
] | |
e2ec73ce9a92cc11f091aee17aacaa3ceb1eb9c9 | 7b79deca597eee678b521b808674948fc333fd40 | /Nanodet/client2.py | c5e1ccef29b6d952475a220940624ade856836a8 | [] | no_license | GitZzw/IERCAR | bfd4481ce1d1994a36f0587876c970b60f08d1c3 | cd3115b89f4b69a9adb2c26e412c0659bfa68aa6 | refs/heads/master | 2023-02-19T03:09:16.040070 | 2021-01-23T13:44:39 | 2021-01-23T13:44:39 | 332,219,359 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,537 | py | #!/usr/bin/python
# coding: utf-8
import math
import socket
import rospy
import threading
import time
from std_msgs.msg import Float32
from geometry_msgs.msg import PoseStamped
global target_corner_msg
global t2
global flag
def callback(data):
global t2
global flag
global target_corner_msg
target_corner_msg = PoseStamped()
target_corner_msg.header.stamp = rospy.Time.now()
target_corner_msg.pose.position.y = data.data
if(flag == True):
flag = False
t2 = threading.Thread(target=tcpip)
t2.start()
def client():
global flag
flag = True
rospy.init_node('client', anonymous=True)
rospy.Subscriber("trans", Float32, callback)
rospy.spin()
def tcpip():
yolo_target_pub = rospy.Publisher('yolo_target_corner', PoseStamped, queue_size=1)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# connect to the server
print ('connect state: ', s.connect_ex(('127.0.0.1', 8000)))
while True:
receive_msg = s.recv(39).decode()
#print(len(receive_msg))
msg = receive_msg.split(',')
if msg[0] == '1':
""" QuaternionStamped.x, y, z, w = xmin, ymin, xmax, ymax, x = time, y= distance """
cx = 317.657
cy = 234.635
f = 610.5250244140625
xmin = float(msg[1])
ymin = float(msg[2])
xmax = float(msg[3])
ymax = float(msg[4])
px = (xmin + xmax)/2
deltax = px-cx
py = (ymin + ymax)/2
deltay = py-cy
dis = target_corner_msg.pose.position.y
disz = dis/math.sqrt((abs(deltax)/f)*(abs(deltax)/f)+(abs(deltay)/f)*(abs(deltay)/f)+1)
disx = disz*deltax/f
disy = disz*deltay/f
target_corner_msg.pose.orientation.x = disx*100
target_corner_msg.pose.orientation.y = disy*100
target_corner_msg.pose.orientation.z = disz*100
target_corner_msg.pose.position.x = float(msg[5]) #time
#print(time.time()-target_corner_msg.pose.position.x)
# else:
# # print (" target not found ... ")
# target_corner_msg.pose.orientation.x = 0
# target_corner_msg.pose.orientation.y = 0
# target_corner_msg.pose.orientation.z = 0
# target_corner_msg.pose.orientation.w = 0
# target_corner_msg.pose.position.x = -1
yolo_target_pub.publish(target_corner_msg)
if __name__ == "__main__":
client()
| [
"[email protected]"
] | |
c4bc6a5ffd35c520978dd7344ea1af4675df3b58 | 26d6c34df00a229dc85ad7326de6cb5672be7acc | /msgraph-cli-extensions/beta/reports_beta/azext_reports_beta/vendored_sdks/reports/operations/_audit_logs_operations.py | e24f3be10c3092488b6cb30b91551a7223035bae | [
"MIT"
] | permissive | BrianTJackett/msgraph-cli | 87f92471f68f85e44872939d876b9ff5f0ae6b2c | 78a4b1c73a23b85c070fed2fbca93758733f620e | refs/heads/main | 2023-06-23T21:31:53.306655 | 2021-07-09T07:58:56 | 2021-07-09T07:58:56 | 386,993,555 | 0 | 0 | NOASSERTION | 2021-07-17T16:56:05 | 2021-07-17T16:56:05 | null | UTF-8 | Python | false | false | 79,966 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class AuditLogsOperations(object):
"""AuditLogsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~reports.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
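# Each operation builds an OData request (honouring $top/$skip/$search/$filter/
# $count plus $orderby/$select/$expand) and, for the list call, returns an
# ItemPaged iterator that keeps following @odata.nextLink until the collection
# is exhausted.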
def list_directory_audits(
self,
orderby=None, # type: Optional[List[Union[str, "models.Get5ItemsItem"]]]
select=None, # type: Optional[List[Union[str, "models.Get6ItemsItem"]]]
expand=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.CollectionOfDirectoryAudit"]
"""Get directoryAudits from auditLogs.
Get directoryAudits from auditLogs.
:param orderby: Order items by property values.
:type orderby: list[str or ~reports.models.Get5ItemsItem]
:param select: Select properties to be returned.
:type select: list[str or ~reports.models.Get6ItemsItem]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CollectionOfDirectoryAudit or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~reports.models.CollectionOfDirectoryAudit]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CollectionOfDirectoryAudit"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_directory_audits.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if self._config.top is not None:
query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
if self._config.skip is not None:
query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
if self._config.search is not None:
query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
if self._config.filter is not None:
query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
if self._config.count is not None:
query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('CollectionOfDirectoryAudit', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.OdataError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_directory_audits.metadata = {'url': '/auditLogs/directoryAudits'} # type: ignore
def create_directory_audits(
self,
body, # type: "models.MicrosoftGraphDirectoryAudit"
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphDirectoryAudit"
"""Create new navigation property to directoryAudits for auditLogs.
Create new navigation property to directoryAudits for auditLogs.
:param body: New navigation property.
:type body: ~reports.models.MicrosoftGraphDirectoryAudit
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphDirectoryAudit, or the result of cls(response)
:rtype: ~reports.models.MicrosoftGraphDirectoryAudit
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphDirectoryAudit"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_directory_audits.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphDirectoryAudit')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphDirectoryAudit', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_directory_audits.metadata = {'url': '/auditLogs/directoryAudits'} # type: ignore
def get_directory_audits(
self,
directory_audit_id, # type: str
select=None, # type: Optional[List[Union[str, "models.Enum19"]]]
expand=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphDirectoryAudit"
"""Get directoryAudits from auditLogs.
Get directoryAudits from auditLogs.
:param directory_audit_id: key: id of directoryAudit.
:type directory_audit_id: str
:param select: Select properties to be returned.
:type select: list[str or ~reports.models.Enum19]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphDirectoryAudit, or the result of cls(response)
:rtype: ~reports.models.MicrosoftGraphDirectoryAudit
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphDirectoryAudit"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_directory_audits.metadata['url'] # type: ignore
path_format_arguments = {
'directoryAudit-id': self._serialize.url("directory_audit_id", directory_audit_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphDirectoryAudit', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_directory_audits.metadata = {'url': '/auditLogs/directoryAudits/{directoryAudit-id}'} # type: ignore
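# Usage sketch for the single-entity GET above (client name assumed as before).
# `select` and `expand` are plain lists; the code above serializes them into
# comma-separated $select/$expand query values:
#
#     audit = client.audit_logs.get_directory_audits(
#         "<directoryAudit-id>",
#         select=["id", "activityDisplayName", "activityDateTime"])
#
# A 404 is translated into ResourceNotFoundError by map_error before the
# generic HttpResponseError path is reached.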
def update_directory_audits(
self,
directory_audit_id, # type: str
body, # type: "models.MicrosoftGraphDirectoryAudit"
**kwargs # type: Any
):
# type: (...) -> None
"""Update the navigation property directoryAudits in auditLogs.
Update the navigation property directoryAudits in auditLogs.
:param directory_audit_id: key: id of directoryAudit.
:type directory_audit_id: str
:param body: New navigation property values.
:type body: ~reports.models.MicrosoftGraphDirectoryAudit
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_directory_audits.metadata['url'] # type: ignore
path_format_arguments = {
'directoryAudit-id': self._serialize.url("directory_audit_id", directory_audit_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphDirectoryAudit')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update_directory_audits.metadata = {'url': '/auditLogs/directoryAudits/{directoryAudit-id}'} # type: ignore
def delete_directory_audits(
self,
directory_audit_id, # type: str
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Delete navigation property directoryAudits for auditLogs.
Delete navigation property directoryAudits for auditLogs.
:param directory_audit_id: key: id of directoryAudit.
:type directory_audit_id: str
:param if_match: ETag.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delete_directory_audits.metadata['url'] # type: ignore
path_format_arguments = {
'directoryAudit-id': self._serialize.url("directory_audit_id", directory_audit_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_directory_audits.metadata = {'url': '/auditLogs/directoryAudits/{directoryAudit-id}'} # type: ignore
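# Error-handling sketch for the update/delete helpers above. error_map converts
# 401/404/409 into the matching azure.core exceptions; any other non-2xx status
# surfaces as HttpResponseError carrying the deserialized OData error model. The
# optional If-Match header makes the delete conditional on an ETag (names other
# than the exception classes are assumed):
#
#     from azure.core.exceptions import HttpResponseError, ResourceNotFoundError
#     try:
#         client.audit_logs.delete_directory_audits(audit_id, if_match=etag)
#     except ResourceNotFoundError:
#         pass  # already gone
#     except HttpResponseError as exc:
#         print(exc.model)  # the OdataError payload, when one was parsed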
def list_directory_provisioning(
self,
orderby=None, # type: Optional[List[Union[str, "models.Enum20"]]]
select=None, # type: Optional[List[Union[str, "models.Enum21"]]]
expand=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.CollectionOfProvisioningObjectSummary"]
"""Get directoryProvisioning from auditLogs.
Get directoryProvisioning from auditLogs.
:param orderby: Order items by property values.
:type orderby: list[str or ~reports.models.Enum20]
:param select: Select properties to be returned.
:type select: list[str or ~reports.models.Enum21]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CollectionOfProvisioningObjectSummary or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~reports.models.CollectionOfProvisioningObjectSummary]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CollectionOfProvisioningObjectSummary"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_directory_provisioning.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if self._config.top is not None:
query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
if self._config.skip is not None:
query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
if self._config.search is not None:
query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
if self._config.filter is not None:
query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
if self._config.count is not None:
query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('CollectionOfProvisioningObjectSummary', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.OdataError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_directory_provisioning.metadata = {'url': '/auditLogs/directoryProvisioning'} # type: ignore
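# Note on the $top/$skip/$search/$filter/$count branches above: those OData
# options come from self._config, i.e. they are configured once on the client
# rather than passed per call. A sketch, assuming the generated client forwards
# them as constructor keyword arguments (client class name and signature are
# illustrative, not verified):
#
#     client = ReportsClient(credential, top=50, filter="...")
#     for summary in client.audit_logs.list_directory_provisioning():
#         ...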
def create_directory_provisioning(
self,
body, # type: "models.MicrosoftGraphProvisioningObjectSummary"
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphProvisioningObjectSummary"
"""Create new navigation property to directoryProvisioning for auditLogs.
Create new navigation property to directoryProvisioning for auditLogs.
:param body: New navigation property.
:type body: ~reports.models.MicrosoftGraphProvisioningObjectSummary
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphProvisioningObjectSummary, or the result of cls(response)
:rtype: ~reports.models.MicrosoftGraphProvisioningObjectSummary
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphProvisioningObjectSummary"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_directory_provisioning.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphProvisioningObjectSummary')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphProvisioningObjectSummary', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_directory_provisioning.metadata = {'url': '/auditLogs/directoryProvisioning'} # type: ignore
def get_directory_provisioning(
self,
provisioning_object_summary_id, # type: str
select=None, # type: Optional[List[Union[str, "models.Enum22"]]]
expand=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphProvisioningObjectSummary"
"""Get directoryProvisioning from auditLogs.
Get directoryProvisioning from auditLogs.
:param provisioning_object_summary_id: key: id of provisioningObjectSummary.
:type provisioning_object_summary_id: str
:param select: Select properties to be returned.
:type select: list[str or ~reports.models.Enum22]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphProvisioningObjectSummary, or the result of cls(response)
:rtype: ~reports.models.MicrosoftGraphProvisioningObjectSummary
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphProvisioningObjectSummary"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_directory_provisioning.metadata['url'] # type: ignore
path_format_arguments = {
'provisioningObjectSummary-id': self._serialize.url("provisioning_object_summary_id", provisioning_object_summary_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphProvisioningObjectSummary', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_directory_provisioning.metadata = {'url': '/auditLogs/directoryProvisioning/{provisioningObjectSummary-id}'} # type: ignore
def update_directory_provisioning(
self,
provisioning_object_summary_id, # type: str
body, # type: "models.MicrosoftGraphProvisioningObjectSummary"
**kwargs # type: Any
):
# type: (...) -> None
"""Update the navigation property directoryProvisioning in auditLogs.
Update the navigation property directoryProvisioning in auditLogs.
:param provisioning_object_summary_id: key: id of provisioningObjectSummary.
:type provisioning_object_summary_id: str
:param body: New navigation property values.
:type body: ~reports.models.MicrosoftGraphProvisioningObjectSummary
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_directory_provisioning.metadata['url'] # type: ignore
path_format_arguments = {
'provisioningObjectSummary-id': self._serialize.url("provisioning_object_summary_id", provisioning_object_summary_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphProvisioningObjectSummary')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update_directory_provisioning.metadata = {'url': '/auditLogs/directoryProvisioning/{provisioningObjectSummary-id}'} # type: ignore
def delete_directory_provisioning(
self,
provisioning_object_summary_id, # type: str
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Delete navigation property directoryProvisioning for auditLogs.
Delete navigation property directoryProvisioning for auditLogs.
:param provisioning_object_summary_id: key: id of provisioningObjectSummary.
:type provisioning_object_summary_id: str
:param if_match: ETag.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delete_directory_provisioning.metadata['url'] # type: ignore
path_format_arguments = {
'provisioningObjectSummary-id': self._serialize.url("provisioning_object_summary_id", provisioning_object_summary_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_directory_provisioning.metadata = {'url': '/auditLogs/directoryProvisioning/{provisioningObjectSummary-id}'} # type: ignore
def list_provisioning(
self,
orderby=None, # type: Optional[List[Union[str, "models.Enum23"]]]
select=None, # type: Optional[List[Union[str, "models.Enum24"]]]
expand=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.CollectionOfProvisioningObjectSummary0"]
"""Get provisioning from auditLogs.
Get provisioning from auditLogs.
:param orderby: Order items by property values.
:type orderby: list[str or ~reports.models.Enum23]
:param select: Select properties to be returned.
:type select: list[str or ~reports.models.Enum24]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CollectionOfProvisioningObjectSummary0 or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~reports.models.CollectionOfProvisioningObjectSummary0]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CollectionOfProvisioningObjectSummary0"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_provisioning.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if self._config.top is not None:
query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
if self._config.skip is not None:
query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
if self._config.search is not None:
query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
if self._config.filter is not None:
query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
if self._config.count is not None:
query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('CollectionOfProvisioningObjectSummary0', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.OdataError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_provisioning.metadata = {'url': '/auditLogs/provisioning'} # type: ignore
def create_provisioning(
self,
body, # type: "models.MicrosoftGraphProvisioningObjectSummary"
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphProvisioningObjectSummary"
"""Create new navigation property to provisioning for auditLogs.
Create new navigation property to provisioning for auditLogs.
:param body: New navigation property.
:type body: ~reports.models.MicrosoftGraphProvisioningObjectSummary
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphProvisioningObjectSummary, or the result of cls(response)
:rtype: ~reports.models.MicrosoftGraphProvisioningObjectSummary
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphProvisioningObjectSummary"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_provisioning.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphProvisioningObjectSummary')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphProvisioningObjectSummary', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_provisioning.metadata = {'url': '/auditLogs/provisioning'} # type: ignore
def get_provisioning(
self,
provisioning_object_summary_id, # type: str
select=None, # type: Optional[List[Union[str, "models.Enum25"]]]
expand=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphProvisioningObjectSummary"
"""Get provisioning from auditLogs.
Get provisioning from auditLogs.
:param provisioning_object_summary_id: key: id of provisioningObjectSummary.
:type provisioning_object_summary_id: str
:param select: Select properties to be returned.
:type select: list[str or ~reports.models.Enum25]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphProvisioningObjectSummary, or the result of cls(response)
:rtype: ~reports.models.MicrosoftGraphProvisioningObjectSummary
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphProvisioningObjectSummary"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_provisioning.metadata['url'] # type: ignore
path_format_arguments = {
'provisioningObjectSummary-id': self._serialize.url("provisioning_object_summary_id", provisioning_object_summary_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphProvisioningObjectSummary', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_provisioning.metadata = {'url': '/auditLogs/provisioning/{provisioningObjectSummary-id}'} # type: ignore
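# Sketch of the `cls` hook shared by these operations: for single-entity calls
# it is invoked as cls(pipeline_response, deserialized, {}), so a caller can
# capture the raw HTTP response alongside the deserialized model (names below
# are illustrative):
#
#     def keep_raw(pipeline_response, deserialized, headers):
#         return pipeline_response.http_response, deserialized
#
#     raw, summary = client.audit_logs.get_provisioning(summary_id, cls=keep_raw)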
def update_provisioning(
self,
provisioning_object_summary_id, # type: str
body, # type: "models.MicrosoftGraphProvisioningObjectSummary"
**kwargs # type: Any
):
# type: (...) -> None
"""Update the navigation property provisioning in auditLogs.
Update the navigation property provisioning in auditLogs.
:param provisioning_object_summary_id: key: id of provisioningObjectSummary.
:type provisioning_object_summary_id: str
:param body: New navigation property values.
:type body: ~reports.models.MicrosoftGraphProvisioningObjectSummary
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_provisioning.metadata['url'] # type: ignore
path_format_arguments = {
'provisioningObjectSummary-id': self._serialize.url("provisioning_object_summary_id", provisioning_object_summary_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphProvisioningObjectSummary')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update_provisioning.metadata = {'url': '/auditLogs/provisioning/{provisioningObjectSummary-id}'} # type: ignore
def delete_provisioning(
self,
provisioning_object_summary_id, # type: str
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Delete navigation property provisioning for auditLogs.
Delete navigation property provisioning for auditLogs.
:param provisioning_object_summary_id: key: id of provisioningObjectSummary.
:type provisioning_object_summary_id: str
:param if_match: ETag.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delete_provisioning.metadata['url'] # type: ignore
path_format_arguments = {
'provisioningObjectSummary-id': self._serialize.url("provisioning_object_summary_id", provisioning_object_summary_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_provisioning.metadata = {'url': '/auditLogs/provisioning/{provisioningObjectSummary-id}'} # type: ignore
def list_restricted_sign_ins(
self,
orderby=None, # type: Optional[List[Union[str, "models.Enum26"]]]
select=None, # type: Optional[List[Union[str, "models.Enum27"]]]
expand=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.CollectionOfRestrictedSignIn"]
"""Get restrictedSignIns from auditLogs.
Get restrictedSignIns from auditLogs.
:param orderby: Order items by property values.
:type orderby: list[str or ~reports.models.Enum26]
:param select: Select properties to be returned.
:type select: list[str or ~reports.models.Enum27]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CollectionOfRestrictedSignIn or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~reports.models.CollectionOfRestrictedSignIn]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CollectionOfRestrictedSignIn"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_restricted_sign_ins.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if self._config.top is not None:
query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
if self._config.skip is not None:
query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
if self._config.search is not None:
query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
if self._config.filter is not None:
query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
if self._config.count is not None:
query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('CollectionOfRestrictedSignIn', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.OdataError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_restricted_sign_ins.metadata = {'url': '/auditLogs/restrictedSignIns'} # type: ignore
def create_restricted_sign_ins(
self,
body, # type: "models.MicrosoftGraphRestrictedSignIn"
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphRestrictedSignIn"
"""Create new navigation property to restrictedSignIns for auditLogs.
Create new navigation property to restrictedSignIns for auditLogs.
:param body: New navigation property.
:type body: ~reports.models.MicrosoftGraphRestrictedSignIn
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphRestrictedSignIn, or the result of cls(response)
:rtype: ~reports.models.MicrosoftGraphRestrictedSignIn
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphRestrictedSignIn"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_restricted_sign_ins.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphRestrictedSignIn')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphRestrictedSignIn', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_restricted_sign_ins.metadata = {'url': '/auditLogs/restrictedSignIns'} # type: ignore
def get_restricted_sign_ins(
self,
restricted_sign_in_id, # type: str
select=None, # type: Optional[List[Union[str, "models.Enum28"]]]
expand=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphRestrictedSignIn"
"""Get restrictedSignIns from auditLogs.
Get restrictedSignIns from auditLogs.
:param restricted_sign_in_id: key: id of restrictedSignIn.
:type restricted_sign_in_id: str
:param select: Select properties to be returned.
:type select: list[str or ~reports.models.Enum28]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphRestrictedSignIn, or the result of cls(response)
:rtype: ~reports.models.MicrosoftGraphRestrictedSignIn
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphRestrictedSignIn"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_restricted_sign_ins.metadata['url'] # type: ignore
path_format_arguments = {
'restrictedSignIn-id': self._serialize.url("restricted_sign_in_id", restricted_sign_in_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphRestrictedSignIn', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_restricted_sign_ins.metadata = {'url': '/auditLogs/restrictedSignIns/{restrictedSignIn-id}'} # type: ignore
def update_restricted_sign_ins(
self,
restricted_sign_in_id, # type: str
body, # type: "models.MicrosoftGraphRestrictedSignIn"
**kwargs # type: Any
):
# type: (...) -> None
"""Update the navigation property restrictedSignIns in auditLogs.
Update the navigation property restrictedSignIns in auditLogs.
:param restricted_sign_in_id: key: id of restrictedSignIn.
:type restricted_sign_in_id: str
:param body: New navigation property values.
:type body: ~reports.models.MicrosoftGraphRestrictedSignIn
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_restricted_sign_ins.metadata['url'] # type: ignore
path_format_arguments = {
'restrictedSignIn-id': self._serialize.url("restricted_sign_in_id", restricted_sign_in_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphRestrictedSignIn')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update_restricted_sign_ins.metadata = {'url': '/auditLogs/restrictedSignIns/{restrictedSignIn-id}'} # type: ignore
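# The PATCH helper above returns None on 204 and otherwise raises. A sketch with
# assumed names follows; note that these navigation-property write helpers are
# generated mechanically from the API description, and the live auditLogs
# endpoints are generally read-only, so the service may reject such writes:
#
#     body = models.MicrosoftGraphRestrictedSignIn(id=sign_in_id)
#     client.audit_logs.update_restricted_sign_ins(sign_in_id, body)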
def delete_restricted_sign_ins(
self,
restricted_sign_in_id, # type: str
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Delete navigation property restrictedSignIns for auditLogs.
Delete navigation property restrictedSignIns for auditLogs.
:param restricted_sign_in_id: key: id of restrictedSignIn.
:type restricted_sign_in_id: str
:param if_match: ETag.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delete_restricted_sign_ins.metadata['url'] # type: ignore
path_format_arguments = {
'restrictedSignIn-id': self._serialize.url("restricted_sign_in_id", restricted_sign_in_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_restricted_sign_ins.metadata = {'url': '/auditLogs/restrictedSignIns/{restrictedSignIn-id}'} # type: ignore
def list_sign_ins(
self,
orderby=None, # type: Optional[List[Union[str, "models.Enum29"]]]
select=None, # type: Optional[List[Union[str, "models.Enum30"]]]
expand=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.CollectionOfSignIn"]
"""Get signIns from auditLogs.
Get signIns from auditLogs.
:param orderby: Order items by property values.
:type orderby: list[str or ~reports.models.Enum29]
:param select: Select properties to be returned.
:type select: list[str or ~reports.models.Enum30]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CollectionOfSignIn or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~reports.models.CollectionOfSignIn]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CollectionOfSignIn"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_sign_ins.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if self._config.top is not None:
query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
if self._config.skip is not None:
query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
if self._config.search is not None:
query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
if self._config.filter is not None:
query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
if self._config.count is not None:
query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('CollectionOfSignIn', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.OdataError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_sign_ins.metadata = {'url': '/auditLogs/signIns'} # type: ignore
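# Paging sketch for sign-ins: the ItemPaged returned above also supports
# page-at-a-time iteration via by_page(), useful when each Graph page should be
# handled as a batch (client and property names assumed as in earlier sketches):
#
#     pages = client.audit_logs.list_sign_ins(orderby=["createdDateTime desc"]).by_page()
#     for page in pages:
#         batch = list(page)
#         ...  # process this page's sign-in events as one batch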
def create_sign_ins(
self,
body, # type: "models.MicrosoftGraphSignIn"
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphSignIn"
"""Create new navigation property to signIns for auditLogs.
Create new navigation property to signIns for auditLogs.
:param body: New navigation property.
:type body: ~reports.models.MicrosoftGraphSignIn
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphSignIn, or the result of cls(response)
:rtype: ~reports.models.MicrosoftGraphSignIn
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphSignIn"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_sign_ins.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphSignIn')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphSignIn', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_sign_ins.metadata = {'url': '/auditLogs/signIns'} # type: ignore
def get_sign_ins(
self,
sign_in_id, # type: str
select=None, # type: Optional[List[Union[str, "models.Enum31"]]]
expand=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphSignIn"
"""Get signIns from auditLogs.
Get signIns from auditLogs.
:param sign_in_id: key: id of signIn.
:type sign_in_id: str
:param select: Select properties to be returned.
:type select: list[str or ~reports.models.Enum31]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphSignIn, or the result of cls(response)
:rtype: ~reports.models.MicrosoftGraphSignIn
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphSignIn"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_sign_ins.metadata['url'] # type: ignore
path_format_arguments = {
'signIn-id': self._serialize.url("sign_in_id", sign_in_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphSignIn', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_sign_ins.metadata = {'url': '/auditLogs/signIns/{signIn-id}'} # type: ignore
def update_sign_ins(
self,
sign_in_id, # type: str
body, # type: "models.MicrosoftGraphSignIn"
**kwargs # type: Any
):
# type: (...) -> None
"""Update the navigation property signIns in auditLogs.
Update the navigation property signIns in auditLogs.
:param sign_in_id: key: id of signIn.
:type sign_in_id: str
:param body: New navigation property values.
:type body: ~reports.models.MicrosoftGraphSignIn
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_sign_ins.metadata['url'] # type: ignore
path_format_arguments = {
'signIn-id': self._serialize.url("sign_in_id", sign_in_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphSignIn')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update_sign_ins.metadata = {'url': '/auditLogs/signIns/{signIn-id}'} # type: ignore
def delete_sign_ins(
self,
sign_in_id, # type: str
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Delete navigation property signIns for auditLogs.
Delete navigation property signIns for auditLogs.
:param sign_in_id: key: id of signIn.
:type sign_in_id: str
:param if_match: ETag.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delete_sign_ins.metadata['url'] # type: ignore
path_format_arguments = {
'signIn-id': self._serialize.url("sign_in_id", sign_in_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_sign_ins.metadata = {'url': '/auditLogs/signIns/{signIn-id}'} # type: ignore
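    # --- Illustrative usage (editor's sketch, not part of the generated client) ---
    # Deleting the same navigation property, optionally guarded by an ETag; the
    # `ops` variable and the ETag value below are placeholder assumptions:
    #
    #     ops.delete_sign_ins(sign_in_id="<signIn-id>", if_match='W/"3"')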
| [
"[email protected]"
] | |
7acee8deb1bf7f07bb324573b18412c4e2c80892 | cb8c62659f9509bbc01237a09cf8730b57f4a84f | /Webopedia/__init__.py | e27402d16322110945ef906d20fd6ce678573c79 | [] | no_license | stepnem/supybot-plugins | 5bd795319036ab21cd81b00a23e0c1f712876d3e | 6838f7ae22ad1905272cf7e003fb803e637c87d8 | refs/heads/master | 2021-01-01T18:49:44.478383 | 2012-01-05T04:14:24 | 2012-01-05T04:14:24 | 281,407 | 8 | 4 | null | 2016-11-01T20:15:17 | 2009-08-18T21:55:56 | Python | UTF-8 | Python | false | false | 1,965 | py | ###
# Copyright (c) 2004, Kevin Murphy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Provides commands and snarfers for the webopedia.com technical term dictionary
site.
"""
import supybot
__author__ = supybot.authors.skorobeus
# This is a dictionary mapping supybot.Author instances to lists of
# contributions.
__contributors__ = {}
import config
import plugin
reload(plugin) # In case we're being reloaded.
Class = plugin.Class
configure = config.configure
| [
"[email protected]"
] | |
b7080a3388fa6748b93fdbe2e00ad522869923bb | 5ec48e90f711c9514a6d2ee36dbb46bc1ba71b74 | /accounts/migrations/0005_alter_user_zipcode.py | ce51f2e740d31fc46683c04ef22c1913cba2642e | [] | no_license | hanieh-mav/hanieh_shop | 1ca5042fefb970459d9f48fb716a95fec6a530bb | b7cf253e11b6c167e78b245f253a8d057f435026 | refs/heads/main | 2023-06-10T16:37:26.385048 | 2021-07-07T14:19:58 | 2021-07-07T14:19:58 | 372,892,835 | 2 | 0 | null | 2021-07-07T14:19:59 | 2021-06-01T16:19:48 | CSS | UTF-8 | Python | false | false | 437 | py | # Generated by Django 3.2.4 on 2021-06-26 08:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0004_remove_user_shahr'),
]
operations = [
migrations.AlterField(
model_name='user',
name='zipcode',
field=models.CharField(blank=True, max_length=10, null=True, verbose_name='کدپستی'),
),
]
| [
"[email protected]"
] | |
feca44382f1c83fcd137aacf6ceaefb3ddc82150 | 9795e787a54d15f2f249a17b616fec3df67d4559 | /exception/exceptions.py | f737f34a31e603fd34902bda5585322969e15d34 | [] | no_license | gebbz03/PythonProject | 377b6ccf5eafa37dd157012ce499138370ba882f | c12f939cf194a4c541ee77e1f614ba9867ef7090 | refs/heads/master | 2020-04-02T22:16:11.082863 | 2018-10-30T05:49:22 | 2018-10-30T05:49:22 | 154,827,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py |
# try/except block: handle division by zero separately from any other error
def div(x,y):
try:
result=x/y
except ZeroDivisionError:
print("Cannot divide by zero")
return None
except Exception as e:
        print("Error occurred:",e)
return None
return result
print(div(4,2))
print(div(4,0))
print(div('1','2'))
| [
"[email protected]"
] | |
41fdbe9ba825572f3d44dfbc1f9d27d1ef7a631d | c1bd12405d244c5924a4b069286cd9baf2c63895 | /azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/verification_ip_flow_parameters_py3.py | dd9ba7b70c0b60664f4e70b0301a8b45e220e3c8 | [
"MIT"
] | permissive | lmazuel/azure-sdk-for-python | 972708ad5902778004680b142874582a284a8a7c | b40e0e36cc00a82b7f8ca2fa599b1928240c98b5 | refs/heads/master | 2022-08-16T02:32:14.070707 | 2018-03-29T17:16:15 | 2018-03-29T17:16:15 | 21,287,134 | 1 | 3 | MIT | 2019-10-25T15:56:00 | 2014-06-27T19:40:56 | Python | UTF-8 | Python | false | false | 3,706 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VerificationIPFlowParameters(Model):
"""Parameters that define the IP flow to be verified.
All required parameters must be populated in order to send to Azure.
:param target_resource_id: Required. The ID of the target resource to
perform next-hop on.
:type target_resource_id: str
:param direction: Required. The direction of the packet represented as a
5-tuple. Possible values include: 'Inbound', 'Outbound'
:type direction: str or ~azure.mgmt.network.v2017_10_01.models.Direction
:param protocol: Required. Protocol to be verified on. Possible values
include: 'TCP', 'UDP'
:type protocol: str or ~azure.mgmt.network.v2017_10_01.models.Protocol
:param local_port: Required. The local port. Acceptable values are a
single integer in the range (0-65535). Support for * for the source port,
which depends on the direction.
:type local_port: str
:param remote_port: Required. The remote port. Acceptable values are a
single integer in the range (0-65535). Support for * for the source port,
which depends on the direction.
:type remote_port: str
:param local_ip_address: Required. The local IP address. Acceptable values
are valid IPv4 addresses.
:type local_ip_address: str
:param remote_ip_address: Required. The remote IP address. Acceptable
values are valid IPv4 addresses.
:type remote_ip_address: str
:param target_nic_resource_id: The NIC ID. (If VM has multiple NICs and IP
forwarding is enabled on any of them, then this parameter must be
specified. Otherwise optional).
:type target_nic_resource_id: str
"""
_validation = {
'target_resource_id': {'required': True},
'direction': {'required': True},
'protocol': {'required': True},
'local_port': {'required': True},
'remote_port': {'required': True},
'local_ip_address': {'required': True},
'remote_ip_address': {'required': True},
}
_attribute_map = {
'target_resource_id': {'key': 'targetResourceId', 'type': 'str'},
'direction': {'key': 'direction', 'type': 'str'},
'protocol': {'key': 'protocol', 'type': 'str'},
'local_port': {'key': 'localPort', 'type': 'str'},
'remote_port': {'key': 'remotePort', 'type': 'str'},
'local_ip_address': {'key': 'localIPAddress', 'type': 'str'},
'remote_ip_address': {'key': 'remoteIPAddress', 'type': 'str'},
'target_nic_resource_id': {'key': 'targetNicResourceId', 'type': 'str'},
}
def __init__(self, *, target_resource_id: str, direction, protocol, local_port: str, remote_port: str, local_ip_address: str, remote_ip_address: str, target_nic_resource_id: str=None, **kwargs) -> None:
super(VerificationIPFlowParameters, self).__init__(**kwargs)
self.target_resource_id = target_resource_id
self.direction = direction
self.protocol = protocol
self.local_port = local_port
self.remote_port = remote_port
self.local_ip_address = local_ip_address
self.remote_ip_address = remote_ip_address
self.target_nic_resource_id = target_nic_resource_id
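# --- Illustrative usage (editor's sketch, not part of the generated model) ---
# Constructing the parameters documented in the class docstring above; the
# resource ID and addresses are placeholder assumptions:
#
#     params = VerificationIPFlowParameters(
#         target_resource_id="/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Compute/virtualMachines/<vm>",
#         direction="Outbound",
#         protocol="TCP",
#         local_port="80",
#         remote_port="443",
#         local_ip_address="10.0.0.4",
#         remote_ip_address="13.107.21.200",
#     )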
| [
"[email protected]"
] | |
7bcc22cbd071a1dfe4180d5e7295a60684678d5c | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/comm/webconnstatesaghist1qtr.py | b3ed2f7acb9a745a1878dcda438a532030afa58a | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 5,295 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class WebConnStatesAgHist1qtr(Mo):
"""
A class that represents historical aggregated statistics for web connections state in a 1 quarter sampling interval. This class updates every day.
"""
meta = StatsClassMeta("cobra.model.comm.WebConnStatesAgHist1qtr", "web connections state")
counter = CounterMeta("wait", CounterCategory.GAUGE, "connections", "current waiting connections")
meta._counters.append(counter)
counter = CounterMeta("write", CounterCategory.GAUGE, "connections", "current writing connections")
meta._counters.append(counter)
counter = CounterMeta("read", CounterCategory.GAUGE, "connections", "current reading connections")
meta._counters.append(counter)
meta.moClassName = "commWebConnStatesAgHist1qtr"
meta.rnFormat = "HDcommWebConnStatesAg1qtr-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical aggregated web connections state stats in 1 quarter"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.comm.Https")
meta.parentClasses.add("cobra.model.comm.Http")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Hist")
meta.superClasses.add("cobra.model.comm.WebConnStatesAgHist")
meta.rnPrefixes = [
('HDcommWebConnStatesAg1qtr-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "index", "index", 19394, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "index"))
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
namingVals = [index]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
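# --- Illustrative usage (editor's sketch, not generated code) ---
# Reading these history records through the Cobra SDK; the APIC URL and the
# credentials below are placeholder assumptions:
#
#     from cobra.mit.access import MoDirectory
#     from cobra.mit.session import LoginSession
#     moDir = MoDirectory(LoginSession('https://apic', 'admin', 'password'))
#     moDir.login()
#     for hist in moDir.lookupByClass('commWebConnStatesAgHist1qtr'):
#         print(hist.dn, hist.repIntvStart, hist.repIntvEnd)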
| [
"[email protected]"
] | |
d72a310d68e97683a91711d371b79255141f523c | 76d4d6f4edb3216ade81ba1d1b98ef17a1b9baa9 | /transactions/views.py | c3470d056a65b14161f48f361cc74ca9705c83a5 | [] | no_license | phemmylintry/crypto | 8455c4ed6fda14bf49fdad9527cb6de4134498d6 | 390816f8152514446d063728b7428d6633739855 | refs/heads/main | 2023-03-22T07:51:35.454040 | 2021-03-10T04:25:04 | 2021-03-10T04:25:04 | 340,151,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,819 | py | from rest_framework import status
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from rest_framework import generics
from rest_framework.authentication import TokenAuthentication
from django.contrib.auth import get_user_model
from django_q.tasks import async_task, result
from drf_spectacular.utils import extend_schema, OpenApiParameter, OpenApiExample
from drf_spectacular.types import OpenApiTypes
from .serializers import TransactionSerializer, TransactionListSerializer
from .models import Transaction
from .tasks import send_transaction
import uuid
User = get_user_model()
class TransactionView(generics.CreateAPIView):
queryset = Transaction.objects.all()
serializer_class = TransactionSerializer
permission_classes = (IsAuthenticated, )
    authentication_classes = (TokenAuthentication, )
@extend_schema(
request=TransactionSerializer,
responses={201: TransactionSerializer},
)
def create(self, request, *args, **kwargs):
serializer = self.serializer_class(data=request.data, context={'request': request})
serializer.is_valid(raise_exception=True)
transaction = self.perform_create(serializer)
if transaction == "success":
get_transaction_id = serializer.data['transaction_ref']
transact = Transaction.objects.get(transaction_ref=get_transaction_id)
transact.state = "success"
transact.save(update_fields=['state'])
else:
return Response ({
                'status' : "Transaction not successful",
'data' : {
'transaction_ref' : serializer.data['transaction_ref']
}
})
#update transaction state :(
headers = self.get_success_headers(serializer.data)
return Response({
'status' : "Transaction is successful.",
'data' : {
'transaction_ref' : serializer.data['transaction_ref']
}
}, status=status.HTTP_201_CREATED)
def perform_create(self, serializer):
currency_type = serializer.validated_data['currency_type']
target_user = serializer.validated_data['target_user']
get_target_user = User.objects.get(id=target_user)
serializer.validated_data['target_user'] = get_target_user
#generate randome id for transaction token
transaction_ref = uuid.uuid4()
serializer.validated_data['transaction_ref'] = transaction_ref
source_user = self.request.user
serializer.validated_data['source_user'] = source_user
serializer.save()
target_user = serializer.data['target_user']
source_user = serializer.data['source_user']
currency_type = serializer.data['currency_type']
transfer_amount = serializer.data['currency_amount']
task = async_task('transactions.tasks.send_transaction', source_user, target_user, currency_type, transfer_amount)
# task = send_transaction.apply_async((source_user, target_user, currency_type, transfer_amount), countdown=2)
results = result(task, 200)
print(results)
return results
class TransactionListView(APIView):
permission_classes = (IsAuthenticated, )
    authentication_classes = (TokenAuthentication, )
@extend_schema(
request=TransactionListSerializer,
responses={201: TransactionListSerializer},
)
def get(self, request, format='json'):
user = request.user.id
if not user:
return Response({
"status" : "Error",
"data" : {
"message" : "Invalid user"
}
}, status=status.HTTP_400_BAD_REQUEST)
transactions = Transaction.objects.all()
data = []
for items in transactions:
if items.source_user_id == user or items.target_user_id == user:
data.append({
'transaction_ref' : items.transaction_ref,
'currency_amount' : items.currency_amount,
'currency_type' : items.currency_type,
'source_user_id' : items.source_user_id,
'target_user_id' : items.target_user_id,
'state' : items.state,
'time_of_transaction': items.timestamp_created
})
if data == []:
return Response(
{
"data" : "No transaction history"
}, status=status.HTTP_200_OK)
return Response(data, status=status.HTTP_200_OK)
| [
"[email protected]"
] | |
017385197b19ce7d53a1c71903d01f34549125f6 | d532b85841b459c61d88d380e88dd08d29836d43 | /solutions/922_sort_array_by_parity_ii.py | da1ab4097aaced49495bc27465f04ae81e4854cc | [
"MIT"
] | permissive | YiqunPeng/leetcode_pro | ad942468df5506de9dc48a4019933f658e2a3121 | 4a508a982b125a3a90ea893ae70863df7c99cc70 | refs/heads/master | 2022-05-15T09:32:02.699180 | 2022-05-14T16:32:17 | 2022-05-14T16:32:17 | 182,453,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | class Solution:
def sortArrayByParityII(self, A: List[int]) -> List[int]:
"""Array.
Running time: O(n) where n is the length of A.
"""
even, odd = [], []
for a in A:
if a % 2 == 1:
odd.append(a)
else:
even.append(a)
res = []
for i in range(len(A)):
if i % 2 == 1:
res.append(odd.pop())
else:
res.append(even.pop())
return res | [
"[email protected]"
] | |
61698d866bd746910d1e197d4205bbdc4be3429a | cd2d3b6be41eb9b96ecc3a22dc730325c21f22e6 | /charalog/log/woals.cgi | 00aadbd636a0b7515b9ff6fd8600b2962811ba09 | [] | no_license | cappuu/TC | c61f235349e9a68d472fa85bbea1adbef3ea154a | def08d09219e11bee2135f6b796569b769ee21c1 | refs/heads/master | 2021-09-10T19:37:33.847161 | 2018-03-31T22:56:05 | 2018-03-31T22:56:05 | 124,523,296 | 0 | 0 | null | null | null | null | UHC | Python | false | false | 1,999 | cgi | 12월 : 사비의 방어시설을 <font color=red>+11</font> 강화했습니다.(16일0시14분)
11월 : 사비의 방어시설을 <font color=red>+11</font> 강화했습니다.(15일23시17분)
10월 : 사비의 방어시설을 <font color=red>+14</font> 강화했습니다.(15일22시15분)
9월 : 사비의 방어시설을 <font color=red>+10</font> 강화했습니다.(15일21시14분)
8월 : 사비의 방어시설을 <font color=red>+12</font> 강화했습니다.(15일20시14분)
7월 : 사비의 방어시설을 <font color=red>+10</font> 강화했습니다.(15일19시14분)
7월 : 수확으로 <font color=red>3751</font>의 식량을 수확했습니다. [봉토추가봉록:51](15일19시14분)
6월 : <font color=red>[상승] </font>:평강의 통솔력이 1포인트 올랐다.(15일18시15분)
6월 : 사비의 방어시설을 <font color=red>+8</font> 강화했습니다.(15일18시15분)
5월 : 사비의 방어시설을 <font color=red>+12</font> 강화했습니다.(15일17시14분)
4월 : 사비의 방어시설을 <font color=red>+12</font> 강화했습니다.(15일16시14분)
3월 : 사비의 방어시설을 <font color=red>+8</font> 강화했습니다.(15일15시14분)
2월 : 사비의 방어시설을 <font color=red>+11</font> 강화했습니다.(15일14시14분)
1월 : 숙박하여 피로를 대폭 회복하였습니다.(15일13시16분)
1월 : 세금으로 <font color=red>4341</font>의 돈을 징수했습니다. [관직추가봉록:400] [봉토추가봉록:241](15일13시16분)
12월 : 사비의 방어시설을 <font color=red>+11</font> 강화했습니다.(15일12시14분)
11월 : 사비의 방어시설을 <font color=red>+12</font> 강화했습니다.(15일11시16분)
10월 : 사비의 방어시설을 <font color=red>+11</font> 강화했습니다.(15일10시14분)
9월 : 사비의 방어시설을 <font color=red>+11</font> 강화했습니다.(15일9시15분)
8월 : 사비의 방어시설을 <font color=red>+12</font> 강화했습니다.(15일8시14분)
| [
"[email protected]"
] | |
3f2822cf8074a1923bebb0ea6f5d14b816b76656 | dde1cf596cf5969812ecda999828baa9c73e788d | /test/test_snapshot_alias_extended.py | 6bca0e0b2c31ef9b79fad88ed829b0806416cbaa | [] | no_license | dctalbot/isilon_sdk_python3.7 | bea22c91096d80952c932d6bf406b433af7f8e21 | 4d9936cf4b9e6acbc76548167b955a7ba8e9418d | refs/heads/master | 2020-04-25T20:56:45.523351 | 2019-02-28T19:32:11 | 2019-02-28T19:32:11 | 173,065,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 952 | py | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 6
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_1_1
from isi_sdk_8_1_1.models.snapshot_alias_extended import SnapshotAliasExtended # noqa: E501
from isi_sdk_8_1_1.rest import ApiException
class TestSnapshotAliasExtended(unittest.TestCase):
"""SnapshotAliasExtended unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testSnapshotAliasExtended(self):
"""Test SnapshotAliasExtended"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_1_1.models.snapshot_alias_extended.SnapshotAliasExtended() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
5f92a9568bee1058fc272d28084d6c7ad10f802b | 9b45d301869631cf464da34eadf5ddb96ce80ae2 | /annotations/subsample_json_annotations.py | 92590f3a15f62c2c1ad6db9dce734c2926bcc825 | [] | no_license | zhanght021/segment-any-moving | df6605bfee4bb9c6f76f3e09d38a493914eb5750 | a72f1afd9f52bc9151221112dbc8a8fc0891807e | refs/heads/master | 2020-12-02T15:30:12.301579 | 2019-12-22T20:24:02 | 2019-12-22T20:24:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,938 | py | import argparse
import json
import logging
import random
from pathlib import Path
from utils.log import setup_logging
def main():
# Use first line of file docstring as description if it exists.
parser = argparse.ArgumentParser(
description=__doc__.split('\n')[0] if __doc__ else '',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--input-json', required=True)
parser.add_argument('--output-json', required=True)
parser.add_argument('--keep-num-images', type=int, required=True)
parser.add_argument('--seed', type=int, default=0)
args = parser.parse_args()
random.seed(args.seed)
input_path = Path(args.input_json)
output_path = Path(args.output_json)
log_path = args.output_json + '.log'
setup_logging(log_path)
logging.info('Args:\n%s' % vars(args))
assert input_path.exists()
assert not output_path.exists()
with open(input_path, 'r') as f:
data = json.load(f)
image_ids = [x['id'] for x in data['images']]
import collections
ids_count = collections.Counter(image_ids)
repeated = {x: y for x, y in ids_count.items() if y > 1}
random.shuffle(image_ids)
kept_image_ids = set(image_ids[:args.keep_num_images])
    # __import__('ipdb').set_trace()  # debug breakpoint disabled so the script can run unattended
subsampled_images = [
x for x in data['images'] if x['id'] in kept_image_ids
]
subsampled_annotations = [
x for x in data['annotations'] if x['image_id'] in kept_image_ids
]
logging.info(
'Kept %s/%s images' % (len(subsampled_images), len(data['images'])))
logging.info('Kept %s/%s annotations' % (len(subsampled_annotations),
len(data['annotations'])))
data['images'] = subsampled_images
data['annotations'] = subsampled_annotations
with open(output_path, 'w') as f:
json.dump(data, f)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
9fc96baae700f71e09894a414eeaf395736030fc | 15f0514701a78e12750f68ba09d68095172493ee | /Python3/765.py | e2bb292960f894265919bae5d4515259ff95dcdb | [
"MIT"
] | permissive | strengthen/LeetCode | 5e38c8c9d3e8f27109b9124ae17ef8a4139a1518 | 3ffa6dcbeb787a6128641402081a4ff70093bb61 | refs/heads/master | 2022-12-04T21:35:17.872212 | 2022-11-30T06:23:24 | 2022-11-30T06:23:24 | 155,958,163 | 936 | 365 | MIT | 2021-11-15T04:02:45 | 2018-11-03T06:47:38 | null | UTF-8 | Python | false | false | 1,501 | py | __________________________________________________________________________________________________
sample 28 ms submission
class Solution(object):
def minSwapsCouples(self, row):
ans = 0
for i in range(0, len(row), 2):
x = row[i]
if row[i+1] == x^1: continue
ans += 1
for j in range(i+1, len(row)):
if row[j] == x^1:
row[i+1], row[j] = row[j], row[i+1]
break
return ans
__________________________________________________________________________________________________
sample 13208 kb submission
class UnionFind:
def __init__(self, N):
self.parents = [i for i in range(N)]
self.count = 0
def find(self, x):
if self.parents[x] == x:
return x
return self.find(self.parents[x])
def union(self, x, y):
px = self.find(x)
py = self.find(y)
if px != py:
self.count += 1
self.parents[py] = px
class Solution:
def minSwapsCouples(self, row: List[int]) -> int:
N = len(row) // 2
UF = UnionFind(N)
for i in range(N):
x_couple = row[i * 2] // 2
y_couple = row[i * 2 + 1] // 2
if x_couple != y_couple:
UF.union(x_couple, y_couple)
return UF.count
__________________________________________________________________________________________________
| [
"[email protected]"
] | |
38de7b425a57a3405bffde541d15fa26e09f9a1f | 184d8b600b66ceed4e065878447fd3b99d137a48 | /SRT/lib/models/ProHG.py | 95db3bb47c61a50ee61bb8f163cdf9b24561181d | [
"CC-BY-NC-4.0",
"MIT"
] | permissive | arnoldjair/landmark-detection | 10d45bcdfbb469a3f59fb7d3916fe508fc0b150f | 1ad9db7d94397d81898f6f7c05abe76806d3d85e | refs/heads/master | 2023-03-07T20:06:57.594994 | 2021-02-15T02:56:49 | 2021-02-15T02:57:44 | 261,280,519 | 0 | 0 | MIT | 2020-05-04T19:48:14 | 2020-05-04T19:48:13 | null | UTF-8 | Python | false | false | 8,323 | py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#
# Stacked Hourglass Networks for Human Pose Estimation (https://arxiv.org/abs/1603.06937)
from __future__ import division
import time, math, copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from .basic_batch import find_tensor_peak_batch
class Residual(nn.Module):
def __init__(self, numIn, numOut):
super(Residual, self).__init__()
self.numIn = numIn
self.numOut = numOut
middle = self.numOut // 2
self.conv_A = nn.Sequential(
nn.BatchNorm2d(numIn), nn.ReLU(inplace=True),
nn.Conv2d(numIn, middle, kernel_size=1, dilation=1, padding=0, bias=True))
self.conv_B = nn.Sequential(
nn.BatchNorm2d(middle), nn.ReLU(inplace=True),
nn.Conv2d(middle, middle, kernel_size=3, dilation=1, padding=1, bias=True))
self.conv_C = nn.Sequential(
nn.BatchNorm2d(middle), nn.ReLU(inplace=True),
nn.Conv2d(middle, numOut, kernel_size=1, dilation=1, padding=0, bias=True))
if self.numIn != self.numOut:
self.branch = nn.Sequential(
nn.BatchNorm2d(self.numIn), nn.ReLU(inplace=True),
nn.Conv2d(self.numIn, self.numOut, kernel_size=1, dilation=1, padding=0, bias=True))
def forward(self, x):
residual = x
main = self.conv_A(x)
main = self.conv_B(main)
main = self.conv_C(main)
if hasattr(self, 'branch'):
residual = self.branch( residual )
return main + residual
class HierarchicalPMS(nn.Module):
def __init__(self, numIn, numOut):
super(HierarchicalPMS, self).__init__()
self.numIn = numIn
self.numOut = numOut
cA, cB, cC = self.numOut//2, self.numOut//4, self.numOut-self.numOut//2-self.numOut//4
assert cA + cB + cC == numOut, '({:}, {:}, {:}) = {:}'.format(cA, cB, cC, numOut)
self.conv_A = nn.Sequential(
nn.BatchNorm2d(numIn), nn.ReLU(inplace=True),
nn.Conv2d(numIn, cA, kernel_size=3, dilation=1, padding=1, bias=True))
self.conv_B = nn.Sequential(
nn.BatchNorm2d(cA), nn.ReLU(inplace=True),
nn.Conv2d(cA, cB, kernel_size=3, dilation=1, padding=1, bias=True))
self.conv_C = nn.Sequential(
nn.BatchNorm2d(cB), nn.ReLU(inplace=True),
nn.Conv2d(cB, cC, kernel_size=3, dilation=1, padding=1, bias=True))
if self.numIn != self.numOut:
self.branch = nn.Sequential(
nn.BatchNorm2d(self.numIn), nn.ReLU(inplace=True),
nn.Conv2d(self.numIn, self.numOut, kernel_size=1, dilation=1, padding=0, bias=True))
def forward(self, x):
residual = x
A = self.conv_A(x)
B = self.conv_B(A)
C = self.conv_C(B)
main = torch.cat((A, B, C), dim=1)
if hasattr(self, 'branch'):
residual = self.branch( residual )
return main + residual
class Hourglass(nn.Module):
def __init__(self, n, nModules, nFeats, module):
super(Hourglass, self).__init__()
self.n = n
self.nModules = nModules
self.nFeats = nFeats
self.res = nn.Sequential(*[module(nFeats, nFeats) for _ in range(nModules)])
down = [nn.MaxPool2d(kernel_size = 2, stride = 2)]
down += [module(nFeats, nFeats) for _ in range(nModules)]
self.down = nn.Sequential(*down)
if self.n > 1:
self.mid = Hourglass(n - 1, self.nModules, self.nFeats, module)
else:
self.mid = nn.Sequential(*[module(nFeats, nFeats) for _ in range(nModules)])
up = [module(nFeats, nFeats) for _ in range(nModules)]
#up += [nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)]
self.up = nn.Sequential(*up)
def forward(self, x):
res = self.res(x)
down = self.down(res)
mid = self.mid(down)
up = self.up(mid)
up = torch.nn.functional.interpolate(up, [res.size(2), res.size(3)], mode='bilinear', align_corners=True)
return res + up
class HourGlassNet(nn.Module):
def __init__(self, config, points, sigma, input_dim):
super(HourGlassNet, self).__init__()
self.downsample = 4
self.sigma = sigma
self.config = copy.deepcopy( config )
if self.config.module == 'Residual':
module = Residual
elif self.config.module == 'HierarchicalPMS':
module = HierarchicalPMS
else:
      raise ValueError('Invalid module for HG : {:}'.format(self.config.module))
self.pts_num = points
self.nStack = self.config.stages
self.nModules = self.config.nModules
self.nFeats = self.config.nFeats
self.recursive = self.config.recursive
#self.conv = nn.Sequential(
# nn.Conv2d(input_dim, 64, kernel_size = 7, stride = 2, padding = 3, bias = True),
# nn.BatchNorm2d(64), nn.ReLU(inplace = True))
self.conv = nn.Sequential(
nn.Conv2d(input_dim, 32, kernel_size = 3, stride = 2, padding = 1, bias = True),
nn.BatchNorm2d(32), nn.ReLU(inplace = True),
nn.Conv2d( 32, 32, kernel_size = 3, stride = 1, padding = 1, bias = True),
nn.BatchNorm2d(32), nn.ReLU(inplace = True),
nn.Conv2d( 32, 64, kernel_size = 3, stride = 1, padding = 1, bias = True),
nn.BatchNorm2d(64), nn.ReLU(inplace = True))
self.ress = nn.Sequential(
module(64, 128),
nn.MaxPool2d(kernel_size = 3, stride = 2, padding = 1),
module(128, 128), module(128, self.nFeats))
_features, _tmpOut, _ll_, _tmpOut_ = [], [], [], []
for i in range(self.nStack):
feature = Hourglass(self.recursive, self.nModules, self.nFeats, module)
feature = [feature] + [module(self.nFeats, self.nFeats) for _ in range(self.nModules)]
feature += [nn.Conv2d(self.nFeats, self.nFeats, kernel_size = 1, stride = 1, bias = True),
nn.BatchNorm2d(self.nFeats), nn.ReLU(inplace = True)]
feature = nn.Sequential(*feature)
_features.append(feature)
_tmpOut.append(nn.Conv2d(self.nFeats, self.pts_num, kernel_size = 1, stride = 1, bias = True))
if i < self.nStack - 1:
_ll_.append(nn.Conv2d(self.nFeats, self.nFeats, kernel_size = 1, stride = 1, bias = True))
_tmpOut_.append(nn.Conv2d(self.pts_num, self.nFeats, kernel_size = 1, stride = 1, bias = True))
self.features = nn.ModuleList(_features)
self.tmpOuts = nn.ModuleList(_tmpOut)
self.trsfeas = nn.ModuleList(_ll_)
self.trstmps = nn.ModuleList(_tmpOut_)
if self.config.sigmoid:
self.sigmoid = nn.Sigmoid()
else:
self.sigmoid = None
def extra_repr(self):
return ('{name}(sigma={sigma}, downsample={downsample})'.format(name=self.__class__.__name__, **self.__dict__))
def forward(self, inputs):
assert inputs.dim() == 4, 'This model accepts 4 dimension input tensor: {}'.format(inputs.size())
batch_size, feature_dim = inputs.size(0), inputs.size(1)
x = self.conv(inputs)
x = self.ress(x)
features, heatmaps, batch_locs, batch_scos = [], [], [], []
for i in range(self.nStack):
feature = self.features[i](x)
features.append(feature)
tmpOut = self.tmpOuts[i](feature)
if self.sigmoid is not None:
tmpOut = self.sigmoid(tmpOut)
heatmaps.append(tmpOut)
if i < self.nStack - 1:
ll_ = self.trsfeas[i](feature)
tmpOut_ = self.trstmps[i](tmpOut)
x = x + ll_ + tmpOut_
# The location of the current batch
for ibatch in range(batch_size):
batch_location, batch_score = find_tensor_peak_batch(heatmaps[-1][ibatch], self.sigma, self.downsample)
batch_locs.append( batch_location )
batch_scos.append( batch_score )
batch_locs, batch_scos = torch.stack(batch_locs), torch.stack(batch_scos)
return features, heatmaps, batch_locs, batch_scos
def ProHourGlass(config, points, sigma, use_gray):
print ('Initialize hourglass with configure : {}'.format(config))
idim = 1 if use_gray else 3
model = HourGlassNet(config, points, sigma, idim)
return model
| [
"[email protected]"
] | |
adf64c5294fa2fab3264b87dbecf9ac69a941936 | 55b57d64ec547869835334318f3059fbb507558c | /Fred2/Data/pssms/tepitopepan/mat/DRB1_1404_9.py | b3815c80ab8c940ec4b8b436ba15b069868b509c | [
"BSD-3-Clause"
] | permissive | FRED-2/Fred2 | 9845f6678d4011cb746c7a5a6f283eea68077a02 | b3e54c8c4ed12b780b61f74672e9667245a7bb78 | refs/heads/master | 2021-07-12T05:05:54.515427 | 2020-05-25T06:56:25 | 2020-05-25T06:56:25 | 16,275,425 | 42 | 35 | null | 2021-07-07T12:05:11 | 2014-01-27T10:08:11 | Python | UTF-8 | Python | false | false | 2,168 | py | DRB1_1404_9 = {0: {'A': -999.0, 'E': -999.0, 'D': -999.0, 'G': -999.0, 'F': -0.98558, 'I': -0.014418, 'H': -999.0, 'K': -999.0, 'M': -0.014418, 'L': -0.014418, 'N': -999.0, 'Q': -999.0, 'P': -999.0, 'S': -999.0, 'R': -999.0, 'T': -999.0, 'W': -0.98558, 'V': -0.014418, 'Y': -0.98558}, 1: {'A': 0.0, 'E': 0.1, 'D': -1.3, 'G': 0.5, 'F': 0.8, 'I': 1.1, 'H': 0.8, 'K': 1.1, 'M': 1.1, 'L': 1.0, 'N': 0.8, 'Q': 1.2, 'P': -0.5, 'S': -0.3, 'R': 2.2, 'T': 0.0, 'W': -0.1, 'V': 2.1, 'Y': 0.9}, 2: {'A': 0.0, 'E': -1.2, 'D': -1.3, 'G': 0.2, 'F': 0.8, 'I': 1.5, 'H': 0.2, 'K': 0.0, 'M': 1.4, 'L': 1.0, 'N': 0.5, 'Q': 0.0, 'P': 0.3, 'S': 0.2, 'R': 0.7, 'T': 0.0, 'W': 0.0, 'V': 0.5, 'Y': 0.8}, 3: {'A': 0.0, 'E': -1.1336, 'D': -0.88952, 'G': -1.056, 'F': 0.37787, 'I': 0.42789, 'H': -0.7843, 'K': 1.3955, 'M': 1.2064, 'L': 0.72364, 'N': 0.0092111, 'Q': -0.75181, 'P': -1.1538, 'S': -0.79301, 'R': 1.3303, 'T': -0.91622, 'W': -0.084645, 'V': 0.27085, 'Y': 1.2987}, 4: {'A': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 5: {'A': 0.0, 'E': -1.4087, 'D': -2.3867, 'G': -0.70627, 'F': -1.3964, 'I': 0.69222, 'H': -0.11208, 'K': 1.2652, 'M': -0.90101, 'L': 0.18823, 'N': -0.58182, 'Q': -0.31126, 'P': 0.4949, 'S': -0.089495, 'R': 0.96923, 'T': 0.80924, 'W': -1.3956, 'V': 1.1961, 'Y': -1.3995}, 6: {'A': 0.0, 'E': -0.79277, 'D': -1.2459, 'G': -0.7096, 'F': -0.15733, 'I': 0.066354, 'H': -0.47376, 'K': -0.82466, 'M': 0.67126, 'L': 0.33385, 'N': 0.0045172, 'Q': -0.361, 'P': -0.45654, 'S': -0.19575, 'R': -0.74293, 'T': -0.43948, 'W': -0.75274, 'V': -0.18667, 'Y': -0.43394}, 7: {'A': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 8: {'A': 0.0, 'E': -0.24345, 'D': -0.39245, 'G': -0.35253, 'F': -0.53237, 'I': -0.18595, 'H': 0.64856, 'K': -0.63126, 'M': 0.15453, 'L': -0.5039, 'N': -0.43168, 'Q': 0.86605, 'P': -1.089, 'S': 0.70805, 'R': -0.96918, 'T': -0.7571, 'W': -0.57158, 'V': -0.53639, 'Y': -0.44963}} | [
"[email protected]"
] | |
be8dd059ed81f4842d06142a8a046d206f83a4eb | a7b66311c2ce113789933ec3162f1128b2862f13 | /app/waterQual/basinAll/tsMapSeq.py | 5bfffb5cc94cf4e8469788d0e47b6a2dd1db36cc | [
"MIT"
] | permissive | ChanJeunlam/geolearn | 214b2c42359ea1164b39117fad2d7470adeb6d35 | 791caa54eb70920823ea7d46714dc8a3e7fa7445 | refs/heads/master | 2023-07-16T04:13:15.526364 | 2021-08-16T05:24:18 | 2021-08-16T05:24:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,278 | py | import importlib
from hydroDL.master import basins
from hydroDL.app import waterQuality
from hydroDL import kPath
from hydroDL.model import trainTS
from hydroDL.data import gageII, usgs
from hydroDL.post import axplot, figplot
import torch
import os
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
dataName = 'basinAll'
wqData = waterQuality.DataModelWQ('basinAll')
outName = 'basinAll-Y8090-opt1'
trainset = 'Y8090'
testset = 'Y0010'
# point test
outFolder = os.path.join(kPath.dirWQ, 'model', outName)
yP1, ycP1 = basins.testModel(outName, trainset, wqData=wqData, ep=200)
errFile1 = os.path.join(outFolder, 'errMat1_ep200.npy')
# errMat1 = wqData.errBySiteC(ycP1, subset=trainset, varC=wqData.varC)
# np.save(errFile1, errMat1)
errMat1 = np.load(errFile1)
errFile2 = os.path.join(outFolder, 'errMat2_ep200.npy')
yP2, ycP2 = basins.testModel(outName, testset, wqData=wqData, ep=200)
# errMat2 = wqData.errBySiteC(ycP2, subset=testset, varC=wqData.varC)
# np.save(errFile2, errMat2)
errMat2 = np.load(errFile2)
# seq test
siteNoLst = wqData.info['siteNo'].unique().tolist()
# basins.testModelSeq(outName, siteNoLst, wqData=wqData, ep=200)
# figure out number of samples
dirInv = os.path.join(kPath.dirData, 'USGS', 'inventory')
fileSiteNo = os.path.join(dirInv, 'siteNoLst-1979')
siteNoLstAll = pd.read_csv(fileSiteNo, header=None, dtype=str)[0].tolist()
df0 = pd.read_csv(os.path.join(dirInv, 'codeCount.csv'),
dtype={'siteNo': str}).set_index('siteNo')
df1 = pd.read_csv(os.path.join(dirInv, 'codeCount_B2000.csv'),
dtype={'siteNo': str}).set_index('siteNo')
df2 = pd.read_csv(os.path.join(dirInv, 'codeCount_A2000.csv'),
dtype={'siteNo': str}).set_index('siteNo')
matN = df0.loc[siteNoLst].values
matN1 = df1.loc[siteNoLst].values
matN2 = df2.loc[siteNoLst].values
# plot box
codePdf = usgs.codePdf
groupLst = codePdf.group.unique().tolist()
for group in groupLst:
codeLst = codePdf[codePdf.group == group].index.tolist()
indLst = [wqData.varC.index(code) for code in codeLst]
labLst1 = [codePdf.loc[code]['shortName'] +
'\n'+code for code in codeLst]
labLst2 = ['train opt1', 'test opt1', 'train opt2', 'test opt2']
dataBox = list()
for ic in indLst:
temp = list()
for errMat in [errMat1, errMat2]:
ind = np.where((matN1[:, ic] > 50) & (matN2[:, ic] > 50))[0]
temp.append(errMat[ind, ic, 1])
dataBox.append(temp)
title = 'correlation of {} group'.format(group)
fig = figplot.boxPlot(dataBox, label1=labLst1, label2=labLst2)
fig.suptitle(title)
fig.show()
# plot map
siteNoLst = wqData.info['siteNo'].unique().tolist()
dfCrd = gageII.readData(
varLst=['LAT_GAGE', 'LNG_GAGE'], siteNoLst=siteNoLst)
lat = dfCrd['LAT_GAGE'].values
lon = dfCrd['LNG_GAGE'].values
codePdf = usgs.codePdf
codeLst = ['00940', '00915']
def funcMap():
nM = len(codeLst)
figM, axM = plt.subplots(nM, 1, figsize=(8, 6))
for k in range(0, nM):
code = codeLst[k]
ic = wqData.varC.index(code)
shortName = codePdf.loc[code]['shortName']
title = '{} {}'.format(shortName, code)
axplot.mapPoint(axM[k], lat, lon, errMat2[:, ic, 1], s=12)
axM[k].set_title(title)
figP, axP = plt.subplots(nM+1, 1, figsize=(8, 6))
return figM, axM, figP, axP, lon, lat
def funcPoint(iP, axP):
siteNo = siteNoLst[iP]
dfPred, dfObs = basins.loadSeq(outName, siteNo, ep=200)
dfPred = dfPred[dfPred.index >= np.datetime64('1980-01-01')]
dfObs = dfObs[dfObs.index >= np.datetime64('1980-01-01')]
t = dfPred.index.values.astype(np.datetime64)
tBar = np.datetime64('2000-01-01')
axplot.plotTS(axP[0], t, [dfPred['00060'], dfObs['00060']], tBar=tBar,
legLst=['pred', 'obs'], styLst='--', cLst='br')
axP[0].set_title('streamflow')
for k, var in enumerate(codeLst):
styLst = '-*'
shortName = codePdf.loc[var]['shortName']
title = ' {} {}'.format(shortName, var)
axplot.plotTS(axP[k+1], t, [dfPred[var], dfObs[var]], tBar=tBar,
legLst=['pred', 'obs'], styLst=styLst, cLst='br')
axP[k+1].set_title(title)
figplot.clickMap(funcMap, funcPoint)
| [
"[email protected]"
] | |
b0a7e19fb390fa57b3835fc1e4f1ca42566c3f7d | efd471380d976614667e56c92f0aed671371fc63 | /All Programs/Tuples.py | 7c2c7e039ad6d52062ffa5f8b2189cb68d4273cf | [] | no_license | anshumanairy/Hacker-Rank | 39af46e76182d34637340d1755aff4afd7820083 | 6fef4c6a415422d9379232932358e4ee7430a6af | refs/heads/master | 2021-07-04T07:41:37.769152 | 2020-10-12T05:49:24 | 2020-10-12T05:49:24 | 181,359,750 | 2 | 2 | null | 2020-10-12T05:49:25 | 2019-04-14T19:38:18 | Python | UTF-8 | Python | false | false | 201 | py | #!/usr/bin/env python
# coding: utf-8
# In[3]:
def func():
N=int(input())
list1=[""]*N
c=input("")
list1=list(map(int,c.split()))
print(hash(tuple(list1)))
func()
# In[ ]:
| [
"[email protected]"
] | |
85a39828733a6f7bfe8b8897c68b984eaf80db3c | 7d549faf0de691a63acae85e60b081d4b6b7ddc7 | /slowfast/datasets/__init__.py | dee63427c45337ec1e5ac384762abd5fb36e5d31 | [
"Apache-2.0"
] | permissive | billcai/SlowFast | be05f7852810d43211c4e6ab7faef27f86d035af | 778888e63351e55861801996b37c7ff9a3746587 | refs/heads/master | 2021-08-01T17:02:11.539218 | 2021-07-26T22:05:16 | 2021-07-26T22:06:15 | 248,907,066 | 0 | 0 | Apache-2.0 | 2020-03-21T04:34:41 | 2020-03-21T04:34:40 | null | UTF-8 | Python | false | false | 497 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from .ava_dataset import Ava # noqa
from .build import DATASET_REGISTRY, build_dataset # noqa
from .charades import Charades # noqa
from .imagenet import Imagenet # noqa
from .kinetics import Kinetics # noqa
from .ssv2 import Ssv2 # noqa
try:
from .ptv_datasets import Ptvcharades, Ptvkinetics, Ptvssv2 # noqa
except Exception:
print("Please update your PyTorchVideo to latest master")
| [
"[email protected]"
] | |
2b1c26fde124b5e4b985e7dbd4c006afa1344ee9 | 3856dbedcf846f9845290e9b2efa4d18e300623d | /swagger_client/models/execute_method_request.py | 5de657d2476279d5b1948879d67b6c17ae6dc62a | [] | no_license | Valandur/webapi-client-python | 5b314da41803f5b55a5c6cce62d2384b86d0fa37 | 8502726bf3facb17c6fa681faf0f600207eb61ae | refs/heads/master | 2022-02-04T21:45:37.686703 | 2019-07-23T12:11:47 | 2019-07-23T12:11:47 | 113,748,693 | 2 | 0 | null | 2019-01-09T16:07:31 | 2017-12-10T12:38:14 | Python | UTF-8 | Python | false | false | 6,574 | py | # coding: utf-8
"""
Web-API
Access Sponge powered Minecraft servers through a WebAPI # Introduction This is the documentation of the various API routes offered by the WebAPI plugin. This documentation assumes that you are familiar with the basic concepts of Web API's, such as `GET`, `PUT`, `POST` and `DELETE` methods, request `HEADERS` and `RESPONSE CODES` and `JSON` data. By default this documentation can be found at http:/localhost:8080 (while your minecraft server is running) and the various routes start with http:/localhost:8080/api/v5... As a quick test try reaching the route http:/localhost:8080/api/v5/info (remember that you can only access \\\"localhost\\\" routes on the server on which you are running minecraft). This route should show you basic information about your server, like the motd and player count. # List endpoints Lots of objects offer an endpoint to list all objects (e.g. `GET: /world` to get all worlds). These endpoints return only the properties marked 'required' by default, because the list might be quite large. If you want to return ALL data for a list endpoint add the query parameter `details`, (e.g. `GET: /world?details`). > Remember that in this case the data returned by the endpoint might be quite large. # Debugging endpoints Apart from the `?details` flag you can also pass some other flags for debugging purposes. Remember that you must include the first query parameter with `?`, and further ones with `&`: `details`: Includes details for list endpoints `accept=[json/xml]`: Manually set the accept content type. This is good for browser testing, **BUT DON'T USE THIS IN PRODUCTION, YOU CAN SUPPLY THE `Accepts` HEADER FOR THAT** `pretty`: Pretty prints the data, also good for debugging in the browser. An example request might look like this: `http://localhost:8080/api/v5/world?details&accpet=json&pretty&key=MY-API-KEY` # Additional data Certain endpoints (such as `/player`, `/entity` and `/tile-entity` have additional properties which are not documented here, because the data depends on the concrete object type (eg. `Sheep` have a wool color, others do not) and on the other plugins/mods that are running on your server which might add additional data. You can also find more information in the github docs (https:/github.com/Valandur/Web-API/tree/master/docs/DATA.md) # noqa: E501
OpenAPI spec version: 5.4.2-S7.1.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.execute_method_param import ExecuteMethodParam # noqa: F401,E501
class ExecuteMethodRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'method': 'str',
'parameters': 'list[ExecuteMethodParam]'
}
attribute_map = {
'method': 'method',
'parameters': 'parameters'
}
def __init__(self, method=None, parameters=None): # noqa: E501
"""ExecuteMethodRequest - a model defined in Swagger""" # noqa: E501
self._method = None
self._parameters = None
self.discriminator = None
self.method = method
if parameters is not None:
self.parameters = parameters
@property
def method(self):
"""Gets the method of this ExecuteMethodRequest. # noqa: E501
The method that is executed # noqa: E501
:return: The method of this ExecuteMethodRequest. # noqa: E501
:rtype: str
"""
return self._method
@method.setter
def method(self, method):
"""Sets the method of this ExecuteMethodRequest.
The method that is executed # noqa: E501
:param method: The method of this ExecuteMethodRequest. # noqa: E501
:type: str
"""
if method is None:
raise ValueError("Invalid value for `method`, must not be `None`") # noqa: E501
self._method = method
@property
def parameters(self):
"""Gets the parameters of this ExecuteMethodRequest. # noqa: E501
The parameters of the method (if applicable) # noqa: E501
:return: The parameters of this ExecuteMethodRequest. # noqa: E501
:rtype: list[ExecuteMethodParam]
"""
return self._parameters
@parameters.setter
def parameters(self, parameters):
"""Sets the parameters of this ExecuteMethodRequest.
The parameters of the method (if applicable) # noqa: E501
:param parameters: The parameters of this ExecuteMethodRequest. # noqa: E501
:type: list[ExecuteMethodParam]
"""
self._parameters = parameters
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ExecuteMethodRequest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ExecuteMethodRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
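# --- Illustrative usage (editor's sketch, not generated code) ---
# Building the request body described in the class docstring; the method name
# below is a placeholder assumption, not a documented Web-API method:
#
#     req = ExecuteMethodRequest(method="getOnlinePlayers", parameters=[])
#     payload = req.to_dict()   # dict ready to be serialized for the POST request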
| [
"[email protected]"
] | |
61d9f8923dce64ff0f4801e9d15ae1d5e69c756c | 71f47bd812a420c9849ecc7609e99f9b969c4d3d | /push_endpoint/migrations/0018_pusheddata_datasource.py | 1f6ba580925a882b85b136592f91b7ce41408b7e | [] | no_license | erinspace/shareregistration | e2bd0d8086a60eac616057a225bda07a0cd385a9 | e04bfe443fda49644a12778a4826c9cb04930f5b | refs/heads/master | 2020-05-27T18:25:05.858413 | 2016-02-24T20:17:37 | 2016-02-24T20:17:37 | 30,875,095 | 0 | 1 | null | 2016-02-24T20:17:37 | 2015-02-16T15:47:22 | JavaScript | UTF-8 | Python | false | false | 467 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('push_endpoint', '0017_auto_20151113_1532'),
]
operations = [
migrations.AddField(
model_name='pusheddata',
name='datasource',
field=models.CharField(default='test', max_length=100),
preserve_default=False,
),
]
| [
"[email protected]"
] | |
85bb1583d91110edde4ea3a582c960e697bc9b4e | 0bf6ecbdebc7424a8946b29127d55c5bc1e7442e | /organization/migrations/0062_auto_20170727_2109.py | 146f45e3415b6fb573a6dc9d046e806ac30d424d | [] | no_license | dekkerlab/cLIMS | 2351a9c81f3e3ba982e073500a4a5cf2fd38ed51 | e76731032a5707027b53746a8f2cc9b01ab7c04e | refs/heads/master | 2021-03-27T06:28:49.718401 | 2017-10-10T19:22:33 | 2017-10-10T19:22:33 | 71,837,345 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 971 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-07-27 21:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wetLab', '0076_auto_20170727_2109'),
('organization', '0061_auto_20170426_1541'),
]
operations = [
migrations.AddField(
model_name='experiment',
name='authentication_docs',
field=models.ManyToManyField(blank=True, help_text='Attach any authentication document for your biosample here. e.g. Fragment Analyzer document, Gel images.', related_name='expAddProto', to='wetLab.Protocol', verbose_name='authentication_docs'),
),
migrations.AlterField(
model_name='experiment',
name='imageObjects',
field=models.ManyToManyField(blank=True, help_text='additional images.', related_name='expImg', to='dryLab.ImageObjects'),
),
]
| [
"[email protected]"
] | |
e4771e45015373752e3153f63e5089990296b822 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_312/ch6_2019_02_28_19_18_08_378411.py | 51733cbb770c32be20d9f73baf7b228425fd72c1 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 308 | py | def encontra_maximo(lista):
max=lista[0][0]
    eixox=len(lista)
    eixoy=len(lista[0])
count=0
count2=0
    while count<eixox:
        # restart the column index for each new row
        count2=0
while count2<eixoy:
if lista[count][count2]>max:
max=lista[count][count2]
count2+=1
count+=1
return max | [
"[email protected]"
] | |
1d46acae02e3b015cc09aa5e952ee973faf108a4 | 9fc768c541145c1996f2bdb8a5d62d523f24215f | /code/jPB371/ch02ok/bool.py | e1be86dd69a983a3bbf2acb2dbadcdb6822c76c5 | [] | no_license | jumbokh/pyclass | 3b624101a8e43361458130047b87865852f72734 | bf2d5bcca4fff87cb695c8cec17fa2b1bbdf2ce5 | refs/heads/master | 2022-12-25T12:15:38.262468 | 2020-09-26T09:08:46 | 2020-09-26T09:08:46 | 283,708,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | # -*- coding: utf-8 -*-
print( bool(0) )
print( bool("") )
print( bool(" ") )
print( bool(1) )
print( bool("ABC") )
| [
"[email protected]"
] | |
fb3acbea24022a162db419f7994a985a3cf44ed8 | c331d0f5e3d4ae0c60dd5cc7aa3dc9c76faec88c | /WebApp/python/setup_db.py | e39937533186ae75b43b74afa11e44c1e6d8c2e7 | [] | no_license | sahahn/BPt_app | 744be29dad8710b5113a50cd12d8d250e51587d6 | f849a8bad43419b334000e57f2ce874d38a6d3d5 | refs/heads/master | 2023-03-09T01:50:44.401955 | 2020-10-19T17:59:37 | 2020-10-19T17:59:37 | 280,518,561 | 0 | 0 | null | 2020-10-19T17:59:39 | 2020-07-17T20:25:27 | JavaScript | UTF-8 | Python | false | false | 2,872 | py | import os
import json
import shutil
from Dataset import Dataset
def process_dataset(base_loc, dataset_name):
# Init dataset with locs, etc...
dataset = Dataset(base_loc=base_loc,
dataset_name=dataset_name)
# Process files (skips if not needed)
dataset.process_files()
def process_datasets(base_loc):
    # Make the data info directory if it doesn't exist
data_info_loc = os.path.join(base_loc, 'bpt/Data_Info')
os.makedirs(data_info_loc, exist_ok=True)
# Process each dataset
sources_loc = os.path.join(base_loc, 'sources')
datasets = [f for f in os.listdir(sources_loc) if not f.startswith('.')]
for dataset in datasets:
process_dataset(base_loc, dataset)
# Check each dataset for its events
# Also check to make sure dataset isnt empty
non_empty_datasets = []
all_events = set()
for dataset in datasets:
event_file = os.path.join(data_info_loc, dataset, 'eventnames.json')
with open(event_file, 'r') as f:
events = set(json.load(f))
all_events.update(events)
# Only add dataset if atleast 1 event (only 0 events when empty)
if len(events) > 0:
non_empty_datasets.append(dataset)
# Save overlapped events
all_events_loc = os.path.join(base_loc, 'bpt/all_events.json')
with open(all_events_loc, 'w') as f:
json.dump(list(all_events), f)
# Save datasets.json w/ non-empty datasets
datasets_loc = os.path.join(base_loc, 'bpt/datasets.json')
with open(datasets_loc, 'w') as f:
json.dump(sorted(non_empty_datasets), f)
# Go through and delete any saved data info if
# not in the compiled datasets
saved_datasets = os.listdir(data_info_loc)
for dataset in saved_datasets:
if dataset not in non_empty_datasets:
shutil.rmtree(os.path.join(data_info_loc, dataset))
def main():
base_loc = '/var/www/html/data'
# Locs + checks
lock_loc = os.path.join(base_loc, 'bpt/lock')
ready_loc = os.path.join(base_loc, 'bpt/ready')
error_loc = os.path.join(base_loc, 'bpt/process_datasets_errors.txt')
# Check for db-add lock
if (os.path.isfile(lock_loc)):
return None
else:
with open(lock_loc, 'w') as f:
f.write('locked')
# If previous error file exists, remove it
if os.path.exists(error_loc):
os.remove(error_loc)
# Call process datasets only if no
try:
process_datasets(base_loc)
# If processed no errors add ready
with open(ready_loc, 'w') as f:
f.write('ready')
# If error, save to text file
except Exception as e:
with open(error_loc, 'w') as f:
f.write(repr(e))
# Remove the lock - regardless of if error or success
os.remove(lock_loc)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
d32773b4574f486d5a2b781344e5fd1204cf04d9 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03192/s543941815.py | a08019e6e984eb90d4fba884f43c987fbc8436a3 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37 | py | N = list(input())
print(N.count("2")) | [
"[email protected]"
] | |
6e82d9acc53a5eabc323bec3d2068e5365b6bdec | 42f83595d24addd3cf8be828e282b37509825b3e | /src/collective/cfgconfig/view.py | dd91876c2f239c361f74514a6c6a9a457b10ca9f | [] | no_license | datakurre/collective.cfgconfig | 825a26b7704932b5ea70f688cda8112623b42493 | 3325c6cbd5defd40c40bce7ed43814e9f77263ae | refs/heads/master | 2016-09-06T21:39:03.209274 | 2013-11-24T12:13:50 | 2013-11-24T12:13:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | # -*- coding: utf-8 -*-
from Products.Five.browser import BrowserView
class HelloWorld(BrowserView):
def __call__(self):
return u"Hello world"
| [
"[email protected]"
] | |
e45b7ab72944b666ff1639a4f0be0b3e38507d7b | 9947d1e328a3262a35a61385dc537c3dc557ab7d | /机器学习/day05/demo07_ac.py | 334d5013ba072bb2730ca8167dd3d93753dc93a3 | [] | no_license | nuass/lzh | d0a7c74a3295523d1fe15eeaa73997fc04469f06 | 3cb1cf1e448b88ade226d113a7da4eab7bbb5c09 | refs/heads/master | 2021-02-06T06:10:32.772831 | 2019-06-10T08:54:49 | 2019-06-10T08:54:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | # coding=utf-8
"""
Agglomerative (hierarchical) clustering demo
"""
import numpy as np
import sklearn.cluster as sc
import matplotlib.pyplot as mp
x = np.loadtxt("../ml_data/multiple3.txt",delimiter=",")
model=sc.AgglomerativeClustering(n_clusters=4)
pred_y = model.fit_predict(x)
mp.figure("AgglomerativeClustering",facecolor="lightgray")
mp.title("AgglomerativeClustering",fontsize=14)
mp.xlabel("x",fontsize=12)
mp.ylabel("y",fontsize=12)
mp.tick_params(labelsize=10)
mp.grid(linestyle=":")
mp.scatter(x[:,0],x[:,1],s=60,marker='o',c=pred_y,cmap="brg",label="Sample Points")
mp.legend()
mp.show() | [
"[email protected]"
] | |
2fe314ad72a9bdc13e5025c1c7a864fb01d73ee6 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/haid.py | 2679dc565822ff254b7856dca134b0346cb0bf3e | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 83 | py | ii = [('RogePAV2.py', 1), ('KiddJAE.py', 1), ('FitzRNS.py', 1), ('FitzRNS2.py', 1)] | [
"[email protected]"
] | |
e269cfa019462d7b553ac3efa865d6eca08f96e4 | 19f1dc4e728714e66af8e1e8262f2b7c47d3beb6 | /Samples/UserSamples/2017/STTConfig.py | 3e551660149292cc15d24516e961c55717591b35 | [] | no_license | samhiggie/DatacardCreator | 74cbbbea928770d3ca5669604d96ffb582416b45 | 8e838816cfb9adee8b2276adf79904da6449ca52 | refs/heads/master | 2020-09-11T19:35:48.985441 | 2019-11-18T23:51:18 | 2019-11-18T23:51:18 | 222,169,538 | 0 | 0 | null | 2019-11-16T22:56:02 | 2019-11-16T22:56:02 | null | UTF-8 | Python | false | false | 1,350 | py | from Samples.SampleDefinition import Sample
from Samples.Uncertainties.UserUncertainties.TES import TESUncertainty
from Samples.Uncertainties.UserUncertainties.JES import JESUncertainty
from Samples.Uncertainties.UserUncertainties.METUES import METUESUncertainty
from Samples.Uncertainties.UserUncertainties.MuonES import MuonESUncertainty
from Samples.Uncertainties.UserUncertainties.Prefiring import PrefiringUncertainty
from Samples.Uncertainties.UserUncertainties.TauID import TauIDUncertainty
from Samples.Uncertainties.UserUncertainties.Trigger17_18 import Trigger1718Uncertainty
from Samples.EventDefinition.UserEventDictionaries.MuTauEventDictionary import MuTauEventDictionary
STSample = Sample()
STSample.name = 'STT'
STSample.path = '/data/aloeliger/SMHTT_Selected_2017_Deep/'
STSample.files = ['ST_t_top.root',
'ST_t_antitop.root',
'ST_tW_top.root',
'ST_tW_antitop.root']
STSample.definition = '(gen_match_1 == 1 || gen_match_1 == 2) && gen_match_2 == 5'
STSample.uncertainties = [
TESUncertainty(),
JESUncertainty(),
METUESUncertainty(),
MuonESUncertainty(),
PrefiringUncertainty(),
TauIDUncertainty(),
Trigger1718Uncertainty(),
]
STSample.eventDictionaryInstance = MuTauEventDictionary
STSample.CreateEventWeight = STSample.CreateEventWeight_Standard
| [
"[email protected]"
] | |
e32886648d45201263ed378387cc9fab9df32a4e | d68be566e1b7dbb9c716b8165e9d546a6e294e5d | /course/models.py | 265df4287ea82f383070bffcb7676c8a7a8d5f77 | [] | no_license | NeuSovo/Neusoft-ecard | 0e5d525360522d4abf3a7f39ec4d205ec17d571d | 41138be9280fc92e98d6dce7394ac66204672b40 | refs/heads/master | 2021-03-24T09:32:55.543809 | 2018-09-12T04:48:29 | 2018-09-12T04:48:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,955 | py | from django.db import models
# Create your models here.
class RoomModel(models.Model):
class Meta:
verbose_name = "RoomModel"
verbose_name_plural = "RoomModels"
def info(self):
result = {
'RoomID': self.RoomID,
'RoomTime': self.RoomTime,
'RoomWeek': self.RoomWeek,
'ClassName': self.ClassName,
'ClassTeacher': self.ClassTeacher,
'ClassTime': self.ClassTime,
'RoomCount': self.RoomCount
        }
        return result
RoomFloor = models.CharField(
max_length=10
)
RoomID = models.CharField(
max_length=30
)
RoomTime = models.IntegerField(
default=0
)
RoomWeek = models.IntegerField(
default=0
)
ClassName = models.CharField(
max_length=155,
)
ClassTeacher = models.CharField(
max_length=155,
)
ClassTime = models.CharField(
max_length=100,
)
RoomCount = models.IntegerField(
default=0
)
class RoomTest(models.Model):
class Meta:
verbose_name = "课程信息"
verbose_name_plural = "课程信息"
ordering = ['id']
def info(self, has_grade=False):
result = {
'RoomID': self.RoomID,
'ClassName': self.ClassName,
'ClassTeacher': self.ClassTeacher,
'ClassWeek': self.ClassWeek,
'ClassCount': self.ClassCount,
'ClassTimeWeek': self.ClassTimeWeek,
'ClassTimeTime': self.ClassTimeTime
}
if has_grade:
result['ClassGrade'] = self.ClassGrade
return result
ClassTimeTime_choices = (
('1', '1-2节'),
('2', '3-4节'),
('3', '5-6节'),
('4', '7-8节'),
('5', '9-10节'),
('5', '9-11节'),
('1-2', '1-4节'),
('1-2-3-4', '1-8节'),
('3-4', '5-7节'),
('3-4', '5-8节'),
('1-2-3-4', '1-8节'),
('1-2-3-4-5', '1-10节'),
('1-2-3-4-5', '1-11节'),
)
ClassTimeWeek_choices = (
(1, '周一'),
(2, '周二'),
(3, '周三'),
(4, '周四'),
(5, '周五'),
(6, '周六'),
(7, '周日'),
)
RoomID = models.CharField(
max_length=30,
null=True
)
ClassName = models.CharField(
max_length=155,
null=True
)
ClassTeacher = models.CharField(
max_length=120,
null=True,
default='0'
)
ClassWeek = models.CharField(
max_length=30,
null=True
)
ClassCount = models.IntegerField(default=0)
ClassGrade = models.TextField(
default='0',
null=True
)
ClassTimeWeek = models.IntegerField(
default=0, choices=ClassTimeWeek_choices)
ClassTimeTime = models.CharField(
default='0',
max_length=10,
choices=ClassTimeTime_choices,
null=True
)
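# A minimal usage sketch (not part of this module): serialising RoomTest rows, e.g.
# inside a view, optionally including grades; the queryset below is assumed.
#
#     data = [room.info(has_grade=True) for room in RoomTest.objects.all()]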
| [
"[email protected]"
] | |
04bd05b63a5e2c8534a1ddac43a2d5cafcb436e0 | 133dc799865134325975afeff2d1aa1ed4a1f5ca | /migrations/versions/15fd2af90843_users_table.py | fd6d349e4f08d2cc27f0e80cb3078dc01b9cf887 | [] | no_license | Serrones/microblog | 5eb72baf86ad363e607ac29775f8c1f24234a18d | 917eec12890c8485d44dbef4742dae268837c15b | refs/heads/master | 2020-03-14T16:48:47.328601 | 2018-05-07T00:01:20 | 2018-05-07T00:01:20 | 131,705,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,131 | py | """Users Table
Revision ID: 15fd2af90843
Revises:
Create Date: 2018-05-01 17:23:06.763488
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '15fd2af90843'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('email', sa.String(length=120), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)
op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_user_username'), table_name='user')
op.drop_index(op.f('ix_user_email'), table_name='user')
op.drop_table('user')
# ### end Alembic commands ###
| [
"[email protected]"
] | |
37465d7be87d3f32cd3ed96223af113778b5d931 | 24d6d41989d676f3532013de3a6d847586fa3663 | /permissions_widget/settings.py | adac7edee3ac2392f01a48f2c9f39405a49144f4 | [] | no_license | diegofer/compu | 92da75e79a4f286840f127698961bd1f99edf567 | 4407896e899e057a928f63455f29bba370bf5c7a | refs/heads/master | 2021-01-22T19:54:11.588140 | 2014-04-01T05:41:59 | 2014-04-01T05:41:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,739 | py | """
Settings for permissions_widget.
EXCLUDE_APPS
The permissions widget will exclude any permission for any model in any app
in the EXCLUDE_APPS list. It contains sensible defaults which you can
override: sessions, admin and contenttypes for example, as in most cases
users won't even have the possibility of adding/changing/deleting sessions,
logentries and content types so why even bother proposing permissions for
    them? This would just confuse the admin.
Can be overridden in settings.PERMISSIONS_WIDGET_EXCLUDE_APPS.
EXCLUDE_MODELS
The permissions widget will exclude any permission for any listed model.
Models should be listed in the form of `app.model`.
Can be overridden in settings.PERMISSIONS_WIDGET_EXCLUDE_MODELS.
PATCH_GROUPADMIN
If True, `permissions_widget.admin` will override the registered GroupAdmin
form's user_permission field to use this widget for permissions.
Can be overridden (ie. to False) in
settings.PERMISSIONS_WIDGET_PATCH_GROUPADMIN.
PATCH_USERADMIN
If True, `permissions_widget.admin` will override the registered UserAdmin
form's user_permission field to use this widget for permissions.
Can be overridden (ie. to False) in
settings.PERMISSIONS_WIDGET_PATCH_USERADMIN.
"""
from django.conf import settings
EXCLUDE_APPS = getattr(settings, 'PERMISSIONS_WIDGET_EXCLUDE_APPS', [
'sites', 'reversion', 'contenttypes', 'admin', 'sessions',
'easy_thumbnails',])
EXCLUDE_MODELS = getattr(settings, 'PERMISSIONS_WIDGET_EXCLUDE_MODELS', [
'auth.permission',])
#PATCH_USERADMIN = getattr(settings, 'PERMISSIONS_WIDGET_PATCH_USERADMIN', True)
#PATCH_GROUPADMIN = getattr(settings, 'PERMISSIONS_WIDGET_PATCH_GROUPADMIN', True)
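# A minimal sketch of overriding these options from a project's Django settings.py;
# the extra app/model labels below are hypothetical examples, not package defaults:
#
#     PERMISSIONS_WIDGET_EXCLUDE_APPS = ['sessions', 'admin', 'contenttypes', 'myapp']
#     PERMISSIONS_WIDGET_EXCLUDE_MODELS = ['auth.permission', 'myapp.internalmodel']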
| [
"[email protected]"
] | |
8f1f7c3c01e7eb7b6ba4c53e5fdf863524943160 | e56214188faae8ebfb36a463e34fc8324935b3c2 | /test/test_hcl_operating_system_vendor_all_of.py | fe64827eedaceb6e5940512cfecfcd68d34590c9 | [
"Apache-2.0"
] | permissive | CiscoUcs/intersight-python | 866d6c63e0cb8c33440771efd93541d679bb1ecc | a92fccb1c8df4332ba1f05a0e784efbb4f2efdc4 | refs/heads/master | 2021-11-07T12:54:41.888973 | 2021-10-25T16:15:50 | 2021-10-25T16:15:50 | 115,440,875 | 25 | 18 | Apache-2.0 | 2020-03-02T16:19:49 | 2017-12-26T17:14:03 | Python | UTF-8 | Python | false | false | 2,009 | py | # coding: utf-8
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. # noqa: E501
The version of the OpenAPI document: 1.0.9-1295
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import intersight
from intersight.models.hcl_operating_system_vendor_all_of import HclOperatingSystemVendorAllOf # noqa: E501
from intersight.rest import ApiException
class TestHclOperatingSystemVendorAllOf(unittest.TestCase):
"""HclOperatingSystemVendorAllOf unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testHclOperatingSystemVendorAllOf(self):
"""Test HclOperatingSystemVendorAllOf"""
# FIXME: construct object with mandatory attributes with example values
# model = intersight.models.hcl_operating_system_vendor_all_of.HclOperatingSystemVendorAllOf() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
84019c47c3970e23b49d08af58fa3ddfb4190e74 | 6923f79f1eaaba0ab28b25337ba6cb56be97d32d | /programming_computer_vision_with_python/cvbook-contrib/ch05_stereo.py | 8cfe22f5dbe6c0a38f1888e9cb984ffbc05f7bdd | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | burakbayramli/books | 9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0 | 5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95 | refs/heads/master | 2023-08-17T05:31:08.885134 | 2023-08-14T10:05:37 | 2023-08-14T10:05:37 | 72,460,321 | 223 | 174 | null | 2022-10-24T12:15:06 | 2016-10-31T17:24:00 | Jupyter Notebook | UTF-8 | Python | false | false | 465 | py | from PIL import Image
import numpy
import stereo
im_l = numpy.array(Image.open('out_stereo1.ppm').convert('L'), 'f')
im_r = numpy.array(Image.open('out_stereo2.ppm').convert('L'), 'f')
steps = 12
start = 4
wid = 9
res = stereo.plane_sweep_ncc(im_l, im_r, start, steps, wid)
wid = 3
res_gauss = stereo.plane_sweep_gauss(im_l, im_r, start, steps, wid)
import scipy.misc
scipy.misc.imsave('out_depth.png', res)
scipy.misc.imsave('out_depth_gauss.png', res_gauss)
| [
"[email protected]"
] | |
2d9b1d187eb1175b5bcb291481e18ea6d1dd82b2 | eefb06b0d8c8c98c1e9cfc4c3852d5c453eb5429 | /data/input/albatrossandco/brubeck_cms/brubeck/common/geography/fields.py | a782283b0983066628ff4b3c9c3ac8bb6bd8e614 | [] | no_license | bopopescu/pythonanalyzer | db839453bde13bf9157b76e54735f11c2262593a | 8390a0139137574ab237b3ff5fe8ea61e8a0b76b | refs/heads/master | 2022-11-22T02:13:52.949119 | 2019-05-07T18:42:52 | 2019-05-07T18:42:52 | 282,079,884 | 0 | 0 | null | 2020-07-23T23:46:09 | 2020-07-23T23:46:08 | null | UTF-8 | Python | false | false | 3,849 | py | from django import forms
from django.conf import settings
from django.db import models
from django.utils.safestring import mark_safe
class Coordinates:
def __init__(self, lat, lng):
self.lat = float(lat)
self.lng = float(lng)
def __repr__(self):
return ','.join([str(self.lat), str(self.lng)])
lat = float()
lng = float()
# NOTE: Came from http://www.djangosnippets.org/snippets/615/ (-JCM)
# The development of this code was sponsored by MIG Internacional
# This code is released under the terms of the BSD license
# http://code.djangoproject.com/browser/django/trunk/LICENSE
# Feel free to use it at your whim/will/risk :D
# Contact info: Javier Rojas <[email protected]>
class LocationWidget(forms.widgets.Widget):
def __init__(self, *args, **kw):
super(LocationWidget, self).__init__(*args, **kw)
self.inner_widget = forms.widgets.HiddenInput()
def render(self, name, value, *args, **kwargs):
try:
lat = value.lat
lng = value.lng
except AttributeError:
lat = settings.DEFAULT_LATITUDE
lng = settings.DEFAULT_LONGITUDE
js = '''
</script>
<script type="text/javascript">
//<![CDATA[
var %(name)s_marker ;
$(document).ready(function () {
if (GBrowserIsCompatible()) {
var map = new GMap2(document.getElementById("map_%(name)s"));
map.setCenter(new GLatLng(%(default_lat)s,%(default_lng)s), 13);
%(name)s_marker = new GMarker(new GLatLng(%(default_lat)s,%(default_lng)s), {draggable: true});
map.addOverlay(%(name)s_marker);
map.addControl(new GLargeMapControl());
map.addControl(new GMapTypeControl());
$('#%(name)s_id')[0].value = %(name)s_marker.getLatLng().lat() + "," + %(name)s_marker.getLatLng().lng();
GEvent.addListener(%(name)s_marker, "dragend", function() {
var point = %(name)s_marker.getLatLng();
$('#%(name)s_id')[0].value = point.lat() + "," + point.lng();
});
}});
$(document).unload(function () {GUnload()});
//]]>
</script>
''' % {'name': name, 'default_lat': lat, 'default_lng': lng}
# % dict(name=name)
html = self.inner_widget.render("%s" % name, None, dict(id='%s_id' % name))
html += "<div id=\"map_%s\" style=\"width: 500px; height: 500px\"></div>" % name
return mark_safe(js+html)
class LocationField(forms.Field):
widget = LocationWidget
def clean(self, value):
lat, lng = value.split(',')
return Coordinates(lat, lng)
# My stuff again. (-JCM)
class CoordinatesField(models.Field):
__metaclass__ = models.SubfieldBase
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 70
kwargs['default'] = Coordinates(settings.DEFAULT_LATITUDE, settings.DEFAULT_LONGITUDE)
super(CoordinatesField, self).__init__(*args, **kwargs)
def to_python(self, value):
if isinstance(value, Coordinates):
return value
lat, lng = value.split(',')
return Coordinates(lat, lng)
def get_db_prep_value(self, value, connection, prepared=True):
return str(value)
def formfield(self, **kwargs):
defaults = {'form_class': LocationField}
defaults.update(kwargs)
return super(CoordinatesField, self).formfield(**defaults)
def db_type(self, connection):
return 'varchar(70)'
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
lat, lng = str(value).split(',')
return '%s, %s' % (str(lat).strip(), str(lng).strip())
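# A minimal usage sketch (assumed, not part of this module): declaring the field on a
# model so its form renders the Google Maps picker widget. Requires DEFAULT_LATITUDE
# and DEFAULT_LONGITUDE in settings; the model and import path below are hypothetical.
#
#     from django.db import models
#     from common.geography.fields import CoordinatesField
#
#     class Place(models.Model):
#         name = models.CharField(max_length=100)
#         coordinates = CoordinatesField()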
| [
"[email protected]"
] | |
46e42a8851daf5a097db3ca58345f605faf477e9 | 67c0d7351c145d756b2a49e048500ff361f7add6 | /xpresso/ai/admin/infra/packages/package_dependency.py | da30e097a2988bd38de9035b38da48c3c253c61f | [] | no_license | Krishnaarunangsu/XpressoDataHandling | ba339ae85b52e30715f47406ddb74966350848aa | 0637a465088b468d6fdb6d1bb6f7b087547cec56 | refs/heads/master | 2020-06-27T19:58:43.358340 | 2019-08-29T16:59:08 | 2019-08-29T16:59:08 | 200,035,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,870 | py | """ Package Dependency MOdule
"""
from xpresso.ai.admin.controller.exceptions.xpr_exceptions import \
PackageFailedException
__all__ = ["PackageDependency"]
__author__ = "Srijan Sharma"
import json
import os
import networkx as nx
import matplotlib.pyplot as plt
from xpresso.ai.core.utils.xpr_config_parser import XprConfigParser
from xpresso.ai.core.logging.xpr_log import XprLogger
class PackageDependency:
"""
    Builds a directed acyclic package dependency graph
    from a given dependency JSON config.
"""
NONE_PACKAGE = "None"
DEPENDENCY_SECTION = "pkg_dependency"
DEPENDENCY_CONFIG_FILE = "dependency_config_file"
def __init__(self, config_path=XprConfigParser.DEFAULT_CONFIG_PATH):
super().__init__()
self.config = XprConfigParser(config_path)["packages_setup"]
self.logger = XprLogger()
dependency_config_file = self.config[self.DEPENDENCY_SECTION][
self.DEPENDENCY_CONFIG_FILE]
if not os.path.exists(dependency_config_file):
self.logger.error(("Unable to find the dependency js"
"file at the mentioned path"))
raise PackageFailedException("Invalid dependency config file")
try:
with open(dependency_config_file) as config_fs:
dependency_config = json.load(config_fs)
except EnvironmentError as err:
self.logger.fatal(err)
raise PackageFailedException("Invalid config file")
self.graph = nx.DiGraph()
edges = list()
for key in dependency_config:
for value in dependency_config[key]:
edges.append((key, value))
self.graph.add_edges_from(edges)
if not nx.is_directed_acyclic_graph(self.graph):
self.logger.fatal(("Unable to handle dependencies due to cyclic "
"loop"))
self.graph = None
raise PackageFailedException("Cyclic Dependency Found")
def visualize_dependency_graph(self):
"""
        Creates a plot of the directed dependency graph
"""
if self.graph is None:
self.logger.error("Graph value none cannot be plotted")
return
nx.draw(self.graph, cmap=plt.get_cmap('jet'), with_labels=True)
plt.show()
def check_if_supported(self, package_name: str):
"""
        Args:
            package_name(str)
        Returns:
            bool: True if supported, False otherwise
"""
return bool(self.graph.has_node(package_name))
def list_all(self):
"""
        Extracts the values of all nodes (packages) present in the graph
        Returns:
            list: values of all nodes (packages) in the graph
"""
if self.graph is None:
self.logger.error("Graph value none cannot be iterated")
return list()
nodes = list()
for node in self.graph.nodes():
if node == self.NONE_PACKAGE:
continue
nodes.append(node)
return nodes
def get_dependency(self, package_name: str) -> list:
"""
List of dependencies
Args:
package_name(str): Name of the package
Returns:
list: List of dependencies required for the package_name
installation
"""
if not self.check_if_supported(package_name=package_name):
self.logger.error("{} package not present in config"
.format(package_name))
return list()
self.logger.info(("Running Topological sorting on "
"Package Dependency Graph"))
try:
topological_sort_list = list(reversed(list(
nx.topological_sort(self.graph))))
except nx.NetworkXError as error:
self.logger.error(error)
raise PackageFailedException("Topological sort is defined for "
"directed graphs only")
except nx.NetworkXUnfeasible as error:
self.logger.error(error)
raise PackageFailedException("Not a directed acyclic graph (DAG) "
"and hence no topological sort exists")
descendants = nx.descendants(self.graph, package_name)
dependent_packages = []
for pkg in topological_sort_list:
if pkg in descendants and pkg != self.NONE_PACKAGE:
dependent_packages.append(pkg)
if package_name != self.NONE_PACKAGE:
dependent_packages.append(package_name)
return dependent_packages
if __name__ == "__main__":
pkg_dep = PackageDependency()
pkg_dep.visualize_dependency_graph()
print(pkg_dep.list_all())
print(pkg_dep.get_dependency("PythonPackage"))
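# A minimal sketch of the dependency config this class consumes (the JSON file pointed
# to by packages_setup -> pkg_dependency -> dependency_config_file). Each key maps a
# package to the packages it depends on, bottoming out at "None"; the names here are
# hypothetical examples:
#
#     {
#         "PythonPackage": ["None"],
#         "DockerPackage": ["PythonPackage"],
#         "KubernetesPackage": ["DockerPackage", "PythonPackage"]
#     }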
| [
"[email protected]"
] | |
aed4b7492ab51ac5f0be52b4803a554e1a88e1a5 | d33d25c752aa9604ccbd3ce75a26d31e8a12151a | /models/spational_transformer_sampler_interp.py | 0e4a348dcc3db3c7a4160ae048eb387f2e289d4f | [] | no_license | yasunorikudo/sfm-learner-chainer | 78bbb080c54e6af4278f31448d7b4067492b2dce | 06d722a2a71ea9c51c4755862be7b211c35ac2b1 | refs/heads/master | 2022-07-07T12:03:10.076957 | 2019-08-07T07:39:59 | 2019-08-07T07:39:59 | 200,998,542 | 0 | 0 | null | 2022-06-21T22:28:07 | 2019-08-07T07:38:37 | Python | UTF-8 | Python | false | false | 4,722 | py | import numpy
import chainer
from chainer import function
from chainer.utils import argument
from chainer.utils import type_check
from chainer import cuda
class SpatialTransformerSamplerInterp(function.Function):
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(2 == n_in)
x_type = in_types[0]
grid_type = in_types[1]
type_check.expect(
x_type.dtype.char == 'f',
grid_type.dtype.char == 'f',
x_type.ndim == 4,
grid_type.ndim == 4,
grid_type.shape[1] == 2,
x_type.shape[0] == grid_type.shape[0],
)
def forward_cpu(self, inputs):
return self._forward(inputs)
def forward_gpu(self, inputs):
return self._forward(inputs)
def _forward(self, inputs):
x, grid = inputs
xp = cuda.get_array_module(x)
B, C, H, W = x.shape
_, _, out_H, out_W = grid.shape
u = grid[:, 0].reshape(-1)
v = grid[:, 1].reshape(-1)
u0 = xp.floor(u)
u1 = u0 + 1
v0 = xp.floor(v)
v1 = v0 + 1
u0 = u0.clip(0, W - 1)
v0 = v0.clip(0, H - 1)
u1 = u1.clip(0, W - 1)
v1 = v1.clip(0, H - 1)
# weights
wt_x0 = u1 - u
wt_x1 = u - u0
wt_y0 = v1 - v
wt_y1 = v - v0
w1 = wt_x0 * wt_y0
w2 = wt_x1 * wt_y0
w3 = wt_x0 * wt_y1
w4 = wt_x1 * wt_y1
w1 = w1.astype(x.dtype)
w2 = w2.astype(x.dtype)
w3 = w3.astype(x.dtype)
w4 = w4.astype(x.dtype)
u0 = u0.astype(numpy.int32)
v0 = v0.astype(numpy.int32)
u1 = u1.astype(numpy.int32)
v1 = v1.astype(numpy.int32)
batch_index = xp.repeat(xp.arange(B), out_H * out_W)
y = w1[:, None] * x[batch_index, :, v0, u0]
y += w2[:, None] * x[batch_index, :, v0, u1]
y += w3[:, None] * x[batch_index, :, v1, u0]
y += w4[:, None] * x[batch_index, :, v1, u1]
y = y.reshape(B, out_H, out_W, C).transpose(0, 3, 1, 2)
return y,
def backward_cpu(self, inputs, grad_outputs):
return self._backward(inputs, grad_outputs)
def backward_gpu(self, inputs, grad_outputs):
return self._backward(inputs, grad_outputs)
def _backward(self, inputs, grad_outputs):
x, grid = inputs
xp = cuda.get_array_module(x)
gy, = grad_outputs
B, C, H, W = x.shape
_, _, out_H, out_W = grid.shape
u = grid[:, 0].reshape(-1)
v = grid[:, 1].reshape(-1)
# indices of the 2x2 pixel neighborhood surrounding the coordinates
u0 = xp.floor(u)
u1 = u0 + 1
v0 = xp.floor(v)
v1 = v0 + 1
u0 = u0.clip(0, W - 1)
v0 = v0.clip(0, H - 1)
u1 = u1.clip(0, W - 1)
v1 = v1.clip(0, H - 1)
# weights
wt_x0 = u1 - u
wt_x1 = u - u0
wt_y0 = v1 - v
wt_y1 = v - v0
wt_x0 = wt_x0.astype(gy.dtype)
wt_x1 = wt_x1.astype(gy.dtype)
wt_y0 = wt_y0.astype(gy.dtype)
wt_y1 = wt_y1.astype(gy.dtype)
u0 = u0.astype(numpy.int32)
v0 = v0.astype(numpy.int32)
u1 = u1.astype(numpy.int32)
v1 = v1.astype(numpy.int32)
batch_index = xp.repeat(xp.arange(B), out_H * out_W)
x_indexed_1 = x[batch_index, :, v0, u0]
x_indexed_2 = x[batch_index, :, v0, u1]
x_indexed_3 = x[batch_index, :, v1, u0]
x_indexed_4 = x[batch_index, :, v1, u1]
gu = -wt_y0[:, None] * x_indexed_1
gu += wt_y0[:, None] * x_indexed_2
gu -= wt_y1[:, None] * x_indexed_3
gu += wt_y1[:, None] * x_indexed_4
gv = -wt_x0[:, None] * x_indexed_1
gv -= wt_x1[:, None] * x_indexed_2
gv += wt_x0[:, None] * x_indexed_3
gv += wt_x1[:, None] * x_indexed_4
gu = gu.reshape(B, out_H, out_W, C).transpose(0, 3, 1, 2)
gv = gv.reshape(B, out_H, out_W, C).transpose(0, 3, 1, 2)
gu *= gy
gv *= gy
gu = xp.sum(gu, axis=1)
gv = xp.sum(gv, axis=1)
# Offsets scaling of the coordinates and clip gradients.
ggrid = xp.concatenate((gu[:, None], gv[:, None]), axis=1)
gx = xp.zeros_like(x)
return gx, ggrid
def spatial_transformer_sampler_interp(x, grid, **kwargs):
argument.check_unexpected_kwargs(
kwargs, use_cudnn="The argument \"use_cudnn\" is not "
"supported anymore. "
"Use chainer.using_config('use_cudnn', value) "
"context where value can be `always`, `never`, or `auto`.")
argument.assert_kwargs_empty(kwargs)
return SpatialTransformerSamplerInterp()(x, grid)
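# A minimal usage sketch (shapes and values are assumptions): grid holds absolute pixel
# coordinates, with grid[:, 0] = x and grid[:, 1] = y, and sampling is bilinear.
#
#     import numpy as np
#     x = np.random.rand(2, 3, 64, 64).astype(np.float32)            # (B, C, H, W)
#     grid = (np.random.rand(2, 2, 32, 32) * 63).astype(np.float32)  # (B, 2, out_H, out_W)
#     y = spatial_transformer_sampler_interp(x, grid)                # (B, C, out_H, out_W)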
| [
"[email protected]"
] | |
d70a299da56c555c36523cf59b82f9260fee8453 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/9/v6g.py | c558deb65007de213bf4145030d8f280d59936fc | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'v6G':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
e4b369b060fd48853273f89894747a4af8741872 | 1620f9900da8d18e647b61f543a68d9386967d65 | /histoslider/image/channel_image_item.py | c05461e333146b1701df565bde6e091c4107ebb3 | [
"MIT"
] | permissive | ch-king/HistoSlider-1 | c57c748fc3b2ec2600b79b132373bc6b4c686369 | 2dbad7a91072626206fb3fad776291c1c68e342f | refs/heads/master | 2021-10-24T13:41:18.807783 | 2019-03-26T11:05:27 | 2019-03-26T11:05:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,082 | py | from collections import Callable
import numpy as np
from PyQt5.QtCore import pyqtSignal, QRectF, QPointF, Qt, QPoint, QTimer
from PyQt5.QtGui import QTransform, QPixmap
from PyQt5.QtWidgets import QMenu, QAction
from pyqtgraph import GraphicsObject, getConfigOption, fn, Point
from histoslider.models.channel import Channel
class ChannelImageItem(GraphicsObject):
"""
**Bases:** :class:`GraphicsObject <pyqtgraph.GraphicsObject>`
GraphicsObject displaying an image. Optimized for rapid update (ie video display).
This item displays either a 2D numpy array (height, width) or
a 3D array (height, width, RGBa). This array is optionally scaled (see
:func:`setLevels <pyqtgraph.ImageItem.setLevels>`) and/or colored
with a lookup table (see :func:`setLookupTable <pyqtgraph.ImageItem.setLookupTable>`)
before being displayed.
ImageItem is frequently used in conjunction with
:class:`HistogramLUTItem <pyqtgraph.HistogramLUTItem>` or
:class:`HistogramLUTWidget <pyqtgraph.HistogramLUTWidget>` to provide a GUI
for controlling the levels and lookup table used to display the image.
"""
sigImageChanged = pyqtSignal()
sigRemoveRequested = pyqtSignal(object) # self; emitted when 'remove' is selected from context menu
def __init__(self, image: np.ndarray, channel: Channel, **kargs):
"""
See :func:`setImage <pyqtgraph.ImageItem.setImage>` for all allowed initialization arguments.
"""
GraphicsObject.__init__(self)
self.channel = channel
self.setPxMode(False)
self.setAutoDownsample(False)
self.menu = None
self.image = None ## original image data
self.qimage = None ## rendered image for display
self.paintMode = None
self.levels = channel.settings.levels
self.lut = None
self.autoDownsample = False
self.axisOrder = getConfigOption('imageAxisOrder')
# In some cases, we use a modified lookup table to handle both rescaling
# and LUT more efficiently
self._effectiveLut = None
self.drawKernel = None
self.border = None
self.removable = False
if image is not None:
self.setImage(image, **kargs)
else:
self.setOpts(**kargs)
def setCompositionMode(self, mode):
"""Change the composition mode of the item (see QPainter::CompositionMode
in the Qt documentation). This is useful when overlaying multiple ImageItems.
============================================ ============================================================
**Most common arguments:**
QtGui.QPainter.CompositionMode_SourceOver Default; image replaces the background if it
is opaque. Otherwise, it uses the alpha channel to blend
the image with the background.
QtGui.QPainter.CompositionMode_Overlay The image color is mixed with the background color to
reflect the lightness or darkness of the background.
QtGui.QPainter.CompositionMode_Plus Both the alpha and color of the image and background pixels
are added together.
QtGui.QPainter.CompositionMode_Multiply The output is the image color multiplied by the background.
============================================ ============================================================
"""
self.paintMode = mode
self.update()
def setBorder(self, b):
self.border = fn.mkPen(b)
self.update()
def width(self):
if self.image is None:
return None
axis = 0 if self.axisOrder == 'col-major' else 1
return self.image.shape[axis]
def height(self):
if self.image is None:
return None
axis = 1 if self.axisOrder == 'col-major' else 0
return self.image.shape[axis]
def channels(self):
if self.image is None:
return None
return self.image.shape[2] if self.image.ndim == 3 else 1
def boundingRect(self):
if self.image is None:
return QRectF(0., 0., 0., 0.)
return QRectF(0., 0., float(self.width()), float(self.height()))
def setLevels(self, levels, update=True):
"""
Set image scaling levels. Can be one of:
* [blackLevel, whiteLevel]
* [[minRed, maxRed], [minGreen, maxGreen], [minBlue, maxBlue]]
Only the first format is compatible with lookup tables. See :func:`makeARGB <pyqtgraph.makeARGB>`
for more details on how levels are applied.
"""
if levels is not None:
levels = np.asarray(levels)
if not fn.eq(levels, self.levels):
self.levels = levels
self._effectiveLut = None
if update:
self.updateImage()
def getLevels(self):
return self.levels
def setLookupTable(self, lut, update=True):
"""
Set the lookup table (numpy array) to use for this image. (see
:func:`makeARGB <pyqtgraph.makeARGB>` for more information on how this is used).
Optionally, lut can be a callable that accepts the current image as an
argument and returns the lookup table to use.
Ordinarily, this table is supplied by a :class:`HistogramLUTItem <pyqtgraph.HistogramLUTItem>`
or :class:`GradientEditorItem <pyqtgraph.GradientEditorItem>`.
"""
if lut is not self.lut:
self.lut = lut
self._effectiveLut = None
if update:
self.updateImage()
def setAutoDownsample(self, ads):
"""
Set the automatic downsampling mode for this ImageItem.
Added in version 0.9.9
"""
self.autoDownsample = ads
self.qimage = None
self.update()
def setOpts(self, update=True, **kargs):
if 'axisOrder' in kargs:
val = kargs['axisOrder']
if val not in ('row-major', 'col-major'):
raise ValueError('axisOrder must be either "row-major" or "col-major"')
self.axisOrder = val
if 'lut' in kargs:
self.setLookupTable(kargs['lut'], update=update)
if 'levels' in kargs:
self.setLevels(kargs['levels'], update=update)
# if 'clipLevel' in kargs:
# self.setClipLevel(kargs['clipLevel'])
if 'opacity' in kargs:
self.setOpacity(kargs['opacity'])
if 'compositionMode' in kargs:
self.setCompositionMode(kargs['compositionMode'])
if 'border' in kargs:
self.setBorder(kargs['border'])
if 'removable' in kargs:
self.removable = kargs['removable']
self.menu = None
if 'autoDownsample' in kargs:
self.setAutoDownsample(kargs['autoDownsample'])
if update:
self.update()
def setRect(self, rect):
"""Scale and translate the image to fit within rect (must be a QRect or QRectF)."""
self.resetTransform()
self.translate(rect.left(), rect.top())
self.scale(rect.width() / self.width(), rect.height() / self.height())
def clear(self):
self.image = None
self.prepareGeometryChange()
self.informViewBoundsChanged()
self.update()
def setImage(self, image=None, autoLevels=None, **kargs):
"""
Update the image displayed by this item. For more information on how the image
is processed before displaying, see :func:`makeARGB <pyqtgraph.makeARGB>`
================= =========================================================================
**Arguments:**
image (numpy array) Specifies the image data. May be 2D (width, height) or
3D (width, height, RGBa). The array dtype must be integer or floating
point of any bit depth. For 3D arrays, the third dimension must
be of length 3 (RGB) or 4 (RGBA). See *notes* below.
autoLevels (bool) If True, this forces the image to automatically select
levels based on the maximum and minimum values in the data.
By default, this argument is true unless the levels argument is
given.
lut (numpy array) The color lookup table to use when displaying the image.
See :func:`setLookupTable <pyqtgraph.ImageItem.setLookupTable>`.
levels (min, max) The minimum and maximum values to use when rescaling the image
data. By default, this will be set to the minimum and maximum values
in the image. If the image array has dtype uint8, no rescaling is necessary.
opacity (float 0.0-1.0)
compositionMode See :func:`setCompositionMode <pyqtgraph.ImageItem.setCompositionMode>`
border Sets the pen used when drawing the image border. Default is None.
autoDownsample (bool) If True, the image is automatically downsampled to match the
screen resolution. This improves performance for large images and
reduces aliasing. If autoDownsample is not specified, then ImageItem will
choose whether to downsample the image based on its size.
================= =========================================================================
**Notes:**
For backward compatibility, image data is assumed to be in column-major order (column, row).
However, most image data is stored in row-major order (row, column) and will need to be
transposed before calling setImage()::
imageitem.setImage(imagedata.T)
This requirement can be changed by calling ``image.setOpts(axisOrder='row-major')`` or
by changing the ``imageAxisOrder`` :ref:`global configuration option <apiref_config>`.
"""
gotNewData = False
if image is None:
if self.image is None:
return
else:
gotNewData = True
shapeChanged = (self.image is None or image.shape != self.image.shape)
image = image.view(np.ndarray)
if self.image is None or image.dtype != self.image.dtype:
self._effectiveLut = None
self.image = image
if self.image.shape[0] > 2 ** 15 - 1 or self.image.shape[1] > 2 ** 15 - 1:
if 'autoDownsample' not in kargs:
kargs['autoDownsample'] = True
if shapeChanged:
self.prepareGeometryChange()
self.informViewBoundsChanged()
if autoLevels is None:
if 'levels' in kargs:
autoLevels = False
else:
autoLevels = True
if autoLevels:
img = self.image
while img.size > 2 ** 16:
img = img[::2, ::2]
mn, mx = np.nanmin(img), np.nanmax(img)
# mn and mx can still be NaN if the data is all-NaN
if mn == mx or np.isnan(mn) or np.isnan(mx):
mn = 0
mx = 255
kargs['levels'] = [mn, mx]
self.setOpts(update=False, **kargs)
self.qimage = None
self.update()
if gotNewData:
self.sigImageChanged.emit()
def dataTransform(self):
"""Return the transform that maps from this image's input array to its
local coordinate system.
This transform corrects for the transposition that occurs when image data
is interpreted in row-major order.
"""
# Might eventually need to account for downsampling / clipping here
tr = QTransform()
if self.axisOrder == 'row-major':
# transpose
tr.scale(1, -1)
tr.rotate(-90)
return tr
def inverseDataTransform(self):
"""Return the transform that maps from this image's local coordinate
system to its input array.
See dataTransform() for more information.
"""
tr = QTransform()
if self.axisOrder == 'row-major':
# transpose
tr.scale(1, -1)
tr.rotate(-90)
return tr
def mapToData(self, obj):
tr = self.inverseDataTransform()
return tr.map(obj)
def mapFromData(self, obj):
tr = self.dataTransform()
return tr.map(obj)
def quickMinMax(self, targetSize=1e6):
"""
Estimate the min/max values of the image data by subsampling.
"""
data = self.image
while data.size > targetSize:
ax = np.argmax(data.shape)
sl = [slice(None)] * data.ndim
sl[ax] = slice(None, None, 2)
data = data[sl]
return np.nanmin(data), np.nanmax(data)
def updateImage(self, *args, **kargs):
## used for re-rendering qimage from self.image.
## can we make any assumptions here that speed things up?
## dtype, range, size are all the same?
defaults = {
'autoLevels': False,
}
defaults.update(kargs)
return self.setImage(*args, **defaults)
def render(self):
# Convert data to QImage for display.
if self.image is None or self.image.size == 0:
return
# Request a lookup table if this image has only one channel
if self.image.ndim == 2 or self.image.shape[2] == 1:
if isinstance(self.lut, Callable):
lut = self.lut(self.image)
else:
lut = self.lut
else:
lut = None
if self.autoDownsample:
# reduce dimensions of image based on screen resolution
o = self.mapToDevice(QPointF(0, 0))
x = self.mapToDevice(QPointF(1, 0))
y = self.mapToDevice(QPointF(0, 1))
# Check if graphics view is too small to render anything
if o is None or x is None or y is None:
return
w = Point(x - o).length()
h = Point(y - o).length()
if w == 0 or h == 0:
self.qimage = None
return
xds = max(1, int(1.0 / w))
yds = max(1, int(1.0 / h))
axes = [1, 0] if self.axisOrder == 'row-major' else [0, 1]
image = fn.downsample(self.image, xds, axis=axes[0])
image = fn.downsample(image, yds, axis=axes[1])
self._lastDownsample = (xds, yds)
# Check if downsampling reduced the image size to zero due to inf values.
if image.size == 0:
return
else:
image = self.image
# if the image data is a small int, then we can combine levels + lut
# into a single lut for better performance
levels = self.levels
if levels is not None and levels.ndim == 1 and image.dtype in (np.ubyte, np.uint16):
if self._effectiveLut is None:
eflsize = 2 ** (image.itemsize * 8)
ind = np.arange(eflsize)
minlev, maxlev = levels
levdiff = maxlev - minlev
levdiff = 1 if levdiff == 0 else levdiff # don't allow division by 0
if lut is None:
efflut = fn.rescaleData(ind, scale=255. / levdiff,
offset=minlev, dtype=np.ubyte)
else:
lutdtype = np.min_scalar_type(lut.shape[0] - 1)
efflut = fn.rescaleData(ind, scale=(lut.shape[0] - 1) / levdiff,
offset=minlev, dtype=lutdtype, clip=(0, lut.shape[0] - 1))
efflut = lut[efflut]
self._effectiveLut = efflut
lut = self._effectiveLut
levels = None
# Convert single-channel image to 2D array
if image.ndim == 3 and image.shape[-1] == 1:
image = image[..., 0]
# Assume images are in column-major order for backward compatibility
# (most images are in row-major order)
if self.axisOrder == 'col-major':
image = image.transpose((1, 0, 2)[:image.ndim])
argb, alpha = fn.makeARGB(image, lut=lut, levels=levels)
self.qimage = fn.makeQImage(argb, alpha, transpose=False)
def paint(self, p, *args):
if self.image is None:
return
if self.qimage is None:
self.render()
if self.qimage is None:
return
if self.paintMode is not None:
p.setCompositionMode(self.paintMode)
shape = self.image.shape[:2] if self.axisOrder == 'col-major' else self.image.shape[:2][::-1]
p.drawImage(QRectF(0, 0, *shape), self.qimage)
if self.border is not None:
p.setPen(self.border)
p.drawRect(self.boundingRect())
def save(self, fileName, *args):
"""Save this image to file. Note that this saves the visible image (after scale/color changes), not the original data."""
if self.qimage is None:
self.render()
self.qimage.save(fileName, *args)
def getHistogram(self, bins='auto', step='auto', perChannel=False, targetImageSize=200,
targetHistogramSize=500, **kwds):
"""Returns x and y arrays containing the histogram values for the current image.
For an explanation of the return format, see numpy.histogram().
The *step* argument causes pixels to be skipped when computing the histogram to save time.
If *step* is 'auto', then a step is chosen such that the analyzed data has
dimensions roughly *targetImageSize* for each axis.
The *bins* argument and any extra keyword arguments are passed to
np.histogram(). If *bins* is 'auto', then a bin number is automatically
chosen based on the image characteristics:
* Integer images will have approximately *targetHistogramSize* bins,
with each bin having an integer width.
* All other types will have *targetHistogramSize* bins.
If *perChannel* is True, then the histogram is computed once per channel
and the output is a list of the results.
This method is also used when automatically computing levels.
"""
if self.image is None or self.image.size == 0:
return None, None
if step == 'auto':
step = (max(1, int(np.ceil(self.image.shape[0] / targetImageSize))),
max(1, int(np.ceil(self.image.shape[1] / targetImageSize))))
if np.isscalar(step):
step = (step, step)
stepData = self.image[::step[0], ::step[1]]
if bins == 'auto':
mn = np.nanmin(stepData)
mx = np.nanmax(stepData)
if np.isnan(mn) or np.isnan(mx):
# the data are all-nan
return None, None
if stepData.dtype.kind in "ui":
# For integer data, we select the bins carefully to avoid aliasing
step = np.ceil((mx - mn) / 500.)
bins = np.arange(mn, mx + 1.01 * step, step, dtype=np.int)
else:
# for float data, let numpy select the bins.
bins = np.linspace(mn, mx, 500)
if len(bins) == 0:
bins = [mn, mx]
kwds['bins'] = bins
if perChannel:
hist = []
for i in range(stepData.shape[-1]):
stepChan = stepData[..., i]
stepChan = stepChan[np.isfinite(stepChan)]
h = np.histogram(stepChan, **kwds)
hist.append((h[1][:-1], h[0]))
return hist
else:
stepData = stepData[np.isfinite(stepData)]
hist = np.histogram(stepData, **kwds)
return hist[1][:-1], hist[0]
def setPxMode(self, b):
"""
Set whether the item ignores transformations and draws directly to screen pixels.
If True, the item will not inherit any scale or rotation transformations from its
parent items, but its position will be transformed as usual.
(see GraphicsItem::ItemIgnoresTransformations in the Qt documentation)
"""
self.setFlag(self.ItemIgnoresTransformations, b)
def setScaledMode(self):
self.setPxMode(False)
def getPixmap(self):
if self.qimage is None:
self.render()
if self.qimage is None:
return None
return QPixmap.fromImage(self.qimage)
def pixelSize(self):
"""return scene-size of a single pixel in the image"""
br = self.sceneBoundingRect()
if self.image is None:
return 1, 1
return br.width() / self.width(), br.height() / self.height()
def viewTransformChanged(self):
if self.autoDownsample:
self.qimage = None
self.update()
def mouseDragEvent(self, ev):
if ev.button() != Qt.LeftButton:
ev.ignore()
return
elif self.drawKernel is not None:
ev.accept()
self.drawAt(ev.pos(), ev)
def mouseClickEvent(self, ev):
if ev.button() == Qt.RightButton:
if self.raiseContextMenu(ev):
ev.accept()
if self.drawKernel is not None and ev.button() == Qt.LeftButton:
self.drawAt(ev.pos(), ev)
def raiseContextMenu(self, ev):
menu = self.getMenu()
if menu is None:
return False
menu = self.scene().addParentContextMenus(self, menu, ev)
pos = ev.screenPos()
menu.popup(QPoint(pos.x(), pos.y()))
return True
def getMenu(self):
if self.menu is None:
if not self.removable:
return None
self.menu = QMenu()
self.menu.setTitle("Image")
remAct = QAction("Remove image", self.menu)
remAct.triggered.connect(self.removeClicked)
self.menu.addAction(remAct)
self.menu.remAct = remAct
return self.menu
def hoverEvent(self, ev):
if not ev.isExit() and self.drawKernel is not None and ev.acceptDrags(Qt.LeftButton):
ev.acceptClicks(
Qt.LeftButton) ## we don't use the click, but we also don't want anyone else to use it.
ev.acceptClicks(Qt.RightButton)
elif not ev.isExit() and self.removable:
ev.acceptClicks(Qt.RightButton) ## accept context menu clicks
def tabletEvent(self, ev):
pass
# print(ev.device())
# print(ev.pointerType())
# print(ev.pressure())
def drawAt(self, pos, ev=None):
pos = [int(pos.x()), int(pos.y())]
dk = self.drawKernel
kc = self.drawKernelCenter
sx = [0, dk.shape[0]]
sy = [0, dk.shape[1]]
tx = [pos[0] - kc[0], pos[0] - kc[0] + dk.shape[0]]
ty = [pos[1] - kc[1], pos[1] - kc[1] + dk.shape[1]]
for i in [0, 1]:
dx1 = -min(0, tx[i])
dx2 = min(0, self.image.shape[0] - tx[i])
tx[i] += dx1 + dx2
sx[i] += dx1 + dx2
dy1 = -min(0, ty[i])
dy2 = min(0, self.image.shape[1] - ty[i])
ty[i] += dy1 + dy2
sy[i] += dy1 + dy2
ts = (slice(tx[0], tx[1]), slice(ty[0], ty[1]))
ss = (slice(sx[0], sx[1]), slice(sy[0], sy[1]))
mask = self.drawMask
src = dk
if isinstance(self.drawMode, Callable):
self.drawMode(dk, self.image, mask, ss, ts, ev)
else:
src = src[ss]
if self.drawMode == 'set':
if mask is not None:
mask = mask[ss]
self.image[ts] = self.image[ts] * (1 - mask) + src * mask
else:
self.image[ts] = src
elif self.drawMode == 'add':
self.image[ts] += src
else:
raise Exception("Unknown draw mode '%s'" % self.drawMode)
self.updateImage()
def setDrawKernel(self, kernel=None, mask=None, center=(0, 0), mode='set'):
self.drawKernel = kernel
self.drawKernelCenter = center
self.drawMode = mode
self.drawMask = mask
def removeClicked(self):
## Send remove event only after we have exited the menu event handler
self.removeTimer = QTimer()
self.removeTimer.timeout.connect(self.emitRemoveRequested)
self.removeTimer.start(0)
def emitRemoveRequested(self):
self.removeTimer.timeout.disconnect(self.emitRemoveRequested)
self.sigRemoveRequested.emit(self)
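# A minimal usage sketch (the channel object, image data and view box are assumptions):
# the item is typically added to a pyqtgraph ViewBox and its levels driven per channel.
#
#     import numpy as np
#     item = ChannelImageItem(np.random.rand(512, 512).astype(np.float32), channel)
#     view_box.addItem(item)         # view_box: an existing pyqtgraph ViewBox
#     item.setLevels([0.0, 1.0])     # rescale the displayed intensity range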
| [
"[email protected]"
] | |
02cc57abadc1b35abd7611414ff7e4803bf5be52 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/minCost_20200826170809.py | 416df3034399b192e979acb250e8e4c2b5d35bb5 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | def minCost(days,costs):
    # Dynamic programming over calendar days, e.g. days = [1,4,6,7,8,20]:
    # ways[i] = cheapest cost to cover all travel days up to and including day i.
    # On a travel day pick the best of a 1-day, 7-day or 30-day pass;
    # on a non-travel day just carry the previous day's cost forward.
    ways = [0] * (days[len(days)-1] + 1)
    newDays = set(days)
    for i in range(1, len(ways)):
        total = ways[i-1] + costs[0]
        if i-7 > 0: total1 = ways[i-7] + costs[1]
        else: total1 = 0 + costs[1]
        if i-30 > 0: total2 = ways[i-30] + costs[2]
        else: total2 = 0 + costs[2]
        if i in newDays:
            ways[i] = min(total, total1, total2)
        else:
            ways[i] = ways[i-1]
    print(ways)
minCost([1,4,6,7,8,20],[2,7,15]) | [
"[email protected]"
] | |
a0b93c64d418d2f9953bebcebd810c6e68451a2e | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_088/ch20_2020_09_11_22_25_42_229462.py | 9fda66b3766ee69c033f0fc481cd067588ecd032 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | distância = float(input("Digite a distância a percorrer: "))
if (distância <= 200):
total = 0.5 * distância
print("%.2f" %total)
else:
total = 0.45 * (distância)
print("%.2f" %total) | [
"[email protected]"
] | |
f24d5225fa62af98c084fc698de3044b4f04814b | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/Ablation4_ch016_ep003/Gather2_W_fixGood_C_change/train/pyr_2s/L5/step10_a.py | 6cb947073aa28b6129b66598bd02aa5d9bb0a1c9 | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,597 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### Add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__)                           ### path of the currently executing step10_b.py
code_exe_path_element = code_exe_path.split("\\")                    ### split the path so we can find which level kong_model2 sits at
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2")              ### find which level kong_model2 sits at
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1])  ### locate the kong_model2 dir
import sys                                                           ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" code_dir:", code_dir)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer    ### the -1 in the middle converts a length into an index
# print(" kong_to_py_layer:", kong_to_py_layer)
if (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:] ### [7:] 是為了去掉 step1x_, 後來覺得好像改有意義的名字不去掉也行所以 改 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] ### [5:] 是為了去掉 mask_ ,前面的 mask_ 是為了python 的 module 不能 數字開頭, 隨便加的這樣子, 後來覺得 自動排的順序也可以接受, 所以 改0
elif(kong_to_py_layer > 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print(" template_dir:", template_dir) ### 舉例: template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
from step06_a_datas_obj import *
from step09_2side_L5 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
import Exps_7_v3.doc3d.Ablation4_ch016_ep003.W_w_M_to_C_pyr.pyr_2s.L5.step10_a as W_w_M_to_C_p20_pyr
from Exps_7_v3.doc3d.Ablation4_ch016_ep003.I_w_M_to_W_pyr.pyr_3s.L5.step10_a import ch032_1side_6__2side_5__3side_2__ep010 as I_w_M_to_W_p20_3s_L5_Good
#############################################################################################################################################################################################################
'''
exp_dir is the name of the folder one level above result_dir! Nested exp_dir values are fine too~
For example: exp_dir = "6_mask_unet/name_you_choose", then every result_dir lives under:
    6_mask_unet/name_you_choose/result_a
    6_mask_unet/name_you_choose/result_b
    6_mask_unet/name_you_choose/...
'''
use_db_obj = type8_blender_kong_doc3d_v2
use_loss_obj = [mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wz").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wy").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wx").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Cx").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Cy").copy()] ### z, y, x 順序是看 step07_b_0b_Multi_UNet 來對應的喔
#############################################################
### Build an empty Exp_builder so resul_analyze can draw blank figures
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1__2side_1_and_1s6_2s6.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="為了resul_analyze畫空白的圖,建一個empty的 Exp_builder")
#############################################################
ch032_1side_1__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s1__2s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_1__2side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_2__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s2__2s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_2__2side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_2__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s2__2s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_2__2side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3__2side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_6 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
#############################################################
gather_ep010__ch032_1s6_2s5_3s2__ch032_1s6_2s6__1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="Gather_ep010__ch032_1s6_2s5_3s2__ch032_1s6_2s6__1") .set_train_args(epochs= 10) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__ep010, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_ckpt_keep_amount(20).set_result_name(result_name="")
gather_ep010__ch032_1s6_2s5_3s2__ch032_1s6_2s6__2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="Gather_ep010__ch032_1s6_2s5_3s2__ch032_1s6_2s6__2") .set_train_args(epochs= 10) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__ep010, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_ckpt_keep_amount(20).set_result_name(result_name="")
gather_ep010__ch032_1s6_2s5_3s2__ch032_1s6_2s6__3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="Gather_ep010__ch032_1s6_2s5_3s2__ch032_1s6_2s6__3") .set_train_args(epochs= 10) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__ep010, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_ckpt_keep_amount(20).set_result_name(result_name="")
gather_ep010__ch032_1s6_2s5_3s2__ch032_1s6_2s6__4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="Gather_ep010__ch032_1s6_2s5_3s2__ch032_1s6_2s6__4") .set_train_args(epochs= 10) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__ep010, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_ckpt_keep_amount(20).set_result_name(result_name="")
gather_ep010__ch032_1s6_2s5_3s2__ch032_1s6_2s6__5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="Gather_ep010__ch032_1s6_2s5_3s2__ch032_1s6_2s6__5") .set_train_args(epochs= 10) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__ep010, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_ckpt_keep_amount(20).set_result_name(result_name="")
#############################################################
if(__name__ == "__main__"):
print("build exps cost time:", time.time() - start_time)
if len(sys.argv) < 2:
############################################################################################################
### 直接按 F5 或打 python step10_b1_exp_obj_load_and_train_and_test.py,後面沒有接東西喔!才不會跑到下面給 step10_b_subprocss.py 用的程式碼~~~
ch032_1side_1__2side_1.build().run()
# print('no argument')
sys.exit()
### 以下是給 step10_b_subprocess.py 用的,相當於cmd打 python step10_b1_exp_obj_load_and_train_and_test.py 某個exp.build().run()
eval(sys.argv[1])
| [
"[email protected]"
] | |
16a1f15487d6a04ef8e315b7e87984f406ce40f4 | f4b60f5e49baf60976987946c20a8ebca4880602 | /lib64/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/vns/rtconntoaconninst.py | 0a35dac1b96046c398e2ffe6d152e8610f74460e | [] | no_license | cqbomb/qytang_aci | 12e508d54d9f774b537c33563762e694783d6ba8 | a7fab9d6cda7fadcc995672e55c0ef7e7187696e | refs/heads/master | 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 | Python | UTF-8 | Python | false | false | 5,644 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RtConnToAConnInst(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = TargetRelationMeta("cobra.model.vns.RtConnToAConnInst", "cobra.model.vns.FuncConnInst")
meta.moClassName = "vnsRtConnToAConnInst"
meta.rnFormat = "rtconnToAConnInst-[%(tDn)s]"
meta.category = MoCategory.RELATIONSHIP_FROM_LOCAL
meta.label = "Connector Instance"
meta.writeAccessMask = 0x0
meta.readAccessMask = 0x6000000000000001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.vns.AbsFuncConn")
meta.parentClasses.add("cobra.model.vns.FuncConnInst")
meta.parentClasses.add("cobra.model.vns.AbsTermConn")
meta.parentClasses.add("cobra.model.vns.TermConnInst")
meta.superClasses.add("cobra.model.reln.From")
meta.superClasses.add("cobra.model.reln.Inst")
meta.rnPrefixes = [
('rtconnToAConnInst-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 20739, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 4830
prop.defaultValueStr = "vnsFuncConnInst"
prop._addConstant("unspecified", "unspecified", 0)
prop._addConstant("vnsFuncConnInst", None, 4830)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tDn", "tDn", 20738, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("tDn", prop)
meta.namingProps.append(getattr(meta.props, "tDn"))
getattr(meta.props, "tDn").needDelimiter = True
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("AbsGraphToNwIf", "Physical Interfaces", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AbsNodeToNwIf", "Physical Interfaces", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("NodeInstToNwIf", "Physical Interfaces", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AbsGraphToCompVNic", "Virtual Nics", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AbsNodeToCompVNic", "Virtual Nics", "cobra.model.comp.VNic"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("NodeInstToCompVNic", "Virtual Nics", "cobra.model.comp.VNic"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AbsGraphToGraphInst", "Graph Instances", "cobra.model.vns.GraphInst"))
def __init__(self, parentMoOrDn, tDn, markDirty=True, **creationProps):
namingVals = [tDn]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
845bd92c060e393e1feb07efce537f9a3b65d67b | f3cd7727bb731e359e93e86771ed66ccc4587937 | /generic_images/managers.py | d462ed0e927ec90f5d3d9b728c53544c74c016ad | [
"MIT"
] | permissive | kmike/django-generic-images | bb8344751c27056c88abedb6a3669204f0b5b25b | 4e45068ed219ac35396758eb6b6e1fe5306147df | refs/heads/origin/master | 2023-08-18T04:12:04.668596 | 2009-12-25T15:45:13 | 2009-12-25T15:45:13 | 2,316,219 | 5 | 3 | null | 2017-11-10T15:16:30 | 2011-09-02T20:16:38 | Python | UTF-8 | Python | false | false | 2,306 | py | from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.db.models import get_model
from generic_utils.managers import GenericModelManager
def get_model_class_by_name(name):
app_label, model_name = name.split(".")
model = get_model(app_label, model_name, False)
return model
class ImagesAndUserManager(models.Manager):
""" Useful manager for models that have AttachedImage (or subclass) field
        and 'injector=GenericInjector()' manager.
"""
def __init__(self, *args, **kwargs):
try:
image_model_class = kwargs.pop('image_model_class')
except KeyError:
image_model_class = 'generic_images.AttachedImage'
self.image_model_class = get_model_class_by_name(image_model_class)
super(ImagesAndUserManager, self).__init__(*args, **kwargs)
def select_with_main_images(self, limit=None, **kwargs):
''' Select all objects with filters passed as kwargs.
        For each object its main image instance is accessible as ``object.main_image``.
Results can be limited using ``limit`` parameter.
Selection is performed using only 2 or 3 sql queries.
'''
objects = self.get_query_set().filter(**kwargs)[:limit]
        self.image_model_class.injector.inject_to(objects, 'main_image', is_main=True)
return objects
def for_user_with_main_images(self, user, limit=None):
return self.select_with_main_images(user=user, limit=limit)
def get_for_user(self, user):
objects = self.get_query_set().filter(user=user)
return objects
class AttachedImageManager(GenericModelManager):
''' Manager with helpful functions for attached images
'''
def get_for_model(self, model):
''' Returns all images that are attached to given model.
Deprecated. Use `for_model` instead.
'''
return self.for_model(model)
def get_main_for(self, model):
'''
Returns main image for given model
'''
try:
return self.for_model(model).get(is_main=True)
except models.ObjectDoesNotExist:
return None
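# Illustrative usage sketch (model names are hypothetical; assumes a model declares
# `objects = ImagesAndUserManager()` and that AttachedImage uses `injector = GenericInjector()`):
#
#   albums = Album.objects.select_with_main_images(limit=10, user=request.user)
#   for album in albums:
#       print album.main_image   # populated without extra per-object queries
#
#   main = AttachedImage.objects.get_main_for(album)   # main image or None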
| [
"[email protected]"
] | |
63456deeb37fe3d0953db49310e7b28446f990fe | f4924a0a6d1eb17f3b7dca035f7dedfe0231254a | /src/dsgrn_net_query/queries/CountStableFC_large_networks.py | be619af9d650d362d75488d495c1be52ad016a78 | [
"MIT"
] | permissive | julianfox8/dsgrn_net_query | b22f4ac3f75a6d0d21fc7b3a703389486b7a27f6 | 89df8bded9d60384864b04703ef52dfbd52632d9 | refs/heads/master | 2023-08-22T08:36:01.137658 | 2021-10-01T17:25:50 | 2021-10-01T17:25:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,304 | py | import DSGRN
import os, json, sys, subprocess, ast, shutil
def query(network_file,params_file,resultsdir=""):
'''
:param network_file: a .txt file containing either a single DSGRN network specification or a list of network specification strings in DSGRN format
:param params_file: A json file with the keys
"num_proc" = number of processes to use for each database creation
"count" = True or False (true or false in .json format)
whether or not to return the number of matches (True) or just whether or not there is at least one match (False)
"datetime" : optional datetime string to append to subdirectories in resultsdir, default = system time
:param resultsdir: optional path to directory where results will be written, default is current directory
:return: Writes a .json file containing a dictionary keyed by DSGRN network specification with a list of results.
The results are DSGRN parameter count that have at least one Morse set that is a stable full cycle,
or True (existence of at least one stable full cycle) or False (none exist), depending on the value of the parameter "count".
The size of the DSGRN parameter graph for the network is also recorded.
{ networkspec : [result, DSGRN param graph size] }.
'''
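    # An illustrative params .json (key names come from the docstring above; the values are examples only):
    # {
    #     "num_proc": 4,
    #     "count": true,
    #     "datetime": "2021_01_01_00_00_00"
    # }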
networks = read_networks(network_file)
params = json.load(open(params_file))
datetime = None if "datetime" not in params else params["datetime"]
if not networks:
raise ValueError("No networks available for analysis. Make sure network file is in the correct format.")
else:
num_proc, count = sanity_check(params)
results = {}
for k,netspec in enumerate(networks):
netfile = "temp{}.txt".format(k)
dbfile = "temp{}.db".format(k)
if os.path.exists(dbfile):
os.remove(dbfile)
with open(netfile,"w") as f:
f.write(netspec)
subprocess.check_call("mpiexec -n {} Signatures {} {}".format(num_proc,netfile,dbfile),shell=True)
db = DSGRN.Database(dbfile)
N = db.parametergraph.size()
matches = len(DSGRN.StableFCQuery(db).matches())
if count:
results[netspec] = (matches,N)
else:
results[netspec] = (matches > 0, N)
subprocess.call(["rm",netfile])
subprocess.call(["rm",dbfile])
print("Network {} of {} complete".format(k + 1, len(networks)))
sys.stdout.flush()
record_results(network_file,params_file,results,resultsdir,datetime)
def sanity_check(params):
'''
Checks to be sure the correct keys are in the dictionary params.
:param params: dictionary
:return: Either the values of the keys "num_proc" and "count" in the parameter dictionary, or an error is raised.
'''
if "num_proc" not in params or "count" not in params:
raise ValueError("The keys 'num_proc' and 'count' must be specified in the parameter file.")
return params["num_proc"],params["count"]
def record_results(network_file,params_file,results,resultsdir,datetime):
'''
Record results in a .json file.
:param network_file: The input .txt file containing the list of DSGRN network specifications.
:param params_file: The input .json parameter file.
:param results: The dictionary of results.
:param resultsdir: The location to save the dictionary of results.
:param datetime: None or string with datetime
:return: None. File is written.
'''
resultsdir = create_results_folder(network_file, params_file, resultsdir,datetime)
rname = os.path.join(resultsdir,"query_results.json")
if os.path.exists(rname):
os.rename(rname,rname+".old")
json.dump(results,open(rname,'w'))
print(resultsdir)
def read_networks(network_file):
'''
NOTE: Forced to copy from file_utilities due to collision between import of MPI and the mpiexec call inside this file.
Read a .txt network file that has either a single DSGRN network specification or a list of them
    :param network_file: A .txt file containing a single DSGRN network specification or a list of network specifications,
:return: list of DSGRN network specifications
'''
network_str = open(network_file).read()
if not network_str:
networks = []
elif network_str[0] == "[":
networks = ast.literal_eval(network_str)
else:
while network_str[-1] == '\n':
network_str = network_str[:-1]
networks = [network_str]
return networks
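# For reference, the network file is read verbatim: it may hold a single DSGRN-style specification
# (e.g. something like "X : (~Z)\nY : (X)\nZ : (Y)") or a Python-style list of such strings.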
def create_results_folder(network_file, params_file, resultsdir,datetime):
'''
NOTE: Forced to copy from file_utilities due to collision between import of MPI and the mpiexec call inside this file.
Create a date-time stamped folder to save results. Copy over input files.
:param network_file: A .txt file
:param params_file: A .json file
:param resultsdir: optional path to directory where results will be written
:return: string containing path to date-time stamped directory to save results file
'''
if datetime is None:
datetime = subprocess.check_output(['date +%Y_%m_%d_%H_%M_%S'], shell=True).decode(sys.stdout.encoding).strip()
dirname = os.path.join(os.path.expanduser(resultsdir), "dsgrn_net_query_results" + datetime)
queriesdir = os.path.join(dirname, "queries" + datetime)
os.makedirs(queriesdir)
sys.stdout.flush()
inputfilesdir = os.path.join(dirname, "inputs" + datetime)
os.makedirs(inputfilesdir)
# save input files to computations folder
shutil.copy(network_file, inputfilesdir)
if params_file:
shutil.copy(params_file, inputfilesdir)
return queriesdir
if __name__ == "__main__":
if len(sys.argv) < 3:
print(
"Calling signature has two required arguments \n " \
"python CountStableFC_large_networks.py <path_to_network_file> <path_to_parameter_file>"
)
exit(1)
network_file = sys.argv[1]
params_file = sys.argv[2]
if len(sys.argv) > 3:
resultsdir = sys.argv[3]
query(network_file, params_file, resultsdir)
else:
query(network_file, params_file)
| [
"[email protected]"
] | |
2e82b624c7bd7f45c6340878eaaf67bc60fc5bad | d437914461b775a21ced89300d39893d1bc11c53 | /apps/about/views/__init__.py | 8031bc3edbf6fcf8db52e39ff4c8ec3df2570da5 | [] | no_license | RumorIO/healersource | 86975107da02a18eac89bc65c72dd06f71ac2d72 | 681ef09e4044879840f7f0c8bccc836c3cffec3c | refs/heads/master | 2020-12-03T02:18:56.378837 | 2016-02-19T15:32:52 | 2016-02-19T15:32:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,297 | py | # coding=utf-8
import os
import json
import operator
import random
from datetime import timedelta
from collections import defaultdict
from django.views.decorators.cache import cache_page
from django.views.generic import TemplateView
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.gis.measure import D
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.db.models import Count, Sum
from django.http import Http404, HttpResponse, HttpResponseBadRequest, QueryDict
from django.utils.timezone import datetime
from django.shortcuts import render_to_response, redirect, get_object_or_404
from django.template.context import RequestContext
from django.conf import settings
from django.utils import timezone
from django.utils.text import slugify
from django_messages.models import Message
from mezzanine.blog.models import BlogPost
from oauth_access.models import UserAssociation
from rest_framework.response import Response
from rest_framework.views import APIView
from util import add_to_mailchimp, utm_tracking, full_url
from healing_requests.models import HealingRequest, HealingRequestSearch
from modality.models import get_modality_menu, Modality, ModalityCategory
from messages_hs.forms import ComposeFormUnregistered
from search.utils import add_to_healer_search_history
from intake_forms.models import IntakeFormSentHistory, IntakeForm
from messages_hs.models import UnregisteredMessage, MessageExtra
from account_hs.authentication import user_authenticated, user_is_superuser
from account_hs.forms import ClientSignupForm
from clients.models import Client, SiteJoinInvitation, ReferralsSent, ClientVideo
from client_notes.models import Note
from phonegap.utils import render_page
from healers.forms import HealerSearchForm, ConciergeForm
from healers.models import (Healer, Referrals, Appointment, Zipcode,
WellnessCenter, is_healer, get_account_type, Concierge, Clients)
from healers.utils import (Setup_Step_Names, get_fill_warning_links,
send_hs_mail, get_full_url)
from healers.views import get_healers_geosearch
from payments.models import Payment, Customer, Charge
from send_healing.utils import ghp_members
from send_healing.models import SentHealing
from about.utils import TOP_CITIES, UnicodeWriter, get_featured_providers
from about.models import UserAssociationDated
from about.sitemap import (search_top_categories, search_top_cities,
search_top_specialties)
@login_required
def what_next(request):
def get_suggested_recommendations():
from friends_app.recommendations import ProviderRecommendationsFinder
referrals_from_me = [o["friend"] for o in Referrals.objects.referrals_from(request.user)]
return ProviderRecommendationsFinder(request, referrals_from_me, recommendations_limit=4).recommendations
client = get_object_or_404(Client, user=request.user)
healer = is_healer(request.user)
first_login_cached = client.first_login
if first_login_cached:
client.first_login = False
client.save()
if healer:
return redirect('provider_setup', 0)
else:
incomplete_forms = client.incomplete_forms()
if incomplete_forms:
return redirect('intake_form_answer',
incomplete_forms[0].healer.user.username)
if not healer:
if request.user.client.ambassador_program:
return redirect('ambassador:dashboard')
return redirect('friends', 'healers')
fill_warning_links = get_fill_warning_links(healer)
# referrals_to_me_count = Referrals.objects.referrals_to(healer.user, count=True)
request.step_names = Setup_Step_Names.get_steps(healer)
suggested_recommendations = get_suggested_recommendations()
return render_to_response('about/what_next.html', {
'healer': healer,
'fill_warning_links': fill_warning_links,
# 'referrals_to_me_count': referrals_to_me_count,
"suggested_recommendations": suggested_recommendations,
'friend_type': 'referrals',
"first_login": first_login_cached,
'editing_self': True,
}, context_instance=RequestContext(request))
@user_is_superuser
def wcenter(request):
wellness_centers = WellnessCenter.objects.all().order_by('-pk')
output = []
for wellness_center in wellness_centers:
referrals_to_me_count = Referrals.objects.referrals_from(wellness_center.user, count=True)
output.append({
'title': wellness_center.user.username,
'url': wellness_center.get_full_url,
'date': wellness_center.user.date_joined,
'providers': referrals_to_me_count,
})
return render_to_response('about/wellness_center_list.html', {
'wellness_centers': output,
}, context_instance=RequestContext(request))
def get_city_select_code(query):
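    # Pick a representative Zipcode id for the submitted city/state (the max id among matches),
    # falling back to 0; the search template receives it as 'city_select_code'.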
if 'city' in query and 'state' in query:
if query['city'] and query['state']:
zipcodes = Zipcode.objects\
.filter(city=query['city'].upper(), state=query['state'])\
.values_list('id')
if zipcodes:
return max(zipcodes)[0]
return 0
DAY_VALUES = [1, 2, 3, 7, 14, 30, 60, 90, 180, 365]
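# Rolling look-back windows, in days, used by the stats code below to bucket counts
# (e.g. "in the last 1, 2, 3, 7, ... days").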
def render_search(extra_context, request=None, template=None):
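    # Build the shared search-page context (search form, compose form, modality menu, AJAX search URL)
    # and hand off to phonegap.utils.render_page together with any extra context supplied by the caller.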
if template is None:
template = 'about/search_results.html'
ctx = {
'search_form': HealerSearchForm(),
'compose_form': ComposeFormUnregistered(),
'modality_menu': get_modality_menu(),
'no_find_popup': True,
'url_search': '%s%s' % (settings.DEFAULT_HTTP_PROTOCOL, full_url(reverse('search_ajax'))),
}
return render_page(request, template, ctx, extra_context)
def search(request, template_name=None, modality=None,
city=None, specialty=None, all=False, skip_first_hr=False,
state=None, embed_user=None, phonegap=False):
"""
Search providers by parameters of HealerSearchForm
skip_first_hr - false to show hr in infinite scroll
"""
def get_concierge_and_city():
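        # Find a Concierge provider located within 50 miles of the search point and return it with the
        # matching city name; returns (None, None) when there is no point or no nearby concierge.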
if not point:
return None, None
concierges = Concierge.objects.all().values_list('pk', flat=True)
healers = Healer.objects.filter(
clientlocation__location__point__distance_lte=(point, D(mi=50)), pk__in=concierges).distinct()
if healers:
healer = healers[0]
return healers[0], healer.clientlocation_set.filter(location__point__distance_lte=(point, D(mi=50)))[0].location.city
return None, None
def get_stats():
def filter_last_30_days(qset, field_name, date_field_name):
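            # Apply a dynamic "<field>__<date_field>__gte" filter so the queryset only keeps
            # rows from the last 30 days.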
            filter_name = '%s__%s__gte' % (field_name, date_field_name)
            filters = {filter_name: datetime.today() - timedelta(30)}
return qset.filter(**filters)
def get_specialties_top10():
specialties_top10 = (Modality.objects_approved
.values('title')
.annotate(healers=Count('healer'))
.order_by('-healers'))
specialties_top10_complete = (Modality.objects_approved
.filter(healer__in=healers_complete)
.values('title')
.annotate(healers_c=Count('healer'))
.order_by('-healers_c'))
specialties_top10 = specialties_top10[:10]
specialties_top10_complete = specialties_top10_complete[:10]
return healers_complete_combiner(
specialties_top10, specialties_top10_complete)
def get_specialties_categories():
specialties_categories = (ModalityCategory.objects
.values('title')
.annotate(healers=Count('modality__healer'))
.order_by('-healers'))
specialties_categories_complete = (ModalityCategory.objects
.filter(modality__healer__in=healers_complete)
.values('title')
.annotate(healers_c=Count('modality__healer'))
.order_by('-healers_c'))
return healers_complete_combiner(
specialties_categories, specialties_categories_complete)
def get_healers_in_zip():
zip_city = (Zipcode.objects
.filter(code__in=TOP_CITIES)
.values('code', 'city', 'state', 'point'))
healers_in_zip = {}
healers_in_zip_complete = {}
for zipcode in zip_city:
hz = Healer.objects.filter(
clientlocation__location__point__distance_lte=
(zipcode['point'], D(mi=50))).distinct()
healers_in_zip.update({zipcode['code']: hz.count()})
hz = Healer.objects.filter(
clientlocation__location__point__distance_lte=
(zipcode['point'], D(mi=50)),
pk__in=healers_complete).distinct()
healers_in_zip_complete.update({zipcode['code']: hz.count()})
healers_in_zip = [{
'code': z['code'],
'city': z['city'],
'state': z['state'],
'healers': healers_in_zip.get(z['code'], 0),
'healers_c': healers_in_zip_complete.get(z['code'], 0)
} for z in zip_city]
healers_in_zip.sort(key=lambda h: TOP_CITIES.index(h['code']))
return healers_in_zip
def get_intake_form():
now = datetime.now().date()
intake_form_stats = {'created': [], 'sent': []}
intake_forms = IntakeForm.objects.all()
intake_forms_sent = IntakeFormSentHistory.objects.all()
for n in DAY_VALUES:
start_day = now - timedelta(n)
intake_form_stats['created'].append(intake_forms.filter(
created_at__range=(start_day, now)).count())
intake_form_stats['sent'].append(intake_forms_sent.filter(
created_at__range=(start_day, now)).count())
intake_form_stats['created'].append(intake_forms.count())
intake_form_stats['sent'].append(intake_forms_sent.count())
return intake_form_stats
def get_messages():
now = datetime.now().date()
message_stats = {'registered': [], 'unregistered': []}
messages_reg_all = Message.objects.all()
messages_unreg_all = UnregisteredMessage.objects.all()
for n in DAY_VALUES:
start_day = now - timedelta(n)
message_stats['registered'].append(messages_reg_all
.filter(sent_at__range=(start_day, now)).count())
message_stats['unregistered'].append(messages_unreg_all
.filter(sent_at__range=(start_day, now)).count())
message_stats['registered'].append(messages_reg_all.count())
message_stats['unregistered'].append(messages_unreg_all.count())
return message_stats
def get_source():
how_did_you_find_us_stats = []
for item in Client.LINK_SOURCE_CHOICES:
how_did_you_find_us_stats.append({'name': item[1],
'total': Client.objects.filter(link_source=item[0]).count()})
return how_did_you_find_us_stats
def get_weekstat():
NUM_WEEKS = 5
now = datetime.now().date()
weekstat = {}
for week in range(0, NUM_WEEKS):
start_week = now - timedelta(weeks=week + 1)
end_week = start_week + timedelta(days=7)
weekstat['week_%s' % week] = [
start_week,
end_week - timedelta(days=1)]
providers_all = (Healer.objects
.filter(user__is_active=True,
user__date_joined__range=[start_week, end_week])
.count())
providers_complete = (Healer.objects
.filter(user__is_active=True, pk__in=healers_complete,
user__date_joined__range=[start_week, end_week])
.count())
weekstat['week_%s_all' % week] = providers_all
weekstat['week_%s_complete' % week] = providers_complete
clients = (Client.objects
.filter(user__is_active=True,
user__date_joined__range=[start_week, end_week])
.exclude(id__in=providers)
.count())
weekstat['week_%s_clients' % week] = clients
appointments = (Appointment.objects.without_relations()
.filter(created_date__range=[start_week, end_week])
.count())
weekstat['week_%s_appointments' % week] = appointments
return weekstat
def get_30_day_user_stats():
NUM_DAYS = 30
now = datetime.now().date()
daystat = {'n': [], 'all': [], 'complete': [],
'clients': [], 'appointments': []}
for n in range(1, NUM_DAYS + 1):
start_day = now - timedelta(n)
end_day = start_day + timedelta(1)
all = (Healer.objects
.filter(user__is_active=True,
user__date_joined__range=[start_day, end_day])
.count())
complete = (Healer.objects
.filter(user__is_active=True, pk__in=healers_complete,
user__date_joined__range=[start_day, end_day])
.count())
clients = (Client.objects
.filter(user__is_active=True,
user__date_joined__range=[start_day, end_day])
.exclude(id__in=providers).count())
appointments = (Appointment.objects.without_relations()
.filter(created_date__range=[start_day, end_day]).count())
daystat['n'].append(n)
daystat['all'].append(all)
daystat['complete'].append(complete)
daystat['clients'].append(clients)
daystat['appointments'].append(appointments)
return daystat
def get_posts_and_videos_stats():
now = datetime.now().date()
daystat = {'posts_all': [], 'posts_published': [], 'videos': []}
posts_all_total = BlogPost.objects.all()
posts_published_total = posts_all_total.filter(status=2)
videos_total = ClientVideo.objects.all()
for n in DAY_VALUES:
start_day = now - timedelta(n)
posts_all = posts_all_total.filter(publish_date__range=(start_day, now)).count()
posts_published = posts_published_total.filter(publish_date__range=(start_day, now)).count()
videos = videos_total.filter(date_added__range=(start_day, now)).count()
daystat['posts_all'].append(posts_all)
daystat['posts_published'].append(posts_published)
daystat['videos'].append(videos)
daystat['posts_all'].append(posts_all_total.count())
daystat['posts_published'].append(posts_published_total.count())
daystat['videos'].append(videos_total.count())
return daystat
def get_invs():
# ['2',] maybe more statuses?
invs = {}
all_invitations = SiteJoinInvitation.objects.filter(status__in=['2', ])
client_invitations = SiteJoinInvitation.objects.filter(is_to_healer=False, status__in=['2', ])
provider_invitations = SiteJoinInvitation.objects.filter(is_to_healer=True, status__in=['2', ])
invs['all_total'] = all_invitations.count()
invs['all_dated'] = (all_invitations
.filter(sent__range=[start_date, timezone.now()])
.extra({'day': "(EXTRACT (DAY FROM (now() - sent)))"})
.values('day')
.annotate(qty=Count('id')))
invs['client_total'] = client_invitations.count()
invs['client_dated'] = (client_invitations
.filter(sent__range=[start_date, timezone.now()])
.extra({'day': "(EXTRACT (DAY FROM (now() - sent)))"})
.values('day')
.annotate(qty=Count('id')))
invs['provider_inv_total'] = provider_invitations.filter(create_friendship=False).count()
invs['provider_inv_dated'] = (provider_invitations
.filter(create_friendship=False, sent__range=[start_date, timezone.now()])
.extra({'day': "(EXTRACT (DAY FROM (now() - sent)))"})
.values('day')
.annotate(qty=Count('id')))
invs['provider_rec_total'] = provider_invitations.filter(create_friendship=True).count()
invs['provider_rec_dated'] = (provider_invitations
.filter(create_friendship=True, sent__range=[start_date, timezone.now()])
.extra({'day': "(EXTRACT (DAY FROM (now() - sent)))"})
.values('day')
.annotate(qty=Count('id')))
invs['referrals_total'] = ReferralsSent.objects.all().count()
invs['referrals'] = (ReferralsSent.objects
.filter(date_sent__range=[start_date, timezone.now()])
.extra({'day': "(EXTRACT (DAY FROM (now() - date_sent)))"})
.values('day')
.annotate(qty=Count('id')))
invs['all_dated'] = calculate_daily_inv(invs['all_dated'])
invs['client_dated'] = calculate_daily_inv(invs['client_dated'])
invs['provider_inv_dated'] = calculate_daily_inv(invs['provider_inv_dated'])
invs['provider_rec_dated'] = calculate_daily_inv(invs['provider_rec_dated'])
invs['referrals'] = calculate_daily_inv(invs['referrals'])
return invs
def get_stats_dated(objects):
def convert_to_dated(providers):
output = {}
now = datetime.now().date()
for n in DAY_VALUES:
start_date = now - timedelta(n)
providers_count = providers.filter(
user__date_joined__range=[start_date, now]).count()
output['day%d' % n] = providers_count
return output
# \
# .extra({'day': "(EXTRACT (DAY FROM (now() - \"auth_user\".\"date_joined\")))"}) \
# .values('day') \
# .annotate(qty=Count('id'))
# return calculate_daily_inv(providers)
return {
'count': objects.count(),
'dated': convert_to_dated(objects),
}
def get_stats_daily(objects, created=False, added=False,
created_reversed=False):
def get_filter():
if added:
return 'added__range'
elif created:
return 'date_created__range'
elif created_reversed:
return 'created_date__range'
return 'user__date_joined__range'
def get_extra():
if added:
return {'day': '(EXTRACT (DAY FROM (now() - added)))'}
elif created:
return {'day': '(EXTRACT (DAY FROM (now() - date_created)))'}
elif created_reversed:
return {'day': '(EXTRACT (DAY FROM (now() - created_date)))'}
return {'day': '(EXTRACT (DAY FROM (now() - "auth_user"."date_joined")))'}
def get_count_id_name():
if created:
return 'user_assoc'
return 'id'
return {'count': objects.count(),
'dated': calculate_daily_inv(objects
.filter(**{get_filter(): [start_date, timezone.now()]})
.extra(get_extra())
.values('day')
.annotate(qty=Count(get_count_id_name())))}
providers = Healer.objects.all().order_by('-id')
healers_complete = Healer.complete.values_list('id', flat=True)
start_date = timezone.now() - timezone.timedelta(days=365)
wellness_centers_all = WellnessCenter.objects.filter(user__is_active=True)
wellness_centers_complete = wellness_centers_all.filter(pk__in=healers_complete)
providers_all = (Healer.objects
.filter(user__is_active=True)
.exclude(pk__in=wellness_centers_all))
providers_complete = (providers_all
.filter(pk__in=healers_complete)
.exclude(pk__in=wellness_centers_complete))
providers_blank_about = providers_all.filter(about='')
providers_blank_location = providers_all.filter(clientlocation__isnull=True)
providers_blank_avatar = providers_all.filter(user__avatar__isnull=True)
providers_using_schedule = providers_complete.filter(scheduleVisibility=Healer.VISIBLE_EVERYONE)
facebook_imports = (UserAssociationDated.objects
.filter(user_assoc__user__id__in=providers, user_assoc__service='facebook'))
google_imports = (UserAssociationDated.objects
.filter(user_assoc__user__id__in=providers, user_assoc__service='google'))
clients = Client.objects.filter(user__is_active=True).exclude(id__in=providers)
healers_with_notes = (Healer.objects.all()
.annotate(number_of_notes=Count('notes'))
.filter(number_of_notes__gt=0))
stripe_connect_users = UserAssociation.objects.filter(service='stripe').values_list('user', flat=True)
stripe_connect_healers = Healer.objects.filter(user__in=stripe_connect_users)
payments = Payment.objects.all()
payments_healers = payments.values_list('appointment__healer', flat=True).distinct()
payments_all = []
for healer in payments_healers:
payments_all.append((Healer.objects.get(pk=healer), payments.filter(appointment__healer=healer).aggregate(total_charge=Sum('amount')).values()[0]))
payments_all = sorted(payments_all, key=lambda tup: tup[1], reverse=True)
stats = {
'top_city_searches': (filter_last_30_days(Zipcode.objects.all(), 'healersearchhistory', 'created_at')
.annotate(num_of_searches=Count('healersearchhistory'))
.exclude(num_of_searches=0).order_by('-num_of_searches')[:10]),
'top_modality_searches': (filter_last_30_days(Modality.objects_approved.all(), 'healersearchhistory', 'created_at')
.annotate(num_of_searches=Count('healersearchhistory'))
.exclude(num_of_searches=0).order_by('-num_of_searches')[:10]),
'healers_with_most_appointments': (filter_last_30_days(Healer.objects.all(), 'healer_appointments', 'created_date')
.annotate(num_appointments=Count('healer_appointments'))
.order_by('-num_appointments')[:10]),
'specialties_top10': get_specialties_top10(),
'specialties_categories': get_specialties_categories(),
'healers_in_zip': get_healers_in_zip(),
'intake_form_stats': get_intake_form(),
'messages': get_messages(),
'source': get_source(),
'weekstat': get_weekstat(),
'users_daystat': get_30_day_user_stats(),
'posts_and_video_daystat': get_posts_and_videos_stats(),
'invs': get_invs(),
'wellness_centers_all': get_stats_dated(wellness_centers_all),
'providers_all': get_stats_dated(providers_all),
'wellness_centers_complete': get_stats_dated(wellness_centers_complete),
'providers_complete': get_stats_dated(providers_complete),
'providers_blank_about': get_stats_daily(providers_blank_about),
'providers_blank_location': get_stats_daily(providers_blank_location),
'providers_blank_avatar': get_stats_daily(providers_blank_avatar),
'providers_using_schedule': get_stats_daily(providers_using_schedule),
'recommendations': get_stats_daily(Referrals.objects, added=True),
'facebook_imports': get_stats_daily(facebook_imports, created=True),
'google_imports': get_stats_daily(google_imports, created=True),
'clients': get_stats_daily(clients),
'appointments': get_stats_daily(
Appointment.objects.without_relations(), created_reversed=True),
'hr': {
'requests_count': HealingRequest.objects.count(),
'searches_count': HealingRequestSearch.objects.count(),
'saved_searches_count': HealingRequestSearch.objects.filter(saved=True).count(),
'people_contacted_count': MessageExtra.objects.filter(
source=MessageExtra.SOURCE_CHOICE_HR).distinct('message').count(),
},
'notes': {
'healers_with_most_notes': (healers_with_notes
.order_by('-number_of_notes')[:10]),
'total_number_of_notes': Note.objects.all().count(),
'number_of_healers_with_notes': healers_with_notes.count(),
'number_of_healers_with_more_than_seven_notes': healers_with_notes.filter(number_of_notes__gt=7).count()
},
'stripe': {
'stripe_connect_users_count': stripe_connect_users.count(),
'stripe_connect_percentage_fee': stripe_connect_healers.filter(
booking_healersource_fee=Healer.BOOKING_HEALERSOURCE_FEE_PERCENTAGE).count(),
'stripe_connect_fixed_fee': stripe_connect_healers.filter(
booking_healersource_fee=Healer.BOOKING_HEALERSOURCE_FEE_FIXED).count(),
'stripe_connect_total_amount': Payment.objects.all().aggregate(Sum('amount')).values()[0] / 100,
'gc_enabled_total': Healer.objects.filter(gift_certificates_enabled=True).count(),
'BOOKING_HEALERSOURCE_FEE': settings.BOOKING_HEALERSOURCE_FEE,
},
'ghp': {
'total_healing_sent': SentHealing.total_healing_sent(),
'number_of_members': ghp_members().count(),
},
'phonegap_registrations_number': Client.objects.filter(
signup_source=Client.SIGNUP_SOURCE_APP).count(),
'ambassador': {
'top_five': (Client.objects
.exclude(ambassador=None)
.annotate(number_of_signed_up_users=Count('ambassador'))
.order_by('-number_of_signed_up_users')[:5]),
'total': Client.objects.filter(ambassador_program=True).count(),
'number_of_signed_up_users': Client.objects.exclude(ambassador=None).count(),
},
'payments_notes_subscriptions_total': Customer.objects.exclude(payment_notes=0).count(),
'payments_all': payments_all
}
return providers[:50], stats
message = ""
point = None
query = request.GET.copy()
remote_sessions = query.pop('remote_sessions', [False])[0]
exclude_centers = query.pop('exclude_centers', [False])[0]
if city is not None:
if search_top_cities.get(city, False) is not False:
point = Zipcode.objects.filter(
code=search_top_cities.get(city))[0]
query.update({
'zipcode': point.code,
'city': point.city,
'state': point.state,
'search_city_or_zipcode': '%s %s' % (point.city, point.state)
})
else:
if city != 'all':
point = Zipcode.objects.filter(city=city)
if state is not None:
point = point.filter(state=state)
if len(point) > 0:
query.update({
'zipcode': point[0].code,
'city': point[0].city,
'state': point[0].state,
'search_city_or_zipcode': '%s %s' % (point[0].city,
point[0].state)
})
else:
raise Http404
city_select_code = get_city_select_code(query)
if specialty is not None:
if specialty in map(slugify, search_top_specialties):
idx = map(slugify, search_top_specialties).index(specialty)
modality = Modality.objects_approved.filter(
title=search_top_specialties[idx])[0]
query.update({
'modality_id': modality.pk,
'modality': modality.title
})
elif specialty in map(slugify, search_top_categories):
idx = map(slugify, search_top_categories).index(specialty)
category = ModalityCategory.objects.filter(
title=search_top_categories[idx])[0]
query.update({
'modality_category_id': category.pk,
'modality': category.title,
})
else:
category = ModalityCategory.objects.filter(title=specialty)
if len(category) > 0:
query.update({
'modality_category_id': category[0].pk,
'modality': category[0].title,
})
else:
modality = Modality.objects_approved.filter(title=specialty)
if len(modality) > 0:
query.update({
'modality_id': modality[0].pk,
'modality': modality[0].title
})
else:
raise Http404
if query.get('modality_id', False):
query['modality3'] = 'c:' + str(query['modality_id'])
if query.get('modality_category_id', False):
query['modality3'] = 'p:' + str(query['modality_category_id'])
if not query.get('search_city_or_zipcode', False):
query['search_city_or_zipcode'] = 'Everywhere'
form = HealerSearchForm(query or None)
zipcode = modality_category_id = modality_id = concierge = concierge_city = concierge_form = None
providers = []
if form.is_valid() and not all:
point, zipcode, message = process_zipcode(form, request)
modality_category_id = form.cleaned_data.get('modality_category_id')
modality_id = form.cleaned_data.get('modality_id')
if modality_id:
modality = Modality.objects_approved.filter(id=modality_id)[0]
elif modality_category_id:
modality = ModalityCategory.objects.filter(id=modality_category_id)[0]
name_or_email = form.cleaned_data.get('name_or_email')
request.session['search_state'] = form.cleaned_data
if not message or settings.DEBUG:
filter_by_referral = None
if embed_user and not embed_user.client.embed_search_all:
filter_by_referral = embed_user
providers, more = get_healers_geosearch(
request, None, point, modality_category_id, modality_id,
name_or_email, remote_sessions=remote_sessions,
exclude_centers=exclude_centers,
filter_by_referral=filter_by_referral)
concierge, concierge_city = get_concierge_and_city()
if concierge is not None:
concierge_form = ConciergeForm(request.POST or None)
if concierge_form.is_valid():
concierge_form.email_data(concierge.user)
if request.user.is_authenticated() and concierge.has_intake_form():
concierge.send_intake_form(request.user.client)
request.session['concierge_request'] = True
return redirect(reverse('intake_form_answer', args=[concierge.username()]))
else:
return redirect(reverse('concierge_thanks', args=[concierge.username()]))
else:
request.session['point'] = None
modality_id = modality
name_or_email = None
form = HealerSearchForm(initial=request.session.get('search_state', None))
stats = None
if all:
if request.user.is_superuser or request.user.is_staff:
providers, stats = get_stats()
else:
raise Http404
embed = None
if embed_user:
embed = {
'username': embed_user.username,
'background_color': embed_user.client.embed_background_color
}
search_display_type = request.GET.get('search_display_type', 'list')
ctx = {
'stats': stats,
"search_form": form,
'search_display_type': search_display_type,
"found_providers": providers,
"message": message,
"modality_id": modality_id,
"modality_category_id": modality_category_id,
"modality": modality,
"zipcode": zipcode,
'all': all,
'skip_first_hr': skip_first_hr,
'city_select_code': city_select_code,
'concierge': concierge,
'concierge_city': concierge_city,
'concierge_form': concierge_form,
'remote_sessions': remote_sessions,
'embed': embed,
'phonegap': phonegap,
}
return render_search(ctx, request, template_name)
def process_zipcode(form, request):
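    # Resolve the submitted zipcode/city to a geographic point, stash it in the session, log the search,
    # and return (point, zipcode, message), where message is only set when the zipcode cannot be found.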
point = None
message = None
zipcode = form.get_zipcode()
if zipcode:
point = zipcode.point
request.session['point'] = point
request.session['zipcode'] = zipcode.code
else:
request.session['zipcode'] = ""
# if not point: #and not settings.DEBUG:
request.session['point'] = None
if form.cleaned_data['zipcode'] and form.cleaned_data['zipcode'] != '0':
message = "Could not find zipcode %s" % form.cleaned_data['zipcode']
user_agent = request.META.get('HTTP_USER_AGENT')
if user_agent and user_agent.find('compatible') == -1:
add_to_healer_search_history(form.data.copy(), request.user, zipcode)
return point, zipcode, message
def tour(request, template_name="about/tour.html"):
client_signup_form = ClientSignupForm()
form = client_signup_form
return render_to_response(template_name, {
"client_signup_form": client_signup_form,
"form": form,
}, context_instance=RequestContext(request))
def healers_complete_combiner(healers, healers_comp):
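    # Merge two per-title count lists - all healers ('healers') and healers with complete profiles
    # ('healers_c') - into one list of dicts, defaulting missing counts to 0 and sorting by 'healers' descending.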
sp = defaultdict(dict)
for dd in (healers, healers_comp):
for d in dd:
if 'healers' not in sp[d['title']]:
sp[d['title']].update({'healers': 0})
if 'healers_c' not in sp[d['title']]:
sp[d['title']].update({'healers_c': 0})
sp[d['title']].update(d)
return sorted([d[1] for d in sp.items()], key=lambda k: k['healers'],
reverse=True)
def calculate_daily_inv(inv_dated):
"""
    Build a dict of cumulative invitation counts for the 1, 2, 3, 7, 14, ... day look-back windows.
"""
    # boilerplate for invitations per day
empty_dates = [{'day': d, 'qty': 0} for d in range(1, 366)]
dt = defaultdict(dict)
for dd in (empty_dates, inv_dated):
for d in dd:
dt[d['day']].update(d)
inv_dated_final = {
'day1': 0,
'day2': 0,
'day3': 0,
'day7': 0,
'day14': 0,
'day30': 0,
'day60': 0,
'day90': 0,
'day180': 0,
'day365': 0,
}
tsum = 0
for cid in dt.values():
tsum += cid['qty']
for d_value in DAY_VALUES:
if cid['day'] <= d_value:
inv_dated_final['day%s' % d_value] = tsum
break
return inv_dated_final
@user_authenticated
def all_users(request, healers_only=False, center_clients_only=False, **kwargs):
if not request.user.is_superuser and (healers_only or not center_clients_only):
raise Http404
fname_str = ''
if healers_only:
profiles = Healer.objects.all().order_by('-user__date_joined')
if kwargs.pop('boston', False):
print Zipcode.objects.get(code='02134').point
profiles = Healer.objects.filter(clientlocation__location__point__distance_lte=(Zipcode.objects.get(code='02134').point, D(mi=100)) ).distinct()
fname_str = 'Healers'
else:
if center_clients_only:
profiles = sorted([o['friend'].client for o in Clients.objects.friends_for_user(request.user)], key=lambda k: k.user.first_name)
else:
profiles = Client.objects.filter(user__is_active=True).order_by('-user__date_joined')
fname_str = 'Clients'
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="All_%s_%s.csv"' % (fname_str, str(datetime.today()).split(' ')[0])
writer = UnicodeWriter(response)
for p in profiles:
u = p.user
writer.writerow([unicode(u.first_name), unicode(u.last_name), unicode(u.email), get_account_type(u), unicode(u.date_joined)])
return response
def concierge_thanks(request, username):
concierge = Concierge.objects.get(user__username='center')
message = render_to_string(
'about/concierge_thanks_message.html',
{'name': concierge.user.get_full_name()})
return render_to_response('about/concierge_thanks.html', {
'thanks_message': message
}, context_instance=RequestContext(request))
def render_notes_landing(extra_context, request=None):
template = 'landing_pages/notes.html'
ctx = {
'images': sorted(os.listdir(settings.LANDING_NOTES_ROOT)),
'signup_form': ClientSignupForm(),
}
return render_page(request, template, ctx, extra_context)
@utm_tracking
def landing_page_notes(request):
tracking_code_update = request.session.get('tracking_code', {})
tracking_code = settings.TRACKING_CODES['notes_landing']
tracking_code.update(tracking_code_update)
request.session['tracking_code'] = tracking_code
request.session['notes_lp'] = True
return render_notes_landing({}, request)
@utm_tracking
def landing_page_book(request):
return render_to_response('landing_pages/book.html', {
}, context_instance=RequestContext(request))
def render_book_thanks(extra_context, request=None):
ctx = {'thanks_message': render_to_string('about/book_download.html')}
if (request is not None and
request.META.get('HTTP_REFERER', '') == get_full_url('landing_page_book')):
ctx['tracking_code'] = settings.TRACKING_CODES['book_landing']
template = 'account/thanks.html'
return render_page(request, template, ctx, extra_context)
def book_thanks(request):
return render_book_thanks({}, request)
def render_error_report(extra_context, request=None):
ctx = {}
template = 'about/error_report.html'
return render_page(request, template, ctx, extra_context)
def error_report(request):
return render_error_report({}, request)
@cache_page(60 * 60 * 24)
def featured(request):
def get_centers():
centers = list(WellnessCenter.objects.all())
centers.sort(key=lambda x: len(x.get_healers_in_wcenter()), reverse=True)
return centers[:5]
session = request.session.get('search_state', None)
form = HealerSearchForm(initial=session)
point = request.session.get('point', None)
providers = get_healers_geosearch(request, point=point,
schedule_visibility=Healer.VISIBLE_EVERYONE, exclude_centers=True)[0]
centers = get_centers()
return render_to_response(
'about/featured.html', {
'search_form': form,
'found_providers': providers,
'centers': centers,
'compose_form': ComposeFormUnregistered(),
}, context_instance=RequestContext(request, {}))
def pricing(request):
def get_healers():
healers = list(Healer.complete.all().select_related('user'))
random.shuffle(healers)
return healers
return render_to_response(
'about/pricing.html', {
'healers': get_healers(),
'client_signup_form': ClientSignupForm(),
}, context_instance=RequestContext(request, {}))
@cache_page(60 * 60 * 24)
def learn(request, only_posts=False):
def get_clients_for_videos_list():
"""Return 5 Clients. One client - username 'healersource_tv', others - sorted by number of videos desc."""
def filter_videos(qset):
return qset.filter(videotype=ClientVideo.VIDEO_TYPE_CLIENT)
def get_clients_with_videos():
videos = ClientVideo.objects.all()
videos = filter_videos(videos)
clients = list(set(videos.values_list('client', flat=True)))
return Client.objects.filter(pk__in=clients).exclude(pk=healersource_tv_client.pk)
def get_clients_sorted_by_number_of_videos():
clients_videos_number = [
{'client': client,
'number_of_videos': filter_videos(client.videos.all()).count()}
for client in get_clients_with_videos()]
clients_videos_number.sort(key=lambda x: x['number_of_videos'], reverse=True)
return clients_videos_number[:4]
def get_hs_tv_client_number_of_videos():
videos = healersource_tv_client.videos.all()
return filter_videos(videos).count()
healersource_tv_client = Client.objects.get(user__username='healersource_tv')
return [{'client': healersource_tv_client,
'number_of_videos': get_hs_tv_client_number_of_videos()}] + get_clients_sorted_by_number_of_videos()
def get_top_bloggers():
"""Return 5 top bloggers."""
def get_users_with_posts():
posts = blogposts.distinct()
users = list(set(posts.values_list('user', flat=True)))
return User.objects.filter(pk__in=users)
users_number_of_articles = [{'user': user, 'number_of_articles': user.blogposts.count()} for user in get_users_with_posts()]
users_number_of_articles.sort(key=lambda x: x['number_of_articles'], reverse=True)
# return User.objects.annotate(number_of_articles=Count('blogposts')).order_by('-number_of_articles')[:5]
return users_number_of_articles[:5]
complete_users = Healer.complete.all().values_list('user', flat=True)
blogposts = BlogPost.objects.published().filter(user__in=complete_users)
posts = blogposts.order_by('-publish_date').distinct()
ctx = {'posts': posts}
if not only_posts:
ctx.update({
'clients_for_videos_list': get_clients_for_videos_list(),
'top_bloggers': get_top_bloggers(),
})
return render_to_response(
'about/learn.html', ctx, context_instance=RequestContext(request, {}))
class SearchAjax(APIView):
def get(self, request, format=None):
try:
display_type = request.GET.get('display_type', 'list')
embed_user = request.GET.get('embed_user', False)
phonegap = json.loads(request.GET.get('phonegap', 'false'))
except:
return HttpResponseBadRequest()
if embed_user:
embed_user = User.objects.get(username__exact=embed_user)
response = search(
request,
template_name='about/search/results_%s.html' % display_type,
skip_first_hr=True,
embed_user=embed_user,
phonegap=phonegap)
return Response(response.content)
class CitySelectCode(APIView):
def get(self, request, format=None):
try:
query = json.loads(request.GET['query'])
except:
return HttpResponseBadRequest()
return Response(get_city_select_code(query))
class FeaturedProviders(APIView):
"""Return html blocks with featured providers for homepage."""
def get(self, request, format=None):
try:
featured_providers = request.GET['featured_providers']
except:
return HttpResponseBadRequest()
available_providers = get_featured_providers()
try:
featured_providers = json.loads(featured_providers)
except ValueError:
return HttpResponseBadRequest()
result = ''
for provider in featured_providers:
if provider in available_providers:
try:
provider = Healer.objects.get(user__username__iexact=provider)
is_schedule_visible = provider.is_schedule_visible(request.user)
result += render_to_string(
'about/homepage_featured_provider.html',
{
'featured_provider': provider,
'is_schedule_visible': is_schedule_visible,
'hide': True,
})
except Healer.DoesNotExist:
pass
return Response(result)
class GeoSpecialties(APIView):
def get(self, request, format=None):
geo = int(request.GET.get('geo', False))
healers_complete = Healer.complete
if geo:
zipcode = Zipcode.objects.get(pk=geo)
healers = healers_complete.within_50_miles(zipcode)
if len(healers) == 0:
return Response()
else:
healers = healers_complete
healers = healers.values_list('pk', flat=True)
modalities = Modality.objects_approved.prefetch_related('category')
if len(healers) > 0:
modalities = modalities.filter(healer__pk__in=healers)
modalities = modalities.order_by('title').distinct()
categories = {}
categories_tree = {}
for modality in modalities:
for category in modality.category.all():
categories.update({
category.title: category.pk
})
if not categories_tree.get(category.title, False):
categories_tree[category.title] = []
if modality not in categories_tree[category.title]:
categories_tree[category.title].append(modality)
categories_tree = sorted(categories_tree.iteritems(), key=operator.itemgetter(0))
out = '<option selected="selected" value="">Any Specialty</option>'
for cat in categories_tree:
out += '<optgroup label="%s">' % cat[0]
out += '<option value="p:%s">Any type of %s</option>' % (categories[cat[0]], cat[0])
for spec in cat[1]:
out += '<option value="c:%s">%s</option>' % (spec.pk, spec.title)
out += '</optgroup>'
return Response(out)
class ContactUs(APIView):
def post(self, request, format=None):
try:
data = QueryDict(request.POST['data'])
except:
return HttpResponseBadRequest()
from_ = '%s <%s>' % (data['name'], data['email'])
send_hs_mail('Contact Us message', "about/contact_us.txt",
{'message': data['message']}, from_, ['[email protected]'])
return Response()
class EmailConfirmationRequiredView(TemplateView):
template_name = 'about/confirmation_required.html'
def get_context_data(self, **kwargs):
context = super(EmailConfirmationRequiredView, self).get_context_data(**kwargs)
context.update(kwargs)
return context
| [
"[email protected]"
] | |
ad41c5695cf98fe7852c8050c4ce5462a713dacf | de0d5fafb49f603ca4979d6f4c8eba52888714c2 | /applied_social_network_analysis/network_connectivity/visualizing_networks.py | fac080b713f74e529d55299dc831ae26587a0fd8 | [] | no_license | sivaneshl/python_data_analysis | 1ab42569d5cc843f79765332a30769588447d6f6 | 36af66ae9e03827f5dfe3cc64d993b84b1b31e9b | refs/heads/master | 2020-09-11T17:28:51.459573 | 2020-07-05T18:43:59 | 2020-07-05T18:43:59 | 222,137,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,977 | py | import networkx as nx
import matplotlib.pyplot as plt
G = nx.read_gpickle('resources/major_us_cities')
fig = plt.figure(figsize=(10, 9))
nx.draw_networkx(G) # uses default spring layout
# using random layout
plt.figure(figsize=(10, 9))
pos = nx.random_layout(G)
nx.draw_networkx(G, pos)
# circular layour
plt.figure(figsize=(10, 9))
pos = nx.circular_layout(G)
nx.draw_networkx(G, pos)
# using own layout by passing positions as the 'location' attribute
plt.figure(figsize=(10, 9))
pos = nx.get_node_attributes(G, 'location')
nx.draw_networkx(G, pos)
# change attributes
plt.figure(figsize=(10, 9))
nx.draw_networkx(G, pos, alpha=0.7, # transparency
with_labels=False, # remove labels
edge_color='0.4') # make edges grey
plt.axis('off') # remove the axis
plt.tight_layout() # reduce padding
# change node color, size and edge width
plt.figure(figsize=(10, 7))
node_color = [G.degree(v) for v in G] # set the node color based on the degree of the node
node_size = [0.0005*nx.get_node_attributes(G, 'population')[v] for v in G] # set the node size based on the population attribute
edge_width = [0.0005*G[u][v]['weight'] for u, v in G.edges()] # set the edge width based on weight of the edge
nx.draw_networkx(G, pos, node_size=node_size, node_color=node_color, edge_width=edge_width,
alpha=0.7, with_labels=False, edge_color='0.4', cmap=plt.cm.Blues)
plt.axis('off') # remove the axis
plt.tight_layout() # reduce padding
# draw specific edges and add labels to specific nodes
greater_than_770 = [x for x in G.edges(data=True) if x[2]['weight'] > 770]
nx.draw_networkx_edges(G, pos, edgelist=greater_than_770, edge_color='r', alpha=0.7, width=6)
nx.draw_networkx_labels(G, pos, labels={'Los Angeles, CA': 'LA', 'New York, NY': 'NYC'},
font_size=18, font_color='white')
plt.axis('off') # remove the axis
plt.tight_layout() # reduce padding
plt.show() | [
"[email protected]"
] | |
496912f6a5efc1cfacb3505a445c8d08b57768e8 | 601a5ac66309608772db5a9fa65faca4a0acad4f | /spyder/plugins/completion/providers/snippets/widgets/__init__.py | 6a3215fe6b5108d8e1e1b68a4a100bb027af8530 | [
"LGPL-2.0-or-later",
"BSD-3-Clause",
"LGPL-3.0-only",
"LicenseRef-scancode-free-unknown",
"LGPL-3.0-or-later",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-or-later",
"CC-BY-2.5",
"CC-BY-4.0",
"MIT",
"LGPL-2.1-only",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference",
"OFL-1.1",
"Python-2.0",
"GPL-2.0-only",
"Apache-2.0",
"GPL-3.0-only",
"GPL-1.0-or-later"
] | permissive | juanis2112/spyder | ea5e5727d4dbec5c3e40cb87aad644cc722ff27e | 0b4929cef420ba6c625566e52200e959f3566f33 | refs/heads/master | 2021-08-09T15:14:49.011489 | 2021-04-28T20:18:06 | 2021-04-28T20:18:06 | 158,863,080 | 1 | 1 | MIT | 2018-11-23T17:50:04 | 2018-11-23T17:50:04 | null | UTF-8 | Python | false | false | 291 | py | # -*- coding: utf-8 -*-
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Snippets related widgets."""
from .snippetsconfig import (
SnippetModelsProxy, SnippetTable, SUPPORTED_LANGUAGES_PY, PYTHON_POS)
| [
"[email protected]"
] | |
075faaca072840771480c8dad744b6400d118856 | 6268655719a46c9d2b6b38ea21babd8b877724dd | /ecom/urls.py | c96fa0cafc244270befb0361102e589c71c8180a | [] | no_license | MahmudulHassan5809/Ecommerce-Django | f84b968621eed61fdf08c55cd43c7a09d8bc8ba7 | f416536a6b5ce583283139e7271f3fcd1da49739 | refs/heads/master | 2022-12-31T15:39:34.405140 | 2020-10-24T18:15:38 | 2020-10-24T18:15:38 | 292,297,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,451 | py | from django.urls import path
from . import views
from django.contrib.auth import views as auth_views
from django.urls import reverse_lazy
from django.views.generic.base import TemplateView
app_name = "ecom"
urlpatterns = [
path('', views.HomeView.as_view(), name="home"),
path('category/<str:category_slug>/<int:category_id>/',
views.CategoryView.as_view(), name="category_view"),
path('category/product/filter/<int:category_id>/',
views.CategoryFilterView.as_view(), name='category_filter'),
path('search/product/',
views.CategoryFilterView.as_view(), name='search_product'),
path('sub-category/product/<int:category_id>/<int:subcat_id>/',
views.CategoryFilterView.as_view(), name='subcategory_product'),
path('product/detail/<str:product_slug>/<int:pk>/',
views.ProductDetailView.as_view(), name='product_detail'),
path('add-wishlist/<int:product_id>/',
views.AddWishListView.as_view(), name='add_wishlist'),
path('remove-wishlist/<int:product_id>/',
views.RemoveWishListView.as_view(), name='remove_wishlist'),
path('add-compare/<int:product_id>/',
views.AddCompareView.as_view(), name='add_compare'),
path('remove-compare/<int:product_id>/',
views.RemoveCompareView.as_view(), name='remove_compare'),
path('product/rating/<int:product_id>/',
views.ProductReviewView.as_view(), name='product_review')
]
| [
"[email protected]"
] | |
775f151f9bac97b1672a3701d47cd1066bbde102 | b23d627c04402ffaafdf6bf3af4e40ee027d015b | /viscum/scripting/exception.py | f85e17d80f46d223cc08a7da940581e59b8d6986 | [
"MIT"
] | permissive | brunosmmm/viscum | fad2e26f33eab74165633905144d6e8ccd205fb9 | a6b90ae6203998fc016ef89972a3b5d6cf441eb0 | refs/heads/master | 2021-01-17T11:16:08.725747 | 2018-03-23T13:59:44 | 2018-03-23T13:59:44 | 55,721,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | """Scripting Exceptions."""
class InvalidModuleError(Exception):
"""Invalid module."""
pass
class DeferScriptLoading(Exception):
"""Defer script loading."""
pass
class ScriptSyntaxError(Exception):
"""Script syntax error."""
pass
class CancelScriptLoading(Exception):
"""Cancel script loading process."""
pass
| [
"[email protected]"
] | |
ddc9ee2417f9490178e8cb2ea3a9cf5a360d9328 | 53e2254b83ac5ac71ff390a7c77070ff97b31c0b | /max_subarray.py | 234927450cf501f40718aefbeab37a510db496a4 | [] | no_license | Ponkiruthika112/codekataset1 | 83a02b96a6b35c33ae7c5a6d6b21c34e63a7eef4 | 4f164864a59e22122b647dd62d36d24e7ace7dac | refs/heads/master | 2020-04-15T04:58:44.427824 | 2019-09-06T10:10:12 | 2019-09-06T10:10:12 | 164,404,367 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | def subarray(s):
l=[" "]
for i in range(0,len(s)):
for j in range(i+1,len(s)+1):
l.append(s[i:j])
return l
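# Brute-force longest common substring: enumerate every substring of both
# inputs and print the longest string they share (the single-space sentinel
# keeps the candidate list non-empty when nothing else matches).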
a=input()
b=input()
x=subarray(a)
y=subarray(b)
d=[]
for i in x:
if y.count(i)!=0:
d.append([len(i),i])
d.sort(reverse=True)
print(d[0][1])
#subarray
| [
"[email protected]"
] | |
e6376f06046b2ad23a065b75e0b7a5dc34d784d9 | 1bc3894dfd8eef5605a6ff746462a5cfd3baef3c | /srctools/filesys.py | d5b9785feec09ca35ce79c0a75ae2d2325df832d | [
"Unlicense"
] | permissive | alicerunsonfedora/srctools | 0aa9c3ba0b4bfdfc94cd5ee3c54192b61e4c4ba6 | 3648c244f6d2c70371f95927fd374dcaf90a038d | refs/heads/master | 2020-04-01T08:32:30.460404 | 2018-10-15T01:22:23 | 2018-10-15T01:22:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,517 | py | """Implements a consistent interface for accessing files.
This allows accessing raw files, zips and VPKs in the same way.
Files are case-insensitive, and both slashes are converted to '/'.
"""
from zipfile import ZipFile, ZipInfo
import io
import os
from srctools.vpk import VPK, FileInfo as VPKFile
from srctools.property_parser import Property
from typing import (
Union, Iterator,
List, Tuple, Dict,
TextIO, BinaryIO,
)
__all__ = [
'File', 'FileSystem', 'get_filesystem',
'RawFileSystem', 'VPKFileSystem', 'ZipFileSystem',
'VirtualFileSystem', 'FileSystemChain',
]
def get_filesystem(path: str) -> 'FileSystem':
"""Return a filesystem given a path.
If the path is a directory this returns a RawFileSystem.
Otherwise it returns a VPK or zip, depending on extension.
"""
if os.path.isdir(path):
return RawFileSystem(path)
ext = path[-4:]
if ext == '.zip':
return ZipFileSystem(path)
if ext == '.vpk':
return VPKFileSystem(path)
raise ValueError('Unrecognised filesystem for "{}"'.format(path))
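# --- Editor's sketch below: not part of the library, only an illustration of
# --- how get_filesystem() and the File objects defined in this module are
# --- meant to be combined. The `path` argument is caller-supplied.
def _example_print_first_lines(path: str) -> None:
    """Print the first line of every file reachable from `path` (a sketch)."""
    fsys = get_filesystem(path)
    with fsys:  # The context manager pairs open_ref()/close_ref() for us.
        for file in fsys.walk_folder(''):
            with file.open_str() as stream:
                print(file.path, stream.readline().rstrip())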
class File:
"""Represents a file in a system. Should not be created directly."""
def __init__(self, system: 'FileSystem', path: str, data=None):
"""Create a File.
system should be the filesystem which matches.
path is the relative path for the file.
data is a filesystem-specific data, used to pass to open_bin() and open_str().
"""
self.sys = system
self.path = path
self._data = path if data is None else data
def __fspath__(self) -> str:
"""This can be interpreted as a path."""
return self.path
def open_bin(self) -> BinaryIO:
"""Return a file-like object in bytes mode.
This should be closed when done.
"""
return self.sys.open_bin(self._data)
def open_str(self, encoding='utf8') -> TextIO:
"""Return a file-like object in unicode mode.
This should be closed when done.
"""
return self.sys.open_str(self._data, encoding)
def cache_key(self) -> int:
"""Return a checksum or last-modified date suitable for caching.
This allows preventing re-parsing the file. If not possible, return -1.
"""
return self.sys._get_cache_key(self)
class FileSystem:
"""Base class for different systems defining the interface."""
def __init__(self, path: str):
self.path = os.fspath(path)
self._ref = None
self._ref_count = 0
def open_ref(self) -> None:
"""Lock open a reference to this system."""
self._ref_count += 1
if self._ref is None:
self._create_ref()
def close_ref(self) -> None:
"""Reverse self.open_ref() - must be done in pairs."""
self._ref_count -= 1
if self._ref_count < 0:
raise ValueError('Closed too many times!')
if self._ref_count == 0 and self._ref is not None:
self._delete_ref()
def read_prop(self, path: str, encoding='utf8') -> Property:
"""Read a Property file from the filesystem.
This handles opening and closing files.
"""
with self, self.open_str(path, encoding) as file:
return Property.parse(
file,
self.path + ':' + path,
)
def _check_open(self) -> None:
"""Ensure self._ref is valid."""
if self._ref is None:
raise ValueError('The filesystem must have a valid reference!')
def __eq__(self, other: 'FileSystem') -> bool:
"""Filesystems are equal if they have the same type and same path."""
if not isinstance(other, type(self)):
return NotImplemented # If both ours -> False
return os.path.normpath(self.path) == os.path.normpath(other.path)
def __hash__(self) -> int:
return hash(type(self).__name__ + os.path.normpath(self.path))
def __enter__(self) -> 'FileSystem':
"""Temporarily get access to the system's reference.
This makes it more efficient to access files.
"""
self.open_ref()
return self
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
self.close_ref()
def __iter__(self) -> Iterator[File]:
return self.walk_folder('')
def __getitem__(self, name: str) -> File:
return self._get_file(name)
def __contains__(self, name: str) -> bool:
return self._file_exists(name)
def _file_exists(self, name: str) -> bool:
try:
self._get_file(name)
return True
except FileNotFoundError:
return False
def _get_file(self, name: str) -> File:
"""Return a specific file."""
raise NotImplementedError
def walk_folder(self, folder: str) -> Iterator[File]:
"""Yield files in a folder."""
raise NotImplementedError
def _create_ref(self) -> None:
"""Create the _ref object."""
raise NotImplementedError
def _delete_ref(self) -> None:
"""Destroy and clean up the _ref object."""
raise NotImplementedError
def open_str(self, name: str, encoding='utf8') -> TextIO:
"""Open a file in unicode mode or raise FileNotFoundError.
This should be closed when done.
"""
raise NotImplementedError
def open_bin(self, name: str) -> BinaryIO:
"""Open a file in bytes mode or raise FileNotFoundError.
This should be closed when done.
"""
raise NotImplementedError
def _get_cache_key(self, file: File) -> int:
"""Return a checksum or last-modified date suitable for caching.
        This allows preventing re-parsing the file. If not possible, return -1.
"""
return -1
class FileSystemChain(FileSystem):
"""Chains several filesystem into one prioritised whole."""
def __init__(self, *systems: Union[FileSystem, Tuple[str, FileSystem]]):
super().__init__('')
self.systems = [] # type: List[Tuple[FileSystem, str]]
for sys in systems:
if isinstance(sys, tuple):
self.add_sys(*sys)
else:
self.add_sys(sys)
def __repr__(self):
return 'FileSystemChain(\n{})'.format(',\n '.join(map(repr, self.systems)))
def __eq__(self, other: 'FileSystemChain'):
if not isinstance(other, FileSystemChain):
return NotImplemented
return self.systems == other.systems
def __hash__(self):
return hash(tuple(self.systems))
@staticmethod
def get_system(file: File) -> FileSystem:
"""Retrieve the system for a File, if it was produced from a FileSystemChain."""
if not isinstance(file.sys, FileSystemChain):
raise ValueError('File is not from a FileSystemChain..')
return file._data.sys
def add_sys(self, sys: FileSystem, prefix=''):
"""Add a filesystem to the list."""
self.systems.append((sys, prefix))
# If we're currently open, apply that to the added systems.
if self._ref_count > 0:
sys.open_ref()
def _get_file(self, name: str) -> File:
"""Search for a file on each filesystem in turn."""
self._check_open()
for sys, prefix in self.systems:
full_name = os.path.join(prefix, name).replace('\\', '/')
try:
file_info = sys._get_file(full_name)
except FileNotFoundError:
pass
else:
# Pass the original file instance, so we can open
# from the original system.
return File(self, full_name, file_info)
raise FileNotFoundError(name)
def open_str(self, name: str, encoding='utf8') -> TextIO:
"""Open a file in unicode mode or raise FileNotFoundError.
This should be closed when done.
"""
if isinstance(name, File):
return name.open_str(encoding)
return self._get_file(name).open_str(encoding)
def open_bin(self, name: str) -> BinaryIO:
"""Open a file in bytes mode or raise FileNotFoundError.
This should be closed when done.
"""
if isinstance(name, File):
return name.open_bin()
return self._get_file(name).open_bin()
def walk_folder(self, folder: str) -> Iterator[File]:
"""Walk folders, not repeating files."""
done = set()
for file in self.walk_folder_repeat(folder):
folded = file.path.casefold()
if folded in done:
continue
done.add(folded)
yield file
def walk_folder_repeat(self, folder: str='') -> Iterator[File]:
"""Walk folders, but allow repeating files.
If a file is contained in multiple systems, it will be yielded
for each. The first is the highest-priority.
"""
for sys, prefix in self.systems:
full_folder = os.path.join(prefix, folder).replace('\\', '/')
for file in sys.walk_folder(full_folder):
yield File(
self,
os.path.relpath(file.path, prefix).replace('\\', '/'),
file,
)
def _delete_ref(self) -> None:
"""Creating and deleting refs affects the underlying systems."""
for sys, prefix in self.systems:
sys.close_ref()
self._ref = None
def _create_ref(self) -> None:
"""Creating and deleting refs affects the underlying systems."""
for sys, prefix in self.systems:
sys.open_ref()
self._ref = True
def _get_cache_key(self, file: File) -> int:
"""Return the last modified time of this file.
If individual timestamps are not stored, the modification time of the
filesystem is returned instead."""
# Delegate to the original File stored in ours.
if not isinstance(file.sys, FileSystemChain):
raise ValueError('File is not from a FileSystemChain..')
return file._data.cache_key()
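# Editor's sketch (hypothetical paths): systems added to a FileSystemChain are
# searched in order, so a loose override directory can shadow files that ship
# inside a VPK under the same relative path.
def _example_layered_read(override_dir: str, vpk_path: str, filename: str) -> bytes:
    """Read `filename`, preferring `override_dir` over the VPK at `vpk_path`."""
    chain = FileSystemChain(RawFileSystem(override_dir), VPKFileSystem(vpk_path))
    with chain:
        with chain[filename].open_bin() as stream:
            return stream.read()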
class VirtualFileSystem(FileSystem):
"""Access a dict as if it were a filesystem.
The dict should map file paths to either bytes or strings.
The encoding arg specifies how text data is presented if open_bin()
is called.
"""
def __init__(self, mapping: Dict[str, Union[str, bytes]], encoding='utf8'):
super().__init__('<virtual>')
self._mapping = {
self._clean_path(filename): (filename, data)
for filename, data in
dict(mapping).items()
}
self.bytes_encoding = encoding
def __eq__(self, other: 'VirtualFileSystem'):
if not isinstance(other, VirtualFileSystem):
return NotImplemented
return (
self.bytes_encoding == other.bytes_encoding and
self._mapping == other._mapping
)
def __hash__(self):
return hash(self.bytes_encoding) ^ hash(tuple(self._mapping.values()))
@staticmethod
def _clean_path(path: str) -> str:
"""Convert paths to one representation."""
return os.path.normpath(path).replace('\\', '/').casefold()
def open_bin(self, name: str) -> BinaryIO:
"""Return a bytes buffer for a 'file'."""
# We don't need this, but it should match other filesystems.
self._check_open()
try:
filename, data = self._mapping[self._clean_path(name)]
except KeyError:
raise FileNotFoundError(name)
if isinstance(data, str):
data = data.encode(self.bytes_encoding)
return io.BytesIO(data)
def open_str(self, name: str, encoding='utf8') -> TextIO:
"""Return a string buffer for a 'file'.
This performs universal newlines conversion.
The encoding argument is ignored for files which are
originally text.
"""
# We don't need this, but it should match other filesystems.
self._check_open()
try:
filename, data = self._mapping[self._clean_path(name)]
except KeyError:
raise FileNotFoundError(name)
if isinstance(data, bytes):
# Decode on the fly, with universal newlines.
return io.TextIOWrapper(
io.BytesIO(data),
encoding=encoding,
)
else:
# None = universal newlines mode directly.
# No encoding is needed obviously.
return io.StringIO(data, newline=None)
def walk_folder(self, folder: str) -> Iterator[File]:
# We don't need this, but it should match other filesystems.
self._check_open()
for filename, data in self._mapping.values():
yield File(self, filename)
def _file_exists(self, name: str) -> bool:
return self._clean_path(name) in self._mapping
def _get_file(self, name: str) -> File:
# We don't need this, but it should match other filesystems.
self._check_open()
try:
filename, data = self._mapping[self._clean_path(name)]
except KeyError:
raise FileNotFoundError(name)
return File(self, filename)
def _delete_ref(self) -> None:
"""The virtual filesystem doesn't need a reference to anything."""
self._ref = None
def _create_ref(self) -> None:
"""The virtual filesystem doesn't need a reference to anything."""
self._ref = True
class RawFileSystem(FileSystem):
"""Accesses files in a real folder.
This prohibits access to folders above the root.
"""
def __init__(self, path: str):
super().__init__(os.path.abspath(path))
def __repr__(self):
return 'RawFileSystem({!r})'.format(self.path)
def _resolve_path(self, path: str) -> str:
"""Get the absolute path."""
abs_path = os.path.abspath(os.path.join(self.path, path))
if not abs_path.startswith(self.path):
raise ValueError('Path "{}" escaped "{}"!'.format(path, self.path))
return abs_path
def walk_folder(self, folder: str) -> Iterator[File]:
"""Yield files in a folder."""
path = self._resolve_path(folder)
for dirpath, dirnames, filenames in os.walk(path):
for file in filenames:
rel_path = os.path.relpath(
os.path.join(dirpath, file),
self.path,
)
yield File(
self,
rel_path.replace('\\', '/'),
)
def open_str(self, name: str, encoding='utf8') -> TextIO:
"""Open a file in unicode mode or raise FileNotFoundError.
This should be closed when done.
"""
# We don't need this, but it should match other filesystems.
self._check_open()
return open(self._resolve_path(name), mode='rt', encoding=encoding)
def open_bin(self, name: str) -> BinaryIO:
"""Open a file in bytes mode or raise FileNotFoundError.
This should be closed when done.
"""
# We don't need this, but it should match other filesystems.
self._check_open()
return open(self._resolve_path(name), mode='rb')
def _file_exists(self, name: str) -> bool:
# We don't need this, but it should match other filesystems.
self._check_open()
return os.path.isfile(self._resolve_path(name))
def _get_file(self, name: str):
# We don't need this, but it should match other filesystems.
self._check_open()
if os.path.isfile(self._resolve_path(name)):
return File(self, name.replace('\\', '/'))
raise FileNotFoundError(name)
def _delete_ref(self) -> None:
"""The raw filesystem doesn't need a reference to anything."""
self._ref = None
def _create_ref(self) -> None:
"""The raw filesystem doesn't need a reference to anything."""
self._ref = True
def _get_cache_key(self, file: File) -> int:
"""Our cache key is the last modification time."""
try:
return os.stat(self._resolve_path(file.path)).st_mtime_ns
except FileNotFoundError:
return -1
class ZipFileSystem(FileSystem):
"""Accesses files in a zip file."""
def __init__(self, path: str, zipfile: ZipFile=None):
self._ref = None # type: ZipFile
self._name_to_info = {}
super().__init__(path)
if zipfile is not None:
# Use the zipfile directly, and don't close it.
self._ref_count += 1
self._ref = zipfile
def __repr__(self):
return 'ZipFileSystem({!r})'.format(self.path)
def walk_folder(self, folder: str) -> Iterator[File]:
"""Yield files in a folder."""
self._check_open()
# \\ is not allowed in zips.
folder = folder.replace('\\', '/').casefold()
for filename, fileinfo in self._name_to_info.items():
if filename.startswith(folder):
yield File(self, fileinfo.filename, fileinfo)
def open_bin(self, name: str):
"""Open a file in bytes mode or raise FileNotFoundError.
The filesystem needs to be open while accessing this.
"""
self._check_open()
# We need the zipinfo object.
if isinstance(name, ZipInfo):
info = name
else:
name = name.replace('\\', '/')
try:
info = self._name_to_info[name.casefold()]
except KeyError:
raise FileNotFoundError('{}:{}'.format(self.path, name)) from None
return self._ref.open(info)
def open_str(self, name: str, encoding='utf8'):
"""Open a file in unicode mode or raise FileNotFoundError.
The filesystem needs to be open while accessing this.
"""
# Zips only open in binary, so just open that, then wrap to decode.
return io.TextIOWrapper(self.open_bin(name), encoding)
def _get_file(self, name: str) -> File:
name = name.replace('\\', '/')
self._check_open()
try:
info = self._name_to_info[name.casefold()]
except KeyError:
raise FileNotFoundError('{}:{}'.format(self.path, name))
return File(self, name, info)
def _file_exists(self, name: str) -> bool:
self._check_open()
return name.replace('\\', '/').casefold() in self._name_to_info
def _delete_ref(self) -> None:
self._ref.close()
self._name_to_info.clear()
self._ref = None
def _create_ref(self) -> None:
self._ref = zipfile = ZipFile(self.path)
self._name_to_info.clear()
for info in zipfile.infolist():
# Some zipfiles include entries for the directories too. They have
# a trailing slash.
if not info.filename.endswith('/'):
self._name_to_info[info.filename.casefold()] = info
def _get_cache_key(self, file: File):
"""Return the CRC of the VPK file."""
return file._data.CRC
class VPKFileSystem(FileSystem):
"""Accesses files in a VPK file."""
def __init__(self, path: str):
self._ref = None # type: VPK
super().__init__(path)
def __repr__(self):
return 'VPKFileSystem({!r})'.format(self.path)
def _create_ref(self):
self._ref = VPK(self.path)
def _delete_ref(self):
# We only read from VPKs, so no cleanup needs to be done.
self._ref = None
def _file_exists(self, name: str):
self._check_open()
return name in self._ref
def _get_file(self, name: str):
try:
file = self._ref[name]
except KeyError:
raise FileNotFoundError(name) from None
return File(self, name.replace('\\', '/'), file)
def walk_folder(self, folder: str) -> Iterator[File]:
"""Yield files in a folder."""
# All VPK files use forward slashes.
folder = folder.replace('\\', '/')
for file in self._ref:
if file.dir.startswith(folder):
yield File(self, file.filename, file)
def open_bin(self, name: str) -> BinaryIO:
"""Open a file in bytes mode or raise FileNotFoundError."""
with self:
# File() calls with the VPK object we need directly.
if isinstance(name, VPKFile):
file = name
else:
try:
file = self._ref[name]
except KeyError:
raise FileNotFoundError(name)
return io.BytesIO(file.read())
def open_str(self, name: str, encoding='utf8') -> TextIO:
"""Open a file in unicode mode or raise FileNotFoundError."""
with self:
# File() calls with the VPK object we need directly.
if isinstance(name, VPKFile):
file = name
else:
try:
file = self._ref[name]
except KeyError:
raise FileNotFoundError(name)
# Wrap the data to treat it as bytes, then
# wrap that to decode and clean up universal newlines.
return io.TextIOWrapper(io.BytesIO(file.read()), encoding)
def _get_cache_key(self, file: File):
"""Return the CRC of the VPK file."""
return file._data.crc
| [
"[email protected]"
] | |
3b2c7bf4b9c033f46fa9264303c0326bb30d648c | 12c2168d1b2db8de3246f59e8f911a0a40ec0512 | /Produto/forms.py | b8a944f8220f46ce172ea1d92eea43233709754f | [] | no_license | carlafcf/BD_TADS | 1bc145aa8668f994ec45fb8dc20c0505a86cbbc5 | 72e835a281dade32072c4715d91825ed8b7483ca | refs/heads/master | 2023-04-03T07:54:48.646902 | 2021-03-30T00:06:36 | 2021-03-30T00:06:36 | 341,566,522 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | from django import forms
from django.db import connection
from django.core.exceptions import ValidationError
from .models import Produto
class ProdutoForm(forms.ModelForm):
class Meta:
model = Produto
fields = ['nome', 'descricao', 'fornecedor', 'quantidade_maxima',
'valor_unitario', 'licitacao', 'no_item']
def clean(self):
cleaned_data = super().clean()
        # Get the name that was entered on the form
nome = cleaned_data.get("nome")
        # Query whether any product already exists with this same name
with connection.cursor() as cursor:
cursor.execute("SELECT * FROM Produto_produto WHERE nome=%s", [nome])
resultado_produto = cursor.fetchall()
        # If the list is not empty, a product with the same name already exists
if (len(resultado_produto) != 0):
raise ValidationError("Já foi criado um produto com este nome. Escolha outro nome.")
| [
"[email protected]"
] | |
2c4578d7aad69ef2eb58b0b9ef7d419426c3e8b0 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_138/1393.py | 7306be9c406116a4b86b2512dbd82fc3a7f4b436 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,235 | py |
def compare_list(l1, l2):
l = []
for i,j in zip(l1,l2):
if i > j:
l.append(1)
else:
l.append(-1)
return l
def dwar(w1, w2):
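    # Deceitful play for player one: sort both sides, and while any of her
    # remaining blocks would lose its pairing, sacrifice her lightest block
    # against the opponent's heaviest; once every remaining pairing wins,
    # the number of blocks left is her score.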
w1.sort()
w2.sort()
while len(w1) > 0:
l = compare_list(w1, w2)
lset = list(set(l))
if len(lset) == 1 and lset[0] == 1:
return len(w1)
w1.pop(0)
w2.pop(-1)
return 0
# def dchoose_block(w1, w2):
# # naomi cheats, arranges ken's block from big to small and let him win initially
# # we expect w1 and w2 to be sorted
# if
#
def war(w1, w2):
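    # Honest play: against each of player one's blocks the opponent answers with
    # the lightest block that still wins (see choose_block), or throws away his
    # lightest block when nothing he has can win.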
score = 0
w2.sort()
for weight1 in w1:
optimal_weight = choose_block(w2, weight1)
if weight1 > optimal_weight:
score += 1
w2.pop(w2.index(optimal_weight))
return score
def choose_block(w, b):
# we expect w to be sorted
if b > w[-1]:
# use the minimum
return w[0]
# use the minimum that's higher than b
l = [x if x > b else 100 for x in w]
l.sort();
return l[0]
def main():
T = int(raw_input())
for i in range(T):
n = int(raw_input())
w1 = [float(a) for a in raw_input().split(" ")]
w2 = [float(a) for a in raw_input().split(" ")]
ww1 = w1[:]
ww2 = w2[:]
w1.sort()
w2.sort()
print("Case #%d: %d %d" % (i+1, dwar(w1, w2), war(ww1, ww2)))
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
143ded849c4e7c0e8ca61a4374f43a742eb7fd22 | 9c84378e88df12a83d3ca6dde5d16b76e3778a1b | /appengine/gce-backend/handlers_queues.py | a4b113c55398b459ac8e4955fe494dcd81c42646 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | eakuefner/luci-py | 681364457a43724965ee70168354e1c097e4d3df | d9a337e2fd5151eac24b3164963e086091d769a3 | refs/heads/master | 2021-01-15T14:58:37.310142 | 2015-10-06T19:08:08 | 2015-10-06T19:08:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,628 | py | # Copyright 2015 The Swarming Authors. All rights reserved.
# Use of this source code is governed by the Apache v2.0 license that can be
# found in the LICENSE file.
"""Task queues for the GCE Backend."""
import json
import logging
from google.appengine.ext import ndb
import webapp2
from components import decorators
from components import machine_provider
from components import net
import models
@ndb.transactional
def uncatalog_instances(instances):
"""Uncatalogs cataloged instances.
Args:
instances: List of instance names to uncatalog.
"""
put_futures = []
get_futures = [
models.Instance.generate_key(instance_name).get_async()
for instance_name in instances
]
while get_futures:
ndb.Future.wait_any(get_futures)
instances = [future.get_result() for future in get_futures if future.done()]
get_futures = [future for future in get_futures if not future.done()]
for instance in instances:
if instance.state == models.InstanceStates.CATALOGED:
# handlers_cron.py sets each Instance's state to
# CATALOGED before triggering InstanceGroupCataloger.
logging.info('Uncataloging instance: %s', instance.name)
instance.state = models.InstanceStates.UNCATALOGED
put_futures.append(instance.put_async())
else:
logging.info('Ignoring already uncataloged instance: %s', instance.name)
if put_futures:
ndb.Future.wait_all(put_futures)
else:
logging.info('Nothing to uncatalog')
class InstanceGroupCataloger(webapp2.RequestHandler):
"""Worker for cataloging instance groups."""
@decorators.require_taskqueue('catalog-instance-group')
def post(self):
"""Reclaim a machine.
Params:
dimensions: JSON-encoded string representation of
machine_provider.Dimensions describing the members of the instance
group.
instances: JSON-encoded list of instances in the instance group to
catalog:
policies: JSON-encoded string representation of machine_provider.Policies
governing the members of the instance group.
"""
dimensions = json.loads(self.request.get('dimensions'))
instances = json.loads(self.request.get('instances'))
policies = json.loads(self.request.get('policies'))
requests = []
instances_to_uncatalog = set()
for instance_name in instances:
instances_to_uncatalog.add(instance_name)
requests.append({
'dimensions': dimensions.copy(), 'policies': policies})
requests[-1]['dimensions']['hostname'] = instance_name
try:
responses = machine_provider.add_machines(requests).get('responses', {})
except net.Error as e:
logging.warning(e)
responses = {}
for response in responses:
request = response.get('machine_addition_request', {})
error = response.get('error')
instance_name = request.get('dimensions', {}).get('hostname')
if instance_name in instances:
if not error:
logging.info('Instance added to Catalog: %s', instance_name)
instances_to_uncatalog.discard(instance_name)
elif error == 'HOSTNAME_REUSE':
logging.warning('Hostname reuse in Catalog: %s', instance_name)
instances_to_uncatalog.discard(instance_name)
else:
logging.warning('Instance not added to Catalog: %s', instance_name)
else:
logging.info('Unknown instance: %s', instance_name)
uncatalog_instances(instances_to_uncatalog)
def create_queues_app():
return webapp2.WSGIApplication([
('/internal/queues/catalog-instance-group', InstanceGroupCataloger),
])
| [
"[email protected]"
] | |
c3b48a065a2c7682c9fc54e729f2fedf0c552bd6 | 4f8664ad0282872648307fd1cc693ddc75543748 | /swftp/test/functional/test_ftp.py | ff4255b2a9593388eea7b91a500c84af30c3b232 | [
"MIT"
] | permissive | benroeder/swftp | be4a6d2d6260df0c0bddd6ed1ff776e27bb411eb | eb4bdbf3bff5ee6924961cc12eeddf561b44b025 | refs/heads/master | 2021-01-18T08:52:17.226403 | 2013-04-03T12:44:59 | 2013-04-03T12:44:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,178 | py | """
See COPYING for license information.
"""
from twisted.trial import unittest
from twisted.internet import defer, reactor
from twisted.web.client import HTTPConnectionPool
import ftplib
import tempfile
import shutil
import time
import os
from . import get_config, has_item, create_test_file, clean_swift, \
compute_md5, upload_file, utf8_chars, get_swift_client
conf = get_config()
class FTPFuncTest(unittest.TestCase):
@defer.inlineCallbacks
def setUp(self):
self.pool = HTTPConnectionPool(reactor, persistent=True)
self.swift = get_swift_client(conf, pool=self.pool)
self.tmpdir = tempfile.mkdtemp()
self.ftp = get_ftp_client(conf)
yield clean_swift(self.swift)
@defer.inlineCallbacks
def tearDown(self):
shutil.rmtree(self.tmpdir)
self.ftp.close()
yield clean_swift(self.swift)
yield self.pool.closeCachedConnections()
def get_ftp_client(config):
for key in 'ftp_host ftp_port account username password'.split():
if key not in config:
raise unittest.SkipTest("%s not set in the test config file" % key)
hostname = config['ftp_host']
port = int(config['ftp_port'])
username = "%s:%s" % (config['account'], config['username'])
password = config['password']
ftp = ftplib.FTP()
ftp.connect(hostname, port)
ftp.login(username, password)
return ftp
class BasicTests(unittest.TestCase):
def test_get_client(self):
ftp = get_ftp_client(conf)
ftp.getwelcome()
ftp.quit()
class ClientTests(unittest.TestCase):
def test_get_many_client(self):
for i in range(32):
ftp = get_ftp_client(conf)
ftp.close()
def test_get_many_concurrent(self):
connections = []
for i in range(32):
ftp = get_ftp_client(conf)
connections.append(ftp)
time.sleep(10)
for ftp in connections:
ftp.close()
class RenameTests(FTPFuncTest):
def test_rename_account(self):
self.assertRaises(ftplib.error_perm, self.ftp.rename, '/', '/a')
@defer.inlineCallbacks
def test_rename_container(self):
yield self.swift.put_container('ftp_tests')
self.ftp.rename('ftp_tests', 'ftp_tests_2')
r, listing = yield self.swift.get_account()
self.assertTrue(has_item('ftp_tests_2', listing))
self.assertFalse(has_item('ftp_tests', listing))
@defer.inlineCallbacks
def test_rename_container_populated(self):
yield self.swift.put_container('ftp_tests')
yield self.swift.put_object('ftp_tests', 'a')
self.assertRaises(ftplib.error_perm, self.ftp.rename, 'ftp_tests',
'ftp_tests_2')
@defer.inlineCallbacks
def test_rename_object(self):
yield self.swift.put_container('ftp_tests')
yield self.swift.put_object('ftp_tests', 'a')
yield self.swift.put_object(
'ftp_tests', 'b',
headers={'Content-Type': 'application/directory'})
yield self.swift.put_object('ftp_tests', 'b/nested')
yield self.swift.put_object('ftp_tests', 'c/nested')
self.ftp.rename('ftp_tests/a', 'ftp_tests/a1')
r, listing = yield self.swift.get_container('ftp_tests')
self.assertTrue(has_item('a1', listing))
self.assertFalse(has_item('a', listing))
self.assertRaises(ftplib.error_perm, self.ftp.rename, 'ftp_tests/b',
'ftp_tests/b1')
self.assertRaises(ftplib.error_perm, self.ftp.rename, 'ftp_tests/c',
'ftp_tests/c1')
def test_rename_object_not_found(self):
self.assertRaises(ftplib.error_perm, self.ftp.rename, 'ftp_tests/a',
'ftp_tests/b')
class DownloadTests(FTPFuncTest):
@defer.inlineCallbacks
def _test_download(self, size, name):
yield self.swift.put_container('ftp_tests')
src_path, md5 = create_test_file(self.tmpdir, size)
yield upload_file(self.swift, 'ftp_tests', name, src_path, md5)
dlpath = '%s/%s.dat' % (self.tmpdir, name)
resp = self.ftp.retrbinary('RETR ftp_tests/%s' % name,
open(dlpath, 'wb').write)
self.assertEqual('226 Transfer Complete.', resp)
self.assertEqual(os.stat(dlpath).st_size, size)
self.assertEqual(md5, compute_md5(dlpath))
def test_zero_byte_file(self):
return self._test_download(0, '0b.dat')
def test_32kb_file(self):
return self._test_download(32 * 1024 + 1, '32kb.dat')
def test_1mb_file(self):
return self._test_download(1024 * 1024, '1mb.dat')
def test_10mb_file(self):
return self._test_download(1024 * 1024 * 10, '10mb.dat')
class UploadTests(FTPFuncTest):
@defer.inlineCallbacks
def _test_upload(self, size, name):
yield self.swift.put_container('ftp_tests')
src_path, md5 = create_test_file(self.tmpdir, size)
resp = self.ftp.storbinary('STOR ftp_tests/%s' % name,
open(src_path, 'rb'))
self.assertEqual('226 Transfer Complete.', resp)
headers = yield self.swift.head_object('ftp_tests', name)
self.assertEqual(md5, headers['etag'])
self.assertEqual(size, int(headers['content-length']))
def test_zero_byte_file(self):
return self._test_upload(0, '0b.dat')
def test_32kb_file(self):
return self._test_upload(1024 * 32 + 1, '32kb.dat')
def test_1mb_file(self):
return self._test_upload(1024 * 1024, '1mb.dat')
def test_10mb_file(self):
return self._test_upload(1024 * 1024 * 10, '10mb.dat')
class SizeTests(FTPFuncTest):
def test_size_root(self):
# Testing For Error Only
self.ftp.size('')
@defer.inlineCallbacks
def test_size_container(self):
yield self.swift.put_container('ftp_tests')
size = self.ftp.size('ftp_tests')
self.assertEqual(0, size)
@defer.inlineCallbacks
def test_size_directory(self):
yield self.swift.put_container('ftp_tests')
yield self.swift.put_object(
'ftp_tests', 'test_size_directory',
headers={'Content-Type': 'application/directory'})
size = self.ftp.size('ftp_tests/test_size_directory')
self.assertEqual(0, size)
@defer.inlineCallbacks
def test_size_object(self):
yield self.swift.put_container('ftp_tests')
src_path, md5 = create_test_file(self.tmpdir, 1024)
yield upload_file(self.swift, 'ftp_tests', 'test_size_object',
src_path, md5)
size = self.ftp.size('ftp_tests')
self.assertEqual(1024, size)
def test_size_container_missing(self):
self.assertRaises(ftplib.error_perm, self.ftp.size, 'ftp_tests')
def test_size_object_missing(self):
self.assertRaises(ftplib.error_perm, self.ftp.size,
'ftp_tests/test_size_container_missing')
@defer.inlineCallbacks
def test_size_dir_dir(self):
yield self.swift.put_container('ftp_tests')
yield self.swift.put_object(
'ftp_tests',
'%s/%s' % (utf8_chars.encode('utf-8'), utf8_chars.encode('utf-8')))
size = self.ftp.size('ftp_tests/%s' % utf8_chars.encode('utf-8'))
self.assertEqual(0, size)
class DeleteTests(FTPFuncTest):
@defer.inlineCallbacks
def test_delete_populated_container(self):
yield self.swift.put_container('sftp_tests')
yield self.swift.put_object(
'sftp_tests', 'dir1',
headers={'Content-Type': 'application/directory'})
self.assertRaises(ftplib.error_perm, self.ftp.rmd, 'sftp_tests')
@defer.inlineCallbacks
def test_delete_populated_dir(self):
yield self.swift.put_container('sftp_tests')
yield self.swift.put_object(
'sftp_tests', 'dir1',
headers={'Content-Type': 'application/directory'})
yield self.swift.put_object('sftp_tests', 'dir1/obj2')
self.ftp.rmd('sftp_tests/dir1')
@defer.inlineCallbacks
def test_delete_populated_dir_not_existing(self):
yield self.swift.put_container('sftp_tests')
yield self.swift.put_object('sftp_tests', 'dir1/obj2')
self.ftp.rmd('sftp_tests/dir1')
class ListingTests(FTPFuncTest):
def test_listing(self):
listing = self.ftp.nlst('')
self.assertNotIn('sftp_tests', listing)
@defer.inlineCallbacks
def test_listing_exists(self):
yield self.swift.put_container('sftp_tests')
listing = self.ftp.nlst('')
self.assertIn('sftp_tests', listing)
@defer.inlineCallbacks
def test_directory_listing(self):
yield self.swift.put_container('sftp_tests')
yield self.swift.put_object(
'sftp_tests', 'dir1',
headers={'Content-Type': 'application/directory'})
yield self.swift.put_object(
'sftp_tests', 'dir2',
headers={'Content-Type': 'application/directory'})
yield self.swift.put_object('sftp_tests', 'dir2/obj1')
yield self.swift.put_object('sftp_tests', 'dir3/obj2')
listing = self.ftp.nlst('sftp_tests')
self.assertIn('dir1', listing)
self.assertIn('dir2', listing)
self.assertIn('dir3', listing)
self.assertEqual(3, len(listing))
listing = self.ftp.nlst('sftp_tests/dir1')
self.assertEqual(0, len(listing))
listing = self.ftp.nlst('sftp_tests/dir2')
self.assertIn('obj1', listing)
self.assertEqual(1, len(listing))
listing = self.ftp.nlst('sftp_tests/dir3')
self.assertIn('obj2', listing)
self.assertEqual(1, len(listing))
@defer.inlineCallbacks
def test_long_listing(self):
yield self.swift.put_container('sftp_tests')
for i in range(101):
yield self.swift.put_object(
'sftp_tests', str(i),
headers={'Content-Type': 'application/directory'})
time.sleep(2)
listing = self.ftp.nlst('sftp_tests')
self.assertEqual(101, len(listing))
| [
"[email protected]"
] | |
a9af732e804394d5d2b35f5a479a30695122d13a | 46ef191ca0c170ca1d8afc5eb5134de52eba15f1 | /abc167/venv/D.py | 52607613479c21ed6d9f20b94042a0ff10aeb2a9 | [] | no_license | anthonyouch/Competitive-Programming- | 9a84cd7ff4b816d2e7ece4e4d6438dbeb23f5795 | 39109a7be1cd007bd0080a9694ac256efc10eab9 | refs/heads/master | 2023-03-04T00:49:00.688118 | 2021-02-05T13:19:46 | 2021-02-05T13:19:46 | 334,131,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | # create a list that eventually goes back to 1
import sys
n, k = [int(i) for i in input().split()]
lst = [int(i) for i in input().split()]
lst.insert(0, 0)
path = [1]
curr = 1
visited = set()
while True:
if lst[curr] in visited:
path.append(lst[curr])
#print(path)
index_val = path.index(lst[curr])
pre_path = path[:index_val + 1]
path = path[index_val + 1:]
break
visited.add(lst[curr])
path.append(lst[curr])
curr = lst[curr]
if k < len(pre_path):
print(pre_path[k])
sys.exit()
else:
k-= (len(pre_path) - 1)
remainder = k % len(path)
#print(pre_path)
#print(path)
print(path[remainder - 1 ]) | [
"[email protected]"
] | |
96db5839bb144bc4546626c155142610d2a4061a | 87d13c3c1e4d37909a584ae5be5abd5576dafb9b | /backend/todos/migrations/0001_initial.py | 34753a9ef9bd73a7895aee22b6ad24d76e98806f | [] | no_license | Tanmoy-Sarkar/Todo-App-with-Django-Rest-Framework | 8c5a6fcf2e5d6d15bcb8acbc421aefb0b9e5519d | d8dc88968a94c74b6d3dab008abdab68088aacb6 | refs/heads/master | 2023-07-29T00:28:51.198787 | 2020-08-12T05:51:24 | 2020-08-12T05:51:24 | 278,842,084 | 0 | 0 | null | 2021-09-22T19:30:38 | 2020-07-11T10:48:24 | JavaScript | UTF-8 | Python | false | false | 531 | py | # Generated by Django 3.0.8 on 2020-07-11 10:14
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Todo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=300)),
('body', models.TextField()),
],
),
]
| [
"[email protected]"
] | |
fb845fdfab1ea433b53665abb0f88557207567d7 | 6ff4671a00db5b5b97eea71f80b30dd4ff3ca020 | /Notebooks/Stride_testing.py | 1076f13f26c8dce5fb0bff66b691279c729f93ca | [
"MIT"
] | permissive | jason-neal/equanimous-octo-tribble | 36cbe912282bb9210a8fc4e959795bbda1a5f1e4 | a8788909331034725afe38ae96c83584b17c9fbd | refs/heads/master | 2021-01-23T19:57:05.022746 | 2018-07-18T21:37:27 | 2018-07-18T21:37:27 | 46,361,899 | 1 | 1 | MIT | 2020-06-11T09:35:48 | 2015-11-17T17:00:51 | HTML | UTF-8 | Python | false | false | 6,037 | py |
# coding: utf-8
# # Testing numpy Stride
# For snr calculation windowing
# In[21]:
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from astropy.io import fits
from numpy.lib import stride_tricks
get_ipython().magic('matplotlib inline')
# In[22]:
fname = "Test_spectra.fits"
data = fits.getdata(fname)
hdr = fits.getheader(fname)
wl = data["Wavelength"]
I = data["Extracted_DRACS"]
# print(type(I))
print(I.dtype)
wl = np.array(wl, dtype="float64") # Turn >f4 into float64
I = np.array(I, dtype="float64") # Turn >f4 into float64
print(I.dtype)
print(I)
# In[ ]:
binsize = 100
# Try using stride on np.array
# striding
nums = np.arange(len(I), dtype="int")
print("itemsize", nums.itemsize, "dtype", nums.dtype)
hop_length = 1
# stride_tests with numbers
frame_length = binsize
num_frames = 1 + (len(nums) - frame_length) // hop_length
row_stride = nums.itemsize * hop_length # *hopesize
print(frame_length)
print(num_frames)
print(row_stride)
col_stride = nums.itemsize
nums_strided = stride_tricks.as_strided(nums, shape=(num_frames, frame_length), strides=(row_stride, col_stride))
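# Each row of nums_strided is a frame_length-sample window of nums shifted by
# hop_length per row; as_strided only adjusts the view's shape/strides, so no
# data is copied.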
print("nums", nums)
print("nums_strided =", nums_strided)
# row wise transform
row_sum = np.sum(nums_strided, axis=1)
# print(row_sum)
snr = 1 / np.std(nums_strided, axis=1)
print(snr)
# In[ ]:
# with I
frame_length = binsize
num_frames = 1 + (len(I) - frame_length) // hop_length
row_stride = I.itemsize * hop_length # *hopesize
print(frame_length)
print(num_frames)
print(row_stride)
col_stride = I.itemsize
I_strided = stride_tricks.as_strided(I, shape=(num_frames, frame_length), strides=(row_stride, col_stride))
# print("nums", I)
# print("nums_strided =", I_strided)
snr = 1 / np.std(I_strided, axis=1)
print(snr)
# In[ ]:
plt.plot(snr)
plt.show()
# In[23]:
def strided_snr(data, frame_length, hop_length=1):
    num_frames = 1 + (len(data) - frame_length) // hop_length
row_stride = data.itemsize * hop_length # *hopesize
col_stride = data.itemsize
data_strided = stride_tricks.as_strided(data, shape=(num_frames, frame_length), strides=(row_stride, col_stride))
print("length of data_strided", len(data_strided))
snr = 1/np.std( data_strided, axis=1)
# print("frame_length", frame_length)
# print("num_frames", num_frames)
# print("len(snr)", len(snr))
# print(snr)
# zeropad to make uniform length of spectra
missing_size = len(data) - len(snr)
print("missing size", missing_size)
before = missing_size // 2
end = missing_size // 2
if missing_size % 2 is not 0:
print("missing size is not even")
padded_snr = np.pad(snr, (before, end), "constant")
# print("padded length", len(padded_snr))
# print(padded_snr)
return padded_snr
def strided_sum(data, frame_length, hop_length=1):
    num_frames = 1 + (len(data) - frame_length) // hop_length
row_stride = data.itemsize * hop_length # *hopesize
col_stride = data.itemsize
data_strided = stride_tricks.as_strided(data, shape=(num_frames, frame_length), strides=(row_stride, col_stride))
print("length of data_strided", len(data_strided))
print("binsize", frame_length)
print("hop_length", hop_length)
print(data_strided)
total = np.sum(data_strided, axis=1)
# print("frame_length", frame_length)
# print("num_frames", num_frames)
# print("len(snr)", len(snr))
# print(snr)
# zeropad to make uniform length of spectra
missing_size = len(data) - len(total)
pad_size = (len(data) - len(total)) // 2
# print("missing size", missing_size)
before = missing_size // 2
end = missing_size // 2
    if missing_size % 2 != 0:
print("missing size is not even")
padded_total = np.pad(total, (pad_size, pad_size), "constant")
# print("padded length", len(padded_snr))
# print(padded_snr)
return padded_total
# This doesn't seem to work that well with pandas not sure why
# store_array = np.empty((1024, len(bins)), dtype=data.dtype)
# for i, bin in enumerate(bins):
# store_array[:, i] = strided_snr(I, bin)
# In[30]:
# loop over the different bin sizes
bins = np.arange(3, 51, 2)
hopper = 1
store_list = []
for i, b in enumerate(bins):
store_list.append(strided_snr(I, b, hop_length=hopper))
print("done")
# In[31]:
# print(store_array)
print(store_list)
# In[32]:
# turn into a pandas dataframe
# dataframe = pd.DataFrame(data=store_array, columns=range(1024), index=bins)
# dataframe = pd.DataFrame(store_array, index=bins, columns=list(range(1024)))
# print(dataframe)
# print(dataframe.dtypes)
# In[33]:
df_list = pd.DataFrame(store_list, index=bins, columns=np.round(wl, 2))
print(df_list)
# In[36]:
sns.set()
cmap = sns.diverging_palette(220, 10, as_cmap=True)
ax = sns.heatmap(store_list, cmap=cmap, xticklabels=200, vmax=300, vmin=10)
# ax = sns.heatmap(df_list)
# plt.xticks(np.arange(int(np.min(wl)), int(np.max(wl) + 1), 1.0))
ax.set(ylabel="Binsize", xlabel="Wavelenght")
# In[37]:
# seaborn heatmap plot
sns.set()
cmap = sns.diverging_palette(220, 10, as_cmap=True)
ax = sns.heatmap(df_list, xticklabels=200, vmax=300, vmin=10)
# ax = sns.heatmap(df_list)
# plt.xticks(np.arange(int(np.min(wl)), int(np.max(wl) + 1), 1.0))
ax.set(ylabel="Binsize",
xlabel="Wavelenght")
# In[35]:
# ax = sns.heatmap(store_list)
wl[50]-wl[0]
# In[ ]:
# # test on known data
# In[17]:
data = np.arange(20)
binsizes = range(1, 6, 2)
store = []
# opt = np.get_printoptions()
# np.set_printoptions(threshold='nan')
for b in binsizes:
store.append(strided_sum(data, b))
# np.set_printoptions(**opt)
# In[18]:
SNRrand = pd.DataFrame(store, index=binsizes)
print(SNRrand)
# In[19]:
sns.set()
# cmap = sns.diverging_palette(220, 10, as_cmap=True)
ax = sns.heatmap(SNRrand, xticklabels=20)
# ax = sns.heatmap(df_list)
# plt.xticks(np.arange(int(np.min(wl)), int(np.max(wl) + 1), 1.0))
ax.set(ylabel="Binsize",
xlabel="Wavelenght")
# In[ ]:
# In[ ]:
| [
"[email protected]"
] | |
37712452ff5adaa4113178fb9c5623c3e941fee9 | d67bd00f8fe819bd3011ce154c19cbc765d59f1d | /branches/4.0_buildout/openlegis/sagl/skins/consultas/parlamentar/vereadores_atuais_json.py | aac059d617ea5f4ce9e532f0b3e6b9cfb8903a88 | [] | no_license | openlegis-br/sagl | 90f87bdbbaa8a6efe0ccb5691ea8424575288c46 | eabf7529eefe13a53ed088250d179a92218af1ed | refs/heads/master | 2023-08-31T12:29:39.382474 | 2023-08-29T16:12:01 | 2023-08-29T16:12:01 | 32,593,838 | 17 | 1 | null | 2023-08-29T06:16:55 | 2015-03-20T16:11:04 | Python | UTF-8 | Python | false | false | 1,827 | py | ## Script (Python) "vereadores_atuais"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=
##title=
##
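# Builds a JSON list of the council members currently in office (name, photo, link, party).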
import simplejson as json
context.REQUEST.RESPONSE.setHeader("Access-Control-Allow-Origin", "*")
request=context.REQUEST
for item in context.zsql.legislatura_atual_obter_zsql():
num_legislatura = item.num_legislatura
data_atual = DateTime().strftime("%d/%m/%Y")
lista_exercicio = []
exercicio = []
for item in context.zsql.autores_obter_zsql(txt_dat_apresentacao=data_atual):
dic = {}
dic['cod_parlamentar'] = item.cod_parlamentar
dic['nom_parlamentar'] = item.nom_parlamentar
dic['nom_completo'] = item.nom_completo
foto = str(item.cod_parlamentar) + "_foto_parlamentar"
if hasattr(context.sapl_documentos.parlamentar.fotos, foto):
dic['foto'] = request.SERVER_URL + '/sapl_documentos/parlamentar/fotos/' + foto
else:
dic['foto'] = request.SERVER_URL + '/imagens/avatar.png'
dic['link'] = request.SERVER_URL + '/consultas/parlamentar/parlamentar_mostrar_proc?cod_parlamentar=' + item.cod_parlamentar + '%26iframe=1'
dic['partido'] = ''
for filiacao in context.zsql.parlamentar_data_filiacao_obter_zsql(num_legislatura=num_legislatura, cod_parlamentar=item.cod_parlamentar):
        if filiacao.dat_filiacao != '0' and filiacao.dat_filiacao is not None:
for partido in context.zsql.parlamentar_partido_obter_zsql(dat_filiacao=filiacao.dat_filiacao, cod_parlamentar=item.cod_parlamentar):
dic['partido'] = partido.sgl_partido
lista_exercicio.append(dic)
lista_exercicio.sort(key=lambda dic: dic['nom_completo'])
#listaVereador={}
#listaVereador.update({'vereadores': lista_exercicio})
return json.dumps(lista_exercicio)
| [
"[email protected]"
] | |
bf6f76dc39e4234d6a8f1eaad9548249a8dc530d | a7947a129fa5318517f35f17163840f24178d6aa | /examples/core_geometry_bspline.py | c254afdd34a3fe70426aefe3c789019e5e6cad4d | [] | no_license | fboussuge/pythonocc-demos | 993abe7634ab74fc3619fea12519c176b4e26658 | 8f9756653eaecc505238d43fa22a0057bbd14b56 | refs/heads/master | 2021-06-23T01:29:29.611505 | 2020-12-08T13:49:04 | 2020-12-08T13:49:04 | 156,134,578 | 0 | 0 | null | 2018-11-04T23:17:31 | 2018-11-04T23:17:30 | null | UTF-8 | Python | false | false | 2,743 | py | #!/usr/bin/env python
##Copyright 2009-2014 Jelle Feringa ([email protected])
##
##This file is part of pythonOCC.
##
##pythonOCC is free software: you can redistribute it and/or modify
##it under the terms of the GNU Lesser General Public License as published by
##the Free Software Foundation, either version 3 of the License, or
##(at your option) any later version.
##
##pythonOCC is distributed in the hope that it will be useful,
##but WITHOUT ANY WARRANTY; without even the implied warranty of
##MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
##GNU Lesser General Public License for more details.
##
##You should have received a copy of the GNU Lesser General Public License
##along with pythonOCC. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from OCC.Core.gp import gp_Pnt2d
from OCC.Core.Geom2dAPI import Geom2dAPI_Interpolate, Geom2dAPI_PointsToBSpline
from OCC.Core.TColgp import TColgp_HArray1OfPnt2d, TColgp_Array1OfPnt2d
from OCC.Display.SimpleGui import init_display
display, start_display, add_menu, add_function_to_menu = init_display()
def bspline():
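    # three 2D B-splines: a least-squares fit, an open interpolation and a closed (periodic) interpolation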
# the first bspline
array = TColgp_Array1OfPnt2d(1, 5)
array.SetValue(1, gp_Pnt2d(0, 0))
array.SetValue(2, gp_Pnt2d(1, 2))
array.SetValue(3, gp_Pnt2d(2, 3))
array.SetValue(4, gp_Pnt2d(4, 3))
array.SetValue(5, gp_Pnt2d(5, 5))
bspline_1 = Geom2dAPI_PointsToBSpline(array).Curve()
# the second one
harray = TColgp_HArray1OfPnt2d(1, 5)
harray.SetValue(1, gp_Pnt2d(0, 0))
harray.SetValue(2, gp_Pnt2d(1, 2))
harray.SetValue(3, gp_Pnt2d(2, 3))
harray.SetValue(4, gp_Pnt2d(4, 3))
harray.SetValue(5, gp_Pnt2d(5, 5))
    anInterpolation = Geom2dAPI_Interpolate(harray, False, 0.01)  # OCC.Core bindings wrap the handle automatically
anInterpolation.Perform()
bspline_2 = anInterpolation.Curve()
harray2 = TColgp_HArray1OfPnt2d(1, 5)
harray2.SetValue(1, gp_Pnt2d(11, 0))
harray2.SetValue(2, gp_Pnt2d(12, 2))
harray2.SetValue(3, gp_Pnt2d(13, 3))
harray2.SetValue(4, gp_Pnt2d(15, 3))
harray2.SetValue(5, gp_Pnt2d(16, 5))
    anInterpolation2 = Geom2dAPI_Interpolate(harray2, True, 0.01)
anInterpolation2.Perform()
bspline_3 = anInterpolation2.Curve()
for j in range(array.Lower(), array.Upper()+1):
p = array.Value(j)
display.DisplayShape(p, update=False)
for j in range(harray.Lower(), harray.Upper()+1):
p = harray.Value(j)
display.DisplayShape(p, update=False)
display.DisplayShape(bspline_1, update=False)
display.DisplayShape(bspline_2, update=False, color='GREEN')
display.DisplayShape(bspline_3, update=True, color='BLUE')
if __name__ == '__main__':
bspline()
start_display()
| [
"[email protected]"
] | |
e08f90549f8a32c66d9622898dd5fc889d376b1d | 57c570d1b5a621158d8763f935e2069be6b8c90a | /tykj-operation/tykj-operation/MarketSearchCrawler/services/db.py | 48e2998f975d9b156c3edad34d84c9fd20d44542 | [] | no_license | liuliainio/liuli | e011decf45f7eca7009a12ad4a96f33a17055945 | 203fbf4f135efb6432c77b937633003ce2f2c9a2 | refs/heads/master | 2021-01-10T20:35:08.070770 | 2018-08-21T05:52:59 | 2018-08-21T05:52:59 | 25,625,853 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | #-*- coding: utf-8 -*-
'''
Created on Sep 12, 2013
@author: gmliao
'''
from crawler import settings
import MySQLdb
class MySQLdbWrapper:
conn = None
def connect(self):
self.conn = MySQLdb.connect(settings.DATABASES['default'][0], settings.DATABASES['default'][1],
settings.DATABASES['default'][2], settings.DATABASES['default'][3],
charset='utf8', use_unicode=True)
#self.conn = MySQLdb.connect('localhost', 'root', '1111', 'market')
self.conn.set_character_set('utf8')
def reconnect(self):
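        # drop the cached handle; the next cursor() call will open a fresh connection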
self.conn = None
def cursor(self):
try:
if not self.conn:
self.connect()
return self.conn.cursor()
except MySQLdb.OperationalError:
self.connect()
return self.conn.cursor()
| [
"[email protected]"
] | |
2d39f028eeabb883b01ffc250ab1059e0f677292 | 0aa2db201678205e9eccd3f4f2dcb6f95a97b5f6 | /tut_42.py | 22a41be2b48cd8deb77c4a835b8a9d4c9ca6fee1 | [] | no_license | udoy382/PyTutorial_telusko | ffa76b4b6772d289c787e4b682df2d0965a2bf62 | 5dc5f3fc331605310f7c3923d7865f55a4592e28 | refs/heads/main | 2023-06-09T11:00:41.915456 | 2021-06-30T14:29:56 | 2021-06-30T14:29:56 | 381,730,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | # this is our simple functions
def square(a):
return a*a
result = square(5)
print(result)
# this is our lambda function
f = lambda j,k : j*k
print(f(4, 5)) | [
"[email protected]"
] | |
5ce02b22a691c4e7bbbb7c9b5b276d863edba49d | 486fa0a987ab1648de91efeb4b7ba8be3dd6b016 | /TermTk/TTkCore/TTkTerm/__init__.py | 22928704063777cde14e1466e9c3a9c63600d837 | [
"MIT"
] | permissive | ceccopierangiolieugenio/pyTermTk | 9f5103d6af9e93fe2572b61486919020d2007550 | f9c2a4d97f2cd04f0b86cf10661f63a61edae48e | refs/heads/main | 2023-08-30T20:58:39.239718 | 2023-08-02T22:51:02 | 2023-08-02T22:51:02 | 339,475,110 | 414 | 16 | MIT | 2023-08-31T23:16:10 | 2021-02-16T17:23:36 | Python | UTF-8 | Python | false | false | 176 | py | from .inputkey import TTkKeyEvent
from .inputmouse import TTkMouseEvent
from .colors import TTkTermColor
from .term import TTkTerm
from .input import TTkInput
| [
"[email protected]"
] | |
2be7fcff78fe289f0631a2d1fdca66d77e9dac22 | fe19d2fac4580d463132e61509bd6e3cc2cf958d | /toontown/safezone/PicnicBasket.py | 30d2bfbd1a1e067efa18df7add4622d6558f99b5 | [] | no_license | t00nt0wn1dk/c0d3 | 3e6db6dd42c3aa36ad77709cf9016176a3f3a44f | 7de105d7f3de0f8704b020e32fd063ee2fad8d0d | refs/heads/master | 2021-01-01T16:00:15.367822 | 2015-03-21T21:25:52 | 2015-03-21T21:25:55 | 32,647,654 | 3 | 5 | null | null | null | null | UTF-8 | Python | false | false | 7,319 | py | # 2013.08.22 22:24:42 Pacific Daylight Time
# Embedded file name: toontown.safezone.PicnicBasket
from pandac.PandaModules import *
from toontown.toonbase.ToonBaseGlobal import *
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.fsm import StateData
from toontown.toontowngui import TTDialog
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from direct.showbase import PythonUtil
class PicnicBasket(StateData.StateData):
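    # FSM wrapper that guides a toon through boarding, sitting at and leaving a picnic table seat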
__module__ = __name__
def __init__(self, safeZone, parentFSM, doneEvent, tableNumber, seatNumber):
StateData.StateData.__init__(self, doneEvent)
self.tableNumber = tableNumber
self.seatNumber = seatNumber
self.fsm = ClassicFSM.ClassicFSM('PicnicBasket', [State.State('start', self.enterStart, self.exitStart, ['requestBoard', 'trolleyHFA', 'trolleyTFA']),
State.State('trolleyHFA', self.enterTrolleyHFA, self.exitTrolleyHFA, ['final']),
State.State('trolleyTFA', self.enterTrolleyTFA, self.exitTrolleyTFA, ['final']),
State.State('requestBoard', self.enterRequestBoard, self.exitRequestBoard, ['boarding']),
State.State('boarding', self.enterBoarding, self.exitBoarding, ['boarded']),
State.State('boarded', self.enterBoarded, self.exitBoarded, ['requestExit',
'trolleyLeaving',
'final',
'exiting']),
State.State('requestExit', self.enterRequestExit, self.exitRequestExit, ['exiting', 'trolleyLeaving']),
State.State('trolleyLeaving', self.enterTrolleyLeaving, self.exitTrolleyLeaving, ['final']),
State.State('exiting', self.enterExiting, self.exitExiting, ['final']),
State.State('final', self.enterFinal, self.exitFinal, ['start'])], 'start', 'final')
self.parentFSM = parentFSM
return None
def load(self):
self.parentFSM.getStateNamed('picnicBasketBlock').addChild(self.fsm)
self.buttonModels = loader.loadModel('phase_3.5/models/gui/inventory_gui')
self.upButton = self.buttonModels.find('**//InventoryButtonUp')
self.downButton = self.buttonModels.find('**/InventoryButtonDown')
self.rolloverButton = self.buttonModels.find('**/InventoryButtonRollover')
def unload(self):
self.parentFSM.getStateNamed('trolley').removeChild(self.fsm)
del self.fsm
del self.parentFSM
self.buttonModels.removeNode()
del self.buttonModels
del self.upButton
del self.downButton
del self.rolloverButton
def enter(self):
self.fsm.enterInitialState()
if base.localAvatar.hp > 0:
messenger.send('enterPicnicTableOK_%d_%d' % (self.tableNumber, self.seatNumber))
self.fsm.request('requestBoard')
else:
self.fsm.request('trolleyHFA')
return None
def exit(self):
self.ignoreAll()
return None
def enterStart(self):
return None
def exitStart(self):
return None
def enterTrolleyHFA(self):
self.noTrolleyBox = TTDialog.TTGlobalDialog(message=TTLocalizer.TrolleyHFAMessage, doneEvent='noTrolleyAck', style=TTDialog.Acknowledge)
self.noTrolleyBox.show()
base.localAvatar.b_setAnimState('neutral', 1)
self.accept('noTrolleyAck', self.__handleNoTrolleyAck)
def exitTrolleyHFA(self):
self.ignore('noTrolleyAck')
self.noTrolleyBox.cleanup()
del self.noTrolleyBox
def enterTrolleyTFA(self):
self.noTrolleyBox = TTDialog.TTGlobalDialog(message=TTLocalizer.TrolleyTFAMessage, doneEvent='noTrolleyAck', style=TTDialog.Acknowledge)
self.noTrolleyBox.show()
base.localAvatar.b_setAnimState('neutral', 1)
self.accept('noTrolleyAck', self.__handleNoTrolleyAck)
def exitTrolleyTFA(self):
self.ignore('noTrolleyAck')
self.noTrolleyBox.cleanup()
del self.noTrolleyBox
def __handleNoTrolleyAck(self):
ntbDoneStatus = self.noTrolleyBox.doneStatus
if ntbDoneStatus == 'ok':
doneStatus = {}
doneStatus['mode'] = 'reject'
messenger.send(self.doneEvent, [doneStatus])
else:
self.notify.error('Unrecognized doneStatus: ' + str(ntbDoneStatus))
def enterRequestBoard(self):
return None
def handleRejectBoard(self):
doneStatus = {}
doneStatus['mode'] = 'reject'
messenger.send(self.doneEvent, [doneStatus])
def exitRequestBoard(self):
return None
def enterBoarding(self, nodePath, side):
camera.wrtReparentTo(nodePath)
heading = PythonUtil.fitDestAngle2Src(camera.getH(nodePath), 90 * side)
self.cameraBoardTrack = LerpPosHprInterval(camera, 1.5, Point3(14.4072 * side, 0, 3.8667), Point3(heading, -15, 0))
self.cameraBoardTrack.start()
return None
def exitBoarding(self):
self.ignore('boardedTrolley')
return None
def enterBoarded(self):
self.enableExitButton()
return None
def exitBoarded(self):
self.cameraBoardTrack.finish()
self.disableExitButton()
return None
def enableExitButton(self):
self.exitButton = DirectButton(relief=None, text=TTLocalizer.TrolleyHopOff, text_fg=(1, 1, 0.65, 1), text_pos=(0, -0.23), text_scale=0.8, image=(self.upButton, self.downButton, self.rolloverButton), image_color=(1, 0, 0, 1), image_scale=(20, 1, 11), pos=(0, 0, 0.8), scale=0.15, command=lambda self = self: self.fsm.request('requestExit'))
return
def disableExitButton(self):
self.exitButton.destroy()
def enterRequestExit(self):
messenger.send('trolleyExitButton')
return None
def exitRequestExit(self):
return None
def enterTrolleyLeaving(self):
self.acceptOnce('playMinigame', self.handlePlayMinigame)
self.acceptOnce('picnicDone', self.handlePicnicDone)
return None
def handlePlayMinigame(self, zoneId, minigameId):
base.localAvatar.b_setParent(ToontownGlobals.SPHidden)
doneStatus = {}
doneStatus['mode'] = 'minigame'
doneStatus['zoneId'] = zoneId
doneStatus['minigameId'] = minigameId
messenger.send(self.doneEvent, [doneStatus])
def handlePicnicDone(self):
doneStatus = {}
doneStatus['mode'] = 'exit'
messenger.send(self.doneEvent, [doneStatus])
def exitTrolleyLeaving(self):
self.ignore('playMinigame')
taskMgr.remove('leavingCamera')
return self.notify.debug('handling golf kart done event')
def enterExiting(self):
return None
def handleOffTrolley(self):
doneStatus = {}
doneStatus['mode'] = 'exit'
messenger.send(self.doneEvent, [doneStatus])
return None
def exitExiting(self):
return None
def enterFinal(self):
return None
def exitFinal(self):
return None
# okay decompyling C:\Users\Maverick\Documents\Visual Studio 2010\Projects\Unfreezer\py2\toontown\safezone\PicnicBasket.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2013.08.22 22:24:43 Pacific Daylight Time
| [
"[email protected]"
] | |
57d4dcac150c0ad4bc8e320c1158a43a936253e8 | 3411ee4095d15057aa7195bdb4a71fbf8f7eb96a | /pysimu/pysimu.py | c6e2b5cc7175a2e1bb710efeff8fb7272e380383 | [] | no_license | a-marano/pysimu | 7dedf9ab7d13aa4fe86456d3b40c00f54c882acd | 388f9afedd8b1d830f6393eaaa307e50188249e5 | refs/heads/master | 2021-01-21T06:14:32.102200 | 2015-01-20T12:36:49 | 2015-01-20T12:36:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,545 | py | # -*- coding: utf-8 -*-
"""
Main pysimu module
Created on Thu Aug 14 20:21:56 2014
@author: jmmauricio-m
"""
import numpy as np
from scipy.integrate import ode
from models.psys import sys_freq_model_1, gen_nc
class sim:
'''
    Class to perform simulations
'''
def __init__(self):
self.x = np.array([])
self.t = 0.0
self.T = np.array([])
self.X = np.array([])
self.Y = np.array([])
self.max_step = 0.1
self.nsteps = 5000
    def h(self,t,x):
        return x
def odefun(self,t,x):
self.x = x
return self.f(t,x)
def odeout(self,t,x):
self.T = np.hstack((self.T,t))
self.X = np.vstack((self.X,x))
self.Y = np.vstack((self.Y,self.h(t,self.x)))
return self.h(t,self.x)
def run(self, t_end):
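        # dopri5 Runge-Kutta integration; odeout() records state and outputs at every accepted step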
r = ode(self.odefun)
r.set_integrator('dopri5', max_step=self.max_step, nsteps = self.nsteps)
r.set_solout(self.odeout)
if len(self.X)==0:
self.X = self.x_0
self.T = np.array(self.t)
self.Y = np.array(self.h(self.t,self.x_0))
r.set_initial_value(self.x_0, self.t)
r.integrate(t_end)
self.t = t_end
self.r = r
self.x = r.y
'''
>>> r.set_initial_value(y0, t0).set_f_params(2.0).set_jac_params(2.0)
>>> t1 = 10
>>> dt = 1
>>> while r.successful() and r.t < t1:
>>> r.integrate(r.t+dt)
>>> print("%g %g" % (r.t, r.y))
'''
class system:
def __init__(self):
self.sys_list = []
self.pssys= sys_freq_model_1()
self.gen_nc = gen_nc()
# n_x_global_ini = 0
# n_x_global_end = self.pssys.n_x
# self.pssys.x_idx = range(n_x_global_ini,n_x_global_end)
# n_x_global_ini = n_x_global_end
# n_x_global_end = n_x_global_end + self.gen_nc.n_x
# self.gen_nc.x_idx = range(n_x_global_ini,n_x_global_end)
#
# self.dx = np.zeros((3,1))
self.max_step = 0.1
self.nsteps = 5000
self.x = np.array([])
self.t = 0.0
self.channels = {}
def setup(self):
for item_name, item_model in self.models_list:
exec('self.{:s} = {:s}()'.format(item_name,item_model))
exec("self.sys_list += [self.{:s}]".format(item_name))
k = 0
for item in self.sys_list:
item.k = k
k += item.n_x
self.chan = {}
for item_model,item_var in self.channels:
if not self.chan.has_key(item_model):
self.chan.update({item_model:{item_var:np.array([])}})
else:
self.chan[item_model].update({item_var:np.array([])})
self.r = ode(self.f)
self.r.set_integrator('dopri5', max_step =self.max_step, nsteps=self.nsteps)
self.r.set_solout(self.out)
self.r.set_initial_value(self.ini(),self.t)
def ini(self):
current_out = ''
for item_out, item_in in self.backward_connections:
item_name_out, item_var_out = item_out.split('.')
item_name_in, item_var_in = item_in.split('.')
if current_out == '':
exec('self.{:s}.ini()'.format(item_name_out))
current_out = item_name_out
to_exec = 'self.{:s}.{:s} = self.{:s}.{:s}'.format(item_name_in,item_var_in,item_name_out,item_var_out)
print to_exec
exec(to_exec)
x_list = []
for item in self.sys_list:
item.ini()
x_list += [item.x]
self.x = np.vstack(x_list)
return self.x
def f(self,t,x):
self.perturbation(t,x)
for item_out, item_in in self.foward_connections:
item_name_out, item_var_out = item_out.split('.')
item_name_in, item_var_in = item_in.split('.')
to_exec = 'self.{:s}.{:s} = self.{:s}.{:s}'.format(item_name_in,item_var_in,item_name_out,item_var_out)
# print to_exec
exec(to_exec)
# print self.pssys.freq
dx_list = []
for item in self.sys_list:
# print x
x_i = x[item.k:item.k+item.n_x]
item.x = x_i
item.update()
item.f(t,x_i)
dx_list += [item.dx]
self.dx = np.vstack(dx_list)
return self.dx
def update(self):
for item in self.sys_list:
item.update()
def out(self, t,x):
self.t = t
for item_model,item_var in self.channels:
if item_model == 'sys':
exec('item_value = self.{:s}'.format(item_var))
self.chan[item_model][item_var] = np.hstack((self.chan[item_model][item_var],item_value))
else:
exec('item_value = self.{:s}.{:s}'.format(item_model,item_var))
self.chan[item_model][item_var] = np.hstack((self.chan[item_model][item_var],item_value))
# print self.pssys.freq
def run(self, t_end):
self.r.integrate(t_end)
self.t = t_end
def perturbation(self,t,x):
pass
if __name__ == '__main__':
simu_1 = sim()
p_m = 1.0
X = 0.5
e = 1.0
v = 1.0
H = 3.5
omega_s = 1.0
omega = omega_s
D = 1.0
Omega_b = 2.0*np.pi*50.0
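    # classical swing equation of a single machine against an infinite bus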
def f(t,x):
delta = x[0]
omega = x[1]
p_e = e*v/X*np.sin(delta)
ddelta = Omega_b*(omega - omega_s)
domega = 1.0/(2*H)*(p_m - p_e - D*(omega - omega_s))
return [ddelta, domega]
def h(t,x):
delta = x[0]
omega = x[1]
p_e = e*v/X*np.sin(delta)
return np.array(p_e)
p_e = p_m
delta_0 = np.arcsin(p_e*X/(e*v))
omega_0 = omega_s
x_0 = np.array([delta_0, omega_0])
simu_1.f = f
simu_1.x_0 = x_0
simu_1.h = h
simu_1.run(1.0)
v = 0.0
simu_1.run(1.2)
v = 1.0
simu_1.x_0 = simu_1.x
simu_1.run(5.0)
# sys_1 = system()
#
# sys_1.models_list = [('pssys','sys_freq_model_1'),
# ('gen_nc','gen_nc')]
#
# sys_1.backward_connections = [('pssys.p_nc','gen_nc.p_nc')]
#
# sys_1.foward_connections = [('pssys.freq','gen_nc.freq'),
# ('gen_nc.p_nc','pssys.p_nc')]
#
# sys_1.channels = [('sys','t'),
# ('pssys','freq'),
# ('pssys','p_nc'),
# ('pssys','p_l')]
#
# def perturbation(self,t,x):
# if t>1.0:
# self.sys.p_l = 2200.0
#
# sys_1.setup()
## sys_1.ini()
# sys_1.gen_nc.K_f = 100000.0
# print sys_1.pssys.p_nc
# print sys_1.gen_nc.p_nc
# sys_1.run(1.0)
# sys_1.pssys.p_l = 2200.0
# sys_1.run(10.0)
Delta = np.linspace(0.0, np.pi,100)
P_e = e*v/X*np.sin(Delta)
#
import matplotlib.pyplot as plt
fig_1 = plt.figure( figsize=(14, 8))
ax_delta = fig_1.add_subplot(2,2,1)
ax_omega = fig_1.add_subplot(2,2,3)
ax_delta_omega = fig_1.add_subplot(2,2,(2,4))
ax_delta.plot(simu_1.T,simu_1.X[:,0], linewidth=2)
ax_omega.plot(simu_1.T,simu_1.X[:,1], linewidth=2)
ax_delta_omega.plot(Delta,P_e, label='$\sf p_e$')
ax_delta_omega.plot(Delta,P_e/P_e*p_m, label='$\sf p_m$', linewidth=2)
ax_delta_omega.plot(simu_1.X[:,0],simu_1.Y[:], 'r', linewidth=2)
# ax_delta.set_xlabel('Time (s)')
ax_delta.set_ylabel('$\sf \delta $ (rad)')
ax_omega.set_xlabel('Time (s)')
ax_omega.set_ylabel('$\sf \omega $ (p.u.)')
ax_delta_omega.set_xlabel('$\sf \delta $ (rad)')
ax_delta_omega.set_ylabel('$\sf Power $ (p.u.)')
ax_delta.grid(True)
ax_omega.grid(True)
ax_delta_omega.grid(True)
# ax_powers = fig_freq.add_subplot(212)
# ax_powers.plot(sys_1.chan['sys']['t'],sys_1.chan['pssys']['p_nc'])
# ax_powers.plot(sys_1.chan['t'],sys_1.chan['p_g'])
import plotly.plotly as py
py.sign_in("jmmauricio", "rwdnrmvuyg")
plot_url = py.plot_mpl(fig_1, auto_open=True)
| [
"[email protected]"
] | |
01dabdc5ab680e0a4c9bb2dcef1040d08b4915eb | f56a00622ea3799f25d52138ffaafc6dcad46574 | /aggtrend/aggtrends/migrations/0003_code_post_code_post.py | 45ea624a140d69851dd7bf0cd4e284f631260bcf | [] | no_license | SardarDawar/aggregate | 063b384421ef1f3b5c8d1eb1975cd8396d38f553 | b062023bc2d3e6fdeb1c17743345cb8b70f90b1c | refs/heads/master | 2022-12-29T05:09:16.438663 | 2019-12-30T22:49:56 | 2019-12-30T22:49:56 | 230,547,601 | 1 | 0 | null | 2019-12-30T10:25:30 | 2019-12-28T02:31:17 | HTML | UTF-8 | Python | false | false | 395 | py | # Generated by Django 2.2.6 on 2019-12-30 10:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('aggtrends', '0002_auto_20191227_0614'),
]
operations = [
migrations.AddField(
model_name='code_post',
name='code_post',
field=models.BooleanField(default=False),
),
]
| [
"[email protected]"
] | |
1c51a3fc01837f6b1233b786c73615dcb572c8c7 | 624ccdaf85ebebf0a03636fbd1ff234bd89c7972 | /product/serializers.py | 75f8d0097fb3b1ce82d5f1f29e448da23a3cf786 | [] | no_license | kairat3/bella_basket | 613294985c1f71efdee9d0f43fa2557a412ca9b8 | e76f3f950957ae4051472d374ccee9350def6cd1 | refs/heads/master | 2023-07-13T00:13:18.475695 | 2021-08-25T09:51:17 | 2021-08-25T09:51:17 | 396,841,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,016 | py | from rest_framework import serializers
from account.serializers import ProfileSerializer, UserSerializer
from .models import Product, Category, Favorite, Color, Size, Additional, Image
class AdditionalSerializer(serializers.ModelSerializer):
class Meta:
model = Additional
fields = ('key', 'value', )
class ColorSerializer(serializers.ModelSerializer):
class Meta:
model = Color
fields = ('id', 'color', )
class SizeSerializer(serializers.ModelSerializer):
class Meta:
model = Size
fields = ('size', )
class ImageSerializer(serializers.ModelSerializer):
class Meta:
model = Image
fields = ('title', 'image', )
class IsHitSerializer(serializers.ModelSerializer):
images = ImageSerializer(many=True, read_only=True)
color = ColorSerializer(many=True)
size = SizeSerializer(many=True)
class Meta:
model = Product
fields = (
'id', 'is_hit', 'title', 'description', 'old_price', 'price', 'discount', 'additional', 'color', 'size',
'images', 'category')
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = ('id', 'title', 'slug',)
def to_representation(self, instance):
        representation = super().to_representation(instance)
if instance.children.exists():
representation['children'] = CategorySerializer(instance=instance.children.all(), many=True).data
return representation
class ProductSerializer(serializers.ModelSerializer):
additional = AdditionalSerializer(many=True)
color = ColorSerializer(many=True)
size = SizeSerializer(many=True)
images = ImageSerializer(many=True, read_only=True)
category = CategorySerializer()
class Meta:
model = Product
fields = ('id', 'title', 'description', 'old_price', 'price', 'discount', 'additional', 'color', 'size', 'images', 'category', 'is_hit')
class FavoriteSerializer(serializers.ModelSerializer):
product = ProductSerializer(read_only=True)
class Meta:
model = Favorite
fields = ('id', 'favorite', 'user', 'product')
def create(self, validated_data):
request = self.context.get('request')
user = request.user
favorite = Favorite.objects.create(user=user, **validated_data)
return favorite
def to_representation(self, instance):
representation = super(FavoriteSerializer, self).to_representation(instance)
representation['user'] = instance.user.phone_number
return representation
class CartSerializer(serializers.Serializer):
user = ProfileSerializer()
products = ProductSerializer(many=True)
created_at = serializers.DateTimeField()
class AddToCartSerializer(serializers.ModelSerializer):
id = serializers.IntegerField()
class Meta:
model = Product
fields = ['id']
| [
"[email protected]"
] | |
d027b4e8adc33d04712f2639893d0e1b309d38c0 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02603/s199763378.py | 915ed29aa0e93fd701e5b05d825c6c1857d7be8c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | import sys
input = sys.stdin.readline
N = int(input())
A = list(map(int, input().split()))
dp = [0]*(N+1)
dp[0] = 1000
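# dp[i]: the most money attainable after day i, starting with 1000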
for i in range(1, N+1):
dp[i] = dp[i-1]
for j in range(i):
dp[i] = max(dp[i], dp[j]//A[j]*A[i-1]+dp[j]%A[j])
print(dp[N]) | [
"[email protected]"
] | |
f2059742af36092696997c24446f840d262c752a | 081ea255a45d2e0f255ebab00aea487c1bc01da2 | /OCP/tasks/task_motion.py | 01f077468501d4bcbe408f077aad8405ce9c9082 | [] | no_license | ggory15/HQP-cogimon | 8b2d906d179864c613d8741fb1997c650feedf3c | e809fcc2a421066b7c0c02ce70898ec96ba584af | refs/heads/master | 2022-07-03T01:21:47.831298 | 2020-05-10T22:21:13 | 2020-05-10T22:21:13 | 262,884,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | # __author__ = "Sanghyun Kim"
# __copyright__ = "Copyright (C) 2020 Sanghyun Kim"
import numpy as np
import copy
from .task_abstract import *
class TaskMotion(TaskBase):
def __init__(self, name, robot):
TaskBase.__init__(self, name, robot)
self.mask = 0
def setMask(self, mask):
self.mask = copy.deepcopy(mask)
def hasMase(self):
        # treated as "no mask assigned" only while the attribute still holds the initial scalar 0
        return not (np.isscalar(self.mask) and self.mask == 0)
| [
"[email protected]"
] | |
e50c7c7cb40e44a0cdcd058a2b011502e8d7cb21 | 93a4edf14cd2284d58fe0218cdce2eac00db66c6 | /tests/sdfg_validate_names_test.py | 51490383701f01b1caa015a7e2bb7c6e1b8da622 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | gronerl/dace | f50dbeb70feb35c2afb4ee92b2dd4a9613a024ea | 886e14cfec5df4aa28ff9a5e6c0fe8150570b8c7 | refs/heads/master | 2023-07-23T12:30:20.561679 | 2020-02-24T07:25:34 | 2020-02-24T07:25:34 | 242,930,054 | 0 | 0 | BSD-3-Clause | 2020-02-25T06:45:23 | 2020-02-25T06:45:22 | null | UTF-8 | Python | false | false | 4,584 | py | import unittest
import dace
# Try to detect invalid names in SDFG
class NameValidationTests(unittest.TestCase):
# SDFG label
def test_sdfg_name1(self):
try:
sdfg = dace.SDFG(' ')
sdfg.validate()
self.fail('Failed to detect invalid SDFG')
except dace.sdfg.InvalidSDFGError as ex:
print('Exception caught:', ex)
def test_sdfg_name2(self):
try:
sdfg = dace.SDFG('3sat')
sdfg.validate()
self.fail('Failed to detect invalid SDFG')
except dace.sdfg.InvalidSDFGError as ex:
print('Exception caught:', ex)
# State
def test_state_duplication(self):
try:
sdfg = dace.SDFG('ok')
sdfg.add_state('also_ok')
s2 = sdfg.add_state('also_ok')
s2.set_label('also_ok')
sdfg.validate()
self.fail('Failed to detect duplicate state')
except dace.sdfg.InvalidSDFGError as ex:
print('Exception caught:', ex)
def test_state_name1(self):
try:
sdfg = dace.SDFG('ok')
sdfg.add_state('not ok')
sdfg.validate()
self.fail('Failed to detect invalid state')
except dace.sdfg.InvalidSDFGError as ex:
print('Exception caught:', ex)
def test_state_name2(self):
try:
sdfg = dace.SDFG('ok')
sdfg.add_state('$5')
sdfg.validate()
self.fail('Failed to detect invalid state')
except dace.sdfg.InvalidSDFGError as ex:
print('Exception caught:', ex)
# Array
def test_array(self):
try:
sdfg = dace.SDFG('ok')
state = sdfg.add_state('also_ok')
_8 = state.add_array('8', [1], dace.float32)
t = state.add_tasklet('tasklet', {'a'}, {}, 'print(a)')
state.add_edge(_8, None, t, 'a',
dace.Memlet.from_array(_8.data, _8.desc(sdfg)))
sdfg.validate()
self.fail('Failed to detect invalid array name')
except (dace.sdfg.InvalidSDFGError, NameError) as ex:
print('Exception caught:', ex)
# Tasklet
def test_tasklet(self):
try:
sdfg = dace.SDFG('ok')
state = sdfg.add_state('also_ok')
A = state.add_array('A', [1], dace.float32)
B = state.add_array('B', [1], dace.float32)
t = state.add_tasklet(' tasklet', {'a'}, {'b'}, 'b = a')
state.add_edge(A, None, t, 'a',
dace.Memlet.from_array(A.data, A.desc(sdfg)))
state.add_edge(t, 'b', B, None,
dace.Memlet.from_array(B.data, B.desc(sdfg)))
sdfg.validate()
self.fail('Failed to detect invalid tasklet name')
except dace.sdfg.InvalidSDFGNodeError as ex:
print('Exception caught:', ex)
# Connector
def test_connector(self):
try:
sdfg = dace.SDFG('ok')
state = sdfg.add_state('also_ok')
A = state.add_array('A', [1], dace.float32)
B = state.add_array('B', [1], dace.float32)
t = state.add_tasklet('tasklet', {'$a'}, {' b'}, '')
state.add_edge(A, None, t, '$a',
dace.Memlet.from_array(A.data, A.desc(sdfg)))
state.add_edge(t, ' b', B, None,
dace.Memlet.from_array(B.data, B.desc(sdfg)))
sdfg.validate()
self.fail('Failed to detect invalid connectors')
except dace.sdfg.InvalidSDFGError as ex:
print('Exception caught:', ex)
# Interstate edge
def test_interstate_edge(self):
try:
sdfg = dace.SDFG('ok')
state = sdfg.add_state('also_ok')
A = state.add_array('A', [1], dace.float32)
B = state.add_array('B', [1], dace.float32)
t = state.add_tasklet('tasklet', {'a'}, {'b'}, 'b = a')
state.add_edge(A, None, t, 'a',
dace.Memlet.from_array(A.data, A.desc(sdfg)))
state.add_edge(t, 'b', B, None,
dace.Memlet.from_array(B.data, B.desc(sdfg)))
sdfg.add_edge(
state, state, dace.InterstateEdge(assignments={'%5': '1'}))
sdfg.validate()
self.fail('Failed to detect invalid interstate edge')
except dace.sdfg.InvalidSDFGInterstateEdgeError as ex:
print('Exception caught:', ex)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
94125c9112a64584e17d1955f1b02efaedd6fcd0 | 2da8bcfb9a72e507812a8723e38ad6d030c300f1 | /simplify_path_71.py | ac6e8a021f9a97b8369035d457680de94b0e3a0f | [] | no_license | aditya-doshatti/Leetcode | 1a4e0f391a7d6ca2d7f8fdc35e535f4ec10fb634 | eed20da07896db471ea6582785335e52d4f04f85 | refs/heads/master | 2023-04-06T02:18:57.287263 | 2023-03-17T03:08:42 | 2023-03-17T03:08:42 | 218,408,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,398 | py | '''
71. Simplify Path
Medium
Given a string path, which is an absolute path (starting with a slash '/') to a file or directory in a Unix-style file system, convert it to the simplified canonical path.
In a Unix-style file system, a period '.' refers to the current directory, a double period '..' refers to the directory up a level, and any multiple consecutive slashes (i.e. '//') are treated as a single slash '/'. For this problem, any other format of periods such as '...' are treated as file/directory names.
The canonical path should have the following format:
The path starts with a single slash '/'.
Any two directories are separated by a single slash '/'.
The path does not end with a trailing '/'.
The path only contains the directories on the path from the root directory to the target file or directory (i.e., no period '.' or double period '..')
Return the simplified canonical path.
Example 1:
Input: path = "/home/"
Output: "/home"
Explanation: Note that there is no trailing slash after the last directory name.
https://leetcode.com/problems/simplify-path/
'''
class Solution:
def simplifyPath(self, path: str) -> str:
stack = []
for val in path[1:].split('/'):
if val == '..':
if stack:
stack.pop()
elif val and val !='.':
stack.append(val)
return '/' + '/'.join(stack)
| [
"[email protected]"
] | |
390175f4e92c1ae351811ad85ecce0a6c1de7920 | 241cebd26fbcbd20bae804fd868722b2673328fc | /histogram_2002_r75.py | 64b8fb63166cbdb365a64badb8c95216fdfd1d5c | [] | no_license | shouldsee/golly_utils | b3339e9ba4e5213e98ec1b35755cd605e3f93df8 | 03959f0c593d4a811ba20f2372d6663d126dbab2 | refs/heads/master | 2021-01-19T11:04:25.661858 | 2018-04-01T13:19:51 | 2018-04-01T13:19:51 | 82,230,847 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,914 | py | # Creates a histogram plot showing the frequencies of all cell states
# in the current selection (if one exists) or the entire pattern.
# Author: Andrew Trevorrow ([email protected]), September 2009.
import golly as g
import math
from glife import getminbox, rect, rccw, pattern
from glife.text import make_text
from time import time
# --------------------------------------------------------------------
barwd = 40 # width of each bar
# length of axes
xlen = g.numstates() * barwd
ylen = 500
totalcells = 0
# --------------------------------------------------------------------
def draw_line(x1, y1, x2, y2, state = 1):
# draw a line of cells in given state from x1,y1 to x2,y2
# using Bresenham's algorithm
g.setcell(x1, y1, state)
if x1 == x2 and y1 == y2: return
dx = x2 - x1
ax = abs(dx) * 2
sx = 1
if dx < 0: sx = -1
dy = y2 - y1
ay = abs(dy) * 2
sy = 1
if dy < 0: sy = -1
if ax > ay:
d = ay - (ax / 2)
while x1 != x2:
g.setcell(x1, y1, state)
if d >= 0:
y1 += sy
d -= ax
x1 += sx
d += ay
else:
d = ax - (ay / 2)
while y1 != y2:
g.setcell(x1, y1, state)
if d >= 0:
x1 += sx
d -= ay
y1 += sy
d += ax
g.setcell(x2, y2, state)
# --------------------------------------------------------------------
def color_text(string, extrastate):
t = make_text(string, "mono")
bbox = getminbox(t)
# convert two-state pattern to multi-state and set state to extrastate
mlist = []
tlist = list(t)
for i in xrange(0, len(tlist), 2):
mlist.append(tlist[i])
mlist.append(tlist[i+1])
mlist.append(extrastate)
if len(mlist) % 2 == 0: mlist.append(0)
p = pattern(mlist)
return p, bbox.wd, bbox.ht
# --------------------------------------------------------------------
def draw_bar(state, extrastate):
barht = int( float(ylen) * float(statecount[state]) / float(totalcells) )
x = barwd * state
draw_line(x, 0, x, -barht, extrastate)
draw_line(x, -barht, x+barwd, -barht, extrastate)
draw_line(x+barwd, 0, x+barwd, -barht, extrastate)
if barht > 1:
# fill bar with corresponding color
x1 = x + 1
x2 = x + barwd - 1
for y in xrange(barht - 1):
draw_line(x1, -(y+1), x2, -(y+1), state)
if statecount[state] > 0:
# show count on top of bar
t, twd, tht = color_text(str(statecount[state]), extrastate)
t.put(barwd * (state+1) - barwd/2 - twd/2, -barht - tht - 3)
# --------------------------------------------------------------------
if g.empty(): g.exit("There is no pattern.")
if g.numstates() == 256: g.exit("No room for extra state.")
# check that a layer is available for the histogram
histname = "histogram"
histlayer = -1
for i in xrange(g.numlayers()):
if g.getname(i) == histname:
histlayer = i
break
if histlayer == -1 and g.numlayers() == g.maxlayers():
g.exit("You need to delete a layer.")
# use selection rect if it exists, otherwise use pattern bounds
label = "Selection"
r = rect( g.getselrect() )
if r.empty:
label = "Pattern"
r = rect( g.getrect() )
# count all cell states in r
g.show("Counting cell states...")
counted = 0
totalcells = r.wd * r.ht
statecount = [0] * g.numstates()
oldsecs = time()
for row in xrange(r.top, r.top + r.height):
for col in xrange(r.left, r.left + r.width):
counted += 1
statecount[g.getcell(col,row)] += 1
newsecs = time()
if newsecs - oldsecs >= 1.0: # show % done every sec
oldsecs = newsecs
done = 100.0 * float(counted) / float(totalcells)
g.show("Counting cell states... %.2f%%" % done)
g.dokey( g.getkey() )
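# compress the state counts onto a log2 scale before plotting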
statecount=[int(10*math.log((x+1),2)) for x in statecount]
totalcells=sum(statecount)
if statecount[0] == counted: g.exit("Selection is empty.")
# save current layer's info before we switch layers
currname = g.getname()
currcursor = g.getcursor()
currcolors = g.getcolors()
currstates = g.numstates()
deads, deadr, deadg, deadb = g.getcolors(0)
# create histogram in separate layer
g.setoption("stacklayers", 0)
g.setoption("tilelayers", 0)
g.setoption("showlayerbar", 1)
if histlayer == -1:
histlayer = g.addlayer()
else:
g.setlayer(histlayer)
g.new(histname)
g.setcursor(currcursor)
# use a Generations rule so we can append extra state for drawing text & lines
g.setrule("//" + str(currstates+1))
extrastate = currstates
currcolors.append(extrastate)
if (deadr + deadg + deadb) / 3 > 128:
# use black if light background
currcolors.append(0)
currcolors.append(0)
currcolors.append(0)
else:
# use white if dark background
currcolors.append(255)
currcolors.append(255)
currcolors.append(255)
g.setcolors(currcolors)
# draw axes with origin at 0,0
draw_line(0, 0, xlen, 0, extrastate)
draw_line(0, 0, 0, -ylen, extrastate)
# add annotation using mono-spaced ASCII font
t, twd, tht = color_text("Pattern name: "+currname, extrastate)
t.put(0, -ylen - 30 - tht)
t, twd, tht = color_text("%s size: %d x %d (%d cells)" %
(label, r.wd, r.ht, totalcells), extrastate)
t.put(0, -ylen - 15 - tht)
t, twd, tht = color_text("% FREQUENCY", extrastate)
t.put(-35 - tht, -(ylen - twd)/2, rccw)
for perc in xrange(0, 101, 10):
t, twd, tht = color_text(str(perc), extrastate)
y = -perc * (ylen/100)
t.put(-twd - 10, y - tht/2)
### draw_line(-3, y, 0, y, extrastate)
# draw dotted horizontal line from 0 to xlen
for x in xrange(0, xlen, 2): g.setcell(x, y, extrastate)
t, twd, tht = color_text("STATE", extrastate)
t.put((xlen - twd)/2, 30)
for state in xrange(extrastate):
t, twd, tht = color_text(str(state), extrastate)
t.put(barwd * (state+1) - barwd/2 - twd/2, 10)
draw_bar(state, extrastate)
# display result at scale 1:1
g.fit()
g.setmag(0)
g.show("")
| [
"[email protected]"
] | |
19605ba78e49b3853aa764606e01d48cf28335f0 | dc940e2aa628eff693af36584cfad935990ebe7d | /v3.1.0/getBookTXT.py | 4274ac9cf37fbcba256bc4cd576bfa8e5e20c9b8 | [] | no_license | 520wsl/getXs8Novels | 865572ea488e0bf3d4e21664eb576237b6dd18be | ecf6d0bc5dfdbe4b5c3e8a9aac313bf7abce614b | refs/heads/master | 2020-04-18T00:59:56.777416 | 2019-02-15T08:52:11 | 2019-02-15T08:52:11 | 167,101,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,358 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = 'Article scraping'
__author__ = 'Mad Dragon'
__mtime__ = '2019/1/12'
# I don't know what youthful recklessness means; I only know that the winner takes all
┏┓ ┏┓
┏┛┻━━━┛┻┓
┃ ☃ ┃
┃ ┳┛ ┗┳ ┃
┃ ┻ ┃
┗━┓ ┏━┛
┃ ┗━━━┓
┃ 神兽保佑 ┣┓
┃ 永无BUG! ┏┛
┗┓┓┏━┳┓┏┛
┃┫┫ ┃┫┫
┗┻┛ ┗┻┛
"""
import moment
import time
from tool.GetBookInfoTool import GetBookInfoTool
from tool.SaveBookInfoToMySqlTool import SaveBookInfoToMySqlToo
from public.MySqlTool import MySqlTool
from public.Logger import Logger
from public.DataTool import DataTool
from public.RedisTool import RedisTool
class GetBookTXT(object):
def __init__(self, getBookIdsListSize, rdsKeyName):
self.b_getBookIdsListSize = int(getBookIdsListSize)
self.b_rdsKeyName = rdsKeyName
self.b_title = 'getBookTXT'
self.b_second = 1
self.b_timeStr = moment.now().format('YYYY-MM-DD-HH-mm-ss')
self.dataToo = DataTool(logName=self.b_title, second=self.b_second, timeStr=self.b_timeStr)
self.mySql = MySqlTool(logName=self.dataToo.initLogName())
self.logger = Logger(logname=self.dataToo.initLogName(), loglevel=1, logger=self.b_title).getlog()
self.rds = RedisTool()
self.getBookInfoToo = GetBookInfoTool(second=self.b_second, dataToo=self.dataToo, logger=self.logger)
self.saveBookInfoToMySqlToo = SaveBookInfoToMySqlToo(second=self.b_second, logger=self.logger,
getBookInfoToo=self.getBookInfoToo,
mySql=self.mySql, dataToo=self.dataToo)
def target(self):
links = []
for i in range(self.b_getBookIdsListSize):
link = self.rds.r.lpop(self.b_rdsKeyName)
if link != None:
link = link.decode(encoding='utf-8')
links.append(link)
return links
def contentsLoad(self):
links = self.target()
if len(links) <= 0:
            self.logger.debug('bookTxtLoad: no data\n')
return
for item in links:
self.logger.debug(item)
self.saveBookInfoToMySqlToo.saveText(link=item)
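        # isOk() re-enters contentsLoad(), so the worker keeps draining the Redis list until it is empty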
self.isOk()
def isOk(self):
self.contentsLoad()
if __name__ == '__main__':
rdsKeyName = 'bookIdsList3'
    getBookIdsListSize = input("How many links to fetch per batch (max 1000): >>")
maxCatalogNex = 1
print(
        '\n\nConfirm parameters: rdsKeyName : %s | getBookIdsListSize : %s \n\n' % (rdsKeyName, getBookIdsListSize))
time.sleep(1)
    isStart = input("Start? (y/n): >>")
if (isStart == 'y'):
book = GetBookTXT(getBookIdsListSize=getBookIdsListSize, rdsKeyName=rdsKeyName)
book.contentsLoad()
else:
        print('Scraping cancelled')
| [
"[email protected]"
] | |
8cef065bb4c5e40d9a10b44e754dc7f3bd86eee2 | e5e9ee9e4db2e400e7f87647501ee412c13d76e5 | /python/open cv/magic_cloth.py | 36c4deda71edb9cb0519277e6e8dc032facd3f67 | [] | no_license | beingveera/whole-python | 524441eec44379c36cb1cfeccdbc65bf1c15d2f6 | 3f2b3cb7528afb9605ab6f9d4d2efc856a247af5 | refs/heads/main | 2023-05-15T06:28:03.058105 | 2021-06-05T09:37:47 | 2021-06-05T09:37:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,162 | py | '''
import cv2
import time
import numpy as np
cap = cv2.VideoCapture(0)
time.sleep(2)
background=0
#capturing the background
for i in range(30):
ret,background=cap.read()
while(cap.isOpened()):
ret,img = cap.read()
if not ret:
break
#if the image is flipped, use image = np.flip(image, axis = 1)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_red=np.array([0,120,70])
upper_red=np.array([10,255,255])
mask1=cv2.inRange(hsv,lower_red,upper_red)
lower_red=np.array([170,120,70])
upper_red=np.array([180,255,255])
mask2=cv2.inRange(hsv,lower_red,upper_red)
mask1 = mask1 + mask2
mask1=cv2.morphologyEx(mask1,cv2.MORPH_OPEN,np.ones((3,3),np.uint8),iterations=2)
mask1=cv2.morphologyEx(mask1,cv2.MORPH_DILATE,np.ones((3,3),np.uint8),iterations=1)
mask2=cv2.bitwise_not(mask1)
res1=cv2.bitwise_and(background,background,mask=mask1)
res2=cv2.bitwise_and(img,img,mask=mask2)
final_output=cv2.addWeighted(res1,1,res2,1,0)
cv2.imshow('Eureka !!', final_output)
k=cv2.waitKey(10)
if k==27:
break
cap.release()
cv2.destroyAllWindows()
'''
import cv2
import numpy as np
import time
cap = cv2.VideoCapture(0)
time.sleep(2)
background = 0
for i in range(30):
ret, background = cap.read()
while (cap.isOpened()):
ret, img = cap.read()
if not ret:
break
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
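    # note: despite the "red" variable names, the hue ranges below (80-90 and 110-120) pick out green/blue tones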
lower_red = np.array([80, 125, 50])
upper_red = np.array([90, 255,255])
mask1 = cv2.inRange(hsv, lower_red, upper_red)
lower_red = np.array([110, 120, 70])
upper_red = np.array([120, 255, 255])
mask2 = cv2.inRange(hsv, lower_red, upper_red)
mask1 = mask1 + mask2
mask1 = cv2.morphologyEx(mask1, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))
mask1 = cv2.morphologyEx(mask1, cv2.MORPH_DILATE, np.ones((3, 3), np.uint8))
mask2 = cv2.bitwise_not(mask1)
res1 = cv2.bitwise_and(img, img, mask=mask2)
res2 = cv2.bitwise_and(background, background, mask=mask1)
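    # res1 keeps the live frame outside the cloak; res2 pastes the pre-recorded background where the cloak is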
final_output=cv2.addWeighted(res1, 1, res2, 1, 0)
cv2.imshow('Magical Cloack' ,final_output)
k=cv2.waitKey(1)
if k==27:
break
cap.release()
cv2.destroyAllWindows()
exit()
| [
"[email protected]"
] | |
36951d7ee0ef3e7005163dbe47389a4623d839f5 | 7df2816cfbc7f48bfbab7ff37626a88296c39713 | /Phần 2 Xác Xuất/Bai8.py | 43539a2abbe4988bf2304e79727a492779968e48 | [] | no_license | maithinhatlan/BaiTapTuan7 | 7e49de0b9d5de1bb5eba8143a767dea62aca3ca5 | a6c9c1a425e296e878513e496b17f6352f95a84e | refs/heads/master | 2023-08-14T21:13:45.546548 | 2021-10-09T10:37:11 | 2021-10-09T10:37:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | import numpy as np
#numpy.random.uniform
u = np.random.uniform(size=4)
print("u = ",u) | [
"[email protected]"
] | |
5b1cc4a6717f7b454656c18eceed3a052a2f5586 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_199/311.py | b83b53e1e53057ef2a8ec9616114038309e9174b | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | import sys
input = sys.stdin
def solve(S,k):
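    # greedy left-to-right: whenever a '-' is found, flip the window of k pancakes starting there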
S = list(S)
count = 0
for i in range(len(S)-k+1):
if S[i]=='-':
for j in range(k):
S[i+j] = '-' if S[i+j]=='+' else '+'
count+=1
for j in range(k):
if S[-j]=='-':
return 'IMPOSSIBLE'
return count
for case in range(int(input.readline())):
values = input.readline().split()
print("Case #"+ str(case+1) +":", solve(values[0],int(values[1])))
| [
"[email protected]"
] | |
2db407d8e2e6ffc8407c3b7340d1f4b8c3f2563d | b99bbc50ab1d039948ccf853963ae044a97498fb | /src/api/com_interface/urls/live.py | 5c48f7ee8cbec7860dd179d7c5a533b6aabf7838 | [] | no_license | fan1018wen/Alpha | 26899cc0eb6761bf6bd8089e7d12716c9e7ae01e | c50def8cde58fd4663032b860eb058302cbac6da | refs/heads/master | 2021-05-12T12:54:15.747220 | 2017-10-11T10:58:51 | 2017-10-11T10:58:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@date: 2016-06-13
@author: Devin
"""
from django.conf.urls import url
from rest_framework.urlpatterns import format_suffix_patterns
from ..views import live
urlpatterns = [
url(r'^$', live.LiveViewList.as_view()),
url(r'^(?P<pk>\S+)$', live.LiveView.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
| [
"[email protected]"
] | |
e908fff3d5d94413a53b9568c9463d0369bdf469 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/ec_11423-2311/sdB_EC_11423-2311_coadd.py | 236e5bdcb2b2d2516b7158fc1ff8a94b2e130934 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[176.209125,-23.471569], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_EC_11423-2311/sdB_EC_11423-2311_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_EC_11423-2311/sdB_EC_11423-2311_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
f5a7b3700c827074a7351ba3ab12a4f25393f769 | f8a317ace8e91d5b962586953bc38ef6ff2d3a20 | /src/finanzas/authentication/serializers.py | a7ed97b3d5c459ca39609412c46d4b1f1b884296 | [
"Apache-2.0"
] | permissive | jualjiman/finanzas | e63cae335d33b773874c913d23fc54a21a7ea5e9 | a1af6f1a212a3cf172bf84eb668245dbffeb33a9 | refs/heads/master | 2020-05-18T17:15:48.246703 | 2015-08-10T02:38:08 | 2015-08-10T02:38:08 | 40,459,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,541 | py | from django.contrib.auth import update_session_auth_hash
from rest_framework import serializers
from finanzas.authentication.models import Account
class AccountSerializer(serializers.ModelSerializer):
password = serializers.CharField(
write_only=True,
required=False
)
confirm_password = serializers.CharField(
write_only=True,
required=False
)
class Meta:
model = Account
fields = (
'id', 'email', 'username',
'created_at', 'updated_at',
'first_name', 'last_name', 'password',
'confirm_password'
)
read_only_fields = (
'created_at',
'updated_at'
)
def create(self, validated_data):
return Account.objects.create(**validated_data)
def update(self, instance, validated_data):
instance.username = validated_data.get(
'username',
instance.username
)
password = validated_data.get(
'password',
None
)
confirm_password = validated_data.get(
'confirm_password',
None
)
if password and confirm_password and password == confirm_password:
instance.set_password(password)
instance.save()
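            # keep the user's current session valid after the password hash changes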
update_session_auth_hash(
self.context.get('request'),
instance
)
return instance
| [
"[email protected]"
] | |
546b04313b4d6e5b2d9dd2d7e686e24a9dbd28b8 | cda34a391e1d3fd96cdff8ea64d5dd73dc0e83e4 | /educa/courses/models.py | 63db5d5651ab6ae7c028bcc918bcd834624cf9b5 | [
"MIT"
] | permissive | prakharchoudhary/DjangoSpree | ee824dd44c015984a85f68105e40e1202093f757 | 20c5a1d9eb5d00288ebe16d238525ba8cc5fad09 | refs/heads/master | 2021-01-02T09:46:43.599914 | 2018-06-26T08:14:02 | 2018-06-26T08:14:02 | 99,300,583 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,440 | py | from django.db import models
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from .fields import OrderField
# Create your models here.
class Subject(models.Model):
title = models.CharField(max_length=200)
slug = models.SlugField(max_length=200, unique=True)
class Meta:
ordering = ('title',)
def __str__(self):
return self.title
class Course(models.Model):
owner = models.ForeignKey(User,
related_name='courses_created')
subject = models.ForeignKey(Subject,
related_name='courses')
title = models.CharField(max_length=200)
slug = models.SlugField(max_length=200, unique=True)
overview = models.TextField()
created = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ('-created',)
def __str__(self):
return self.title
class Module(models.Model):
course = models.ForeignKey(Course,
related_name='modules')
title = models.CharField(max_length=200)
description = models.TextField(blank=True)
order = OrderField(blank=True, for_fields=['course'])
class Meta:
ordering = ['order']
def __str__(self):
return '{}. {}'.format(self.order, self.title)
class Content(models.Model):
"""
- content_type : A ForeignKey field to the ContentType model
    - object_id : A PositiveIntegerField that stores the primary key of the related object
    - item : A GenericForeignKey to the related object, built by combining the two previous fields
"""
module = models.ForeignKey(Module,
related_name='contents')
content_type = models.ForeignKey(ContentType,
limit_choices_to={'model__in': ('text',
'video', 'image', 'file')})
object_id = models.PositiveIntegerField()
item = GenericForeignKey('content_type', 'object_id')
order = OrderField(blank=True, for_fields=['module'])
class Meta:
ordering = ['order']
class ItemBase(models.Model):
owner = models.ForeignKey(User,
related_name='%(class)s_related')
title = models.CharField(max_length=250)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
def __str__(self):
return self.title
class Text(ItemBase):
content = models.TextField()
class File(ItemBase):
file = models.FileField(upload_to='files')
class Image(ItemBase):
file = models.FileField(upload_to='images')
class Video(ItemBase):
url = models.URLField() | [
"[email protected]"
] | |
f637b0557e6e594194bba1fd65a263d48d42cad6 | 6a95b330e1beec08b917ff45eccfd6be3fd4629f | /kubernetes/test/test_v1_config_map_projection.py | daaae0c01f9732a6e5e977ef6d7d6d9335d2fbe3 | [
"Apache-2.0"
] | permissive | TokkoLabs/client-python | f4a83d6540e64861b59e322c951380a670578d7f | f1ad9c6889105d8510472606c98f8d3807f82020 | refs/heads/master | 2023-07-14T01:36:46.152341 | 2017-12-21T21:32:11 | 2017-12-21T21:32:11 | 115,042,671 | 0 | 0 | Apache-2.0 | 2021-08-06T03:29:17 | 2017-12-21T20:05:15 | Python | UTF-8 | Python | false | false | 1,003 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_config_map_projection import V1ConfigMapProjection
class TestV1ConfigMapProjection(unittest.TestCase):
""" V1ConfigMapProjection unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1ConfigMapProjection(self):
"""
Test V1ConfigMapProjection
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1_config_map_projection.V1ConfigMapProjection()
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
e856163f7b9d175e64f44a92b7a8655da0287f5a | cf7b827958166c8569eb58deb511cc3f07567741 | /in_Python/0832 Flipping an Image.py | 78099861650cb1394f6cac05f0f76c2d739941cc | [] | no_license | YangLiyli131/Leetcode2020 | e4e36eb36b1983f73b0e733455b4a7953dfebe6d | 20623defecf65cbc35b194d8b60d8b211816ee4f | refs/heads/master | 2023-08-22T06:00:55.924112 | 2021-09-18T19:04:15 | 2021-09-18T19:04:15 | 251,426,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | class Solution(object):
def iv(self,t):
if t == 1:
return 0
else:
return 1
def flipAndInvertImage(self, A):
"""
:type A: List[List[int]]
:rtype: List[List[int]]
"""
row = len(A)
col = len(A[0])
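        # two-pointer sweep: reverse each row in place while inverting both swapped cells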
for r in range(row):
cur_row = A[r]
i = 0
j = col-1
while i <= j:
temp = cur_row[i]
cur_row[i] = self.iv(cur_row[j])
cur_row[j] = self.iv(temp)
i += 1
j -= 1
A[r] = cur_row
return A
| [
"[email protected]"
] | |
b22d08a95310a44da7bc43077102ca35e025dda5 | dbf48e804e1792999854832e64a7dae9f42f71e2 | /Spikes/spikedetekt2/spikedetekt2/core/tests/test_script.py | 4a48caf55fc470a9bf6bde40d574f657ecf725dc | [] | no_license | sapphire008/Python | 15d3d7885ac82333654b6729c2a57ed760e796a8 | b2783eabb1987091051614b8f12a4778e158a90b | refs/heads/master | 2023-08-09T04:38:43.077285 | 2023-07-28T18:36:03 | 2023-07-28T18:36:03 | 9,880,648 | 15 | 7 | null | null | null | null | UTF-8 | Python | false | false | 3,079 | py | """Main module tests."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
import os.path as op
import numpy as np
import tempfile
from kwiklib import (excerpts, get_params, pydict_to_python, get_filenames,
itervalues, create_trace, Experiment)
from spikedetekt2.core.script import run_spikedetekt
# -----------------------------------------------------------------------------
# Fixtures
# -----------------------------------------------------------------------------
DIRPATH = None
prm_filename = 'myexperiment.prm'
prb_filename = 'myprobe.prb'
dat_filename = 'myexperiment.dat'
name = 'myexperiment'
sample_rate = 20000.
duration = 1.
nchannels = 8
nsamples = int(sample_rate * duration)
def setup():
global DIRPATH
DIRPATH = tempfile.mkdtemp()
# Create DAT file.
raw_data = create_trace(nsamples, nchannels)
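    # superimpose 10 bursts of large random values on the baseline trace so detection has clear events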
for start, end in excerpts(nsamples, nexcerpts=10, excerpt_size=10):
raw_data[start:end] += np.random.randint(low=-10000, high=10000,
size=(10, nchannels))
raw_data.tofile(op.join(DIRPATH, dat_filename))
# Create PRM file.
prm = get_params(**{
'raw_data_files': dat_filename,
'experiment_name': name,
'nchannels': nchannels,
'sample_rate': sample_rate,
'detect_spikes': 'positive',
'prb_file': prb_filename,
})
prm_contents = pydict_to_python(prm)
with open(op.join(DIRPATH, prm_filename), 'w') as f:
f.write(prm_contents)
# Create PRB file.
prb_contents = """
nchannels = %NCHANNELS%
channel_groups = {0:
{
'channels': list(range(nchannels)),
'graph': [(i, i + 1) for i in range(nchannels - 1)],
}
}""".replace('%NCHANNELS%', str(nchannels)).replace(' ', '')
with open(op.join(DIRPATH, prb_filename), 'w') as f:
f.write(prb_contents)
def teardown():
os.remove(op.join(DIRPATH, prm_filename))
os.remove(op.join(DIRPATH, prb_filename))
files = get_filenames(name, dir=DIRPATH)
[os.remove(path) for path in itervalues(files)]
# -----------------------------------------------------------------------------
# Main tests
# -----------------------------------------------------------------------------
def test_main_1():
run_spikedetekt(op.join(DIRPATH, prm_filename))
# Open the data files.
with Experiment(name, dir=DIRPATH) as exp:
nspikes = len(exp.channel_groups[0].spikes)
assert exp.channel_groups[0].spikes.clusters.main.shape[0] == nspikes
assert exp.channel_groups[0].spikes.features_masks.shape[0] == nspikes
assert exp.channel_groups[0].spikes.waveforms_filtered.shape[0] == nspikes
fm = exp.channel_groups[0].spikes.features_masks
assert fm[:,:,0].min() < fm[:,:,0].max()
# Make sure the masks are not all null.
assert fm[:,:,1].max() > 0
| [
"[email protected]"
] | |
c4560ba0e5f05479e057ca93209cfac3c81a3528 | c3ace26cd05f3dc2097b2302ff9f5078468df8b3 | /flask-app/models.py | 1edc8fe6f05a14a0b72284518d949d87002760f1 | [] | no_license | rid47/lecture4 | ccdf5ff49c99eb28c098c9169648cbcbee66207e | 398b1f1d94ad19bf9abce843e1750621297f6e4e | refs/heads/master | 2022-04-24T16:56:42.697938 | 2020-04-26T08:35:03 | 2020-04-26T08:35:03 | 258,978,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 781 | py | from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class Flight(db.Model):
__tablename__ = "flights"
id = db.Column(db.Integer, primary_key=True)
origin = db.Column(db.String, nullable=False)
destination = db.Column(db.String, nullable=False)
duration = db.Column(db.Integer, nullable=False)
passengers = db.relationship("Passenger", backref="flight", lazy=True)
def add_passenger(self, name):
p = Passenger(name=name, flight_id=self.id)
db.session.add(p)
db.session.commit()
class Passenger(db.Model):
__tablename__ = "passengers"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, nullable=True)
flight_id = db.Column(db.Integer, db.ForeignKey("flights.id"), nullable=False)
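
# --- Hedged usage sketch (not part of the original file) ---
# Shows one way these models could be wired to a Flask app. The SQLite URL
# and the example flight/passenger values are illustrative assumptions, not
# values taken from this repository.
if __name__ == "__main__":
    from flask import Flask

    app = Flask(__name__)
    app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///flights.db"
    app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
    db.init_app(app)

    with app.app_context():
        db.create_all()
        flight = Flight(origin="New York", destination="Paris", duration=415)
        db.session.add(flight)
        db.session.commit()
        flight.add_passenger("Alice")
        print(flight.passengers)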
| [
"[email protected]"
] | |
1d121b81f5661077acf9d3396c2d18b2a8aafe47 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5753053697277952_1/Python/Caust1c/a1.py | bf1d9a842fdbb5f014850c2d3225eece06188450 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 5,426 | py | ### Set the input and output file names
import time
import datetime
import string
import operator
filename = 'A-large'
input_filename = filename + '.in'
output_filename = filename + '.out.' + datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d-%H%M%S') + '.txt'
def findmax(arr_int):
m = max(arr_int)
return [i for i, j in enumerate(arr_int) if j == m]
### Open input file for reading
with open(input_filename) as f:
lines = f.read().splitlines()
### Open output file for writing
with open(output_filename, 'w') as output:
######################################################
### Initialise variables from first line of the file
######################################################
vars = lines[0].split(' ')
cases = int(vars[0]) # number of cases
print(str(cases) + ' cases detected.') # [soft validation]
lineNum = 1 # first case starts here
caseNum = 0 # for counting the num of cases
caseSize_r = 1 # number of rows in each case; default = 1
caseSize_c = 1 # number of columns in each case; default = 1
infoLines = True # Toggle according to question
#infoLines = False # Toggle according to question
### i.e. infoLines == True
if infoLines:
while lineNum < len(lines):
### A new case! Initialize some variables
caseNum += 1 # case number count
party_count = int(lines[lineNum].split(' ')[0])
party_sizes = map(int, lines[lineNum + 1].split(' '))
party_names = string.uppercase[:party_count]
room_total = sum(party_sizes)
print('--------')
print('Case #%d: %s' % (caseNum, " ".join(str(x) for x in party_names)))
print('Case #%d: %s' % (caseNum, " ".join(str(x) for x in party_sizes)))
print('Case #%d: %d total people' % (caseNum, room_total))
print('Case #%d: maxcases in index: %s' % (caseNum, " ".join(str(x) for x in findmax(party_sizes))))
print('Case #%d: %d maxcases' % (caseNum, len(findmax(party_sizes))))
### Do the Work!
### TODO!
myAns = ''
while room_total > 0:
#if room_total == 1:
# myAns = join(myAns, ' ',
if room_total >= 4:
party_maxes = findmax(party_sizes)
if len(party_maxes) == 1:
print('step. 4+ remain (1 max)')
party_sizes[party_maxes[0]] += -1
myAns = myAns + (' %s' % (party_names[party_maxes[0]]))
print('%s' % (party_names[party_maxes[0]]))
print('%s' % (" ".join(str(x) for x in party_sizes)))
room_total += -1
else:
print('step. 4+ remain (2+ max)')
party_sizes[party_maxes[0]] += -1
party_sizes[party_maxes[1]] += -1
myAns = myAns + (' %s%s' % (party_names[party_maxes[0]],party_names[party_maxes[1]]))
print('%s%s' % (party_names[party_maxes[0]],party_names[party_maxes[1]]))
print('%s' % (" ".join(str(x) for x in party_sizes)))
room_total += -2
elif room_total == 3:
print('step. 3 remain')
party_maxes = findmax(party_sizes)
party_sizes[party_maxes[0]] += -1
myAns = myAns + (' %s' % (party_names[party_maxes[0]]))
print('%s' % (party_names[party_maxes[0]]))
print('%s' % (" ".join(str(x) for x in party_sizes)))
room_total += -1
elif room_total == 2:
print('step. 2 remain')
party_maxes = findmax(party_sizes)
party_sizes[party_maxes[0]] += -1
party_sizes[party_maxes[1]] += -1
myAns = myAns + (' %s%s' % (party_names[party_maxes[0]],party_names[party_maxes[1]]))
print('%s%s' % (party_names[party_maxes[0]],party_names[party_maxes[1]]))
print('%s' % (" ".join(str(x) for x in party_sizes)))
room_total += -2
else:
print('###################ERROR')
room_total = -1
### Output myArr
print('Case #%d:%s' % (caseNum, myAns))
output.write('Case #%d:%s\n' % (caseNum, myAns))
### Step
lineNum += 2
### i.e. infoLines == False
else:
print('deadend')
### END
| [
"[email protected]"
] | |
665fbacd18a16c4a0381bbb91ebfed9745bf12a4 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02927/s950956417.py | a97f1206dbc46df6ba04a9ae44abc68733ed10d6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | m,d = map(int,input().split())
cnt = 0
for i in range(1,m+1):
for j in range(1,d+1):
#print(i,j)
        num = 0  # 0 never matches a month, so only genuine digit products count
        d2 = (j - j % 10) // 10  # tens digit of the day
        d1 = j % 10              # ones digit of the day
        if d2 >= 2 and d1 >= 2:
            num = d1 * d2
#print(i,j,num,d1,d2)
#print(i,j,num,(j-j%10),j%10)
if num == i:
cnt +=1
print(cnt) | [
"[email protected]"
] | |
24febda16aaf84019c85d59f061b3d6ff3898bc0 | 43ab33b2f50e47f5dbe322daa03c86a99e5ee77c | /test/test_od_mcomplex_type_definition_picture.py | 493605438a6e7cfdeebbd08be2e3907161a9123f | [] | no_license | Sage-Bionetworks/rcc-client | c770432de2d2950e00f7c7bd2bac22f3a81c2061 | 57c4a621aecd3a2f3f9faaa94f53b2727992a01a | refs/heads/main | 2023-02-23T05:55:39.279352 | 2021-01-21T02:06:08 | 2021-01-21T02:06:08 | 331,486,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,525 | py | # coding: utf-8
"""
nPhase REST Resource
REDCap REST API v.2 # noqa: E501
The version of the OpenAPI document: 2.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import rcc
from rcc.models.od_mcomplex_type_definition_picture import ODMcomplexTypeDefinitionPicture # noqa: E501
from rcc.rest import ApiException
class TestODMcomplexTypeDefinitionPicture(unittest.TestCase):
"""ODMcomplexTypeDefinitionPicture unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test ODMcomplexTypeDefinitionPicture
        include_optional is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = rcc.models.od_mcomplex_type_definition_picture.ODMcomplexTypeDefinitionPicture() # noqa: E501
if include_optional :
return ODMcomplexTypeDefinitionPicture(
picture_file_name = '0',
image_type = '0'
)
else :
return ODMcomplexTypeDefinitionPicture(
)
def testODMcomplexTypeDefinitionPicture(self):
"""Test ODMcomplexTypeDefinitionPicture"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
8bf53f137afc4728262b2316165a54280baa1e66 | acc244c97a943d8e2074339afa1bff1274ae4cfc | /scripts/cgat_build_report_page.py | 7cd893e0d601bb43ea071b7da33f5b20a3caf22d | [] | no_license | eromasko/cgat | 00114f4c95b439ba6595ddf2092d1a3307347401 | d82d197f3913b8d65b656c0b205ca48854fdb2a6 | refs/heads/master | 2021-01-17T09:37:17.168278 | 2015-02-20T09:03:31 | 2015-02-20T09:03:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,581 | py | '''
cgat_build_report_page.py - build report page for all projects
=======================================================================
:Author:
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
This script scans all of :file:`/ifs/projects/sftp` for :file:`index.html` files
and outputs an html formatted summary table into :file:`/ifs/projects/overview`.
Usage
-----
Example::
python cgat_build_report_page.py
Type::
python cgat_build_report_page.py --help
for command line help.
Command line options
--------------------
'''
import os
import sys
import re
import optparse
import subprocess
import CGAT.Experiment as E
import CGAT.IOTools as IOTools
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(version="%prog version: $Id: cgat_script_template.py 2871 2010-03-03 10:20:44Z andreas $",
usage=globals()["__doc__"])
parser.add_option("-p", "--path", dest="path", type="string",
help="path to scan for files [%default]")
parser.add_option("-d", "--destination", dest="destination", type="string",
help="path to deposit files into [%defaul]")
parser.set_defaults(path='/ifs/projects/sftp',
url='http://www.cgat.org/downloads/',
dest='/ifs/projects/overview')
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv)
statement = "find %s -name 'index.html'" % options.path
process = subprocess.Popen(statement,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
files = stdout.split('\n')
files.sort()
outfile = IOTools.openFile(os.path.join(options.dest, "index.html"), "w")
outfile.write( '''
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>CGAT project reports</title>
<link rel="stylesheet" href="cgat.css" type="text/css" />
<link rel="stylesheet" href="pygments.css" type="text/css" />
<link rel="shortcut icon" href="http://cgatwiki.anat.ox.ac.uk/favicon.ico">
<script type="text/javascript" src="sorttable.js"></script>
</head>
<body>
<div class="related">
<h3>Navigation</h3>
<ul>
<li><a href="index.html">CGAT Projects Overview</a> »</li>
</ul>
</div>
<div class="document">
<div class="documentwrapper">
<div class="bodywrapper">
<div class="body">
<div class="section" id="cgat-pipelines">
<H1>CGAT exported project pages</H1>
<p>
This page is for internal use only. Do not distribute outside of CGAT and
do not make this page available on the world wide web.
</p>
<table class="sortable">\n''' )
outfile.write(
'''<tr><th>Project</th><th>Report</th><th>Title</th></tr>\n''' )
for f in files:
if f == '':
continue
proj = re.search('(proj\d+)', f).groups()[0]
relpath = re.sub('.*proj\d+/', '', f)
report = re.sub('^[^/]*/', '', os.path.dirname(relpath))
lines = IOTools.openFile(f).readlines()
titles = [x for x in lines if "<title>" in x]
if titles:
title = re.search("<title>(.*)</title>", titles[0]).groups()[0]
else:
title = "NA"
if title.endswith("documentation"):
title = title[:-len("documentation")]
url = os.path.join(options.url, relpath)
outfile.write(
'<tr><td>%(proj)s</td><td><a HREF="%(url)s">%(report)s</td><td>%(title)s</td></tr>\n' % locals())
outfile.write( '''
</table>
</div>
</div>
</div>
</div>
</div>
<div class="sphinxsidebar">
<div class="sphinxsidebarwrapper">
<p class="logo"><a href="contents.html">
<img class="logo" src="cgat_logo.png" alt="Logo"/>
</a></p>
</body>
</html>\n''' )
outfile.close()
E.info('created output file %s' % outfile.name)
# write footer and output benchmark information.
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
| [
"[email protected]"
] | |
d5c02d1e01d38255df3b594ba8c9a67668b94140 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/CISCO-NDE-MIB.py | a5ec9a397658dc0d689a0edf516f9cc7c36e095b | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 5,271 | py | #
# PySNMP MIB module CISCO-NDE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-NDE-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:51:24 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion", "ConstraintsIntersection")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
InetAddress, InetAddressType = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddress", "InetAddressType")
NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
ObjectIdentity, ModuleIdentity, Integer32, IpAddress, Counter32, MibIdentifier, TimeTicks, Bits, iso, Counter64, Gauge32, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "ModuleIdentity", "Integer32", "IpAddress", "Counter32", "MibIdentifier", "TimeTicks", "Bits", "iso", "Counter64", "Gauge32", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Unsigned32")
RowStatus, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "DisplayString", "TextualConvention")
ciscoNDEMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 226))
ciscoNDEMIB.setRevisions(('2006-03-01 00:00', '2005-12-06 00:00', '2001-08-08 00:00',))
if mibBuilder.loadTexts: ciscoNDEMIB.setLastUpdated('200603010000Z')
if mibBuilder.loadTexts: ciscoNDEMIB.setOrganization('Cisco Systems, Inc.')
ciscoNDEMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 226, 1))
cndeCollectorConfiguration = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 226, 1, 1))
cndeMaxCollectors = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 226, 1, 1, 1), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cndeMaxCollectors.setStatus('current')
cndeCollectorTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 226, 1, 1, 2), )
if mibBuilder.loadTexts: cndeCollectorTable.setStatus('current')
cndeCollectorEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 226, 1, 1, 2, 1), ).setIndexNames((0, "CISCO-NDE-MIB", "cndeCollectorAddressType"), (0, "CISCO-NDE-MIB", "cndeCollectorAddress"), (0, "CISCO-NDE-MIB", "cndeCollectorPort"))
if mibBuilder.loadTexts: cndeCollectorEntry.setStatus('current')
cndeCollectorAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 226, 1, 1, 2, 1, 1), InetAddressType())
if mibBuilder.loadTexts: cndeCollectorAddressType.setStatus('current')
cndeCollectorAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 226, 1, 1, 2, 1, 2), InetAddress().subtype(subtypeSpec=ValueSizeConstraint(1, 64)))
if mibBuilder.loadTexts: cndeCollectorAddress.setStatus('current')
cndeCollectorPort = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 226, 1, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)))
if mibBuilder.loadTexts: cndeCollectorPort.setStatus('current')
cndeCollectorStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 226, 1, 1, 2, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cndeCollectorStatus.setStatus('current')
cndeMIBNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 226, 2))
cndeMIBNotifs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 226, 0))
cndeMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 226, 3))
cndeMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 226, 3, 1))
cndeMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 226, 3, 2))
cndeMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 226, 3, 1, 1)).setObjects(("CISCO-NDE-MIB", "cndeCollectorConfigurationGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cndeMIBCompliance = cndeMIBCompliance.setStatus('current')
cndeCollectorConfigurationGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 226, 3, 2, 1)).setObjects(("CISCO-NDE-MIB", "cndeMaxCollectors"), ("CISCO-NDE-MIB", "cndeCollectorStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cndeCollectorConfigurationGroup = cndeCollectorConfigurationGroup.setStatus('current')
mibBuilder.exportSymbols("CISCO-NDE-MIB", cndeMIBNotifications=cndeMIBNotifications, cndeCollectorConfiguration=cndeCollectorConfiguration, ciscoNDEMIBObjects=ciscoNDEMIBObjects, ciscoNDEMIB=ciscoNDEMIB, cndeCollectorAddressType=cndeCollectorAddressType, cndeCollectorConfigurationGroup=cndeCollectorConfigurationGroup, cndeCollectorAddress=cndeCollectorAddress, PYSNMP_MODULE_ID=ciscoNDEMIB, cndeMIBNotifs=cndeMIBNotifs, cndeCollectorEntry=cndeCollectorEntry, cndeCollectorTable=cndeCollectorTable, cndeCollectorStatus=cndeCollectorStatus, cndeMaxCollectors=cndeMaxCollectors, cndeMIBCompliances=cndeMIBCompliances, cndeMIBConformance=cndeMIBConformance, cndeMIBGroups=cndeMIBGroups, cndeCollectorPort=cndeCollectorPort, cndeMIBCompliance=cndeMIBCompliance)
| [
"[email protected]"
] | |
787e85a801a1a850d53cb69481a3ab6f9107e2b8 | c86e31e8e67ccb9a164903e394df7a444b5406de | /avg_word2vec.py | 73495851d695da43e749fa28b2040e508a22b1e8 | [] | no_license | candlewill/short_texts_sentiment_analysis | fb0e329c4c1ad6f8a72e6c2858a921913dde38b2 | 760e60e1cf430a8d0b1a313523a0c6f773a9c4c1 | refs/heads/master | 2020-04-24T15:36:51.258749 | 2015-07-14T08:28:54 | 2015-07-14T08:28:54 | 38,301,099 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,052 | py | from sklearn.cross_validation import train_test_split
from gensim.models.word2vec import Word2Vec
from load_data import load_train_data, load_processed_data
import numpy as np
from sklearn.preprocessing import MinMaxScaler
# The following idiom is useful for splitting data into train/test sets:
# train_test_split(np.array(texts), np.array(sentiment), test_size=0.2)
x_train, y_train = load_processed_data(data_type='train', stem=False)
x_test, y_test = load_processed_data(data_type='test', stem=False)
from preprocess import preprocessor as preprocess
n_dim = 100
scaling = False
# Initialize model and build vocab
imdb_w2v = Word2Vec(size=n_dim, min_count=10)
imdb_w2v.build_vocab(x_train)
# Train the model over train_reviews (this may take several minutes)
imdb_w2v.train(x_train)
# Build word vector for training set by using the average value of all word vectors in the tweet, then scale
# from load_data import load_word_embedding
# imdb_w2v = load_word_embedding()
def buildWordVector(text, size):
    # Average the word2vec vectors of all in-vocabulary words in the text.
    vec = np.zeros(size).reshape((1, size))
count = 0.
for word in text:
try:
vec += imdb_w2v[word].reshape((1, size))
count += 1.
except KeyError:
continue
if count != 0:
vec /= count
return vec
from sklearn.preprocessing import scale
train_vecs = np.concatenate([buildWordVector(z, n_dim) for z in x_train])
if scaling == True:
train_vecs = scale(train_vecs)
# Train word2vec on test tweets
# imdb_w2v.train(x_test)
# Build test tweet vectors then scale
test_vecs = np.concatenate([buildWordVector(z, n_dim) for z in x_test])
if scaling == True:
test_vecs = scale(test_vecs)
min_max_scaler = MinMaxScaler()
train_vecs = min_max_scaler.fit_transform(train_vecs)
test_vecs = min_max_scaler.fit_transform(test_vecs)
# Train a classifier (multinomial Naive Bayes from the local classifiers module) on the training vectors, then assess performance on the test set
from classifiers import gNB, mNB
from analysis import analysis_result
pre = mNB(train_vecs, y_train, test_vecs)
analysis_result(pre, y_test)
| [
"[email protected]"
] | |
b5f8821fc9ecb1f613a863bf6fbc7f174e5ca53a | 905750d3f6bf6232ffefd00ce74b4c7684d7f27e | /lmp_lib.py | b03eae6590766e65883ac744785cfa5ba362e2f7 | [] | no_license | petervanya/GeneralScripts | d1147b89defade68e68122e892e8844f7d4c0e64 | 77c0180156ceb78f08fabf7481c16be8d9aa8bfa | refs/heads/master | 2020-12-19T12:45:46.227823 | 2016-07-28T14:56:59 | 2016-07-28T14:56:59 | 40,310,828 | 0 | 2 | null | 2015-11-30T15:53:57 | 2015-08-06T14:58:49 | Python | UTF-8 | Python | false | false | 2,414 | py | #!/usr/bin/env python
"""
A collection of functions to manipulate LAMMPS files
[email protected], 11/01/16
"""
import numpy as np
# ===== print input
def header2str(N, Nbonds, atomtypes, bondtypes, L):
"""Generate LAMMPS header"""
s = "#blabla\n"
s += str(N) + " atoms\n"
s += str(Nbonds) + " bonds\n"
s += str(atomtypes) + " atom types\n"
s += str(bondtypes) + " bond types\n"
s += "\n"
s += "0.0 " + str(L) + " xlo xhi\n"
s += "0.0 " + str(L) + " ylo yhi\n"
s += "0.0 " + str(L) + " zlo zhi\n\n"
return s
def mass2str(masses):
"""Print mass dictionary into string for LAMMPS data file"""
s = "Masses\n\n"
for k, v in masses.items():
s += str(k) + " " + str(v) + "\n"
return s + "\n"
def pair_dpd_coeffs2str(coeffs):
"""
Structure:
* key: "part1 part2"
* value: [force, gamma, cutoff]
"""
s = "PairIJ Coeffs\n\n"
for k, v in coeffs.items():
s += "%s %s %s %s\n" % (str(k), str(v[0]), str(v[1]), str(v[2]))
return s + "\n"
def bond_coeffs2str(k_ij):
"""Print bond coefficients into string.
Structure:
* key: 1..4
* value [k_ij, r0]
"""
s = "Bond Coeffs\n\n"
for k, v in k_ij.items():
s += "%s %s %s\n" % (str(k), "%e" % v[0], "%e" % v[1])
return s + "\n"
def atoms2str(mat):
"""Convert atomic matrix to str, atom_type molecular
xyz_mat[:, 0] are atom ids"""
M = len(mat)
s = ""
for i in range(M):
s += "%i\t%i\t%i\t%e\t%e\t%e\n" % \
(i+1, mat[i, 0], mat[i, 1], mat[i, 2], mat[i, 3], mat[i, 4])
return s + "\n"
def bonds2str(bond_mat):
"""Convert bond matrix to string"""
M, N = bond_mat.shape
s = ""
for i in range(M):
s += str(i+1) + "\t"
for j in range(N):
s += str(bond_mat[i, j]) + "\t"
s += "\n"
return s + "\n"
# ===== manipulate output
def read_xyzfile(outfile):
"""Read one xyz outfile into a numpy matrix"""
A = open(outfile, "r").readlines()[2:]
A = [line.split() for line in A]
A = np.array(A, order="F").astype(float)
return A
def save_xyzfile(fname, mat):
"""Take xyz matrix [ids, x, y, z] and save into fname"""
N = len(mat)
with open(fname, "w") as f:
f.write(str(N) + "\nbla\n")
for i in range(N):
f.write("%i\t%f\t%f\t%f\n" % (mat[i, 0], mat[i, 1], mat[i, 2], mat[i, 3]))
| [
"[email protected]"
] |