blob_id (stringlengths 40..40) | directory_id (stringlengths 40..40) | path (stringlengths 3..616) | content_id (stringlengths 40..40) | detected_licenses (sequencelengths 0..112) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5..115) | snapshot_id (stringlengths 40..40) | revision_id (stringlengths 40..40) | branch_name (stringclasses, 777 values) | visit_date (timestamp[us], 2015-08-06 10:31:46 .. 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 .. 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 .. 2023-09-06 01:08:06) | github_id (int64, 4.92k .. 681M, nullable ⌀) | star_events_count (int64, 0 .. 209k) | fork_events_count (int64, 0 .. 110k) | gha_license_id (stringclasses, 22 values) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 .. 2023-09-14 21:59:50, nullable ⌀) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 .. 2023-08-21 12:35:19, nullable ⌀) | gha_language (stringclasses, 149 values) | src_encoding (stringclasses, 26 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 3 .. 10.2M) | extension (stringclasses, 188 values) | content (stringlengths 3..10.2M) | authors (sequencelengths 1..1) | author_id (stringlengths 1..132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4fd9bed4328f8591ad62960574eed263df888ec7 | f618cb7a1b1f49c02396a2bb969cc7518fd163ab | /doc/_gallery/1_3_1_noisy_chirp_wv.py | ba10a534a72d30bbb4a32f5780d048b7422177fb | [] | no_license | kingjr/pytftb | b968b8e2fc294a19cec8bf63e7d289f368ddf194 | 0bcacf5eef46bd173d90a23c00a7f4b8ee284b22 | refs/heads/master | 2021-01-16T22:27:05.587174 | 2015-06-25T05:16:02 | 2015-06-25T05:16:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2015 jaidev <jaidev@newton>
#
# Distributed under terms of the MIT license.
"""
"""
from tftb.generators import fmlin, sigmerge, noisecg
from tftb.processing.cohen import WignerVilleDistribution
# Generate a chirp signal
n_points = 128
fmin, fmax = 0.0, 0.5
signal, _ = fmlin(n_points, fmin, fmax)
# Noisy chirp
noisy_signal = sigmerge(signal, noisecg(128), 0)
# Wigner-Ville spectrum of noisy chirp.
wvd = WignerVilleDistribution(noisy_signal)
wvd.run()
wvd.plot(kind='contour')
| [
"[email protected]"
] | |
8b26447125e32014c72172e771be247c148428e0 | 4e5ddba389409b4b62444a4eac9903635b57e230 | /rastervision/backend/fastai_utils.py | 1df6d269c2d6389ed6483fbf20a6dceac6cbdc25 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | blessings-h/raster-vision | cc4804e09f8396f861e1fff8bff3e0a4c4f3d048 | 65647c710e668ba59951081faa5f379397185d67 | refs/heads/master | 2021-06-29T18:59:23.823567 | 2021-01-27T02:05:01 | 2021-01-27T02:05:01 | 210,014,893 | 0 | 0 | null | 2019-09-21T16:09:08 | 2019-09-21T16:09:07 | null | UTF-8 | Python | false | false | 10,285 | py | import os
from os.path import join
import zipfile
from typing import Any
import warnings
from fastai.callbacks import CSVLogger, Callback, SaveModelCallback, TrackerCallback
from fastai.metrics import add_metrics
from fastai.torch_core import dataclass, torch, Tensor, Optional, warn
from fastai.basic_train import Learner
from torch.utils.tensorboard import SummaryWriter
from rastervision.utils.files import (sync_to_dir)
class SyncCallback(Callback):
"""A callback to sync from_dir to to_uri at the end of epochs."""
def __init__(self, from_dir, to_uri, sync_interval=1):
self.from_dir = from_dir
self.to_uri = to_uri
self.sync_interval = sync_interval
def on_epoch_end(self, **kwargs):
if (kwargs['epoch'] + 1) % self.sync_interval == 0:
sync_to_dir(self.from_dir, self.to_uri, delete=True)
class ExportCallback(TrackerCallback):
""""Exports the model when monitored quantity is best.
The exported model is the one used for inference.
"""
def __init__(self, learn:Learner, model_path:str, monitor:str='valid_loss', mode:str='auto'):
self.model_path = model_path
super().__init__(learn, monitor=monitor, mode=mode)
def on_epoch_end(self, epoch:int, **kwargs:Any)->None:
current = self.get_monitor_value()
if (epoch == 0 or
(current is not None and self.operator(current, self.best))):
print(f'Better model found at epoch {epoch} with {self.monitor} value: {current}.')
self.best = current
print(f'Exporting to {self.model_path}')
self.learn.export(self.model_path)
class MySaveModelCallback(SaveModelCallback):
"""Saves the model after each epoch to potentially resume training.
Modified from fastai version to delete the previous model that was saved
to avoid wasting disk space.
"""
def on_epoch_end(self, epoch:int, **kwargs:Any)->None:
"Compare the value monitored to its best score and maybe save the model."
if self.every=="epoch":
self.learn.save(f'{self.name}_{epoch}')
prev_model_path = self.learn.path/self.learn.model_dir/f'{self.name}_{epoch-1}.pth'
if os.path.isfile(prev_model_path):
os.remove(prev_model_path)
else: #every="improvement"
current = self.get_monitor_value()
if current is not None and self.operator(current, self.best):
print(f'Better model found at epoch {epoch} with {self.monitor} value: {current}.')
self.best = current
self.learn.save(f'{self.name}')
class MyCSVLogger(CSVLogger):
"""Logs metrics to a CSV file after each epoch.
Modified from fastai version to:
- flush after each epoch
- append to log if already exists
"""
def __init__(self, learn, filename='history'):
super().__init__(learn, filename)
def on_train_begin(self, **kwargs):
if self.path.exists():
self.file = self.path.open('a')
else:
super().on_train_begin(**kwargs)
def on_epoch_end(self, epoch, smooth_loss, last_metrics, **kwargs):
out = super().on_epoch_end(
epoch, smooth_loss, last_metrics, **kwargs)
self.file.flush()
return out
# The following are a set of metric callbacks that have been modified from the
# original version in fastai to support semantic segmentation, which doesn't
# have the class dimension in position -1. It also adds an ignore_idx
# which is used to ignore pixels with class equal to ignore_idx. These
# would be good to contribute back upstream to fastai -- however we should
# wait for their upcoming refactor of the callback architecture.
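# Illustrative sketch (not from the original module; the names and values below are
# assumptions): these callbacks are meant to be handed to a fastai Learner, with
# clas_idx=1 for semantic-segmentation outputs shaped (batch, classes, height, width):
#
#   metrics = [Precision(clas_idx=1, average='weighted', ignore_idx=0),
#              Recall(clas_idx=1, average='weighted', ignore_idx=0)]
#   learn = Learner(data, model, metrics=metrics)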
@dataclass
class ConfusionMatrix(Callback):
"Computes the confusion matrix."
# The index of the dimension in the output and target arrays which ranges
# over the different classes. This is -1 (the last index) for
# classification, but is 1 for semantic segmentation.
clas_idx:int=-1
def on_train_begin(self, **kwargs):
self.n_classes = 0
def on_epoch_begin(self, **kwargs):
self.cm = None
def on_batch_end(self, last_output:Tensor, last_target:Tensor, **kwargs):
preds = last_output.argmax(self.clas_idx).view(-1).cpu()
targs = last_target.view(-1).cpu()
if self.n_classes == 0:
self.n_classes = last_output.shape[self.clas_idx]
self.x = torch.arange(0, self.n_classes)
cm = ((preds==self.x[:, None]) & (targs==self.x[:, None, None])).sum(dim=2, dtype=torch.float32)
if self.cm is None: self.cm = cm
else: self.cm += cm
def on_epoch_end(self, **kwargs):
self.metric = self.cm
@dataclass
class CMScores(ConfusionMatrix):
"Base class for metrics which rely on the calculation of the precision and/or recall score."
average:Optional[str]="binary" # `binary`, `micro`, `macro`, `weighted` or None
pos_label:int=1 # 0 or 1
eps:float=1e-9
# If ground truth label is equal to the ignore_idx, it should be ignored
# for the sake of evaluation.
ignore_idx:int=None
def _recall(self):
rec = torch.diag(self.cm) / self.cm.sum(dim=1)
rec[rec != rec] = 0 # removing potential "nan"s
if self.average is None: return rec
else:
if self.average == "micro": weights = self._weights(avg="weighted")
else: weights = self._weights(avg=self.average)
return (rec * weights).sum()
def _precision(self):
prec = torch.diag(self.cm) / self.cm.sum(dim=0)
prec[prec != prec] = 0 # removing potential "nan"s
if self.average is None: return prec
else:
weights = self._weights(avg=self.average)
return (prec * weights).sum()
def _weights(self, avg:str):
if self.n_classes != 2 and avg == "binary":
avg = self.average = "macro"
warn("average=`binary` was selected for a non binary case. Value for average has now been set to `macro` instead.")
if avg == "binary":
if self.pos_label not in (0, 1):
self.pos_label = 1
warn("Invalid value for pos_label. It has now been set to 1.")
if self.pos_label == 1: return Tensor([0,1])
else: return Tensor([1,0])
else:
if avg == "micro": weights = self.cm.sum(dim=0) / self.cm.sum()
if avg == "macro": weights = torch.ones((self.n_classes,)) / self.n_classes
if avg == "weighted": weights = self.cm.sum(dim=1) / self.cm.sum()
if self.ignore_idx is not None and avg in ["macro", "weighted"]:
weights[self.ignore_idx] = 0
weights /= weights.sum()
return weights
class Recall(CMScores):
"Compute the Recall."
def on_epoch_end(self, last_metrics, **kwargs):
return add_metrics(last_metrics, self._recall())
class Precision(CMScores):
"Compute the Precision."
def on_epoch_end(self, last_metrics, **kwargs):
return add_metrics(last_metrics, self._precision())
@dataclass
class FBeta(CMScores):
"Compute the F`beta` score."
beta:float=2
def on_train_begin(self, **kwargs):
self.n_classes = 0
self.beta2 = self.beta ** 2
self.avg = self.average
if self.average != "micro": self.average = None
def on_epoch_end(self, last_metrics, **kwargs):
prec = self._precision()
rec = self._recall()
metric = (1 + self.beta2) * prec * rec / (prec * self.beta2 + rec + self.eps)
metric[metric != metric] = 0 # removing potential "nan"s
if self.avg: metric = (self._weights(avg=self.avg) * metric).sum()
return add_metrics(last_metrics, metric)
def on_train_end(self, **kwargs): self.average = self.avg
def zipdir(dir, zip_path):
"""Create a zip file from a directory.
The zip file contains the contents of dir, but not dir itself.
Args:
dir: (str) the directory with the content to place in zip file
zip_path: (str) path to the zip file
"""
with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as ziph:
for root, dirs, files in os.walk(dir):
for file in files:
ziph.write(join(root, file),
join('/'.join(dirs),
os.path.basename(file)))
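# Illustrative usage (assumption, not from the original code): zip a training output
# directory so that unzipping yields its files directly, without a top-level folder.
#
#   zipdir('/opt/data/train/model-bundle', '/opt/data/train/model-bundle.zip')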
# This code was adapted from
# https://github.com/Pendar2/fastai-tensorboard-callback/blob/master/fastai_tensorboard_callback/tensorboard_cb.py
@dataclass
class TensorboardLogger(Callback):
learn:Learner
run_name:str
histogram_freq:int=100
path:str=None
def __post_init__(self):
self.path = self.path or os.path.join(self.learn.path, "logs")
self.log_dir = os.path.join(self.path, self.run_name)
def on_train_begin(self, **kwargs):
self.writer = SummaryWriter(log_dir=self.log_dir)
def on_epoch_end(self, **kwargs):
iteration = kwargs["iteration"]
metrics = kwargs["last_metrics"]
metrics_names = ["valid_loss"] + [o.__class__.__name__ for o in self.learn.metrics]
for val, name in zip(metrics, metrics_names):
self.writer.add_scalar(name, val, iteration)
def on_batch_end(self, **kwargs):
iteration = kwargs["iteration"]
loss = kwargs["last_loss"]
self.writer.add_scalar("learning_rate", self.learn.opt.lr, iteration)
self.writer.add_scalar("momentum", self.learn.opt.mom, iteration)
self.writer.add_scalar("loss", loss, iteration)
if iteration%self.histogram_freq == 0:
for name, param in self.learn.model.named_parameters():
self.writer.add_histogram(name, param, iteration)
def on_train_end(self, **kwargs):
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
dummy_input = next(iter(self.learn.data.train_dl))[0]
self.writer.add_graph(self.learn.model, tuple(dummy_input))
except Exception as e:
print("Unable to create graph.")
print(e)
self.writer.close() | [
"[email protected]"
] | |
c0dd503b1a9ab64668c1bd73cb9fac6abcc20aaf | 9c20f53c155a487b2af0110a7388f7b1ae8d6ac0 | /JQKA/JQKA/spiders/myselector.py | 13a243c755006de1367c0d53a52fc93853ace3af | [] | no_license | xfzhu2003/github | b9f2f2c37b571b7019a2faf02deb5f8d1d5fafc9 | 2f135849023a89d1514dec236d086e4783aad3df | refs/heads/master | 2020-08-08T14:59:03.860049 | 2018-03-20T01:52:37 | 2018-03-20T01:52:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,810 | py | #-*- coding:utf-8 -*-
import re
import urllib.parse
#from itertools import chain
import datetime
#import random
from user_agent import generate_user_agent
from pdfminer.pdfparser import PDFParser,PDFDocument
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LTTextBoxHorizontal,LAParams
from pdfminer.pdfinterp import PDFTextExtractionNotAllowed
import requests
import os
from io import BytesIO
from win32com import client as wc
#from imp import reload
s = requests.Session()
class Selector(object):
def __init__(self):
pass
@staticmethod
def pdfparse(url=None):
try:
if url:
res = s.get(url,headers = {"user-agent":generate_user_agent()})
res.encoding = 'utf-8'
f = BytesIO()
f.write(res.content)
f.seek(0)
# path2 = os.getcwd()+"\\%s.txt"%name.split(".")[0]
# print(path1)
praser = PDFParser(f)
doc = PDFDocument()
praser.set_document(doc)
doc.set_parser(praser)
doc.initialize()
if not doc.is_extractable:
raise PDFTextExtractionNotAllowed
else:
                    # Create a PDF resource manager to manage shared resources
# print("a")
rsrcmgr = PDFResourceManager()
                    # Create a PDF device object
laparams = LAParams()
device = PDFPageAggregator(rsrcmgr, laparams=laparams)
                    # Create a PDF interpreter object
interpreter = PDFPageInterpreter(rsrcmgr, device)
text = ''
                    # Loop over the document's pages, handling one page at a time
                    for page in doc.get_pages(): # doc.get_pages() returns the list of pages
interpreter.process_page(page)
                        # Get the LTPage object for this page
layout = device.get_result()
#text = "".join(map(lambda x:x.get_text().strip(" ") if x.get_text() else "",layout))
#print(text)
                        # layout is an LTPage object holding the objects parsed from this page (LTTextBox, LTFigure, LTImage, LTTextBoxHorizontal, etc.); to get the text, read the object's text attribute
for x in layout:
results = x.get_text()
if results:
text = text+results.strip('\n')
f.close()
return text
except Exception as e:
print(e)
@staticmethod
def docparse(url):
name = url.split("/")[-1]
try:
path1 = os.getcwd()+"\\%s.doc"%name.split(".")[0]
path2 = os.getcwd()+"\\%s.txt"%name.split(".")[0]
# print(path1,path2)
doc = s.get(url,headers = {"user-agent":generate_user_agent()})
word = wc.Dispatch('Word.Application')
with open(path1,"wb") as f:
f.write(doc.content)
docment = word.Documents.Open(path1)
docment.SaveAs(path2, 4)
docment.Close()
try:
with open(path2) as f:
workdoc = f.read()
except:
workdoc = ""
os.remove(path1)
os.remove(path2)
return workdoc
except Exception as e:
print(e)
@classmethod
def replace_all(self,content):
content = self.replace_html_tag(content)
content = self.replace_invalid_html_char(content)
content = self.replace_invalid_char(content)
return content
@staticmethod
def changdt(content,dt):
if dt == "int":
v = int(content) if hasattr(content,'replace') and content.isdigit() else content if isinstance(content,int) else None
return v
elif dt == "float":
try:
v = round(float(content),4)
return v
except:
return None
if dt == 'str':
try:
if content:
return str(content)
except:
return None
if dt == "date":
if content:
if re.match("\d{4}-\d+-\d+",content):
result = content.split("-")
return "{0:0>4}-{1:0>2}-{2:0>2}".format(result[0],result[1],result[2])
else:
return content
else:
return content
@staticmethod
def select_content(content,config,response=None):
selector_type = config['t']
tag = config['v']
try:
if hasattr(content,'text'):
body = content.text
else:
body = content
except Exception as e:
print(e)
try:
if selector_type == 'meta':
return response.meta[tag]
elif selector_type == "json":
for i in tag.split("/"):
if isinstance(content,dict):
pass
else:
raise TypeError("typeError")
content = content[i] if i in content else ''
v = content
return v
elif selector_type == "xpath":
return content.xpath(tag)
elif selector_type == 'xpathList':
return content.xpath(tag).extract()
elif selector_type == 'xpath_split':
v = content.xpath(tag).extract()
if v:
return ",".join(v)
elif selector_type == "xpath_first":
v = content.xpath(tag).extract_first()
return v
elif selector_type == "xpath_join":
v = content.xpath(tag).extract()
if v:
v = "".join(v)
else:
v = None
return v
elif selector_type == 'xpathSet':
v = content.xpath(tag).extract()
v = set(v)
return v
elif selector_type == "css":
v = content.css[tag]
if v:
return v
elif selector_type == "re_first":
v = re.search(tag,body)
if hasattr(v,"group"):
v = v.group(0)
else:
return ''
elif selector_type == "re_findall":
v = re.findall(tag,body)
return v
elif 'splitwith' in selector_type:
if hasattr(selector_type,'replace'):
b = selector_type.replace('splitwith','')
else:
raise AttributeError('%s has not attribute replace'%selector_type)
if hasattr(content,'split'):
try:
return content.split(b)[tag]
except IndexError as e:
print(e)
else:
raise AttributeError('%s has not attribute split'%content)
elif selector_type == "url":
if hasattr(response,"url"):
return response.url
else:
raise AttributeError("url is Not Method")
elif selector_type =="date":
#set tag = "%Y-%m-%d %H:%M:%S"
return datetime.datetime.now().strftime(tag)
elif selector_type =='abs':
return tag
elif selector_type == 'url_re':
v = re.search(tag,response.url)
if v:
return v.group(1)
elif selector_type == 'url_split':
if hasattr(response,"url"):
return response.url.split('/')[tag]
else:
raise AttributeError("url is Not Method")
elif selector_type == 'static':
return content
except Exception as e:
print(e)
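    # Illustrative note (assumption, not from the original code): `config` is a dict
    # holding a selector type 't' and its value 'v', e.g.
    #   Selector.select_content(response, {'t': 'xpath_first', 'v': '//title/text()'}, response)
    #   Selector.select_content(data, {'t': 'json', 'v': 'result/items'})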
@staticmethod
def replace_html_tag(content):
if hasattr(content, 'replace'):
return re.subn('<[\s\S]*?>','',content)[0]
return content
@staticmethod
def replace_invalid_char(content):
if hasattr(content, 'replace'):
invalid_chars = {'\t','\r','\n','[',']',' ','--','\u3000','\xa0',"'"}
for char in invalid_chars:
content = content.replace(char,'')
return content
@staticmethod
def replace_invalid_html_char(content):
try:
if hasattr(content, 'replace'):
chars = {'nbsp': ' ','160': ' ',
'lt': '<', '60':'<',
'gt': '>', '62': '>',
'amp': '&', '38': '&',
'quot': '"', '34': '"',
}
re_char_entity = re.compile(r'&#?(?P<name>\w+);')
sz = re_char_entity.search(content)
while sz:
key = sz.group('name')
try:
content = re_char_entity.sub(chars[key], content, 1)
sz = re_char_entity.search(content)
except KeyError:
content = re_char_entity.sub('', content, 1)
sz = re_char_entity.search(content)
except Exception as e:
print(e)
return e
return content
@staticmethod
def urljoin(path, url):
urlp = urllib.parse.urlparse(url)
return urlp.scheme+'://'+urlp.netloc+'/'+path
@staticmethod
def urljoin2(path, url):
urlp = urllib.parse.urlparse(url)
return urlp.scheme+'://'+urlp.netloc+path
@classmethod
def headers(self):
return {'User-Agent':generate_user_agent()}
if __name__ == "__main__":
pass
a = Selector.pdfparse("http://www.szse.cn/UpFiles/cfwj/2017-09-20_002638676.pdf")
print(a)
# a = Selector()
# a = a.headers()
# print(a)
# print(type(a))
# print(a)
# a = Selector.replace_all('''<td style="text-align:center">男</td>
# <td style="text-align:center">南山区
#
# </td>
# <td style="text-align:center">
#
# <a href="/lawfirm/12e61b22fa6045deb55ca13d8ac5777c" target="_blank">广东君言律师事务所</a>
#''')
# print(a) | [
"[email protected]"
] | |
2f60ba606f3f3ff16f6ce61b7441c7944a9a3939 | 15f365dc711f2230073391687642498305286321 | /Figure plotting/FIG_3.9c)_maximal allowable radial offset.py | d3495aa3b0b50f74b8be071296dbfa7a96ad2f13 | [] | no_license | Isabelliuqin/Optical_Levitation_Master_project_final | 16d177ee0852361745286d4a5af8eea84aad5845 | 0ebe133a08a84e3c8521b06c6e9eec2584e0b3cc | refs/heads/master | 2023-01-03T13:35:05.753240 | 2020-11-01T10:13:59 | 2020-11-01T10:13:59 | 309,067,970 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,693 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 1 16:31:57 2020
@author: liuqi
"""
import scipy as sp
import numpy as np
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pylab as plt
import scipy.integrate as spi
from scipy.integrate import quad
import seaborn
from scipy.integrate import odeint
from scipy.integrate import dblquad
import Will_Module_addwdep as TQ
import Module_table_parameter as MTP
import time
integration_method = 'manual' # 'manual' or 'integrated'
grid_size = 100
plt.close('all')
###########################
#Our sphere
g = 9.8
c = 3 * 10**8
w_0 = 0.85 * 10 ** (-6)
Lambda = 1.064 * 10**(-6)
z_R = np.pi* w_0 ** 2 / Lambda
rho = 30 * 10 ** (-6)
n_0 = 1
n_s_n = 0.04
k = 7.6097
n_s = n_s_n - k*1j
sig_s = 10.49 * 10 ** 3 * (( 3 ** 3 - 2.25 ** 3) / 3 ** 3 ) #density of sphere in kg/m^3
sig_0 = 0 #density of medium in kg/m^3
m = 4/3 * np.pi * rho ** 3 * ( sig_s - sig_0 )
Permittivity = 8.85 * 10**(-12)
#P = 0.5 * c * n_0 * Permittivity #total power of the LG01 beam
P = 12 #optimal power required to levitate at w0 = 0.85um
############################################
#FIG 3.9c) maximal allowable radial offset
############################################
#x-axis: x-axis radial offset
#y-axis: Qx trapping efficiency
#key function: TQ.F_total_manual_integration
rho_0x = np.linspace(0,2*rho,100)
rho_0 = [0,0]
w = np.sqrt(2) * rho #optimal beam radius
Qoplist = []
for rho_0xe in rho_0x:
F_op = TQ.F_total_manual_integration(rho_0xe,rho_0[1], rho, n_0, n_s, w_0, w, z_R, P , target = "reflective", coordinate = 'x', grid_size = grid_size)['force_total'] #compute Qx at optimal beam radius wop, various radial offsets
Q_op = F_op * c / ( n_0 * P )
Qoplist.append(Q_op)
plt.plot(rho_0x/rho, np.array(Qoplist), lw=2, c="c", label="w/(sqrt(2)rho) = 1")
print ((rho_0x/rho)[np.argmin(abs(np.array(Qoplist)))]) #print the inflection point
new_ticks1 = np.linspace(0, 2 , 5) # plot axis
print(new_ticks1)
plt.xticks(new_ticks1,fontsize=20)
plt.yticks(np.linspace(-0.1, 0.05, 4),fontsize=20)
plt.rc('xtick',labelsize=20)
plt.rc('ytick',labelsize=20)
ax = plt.gca()
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data', 0))
ax.spines['bottom'].set_position(('data',0))
plt.legend(loc=1,fontsize=16)
plt.xlabel('rho_0x/rho',fontsize=20)
plt.ylabel('Qx',fontsize=20)
plt.title('rho = 30um, w0 = 0.85um',fontsize=20)
plt.grid()
plt.show() | [
"[email protected]"
] | |
a0558eff96171575b90ef92a7b59d2a7abd7f87f | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/a8ab1a0b200881f52f564d28db90f10730c1f0b5-<latest>-fix.py | 34987d8922650e14b77fd72b4e1557dd3181ede0 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,811 | py | def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos, update_only, installroot='/'):
res = {
}
res['results'] = []
res['msg'] = ''
res['changed'] = False
res['rc'] = 0
pkgs = {
}
pkgs['update'] = []
pkgs['install'] = []
updates = {
}
update_all = False
cmd = None
if ('*' in items):
update_all = True
(rc, out, err) = run_check_update(module, yum_basecmd)
if ((rc == 0) and update_all):
res['results'].append('Nothing to do here, all packages are up to date')
return res
elif (rc == 100):
updates = parse_check_update(out)
elif (rc == 1):
res['msg'] = err
res['rc'] = rc
module.fail_json(**res)
if update_all:
cmd = (yum_basecmd + ['update'])
will_update = set(updates.keys())
will_update_from_other_package = dict()
else:
will_update = set()
will_update_from_other_package = dict()
for spec in items:
if spec.startswith('@'):
pkgs['update'].append(spec)
will_update.add(spec)
continue
elif (spec.endswith('.rpm') and ('://' not in spec)):
if (not os.path.exists(spec)):
res['msg'] += ("No RPM file matching '%s' found on system" % spec)
res['results'].append(("No RPM file matching '%s' found on system" % spec))
res['rc'] = 127
module.fail_json(**res)
envra = local_envra(spec)
if (not is_installed(module, repoq, envra, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot)):
pkgs['install'].append(spec)
continue
elif ('://' in spec):
package = fetch_rpm_from_url(spec, module=module)
envra = local_envra(package)
if (not is_installed(module, repoq, envra, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot)):
pkgs['install'].append(package)
continue
elif (is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot) or update_only):
pkgs['update'].append(spec)
else:
pkgs['install'].append(spec)
pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot)
if (not pkglist):
res['msg'] += ("No package matching '%s' found available, installed or updated" % spec)
res['results'].append(("No package matching '%s' found available, installed or updated" % spec))
res['rc'] = 126
module.fail_json(**res)
nothing_to_do = True
for pkg in pkglist:
if ((spec in pkgs['install']) and is_available(module, repoq, pkg, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot)):
nothing_to_do = False
break
(pkgname, _, _, _, _) = splitFilename(pkg)
if ((spec in pkgs['update']) and (pkgname in updates)):
nothing_to_do = False
will_update.add(spec)
if (spec != pkgname):
will_update_from_other_package[spec] = pkgname
break
if ((not is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot)) and update_only):
res['results'].append(('Packages providing %s not installed due to update_only specified' % spec))
continue
if nothing_to_do:
res['results'].append(('All packages providing %s are up to date' % spec))
continue
conflicts = transaction_exists(pkglist)
if conflicts:
res['msg'] += ('The following packages have pending transactions: %s' % ', '.join(conflicts))
res['results'].append(('The following packages have pending transactions: %s' % ', '.join(conflicts)))
res['rc'] = 128
module.fail_json(**res)
if module.check_mode:
to_update = []
for w in will_update:
if w.startswith('@'):
to_update.append((w, None))
elif (w not in updates):
other_pkg = will_update_from_other_package[w]
to_update.append((w, ('because of (at least) %s-%s.%s from %s' % (other_pkg, updates[other_pkg]['version'], updates[other_pkg]['dist'], updates[other_pkg]['repo']))))
else:
to_update.append((w, ('%s.%s from %s' % (updates[w]['version'], updates[w]['dist'], updates[w]['repo']))))
res['changes'] = dict(installed=pkgs['install'], updated=to_update)
if (will_update or pkgs['install']):
res['changed'] = True
return res
if cmd:
(rc, out, err) = module.run_command(cmd)
res['changed'] = True
elif (pkgs['install'] or will_update):
cmd = (((yum_basecmd + ['install']) + pkgs['install']) + pkgs['update'])
lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
(rc, out, err) = module.run_command(cmd, environ_update=lang_env)
out_lower = out.strip().lower()
if ((not out_lower.endswith('no packages marked for update')) and (not out_lower.endswith('nothing to do'))):
res['changed'] = True
else:
(rc, out, err) = [0, '', '']
res['rc'] = rc
res['msg'] += err
res['results'].append(out)
if rc:
res['failed'] = True
return res | [
"[email protected]"
] | |
188e8503cdd257dd7cab3babad6f8510a254137d | 633944f913050debf0764c2a29cf3e88f912670e | /v8/depot_tools/bootstrap-3.8.0b1.chromium.1_bin/python3/lib/python3.8/email/mime/base.py | 132e6913d660b6b7b332267152dbc43c4eddb1af | [
"BSD-3-Clause",
"bzip2-1.0.6",
"SunPro",
"Apache-2.0"
] | permissive | bopopescu/V8-lgtm | 0474c2ff39baf754f556ef57619ceae93e7320fd | da307e2f7abfca5fa0e860a809de6cd07fd1b72b | refs/heads/master | 2022-02-16T19:10:54.008520 | 2019-09-25T07:51:13 | 2019-09-25T07:51:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69 | py | ../../../../../.cipd/pkgs/2/_current/lib/python3.8/email/mime/base.py | [
"[email protected]"
] | |
a8cf597841bdc78c1f56b1e0b73d9efdcca7b554 | c55bca491632ef98dfd0e39e9e197f86d4ce94f0 | /wcoa/migrations/0019_auto_20200922_1837.py | 6a1b7fb208ec5b9d7b5906ffb04ffb52f40aa3af | [
"MIT"
] | permissive | Ecotrust/wcoa | 420b2e9f03219a72f79e435c1001b87a76233a8b | f6ad1e42fa93560d57043ebeb8464a320befef14 | refs/heads/main | 2023-08-03T21:02:01.013970 | 2023-07-28T22:56:03 | 2023-07-28T22:56:03 | 196,878,615 | 1 | 1 | MIT | 2021-12-09T19:29:37 | 2019-07-14T20:07:39 | Python | UTF-8 | Python | false | false | 395 | py | # Generated by Django 2.2.9 on 2020-09-22 18:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wcoa', '0018_delete_masonrypage'),
]
operations = [
migrations.AlterField(
model_name='catalogiframepage',
name='source',
field=models.URLField(max_length=1999),
),
]
| [
"[email protected]"
] | |
a2f63a96b80b0d24c88cb051e4b93ed7a5134671 | 2dcf0d5cc921745bd34610162e540632066dd919 | /library/framesize.py | 200b812e69468c2e02fe0fd9dd27b4a7a38d54b7 | [] | no_license | ms412/pyIxia | 0aac92cfe2239853e5e815db23816252b1eb6997 | 17913d810e859fb776882f203bea4135aec72b36 | refs/heads/master | 2021-06-25T14:53:45.102058 | 2017-09-11T15:09:06 | 2017-09-11T15:09:06 | 103,150,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,825 | py |
import time
from library.ixiaIf import TclClient
from tools.logger import Logger
class Framesize(object):
def __init__(self, *args):
self._api = TclClient()
self._log = Logger()
self._portlist=[]
self._tclportlist =''
for item in args:
self._portlist.append([self._api.chassisID(),item[0],item[1]])
for item in self._portlist:
self._tclportlist = (self._tclportlist + '[list %d %d %d] ' % (item[0], item[1], item[2]))
self._tclportlist=('[list %s]'%(self._tclportlist))
def __del__(self):
self.disconnect()
def createGroup(self):
self._api.call('set group 12')
self._api.call('portGroup create $group')
for _port in self._portlist:
self._api.call('portGroup add $group %d %d %d' % (_port[0], _port[1], _port[2]))
# self._api.call('port setFactoryDefaults %d %d %d' % (_port[0], _port[1], _port[2]))
self._api.call('portGroup write $group')
self._api.call('portGroup setCommand $group resetStatistics')
# self._api.call('portGroup write $group')
time.sleep(2)
def port(self,mode):
print('port config',self._portlist)
# self._api.call('set portlist %s'%(self._TclPortList()))
# if self._api.call_rc('ixTakeOwnership portlist force') != 0:
if self._api.call_rc('ixTakeOwnership %s force'%(self._tclportlist)) != 0:
print('EXIT')
exit()
for _port in self._portlist:
if '1Gbe-opt' in mode:
print('config prot',_port)
self._api.call('port setDefault')
                # optical PHY mode
self._api.call('port setPhyMode 1 %d %d %d'% (_port[0], _port[1], _port[2]))
self._api.call('port config -speed 1000')
self._api.call('port config -advertise100FullDuplex false')
self._api.call('port config -advertise100HalfDuplex false')
self._api.call('port config -advertise10FullDuplex false')
self._api.call('port config -advertise10HalfDuplex false')
self._api.call('port config -advertise1000FullDuplex true')
self._api.call('port config -speed 1000')
self._api.call('port set %d %d %d' % (_port[0], _port[1], _port[2]))
elif '1Gbe-el'in mode:
self._api.call('port setDefault')
# electrical
self._api.call('port setPhyMode 0 %d %d %d' % (_port[0], _port[1], _port[2]))
self._api.call('port config -speed 1000')
self._api.call('port config -advertise100FullDuplex false')
self._api.call('port config -advertise100HalfDuplex false')
self._api.call('port config -advertise10FullDuplex false')
self._api.call('port config -advertise10HalfDuplex false')
self._api.call('port config -advertise1000FullDuplex true')
self._api.call('port config -speed 1000')
self._api.call('port set %d %d %d' % (_port[0], _port[1], _port[2]))
else:
print('nothing')
def stat(self):
for _port in self._portlist:
self._api.call('stat setDefault')
if self._api.call_rc('stat set %d %d %d' % (_port[0], _port[1], _port[2])) != 0:
exit()
# self._api.call('stat write %d %d %d' % (_port[0], _port[1], _port[2]))
    def flexibleTimestamp(self):
for _port in self._portlist:
self._api.call('flexibleTimestamp setDefault')
self._api.call('flexibleTimestamp set %d %d %d' % (_port[0], _port[1], _port[2]))
def filter(self):
for _port in self._portlist:
self._api.call('filter setDefault')
self._api.call('filter config -captureTriggerFrameSizeFrom 12')
self._api.call('filter config -captureTriggerFrameSizeTo 12')
self._api.call('filter config -captureFilterFrameSizeFrom 12')
self._api.call('filter config -captureFilterFrameSizeTo 12')
self._api.call('filter setDefault')
self._api.call('filter set %d %d %d' % (_port[0], _port[1], _port[2]))
def filterPallette(self):
for _port in self._portlist:
self._api.call('filterPallette setDefault')
self._api.call('filterPallette set %d %d %d' % (_port[0], _port[1], _port[2]))
def capture(self):
for _port in self._portlist:
self._api.call('capture setDefault')
self._api.call('capture set %d %d %d' % (_port[0], _port[1], _port[2]))
def interfaceTable(self):
# for _port in self._portlist:
self._api.call('interfaceTable setDefault')
self._api.call('interfaceTable write')
self._api.call('interfaceTable write')
self._api.call('interfaceTable clearAllInterfaces')
self._api.call('interfaceTable write')
def protocolServer(self):
for _port in self._portlist:
self._api.call('protocolServer setDefault')
self._api.call('protocolServer set %d %d %d' % (_port[0], _port[1], _port[2]))
def stream(self,framesize):
self._api.call('stream setDefault')
self._api.call('stream config -name %s'% 'TestStream')
self._api.call('stream config -framesize %d'% int(framesize))
self._api.call('stream config -ifg 96.0')
# self._api.call('stream config -ifgMIN 952.0')
#self._api.call('stream config -ifgMAX 1016.0')
# self._api.call('stream config -ibg 96.0')
self._api.call('stream config -percentPacketRate 100.0')
self._api.call('stream config -enableTimestamp true')
self._api.call('stream config -patternType patternTypeRandom')
self._api.call('stream config -dataPattern allOnes')
self._api.call('stream config -pattern "FF FF"')
self._api.call('stream config -frameType "FF FF"')
self._api.call('stream config -dma stopStream')
self._api.call('stream config -numFrames 1000')
#required for lartency
# self._api.call('stream config -fir true')
for _port in self._portlist:
self._api.call('stream set %d %d %d %d'%(_port[0], _port[1], _port[2],1))
def pauseFrame(self):
self._api.call('stream setDefault')
# self._api.call('stream config -name %s'% 'PauseStream')
self._api.call('protocol setDefault')
self._api.call('protocol config -name PauseStream')
self._api.call('protocol config -ethernetType ethernetII')
self._api.call('pauseControl setDefault')
self._api.call('pauseControl config -da {01 80 C2 00 00 01}')
self._api.call('pauseControl config -pauseTime 128')
for _port in self._portlist:
self._api.call('pauseControl set %d %d %d'%(_port[0], _port[1], _port[2]))
for _port in self._portlist:
self._api.call('stream set %d %d %d %d'%(_port[0], _port[1], _port[2],1))
def protocol(self):
self._api.call('protocol setDefault')
def packetGroup(self):
self._api.call('packetGroup setDefault')
self._api.call('packetGroup config -groupId 1')
self._api.call('packetGroup config -groupOffset 16')
self._api.call('packetGroup config -sequenceNumberOffset 28')
self._api.call('packetGroup config -insertSequenceSignature true')
for _port in self._portlist:
self._api.call('packetGroup setTx %d %d %d %d'%(_port[0], _port[1], _port[2],1))
    def dataIntegrity(self):
        self._api.call('dataIntegrity setDefault')
self._api.call('dataIntegrity config -signatureOffset 12')
self._api.call('dataIntegrity config -signature "08 71 18 00"')
def result(self):
_result = {}
for _port in self._portlist:
_str_port = (str(_port[0])+str(_port[1])+str(_port[2]))
print(_str_port)
_result[_str_port] = {}
for _port in self._portlist:
self._api.call_rc('capture get %d %d %d' % (_port[0],_port[1], _port[2]))
self._api.call('capture cget -nPackets')
for _port in self._portlist:
self._api.call_rc('captureBuffer get %d %d %d' % (_port[0],_port[1],_port[2]))
self._api.call_rc('captureBuffer getStatistics')
print('Port %s Latency: %d' % (str(_port), int(self._api.call('captureBuffer cget -averageLatency')[0])))
for _port in self._portlist:
self._api.call('stat get statAllStats %d %d %d'% (_port[0], _port[1], _port[2]))
# print('Port %s LinkState: %d'% (str(_port), int(self._api.call('stat cget -link')[0])))
# print('Port %s txFrames: %d'% (str(_port), int(self._api.call('stat cget -framesSent')[0])))
# print('Port %s rxFrames: %d'% (str(_port), int(self._api.call('stat cget -framesReceived')[0])))
# print('Port %s txBytes: %d'% (str(_port), int(self._api.call('stat cget -bytesSent')[0])))
# print('Port %s rxBytes: %d'% (str(_port), int(self._api.call('stat cget -bytesReceived')[0])))
# print('Port %s Line Rate: %d'% (str(_port), int(self._api.call('stat cget -lineSpeed')[0])))
# _str_port = (str(_port[0]) + '-' + str(_port[1]) + '-' + str(_port[2]))
_testResult = {}
_testResult['txFrame'] = int(self._api.call('stat cget -framesSent')[0])
_testResult['rxFrame'] = int(self._api.call('stat cget -framesReceived')[0])
_testResult['txBytes'] = int(self._api.call('stat cget -bytesSent')[0])
_testResult['rxBytes'] = int(self._api.call('stat cget -bytesReceived')[0])
_str_port = (str(_port[0]) + str(_port[1]) + str(_port[2]))
_result[_str_port] = _testResult
# _testResult['PORT'] = _port
# _resultList.append(_testResult)
# print('RESULT',_result)
return _result
def framesizeTest(self,sizelist):
_framesizeTest = {}
self._api.call('set portList %s' % (self._tclportlist))
self.createGroup()
self.port('1Gbe-opt')
#self.pauseFrame()
# _result = {}
for framesize in sizelist:
self.stat()
            self.flexibleTimestamp()
self.filter()
self.capture()
self.filterPallette()
self.interfaceTable()
self.protocolServer()
self.stream(framesize)
if self._api.call_rc('ixWriteConfigToHardware portList') != 0:
exit()
time.sleep(10)
if self._api.call_rc('ixCheckLinkState portList') != 0:
exit()
if self._api.call_rc('ixStartCapture portList') != 0:
exit()
if self._api.call_rc('ixStartTransmit portList') != 0:
exit()
time.sleep(10)
if self._api.call_rc('ixStopCapture portList') != 0:
exit()
if self._api.call_rc('ixStopTransmit portList') != 0:
exit()
# _resultList = self.result()
_framesizeTest[framesize] = self.result()
# for item in _resultList:
# print(item)
# _port = item.get('PORT')
# _str_port = (str(_port[0]) + '-' + str(_port[1]) + '-' + str(_port[2]))
# print(_str_port)
# _framesizeTest[_str_port]['FRAMESIZE'][framesize] = _str_port
# print(_framesizeTest)
# _testresult = self.result()
# print('TESTRESULT', _testresult)
return _framesizeTest
def disconnect(self):
if self._api.call_rc('ixClearOwnership %s' % (self._tclportlist)) != 0:
exit()
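# Illustrative sketch (assumption, not part of the original module): a frame-size
# sweep between two (card, port) pairs on the Ixia chassis might be driven like this.
#
#   tester = Framesize((1, 1), (1, 2))
#   results = tester.framesizeTest([64, 128, 256, 512, 1024, 1518])
#   # results maps framesize -> {port: {'txFrame', 'rxFrame', 'txBytes', 'rxBytes'}}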
| [
"[email protected]"
] | |
cd9815be7c9cc8ccdc4c8d46f182389f7124895a | 0f6581b105ea7eb4b99dbff131378340a634e7ac | /pages/select_mall_page.py | a47f0ce03ea8ce69435593430a96ed74a92a928e | [] | no_license | langdawang678/Se2PO | ded5e9f97a329f39a6de8ffaebe92330eb598eff | 96d7eb6b4e1774b06b2fd9a4781f9bee7d8f5ed6 | refs/heads/master | 2023-03-25T10:44:23.140843 | 2021-03-23T09:41:39 | 2021-03-23T09:41:39 | 346,628,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from locations.goods_lib_locations import GoodsLibLocations
from common.base_page import BasePage
class SelectMallPage(BasePage):
# 退出元素是否存在
def get_elements_exists(self):
try:
WebDriverWait(self.driver, 10).until(EC.visibility_of_element_located(GoodsLibLocations.exit_link))
except:
return False
else:
return True
| [
"[email protected]"
] | |
b29cd8cd90efb7cd3c3dcc4d135b53ae21c536a5 | f8104b29a8d0dbeb407060e494a206ca69335aeb | /tools/datasets/buildchange/json2coco_city_trainval.py | dabe0ff848ca135aa66f2af774888c1dc40685b2 | [] | no_license | Sebastixian/wwtool | c19f665f96e8b942e94af47db590f5bb28072f06 | 2f462a3d028b766234d62a3ef706a0f08f10680a | refs/heads/master | 2023-06-01T04:21:22.066639 | 2021-06-25T07:40:13 | 2021-06-25T07:40:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,213 | py | import argparse
import os
import cv2
import json
import csv
import shutil
import numpy as np
import wwtool
import os
import cv2
import mmcv
class SIMPLETXT2COCO():
def __init__(self,
imgpath=None,
annopath=None,
imageset_file=None,
image_format='.jpg',
anno_format='.txt',
data_categories=None,
data_info=None,
data_licenses=None,
data_type="instances",
groundtruth=True,
small_object_area=0,
sub_anno_fold=False,
cities=None):
super(SIMPLETXT2COCO, self).__init__()
self.imgpath = imgpath
self.annopath = annopath
self.image_format = image_format
self.anno_format = anno_format
self.categories = data_categories
self.info = data_info
self.licenses = data_licenses
self.type = data_type
self.small_object_area = small_object_area
self.small_object_idx = 0
self.groundtruth = groundtruth
self.max_object_num_per_image = 0
self.sub_anno_fold = sub_anno_fold
self.imageset_file = imageset_file
self.imgpaths, self.annotpaths = [], []
for label_fn in os.listdir(annopath):
basename = wwtool.get_basename(label_fn)
self.imgpaths.append(os.path.join(imgpath, basename + '.png'))
self.annotpaths.append(os.path.join(annopath, basename + '.json'))
def get_image_annotation_pairs(self):
images = []
annotations = []
index = 0
progress_bar = mmcv.ProgressBar(len(self.imgpaths))
imId = 0
for imgfile, annofile in zip(self.imgpaths, self.annotpaths):
# imgpath = os.path.join(self.imgpath, name + self.image_format)
# annotpath = os.path.join(self.annopath, name + self.anno_format)
name = wwtool.get_basename(imgfile)
annotations_coco = self.__generate_coco_annotation__(annofile, imgfile)
# if annotation is empty, skip this annotation
if annotations_coco != [] or self.groundtruth == False:
height, width, channels = 1024, 1024, 3
images.append({"date_captured": "2019",
"file_name": name + self.image_format,
"id": imId + 1,
"license": 1,
"url": "http://jwwangchn.cn",
"height": height,
"width": width})
for annotation in annotations_coco:
index = index + 1
annotation["image_id"] = imId + 1
annotation["id"] = index
annotations.append(annotation)
imId += 1
if imId % 500 == 0:
print("\nImage ID: {}, Instance ID: {}, Small Object Counter: {}, Max Object Number: {}".format(imId, index, self.small_object_idx, self.max_object_num_per_image))
progress_bar.update()
return images, annotations
def __generate_coco_annotation__(self, annotpath, imgpath):
"""
        Convert the annotation file of one image into COCO-style annotation dicts.
            :param annotpath: the path of the annotation file for one image
            :param imgpath: the path of the corresponding image
            :return: list of annotation dicts
"""
objects = self.__simpletxt_parse__(annotpath, imgpath)
coco_annotations = []
for object_struct in objects:
bbox = object_struct['bbox']
segmentation = object_struct['segmentation']
label = object_struct['label']
roof_bbox = object_struct['roof_bbox']
building_bbox = object_struct['building_bbox']
roof_mask = object_struct['roof_mask']
footprint_mask = object_struct['footprint_mask']
ignore_flag = object_struct['ignore_flag']
offset = object_struct['offset']
iscrowd = object_struct['iscrowd']
width = bbox[2]
height = bbox[3]
area = height * width
if area <= self.small_object_area and self.groundtruth:
self.small_object_idx += 1
continue
coco_annotation = {}
coco_annotation['bbox'] = bbox
coco_annotation['segmentation'] = [segmentation]
coco_annotation['category_id'] = label
coco_annotation['area'] = np.float(area)
coco_annotation['roof_bbox'] = roof_bbox
coco_annotation['building_bbox'] = building_bbox
coco_annotation['roof_mask'] = roof_mask
coco_annotation['footprint_mask'] = footprint_mask
coco_annotation['ignore_flag'] = ignore_flag
coco_annotation['offset'] = offset
coco_annotation['iscrowd'] = iscrowd
coco_annotations.append(coco_annotation)
return coco_annotations
def __simpletxt_parse__(self, label_file, image_file):
"""
(xmin, ymin, xmax, ymax)
"""
annotations = mmcv.load(label_file)['annotations']
# roof_mask, footprint_mask, roof_bbox, building_bbox, label, ignore, offset
objects = []
for annotation in annotations:
object_struct = {}
roof_mask = annotation['roof']
roof_polygon = wwtool.mask2polygon(roof_mask)
roof_bound = roof_polygon.bounds # xmin, ymin, xmax, ymax
footprint_mask = annotation['footprint']
footprint_polygon = wwtool.mask2polygon(footprint_mask)
footprint_bound = footprint_polygon.bounds
building_xmin = np.minimum(roof_bound[0], footprint_bound[0])
building_ymin = np.minimum(roof_bound[1], footprint_bound[1])
building_xmax = np.maximum(roof_bound[2], footprint_bound[2])
building_ymax = np.maximum(roof_bound[3], footprint_bound[3])
building_bound = [building_xmin, building_ymin, building_xmax, building_ymax]
xmin, ymin, xmax, ymax = list(roof_bound)
bbox_w = xmax - xmin
bbox_h = ymax - ymin
object_struct['bbox'] = [xmin, ymin, bbox_w, bbox_h]
object_struct['roof_bbox'] = object_struct['bbox']
xmin, ymin, xmax, ymax = list(building_bound)
bbox_w = xmax - xmin
bbox_h = ymax - ymin
object_struct['building_bbox'] = [xmin, ymin, bbox_w, bbox_h]
object_struct['roof_mask'] = roof_mask
object_struct['footprint_mask'] = footprint_mask
object_struct['ignore_flag'] = annotation['ignore']
object_struct['offset'] = annotation['offset']
object_struct['segmentation'] = roof_mask
object_struct['label'] = 1
object_struct['iscrowd'] = object_struct['ignore_flag']
objects.append(object_struct)
return objects
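# Illustrative example (inferred from the parsing code above, not taken from the
# dataset itself) of one entry in a labels_json 'annotations' list:
#   {"roof": [x1, y1, x2, y2, ...], "footprint": [x1, y1, x2, y2, ...],
#    "ignore": 0, "offset": [dx, dy]}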
def parse_args():
parser = argparse.ArgumentParser(description='MMDet test detector')
parser.add_argument(
'--imagesets',
type=str,
nargs='+',
choices=['trainval', 'test'])
parser.add_argument(
'--release_version', default='v1', type=str)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
# basic dataset information
info = {"year" : 2019,
"version" : "1.0",
"description" : "SIMPLETXT-Building-COCO",
"contributor" : "Jinwang Wang",
"url" : "jwwangchn.cn",
"date_created" : "2019"
}
licenses = [{"id": 1,
"name": "Attribution-NonCommercial",
"url": "http://creativecommons.org/licenses/by-nc-sa/2.0/"
}]
original_simpletxt_class = {'building': 1}
converted_simpletxt_class = [{'supercategory': 'none', 'id': 1, 'name': 'building', }]
# dataset's information
image_format='.png'
anno_format='.txt'
core_dataset_name = 'buildchange'
cities = ['sampling']
# sub_city_folds = {'shanghai': ['arg']}
# cities = ['shanghai', 'beijing', 'jinan', 'haerbin', 'chengdu']
release_version = 'v2'
groundtruth = True
for idx, city in enumerate(cities):
anno_name = [core_dataset_name, release_version, 'trainval', city, 'roof_footprint']
print("Begin processing: {}".format("_".join(anno_name)))
imgpath = f'./data/{core_dataset_name}/{release_version}/{city}/images'
annopath = f'./data/{core_dataset_name}/{release_version}/{city}/labels_json'
save_path = f'./data/{core_dataset_name}/{release_version}/coco/annotations'
if not os.path.exists(save_path):
os.makedirs(save_path)
simpletxt2coco = SIMPLETXT2COCO(imgpath=imgpath,
annopath=annopath,
image_format=image_format,
anno_format=anno_format,
data_categories=converted_simpletxt_class,
data_info=info,
data_licenses=licenses,
data_type="instances",
groundtruth=groundtruth,
small_object_area=0,
cities=cities)
images, annotations = simpletxt2coco.get_image_annotation_pairs()
json_data = {"info" : simpletxt2coco.info,
"images" : images,
"licenses" : simpletxt2coco.licenses,
"type" : simpletxt2coco.type,
"annotations" : annotations,
"categories" : simpletxt2coco.categories}
with open(os.path.join(save_path, "_".join(anno_name) + ".json"), "w") as jsonfile:
json.dump(json_data, jsonfile, sort_keys=True, indent=4) | [
"[email protected]"
] | |
c599481b7904761d4e4518acc651183692d4f2d5 | 2fd087fbc5faf43940153693823969df6c8ec665 | /pyc_decrypted/latest/xml/etree/cElementTree.py | cf14d28fc01a58d8e8845641181b34f25ec71840 | [] | no_license | mickeystone/DropBoxLibrarySRC | ed132bbffda7f47df172056845e5f8f6c07fb5de | 2e4a151caa88b48653f31a22cb207fff851b75f8 | refs/heads/master | 2021-05-27T05:02:30.255399 | 2013-08-27T13:16:55 | 2013-08-27T13:16:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | #Embedded file name: xml/etree/cElementTree.py
from _elementtree import *
| [
"[email protected]"
] | |
3ed16fe01640223215e8ecb9dd68102306c1c59b | 592498a0e22897dcc460c165b4c330b94808b714 | /1000번/1406_에디터.py | a89e92eec4a01dc869414b5d997fc614f0d9d6f9 | [] | no_license | atom015/py_boj | abb3850469b39d0004f996e04aa7aa449b71b1d6 | 42b737c7c9d7ec59d8abedf2918e4ab4c86cb01d | refs/heads/master | 2022-12-18T08:14:51.277802 | 2020-09-24T15:44:52 | 2020-09-24T15:44:52 | 179,933,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | from collections import deque
import sys
ip = sys.stdin.readline
# BOJ 1406 (Editor): keep two deques around the cursor.
# `lst` holds the characters to the left of the cursor, `rst` those to the right.
lst = deque(list(ip().strip()))
rst = deque([])
for i in range(int(ip())):
    cmd = ip().strip()
    if cmd[0] == 'L':    # move cursor one position left
        if len(lst):
            rst.appendleft(lst.pop())
    elif cmd[0] == 'D':  # move cursor one position right
        if len(rst):
            lst.append(rst.popleft())
    elif cmd[0] == 'B':  # delete the character left of the cursor
        if len(lst):
            lst.pop()
    else:                # 'P $': insert character $ at the cursor
        lst.append(cmd[2])
for i in lst+rst:
print(i,end='')
| [
"[email protected]"
] | |
e1b448acf3b730cb600a2828622a2b86bc3e47d9 | c9f4de7bf63df23325b477d3375a1bfb99865059 | /main_a3.py | 2a0057fd589f5aa522859a2167872c1f9d5d7b8e | [] | no_license | EliHill/TextAnalysis | 440a15dca3f467ab5d79a234582a9ca3b4c7ab10 | 44b05bd1995290bbbd7972a1f8953aa5e75be37e | refs/heads/master | 2020-09-30T16:24:18.911419 | 2019-12-06T19:52:17 | 2019-12-06T19:52:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,750 | py | """main_a3.py
"""
import re
import os
import math
import nltk
from nltk.corpus import brown
from nltk.corpus import wordnet as wn
from nltk.corpus import PlaintextCorpusReader
from fsa import FSA
# NLTK stoplist with 3136 words (multilingual)
STOPLIST = set(nltk.corpus.stopwords.words())
# Vocabulary with 234,377 English words from NLTK
ENGLISH_VOCABULARY = set(w.lower() for w in nltk.corpus.words.words())
# The five categories from Brown that we are using
BROWN_CATEGORIES = ('adventure', 'fiction', 'government', 'humor', 'news')
# Global place to store Brown vocabularies so you calculate them only once
BROWN_VOCABULARIES = None
def is_content_word(word):
"""A content word is not on the stoplist and its first character is a letter."""
return word.lower() not in STOPLIST and word[0].isalpha()
class Text(object):
def __init__(self, path, name=None):
"""Takes a file path, which is assumed to point to a file or a directory,
extracts and stores the raw text and also stores an instance of nltk.text.Text."""
self.name = name
if os.path.isfile(path):
self.raw = open(path).read()
elif os.path.isdir(path):
corpus = PlaintextCorpusReader(path, '.*.mrg')
self.raw = corpus.raw()
self.text = nltk.text.Text( nltk.word_tokenize(self.raw))
def __len__(self):
return len(self.text)
def __getitem__(self, i):
return self.text[i]
def __str__(self):
name = '' if self.name is None else " '%s'" % self.name
return "<Text%s tokens=%s>" % (name, len(self))
def token_count(self):
"""Just return the length of the text."""
return len(self)
def type_count(self):
"""Returns the type count, with minimal normalization by lower casing."""
# an alternative would be to use the method nltk.text.Text.vocab()
return len(set([w.lower() for w in self.text]))
def sentence_count(self):
"""Return number of sentences, using the simplistic measure of counting period,
exclamation marks and question marks."""
# could also use nltk.sent.tokenize on self.raw
return len([t for t in self.text if t in '.!?'])
def most_frequent_content_words(self):
"""Return a list with the 25 most frequent content words and their
frequencies. The list has (word, frequency) pairs and is ordered
on the frequency."""
dist = nltk.FreqDist([w for w in self.text if is_content_word(w.lower())])
return dist.most_common(n=25)
def most_frequent_bigrams(self, n=25):
"""Return a list with the 25 most frequent bigrams that only contain
content words. The list returned should have pairs where the first
element in the pair is the bigram and the second the frequency, as in
((word1, word2), frequency), these should be ordered on frequency."""
filtered_bigrams = [b for b in list(nltk.bigrams(self.text))
if is_content_word(b[0]) and is_content_word(b[1])]
dist = nltk.FreqDist([b for b in filtered_bigrams])
return dist.most_common(n=n)
def concordance(self, word):
self.text.concordance(word)
## new methods for search part of assignment 3
def search(self, pattern):
return re.finditer(pattern, self.raw)
def find_sirs(self):
answer = set()
for match in self.search(r"\bSir \S+\b"):
answer.add(match.group())
return sorted(answer)
def find_brackets(self):
answer = set()
# use a non-greedy match on the characters between the brackets
for match in self.search(r"([\(\[\{]).+?([\)\]\}])"):
brackets = "%s%s" % (match.group(1), match.group(2))
# this tests for matching pairs
if brackets in ['[]', '{}', '()']:
answer.add(match.group())
return sorted(answer)
def find_roles(self):
answer = set()
for match in re.finditer(r"^([A-Z]{2,}[^\:]+): ", self.raw, re.MULTILINE):
answer.add(match.group(1))
return sorted(answer)
def find_repeated_words(self):
answer = set()
for match in self.search(r"(\w{3,}) \1 \1"):
answer.add(match.group())
return sorted(answer)
def apply_fsa(self, fsa):
i = 0
results = []
while i < len(self):
match = fsa.consume(self.text[i:])
if match:
results.append((i, match))
i += len(match)
else:
i += 1
return results
class Vocabulary():
"""Class to store all information on a vocabulary, where a vocabulary is created
from a text. The vocabulary includes the text, a frequency distribution over
that text, the vocabulary items themselves (as a set) and the sizes of the
vocabulary and the text. We do not store POS and gloss, for those we rely on
WordNet. The vocabulary is contrained to those words that occur in a
standard word list. Vocabulary items are not normalized, except for being in
lower case."""
def __init__(self, text):
self.text = text.text
# keeping the unfiltered list around for statistics
self.all_items = set([w.lower() for w in text])
self.items = self.all_items.intersection(ENGLISH_VOCABULARY)
# restricting the frequency dictionary to vocabulary items
self.fdist = nltk.FreqDist(t.lower() for t in text if t.lower() in self.items)
self.text_size = len(self.text)
self.vocab_size = len(self.items)
def __str__(self):
return "<Vocabulary size=%d text_size=%d>" % (self.vocab_size, self.text_size)
def __len__(self):
return self.vocab_size
def frequency(self, word):
return self.fdist[word]
def pos(self, word):
# do not volunteer the pos for words not in the vocabulary
if word not in self.items:
return None
synsets = wn.synsets(word)
# somewhat arbitrary choice to make unknown words nouns, returning None
# or 'UNKNOWN' would have been fine too.
return synsets[0].pos() if synsets else 'n'
def gloss(self, word):
# do not volunteer the gloss (definition) for words not in the vocabulary
if word not in self.items:
return None
synsets = wn.synsets(word)
# make a difference between None for words not in vocabulary and words
# in the vocabulary that do not have a gloss in WordNet
return synsets[0].definition() if synsets else 'NO DEFINITION'
def kwic(self, word):
self.text.concordance(word)
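# Illustrative sketch (assumption, not part of the original assignment code;
# the file path below is hypothetical):
#
#   grail = Text('data/grail.txt', name='grail')
#   print(grail.token_count(), grail.type_count(), grail.sentence_count())
#   vocab = Vocabulary(grail)
#   print(vocab.frequency('swallow'), vocab.pos('swallow'), vocab.gloss('swallow'))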
| [
"[email protected]"
] | |
e0cca15b4698cfcef55c59c32ad1ec019b327f0b | b576ed1ff65700d505f687961cbed86fe94b1c3f | /objectModel/Python/cdm/utilities/copy_data_utils.py | 52fd4d1ee5390f942bbde1ef66b2b5cca9e4104f | [
"MIT",
"CC-BY-4.0"
] | permissive | AzureMentor/CDM | c80761737c92cf6561d4b982b4882b1b1c5265d3 | 84d3928995e7ab3bba0a283771e5e26639408643 | refs/heads/master | 2021-11-30T17:52:42.274900 | 2021-11-27T18:38:19 | 2021-11-27T18:38:19 | 217,569,642 | 1 | 0 | NOASSERTION | 2021-11-27T18:38:20 | 2019-10-25T16:04:16 | Java | UTF-8 | Python | false | false | 870 | py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information
from typing import Union, List, Optional, TYPE_CHECKING
if TYPE_CHECKING:
from cdm.objectmodel import CdmCollection, CdmObject
from cdm.utilities import ResolveOptions, CopyOptions
def _array_copy_data(res_opt: 'ResolveOptions', source: Union['CdmCollection', List['CdmObject']], options: 'CopyOptions') -> Optional[List]:
"""Creates a list object that is a copy of the input IEnumerable object"""
if not source:
return None
casted = []
for elem in source:
if elem:
from cdm.persistence import PersistenceLayer
data = PersistenceLayer.to_data(elem, res_opt, options, PersistenceLayer.CDM_FOLDER)
casted.append(data)
return casted | [
"[email protected]"
] | |
eccf91200ca22006ec27e2a110af49ed35f9e3e8 | 556db265723b0cc30ad2917442ed6dad92fd9044 | /tensorflow/python/kernel_tests/linalg/linear_operator_zeros_test.py | 8ca4e0f796ff15070fe471e1bafd0e2de2eef998 | [
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | graphcore/tensorflow | c1669b489be0e045b3ec856b311b3139858de196 | 085b20a4b6287eff8c0b792425d52422ab8cbab3 | refs/heads/r2.6/sdk-release-3.2 | 2023-07-06T06:23:53.857743 | 2023-03-14T13:04:04 | 2023-03-14T13:48:43 | 162,717,602 | 84 | 17 | Apache-2.0 | 2023-03-25T01:13:37 | 2018-12-21T13:30:38 | C++ | UTF-8 | Python | false | false | 8,721 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables as variables_module
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.platform import test
rng = np.random.RandomState(2016)
@test_util.run_all_in_graph_and_eager_modes
class LinearOperatorZerosTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
@staticmethod
def skip_these_tests():
return [
"cholesky",
"cond",
"inverse",
"log_abs_det",
"solve",
"solve_with_broadcast"
]
@staticmethod
def operator_shapes_infos():
shapes_info = linear_operator_test_util.OperatorShapesInfo
return [
shapes_info((1, 1)),
shapes_info((1, 3, 3)),
shapes_info((3, 4, 4)),
shapes_info((2, 1, 4, 4))]
def operator_and_matrix(
self, build_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
del ensure_self_adjoint_and_pd
del use_placeholder
shape = list(build_info.shape)
assert shape[-1] == shape[-2]
batch_shape = shape[:-2]
num_rows = shape[-1]
operator = linalg_lib.LinearOperatorZeros(
num_rows, batch_shape=batch_shape, dtype=dtype)
matrix = array_ops.zeros(shape=shape, dtype=dtype)
return operator, matrix
def test_assert_positive_definite(self):
operator = linalg_lib.LinearOperatorZeros(num_rows=2)
with self.assertRaisesOpError("non-positive definite"):
operator.assert_positive_definite()
def test_assert_non_singular(self):
with self.assertRaisesOpError("non-invertible"):
operator = linalg_lib.LinearOperatorZeros(num_rows=2)
operator.assert_non_singular()
def test_assert_self_adjoint(self):
with self.cached_session():
operator = linalg_lib.LinearOperatorZeros(num_rows=2)
self.evaluate(operator.assert_self_adjoint()) # Should not fail
def test_non_scalar_num_rows_raises_static(self):
with self.assertRaisesRegex(ValueError, "must be a 0-D Tensor"):
linalg_lib.LinearOperatorZeros(num_rows=[2])
with self.assertRaisesRegex(ValueError, "must be a 0-D Tensor"):
linalg_lib.LinearOperatorZeros(num_rows=2, num_columns=[2])
def test_non_integer_num_rows_raises_static(self):
with self.assertRaisesRegex(TypeError, "must be integer"):
linalg_lib.LinearOperatorZeros(num_rows=2.)
with self.assertRaisesRegex(TypeError, "must be integer"):
linalg_lib.LinearOperatorZeros(num_rows=2, num_columns=2.)
def test_negative_num_rows_raises_static(self):
with self.assertRaisesRegex(ValueError, "must be non-negative"):
linalg_lib.LinearOperatorZeros(num_rows=-2)
with self.assertRaisesRegex(ValueError, "must be non-negative"):
linalg_lib.LinearOperatorZeros(num_rows=2, num_columns=-2)
def test_non_1d_batch_shape_raises_static(self):
with self.assertRaisesRegex(ValueError, "must be a 1-D"):
linalg_lib.LinearOperatorZeros(num_rows=2, batch_shape=2)
def test_non_integer_batch_shape_raises_static(self):
with self.assertRaisesRegex(TypeError, "must be integer"):
linalg_lib.LinearOperatorZeros(num_rows=2, batch_shape=[2.])
def test_negative_batch_shape_raises_static(self):
with self.assertRaisesRegex(ValueError, "must be non-negative"):
linalg_lib.LinearOperatorZeros(num_rows=2, batch_shape=[-2])
def test_non_scalar_num_rows_raises_dynamic(self):
with self.cached_session():
num_rows = array_ops.placeholder_with_default([2], shape=None)
with self.assertRaisesError("must be a 0-D Tensor"):
operator = linalg_lib.LinearOperatorZeros(
num_rows, assert_proper_shapes=True)
self.evaluate(operator.to_dense())
def test_negative_num_rows_raises_dynamic(self):
with self.cached_session():
n = array_ops.placeholder_with_default(-2, shape=None)
with self.assertRaisesError("must be non-negative"):
operator = linalg_lib.LinearOperatorZeros(
num_rows=n, assert_proper_shapes=True)
self.evaluate(operator.to_dense())
def test_non_1d_batch_shape_raises_dynamic(self):
with self.cached_session():
batch_shape = array_ops.placeholder_with_default(2, shape=None)
with self.assertRaisesError("must be a 1-D"):
operator = linalg_lib.LinearOperatorZeros(
num_rows=2, batch_shape=batch_shape, assert_proper_shapes=True)
self.evaluate(operator.to_dense())
def test_negative_batch_shape_raises_dynamic(self):
with self.cached_session():
batch_shape = array_ops.placeholder_with_default([-2], shape=None)
with self.assertRaisesError("must be non-negative"):
operator = linalg_lib.LinearOperatorZeros(
num_rows=2, batch_shape=batch_shape, assert_proper_shapes=True)
self.evaluate(operator.to_dense())
def test_wrong_matrix_dimensions_raises_static(self):
operator = linalg_lib.LinearOperatorZeros(num_rows=2)
x = rng.randn(3, 3).astype(np.float32)
with self.assertRaisesRegex(ValueError, "Dimensions.*not compatible"):
operator.matmul(x)
def test_wrong_matrix_dimensions_raises_dynamic(self):
num_rows = array_ops.placeholder_with_default(2, shape=None)
x = array_ops.placeholder_with_default(rng.rand(3, 3), shape=None)
with self.cached_session():
with self.assertRaisesError("Dimensions.*not.compatible"):
operator = linalg_lib.LinearOperatorZeros(
num_rows, assert_proper_shapes=True, dtype=dtypes.float64)
self.evaluate(operator.matmul(x))
def test_is_x_flags(self):
    # For LinearOperatorZeros the flags default to is_positive_definite=False,
    # is_non_singular=False and is_self_adjoint=True, as asserted below.
operator = linalg_lib.LinearOperatorZeros(num_rows=2)
self.assertFalse(operator.is_positive_definite)
self.assertFalse(operator.is_non_singular)
self.assertTrue(operator.is_self_adjoint)
def test_zeros_matmul(self):
operator1 = linalg_lib.LinearOperatorIdentity(num_rows=2)
operator2 = linalg_lib.LinearOperatorZeros(num_rows=2)
self.assertTrue(isinstance(
operator1.matmul(operator2),
linalg_lib.LinearOperatorZeros))
self.assertTrue(isinstance(
operator2.matmul(operator1),
linalg_lib.LinearOperatorZeros))
def test_ref_type_shape_args_raises(self):
with self.assertRaisesRegex(TypeError, "num_rows.cannot.be.reference"):
linalg_lib.LinearOperatorZeros(num_rows=variables_module.Variable(2))
with self.assertRaisesRegex(TypeError, "num_columns.cannot.be.reference"):
linalg_lib.LinearOperatorZeros(
num_rows=2, num_columns=variables_module.Variable(3))
with self.assertRaisesRegex(TypeError, "batch_shape.cannot.be.reference"):
linalg_lib.LinearOperatorZeros(
num_rows=2, batch_shape=variables_module.Variable([2]))
@test_util.run_all_in_graph_and_eager_modes
class LinearOperatorZerosNotSquareTest(
linear_operator_test_util.NonSquareLinearOperatorDerivedClassTest):
def operator_and_matrix(
self, build_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
del use_placeholder
del ensure_self_adjoint_and_pd
shape = list(build_info.shape)
batch_shape = shape[:-2]
num_rows = shape[-2]
num_columns = shape[-1]
operator = linalg_lib.LinearOperatorZeros(
num_rows, num_columns, is_square=False, is_self_adjoint=False,
batch_shape=batch_shape, dtype=dtype)
matrix = array_ops.zeros(shape=shape, dtype=dtype)
return operator, matrix
if __name__ == "__main__":
linear_operator_test_util.add_tests(LinearOperatorZerosTest)
linear_operator_test_util.add_tests(LinearOperatorZerosNotSquareTest)
test.main()
| [
"[email protected]"
] | |
7ff960b1f5fe2ab8db39e70e382084c495881cb8 | 1b12e6096c47312b67fa6ff223216945d2efb70c | /sandbox/vtk/selection/myinteractor.py | 139202e49f1fe0d1418bde34dcae5a42beb929c2 | [
"Apache-2.0"
] | permissive | rboman/progs | 6e3535bc40f78d692f1f63b1a43193deb60d8d24 | 03eea35771e37d4b3111502c002e74014ec65dc3 | refs/heads/master | 2023-09-02T17:12:18.272518 | 2023-08-31T15:40:04 | 2023-08-31T15:40:04 | 32,989,349 | 5 | 2 | Apache-2.0 | 2022-06-22T10:58:38 | 2015-03-27T14:04:01 | MATLAB | UTF-8 | Python | false | false | 4,329 | py | # -*- coding: utf-8 -*-
import vtk
colors = vtk.vtkNamedColors()
class MyInteractorStyle(vtk.vtkInteractorStyleTrackballCamera):
def __init__(self, parent=None):
"""register to event listening
"""
self.AddObserver("LeftButtonPressEvent", self.leftButtonPressEvent)
self.selection = None
self.selected_mapper = vtk.vtkDataSetMapper()
self.selected_actor = vtk.vtkActor()
self.dataset = None
def select_one(self):
# get the mouse click position
clickPos = self.GetInteractor().GetEventPosition()
        # create a picker and pick at that position
picker = vtk.vtkCellPicker()
picker.Pick(clickPos[0], clickPos[1], 0, self.GetDefaultRenderer())
print("pick")
print(f"\tcell id = {picker.GetCellId()}")
print(f"\t3D pick position = {picker.GetPickPosition()}")
print(f"\t2D mouse position = {picker.GetSelectionPoint()[:2]}")
# the picking could be empty
# in that case, we leave the routine
if picker.GetDataSet():
print(f"\tdataset = {picker.GetDataSet().GetClassName()}")
else:
print(f"\tdataset = None")
return
# no cell has been picked => quit
if picker.GetCellId()==-1:
return
# cell type - we can pick triangles, but also tetras
cell_type = picker.GetDataSet().GetCellType( picker.GetCellId() )
print(f"\tcell type = { vtk.vtkCellTypes.GetClassNameFromTypeId( cell_type )}")
if(cell_type != vtk.VTK_TRIANGLE ):
print("\tWRONG CELL TYPE")
return
# we can pick the wrong ugrid (the red one)
# we store the right one at the first successful picking
if self.dataset == None:
self.dataset = picker.GetDataSet()
if picker.GetDataSet() != self.dataset:
print(f"\tPICKED WRONG DATASET!")
return
        # -- create a "vtkSelectionNode" (selection data + selection type)
ids = vtk.vtkIdTypeArray()
ids.SetNumberOfComponents(1)
ids.InsertNextValue(picker.GetCellId())
selectionNode = vtk.vtkSelectionNode()
selectionNode.SetFieldType(vtk.vtkSelectionNode.CELL)
# CELL,POINT,FIELD,VERTEX,EDGE,ROW
selectionNode.SetContentType(vtk.vtkSelectionNode.INDICES)
# SELECTIONS,GLOBALIDS,PEDIGREEIDS,VALUES,INDICES,FRUSTUM,
# LOCATIONS,THRESHOLDS,BLOCKS,QUERY
selectionNode.SetSelectionList(ids)
        # -- create a "vtkSelection" (the selection itself);
        # it is a set of "selection nodes"
if not self.selection:
self.selection = vtk.vtkSelection()
self.selection.AddNode(selectionNode)
else:
self.selection.Union(selectionNode)
print( f"\tThere are {self.selection.GetNumberOfNodes()} 'selection nodes'.")
        # -- DISPLAY: create a "vtkExtractSelection"
extractSelection = vtk.vtkExtractSelection()
extractSelection.SetInputData(0, picker.GetDataSet())
        # extractSelection.SetInputConnection(0, filt.GetOutputPort()) # when the input comes from a filter
extractSelection.SetInputData(1, self.selection)
extractSelection.Update()
# build a ugrid for display
selected = vtk.vtkUnstructuredGrid()
selected.ShallowCopy(extractSelection.GetOutput())
print( f"\tThere are {selected.GetNumberOfPoints()} points in the selection.")
print( f"\tThere are {selected.GetNumberOfCells()} cells in the selection.")
self.selected_mapper.SetInputData(selected)
self.selected_actor.SetMapper(self.selected_mapper)
self.selected_actor.GetProperty().EdgeVisibilityOn()
self.selected_actor.GetProperty().SetColor( colors.GetColor3d('red') )
self.selected_actor.GetProperty().SetLineWidth(3)
        self.GetDefaultRenderer().AddActor(self.selected_actor) # global - not added again if it is already present
print(f'nb of actors = {self.GetDefaultRenderer().GetActors().GetNumberOfItems()}')
def leftButtonPressEvent(self, obj, event):
"""custom event
"""
self.select_one()
self.OnLeftButtonDown() # calls vtk.vtkInteractorStyleTrackballCamera
| [
"[email protected]"
] | |
9bbe6d6fdb9a744918ebab1c2d430323a7d02271 | 7c94bd20b7ee069dfb557f41279416aba7d8447a | /exchangelib/folders/roots.py | 7b4ec7acd98342333d43a75de5373ee33c2603cf | [
"BSD-2-Clause"
] | permissive | AnkushGupta02/exchangelib | 63a42d70fe8254ca2edb6075ac05822a8ccaae01 | 5430e603a1b42248c6a154ae24270b63e94cc49d | refs/heads/master | 2022-08-19T08:34:49.634728 | 2020-05-28T10:25:33 | 2020-05-28T10:25:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,465 | py | import logging
from ..errors import ErrorAccessDenied, ErrorFolderNotFound, ErrorNoPublicFolderReplicaAvailable, ErrorItemNotFound, \
ErrorInvalidOperation
from ..fields import EffectiveRightsField
from ..properties import Fields
from ..version import EXCHANGE_2007_SP1, EXCHANGE_2010_SP1
from .collections import FolderCollection
from .base import BaseFolder
from .known_folders import MsgFolderRoot, NON_DELETEABLE_FOLDERS, WELLKNOWN_FOLDERS_IN_ROOT, \
WELLKNOWN_FOLDERS_IN_ARCHIVE_ROOT
from .queryset import SingleFolderQuerySet, SHALLOW
log = logging.getLogger(__name__)
class RootOfHierarchy(BaseFolder):
"""Base class for folders that implement the root of a folder hierarchy"""
# A list of wellknown, or "distinguished", folders that are belong in this folder hierarchy. See
# https://docs.microsoft.com/en-us/dotnet/api/microsoft.exchange.webservices.data.wellknownfoldername
# and https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/distinguishedfolderid
# 'RootOfHierarchy' subclasses must not be in this list.
WELLKNOWN_FOLDERS = []
LOCAL_FIELDS = Fields(
# This folder type also has 'folder:PermissionSet' on some server versions, but requesting it sometimes causes
        # 'ErrorAccessDenied', as reported by some users. Ignore it entirely for root folders - its usefulness is
# deemed minimal at best.
EffectiveRightsField('effective_rights', field_uri='folder:EffectiveRights', is_read_only=True,
supported_from=EXCHANGE_2007_SP1),
)
FIELDS = BaseFolder.FIELDS + LOCAL_FIELDS
__slots__ = tuple(f.name for f in LOCAL_FIELDS) + ('_account', '_subfolders')
# A special folder that acts as the top of a folder hierarchy. Finds and caches subfolders at arbitrary depth.
def __init__(self, **kwargs):
self._account = kwargs.pop('account', None) # A pointer back to the account holding the folder hierarchy
super().__init__(**kwargs)
self._subfolders = None # See self._folders_map()
@property
def account(self):
return self._account
@property
def root(self):
return self
@property
def parent(self):
return None
def refresh(self):
self._subfolders = None
super().refresh()
@classmethod
def register(cls, *args, **kwargs):
if cls is not RootOfHierarchy:
raise TypeError('For folder roots, custom fields must be registered on the RootOfHierarchy class')
return super().register(*args, **kwargs)
@classmethod
def deregister(cls, *args, **kwargs):
if cls is not RootOfHierarchy:
raise TypeError('For folder roots, custom fields must be registered on the RootOfHierarchy class')
return super().deregister(*args, **kwargs)
def get_folder(self, folder_id):
return self._folders_map.get(folder_id, None)
def add_folder(self, folder):
if not folder.id:
raise ValueError("'folder' must have an ID")
self._folders_map[folder.id] = folder
def update_folder(self, folder):
if not folder.id:
raise ValueError("'folder' must have an ID")
self._folders_map[folder.id] = folder
def remove_folder(self, folder):
if not folder.id:
raise ValueError("'folder' must have an ID")
try:
del self._folders_map[folder.id]
except KeyError:
pass
def clear_cache(self):
self._subfolders = None
def get_children(self, folder):
for f in self._folders_map.values():
if not f.parent:
continue
if f.parent.id == folder.id:
yield f
@classmethod
def get_distinguished(cls, account):
"""Gets the distinguished folder for this folder class"""
if not cls.DISTINGUISHED_FOLDER_ID:
raise ValueError('Class %s must have a DISTINGUISHED_FOLDER_ID value' % cls)
try:
return cls.resolve(
account=account,
folder=cls(account=account, name=cls.DISTINGUISHED_FOLDER_ID, is_distinguished=True)
)
except ErrorFolderNotFound:
raise ErrorFolderNotFound('Could not find distinguished folder %s' % cls.DISTINGUISHED_FOLDER_ID)
def get_default_folder(self, folder_cls):
# Returns the distinguished folder instance of type folder_cls belonging to this account. If no distinguished
# folder was found, try as best we can to return the default folder of type 'folder_cls'
if not folder_cls.DISTINGUISHED_FOLDER_ID:
raise ValueError("'folder_cls' %s must have a DISTINGUISHED_FOLDER_ID value" % folder_cls)
# Use cached distinguished folder instance, but only if cache has already been prepped. This is an optimization
# for accessing e.g. 'account.contacts' without fetching all folders of the account.
if self._subfolders:
for f in self._folders_map.values():
# Require exact class, to not match subclasses, e.g. RecipientCache instead of Contacts
if f.__class__ == folder_cls and f.is_distinguished:
log.debug('Found cached distinguished %s folder', folder_cls)
return f
try:
log.debug('Requesting distinguished %s folder explicitly', folder_cls)
return folder_cls.get_distinguished(root=self)
except ErrorAccessDenied:
# Maybe we just don't have GetFolder access? Try FindItems instead
log.debug('Testing default %s folder with FindItem', folder_cls)
fld = folder_cls(root=self, name=folder_cls.DISTINGUISHED_FOLDER_ID, is_distinguished=True)
fld.test_access()
return self._folders_map.get(fld.id, fld) # Use cached instance if available
except ErrorFolderNotFound:
# The Exchange server does not return a distinguished folder of this type
pass
raise ErrorFolderNotFound('No useable default %s folders' % folder_cls)
@property
def _folders_map(self):
if self._subfolders is not None:
return self._subfolders
# Map root, and all subfolders of root, at arbitrary depth by folder ID. First get distinguished folders, so we
# are sure to apply the correct Folder class, then fetch all subfolders of this root.
folders_map = {self.id: self}
distinguished_folders = [
cls(root=self, name=cls.DISTINGUISHED_FOLDER_ID, is_distinguished=True)
for cls in self.WELLKNOWN_FOLDERS
if cls.get_folder_allowed and cls.supports_version(self.account.version)
]
for f in FolderCollection(account=self.account, folders=distinguished_folders).resolve():
if isinstance(f, (ErrorFolderNotFound, ErrorNoPublicFolderReplicaAvailable)):
# This is just a distinguished folder the server does not have
continue
if isinstance(f, ErrorInvalidOperation):
# This is probably a distinguished folder the server does not have. We previously tested the exact
# error message (f.value), but some Exchange servers return localized error messages, so that's not
# possible to do reliably.
continue
if isinstance(f, ErrorItemNotFound):
# Another way of telling us that this is a distinguished folder the server does not have
continue
if isinstance(f, ErrorAccessDenied):
# We may not have GetFolder access, either to this folder or at all
continue
if isinstance(f, Exception):
raise f
folders_map[f.id] = f
for f in SingleFolderQuerySet(account=self.account, folder=self).depth(
self.DEFAULT_FOLDER_TRAVERSAL_DEPTH
).all():
if isinstance(f, ErrorAccessDenied):
# We may not have FindFolder access, or GetFolder access, either to this folder or at all
continue
if isinstance(f, Exception):
raise f
if f.id in folders_map:
# Already exists. Probably a distinguished folder
continue
folders_map[f.id] = f
self._subfolders = folders_map
return folders_map
@classmethod
def from_xml(cls, elem, account):
kwargs = cls._kwargs_from_elem(elem=elem, account=account)
cls._clear(elem)
return cls(account=account, **kwargs)
@classmethod
def folder_cls_from_folder_name(cls, folder_name, locale):
"""Returns the folder class that matches a localized folder name.
locale is a string, e.g. 'da_DK'
"""
for folder_cls in cls.WELLKNOWN_FOLDERS + NON_DELETEABLE_FOLDERS:
if folder_name.lower() in folder_cls.localized_names(locale):
return folder_cls
raise KeyError()
def __repr__(self):
# Let's not create an infinite loop when printing self.root
return self.__class__.__name__ + \
repr((self.account, '[self]', self.name, self.total_count, self.unread_count, self.child_folder_count,
self.folder_class, self.id, self.changekey))
class Root(RootOfHierarchy):
"""The root of the standard folder hierarchy"""
DISTINGUISHED_FOLDER_ID = 'root'
WELLKNOWN_FOLDERS = WELLKNOWN_FOLDERS_IN_ROOT
__slots__ = tuple()
@property
def tois(self):
# 'Top of Information Store' is a folder available in some Exchange accounts. It usually contains the
# distinguished folders belonging to the account (inbox, calendar, trash etc.).
return self.get_default_folder(MsgFolderRoot)
def get_default_folder(self, folder_cls):
try:
return super().get_default_folder(folder_cls)
except ErrorFolderNotFound:
pass
        # Try to pick a suitable default folder. We do this by:
# 1. Searching the full folder list for a folder with the distinguished folder name
# 2. Searching TOIS for a direct child folder of the same type that is marked as distinguished
# 3. Searching TOIS for a direct child folder of the same type that is has a localized name
# 4. Searching root for a direct child folder of the same type that is marked as distinguished
# 5. Searching root for a direct child folder of the same type that is has a localized name
log.debug('Searching default %s folder in full folder list', folder_cls)
for f in self._folders_map.values():
# Require exact class to not match e.g. RecipientCache instead of Contacts
if f.__class__ == folder_cls and f.has_distinguished_name:
log.debug('Found cached %s folder with default distinguished name', folder_cls)
return f
# Try direct children of TOIS first. TOIS might not exist.
try:
return self._get_candidate(folder_cls=folder_cls, folder_coll=self.tois.children)
except ErrorFolderNotFound:
            # No candidates, or TOIS does not exist
pass
# No candidates in TOIS. Try direct children of root.
return self._get_candidate(folder_cls=folder_cls, folder_coll=self.children)
def _get_candidate(self, folder_cls, folder_coll):
        # Get the single folder of the same type in folder_coll
same_type = [f for f in folder_coll if f.__class__ == folder_cls]
are_distinguished = [f for f in same_type if f.is_distinguished]
if are_distinguished:
candidates = are_distinguished
else:
candidates = [f for f in same_type if f.name.lower() in folder_cls.localized_names(self.account.locale)]
if candidates:
if len(candidates) > 1:
raise ValueError(
'Multiple possible default %s folders: %s' % (folder_cls, [f.name for f in candidates])
)
if candidates[0].is_distinguished:
log.debug('Found cached distinguished %s folder', folder_cls)
else:
log.debug('Found cached %s folder with localized name', folder_cls)
return candidates[0]
raise ErrorFolderNotFound('No useable default %s folders' % folder_cls)
class PublicFoldersRoot(RootOfHierarchy):
"""The root of the public folders hierarchy. Not available on all mailboxes"""
DISTINGUISHED_FOLDER_ID = 'publicfoldersroot'
DEFAULT_FOLDER_TRAVERSAL_DEPTH = SHALLOW
supported_from = EXCHANGE_2007_SP1
__slots__ = tuple()
def get_children(self, folder):
# EWS does not allow deep traversal of public folders, so self._folders_map will only populate the top-level
# subfolders. To traverse public folders at arbitrary depth, we need to get child folders on demand.
# Let's check if this folder already has any cached children. If so, assume we can just return those.
children = list(super().get_children(folder=folder))
if children:
# Return a generator like our parent does
for f in children:
yield f
return
# Also return early if the server told us that there are no child folders.
if folder.child_folder_count == 0:
return
children_map = {}
try:
for f in SingleFolderQuerySet(account=self.account, folder=folder).depth(
self.DEFAULT_FOLDER_TRAVERSAL_DEPTH
).all():
if isinstance(f, Exception):
raise f
children_map[f.id] = f
except ErrorAccessDenied:
# No access to this folder
pass
# Let's update the cache atomically, to avoid partial reads of the cache.
self._subfolders.update(children_map)
# Child folders have been cached now. Try super().get_children() again.
for f in super().get_children(folder=folder):
yield f
class ArchiveRoot(RootOfHierarchy):
"""The root of the archive folders hierarchy. Not available on all mailboxes"""
DISTINGUISHED_FOLDER_ID = 'archiveroot'
supported_from = EXCHANGE_2010_SP1
WELLKNOWN_FOLDERS = WELLKNOWN_FOLDERS_IN_ARCHIVE_ROOT
__slots__ = tuple()
| [
"[email protected]"
] | |
bf2df9013b94ee7ca80c35660b101bf47f905569 | bd4f8320118c4fb25b95d29193c1adb2f5b55ec6 | /contrib/userproperty_lint.py | 7d99b16806929b36131ad944ccb545cac48d4c45 | [
"Apache-2.0"
] | permissive | Khan/khan-linter | 30229d57ec82466af54b539eb3a57770335e0d65 | 9222e8f8c9aa6dead5c434d1eb7bb326207ed989 | refs/heads/master | 2023-07-21T05:06:19.757797 | 2022-07-11T16:54:42 | 2022-07-11T16:54:42 | 4,628,579 | 26 | 8 | Apache-2.0 | 2023-09-06T21:29:52 | 2012-06-11T18:29:03 | Python | UTF-8 | Python | false | false | 1,639 | py | """Linter that warns about using the dangerous UserProperty.
UserProperty's user_id value can change depending on whether or not Google
currently has a Google account registered w/ an email address that matches
UserProperty's email property. That means when a user changes email settings
in their Google account it can change the behavior of our queries. We don't
want that.
"""
from __future__ import absolute_import
import re
from shared.testutil import lintutil
# This captures any use of UserProperty on a db or ndb model. It will not
# capture subclasses of UserProperty, but we don't expect any of those to be
# around.
_USERPROPERTY_RE = re.compile(r'\bn?db\.UserProperty\(', re.DOTALL)
def lint_no_user_property(files_to_lint):
"""Enforce that nobody uses UserProperty.
...unless marked as an explicitly approved legacy usage via @Nolint.
"""
files_to_lint = lintutil.filter(files_to_lint, suffix='.py')
for filename in files_to_lint:
contents = lintutil.file_contents(filename)
for fn_match in _USERPROPERTY_RE.finditer(contents):
# Make sure there's no @Nolint anywhere around this function.
newline = contents.find('\n', fn_match.end())
newline = newline if newline > -1 else len(contents)
if '@Nolint' in contents[fn_match.start():newline]:
continue
linenum = 1 + contents.count('\n', 0, fn_match.start())
yield (filename, linenum, # filename and linenum
"Do not use UserProperty, it is not safe. Use UserData's "
"key as its foreign key, instead.")
| [
"[email protected]"
] | |
95b5c45037161cace8ce3128cfd2bf49dc2bb7b6 | fc6eefb980b53baae393980c46ac40d256687014 | /Udacity-Intro-To-Computer-Science/Lesson 1/Lesson 1 - Quizzes/Final Quiz.py | 8aa9f447ce3f3fde860303b34c61711a69cb1cb7 | [] | no_license | Brian-Mascitello/UCB-Third-Party-Classes | 7bc151d348f753f93850f5e286c263639f782b05 | e2d26e3d207d364462024759ad2342a8e172f657 | refs/heads/master | 2021-01-02T09:10:01.146169 | 2018-10-08T00:19:58 | 2018-10-08T00:19:58 | 99,150,324 | 0 | 0 | null | 2018-02-01T06:33:25 | 2017-08-02T18:47:29 | Python | UTF-8 | Python | false | false | 780 | py | # Write Python code that assigns to the
# variable url a string that is the value
# of the first URL that appears in a link
# tag in the string page.
# Your code should print http://udacity.com
# Make sure that if page were changed to
# page = '<a href="http://udacity.com">Hello world</a>'
# that your code still assigns the same value to the variable 'url',
# and therefore still prints the same thing.
# page = contents of a web page
page =('<div id="top_bin"><div id="top_content" class="width960">'
'<div class="udacity float-left"><a href="http://udacity.com">')
start_link = page.find('<a href=')
end_link = page.find('>', start_link)
start_position = start_link + len('<a href=') + 1
end_position = end_link - 1
url = page[start_position:end_position]
print(url)
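# --- editor's addition (not part of the original quiz solution): the same
# extraction done with str.split, shown only as an alternative to the index
# arithmetic above.
url_alt = page.split('<a href="')[1].split('"')[0]
print(url_alt)  # also prints http://udacity.com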
| [
"[email protected]"
] | |
2c6de1d98469164b77e496a0c33bfd4a67f22e17 | 1f5420fda4359bfc21b53de3a5f6e6a93b47b996 | /ch02/ch02_menu.py | 5abfa489c2386f900a6c3f914341bd20f4c6a22b | [] | no_license | fl0wjacky/wxPython | 600f5bfccad3ef5589e11573b30cffd1e2708b83 | 50b3cd5a63750d36065684b73aab0da70ff650a7 | refs/heads/master | 2022-09-02T04:24:47.540157 | 2022-08-10T04:13:17 | 2022-08-10T04:13:17 | 13,976,582 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,071 | py | #! /usr/bin/env python
import wx
import wx.py.images as images
class ToolbarFrame(wx.Frame):
def __init__(self, parent, id):
wx.Frame.__init__(self, parent, id, 'Toolbars', size=(300,200))
panel = wx.Panel(self)
panel.SetBackgroundColour('White')
statusBar = self.CreateStatusBar()#1
toolbar = self.CreateToolBar()#2
toolbar.AddSimpleTool(wx.NewId(),images.getPyBitmap(),"New","Long help for 'New'")#3
toolbar.Realize()#4
menuBar = wx.MenuBar()
menu1 = wx.Menu()
menuBar.Append(menu1,"&File")
menu2 = wx.Menu()
#6
menu2.Append(wx.NewId(),"&Copy","Copy in status bar")
menu2.Append(wx.NewId(),"C&ut","")
menu2.Append(wx.NewId(),"Paste","")
menu2.AppendSeparator()
menu2.Append(wx.NewId(),"&Options...","Display Options")
menuBar.Append(menu2,"&Edit")
self.SetMenuBar(menuBar)
if __name__ == "__main__":
app = wx.PySimpleApp()
frame = ToolbarFrame(parent=None, id = -1)
frame.Show()
app.MainLoop()
| [
"[email protected]"
] | |
9e28e0cd12e58048913b3c3764cd180e05af5636 | 9e41adf86b2c166a219f0b6d9371089c5f2d7d93 | /Exerciciospython2/Função/e100.py | 0b47e1bb8952e250e0f02facf33b98bfe7653f2f | [] | no_license | Nadirlene/Exercicios-python | 1aaead61dd0efcb5303f6294e765e9e1d54506cc | 3fe82e166003922ef749756a249840ed1fe940b0 | refs/heads/main | 2022-12-25T21:35:06.172839 | 2020-09-28T15:08:37 | 2020-09-28T15:08:37 | 299,343,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | from random import randint
from time import sleep
númerosSorteados = []
def sorteio(lista):
print(f'Sorteando {len(lista)} valores da lista:', end=' ')
for c in range(0, 5):
lista.append(randint(1, 10))
print(lista[c], end=' ')
sleep(0.3)
print('PRONTO!')
def somaPar(lista):
soma = 0
for c in lista:
if c % 2 == 0:
soma += c
print(f'Somando os valores pares de {lista}, temos {soma}')
sorteio(númerosSorteados)
somaPar(númerosSorteados)
| [
"[email protected]"
] | |
b21ef021ca3d6afdf535882ef61eb49b75bf895c | 8b7db851e13737d5c44cc00d38a46a2817c7707b | /tests/train.py | 788e79cd09e75082a8dc8cf4d75b3dd063b824b5 | [
"MIT"
] | permissive | goelshivam1210/gym-novel-gridworlds | b6f24b38cfceb2b44461da9bb7607c56d27f4a9e | c8f419da02e4fd716b9e293fcf0b99ee2eb96367 | refs/heads/master | 2023-01-15T13:46:23.438199 | 2020-11-23T14:42:13 | 2020-11-23T14:42:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,349 | py | import os
import time
import gym
import gym_novel_gridworlds
import numpy as np
from stable_baselines.common.env_checker import check_env
from stable_baselines import PPO2
from stable_baselines import DQN
from stable_baselines.gail import ExpertDataset
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines.common import make_vec_env
from stable_baselines.bench import Monitor
from stable_baselines.common.callbacks import BaseCallback
from stable_baselines.results_plotter import load_results, ts2xy
class RenderOnEachStep(BaseCallback):
"""
Callback for saving a model (the check is done every ``check_freq`` steps)
based on the training reward (in practice, we recommend using ``EvalCallback``).
"""
def __init__(self, env):
super(RenderOnEachStep, self).__init__()
self.env = env
def _on_step(self):
self.env.render()
# time.sleep(0.5)
class SaveOnBestTrainingRewardCallback(BaseCallback):
"""
Callback for saving a model (the check is done every ``check_freq`` steps)
based on the training reward (in practice, we recommend using ``EvalCallback``).
"""
def __init__(self, check_freq, log_dir, model_name):
super(SaveOnBestTrainingRewardCallback, self).__init__()
self.check_freq = check_freq
self.log_dir = log_dir
self.save_path = os.path.join(log_dir, model_name)
self.best_mean_reward = -np.inf
def _on_step(self):
if self.n_calls % self.check_freq == 0:
# Retrieve training reward
x, y = ts2xy(load_results(self.log_dir), 'timesteps')
if len(x) > 0:
# Mean training reward over the last 100 episodes
mean_reward = np.mean(y[-100:])
# New best model, you could save the agent here
if mean_reward > self.best_mean_reward:
self.best_mean_reward = mean_reward
print("Saving new best model to {}".format(self.save_path))
self.model.save(self.save_path)
class RemapActionOnEachStep(BaseCallback):
"""
Callback for saving a model (the check is done every ``check_freq`` steps)
based on the training reward (in practice, we recommend using ``EvalCallback``).
"""
def __init__(self, env, step_num):
super(RemapActionOnEachStep, self).__init__()
self.env = env
self.step_num = step_num
def _on_step(self):
if self.n_calls % self.step_num == 0:
# self.env = remap_action(self.env)
self.env.remap_action()
if __name__ == "__main__":
env_id = 'NovelGridworld-v3'
timesteps = 200000 # 200000
experiment_dir = 'results2' # 'models', results
experiment_code1 = env_id + '_' + str(timesteps)
experiment_code2 = '_' + '8beams0filled40range3items_in_360degrees_lfd' # lfd
model_code = experiment_code1 + experiment_code2
log_dir = experiment_dir + os.sep + env_id + experiment_code2
pretrain = True
os.makedirs(log_dir, exist_ok=True)
env = gym.make(env_id)
env = Monitor(env, log_dir)
# callback = RenderOnEachStep(env)
callback = SaveOnBestTrainingRewardCallback(1000, log_dir, model_code + '_best_model')
# callback = RemapActionOnEachStep(env, 50000)
# multiprocess environment
# env = make_vec_env('NovelGridworld-v0', n_envs=4)
check_env(env, warn=True)
# Optional: PPO2 requires a vectorized environment to run
# the env is now wrapped automatically when passing it to the constructor
# env = DummyVecEnv([lambda: env])
# model = PPO2(MlpPolicy, env, verbose=1)
env = DummyVecEnv([lambda: env])
model = PPO2.load('NovelGridworld-v3_200000_8beams0filled40range3items_in_360degrees_lfd_OLD', env)
# Pretrain the model from human recored dataset
# specify `traj_limitation=-1` for using the whole dataset
if pretrain:
dataset = ExpertDataset(expert_path='expert_NovelGridworld-v3_50demos2.npz', traj_limitation=-1, batch_size=128)
model.pretrain(dataset, n_epochs=2000)
model.save(model_code)
# model.learn(total_timesteps=timesteps)
model.learn(total_timesteps=timesteps, callback=callback)
model.save(model_code + '_last_model')
| [
"[email protected]"
] | |
e0bd0c8393e10d70cd1d7736fc15a898d1f059dc | 2e858717fbc3b74cc809dc5d60d337a844ae7fed | /codegolf/planets.py | a4f5b0a908013fcda517843121fbb9b541e6773d | [] | no_license | maxbergmark/misc-scripts | 95a1b5416c34e65b7e8ef26f5c941f9ba0ae0986 | a1b3b889f8f6d28a452969a62af637a6866b69d3 | refs/heads/master | 2020-03-28T10:32:38.362737 | 2019-09-20T12:23:14 | 2019-09-20T12:23:14 | 148,118,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,108 | py |
def get_score(l, s):
c = 0
for i, e in enumerate(l):
c += int(e == s[i])
return c
def check_modulo(l, n):
mod = [i%n for i in l]
s = list(set(mod))
if len(s) == len(l):
return 1, get_score(sorted(s), s)
return 0, 0
def check_modulo_sq(l, n):
mod = [(i*i)%n for i in l]
s = list(set(mod))
if len(s) == len(l):
return 1, get_score(sorted(s), s)
return 0, 0
def check_modulo_cu(l, n):
mod = [(i*i*i)%n for i in l]
s = list(set(mod))
if len(s) == len(l):
return 1, get_score(sorted(s), s)
return 0, 0
l0 = [7238995, 32199698004604234, 121437875888467, 126948200247893, 28550423391528270, 448630251845, 495891408214, 1936875853, 7306076016364904775, 474081421652, 34184320687170893, 8031170932136239427, 28489, 1852796749, 107135534003525, 121424973492820, 478695222352, 1936290373, 107088256000328, 27418995543271764]
l1 = [358452458835, 5899229669892068223989509551434, 100801060862113215052800339, 103298841739860633878360661, 6211190611757106977975624033614, 1279847143846962159941, 1593728898723042190678, 21780717397762381, 370629223365341456449924529812037959, 1557125307789592521044, 6131964786814545525129908217165, 349859873446802695454943217443430723, 4812617, 21796097591570253, 83970509390754835569210693, 102090063924849785520616020, 1483554806647179537488, 19547570626458181, 87502894712962091220033864, 6687802272730805039891221866836]
l2 = [5469550, 20958273942611314, 91678030787182, 93949749261683, 22066581848026725, 297987634280, 371068925299, 1298231923, 5143513717239276645, 362546487662, 21785115176039033, 4855281086163547247, 18799, 1299148654, 76370733396065, 92850372243310, 345417020527, 1165126003, 79583419131233, 23759846615443809]
l3 = [474414806382, 9063409245936133368934076540274, 133522356591788631960941166, 139581022297794930405176691, 8036229759209788198835098840677, 1926852259672153551976, 2129837380648217507187, 32495384557351539, 526458259597464047712858951498687589, 2036164201638295527790, 9622030869291023328877655454329, 578706854677080430464104555890308207, 7293295, 31084771269373806, 117796765384867275302989921, 133508170257748661844078446, 2055980324755107837039, 32485561834039667, 117744782670614057051841889, 7717761131972000546125574465889]
l4 = [7695955, 33060607136195914, 129142996492627, 129138701525333, 33060598512444750, 500135649605, 504447788374, 1936875853, 8750051408287654215, 500068606292, 34187606587958093, 8391173042187362627, 28489, 1869573709, 129134373069125, 128034844600660, 504464632912, 1936877893, 129112712765768, 32772496317047124]
# lt = [l0, l1, l2, l3, l4]
lt = [[53, 104]]
c0 = 0
c1 = 0
c2 = 0
max_score = 0
for i in range(2, 10000000000):
for l in lt:
res = check_modulo(l, i)
# res_sq = check_modulo_sq(l, i)
# res_cu = check_modulo_cu(l, i)
res_sq = 0, 0
res_cu = 0, 0
c0 += res[0]
c1 += res_sq[0]
c2 += res_cu[0]
if i % 10000 == 0:
print("\r%d (%d %d %d)" % (i, c0, c1, c2), end="")
if res[1] > max_score or res_sq[1] > max_score or res_cu[1] > max_score:
print("\n%d %s %s %s %d" % (i, res, res_sq, res_cu, len(l)))
max_score = max(res[1], res_sq[1], res_cu[1]) | [
"[email protected]"
] | |
b20eb31e621b6af9cf0b1d9291f57832e0b170b2 | e00186e71a1f52b394315a0cbc27162254cfffb9 | /durga/without_rest_models/testapp/models.py | 6cac7e2e06e3e0b26b958a2b5e56c8110c3d2e6b | [] | no_license | anilkumar0470/git_practice | cf132eb7970c40d0d032520d43e6d4a1aca90742 | 588e7f654f158e974f9893e5018d3367a0d88eeb | refs/heads/master | 2023-04-27T04:50:14.688534 | 2023-04-22T05:54:21 | 2023-04-22T05:54:21 | 100,364,712 | 0 | 1 | null | 2021-12-08T19:44:58 | 2017-08-15T10:02:33 | Python | UTF-8 | Python | false | false | 240 | py | from django.db import models
# Create your models here
class Employee(models.Model):
eno = models.IntegerField()
ename = models.CharField(max_length=64)
esal = models.FloatField()
eadd = models.CharField(max_length=64)
| [
"[email protected]"
] | |
3a32e88f924763cebc773b157f5ade3bbf566316 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /9BJzrtpdMP8JFQg74_5.py | 54241e25ddb6e70ccec2ec15821dcacf5ef26e29 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 652 | py | """
Create a function that given a list, it returns the index where if split in
two-subarrays (last element of the first array has index of (foundIndex-1)),
the sum of them are equal.
### Examples
twins([10, 20, 30, 5, 40, 50, 40, 15]) ➞ 5
# foundIndex 5 : [10+20+30+5+40]=[50+40+15]
twins([1, 2, 3, 4, 5, 5]) ➞ 4
# [1, 2, 3, 4] [5, 5]
twins([3, 3]) ➞ 1
### Notes
Return only the foundIndex, not the divided list.
"""
def twins(lst):
for i in range(1, len(lst)):
temp = []
temp.append(lst[:i])
temp.append(lst[i:])
if sum(temp[0]) == sum(temp[1]):
return i
| [
"[email protected]"
] | |
b9c2ab2a145c713904bc1750e4837b1d3b4cc7d7 | bbfa3b7ee2008617d33a7c5c7770d22e1aa8836b | /Neural_Network/_base.py | 4297021e909f92cc59ba0f6ba4d9070986e15fba | [
"MIT"
] | permissive | luoshao23/ML_algorithm | 1a0046ce9c3abed029cceffa35defe57fffa82b2 | 6e94fdd0718cd892118fd036c7c5851cf3e6d796 | refs/heads/master | 2021-08-07T08:38:16.102455 | 2020-03-18T06:49:43 | 2020-03-18T06:49:43 | 92,467,636 | 4 | 1 | MIT | 2018-01-16T05:01:29 | 2017-05-26T03:20:08 | Jupyter Notebook | UTF-8 | Python | false | false | 1,611 | py | from scipy.special import expit as logistic_sigmoid
import numpy as np
def identity(X):
return X
def logistic(X):
return logistic_sigmoid(X, out=X)
def tanh(X):
return np.tanh(X, out=X)
def relu(X):
return np.clip(X, 0, np.finfo(X.dtype).max, out=X)
def softmax(X):
tmp = X - X.max(axis=1)[:, np.newaxis]
np.exp(tmp, out=X)
X /= X.sum(axis=1)[:, np.newaxis]
return X
def deriv_identity(a, delta):
"""nothing"""
def deriv_logistic(a, delta):
delta *= a
delta *= (1.0 - a)
def deriv_tanh(a, delta):
delta *= (1.0 - a**2)
def deriv_relu(a, delta):
delta[a <= 0] = 0
def squared_loss(y_true, y_pred):
return ((y_true - y_pred) ** 2).mean() / 2
def log_loss(y_true, y_prob):
y_prob = np.clip(y_prob, 1e-10, 1 - 1e-10)
if y_prob.shape[1] == 1:
y_prob = np.append(1 - y_prob, y_prob, axis=1)
if y_true.shape[1] == 1:
y_true = np.append(1 - y_true, y_true, axis=1)
return -np.sum(y_true * np.log(y_prob)) / y_prob.shape[0]
def binary_log_loss(y_true, y_prob):
y_prob = np.clip(y_prob, 1e-10, 1 - 1e-10)
return -np.sum(y_true * np.log(y_prob) +
(1 - y_true) * np.log(1 - y_prob)) / y_prob.shape[0]
ACTIVATIONS = {'identity': identity, 'logistic': logistic,
'tanh': tanh, 'relu': relu, 'softmax': softmax}
DERIVATIVES = {'identity': deriv_identity, 'logistic': deriv_logistic,
'tanh': deriv_tanh, 'relu': deriv_relu}
LOSS_FUNCTIONS = {'squared_loss': squared_loss, 'log_loss': log_loss,
'binary_log_loss': binary_log_loss}
| [
"[email protected]"
] | |
1d11db0aa1ed010ab524edc4b7847c5ce929f009 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_089/ch78_2020_04_13_14_24_17_375788.py | f3ffd533ba171b545632219f377a3046205a82d8 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | jogo = True
import math
while jogo:
a = input("Qual o nome?")
if a == "sair":
jogo = False
else:
b = float(input("Aceleracao?"))
dic = {a:b}
dic2 = {}
soma = 0
for e in dic:
            if e not in dic2:
dic2[e] = math.sqrt(200/dic[e])
if dic2[e] > soma:
soma = dic2[e]
        r = ('O vencedor é {0} com tempo de conclusão de {1} s'.format(e, soma))
        print(r) | [
"[email protected]"
] | |
f2016ead70d10ced68bab597dac0c22bfd28423e | d7641647d67d110e08997767e85bbea081c2537b | /bitmovin_api_sdk/encoding/inputs/udp_multicast/udp_multicast_api.py | 59839e170880781ece7571d5ff6cbc19d6ee3393 | [
"MIT"
] | permissive | aachenmax/bitmovin-api-sdk-python | d3ded77c459852cbea4927ff28c2a4ad39e6026a | 931bcd8c4695a7eb224a7f4aa5a189ba2430e639 | refs/heads/master | 2022-11-16T08:59:06.830567 | 2020-07-06T07:16:51 | 2020-07-06T07:16:51 | 267,538,689 | 0 | 1 | MIT | 2020-07-06T07:16:52 | 2020-05-28T08:44:44 | Python | UTF-8 | Python | false | false | 3,377 | py | # coding: utf-8
from __future__ import absolute_import
from bitmovin_api_sdk.common import BaseApi, BitmovinApiLoggerBase
from bitmovin_api_sdk.common.poscheck import poscheck_except
from bitmovin_api_sdk.models.response_envelope import ResponseEnvelope
from bitmovin_api_sdk.models.response_error import ResponseError
from bitmovin_api_sdk.models.udp_multicast_input import UdpMulticastInput
from bitmovin_api_sdk.encoding.inputs.udp_multicast.customdata.customdata_api import CustomdataApi
from bitmovin_api_sdk.encoding.inputs.udp_multicast.udp_multicast_input_list_query_params import UdpMulticastInputListQueryParams
class UdpMulticastApi(BaseApi):
@poscheck_except(2)
def __init__(self, api_key, tenant_org_id=None, base_url=None, logger=None):
# type: (str, str, str, BitmovinApiLoggerBase) -> None
super(UdpMulticastApi, self).__init__(
api_key=api_key,
tenant_org_id=tenant_org_id,
base_url=base_url,
logger=logger
)
self.customdata = CustomdataApi(
api_key=api_key,
tenant_org_id=tenant_org_id,
base_url=base_url,
logger=logger
)
def create(self, udp_multicast_input, **kwargs):
# type: (UdpMulticastInput, dict) -> UdpMulticastInput
"""Create UDP multicast input
:param udp_multicast_input: The UdpMulticastInput to be created
:type udp_multicast_input: UdpMulticastInput, required
:return: UDP multicast input
:rtype: UdpMulticastInput
"""
return self.api_client.post(
'/encoding/inputs/udp-multicast',
udp_multicast_input,
type=UdpMulticastInput,
**kwargs
)
def delete(self, input_id, **kwargs):
# type: (string_types, dict) -> UdpMulticastInput
"""Delete UDP multicast input
:param input_id: Id of the input
:type input_id: string_types, required
:return: Id of the input
:rtype: UdpMulticastInput
"""
return self.api_client.delete(
'/encoding/inputs/udp-multicast/{input_id}',
path_params={'input_id': input_id},
type=UdpMulticastInput,
**kwargs
)
def get(self, input_id, **kwargs):
# type: (string_types, dict) -> UdpMulticastInput
"""UDP multicast Input Details
:param input_id: Id of the input
:type input_id: string_types, required
:return: UDP multicast input
:rtype: UdpMulticastInput
"""
return self.api_client.get(
'/encoding/inputs/udp-multicast/{input_id}',
path_params={'input_id': input_id},
type=UdpMulticastInput,
**kwargs
)
def list(self, query_params=None, **kwargs):
# type: (UdpMulticastInputListQueryParams, dict) -> UdpMulticastInput
"""List UDP multicast inputs
:param query_params: Query parameters
:type query_params: UdpMulticastInputListQueryParams
:return: List of UDP multicast inputs
:rtype: UdpMulticastInput
"""
return self.api_client.get(
'/encoding/inputs/udp-multicast',
query_params=query_params,
pagination_response=True,
type=UdpMulticastInput,
**kwargs
)
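# --- usage sketch (editor's addition, not part of the SDK) ---
# Rough shape of how this API class is driven. 'MY_API_KEY' is a placeholder,
# and the UdpMulticastInput constructor arguments are deliberately omitted: the
# required fields (e.g. the multicast address and port) live on the model class,
# not here, so treat this as a structural sketch only.
if __name__ == '__main__':
    udp_api = UdpMulticastApi(api_key='MY_API_KEY')
    new_input = udp_api.create(UdpMulticastInput())  # a real call needs the input's fields populated
    fetched = udp_api.get(new_input.id)
    page = udp_api.list()                            # optionally pass UdpMulticastInputListQueryParams
    udp_api.delete(new_input.id)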
| [
"[email protected]"
] | |
62e2055c06bdab8ebe9363f8cb6ba7382d3af888 | 4577d8169613b1620d70e3c2f50b6f36e6c46993 | /students/1798177/homework04/program02.py | 2afb7299a76787aa239a4beaaac8f0e9130c4d9e | [] | no_license | Fondamenti18/fondamenti-di-programmazione | cbaf31810a17b5bd2afaa430c4bf85d05b597bf0 | 031ec9761acb1a425fcc4a18b07884b45154516b | refs/heads/master | 2020-03-24T03:25:58.222060 | 2018-08-01T17:52:06 | 2018-08-01T17:52:06 | 142,419,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,331 | py | # Il tris è un popolarissimo gioco. Si gioca su una griglia quadrata di 3x3
# squares. Taking turns, the two players choose an empty square and draw their
# own symbol in it (one player's symbol is 'o', the opponent's is 'x').
# The player who manages to place three of their symbols in a straight
# horizontal, vertical or diagonal line wins. If the grid gets filled without
# either player having completed a straight line of three symbols, the game
# ends in a tie; a tied match is called a draw ("patta"). By convention, on an
# empty grid the first move always belongs to player 'o'.
#
# A configuration of the game is therefore uniquely determined by the content
# of the grid.
#
# In what follows we assume that the content of the grid is represented as a
# list of lists. The list of lists M has size 3x3 and M[i][j] contains '', 'x'
# or 'o' depending on whether the grid cell in the i-th row and j-th column is
# still free, contains the symbol 'x', or contains the symbol 'o'.
#
# Given a game configuration C, the game tree for C is the tree obtained
# recursively by starting from configuration C and assigning as children the
# configurations that can be reached from C with one further move of the game.
# The leaves of the tree are of course the possible outcomes of the match,
# that is, the different configurations reachable from C that represent draws,
# wins for 'o' or wins for 'x'. See for example the image albero_di_gioco.png,
# which shows the game tree obtained starting from the configuration
# represented by [['x', 'o', 'o'], ['x', 'x', 'o'], ['', '', '']].
#
# Consider the following class of objects:
#
# class NodoTris:
#     def __init__(self, griglia):
#         self.nome = griglia
#         self.lista_figli = []
#
# You must design the function gen_tree(griglia) which, given the game
# configuration griglia, builds the game tree obtained starting from that
# configuration and returns its root. The nodes of the tree must be objects of
# the NodoTris class.
#
# To test the correctness of your implementation of gen_tree(), the grader
# will use four methods of the NodoTris class, which you must implement as
# well:
#
# 1) tipo(self)
# which, given a NodoTris node, returns:
# - 'o' if the configuration represented by the node is a winning
# configuration for player 'o';
# - 'x' if the configuration represented by the node is a winning
# configuration for player 'x';
# - '-' if the configuration represented by the node is a draw;
# - '?' if the configuration represented by the node is a configuration of a
# game that is not over yet.
#
# 2) esiti(self)
# which, given the root node of a game tree, returns a triple with the
# possible outcomes of the match whose initial configuration is the one
# represented by the node. More precisely: the first element of the triple is
# the number of possible draws, the second is the number of possible wins for
# player 'o', and the third element is the number of possible wins for
# player 'x'.
#
# 3) vittorie_livello(self, giocatore, h)
# which, given the root node of a game tree, one of the two players and an
# integer h, returns the number of nodes that represent a win for that player
# and are located at height h in the tree. In other words, it returns the
# number of possible wins for the player in exactly h moves, in the match
# whose initial configuration is the one represented by the root of the tree.
#
# 4) strategia_vincente(self, giocatore)
# which, given the root node of a game tree and one of the two players,
# returns True or False. It returns True if the player has a winning strategy
# in the match whose initial configuration is the one represented by the root
# node, False otherwise.
#
# Note that a player has a winning strategy with respect to a given
# configuration if, whatever the opponent's moves are, they always have the
# possibility of replying so that the match ends with their victory.
#
# You may of course define further functions and other methods for the
# NodoTris class if you consider them useful for solving the assignment.
#
# You may assume that the game configurations represented by griglia are
# always legal configurations (that is, obtained after some number of moves
# starting from the empty grid).
#
# WARNING: do not use non-ASCII characters, such as accented letters; do not
# import modules that are not in the standard library.
#
# NOTE: the tests are run with a global timeout of 2*N seconds (if the grader
# runs N tests).
class NodoTris:
def __init__(self, grid):
        self.nome = grid            # The grid with its values.
        self.lista_figli = set()    # The set of the node's child configurations.
        self.status = ''            # The node's state ('o', 'x', '?', '-').
        self.turn = ''              # 0 -> 'o'; 1 -> 'x'.
self.score = []
def tipo(self):
        return self.status  # Computed while the tree is being built.
def esiti(self):
result = [0, 0, 0]
perform_endings(self, result)
return tuple(result)
def vittorie_livello(self, player, dest_lvl, current_lvl = 0):
if dest_lvl == current_lvl:
            return int(self.status == player)  # Returns 1 or 0.
        else:  # 'dest_lvl' has not been reached yet.
wins = 0
for sub_config in self.lista_figli:
wins += sub_config.vittorie_livello(player,
dest_lvl,
current_lvl + 1)
return wins
def strategia_vincente(self,giocatore):
if giocatore == 'o':
opposite = 'x'
else:
opposite = 'o'
result = strategy(self, giocatore, opposite)
if result == -1:
return False
else:
return True
# ------------------------------------------------------------------------------
def perform_endings(node, result):
exit = { '-' : 0, 'o' : 1, 'x' : 2 }
if node.status != '?':
result[exit[node.status]] += 1
return
for sub_config in node.lista_figli:
perform_endings(sub_config, result)
def score(node, player, opponent):
    '''Return the player's score on node 'node'.'''
if node.status == player:
return 1
    else:  # A win for the opponent or a draw always counts as negative.
return -1
def get_single_score(scores, value):
    '''Return the score corresponding to 'value'.'''
if value in scores:
return value
else:
        return -value  # The opposite.
def evalutate_strategy(node, opponent, scores):
    '''Evaluate whether or not a winning strategy exists on 'node'.'''
if node.turn == opponent:
return get_single_score(scores, -1)
else:
return get_single_score(scores, 1)
def strategy(node, player, opponent):
    '''Return whether the player has a winning strategy.'''
if not node.lista_figli:
return score(node, player, opponent)
scores = set()
    # Micro-optimization: name resolution in Python is quite slow,
    # especially inside loops such as this for.
add = scores.add
for sub_config in node.lista_figli:
add(strategy(sub_config, player, opponent))
return evalutate_strategy(node, opponent, scores)
# ------------------------------------------------------------------------------
GRID_X = 1
GRID_O = 0
GRID_EMPTY = 10
def get_translated_cell(cell):
    '''Convert the cell from the original grid format to the numeric
    encoding. Return the converted cell.
    '''
if not cell:
return GRID_EMPTY
return int(cell != 'o') # 0 -> 'o', 1 -> 'x'.
def convert_grid(grid):
    '''Convert the grid from the original format to one whose cells are
    encoded as numbers.
    '''
for row in range(3):
for column in range(3):
grid[row][column] = get_translated_cell(grid[row][column])
def calculate_sums(grid, sums):
    '''Compute the sums of the rows, columns and diagonals and store
    everything in the list 'sums'.
    '''
first_row = 0
second_row = 1
third_row = 2
first_column = 3
second_column = 4
third_column = 5
diag = 6
rdiag = 7
for step in range(0, 3):
sums[first_row] += grid[0][step]
sums[second_row] += grid[1][step]
sums[third_row] += grid[2][step]
sums[first_column] += grid[step][0]
sums[second_column] += grid[step][1]
sums[third_column] += grid[step][2]
sums[diag] = grid[0][0] + grid[1][1] + grid[2][2]
sums[rdiag] = grid[0][2] + grid[1][1] + grid[2][0]
def get_default_status(sums):
    '''Return the symbol for a draw or for an unfinished game, based on the
    values in 'sums'.
    '''
if max(sums) >= GRID_EMPTY:
return '?'
else:
return '-'
def get_status(grid):
    '''Return the state of the grid, which can be '-', '?', 'x' or 'o'.'''
sums = [0, 0, 0, 0, 0, 0, 0, 0]
calculate_sums(grid, sums)
    if 3 in sums:  # three 'x' (i.e. 1) in a row.
        return 'x'
    elif 0 in sums:  # three 'o' (i.e. 0) in a row.
        return 'o'
return get_default_status(sums)
def get_copy_of(grid):
    '''Return a copy of the grid.'''
new_grid = []
for row in range(3):
new_grid += [[grid[row][0], grid[row][1], grid[row][2]]]
return new_grid
def get_player(player):
if player:
return 'o'
else:
return 'x'
def next_move(tree, grid, row, column, player):
if grid[row][column] == GRID_EMPTY:
child_grid = get_copy_of(grid)
child_grid[row][column] = player
tree.lista_figli.add(get_tree(child_grid, player))
def get_tree(griglia, player):
tree = NodoTris(griglia)
tree.status = get_status(griglia)
tree.turn = get_player(player)
if tree.status != '?':
return tree
player = (player + 1) % 2
for row in range(3):
next_move(tree, griglia, row, 0, player)
next_move(tree, griglia, row, 1, player)
next_move(tree, griglia, row, 2, player)
return tree
def get_start_player(grid):
    '''Return the player (encoded as a number) who has to make the move on the
    next turn, given the grid 'grid' already converted to numbers.
    '''
first_row = 0
second_row = 0
third_row = 0
for column in range(3):
first_row += grid[0][column]
second_row += grid[1][column]
third_row += grid[2][column]
    # The sum of all the values indicates who will move first.
    total = first_row + second_row + third_row
    if total == 90:  # Empty grid.
return 1
else:
return total % 2
def gen_tree(griglia):
griglia = get_copy_of(griglia)
convert_grid(griglia)
start_player = get_start_player(griglia)
return get_tree(griglia, start_player)
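# --- usage sketch (editor's addition, not part of the submitted solution) ---
# Builds the game tree for the example configuration quoted in the assignment
# text above and queries the four required methods. No concrete output values
# are asserted here; they depend on the enumeration performed by gen_tree().
if __name__ == '__main__':
    example = [['x', 'o', 'o'], ['x', 'x', 'o'], ['', '', '']]
    root = gen_tree(example)
    print(root.tipo())                    # '?', 'o', 'x' or '-'
    print(root.esiti())                   # (draws, wins for 'o', wins for 'x')
    print(root.vittorie_livello('x', 1))  # wins for 'x' exactly one move away
    print(root.strategia_vincente('x'))   # True or False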
| [
"[email protected]"
] | |
e5edc21a34b45ca67e7abb9b03ee9215880f212d | c440bcb0e566ed107d198593bfeb482c59276dd8 | /advent_of_code/2021/day10_1.py | 2d34868acd58b3462cc9f7332e432aea3f23b3a6 | [] | no_license | TheCDC/Musings | 1ee917bbf2fd39f6fa97b268568053ca6ad7fbbf | 7b07e315230248239bbccad5d85d0a5e8a54d5d8 | refs/heads/master | 2022-11-30T23:37:24.608955 | 2021-12-19T08:12:03 | 2021-12-19T08:12:03 | 175,046,297 | 0 | 0 | null | 2022-11-22T07:20:49 | 2019-03-11T17:01:54 | Python | UTF-8 | Python | false | false | 1,751 | py | from typing import List, Optional, Tuple
with open("inputs/day10.txt") as f:
lines = f.read().split()
openers = "([{<"
closers = ")]}>"
points_corruption = {")": 3, "]": 57, "}": 1197, ">": 25137}
def complete(opens: List[str]):
to_complete = opens[:]
completion: List[str] = []
while to_complete:
c = to_complete.pop()
completion.append(closers[openers.find(c)])
return completion
def score_corruption(s: str):
return points_corruption[s]
def is_matched_pair(a: str, b: str):
assert len(a) == 1 and len(b) == 1
assert a in openers
assert b in closers
matching = openers.find(a) == closers.find(b)
return matching
def doline(line: str):
chars = list(reversed(list(enumerate(line))))
left: List[str] = []
right: List[str] = []
corrupted: Optional[Tuple[int, str]] = None
while chars:
i, c = chars.pop()
if c in openers:
left.append(c)
else:
right.append(c)
if not is_matched_pair(left[-1], c):
corrupted = (i, c) if corrupted is None else corrupted
while len(left) and len(right) and is_matched_pair(left[-1], right[-1]):
left.pop()
right.pop()
completion = complete(left)
return (left, right, completion, corrupted)
def solve(lines):
score_total = 0
results = [doline(line) for line in lines]
score_total = sum(score_corruption(cor[1]) for l, r, comp, cor in results if cor)
return (score_total, results)
def main():
solved = solve(lines)
print(
solved[0],
*[tuple("".join(x) for x in (t[0], t[1], t[2])) + (t[3],) for t in solved[1]],
sep="\n"
)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
4699041df8bc845885513fbf247fa04518328cbd | 14afcc5e2b8bdb3d91b500f6e7985d8a3378e929 | /src/68.文本左右对齐.py | b3689a9c97bc0475d281eab692c085002b906bbc | [] | no_license | hysapphire/leetcode-python | 8569a0e76f8917165e6b9fb25bfef1afc1186e3c | 8e338ee7a5c9f124e897491d6a1f4bcd1d1a6270 | refs/heads/master | 2022-12-03T15:17:52.557115 | 2020-08-17T14:19:59 | 2020-08-17T14:19:59 | 278,781,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,782 | py | #
# @lc app=leetcode.cn id=68 lang=python3
#
# [68] 文本左右对齐 (Text Justification)
#
# @lc code=start
from typing import List
class Solution:
    def fullJustify(self, words: List[str], maxWidth: int) -> List[str]:
        # Greedily pack words into lines, then justify every line except the
        # last: leftover spaces are handed out between words from left to right.
        splited_words = []
        s = []
        cnt = 0
for word in words:
t = cnt + len(word)
if t > maxWidth:
splited_words.append(s)
s = [word]
cnt = len(word) + 1
else:
s.append(word)
cnt = t + 1
splited_words.append(s)
res = []
for splited_word in splited_words[:-1]:
s = ""
if len(splited_word) == 1:
num_space = 0
else:
num_space = (maxWidth - sum([len(word) for word in splited_word])) // (len(splited_word) - 1)
delta_num_space = (maxWidth - sum([len(word) for word in splited_word])) - (len(splited_word) - 1) * num_space
if len(splited_word) == 1:
s = ""
s += splited_word[0]
for _ in range(delta_num_space):
s += " "
else:
for word in splited_word[:-1]:
s += word
for _ in range(num_space):
s += " "
if delta_num_space > 0:
s += " "
delta_num_space -= 1
s += splited_word[-1]
res.append(s)
s = ""
for word in splited_words[-1][:-1]:
s += word
s += " "
s += splited_words[-1][-1]
for _ in range(maxWidth - len(s)):
s += " "
res.append(s)
return res
# @lc code=end
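# Hedged local check (outside the @lc markers, so it is not part of the
# submission): run the classic example with a 16-character line width.
if __name__ == '__main__':
    demo_words = ["This", "is", "an", "example", "of", "text", "justification."]
    for line in Solution().fullJustify(demo_words, 16):
        print(repr(line))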
| [
"[email protected]"
] | |
72222da4ae1741a0fe83d540d008fd9bae0c1a83 | 51b6d2fc53d5c632fcf01319842baebf13901e84 | /atcoder.jp/abc131/abc131_a/Main.py | 68ba8c087a27f58b969015b21503fb2ab8a823b3 | [] | no_license | mono-0812/procon | 35db3b2c21eff74fbd7b52db07f249380f6834ef | 68a4b53880a228a0164052b23d1326363efcbc20 | refs/heads/master | 2023-05-30T17:02:58.935074 | 2021-06-27T12:15:10 | 2021-06-27T12:15:10 | 345,896,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | s=input()
las=""
for i in s:
if las==i:
print("Bad")
exit()
las=i
print("Good") | [
"[email protected]"
] | |
2850dbedb93f513dc0ee15666df35c5ff685c000 | 1302c48beae789b1b7837f34325a8f2b203d69df | /src/byro/bookkeeping/models/account.py | 866ae96ca5bbdf954ac3dddf73f44b8cdd0bb526 | [] | no_license | grince/byro | b9a8ad0d54b78ee220af6dedee119ab9ec0036df | abe8743c04ba828fdd5ff50c55c43a3b32bc26bd | refs/heads/master | 2021-01-25T12:31:12.461853 | 2018-02-26T17:42:12 | 2018-02-26T17:42:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,496 | py | from django.db import models
from django.db.models import Q
from django.utils.decorators import classproperty
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from byro.common.models.auditable import Auditable
from byro.common.models.choices import Choices
class AccountCategory(Choices):
# Regular Categories
MEMBER_DONATION = 'member_donation'
MEMBER_FEES = 'member_fees'
# Categories for double-entry bookkeeping
ASSET = 'asset'
LIABILITY = 'liability'
INCOME = 'income'
EXPENSE = 'expense'
@classproperty
def choices(cls):
return (
(cls.MEMBER_DONATION, _('Donation account')),
(cls.MEMBER_FEES, _('Membership fee account')),
(cls.ASSET, _('Asset account')),
(cls.LIABILITY, _('Liability account')),
(cls.INCOME, _('Income account')),
(cls.EXPENSE, _('Expense account')),
)
class Account(Auditable, models.Model):
account_category = models.CharField(
choices=AccountCategory.choices,
max_length=AccountCategory.max_length,
)
name = models.CharField(max_length=300, null=True) # e.g. 'Laser donations'
class Meta:
unique_together = (
('account_category', 'name'),
)
def __str__(self):
if self.name:
return self.name
return f'{self.account_category} account #{self.id}'
@property
def transactions(self):
from byro.bookkeeping.models import VirtualTransaction
return VirtualTransaction.objects.filter(
Q(source_account=self) | Q(destination_account=self)
)
    def total_in(self, start=None, end=None):
        end = end if end is not None else now()  # evaluate "now" per call, not at import time
        qs = self.incoming_transactions
if start:
qs = qs.filter(value_datetime__gte=start)
if end:
qs = qs.filter(value_datetime__lte=end)
return qs.aggregate(incoming=models.Sum('amount'))['incoming'] or 0
    def total_out(self, start=None, end=None):
        end = end if end is not None else now()  # evaluate "now" per call, not at import time
        qs = self.outgoing_transactions
if start:
qs = qs.filter(value_datetime__gte=start)
if end:
qs = qs.filter(value_datetime__lte=end)
return qs.aggregate(outgoing=models.Sum('amount'))['outgoing'] or 0
    def balance(self, start=None, end=None):
incoming_sum = self.total_in(start=start, end=end)
outgoing_sum = self.total_out(start=start, end=end)
return incoming_sum - outgoing_sum
| [
"[email protected]"
] | |
c8a176d73ce4de43a0c744f3ba4ba152b13f907d | 9c968f7cdf390f8417912519b53f1b7f6ea8b7e8 | /HJ_AL/brute_force/b1065_brute.py | 9f0cd478cfd5e6253c843f1b729ba4e7aabdc19b | [] | no_license | hhongjoon/TIL | aa33ce2973552a0baa0e0da5bd7d20824fd2e322 | a33b20af15d3f671ea7c7b2855291e50a9036c1c | refs/heads/master | 2021-08-07T17:33:39.722880 | 2020-04-25T08:11:02 | 2020-04-25T08:11:02 | 162,099,245 | 4 | 0 | null | 2019-10-30T09:06:21 | 2018-12-17T08:34:07 | Jupyter Notebook | UTF-8 | Python | false | false | 427 | py |
num = int(input())
count = 0
# Count the numbers up to num whose digits form an arithmetic progression
# ("hansu"); every 1- and 2-digit number qualifies automatically.
for i in range(1,num+1):
if len(str(i)) == 1 or len(str(i))==2:
count += 1
continue
str_num=str(i)
judge = True
for j in range(0,len(str_num)-2):
if int(str_num[j]) - int(str_num[j+1]) == int(str_num[j+1]) - int(str_num[j+2]):
continue
else:
judge = False
break
if judge == True:
count+=1
print(count)
| [
"[email protected]"
] | |
dfa17b78951d1872ed8fc4f817a8579389a5a042 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-dws/huaweicloudsdkdws/v2/model/cancel_readonly_cluster_response.py | d682b2b92dc9ed1fe6c03084b6e00be2e4fc2041 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 2,467 | py | # coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CancelReadonlyClusterResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
}
attribute_map = {
}
def __init__(self):
"""CancelReadonlyClusterResponse
The model defined in huaweicloud sdk
"""
super(CancelReadonlyClusterResponse, self).__init__()
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CancelReadonlyClusterResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
028660a24e92f54b0bc846a5d68b6e90ac21cddf | 41710e9133d660739f8f9f17040a2a8a6082e9fb | /python/aa_modules/fitsio_has_errors/eg2.py | d4a0e6e5e75796a2ec451845dfda65e7d12df200 | [] | no_license | hanjiangxue007/Programming | 591678150e2e300051fdeaf09124d3893076d3a9 | 7a545ef2300b004497f30d27d1f2aaa032e26af5 | refs/heads/master | 2020-06-29T18:50:27.776557 | 2016-10-27T18:31:39 | 2016-10-27T18:31:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,094 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author : Bhishan Poudel; Physics PhD Student, Ohio University
# Date : Oct-15-2016 Sat
# Last update :
#
#
# Imports
import numpy as np
import fitsio
from fitsio import FITS, FITSHDR
# Often you just want to quickly read or write data without bothering to
# create a FITS object. In that case, you can use the read and write
# convenience functions.
# read all data from the first hdu with data
filename='test.fits'
data = fitsio.read(filename)
# read a subset of rows and columns from a table
data = fitsio.read(filename, rows=[35,1001], columns=['x','y'], ext=2)
# read the header, or both at once ('ext' is a placeholder HDU number here;
# the original snippet left 'extension'/'ext' undefined)
ext = 2
h = fitsio.read_header(filename, ext)
data, h = fitsio.read(filename, ext=ext, header=True)
# open the file, write a new binary table extension, and then write the
# data from "recarray" into the table. By default a new extension is
# added to the file. use clobber=True to overwrite an existing file
# instead. To append rows to an existing table, see below.
# 'recarray' and 'image' were undefined here; small placeholder arrays:
recarray = np.zeros(4, dtype=[('x', 'f8'), ('y', 'f8')])
fitsio.write(filename, recarray)
# write an image
image = np.zeros((16, 16))
fitsio.write(filename, image)
| [
"[email protected]"
] | |
170ca8d188aacad28ab3a8be69a38b02bb931402 | 9e4ab50f5822941ab70fefb8ac8f2d91d702d9df | /suorganizer/views.py | a8c5954ad810ac9b24b4425723169efa4e7b3098 | [] | no_license | andyk1278/startuptracker | cf3b51a82aa6018b990c605cff47398636b4643c | b2b07db3a6213249588214200b52a705ed50b339 | refs/heads/master | 2021-01-02T23:00:29.839108 | 2017-08-13T09:18:51 | 2017-08-13T09:18:51 | 99,437,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | from django.http import HttpResponseRedirect
def redirect_root(request):
return HttpResponseRedirect('/blog/') | [
"[email protected]"
] | |
b65f91b5d0820bef879b4902b41d7a79e7fe245a | 33f304bbd8536045a63dea909031576ea3f7b488 | /census_area/core.py | c3fe06979410922dd4552eca320be2f8349c5c06 | [
"MIT"
] | permissive | LindaLv11/census_area | 859c92cd5ca6a8537ff45014b42771804dc29913 | 48d8bc7e73c12b58e796307e36c93029b1ec0044 | refs/heads/master | 2020-04-20T08:25:32.838867 | 2019-01-04T03:00:47 | 2019-01-04T03:00:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,436 | py | import shapely.geometry
import shapely.geos
import esridump
GEO_URLS = {
'tracts' : {
1990 : 'https://gis.uspatial.umn.edu/arcgis/rest/services/nhgis/Census_Tracts_1910_2014/MapServer/8',
2000 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/Census2010/tigerWMS_Census2000/MapServer/6',
2010 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/14',
2011 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/14',
2012 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/14',
2013 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2013/MapServer/8',
2014 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2014/MapServer/8',
2015 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2015/MapServer/8',
2016 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2015/MapServer/8'},
'block groups' : {
2000 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/Census2010/tigerWMS_Census2000/MapServer/8',
2010 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/16',
2011 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/16',
2012 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/16',
2013 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2013/MapServer/10',
2014 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2014/MapServer/10',
2015 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2015/MapServer/10',
2016 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2015/MapServer/10'},
'blocks' : {
2000 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/Census2010/tigerWMS_Census2000/MapServer/10',
2010 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Current/MapServer/12'},
'incorporated places' : {
1990 : 'https://gis.uspatial.umn.edu/arcgis/rest/services/nhgis/Places_1980_2014/MapServer/1',
2000 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/Census2010/tigerWMS_Census2000/MapServer/24',
2010 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/34',
2011 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/34',
2012 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/34',
2013 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2013/MapServer/26',
2014 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2014/MapServer/26',
2015 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2015/MapServer/26',
2016 : 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2016/MapServer/26'}
}
class AreaFilter(object):
def __init__(self, geojson_geometry, sub_geography_url):
self.geo = shapely.geometry.shape(geojson_geometry)
geo_query_args = {'geometry': ','.join(str(x) for x in self.geo.bounds),
'geometryType': 'esriGeometryEnvelope',
'spatialRel': 'esriSpatialRelEnvelopeIntersects',
'inSR' : '4326',
'geometryPrecision' : 9,
'orderByFields': 'OID'}
self.area_dumper = esridump.EsriDumper(sub_geography_url,
extra_query_args = geo_query_args)
def __iter__(self):
for area in self.area_dumper:
area_geo = shapely.geometry.shape(area['geometry'])
if self.geo.intersects(area_geo):
try:
intersection = self.geo.intersection(area_geo)
except shapely.geos.TopologicalError:
intersection = self.geo.buffer(0).intersection(area_geo.buffer(0))
if intersection.area/area_geo.area > 0.1:
yield area
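# --- Hedged usage sketch (not part of the original module) ---
# Assumes a GeoJSON geometry dict for the area of interest; the URL is one of
# the real GEO_URLS entries above, so iterating hits the live TIGERweb service
# and needs network access.
if __name__ == '__main__':
    area_of_interest = {
        'type': 'Polygon',
        'coordinates': [[[-87.70, 41.80], [-87.60, 41.80], [-87.60, 41.90],
                         [-87.70, 41.90], [-87.70, 41.80]]],
    }
    tract_filter = AreaFilter(area_of_interest, GEO_URLS['tracts'][2016])
    for feature in tract_filter:
        print(feature['properties'])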
| [
"[email protected]"
] | |
b5a06168a7891d65d6d1f2dc37cc42b31c3f9075 | 14b8cf0b67104b53534678b8c0e9525ace4714ff | /codeeval/spiral.py | 8ce3b47e920f2d0e9c03bbd1d9e3a51d4092b051 | [] | no_license | bhfwg/py_learn | bb11898fd81f653643fc61949f43df751d317fcb | eca9da748bada67357961d1581d8ec890a3385f8 | refs/heads/master | 2020-03-27T15:01:25.881792 | 2018-06-05T01:36:26 | 2018-06-05T01:36:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,433 | py | from sys import argv
def spiral_printing(n, m, one):
    # Yield the elements of an n x m grid (stored row-major in the flat list
    # 'one') in clockwise spiral order; the a/b/c/d corners shrink inwards.
    if n * m == 1:
yield one[0]
return
def point_2_index(x, y):
return x * m + y
ax = 0
ay = 0
bx = 0
by = m - 1
cx = n - 1
cy = m - 1
dx = n - 1
dy = 0
while 1:
for i in xrange(ay, by):
index = point_2_index(ax, i)
yield one[index]
for i in xrange(bx, cx):
index = point_2_index(i, cy)
yield one[index]
for i in xrange(cy, dy, -1):
index = point_2_index(dx, i)
yield one[index]
for i in xrange(dx, ax, -1):
index = point_2_index(i, ax)
yield one[index]
ax += 1
ay += 1
bx += 1
by -= 1
cx -= 1
cy -= 1
dx -= 1
dy += 1
if ay > by or ax > dx:
break
if ay == by:
for i in xrange(bx, cx + 1):
index = point_2_index(i, cy)
yield one[index]
break
elif ax == dx:
for i in xrange(ay, by + 1):
index = point_2_index(ax, i)
yield one[index]
break
f = open(argv[1], 'r')
for one in f:
one = one.strip()
if one:
n, m, one = one.split(';')
n = int(n)
m = int(m)
one = one.split(' ')
print ' '.join(spiral_printing(n, m, one))
f.close()
| [
"[email protected]"
] | |
6c45e72f32ca223fecfcc490073f0cd0d14b4b65 | 0130c8b14927097663157846adc4b146d67d2fda | /tests/common/test_run/div_no_nan_run.py | 1a2c66c665dc13f6f5900b55ab27ee71b9d67109 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-3-Clause",
"NCSA",
"LLVM-exception",
"Zlib",
"BSD-2-Clause",
"MIT"
] | permissive | Shigangli/akg | e8be3e0ee1eafe3e42b4cc4d424c28f08ef4c0bc | 3766c54e0b109541932d147a6b5643a334b82403 | refs/heads/master | 2023-09-06T05:13:40.571583 | 2021-11-23T03:44:54 | 2021-11-23T03:44:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,497 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from akg.utils import kernel_exec as utils
from tests.common.test_op import div_no_nan
from tests.common.tensorio import compare_tensor
from tests.common.base import get_rtol_atol
from tests.common.gen_random import random_gaussian
def div_no_nan_execute(shapes, dtype, attrs):
exp_output, inputs, args = gen_data(dtype, shapes)
mod = div_no_nan_compile(shapes, dtype, attrs)
# result_tvm
acu_output = utils.mod_launch(mod, args, expect=exp_output)
# compare result
rtol, atol = get_rtol_atol("div_no_nan", dtype)
TestCase_Result = compare_tensor(acu_output, exp_output, rtol=rtol, atol=atol, equal_nan=True)
return inputs, acu_output, exp_output, TestCase_Result
def gen_data(dtype, shapes):
# Result_Numpy
data_x = random_gaussian(shapes[0], miu=1, sigma=0.1).astype(dtype)
data_y = random_gaussian(shapes[1], miu=0, sigma=2**-64).astype(dtype)
if dtype in ["uint8", "int8", "int32"]:
is_zero = np.equal(0, data_y)
if dtype in ["float16"]:
is_zero = np.less(np.abs(data_y), 2**-12)
if dtype in ["float32"]:
is_zero = np.less(np.abs(data_y), 2**-64)
if dtype in ["uint8", "int8", "int32"]:
exp_output = np.floor_divide(np.multiply(data_x, (1 - is_zero)), data_y + is_zero)
if dtype in ["float16", "float32"]:
exp_output = np.true_divide(np.multiply(data_x, (1 - is_zero)), data_y + is_zero)
# inputs and output to hold the data
output = np.full(exp_output.shape, np.nan, dtype)
inputs = [data_x, data_y]
args = [data_x, data_y, output]
return exp_output, inputs, args
def div_no_nan_compile(shapes, dtype, attrs, kernel_name='div_no_nan', runing=False):
return utils.op_build_test(div_no_nan.div_no_nan, [shapes[0], shapes[1]], [dtype, dtype], kernel_name=kernel_name, attrs=attrs, tuning=runing)
| [
"[email protected]"
] | |
9fc39c434aeb8db7e69c85650d79dea51a686666 | 5d2404f62e58d5fd1f6112744ff32c3166183ac7 | /Geek University/Seção 4/Exercicios/EX49.py | de8275af5902ac3f09895155461a32956779a2ef | [] | no_license | Leownhart/My_Course_of_python | 236cfc84d841c5883e5aa1cc0c0730e7a9a83c40 | 5abb21f8cdad91ab54247a007d40bf9ecd2cff8c | refs/heads/master | 2020-08-28T15:04:33.628086 | 2020-08-24T19:25:39 | 2020-08-24T19:25:39 | 217,733,877 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | '''
49 - Write a program that reads a start time (hour, minute, second) and the
duration, in seconds, of a biological experiment. The program must output the
new time (hour, minute, second) at which the experiment ends.
from datetime import datetime
now = datetime.now()
print now.year
print now.month
print now.day
print now.hour
print now.minute
print now.second
'''
# ANSWERS
from datetime import datetime
Hora = int(input('Informe a Hora: '))
Minuto = int(input('Informe os Minutos: '))
Segundos = int(input('Informe os Segundos: '))
print(f'Passaram-se {Hora * 3600 + Minuto * 60 + Segundos} Segundos')
print(f'{datetime.now()}')
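# Hedged completion sketch (not part of the original answer): the statement
# above also asks for the finishing time, so this extra block reads the
# duration in seconds and prints the wrapped-around end time.
Duracao = int(input('Informe a duração em segundos: '))
total = Hora * 3600 + Minuto * 60 + Segundos + Duracao
print(f'Término: {(total // 3600) % 24:02d}:{(total % 3600) // 60:02d}:{total % 60:02d}')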
| [
"[email protected]"
] | |
68eeea5ed3b7b64fa83adeca2d9a513d9c57fd1c | 24caa6710105a060fab2e17147e6d56609939011 | /06-Importing_Data_in_Python_(Part_2)/01-Importing_data_from_the_Internet/01-Importing_flat_files_from_the_web_your_turn!.py | b845373064884f87b9853e85c1360cd5849f5a64 | [] | no_license | inverseundefined/DataCamp | 99607022ad3f899d7681ad1f70fcedab290e269a | 7226b6b6f41888c3610a884db9a226e013d37e56 | refs/heads/master | 2022-01-10T00:53:21.714908 | 2019-07-24T13:27:49 | 2019-07-24T13:27:49 | 198,280,648 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,446 | py | '''
Importing flat files from the web: your turn!
You are about to import your first file from the web! The flat file you will import will be 'winequality-red.csv' from the University of California, Irvine's Machine Learning repository. The flat file contains tabular data of physiochemical properties of red wine, such as pH, alcohol content and citric acid content, along with wine quality rating.
The URL of the file is
'https://s3.amazonaws.com/assets.datacamp.com/production/course_1606/datasets/winequality-red.csv'
After you import it, you'll check your working directory to confirm that it is there and then you'll load it into a pandas DataFrame.
Instructions
Import the function urlretrieve from the subpackage urllib.request.
Assign the URL of the file to the variable url.
Use the function urlretrieve() to save the file locally as 'winequality-red.csv'.
Execute the remaining code to load 'winequality-red.csv' in a pandas DataFrame and to print its head to the shell.
'''
# Import package
from urllib.request import urlretrieve
# Import pandas
import pandas as pd
# Assign url of file: url
url = 'https://s3.amazonaws.com/assets.datacamp.com/production/course_1606/datasets/winequality-red.csv'
# Save file locally
urlretrieve(url, 'winequality-red.csv')
# Read file into a DataFrame and print its head
df = pd.read_csv('winequality-red.csv', sep=';')
print(df.head()) | [
"[email protected]"
] | |
af110594bc60b09186afd5627301dc1dbf379ca8 | af61044c866eb85ca2c622e082090f7657431206 | /webcli/arthur_utils/experiment.py | a2e95ed3a2caacf3035abf7dcdb6607dbfd126af | [] | no_license | leepand/gridpoc | f7959ef099d8a5513c59dfeb682761771ffe7594 | 4c476cd0241a95a4a7d2abf53a519d3749ecfb94 | refs/heads/master | 2020-04-28T02:38:49.631595 | 2019-03-11T02:01:50 | 2019-03-11T02:01:50 | 174,906,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,502 | py | from _mlflow_object import _MLflowObject
class Experiment(_MLflowObject):
"""
Experiment object.
"""
DEFAULT_EXPERIMENT_ID = 0
ACTIVE_LIFECYCLE = 'active'
DELETED_LIFECYCLE = 'deleted'
def __init__(self, experiment_id, name, artifact_location, lifecycle_stage):
super(Experiment, self).__init__()
self._experiment_id = experiment_id
self._name = name
self._artifact_location = artifact_location
self._lifecycle_stage = lifecycle_stage
@property
def experiment_id(self):
"""Integer ID of the experiment."""
return self._experiment_id
@property
def name(self):
"""String name of the experiment."""
return self._name
def _set_name(self, new_name):
self._name = new_name
@property
def artifact_location(self):
"""String corresponding to the root artifact URI for the experiment."""
return self._artifact_location
@property
def lifecycle_stage(self):
"""Lifecycle stage of the experiment. Can either be 'active' or 'deleted'."""
return self._lifecycle_stage
@classmethod
def from_proto(cls, proto):
return cls(proto.experiment_id, proto.name, proto.artifact_location, proto.lifecycle_stage)
@classmethod
def _properties(cls):
# TODO: Hard coding this list of props for now. There has to be a clearer way...
return ["experiment_id", "name", "artifact_location", "lifecycle_stage"] | [
"[email protected]"
] | |
33667e8b97d6c876c073bc1b32185c8188c271fa | a1614311937bae5204e171b2a3481fb31e61a490 | /media/codigos/36/36sol118.py | 0e4ccda5dba78b1aa00e7913b2e0c1bb249e5ec9 | [] | no_license | alexandre146/avaliar | 8d406100ed72f10292a0580edac50ad061ad92e9 | 3daf247ca68962086592a356e013b07fa1569afe | refs/heads/master | 2020-03-21T03:09:29.493919 | 2018-07-23T11:41:38 | 2018-07-23T11:41:38 | 137,883,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | n=int(input())
m=int(input())
if(m%n==0):
print(m)
elif(m%n!=0):
x=m%n
if((m-x)==n):
print("sem multiplos menores que"+str(m))
else:
print(m-x)
| [
"[email protected]"
] | |
5d2d9c1ac8f26a527eaf2d08e5cdd9a656e0880c | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/cloud/kms/v1/kms-v1-py/google/cloud/kms_v1/services/key_management_service/transports/base.py | 0da50e0196d2991964005d561c4193993d0eb0a7 | [
"Apache-2.0"
] | permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,784 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.kms_v1.types import resources
from google.cloud.kms_v1.types import service
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-cloud-kms',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
class KeyManagementServiceTransport(abc.ABC):
"""Abstract transport class for KeyManagementService."""
AUTH_SCOPES = (
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/cloudkms',
)
DEFAULT_HOST: str = 'cloudkms.googleapis.com'
def __init__(
self, *,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ':' not in host:
host += ':443'
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file,
**scopes_kwargs,
quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
# If the credentials is service account credentials, then always try to use self signed JWT.
if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): This method is in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-auth is increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.list_key_rings: gapic_v1.method.wrap_method(
self.list_key_rings,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.list_crypto_keys: gapic_v1.method.wrap_method(
self.list_crypto_keys,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.list_crypto_key_versions: gapic_v1.method.wrap_method(
self.list_crypto_key_versions,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.list_import_jobs: gapic_v1.method.wrap_method(
self.list_import_jobs,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.get_key_ring: gapic_v1.method.wrap_method(
self.get_key_ring,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.get_crypto_key: gapic_v1.method.wrap_method(
self.get_crypto_key,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.get_crypto_key_version: gapic_v1.method.wrap_method(
self.get_crypto_key_version,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.get_public_key: gapic_v1.method.wrap_method(
self.get_public_key,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.get_import_job: gapic_v1.method.wrap_method(
self.get_import_job,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.create_key_ring: gapic_v1.method.wrap_method(
self.create_key_ring,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.create_crypto_key: gapic_v1.method.wrap_method(
self.create_crypto_key,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.create_crypto_key_version: gapic_v1.method.wrap_method(
self.create_crypto_key_version,
default_timeout=60.0,
client_info=client_info,
),
self.import_crypto_key_version: gapic_v1.method.wrap_method(
self.import_crypto_key_version,
default_timeout=60.0,
client_info=client_info,
),
self.create_import_job: gapic_v1.method.wrap_method(
self.create_import_job,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.update_crypto_key: gapic_v1.method.wrap_method(
self.update_crypto_key,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.update_crypto_key_version: gapic_v1.method.wrap_method(
self.update_crypto_key_version,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.update_crypto_key_primary_version: gapic_v1.method.wrap_method(
self.update_crypto_key_primary_version,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.destroy_crypto_key_version: gapic_v1.method.wrap_method(
self.destroy_crypto_key_version,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.restore_crypto_key_version: gapic_v1.method.wrap_method(
self.restore_crypto_key_version,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.encrypt: gapic_v1.method.wrap_method(
self.encrypt,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.decrypt: gapic_v1.method.wrap_method(
self.decrypt,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.asymmetric_sign: gapic_v1.method.wrap_method(
self.asymmetric_sign,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.asymmetric_decrypt: gapic_v1.method.wrap_method(
self.asymmetric_decrypt,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.mac_sign: gapic_v1.method.wrap_method(
self.mac_sign,
default_timeout=None,
client_info=client_info,
),
self.mac_verify: gapic_v1.method.wrap_method(
self.mac_verify,
default_timeout=None,
client_info=client_info,
),
self.generate_random_bytes: gapic_v1.method.wrap_method(
self.generate_random_bytes,
default_timeout=None,
client_info=client_info,
),
}
@property
def list_key_rings(self) -> Callable[
[service.ListKeyRingsRequest],
Union[
service.ListKeyRingsResponse,
Awaitable[service.ListKeyRingsResponse]
]]:
raise NotImplementedError()
@property
def list_crypto_keys(self) -> Callable[
[service.ListCryptoKeysRequest],
Union[
service.ListCryptoKeysResponse,
Awaitable[service.ListCryptoKeysResponse]
]]:
raise NotImplementedError()
@property
def list_crypto_key_versions(self) -> Callable[
[service.ListCryptoKeyVersionsRequest],
Union[
service.ListCryptoKeyVersionsResponse,
Awaitable[service.ListCryptoKeyVersionsResponse]
]]:
raise NotImplementedError()
@property
def list_import_jobs(self) -> Callable[
[service.ListImportJobsRequest],
Union[
service.ListImportJobsResponse,
Awaitable[service.ListImportJobsResponse]
]]:
raise NotImplementedError()
@property
def get_key_ring(self) -> Callable[
[service.GetKeyRingRequest],
Union[
resources.KeyRing,
Awaitable[resources.KeyRing]
]]:
raise NotImplementedError()
@property
def get_crypto_key(self) -> Callable[
[service.GetCryptoKeyRequest],
Union[
resources.CryptoKey,
Awaitable[resources.CryptoKey]
]]:
raise NotImplementedError()
@property
def get_crypto_key_version(self) -> Callable[
[service.GetCryptoKeyVersionRequest],
Union[
resources.CryptoKeyVersion,
Awaitable[resources.CryptoKeyVersion]
]]:
raise NotImplementedError()
@property
def get_public_key(self) -> Callable[
[service.GetPublicKeyRequest],
Union[
resources.PublicKey,
Awaitable[resources.PublicKey]
]]:
raise NotImplementedError()
@property
def get_import_job(self) -> Callable[
[service.GetImportJobRequest],
Union[
resources.ImportJob,
Awaitable[resources.ImportJob]
]]:
raise NotImplementedError()
@property
def create_key_ring(self) -> Callable[
[service.CreateKeyRingRequest],
Union[
resources.KeyRing,
Awaitable[resources.KeyRing]
]]:
raise NotImplementedError()
@property
def create_crypto_key(self) -> Callable[
[service.CreateCryptoKeyRequest],
Union[
resources.CryptoKey,
Awaitable[resources.CryptoKey]
]]:
raise NotImplementedError()
@property
def create_crypto_key_version(self) -> Callable[
[service.CreateCryptoKeyVersionRequest],
Union[
resources.CryptoKeyVersion,
Awaitable[resources.CryptoKeyVersion]
]]:
raise NotImplementedError()
@property
def import_crypto_key_version(self) -> Callable[
[service.ImportCryptoKeyVersionRequest],
Union[
resources.CryptoKeyVersion,
Awaitable[resources.CryptoKeyVersion]
]]:
raise NotImplementedError()
@property
def create_import_job(self) -> Callable[
[service.CreateImportJobRequest],
Union[
resources.ImportJob,
Awaitable[resources.ImportJob]
]]:
raise NotImplementedError()
@property
def update_crypto_key(self) -> Callable[
[service.UpdateCryptoKeyRequest],
Union[
resources.CryptoKey,
Awaitable[resources.CryptoKey]
]]:
raise NotImplementedError()
@property
def update_crypto_key_version(self) -> Callable[
[service.UpdateCryptoKeyVersionRequest],
Union[
resources.CryptoKeyVersion,
Awaitable[resources.CryptoKeyVersion]
]]:
raise NotImplementedError()
@property
def update_crypto_key_primary_version(self) -> Callable[
[service.UpdateCryptoKeyPrimaryVersionRequest],
Union[
resources.CryptoKey,
Awaitable[resources.CryptoKey]
]]:
raise NotImplementedError()
@property
def destroy_crypto_key_version(self) -> Callable[
[service.DestroyCryptoKeyVersionRequest],
Union[
resources.CryptoKeyVersion,
Awaitable[resources.CryptoKeyVersion]
]]:
raise NotImplementedError()
@property
def restore_crypto_key_version(self) -> Callable[
[service.RestoreCryptoKeyVersionRequest],
Union[
resources.CryptoKeyVersion,
Awaitable[resources.CryptoKeyVersion]
]]:
raise NotImplementedError()
@property
def encrypt(self) -> Callable[
[service.EncryptRequest],
Union[
service.EncryptResponse,
Awaitable[service.EncryptResponse]
]]:
raise NotImplementedError()
@property
def decrypt(self) -> Callable[
[service.DecryptRequest],
Union[
service.DecryptResponse,
Awaitable[service.DecryptResponse]
]]:
raise NotImplementedError()
@property
def asymmetric_sign(self) -> Callable[
[service.AsymmetricSignRequest],
Union[
service.AsymmetricSignResponse,
Awaitable[service.AsymmetricSignResponse]
]]:
raise NotImplementedError()
@property
def asymmetric_decrypt(self) -> Callable[
[service.AsymmetricDecryptRequest],
Union[
service.AsymmetricDecryptResponse,
Awaitable[service.AsymmetricDecryptResponse]
]]:
raise NotImplementedError()
@property
def mac_sign(self) -> Callable[
[service.MacSignRequest],
Union[
service.MacSignResponse,
Awaitable[service.MacSignResponse]
]]:
raise NotImplementedError()
@property
def mac_verify(self) -> Callable[
[service.MacVerifyRequest],
Union[
service.MacVerifyResponse,
Awaitable[service.MacVerifyResponse]
]]:
raise NotImplementedError()
@property
def generate_random_bytes(self) -> Callable[
[service.GenerateRandomBytesRequest],
Union[
service.GenerateRandomBytesResponse,
Awaitable[service.GenerateRandomBytesResponse]
]]:
raise NotImplementedError()
@property
def set_iam_policy(
self,
) -> Callable[
[iam_policy_pb2.SetIamPolicyRequest],
Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],
]:
raise NotImplementedError()
@property
def get_iam_policy(
self,
) -> Callable[
[iam_policy_pb2.GetIamPolicyRequest],
Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],
]:
raise NotImplementedError()
@property
def test_iam_permissions(
self,
) -> Callable[
[iam_policy_pb2.TestIamPermissionsRequest],
Union[
iam_policy_pb2.TestIamPermissionsResponse,
Awaitable[iam_policy_pb2.TestIamPermissionsResponse],
],
]:
raise NotImplementedError()
__all__ = (
'KeyManagementServiceTransport',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
ec49e6c91ca97068e5bb27f4a55e242b2c3c60c3 | d8cbe9ce0469f72b8929af01538b6ceddff10a38 | /tests/components/calendar/test_trigger.py | ac2547c81f72bf1d79b0d040948536d8a80702ac | [
"Apache-2.0"
] | permissive | piitaya/home-assistant | 9c1ba162dac9604e4d43e035e74bad7bba327f0b | 48893738192431f96966998c4ff7a3723a2f8f4a | refs/heads/dev | 2023-03-07T16:13:32.117970 | 2023-01-10T17:47:48 | 2023-01-10T17:47:48 | 172,578,293 | 3 | 1 | Apache-2.0 | 2023-02-22T06:15:56 | 2019-02-25T20:19:40 | Python | UTF-8 | Python | false | false | 21,468 | py | """Tests for the calendar automation.
The tests create calendar based automations, set up a fake set of calendar
events, then advance time to exercise that the automation is called. The
tests use a fixture that mocks out events returned by the calendar entity,
and create events using a relative time offset and then advance the clock
forward exercising the triggers.
"""
from __future__ import annotations
from collections.abc import Callable, Generator
import datetime
import logging
import secrets
from typing import Any
from unittest.mock import patch
import pytest
from homeassistant.components import calendar
import homeassistant.components.automation as automation
from homeassistant.components.calendar.trigger import EVENT_END, EVENT_START
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed, async_mock_service
_LOGGER = logging.getLogger(__name__)
CALENDAR_ENTITY_ID = "calendar.calendar_2"
CONFIG = {calendar.DOMAIN: {"platform": "demo"}}
TEST_AUTOMATION_ACTION = {
"service": "test.automation",
"data": {
"platform": "{{ trigger.platform }}",
"event": "{{ trigger.event }}",
"calendar_event": "{{ trigger.calendar_event }}",
},
}
# The trigger sets two alarms: One based on the next event and one
# to refresh the schedule. The test advances the time an arbitrary
# amount to trigger either type of event with a small jitter.
TEST_TIME_ADVANCE_INTERVAL = datetime.timedelta(minutes=1)
TEST_UPDATE_INTERVAL = datetime.timedelta(minutes=7)
class FakeSchedule:
"""Test fixture class for return events in a specific date range."""
def __init__(self, hass, freezer):
"""Initiailize FakeSchedule."""
self.hass = hass
self.freezer = freezer
# Map of event start time to event
self.events: list[calendar.CalendarEvent] = []
def create_event(
self,
start: datetime.timedelta,
end: datetime.timedelta,
summary: str | None = None,
description: str | None = None,
location: str | None = None,
) -> dict[str, Any]:
"""Create a new fake event, used by tests."""
event = calendar.CalendarEvent(
start=start,
end=end,
summary=summary if summary else f"Event {secrets.token_hex(16)}",
description=description,
location=location,
)
self.events.append(event)
return event.as_dict()
async def async_get_events(
self,
hass: HomeAssistant,
start_date: datetime.datetime,
end_date: datetime.datetime,
) -> list[calendar.CalendarEvent]:
"""Get all events in a specific time frame, used by the demo calendar."""
assert start_date < end_date
values = []
local_start_date = dt_util.as_local(start_date)
local_end_date = dt_util.as_local(end_date)
for event in self.events:
if (
event.start_datetime_local < local_end_date
and local_start_date < event.end_datetime_local
):
values.append(event)
return values
async def fire_time(self, trigger_time: datetime.datetime) -> None:
"""Fire an alarm and wait."""
_LOGGER.debug(f"Firing alarm @ {trigger_time}")
self.freezer.move_to(trigger_time)
async_fire_time_changed(self.hass, trigger_time)
await self.hass.async_block_till_done()
async def fire_until(self, end: datetime.timedelta) -> None:
"""Simulate the passage of time by firing alarms until the time is reached."""
current_time = dt_util.as_utc(self.freezer())
if (end - current_time) > (TEST_UPDATE_INTERVAL * 2):
# Jump ahead to right before the target alarm them to remove
# unnecessary waiting, before advancing in smaller increments below.
# This leaves time for multiple update intervals to refresh the set
# of upcoming events
await self.fire_time(end - TEST_UPDATE_INTERVAL * 2)
while dt_util.utcnow() < end:
self.freezer.tick(TEST_TIME_ADVANCE_INTERVAL)
await self.fire_time(dt_util.utcnow())
@pytest.fixture
def set_time_zone(hass):
"""Set the time zone for the tests."""
# Set our timezone to CST/Regina so we can check calculations
# This keeps UTC-6 all year round
hass.config.set_time_zone("America/Regina")
@pytest.fixture
def fake_schedule(hass, freezer):
"""Fixture that tests can use to make fake events."""
# Setup start time for all tests
freezer.move_to("2022-04-19 10:31:02+00:00")
schedule = FakeSchedule(hass, freezer)
with patch(
"homeassistant.components.demo.calendar.DemoCalendar.async_get_events",
new=schedule.async_get_events,
):
yield schedule
@pytest.fixture(autouse=True)
async def setup_calendar(hass: HomeAssistant, fake_schedule: FakeSchedule) -> None:
"""Initialize the demo calendar."""
assert await async_setup_component(hass, calendar.DOMAIN, CONFIG)
await hass.async_block_till_done()
async def create_automation(hass: HomeAssistant, event_type: str, offset=None) -> None:
"""Register an automation."""
trigger_data = {
"platform": calendar.DOMAIN,
"entity_id": CALENDAR_ENTITY_ID,
"event": event_type,
}
if offset:
trigger_data["offset"] = offset
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": trigger_data,
"action": TEST_AUTOMATION_ACTION,
"mode": "queued",
}
},
)
await hass.async_block_till_done()
@pytest.fixture
def calls(hass: HomeAssistant) -> Callable[[], list]:
"""Fixture to return payload data for automation calls."""
service_calls = async_mock_service(hass, "test", "automation")
def get_trigger_data() -> list:
return [c.data for c in service_calls]
return get_trigger_data
@pytest.fixture(autouse=True)
def mock_update_interval() -> Generator[None, None, None]:
"""Fixture to override the update interval for refreshing events."""
with patch(
"homeassistant.components.calendar.trigger.UPDATE_INTERVAL",
new=TEST_UPDATE_INTERVAL,
):
yield
async def test_event_start_trigger(hass, calls, fake_schedule):
"""Test the a calendar trigger based on start time."""
event_data = fake_schedule.create_event(
start=datetime.datetime.fromisoformat("2022-04-19 11:00:00+00:00"),
end=datetime.datetime.fromisoformat("2022-04-19 11:30:00+00:00"),
)
await create_automation(hass, EVENT_START)
assert len(calls()) == 0
await fake_schedule.fire_until(
datetime.datetime.fromisoformat("2022-04-19 11:15:00+00:00"),
)
assert calls() == [
{
"platform": "calendar",
"event": EVENT_START,
"calendar_event": event_data,
}
]
@pytest.mark.parametrize(
"offset_str, offset_delta",
[
("-01:00", datetime.timedelta(hours=-1)),
("+01:00", datetime.timedelta(hours=1)),
],
)
async def test_event_start_trigger_with_offset(
hass, calls, fake_schedule, offset_str, offset_delta
):
"""Test the a calendar trigger based on start time with an offset."""
event_data = fake_schedule.create_event(
start=datetime.datetime.fromisoformat("2022-04-19 12:00:00+00:00"),
end=datetime.datetime.fromisoformat("2022-04-19 12:30:00+00:00"),
)
await create_automation(hass, EVENT_START, offset=offset_str)
# No calls yet
await fake_schedule.fire_until(
datetime.datetime.fromisoformat("2022-04-19 11:55:00+00:00") + offset_delta,
)
assert len(calls()) == 0
# Event has started w/ offset
await fake_schedule.fire_until(
datetime.datetime.fromisoformat("2022-04-19 12:05:00+00:00") + offset_delta,
)
assert calls() == [
{
"platform": "calendar",
"event": EVENT_START,
"calendar_event": event_data,
}
]
async def test_event_end_trigger(hass, calls, fake_schedule):
"""Test the a calendar trigger based on end time."""
event_data = fake_schedule.create_event(
start=datetime.datetime.fromisoformat("2022-04-19 11:00:00+00:00"),
end=datetime.datetime.fromisoformat("2022-04-19 12:00:00+00:00"),
)
await create_automation(hass, EVENT_END)
# Event started, nothing should fire yet
await fake_schedule.fire_until(
datetime.datetime.fromisoformat("2022-04-19 11:10:00+00:00")
)
assert len(calls()) == 0
# Event ends
await fake_schedule.fire_until(
datetime.datetime.fromisoformat("2022-04-19 12:10:00+00:00")
)
assert calls() == [
{
"platform": "calendar",
"event": EVENT_END,
"calendar_event": event_data,
}
]
@pytest.mark.parametrize(
"offset_str, offset_delta",
[
("-01:00", datetime.timedelta(hours=-1)),
("+01:00", datetime.timedelta(hours=1)),
],
)
async def test_event_end_trigger_with_offset(
hass, calls, fake_schedule, offset_str, offset_delta
):
"""Test the a calendar trigger based on end time with an offset."""
event_data = fake_schedule.create_event(
start=datetime.datetime.fromisoformat("2022-04-19 12:00:00+00:00"),
end=datetime.datetime.fromisoformat("2022-04-19 12:30:00+00:00"),
)
await create_automation(hass, EVENT_END, offset=offset_str)
# No calls yet
await fake_schedule.fire_until(
datetime.datetime.fromisoformat("2022-04-19 12:05:00+00:00") + offset_delta,
)
assert len(calls()) == 0
# Event has started w/ offset
await fake_schedule.fire_until(
datetime.datetime.fromisoformat("2022-04-19 12:35:00+00:00") + offset_delta,
)
assert calls() == [
{
"platform": "calendar",
"event": EVENT_END,
"calendar_event": event_data,
}
]
async def test_calendar_trigger_with_no_events(hass, calls, fake_schedule):
"""Test a calendar trigger setup with no events."""
await create_automation(hass, EVENT_START)
await create_automation(hass, EVENT_END)
# No calls, at arbitrary times
await fake_schedule.fire_until(
datetime.datetime.fromisoformat("2022-04-19 11:00:00+00:00")
)
assert len(calls()) == 0
async def test_multiple_start_events(hass, calls, fake_schedule):
"""Test that a trigger fires for multiple events."""
event_data1 = fake_schedule.create_event(
start=datetime.datetime.fromisoformat("2022-04-19 10:45:00+00:00"),
end=datetime.datetime.fromisoformat("2022-04-19 11:00:00+00:00"),
)
event_data2 = fake_schedule.create_event(
start=datetime.datetime.fromisoformat("2022-04-19 11:00:00+00:00"),
end=datetime.datetime.fromisoformat("2022-04-19 11:15:00+00:00"),
)
await create_automation(hass, EVENT_START)
await fake_schedule.fire_until(
datetime.datetime.fromisoformat("2022-04-19 11:30:00+00:00")
)
assert calls() == [
{
"platform": "calendar",
"event": EVENT_START,
"calendar_event": event_data1,
},
{
"platform": "calendar",
"event": EVENT_START,
"calendar_event": event_data2,
},
]
async def test_multiple_end_events(hass, calls, fake_schedule):
"""Test that a trigger fires for multiple events."""
event_data1 = fake_schedule.create_event(
start=datetime.datetime.fromisoformat("2022-04-19 10:45:00+00:00"),
end=datetime.datetime.fromisoformat("2022-04-19 11:00:00+00:00"),
)
event_data2 = fake_schedule.create_event(
start=datetime.datetime.fromisoformat("2022-04-19 11:00:00+00:00"),
end=datetime.datetime.fromisoformat("2022-04-19 11:15:00+00:00"),
)
await create_automation(hass, EVENT_END)
await fake_schedule.fire_until(
datetime.datetime.fromisoformat("2022-04-19 11:30:00+00:00")
)
assert calls() == [
{
"platform": "calendar",
"event": EVENT_END,
"calendar_event": event_data1,
},
{
"platform": "calendar",
"event": EVENT_END,
"calendar_event": event_data2,
},
]
async def test_multiple_events_sharing_start_time(hass, calls, fake_schedule):
"""Test that a trigger fires for every event sharing a start time."""
event_data1 = fake_schedule.create_event(
start=datetime.datetime.fromisoformat("2022-04-19 11:00:00+00:00"),
end=datetime.datetime.fromisoformat("2022-04-19 11:30:00+00:00"),
)
event_data2 = fake_schedule.create_event(
start=datetime.datetime.fromisoformat("2022-04-19 11:00:00+00:00"),
end=datetime.datetime.fromisoformat("2022-04-19 11:30:00+00:00"),
)
await create_automation(hass, EVENT_START)
await fake_schedule.fire_until(
datetime.datetime.fromisoformat("2022-04-19 11:35:00+00:00")
)
assert calls() == [
{
"platform": "calendar",
"event": EVENT_START,
"calendar_event": event_data1,
},
{
"platform": "calendar",
"event": EVENT_START,
"calendar_event": event_data2,
},
]
async def test_overlap_events(hass, calls, fake_schedule):
"""Test that a trigger fires for events that overlap."""
event_data1 = fake_schedule.create_event(
start=datetime.datetime.fromisoformat("2022-04-19 11:00:00+00:00"),
end=datetime.datetime.fromisoformat("2022-04-19 11:30:00+00:00"),
)
event_data2 = fake_schedule.create_event(
start=datetime.datetime.fromisoformat("2022-04-19 11:15:00+00:00"),
end=datetime.datetime.fromisoformat("2022-04-19 11:45:00+00:00"),
)
await create_automation(hass, EVENT_START)
await fake_schedule.fire_until(
datetime.datetime.fromisoformat("2022-04-19 11:20:00+00:00")
)
assert calls() == [
{
"platform": "calendar",
"event": EVENT_START,
"calendar_event": event_data1,
},
{
"platform": "calendar",
"event": EVENT_START,
"calendar_event": event_data2,
},
]
async def test_invalid_calendar_id(hass, caplog):
"""Test creating a trigger with an invalid calendar id."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"action": TEST_AUTOMATION_ACTION,
"trigger": {
"platform": calendar.DOMAIN,
"entity_id": "invalid-calendar-id",
},
}
},
)
await hass.async_block_till_done()
assert "Entity ID invalid-calendar-id is an invalid entity ID" in caplog.text
async def test_legacy_entity_type(hass, caplog):
"""Test creating a trigger with an invalid calendar id."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"action": TEST_AUTOMATION_ACTION,
"trigger": {
"platform": calendar.DOMAIN,
"entity_id": "calendar.calendar_3",
},
}
},
)
await hass.async_block_till_done()
assert "is not a calendar entity" in caplog.text
async def test_update_next_event(hass, calls, fake_schedule):
"""Test detection of a new event after initial trigger is setup."""
event_data1 = fake_schedule.create_event(
start=datetime.datetime.fromisoformat("2022-04-19 11:00:00+00:00"),
end=datetime.datetime.fromisoformat("2022-04-19 11:15:00+00:00"),
)
await create_automation(hass, EVENT_START)
# No calls before event start
await fake_schedule.fire_until(
datetime.datetime.fromisoformat("2022-04-19 10:45:00+00:00")
)
assert len(calls()) == 0
# Create a new event between now and when the event fires
event_data2 = fake_schedule.create_event(
start=datetime.datetime.fromisoformat("2022-04-19 10:55:00+00:00"),
end=datetime.datetime.fromisoformat("2022-04-19 11:05:00+00:00"),
)
# Advance past the end of the events
await fake_schedule.fire_until(
datetime.datetime.fromisoformat("2022-04-19 11:30:00+00:00")
)
assert calls() == [
{
"platform": "calendar",
"event": EVENT_START,
"calendar_event": event_data2,
},
{
"platform": "calendar",
"event": EVENT_START,
"calendar_event": event_data1,
},
]
async def test_update_missed(hass, calls, fake_schedule):
"""Test that new events are missed if they arrive outside the update interval."""
event_data1 = fake_schedule.create_event(
start=datetime.datetime.fromisoformat("2022-04-19 11:00:00+00:00"),
end=datetime.datetime.fromisoformat("2022-04-19 11:30:00+00:00"),
)
await create_automation(hass, EVENT_START)
# Events are refreshed at t+TEST_UPDATE_INTERVAL minutes. A new event is
# added, but the next update happens after the event is already over.
await fake_schedule.fire_until(
datetime.datetime.fromisoformat("2022-04-19 10:38:00+00:00")
)
assert len(calls()) == 0
fake_schedule.create_event(
start=datetime.datetime.fromisoformat("2022-04-19 10:40:00+00:00"),
end=datetime.datetime.fromisoformat("2022-04-19 10:55:00+00:00"),
)
# Only the first event is returned
await fake_schedule.fire_until(
datetime.datetime.fromisoformat("2022-04-19 11:05:00+00:00")
)
assert calls() == [
{
"platform": "calendar",
"event": EVENT_START,
"calendar_event": event_data1,
},
]
@pytest.mark.parametrize(
"create_data,fire_time,payload_data",
[
(
{
"start": datetime.datetime.fromisoformat("2022-04-19 11:00:00+00:00"),
"end": datetime.datetime.fromisoformat("2022-04-19 11:30:00+00:00"),
"summary": "Summary",
},
datetime.datetime.fromisoformat("2022-04-19 11:15:00+00:00"),
{
"summary": "Summary",
"start": "2022-04-19T11:00:00+00:00",
"end": "2022-04-19T11:30:00+00:00",
"all_day": False,
},
),
(
{
"start": datetime.datetime.fromisoformat("2022-04-19 11:00:00+00:00"),
"end": datetime.datetime.fromisoformat("2022-04-19 11:30:00+00:00"),
"summary": "Summary",
"description": "Description",
"location": "Location",
},
datetime.datetime.fromisoformat("2022-04-19 11:15:00+00:00"),
{
"summary": "Summary",
"start": "2022-04-19T11:00:00+00:00",
"end": "2022-04-19T11:30:00+00:00",
"all_day": False,
"description": "Description",
"location": "Location",
},
),
(
{
"summary": "Summary",
"start": datetime.date.fromisoformat("2022-04-20"),
"end": datetime.date.fromisoformat("2022-04-21"),
},
datetime.datetime.fromisoformat("2022-04-20 00:00:01-06:00"),
{
"summary": "Summary",
"start": "2022-04-20",
"end": "2022-04-21",
"all_day": True,
},
),
],
ids=["basic", "more-fields", "all-day"],
)
async def test_event_payload(
hass, calls, fake_schedule, set_time_zone, create_data, fire_time, payload_data
):
"""Test the fields in the calendar event payload are set."""
fake_schedule.create_event(**create_data)
await create_automation(hass, EVENT_START)
assert len(calls()) == 0
await fake_schedule.fire_until(fire_time)
assert calls() == [
{
"platform": "calendar",
"event": EVENT_START,
"calendar_event": payload_data,
}
]
async def test_trigger_timestamp_window_edge(hass, calls, fake_schedule, freezer):
"""Test that events in the edge of a scan are included."""
freezer.move_to("2022-04-19 11:00:00+00:00")
    # The event start time falls exactly on a TEST_UPDATE_INTERVAL boundary,
    # which would exclude it from the first scan window.
event_data = fake_schedule.create_event(
start=datetime.datetime.fromisoformat("2022-04-19 11:14:00+00:00"),
end=datetime.datetime.fromisoformat("2022-04-19 11:30:00+00:00"),
)
await create_automation(hass, EVENT_START)
assert len(calls()) == 0
await fake_schedule.fire_until(
datetime.datetime.fromisoformat("2022-04-19 11:20:00+00:00")
)
assert calls() == [
{
"platform": "calendar",
"event": EVENT_START,
"calendar_event": event_data,
}
]
| [
"[email protected]"
] | |
3a9bf2b914edde4e5c397c7319864fbf32311712 | 117f066c80f3863ebef74463292bca6444f9758a | /finnhub_swagger_api/finnhub_swagger_api/models/revenue_estimates_info.py | 02eb5c15a1e32e1b17eb727157f4a1affeec2537 | [] | no_license | cottrell/notebooks | c6de3842cbaeb71457d270cbe6fabc8695a6ee1b | 9eaf3d0500067fccb294d064ab78d7aaa03e8b4d | refs/heads/master | 2023-08-09T22:41:01.996938 | 2023-08-04T22:41:51 | 2023-08-04T22:41:51 | 26,830,272 | 3 | 1 | null | 2023-03-04T03:58:03 | 2014-11-18T21:14:23 | Python | UTF-8 | Python | false | false | 7,028 | py | # coding: utf-8
"""
Finnhub API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from finnhub_swagger_api.configuration import Configuration
class RevenueEstimatesInfo(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'revenue_avg': 'float',
'revenue_high': 'float',
'revenue_low': 'float',
'number_analysts': 'int',
'period': 'date'
}
attribute_map = {
'revenue_avg': 'revenueAvg',
'revenue_high': 'revenueHigh',
'revenue_low': 'revenueLow',
'number_analysts': 'numberAnalysts',
'period': 'period'
}
def __init__(self, revenue_avg=None, revenue_high=None, revenue_low=None, number_analysts=None, period=None, _configuration=None): # noqa: E501
"""RevenueEstimatesInfo - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._revenue_avg = None
self._revenue_high = None
self._revenue_low = None
self._number_analysts = None
self._period = None
self.discriminator = None
if revenue_avg is not None:
self.revenue_avg = revenue_avg
if revenue_high is not None:
self.revenue_high = revenue_high
if revenue_low is not None:
self.revenue_low = revenue_low
if number_analysts is not None:
self.number_analysts = number_analysts
if period is not None:
self.period = period
@property
def revenue_avg(self):
"""Gets the revenue_avg of this RevenueEstimatesInfo. # noqa: E501
Average revenue estimates including Finnhub's proprietary estimates. # noqa: E501
:return: The revenue_avg of this RevenueEstimatesInfo. # noqa: E501
:rtype: float
"""
return self._revenue_avg
@revenue_avg.setter
def revenue_avg(self, revenue_avg):
"""Sets the revenue_avg of this RevenueEstimatesInfo.
Average revenue estimates including Finnhub's proprietary estimates. # noqa: E501
:param revenue_avg: The revenue_avg of this RevenueEstimatesInfo. # noqa: E501
:type: float
"""
self._revenue_avg = revenue_avg
@property
def revenue_high(self):
"""Gets the revenue_high of this RevenueEstimatesInfo. # noqa: E501
Highest estimate. # noqa: E501
:return: The revenue_high of this RevenueEstimatesInfo. # noqa: E501
:rtype: float
"""
return self._revenue_high
@revenue_high.setter
def revenue_high(self, revenue_high):
"""Sets the revenue_high of this RevenueEstimatesInfo.
Highest estimate. # noqa: E501
:param revenue_high: The revenue_high of this RevenueEstimatesInfo. # noqa: E501
:type: float
"""
self._revenue_high = revenue_high
@property
def revenue_low(self):
"""Gets the revenue_low of this RevenueEstimatesInfo. # noqa: E501
Lowest estimate. # noqa: E501
:return: The revenue_low of this RevenueEstimatesInfo. # noqa: E501
:rtype: float
"""
return self._revenue_low
@revenue_low.setter
def revenue_low(self, revenue_low):
"""Sets the revenue_low of this RevenueEstimatesInfo.
Lowest estimate. # noqa: E501
:param revenue_low: The revenue_low of this RevenueEstimatesInfo. # noqa: E501
:type: float
"""
self._revenue_low = revenue_low
@property
def number_analysts(self):
"""Gets the number_analysts of this RevenueEstimatesInfo. # noqa: E501
Number of Analysts. # noqa: E501
:return: The number_analysts of this RevenueEstimatesInfo. # noqa: E501
:rtype: int
"""
return self._number_analysts
@number_analysts.setter
def number_analysts(self, number_analysts):
"""Sets the number_analysts of this RevenueEstimatesInfo.
Number of Analysts. # noqa: E501
:param number_analysts: The number_analysts of this RevenueEstimatesInfo. # noqa: E501
:type: int
"""
self._number_analysts = number_analysts
@property
def period(self):
"""Gets the period of this RevenueEstimatesInfo. # noqa: E501
Period. # noqa: E501
:return: The period of this RevenueEstimatesInfo. # noqa: E501
:rtype: date
"""
return self._period
@period.setter
def period(self, period):
"""Sets the period of this RevenueEstimatesInfo.
Period. # noqa: E501
:param period: The period of this RevenueEstimatesInfo. # noqa: E501
:type: date
"""
self._period = period
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(RevenueEstimatesInfo, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RevenueEstimatesInfo):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, RevenueEstimatesInfo):
return True
return self.to_dict() != other.to_dict()
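# A minimal usage sketch (illustrative values only, not taken from any real
# API response): the generated model is constructed with keyword arguments
# matching the attribute names in ``swagger_types`` and serialized with
# ``to_dict()`` / ``to_str()``.
#
#     estimate = RevenueEstimatesInfo(
#         revenue_avg=105.0,
#         revenue_high=120.0,
#         revenue_low=90.0,
#         number_analysts=12,
#     )
#     estimate.to_dict()
#     # {'revenue_avg': 105.0, 'revenue_high': 120.0,
#     #  'revenue_low': 90.0, 'number_analysts': 12, 'period': None}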
| [
"[email protected]"
] | |
26ec2100442d4be7cb84f871f4af39f81f332470 | 056f10d9f99506bb9b5abf7e91633f3ad0c76061 | /CountCSVRows.py | f31ac1a85a8c869736b03a67223274ff65e3ce66 | [] | no_license | taers232c/GAM-Scripts3 | 5f171b620b2ac19514ab7198e39720f59a60ba9e | a59c5adb7b03b6bc9a4e054b9b41eabae2779f13 | refs/heads/master | 2023-08-31T06:43:57.645295 | 2023-08-22T17:32:21 | 2023-08-22T17:32:21 | 108,921,186 | 176 | 46 | null | 2023-02-28T15:52:32 | 2017-10-30T23:48:44 | Python | UTF-8 | Python | false | false | 573 | py | #!/usr/bin/env python3
"""
# Purpose: Count rows in a CSV file
#
# Python: Use python or python3 below as appropriate to your system; verify that you have version 3
# $ python -V or python3 -V
# Python 3.x.y
# Usage:
# python3 CountCSVRows.py File.csv
#
"""
import csv
import sys
QUOTE_CHAR = '"' # Adjust as needed
if sys.argv[1] != '-':
inputFile = open(sys.argv[1], 'r', encoding='utf-8')
else:
inputFile = sys.stdin
rows = 0
for row in csv.DictReader(inputFile, quotechar=QUOTE_CHAR):
rows += 1
print(rows)
if inputFile != sys.stdin:
inputFile.close()
| [
"[email protected]"
] | |
7c4856b94c048615d4958703b69db3191a928ddf | d7195e61bc37f6b90c8bc2d6f164e5e7da98aa77 | /landlab/grid/linkstatus.py | 6eb74a1aadecb3b7f83bdb0915c210dc93491ae0 | [
"MIT"
] | permissive | joeljgeo/landlab | ffaae36b3ad3c5e1377355427bc9cfbb21074f01 | 1d2651c76a8a36a7a132f139638192df1823f8fb | refs/heads/master | 2020-04-05T01:38:11.870170 | 2018-11-09T16:44:31 | 2018-11-09T16:44:31 | 156,443,219 | 0 | 0 | MIT | 2018-11-09T16:44:32 | 2018-11-06T20:26:54 | Python | UTF-8 | Python | false | false | 5,415 | py | #! /usr/bin/env python
import numpy as np
from .nodestatus import (CLOSED_BOUNDARY, CORE_NODE, FIXED_GRADIENT_BOUNDARY,
FIXED_VALUE_BOUNDARY)
from ..utils.decorators import (cache_result_in_object,
make_return_array_immutable)
# Define the link types
#: Indicates a link is *active*, and can carry flux
ACTIVE_LINK = 0
#: Indicates a link has a fixed (gradient) value, & behaves as a boundary
FIXED_LINK = 2
#: Indicates a link is *inactive*, and cannot carry flux
INACTIVE_LINK = 4
LINK_STATUS_FLAGS_LIST = [
ACTIVE_LINK,
FIXED_LINK,
INACTIVE_LINK,
]
LINK_STATUS_FLAGS = set(LINK_STATUS_FLAGS_LIST)
def is_fixed_link(node_status_at_link):
"""Find links that are fixed.
A link is fixed if it connects a core node with a fixed value
boundary node.
Parameters
----------
node_status_at_link : ndarray of int, shape `(n_links, 2)`
        Node status at link tail and head.
Returns
-------
ndarray of bool, shape `(n_links, )`
True if link is fixed.
Examples
--------
>>> from landlab.grid.diagonals import is_fixed_link
>>> from landlab import CORE_NODE, FIXED_GRADIENT_BOUNDARY
>>> is_fixed_link([CORE_NODE, FIXED_GRADIENT_BOUNDARY])
array([ True], dtype=bool)
>>> from landlab import FIXED_VALUE_BOUNDARY
>>> is_fixed_link([CORE_NODE, FIXED_VALUE_BOUNDARY])
array([False], dtype=bool)
>>> is_fixed_link([[FIXED_GRADIENT_BOUNDARY, CORE_NODE],
... [CORE_NODE, CORE_NODE]])
array([ True, False], dtype=bool)
"""
node_status_at_link = np.asarray(node_status_at_link).reshape((-1, 2))
is_core_node = node_status_at_link == CORE_NODE
is_fixed_gradient_node = node_status_at_link == FIXED_GRADIENT_BOUNDARY
return ((is_core_node[:, 0] & is_fixed_gradient_node[:, 1]) |
(is_fixed_gradient_node[:, 0] & is_core_node[:, 1]))
def is_inactive_link(node_status_at_link):
"""Find links that are inactive.
A link is inactive if it connects two boundary nodes or one of
its nodes is closed.
Parameters
----------
node_status_at_link : ndarray of int, shape `(n_links, 2)`
        Node status at link tail and head.
Returns
-------
ndarray of bool, shape `(n_links, )`
        True if link is inactive.
Examples
--------
>>> from landlab.grid.diagonals import is_inactive_link
    >>> from landlab import CORE_NODE, FIXED_GRADIENT_BOUNDARY, CLOSED_BOUNDARY
>>> is_inactive_link([CORE_NODE, CLOSED_BOUNDARY])
array([ True], dtype=bool)
>>> from landlab import FIXED_VALUE_BOUNDARY
>>> is_inactive_link([FIXED_GRADIENT_BOUNDARY, FIXED_VALUE_BOUNDARY])
array([ True], dtype=bool)
>>> is_inactive_link([[FIXED_GRADIENT_BOUNDARY, CLOSED_BOUNDARY],
... [CORE_NODE, CORE_NODE]])
array([ True, False], dtype=bool)
"""
node_status_at_link = np.asarray(node_status_at_link).reshape((-1, 2))
is_core = node_status_at_link == CORE_NODE
is_fixed_value = node_status_at_link == FIXED_VALUE_BOUNDARY
is_fixed_gradient = node_status_at_link == FIXED_GRADIENT_BOUNDARY
is_closed = node_status_at_link == CLOSED_BOUNDARY
is_boundary_node = is_fixed_value | is_fixed_gradient | is_closed
return ((is_boundary_node[:, 0] & is_boundary_node[:, 1]) |
(is_closed[:, 0] & is_core[:, 1]) |
(is_core[:, 0] & is_closed[:, 1]))
def is_active_link(node_status_at_link):
"""Find links that are active.
A link is active if it connects a core node with another core
node or a fixed value boundary.
Parameters
----------
node_status_at_link : ndarray of int, shape `(n_links, 2)`
        Node status at link tail and head.
Returns
-------
ndarray of bool, shape `(n_links, )`
        True if link is active.
Examples
--------
>>> from landlab.grid.diagonals import is_active_link
>>> from landlab import CORE_NODE, FIXED_GRADIENT_BOUNDARY
>>> is_active_link([CORE_NODE, FIXED_GRADIENT_BOUNDARY])
array([False], dtype=bool)
>>> from landlab import FIXED_VALUE_BOUNDARY
>>> is_active_link([CORE_NODE, FIXED_VALUE_BOUNDARY])
array([ True], dtype=bool)
>>> is_active_link([[FIXED_GRADIENT_BOUNDARY, CORE_NODE],
... [CORE_NODE, CORE_NODE]])
array([False, True], dtype=bool)
"""
node_status_at_link = np.asarray(node_status_at_link).reshape((-1, 2))
is_core_node = node_status_at_link == CORE_NODE
is_fixed_value_node = node_status_at_link == FIXED_VALUE_BOUNDARY
return (
(is_core_node[:, 0] & is_core_node[:, 1]) |
(is_core_node[:, 0] & is_fixed_value_node[:, 1]) |
(is_fixed_value_node[:, 0] & is_core_node[:, 1])
)
def set_status_at_link(node_status_at_link, out=None):
n_links = len(node_status_at_link)
if out is None:
out = np.full(n_links, 255, dtype=np.uint8)
_is_fixed_link = is_fixed_link(node_status_at_link)
_is_active_link = is_active_link(node_status_at_link)
_is_inactive_link = is_inactive_link(node_status_at_link)
assert np.all(np.sum(np.vstack((_is_active_link, _is_inactive_link,
_is_fixed_link)), axis=0) == 1)
out[_is_inactive_link] = INACTIVE_LINK
out[_is_active_link] = ACTIVE_LINK
out[_is_fixed_link] = FIXED_LINK
return out
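# A minimal sketch of how ``set_status_at_link`` combines the predicates above.
# With the node-status constants imported as in the doctests, each link row is
# mapped onto the ACTIVE_LINK (0), FIXED_LINK (2) or INACTIVE_LINK (4) flag:
#
#     >>> set_status_at_link([[CORE_NODE, CORE_NODE],
#     ...                     [CORE_NODE, CLOSED_BOUNDARY],
#     ...                     [CORE_NODE, FIXED_GRADIENT_BOUNDARY]])
#     array([0, 4, 2], dtype=uint8)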
| [
"[email protected]"
] | |
6d8323e3ea02352d65d2f5f99110a013ddd2cc3d | 1348885ccdebfcb6010a267a3440a4ccc64373d1 | /Examples/IPlugSideChain/scripts/update_installer_version.py | d4c3a9886d1d11e75b3572f01e371d4ebdeff671 | [
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ddf/iPlug2 | c6565343def57dbf063fefb3b875c6337d363081 | d05d20929544b06500369208b9ec81a62eb191fb | refs/heads/master | 2022-11-02T04:39:45.019866 | 2022-10-10T17:15:04 | 2022-10-10T17:15:04 | 170,179,953 | 2 | 0 | NOASSERTION | 2019-02-11T18:30:30 | 2019-02-11T18:30:30 | null | UTF-8 | Python | false | false | 3,091 | py | #!/usr/bin/python3
# this script will update the versions in packages and innosetup installer files to match that in config.h
import plistlib, os, datetime, fileinput, glob, sys, string
scriptpath = os.path.dirname(os.path.realpath(__file__))
projectpath = os.path.abspath(os.path.join(scriptpath, os.pardir))
IPLUG2_ROOT = "../../.."
sys.path.insert(0, os.path.join(os.getcwd(), IPLUG2_ROOT + '/Scripts'))
from parse_config import parse_config
def replacestrs(filename, s, r):
files = glob.glob(filename)
for line in fileinput.input(files,inplace=1):
line = line.replace(s, r)
sys.stdout.write(line)
def main():
demo = 0
if len(sys.argv) != 2:
print("Usage: update_installer_version.py demo(0 or 1)")
sys.exit(1)
else:
demo=int(sys.argv[1])
config = parse_config(projectpath)
# MAC INSTALLER
print("Updating Mac Installer version info...")
plistpath = projectpath + "/installer/" + config['BUNDLE_NAME'] + ".pkgproj"
with open(plistpath, 'rb') as fp:
installer = plistlib.load(fp)
# range = number of items in the installer (VST 2, VST 3, app, audiounit, aax)
for x in range(0,5):
installer['PACKAGES'][x]['PACKAGE_SETTINGS']['VERSION'] = config['FULL_VER_STR']
if demo:
installer['PROJECT']['PROJECT_PRESENTATION']['TITLE']['LOCALIZATIONS'][0]['VALUE'] = config['BUNDLE_NAME'] + " Demo"
installer['PROJECT']['PROJECT_PRESENTATION']['INTRODUCTION']['LOCALIZATIONS'][0]['VALUE']['PATH'] = "intro-demo.rtf"
else:
installer['PROJECT']['PROJECT_PRESENTATION']['TITLE']['LOCALIZATIONS'][0]['VALUE'] = config['BUNDLE_NAME']
installer['PROJECT']['PROJECT_PRESENTATION']['INTRODUCTION']['LOCALIZATIONS'][0]['VALUE']['PATH'] = "intro.rtf"
with open(plistpath, 'wb') as fp:
plistlib.dump(installer, fp)
# replacestrs(plistpath, "//Apple//", "//Apple Computer//")
# WIN INSTALLER
print("Updating Windows Installer version info...")
for line in fileinput.input(projectpath + "/installer/" + config['BUNDLE_NAME'] + ".iss",inplace=1):
if "AppVersion" in line:
line="AppVersion=" + config['FULL_VER_STR'] + "\n"
if "OutputBaseFilename" in line:
if demo:
line="OutputBaseFilename=IPlugSideChain Demo Installer\n"
else:
line="OutputBaseFilename=IPlugSideChain Installer\n"
if 'Source: "readme' in line:
if demo:
line='Source: "readme-win-demo.rtf"; DestDir: "{app}"; DestName: "readme.rtf"; Flags: isreadme\n'
else:
line='Source: "readme-win.rtf"; DestDir: "{app}"; DestName: "readme.rtf"; Flags: isreadme\n'
if "WelcomeLabel1" in line:
if demo:
line="WelcomeLabel1=Welcome to the IPlugSideChain Demo installer\n"
else:
line="WelcomeLabel1=Welcome to the IPlugSideChain installer\n"
if "SetupWindowTitle" in line:
if demo:
line="SetupWindowTitle=IPlugSideChain Demo installer\n"
else:
line="SetupWindowTitle=IPlugSideChain installer\n"
sys.stdout.write(line)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
d021d36f984ab643b089ddca6cf72adba3e0c21e | e3565e1ce607f60745f2a045aae8026661a6b99b | /resources/Onyx-1.0.511/py/onyx/grid/griddy.py | b7fdb67930c51b30bfc7c426ac2a4ed49d48c2c2 | [
"Apache-2.0"
] | permissive | eternity668/speechAD | 4c08d953b2ed06b3357b1c39d8709dd088a2471c | f270a1be86372b7044615e4fd82032029e123bc1 | refs/heads/master | 2021-01-12T22:10:33.358500 | 2014-02-03T16:03:28 | 2014-02-03T16:03:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,152 | py | ###########################################################################
#
# File: griddy.py (directory: ./py/onyx/grid)
# Date: 4-Feb-2009
# Author: Hugh Secker-Walker
# Description: A function for use in testing by gridgo.py
#
# This file is part of Onyx http://onyxtools.sourceforge.net
#
# Copyright 2009 The Johns Hopkins University
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
###########################################################################
"""
>>> True
True
"""
def my_func(a, b, c):
return a, b, c
if __name__ == '__main__':
from onyx import onyx_mainstartup
onyx_mainstartup()
| [
"[email protected]"
] | |
9eb02a16cb5679b043e158e4f36ae3ea11a51e80 | 162f0a636cab320ead784b33597e583e38ac432f | /1744.py | f339b7b48defbece73a4dddc7bee0dbea7c0d161 | [] | no_license | goodsosbva/BOJ_Greedy | fc2450df90f64790f6cc01c168ba7f19ec83e504 | 98d21af254cacf41632a4b40ca9ef643b29bb104 | refs/heads/main | 2023-03-31T17:26:33.863396 | 2021-04-06T07:50:57 | 2021-04-06T07:50:57 | 347,081,712 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,641 | py | n = int(input())
sequence = []
negative = []
positive = []
res = 0
for i in range(n):
i = int(input())
sequence.append(i)
for k in sequence:
if k < 0:
negative.append(k)
elif k > 0:
positive.append(k)
else:
negative.append(k)
negative.sort()
positive.sort(reverse=True)
# print(negative)
u = len(negative)
if 0 in negative:
if u % 2 == 0:
for q in range(0, u, 2):
res += negative[q] * negative[q + 1]
else:
for w in range(0, u - 1, 2):
res += negative[w] * negative[w + 1]
else:
if u % 2 == 0:
for q in range(0, u, 2):
res += negative[q] * negative[q + 1]
elif u % 2 != 0 and u != 1:
for w in range(0, u - 1, 2):
res += negative[w] * negative[w + 1]
res += negative[u - 1]
else:
res += negative[0]
# print("음수합:", res)
# print(positive)
v = len(positive)
if 1 in positive:
x = positive.count(1)
# print(x)
if v - 1 > x:
if v % 2 == 0:
for s in range(0, v - x, 2):
res += positive[s] * positive[s + 1]
res += x
else:
for t in range(0, v - x, 2):
res += positive[t] * positive[t + 1]
res += x
else:
for h in positive:
res += h
else:
if v % 2 == 0:
for r in range(0, v, 2):
res += positive[r] * positive[r + 1]
else:
for f in range(0, v - 1, 2):
res += positive[f] * positive[f + 1]
res += positive[v - 1]
print(res)
| [
"[email protected]"
] | |
f8e9765b859dd527defd2ce06933a55ecb70e041 | 35fdd5b42b47a1dbe6a25f6fc1865f4e48b842a5 | /evalml/tests/component_tests/test_catboost_classifier.py | 1ef6fd41a8656a2914d90172ce42a92330d0a24e | [
"BSD-3-Clause"
] | permissive | skvorekn/evalml | 41e5426f9f7d5ad625c21b74336009894c79c7de | 2cbfa344ec3fdc0fb0f4a0f1093811135b9b97d8 | refs/heads/main | 2023-03-27T01:42:07.691406 | 2021-03-19T18:53:43 | 2021-03-19T18:53:43 | 349,555,689 | 0 | 0 | BSD-3-Clause | 2021-03-21T14:57:01 | 2021-03-19T21:08:12 | null | UTF-8 | Python | false | false | 837 | py | import pandas as pd
from pytest import importorskip
from evalml.pipelines.components import CatBoostClassifier
from evalml.utils import SEED_BOUNDS
importorskip('catboost', reason='Skipping test because catboost not installed')
def test_catboost_classifier_random_seed_bounds_seed(X_y_binary):
"""ensure catboost's RNG doesn't fail for the min/max bounds we support on user-inputted random seeds"""
X, y = X_y_binary
col_names = ["col_{}".format(i) for i in range(len(X[0]))]
X = pd.DataFrame(X, columns=col_names)
y = pd.Series(y)
clf = CatBoostClassifier(n_estimators=1, max_depth=1, random_seed=SEED_BOUNDS.min_bound)
clf.fit(X, y)
clf = CatBoostClassifier(n_estimators=1, max_depth=1, random_seed=SEED_BOUNDS.max_bound)
fitted = clf.fit(X, y)
assert isinstance(fitted, CatBoostClassifier)
| [
"[email protected]"
] | |
0f776e18f96167e136351a53c789777a2a35a629 | cbc5e26bb47ae69e80a3649c90275becf25ce404 | /xlsxwriter/test/comparison/test_chart_layout04.py | f377a5806721d2af1d65752bac33bb918a5d84f3 | [
"BSD-2-Clause-Views",
"BSD-3-Clause",
"MIT"
] | permissive | mst-solar-car/kicad-bom-generator | c3549409c3139f787ad28391372b5cb03791694a | 2aae905056d06f3d25343a8d784049c141d05640 | refs/heads/master | 2021-09-07T14:00:40.759486 | 2018-02-23T23:21:13 | 2018-02-23T23:21:13 | 107,868,801 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,764 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2017, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_layout04.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of an XlsxWriter file with user defined layout."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [68311296, 69198208]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart.set_title({
'name': 'Title',
'layout': {
'x': 0.42631933508311465,
'y': 0.14351851851851852,
}
})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| [
"[email protected]"
] | |
48cf0f54c8738ea16878d6beb0a2fd2a8d7aa385 | c50e5af8f72de6ef560ee6c0bbfa756087824c96 | /刷题/Leetcode/84. 柱状图中最大的矩形/p84_Largest_Rectangle_in_Histogram_暴力.py | 7430809260718f7c390d48a5c4dc9f9b4dcaa792 | [] | no_license | binghe2402/learnPython | 5a1beef9d446d8316aaa65f6cc9d8aee59ab4d1c | 2b9e21fe4a8eea0f8826c57287d59f9d8f3c87ce | refs/heads/master | 2022-05-27T03:32:12.750854 | 2022-03-19T08:00:19 | 2022-03-19T08:00:19 | 252,106,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | from typing import List
class Solution:
    # # Brute force: iterate over every boundary pair (width) and take the minimum height inside it
# def largestRectangleArea(self, heights: List[int]) -> int:
# area = 0
# for i in range(len(heights)):
# for j in range(i, len(heights)):
# area = max(area, (j-i+1)*min(heights[i:j+1]))
# return area
    # Iterate over the bars as heights (expand outward from each bar); the usable width is the
    # narrowest span bounded by the first bar on either side that is lower than the starting bar
def largestRectangleArea(self, heights: List[int]) -> int:
area = 0
for i in range(len(heights)):
left = right = i
            # find the left boundary
while left >= 0 and heights[i] <= heights[left]:
left -= 1
            # find the right boundary
while right < len(heights) and heights[i] <= heights[right]:
right += 1
area = max(area, (right - left - 1)*heights[i])
return area
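# A quick sanity check of the expansion approach above, using the classic
# histogram from the problem statement: the best rectangle spans the bars of
# height 5 and 6, giving area 10.
#     Solution().largestRectangleArea([2, 1, 5, 6, 2, 3])  # -> 10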
| [
"[email protected]"
] | |
c6fef081bd46b0cb2875a2870bf64ad4631575c4 | baffcef29e33658138c43ef358d7399ab3ea2c0d | /WORKFLOWS/Tools/NEC/NAL/nal-model/rest/test/unit/test_dcs.py | cb088e130c920d2697ba7584fef6500526bdc175 | [
"Apache-2.0",
"JSON"
] | permissive | openmsa/NO | aa7d4ff000875bfcff0baee24555ec16becdb64e | 24df42ee3927415b552b5e5d7326eecd04ebca61 | refs/heads/master | 2020-03-09T23:21:09.657439 | 2019-03-29T06:29:07 | 2019-03-29T06:29:07 | 129,056,267 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,271 | py | import json
import mysql.connector
import os
import sys
import unittest
import urllib.request
import urllib.parse
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../../')
from rest.api import router
from rest.conf import config
class TestSelectAPI(unittest.TestCase):
# Do a test of Select.
ID = 0
def setUp(self):
# Establish a clean test environment.
super(TestSelectAPI, self).setUp()
# Insert test data
self.create_fixtures()
def tearDown(self):
"""Clear the test environment"""
super(TestSelectAPI, self).tearDown()
self.destroy_fixtures()
def create_fixtures(self):
con, cur = self.connect_db()
global extension_info
extension_info = {
'dc_name': 'dc_namexxxxxxxxxx',
'dc_number': 'dc_numberxxxxxxxxxx'
}
# Execute SQL
param_vals = ['test_create_id-0ac6cb428b23', '2016-12-31 23:59:59',
'test_update_id-0ac6cb428b23', '2016-12-31 23:59:59',
0, 'dc_id-dd7e-0ac6cb428b23',
json.dumps(extension_info)]
cur.execute("INSERT INTO WIM_DC_MNG(create_id, create_date, " +
"update_id, update_date, delete_flg, " +
"dc_id, extension_info) VALUES " +
"(%s, %s, %s, %s, %s, %s, %s)", param_vals)
cur.execute('SELECT last_insert_id() FROM WIM_DC_MNG')
global ID
ID = cur.fetchall()[0][0]
self.cut_db(con, cur)
def destroy_fixtures(self):
con, cur = self.connect_db()
# Execute SQL
param_vals = ['test_create_id-0ac6cb428b23']
cur.execute("DELETE FROM WIM_DC_MNG WHERE " +
"create_id = %s", param_vals)
self.cut_db(con, cur)
def connect_db(self):
# Connect Database
con = mysql.connector.connect(
host=getattr(config, 'MYSQL_HOSTNAME', ''),
db=getattr(config, 'MYSQL_DBNAME', ''),
user=getattr(config, 'MYSQL_USERID', ''),
passwd=getattr(config, 'MYSQL_PASSWORD', ''),
buffered=True)
# Set Autocommit Off
con.autocommit = False
# Open Cursor
cur = con.cursor()
return con, cur
def cut_db(self, con, cur):
# Commit Transaction
con.commit()
# Close Cursor
cur.close()
# Close Database
con.close()
def test_select_api(self):
request_params = {
'query': {
'delete_flg': '0', 'ID': ID
},
'resource': 'dcs',
'method': 'GET',
'id': []
}
res = router.Router().routing(request_params)
status = res['status']
res_data = res['message'].decode('utf-8')
res_data = json.loads(res_data)
self.assertEqual(status, '200 OK')
self.assertEqual(len(res_data), 1)
self.assertEqual(res_data[0]['ID'], ID)
self.assertEqual(res_data[0]['create_id'],
'test_create_id-0ac6cb428b23')
self.assertEqual(res_data[0]['update_id'],
'test_update_id-0ac6cb428b23')
self.assertEqual(res_data[0]['delete_flg'], '0')
self.assertEqual(res_data[0].get('extension_info', ''), '')
for key in extension_info:
self.assertEqual(res_data[0].get(key), extension_info[key])
def test_insert_api(self):
insert_params = {
'create_id': 'test_create_id-0ac6cb428b23',
'update_id': 'test_create_id-0ac6cb428b23',
'delete_flg': 0,
'dc_id': 'dc_id-bb6d-6bb9bd380a11',
'dc_name': 'dc_name_B',
'dc_number': 1234
}
request_params = {
'body': insert_params,
'query': {},
'resource': 'dcs',
'method': 'POST',
'id': []
}
res = router.Router().routing(request_params)
status = res['status']
res_data = res['message'].decode('utf-8')
res_data = json.loads(res_data)
# Assertion
self.assertEqual(status, '200 OK')
self.assertEqual(len(res_data), 1)
self.assertTrue('ID' in res_data)
# Assertion(check select)
request_params = {
'query': {
'dc_id': 'dc_id-bb6d-6bb9bd380a11',
},
'resource': 'dcs',
'method': 'GET',
'id': []
}
res = router.Router().routing(request_params)
status = res['status']
res_data = res['message'].decode('utf-8')
res_data = json.loads(res_data)
self.assertEqual(status, '200 OK')
self.assertEqual(len(res_data), 1)
for key in insert_params:
if key == 'delete_flg':
self.assertEqual(res_data[0].get(key), str(insert_params[key]))
else:
self.assertEqual(res_data[0].get(key), insert_params[key])
def test_update_api(self):
update_params = {
'update_id': 'test_update_id-0ac6cb428b23',
'dc_id': 'dc_id-ad4c-4cc6ea276a55',
'dc_name': 'dc_name_C',
'dc_number': 5678
}
request_params = {
'body': update_params,
'query': {},
'resource': 'dcs',
'method': 'PUT',
'id': [ID]
}
res = router.Router().routing(request_params)
status = res['status']
res_data = res['message'].decode('utf-8')
res_data = json.loads(res_data)
# Assertion
self.assertEqual(status, '200 OK')
self.assertEqual(res_data, True)
# Assertion(check select)
request_params = {
'query': {
'dc_id': 'dc_id-ad4c-4cc6ea276a55',
},
'resource': 'dcs',
'method': 'GET',
'id': []
}
res = router.Router().routing(request_params)
status = res['status']
res_data = res['message'].decode('utf-8')
res_data = json.loads(res_data)
self.assertEqual(status, '200 OK')
self.assertEqual(len(res_data), 1)
for key in update_params:
if key == 'delete_flg':
self.assertEqual(res_data[0].get(key), str(update_params[key]))
else:
self.assertEqual(res_data[0].get(key), update_params[key])
def test_delete_api(self):
request_params = {
'body': {},
'query': {},
'resource': 'dcs',
'method': 'DELETE',
'id': [ID]
}
res = router.Router().routing(request_params)
status = res['status']
res_data = res['message'].decode('utf-8')
res_data = json.loads(res_data)
# Assertion
self.assertEqual(status, '200 OK')
self.assertEqual(res_data, True)
# Assertion(Select Check)
con, cur = self.connect_db()
cur.execute("SELECT ID FROM WIM_DC_MNG " +
"WHERE ID = %s", [ID])
self.assertEqual(cur.fetchall(), [])
self.cut_db(con, cur)
| [
"[email protected]"
] | |
fbd49bfeec9947ef6f83b1e9787a0081f6be9f05 | 57775b4c245723078fd43abc35320cb16f0d4cb6 | /Data structure/linked-list/delete-node-given-position.py | cc4164b336e8f1ad6093479327c26ce5514d4106 | [] | no_license | farhapartex/code-ninja | 1757a7292ac4cdcf1386fe31235d315a4895f072 | 168fdc915a4e3d3e4d6f051c798dee6ee64ea290 | refs/heads/master | 2020-07-31T16:10:43.329468 | 2020-06-18T07:00:34 | 2020-06-18T07:00:34 | 210,668,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,394 | py | """
Given a ‘key’, delete the first occurrence of this key in linked list.
To delete a node from linked list, we need to do following steps.
1) Find previous node of the node to be deleted.
2) Change the next of previous node.
3) Free memory for the node to be deleted.
"""
class Node:
def __init__(self, data):
self.data = data
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def print_list(self):
temp = self.head
while temp:
print(temp.data)
temp = temp.next
def push_front(self, node):
new_node = Node(node)
new_node.next = self.head
self.head = new_node
def insert_after(self, prev_node, new_node):
if prev_node is None:
print("Previous node must be in a LinkedList")
return
new_node = Node(new_node)
new_node.next = prev_node.next
prev_node.next = new_node
def append(self, new_data):
new_node = Node(new_data)
if self.head is None:
self.head = new_node
return
last = self.head
while last.next:
last = last.next
last.next = new_node
def deleteNodeGivenPosition(self, position):
if self.head is None:
return
temp = self.head
if position == 0:
self.head = temp.next
temp = None
return
# Find previous node of the node to be deleted
for i in range(position-1):
temp = temp.next
if temp is None:
break
# If position is more than number of nodes
if temp is None:
return
if temp.next is None:
return
# Node temp.next is the node to be deleted
# store pointer to the next of node to be deleted
next = temp.next.next
temp.next=None
# Unlink the node from linked list
temp.next = next
if __name__ == "__main__":
llist = LinkedList()
llist.append(6)
llist.push_front(10)
llist.push_front(6)
llist.push_front(11)
llist.append(20)
llist.insert_after(llist.head.next, 8)
llist.print_list()
llist.deleteNodeGivenPosition(2)
print("Linked List after Deletion at 2:")
llist.print_list() | [
"[email protected]"
] | |
ca0bf818f5d797fe169d26f5876caf9e6873172e | 197b10d75ba44b22fca29f8d69c2922b72cb8ca5 | /ship/api.py | ae8690dc1a4da94d2c96e6f66ac78b8994a82a42 | [] | no_license | webbyfox/py_master | c713c87cf4fd7d2697765211cdaefd7b49f96adc | e4b3ef5ea618b8f91c363d7f51d0e7b7064762a9 | refs/heads/master | 2021-01-11T14:45:15.060075 | 2017-01-27T13:24:58 | 2017-01-27T13:24:58 | 80,209,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,519 | py | # -*- coding: utf-8 -*-
from rest_framework import viewsets, mixins, status
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from assessment.auth import TokenAuthSupportQueryString
from .injection_setup import logic
from .serializers import ShipSerializer
class ShipViewSet(
mixins.CreateModelMixin,
mixins.ListModelMixin,
mixins.RetrieveModelMixin,
mixins.DestroyModelMixin,
viewsets.GenericViewSet
):
authentication_classes = (TokenAuthSupportQueryString,)
permission_classes = (IsAuthenticated,)
pagination_class = LimitOffsetPagination
serializer_class = ShipSerializer
default_limit = 20
def list(self, request): # pylint: disable=unused-argument
ships = self.get_queryset()
page = self.paginate_queryset(ships)
return self.get_paginated_response(page)
def get_queryset(self):
user = self.request.user
user_ids = [user.id] + self.request.query_params.getlist('user_id')
query_kwargs = {
'user_ids': user_ids,
'id': self.request.query_params.get('id'),
'ids': self.request.query_params.getlist('ids'),
'status': self.request.query_params.get('status'),
'order_by': self.request.query_params.get('order_by'),
}
ships, __ = logic.get_ships(**query_kwargs)
return ships
def create(self, request):
data = self.request.data.copy()
# We want to override the user ID to be the authenticated user.
data['user_id'] = self.request.user.id
serializer = self.serializer_class(data=data)
if serializer.is_valid():
serializer.save()
return Response(status=status.HTTP_204_NO_CONTENT)
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
def retrieve(self, request, pk=None):
ships, __ = logic.get_ships(
id=pk,
user_ids=[request.user.id],
)
return Response(self.serializer_class(ships[0]).data)
def update(self, request, pk=None):
raise NotImplementedError(
'Please implement ``ship.api:ShipViewSet.update``'
)
def destroy(self, request, pk=None): # pylint: disable=unused-argument
logic.delete_ship(id=pk)
return Response(status=status.HTTP_204_NO_CONTENT)
| [
"[email protected]"
] | |
e7cbec8407c61c7b724171aa967674dbf244853b | 89bae02f23e787416fda894a128c9abfb4986515 | /metalearning/allennlp/tests/modules/matrix_attention/cosine_matrix_attention_test.py | cff481ba8ea3a77de780b912867c54cef1eb849c | [
"Apache-2.0"
] | permissive | asheverdin/multilingual-interference | f2e64cebfffc749b080fa64860659922224e6e65 | 7bc1b5918142e3c84bea83c5a7f39e3f245172e9 | refs/heads/main | 2023-05-12T13:07:19.997696 | 2021-05-28T22:16:26 | 2021-05-28T22:16:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,518 | py | import torch
from numpy.testing import assert_almost_equal
import numpy
from allennlp.common import Params
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.modules.matrix_attention import CosineMatrixAttention
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
class TestCosineMatrixAttention(AllenNlpTestCase):
def test_can_init_cosine(self):
legacy_attention = MatrixAttention.from_params(Params({"type": "cosine"}))
isinstance(legacy_attention, CosineMatrixAttention)
def test_cosine_similarity(self):
# example use case: a batch of size 2.
# With a time element component (e.g. sentences of length 2) each word is a vector of length 3.
# It is comparing this with another input of the same type
output = CosineMatrixAttention()(
torch.FloatTensor([[[0, 0, 0], [4, 5, 6]], [[-7, -8, -9], [10, 11, 12]]]),
torch.FloatTensor([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]),
)
# For the first batch there is
# no correlation between the first words of the input matrix
# but perfect correlation for the second word
# For the second batch there is
# negative correlation for the first words
# correlation for the second word
assert_almost_equal(
output.numpy(), numpy.array([[[0, 0], [0.97, 1]], [[-1, -0.99], [0.99, 1]]]), decimal=2
)
| [
"[email protected]"
] | |
d2462ea0d850cd6935ccb6c60eff3fbb00faf7d7 | 07917881310fc81d85a2cbdf27c9b3c4fa03c694 | /python1812/python_1/17_测试_收发邮件_二维码/代码/04_验证码生成器.py | 4d493eee3597ce7e1c156d58c53c29845e19966c | [] | no_license | zaoyuaner/Learning-materials | 9bc9a127d1c6478fb6cebbb6371b1fd85427c574 | 1f468a6f63158758f7cbfe7b5df17f51e3205f04 | refs/heads/master | 2020-05-18T11:38:45.771271 | 2019-05-20T09:07:44 | 2019-05-20T09:07:44 | 184,384,050 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,363 | py | import datetime
import hashlib
from PIL import ImageFont,ImageDraw,Image
from random import randint
class VerifyCode:
def __init__(self,width=100,height=40,size=4):
"""
        :param width: width of the verification-code image
        :param height: height of the verification-code image
        :param size: number of characters in the code
"""
self.width = width if width > 0 else 100
self.height = height if height > 0 else 40
self.size = size if size > 0 else 4
        self.pen = None # drawing pen
        self.code = "" # holds the generated code string
# @property
# def code(self):
# return self.__code
# @code.setter
# def code(self,code):
# self.__code = code
def generate(self):
        # 1. create the canvas  # colors closer to 255 are lighter
im = Image.new("RGB",(self.width,self.height),self.randColor(160,255))
        # 2. create the pen
self.pen = ImageDraw.Draw(im)
        # 3. generate the random code string
self.randString()
        # 4. draw the code string
self.__drawCode()
        # 5. draw the interference points
self.__drawPoint()
        # 6. draw the interference lines
self.__drawLine()
        # 7. save the image
im.save("vc.jpg")
def __drawLine(self):
"""
        Draw the interference (noise) lines.
:return:
"""
for i in range(6):
start = (randint(1,self.width-1),randint(1,self.height-1))
end = (randint(1,self.width-1),randint(1,self.height-1))
self.pen.line([start,end],fill=self.randColor(50,150),width = 1)
def __drawPoint(self):
"""
        Draw the interference (noise) points.
:return:
"""
for i in range(200):
x = randint(1,self.width-1)
y = randint(1,self.height-1)
self.pen.point((x,y),fill= self.randColor(30,100))
def __drawCode(self):
"""
        Draw the code string onto the image.
:return:
"""
myFont = ImageFont.truetype("MSYH.TTF",size=20,encoding="UTF-8")
for i in range(self.size):
            x = 15 + i*(self.width - 20)/self.size # distribute the characters evenly across the width
            y = randint(5,10) # random vertical offset
self.pen.text((x,y),self.code[i],fill = self.randColor(0,60),font = myFont)
def randString(self):
"""
        Generate a random string of digits.
:return:
"""
result = ""
for i in range(self.size):
result += str(randint(0,9))
self.code = result
    def randColor(self,low,high): # random background color
return randint(low,high),randint(low,high),randint(low,high)
# class StrCode(VerifyCode):
# def randString(self):
# s1 =hashlib.md5(b"2314").hexdigest()
# print(s1)
# self.code = s1[:self.size]
if __name__ == "__main__":
vc = VerifyCode()
# vc = StrCode()
vc.generate()
print(vc.code)
| [
"[email protected]"
] | |
edfcd0b67010b318be752683aea47602efef2e0e | 9b57429efa72dbfa2ead9ae8d98a148475264aef | /dataservice/zmq/UPcomputer_part/data_process_0mqsubsys/codetestfile.py | 5b92ab166e2e97779e29006953d6456126db19c8 | [] | no_license | Scottars/nis_website | 7d78b1ab8647ebf17bc2b020660a56ac6f6a039f | 2025e428dd65dba06c95738233978604ee011570 | refs/heads/master | 2022-03-07T19:04:15.565128 | 2021-01-19T16:03:50 | 2021-01-19T16:03:50 | 218,421,853 | 0 | 0 | null | 2022-03-02T06:49:57 | 2019-10-30T01:58:29 | JavaScript | UTF-8 | Python | false | false | 155 | py | import struct
b = b'exp' + struct.pack('!f', 12)
print(b)
print(b[0:3])
if b[0:3] == b'exp':
exp_id = struct.unpack('!f', b[3:7])[0]
print(exp_id) | [
"[email protected]"
] | |
82ecfd01834d11e1c0de1b980af3a9cafb7d5d79 | d0fec74acfbfdee1b662736731c1cc988e2ba2ee | /problem_44/p044.py | 45a4578f96261bb8aeac04304edbc1ab5ebc2014 | [] | no_license | msztylko/project-Euler | fdd0cfefbe88b63f6dbd2d08f1cd59270b9e1735 | b3f5ce828ccc6662c100dd27fa295fc8afa22f6e | refs/heads/master | 2021-11-23T02:50:19.333259 | 2021-10-31T17:52:28 | 2021-10-31T17:52:28 | 195,980,596 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,851 | py | import itertools, sys
if sys.version_info.major == 2:
range = xrange
def compute():
pentanum = PentagonalNumberHelper()
min_d = None # None means not found yet, positive number means found a candidate
# For each upper pentagonal number index, going upward
for i in itertools.count(2):
pent_i = pentanum.term(i)
# If the next number down is at least as big as a found difference, then conclude searching
if min_d is not None and pent_i - pentanum.term(i - 1) >= min_d:
break
# For each lower pentagonal number index, going downward
for j in range(i - 1, 0, -1):
pent_j = pentanum.term(j)
diff = pent_i - pent_j
# If the difference is at least as big as a found difference, then stop testing lower pentagonal numbers
if min_d is not None and diff >= min_d:
break
elif pentanum.is_term(pent_i + pent_j) and pentanum.is_term(diff):
min_d = diff # Found a smaller difference
return str(min_d)
# Provides memoization for generating and testing pentagonal numbers.
class PentagonalNumberHelper(object):
def __init__(self):
self.term_list = [0]
self.term_set = set()
def term(self, x):
assert x > 0
while len(self.term_list) <= x:
n = len(self.term_list)
term = (n * (n * 3 - 1)) >> 1
self.term_list.append(term)
self.term_set.add(term)
return self.term_list[x]
def is_term(self, y):
assert y > 0
while self.term_list[-1] < y:
n = len(self.term_list)
term = (n * (n * 3 - 1)) >> 1
self.term_list.append(term)
self.term_set.add(term)
return y in self.term_set
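# A small sketch of the memoized helper's interface. Pentagonal numbers are
# P(n) = n * (3n - 1) / 2, so the first terms are 1, 5, 12, 22, 35:
#
#     helper = PentagonalNumberHelper()
#     helper.term(2)      # -> 5
#     helper.is_term(22)  # -> True
#     helper.is_term(23)  # -> False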
if __name__ == "__main__":
print(compute())
| [
"[email protected]"
] | |
81678e4f401442962478ab90127c24b61b21e897 | c074ce302e0a2a09ebe8b0a94e342380afbaa911 | /beakjoon_PS/no2579_2.py | 7c00e40144c179d3cbf2eca5fbd8ec8eb8d546f6 | [] | no_license | elrion018/CS_study | eeea7a48e9e9b116ddf561ebf10633670d305722 | 3d5478620c4d23343ae0518d27920b3211f686fd | refs/heads/master | 2021-06-10T13:35:20.258335 | 2021-04-25T10:12:17 | 2021-04-25T10:12:17 | 169,424,097 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py |
import sys
N = int(sys.stdin.readline())
stair = []
for _ in range(N):
stair.append(int(sys.stdin.readline()))
dp = [[0, 0] for _ in range(N)]
if N > 2:
dp[0][0] = stair[0]
dp[1][0] = stair[1]
dp[1][1] = stair[0] + stair[1]
dp[2][0] = stair[0] + stair[2]
dp[2][1] = stair[1] + stair[2]
for i in range(2, N):
dp[i][0] = max(dp[i-2][0], dp[i-2][1]) + stair[i]
dp[i][1] = dp[i-1][0] + stair[i]
print(max(dp[N-1][0], dp[N-1][1]))
elif N == 2:
print(stair[0]+stair[1])
elif N == 1:
print(stair[0])
| [
"[email protected]"
] | |
24fa38cb1a5db921dd96c5f040aa58a9b77b65e4 | 7950e35b32e252690a82faf5aefc06e433e9bd34 | /cleverhans/serial.py | 9fc379d763ec15b0686422d9d09b7d66f61d0654 | [] | no_license | machanic/cleverhans_adversarial_example_gen | b717da4b803cec2b67d0fc730392b137d20682d5 | d5300f8a1228b4c9fe26568a956f06c36df03627 | refs/heads/master | 2022-11-21T19:10:01.258478 | 2020-07-21T15:45:03 | 2020-07-21T15:45:03 | 173,035,907 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,230 | py | """Serialization functionality.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
import joblib
import tensorflow as tf
from cleverhans.model import Model
from cleverhans.utils import ordered_union
from cleverhans.utils import safe_zip
class PicklableVariable(object):
"""
A wrapper around a Variable that makes it picklable.
The name of the Variable will not be reliable, only the value. Models
intended to be picklable should identify variables by referencing
Python objects rather than by using TensorFlow's names.
TensorFlow Variables have different values associated with each Session.
For this class, the value associated with the default Session will be used
for both saving and loading, so both operations require that a default
Session has been selected.
Pickle is not secure. Unpickle only files you made yourself.
See cleverhans_tutorials/mnist_tutorial_picklable.py for examples of a
complete model training, pickling, and unpickling process using
PicklableVariable.
See cleverhans.picklable_model for models built using PicklableVariable.
"""
def __init__(self, *args, **kwargs):
self.var = tf.Variable(*args, **kwargs)
def __getstate__(self):
sess = tf.get_default_session()
if sess is None:
raise RuntimeError("PicklableVariable requires a default "
"TensorFlow session")
return {'var': sess.run(self.var)}
def __setstate__(self, d):
self.var = tf.Variable(d['var'])
sess = tf.get_default_session()
if sess is None:
raise RuntimeError("PicklableVariable requires a default "
"TensorFlow session")
sess.run(self.var.initializer)
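# A minimal pickling sketch for PicklableVariable, assuming a default session
# is active for both pickling and unpickling (as the class docstring requires):
#
#     import pickle
#     with tf.Session() as sess:
#         v = PicklableVariable(tf.zeros((2, 2)))
#         sess.run(tf.global_variables_initializer())
#         blob = pickle.dumps(v)
#     with tf.Session().as_default():
#         restored = pickle.loads(blob)  # rebuilds and initializes the Variable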
class NoRefModel(Model):
"""
A Model that can be pickled because it contains no references to any
Variables (e.g. it identifies Variables only by name).
The Model must be able to find all of its Variables via get_vars
for them to be pickled.
Note that NoRefModel may have different Variable names after it is
restored, e.g. if the unpickling is run with a different enclosing
scope. NoRefModel will still work in these circumstances as long
as get_params returns the same order of Variables after unpickling
as it did before pickling.
See also cleverhans.picklable_model for a different, complementary
pickling strategy: models that can be pickled because they use *only*
references to Variables and work regardless of Variable names.
"""
def __getstate__(self):
# Serialize everything except the Variables
out = self.__dict__.copy()
# The base Model class adds this tf reference to self
# We mustn't pickle anything tf, this will need to be
# regenerated after the model is reloaded.
if "_dummy_input" in out:
del out["_dummy_input"]
# Add the Variables
sess = tf.get_default_session()
if sess is None:
raise RuntimeError("NoRefModel requires a default "
"TensorFlow session")
tf_variables = self.get_vars()
out[VARS] = sess.run(tf_variables)
out[VAR_NAMES] = [var.name for var in tf_variables]
return out
def __setstate__(self, d):
tf_variables = d[VARS]
del d[VARS]
tf_variable_names = None
# older joblib files may not have "_tf_variable_names"
if VAR_NAMES in d:
tf_variable_names = d[VAR_NAMES]
del d[VAR_NAMES]
else:
warnings.warn("This joblib file has no " + VAR_NAMES + " field. "
"The field may become required on or after 2019-04-11."
"You can make your file compatible with the new format by"
" loading the file and re-saving it.")
# Deserialize everything except the Variables
self.__dict__ = d
# Deserialize the Variables
sess = tf.get_default_session()
if sess is None:
raise RuntimeError("NoRefModel requires a default "
"TensorFlow session")
cur_vars = self.get_vars()
if len(cur_vars) != len(tf_variables):
print("Model format mismatch")
print("Current model has " + str(len(cur_vars)) + " variables")
print("Saved model has " + str(len(tf_variables)) + " variables")
print("Names of current vars:")
for var in cur_vars:
print("\t" + var.name)
if tf_variable_names is not None:
print("Names of saved vars:")
for name in tf_variable_names:
print("\t" + name)
else:
print("Saved vars use old format, no names available for them")
assert False
found = [False] * len(cur_vars)
if tf_variable_names is not None:
# New version using the names to handle changes in ordering
for value, name in safe_zip(tf_variables, tf_variable_names):
value_found = False
for idx, cur_var in enumerate(cur_vars):
if cur_var.name == name:
assert not found[idx]
value_found = True
found[idx] = True
cur_var.load(value, sess)
break
assert value_found
assert all(found)
else:
# Old version that works if and only if the order doesn't change
for var, value in safe_zip(cur_vars, tf_variables):
var.load(value, sess)
def get_vars(self):
"""
Provides access to the model's Variables.
This may include Variables that are not parameters, such as batch
norm running moments.
:return: A list of all Variables defining the model.
"""
# Catch eager execution and assert function overload.
try:
if tf.executing_eagerly():
raise NotImplementedError("For Eager execution - get_vars "
"must be overridden.")
except AttributeError:
pass
done = False
tried_to_make_params = False
while not done:
# Most models in cleverhans use only trainable variables and do not
# make sure the other collections are updated correctly.
trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
self.scope + "/")
# When wrapping other code, such as the CIFAR 10 challenge models,
# we need to make sure we get the batch norm running averages as well
# as the trainable variables.
model_vars = tf.get_collection(tf.GraphKeys.MODEL_VARIABLES,
self.scope + "/")
scope_vars = ordered_union(trainable_vars, model_vars)
if len(scope_vars) > 0:
done = True
else:
assert not tried_to_make_params
tried_to_make_params = True
self.make_params()
# Make sure no variables have been added or removed
if hasattr(self, "num_vars"):
assert self.num_vars == len(scope_vars)
else:
self.num_vars = len(scope_vars)
return scope_vars
def save(filepath, obj):
"""Saves an object to the specified filepath using joblib.
joblib is like pickle but will save NumPy arrays as separate files for
greater efficiency.
:param filepath: str, path to save to
:obj filepath: object to save
"""
joblib.dump(obj, filepath)
def load(filepath):
"""Returns an object stored via `save`
"""
obj = joblib.load(filepath)
return obj
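# A round-trip sketch with the helpers above (the path is illustrative):
#
#     save("params.joblib", {"weights": [1.0, 2.0]})
#     restored = load("params.joblib")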
VARS = "_tf_variables"
VAR_NAMES = "_tf_variable_names"
| [
"[email protected]"
] | |
f2e9286044675907e079b6077b71208aafa5528d | a9b8f84c55aa64d4721de11e34e6fc300453be1b | /public/packages/pymongo/v28/pymongo/common.py | 7c53646dd809b43309aad38e4e69fa55b96ca912 | [] | no_license | xuning992/tfty | f17273db407bb5ca87f583b114a42eb8e83d67fc | 20785621b933d2d6bdc293e953710faef4268bf6 | refs/heads/master | 2022-12-13T22:39:14.696326 | 2017-11-19T15:23:11 | 2017-11-19T15:23:11 | 111,306,251 | 0 | 0 | null | 2022-07-05T21:08:37 | 2017-11-19T15:11:40 | Python | UTF-8 | Python | false | false | 27,865 | py | # Copyright 2011-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Functions and classes common to multiple pymongo modules."""
import sys
import warnings
from . import read_preferences
from .auth import MECHANISMS
from .read_preferences import ReadPreference
from .errors import ConfigurationError
from ..bson.binary import (OLD_UUID_SUBTYPE, UUID_SUBTYPE,
JAVA_LEGACY, CSHARP_LEGACY)
HAS_SSL = True
try:
import ssl
except ImportError:
HAS_SSL = False
# Jython 2.7 includes an incomplete ssl module. See PYTHON-498.
if sys.platform.startswith('java'):
HAS_SSL = False
# Defaults until we connect to a server and get updated limits.
MAX_BSON_SIZE = 16 * (1024 ** 2)
MAX_MESSAGE_SIZE = 2 * MAX_BSON_SIZE
MIN_WIRE_VERSION = 0
MAX_WIRE_VERSION = 0
MAX_WRITE_BATCH_SIZE = 1000
# What this version of PyMongo supports.
MIN_SUPPORTED_WIRE_VERSION = 0
MAX_SUPPORTED_WIRE_VERSION = 3
# mongod/s 2.6 and above return code 59 when a
# command doesn't exist. mongod versions previous
# to 2.6 and mongos 2.4.x return no error code
# when a command does exist. mongos versions previous
# to 2.4.0 return code 13390 when a command does not
# exist.
COMMAND_NOT_FOUND_CODES = (59, 13390, None)
def raise_config_error(key, dummy):
"""Raise ConfigurationError with the given key name."""
raise ConfigurationError("Unknown option %s" % (key,))
# Mapping of URI uuid representation options to valid subtypes.
_UUID_SUBTYPES = {
'standard': UUID_SUBTYPE,
'pythonLegacy': OLD_UUID_SUBTYPE,
'javaLegacy': JAVA_LEGACY,
'csharpLegacy': CSHARP_LEGACY
}
def validate_boolean(option, value):
"""Validates that 'value' is 'true' or 'false'.
"""
if isinstance(value, bool):
return value
elif isinstance(value, basestring):
if value not in ('true', 'false'):
raise ConfigurationError("The value of %s must be "
"'true' or 'false'" % (option,))
return value == 'true'
raise TypeError("Wrong type for %s, value must be a boolean" % (option,))
def validate_integer(option, value):
"""Validates that 'value' is an integer (or basestring representation).
"""
if isinstance(value, (int, long)):
return value
elif isinstance(value, basestring):
if not value.isdigit():
raise ConfigurationError("The value of %s must be "
"an integer" % (option,))
return int(value)
raise TypeError("Wrong type for %s, value must be an integer" % (option,))
def validate_positive_integer(option, value):
"""Validate that 'value' is a positive integer.
"""
val = validate_integer(option, value)
if val < 0:
raise ConfigurationError("The value of %s must be "
"a positive integer" % (option,))
return val
def validate_readable(option, value):
"""Validates that 'value' is file-like and readable.
"""
if value is None:
return value
    # First make sure it's a string; on py3.3 open(True, 'r') succeeds
# Used in ssl cert checking due to poor ssl module error reporting
value = validate_basestring(option, value)
open(value, 'r').close()
return value
def validate_cert_reqs(option, value):
"""Validate the cert reqs are valid. It must be None or one of the three
values ``ssl.CERT_NONE``, ``ssl.CERT_OPTIONAL`` or ``ssl.CERT_REQUIRED``"""
if value is None:
return value
if HAS_SSL:
if value in (ssl.CERT_NONE, ssl.CERT_OPTIONAL, ssl.CERT_REQUIRED):
return value
raise ConfigurationError("The value of %s must be one of: "
"`ssl.CERT_NONE`, `ssl.CERT_OPTIONAL` or "
"`ssl.CERT_REQUIRED" % (option,))
else:
raise ConfigurationError("The value of %s is set but can't be "
"validated. The ssl module is not available"
% (option,))
def validate_positive_integer_or_none(option, value):
"""Validate that 'value' is a positive integer or None.
"""
if value is None:
return value
return validate_positive_integer(option, value)
def validate_basestring(option, value):
"""Validates that 'value' is an instance of `basestring`.
"""
if isinstance(value, basestring):
return value
raise TypeError("Wrong type for %s, value must be an "
"instance of %s" % (option, basestring.__name__))
def validate_basestring_or_none(option, value):
"""Validates that 'value' is an instance of `basestring` or `None`.
"""
if value is None:
return value
return validate_basestring(option, value)
def validate_int_or_basestring(option, value):
"""Validates that 'value' is an integer or string.
"""
if isinstance(value, (int, long)):
return value
elif isinstance(value, basestring):
if value.isdigit():
return int(value)
return value
raise TypeError("Wrong type for %s, value must be an "
"integer or a string" % (option,))
def validate_positive_float(option, value):
"""Validates that 'value' is a float, or can be converted to one, and is
positive.
"""
err = ConfigurationError("%s must be a positive int or float" % (option,))
try:
value = float(value)
except (ValueError, TypeError):
raise err
# float('inf') doesn't work in 2.4 or 2.5 on Windows, so just cap floats at
# one billion - this is a reasonable approximation for infinity
if not 0 < value < 1e9:
raise err
return value
def validate_timeout_or_none(option, value):
"""Validates a timeout specified in milliseconds returning
a value in floating point seconds.
"""
if value is None:
return value
return validate_positive_float(option, value) / 1000.0
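# Illustrative sketch (editor-added, not part of the original module): how the URI
# option validators normalize user-supplied values.
def _example_uri_option_validation():
    assert validate_timeout_or_none('connecttimeoutms', '2000') == 2.0  # ms -> float seconds
    assert validate_boolean('ssl', 'true') is True                      # string form accepted
    assert validate_positive_integer('wtimeout', '500') == 500          # digit strings coerced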
def validate_read_preference(dummy, value):
"""Validate read preference for a ReplicaSetConnection.
"""
if value in read_preferences.modes:
return value
# Also allow string form of enum for uri_parser
try:
return read_preferences.mongos_enum(value)
except ValueError:
raise ConfigurationError("Not a valid read preference")
def validate_tag_sets(dummy, value):
"""Validate tag sets for a ReplicaSetConnection.
"""
if value is None:
return [{}]
if not isinstance(value, list):
raise ConfigurationError((
"Tag sets %s invalid, must be a list") % repr(value))
if len(value) == 0:
raise ConfigurationError((
"Tag sets %s invalid, must be None or contain at least one set of"
" tags") % repr(value))
for tags in value:
if not isinstance(tags, dict):
raise ConfigurationError(
"Tag set %s invalid, must be a dict" % repr(tags))
return value
def validate_auth_mechanism(option, value):
"""Validate the authMechanism URI option.
"""
# CRAM-MD5 is for server testing only. Undocumented,
# unsupported, may be removed at any time. You have
# been warned.
if value not in MECHANISMS and value != 'CRAM-MD5':
raise ConfigurationError("%s must be in "
"%s" % (option, MECHANISMS))
return value
def validate_uuid_representation(dummy, value):
"""Validate the uuid representation option selected in the URI.
"""
if value not in _UUID_SUBTYPES.keys():
raise ConfigurationError("%s is an invalid UUID representation. "
"Must be one of "
"%s" % (value, _UUID_SUBTYPES.keys()))
return _UUID_SUBTYPES[value]
def validate_uuid_subtype(dummy, value):
"""Validate the uuid subtype option, a numerical value whose acceptable
values are defined in bson.binary."""
if value not in _UUID_SUBTYPES.values():
raise ConfigurationError("Not a valid setting for uuid_subtype.")
return value
_MECHANISM_PROPS = frozenset(['SERVICE_NAME'])
def validate_auth_mechanism_properties(option, value):
"""Validate authMechanismProperties."""
value = validate_basestring(option, value)
props = {}
for opt in value.split(','):
try:
key, val = opt.split(':')
if key not in _MECHANISM_PROPS:
raise ConfigurationError("%s is not a supported auth "
"mechanism property. Must be one of "
"%s." % (key, tuple(_MECHANISM_PROPS)))
props[key] = val
except ValueError:
raise ConfigurationError("auth mechanism properties must be "
"key:value pairs like SERVICE_NAME:"
"mongodb, not %s." % (opt,))
return props
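# Illustrative sketch (editor-added, not part of the original module): the
# authMechanismProperties URI value is a comma-separated list of key:value pairs.
def _example_auth_mechanism_properties():
    props = validate_auth_mechanism_properties(
        'authmechanismproperties', 'SERVICE_NAME:mongodb')
    assert props == {'SERVICE_NAME': 'mongodb'}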
# journal is an alias for j,
# wtimeoutms is an alias for wtimeout,
# readpreferencetags is an alias for tag_sets.
VALIDATORS = {
'replicaset': validate_basestring_or_none,
'slaveok': validate_boolean,
'slave_okay': validate_boolean,
'safe': validate_boolean,
'w': validate_int_or_basestring,
'wtimeout': validate_integer,
'wtimeoutms': validate_integer,
'fsync': validate_boolean,
'j': validate_boolean,
'journal': validate_boolean,
'connecttimeoutms': validate_timeout_or_none,
'sockettimeoutms': validate_timeout_or_none,
'waitqueuetimeoutms': validate_timeout_or_none,
'waitqueuemultiple': validate_positive_integer_or_none,
'ssl': validate_boolean,
'ssl_keyfile': validate_readable,
'ssl_certfile': validate_readable,
'ssl_cert_reqs': validate_cert_reqs,
'ssl_ca_certs': validate_readable,
'readpreference': validate_read_preference,
'read_preference': validate_read_preference,
'readpreferencetags': validate_tag_sets,
'tag_sets': validate_tag_sets,
'secondaryacceptablelatencyms': validate_positive_float,
'secondary_acceptable_latency_ms': validate_positive_float,
'auto_start_request': validate_boolean,
'use_greenlets': validate_boolean,
'authmechanism': validate_auth_mechanism,
'authsource': validate_basestring,
'gssapiservicename': validate_basestring,
'authmechanismproperties': validate_auth_mechanism_properties,
'uuidrepresentation': validate_uuid_representation,
'socketkeepalive': validate_boolean
}
_AUTH_OPTIONS = frozenset(['gssapiservicename', 'authmechanismproperties'])
def validate_auth_option(option, value):
"""Validate optional authentication parameters.
"""
lower, value = validate(option, value)
if lower not in _AUTH_OPTIONS:
raise ConfigurationError('Unknown '
'authentication option: %s' % (option,))
return lower, value
def validate(option, value):
"""Generic validation function.
"""
lower = option.lower()
validator = VALIDATORS.get(lower, raise_config_error)
value = validator(option, value)
return lower, value
SAFE_OPTIONS = frozenset([
'w',
'wtimeout',
'wtimeoutms',
'fsync',
'j',
'journal'
])
class WriteConcern(dict):
def __init__(self, *args, **kwargs):
"""A subclass of dict that overrides __setitem__ to
validate write concern options.
"""
super(WriteConcern, self).__init__(*args, **kwargs)
def __setitem__(self, key, value):
if key not in SAFE_OPTIONS:
raise ConfigurationError("%s is not a valid write "
"concern option." % (key,))
key, value = validate(key, value)
super(WriteConcern, self).__setitem__(key, value)
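# Illustrative sketch (editor-added, not part of the original module): WriteConcern
# validates keys and values on assignment; unknown options are rejected.
def _example_write_concern_usage():
    wc = WriteConcern()
    wc['w'] = 2             # validated via validate_int_or_basestring
    wc['wtimeout'] = 1000   # validated via validate_integer
    try:
        wc['bad_option'] = True
    except ConfigurationError:
        pass                # only the SAFE_OPTIONS keys are accepted
    return wc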
class BaseObject(object):
"""A base class that provides attributes and methods common
to multiple pymongo classes.
SHOULD NOT BE USED BY DEVELOPERS EXTERNAL TO MONGODB.
"""
def __init__(self, **options):
self.__slave_okay = False
self.__read_pref = ReadPreference.PRIMARY
self.__tag_sets = [{}]
self.__secondary_acceptable_latency_ms = 15
self.__safe = None
self.__uuid_subtype = OLD_UUID_SUBTYPE
self.__write_concern = WriteConcern()
self.__set_options(options)
if (self.__read_pref == ReadPreference.PRIMARY
and self.__tag_sets != [{}]):
raise ConfigurationError(
"ReadPreference PRIMARY cannot be combined with tags")
# If safe hasn't been implicitly set by write concerns then set it.
if self.__safe is None:
if options.get("w") == 0:
self.__safe = False
else:
self.__safe = validate_boolean('safe',
options.get("safe", True))
# Note: 'safe' is always passed by Connection and ReplicaSetConnection
# Always do the most "safe" thing, but warn about conflicts.
if self.__safe and options.get('w') == 0:
warnings.warn("Conflicting write concerns: %s. Write concern "
"options were configured, but w=0 disables all "
"other options." % self.write_concern,
UserWarning)
def __set_safe_option(self, option, value):
"""Validates and sets getlasterror options for this
object (Connection, Database, Collection, etc.)
"""
if value is None:
self.__write_concern.pop(option, None)
else:
self.__write_concern[option] = value
if option != "w" or value != 0:
self.__safe = True
def __set_options(self, options):
"""Validates and sets all options passed to this object."""
for option, value in options.iteritems():
if option in ('slave_okay', 'slaveok'):
self.__slave_okay = validate_boolean(option, value)
elif option in ('read_preference', "readpreference"):
self.__read_pref = validate_read_preference(option, value)
elif option in ('tag_sets', 'readpreferencetags'):
self.__tag_sets = validate_tag_sets(option, value)
elif option == 'uuidrepresentation':
self.__uuid_subtype = validate_uuid_subtype(option, value)
elif option in (
'secondaryacceptablelatencyms',
'secondary_acceptable_latency_ms'
):
self.__secondary_acceptable_latency_ms = \
validate_positive_float(option, value)
elif option in SAFE_OPTIONS:
if option == 'journal':
self.__set_safe_option('j', value)
elif option == 'wtimeoutms':
self.__set_safe_option('wtimeout', value)
else:
self.__set_safe_option(option, value)
def __set_write_concern(self, value):
"""Property setter for write_concern."""
if not isinstance(value, dict):
raise ConfigurationError("write_concern must be an "
"instance of dict or a subclass.")
# Make a copy here to avoid users accidentally setting the
# same dict on multiple instances.
wc = WriteConcern()
for k, v in value.iteritems():
# Make sure we validate each option.
wc[k] = v
self.__write_concern = wc
def __get_write_concern(self):
"""The default write concern for this instance.
Supports dict style access for getting/setting write concern
options. Valid options include:
- `w`: (integer or string) If this is a replica set, write operations
will block until they have been replicated to the specified number
or tagged set of servers. `w=<int>` always includes the replica set
primary (e.g. w=3 means write to the primary and wait until
replicated to **two** secondaries). **Setting w=0 disables write
acknowledgement and all other write concern options.**
- `wtimeout`: (integer) Used in conjunction with `w`. Specify a value
in milliseconds to control how long to wait for write propagation
to complete. If replication does not complete in the given
timeframe, a timeout exception is raised.
- `j`: If ``True`` block until write operations have been committed
to the journal. Cannot be used in combination with `fsync`. Prior
to MongoDB 2.6 this option was ignored if the server was running
without journaling. Starting with MongoDB 2.6 write operations will
fail with an exception if this option is used when the server is
running without journaling.
- `fsync`: If ``True`` and the server is running without journaling,
blocks until the server has synced all data files to disk. If the
server is running with journaling, this acts the same as the `j`
option, blocking until write operations have been committed to the
journal. Cannot be used in combination with `j`.
>>> m = pymongo.MongoClient()
>>> m.write_concern
{}
>>> m.write_concern = {'w': 2, 'wtimeout': 1000}
>>> m.write_concern
{'wtimeout': 1000, 'w': 2}
>>> m.write_concern['j'] = True
>>> m.write_concern
{'wtimeout': 1000, 'j': True, 'w': 2}
>>> m.write_concern = {'j': True}
>>> m.write_concern
{'j': True}
>>> # Disable write acknowledgement and write concern
...
>>> m.write_concern['w'] = 0
.. note:: Accessing :attr:`write_concern` returns its value
(a subclass of :class:`dict`), not a copy.
.. warning:: If you are using :class:`~pymongo.connection.Connection`
or :class:`~pymongo.replica_set_connection.ReplicaSetConnection`
make sure you explicitly set ``w`` to 1 (or a greater value) or
:attr:`safe` to ``True``. Unlike calling
:meth:`set_lasterror_options`, setting an option in
:attr:`write_concern` does not implicitly set :attr:`safe`
to ``True``.
"""
# To support dict style access we have to return the actual
# WriteConcern here, not a copy.
return self.__write_concern
write_concern = property(__get_write_concern, __set_write_concern)
def __get_slave_okay(self):
"""DEPRECATED. Use :attr:`read_preference` instead.
.. versionchanged:: 2.1
Deprecated slave_okay.
.. versionadded:: 2.0
"""
return self.__slave_okay
def __set_slave_okay(self, value):
"""Property setter for slave_okay"""
warnings.warn("slave_okay is deprecated. Please use "
"read_preference instead.", DeprecationWarning,
stacklevel=2)
self.__slave_okay = validate_boolean('slave_okay', value)
slave_okay = property(__get_slave_okay, __set_slave_okay)
def __get_read_pref(self):
"""The read preference mode for this instance.
See :class:`~pymongo.read_preferences.ReadPreference` for
available options.
.. versionadded:: 2.1
"""
return self.__read_pref
def __set_read_pref(self, value):
"""Property setter for read_preference"""
self.__read_pref = validate_read_preference('read_preference', value)
read_preference = property(__get_read_pref, __set_read_pref)
def __get_acceptable_latency(self):
"""Any replica-set member whose ping time is within
secondary_acceptable_latency_ms of the nearest member may accept
reads. Defaults to 15 milliseconds.
See :class:`~pymongo.read_preferences.ReadPreference`.
.. versionadded:: 2.3
.. note:: ``secondary_acceptable_latency_ms`` is ignored when talking
to a replica set *through* a mongos. The equivalent is the
localThreshold_ command line option.
.. _localThreshold: http://docs.mongodb.org/manual/reference/mongos/#cmdoption-mongos--localThreshold
"""
return self.__secondary_acceptable_latency_ms
def __set_acceptable_latency(self, value):
"""Property setter for secondary_acceptable_latency_ms"""
self.__secondary_acceptable_latency_ms = (validate_positive_float(
'secondary_acceptable_latency_ms', value))
secondary_acceptable_latency_ms = property(
__get_acceptable_latency, __set_acceptable_latency)
def __get_tag_sets(self):
"""Set ``tag_sets`` to a list of dictionaries like [{'dc': 'ny'}] to
read only from members whose ``dc`` tag has the value ``"ny"``.
To specify a priority-order for tag sets, provide a list of
tag sets: ``[{'dc': 'ny'}, {'dc': 'la'}, {}]``. A final, empty tag
set, ``{}``, means "read from any member that matches the mode,
ignoring tags." ReplicaSetConnection tries each set of tags in turn
until it finds a set of tags with at least one matching member.
.. seealso:: `Data-Center Awareness
<http://www.mongodb.org/display/DOCS/Data+Center+Awareness>`_
.. versionadded:: 2.3
"""
return self.__tag_sets
def __set_tag_sets(self, value):
"""Property setter for tag_sets"""
self.__tag_sets = validate_tag_sets('tag_sets', value)
tag_sets = property(__get_tag_sets, __set_tag_sets)
def __get_uuid_subtype(self):
"""This attribute specifies which BSON Binary subtype is used when
storing UUIDs. Historically UUIDs have been stored as BSON Binary
subtype 3. This attribute is used to switch to the newer BSON Binary
subtype 4. It can also be used to force legacy byte order and subtype
compatibility with the Java and C# drivers. See the :mod:`bson.binary`
module for all options."""
return self.__uuid_subtype
def __set_uuid_subtype(self, value):
"""Sets the BSON Binary subtype to be used when storing UUIDs."""
self.__uuid_subtype = validate_uuid_subtype("uuid_subtype", value)
uuid_subtype = property(__get_uuid_subtype, __set_uuid_subtype)
def __get_safe(self):
"""**DEPRECATED:** Use the 'w' :attr:`write_concern` option instead.
Use getlasterror with every write operation?
.. versionadded:: 2.0
"""
return self.__safe
def __set_safe(self, value):
"""Property setter for safe"""
warnings.warn("safe is deprecated. Please use the"
" 'w' write_concern option instead.",
DeprecationWarning, stacklevel=2)
self.__safe = validate_boolean('safe', value)
safe = property(__get_safe, __set_safe)
def get_lasterror_options(self):
"""DEPRECATED: Use :attr:`write_concern` instead.
Returns a dict of the getlasterror options set on this instance.
.. versionchanged:: 2.4
Deprecated get_lasterror_options.
.. versionadded:: 2.0
"""
warnings.warn("get_lasterror_options is deprecated. Please use "
"write_concern instead.", DeprecationWarning,
stacklevel=2)
return self.__write_concern.copy()
def set_lasterror_options(self, **kwargs):
"""DEPRECATED: Use :attr:`write_concern` instead.
Set getlasterror options for this instance.
Valid options include j=<bool>, w=<int/string>, wtimeout=<int>,
and fsync=<bool>. Implies safe=True.
:Parameters:
- `**kwargs`: Options should be passed as keyword
arguments (e.g. w=2, fsync=True)
.. versionchanged:: 2.4
Deprecated set_lasterror_options.
.. versionadded:: 2.0
"""
warnings.warn("set_lasterror_options is deprecated. Please use "
"write_concern instead.", DeprecationWarning,
stacklevel=2)
for key, value in kwargs.iteritems():
self.__set_safe_option(key, value)
def unset_lasterror_options(self, *options):
"""DEPRECATED: Use :attr:`write_concern` instead.
Unset getlasterror options for this instance.
If no options are passed unsets all getlasterror options.
This does not set `safe` to False.
:Parameters:
- `*options`: The list of options to unset.
.. versionchanged:: 2.4
Deprecated unset_lasterror_options.
.. versionadded:: 2.0
"""
warnings.warn("unset_lasterror_options is deprecated. Please use "
"write_concern instead.", DeprecationWarning,
stacklevel=2)
if len(options):
for option in options:
self.__write_concern.pop(option, None)
else:
self.__write_concern = WriteConcern()
def _get_wc_override(self):
"""Get write concern override.
Used in internal methods that **must** do acknowledged write ops.
We don't want to override user write concern options if write concern
is already enabled.
"""
if self.safe and self.__write_concern.get('w') != 0:
return {}
return {'w': 1}
def _get_write_mode(self, safe=None, **options):
"""Get the current write mode.
Determines if the current write is safe or not based on the
passed in or inherited safe value, write_concern values, or
passed options.
:Parameters:
- `safe`: check that the operation succeeded?
- `**options`: overriding write concern options.
.. versionadded:: 2.3
"""
if safe is not None:
warnings.warn("The safe parameter is deprecated. Please use "
"write concern options instead.", DeprecationWarning,
stacklevel=3)
validate_boolean('safe', safe)
# Passed options override collection level defaults.
if safe is not None or options:
if safe or options:
if not options:
options = self.__write_concern.copy()
                # Backwards compatibility edge case. Call getLastError
# with no options if safe=True was passed but collection
# level defaults have been disabled with w=0.
# These should be equivalent:
# Connection(w=0).foo.bar.insert({}, safe=True)
# MongoClient(w=0).foo.bar.insert({}, w=1)
if options.get('w') == 0:
return True, {}
# Passing w=0 overrides passing safe=True.
return options.get('w') != 0, options
return False, {}
# Fall back to collection level defaults.
# w=0 takes precedence over self.safe = True
if self.__write_concern.get('w') == 0:
return False, {}
elif self.safe or self.__write_concern.get('w', 0) != 0:
return True, self.__write_concern.copy()
return False, {}
| [
"[email protected]"
] | |
aa4c1d64ab5007478c6035cf4a0c3268d542695f | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_tow.py | bbb4161da8d142413231367d45dc13fd41964c06 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py |
# class header
class _TOW():
def __init__(self,):
self.name = "TOW"
self.definitions = [u"to pull someone's vehicle using a rope or chain tied to your vehicle: ", u'being pulled along: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
] | |
1345cc0e0a984974cc45d265fb5e248b561053c2 | b503e79ccfca67c8114f5bd7a215f5ae993a0ba4 | /airflow/providers/amazon/aws/sensors/glue.py | 21a82da9ee9d040fd45ccda5044d467bf7c6b4c3 | [
"Apache-2.0",
"BSD-3-Clause",
"Python-2.0",
"MIT"
] | permissive | github/incubator-airflow | df1d9780f862ea1df8261ea6015dd50a4583f983 | 73f70e00b9fd294057f8ca6b714a85622f6d5dd5 | refs/heads/gh-2.0.2 | 2023-07-29T18:08:43.140580 | 2022-09-14T18:23:42 | 2022-09-14T18:23:42 | 80,634,006 | 24 | 27 | Apache-2.0 | 2023-04-18T04:24:36 | 2017-02-01T15:34:55 | Python | UTF-8 | Python | false | false | 2,398 | py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.glue import AwsGlueJobHook
from airflow.sensors.base import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class AwsGlueJobSensor(BaseSensorOperator):
"""
Waits for an AWS Glue Job to reach any of the status below
'FAILED', 'STOPPED', 'SUCCEEDED'
:param job_name: The AWS Glue Job unique name
:type job_name: str
:param run_id: The AWS Glue current running job identifier
:type run_id: str
"""
template_fields = ('job_name', 'run_id')
@apply_defaults
def __init__(self, *, job_name: str, run_id: str, aws_conn_id: str = 'aws_default', **kwargs):
super().__init__(**kwargs)
self.job_name = job_name
self.run_id = run_id
self.aws_conn_id = aws_conn_id
self.success_states = ['SUCCEEDED']
self.errored_states = ['FAILED', 'STOPPED', 'TIMEOUT']
def poke(self, context):
hook = AwsGlueJobHook(aws_conn_id=self.aws_conn_id)
self.log.info("Poking for job run status :for Glue Job %s and ID %s", self.job_name, self.run_id)
job_state = hook.get_job_state(job_name=self.job_name, run_id=self.run_id)
if job_state in self.success_states:
self.log.info("Exiting Job %s Run State: %s", self.run_id, job_state)
return True
elif job_state in self.errored_states:
job_error_message = "Exiting Job " + self.run_id + " Run State: " + job_state
raise AirflowException(job_error_message)
else:
return False
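# Illustrative usage sketch (editor-added, not part of the provider module): inside a
# DAG the sensor only needs the Glue job name and run id; the task and job names below
# are hypothetical, and run_id would typically come from an upstream task via XCom.
def _example_build_glue_sensor(job_run_id: str) -> AwsGlueJobSensor:
    return AwsGlueJobSensor(
        task_id='wait_for_glue_job',
        job_name='my_glue_job',
        run_id=job_run_id,
        poke_interval=60,
    )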
| [
"[email protected]"
] | |
925de84479dcf1d87e11ce81b0c8dc7b15d21acd | 8a495b823576b5c0bb39decd44575de20b1dc43d | /hydrus/client/db/ClientDBFilesStorage.py | 79d2c246ceddf3d6d68933dbb19b89d0713923d8 | [
"WTFPL"
] | permissive | Treadder/hydrus | f8c11e9798316cc5457497e9bff56236727862e0 | ca2f5f161214aa6df3900809f9ca18339c3e1f9a | refs/heads/master | 2023-09-06T08:46:05.694446 | 2021-11-17T21:22:27 | 2021-11-17T21:22:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,781 | py | import collections
import sqlite3
import typing
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusData
from hydrus.core import HydrusDB
from hydrus.client import ClientConstants as CC
from hydrus.client import ClientSearch
from hydrus.client.db import ClientDBMaster
from hydrus.client.db import ClientDBModule
from hydrus.client.db import ClientDBServices
def GenerateFilesTableNames( service_id: int ) -> typing.Tuple[ str, str, str, str ]:
suffix = str( service_id )
current_files_table_name = 'main.current_files_{}'.format( suffix )
deleted_files_table_name = 'main.deleted_files_{}'.format( suffix )
pending_files_table_name = 'main.pending_files_{}'.format( suffix )
petitioned_files_table_name = 'main.petitioned_files_{}'.format( suffix )
return ( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name )
def GenerateFilesTableName( service_id: int, status: int ) -> str:
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name ) = GenerateFilesTableNames( service_id )
if status == HC.CONTENT_STATUS_CURRENT:
return current_files_table_name
elif status == HC.CONTENT_STATUS_DELETED:
return deleted_files_table_name
elif status == HC.CONTENT_STATUS_PENDING:
return pending_files_table_name
else:
return petitioned_files_table_name
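# Illustrative sketch (editor-added, not part of the original module): the per-service
# table names generated above, for a hypothetical service_id of 5.
def _example_files_table_names():
    assert GenerateFilesTableName( 5, HC.CONTENT_STATUS_CURRENT ) == 'main.current_files_5'
    assert GenerateFilesTableName( 5, HC.CONTENT_STATUS_DELETED ) == 'main.deleted_files_5'
    assert GenerateFilesTableName( 5, HC.CONTENT_STATUS_PENDING ) == 'main.pending_files_5'
    assert GenerateFilesTableName( 5, HC.CONTENT_STATUS_PETITIONED ) == 'main.petitioned_files_5'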
class DBLocationSearchContext( object ):
def __init__( self, location_search_context: ClientSearch.LocationSearchContext ):
self.location_search_context = location_search_context
self.files_table_name = None
def GetLocationSearchContext( self ) -> ClientSearch.LocationSearchContext:
return self.location_search_context
def GetTableJoinIteratedByFileDomain( self, table_phrase: str ):
if self.location_search_context.IsAllKnownFiles():
return table_phrase
else:
return '{} CROSS JOIN {} USING ( hash_id )'.format( self.files_table_name, table_phrase )
def GetTableJoinLimitedByFileDomain( self, table_phrase: str ):
if self.location_search_context.IsAllKnownFiles():
return table_phrase
else:
return '{} CROSS JOIN {} USING ( hash_id )'.format( table_phrase, self.files_table_name )
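# Illustrative sketch (editor-added, not part of the original module): if
# files_table_name were 'main.current_files_5', then for a hypothetical caller table
# 'current_mappings_7' GetTableJoinLimitedByFileDomain would return
# 'current_mappings_7 CROSS JOIN main.current_files_5 USING ( hash_id )', i.e. the
# caller's rows restricted to this file domain.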
class ClientDBFilesStorage( ClientDBModule.ClientDBModule ):
def __init__( self, cursor: sqlite3.Cursor, modules_services: ClientDBServices.ClientDBMasterServices, modules_texts: ClientDBMaster.ClientDBMasterTexts ):
self.modules_services = modules_services
self.modules_texts = modules_texts
ClientDBModule.ClientDBModule.__init__( self, 'client file locations', cursor )
self.temp_file_storage_table_name = None
def _GetInitialTableGenerationDict( self ) -> dict:
return {
'main.local_file_deletion_reasons' : ( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER PRIMARY KEY, reason_id INTEGER );', 400 )
}
def _GetServiceIndexGenerationDict( self, service_id ) -> dict:
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name ) = GenerateFilesTableNames( service_id )
index_generation_dict = {}
index_generation_dict[ current_files_table_name ] = [
( [ 'timestamp' ], False, 447 )
]
index_generation_dict[ deleted_files_table_name ] = [
( [ 'timestamp' ], False, 447 ),
( [ 'original_timestamp' ], False, 447 )
]
index_generation_dict[ petitioned_files_table_name ] = [
( [ 'reason_id' ], False, 447 )
]
return index_generation_dict
def _GetServiceTableGenerationDict( self, service_id ) -> dict:
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name ) = GenerateFilesTableNames( service_id )
return {
current_files_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER PRIMARY KEY, timestamp INTEGER );', 447 ),
deleted_files_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER PRIMARY KEY, timestamp INTEGER, original_timestamp INTEGER );', 447 ),
pending_files_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER PRIMARY KEY );', 447 ),
petitioned_files_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER PRIMARY KEY, reason_id INTEGER );', 447 )
}
def _GetServiceIdsWeGenerateDynamicTablesFor( self ):
return self.modules_services.GetServiceIds( HC.SPECIFIC_FILE_SERVICES )
def AddFiles( self, service_id, insert_rows ):
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name ) = GenerateFilesTableNames( service_id )
self._ExecuteMany( 'INSERT OR IGNORE INTO {} VALUES ( ?, ? );'.format( current_files_table_name ), ( ( hash_id, timestamp ) for ( hash_id, timestamp ) in insert_rows ) )
self._ExecuteMany( 'DELETE FROM {} WHERE hash_id = ?;'.format( pending_files_table_name ), ( ( hash_id, ) for ( hash_id, timestamp ) in insert_rows ) )
pending_changed = self._GetRowCount() > 0
return pending_changed
def ClearDeleteRecord( self, service_id, hash_ids ):
deleted_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_DELETED )
self._ExecuteMany( 'DELETE FROM {} WHERE hash_id = ?;'.format( deleted_files_table_name ), ( ( hash_id, ) for hash_id in hash_ids ) )
num_deleted = self._GetRowCount()
return num_deleted
def ClearFilesTables( self, service_id: int, keep_pending = False ):
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name ) = GenerateFilesTableNames( service_id )
self._Execute( 'DELETE FROM {};'.format( current_files_table_name ) )
self._Execute( 'DELETE FROM {};'.format( deleted_files_table_name ) )
if not keep_pending:
self._Execute( 'DELETE FROM {};'.format( pending_files_table_name ) )
self._Execute( 'DELETE FROM {};'.format( petitioned_files_table_name ) )
def ClearLocalDeleteRecord( self, hash_ids = None ):
# we delete from everywhere, but not for files currently in the trash
service_ids_to_nums_cleared = {}
local_non_trash_service_ids = self.modules_services.GetServiceIds( ( HC.COMBINED_LOCAL_FILE, HC.LOCAL_FILE_DOMAIN ) )
if hash_ids is None:
trash_current_files_table_name = GenerateFilesTableName( self.modules_services.trash_service_id, HC.CONTENT_STATUS_CURRENT )
for service_id in local_non_trash_service_ids:
deleted_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_DELETED )
self._Execute( 'DELETE FROM {} WHERE hash_id NOT IN ( SELECT hash_id FROM {} );'.format( deleted_files_table_name, trash_current_files_table_name ) )
num_cleared = self._GetRowCount()
service_ids_to_nums_cleared[ service_id ] = num_cleared
self._Execute( 'DELETE FROM local_file_deletion_reasons WHERE hash_id NOT IN ( SELECT hash_id FROM {} );'.format( trash_current_files_table_name ) )
else:
trashed_hash_ids = self.FilterCurrentHashIds( self.modules_services.trash_service_id, hash_ids )
ok_to_clear_hash_ids = set( hash_ids ).difference( trashed_hash_ids )
if len( ok_to_clear_hash_ids ) > 0:
for service_id in local_non_trash_service_ids:
deleted_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_DELETED )
self._ExecuteMany( 'DELETE FROM {} WHERE hash_id = ?;'.format( deleted_files_table_name ), ( ( hash_id, ) for hash_id in ok_to_clear_hash_ids ) )
num_cleared = self._GetRowCount()
service_ids_to_nums_cleared[ service_id ] = num_cleared
self._ExecuteMany( 'DELETE FROM local_file_deletion_reasons WHERE hash_id = ?;', ( ( hash_id, ) for hash_id in ok_to_clear_hash_ids ) )
return service_ids_to_nums_cleared
def DeletePending( self, service_id: int ):
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name ) = GenerateFilesTableNames( service_id )
self._Execute( 'DELETE FROM {};'.format( pending_files_table_name ) )
self._Execute( 'DELETE FROM {};'.format( petitioned_files_table_name ) )
def DropFilesTables( self, service_id: int ):
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name ) = GenerateFilesTableNames( service_id )
self._Execute( 'DROP TABLE IF EXISTS {};'.format( current_files_table_name ) )
self._Execute( 'DROP TABLE IF EXISTS {};'.format( deleted_files_table_name ) )
self._Execute( 'DROP TABLE IF EXISTS {};'.format( pending_files_table_name ) )
self._Execute( 'DROP TABLE IF EXISTS {};'.format( petitioned_files_table_name ) )
def FilterAllCurrentHashIds( self, hash_ids, just_these_service_ids = None ):
if just_these_service_ids is None:
service_ids = self.modules_services.GetServiceIds( HC.SPECIFIC_FILE_SERVICES )
else:
service_ids = just_these_service_ids
current_hash_ids = set()
with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
for service_id in service_ids:
current_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_CURRENT )
hash_id_iterator = self._STI( self._Execute( 'SELECT hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( temp_hash_ids_table_name, current_files_table_name ) ) )
current_hash_ids.update( hash_id_iterator )
return current_hash_ids
def FilterAllPendingHashIds( self, hash_ids, just_these_service_ids = None ):
if just_these_service_ids is None:
service_ids = self.modules_services.GetServiceIds( HC.SPECIFIC_FILE_SERVICES )
else:
service_ids = just_these_service_ids
pending_hash_ids = set()
with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
for service_id in service_ids:
pending_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_PENDING )
hash_id_iterator = self._STI( self._Execute( 'SELECT hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( temp_hash_ids_table_name, pending_files_table_name ) ) )
pending_hash_ids.update( hash_id_iterator )
return pending_hash_ids
def FilterCurrentHashIds( self, service_id, hash_ids ):
if service_id == self.modules_services.combined_file_service_id:
return set( hash_ids )
with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
current_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_CURRENT )
current_hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( temp_hash_ids_table_name, current_files_table_name ) ) )
return current_hash_ids
def FilterPendingHashIds( self, service_id, hash_ids ):
if service_id == self.modules_services.combined_file_service_id:
return set( hash_ids )
with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
pending_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_PENDING )
pending_hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( temp_hash_ids_table_name, pending_files_table_name ) ) )
return pending_hash_ids
def GenerateFilesTables( self, service_id: int ):
table_generation_dict = self._GetServiceTableGenerationDict( service_id )
for ( table_name, ( create_query_without_name, version_added ) ) in table_generation_dict.items():
self._Execute( create_query_without_name.format( table_name ) )
index_generation_dict = self._GetServiceIndexGenerationDict( service_id )
for ( table_name, columns, unique, version_added ) in self._FlattenIndexGenerationDict( index_generation_dict ):
self._CreateIndex( table_name, columns, unique = unique )
def GetAPendingHashId( self, service_id ):
pending_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_PENDING )
result = self._Execute( 'SELECT hash_id FROM {};'.format( pending_files_table_name ) ).fetchone()
if result is None:
return None
else:
( hash_id, ) = result
return hash_id
def GetAPetitionedHashId( self, service_id ):
petitioned_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_PETITIONED )
result = self._Execute( 'SELECT hash_id FROM {};'.format( petitioned_files_table_name ) ).fetchone()
if result is None:
return None
else:
( hash_id, ) = result
return hash_id
def GetCurrentFilesCount( self, service_id, only_viewable = False ):
current_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_CURRENT )
if only_viewable:
# hashes to mimes
result = self._Execute( 'SELECT COUNT( * ) FROM {} CROSS JOIN files_info USING ( hash_id ) WHERE mime IN {};'.format( current_files_table_name, HydrusData.SplayListForDB( HC.SEARCHABLE_MIMES ) ) ).fetchone()
else:
result = self._Execute( 'SELECT COUNT( * ) FROM {};'.format( current_files_table_name ) ).fetchone()
( count, ) = result
return count
def GetCurrentFilesInboxCount( self, service_id ):
current_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_CURRENT )
result = self._Execute( 'SELECT COUNT( * ) FROM {} CROSS JOIN file_inbox USING ( hash_id );'.format( current_files_table_name ) ).fetchone()
( count, ) = result
return count
def GetCurrentHashIdsList( self, service_id ):
current_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_CURRENT )
hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM {};'.format( current_files_table_name ) ) )
return hash_ids
def GetCurrentFilesTotalSize( self, service_id ):
current_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_CURRENT )
# hashes to size
result = self._Execute( 'SELECT SUM( size ) FROM {} CROSS JOIN files_info USING ( hash_id );'.format( current_files_table_name ) ).fetchone()
( count, ) = result
return count
def GetCurrentHashIdsToTimestamps( self, service_id, hash_ids ):
current_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_CURRENT )
with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
rows = dict( self._Execute( 'SELECT hash_id, timestamp FROM {} CROSS JOIN {} USING ( hash_id );'.format( temp_hash_ids_table_name, current_files_table_name ) ) )
return rows
def GetCurrentTimestamp( self, service_id: int, hash_id: int ):
current_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_CURRENT )
result = self._Execute( 'SELECT timestamp FROM {} WHERE hash_id = ?;'.format( current_files_table_name ), ( hash_id, ) ).fetchone()
if result is None:
return None
else:
( timestamp, ) = result
return timestamp
def GetDeletedFilesCount( self, service_id: int ) -> int:
deleted_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_DELETED )
result = self._Execute( 'SELECT COUNT( * ) FROM {};'.format( deleted_files_table_name ) ).fetchone()
( count, ) = result
return count
def GetDeletionStatus( self, service_id, hash_id ):
# can have a value here and just be in trash, so we fetch it whatever the end result
result = self._Execute( 'SELECT reason_id FROM local_file_deletion_reasons WHERE hash_id = ?;', ( hash_id, ) ).fetchone()
if result is None:
file_deletion_reason = 'Unknown deletion reason.'
else:
( reason_id, ) = result
file_deletion_reason = self.modules_texts.GetText( reason_id )
deleted_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_DELETED )
is_deleted = False
timestamp = None
result = self._Execute( 'SELECT timestamp FROM {} WHERE hash_id = ?;'.format( deleted_files_table_name ), ( hash_id, ) ).fetchone()
if result is not None:
is_deleted = True
( timestamp, ) = result
return ( is_deleted, timestamp, file_deletion_reason )
def GetDBLocationSearchContext( self, location_search_context: ClientSearch.LocationSearchContext ):
if not location_search_context.SearchesAnything():
location_search_context = ClientSearch.LocationSearchContext( current_service_keys = [ CC.COMBINED_FILE_SERVICE_KEY ] )
db_location_search_context = DBLocationSearchContext( location_search_context )
if location_search_context.IsAllKnownFiles():
# no table set, obviously
return db_location_search_context
table_names = []
for current_service_key in location_search_context.current_service_keys:
service_id = self.modules_services.GetServiceId( current_service_key )
table_names.append( GenerateFilesTableName( service_id, HC.CONTENT_STATUS_CURRENT ) )
for deleted_service_key in location_search_context.deleted_service_keys:
service_id = self.modules_services.GetServiceId( deleted_service_key )
table_names.append( GenerateFilesTableName( service_id, HC.CONTENT_STATUS_DELETED ) )
if len( table_names ) == 1:
table_name = table_names[0]
db_location_search_context.files_table_name = table_name
else:
# while I could make a VIEW of the UNION SELECT, we'll populate an indexed single column table to help query planner later on
# we're hardcoding the name to this class for now, so a limit of one db_location_search_context at a time _for now_
            # we may change this in future to use wrapper temp int tables, we'll see
# maybe I should stick this guy in 'temp' to live through db connection resets, but we'll see I guess. it is generally ephemeral, not going to linger through weird vacuum maintenance or anything right?
if self.temp_file_storage_table_name is None:
self.temp_file_storage_table_name = 'mem.temp_file_storage_hash_id'
self._Execute( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER PRIMARY KEY );'.format( self.temp_file_storage_table_name ) )
else:
self._Execute( 'DELETE FROM {};'.format( self.temp_file_storage_table_name ) )
select_query = ' UNION '.join( ( 'SELECT hash_id FROM {}'.format( table_name ) for table_name in table_names ) )
self._Execute( 'INSERT OR IGNORE INTO {} ( hash_id ) SELECT hash_id FROM {};'.format( self.temp_file_storage_table_name, select_query ) )
db_location_search_context.files_table_name = self.temp_file_storage_table_name
return db_location_search_context
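    # Illustrative note (editor-added): with a single searched domain the context above
    # simply points at that service's files table (e.g. 'main.current_files_5'); with
    # several domains the hash_ids are first unioned into 'mem.temp_file_storage_hash_id'
    # and that temp table becomes files_table_name.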
def GetHashIdsToCurrentServiceIds( self, temp_hash_ids_table_name ):
hash_ids_to_current_file_service_ids = collections.defaultdict( list )
for service_id in self.modules_services.GetServiceIds( HC.SPECIFIC_FILE_SERVICES ):
current_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_CURRENT )
for hash_id in self._STI( self._Execute( 'SELECT hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( temp_hash_ids_table_name, current_files_table_name ) ) ):
hash_ids_to_current_file_service_ids[ hash_id ].append( service_id )
return hash_ids_to_current_file_service_ids
def GetHashIdsToServiceInfoDicts( self, temp_hash_ids_table_name ):
hash_ids_to_current_file_service_ids_and_timestamps = collections.defaultdict( list )
hash_ids_to_deleted_file_service_ids_and_timestamps = collections.defaultdict( list )
hash_ids_to_pending_file_service_ids = collections.defaultdict( list )
hash_ids_to_petitioned_file_service_ids = collections.defaultdict( list )
for service_id in self.modules_services.GetServiceIds( HC.SPECIFIC_FILE_SERVICES ):
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name ) = GenerateFilesTableNames( service_id )
for ( hash_id, timestamp ) in self._Execute( 'SELECT hash_id, timestamp FROM {} CROSS JOIN {} USING ( hash_id );'.format( temp_hash_ids_table_name, current_files_table_name ) ):
hash_ids_to_current_file_service_ids_and_timestamps[ hash_id ].append( ( service_id, timestamp ) )
for ( hash_id, timestamp, original_timestamp ) in self._Execute( 'SELECT hash_id, timestamp, original_timestamp FROM {} CROSS JOIN {} USING ( hash_id );'.format( temp_hash_ids_table_name, deleted_files_table_name ) ):
hash_ids_to_deleted_file_service_ids_and_timestamps[ hash_id ].append( ( service_id, timestamp, original_timestamp ) )
            for hash_id in self._STI( self._Execute( 'SELECT hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( temp_hash_ids_table_name, pending_files_table_name ) ) ):
hash_ids_to_pending_file_service_ids[ hash_id ].append( service_id )
            for hash_id in self._STI( self._Execute( 'SELECT hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( temp_hash_ids_table_name, petitioned_files_table_name ) ) ):
hash_ids_to_petitioned_file_service_ids[ hash_id ].append( service_id )
return (
hash_ids_to_current_file_service_ids_and_timestamps,
hash_ids_to_deleted_file_service_ids_and_timestamps,
hash_ids_to_pending_file_service_ids,
hash_ids_to_petitioned_file_service_ids
)
def GetNumLocal( self, service_id: int ) -> int:
current_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_CURRENT )
combined_local_current_files_table_name = GenerateFilesTableName( self.modules_services.combined_local_file_service_id, HC.CONTENT_STATUS_CURRENT )
( num_local, ) = self._Execute( 'SELECT COUNT( * ) FROM {} CROSS JOIN {} USING ( hash_id );'.format( current_files_table_name, combined_local_current_files_table_name ) ).fetchone()
return num_local
def GetPendingFilesCount( self, service_id: int ) -> int:
pending_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_PENDING )
result = self._Execute( 'SELECT COUNT( * ) FROM {};'.format( pending_files_table_name ) ).fetchone()
( count, ) = result
return count
def GetPetitionedFilesCount( self, service_id: int ) -> int:
petitioned_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_PETITIONED )
result = self._Execute( 'SELECT COUNT( * ) FROM {};'.format( petitioned_files_table_name ) ).fetchone()
( count, ) = result
return count
def GetServiceIdCounts( self, hash_ids ) -> typing.Dict[ int, int ]:
with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
service_ids_to_counts = {}
for service_id in self.modules_services.GetServiceIds( HC.SPECIFIC_FILE_SERVICES ):
current_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_CURRENT )
# temp hashes to files
( count, ) = self._Execute( 'SELECT COUNT( * ) FROM {} CROSS JOIN {} USING ( hash_id );'.format( temp_hash_ids_table_name, current_files_table_name ) ).fetchone()
service_ids_to_counts[ service_id ] = count
return service_ids_to_counts
def GetSomePetitionedRows( self, service_id: int ):
petitioned_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_PETITIONED )
petitioned_rows = list( HydrusData.BuildKeyToListDict( self._Execute( 'SELECT reason_id, hash_id FROM {} ORDER BY reason_id LIMIT 100;'.format( petitioned_files_table_name ) ) ).items() )
return petitioned_rows
def GetTableJoinIteratedByFileDomain( self, service_id, table_name, status ):
files_table_name = GenerateFilesTableName( service_id, status )
return '{} CROSS JOIN {} USING ( hash_id )'.format( files_table_name, table_name )
def GetTableJoinLimitedByFileDomain( self, service_id, table_name, status ):
files_table_name = GenerateFilesTableName( service_id, status )
return '{} CROSS JOIN {} USING ( hash_id )'.format( table_name, files_table_name )
def GetTablesAndColumnsThatUseDefinitions( self, content_type: int ) -> typing.List[ typing.Tuple[ str, str ] ]:
tables_and_columns = []
        if content_type == HC.CONTENT_TYPE_HASH:
for service_id in self.modules_services.GetServiceIds( HC.SPECIFIC_FILE_SERVICES ):
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name ) = GenerateFilesTableNames( service_id )
tables_and_columns.extend( [
( current_files_table_name, 'hash_id' ),
( deleted_files_table_name, 'hash_id' ),
( pending_files_table_name, 'hash_id' ),
( petitioned_files_table_name, 'hash_id' )
] )
return tables_and_columns
def GetUndeleteRows( self, service_id, hash_ids ):
deleted_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_DELETED )
with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
rows = self._Execute( 'SELECT hash_id, original_timestamp FROM {} CROSS JOIN {} USING ( hash_id );'.format( temp_hash_ids_table_name, deleted_files_table_name ) ).fetchall()
return rows
def PendFiles( self, service_id, hash_ids ):
pending_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_PENDING )
self._ExecuteMany( 'INSERT OR IGNORE INTO {} ( hash_id ) VALUES ( ? );'.format( pending_files_table_name ), ( ( hash_id, ) for hash_id in hash_ids ) )
def PetitionFiles( self, service_id, reason_id, hash_ids ):
petitioned_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_PETITIONED )
self._ExecuteMany( 'DELETE FROM {} WHERE hash_id = ?;'.format( petitioned_files_table_name ), ( ( hash_id, ) for hash_id in hash_ids ) )
self._ExecuteMany( 'INSERT OR IGNORE INTO {} ( hash_id, reason_id ) VALUES ( ?, ? );'.format( petitioned_files_table_name ), ( ( hash_id, reason_id ) for hash_id in hash_ids ) )
def RecordDeleteFiles( self, service_id, insert_rows ):
deleted_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_DELETED )
now = HydrusData.GetNow()
self._ExecuteMany(
'INSERT OR IGNORE INTO {} ( hash_id, timestamp, original_timestamp ) VALUES ( ?, ?, ? );'.format( deleted_files_table_name ),
( ( hash_id, now, original_timestamp ) for ( hash_id, original_timestamp ) in insert_rows )
)
num_new_deleted_files = self._GetRowCount()
return num_new_deleted_files
def RescindPendFiles( self, service_id, hash_ids ):
pending_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_PENDING )
self._ExecuteMany( 'DELETE FROM {} WHERE hash_id = ?;'.format( pending_files_table_name ), ( ( hash_id, ) for hash_id in hash_ids ) )
def RescindPetitionFiles( self, service_id, hash_ids ):
petitioned_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_PETITIONED )
self._ExecuteMany( 'DELETE FROM {} WHERE hash_id = ?;'.format( petitioned_files_table_name ), ( ( hash_id, ) for hash_id in hash_ids ) )
def RemoveFiles( self, service_id, hash_ids ):
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name ) = GenerateFilesTableNames( service_id )
self._ExecuteMany( 'DELETE FROM {} WHERE hash_id = ?;'.format( current_files_table_name ), ( ( hash_id, ) for hash_id in hash_ids ) )
self._ExecuteMany( 'DELETE FROM {} WHERE hash_id = ?;'.format( petitioned_files_table_name ), ( ( hash_id, ) for hash_id in hash_ids ) )
pending_changed = self._GetRowCount() > 0
return pending_changed
def SetFileDeletionReason( self, hash_ids, reason ):
reason_id = self.modules_texts.GetTextId( reason )
self._ExecuteMany( 'REPLACE INTO local_file_deletion_reasons ( hash_id, reason_id ) VALUES ( ?, ? );', ( ( hash_id, reason_id ) for hash_id in hash_ids ) )
| [
"[email protected]"
] | |
7358cda3629e79200fe58e47c0f254cdd0af3523 | 1d7ae7f6e7a0df98d92f9ec5f277752d14924a94 | /fake-very-small-test/wrong_case/pytorch_bike_dqn_test-small-with-former-a-trick.py | 5916cee0a242517a8af7ef2d2c50f65db11824f0 | [] | no_license | lindsaymorgan/Mobike-Bike-Sharing-System-Dispatch-Optimization-Using-Reinforcement-Learning | 1e6b1aa3c64d2ff2e31b5d9dcc4abdc11e10679c | 6c8a329fae5c2ac8db45a3d8c55b308aae8ad804 | refs/heads/master | 2023-05-02T07:39:49.089459 | 2021-05-23T02:26:14 | 2021-05-23T02:26:14 | 279,467,461 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,003 | py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
import matplotlib.pyplot as plt
import pandas as pd
import random
import time
# hyper parameters
EPSILON = 0.85
GAMMA = 0.99
LR = 0.001
MEMORY_CAPACITY = 3000
Q_NETWORK_ITERATION = 2000
BATCH_SIZE = 128
EPISODES = 20000
need = pd.read_csv('../fake_4region_trip_20170510.csv')
ts=int(time.time())
class Env(object):
def __init__(self, region_num, move_amount_limit, eps_num):
self.region_num = region_num
self.move_amount_limit = move_amount_limit
self.action_dim = region_num * (2 * move_amount_limit + 1)
self.obs_dim = 2 * region_num + 1
self.episode_num = eps_num
self.start_region = need.groupby('start_region')
self.end_region = need.groupby('end_region')
self.t_index = {i: str(i) for i in range(eps_num)}
self.out_nums = np.array([self.start_region[str(i)].agg(np.sum) for i in range(eps_num)])
self.in_nums = np.array([self.end_region[str(i)].agg(np.sum) for i in range(eps_num)])
self.t = 0
        self.obs_init = np.array([15, 15, 15, 15, 0, 0,0,0,15, 15, 15, 15, 0, 0]) # bikes in each region + truck position + bikes on the truck
self.obs_init[-self.region_num-2:-2] -= self.out_nums[0, ]
def init(self):
self.obs = self.obs_init.copy()
self.t = 0
return np.append(self.obs, self.t)
def step(self, action):
        # update the time-step state
self.t += 1
if self.t == self.episode_num-1:
done = True
else:
done = False
        self.obs[:self.region_num+2]=self.obs[-self.region_num-2:] # carry the previous state forward
region = int(np.floor(action / (2 * self.move_amount_limit + 1)))
move = action % (2 * self.move_amount_limit + 1) - self.move_amount_limit
        # update the bike distribution
        # add bikes that arrived during the previous period
self.obs[-self.region_num-2:-2] += self.in_nums[self.t - 1, ]
reward = 0
        # filter out infeasible moves; if feasible, relocate the bikes and update the truck state, otherwise take no action
if move + self.obs[-self.region_num-2+region] >= 0 and move <= self.obs[-1] \
and (self.obs[-self.region_num-2+region]- self.out_nums[self.t,region])*move<=0:
self.obs[-self.region_num-2+region] += move
            # update the truck state
            self.obs[-1] -= move # number of bikes on the truck
            self.obs[-2] = region # truck position
            # record the previous action history
            self.obs[-self.region_num-2-1] = move # number of bikes moved
            self.obs[-self.region_num-2-2] = region # truck position
self.obs[-self.region_num-2:-2] -= self.out_nums[self.t, ]
reward = np.sum(self.obs[-self.region_num-2:-2][self.obs [-self.region_num-2:-2]< 0])
self.obs[-self.region_num-2:-2][self.obs [-self.region_num-2:-2]< 0] = 0
return np.append(self.obs, self.t), reward, done
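# Illustrative rollout sketch (editor-added, not in the original script): the Env sizes
# below are hypothetical and the random action stands in for the learned DQN policy.
def _example_env_rollout():
    env = Env(region_num=4, move_amount_limit=5, eps_num=6)
    state = env.init()
    done = False
    total_reward = 0
    while not done:
        action = random.randrange(env.action_dim)  # may be infeasible, in which case no bikes are moved
        state, reward, done = env.step(action)
        total_reward += reward
    return total_reward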
class Net(nn.Module):
def __init__(self, NUM_STATES):
super(Net, self).__init__()
EMB_SIZE = 10
OTHER_SIZE = NUM_STATES-2 # fixme: update this value based on the input
self.fc1 = nn.Linear(OTHER_SIZE + EMB_SIZE * 4, 256).cuda()
# self.fc1.weight.data.normal_(0, 0.1)
self.fc2 = nn.Linear(256, 64).cuda()
# self.fc2.weight.data.normal_(0, 0.1)
self.fc3 = nn.Linear(64, 1).cuda()
# self.fc3.weight.data.normal_(0, 0.1)
self.m = nn.Dropout(p=0.2).cuda()
self.emb = nn.Embedding(NUM_STATES, EMB_SIZE).cuda()
def forward(self, x: torch.cuda.FloatTensor, stations: torch.cuda.LongTensor):
emb = self.emb(stations).flatten(start_dim=1)
x = torch.cat([x, emb], 1)
x = self.fc1(x)
x = F.relu(x)
# x = self.m(x)
x = self.fc2(x)
x = F.relu(x)
# x = self.m(x)
x = self.fc3(x)
return x
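# Illustrative sketch (editor-added, not in the original script): the shapes Net.forward
# expects, assuming a CUDA device; the state size 15 and batch size 32 are hypothetical.
def _example_net_forward():
    net = Net(NUM_STATES=15)
    x = torch.randn(32, 13).cuda()                   # NUM_STATES - 2 dense features
    stations = torch.randint(0, 15, (32, 4)).cuda()  # four categorical region/station ids
    return net(x, stations)                          # -> Q-value tensor of shape (32, 1)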
class Dqn():
def __init__(self, NUM_STATES, NUM_ACTIONS, region_num,move_amount_limit, eps_num):
self.eval_net, self.target_net = Net(NUM_STATES), Net(NUM_STATES)
self.target_net.load_state_dict(self.eval_net.state_dict())
self.memory = np.zeros((MEMORY_CAPACITY, NUM_STATES * 2 + 2))
# state, action ,reward and next state
self.memory_counter = 0
self.learn_counter = 0
self.optimizer = optim.Adam(self.eval_net.parameters(), LR)
self.loss = nn.MSELoss()
self.NUM_ACTIONS = NUM_ACTIONS
self.NUM_STATES = NUM_STATES
self.move_amount_limit = move_amount_limit
self.region_num=region_num
self.fig, self.ax = plt.subplots()
self.start_region = need.groupby('start_region')
self.end_region = need.groupby('end_region')
self.t_index = {i: str(i) for i in range(eps_num)}
self.out_nums = np.array([self.start_region[str(i)].agg(np.sum) for i in range(eps_num)])
def store_trans(self, state, action, reward, next_state):
if self.memory_counter % 10 == 0:
            print("The experience pool has collected {} experiences".format(self.memory_counter))
index = self.memory_counter % MEMORY_CAPACITY
trans = np.hstack((state, [action], [reward], next_state))
self.memory[index,] = trans
self.memory_counter += 1
def choose_action(self, state,EPSILON):
        # note that the function returns the action's index, not the real action
# EPSILON
# state = torch.unsqueeze(torch.FloatTensor(state) ,0)
# feasible action
# print(EPSILON)
if random.random() > EPSILON:
action=self.predict(state)
else:
feasible_action = list()
for action in range(self.NUM_ACTIONS):
move = action % (2 * self.move_amount_limit + 1) - self.move_amount_limit
region = int(np.floor(action / (2 * self.move_amount_limit + 1)))
if move + state[-self.region_num - 2 + region] >= 0 and move <= state[-2] and \
(state[-self.region_num-2+region]- self.out_nums[state[-1],region])*move<=0:
feasible_action.append(action)
action = random.choice(feasible_action)
return action
def predict(self, state):
        # note that the function returns the action's index, not the real action
# EPSILON
# feasible action
feasible_action = list()
state_1 = [j for i, j in enumerate(state) if
i not in [self.region_num, self.region_num + 2, 2 * self.region_num + 4]]
state_2 = [j for i, j in enumerate(state) if
i in [self.region_num, self.region_num + 2, 2 * self.region_num + 4]]
tmp_x = list()
tmp_y = list()
for action in range(self.NUM_ACTIONS):
move = action % (2 * self.move_amount_limit + 1) - self.move_amount_limit
region = int(np.floor(action / (2 * self.move_amount_limit + 1)))
if move + state[-self.region_num - 2 + region] >= 0 and move <= state[-2]\
and (state[-self.region_num-2+region]- self.out_nums[state[-1],region])*move<=0:
feasible_action.append(action)
tmp_x.append(np.concatenate([state_1, np.array([move])]))
tmp_y.append(np.concatenate([state_2, np.array([region])]))
x = torch.FloatTensor(tmp_x).cuda()
station = torch.LongTensor(tmp_y).cuda()
action_val = self.target_net.forward(x, station)
max_indice = [i for i, j in enumerate([i[0] for i in action_val]) if
                      j == np.max([i[0] for i in action_val])]  # indices attaining the maximal value
        action = feasible_action[random.choice(max_indice)]  # if several indices tie, pick one at random and map it back to an action
return action
def plot(self, ax, x):
ax.cla()
ax.set_xlabel("episode")
ax.set_ylabel("total reward")
ax.plot(x, 'b-')
plt.pause(0.000000000000001)
def learn(self):
# learn 100 times then the target network update
if self.learn_counter % Q_NETWORK_ITERATION == 0:
self.target_net.load_state_dict(self.eval_net.state_dict())
self.learn_counter += 1
if self.learn_counter % 50 == 0:
test_x=torch.FloatTensor([[11,12,12,7,0,0,5,5,3,0,0,1,-5],[5,5,3,0,0,0,10,11,0,3,0,2,-10],
[11,12,12,7,0,-1,4,5,3,0,1,1,-5],[10,8,0,3,3,3,8,9,0,0,0,3,-9]]).cuda()
test_station=torch.LongTensor([[0,3,3,0],[3,0,0,0],[0,0,0,0],[1,3,3,0]]).cuda()
action_val = self.target_net.forward(test_x, test_station)
print(np.mean(action_val.cpu().detach().numpy()), file=open(f"result_history/actionless_output_action_value_{ts}.txt", "a"))
sample_index = np.random.choice(MEMORY_CAPACITY, BATCH_SIZE)
        # slice out the (s, a, r, s') transitions
batch_memory = self.memory[sample_index, :]
batch_reward = torch.FloatTensor(batch_memory[:, self.NUM_STATES + 1: self.NUM_STATES + 2]).cuda()
x=torch.FloatTensor(np.delete(batch_memory[:, :self.NUM_STATES],
[self.region_num,self.region_num+2,self.region_num*2+4], 1)).cuda()
move = torch.FloatTensor([[i[0] % (2 * self.move_amount_limit + 1) - self.move_amount_limit] for i in
batch_memory[:, self.NUM_STATES:self.NUM_STATES + 1]]).cuda()
x = torch.cat((x, move), 1)
y=torch.LongTensor(batch_memory[:, [self.region_num,self.region_num+2,self.region_num*2+4]]).cuda()
region = torch.LongTensor([[int(np.floor(i[0] / (2 * self.move_amount_limit + 1)))] for i in
batch_memory[:, self.NUM_STATES:self.NUM_STATES + 1]]).cuda()
y = torch.cat((y, region), 1)
q_eval = self.eval_net(x, y)
tmp_q_next = list()
for state in batch_memory[:, -self.NUM_STATES:]:
feasible_action = list()
m_r_list=list()
for action in range(self.NUM_ACTIONS):
move = action % (2 * self.move_amount_limit + 1) - self.move_amount_limit
region = int(np.floor(action / (2 * self.move_amount_limit + 1)))
if move + state[-self.region_num-2+region] >= 0 and move <= state[-2]\
and (state[-self.region_num-2+region]- self.out_nums[int(state[-1]),region])*move<=0:
feasible_action.append(action)
m_r_list.append((move,region))
tmp_x = list()
tmp_y = list()
            # compute the value of every feasible action
state_1 = [j for i, j in enumerate(state) if
i not in [self.region_num, self.region_num + 2, 2 * self.region_num + 4]]
state_2 = [j for i, j in enumerate(state) if
i in [self.region_num, self.region_num + 2, 2 * self.region_num + 4]]
for move,region in m_r_list:
# move = action % (2 * self.move_amount_limit + 1) - self.move_amount_limit
# region = int(np.floor(action / (2 * self.move_amount_limit + 1)))
tmp_x.append(np.concatenate([state_1, np.array([move])]))
tmp_y.append(np.concatenate([state_2, np.array([region])]))
x = torch.FloatTensor(tmp_x).cuda()
station = torch.LongTensor(tmp_y).cuda()
action_val = self.target_net.forward(x, station)
tmp_q_next.append([float(action_val.max(1)[0].max().cpu().detach().numpy())])
q_next = torch.FloatTensor(tmp_q_next).cuda()
# q_target = batch_reward + GAMMA*q_next.max(1)[0].view(BATCH_SIZE, 1)
q_target = batch_reward + GAMMA * q_next
loss = self.loss(q_eval, q_target)
print(loss.item(), file=open(f"result_history/actionless_output_loss_{ts}.txt", "a"))
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
    # evaluate the agent: run evaluation episode(s) and average the total reward
def evaluate(self, env, render=False):
eval_reward = []
for i in range(1):
obs = env.init()
episode_reward = 0
while True:
action = self.predict(obs) # 预测动作,只选最优动作
obs, reward, done = env.step(action)
episode_reward += reward
print(f"obs:{obs[:-1]} action:{action} reward:{reward} reward_sum:{episode_reward} t:{obs[-1]}")
print(
f"obs:{obs[:-1]} t:{obs[-1]} region:{int(np.floor(action / (2 * self.move_amount_limit + 1)))} "
f"move:{action % (2 * self.move_amount_limit + 1) - self.move_amount_limit} reward:{reward} "
f"reward_sum:{episode_reward}",
file=open(f"result_action/actionless_output_action_{ts}.txt", "a"))
# if render:
# env.render()
if done:
break
eval_reward.append(episode_reward)
return np.mean(eval_reward)
def main():
eps_num = 5
EPSILON = 0.9
EPS_DECAY = 0.99
env = Env(region_num=4, move_amount_limit=10, eps_num=eps_num)
    NUM_ACTIONS = (2 * env.move_amount_limit + 1) * env.region_num  # one action per (region, move) pair: moves in [-limit, limit] for each of the 4 regions
NUM_STATES = 2*env.region_num + 7 # MountainCar-v0: (2,)
net = Dqn(NUM_STATES, NUM_ACTIONS, env.region_num, env.move_amount_limit, eps_num)
print("The DQN is collecting experience...")
step_counter_list = []
for episode in range(EPISODES):
state = env.init()
step_counter = 0
reward_sum = 0
while True:
step_counter += 1
# env.render()
EPSILON = max(EPSILON * EPS_DECAY, 0.01)
action = net.choose_action(state,EPSILON)
# print("the action is {}".format(action))
next_state, reward, done = env.step(action)
net.store_trans(state, action, reward, next_state)
reward_sum += reward
if net.memory_counter >= 5*BATCH_SIZE:
net.learn()
if done:
print("episode {}, the reward is {}".format(episode, round(reward_sum, 3)))
print(f"{round(reward_sum, 3)}", file=open(f"result_history/actionless_output_result_{ts}.txt", "a"))
if done:
step_counter_list.append(step_counter)
net.plot(net.ax, step_counter_list)
break
state = next_state
print(net.evaluate(env))
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
1dbae09a882980f310395543b799670a7ed4e9c9 | 8217d63b4f8875598cc8e01c9cdf4c92e35e4e62 | /tools/pypmmn/pypmmn/pypmmn.py | 4d38dc5e8ba1cdc93e9037397c8102e80acbeff2 | [] | no_license | bubbafix/munin-contrib | d5bdfa156cbebbec73a3851349859bf7caa137eb | b9ec8fbb040808bf4930bea6b065ce5564fbd77d | refs/heads/master | 2021-01-16T19:58:11.309579 | 2013-05-22T09:02:23 | 2013-05-22T09:02:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,683 | py | #!/usr/bin/python
"""
A very simple munin-node written in pure python (no external libraries
required)
"""
from datetime import datetime
from logging.handlers import RotatingFileHandler
from optparse import OptionParser
from os import listdir, access, X_OK, getpid
from os.path import join, isdir, abspath, dirname, exists
from subprocess import Popen, PIPE
from time import sleep
import logging
import socket
import sys
LOG = logging.getLogger(__name__)
LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
SESSION_TIMEOUT = 10 # Amount of seconds until an unused session is closed
from daemon import createDaemon
__version__ = '1.0b1'
class CmdHandler(object):
"""
This handler defines the protocol between munin and this munin node.
Each method starting with ``do_`` responds to the corresponding munin
command.
"""
def __init__(self, get_fun, put_fun, options):
"""
Constructor
:param get_fun: The function used to receive a message from munin
:param put_fun: The function used to send a message back to munin
:param options: The command-line options object
"""
self.get_fun = get_fun
self.put_fun = put_fun
self.options = options
def do_version(self, arg):
"""
Prints the version of this instance.
"""
LOG.debug('Command "version" executed with args: %r' % arg)
self.put_fun('# munin node at %s\n' % (
self.options.host,
))
def do_nodes(self, arg):
"""
Prints this hostname
"""
LOG.debug('Command "nodes" executed with args: %r' % arg)
self.put_fun('%s\n' % self.options.host)
self.put_fun('.\n')
def do_quit(self, arg):
"""
Stops this process
"""
LOG.debug('Command "quit" executed with args: %r' % arg)
sys.exit(0)
def do_list(self, arg):
"""
Print a list of plugins
"""
LOG.debug('Command "list" executed with args: %r' % arg)
try:
LOG.debug('Listing files inside %s' % self.options.plugin_dir)
for filename in listdir(self.options.plugin_dir):
if not access(join(self.options.plugin_dir, filename), X_OK):
LOG.warning('Non-executable plugin %s found!' % filename)
continue
LOG.debug('Found plugin: %s' % filename)
self.put_fun("%s " % filename)
except OSError, exc:
self.put_fun("# ERROR: %s" % exc)
self.put_fun("\n")
def _caf(self, plugin, cmd):
"""
handler for ``config``, ``alert`` and ``fetch``
Calls the plugin with ``cmd`` as only argument.
:param plugin: The plugin name
:param cmd: The command which is to passed to the plugin
"""
plugin_filename = join(self.options.plugin_dir, plugin)
# Sanity checks
if isdir(plugin_filename) or not access(plugin_filename, X_OK):
msg = "# Unknown plugin [%s] for %s" % (plugin, cmd)
LOG.warning(msg)
self.put_fun(msg)
return
# for 'fetch' we don't need to pass a command to the plugin
if cmd == 'fetch':
plugin_arg = ''
else:
plugin_arg = cmd
try:
cmd = [plugin_filename, plugin_arg]
LOG.debug('Executing %r' % cmd)
output = Popen(cmd, stdout=PIPE).communicate()[0]
except OSError, exc:
            LOG.exception('Plugin execution failed')
self.put_fun("# ERROR: %s\n" % exc)
return
self.put_fun(output)
self.put_fun('.\n')
def do_alert(self, arg):
"""
Handle command "alert"
"""
LOG.debug('Command "alert" executed with args: %r' % arg)
self._caf(arg, 'alert')
def do_fetch(self, arg):
"""
Handles command "fetch"
"""
LOG.debug('Command "fetch" executed with args: %r' % arg)
self._caf(arg, 'fetch')
def do_config(self, arg):
"""
Handles command "config"
"""
LOG.debug('Command "config" executed with args: %r' % arg)
self._caf(arg, 'config')
def do_cap(self, arg):
"""
Handles command "cap"
"""
LOG.debug('Command "cap" executed with args: %r' % arg)
self.put_fun("cap ")
if self.options.spoolfetch_dir:
self.put_fun("spool")
else:
LOG.debug('No spoolfetch_dir specified. Result spooling disabled')
self.put_fun("\n")
def do_spoolfetch(self, arg):
"""
Handles command "spoolfetch"
"""
        LOG.debug('Command "spoolfetch" executed with args: %r' % arg)
output = Popen(['%s/spoolfetch_%s' % (self.options.spoolfetch_dir,
self.options.host),
arg]).communicate()[0]
self.put_fun(output)
self.put_fun('.\n')
# aliases
do_exit = do_quit
def handle_input(self, line):
"""
Handles one input line and sends any result back using ``put_fun``
"""
line = line.strip()
line = line.split(' ')
cmd = line[0]
if len(line) == 1:
arg = ''
elif len(line) == 2:
arg = line[1]
else:
self.put_fun('# Invalid input: %s\n' % line)
return
if not cmd:
return
func = getattr(self, 'do_%s' % cmd, None)
if not func:
# Give the client a list of supported commands.
commands = [_[3:] for _ in dir(self) if _.startswith('do_')]
self.put_fun("# Unknown command. Supported commands: %s\n" % (
commands))
return
func(arg)
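    # Rough sketch of a session with the munin master (illustrative only;
    # the plugin names depend on what is installed in plugin_dir):
    #
    #   node:   # munin node at myhost
    #   master: list
    #   node:   cpu load memory
    #   master: config cpu
    #   node:   <output of "cpu config"> followed by "."
    #   master: fetch cpu
    #   node:   <output of "cpu"> followed by "."
    #   master: quit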
def is_timed_out(self):
return (datetime.now() - self._last_command).seconds > SESSION_TIMEOUT
def reset_time(self):
self._last_command = datetime.now()
def usage(option, opt, value, parser):
"""
Prints the command usage and exits
"""
parser.print_help()
sys.exit(0)
def get_options():
"""
Parses command-line arguments.
"""
parser = OptionParser(add_help_option=False)
parser.add_option('-p', '--port', dest='port',
default=None,
help='TCP Port to listen on. (If not specified, use stdin/stdout)')
parser.add_option('-d', '--plugin-dir', dest='plugin_dir',
default='plugins',
help=('The directory containing the munin-plugins.'
' Default: <current working dir>/plugins'))
parser.add_option('-h', '--host', dest='host',
help=('The hostname which will be reported in the plugins.'
' Default: %s' % socket.gethostname()),
default=socket.gethostname())
parser.add_option('-n', '--no-daemon', dest='no_daemon',
default=False,
action='store_true',
help='Run in foreground. Do not daemonize. '
'Will also enable debug logging to stdout.')
parser.add_option('-l', '--log-dir', dest='log_dir',
default=None,
help='The log folder. Default: disabled')
parser.add_option('-s', '--spoolfech-dir', dest='spoolfetch_dir',
default=None,
help='The spoolfetch folder. Default: disabled')
parser.add_option('--help', action='callback', callback=usage,
help='Shows this help')
options, args = parser.parse_args()
# ensure we are using absolute paths (for daemonizing)
if options.log_dir:
options.log_dir = abspath(options.log_dir)
if options.spoolfetch_dir:
options.spoolfetch_dir = abspath(options.spoolfetch_dir)
if options.plugin_dir:
options.plugin_dir = abspath(options.plugin_dir)
return (options, args)
def process_stdin(options):
"""
Process commands by reading from stdin
"""
rfhandler = RotatingFileHandler(
join(abspath(dirname(__file__)), 'log', 'pypmmn.log'),
maxBytes=100 * 1024,
backupCount=5
)
rfhandler.setFormatter(logging.Formatter(LOG_FORMAT))
logging.getLogger().addHandler(rfhandler)
handler = CmdHandler(sys.stdin.read, sys.stdout.write, options)
handler.do_version(None)
LOG.info('STDIN handler opened')
while True:
data = sys.stdin.readline().strip()
if not data:
return
handler.handle_input(data)
def process_socket(options):
"""
Process socket connections.
.. note::
This is not a multithreaded process. So only one connection can be
handled at any given time. But given the nature of munin, this is Good
Enough.
"""
retcode = 0
if options.no_daemon:
# set up on-screen-logging
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(logging.Formatter(LOG_FORMAT))
logging.getLogger().addHandler(console_handler)
else:
# fork fork
retcode = createDaemon()
# set up a rotating file log
rfhandler = RotatingFileHandler(
join(options.log_dir, 'daemon.log'),
maxBytes=100 * 1024,
backupCount=5
)
rfhandler.setFormatter(logging.Formatter(LOG_FORMAT))
logging.getLogger().addHandler(rfhandler)
# write down some house-keeping information
LOG.info('New process PID: %d' % getpid())
pidfile = open(join(options.log_dir, 'pypmmn.pid'), 'w')
pidfile.write(str(getpid()))
pidfile.close()
LOG.info('PID file created in %s' % join(options.log_dir,
'pypmmn.pid'))
LOG.info('Socket handler started.')
host = '' # listens on all addresses TODO: make this configurable
port = int(options.port)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((host, port))
s.listen(1)
LOG.info('Listening on host %r, port %r' % (host, port))
conn, addr = s.accept()
handler = CmdHandler(conn.recv, conn.send, options)
handler.do_version(None)
handler.reset_time()
LOG.info("Accepting incoming connection from %s" % (addr, ))
while True:
data = conn.recv(1024)
if not data.strip():
sleep(1)
if handler.is_timed_out():
LOG.info('Session timeout.')
conn.shutdown(socket.SHUT_RDWR)
conn.close()
LOG.info('Listening on host %r, port %r' % (host, port))
conn, addr = s.accept()
handler.reset_time()
handler.get_fun = conn.recv
handler.put_fun = conn.send
handler.do_version(None)
LOG.info("Accepting incoming connection from %s" % (addr, ))
try:
data = conn.recv(1024)
except socket.error, exc:
LOG.warning("Socket error. Reinitialising.: %s" % exc)
conn, addr = s.accept()
handler.reset_time()
handler.get_fun = conn.recv
handler.put_fun = conn.send
handler.do_version(None)
LOG.info("Accepting incoming connection from %s" % (addr, ))
if data.strip() == 'quit':
LOG.info('Client requested session end. Closing connection.')
conn.shutdown(socket.SHUT_RDWR)
conn.close()
LOG.info('Listening on host %r, port %r' % (host, port))
conn, addr = s.accept()
handler.reset_time()
handler.get_fun = conn.recv
handler.put_fun = conn.send
handler.do_version(None)
LOG.info("Accepting incoming connection from %s" % (addr, ))
continue
handler.handle_input(data)
sys.exit(retcode)
def main():
"""
The main entry point of the application
"""
options, args = get_options()
# Handle logging as early as possible.
if options.log_dir:
if not exists(options.log_dir):
raise IOError('[Errno 2] No such file or directory: %r' % (
options.log_dir))
# set up logging if requested
root_logger = logging.getLogger()
root_logger.setLevel(logging.NOTSET) # TODO: Make configurable
# Start either the "stdin" interface, or the socked daemon. Depending on
# whether a port was given on startup or not.
if not options.port:
process_stdin(options)
else:
process_socket(options)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
816ccf13d545d21c6a8991fbbd5db56841a3fd65 | 4eab1bd9e1b00155872e44963a5df0532cb5341f | /menus/menuTwo.py | ebbc521d4e91603c346648b2c0ccb7a4a9256571 | [] | no_license | soheilpaper/python-gui | 9b067467ca41d27092e5817d0a49162b10c37de6 | 4e6bcad319829dd2c0fdc328520a55a7932060c7 | refs/heads/master | 2020-12-31T04:29:16.798703 | 2016-04-08T08:41:59 | 2016-04-08T08:41:59 | 55,763,643 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,595 | py | import wx
########################################################################
class MyForm(wx.Frame):
""""""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
wx.Frame.__init__(self, None, title="wx.Menu Tutorial")
self.panel = wx.Panel(self, wx.ID_ANY)
# create the menubar
menuBar = wx.MenuBar()
# create the first menu (starting on left)
carMenu = wx.Menu()
carMenu.Append(101, "&Ford", "An American Automaker")
carMenu.Append(102, "&Nissan", "")
carMenu.Append(103, "&Toyota", "Buy Japanese!")
carMenu.Append(104, "&Close", "Close the application")
# add a picture to a menu
picMenu = wx.Menu()
item = wx.MenuItem(picMenu, wx.ID_ANY, "Snake", "This menu has a picture!")
img = wx.Image('snake32.bmp', wx.BITMAP_TYPE_ANY)
item.SetBitmap(wx.BitmapFromImage(img))
picMenu.AppendItem(item)
# add menus to menubar
menuBar.Append(carMenu, "&Vehicles")
menuBar.Append(picMenu, "&Picture")
self.SetMenuBar(menuBar)
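        # Note (added): the onExit handler below is defined but never bound in
        # the original code; a typical binding would look like
        #   self.Bind(wx.EVT_MENU, self.onExit, id=104)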
#----------------------------------------------------------------------
def onExit(self, event):
""""""
self.Close()
#----------------------------------------------------------------------
# Run the program
if __name__ == "__main__":
app = wx.App(False)
frame = MyForm().Show()
app.MainLoop()
| [
"[email protected]"
] | |
5ea8085f35c9778a5a1d4aae6dc84dacc2eb3e30 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /edifact/D08A/MOVINSD08AUN.py | 546f7086b78b82989b1b35deabc5ccb25f908114 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 1,740 | py | #Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD08AUN import recorddefs
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGM', MIN: 1, MAX: 1},
{ID: 'DTM', MIN: 1, MAX: 1},
{ID: 'RFF', MIN: 0, MAX: 9, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 9},
]},
{ID: 'NAD', MIN: 0, MAX: 9, LEVEL: [
{ID: 'CTA', MIN: 0, MAX: 9, LEVEL: [
{ID: 'COM', MIN: 0, MAX: 9},
]},
]},
{ID: 'TDT', MIN: 1, MAX: 3, LEVEL: [
{ID: 'LOC', MIN: 1, MAX: 1},
{ID: 'DTM', MIN: 1, MAX: 99},
{ID: 'RFF', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 1},
]},
{ID: 'HAN', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'LOC', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'RFF', MIN: 1, MAX: 99},
{ID: 'FTX', MIN: 0, MAX: 9},
{ID: 'MEA', MIN: 0, MAX: 9},
{ID: 'DIM', MIN: 0, MAX: 9},
{ID: 'LOC', MIN: 0, MAX: 9},
{ID: 'NAD', MIN: 1, MAX: 99},
{ID: 'TMP', MIN: 0, MAX: 1, LEVEL: [
{ID: 'RNG', MIN: 0, MAX: 1},
]},
{ID: 'EQD', MIN: 0, MAX: 99, LEVEL: [
{ID: 'EQN', MIN: 0, MAX: 1},
]},
{ID: 'EQA', MIN: 0, MAX: 99, LEVEL: [
{ID: 'EQN', MIN: 0, MAX: 1},
]},
{ID: 'GID', MIN: 0, MAX: 9999, LEVEL: [
{ID: 'GDS', MIN: 0, MAX: 1},
]},
{ID: 'RFF', MIN: 0, MAX: 999, LEVEL: [
{ID: 'DGS', MIN: 1, MAX: 99, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 1},
]},
]},
]},
]},
{ID: 'UNT', MIN: 1, MAX: 1},
]},
]
| [
"[email protected]"
] | |
82b102860dad12c81b3575f99ab5d3102e7229e3 | 927d23e5fbcbd7001b1007990b9a28014bfb8219 | /mnist_classification.py | 373bf1d62d3f945e2554161b608f5dc3b439098b | [] | no_license | minar09/tensorflow-practices | 5822cf784063223bc0a5a62570fa0a5548cf1ef0 | 7982860ce2ec6df0c57a5389711464cbddad89fe | refs/heads/master | 2020-03-28T21:09:32.658650 | 2018-10-08T15:25:08 | 2018-10-08T15:25:08 | 149,133,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,725 | py | #### MNIST classification ###
# Hide the warning messages about CPU/GPU
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Import libraries
import tensorflow as tf
import time
import numpy as np
old_v = tf.logging.get_verbosity()
tf.logging.set_verbosity(tf.logging.ERROR)
# Download/Read MNIST
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Hide the warning messages about deprecations of MNIST data read
tf.logging.set_verbosity(old_v)
# Initialize parameters
t1 = time.time()
num_steps = 5000
batch_size = 128
display_step = 500
n_hidden_1 = 256
n_hidden_2 = 256
n_hidden_3 = 256
num_input = 784
num_classes = 10
# Define placeholder
x = tf.placeholder("float", [None, num_input])
y = tf.placeholder("float", [None, num_classes])
# Define Weight and Bias for linear regression
weights = {
'h1' : tf.Variable(tf.random_normal([num_input, n_hidden_1])),
'h2' : tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'h3' : tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3])),
    'out' : tf.Variable(tf.random_normal([n_hidden_3, num_classes]))
}
biases = {
'b1' : tf.Variable(tf.random_normal([n_hidden_1])),
'b2' : tf.Variable(tf.random_normal([n_hidden_2])),
'b3' : tf.Variable(tf.random_normal([n_hidden_3])),
'out' : tf.Variable(tf.random_normal([num_classes]))
}
# Initialize the model
def mlp(x):
l1 = tf.nn.relu(tf.add(tf.matmul(x, weights['h1']), biases['b1']))
l2 = tf.nn.relu(tf.add(tf.matmul(l1, weights['h2']), biases['b2']))
l3 = tf.nn.relu(tf.add(tf.matmul(l2, weights['h3']), biases['b3']))
lout = tf.add(tf.matmul(l3, weights['out']), biases['out'])
return lout
# Define hypothesis, cost and optimization functions
logits = mlp(x)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(cost)
prediction = tf.nn.softmax(logits)
correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Launch graph/Initialize session
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for step in range(1, num_steps+1):
batch_train_images, batch_train_labels = mnist.train.next_batch(batch_size)
sess.run(optimizer, feed_dict={x: batch_train_images, y: batch_train_labels})
if step % display_step == 0 or step == 1:
print("Step " + str(step) + " out of " + str(num_steps))
print("Optimization finished!")
t2 = time.time()
print("Testing accuracy: ", sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})*100, "%")
print("Learning time: " + str(t2-t1) + " seconds")
| [
"[email protected]"
] | |
088dc88aa4aeb64878d97237118802a64edf1d5f | 48db7bebad4309a7bca8b7dec2cc9193551f46a3 | /returns/_generated/pointfree/bind_io.pyi | 192094dd7cdfecddb6d6bb7c83451e2b4d7e27ae | [
"BSD-2-Clause"
] | permissive | kenjihiraoka/returns | bff6196a059d411b6c36f4a2e284e4439d24fd73 | 4589973520d7226b18acd7295d1a9a10ff032759 | refs/heads/master | 2022-11-20T13:20:41.094871 | 2020-07-07T08:23:05 | 2020-07-07T08:23:05 | 277,863,697 | 0 | 0 | BSD-2-Clause | 2020-07-07T16:09:25 | 2020-07-07T16:09:25 | null | UTF-8 | Python | false | false | 1,779 | pyi | from typing import Callable, TypeVar, overload
from typing_extensions import Protocol
from returns.context import RequiresContextFutureResult, RequiresContextIOResult
from returns.future import Future, FutureResult
from returns.io import IO, IOResult
_ValueType = TypeVar('_ValueType', contravariant=True)
_ErrorType = TypeVar('_ErrorType')
_NewValueType = TypeVar('_NewValueType', covariant=True)
_EnvType = TypeVar('_EnvType', contravariant=True)
class _BindIO(Protocol[_ValueType, _NewValueType]):
"""
Helper class to represent type overloads for ret_type based on a value type.
Contains all containers we have.
It does not exist in runtime.
It is also completely removed from typing with the help of the mypy plugin.
"""
@overload
def __call__(
self,
container: RequiresContextIOResult[_EnvType, _ValueType, _ErrorType],
) -> RequiresContextIOResult[_EnvType, _NewValueType, _ErrorType]:
...
@overload
def __call__(
self,
container: RequiresContextFutureResult[
_EnvType, _ValueType, _ErrorType,
],
) -> RequiresContextFutureResult[_EnvType, _NewValueType, _ErrorType]:
...
@overload
def __call__(
self,
container: IOResult[_ValueType, _ErrorType],
) -> IOResult[_NewValueType, _ErrorType]:
...
@overload
def __call__(
self,
container: Future[_ValueType],
) -> Future[_NewValueType]:
...
@overload
def __call__(
self,
container: FutureResult[_ValueType, _ErrorType],
) -> FutureResult[_NewValueType, _ErrorType]:
...
def _bind_io(
function: Callable[[_ValueType], IO[_NewValueType]],
) -> _BindIO[_ValueType, _NewValueType]:
...
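# Illustrative usage (added comment; assumed, since this file is only a typing
# stub and the runtime implementation lives in the generated module):
#
#   from returns.io import IO, IOSuccess
#
#   def increment(x: int) -> IO[int]:
#       return IO(x + 1)
#
#   bound = _bind_io(increment)   # a _BindIO[int, int] callable
#   bound(IOSuccess(1))           # IOResult with the IO value bound inside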
| [
"[email protected]"
] | |
70237c341ae1c9585377c6c6ec289173ce92bdae | 148044ba8412cfe9227201e82360770d6a7e9780 | /check_screen.py | 095f92651215f811b52a37d88fe1c3fbc9022209 | [] | no_license | mwaskom/sticks_experiment | 9e0b2af851e20f82cd8a3011b08ac061b0061191 | fcfd98cb4528e9011168be27b2121a96514b3fa3 | refs/heads/master | 2023-08-24T06:22:27.939464 | 2015-08-28T23:53:16 | 2015-08-31T04:38:24 | 38,704,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py |
import sys
from psychopy import visual, event
import cregg
def main(arglist):
p = cregg.Params("scan")
p.set_by_cmdline(arglist)
win = cregg.launch_window(p)
visual.Circle(win, p.array_radius,
edges=128,
lineColor="white",
lineWidth=2).draw()
win.flip()
event.waitKeys(keyList=p.quit_keys)
if __name__ == "__main__":
main(sys.argv[1:])
| [
"[email protected]"
] | |
4a010a42bfbd615afad1fd018c160396fa4dbd69 | 40f4626ec26f23923c2b19d7ed24f3c512495182 | /src/kangqi/task/compQA/model/module/cross_attention_indirect.py | 3606a67410715c49755b69f8e4e28061ab9a5fcc | [] | no_license | Zjhao666/CompQA | c937c382a2f0a0fce4fdda8efda7c916b3e4c978 | 4bb2abc40428373481909e02543062a7388615bd | refs/heads/master | 2023-02-09T02:28:09.966576 | 2020-12-31T21:18:32 | 2020-12-31T21:18:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,278 | py | """
Author: Kangqi Luo
Goal: Combine the structure of ABCNN-1 and AF-attention
(A Decomposable Attention Model for Natural Language Inference)
We are using the module in compQA scenario, where the rhs (path) is represented by both pwords and preds.
Therefore, we send'em together into the module, making it a little bit more complex than a normal CrossAtt layer.
"""
import tensorflow as tf
from . import att_layer
from kangqi.util.LogUtil import LogInfo
class IndirectCrossAttention:
def __init__(self, lf_max_len, rt_max_len, dim_att_hidden, att_func):
self.lf_max_len = lf_max_len
self.rt_max_len = rt_max_len
self.dim_att_hidden = dim_att_hidden
LogInfo.logs('IndirectCrossAttention: lf_max_len = %d, rt_max_len = %d, dim_att_hidden = %d, att_func = %s.',
lf_max_len, rt_max_len, dim_att_hidden, att_func)
assert att_func in ('dot', 'bilinear', 'bahdanau', 'bdot')
self.att_func = getattr(att_layer, 'cross_att_' + att_func)
def forward(self, lf_input, lf_mask, rt_input, rt_mask):
"""
:param lf_input: (ds, lf_max_len, dim_hidden)
:param lf_mask: (ds, lf_max_len) as float32
:param rt_input: (ds, rt_max_len, dim_hidden)
:param rt_mask: (ds, rt_max_len) as float32
"""
with tf.variable_scope('cross_att_indirect', reuse=tf.AUTO_REUSE):
lf_cube_mask = tf.stack([lf_mask] * self.rt_max_len,
axis=-1, name='lf_cube_mask') # (ds, lf_max_len, rt_max_len)
rt_cube_mask = tf.stack([rt_mask] * self.lf_max_len,
axis=1, name='rt_cube_mask') # (ds, lf_max_len, rt_max_len)
cube_mask = tf.multiply(lf_cube_mask, rt_cube_mask, name='cube_mask')
""" Calculate cross attention matrix """
raw_att_mat = self.att_func(lf_input=lf_input, rt_input=rt_input,
lf_max_len=self.lf_max_len,
rt_max_len=self.rt_max_len,
dim_att_hidden=self.dim_att_hidden)
masked_att_mat = raw_att_mat * cube_mask + tf.float32.min * (1. - cube_mask)
# padding: -inf
""" Attention normalize & produce att_repr """
att_norm_for_lf = tf.nn.softmax(masked_att_mat, dim=2, name='att_norm_for_lf')
att_norm_for_rt = tf.nn.softmax(masked_att_mat, dim=1, name='att_norm_for_rt')
# for_lf: sum_j A[:,j] = 1.
# for_rt: sum_i A[i,:] = 1.
lf_att_repr = tf.matmul(att_norm_for_lf, rt_input, name='lf_att_repr') # (ds, lf_max_len, dim_emb)
rt_att_repr = tf.matmul(tf.transpose(att_norm_for_rt, perm=[0, 2, 1]), # (ds, rt_max_len, lf_max_len)
lf_input, name='rt_att_repr') # (ds, rt_max_len, dim_emb)
return lf_att_repr, rt_att_repr, raw_att_mat
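    # Shape sketch (added for clarity): with lf_max_len=3, rt_max_len=5 and a
    # hidden size of 8, forward() maps
    #   lf_input (ds, 3, 8), rt_input (ds, 5, 8)
    # to
    #   lf_att_repr (ds, 3, 8), rt_att_repr (ds, 5, 8), raw_att_mat (ds, 3, 5).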
# @staticmethod
# def att_norm_col_wise(att_mat):
# sum_of_cols = 1e-4 + tf.reduce_mean(att_mat, axis=1, name='sum_of_cols') # (ds, rt_max_len)
# sum_of_cols = tf.expand_dims(sum_of_cols, axis=1) # (ds, 1, rt_max_len)
# att_norm = tf.div(att_mat, sum_of_cols, name='att_norm_col_wise')
# # (ds, lf_max_len, rt_max_len), sum(att_norm[:, j]) = 1
# # att_norm[:, j]: the distribution over left words for each word-j at right side
# return att_norm
#
# @staticmethod
# def att_norm_row_wise(att_mat):
# sum_of_rows = 1e-4 + tf.reduce_sum(att_mat, axis=2, name='sum_of_rows') # (ds, lf_max_len)
# sum_of_rows = tf.expand_dims(sum_of_rows, axis=2) # (ds, lf_max_len, 1)
# att_norm = tf.div(att_mat, sum_of_rows, name='att_norm_row_wise')
# # (ds, lf_max_len, rt_max_len), sum(att_norm[i, :]) = 1
# # att_norm[i, :]: the distribution over right words for each word-i at left side
# return att_norm
#
# def construct_att_weights(self, att_mat):
# """
# Parikh: Go through formula (2) in AF-attention paper
# :param att_mat: (ds, q_max_len, p_max_len + pw_max_len)
# :return: 3 attention weights (q, p, pw) and the split attention matrices
# """
# """ Naive v.s. Parikh: just different from the normalizing direction!! """
# p_att_mat, pw_att_mat = tf.split(value=att_mat,
# num_or_size_splits=[self.p_max_len, self.pw_max_len],
# axis=2) # (ds, q_max_len, p_max_len | pw_max_len)
# if self.att_norm_mode == 'parikh':
# att_wt_q = self.att_norm_col_wise(att_mat=att_mat) # (ds, q_max_len, p_max_len+pw_max_len)
# att_wt_p = self.att_norm_row_wise(att_mat=p_att_mat) # (ds, q_max_len, p_max_len)
# att_wt_pw = self.att_norm_row_wise(att_mat=pw_att_mat) # (ds, q_max_len, pw_max_len)
# else: # naive
# att_wt_q = self.att_norm_row_wise(att_mat=att_mat)
# att_wt_p = self.att_norm_col_wise(att_mat=p_att_mat)
# att_wt_pw = self.att_norm_col_wise(att_mat=pw_att_mat)
# return p_att_mat, pw_att_mat, att_wt_q, att_wt_p, att_wt_pw
| [
"[email protected]"
] | |
dc34c1f11f334a3d915def0a7e3345ee0781e7e9 | 38ea041a35d6e1bbdcb875cfff1a313b02476e81 | /appModules/AddContact.py | 515d8116725ef529922d9747efd7df54bad352c6 | [] | no_license | saraliuhou/DataDriverTestFrameWork | 1824d0b771c20a87ce3d0b5cebf5cf1e70b4226b | 5f243026e9f03e96fa010f945fb31b7545759798 | refs/heads/master | 2020-06-01T00:19:32.435417 | 2019-06-12T09:10:09 | 2019-06-12T09:10:09 | 190,554,542 | 0 | 0 | null | 2019-06-06T09:29:50 | 2019-06-06T09:29:50 | null | UTF-8 | Python | false | false | 2,262 | py | from pageObjects.HomePage import HomePage
from pageObjects.NewContact import AddContactPage
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from util.ParseConfigurationFile import ParseConfigFile
class NewContactPersonAction(object):
def __init__(self):
pass
@staticmethod
def addressLink(driver):
'''
        Click the Contacts (address book) button
:param driver:
:return:
'''
homePage = HomePage(driver)
        # click the Contacts entry
homePage.addressLink().click()
@staticmethod
def addContact(driver, contactName, contactMail, isSatr, contactPhone, contactComment):
'''
        Add-contact scenario
:param driver:
:param contactName:
:param contactMail:
:param isSatr:
:param contactPhone:
:param contactComment:
:return:
'''
        # click 'New Contact'
addContact = AddContactPage(driver)
        # During debugging, clicking the [New Contact] button occasionally failed, so an explicit wait is added here
by, locator = ParseConfigFile().getElementValue('126mail_addContactPage', 'addContactPage.newContact')
WebDriverWait(driver, 30).until(EC.element_to_be_clickable((by, locator)))
addContact.newContact().click()
if contactName:
            # optional field
addContact.addName().send_keys(contactName)
        # required field
addContact.addMail().send_keys(contactMail)
if isSatr == '是':
addContact.markStar().click()
if contactPhone:
addContact.addPhone().send_keys(contactPhone)
if contactComment:
addContact.addContent().send_keys(contactComment)
addContact.clickCommitBtn().click()
if __name__=='__main__':
from appModules.LoginAction import LoginAction
import time
from selenium import webdriver
driver = webdriver.Firefox()
driver.get('https://mail.126.com')
time.sleep(5)
LoginAction.login(driver, 'linux', 'chao')
NewContactPersonAction.addressLink(driver)
NewContactPersonAction.addContact(driver, '','[email protected]', '是', '','')
time.sleep(5)
driver.quit() | [
"[email protected]"
] | |
6872ae8eb5ec75cbb2419ad2d62cff26fed5eae2 | 1a31dfb66512aa66c407484f2ea8b0fb370669a4 | /dstt/urls.py | ea10b8d0b93dbfa3fb49d2bb5b02982685ef0ae0 | [] | no_license | nisha-eng/dstt | 790129f2918e0210421039baba0a4e8c877a7627 | bab89000242aec3a1a6fb05447ec52b14722809f | refs/heads/main | 2023-03-01T02:50:20.767421 | 2021-02-05T05:23:08 | 2021-02-05T05:23:08 | 336,171,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | """dstt URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.conf import Settings, settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('account/',include('account.urls')),
path('administration/',include('administration.urls')),
path('employee/',include('employee.urls'))
]+static(settings.STATIC_URL,document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
] | |
d8f3167c34525042bfc9833d02d8d53673ff7978 | 79aa4b99a48bb16a907916ad63c902443420541a | /0019.py | e1253c96f9a4ecabdca22315f7ecd7d39377a98c | [] | no_license | mach8686devops/leetcode-100 | 62dec66c719d7cfa120ca9505701df49d8d5b982 | f90526c9b073165b86b933cdf7d1dc496e68f2c6 | refs/heads/main | 2023-04-11T06:28:15.059587 | 2021-04-13T12:11:54 | 2021-04-13T12:11:54 | 329,346,572 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | # Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:
l = []
while head: l, head = l + [head], head.next
if n != len(l): l[-n - 1].next = l[-n].next
del l[-n]
return l and l[0]
| [
"[email protected]"
] | |
abee20e74748f84b81263b5a7dca482647bdac3d | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/9/v4g.py | 947f100ab07df7782ca283e84a091c434fd16de6 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'v4G':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
2132ca489839efb59eecac3da30efd56457831e6 | 18eac94ff076c1eecd72870ef93ae656906e8673 | /supervised_learning/0x06-keras/13-predict.py | e2426676d174b5ff52b5fd6940693d363bda35a2 | [] | no_license | dgquintero/holbertonschool-machine_learning | c1331ff87e053f9c143a0e503e8db177dfc7aafe | c80073d0ef68deeedbe2d991e296ef75f58a220f | refs/heads/master | 2022-12-19T21:49:10.581793 | 2020-10-15T14:56:22 | 2020-10-15T14:56:22 | 279,329,167 | 0 | 1 | null | 2020-09-25T19:11:52 | 2020-07-13T14:42:03 | Python | UTF-8 | Python | false | false | 574 | py | #!/usr/bin/env python3
"""function predict"""
import tensorflow.keras as K
def predict(network, data, verbose=False):
"""
function that tests a neural network
Arguments:
network: the network model to test
data: the input data to test the model with
labels: are the correct one-hot labels of data
verbose: is a boolean that determines if output
should be printed during the testing process
Returns: the prediction for the data
"""
prediction = network.predict(data, verbose=verbose)
return prediction
| [
"[email protected]"
] | |
c4a498197bd65892c63d8b651006a2e100b27e0c | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/media/v20210601/get_transform.py | 2d5107073354128ca98d5c3db8d4db0c9a68f79d | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,036 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetTransformResult',
'AwaitableGetTransformResult',
'get_transform',
'get_transform_output',
]
@pulumi.output_type
class GetTransformResult:
"""
A Transform encapsulates the rules or instructions for generating desired outputs from input media, such as by transcoding or by extracting insights. After the Transform is created, it can be applied to input media by creating Jobs.
"""
def __init__(__self__, created=None, description=None, id=None, last_modified=None, name=None, outputs=None, system_data=None, type=None):
if created and not isinstance(created, str):
raise TypeError("Expected argument 'created' to be a str")
pulumi.set(__self__, "created", created)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if last_modified and not isinstance(last_modified, str):
raise TypeError("Expected argument 'last_modified' to be a str")
pulumi.set(__self__, "last_modified", last_modified)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if outputs and not isinstance(outputs, list):
raise TypeError("Expected argument 'outputs' to be a list")
pulumi.set(__self__, "outputs", outputs)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def created(self) -> str:
"""
The UTC date and time when the Transform was created, in 'YYYY-MM-DDThh:mm:ssZ' format.
"""
return pulumi.get(self, "created")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
An optional verbose description of the Transform.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lastModified")
def last_modified(self) -> str:
"""
The UTC date and time when the Transform was last updated, in 'YYYY-MM-DDThh:mm:ssZ' format.
"""
return pulumi.get(self, "last_modified")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def outputs(self) -> Sequence['outputs.TransformOutputResponse']:
"""
An array of one or more TransformOutputs that the Transform should generate.
"""
return pulumi.get(self, "outputs")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
The system metadata relating to this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetTransformResult(GetTransformResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetTransformResult(
created=self.created,
description=self.description,
id=self.id,
last_modified=self.last_modified,
name=self.name,
outputs=self.outputs,
system_data=self.system_data,
type=self.type)
def get_transform(account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
transform_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTransformResult:
"""
A Transform encapsulates the rules or instructions for generating desired outputs from input media, such as by transcoding or by extracting insights. After the Transform is created, it can be applied to input media by creating Jobs.
:param str account_name: The Media Services account name.
:param str resource_group_name: The name of the resource group within the Azure subscription.
:param str transform_name: The Transform name.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['resourceGroupName'] = resource_group_name
__args__['transformName'] = transform_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:media/v20210601:getTransform', __args__, opts=opts, typ=GetTransformResult).value
return AwaitableGetTransformResult(
created=__ret__.created,
description=__ret__.description,
id=__ret__.id,
last_modified=__ret__.last_modified,
name=__ret__.name,
outputs=__ret__.outputs,
system_data=__ret__.system_data,
type=__ret__.type)
@_utilities.lift_output_func(get_transform)
def get_transform_output(account_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
transform_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetTransformResult]:
"""
A Transform encapsulates the rules or instructions for generating desired outputs from input media, such as by transcoding or by extracting insights. After the Transform is created, it can be applied to input media by creating Jobs.
:param str account_name: The Media Services account name.
:param str resource_group_name: The name of the resource group within the Azure subscription.
:param str transform_name: The Transform name.
"""
...
| [
"[email protected]"
] | |
4aeb5076c559a2d62968ac097e20666249770856 | 03f9b8bdea312636afb4df3737b55cb0cc4b21ff | /CanIWin.py | 3d81f1f782f454808169ef87a967ad9bee42ec2d | [] | no_license | ellinx/LC-python | f29dd17bbe15407ba0d06ad68386efdc9a343b56 | 9190d3d178f1733aa226973757ee7e045b7bab00 | refs/heads/master | 2021-06-01T15:21:24.379811 | 2020-10-29T04:37:07 | 2020-10-29T04:37:07 | 132,704,788 | 1 | 1 | null | 2019-05-15T03:26:11 | 2018-05-09T05:13:26 | Python | UTF-8 | Python | false | false | 2,052 | py | """
In the "100 game," two players take turns adding, to a running total,
any integer from 1..10. The player who first causes the running total to reach or exceed 100 wins.
What if we change the game so that players cannot re-use integers?
For example, two players might take turns drawing from a common pool of
numbers of 1..15 without replacement until they reach a total >= 100.
Given an integer maxChoosableInteger and another integer desiredTotal,
determine if the first player to move can force a win, assuming both players play optimally.
You can always assume that maxChoosableInteger will not be larger than 20 and
desiredTotal will not be larger than 300.
Example
Input:
maxChoosableInteger = 10
desiredTotal = 11
Output:
false
Explanation:
No matter which integer the first player choose, the first player will lose.
The first player can choose an integer from 1 up to 10.
If the first player choose 1, the second player can only choose integers from 2 up to 10.
The second player will win by choosing 10 and get a total = 11, which is >= desiredTotal.
Same with other integers chosen by the first player, the second player will always win.
"""
class Solution:
def canIWin(self, maxChoosableInteger, desiredTotal):
"""
:type maxChoosableInteger: int
:type desiredTotal: int
:rtype: bool
"""
def dfs(nums, diff, mm):
if diff<=0:
return False
key = ",".join(nums)+" "+str(diff)
if key in mm:
return mm[key]
for i,num in enumerate(nums):
if not dfs(nums[:i]+nums[i+1:], diff-int(num), mm):
mm[key] = True
return True
mm[key] = False
return False
if desiredTotal<=1:
return True
if (1+maxChoosableInteger)*maxChoosableInteger//2<desiredTotal:
return False
nums = [ str(i) for i in range(1,maxChoosableInteger+1)]
mm = dict()
return dfs(nums, desiredTotal, mm)
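# Examples (from the problem statement above):
#   Solution().canIWin(10, 11)  # -> False: whatever the first player picks,
#                               #    the second player can reach 11
#   Solution().canIWin(10, 0)   # -> True: desiredTotal <= 1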
| [
"[email protected]"
] | |
6276ed8fbaf501f6fe6c7314d1eee780a50c0c89 | 270d7f88e47683abd55c0191466c80513b2aa9f9 | /tests/test_tta.py | 9d0ba17296509b58febeed4a4f4c0b193716299d | [
"MIT"
] | permissive | williamberrios/pytorch-toolbelt | abdf8e455a4ffc79d2afbc92e80005a821fb97a9 | 4a24e6324b8270d31c08b8b2f667d740b9823377 | refs/heads/master | 2023-07-06T06:35:24.197821 | 2021-08-12T07:47:20 | 2021-08-12T07:47:20 | 400,866,088 | 1 | 0 | MIT | 2021-08-28T18:43:12 | 2021-08-28T18:43:12 | null | UTF-8 | Python | false | false | 4,994 | py | from collections import defaultdict
import cv2
import torch
import numpy as np
import pytest
from torch import nn
from pytorch_toolbelt.inference import tta
from pytorch_toolbelt.modules import GlobalAvgPool2d
from pytorch_toolbelt.utils.torch_utils import to_numpy
from pytorch_toolbelt.zoo import resnet34_unet64_s4
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA is not available")
class NoOp(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
return input
class SumAll(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
return input.sum(dim=[1, 2, 3])
def test_d4_image2mask():
x = torch.rand((4, 3, 224, 224))
model = NoOp()
output = tta.d4_image2mask(model, x)
np.testing.assert_allclose(to_numpy(output), to_numpy(x), atol=1e-6, rtol=1e-6)
def test_d4_image2mask_v2():
x = torch.rand((4, 3, 224, 224))
x_a = tta.d4_image_augment(x)
y = tta.d4_image_deaugment(x_a)
np.testing.assert_allclose(to_numpy(y), to_numpy(x), atol=1e-6, rtol=1e-6)
@torch.no_grad()
@skip_if_no_cuda()
def test_d4_speed():
df = defaultdict(list)
n = 100
model = resnet34_unet64_s4().cuda().eval()
x = torch.rand((4, 3, 224, 224)).float().cuda()
y1 = tta.d4_image2mask(model, x)
y2 = tta.d4_image_deaugment(model(tta.d4_image_augment(x)))
np.testing.assert_allclose(to_numpy(y1), to_numpy(y2), atol=1e-6, rtol=1e-6)
for deterministic in [False, True]:
for benchmark in [False, True]:
for dtype in [torch.float16, torch.float32]:
torch.cuda.empty_cache()
torch.backends.cuda.deterministic = deterministic
torch.backends.cuda.benchmark = benchmark
model = resnet34_unet64_s4().to(dtype).cuda().eval()
speed_v1 = 0
speed_v2 = 0
for i in range(n):
x = torch.rand((4, 3, 224, 224)).to(dtype).cuda(non_blocking=False)
start = cv2.getTickCount()
y = tta.d4_image2mask(model, x)
v = y.sum().item()
finish = cv2.getTickCount()
speed_v1 += finish - start
np.testing.assert_allclose(v, v, atol=1e-6, rtol=1e-6)
for i in range(n):
x = torch.rand((4, 3, 224, 224)).to(dtype).cuda(non_blocking=False)
start = cv2.getTickCount()
x_a = tta.d4_image_augment(x)
x_a = model(x_a)
y = tta.d4_image_deaugment(x_a)
v = y.sum().item()
finish = cv2.getTickCount()
speed_v2 += finish - start
np.testing.assert_allclose(v, v, atol=1e-6, rtol=1e-6)
df["mode"].append("fp16" if dtype == torch.float16 else "fp32")
df["deterministic"].append(deterministic)
df["benchmark"].append(benchmark)
df["d4_image2mask (ms)"].append(1000.0 * speed_v1 / (cv2.getTickFrequency() * n))
df["d4_augment (ms)"].append(1000.0 * speed_v2 / (cv2.getTickFrequency() * n))
import pandas as pd
df = pd.DataFrame.from_dict(df)
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
print(df)
df.to_csv("tta_eval.csv", index=False)
def test_fliplr_image2mask():
x = torch.rand((4, 3, 224, 224))
model = NoOp()
output = tta.fliplr_image2mask(model, x)
np.testing.assert_allclose(to_numpy(output), to_numpy(x), atol=1e-6, rtol=1e-6)
def test_d4_image2label():
x = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8], [9, 0, 1, 2], [3, 4, 5, 6]]).unsqueeze(0).unsqueeze(0).float()
model = SumAll()
output = tta.d4_image2label(model, x)
expected = int(x.sum())
assert int(output) == expected
def test_fliplr_image2label():
x = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8], [9, 0, 1, 2], [3, 4, 5, 6]]).unsqueeze(0).unsqueeze(0).float()
model = SumAll()
output = tta.fliplr_image2label(model, x)
expected = int(x.sum())
assert int(output) == expected
def test_fivecrop_image2label():
x = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8], [9, 0, 1, 2], [3, 4, 5, 6]]).unsqueeze(0).unsqueeze(0).float()
model = SumAll()
output = tta.fivecrop_image2label(model, x, (2, 2))
expected = ((1 + 2 + 5 + 6) + (3 + 4 + 7 + 8) + (9 + 0 + 3 + 4) + (1 + 2 + 5 + 6) + (6 + 7 + 0 + 1)) / 5
assert int(output) == expected
def test_tencrop_image2label():
x = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8], [9, 0, 1, 2], [3, 4, 5, 6]]).unsqueeze(0).unsqueeze(0).float()
model = SumAll()
output = tta.tencrop_image2label(model, x, (2, 2))
expected = (2 * ((1 + 2 + 5 + 6) + (3 + 4 + 7 + 8) + (9 + 0 + 3 + 4) + (1 + 2 + 5 + 6) + (6 + 7 + 0 + 1))) / 10
assert int(output) == expected
| [
"[email protected]"
] | |
2f876f6a85661251f0ba85f749269bb1b2e63c24 | e2efa339cf6fb017e1d1898325b363a69c227409 | /app.py | 6367bcdbeda570b322259488161e00e0d12605db | [] | no_license | lm10pulkit/update_delete | 201b22b3816606640ab22a0f63c7bf2d58ed6295 | c9c935e070f555c006dca00fd0940863fcc0790d | refs/heads/master | 2020-04-15T18:32:53.239716 | 2019-01-09T19:07:40 | 2019-01-09T19:07:40 | 164,915,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,733 | py | from flask import Flask ,session, render_template, request, redirect,url_for,g
from flask_mysqldb import MySQL
from flask_bcrypt import Bcrypt
import os
# intializing the app
app = Flask(__name__)
#secret key
app.secret_key= os.urandom(24)
# setting up database
app.config['MYSQL_HOST']='localhost'
app.config['MYSQL_USER']='root'
app.config['MYSQL_PASSWORD']= ''
app.config['MYSQL_DB']='crud'
mysql = MySQL(app)
#bcrypt for hashing passwords to keep database secure
bcrypt= Bcrypt(app)
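# Note (added): bcrypt is initialised above but not used below; a typical
# pattern for storing/verifying credentials would be
#   pw_hash = bcrypt.generate_password_hash(password).decode('utf-8')
#   bcrypt.check_password_hash(pw_hash, candidate_password)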
@app.route('/',methods=['GET','POST'])
def index():
if request.method=='GET':
return render_template('login.html')
else:
form= request.form
username=form['username']
password=form['password']
if username=='admin' and password=='admin':
session['user']=username
return redirect(url_for('data'))
else:
return redirect(url_for('index'))
@app.route('/list',methods=['GET'])
def data():
if 'user' in session:
cur = mysql.connection.cursor()
resultValue = cur.execute(" select * from employee")
userDetails = cur.fetchall()
return render_template('list.html', employee=userDetails)
else:
return redirect(url_for('index'))
@app.route('/add',methods=['GET','POST'])
def add():
if 'user' in session:
if request.method == 'GET':
return render_template('add.html')
else:
form = request.form
print(form)
firstname = form['firstname']
lastname = form['lastname']
address = form['address']
email = form['email']
contact = form['contact']
argo = [firstname, lastname, address, email, int(contact)]
cur = mysql.connection.cursor()
cur.execute("INSERT INTO employee(firstname,lastname,address,email,contact) values (%s,%s,%s,%s,%s)", argo)
mysql.connection.commit()
cur.close()
return redirect(url_for('data'))
else:
return redirect(url_for('index'))
@app.route('/delete/<id>',methods=['GET'])
def delete(id=None):
if 'user' in session:
query='delete from employee where id = %s'
params=[id]
cur = mysql.connection.cursor()
cur.execute(query,params)
mysql.connection.commit()
cur.close()
return redirect(url_for('data'))
else:
return redirect(url_for('index'))
@app.route('/edit/<id>',methods=['POST','GET'])
def edit(id=None):
if 'user' in session:
if request.method=='POST':
form = request.form
params=[form['firstname'],form['lastname'],form['address'],form['email'],form['contact'],id]
query ='update employee set firstname= %s , lastname = %s , address= %s , email= %s, contact= %s where id = %s '
cur = mysql.connection.cursor()
cur.execute(query, params)
mysql.connection.commit()
cur.close()
return redirect(url_for('data'))
else:
query = 'select * from employee where id = %s'
params=[id]
cur = mysql.connection.cursor()
resultValue=cur.execute(query, params)
if resultValue>0:
userDetails = cur.fetchall()
return render_template('edit.html',user=userDetails[0])
else:
return 'invalid id'
else:
return redirect(url_for('index'))
@app.route('/logout',methods=['GET'])
def logout():
session.pop('user', None)
return redirect(url_for('index'))
if __name__=='__main__':
app.run(debug=True) | [
"[email protected]"
] | |
d5e1939f099cf3e03251eb29a08dac4f28524a6f | 7eedbe5c7d0a0d602d03bfe7d73878adca4c61e4 | /src_old/calculus.py | 20c434bac11cde88912e6138766eb575a8dce5ea | [
"MIT"
] | permissive | johndpope/GeLaTo | da413da5e718ed67b620af313ba99abb87b49c4e | e00577924b7d13f3d5d387583f457caf8065b004 | refs/heads/master | 2020-03-29T23:05:27.981692 | 2018-07-30T15:16:54 | 2018-07-30T15:16:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,757 | py | # coding: utf-8
# TODO add action of diff operators on sympy known functions
import numpy as np
from itertools import groupby
from collections import OrderedDict
#from sympy.core.sympify import sympify
from sympy.simplify.simplify import simplify
from sympy import Symbol
from sympy import Lambda
from sympy import Function
from sympy import bspline_basis
from sympy import lambdify
from sympy import cos
from sympy import sin
from sympy import Rational
from sympy import diff
from sympy import Matrix, ImmutableDenseMatrix
from sympy import latex
from sympy import Integral
from sympy import I as sympy_I
from sympy.core import Basic
from sympy.core.singleton import S
from sympy.simplify.simplify import nsimplify
from sympy.utilities.lambdify import implemented_function
from sympy.matrices.dense import MutableDenseMatrix
from sympy import Mul, Add
from sympy import postorder_traversal
from sympy import preorder_traversal
from sympy.core.expr import Expr
from sympy.core.containers import Tuple
from sympy import Integer, Float
from sympy.core.compatibility import is_sequence
from sympy import Indexed, IndexedBase
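# ...
# NOTE (assumed definitions): ``CalculusFunction`` and ``_coeffs_registery`` are
# referenced below but are neither defined nor imported in this file. Minimal
# stand-in definitions are sketched here so the module is self-contained; the
# original project may define them elsewhere, and the registry may include
# additional constant types.
class CalculusFunction(Function):
    """Marker base class distinguishing calculus operators from plain Functions."""
    pass

_coeffs_registery = (Integer, Float)
# ...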
# ...
class LinearOperator(CalculusFunction):
"""
Examples
========
"""
nargs = None
name = 'Grad'
is_commutative = True
def __new__(cls, *args, **options):
# (Try to) sympify args first
if options.pop('evaluate', True):
r = cls.eval(*args)
else:
r = None
if r is None:
return Basic.__new__(cls, *args, **options)
else:
return r
def __getitem__(self, indices, **kw_args):
if is_sequence(indices):
# Special case needed because M[*my_tuple] is a syntax error.
return Indexed(self, *indices, **kw_args)
else:
return Indexed(self, indices, **kw_args)
@classmethod
def eval(cls, *_args):
"""."""
if not _args:
return
expr = _args[0]
if isinstance(expr, Add):
args = expr.args
args = [cls.eval(a) for a in expr.args]
return Add(*args)
if isinstance(expr, Mul):
coeffs = [a for a in expr.args if isinstance(a, _coeffs_registery)]
vectors = [a for a in expr.args if not(a in coeffs)]
a = S.One
if coeffs:
a = Mul(*coeffs)
b = S.One
if vectors:
b = cls(Mul(*vectors), evaluate=False)
return Mul(a, b)
return cls(expr, evaluate=False)
# ...
# ...
class DifferentialOperator(LinearOperator):
"""
This class is a linear operator that applies the Leibniz formula
Examples
========
"""
coordinate = None
@classmethod
def eval(cls, *_args):
"""."""
expr = _args[0]
if isinstance(expr, Add):
args = expr.args
args = [cls.eval(a) for a in expr.args]
return Add(*args)
if isinstance(expr, Mul):
coeffs = [a for a in expr.args if isinstance(a, _coeffs_registery)]
vectors = [a for a in expr.args if not(a in coeffs)]
c = S.One
if coeffs:
c = Mul(*coeffs)
V = S.One
if vectors:
if len(vectors) == 1:
V = cls(Mul(vectors[0]), evaluate=False)
elif len(vectors) == 2:
a = vectors[0]
b = vectors[1]
fa = cls(a, evaluate=False)
fb = cls(b, evaluate=False)
V = a * fb + fa * b
else:
V = cls(Mul(*vectors), evaluate=False)
return Mul(c, V)
return cls(expr, evaluate=False)
# ...
# ...
class dx(DifferentialOperator):
coordinate = 'x'
grad_index = 0 # index in grad
pass
class dy(DifferentialOperator):
coordinate = 'y'
grad_index = 1 # index in grad
pass
class dz(DifferentialOperator):
coordinate = 'z'
grad_index = 2 # index in grad
pass
_partial_derivatives = (dx, dy, dz)
# ...
# ...
def find_partial_derivatives(expr):
"""
returns all partial derivative expressions
"""
if isinstance(expr, (Add, Mul)):
return find_partial_derivatives(expr.args)
elif isinstance(expr, (list, tuple, Tuple)):
args = []
for a in expr:
args += find_partial_derivatives(a)
return args
elif isinstance(expr, _partial_derivatives):
return [expr]
return []
# ...
# ...
def get_number_derivatives(expr):
"""
returns the number of partial derivatives in expr.
this is still an experimental version, and it assumes that expr is of the
form d(a) where a is a single atom.
"""
n = 0
if isinstance(expr, _partial_derivatives):
assert(len(expr.args) == 1)
n += 1 + get_number_derivatives(expr.args[0])
return n
# ...
# ...
def sort_partial_derivatives(expr):
"""returns the partial derivatives of an expression, sorted.
"""
ls = []
args = find_partial_derivatives(expr)
# # ... Note
# # group by is given the wrong answer for expr =mu * u + dx(u) + dx(dx(u))
# for key, group in groupby(args, lambda x: get_number_derivatives(x)):
# g = [a for a in group]
# for a in group:
# ls.append(a)
# # ...
# ...
d = {}
for a in args:
n = get_number_derivatives(a)
if n in d.keys():
d[n] += [a]
else:
d[n] = [a]
# ...
# ...
if not d:
return []
# ...
# ... sort keys from high to low
keys = np.asarray(list(d.keys()))
keys.sort()
keys = keys[::-1]
# ...
# ... construct a list of partial derivatives from high to low order
ls = []
for k in keys:
ls += d[k]
# ...
return ls
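# Example (sketch): for expr = mu*u + dx(u) + dx(dx(u)) the returned list is
# ordered from highest to lowest derivative order: [dx(dx(u)), dx(u)].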
# ...
# ...
def get_index_derivatives(expr):
"""
"""
coord = ['x','y','z']
d = OrderedDict()
for c in coord:
d[c] = 0
ops = [a for a in preorder_traversal(expr) if isinstance(a, _partial_derivatives)]
for i in ops:
op = type(i)
if isinstance(i, dx):
d['x'] += 1
elif isinstance(i, dy):
d['y'] += 1
elif isinstance(i, dz):
d['z'] += 1
return d
# ...
# ...
def get_atom_derivatives(expr):
"""
"""
if isinstance(expr, _partial_derivatives):
assert(len(expr.args) == 1)
return get_atom_derivatives(expr.args[0])
elif isinstance(expr, _calculus_operators):
raise TypeError('remove this raise later')
else:
return expr
# ...
# ...
class DotBasic(CalculusFunction):
"""
Examples
========
"""
nargs = None
name = 'Dot'
def __new__(cls, *args, **options):
# (Try to) sympify args first
if options.pop('evaluate', True):
r = cls.eval(*args)
else:
r = None
if r is None:
return Basic.__new__(cls, *args, **options)
else:
return r
class Dot_1d(DotBasic):
"""
Examples
========
"""
@classmethod
def eval(cls, *_args):
"""."""
if not _args:
return
if not( len(_args) == 2):
raise ValueError('Expecting two arguments')
u = _args[0]
v = _args[1]
return u * v
class Dot_2d(DotBasic):
"""
Examples
========
"""
@classmethod
def eval(cls, *_args):
"""."""
if not _args:
return
if not( len(_args) == 2):
raise ValueError('Expecting two arguments')
u = _args[0]
v = _args[1]
if isinstance(u, (Matrix, ImmutableDenseMatrix)):
if isinstance(v, (Matrix, ImmutableDenseMatrix)):
raise NotImplementedError('TODO')
else:
return Tuple(u[0,0]*v[0] + u[0,1]*v[1], u[1,0]*v[0] + u[1,1]*v[1])
else:
if isinstance(v, (Matrix, ImmutableDenseMatrix)):
raise NotImplementedError('TODO')
else:
return u[0]*v[0] + u[1]*v[1]
class Dot_3d(DotBasic):
"""
Examples
========
"""
@classmethod
def eval(cls, *_args):
"""."""
if not _args:
return
if not( len(_args) == 2):
raise ValueError('Expecting two arguments')
u = _args[0]
v = _args[1]
if isinstance(u, (Matrix, ImmutableDenseMatrix)):
if isinstance(v, (Matrix, ImmutableDenseMatrix)):
raise NotImplementedError('TODO')
else:
return Tuple(u[0,0]*v[0] + u[0,1]*v[1] + u[0,2]*v[2],
u[1,0]*v[0] + u[1,1]*v[1] + u[1,2]*v[2],
u[2,0]*v[0] + u[2,1]*v[1] + u[2,2]*v[2])
else:
if isinstance(v, (Matrix, ImmutableDenseMatrix)):
raise NotImplementedError('TODO')
else:
return u[0]*v[0] + u[1]*v[1] + u[2]*v[2]
# ...
# ...
class CrossBasic(CalculusFunction):
"""
Examples
========
"""
nargs = None
name = 'Cross'
def __new__(cls, *args, **options):
# (Try to) sympify args first
if options.pop('evaluate', True):
r = cls.eval(*args)
else:
r = None
if r is None:
return Basic.__new__(cls, *args, **options)
else:
return r
class Cross_2d(CrossBasic):
"""
Examples
========
"""
@classmethod
def eval(cls, *_args):
"""."""
if not _args:
return
u = _args[0]
v = _args[1]
return u[0]*v[1] - u[1]*v[0]
class Cross_3d(CrossBasic):
"""
Examples
========
"""
def __getitem__(self, indices, **kw_args):
if is_sequence(indices):
# Special case needed because M[*my_tuple] is a syntax error.
return Indexed(self, *indices, **kw_args)
else:
return Indexed(self, indices, **kw_args)
@classmethod
def eval(cls, *_args):
"""."""
if not _args:
return
u = _args[0]
v = _args[1]
return Tuple(u[1]*v[2] - u[2]*v[1],
u[2]*v[0] - u[0]*v[2],
u[0]*v[1] - u[1]*v[0])
# ...
# ...
class GradBasic(CalculusFunction):
"""
Examples
========
"""
nargs = None
name = 'Grad'
def __new__(cls, *args, **options):
# (Try to) sympify args first
if options.pop('evaluate', True):
r = cls.eval(*args)
else:
r = None
if r is None:
return Basic.__new__(cls, *args, **options)
else:
return r
def __getitem__(self, indices, **kw_args):
if is_sequence(indices):
# Special case needed because M[*my_tuple] is a syntax error.
return Indexed(self, *indices, **kw_args)
else:
return Indexed(self, indices, **kw_args)
class Grad_1d(GradBasic):
"""
Examples
========
"""
@classmethod
def eval(cls, *_args):
"""."""
if not _args:
return
u = _args[0]
return dx(u)
class Grad_2d(GradBasic):
"""
Examples
========
"""
@classmethod
def eval(cls, *_args):
"""."""
if not _args:
return
u = _args[0]
return Tuple(dx(u), dy(u))
class Grad_3d(GradBasic):
"""
Examples
========
"""
@classmethod
def eval(cls, *_args):
"""."""
if not _args:
return
u = _args[0]
return Tuple(dx(u), dy(u), dz(u))
# ...
# ...
class CurlBasic(CalculusFunction):
"""
Examples
========
"""
nargs = None
name = 'Curl'
def __new__(cls, *args, **options):
# (Try to) sympify args first
if options.pop('evaluate', True):
r = cls.eval(*args)
else:
r = None
if r is None:
return Basic.__new__(cls, *args, **options)
else:
return r
def __getitem__(self, indices, **kw_args):
if is_sequence(indices):
# Special case needed because M[*my_tuple] is a syntax error.
return Indexed(self, *indices, **kw_args)
else:
return Indexed(self, indices, **kw_args)
class Curl_2d(CurlBasic):
"""
Examples
========
"""
@classmethod
def eval(cls, *_args):
"""."""
if not _args:
return
u = _args[0]
return Tuple( dy(u),
-dx(u))
class Curl_3d(CurlBasic):
"""
Examples
========
"""
@classmethod
def eval(cls, *_args):
"""."""
if not _args:
return
u = _args[0]
return Tuple(dy(u[2]) - dz(u[1]),
dz(u[0]) - dx(u[2]),
dx(u[1]) - dy(u[0]))
# ...
# ...
class Rot_2d(CalculusFunction):
"""
Examples
========
"""
nargs = None
name = 'Grad'
def __new__(cls, *args, **options):
# (Try to) sympify args first
if options.pop('evaluate', True):
r = cls.eval(*args)
else:
r = None
if r is None:
return Basic.__new__(cls, *args, **options)
else:
return r
def __getitem__(self, indices, **kw_args):
if is_sequence(indices):
# Special case needed because M[*my_tuple] is a syntax error.
return Indexed(self, *indices, **kw_args)
else:
return Indexed(self, indices, **kw_args)
@classmethod
def eval(cls, *_args):
"""."""
if not _args:
return
u = _args[0]
return dy(u[0]) - dx(u[1])
# ...
# ...
class DivBasic(CalculusFunction):
"""
Examples
========
"""
nargs = None
name = 'Div'
def __new__(cls, *args, **options):
# (Try to) sympify args first
if options.pop('evaluate', True):
r = cls.eval(*args)
else:
r = None
if r is None:
return Basic.__new__(cls, *args, **options)
else:
return r
def __getitem__(self, indices, **kw_args):
if is_sequence(indices):
# Special case needed because M[*my_tuple] is a syntax error.
return Indexed(self, *indices, **kw_args)
else:
return Indexed(self, indices, **kw_args)
class Div_1d(DivBasic):
"""
Examples
========
"""
@classmethod
def eval(cls, *_args):
"""."""
if not _args:
return
u = _args[0]
return dx(u)
class Div_2d(DivBasic):
"""
Examples
========
"""
@classmethod
def eval(cls, *_args):
"""."""
if not _args:
return
u = _args[0]
return dx(u[0]) + dy(u[1])
class Div_3d(DivBasic):
"""
Examples
========
"""
@classmethod
def eval(cls, *_args):
"""."""
if not _args:
return
u = _args[0]
return dx(u[0]) + dy(u[1]) + dz(u[2])
# ...
# ...
_coord_registery = ['x', 'y', 'z']
# ...
# ...
_operators_1d = [Dot_1d,
Grad_1d, Div_1d]
_operators_2d = [Dot_2d, Cross_2d,
Grad_2d, Curl_2d, Rot_2d, Div_2d]
_operators_3d = [Dot_3d, Cross_3d,
Grad_3d, Curl_3d, Div_3d]
# ...
# ... generic operators
class GenericFunction(CalculusFunction):
def __getitem__(self, indices, **kw_args):
if is_sequence(indices):
# Special case needed because M[*my_tuple] is a syntax error.
return Indexed(self, *indices, **kw_args)
else:
return Indexed(self, indices, **kw_args)
class Dot(GenericFunction):
pass
class Cross(GenericFunction):
pass
class Grad(GenericFunction):
pass
class Curl(GenericFunction):
pass
class Rot(GenericFunction):
pass
class Div(GenericFunction):
pass
_generic_ops = (Dot, Cross,
Grad, Curl, Rot, Div)
# ...
# ... alias for ufl compatibility
cross = Cross
dot = Dot
Inner = Dot # TODO do we need to add the Inner class Function?
inner = Inner
grad = Grad
curl = Curl
rot = Rot
div = Div
_calculus_operators = (Grad, Dot, Inner, Cross, Rot, Curl, Div)
# ...
# ...
def partial_derivative_as_symbol(expr, name=None, dim=None):
"""Returns a Symbol from a partial derivative expression."""
if not isinstance(expr, _partial_derivatives):
raise TypeError('Expecting a partial derivative expression')
index = get_index_derivatives(expr)
var = get_atom_derivatives(expr)
if not isinstance(var, (Symbol, Indexed)):
        raise TypeError('Expecting a Symbol or Indexed, got {}'.format(type(var)))
code = ''
for k,n in list(index.items()):
code += k*n
if var.is_Indexed:
if name is None:
name = var.base
indices = ''.join('{}'.format(i) for i in var.indices)
name = '{name}_{code}'.format(name=name, code=code)
shape = None
if dim:
shape = [dim]
return IndexedBase(name, shape=shape)[indices]
else:
if name is None:
name = var.name
name = '{name}_{code}'.format(name=name, code=code)
return Symbol(name)
# ...
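# Minimal usage sketch (illustrative only; exact repr and term ordering may vary):
#
#     x, y = Symbol('x'), Symbol('y')
#     u = Function('u')(x, y)
#     Grad_2d(u)            # -> Tuple(dx(u), dy(u))
#     Div_2d(Grad_2d(u))    # -> dx(dx(u)) + dy(dy(u))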
| [
"[email protected]"
] | |
3ad6b6e4e9387b3b9cc5855347a729c5a5d8be58 | 49caef1f93bd4673530e0a4c54c59028fb7b54e6 | /npg7/web_printscreen_zb/controllers.py | f5da2a2deb809c478d12f54592606a9e2f4de36e | [] | no_license | slevenhagen/addons-extra7.0 | 7622024198c0cf637f3f4767eb2b955532af3710 | 85611a86a0e1522fd88b5e6fbb217f425c4ae12d | refs/heads/master | 2020-03-17T14:12:42.082766 | 2018-05-16T13:02:05 | 2018-05-16T13:02:05 | 133,663,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,142 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2013 ZestyBeanz Technologies Pvt. Ltd.
# (http://wwww.zbeanztech.com)
# [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
try:
import json
except ImportError:
import simplejson as json
import web.http as openerpweb
from web.controllers.main import ExcelExport
from web.controllers.main import Export
import re
from cStringIO import StringIO
from lxml import etree
import trml2pdf
import time, os
import locale
import openerp.tools as tools
try:
import xlwt
except ImportError:
xlwt = None
class ZbExcelExport(ExcelExport):
_cp_path = '/web/export/zb_excel_export'
def from_data(self, fields, rows):
workbook = xlwt.Workbook()
worksheet = workbook.add_sheet('Sheet 1')
style = xlwt.easyxf('align: wrap yes')
font = xlwt.Font()
font.bold = True
style.font = font
ignore_index = []
count = 0
for i, fieldname in enumerate(fields):
if fieldname.get('header_data_id', False):
field_name = fieldname.get('header_name', '')
worksheet.write(0, i-count, field_name, style)
worksheet.col(i).width = 8000
else:
count += 1
ignore_index.append(i)
style = xlwt.easyxf('align: wrap yes')
bold_style = xlwt.easyxf('align: wrap yes')
font = xlwt.Font()
font.bold = True
bold_style.font = font
for row_index, row in enumerate(rows):
count = 0
for cell_index, cell_value in enumerate(row):
if cell_index not in ignore_index:
cell_style = style
if cell_value.get('bold', False):
cell_style = bold_style
cellvalue = cell_value.get('data', '')
if isinstance(cellvalue, basestring):
cellvalue = re.sub("\r", " ", cellvalue)
if cell_value.get('number', False) and cellvalue:
cellvalue = float(cellvalue)
if cellvalue is False: cellvalue = None
worksheet.write(row_index + 1, cell_index - count, cellvalue, cell_style)
else:
count += 1
fp = StringIO()
workbook.save(fp)
fp.seek(0)
data = fp.read()
fp.close()
return data
@openerpweb.httprequest
def index(self, req, data, token):
data = json.loads(data)
return req.make_response(
self.from_data(data.get('headers', []), data.get('rows', [])),
headers=[
('Content-Disposition', 'attachment; filename="%s"'
% data.get('model', 'Export.xls')),
('Content-Type', self.content_type)
],
cookies={'fileToken': token}
)
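# PDF export: from_data() assembles an RML "report" document (config, header and
# row nodes), applies the base addon's custom_new.xsl stylesheet and renders the
# result with trml2pdf.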
class ExportPdf(Export):
_cp_path = '/web/export/zb_pdf'
fmt = {
'tag': 'pdf',
'label': 'PDF',
'error': None
}
@property
def content_type(self):
return 'application/pdf'
def filename(self, base):
return base + '.pdf'
def from_data(self, uid, fields, rows, company_name):
pageSize=[210.0,297.0]
new_doc = etree.Element("report")
config = etree.SubElement(new_doc, 'config')
def _append_node(name, text):
n = etree.SubElement(config, name)
n.text = text
_append_node('date', time.strftime(str(locale.nl_langinfo(locale.D_FMT).replace('%y', '%Y'))))
_append_node('PageSize', '%.2fmm,%.2fmm' % tuple(pageSize))
_append_node('PageWidth', '%.2f' % (pageSize[0] * 2.8346,))
_append_node('PageHeight', '%.2f' %(pageSize[1] * 2.8346,))
_append_node('PageFormat', 'a4')
_append_node('header-date', time.strftime(str(locale.nl_langinfo(locale.D_FMT).replace('%y', '%Y'))))
_append_node('company', company_name)
        skip_index = []
header = etree.SubElement(new_doc, 'header')
i = 0
for f in fields:
if f.get('header_data_id', False):
value = f.get('header_name', "")
field = etree.SubElement(header, 'field')
field.text = tools.ustr(value)
else:
skip_index.append(i)
i += 1
lines = etree.SubElement(new_doc, 'lines')
for row_lines in rows:
node_line = etree.SubElement(lines, 'row')
j = 0
for row in row_lines:
if not j in skip_index:
para = "yes"
tree = "no"
value = row.get('data', '')
if row.get('bold', False):
para = "group"
if row.get('number', False):
tree = "float"
col = etree.SubElement(node_line, 'col', para=para, tree=tree)
col.text = tools.ustr(value)
j += 1
transform = etree.XSLT(
etree.parse(os.path.join(tools.config['root_path'],
'addons/base/report/custom_new.xsl')))
rml = etree.tostring(transform(new_doc))
self.obj = trml2pdf.parseNode(rml, title='Printscreen')
return self.obj
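# HTTP endpoint: receives the JSON payload produced client-side and streams the
# rendered PDF back to the browser as an attachment.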
class ZbPdfExport(ExportPdf):
_cp_path = '/web/export/zb_pdf_export'
@openerpweb.httprequest
def index(self, req, data, token):
data = json.loads(data)
uid = data.get('uid', False)
return req.make_response(self.from_data(uid, data.get('headers', []), data.get('rows', []),
data.get('company_name','')),
headers=[('Content-Disposition',
'attachment; filename=PDF Export'),
('Content-Type', self.content_type)],
cookies={'fileToken': int(token)})
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | [
"[email protected]"
] | |
19edfb5d48f61044424ab6c2a3dd832edbd0612a | daae0cf103b6c9f26065f7546a7dc79281fc0bde | /16/3.py | 297df94c2bd4b2df86a41fbc8e26d6952e1e12d4 | [] | no_license | oc0de/pyEPI | 97a5d4db91d5459f407c9d414fc999de56885124 | 2b7cedecdd5b8665ab4f1ca4762a3fd5adcc9864 | refs/heads/master | 2021-09-05T09:32:53.646441 | 2018-01-26T03:58:10 | 2018-01-26T03:58:10 | 119,003,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | def number_of_ways(n, m):
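    # Count the monotone paths from the top-left to the bottom-right corner of an
    # n x m grid, moving only right or down, memoising intermediate counts on (x, y).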
cache = {}
def helper(x,y):
if x == y == 0: return 1
if (x,y) not in cache:
ways_top = 0 if x == 0 else helper(x-1, y)
ways_left = 0 if y == 0 else helper(x, y-1)
cache[(x,y)] = ways_top + ways_left
return cache[(x,y)]
return helper(n-1, m-1)
print(number_of_ways(5, 5))  # 70 == C(8, 4) paths in a 5 x 5 grid
| [
"[email protected]"
] | |
70429f73bbca6c8c28bbffeb622ee490018c69d8 | 95f21bdadb48a25321f76980ba72887255033343 | /torch/fx/graph.py | 0975962ff45a803dd2244f6ce861e2ca16d18f36 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | chen-chentao/pytorch | e4f5215270aeb25c61ec700c4e142962ac1e56de | caa377f546306f8885fba1df230ae4db91dea2a4 | refs/heads/master | 2023-03-08T06:26:15.628237 | 2021-02-24T00:29:34 | 2021-02-24T00:32:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39,433 | py | from .node import Node, Argument, Target, map_arg, _type_repr, _get_qualified_name
from typing import Callable, Any, List, Dict, NamedTuple, Optional, Tuple, Set, FrozenSet
from dataclasses import dataclass
from contextlib import contextmanager
import torch
import keyword
import re
import builtins
import math
# Mapping of builtins to their `typing` equivalent.
_origin_type_map = {
list: List,
dict: Dict,
set: Set,
frozenset: FrozenSet,
tuple: Tuple,
}
class _CustomBuiltin(NamedTuple):
"""Additional objs that we add to every graph's globals.
The repr() for some standard library objects is not valid Python code without
an import. For common objects of this sort, we bundle them in the globals of
every FX graph.
"""
# How to import this object from the standard library.
import_str: str
# The actual object, produced from that import string.
obj: Any
_custom_builtins: Dict[str, _CustomBuiltin] = {}
def _register_custom_builtin(name: str, import_str: str, obj: Any):
_custom_builtins[name] = _CustomBuiltin(import_str, obj)
_register_custom_builtin('inf', 'from math import inf', math.inf)
_register_custom_builtin('nan', 'from math import nan', math.nan)
_register_custom_builtin('NoneType', 'NoneType = type(None)', type(None))
_register_custom_builtin('torch', 'import torch', torch)
def _is_magic(x: str) -> bool:
return x.startswith('__') and x.endswith('__')
def _snake_case(s: str) -> str:
"""
Transforms the given string ``s`` to a Python-style variable name
Examples:
``mod.snake_case`` -> ``mod.snake_case``
``mod.pascalCase``-> ``mod.pascal_case``
``mod.ALL_CAPS`` -> ``mod.all_caps``
"""
chars = []
prev_lower = False
for c in s:
if prev_lower and c.isupper():
chars.append('_')
chars.append(c.lower())
prev_lower = c.islower()
return ''.join(chars)
def _is_from_torch(obj: Any) -> bool:
module_name = getattr(obj, '__module__', None)
if module_name is not None:
base_module = module_name.partition('.')[0]
return base_module == 'torch'
name = getattr(obj, '__name__', None)
    # exclude the bare name 'torch' itself, since torch.torch.torch... also resolves to the module
if name is not None and name != 'torch':
for guess in [torch, torch.nn.functional]:
if getattr(guess, name, None) is obj:
return True
return False
class _Namespace:
"""A context for associating names uniquely with objects.
The following invariants are enforced:
- Each object gets a single name.
- Each name is unique within a given namespace.
- Names generated do not shadow builtins, unless the object is indeed that builtin.
"""
def __init__(self):
self._obj_to_name: Dict[Any, str] = {}
self._unassociated_names = set()
self._used_names: Dict[str, int] = {}
def create_name(self, candidate: str, obj: Optional[Any]) -> str:
"""Create a unique name.
Arguments:
candidate: used as the basis for the unique name, relevant to the user.
obj: If not None, an object that will be associated with the unique name.
"""
if obj is not None and obj in self._obj_to_name:
return self._obj_to_name[obj]
# delete all characters that are illegal in a Python identifier
candidate = re.sub('[^0-9a-zA-Z_]+', '_', candidate)
if candidate[0].isdigit():
candidate = f'_{candidate}'
while candidate in self._used_names or self._is_illegal_name(candidate, obj):
match = re.match(r"(.*)_(\d+)$", candidate)
if match is None:
candidate = candidate + '_1'
else:
base, num = match.group(1, 2)
candidate = f'{base}_{int(num) + 1}'
self._used_names.setdefault(candidate)
if obj is None:
self._unassociated_names.add(candidate)
else:
self._obj_to_name[obj] = candidate
return candidate
def associate_name_with_obj(self, name: str, obj: Any):
"""Associate a unique name with an object.
Neither `name` nor `obj` should be associated already.
"""
assert obj not in self._obj_to_name
assert name in self._unassociated_names
self._obj_to_name[obj] = name
self._unassociated_names.remove(name)
def _is_illegal_name(self, name: str, obj: Any) -> bool:
# 1. keywords are never allowed as names.
if name in keyword.kwlist:
return True
# 2. Can't shadow a builtin name, unless you *are* that builtin.
if name in builtins.__dict__:
return obj is not builtins.__dict__[name]
# 3. Can't shadow our custom builtins either
if name in _custom_builtins:
return obj is not _custom_builtins[name].obj
return False
@dataclass
class PythonCode:
"""Represents all the information necessary to exec or save a graph as Python code."""
# Python source code for the forward function definition.
src: str
    # Values in global scope during execution of `src`.
globals: Dict[str, Any]
def _format_args(args: Tuple[Argument, ...], kwargs: Dict[str, Argument]) -> str:
args_s = ', '.join(repr(a) for a in args)
kwargs_s = ', '.join(f'{k} = {repr(v)}' for k, v in kwargs.items())
if args_s and kwargs_s:
return f'{args_s}, {kwargs_s}'
return args_s or kwargs_s
def _format_target(base: str, target: str) -> str:
elems = target.split('.')
r = base
for e in elems:
if not e.isidentifier():
r = f'getattr({r}, "{e}")'
else:
r = f'{r}.{e}'
return r
class _InsertPoint:
def __init__(self, graph, new_insert):
self.graph = graph
self.orig_insert, graph._insert = graph._insert, new_insert
def __enter__(self):
pass
def __exit__(self, type, value, tb):
self.graph._insert = self.orig_insert
class _node_list:
def __init__(self, graph: 'Graph', direction: str = '_next'):
assert direction in ['_next', '_prev']
self.graph = graph
self.direction = direction
def __len__(self):
return self.graph._len
def __iter__(self):
root, direction = self.graph._root, self.direction
cur = getattr(root, direction)
while cur is not root:
if not cur._erased:
yield cur
cur = getattr(cur, direction)
def __reversed__(self):
return _node_list(self.graph, '_next' if self.direction == '_prev' else '_prev')
class Graph:
"""
``Graph`` is the main data structure used in the FX Intermediate Representation.
It consists of a series of ``Node`` s, each representing callsites (or other
syntactic constructs). The list of ``Node`` s, taken together, constitute a
valid Python function.
For example, the following code
.. code-block:: python
import torch
import torch.fx
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return torch.topk(torch.sum(self.linear(x + self.linear.weight).relu(), dim=-1), 3)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
Will produce the following Graph::
print(gm.graph)
.. code-block:: text
graph(x):
%linear_weight : [#users=1] = self.linear.weight
%add_1 : [#users=1] = call_function[target=operator.add](args = (%x, %linear_weight), kwargs = {})
%linear_1 : [#users=1] = call_module[target=linear](args = (%add_1,), kwargs = {})
%relu_1 : [#users=1] = call_method[target=relu](args = (%linear_1,), kwargs = {})
%sum_1 : [#users=1] = call_function[target=torch.sum](args = (%relu_1,), kwargs = {dim: -1})
%topk_1 : [#users=1] = call_function[target=torch.topk](args = (%sum_1, 3), kwargs = {})
return topk_1
For the semantics of operations represented in the ``Graph``, please see :class:`Node`.
"""
def __init__(self):
"""
Construct an empty Graph.
"""
self._root : Node = Node(self, '', 'root', '', (), {})
self._used_names : Dict[str, int] = {} # base name -> number
self._insert = self._root.prepend
self._len = 0
self._graph_namespace = _Namespace()
@property
def nodes(self) -> _node_list:
"""
Get the list of Nodes that constitute this Graph.
Note that this ``Node`` list representation is a doubly-linked list. Mutations
during iteration (e.g. delete a Node, add a Node) are safe.
Returns:
A doubly-linked list of Nodes. Note that ``reversed`` can be called on
this list to switch iteration order.
"""
return _node_list(self)
def graph_copy(self, g : 'Graph', val_map : Dict[Node, Node]) -> 'Optional[Argument]':
"""
Copy all nodes from a given graph into ``self``.
Args:
g (Graph): The source graph from which to copy Nodes.
val_map (Dict[Node, Node]): a dictionary that will be populated with a mapping
from nodes in ``g`` to nodes in ``self``. Note that ``val_map`` can be passed
in with values in it already to override copying of certain values.
Returns:
The value in ``self`` that is now equivalent to the output value in ``g``,
if ``g`` had an ``output`` node. ``None`` otherwise.
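
        Example (illustrative sketch, mirroring the ``__deepcopy__`` implementation
        below; ``g`` is an existing ``Graph``)::

            new_graph = torch.fx.Graph()
            val_map : Dict[Node, Node] = {}
            output_val = new_graph.graph_copy(g, val_map)
            new_graph.output(output_val)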
"""
for node in g.nodes:
if node in val_map:
continue
if node.op == 'output':
rv = map_arg(node.args[0], lambda n: val_map[n])
return rv
val_map[node] = self.node_copy(node, lambda n : val_map[n])
return None
def __deepcopy__(self, memo=None) -> 'Graph':
"""
Explicitly implement __deepcopy__ to prevent excessive recursion depth
from the default implementation. This uses graph_copy to copy the nodes
in an iterative way, rather than recursive. It also populates the
memoization table to prevent unnecessary copies (e.g. references to
        nodes or other parts of the Graph from a custom GraphModule implementation).
"""
memo = memo if memo else {}
g = Graph()
output_val = g.graph_copy(self, val_map=memo)
g.output(output_val)
return g
def create_node(self, op: str, target: 'Target',
args: Optional[Tuple['Argument', ...]] = None,
kwargs: Optional[Dict[str, 'Argument']] = None,
name: Optional[str] = None,
type_expr: Optional[Any] = None) -> Node:
"""
Create a ``Node`` and add it to the ``Graph`` at the current insert-point.
Note that the current insert-point can be set via :meth:`Graph.inserting_before`
and :meth:`Graph.inserting_after`.
Args:
op (str): the opcode for this Node. One of 'call_function', 'call_method', 'get_attr',
'call_module', 'placeholder', or 'output'. The semantics of these opcodes are
described in the ``Graph`` docstring.
args (Optional[Tuple[Argument, ...]]): is a tuple of arguments to this node.
kwargs (Optional[Dict[str, Argument]]): the kwargs of this Node
name (Optional[str]): an optional string name for the ``Node``.
This will influence the name of the value assigned to in the
Python generated code.
type_expr (Optional[Any]): an optional type annotation representing the
Python type the output of this node will have.
Returns:
The newly-created and inserted node.
"""
assert op in ('call_function', 'call_method', 'get_attr', 'call_module', 'placeholder', 'output')
args = () if args is None else args
kwargs = {} if kwargs is None else kwargs
assert isinstance(args, tuple), "args must be a tuple"
assert isinstance(kwargs, dict), "kwargs must be a dict"
candidate = name if name is not None else self._target_to_str(target)
name = self._graph_namespace.create_name(candidate, None)
n = Node(self, name, op, target, args, kwargs, type_expr)
self._graph_namespace.associate_name_with_obj(name, n)
self._insert(n)
self._len += 1
return n
def erase_node(self, to_erase : Node) -> None:
"""
Erases a ``Node`` from the ``Graph``. Throws an exception if
there are still users of that node in the ``Graph``.
Args:
to_erase (Node): The ``Node`` to erase from the ``Graph``.
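
        Example (sketch; a node can only be erased once it has no remaining users,
        e.g. after ``to_erase.replace_all_uses_with(replacement)``)::

            graph.erase_node(to_erase)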
"""
if len(to_erase.users) > 0:
raise RuntimeError(f'Tried to erase Node {to_erase} but it still had {len(to_erase.users)} '
f'users in the graph: {to_erase.users}!')
to_erase._remove_from_list()
to_erase._erased = True # iterators may retain handles to erased nodes
self._len -= 1
# Null out this Node's argument nodes so that the Nodes referred to
# can update their ``users`` accordingly
new_args = map_arg(to_erase.args, lambda n: None)
assert isinstance(new_args, tuple)
to_erase.args = new_args
new_kwargs = map_arg(to_erase.kwargs, lambda n: None)
assert isinstance(new_kwargs, dict)
to_erase.kwargs = new_kwargs
def inserting_before(self, n: Optional[Node] = None):
"""Set the point at which create_node and companion methods will insert into the graph.
When used within a 'with' statement, this will temporary set the insert point and
then restore it when the with statement exits::
with g.inserting_before(n):
... # inserting before node n
... # insert point restored to what it was previously
g.inserting_before(n) # set the insert point permanently
Args:
n (Optional[Node]): The node before which to insert. If None this will insert before
the beginning of the entire graph.
Returns:
A resource manager that will restore the insert point on ``__exit__``.
"""
if n is None:
return self.inserting_after(self._root)
assert n.graph == self, "Node to insert before is not in graph."
return _InsertPoint(self, n.prepend)
def inserting_after(self, n: Optional[Node] = None):
"""Set the point at which create_node and companion methods will insert into the graph.
When used within a 'with' statement, this will temporary set the insert point and
then restore it when the with statement exits::
with g.inserting_after(n):
... # inserting after node n
... # insert point restored to what it was previously
g.inserting_after(n) # set the insert point permanently
Args:
            n (Optional[Node]): The node after which to insert. If None this will insert after
the beginning of the entire graph.
Returns:
A resource manager that will restore the insert point on ``__exit__``.
"""
if n is None:
return self.inserting_before(self._root)
assert n.graph == self, "Node to insert after is not in graph."
return _InsertPoint(self, n.append)
# sugar for create_node when you know the op
def placeholder(self, name: str, type_expr: Optional[Any] = None) -> Node:
"""
Insert a ``placeholder`` node into the Graph. A ``placeholder`` represents
a function input.
Args:
name (str): A name for the input value. This corresponds to the name
of the positional argument to the function this ``Graph`` represents.
type_expr (Optional[Any]): an optional type annotation representing the
Python type the output of this node will have. This is needed in some
cases for proper code generation (e.g. when the function is used
subsequently in TorchScript compilation).
.. note::
The same insertion point and type expression rules apply for this method
as ``Graph.create_node``.
"""
return self.create_node('placeholder', name, type_expr=type_expr)
def get_attr(self, qualified_name: str, type_expr: Optional[Any] = None) -> Node:
"""
Insert a ``get_attr`` node into the Graph. A ``get_attr`` ``Node`` represents the
fetch of an attribute from the ``Module`` hierarchy.
Args:
qualified_name (str): the fully-qualified name of the attribute to be retrieved.
For example, if the traced Module has a submodule named ``foo``, which has a
submodule named ``bar``, which has an attribute named ``baz``, the qualified
name ``foo.bar.baz`` should be passed as ``qualified_name``.
type_expr (Optional[Any]): an optional type annotation representing the
Python type the output of this node will have.
Returns:
The newly-created and inserted ``get_attr`` node.
.. note::
The same insertion point and type expression rules apply for this method
as ``Graph.create_node``.
"""
return self.create_node('get_attr', qualified_name, type_expr=type_expr)
def call_module(self,
module_name: str,
args: Optional[Tuple['Argument', ...]] = None,
kwargs: Optional[Dict[str, 'Argument']] = None,
type_expr: Optional[Any] = None) -> Node:
"""
Insert a ``call_module`` ``Node`` into the ``Graph``. A ``call_module`` node
represents a call to the forward() function of a ``Module`` in the ``Module``
hierarchy.
Args:
module_name (str): The qualified name of the ``Module`` in the ``Module``
hierarchy to be called. For example, if the traced ``Module`` has a
submodule named ``foo``, which has a submodule named ``bar``, the
qualified name ``foo.bar`` should be passed as ``module_name`` to
call that module.
args (Optional[Tuple[Argument, ...]]): The positional arguments to be passed
to the called method. Note that this should *not* include a ``self`` argument.
kwargs (Optional[Dict[str, Argument]]): The keyword arguments to be passed
to the called method
type_expr (Optional[Any]): an optional type annotation representing the
Python type the output of this node will have.
Returns:
The newly-created and inserted ``call_module`` node.
.. note::
The same insertion point and type expression rules apply for this method
as :meth:`Graph.create_node`.
"""
return self.create_node('call_module', module_name, args, kwargs, type_expr=type_expr)
def call_method(self,
method_name: str,
args: Optional[Tuple['Argument', ...]] = None,
kwargs: Optional[Dict[str, 'Argument']] = None,
type_expr: Optional[Any] = None) -> Node:
"""
Insert a ``call_method`` ``Node`` into the ``Graph``. A ``call_method`` node
represents a call to a given method on the 0th element of ``args``.
Args:
method_name (str): The name of the method to apply to the self argument.
For example, if args[0] is a ``Node`` representing a ``Tensor``,
then to call ``relu()`` on that ``Tensor``, pass ``relu`` to ``method_name``.
args (Optional[Tuple[Argument, ...]]): The positional arguments to be passed
to the called method. Note that this *should* include a ``self`` argument.
kwargs (Optional[Dict[str, Argument]]): The keyword arguments to be passed
to the called method
type_expr (Optional[Any]): an optional type annotation representing the
Python type the output of this node will have.
Returns:
The newly created and inserted ``call_method`` node.
.. note::
The same insertion point and type expression rules apply for this method
as :meth:`Graph.create_node`.
"""
return self.create_node('call_method', method_name, args, kwargs, type_expr=type_expr)
def call_function(self,
the_function: Callable[..., Any],
args: Optional[Tuple['Argument', ...]] = None,
kwargs: Optional[Dict[str, 'Argument']] = None,
type_expr: Optional[Any] = None) -> Node:
"""
Insert a ``call_function`` ``Node`` into the ``Graph``. A ``call_function`` node
        represents a call to a Python callable, specified by ``the_function``. ``the_function``
        can be any PyTorch operator, Python function, or member of the ``builtins``
        or ``operator`` namespaces.
Args:
the_function (Callable[..., Any]): The function to be called. Can be any PyTorch
operator, Python function, or member of the ``builtins`` or ``operator``
namespaces.
args (Optional[Tuple[Argument, ...]]): The positional arguments to be passed
to the called function.
kwargs (Optional[Dict[str, Argument]]): The keyword arguments to be passed
to the called function
type_expr (Optional[Any]): an optional type annotation representing the
Python type the output of this node will have.
        Returns:
The newly created and inserted ``call_function`` node.
.. note::
The same insertion point and type expression rules apply for this method
as :meth:`Graph.create_node`.
"""
return self.create_node('call_function', the_function, args, kwargs, type_expr=type_expr)
def node_copy(self, node: Node, arg_transform: Callable[[Node], 'Argument'] = lambda x: x) -> Node:
"""
Copy a node from one graph into another. ``arg_transform`` needs to transform arguments from
the graph of node to the graph of self. Example::
# Copying all the nodes in `g` into `new_graph`
g : torch.fx.Graph = ...
            new_graph = torch.fx.Graph()
value_remap = {}
for node in g.nodes:
value_remap[node] = new_graph.node_copy(node, lambda n : value_remap[n])
Args:
node (Node): The node to copy into ``self``.
arg_transform (Callable[[Node], Argument]): A function that transforms
``Node`` arguments in node's ``args`` and ``kwargs`` into the
equivalent argument in ``self``. In the simplest case, this should
retrieve a value out of a table mapping Nodes in the original
graph to ``self``.
"""
args = map_arg(node.args, arg_transform)
kwargs = map_arg(node.kwargs, arg_transform)
assert isinstance(args, tuple)
assert isinstance(kwargs, dict)
return self.create_node(node.op, node.target, args, kwargs, node.name, node.type)
def output(self, result: 'Argument', type_expr: Optional[Any] = None):
"""
Insert an ``output`` ``Node`` into the ``Graph``. An ``output`` node represents
a ``return`` statement in Python code. ``result`` is the value that should
be returned.
Args:
result (Argument): The value to be returned.
type_expr (Optional[Any]): an optional type annotation representing the
Python type the output of this node will have.
.. note::
The same insertion point and type expression rules apply for this method
as ``Graph.create_node``.
"""
return self.create_node(op='output', target='output', args=(result,), type_expr=type_expr)
def _target_to_str(self, target : Target) -> str:
if callable(target):
op = target.__name__
else:
assert isinstance(target, str)
op = target
if _is_magic(op):
op = op[2:-2]
op = _snake_case(op)
return op
def python_code(self, root_module: str) -> PythonCode:
"""
Turn this ``Graph`` into valid Python code.
Args:
root_module (str): The name of the root module on which to look-up
qualified name targets. This is usually 'self'.
Returns:
A PythonCode object, consisting of two fields:
src: the Python source code representing the object
globals: a dictionary of global names in `src` -> the objects that they reference.
"""
# NOTE: [Graph Namespaces]
#
# There are two types of symbols in generated Python source code:
# locals and globals.
# Locals are locally defined by the output of a node in the Graph.
# Globals are references to external objects, like functions or types.
#
# When generating Python code, we need to make sure to name things
# appropriately. In particular:
# - All names should be unique, to avoid weird shadowing bugs.
# - These names need to be consistent, e.g. a object should always be
# referenced by the same name.
#
# To do this, we create a new namespace just for this source. All names
# that get printed must come from this namespace.
#
# Why can't we re-use node.name? Because it was generated within the
# namespace `self._graph_namespace`. In order to provide uniqueness
# over both locals (node.name) *and* globals, we create a completely
# new namespace to put all identifiers in.
namespace = _Namespace()
# Override Node's repr to generate a valid name within our namespace.
# Since repr() is designed to produce a valid Python expression, it
# makes sense to re-use it. This way, it's easy to print something like
# Tuple[Node, Node] by simply calling repr() on it. Node's __repr__ is
# implemented cooperatively to allow this.
def node_repr(n: Node):
return namespace.create_name(n.name, n)
@contextmanager
def override_node_repr(graph: Graph):
orig_repr_fns = {}
for node in graph.nodes:
orig_repr_fns[node] = node._repr_fn
node._repr_fn = node_repr
try:
yield None
finally:
# restore the original repr functions
for node in graph.nodes:
node._repr_fn = orig_repr_fns[node]
with override_node_repr(self):
return self._python_code(root_module, namespace)
def _python_code(self, root_module: str, namespace: _Namespace) -> PythonCode:
free_vars: List[str] = []
body: List[str] = []
globals_: Dict[str, Any] = {}
# Wrap string in list to pass by reference
maybe_return_annotation : List[str] = ['']
def add_global(name_hint: str, obj: Any):
"""Add an obj to be tracked as a global.
We call this for names that reference objects external to the
Graph, like functions or types.
Returns: the global name that should be used to reference 'obj' in generated source.
"""
if _is_from_torch(obj):
# HACK: workaround for how torch custom ops are registered. We
# can't import them like normal modules so they must retain their
# fully qualified name.
return _get_qualified_name(obj)
# normalize the name hint to get a proper identifier
global_name = namespace.create_name(name_hint, obj)
if global_name in globals_:
assert globals_[global_name] is obj
return global_name
globals_[global_name] = obj
return global_name
# Pre-fill the globals table with registered builtins.
for name, (_, obj) in _custom_builtins.items():
add_global(name, obj)
def type_repr(o : Any):
typename = _type_repr(o)
# This is a generic type, e.g. typing.List[torch.Tensor]
if hasattr(o, '__origin__'):
origin_type = _origin_type_map.get(o.__origin__, o.__origin__)
origin_typename = add_global(_type_repr(origin_type), origin_type)
# Assign global names for each of the inner type variables.
args = [type_repr(arg) for arg in o.__args__]
return f'{origin_typename}[{",".join(args)}]'
# Common case: this is a regular module name like 'foo.bar.baz'
return add_global(typename, o)
# Run through reverse nodes and record the first instance of a use
# of a given node. This represents the *last* use of the node in the
# execution order of the program, which we will use to free unused
# values
node_to_last_use : Dict[Node, Node] = {}
user_to_last_uses : Dict[Node, List[Node]] = {}
def register_last_uses(n : Node, user : Node):
if n not in node_to_last_use:
node_to_last_use[n] = user
user_to_last_uses.setdefault(user, []).append(n)
for node in reversed(self.nodes):
map_arg(node.args, lambda n: register_last_uses(n, node))
map_arg(node.kwargs, lambda n: register_last_uses(n, node))
def delete_unused_values(user : Node):
"""
Delete values after their last use. This ensures that values that are
not used in the remainder of the code are freed and the memory usage
of the code is optimal.
"""
if user.op == 'placeholder':
return
if user.op == 'output':
body.append('\n')
return
nodes_to_delete = user_to_last_uses.get(user, [])
if len(nodes_to_delete):
to_delete_str = ' = '.join([repr(n) for n in nodes_to_delete] + ['None'])
body.append(f'; {to_delete_str}\n')
else:
body.append('\n')
def emit_node(node : Node):
if node.op == 'placeholder':
assert isinstance(node.target, str)
maybe_type_annotation = '' if node.type is None else f' : {type_repr(node.type)}'
maybe_default_arg = '' if not node.args else f' = {repr(node.args[0])}'
free_vars.append(f'{node.target}{maybe_type_annotation}{maybe_default_arg}')
raw_name = node.target.replace('*', '')
if raw_name != repr(node):
body.append(f'{repr(node)} = {raw_name}\n')
return
elif node.op == 'call_method':
assert isinstance(node.target, str)
body.append(
f'{repr(node)} = {_format_target(repr(node.args[0]), node.target)}'
f'({_format_args(node.args[1:], node.kwargs)})')
return
elif node.op == 'call_function':
assert callable(node.target)
# pretty print operators
if node.target.__module__ == '_operator' and node.target.__name__ in magic_methods:
assert isinstance(node.args, tuple)
body.append(f'{repr(node)} = {magic_methods[node.target.__name__].format(*(repr(a) for a in node.args))}')
return
qualified_name = _get_qualified_name(node.target)
global_name = add_global(qualified_name, node.target)
if global_name == 'getattr' and \
isinstance(node.args, tuple) and \
isinstance(node.args[1], str) and \
node.args[1].isidentifier():
# pretty print attribute access
body.append(f'{repr(node)} = {_format_target(repr(node.args[0]), node.args[1])}')
return
body.append(f'{repr(node)} = {global_name}({_format_args(node.args, node.kwargs)})')
return
elif node.op == 'call_module':
assert isinstance(node.target, str)
body.append(f'{repr(node)} = {_format_target(root_module, node.target)}({_format_args(node.args, node.kwargs)})')
return
elif node.op == 'get_attr':
assert isinstance(node.target, str)
body.append(f'{repr(node)} = {_format_target(root_module, node.target)}')
return
elif node.op == 'output':
if node.type is not None:
maybe_return_annotation[0] = f" -> {type_repr(node.type)}"
body.append(f'return {repr(node.args[0])}')
return
raise NotImplementedError(f'node: {node.op} {node.target}')
for node in self.nodes:
# NOTE: emit_node does not emit a string with newline. It depends
# on delete_unused_values to append one
emit_node(node)
delete_unused_values(node)
# repr() for inf and nan floating point values aren't parseable by
# python as literals. Explicitly import the names from the ``math`` module.
if len(body) == 0:
# If the Graph has no non-placeholder nodes, no lines for the body
# have been emitted. To continue to have valid Python code, emit a
# single pass statement
body.append('pass\n')
code = ''.join(body)
code = '\n'.join(' ' + line for line in code.split('\n'))
fn_code = f"""
def forward(self, {', '.join(free_vars)}){maybe_return_annotation[0]}:
{code}"""
return PythonCode(fn_code,
globals_)
def __str__(self) -> str:
"""
Print a human-readable (not machine-readable) string representation
of this Graph
"""
placeholder_names : List[str] = []
# This is a one-element array just so ``format_node`` can modify the closed
# over value
maybe_return_typename : List[str] = ['']
node_strs = [node.format_node(placeholder_names) for node in self.nodes]
param_str = ', '.join(placeholder_names)
s = f'graph({param_str}){maybe_return_typename[0]}:'
for node_str in node_strs:
if node_str:
s += '\n ' + node_str
return s
def print_tabular(self):
"""
Prints the intermediate representation of the graph in tabular
format.
"""
try:
from tabulate import tabulate
except ImportError:
            print("`print_tabular` relies on the library `tabulate`, "
                  "which could not be found on this machine. Run `pip "
                  "install tabulate` to install the library.")
            return  # cannot render the table below without `tabulate`
node_specs = [[n.op, n.name, n.target, n.args, n.kwargs]
for n in self.nodes]
print(tabulate(node_specs,
headers=['opcode', 'name', 'target', 'args', 'kwargs']))
def lint(self, root : Optional[torch.nn.Module] = None):
"""
Runs various checks on this Graph to make sure it is well-formed. In
particular:
- Checks Nodes have correct ownership (owned by this graph)
- Checks Nodes appear in topological order
- If ``root`` is provided, checks that targets exist in ``root``
Args:
root (Optional[torch.nn.Module]): The root module with which to check
for targets. This is equivalent to the ``root`` argument that is
passed when constructing a ``GraphModule``.
"""
# Check topo order
def check_arg(arg : Node, n : Optional[Node] = None) -> None:
context_str = f' of Node \'{n}\' ' if n else ' '
if arg.graph is not self:
raise RuntimeError(f'Argument \'{arg}\'{context_str}does not belong to this Graph, '
f'but was used as an argument! If you are copying nodes from another graph, make '
f'sure to use ``arg_transform`` on node_copy() to remap values\n{self}')
if arg not in seen_values:
raise RuntimeError(f'Argument \'{arg}\'{context_str}was used before it has been '
f'defined! Please check that Nodes in the graph are topologically ordered\n{self}')
seen_names : Set[str] = set()
seen_values : Set[Node] = set()
for node in self.nodes:
if node.op not in ['placeholder', 'call_method', 'call_module', 'call_function', 'get_attr', 'output']:
raise RuntimeError(f'Node {node} had unknown opcode {node.op}!')
if node.graph is not self:
raise RuntimeError(f'Node \'{node}\' does not belong to this Graph!')
map_arg(node.args, lambda arg: check_arg(arg, node))
map_arg(node.kwargs, lambda arg: check_arg(arg, node))
seen_values.add(node)
if node.name in seen_names:
raise RuntimeError(f'Node redefined name {node.name}!')
seen_names.add(node.name)
# Check targets are legit
if root:
for node in self.nodes:
if node.op in ['get_attr', 'call_module']:
assert isinstance(node.target, str)
target_atoms = node.target.split('.')
m_itr = root
for i, atom in enumerate(target_atoms):
m_itr = getattr(m_itr, atom, None)
if m_itr is None:
seen_qualname = '.'.join(target_atoms[:i])
raise RuntimeError(f'Node {node} target {node.target} references nonexistent attribute '
f'{atom} of {seen_qualname}')
reflectable_magic_methods = {
'add': '{} + {}',
'sub': '{} - {}',
'mul': '{} * {}',
'floordiv': '{} // {}',
'truediv': '{} / {}',
'div': '{} / {}',
'mod': '{} % {}',
'pow': '{} ** {}',
'lshift': '{} << {}',
'rshift': '{} >> {}',
'and': '{} & {}',
'or': '{} | {}',
'xor': '{} ^ {}',
'getitem': '{}[{}]'
}
magic_methods = dict({
'eq': '{} == {}',
'ne': '{} != {}',
'lt': '{} < {}',
'gt': '{} > {}',
'le': '{} <= {}',
'ge': '{} >= {}',
'pos': '+{}',
'neg': '-{}',
'invert': '~{}'}, **reflectable_magic_methods)
| [
"[email protected]"
] | |
e11630ca7d98b098071e4dbe2e4d7ccbd0e87332 | 525dc175d55c2f5f33f87df6915f3633537da17c | /oas_dev/notebooks/eusaari/02-02-Sizedistrib/02-02-sizedistribution_allstations-sec_and_mod_separately.py | d470894aa30e6884b01c96e3bce822ef6d69d6b6 | [
"CC0-1.0"
] | permissive | sarambl/OAS-DEV | 1b4c020ff862075034536ea38f30a131968791fb | 8dec6d29ef23dee8135bc937cd6ee1ef5b64d304 | refs/heads/master | 2023-04-09T07:59:31.051158 | 2021-10-26T12:20:04 | 2021-10-26T12:20:04 | 310,578,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,442 | py | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.3.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%
import xarray as xr
from oas_dev.util.Nd.sizedist_class_v2 import SizedistributionStation
from oas_dev.util.eusaar_data.eusaar_noresm import compute_all_subsets_percs_flag, get_all_distc_noresm
from useful_scit.util.make_folders import make_folders
# %% [markdown]
# ## Cases:
# %%
# Case names:
cases_sec = ['SECTv21_ctrl_koagD']#, 'SECTv21_ctrl']#,'SECTv11_redSOA_LVnuc','SECTv11_incBVOC']#'PD_SECT_CHC7_diur_ricc']#, 'PD_SECT_CHC7_diurnal']# , 'PD_SECT_CHC7_diur_ricc_incC']
cases_orig = ['noSECTv21_default_dd','noSECTv21_ox_ricc_dd']#'noSECTv11_ctrl']#,'PD_noSECT_nudgeERA_eq20']#'Original eq.20'] # , 'Original eq.18','Original eq.20, 1.5xBVOC','Original eq.20, rednuc']
cases = cases_sec + cases_orig
# %% [markdown]
# ## Settings
# %%
from_t = '2008-01-01'
to_t = '2010-01-01'
nr_of_bins = 5
maxDiameter = 39.6 # 23.6 #e-9
minDiameter = 5.0 # e-9
time_resolution = 'hour'
history_field='.h1.'
# %% [markdown]
# ### EUSAAR subset:
# %%
year_subset='BOTH'
# %% [markdown]
# ## Load datasets:
# %% [markdown]
# ### Models
# %%
dic_finish = {}
for case_name in cases:
#_ds = dic_mod_all[case_name]
ds = get_all_distc_noresm(case_name, from_t, to_t)
dic_finish[case_name] = ds
# %% [markdown]
# ## make sectional not sum of mode and sectional
# %% [markdown]
# for case in cases_sec:
# _ds = dic_finish[case]
# _ds['dNdlogD_mod_mod'] = _ds['dNdlogD_mod'].where(_ds['diameter']>=39.6, other=0)
# _ds['dNdlogD'] = _ds['dNdlogD_sec'] + _ds['dNdlogD_mod_mod']
# dic_finish[case] = _ds
# %%
dic_finish
import numpy as np
for key in cases:
dic_finish[key]['dNdlog10dp'] = dic_finish[key]['dNdlogD']*np.log(10)
# %%
dic_finish['SECTv21_ctrl_koagD']
# %% [markdown]
# ### Eusaar:
# %%
# # %load_ext autoreload
# # %autoreload 2
import numpy as np
from oas_dev.util.eusaar_data import distc_var # import load_var_as_dtframe
import matplotlib.pyplot as plt
from useful_scit.plot import get_cmap_dic
# %%
ds_eusaar = distc_var.get_distc_xarray_all(from_nc=True)
# %%
# select bottom layer
for case in dic_finish.keys():
ds = dic_finish[case]
if 'lev' in ds.coords:
dic_finish[case] = ds.isel(lev=-1)
# %%
ds_eusaar
# %%
ds_eusaar = ds_eusaar.sel(year=year_subset)
# %%
dic_finish['eusaar'] = ds_eusaar
# %% [markdown]
# ### Various functions:
# %%
from oas_dev.constants import collocate_locations, paths_plotsave, collocate_locations, collocate_locations
# %%
coll_ltr = collocate_locations.transpose()
# %%
from useful_scit.util.pd_fix import pd_custom_sort_values
#pd_custom_sort_values(coll_ltr, sorter, dall_c)
#coll_ltr
# %%
dall_c = "Dall'Osto 2018 categories"
sorter = ['North','Center','South (spring)', 'South (winter)', 'Overlap']
def make_new_cat():
"""
Make new category
"""
coll_ltr = collocate_locations.transpose()
td = {
'Low altitude sites (less than 1000 m a.s.l.)': 'LA',
'High altitude sites (over 1000 m a.s.l.)': 'HA'
}
coll_ltr['AC'] = coll_ltr['Altitude category'].apply(lambda x: td[x])
coll_ltr['new_cat'] = coll_ltr['AC'] + ': ' + coll_ltr['Region']
coll_ltr = coll_ltr.sort_values('new_cat', ascending=False)
return coll_ltr
def get_ordered_stations():
coll_ltr = make_new_cat()
coll_ltr=pd_custom_sort_values(coll_ltr, sorter, dall_c)
return coll_ltr.index
list(get_ordered_stations())
# %%
# %%
coll_ltr = collocate_locations.transpose()
# %%
# %%
from oas_dev.data_info import get_nice_name_case
# %%
from useful_scit.plot import get_cmap_dic
from oas_dev.constants import collocate_locations, paths_plotsave, collocate_locations, collocate_locations
from oas_dev.util.plot.colors import get_case_col
def plot_grid(dic_finish, subs = 'TOT', st_ls=None, name='all_stations', ylim=[5,8.8e3],
yscale='linear', plot_sec=True,nr_col=4, figsize=None, ylim_ZEP=[0,500]):
colors_source = get_cmap_dic(dic_finish.keys())
colors_source['EUSAAR']='k'
dic_ds = dic_finish
t_cs =dic_ds[ list(dic_ds.keys())[0]]
st_ls =list(get_ordered_stations())
# st_ls = list(loc_tr[loc_tr['Region']==reg].index)
print(list(st_ls))
if len(st_ls)>nr_col:
nr_row = int(np.ceil(len(st_ls)/nr_col))
else:
nr_row=1
nr_col=len(st_ls)
if figsize is None:
figsize = [10 / 4 * nr_col, 10 / 6 * nr_row]
fig, axs = plt.subplots(nr_row, nr_col , sharex=True,sharey=True, figsize=figsize )
axs_nf = axs
if nr_row>1: axs = axs.flatten()
for station, ax in zip(st_ls, axs):
lines=[]
labels= []
for key in dic_finish.keys():
_ds = dic_finish[key]
#if 'dNdlog10dp_sec' in _ds:
# plt_perc(dic_finish[key]['dNdlog10dp_sec'], station, key,
# color=get_case_col(key),
# ax=ax,
# subs=subs, percs=[16,84], yscale=yscale, ylim=ylim)
## plt_perc(dic_finish[key]['dNdlog10dp_mod'], station, key,
# color=get_case_col(key),
# ax=ax,
# subs=subs, percs=[16,84], yscale=yscale, ylim=ylim)
#else:
plt_perc(dic_finish[key]['dNdlog10dp'], station, key,
color=get_case_col(key),
ax=ax,
subs=subs, percs=[16,84], yscale=yscale, ylim=ylim)
for key in dic_finish.keys():
_ds = dic_finish[key]
#if 'dNdlog10dp_sec' in _ds:
# line =plt_median(dic_finish[key]['dNdlog10dp_sec'], station, key,
# color=get_case_col(key),
# ax=ax,
# subs=subs, percs=[16,84], yscale=yscale, ylim=ylim)
# line =plt_median(dic_finish[key]['dNdlog10dp_mod'], station, key,
# color=get_case_col(key),
# ax=ax,
# subs=subs, percs=[16,84], yscale=yscale, ylim=ylim)
#else:
if 'dNdlog10dp_sec' in _ds:
line =plt_median(dic_finish[key]['dNdlog10dp_sec'].where(dic_finish[key]['dNdlog10dp_sec']>0), station, key,
color=get_case_col(key),
ax=ax,
subs=subs, percs=[16,84], yscale=yscale, ylim=ylim,
plt_kwargs = {'linestyle':'dashed'})
line =plt_median(dic_finish[key]['dNdlog10dp'], station, key,
color=get_case_col(key),
ax=ax,
subs=subs, percs=[16,84], yscale=yscale, ylim=ylim)
lines = lines+line
labels.append(get_nice_name_case(key))
if station=='ZEP':
axins = insert_ZEP(ax)
for key in dic_finish.keys():
plt_perc(dic_finish[key]['dNdlog10dp'], station, key,
color=get_case_col(key),
ax=axins,
subs=subs, percs=[16,84], yscale=yscale, ylim=ylim_ZEP)
for key in dic_finish.keys():
plt_median(dic_finish[key]['dNdlog10dp'], station, key,
color=get_case_col(key),
ax=axins,
subs=subs, percs=[16,84], yscale=yscale, ylim=ylim_ZEP)
if 'dNdlog10dp_sec' in _ds:
plt_median(dic_finish[key]['dNdlog10dp_sec'], station, key,
color=get_case_col(key),
ax=axins,
subs=subs, percs=[16,84], yscale=yscale, ylim=ylim_ZEP)
ax.indicate_inset_zoom(axins,edgecolor='r',)
fix_ins(axins)
if subs=='TOT':
cat = coll_ltr.loc[station, dall_c]
ax.set_title(station)#+' '+cat)
else:
cat = coll_ltr.loc[station, dall_c]
ax.set_title(station+' '+ subs)#+', '+cat)
ax.grid(True)
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(axis=u'y', which=u'both',length=0)
if nr_row>1:
for ii in range(nr_row):
for jj in range(nr_col):
if ii !=nr_row-1:
axs_nf[ii,jj].set_xlabel('')
if jj!=0:
axs_nf[ii,jj].set_ylabel('')
else:
axs_nf[ii,jj].set_ylabel('dN/dlog$_{10}$D [cm$^{-3}$]')
fig.tight_layout()
lgn = fig.legend(lines, labels, bbox_to_anchor=(0, 1., 1, 0.5), # (0, -0.04, 1., .1),
loc='lower center', ncol=4,
# mode="expand",
borderaxespad=0., fontsize=11, frameon=False) # bbox_to_anchor=(0, 1., 1, 0.5))
#fig.legend(lines, labels, bbox_to_anchor=(0,1.,1,0.5),#(0, -0.04, 1., .1),
# loc='lower center', ncol=4,
# #mode="expand",
# borderaxespad=0., fontsize=11, frameon=False)# bbox_to_anchor=(0, 1., 1, 0.5))
fn = paths_plotsave['eusaar'] + '/sizedist/%s_overview_yscale_%s_sec%s_%s.'%(name.replace(' ','-'),yscale, plot_sec, subs)
print(fn)
make_folders(fn)
plt.savefig(fn+'png', bbox_extra_artists=(lgn,), bbox_inches='tight')
plt.savefig(fn+'pdf', bbox_extra_artists=(lgn,), bbox_inches='tight')
plt.show()
# %%
def plt_median(ds, station, label, percs=[5, 95], ax=None, ylim=[1, 8e3], xlim=[5, 5e2],
yscale='linear', color='k', subs='TOT', year='2008', plt_kwargs={}):
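    """Plot the median (50th percentile) size distribution for one station; returns the line handles."""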
if ax is None:
fig, ax= plt.subplots(1)
da = ds.sel(subset=subs)
_da50 = da.sel(station=station, percentile='50th percentile')#
lines = _da50.plot(yscale=yscale,xscale='log', label=label+', %s'%station,
color=color ,
ax=ax,**plt_kwargs)
return lines
def plt_perc(ds, station, label, percs=[5, 95], ax=None, ylim=[1, 8e3], xlim=[5, 5e2],
yscale='linear', color='k', subs='TOT', year='2008'):
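    """Shade the band between the requested percentiles for one station on `ax`."""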
if ax is None:
fig, ax= plt.subplots(1)
da = ds.sel(subset=subs)
_da95 = da.sel(station=station, percentile='%sth percentile'%percs[1])
_da5 = da.sel(station=station, percentile='%sth percentile'%percs[0])
ax.fill_between(da.diameter.values, _da5,_da95, alpha=.2 , color = color)
ax.set_ylim(ylim)
ax.set_xlim(xlim)
return
# %%
def plt_median_perc(ds, station, label, percs=[5, 95], ax=None, ylim=[1, 8e3], xlim=[5, 5e2],
yscale='linear', color='k', subs='TOT', year='2008'):
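    """Plot the median and shade the percentile band for one station; returns the median line handles."""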
if ax is None:
fig, ax= plt.subplots(1)
da = ds.sel(subset=subs)
_da50 = da.sel(station=station, percentile='50th percentile')#
lines = _da50.plot(yscale=yscale,xscale='log', label=label+', %s'%station,color=color , ax=ax)
_da95 = da.sel(station=station, percentile='%sth percentile'%percs[1])
_da5 = da.sel(station=station, percentile='%sth percentile'%percs[0])
ax.fill_between(da.diameter.values, _da5,_da95, alpha=.2 , color = color)
ax.set_ylim(ylim)
ax.set_xlim(xlim)
return lines
# %%
def insert_ZEP(ax):
axins = ax.inset_axes([0.23, 0.2, 0.67, 0.67])#zoomed_inset_axes(ax, zoom=1, loc='upper right')
return axins
def fix_ins(axins):
axins.set_xticklabels('')
axins.grid(False)
axins.yaxis.label.set_color('red')
axins.tick_params(axis='y', colors='red')
# #axins.tick_params(axis=u'both', which=u'both',length=1)
axins.spines['left'].set_color('r')
axins.spines['right'].set_color('r')
axins.spines['bottom'].set_color('r')
axins.spines['top'].set_color('r')
axins.set_title('')
axins.set_xlabel('')
axins.set_ylabel('')
# %%
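# Build a "clean" copy of the model data: keep only points where EUSAAR has valid
# observations (missing data are flagged as -1), so models and observations are
# compared on the same sampling.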
dic_clean={}
_ds_eu = dic_finish['eusaar']
_ds_eu = _ds_eu.where(_ds_eu!=-1).drop('year')
# %%
for key in cases:
dic_clean[key] = dic_finish[key].where(_ds_eu.notnull())
if 'dNdlog10dp_sec' in dic_finish[key]:
dic_clean[key]['dNdlog10dp_sec']=dic_finish[key]['dNdlog10dp_sec']
dic_clean['eusaar'] = _ds_eu
# %%
plot_grid(dic_clean)
# %%
plot_grid(dic_clean,subs = 'SUM', ylim=[0,1e4])
# %%
plot_grid(dic_clean,subs = 'WIN', ylim=[0,5e3])
# %%
plot_grid(dic_clean,subs = 'SPR', ylim=[0,8e3])
# %%
plot_grid(dic_clean,subs = 'AUT', ylim=[0,8e3])
# %%
plot_grid(dic_finish)
# %%
plot_grid(dic_finish,subs = 'SUM', ylim=[0,1e4])
# %%
plot_grid(dic_finish,subs = 'WIN', ylim=[0,8e3])
# %%
plot_grid(dic_finish,subs = 'SPR', ylim=[0,8e3])
# %%
plot_grid(dic_finish,subs = 'AUT', ylim=[0,8e3])
# %%
from oas_dev.constants import collocate_locations
station='ASP'
colors_source = get_cmap_dic(dic_finish.keys())
colors_source['EUSAAR']='k'
for station in dic_finish[cases[0]].coords['station'].values:
fig, axs = plt.subplots(2,2)
seasons = ['SPR', 'SUM','AUT','WIN']
for seas, ax in zip(seasons, axs.flatten()):
for key in dic_finish.keys():
plt_median_perc(dic_clean[key]['dNdlog10dp'], station, key,
color=get_case_col(key),
ax=ax,
subs=seas, percs=[16,84], yscale='log', ylim=[10,1e4])
ax.set_title(station+', '+ seas)
plt.legend()
plt.show()
# %%
from oas_dev.constants import collocate_locations
station='ASP'
colors_source = get_cmap_dic(dic_finish.keys())
colors_source['EUSAAR']='k'
for station in dic_finish[cases[0]].coords['station'].values:
fig, axs = plt.subplots(2,2)
seasons = ['SPR', 'SUM','AUT','WIN']
for seas, ax in zip(seasons, axs.flatten()):
for key in dic_finish.keys():
plt_median_perc(dic_finish[key]['dNdlog10dp'], station, key,
color=get_case_col(key),
ax=ax,
subs=seas, percs=[16,84], yscale='linear', ylim=[10,1e4])
ax.set_title(station+', '+ seas)
plt.legend()
plt.show()
# %%
# %%
# %%
# %%
#for station in dic_finish[cases[0]].coords['station'].values:
fig, axs = plt.subplots(1,2, figsize=[10,5])
period = ['DAY', 'NIG']
for seas, ax in zip(period, axs.flatten()):
for key in dic_finish.keys():
dic_finish[key]['dNdlog10dp'].median('station').sel(subset=seas, percentile='50th percentile').plot(color=get_case_col(key), xscale='log', ax=ax, label=key)
#plt_median_perc(dic_finish[key]['dNdlog10dp'], station, key,
# color=get_case_col(key),
# ax=ax,
# subs=seas, percs=[16,84], yscale='linear')
    ax.set_title('Median over all stations, ' + seas)
plt.legend()
plt.show()
# %%
#for station in dic_finish[cases[0]].coords['station'].values:
fig, axs = plt.subplots(1,4, figsize=[20,5])
period = seasons
for seas, ax in zip(period, axs.flatten()):
for key in dic_finish.keys():
for station in dic_finish[key].station.values:
_plt_da=dic_finish[key]['dNdlog10dp'].sel(subset=seas,station=station, percentile='50th percentile')#
_plt_da.plot(color=get_case_col(key), xscale='log', ax=ax, label=key,
linewidth=.5, alpha=.8)
#plt_median_perc(dic_finish[key]['dNdlog10dp'], station, key,
# color=get_case_col(key),
# ax=ax,
# subs=seas, percs=[16,84], yscale='linear')
    ax.set_title(seas)
ax.set_xlim([4,800])
#plt.legend()
plt.show()
# %%
for station in dic_finish[cases[0]].coords['station'].values:
print(station)
fig, axs = plt.subplots(1,2)
period = ['DAY', 'NIG']
for seas, ax in zip(period, axs.flatten()):
for key in dic_finish.keys():
plt_median_perc(dic_finish[key]['dNdlog10dp'], station, key,
color=get_case_col(key),
ax=ax,
subs=seas, percs=[16,84], yscale='linear')
ax.set_title(station+', '+ seas)
plt.legend()
plt.show()
| [
"[email protected]"
] | |
e0b7367a019a91e2fa1bcd3bff959a74b8b7323a | e8cc4cd00990a4f8a75e538ca68fa77456f37e3c | /telebot/apps.py | f6201278a2003ae19e1031d371370381cf66d2e3 | [] | no_license | devRayanwv/djangoTest | 950b5d54a3a53f52f615e2ed0a99bac975fb0db9 | 71bb9377f70fde5b28c5685e8800c4209f265a9f | refs/heads/master | 2020-04-06T04:28:08.942379 | 2017-02-24T22:20:08 | 2017-02-24T22:20:08 | 82,883,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | from __future__ import unicode_literals
from django.apps import AppConfig
class TelebotConfig(AppConfig):
name = 'telebot'
| [
"[email protected]"
] | |
24d2af17dd3749befa8832fee7ee08d62a1a9063 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /JgYPQrYdivmqN4KKX_18.py | 51bf0ca4c9e57c6e4d4df644268825f4357b96e2 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py |
def BMI(weight, height):
if "kilos" in weight:
fBmi = round(float(weight.split()[0]) / float(height.split()[0])**2,1)
else :
a = (float(weight.split()[0])) * 0.453592
b = (float(height.split()[0])) * 0.0254
fBmi = round(a/b**2,1)
if fBmi < 18.5:
return "{0} Underweight".format(fBmi)
  elif fBmi < 25:
    return "{0} Normal weight".format(fBmi)
  elif fBmi < 30:
    return "{0} Overweight".format(fBmi)
  else:
    return "{0} Obesity".format(fBmi)
| [
"[email protected]"
] | |
9ed3302317bb7901f6b3244ef26fc1ecb990a599 | 5b9f9b4ea1494943e6f7f842df55909599ed1304 | /python/onshape_client/oas/models/bt_shaded_render_document_response.py | 7f8e89af07f0ac165d25afbbf29e6536706ff134 | [] | no_license | jenniferyoung02/onshape-clients | f50534f033428027515b7fc0b801b1caab4d0aec | 8ee31a17d7af32f105b851e45f69fd4a3006e1ba | refs/heads/master | 2020-09-07T06:44:37.682545 | 2019-10-08T18:52:06 | 2019-10-08T18:52:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,884 | py | # coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.104
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class BTShadedRenderDocumentResponse(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'image_data': 'list[str]',
'status': 'BTNodeStatus'
}
attribute_map = {
'image_data': 'imageData',
'status': 'status'
}
def __init__(self, image_data=None, status=None): # noqa: E501
"""BTShadedRenderDocumentResponse - a model defined in OpenAPI""" # noqa: E501
self._image_data = None
self._status = None
self.discriminator = None
if image_data is not None:
self.image_data = image_data
if status is not None:
self.status = status
@property
def image_data(self):
"""Gets the image_data of this BTShadedRenderDocumentResponse. # noqa: E501
:return: The image_data of this BTShadedRenderDocumentResponse. # noqa: E501
:rtype: list[str]
"""
return self._image_data
@image_data.setter
def image_data(self, image_data):
"""Sets the image_data of this BTShadedRenderDocumentResponse.
:param image_data: The image_data of this BTShadedRenderDocumentResponse. # noqa: E501
:type: list[str]
"""
self._image_data = image_data
@property
def status(self):
"""Gets the status of this BTShadedRenderDocumentResponse. # noqa: E501
:return: The status of this BTShadedRenderDocumentResponse. # noqa: E501
:rtype: BTNodeStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this BTShadedRenderDocumentResponse.
:param status: The status of this BTShadedRenderDocumentResponse. # noqa: E501
:type: BTNodeStatus
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BTShadedRenderDocumentResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
262e1d9f54429dd7118716daf6cfbc910a323686 | 4fb9150b08a128571ed4a84897c8c95afb76ccb6 | /healthy/migrations/0002_labdetail.py | e6624ae565d211b7af58232ca3a06dfcfe941dd7 | [] | no_license | eduarde/ChunkyMonkeys | 815feb7f3e6e2085babb61d12f2255ea2cb46ada | 34f30e6aaeef6af15aa12e6d599f55d67c6fb7d7 | refs/heads/master | 2021-07-09T21:30:49.084584 | 2016-12-05T10:42:04 | 2016-12-05T10:42:04 | 58,738,867 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,175 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-11-14 11:38
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('healthy', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='LabDetail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('reason', models.TextField(blank=True, null=True, verbose_name='Reason')),
('cause', models.TextField(blank=True, null=True, verbose_name='Cause')),
('action', models.TextField(blank=True, null=True, verbose_name='Action')),
('lab_ref', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='LabDet', to='healthy.Lab')),
('user_ref', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
eb53a990da835beaca9e9cc878481161831bfb1f | 1bb2a9150de01c618163bbb8f872bdce6f14df4f | /BaekJoon/2981_검문.py | acbbec8e742ffdac47cb7a67e0dc300dcd8ab895 | [] | no_license | whyj107/Algorithm | a1c9a49a12a067366bd0f93abf9fa35ebd62102e | aca83908cee49ba638bef906087ab3559b36b146 | refs/heads/master | 2023-04-14T12:59:52.761752 | 2021-05-01T03:53:31 | 2021-05-01T03:53:31 | 240,014,212 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 989 | py | # 문제
# Checkpoint (검문)
# https://www.acmicpc.net/problem/2981
# Solution
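# Idea: any divisor d (> 1) that leaves the same remainder for all numbers must divide
# the difference between any two of them, so candidates are the divisors of
# max(M) - min(M); each candidate is then checked against consecutive pairs.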
from sys import stdin
N = int(stdin.readline())
M = [int(stdin.readline()) for i in range(N)]
M.sort()
tmp = M[-1] - M[0]
y = []
for i in range(2, int(tmp**0.5)+1):
if tmp % i == 0:
y.append(i)
if tmp//i not in y: y.append(tmp//i)
y.sort()
y.append(tmp)
for i in y:
for n in range(N):
if n == N-1:
print(i, end=" ")
elif M[n] % i != M[n+1] % i:
break
# Another person's solution
"""
import sys
input = sys.stdin.readline
def gcd(a, b):
return gcd(b, a % b) if a % b else b
n = int(input())
num = sorted([int(input()) for _ in range(n)])
get = num[1] - num[0]
for i in range(2, n):
get = gcd(get, num[i]-num[i-1])
res = set()
for i in range(2, int(get**0.5)+1):
if get % i == 0:
res.add(i)
res.add(get//i)
res.add(get)
res = sorted(list(res))
print(' '.join(map(str, res)))
""" | [
"[email protected]"
] | |
2ce9a1b049459c79da30b6a1c77b1b59475eaa01 | 2f260fa01c744d93aacfe592b62b1cee08b469de | /sphinx/tello/source/_static/code/python/control-program/tello.py | 5b7390453ddf2deefede00ae55943829785944fd | [
"CC-BY-4.0"
] | permissive | oneoffcoder/books | 2c1b9b5c97d3eaaf47bafcb1af884b1adcc23bba | 35c69915a2a54f62c2c3a542045719cf5540f6ba | refs/heads/master | 2023-06-25T16:00:10.926072 | 2023-06-20T03:40:09 | 2023-06-20T03:40:09 | 216,915,443 | 50 | 3 | null | 2023-03-07T01:27:50 | 2019-10-22T21:46:03 | Jupyter Notebook | UTF-8 | Python | false | false | 8,757 | py | import socket
import threading
import time
class Tello(object):
"""
Wrapper class to interact with the Tello drone.
"""
def __init__(self, local_ip, local_port, imperial=False,
command_timeout=.3,
tello_ip='192.168.10.1',
tello_port=8889):
"""
Binds to the local IP/port and puts the Tello into command mode.
:param local_ip: Local IP address to bind.
:param local_port: Local port to bind.
:param imperial: If True, speed is MPH and distance is feet.
If False, speed is KPH and distance is meters.
:param command_timeout: Number of seconds to wait for a response to a command.
:param tello_ip: Tello IP.
:param tello_port: Tello port.
"""
self.abort_flag = False
self.command_timeout = command_timeout
self.imperial = imperial
self.response = None
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.tello_address = (tello_ip, tello_port)
self.last_height = 0
self.socket.bind((local_ip, local_port))
# thread for receiving cmd ack
self.receive_thread = threading.Thread(target=self._receive_thread)
self.receive_thread.daemon = True
self.receive_thread.start()
self.socket.sendto(b'command', self.tello_address)
print ('sent: command')
def __del__(self):
"""
Closes the local socket.
:return: None.
"""
self.socket.close()
def _receive_thread(self):
"""
Listen to responses from the Tello.
Runs as a thread, sets self.response to whatever the Tello last returned.
:return: None.
"""
while True:
try:
self.response, _ = self.socket.recvfrom(3000)
except socket.error as exc:
print(f'Caught exception socket.error : {exc}')
def send_command(self, command):
"""
Send a command to the Tello and wait for a response.
:param command: Command to send.
:return: Response from Tello.
"""
print(f'>> send cmd: {command}')
self.abort_flag = False
timer = threading.Timer(self.command_timeout, self.set_abort_flag)
self.socket.sendto(command.encode('utf-8'), self.tello_address)
timer.start()
while self.response is None:
if self.abort_flag is True:
break
timer.cancel()
if self.response is None:
response = 'none_response'
else:
response = self.response.decode('utf-8')
self.response = None
return response
def set_abort_flag(self):
"""
Sets self.abort_flag to True.
Used by the timer in Tello.send_command() to indicate to that a response
timeout has occurred.
:return: None.
"""
self.abort_flag = True
def takeoff(self):
"""
Initiates take-off.
:return: Response from Tello, 'OK' or 'FALSE'.
"""
return self.send_command('takeoff')
def set_speed(self, speed):
"""
Sets speed.
This method expects KPH or MPH. The Tello API expects speeds from
1 to 100 centimeters/second.
Metric: .1 to 3.6 KPH
Imperial: .1 to 2.2 MPH
:param speed: Speed.
:return: Response from Tello, 'OK' or 'FALSE'.
"""
speed = float(speed)
if self.imperial is True:
speed = int(round(speed * 44.704))
else:
speed = int(round(speed * 27.7778))
return self.send_command(f'speed {speed}')
def rotate_cw(self, degrees):
"""
Rotates clockwise.
:param degrees: Degrees to rotate, 1 to 360.
:return:Response from Tello, 'OK' or 'FALSE'.
"""
return self.send_command(f'cw {degrees}')
def rotate_ccw(self, degrees):
"""
Rotates counter-clockwise.
:param degrees: Degrees to rotate, 1 to 360.
:return: Response from Tello, 'OK' or 'FALSE'.
"""
return self.send_command(f'ccw {degrees}')
def flip(self, direction):
"""
Flips.
:param direction: Direction to flip, 'l', 'r', 'f', 'b'.
:return: Response from Tello, 'OK' or 'FALSE'.
"""
return self.send_command(f'flip {direction}')
def get_response(self):
"""
Returns response of tello.
:return: Response of tello.
"""
response = self.response
return response
def get_height(self):
"""
Returns height(dm) of tello.
:return: Height(dm) of tello.
"""
height = self.send_command('height?')
height = str(height)
        height = ''.join(filter(str.isdigit, height))
try:
height = int(height)
self.last_height = height
except:
height = self.last_height
pass
return height
def get_battery(self):
"""
Returns percent battery life remaining.
:return: Percent battery life remaining.
"""
battery = self.send_command('battery?')
try:
battery = int(battery)
except:
pass
return battery
def get_flight_time(self):
"""
Returns the number of seconds elapsed during flight.
:return: Seconds elapsed during flight.
"""
flight_time = self.send_command('time?')
try:
flight_time = int(flight_time)
except:
pass
return flight_time
def get_speed(self):
"""
Returns the current speed.
:return: Current speed in KPH or MPH.
"""
speed = self.send_command('speed?')
try:
speed = float(speed)
if self.imperial is True:
speed = round((speed / 44.704), 1)
else:
speed = round((speed / 27.7778), 1)
except:
pass
return speed
def land(self):
"""
Initiates landing.
:return: Response from Tello, 'OK' or 'FALSE'.
"""
return self.send_command('land')
def move(self, direction, distance):
"""
Moves in a direction for a distance.
This method expects meters or feet. The Tello API expects distances
from 20 to 500 centimeters.
Metric: .02 to 5 meters
Imperial: .7 to 16.4 feet
:param direction: Direction to move, 'forward', 'back', 'right' or 'left'.
:param distance: Distance to move.
:return: Response from Tello, 'OK' or 'FALSE'.
"""
distance = float(distance)
if self.imperial is True:
distance = int(round(distance * 30.48))
else:
distance = int(round(distance * 100))
return self.send_command(f'{direction} {distance}')
def move_backward(self, distance):
"""
Moves backward for a distance.
See comments for Tello.move().
:param distance: Distance to move.
:return: Response from Tello, 'OK' or 'FALSE'.
"""
return self.move('back', distance)
def move_down(self, distance):
"""
Moves down for a distance.
See comments for Tello.move().
:param distance: Distance to move.
:return: Response from Tello, 'OK' or 'FALSE'.
"""
return self.move('down', distance)
def move_forward(self, distance):
"""
Moves forward for a distance.
See comments for Tello.move().
:param distance: Distance to move.
:return: Response from Tello, 'OK' or 'FALSE'.
"""
return self.move('forward', distance)
def move_left(self, distance):
"""
Moves left for a distance.
See comments for Tello.move().
:param distance: Distance to move.
:return: Response from Tello, 'OK' or 'FALSE'.
"""
return self.move('left', distance)
def move_right(self, distance):
"""
Moves right for a distance.
See comments for Tello.move().
:param distance: Distance to move.
:return: Response from Tello, 'OK' or 'FALSE'.
"""
return self.move('right', distance)
def move_up(self, distance):
"""
Moves up for a distance.
See comments for Tello.move().
:param distance: Distance to move.
:return: Response from Tello, 'OK' or 'FALSE'.
"""
return self.move('up', distance)
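# --- Usage sketch (illustrative only; the IP/port values are assumptions, not part of the file) ---
# A minimal example of how this wrapper might be driven, assuming the host is already
# connected to the drone's Wi-Fi network:
#
#   tello = Tello(local_ip='', local_port=8889)
#   tello.takeoff()
#   tello.move_forward(1.0)   # metres, since imperial=False by default
#   tello.rotate_cw(90)
#   tello.land()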
| [
"[email protected]"
] | |
43170fa8f7fc5a3560607c4b21a1cb123096b586 | f6c1a4593859ad75000e726414f25fbf02766143 | /setup.py | 7edb29cfc794fbf5f917801018c219ab2e44a25c | [] | no_license | jbeezley/metadata_extractor | b753ce6f9e55e5bc92f16b5decfbab5b992ac621 | 1401127bf572119353e3c504278ff7436e077c9e | refs/heads/master | 2020-03-20T00:57:52.713434 | 2018-06-12T13:49:54 | 2018-06-12T13:49:54 | 137,062,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,917 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from setuptools import setup, find_packages
# perform the install
setup(
name='girder-plugin-metadata-extractor',
version='0.2.0',
description='Enables the extraction of metadata from uploaded files',
author='Kitware, Inc.',
author_email='[email protected]',
url='https://github.com/girder/metadata_extractor',
license='Apache 2.0',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
package_data={
'': ['web_client/**']
},
packages=find_packages(exclude=['test']),
zip_safe=False,
install_requires=[
'girder',
'hachoir-core',
'hachoir-metadata',
'hachoir-parser'
],
entry_points={
'girder.plugin': [
'metadata_extractor = girder_plugin_metadata_extractor:MetadataExtractorPlugin'
]
}
)
| [
"[email protected]"
] | |
af7e89df385ab20dc1f91bac730a8ca9b629cf3f | 1316cd6763e784811c769c1de577235c921af0de | /Apps/qscan/pyth2p7/scanner.py | fe77520db52aa55bf64dc1ebb7679cf1b63d600f | [] | no_license | VELA-CLARA-software/Software | a6fb6b848584e5893fd6939a447d23134ce636cc | 2e2a88ac0b2b03a495c868d2e11e6481e05097c3 | refs/heads/master | 2023-02-05T07:40:58.260798 | 2023-01-27T09:39:09 | 2023-01-27T09:39:09 | 69,860,536 | 7 | 3 | null | 2021-04-07T14:17:07 | 2016-10-03T10:20:46 | Mathematica | UTF-8 | Python | false | false | 9,766 | py | from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import pyqtSignal
from PyQt4.QtCore import pyqtSlot
import sys,os
#import view
import numpy as np
sys.path.append("\\\\apclara1\\ControlRoomApps\\Controllers\\bin\\stage\\")
#sys.path.append("\\\\apclara1\\ControlRoomApps\\Controllers\\bin\\stage\\Python3_x64\\")
#sys.path.append("\\\\apclara1.dl.ac.uk\\ControlRoomApps\\Controllers\\bin\\stage\\Python3_x64\\")
#
#for item in sys.path:
# print item
#0# import VELA_CLARA_PILaser_Control as pil
import time
#0# pil_init = pil.init()
#pil_init.setVerbose()
#0# pil_control = pil_init.physical_PILaser_Controller()
#import lasmover as lm
import math as ma
import numpy as np
import time
timestr = time.strftime("%Y%m%d-%H%M%S")
import VELA_CLARA_LLRF_Control as rf
rfinit = rf.init()
therf = rfinit.physical_CLARA_LRRG_LLRF_Controller()
import VELA_CLARA_BPM_Control as bpm
bpminit = bpm.init()
bpminit.setVerbose()
bpms = bpminit.physical_CLARA_PH1_BPM_Controller()
import VELA_CLARA_General_Monitor as mon
monini = mon.init()
charge = monini.connectPV('CLA-S01-DIA-WCM-01:Q')
lasE = monini.connectPV('CLA-LAS-DIA-EM-01:E')
vcsump = monini.connectPV('CLA-VCA-DIA-CAM-01:ANA:Intensity_RBV')
# NEW section to get llrf stuff. Tried copying from Duncan charge app.
therf2 = rfinit.getLLRFController(rf.MACHINE_MODE.PHYSICAL,rf.LLRF_TYPE.CLARA_LRRG)
##therf2.getCavFwdPwr()
##therf2.getCavRevPwr()
##print("hello!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!",therf2.getCavFwdPwr())
#exit()
class chargescanner(QtCore.QObject):
changedval = pyqtSignal(float, float, float, float)
changedlogtxt = pyqtSignal(str)
# lo, hi and the min and max values of the area on the VC to scan
# values are mm from bottom left of the VC imagecollector
# nx,y is number of points to stop and measure charge at in x,y
xlo = 3
xhi = 7
ylo = 3
yhi = 7
nx = 3
ny = 3
xrange = np.linspace(xlo,xhi,nx)
yrange = np.linspace(ylo,yhi,ny)
def setxrange(self,dumxlo,dumxhi,dumnx):
self.xrange = np.linspace(dumxlo,dumxhi,dumnx)
    def setyrange(self, dumylo, dumyhi, dumny):
        self.yrange = np.linspace(dumylo, dumyhi, dumny)
def doscan(self,xxlo,xxhi,xxn,yylo,yyhi,yyn):
self.xrange = np.linspace(xxlo,xxhi,xxn)
self.yrange = np.linspace(yylo,yyhi,yyn)
print('IN DOSCAN',self.xrange)
print('IN DOSCAN',self.yrange)
print(therf.getPhiDEG())
print('***********************************************************')
print('!!!!!!!!!!!!!!!!!!!!!PLEASE READ!!!!!!!!!!!!!!!!!!!!!!!!!!!')
print('***********************************************************\n')
print('This is a script to scan the laser over the cathode (via VC)')
print('and measure the charge on the WCM \n')
print('Please have imagecollector open on the VC and check that the')
print('mask feedback is switched on, and that the mask follows the VC laser spot')
print('throughout the scan.\n')
print('the x locations where charge will be measured are', self.xrange, ' mm')
print('the y locations where charge will be measured are', self.yrange, ' mm')
print('the total number of scan points is', self.nx*self.ny)
print('the wcm reading at each point will be recorded in the file qscan.txt')
        print('which is created wherever you run this script from')
# write results to work folder.
timestr = time.strftime("%H%M%S")
dir = '\\\\fed.cclrc.ac.uk\\Org\\NLab\\ASTeC\\Projects\\VELA\\Work\\'+time.strftime("%Y\\%m\\%d")+'\\'
try:
os.makedirs(dir)
except OSError:
if not os.path.isdir(dir):
self.logger.emit('Error creating directory - saving to local directory')
dir = '.'
filename = dir+'qscan'+str(timestr)+'.txt'
f = open(filename,'a')
# f = open('qscan'+str(timestr)+'.txt','a')
#exit()
#0# pil_control.setMaskFeedBackOn_VC()
#xrange = [5.5]
#yrange = [4.5]
# # next section of code to access data from PI Contoller (code c/o Duncan)
# # from this we can get individual peices of hardware, let's store them in a dictionary called hardware
# hardware = {}
# # VC camera image object object
# vc_image = "vc_image"
# hardware[vc_image] = pil_control.getImageObj()
# # to access the vc_image object just call: hardware[vc_image]
# # image data / anmalaysis
# vc_image_data= "vc_image_data"
# hardware[vc_image_data] = pil_control.getVCDataObjConstRef()
# # the VC camera
# vc_cam= "vc_cam"
# hardware[vc_cam] = pil_control.getCameraObj()
# # laser mirror object
# mirror = "mirror"
# # the PI laser object (from here you can get Q, laser energy ...)
# pil = "pil"
# hardware[pil] = pil_control.getPILObjConstRef()
# #number of shots:
# num_shots_to_average = 12
# pil_control.setAllRSBufferSize(num_shots_to_average)
# # Check the buffer size for the Q
# print("getRSBufferSize = ",hardware[pil].max_buffer_count)
# # some constants, (probably should save once for each run )
# x_pix_scale_factor = hardware[vc_image].x_pix_scale_factor
# y_pix_scale_factor = hardware[vc_image].y_pix_scale_factor
# x_pix_to_mm = hardware[vc_image].x_pix_to_mm
# y_pix_to_mm = hardware[vc_image].y_pix_to_mm
# num_pix_x = hardware[vc_image].num_pix_x
# num_pix_y = hardware[vc_image].num_pix_y
# # ++ others??
ix = 0
chargebest = 0
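        # Serpentine (boustrophedon) scan: reverse the y direction on every other x column
        # (ix even/odd) so successive measurement points stay adjacent on the cathode.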
for x in self.xrange:
if ix % 2 == 0:
dumyrange = self.yrange
print('going up', dumyrange)
else:
dumyrange = self.yrange[::-1]
                print('going down', dumyrange)
ix = ix + 1
for y in dumyrange:
print(x, y, '\n')
#l a = pil_control.setVCPos(x,y)
#l # monitor this paramter to know when ity has finished
#l set_pos_succes = False
#l# exit()
#l
#l while 1:
#l set_pos_state = pil_control.getSetVCPosState()
#l print 'success status', set_pos_state
#l if set_pos_state == pil.VC_SET_POS_STATE.SUCCESS:
#l set_pos_succes = True
#l break
#l else:
#l print set_pos_state
#l time.sleep(1)
#l print("Set Position Finished",pil_control.getSetVCPosState())
# exit()
# mylasmove.setposition(x,y,5,0.1)
# raw_input("Press Enter to continue...")0
# # get the qscan quantities at this point (c.o Duncan for the code)
# # set next-position
# # when at next position
# print("At next_positoin, getting some data")
# pil_control.clearRunningValues()
# # wait for buffer to fill, we will just check against the Q buffer
# while hardware[pil].Q_full == False: # suggest for pil_control, we could do with a isRFBufferNOTFull function(!)
# print("Waiting for running stat buffer to fill, ", hardware[pil].Q_n)
# time.sleep(0.5)
# print("Buffer Is Full, ",hardware[pil].Q_n," getting data")
# # mean and (standard deviation) sd for Q
# Q_mean = hardware[pil].Q_mean
# Q_sd = hardware[pil].Q_sd
# # mean and sd for energy
# energy_mean = hardware[pil].energy_mean
# energy_sd = hardware[pil].energy_sd
# # laser x position mean and sd
# x_pix_mean = hardware[vc_image_data].x_pix_mean
# x_pix_sd = hardware[vc_image_data].x_pix_sd
# # laser y position mean and sd
# y_pix_mean = hardware[vc_image_data].y_pix_mean
# y_pix_sd = hardware[vc_image_data].y_pix_sd
# # laser x width mean and sd
# sig_x_pix_mean = hardware[vc_image_data].sig_x_pix_mean
# sig_x_pix_sd = hardware[vc_image_data].sig_x_pix_sd
# # y position mean and sd
# sig_y_pix_mean = hardware[vc_image_data].sig_y_pix_mean
# sig_y_pix_sd = hardware[vc_image_data].sig_y_pix_sd
chargenow = 1.1
# chargenow = monini.getValue(charge)
lasEnow = 1.1
# lasEnow = monini.getValue(lasE)
vcsumpnow = monini.getValue(vcsump)
# f.write('str(x)+' '+str(x_pix_mean)+' '+str(x_pix_sd)+' '+str(y)+' '+str(y_pix_mean)+' '+str(y_pix_sd)+' '+str(Q_mean)+' '+str(Q_sd)+' ' '+'str(Q_mean)+' '+str(Q_sd)')
# f.write('RF phase '+str(therf.getPhiDEG())+' vcx '+str(x)+' vcy '+str(y)+' charge '+str(chargenow)+' laserE '+str(lasEnow)+' VCintens '+str(vcsumpnow)+'\n')
f.flush()
self.changedval.emit(x,y,chargenow,lasEnow)
iterstring = "hello string signal"
print(iterstring)
self.changedlogtxt.emit(iterstring)
print("charge now", chargenow, " best charge ", chargebest)
if chargenow > chargebest:
chargebest = chargenow
print("got a higher charge")
        print('finished the scan, the highest charge was', chargebest)
f.close()
| [
"[email protected]"
] | |
c56a3f8d77a5d05be57428bbda596c5e31709503 | 241724e83f5c12ed9d7dd3b825dfe4e2b1b0f777 | /examples/boundary_conditions.py | a73111c7860a10c82ddfefc46005d3f0954a7718 | [
"MIT"
] | permissive | xuanxu/py-pde | d8be358ab76d4060b14afc74bc7d836591c6188e | de33d938aea8680eff872ae1b64569895662a248 | refs/heads/master | 2021-03-09T21:37:13.920717 | 2020-03-10T12:18:03 | 2020-03-10T12:18:03 | 246,382,909 | 0 | 0 | MIT | 2020-03-10T18:54:22 | 2020-03-10T18:54:22 | null | UTF-8 | Python | false | false | 521 | py | #!/usr/bin/env python3
from pde import UnitGrid, ScalarField, DiffusionPDE
grid = UnitGrid([16, 16], periodic=[False, True]) # generate grid
state = ScalarField.random_uniform(grid, 0.2, 0.3) # generate initial condition
# set boundary conditions `bc` for all axes
bc_x_left = {'type': 'derivative', 'value': 0.1}
bc_x_right = {'type': 'value', 'value': 0}
bc_x = [bc_x_left, bc_x_right]
bc_y = 'periodic'
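# One boundary-condition entry per axis: the x axis gets separate lower/upper conditions,
# while the y axis is periodic (matching periodic=[False, True] in the grid above).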
eq = DiffusionPDE(bc=[bc_x, bc_y])
result = eq.solve(state, t_range=10, dt=0.005)
result.plot(show=True)
| [
"[email protected]"
] | |
fb833a786a0d20f87937019f8e9caa12a42bd37f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02859/s282657113.py | e434f658756ad3e7e51c07a4f92cf7ee39d78ef2 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33 | py | N = int(input())
print(int(N**2)) | [
"[email protected]"
] |