blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
607250c2ca29295053d3ba2e5639b4b102ba1fb8 | 5f5f9fb972d6c8236142413876fee737b11b0090 | /Assignment4/comp/code_lstm/max_predictions.py | 9da8ed5ca0836e69d7c306c3c2a0b5fe796f133f | [] | no_license | agarwal-ayushi/Machine-Learning-Assignments | c634d48996dd975874ff7383aac46e626bcb338d | 53262ba02ce440946e0aa92d6385f94bd79b32c5 | refs/heads/master | 2022-12-17T00:15:25.626464 | 2020-09-16T17:19:33 | 2020-09-16T17:19:33 | 235,383,698 | 1 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,353 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from models_rasha import Encoder_Rasha, Decoder_Rasha
import numpy as np
def to_device(data, device):
if isinstance(data,(list,tuple)):
return [to_device(x,device) for x in data]
return data.to(device)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def max_prediction(encoded_features, decoder_model, vocab_dict, max_length=80):
'''
Function to perform max prediction to generate captions
'''
word_2_ix, ix_2_word = vocab_dict
start_token = word_2_ix['<start>']
end_token = word_2_ix['<end>']
hidden = None # In the beginning the hidden state is None
caption_word_id = []
for i in range(max_length):
encoded_features = encoded_features.unsqueeze(1)
        if hidden is None:
output, hidden = decoder_model.get_pred(encoded_features.cuda())
else:
output, hidden = decoder_model.get_pred(encoded_features.cuda(), to_device(hidden, device))
_ , predicted_id = output.max(1)
caption_word_id.append(predicted_id)
if (predicted_id == end_token):
break
encoded_features = decoder_model.embed(predicted_id)
caption_word_id = torch.stack(caption_word_id, 1)
return caption_word_id.cpu().numpy()[0]
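
# A minimal usage sketch (the encoder call, tensor shapes and variable names
# below are assumptions for illustration, not part of this file):
# encoder = Encoder_Rasha(...); decoder = Decoder_Rasha(...)
# features = encoder(image_tensor.unsqueeze(0).to(device))
# ids = max_prediction(features, decoder, (word_2_ix, ix_2_word))
# caption = " ".join(ix_2_word[i] for i in ids)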
| [
"[email protected]"
] | |
74a2ad6e251baa847e24ec5c7ee551c2e7349fbe | a232988fe8f247fbd56f7a91748ccfbf73326265 | /blog/blog/views.py | 739eb639313cb9e904159fe63b5e39ce4fd5f797 | [] | no_license | Vigs16/hello-world | 32236f6258ce42e6d8f5ef5139ecd84090be37bd | 4212e0669944f22d0d3148516b97bf9fec95b72d | refs/heads/master | 2021-01-20T13:03:51.843003 | 2017-05-30T04:52:13 | 2017-05-30T04:52:13 | 90,442,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,177 | py | from flask import render_template
from flask import request, redirect, url_for
from . import app
from .database import session, Entry
PAGINATE_BY = 10
@app.route("/")
@app.route("/page/<int:page>")
def entries(page=1):
# Zero-indexed page
page_index = page - 1
count = session.query(Entry).count()
start = page_index * PAGINATE_BY
end = start + PAGINATE_BY
total_pages = (count - 1) // PAGINATE_BY + 1
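    # Integer ceiling division: total_pages == ceil(count / PAGINATE_BY);
    # e.g. count=25, PAGINATE_BY=10 gives (25 - 1) // 10 + 1 == 3.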
has_next = page_index < total_pages - 1
has_prev = page_index > 0
entries = session.query(Entry)
entries = entries.order_by(Entry.datetime.desc())
entries = entries[start:end]
return render_template("entries.html",
entries=entries,
has_next=has_next,
has_prev=has_prev,
page=page,
total_pages=total_pages
)
@app.route("/entry/add", methods=["GET"])
def add_entry_get():
return render_template("add_entry.html")
@app.route("/entry/add", methods=["POST"])
def add_entry_post():
entry = Entry(
title=request.form["title"],
content=request.form["content"],
)
session.add(entry)
session.commit()
return redirect(url_for("entries")) | [
"[email protected]"
] | |
123de62f0fc26c6891df29d80121cecdcef6d869 | bdea01d7702bec417772442f54744a5abcab8090 | /contours.py | 133c0683fc36f14bed0b04a8bd385e80397e1ea8 | [] | no_license | arbaza/open-cv | 6e21565e34e9e208d39a8d391137d038cef930f0 | 576d4f210f1ca764f9ae8d442e730bd6ee9a798a | refs/heads/main | 2023-02-10T11:27:05.228090 | 2020-12-30T18:19:09 | 2020-12-30T18:19:09 | 325,618,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 749 | py | import numpy as np
import cv2 as cv
img = cv.imread('Photos\Euro.jpg')
cv.imshow('image', img)
cv.waitKey(0)
#Grayscale
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
blank = np.zeros(img.shape, dtype='uint8')  # 3 channels so the red contours are visible
cv.imshow('Blank', blank)
#blur
# blur = cv.GaussianBlur(gray, (5,5), cv.BORDER_DEFAULT)
# cv.imshow("Blur", blur )
# cv.waitKey(0)
#Canny/edge detection
canny = cv.Canny(img, 127, 175)
# ret, thresh = cv.threshold(gray, 125, 255, cv.THRESH_BINARY)
#Contour detection
contours, hierarchies = cv.findContours(canny, cv.RETR_LIST, cv.CHAIN_APPROX_NONE)
print(len(contours))
cv.drawContours(blank, contours, -1, (0, 0, 255 ), 1)
cv.imshow('Contours drawn', blank)
cv.waitKey(0) | [
"[email protected]"
] | |
1767ed91cd5ecb9dc4967b259a9c41f4baf56d84 | 093d2e689823e5716c46b09511d8adebb2320573 | /Python Competitive Program/count occurance in tuple.py | c4103e6f394595ad87dceed955728d81a0eb86fe | [] | no_license | gauravk268/Competitive_Coding | 02813a908e4cd023e4a7039997e750d1fdae6d92 | 783c246dbaf36425a5b7cb76b4e79e2b7ba1a10c | refs/heads/master | 2022-10-15T02:25:41.598723 | 2022-10-03T06:09:17 | 2022-10-03T06:09:17 | 235,630,000 | 20 | 22 | null | 2022-10-03T06:09:18 | 2020-01-22T17:45:32 | C++ | UTF-8 | Python | false | false | 138 | py |
# Count occurrences of each character in a string using a dict comprehension
# (the filename says tuple, but this snippet works on a string)
text = input("Enter the string : ")
word_count = {char: text.count(char) for char in text}
print(word_count)
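
# For an actual tuple (as the filename suggests), collections.Counter builds the
# same kind of mapping; a hypothetical example:
# from collections import Counter
# Counter((1, 2, 2, 3))  # -> Counter({2: 2, 1: 1, 3: 1})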
| [
"[email protected]"
] | |
d0ca2d9cd484b355ced743af6aebbbdb18d8529a | 3cf8d34cc1ea0ef7857742211bed333ee0400e63 | /46.py | 422649ee9f326b4384db1ce35c43258bbd08ace6 | [] | no_license | ElseVladimir/py_tasks | 2dd11130cae83af772f4cb89d04e80da9dbcf070 | bdfa6e4dbb06b67eb79f3a06ba4ab1bf6052d1a6 | refs/heads/master | 2023-01-27T21:54:18.911645 | 2020-12-02T15:51:55 | 2020-12-02T15:51:55 | 303,987,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,203 | py | """
In this task you need to use the API of the site artsy.net.
The Artsy API provides information about certain artists, their works, and exhibitions.
For this task you will need information about the artists themselves.
You are given artist identifiers from the Artsy database.
For each identifier, retrieve the artist's name and year of birth.
Print the artists' names in non-decreasing order of year of birth. If several artists
share a year of birth, print their names in lexicographic order.
Working with the Artsy API
Very few projects provide a completely open and free API. In most cases, to get access to an API
you have to register with the project, create your own application, and obtain a unique key
(or token); all subsequent API requests are then made with this key.
To start working with the Artsy API, go to the API documentation start page
https://developers.artsy.net/start and complete the required steps: register, create an
application, and obtain a Client Id / Client Secret pair. Do not publish these identifiers.
After that you need to obtain an API access token. The documentation start page has examples of
how the request is made and what the server response looks like. Here is an example request in Python.
import requests
import json
client_id = '...'
client_secret = '...'
# initiate the token request
r = requests.post("https://api.artsy.net/api/tokens/xapp_token",
                  data={
                      "client_id": client_id,
                      "client_secret": client_secret
                  })
# parse the server response
j = json.loads(r.text)
# extract the token
token = j["token"]
Now everything is ready to retrieve information about artists. The documentation start page
shows how the request is made and what the response looks like. Example request in Python.
# create a header containing our token
headers = {"X-Xapp-Token" : token}
# initiate the request with the header
r = requests.get("https://api.artsy.net/api/artists/4d8b92b34eb68a1b2c0003f4", headers=headers)
# parse the server response
j = json.loads(r.text)
Note:
Use the sortable_name parameter in UTF-8 encoding as the artist's name.
Sample input:
4d8b92b34eb68a1b2c0003f4
537def3c139b21353f0006a6
4e2ed576477cc70001006f99
Sample output:
Abbott Mary
Warhol Andy
Abbas Hamra
Note for Windows users
When a file is opened for writing on Windows, the CP1251 encoding is used by default, while the
names on the site are written in UTF-8, which can lead to an error when trying to write a name
with unusual characters to a file. You can use print, or the encoding argument of the open function.
You have an unlimited number of attempts.
Time per attempt: 5 mins
"""
import requests
import json
idlist = []
artists = dict()
with open('dataset_24476_4.txt', 'r') as f:
for i in f:
idlist.append(i.strip())
token = 'token'  # placeholder: obtain a real xapp token as described in the docstring above
headers = {"X-Xapp-Token" : token}
for ident in idlist:
r = requests.get("https://api.artsy.net/api/artists/{}".format(ident), headers=headers)
j = json.loads(r.text)
artists.update({j['sortable_name']: j['birthday']})
srtd_artists = sorted(artists.items(), key=lambda x: (x[1], x[0]))
for i in srtd_artists:
print(i[0]) | [
"[email protected]"
] | |
feaced22bf438e634a1320d815dfdf7b85c5bbe9 | 5f5fb1cec25f432a8cc1c73fb9943b0b45588042 | /Base/BaseElementEnmu.py | c2416f791cdbf7aa1296caec5b556acaa7dc11fd | [] | no_license | qijianxiaobai/Python-Appium | 732f64c8e982f1a660b1c45bdf3a73c692e04417 | caef97e5eaa6f739b08a85194bfb61a2666419b6 | refs/heads/master | 2020-04-11T11:36:43.266070 | 2018-12-17T10:10:02 | 2018-12-17T10:10:02 | 161,753,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,549 | py |
class Element(object):
    # Common operation keywords
find_element_by_id = "id"
find_elements_by_id = "ids"
INDEX = "index"
find_elements_by_xpath = "xpaths"
find_element_by_xpath = "xpath"
find_element_by_css_selector = "css"
find_element_by_class_name = "class_name"
CLICK = "click"
BACK = "back"
TAP = "tap"
ACCESSIBILITY = "accessibility"
ADB_TAP = "adb_tap"
SWIPE_DOWN = "swipe_down"
SWIPE_UP = "swipe_up"
SWIPE_LEFT = "swipe_left"
SET_VALUE = "set_value"
GET_VALUE = "get_value"
WAIT_TIME = 20
PRESS_KEY_CODE = "press_keycode"
GET_CONTENT_DESC = "get_content_desc"
    # Error log keywords
TIME_OUT = "timeout"
NO_SUCH = "noSuch"
WEB_DROVER_EXCEPTION = "WebDriverException"
INDEX_ERROR = "index_error"
STALE_ELEMENT_REFERENCE_EXCEPTION = "StaleElementReferenceException"
DEFAULT_ERROR = "default_error"
    # Checkpoints
    CONTRARY = "contrary"  # inverse checkpoint: the check fails if the element still exists, e.g. it remains after deletion
    CONTRARY_GETVAL = "contrary_getval"  # inverse value checkpoint: if the comparison succeeds, the check fails
    DEFAULT_CHECK = "default_check"  # default checkpoint: simply look up the page element
    COMPARE = "compare"  # compare historical data against actual data
    TOAST = "toast"
    RE_CONNECT = 1  # whether to rerun a test case once after a failure
INFO_FILE = "info.pickle"
SUM_FILE = "sum.pickle"
DEVICES_FILE = "devices.pickle"
REPORT_FILE = "Report.xlsx"
| [
"[email protected]"
] | |
b6223ed96517013e0969b1c9d814fc5c4699d324 | 9ee84830f4360c063c1bb9fe0d7312e5fdab47d9 | /例子-0911-02.py | 0e43c38a239c44c7dbc3d1b443786146afc9123d | [] | no_license | hcl621/python | 05974f23548f7afd5d8643d9cf1fd4f5b1937186 | af8972dc2918640451c13fa76447f2088054b38c | refs/heads/master | 2020-07-09T22:07:52.317034 | 2019-10-08T12:40:53 | 2019-10-08T12:40:53 | 204,094,003 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py | #coding=utf-8
try:
    j  # the name j is undefined here, so evaluating it raises a NameError
except NameError as e:
print('catch error!!!')
| [
"[email protected]"
] | |
1638e966bc35966aa652f2387e6f29f7caad377c | 9c4c895d23f8109d6ddd8c26937e33d905665b6e | /testing/readfile.py | 6d4e684df232b8e6e6116a8e879e9fd3030994f6 | [] | no_license | shaybix/Raazi | 401b3b4cd8ef31248f9efb58d8750a25ad319718 | 1fee51d2bdfb0a023c51ae5bc906f03f4e055544 | refs/heads/master | 2021-01-21T21:39:43.959826 | 2016-03-21T00:42:02 | 2016-03-21T00:42:02 | 18,281,430 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | import sqlite3
connection = sqlite3.connect('sample.sqlite3')
cursor = connection.cursor()
output = cursor.execute('Select * from main')
f = open('sample2.txt', 'w')
fetch = cursor.fetchone()
#lines = []
#
#for line in f:
# lines.append(line)
#
#
#print lines[]
#print fetch[0]
#print "----------------"
#print fetch[1]
#print "----------------"
#info = fetch[2]
#
#encoded = info.encode('utf-8')
#
#f.write(encoded)
#print "----------------"
#encoded = fetch[3].encode('utf-8')
#encoded = encoded.splitlines()[5].split(':')[-1].split('.')[0]
info = fetch[-5:]
#
print(info[1])
#encoded = info.encode('utf-8')
#print encoded
#f.write(encoded)
#print "----------------"
#print fetch[4]
f.close() | [
"[email protected]"
] | |
631a2dcb65f7b01f394a4887810810476c69ec19 | 933376c11498a6567da8d7eb7d2675100895c3ba | /pyzoo/zoo/chronos/forecaster/tcn_forecaster.py | 1d2359d1cc2e54a9820e4f91c65c4ff5cd87761b | [
"Apache-2.0"
] | permissive | intel-analytics/analytics-zoo | 320a461765f86d41dd456b598b1cf1d51d57f4c4 | 7cc3e2849057d6429d03b1af0db13caae57960a5 | refs/heads/master | 2023-08-13T20:47:58.621714 | 2023-07-06T00:49:11 | 2023-07-06T00:49:11 | 90,328,920 | 3,104 | 996 | Apache-2.0 | 2023-09-06T01:51:18 | 2017-05-05T02:27:30 | Jupyter Notebook | UTF-8 | Python | false | false | 5,894 | py | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from zoo.chronos.forecaster.base_forecaster import BasePytorchForecaster
from zoo.chronos.forecaster.utils import set_pytorch_seed
from zoo.chronos.model.tcn import TCNPytorch
from zoo.chronos.model.tcn import model_creator, optimizer_creator, loss_creator
class TCNForecaster(BasePytorchForecaster):
"""
Example:
>>> #The dataset is split into x_train, x_val, x_test, y_train, y_val, y_test
>>> forecaster = TCNForecaster(past_seq_len=24,
future_seq_len=5,
input_feature_num=1,
output_feature_num=1,
...)
>>> forecaster.fit((x_train, y_train))
>>> forecaster.to_local() # if you set distributed=True
>>> test_pred = forecaster.predict(x_test)
>>> test_eval = forecaster.evaluate((x_test, y_test))
>>> forecaster.save({ckpt_name})
>>> forecaster.restore({ckpt_name})
"""
def __init__(self,
past_seq_len,
future_seq_len,
input_feature_num,
output_feature_num,
num_channels=[30]*7,
kernel_size=3,
repo_initialization=True,
dropout=0.1,
optimizer="Adam",
loss="mse",
lr=0.001,
metrics=["mse"],
seed=None,
distributed=False,
workers_per_node=1,
distributed_backend="torch_distributed"):
"""
Build a TCN Forecast Model.
TCN Forecast may fall into local optima. Please set repo_initialization
to False to alleviate the issue. You can also change a random seed to
work around.
:param past_seq_len: Specify the history time steps (i.e. lookback).
:param future_seq_len: Specify the output time steps (i.e. horizon).
:param input_feature_num: Specify the feature dimension.
:param output_feature_num: Specify the output dimension.
:param num_channels: Specify the convolutional layer filter number in
TCN's encoder. This value defaults to [30]*7.
:param kernel_size: Specify convolutional layer filter height in TCN's
encoder. This value defaults to 3.
        :param repo_initialization: whether to use the paper author's
            initialization. True uses the paper author's initialization and
            False uses the framework's default initialization. The value
            defaults to True.
        :param dropout: Specify the dropout probability (i.e. the probability
            of dropping a neuron). This value defaults to 0.1.
:param optimizer: Specify the optimizer used for training. This value
defaults to "Adam".
:param loss: Specify the loss function used for training. This value
defaults to "mse". You can choose from "mse", "mae" and
"huber_loss".
:param lr: Specify the learning rate. This value defaults to 0.001.
:param metrics: A list contains metrics for evaluating the quality of
forecasting. You may only choose from "mse" and "mae" for a
            distributed forecaster. You may choose from "mse", "me", "mae",
            "rmse", "msle", "r2", "mpe", "mape", "mspe", "smape", "mdape"
and "smdape" for a non-distributed forecaster.
:param seed: int, random seed for training. This value defaults to None.
:param distributed: bool, if init the forecaster in a distributed
fashion. If True, the internal model will use an Orca Estimator.
If False, the internal model will use a pytorch model. The value
defaults to False.
:param workers_per_node: int, the number of worker you want to use.
The value defaults to 1. The param is only effective when
distributed is set to True.
:param distributed_backend: str, select from "torch_distributed" or
"horovod". The value defaults to "torch_distributed".
"""
# config setting
self.data_config = {
"past_seq_len": past_seq_len,
"future_seq_len": future_seq_len,
"input_feature_num": input_feature_num,
"output_feature_num": output_feature_num
}
self.config = {
"lr": lr,
"loss": loss,
"num_channels": num_channels,
"kernel_size": kernel_size,
"repo_initialization": repo_initialization,
"optim": optimizer,
"dropout": dropout
}
# model creator settings
self.local_model = TCNPytorch
self.model_creator = model_creator
self.optimizer_creator = optimizer_creator
self.loss_creator = loss_creator
# distributed settings
self.distributed = distributed
self.distributed_backend = distributed_backend
self.workers_per_node = workers_per_node
# other settings
self.lr = lr
self.metrics = metrics
self.seed = seed
super().__init__()
| [
"[email protected]"
] | |
448adf2f39782bfbffbcd858cbcdfd29fa0d2642 | d4822b0a4bf2279f31edf5eceddac884f77de5b0 | /order/permissions.py | 6f85f40a9a84eafa2238d36dbc52d85319088402 | [] | no_license | xizlt/api_sport | bb05fa36272782bd91796ac8acb72e16cb38d881 | 55bbccfb344528016e4f903bcadad6ffaa02db17 | refs/heads/master | 2023-03-05T02:32:33.215134 | 2021-02-21T21:49:28 | 2021-02-21T21:49:28 | 297,443,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,128 | py | from rest_framework.permissions import SAFE_METHODS, BasePermission
class OrderPermission(BasePermission):
def has_object_permission(self, request, view, obj):
return bool(
request.method in SAFE_METHODS or
request.user and
request.user.is_authenticated and (obj.user == request.user or request.user.is_staff)
)
def has_permission(self, request, view):
if request.method == 'POST':
return True
return bool(
request.method in SAFE_METHODS or
request.user.is_authenticated or request.user.is_staff
)
class ItemViewPermission(BasePermission):
def has_object_permission(self, request, view, obj):
return bool(
request.method in SAFE_METHODS or
request.user and
request.user.is_authenticated and (obj.order.user.id == request.user.id or request.user.is_staff)
)
def has_permission(self, request, view):
return bool(
request.method in SAFE_METHODS or
request.user.is_authenticated or request.user.is_staff
)
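
# A minimal usage sketch (the view, model and serializer names are assumptions):
# from rest_framework import viewsets
#
# class OrderViewSet(viewsets.ModelViewSet):
#     queryset = Order.objects.all()
#     serializer_class = OrderSerializer
#     permission_classes = [OrderPermission]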
| [
"[email protected]"
] | |
a74472db035ea74db466460bf3be92ed1ad179cf | 5e5a2823a212f8b3b71863b95c73f7b259220070 | /runspider.py | e769984489a93842dddebe27c14329e34c9a40e5 | [] | no_license | everyday-stoke-phosphate/kyodo_scraping | 85f28d59460507e357bf0f7d13c9778136420821 | a7bf5c27507174217dbf8d8e3e79ddac7eb3c2f9 | refs/heads/master | 2021-03-04T06:49:03.876710 | 2020-03-15T01:55:37 | 2020-03-15T01:55:37 | 246,015,135 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | # -*- coding: utf-8 -*-
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
def main():
    # Configuration changes basically live in settings.py.
    # Specify settings here only when overriding something in particular.
    settings = get_project_settings() # FEED_URI='results.json')
process = CrawlerProcess(settings)
spider_name = "kyodo_articles_scraping"
process.crawl(spider_name)
    process.start()


if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
65eb25457df6d77b41c68d357cd137870ad8fe15 | f23c47a5495799015c8b803420813a6ee8d6d2a4 | /options/base_options.py | b8f879c556236b9d0b3fbbe242454ad2911e94d6 | [] | no_license | eitan3/continuous_view_synthesis_gluon | d3f5e74553ddfb480ba24861cc4cd39acbb7a1ce | b3d90378f0e49a165ae85a9249ee3e5d208348eb | refs/heads/master | 2022-12-02T16:16:37.440990 | 2020-08-21T17:17:33 | 2020-08-21T17:17:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,605 | py | import argparse
import os
from util import util
class BaseOptions():
def __init__(self):
self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
self.initialized = False
self.isTrain = True
self.opt = None
def initialize(self):
# basic info
self.parser.add_argument('--batchSize', type=int, default=16, help='input batch size')
self.parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
self.parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
# visualization related
self.parser.add_argument('--display_winsize', type=int, default=256, help='display window size')
self.parser.add_argument('--display_id', type=int, default=1, help='window id of the web display')
self.parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')
# data loading related
self.parser.add_argument('--image_width', default=256, type=int, help='Image width')
self.parser.add_argument('--image_height', default=256, type=int, help='Image height')
# experiment related
self.parser.add_argument('--nz_geo', type=int, default=256, help='number of latent points')
self.parser.add_argument('--category', type=str, default='car', help='model category [car|chair|kitti]')
self.parser.add_argument('--random_elevation', action='store_true', help='introduce elevation changes between training pairs')
self.initialized = True
def parse(self):
if not self.initialized:
self.initialize()
self.opt = self.parser.parse_args()
self.opt.isTrain = self.isTrain # train or test
args = vars(self.opt)
print('------------ Options -------------')
for k, v in sorted(args.items()):
print('%s: %s' % (str(k), str(v)))
print('-------------- End ----------------')
# save to the disk
expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, 'opt.txt')
with open(file_name, 'wt') as opt_file:
opt_file.write('------------ Options -------------\n')
for k, v in sorted(args.items()):
opt_file.write('%s: %s\n' % (str(k), str(v)))
opt_file.write('-------------- End ----------------\n')
return self.opt
| [
"[email protected]"
] | |
e3bffa2e9644e317f97a756bd852d57fe28c1fae | 50d8f7a805546edd768a245a544d3a362b674a73 | /03.image_stitch/image_stitch.py | 41706bca2902d0384ccbd7b7ef406ad1dddb2547 | [
"MIT"
] | permissive | Taishuaibo/pycv-training | 0d3f812a7e57acc8496ced9ada5879ee9f8a33f5 | a9ad8755c82bff62e907099ebac7587422b28ccd | refs/heads/main | 2023-05-05T15:51:19.394705 | 2021-05-30T01:08:17 | 2021-05-30T01:08:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,064 | py | import cv2
import numpy as np
def cv_show(name, img):
cv2.imshow(name, img)
cv2.waitKey(0)
cv2.destroyAllWindows()
def createFeature(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Create the SIFT detector
    sift = cv2.xfeatures2d.SIFT_create()
    # Detect SIFT keypoints and compute their descriptors
    (kps, features) = sift.detectAndCompute(img, None)
    # Convert the keypoint coordinates to a NumPy array
    kps = np.float32([kp.pt for kp in kps])
    # Return the keypoints and the corresponding descriptors
return (kps, features)
# Read the images to stitch
imageA = cv2.imread("./oriImgs/left.png")  # left image
imageB = cv2.imread("./oriImgs/right.png")  # right image
cv2.imshow('imageA', imageA)
cv2.imshow('imageB', imageB)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Detect SIFT keypoints for images A and B and compute their descriptors
(kpsA, featuresA) = createFeature(imageA)
(kpsB, featuresB) = createFeature(imageB)
# Create a brute-force matcher
matcher = cv2.BFMatcher()
# Use KNN (k=2) to find candidate SIFT matches between A and B
rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
goodMatches = []
for m in rawMatches:
    # Keep a pair when the nearest distance is below 0.75x the second nearest (Lowe's ratio test)
    if len(m) == 2 and m[0].distance < m[1].distance * 0.75:
        # Store the indices of the two points in featuresA / featuresB
goodMatches.append((m[0].trainIdx, m[0].queryIdx))
# The homography can only be computed when more than 4 filtered matches remain
if len(goodMatches) <= 4:
    raise SystemExit("Not enough good matches to compute the homography")

# Get the point coordinates of the matched pairs
ptsA = np.float32([kpsA[i] for (_, i) in goodMatches])
ptsB = np.float32([kpsB[i] for (i, _) in goodMatches])
# Compute the perspective transform (homography)
# !!! Mind the order of the A/B point sets !!!
(H, status) = cv2.findHomography(ptsB, ptsA, cv2.RANSAC, 4.0)
# Warp image B; res is the transformed image
res = cv2.warpPerspective(imageB, H, (imageA.shape[1] + imageB.shape[1], imageB.shape[0]))
cv_show('result', res)
# Paste image A into the leftmost part of the result
res[0:imageA.shape[0], 0:imageA.shape[1]] = imageA
cv_show('result', res)
# Build the visualization image: place A and B side by side
(hA, wA) = imageA.shape[:2]
(hB, wB) = imageB.shape[:2]
vis = np.zeros((max(hA, hB), wA + wB, 3), dtype="uint8")
vis[0:hA, 0:wA] = imageA
vis[0:hB, wA:] = imageB
# Iterate over the matches together with their RANSAC status and draw the pairs
for ((trainIdx, queryIdx), s) in zip(goodMatches, status):
    # Draw on the visualization only when the pair survived RANSAC
    if s == 1:
        # Draw the match line
ptA = (int(kpsA[queryIdx][0]), int(kpsA[queryIdx][1]))
ptB = (int(kpsB[trainIdx][0]) + wA, int(kpsB[trainIdx][1]))
cv2.line(vis, ptA, ptB, (0, 255, 0), 1)
# Show all images
cv2.imshow('imageA', imageA)
cv2.imshow('imageB', imageB)
cv2.imshow('keypoint matches', vis)
cv2.imwrite('./resImgs/keypointmatcher.png', vis)
cv2.imshow('result', res)
cv2.imwrite('./resImgs/result.png', res)
cv2.waitKey(0)
cv2.destroyAllWindows() | [
"[email protected]"
] | |
1cc38e8e931283f05c93bbaab335422b6e5f0351 | 61191088ee4a257b15fad0b8fe53641c70ca78f1 | /vectdraw/draw/colour.py | b318d51482b6c8587b1dc0087345978adbb1cebc | [] | no_license | ajpen/Vectdraw | 2e0394a6ff646f408a683033ef14692b6d7fd048 | 3b0d941b31f759737bd52210ac9738d1ef7b5aaa | refs/heads/master | 2022-11-14T09:51:58.772146 | 2020-07-08T21:40:53 | 2020-07-08T21:40:53 | 277,761,591 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,223 | py |
"""
Class for rgba colour representation
"""
class Colour(object):
red = 0
green = 0
blue = 0
alpha = 255
def __init__(self, rgba=None):
"""
        Returns a colour instance. If nothing is passed, the default
        is 0, 0, 0, 255 for red, green, blue and alpha respectively
:param rgba: a tuple containing numbers between 0-255 for red, green,
blue and alpha respectively
"""
if rgba:
self.SetColour(rgba)
def SetColour(self, rgba):
        if not isinstance(rgba, tuple) or len(rgba) != 4:
raise TypeError(
"Unexpected type given. Expected tuple of size 4 "
"(int, int, int, int), received {}".format(type(rgba)))
for c in rgba:
if c > 255 or c < 0:
raise ValueError(
"Colour values are outside of the domain (0-255)")
self.red, self.green, self.blue, self.alpha = rgba
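
    # A minimal usage sketch:
    #   c = Colour((255, 128, 0, 255))
    #   c == Colour((255, 128, 0, 255))  # -> True
    #   c.SetColour((300, 0, 0, 255))    # -> raises ValueError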
def __eq__(self, other):
if isinstance(other, self.__class__):
return (self.red == other.red and
self.green == other.green and
self.blue == other.blue and
self.alpha == other.alpha)
else:
            return NotImplemented | [
"[email protected]"
] | |
98166df402980f456d8048e29aa8a450f9257655 | 80d879a552ce00a9bc73a26d0ddb74c278867b1f | /scripts/080_hilo_concrete.py | 4abf39886121d03650f95582dad542dc8c6f5d56 | [] | no_license | whiskyching/WS-EscobedoGroup | 4a25abe62fac91b82d3b1abd74ddc02af107457f | bd36d623ec2f60638fe3f330b9ad92c810804e8d | refs/heads/main | 2023-03-20T07:03:19.594765 | 2021-03-16T13:15:14 | 2021-03-16T13:15:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,119 | py | import os
import compas
from compas.utilities import pairwise
from compas_rhino.artists import MeshArtist
HERE = os.path.dirname(__file__)
DATA = os.path.join(HERE, '../data')
FILE = os.path.join(DATA, 'session.json')
session = compas.json_load(FILE)
mesh = session['mesh']
# ==============================================================================
# Idos
# ==============================================================================
idos = mesh.copy()
for face in mesh.faces_where({'is_loaded': False}):
idos.delete_face(face)
idos.remove_unused_vertices()
offset = 0.02
for vertex, attr in idos.vertices(True):
x, y, z = mesh.vertex_coordinates(vertex)
nx, ny, nz = mesh.vertex_normal(vertex)
if attr['nx'] is not None:
nx = attr['nx']
if attr['ny'] is not None:
ny = attr['ny']
if attr['nz'] is not None:
nz = attr['nz']
attr['x'] = x + offset * nx
attr['y'] = y + offset * ny
attr['z'] = z + offset * nz
# ==============================================================================
# Edos
# ==============================================================================
edos = idos.copy()
offset = 0.06
for vertex, attr in edos.vertices(True):
x, y, z = idos.vertex_coordinates(vertex)
nx, ny, nz = idos.vertex_normal(vertex)
if attr['nx'] is not None:
nx = attr['nx']
if attr['ny'] is not None:
ny = attr['ny']
if attr['nz'] is not None:
nz = attr['nz']
attr['x'] = x + offset * nx
attr['y'] = y + offset * ny
attr['z'] = z + offset * nz
# ==============================================================================
# Volume
# ==============================================================================
volume = idos.copy()
volume.flip_cycles()
max_vertex = volume._max_vertex + 1
max_face = volume._max_face + 1
for vertex, attr in edos.vertices(True):
volume.add_vertex(key=vertex + max_vertex, **attr)
for face in edos.faces():
vertices = edos.face_vertices(face)
vertices = [vertex + max_vertex for vertex in vertices]
volume.add_face(vertices)
boundary = edos.vertices_on_boundary()
boundary.append(boundary[0])
for a, b in pairwise(boundary):
volume.add_face([b, a, a + max_vertex, b + max_vertex])
# ==============================================================================
# Export
# ==============================================================================
session['idos'] = idos
session['edos'] = edos
session['volume'] = volume
compas.json_dump(session, FILE)
# ==============================================================================
# visualize
# ==============================================================================
artist = MeshArtist(idos, layer="HiLo::Concrete1::Idos")
artist.clear_layer()
artist.draw_mesh(disjoint=True, color=(255, 0, 0))
artist = MeshArtist(edos, layer="HiLo::Concrete1::Edos")
artist.clear_layer()
artist.draw_mesh(disjoint=True, color=(0, 0, 255))
artist = MeshArtist(volume, layer="HiLo::Concrete1::Volume")
artist.clear_layer()
artist.draw_mesh(disjoint=True)
| [
"[email protected]"
] | |
ecfbc6fbd378e0f496251bdb6fea828ba8ec686d | 09d6a9e95f0156e577e068899f20959abb0f733a | /train/trainers/trainer_controller.py | 483cbb084ec931370e279710580127352d459a36 | [] | no_license | miyosuda/animalai | 45267cd6dc63306e97f28d2217046f9e10b5b460 | 94d9d5e3acc593da878fa3dc3f38348567417578 | refs/heads/master | 2020-07-10T06:31:29.100908 | 2020-01-17T19:26:34 | 2020-01-17T19:26:34 | 204,193,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,122 | py | # -*- coding: utf-8 -*-
# # Unity ML-Agents Toolkit
# ## ML-Agent Learning
"""Launches trainers for each External Brains in a Unity Environment."""
import os
import logging
import shutil
import sys
from typing import *
import numpy as np
import tensorflow as tf
from animalai.envs import BrainInfo
from animalai.envs.exception import UnityEnvironmentException
from trainers.ppo.trainer import PPOTrainer
class TrainerController(object):
def __init__(self,
model_path: str,
summaries_dir: str,
run_id: str,
save_freq: int,
load: bool,
train: bool,
keep_checkpoints: int,
lesson: Optional[int],
external_brains: Dict[str, BrainInfo],
training_seed: int,
config=None):
"""
Arguments:
model_path:
Path to save the model.
summaries_dir:
Folder to save training summaries.
run_id:
The sub-directory name for model and summary statistics
save_freq:
Frequency at which to save model
load:
Whether to load the model or randomly initialize.
train:
Whether to train model, or only run inference.
keep_checkpoints:
How many model checkpoints to keep.
lesson:
Start learning from this lesson.
external_brains:
dictionary of external brain names to BrainInfo objects.
training_seed:
Seed to use for Numpy and Tensorflow random number generation.
"""
self.model_path = model_path
self.summaries_dir = summaries_dir
self.external_brains = external_brains
self.external_brain_names = external_brains.keys()
self.logger = logging.getLogger('mlagents.envs')
self.run_id = run_id
self.save_freq = save_freq
self.lesson = lesson
self.load_model = load
self.train_model = train
self.keep_checkpoints = keep_checkpoints
self.trainers = {}
self.global_step = 0
self.seed = training_seed
self.config = config
self.update_config = True
np.random.seed(self.seed)
tf.set_random_seed(self.seed)
def _get_measure_vals(self):
return None
def _save_model(self, steps=0):
"""
Saves current model to checkpoint folder.
steps:
Current number of steps in training process.
saver:
Tensorflow saver for session.
"""
for brain_name in self.trainers.keys():
self.trainers[brain_name].save_model()
self.logger.info('Saved Model')
def _save_model_when_interrupted(self, steps=0):
self.logger.info('Learning was interrupted. Please wait '
'while the graph is generated.')
self._save_model(steps)
def initialize_trainers(self, trainer_config):
"""
Initialization of the trainers
trainer_config:
The configurations of the trainers
"""
trainer_parameters_dict = {}
for brain_name in self.external_brains:
            # brain_name is "Learner"
trainer_parameters = trainer_config['default'].copy()
trainer_parameters['summary_path'] = '{basedir}/{name}'.format(
basedir=self.summaries_dir,
name=str(self.run_id) + '_' + brain_name)
trainer_parameters['model_path'] = '{basedir}/{name}'.format(
basedir=self.model_path,
name=brain_name)
trainer_parameters['keep_checkpoints'] = self.keep_checkpoints
if brain_name in trainer_config:
_brain_key = brain_name # "Learner"
while not isinstance(trainer_config[_brain_key], dict):
_brain_key = trainer_config[_brain_key]
for k in trainer_config[_brain_key]:
trainer_parameters[k] = trainer_config[_brain_key][k]
trainer_parameters_dict[brain_name] = trainer_parameters.copy()
for brain_name in self.external_brains:
if trainer_parameters_dict[brain_name]['trainer'] == 'ppo':
                # Create the PPOTrainer here
self.trainers[brain_name] = PPOTrainer(
self.external_brains[brain_name],
0,
                    trainer_parameters_dict[brain_name],  # the settings specified in trainer_config
self.train_model,
self.load_model,
self.seed,
self.run_id)
else:
raise UnityEnvironmentException('The trainer config contains '
'an unknown trainer type for '
'brain {}'
.format(brain_name))
@staticmethod
def _create_model_path(model_path):
try:
if not os.path.exists(model_path):
os.makedirs(model_path)
except Exception:
raise UnityEnvironmentException('The folder {} containing the '
'generated model could not be '
'accessed. Please make sure the '
'permissions are set correctly.'
.format(model_path))
def _reset_env(self, env):
"""Resets the environment.
Returns:
A Data structure corresponding to the initial reset state of the
environment.
"""
        if self.update_config:
            self.update_config = False
            return env.reset(arenas_configurations=self.config)
        else:
            return env.reset()
def start_learning(self, env, trainer_config):
# TODO: Should be able to start learning at different lesson numbers
# for each curriculum.
self._create_model_path(self.model_path)
tf.reset_default_graph()
# Prevent a single session from taking all GPU memory.
        # Create the PPOTrainer(s)
self.initialize_trainers(trainer_config)
for _, t in self.trainers.items():
self.logger.info(t)
curr_info = self._reset_env(env)
        # Log hyperparameters to Tensorboard
if self.train_model:
for brain_name, trainer in self.trainers.items():
trainer.write_tensorboard_text('Hyperparameters', trainer.parameters)
try:
            # Training loop
while any([t.get_step <= t.get_max_steps for k, t in self.trainers.items()]) \
or not self.train_model:
                # Advance training by one step
new_info = self.take_step(env, curr_info)
self.global_step += 1
if self.global_step % self.save_freq == 0 and self.global_step != 0 \
and self.train_model:
                    # Save the model checkpoint
self._save_model(steps=self.global_step)
curr_info = new_info
if self.global_step != 0 and self.train_model:
                # Save the model one last time
self._save_model(steps=self.global_step)
except KeyboardInterrupt:
if self.train_model:
self._save_model_when_interrupted(steps=self.global_step)
pass
env.close()
def take_step(self, env, curr_info):
# If any lessons were incremented or the environment is ready to be reset
if env.global_done:
curr_info = self._reset_env(env)
for brain_name, trainer in self.trainers.items():
trainer.end_episode()
# Decide and take an action
take_action_vector, \
take_action_memories, \
take_action_text, \
take_action_value, \
take_action_outputs \
= {}, {}, {}, {}, {}
for brain_name, trainer in self.trainers.items():
            # Decide the actions; each output below is an array covering all arenas.
            (take_action_vector[brain_name],    # actions to issue
             take_action_memories[brain_name],  # None (used with use_recurrent)
             take_action_text[brain_name],      # always None
             take_action_value[brain_name],     # one value per arena
             take_action_outputs[brain_name]) = trainer.take_action(curr_info)
            # take_action_outputs holds action, log_probs, value, entropy, etc.
        # Step the environment once with the chosen actions
new_info = env.step(vector_action=take_action_vector,
memory=take_action_memories,
text_action=take_action_text,
value=take_action_value)
        # BrainInfo holds visual_observations, vector_observations, memories,
        # rewards, local_done, etc.
        # e.g. visual_observations has shape (4, 84, 84, 3)
for brain_name, trainer in self.trainers.items():
            # Accumulate experiences into the buffer
trainer.add_experiences(curr_info, new_info, take_action_outputs[brain_name])
trainer.process_experiences(curr_info, new_info)
if trainer.is_ready_update() and self.train_model \
and trainer.get_step <= trainer.get_max_steps:
                # Update the policy with SGD on the accumulated experience buffer.
trainer.update_policy()
# Write training statistics to Tensorboard.
trainer.write_summary(self.global_step)
if self.train_model and trainer.get_step <= trainer.get_max_steps:
trainer.increment_step_and_update_last_reward()
return new_info
| [
"[email protected]"
] | |
b2b0b0aec75a1ce380bcdf00f88afd9502f5a719 | 5cfea22c5b2c401acae9d429915ed9ba7a73024a | /pausemenu/menues/mainmenu.py | bfd1af3639d97960f8a1cdb1a197442a06296a7d | [] | no_license | LookACastle/Oiram | f48fe3f0c9c6e3cc14737d37e5d91013414a4671 | 2302c929c874eb44fa38e6308d49e4d7a415095d | refs/heads/master | 2022-03-15T06:09:09.106758 | 2019-10-26T23:09:57 | 2019-10-26T23:09:57 | 108,526,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,284 | py | from pausemenu.menuitem.button import *
from constants import *
class MainMenu:
def __init__(self, screen):
options = ["Resume", "Options", "Save", "Exit"]
actions = [resumeAction, optionAction, saveAction, quitAction]
self.objects = []
y = 0
maxwidth = 0
for i in range(0, len(options)):
button = Button(options[i], 45, y + 30, screen, 2, actions[i], 0)
if (button.width > maxwidth):
maxwidth = button.width
self.objects.append(button)
y += 30
for o in self.objects:
o.center(maxwidth)
def resetHover(self):
for o in self.objects:
if (o.isHoverAble):
o.hover = False
def resetPress(self):
for o in self.objects:
if (o.isPressAble()):
o.pressed = False
def getCollision(self, x, y):
collided = []
for o in self.objects:
if (o.checkCollision(x, y)):
collided.append(o)
return collided
def render(self, screen, x, y):
for o in self.objects:
o.render(screen, x, y)
def quitAction(main):
if (main.player.onMap):
main.stop()
else:
main.saveConfig()
main.levelManager.changeLevel(main.player)
main.pausemenu.active = False
def resumeAction(main):
main.pausemenu.active = False
def optionAction(main):
main.pausemenu.changeMenu("option")
def saveAction(main):
main.pausemenu.changeMenu("save") | [
"[email protected]"
] | |
92296cabb36cdc43ac8a55f79c416c6d3190cc2b | f332244831040530c8d4d3ff42ee4e06078ca22b | /cart/views.py | eed5c0e9c50a9e6889b30cec001caa5258639029 | [] | no_license | worlddeleteRin/cosmetics | f8f1bd8a3d9b6b149ae29126fa6f4bd6bb5e72b1 | f7d593f3206606d24084d6281bd6d5472654da25 | refs/heads/master | 2023-03-04T04:34:59.349269 | 2021-02-13T19:43:37 | 2021-02-13T19:43:37 | 296,117,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,561 | py | from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from .models import *
from products.models import *
import urllib.parse
# to serialize to json format
from django.core import serializers
from django.core.mail import send_mail
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.html import strip_tags
# Create your views here.
def index(request):
allcategories = Category.objects.all()
allbrands = Brand.objects.all()
if not request.session.session_key:
request.session.create()
current_session_key = request.session.session_key
else:
current_session_key = request.session.session_key
cart = Cart.objects.get_or_create(
session_key = current_session_key
)
cart_items = cart[0].item_set.all()
return render(request, 'cart/index.html', {
'allbrands': allbrands,
'categories': allcategories,
'session_key': current_session_key,
'items': cart_items,
'current_cart': cart[0],
})
def add_item(request, product_id):
product = Product.objects.get(id = product_id)
if not request.session.session_key:
request.session.create()
current_session_key = request.session.session_key
else:
current_session_key = request.session.session_key
cart = Cart.objects.get_or_create(session_key = current_session_key)[0]
cart_items = cart.item_set.all()
if Item.objects.filter(cart = cart, name = product.name).exists():
current_item = Item.objects.get(cart = cart, name = product.name)
current_item.quantity += 1
current_item.save()
else:
new_item = Item(
cart = cart,
name = product.name,
price = product.price,
sale_price = product.sale_price,
imgurl = product.imgurl,
brand = product.pr_brand.name,
series = product.pr_series.name,
obiem = product.obiem,
)
new_item.save()
return HttpResponseRedirect(reverse('cart:index'))
def add_quantity(request, item_id):
current_item = Item.objects.get(id = item_id)
current_item.quantity += 1
current_item.save()
return HttpResponseRedirect(reverse('cart:index'))
def remove_quantity(request, item_id):
current_session_key = request.session.session_key
cart = Cart.objects.get_or_create(session_key = current_session_key)[0]
current_item = Item.objects.get(id = item_id)
if current_item.quantity == 1:
current_item.delete()
else:
current_item.quantity -= 1
current_item.save()
return HttpResponseRedirect(reverse('cart:index'))
def remove_item_ajax(request):
current_session_key = request.session.session_key
cart = Cart.objects.get(session_key = current_session_key)
item_id = request.GET['item_id']
current_item = Item.objects.get(id = item_id, cart = cart)
if current_item.quantity == 1:
current_item.delete()
return JsonResponse({
'message': 'everything is ok',
'need_delete': 'yes',
}, status = 200)
    if cart.if_items_empty():
        cart.promo = None
        cart.save()
else:
current_item.quantity -= 1
current_item.save()
quantity = current_item.quantity
return JsonResponse({
'message': 'everything is ok',
'need_delete': 'no',
'quantity': quantity,
}, status = 200)
def add_item_ajax(request):
if not request.session.session_key:
request.session.create()
current_session_key = request.session.session_key
else:
current_session_key = request.session.session_key
cart = Cart.objects.get_or_create(session_key = current_session_key)[0]
item_id = request.GET['item_id']
current_item = Item.objects.get(id = item_id, cart = cart)
current_item.quantity += 1
current_item.save()
quantity = current_item.quantity
return JsonResponse({
'message': 'everything is ok',
'quantity': quantity,
}, status = 200)
def update_item_amount_ajax(request):
current_session_key = request.session.session_key
cart = Cart.objects.get(session_key = current_session_key)
item_id = request.GET['item_id']
current_item = Item.objects.get(id = item_id, cart = cart)
if current_item.sale_price:
amount = current_item.quantity * current_item.sale_price
else:
amount = current_item.quantity * current_item.price
return JsonResponse({
'message': 'everything is ok',
'item_amount': amount,
}, status = 200)
def update_total_amount_ajax(request):
current_session_key = request.session.session_key
cart = Cart.objects.get(session_key = current_session_key)
total_amount = cart.get_total()
if cart.promo != None:
total_amount_promo = cart.get_total_promo()
has_promo = 'true'
return JsonResponse({
'total_amount_promo': total_amount_promo,
'message': 'everything is ok',
'total_amount': total_amount,
'has_promo': has_promo,
}, status = 200)
else:
has_promo = 'false'
return JsonResponse({
'message': 'everything is ok',
'total_amount': total_amount,
'has_promo': has_promo,
}, status = 200)
def remove_item_from_cart_ajax(request):
current_session_key = request.session.session_key
cart = Cart.objects.get(session_key = current_session_key)
current_item_id = request.GET['item_id']
print('item id is',current_item_id )
current_item = Item.objects.get(cart = cart, id = current_item_id)
print(current_item)
current_item.delete()
    if cart.if_items_empty():
        cart.promo = None
        cart.save()
return JsonResponse({
'message': 'everything is ok',
}, status = 200)
def add_to_cart_ajax(request):
if not request.session.session_key:
request.session.create()
current_session_key = request.session.session_key
else:
current_session_key = request.session.session_key
cart = Cart.objects.get_or_create(session_key = current_session_key)[0]
current_product_id = request.GET['product_id']
current_product = Product.objects.get(id = current_product_id)
message = ""
if Item.objects.filter(cart = cart, name = current_product.name,
price = current_product.price).exists():
item = Item.objects.get(cart = cart, name = current_product.name,
price = current_product.price)
item.quantity += 1
item.save()
else:
item = Item(
cart = cart,
name = current_product.name,
price = current_product.price,
sale_price = current_product.sale_price,
imgurl = current_product.imgurl,
brand = current_product.pr_brand.name,
series = current_product.pr_series.name,
obiem = current_product.obiem,
)
item.save()
return JsonResponse({
        'message': 'Товар добавлен в корзину!',  # "Item added to cart!"
}, status = 200)
def create_order_ajax(request):
current_session_key = request.session.session_key
cart = Cart.objects.get(session_key = current_session_key)
cart_items = cart.item_set.all()
# parse cart info
delivery_method = request.GET['delivery_method']
delivery_method = urllib.parse.unquote(delivery_method)
delivery_cost = request.GET['delivery_cost']
payment_method = request.GET['payment_method']
payment_method = urllib.parse.unquote(payment_method)
customer_name = request.GET['customer_name']
customer_name = urllib.parse.unquote(customer_name)
customer_phone = request.GET['customer_phone']
customer_city = request.GET['customer_city']
customer_city = urllib.parse.unquote(customer_city)
customer_address = request.GET['customer_address']
customer_address = urllib.parse.unquote(customer_address)
order_comment = request.GET['cart_comment']
order_comment = urllib.parse.unquote(order_comment)
customer_email = request.GET['customer_email']
customer_email = urllib.parse.unquote(customer_email)
order_price = int(delivery_cost) + cart.get_total_promo()
new_order = Orders(
name = customer_name,
phone = customer_phone,
email = customer_email,
delivery = delivery_method + ' ' + delivery_cost,
payment = payment_method,
city = customer_city,
address = customer_address,
order_price = order_price,
comment = order_comment,
)
new_order.save()
cart_items_mail = []
order_price_mail = order_price
order_comment_mail = order_comment
customer_address_mail = customer_city + ', ' + customer_address
delivery_method_mail = delivery_method
order_id = new_order.id
for item in cart_items:
new_order.item_set.add(item)
cart_items_mail.append([item.name, item.quantity, item.price])
for item in cart.item_set.all():
cart.item_set.remove(item)
cart.promo = None
cart.save()
cart_items_all = new_order.item_set.all()
context = {
'order_id': order_id,
'order_price_mail': order_price_mail,
'name': customer_name,
'phone': customer_phone,
'email': customer_email,
'delivery_address': customer_address_mail,
'delivery_cost': delivery_cost,
'cart_items_all': cart_items_all,
'delivery_method_mail': delivery_method_mail,
'order_comment_mail': order_comment_mail,
}
client_html_message = render_to_string('cart/blocks/order_mail_template.html', context)
client_html_message_plain = strip_tags(client_html_message)
admin_html_message = render_to_string('cart/blocks/order_mail_template_admin.html', context)
admin_html_message_plain = strip_tags(admin_html_message)
try:
send_mail(
            'Заказ № {}'.format(order_id),  # "Order No. {}"
admin_html_message_plain,
settings.EMAIL_HOST_USER,
[
# '[email protected]',
'[email protected]'
],
html_message = admin_html_message
)
print('mail is sent')
print('try to send mail')
send_mail(
            'Заказ № {}'.format(order_id),  # "Order No. {}"
client_html_message_plain,
settings.EMAIL_HOST_USER,
[
customer_email,
# '[email protected]'
],
html_message = client_html_message
)
except:
print('was an error when send mail')
return JsonResponse({
'order_created': 'yes',
}, status = 200)
def update_nav_total(request):
if not request.session.session_key:
request.session.create()
current_session_key = request.session.session_key
else:
current_session_key = request.session.session_key
cart = Cart.objects.get_or_create(session_key = current_session_key)[0]
cart_total = cart.get_total()
return JsonResponse({
'cart_total': cart_total,
}, status = 200)
def check_promo_ajax(request):
current_session_key = request.session.session_key
cart = Cart.objects.get(session_key = current_session_key)
current_promo = request.GET['promo']
if Promocode.objects.filter(name = current_promo).exists():
print('this promo exist')
promo = Promocode.objects.get(name = current_promo)
cart.promo = promo
cart.save()
return JsonResponse({
'exist': 'yes',
}, status = 200)
else:
print('this promo not exist')
return JsonResponse({
'exist': 'no',
}, status = 200)
def set_promo(request):
current_session_key = request.session.session_key
cart = Cart.objects.get(session_key = current_session_key)
if cart.promo != None:
print('promo exist')
promo_name = cart.promo.name
return JsonResponse({
'promo_name': promo_name,
'exist': 'yes'
}, status = 200)
else:
return JsonResponse({
'exist': 'no'
}, status = 200)
| [
"[email protected]"
] | |
25239a74775ef3920fb3ecec660792a627e451bc | e69053a7cc99efc9ed560b32f378920613e77517 | /coins_optimal.py | 04fcbabd8130821f86d267f59ed2c2ff2b0ab462 | [] | no_license | aantillonl/CakeInterviewProblems | 41a2a719364440a11af6841ce3b2e334c207c456 | d671aa6b196a433963f62ef5217582d3f80c6c63 | refs/heads/master | 2021-08-23T12:13:22.811133 | 2017-12-04T21:34:52 | 2017-12-04T21:34:52 | 112,225,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,355 | py | # Your quirky boss collects rare, old coins...
# They found out you're a programmer and asked you to solve something they've been wondering for a long time.
# Write a function that, given:
# an amount of money
# a list of coin denominations
# computes the number of ways to make the amount of money with coins of the available denominations.
# Example: for amount=4 (4¢) and denominations=[1,2,3] (1¢, 2¢ and 3¢), your program would output 4—the number of ways to make 4¢ with those denominations:
# 1¢, 1¢, 1¢, 1¢
# 1¢, 1¢, 2¢
# 1¢, 3¢
# 2¢, 2¢
coins = [1,2,3]
target = 5
N = len(coins)
# Consider base cases: no coins given, and amount = 0
# solutions[y][x]: y = amount (0..target), x = number of coin denominations considered (0..N)
solutions = [[0 for x in range(N + 1)] for x in range(0, target + 1)]
# Fill the enteries for 0 value case (n = 0)
for i in range(N + 1):
solutions[0][i] = 1
for y in range(1, target + 1):
for x in range(1, N + 1):
curr_coin = coins[x-1]
sol_without_coin = solutions[y][x-1]
if curr_coin <= y:
sol_with_coin = solutions[y - curr_coin][x]
solutions[y][x] = sol_with_coin + sol_without_coin
else:
solutions[y][x] = sol_without_coin
print(solutions[target][N])
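# Recurrence used above: solutions[y][x] = solutions[y][x-1] + solutions[y - coins[x-1]][x],
# i.e. ways without the current coin plus ways that use it at least once.
# For the header example (target=4, coins=[1,2,3]) the answer is 4; with
# target=5 as set above, this prints 5.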
# Damn, this was tough as hell; I don't know how I would have come up with this solution myself. | [
"[email protected]"
] | |
803fe059f40e4b681949b8595a78dbf9f235cec5 | 7fbd07ed9d23381f73d408cb3a845e94c81ca496 | /mysite/settings.py | 6e3598873769ed4c12d9fe5ad4c2b240db9f6a16 | [] | no_license | halildilaver/E-CommercewithDjango | a1be0ce32f07f7b8874571140f845a8bf1ecc205 | b867a87bf4c272c461a75a5bc50af0895bcb85f7 | refs/heads/master | 2020-06-03T14:51:21.131135 | 2019-06-12T17:08:06 | 2019-06-12T17:08:06 | 191,613,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,641 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2kx52jjxr@@xv=@^q+e#hbz%$ujy6x8un*ie$vd!z45j_#v!^d'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'product.apps.ProductConfig',
'home.apps.HomeConfig',
'polls.apps.PollsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ckeditor',
'ckeditor_uploader'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/images/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'uploads')
####################################
## CKEDITOR CONFIGURATION ##
####################################
SITE_ID = 1
CKEDITOR_JQUERY_URL = 'https://ajax.googleapis.com/ajax/libs/jquery/2.2.4/jquery.min.js'
CKEDITOR_UPLOAD_PATH = 'uploads/'
CKEDITOR_IMAGE_BACKEND = "pillow"
CKEDITOR_CONFIGS = {
'default': {
'toolbar': None,
},
} | [
"[email protected]"
] | |
5e2e9ee1d976ed4b9dae0c19f9e48d49c14d8d4a | d4442db5a7ab9db2b04fef640a9864f3fba54758 | /src/python/WMCore/Services/Dashboard/DashboardAPI.py | 9f90e4842ae59431378744395dc3404a30601661 | [] | no_license | stuartw/WMCore | fa25ff19ab5058a635d35d3c58a0ac56a3e079a1 | 38c39c43f7237fd316930839674ac9be3c0ee8cc | refs/heads/master | 2021-01-18T07:18:18.324604 | 2012-10-18T22:30:34 | 2012-10-18T22:30:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,823 | py | #!/usr/bin/python
"""
This is the Dashboard API Module for the Worker Node
"""
from WMCore.Services.Dashboard import apmon
import time
from types import DictType, StringType, ListType
#
# Methods for manipulating the apmon instance
#
# Internal attributes
apmonInstance = None
apmonInit = False
# Monalisa configuration
apmonConf = ["cms-wmagent-job.cern.ch"]
#
# Method to create a single apmon instance at a time
#
def getApmonInstance( logr, apmonServer ):
global apmonInstance
global apmonInit
if apmonInstance is None and not apmonInit :
apmonInit = True
if apmonInstance is None :
try :
if not apmonServer:
apmonInstance = apmon.ApMon(apmonConf, logr) #apmonLoggingLevel)
else:
apmonInstance = apmon.ApMon(apmonServer, logr)
except Exception, e :
pass
return apmonInstance
#
# Method to free the apmon instance
#
def apmonFree() :
global apmonInstance
global apmonInit
if apmonInstance is not None :
try :
apmonInstance.free()
except Exception, e :
pass
apmonInstance = None
apmonInit = False
#
# Method to send params to Monalisa service
#
def apmonSend(taskid, jobid, params, logr, apmonServer) :
apm = getApmonInstance( logr, apmonServer )
if apm is not None :
if not isinstance(params, DictType) and not isinstance(params, ListType) :
params = {'unknown' : '0'}
if not isinstance(taskid, StringType) :
taskid = 'unknown'
if not isinstance(jobid, StringType) :
jobid = 'unknown'
try :
apm.sendParameters(taskid, jobid, params)
return 0
except Exception, e:
pass
return 1
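# Minimal usage sketch (the task/job identifiers and parameter names below are
# illustrative assumptions, not real WMAgent conventions):
#
#   import logging
#   logr = logging.getLogger('dashboard')
#   apmonSend('wmagent_task', 'job_0001', {'StatusValue': 'running'}, logr, None)
#   apmonFree()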
| [
"sfoulkes@4525493e-7705-40b1-a816-d608a930855b"
] | sfoulkes@4525493e-7705-40b1-a816-d608a930855b |
1bd71c4ddec832e0d21c4f00197795038c20d8e4 | e5b5e21451f21bab50ed40eba73fbb1146a166b5 | /ecommerceproject/cart/views.py | a97556adf7f9f6ee15a81b6041b207180a3c90e5 | [] | no_license | Ebyprogramz/grocerystore | 64b46f16c98523bca9b5664bb7437ba549cb51e6 | 91afce25766879d3891f8b1e46b42cef09560f0f | refs/heads/master | 2023-08-15T01:45:16.668194 | 2021-09-29T05:37:30 | 2021-09-29T05:37:30 | 410,158,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,121 | py | from django.shortcuts import render, redirect, get_object_or_404
from store.models import Product
from .models import Cart,Cart_Item
from django.core.exceptions import ObjectDoesNotExist
# Create your views here.
def cart_id(request):
cart=request.session.session_key
if not cart:
cart=request.session.create()
return cart
def add_cart(request,product_id):
product=Product.objects.get(id=product_id)
try:
cart=Cart.objects.get(cart_id=cart_id(request))
except Cart.DoesNotExist:
cart=Cart.objects.create(cart_id=cart_id(request))
        cart.save()
try:
cart_item=Cart_Item.objects.get(product=product,cart=cart)
if cart_item.quantity < cart_item.product.stock:
cart_item.quantity +=1
cart_item.save()
except Cart_Item.DoesNotExist:
cart_item=Cart_Item.objects.create(
product=product,
quantity=1,
cart=cart
)
cart_item.save()
return redirect('cart:cart_detail')
def cart_detail(request,total=0,counter=0,cart_items=None):
try:
cart=Cart.objects.get(cart_id=cart_id(request))
cart_items=Cart_Item.objects.filter(cart=cart,active=True)
for cart_item in cart_items:
total+=(cart_item.product.price * cart_item.quantity)
counter +=cart_item.quantity
except ObjectDoesNotExist:
pass
return render(request,'cart.html',dict(cart_items=cart_items,total=total,counter=counter))
def cart_remove(request,product_id):
cart=Cart.objects.get(cart_id=cart_id(request))
product=get_object_or_404(Product,id=product_id)
cart_item=Cart_Item.objects.get(product=product,cart=cart)
if cart_item.quantity >1:
cart_item.quantity -=1
cart_item.save()
else:
cart_item.delete()
return redirect('cart:cart_detail')
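# Sketch of the URL patterns the redirects in this module assume (paths are
# illustrative; only the 'cart' namespace and the view names are taken from
# the code above):
#   app_name = 'cart'
#   urlpatterns = [
#       path('', views.cart_detail, name='cart_detail'),
#       path('add/<int:product_id>/', views.add_cart, name='add_cart'),
#       path('remove/<int:product_id>/', views.cart_remove, name='cart_remove'),
#       path('full_remove/<int:product_id>/', views.full_remove, name='full_remove'),
#   ]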
def full_remove(request,product_id):
cart=Cart.objects.get(cart_id=cart_id(request))
product=get_object_or_404(Product,id=product_id)
cart_item=Cart_Item.objects.get(product=product,cart=cart)
cart_item.delete()
return redirect('cart:cart_detail') | [
"[email protected]"
] | |
69de6c1c8c8510d0f6fd116fd216eb399a3db19a | a3d73905af9102c6388b7501aa5067e4008d4c6a | /Programming Assignment 3/histogram.py | 73be3f9db0101a93413a4962ca936d1b64d4ba93 | [] | no_license | ShumbaBrown/CSCI-100 | b9a5b6c4446a5fdec3044d82eca3f59e502d5372 | 8eec33e39b26e10ddf9b8395fafe861cf1eb53ec | refs/heads/master | 2020-05-23T06:25:34.908759 | 2016-10-07T05:25:15 | 2016-10-07T05:25:15 | 70,217,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | def PrintHistogram(nums):
# Prints the histogram for a list of numbers
    # Dictionary to hold each key (number) and value (number of occurrences)
numbers = { }
# Iterate through all the values in the list
for i in nums:
        # Increment the number of asterisks for the corresponding value in the dictionary
if i not in numbers:
numbers[i] = '*'
else:
numbers[i] = numbers[i] + '*'
# Print the histogram values
for i in numbers:
print('%s: %s' % (str(i), numbers[i]))
PrintHistogram([-2, -2, -3, -2])
PrintHistogram([1, 2.5, 3, 4, 4, 3, 6])
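# Expected output of the first call (dict iteration follows insertion order
# on Python 3.7+):
#   -2: ***
#   -3: *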
| [
"[email protected]"
] | |
4670ba9b785563921ebd4e8eb26fa337062abb5b | 1625edfe28b4b0979fd32b4a3c5e55249a993fd5 | /baekjoon14915.py | 7648498a85fccf5a369e7197408b17d1726a754d | [] | no_license | beOk91/baekjoon2 | b8bf504c506c6278899d4107ecfe51974ef13f5e | 39569f8effb8e32405a7d74d98bdabcab783ec56 | refs/heads/master | 2023-05-11T20:11:19.015113 | 2020-09-14T23:58:49 | 2020-09-14T23:58:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | m,n=map(int,input().strip().split())
def conversion(m, n):
    # Recursively convert the non-negative integer m to base n (2..16),
    # using c as the digit lookup table.
    c = "0123456789ABCDEF"
    if m < n:
        return str(c[m])
    else:
        return conversion(m // n, n) + str(c[m % n])
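# Worked example: for input "255 16" the recursion unwinds as
# conversion(15, 16) + c[255 % 16] -> "F" + "F" = "FF".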
print(conversion(m,n)) | [
"[email protected]"
] | |
d7551e573e1989b8a7920c2d5ef568749c8cd130 | 642716a67e15459d713217ed02aa38d56348ff89 | /SF_Movies/urls.py | 2462fc91656ad0ae785117573efd3b5c33275fe6 | [] | no_license | zxzhang/coding-challenge-tools | 42eaa2355ad66e82abe2ac6401d702f505d3ff06 | cfc8c3a446331d2f29dae4fe972cfa7f7fdcb25d | refs/heads/master | 2021-01-19T07:24:06.713307 | 2015-05-20T01:43:52 | 2015-05-20T01:43:52 | 34,290,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import RedirectView
# Passing permanent=True to as_view() avoids mutating the class attribute,
# which would silently affect every RedirectView in the project.
urlpatterns = [
    url(r'^$', RedirectView.as_view(url='/movies', permanent=True)),
url(r'^movies/', include('movies.urls')),
url(r'^admin/', include(admin.site.urls)),
]
| [
"[email protected]"
] | |
f28822e33cbfcd736db6cbcf9f5ff11dd18a9614 | abcc4679e9977e8e92f029da56f80e3cad69ecbb | /app/models.py | 00c875aec8e8ce243df5431ff3a634beb7b9d1a0 | [] | no_license | emjrymer/MovieRating | ecf2d5d1c096d65c5861f6b6e0511e368a1d4307 | 3785eeab01ffb23efe402f19bdc1a26bbb6f192c | refs/heads/master | 2021-06-04T16:05:58.996506 | 2016-03-15T20:41:59 | 2016-03-15T20:41:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,812 | py | from django.db import models
# Create your models here.
class Rater(models.Model):
age = models.IntegerField()
gender = models.CharField(max_length=2)
occupation = models.CharField(max_length=50)
zip_code = models.CharField(max_length=10)
class Movie(models.Model):
movie_title = models.CharField(max_length=30)
release_date = models.CharField(max_length=30)
video_release_date = models.CharField(max_length=30, blank=True, default="")
imdb = models.URLField()
unknown_genre = models.BooleanField(default=0)
action = models.BooleanField(default=0)
adventure = models.BooleanField(default=0)
animation = models.BooleanField(default=0)
childrens = models.BooleanField(default=0)
comedy = models.BooleanField(default=0)
crime = models.BooleanField(default=0)
documentary = models.BooleanField(default=0)
drama = models.BooleanField(default=0)
fantasy = models.BooleanField(default=0)
filmnoir = models.BooleanField(default=0)
horror = models.BooleanField(default=0)
musical = models.BooleanField(default=0)
mystery = models.BooleanField(default=0)
romance = models.BooleanField(default=0)
scifi = models.BooleanField(default=0)
thriller = models.BooleanField(default=0)
war = models.BooleanField(default=0)
western = models.BooleanField(default=0)
avg_rating = models.FloatField(null=True)
def __str__(self):
return self.movie_title
class Review(models.Model):
reviewer = models.ForeignKey(Rater)
movie = models.ForeignKey(Movie)
rating = models.IntegerField()
def __str__(self):
return self.movie.movie_title
class Ureview(models.Model):
user_review = models.TextField()
user_movie = models.TextField()
class Meta:
ordering = ["-id"]
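# Example query sketch (ORM usage; field and model names follow the classes
# above, the filter values are illustrative):
#   Movie.objects.filter(comedy=True, avg_rating__gte=4.0)
#   Review.objects.filter(reviewer__occupation='student')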
| [
"[email protected]"
] | |
00c251321617e1466a341cd899b00c702a0c9568 | bf5058783d658d958bf84910721e828f1316dbbe | /SisconcBNB/apps/alumnos/forms.py | 3345e54865c0d2970346942ce640ffe139b4b2fa | [] | no_license | djangoadminn/jose | 3acfe5a2861305c94658f3177d61ea8c557b3d01 | 1567894037548b7b452ac202b972cf0b1867a064 | refs/heads/master | 2021-01-22T08:38:31.707532 | 2017-05-27T22:20:12 | 2017-05-27T22:20:12 | 92,627,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,729 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from django import forms
from SisconcBNB.apps.alumnos.models import Alumno
from SisconcBNB.apps.alumnos.models import Seccion
from SisconcBNB.apps.alumnos.models import Inscripcion
from SisconcBNB.apps.alumnos.models import Enfermedad
from SisconcBNB.apps.alumnos.models import Enfermedad_alumno
from SisconcBNB.apps.alumnos.models import Profesor
from SisconcBNB.apps.alumnos.models import Seccion_profesor
from SisconcBNB.apps.alumnos.forms_date import DateInput
# Student form 1
class alumnForm(forms.ModelForm):
class Meta:
model = Alumno
fields = [
'codigo_hab',
'cedula_escolar',
'procedencia_alumn',
'plantel_retiro_alumn',
'lugar_habi_alumn',
'punto_referencia_alumn',
'religion',
]
labels = {
'codigo_hab':'codigo_hab',
'cedula_escolar':'cedula_escolar',
'procedencia_alumn':'procedencia_alumn',
'plantel_retiro_alumn':'plantel_retiro_alumn',
'lugar_habi_alumn':'Lugar_habi_alumn',
'punto_referencia_alumn':'punto_referencia_alumn',
'religion':'religion',
}
widgets = {
'codigo_hab':forms.Select(attrs={'class':'material-control tooltips-general'}),
'cedula_escolar':forms.Select(attrs={'class':'material-control tooltips-general','autocomplete': 'off'}),
'procedencia_alumn':forms.TextInput(attrs={'class':'material-control tooltips-general','placeholder':'procedencia','size':'40','autocomplete': 'off'}),
'plantel_retiro_alumn':forms.TextInput(attrs={'class':'material-control tooltips-general','placeholder':'plantel','size':'40','autocomplete': 'off'}),
'lugar_habi_alumn':forms.TextInput(attrs={'class':'material-control tooltips-general','placeholder':'lugar','size':'40','autocomplete': 'off'}),
'punto_referencia_alumn':forms.TextInput(attrs={'class':'material-control tooltips-general','placeholder':'punto de referencia','size':'40','autocomplete': 'off'}),
'religion':forms.Select(attrs={'class':'material-control tooltips-general'}),
}
# Section form 2
class seccForm(forms.ModelForm):
class Meta:
model = Seccion
fields = [
'codigo_seccion',
'turno',
'seccion',
'grado',
]
labels = {
'codigo_seccion':'codigo_seccion',
'turno':'turno',
'seccion':'seccion',
'grado':'grado',
}
widgets = {
'codigo_seccion':forms.NumberInput(attrs={'class':'material-control tooltips-general','placeholder':'codigo','size':'40','autocomplete': 'off'}),
'turno':forms.Select(attrs={'class':'material-control tooltips-general'}),
'seccion':forms.Select(attrs={'class':'material-control tooltips-general'}),
'grado':forms.Select(attrs={'class':'material-control tooltips-general'}),
}
# Enrollment form 3
class inscripForm(forms.ModelForm):
beca_estudiantil=forms.BooleanField(label='beca_estudiantil', required=True,initial=False)
class Meta:
model = Inscripcion
fields = [
'cod_inscri',
'cedula_escolar',
'codigo_seccion',
'talla_pantalon',
'talla_franela',
'talla_Zapato',
'beca_estudiantil',
'ano_escolar',
]
labels = {
'cod_inscri':'cod_inscri',
'cedula_escolar':'cedula_escolar',
'codigo_seccion':'codigo_seccion',
'talla_pantalon':'talla_pantalon',
'talla_franela':'talla_franela',
'talla_Zapato':'talla_Zapato',
'beca_estudiantil':'beca_estudiantil',
'ano_escolar':'ano_escolar',
}
widgets = {
'cod_inscri':forms.NumberInput(attrs={'class':'material-control tooltips-general'}),
'cedula_escolar':forms.Select(attrs={'class':'material-control tooltips-general'}),
'codigo_seccion':forms.Select(attrs={'class':'material-control tooltips-general'}),
'talla_pantalon':forms.Select(attrs={'class':'material-control tooltips-general'}),
'talla_franela':forms.Select(attrs={'class':'material-control tooltips-general'}),
'talla_Zapato':forms.Select(attrs={'class':'material-control tooltips-general'}),
'ano_escolar':forms.Select(attrs={'class':'material-control tooltips-general'}),
}
# Illness form 4
class enferForm(forms.ModelForm):
class Meta:
model = Enfermedad
fields = [
'codigo_enfermedad',
'nombres_enfermedad',
'descripcion_enfermedad',
]
labels = {
'codigo_enfermedad':'codigo_enfermedad',
'nombres_enfermedad':'nombres_enfermedad',
'descripcion_enfermedad':'descripcion_enfermedad',
}
widgets = {
'codigo_enfermedad':forms.NumberInput(attrs={'class':'material-control tooltips-general','placeholder':'código','size':'40','autocomplete': 'off'}),
'nombres_enfermedad':forms.TextInput(attrs={'class':'material-control tooltips-general','placeholder':'nombre de la enfermedad','size':'40','autocomplete': 'off'}),
'descripcion_enfermedad':forms.Textarea(attrs={'class':'material-control tooltips-general','placeholder':'descripción','size':'40','autocomplete': 'off'}),
}
# Student illness form 5
class enfermalumnForm(forms.ModelForm):
#fecha_enfer=forms.DateField()
class Meta:
model = Enfermedad_alumno
fields = [
'cedula_escolar',
'codigo_enfermedad',
'fecha_enfer',
]
labels = {
'cedula_escolar':'cedula_escolar',
'codigo_enfermedad':'codigo_enfermedad',
'fecha_enfer':'fecha_enfer',
}
widgets = {
'cedula_escolar':forms.Select(attrs={'class':'material-control tooltips-general'}),
'codigo_enfermedad':forms.Select(attrs={'class':'material-control tooltips-general'}),
'fecha_enfer': DateInput(format = '%Y-%m-%d'),
}
# Teacher form 6
class profeForm(forms.ModelForm):
class Meta:
model = Profesor
fields = [
#'codigo_prof',
'codigo_hab',
'turno',
'especialidad_prof',
'fecha_ingreso',
]
labels = {
#'codigo_prof':'codigo_prof',
'codigo_hab':'codigo_hab',
'turno':'turno',
'especialidad_prof':'especialidad_prof',
'fecha_ingreso':'fecha_ingreso',
}
widgets = {
'codigo_prof':forms.NumberInput(attrs={'class':'material-control tooltips-general','placeholder':'codigo','size':'40','autocomplete': 'off'}),
'codigo_hab':forms.Select(attrs={'class':'material-control tooltips-general'}),
'turno':forms.Select(attrs={'class':'material-control tooltips-general'}),
'especialidad_prof':forms.TextInput(attrs={'class':'material-control tooltips-general','placeholder':'especialidad','size':'40','autocomplete': 'off'}),
'fecha_ingreso':DateInput(format = '%Y-%m-%d'),
}
# Teacher section form 7
class seccproForm(forms.ModelForm):
class Meta:
model = Seccion_profesor
fields = [
'codigo_prof',
'codigo_seccion',
]
labels = {
'codigo_prof':'codigo_prof',
'codigo_seccion':'codigo_seccion',
}
widgets = {
'codigo_prof':forms.Select(attrs={'class':'material-control tooltips-general','autocomplete': 'off'}),
'codigo_seccion':forms.Select(attrs={'class':'material-control tooltips-general','autocomplete': 'off'}),
} | [
"[email protected]"
] | |
b3ffb1f7298e5fec7c80d71946a31e424ff4682d | f3a31d0692f0cc4deb5d23f9da38eb69623ca8d1 | /Dynamics/Finite-sized_SIAM/fig_4.py | 2ada6228f29e1b93cfd3e04bbda484ce20125030 | [] | no_license | soumyodipto/SIAM | f2039d5796412db1e2204b65f1b43189227c521b | 4c7a96684dff759d0da453ed437827dd1b3da0fd | refs/heads/master | 2021-09-09T09:25:10.994397 | 2018-03-14T19:01:36 | 2018-03-14T19:01:36 | 107,188,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,409 | py | from os import path
import h5py
import numpy as np
import matplotlib
import matplotlib.patches as mpatches
import pylab as plt
from matplotlib.ticker import AutoMinorLocator
from matplotlib.ticker import AutoMinorLocator
from matplotlib.ticker import FormatStrFormatter
majorFormatter = FormatStrFormatter('%g')
plt.rc('text.latex', preamble = '\usepackage{amsmath},' '\usepackage{yfonts},' '\usepackage[T1]{fontenc},' '\usepackage[latin1]{inputenc},' '\usepackage{txfonts},' '\usepackage{times},' '\usepackage{blindtext},' '\usepackage{braket}' )
plt.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica'],'size':8})
plt.rc('lines', linewidth=0.7)
plt.rc('xtick', labelsize='small')
plt.rc('ytick', labelsize='small')
plt.rc('legend', fontsize='medium')
plt.rc('text', usetex=True)
nsites=8
delta=0.005
nimps=[1,2]
Us = [0.0, 2.5, 5.0, 7.5]
omega_grid = np.arange(-1.,1.,0.01)
edy=[]
emb1y=[]
emb2y=[]
#emb4y=[]
for ind, U in enumerate(Us):
eddir = './siam_ED/siam_dos_ed_U'+str(U)+'_nsites'+str(nsites)+'_delta'+str(delta)+'/siam_dos_ed_nsites'+str(nsites)+'_U'+str(U)+'_eta'+str(delta)
data_ed = np.loadtxt(eddir)
edy.append(data_ed[:,1])
emb1dir = './siam_emb/siam_emb_dos_nsites'+str(nsites)+'_U'+str(U/10)+'_nimp'+str(1)+'_eta'+str(delta)
data_emb1 = np.loadtxt(emb1dir)
emb1y.append(data_emb1[:,1])
emb2dir = './siam_emb/siam_emb_dos_nsites'+str(nsites)+'_U'+str(U/10)+'_nimp'+str(2)+'_eta'+str(delta)
data_emb2 = np.loadtxt(emb2dir)
emb2y.append(data_emb2[:,1])
#emb4dir = './siam_emb/siam_emb_dos_nsites'+str(nsites)+'_U'+str(U/10)+'_nimp'+str(4)+'_eta'+str(delta)
#data_emb4 = np.loadtxt(emb4dir)
#emb4y.append(data_emb4[:,1])
fig_size = (3.375,3.375/1.3)
f1, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=fig_size, dpi=200, sharex='col', sharey='row')
#dashes = [5,2,10,5]
dashes=[2, 1]
dashes1 = [5,2]
#ax1 = plt.subplot(221)
line1, = ax1.plot(omega_grid[120:190], edy[0][120:190],'k', markersize=1.5,markevery=1)
line2, = ax1.plot(omega_grid[120:190], emb1y[0][120:190], 'b--')
line3, = ax1.plot(omega_grid[120:190], emb2y[0][120:190], 'r--')
line2.set_dashes(dashes)
line3.set_dashes(dashes1)
#line4, = ax1.plot(omega_grid, emb4y[0], 'r--')
#ax1.legend(('ED','Emb(2)','Emb(1)'),'upper right',ncol=1,prop={'size':6})
ax1.set_ylim(0,8)
#textstr = '$\eta='+str(delta)+'$'
#plt.text(0.1, 0.7, textstr ,verticalalignment='top', horizontalalignment='left',transform=ax1.transAxes, color='black', fontsize=20)
textstr = '$U/V=0$'
plt.text(0.1, 0.9, textstr ,verticalalignment='top', horizontalalignment='left',transform=ax1.transAxes, color='black')
#ax1.set_xlabel(r'$\omega$')
ax1.set_ylabel(r'$A(\omega)$')
ax1.yaxis.set_ticks(np.arange(0,10,2))
minorLocator = AutoMinorLocator()
ax1.yaxis.set_minor_locator(minorLocator)
ax1.tick_params(which='both', width=0.5)
ax1.tick_params(which='major', length=4)
ax1.tick_params(which='minor', length=1.5)
ax1.yaxis.set_major_formatter(majorFormatter)
#ax2 = plt.subplot(222, sharex=ax1, sharey=ax1)
line1, = ax2.plot(omega_grid[120:190], edy[1][120:190],'k', markersize=1.5,markevery=1, label='ED')
line2, = ax2.plot(omega_grid[120:190], emb1y[1][120:190], 'b--', label='Emb(1)')
line3, = ax2.plot(omega_grid[120:190], emb2y[1][120:190], 'r--', label='Emb(2)')
line2.set_dashes(dashes)
line3.set_dashes(dashes1)
ax2.legend(loc='upper right',frameon=True,ncol=1,prop={'size':6})
#line4, = ax2.plot(omega_grid, emb4y[1], 'r--')
textstr = '$U/V=2.5$'
plt.text(0.1, 0.9, textstr ,verticalalignment='top', horizontalalignment='left',transform=ax2.transAxes, color='black')
#ax2.tick_params(axis='x',length=0, width=0)
#ax3 = plt.subplot(223, sharex=ax1, sharey=ax1)
line1, = ax3.plot(omega_grid[120:190], edy[2][120:190],'k', markersize=1.5,markevery=1)
line2, = ax3.plot(omega_grid[120:190], emb1y[2][120:190], 'b--')
line3, = ax3.plot(omega_grid[120:190], emb2y[2][120:190], 'r--')
line2.set_dashes(dashes)
line3.set_dashes(dashes1)
#line4, = ax3.plot(omega_grid, emb4y[2], 'r--')
ax3.set_xlabel(r'$\omega$')
ax3.set_ylabel(r'$A(\omega)$')
ax3.set_ylim(0,8)
textstr = '$U/V=5.0$'
plt.text(0.1, 0.9, textstr ,verticalalignment='top', horizontalalignment='left',transform=ax3.transAxes, color='black')
ax3.yaxis.set_ticks(np.arange(0,10,2))
ax3.xaxis.set_ticks([0.2,0.4,0.6,0.8])
minorLocator = AutoMinorLocator()
ax3.yaxis.set_minor_locator(minorLocator)
ax3.tick_params(which='both', width=0.5)
ax3.tick_params(which='major', length=4)
ax3.tick_params(which='minor', length=1.5)
ax3.xaxis.set_major_formatter(majorFormatter)
ax3.yaxis.set_major_formatter(majorFormatter)
#ax4 = plt.subplot(224, sharex=ax1, sharey=ax1)
line1, = ax4.plot(omega_grid[120:190], edy[3][120:190],'k', markersize=1.5,markevery=1)
line2, = ax4.plot(omega_grid[120:190], emb1y[3][120:190], 'b--')
line3, = ax4.plot(omega_grid[120:190], emb2y[3][120:190], 'r--')
line2.set_dashes(dashes)
line3.set_dashes(dashes1)
ax4.xaxis.set_ticks([0.2,0.4,0.6,0.8])
#line4, = ax4.plot(omega_grid, emb4y[3], 'r--')
textstr = '$U/V=7.5$'
plt.text(0.1, 0.9, textstr ,verticalalignment='top', horizontalalignment='left',transform=ax4.transAxes, color='black')
ax4.set_xlabel(r'$\omega$')
ax4.xaxis.set_major_formatter(majorFormatter)
f1.subplots_adjust(hspace=0)
#f.subplots_adjust(wspace=0.05)
f1.tight_layout(pad=0.15)
f1.subplots_adjust(wspace=0.1)
f1.savefig('ref1_reply_fig2.pdf')
plt.show()
| [
"[email protected]"
] | |
983aa7b87ff0971ad8a96fbf487455eb742081e6 | 27cf8262cbe20597e61f89bdc2849aaa5c5ce4f4 | /dao/reaction_dao.py | 420b1a99c48e129bdc5294de13579a84b212162b | [] | no_license | rfgsantos/Moodler-Web-Server | dfa69efa01a7ac10a170c69b4ff969daae82114e | be6b1d414a9f5ff201ce9f01fb721102af1473e3 | refs/heads/master | 2022-12-10T22:00:29.131783 | 2018-07-16T03:31:50 | 2018-07-16T03:31:50 | 139,902,354 | 1 | 1 | null | 2022-12-08T02:15:15 | 2018-07-05T21:27:13 | Python | UTF-8 | Python | false | false | 1,756 | py | from datetime import date, datetime, timedelta
from utils.python_database_connector import DatabaseConnector
from dtos.reaction_dto import Reaction
class ReactionDao:
def __init__(self):
self.db = DatabaseConnector()
def __new__(cls):
if not hasattr(cls, 'instance'):
cls.instance = super(ReactionDao, cls).__new__(cls)
return cls.instance
def get_all_reaction(self):
query = "SELECT * FROM reaction"
self.db.executeQuery(query)
return list(map(lambda reaction: self.map_reaction(reaction), self.db.getQueryResult()))
def get_reaction_by_id(self,id):
query = "SELECT * FROM reaction WHERE reaction.id='%s'" % id
self.db.executeQuery(query)
return list(map(lambda reaction: self.map_reaction(reaction), self.db.getQueryResult()))
def insert_reaction(self,json_params):
params = (json_params['id'],json_params['user_id'],json_params['track_id'],json_params['hrv'],json_params['evaluation'], json_params['user_evaluation'])
query = "INSERT INTO reaction (id,user_id,track_id,hrv,evaluation,user_evaluation) VALUES ('%s','%s','%s','%s','%s','%s')" % params
return self.db.executeQuery(query,isInsert=True)
def delete_reaction(self,id):
query = "DELETE FROM reaction WHERE reaction.id='%s'" % id
return self.db.executeQuery(query,isInsert=True)
def map_reaction(self,reaction_input):
reaction = Reaction(
reaction_input['id'],
reaction_input['user_id'],
reaction_input['track_id'],
reaction_input['hrv'],
reaction_input['evaluation'],
reaction_input['user_evaluation']
)
return reaction.__dict__
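# Minimal usage sketch (the surrounding route layer and the field values are
# illustrative assumptions, not part of this module):
#
#   dao = ReactionDao()
#   dao.insert_reaction({'id': 1, 'user_id': 2, 'track_id': 3, 'hrv': 55.0,
#                        'evaluation': 'calm', 'user_evaluation': 'calm'})
#   print(dao.get_reaction_by_id(1))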
| [
"[email protected]"
] | |
ec232876bf6ba4dec0f2ee9821f5917b1a88791d | 584aefa111a279250bde4d2d0fa3fc64fda8706b | /models/WGANGP.py | ff09e4911cd4bc750e115cbf2f42ba8bc9bbbbd4 | [] | no_license | shotakikuchi/GenerativeDeepLearning | cce2b851ba722b44432566a7b5bc02d1c950c0f7 | a67bc43986df63065e81a3a840160e25099653bc | refs/heads/master | 2020-07-02T16:43:09.508058 | 2019-08-17T13:10:14 | 2019-08-17T13:10:14 | 201,592,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,689 | py | from keras.layers import Input, Conv2D, Flatten, Dense, Conv2DTranspose, Reshape, Lambda, Activation, \
BatchNormalization, LeakyReLU, Dropout, ZeroPadding2D, UpSampling2D
from keras.layers.merge import _Merge
from keras.models import Model, Sequential
from keras import backend as K
from keras.optimizers import Adam, RMSprop
from keras.utils import plot_model
from keras.initializers import RandomNormal
from functools import partial
import numpy as np
import os
import pickle
import matplotlib.pyplot as plt
class RandomWeightedAverage(_Merge):
    """
    Provides a random weighted average between real and generated image samples.
    """
    def __init__(self, batch_size):
        super().__init__()
        self.batch_size = batch_size
def _merge_function(self, inputs):
alpha = K.random_uniform((self.batch_size, 1, 1, 1))
return (alpha * inputs[0]) + ((1 - alpha) * inputs[1])
class WGANGP():
def __init__(self,
input_dim,
critic_conv_filters,
critic_conv_kernel_size,
critic_conv_strides,
critic_batch_norm_momentum,
critic_activation,
critic_dropout_rate,
critic_learning_rate,
generator_initial_dense_layer_size,
generator_upsample,
generator_conv_filters,
generator_conv_kernel_size,
generator_conv_strides,
generator_batch_norm_momentum,
generator_activation,
generator_dropout_rate,
generator_learning_rate,
optimizer,
grad_weight,
z_dim,
batch_size
):
self.name = 'gan'
self.input_dim = input_dim
self.critic_conv_filters = critic_conv_filters
self.critic_conv_kernel_size = critic_conv_kernel_size
self.critic_conv_strides = critic_conv_strides
self.critic_batch_norm_momentum = critic_batch_norm_momentum
self.critic_activation = critic_activation
self.critic_dropout_rate = critic_dropout_rate
self.critic_learning_rate = critic_learning_rate
self.generator_initial_dense_layer_size = generator_initial_dense_layer_size
self.generator_upsample = generator_upsample
self.generator_conv_filters = generator_conv_filters
self.generator_conv_kernel_size = generator_conv_kernel_size
self.generator_conv_strides = generator_conv_strides
self.generator_batch_norm_momentum = generator_batch_norm_momentum
self.generator_activation = generator_activation
self.generator_dropout_rate = generator_dropout_rate
self.generator_learning_rate = generator_learning_rate
self.optimiser = optimizer
self.z_dim = z_dim
self.n_layers_critic = len(critic_conv_filters)
self.n_layers_generator = len(generator_conv_filters)
# 'he normal'
self.weight_init = RandomNormal(mean=0, stddev=0.02)
self.grad_weight = grad_weight
self.batch_size = batch_size
self.d_losses = []
self.g_losses = []
self.epoch = 0
self._build_critic()
self._build_generator()
self._build_adversarial()
def gradient_penalty_loss(self, y_true, y_pred, interpolated_samples):
"""Computes gradient penalty based on prediction and weighted real / fake samples.
"""
gradients = K.gradients(y_pred, interpolated_samples)[0]
# compute the euclidean norm by squaring...
gradients_sqr = K.square(gradients)
# summing over the rows
gradients_sqr_sum = K.sum(gradients_sqr, axis=np.arange(1, len(gradients_sqr.shape)))
# and sqrt
gradients_l2_norm = K.sqrt(gradients_sqr_sum)
# compute lambda * (1 - ||grad||)^2 still for each single sample
gradients_penalty = K.square(1 - gradients_l2_norm)
        # return the mean as the loss over all the batch samples
return K.mean(gradients_penalty)
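    # In equation form, with xhat = alpha*x_real + (1-alpha)*x_fake produced by
    # RandomWeightedAverage, the value returned above is
    #     E[ (||grad_xhat D(xhat)||_2 - 1)^2 ]
    # the gradient-penalty weight (grad_weight, i.e. lambda) is applied
    # separately via loss_weights in _build_adversarial below.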
def wasserstein(self, y_true, y_pred):
return -K.mean(y_true * y_pred)
def get_activation(self, activation):
if activation == 'leaky_relu':
layer = LeakyReLU(alpha=0.2)
else:
layer = Activation(activation)
return layer
def _build_critic(self):
critic_input = Input(shape=self.input_dim, name='critic_input')
x = critic_input
for i in range(self.n_layers_critic):
x = Conv2D(
filters=self.critic_conv_filters[i]
, kernel_size=self.critic_conv_kernel_size[i]
, strides=self.critic_conv_strides[i]
, padding='same'
, name='critic_conv_' + str(i)
, kernel_initializer=self.weight_init
)(x)
if self.critic_batch_norm_momentum and i > 0:
x = BatchNormalization(momentum=self.critic_batch_norm_momentum)(x)
x = self.get_activation(self.critic_activation)(x)
if self.critic_dropout_rate:
x = Dropout(rate=self.critic_dropout_rate)(x)
x = Flatten()(x)
critic_output = Dense(1, activation=None
, kernel_initializer=self.weight_init
)(x)
self.critic = Model(critic_input, critic_output)
def _build_generator(self):
generator_input = Input(shape=(self.z_dim,), name='generator_input')
x = generator_input
x = Dense(np.prod(self.generator_initial_dense_layer_size), kernel_initializer=self.weight_init)(x)
if self.generator_batch_norm_momentum:
x = BatchNormalization(momentum=self.generator_batch_norm_momentum)(x)
x = self.get_activation(self.generator_activation)(x)
x = Reshape(self.generator_initial_dense_layer_size)(x)
if self.generator_dropout_rate:
x = Dropout(rate=self.generator_dropout_rate)(x)
for i in range(self.n_layers_generator):
if self.generator_upsample[i] == 2:
x = UpSampling2D()(x)
x = Conv2D(
filters=self.generator_conv_filters[i]
, kernel_size=self.generator_conv_kernel_size[i]
, padding='same'
, name='generator_conv_' + str(i)
, kernel_initializer=self.weight_init
)(x)
else:
x = Conv2DTranspose(
filters=self.generator_conv_filters[i]
, kernel_size=self.generator_conv_kernel_size[i]
, padding='same'
, strides=self.generator_conv_strides[i]
, name='generator_conv_' + str(i)
, kernel_initializer=self.weight_init
)(x)
if i < self.n_layers_generator - 1:
if self.generator_batch_norm_momentum:
x = BatchNormalization(momentum=self.generator_batch_norm_momentum)(x)
x = self.get_activation(self.generator_activation)(x)
else:
x = Activation('tanh')(x)
generator_output = x
self.generator = Model(generator_input, generator_output)
def get_opti(self, lr):
if self.optimiser == 'adam':
opti = Adam(lr=lr, beta_1=0.5)
elif self.optimiser == 'rmsprop':
opti = RMSprop(lr=lr)
else:
opti = Adam(lr=lr)
return opti
def set_trainable(self, m, val):
m.trainable = val
for l in m.layers:
l.trainable = val
def _build_adversarial(self):
# For critic
# Freeze generator's layers while training critic
self.set_trainable(self.generator, False)
# Image input (real image)
real_img = Input(shape=self.input_dim)
# Fake image
z_disc = Input(shape=(self.z_dim,))
fake_img = self.generator(z_disc)
# critic determines validity of the real and fake images
fake = self.critic(fake_img)
valid = self.critic(real_img)
# Construct weighted average between real and fake images
interpolated_img = RandomWeightedAverage(self.batch_size)([real_img, fake_img])
# Determine validity of weighted sample
validity_interpolated = self.critic(interpolated_img)
# Use Python partial to provide loss function with additional
# 'interpolated_samples' argument
partial_gp_loss = partial(self.gradient_penalty_loss,
interpolated_samples=interpolated_img)
partial_gp_loss.__name__ = 'gradient_penalty' # Keras requires function names
self.critic_model = Model(inputs=[real_img, z_disc],
outputs=[valid, fake, validity_interpolated])
self.critic_model.compile(
loss=[self.wasserstein, self.wasserstein, partial_gp_loss]
, optimizer=self.get_opti(self.critic_learning_rate)
, loss_weights=[1, 1, self.grad_weight]
)
# For generator
self.set_trainable(self.critic, False)
self.set_trainable(self.generator, True)
# Sampled noise for input to generator
model_input = Input(shape=(self.z_dim,))
# Generate images besed of noise
img = self.generator(model_input)
# Discriminator determines validity
model_output = self.critic(img)
# Defines generator model
self.model = Model(model_input, model_output)
self.model.compile(optimizer=self.get_opti(self.generator_learning_rate)
, loss=self.wasserstein
)
self.set_trainable(self.critic, True)
def train_critic(self, x_train, batch_size, using_generator):
valid = np.ones((batch_size, 1), dtype=np.float32)
fake = -np.ones((batch_size, 1), dtype=np.float32)
dummy = np.zeros((batch_size, 1), dtype=np.float32) # Dummy gt for gradient penalty
if using_generator:
true_imgs = next(x_train)[0]
if true_imgs.shape[0] != batch_size:
true_imgs = next(x_train)[0]
else:
idx = np.random.randint(0, x_train.shape[0], batch_size)
true_imgs = x_train[idx]
noise = np.random.normal(0, 1, (batch_size, self.z_dim))
d_loss = self.critic_model.train_on_batch([true_imgs, noise], [valid, fake, dummy])
return d_loss
def train_generator(self, batch_size):
noise = np.random.normal(0, 1, (batch_size, self.z_dim))
valid = np.ones((batch_size, 1), dtype=np.float32)
return self.model.train_on_batch(noise, valid)
def train(self, x_train, batch_size, epochs, run_folder, print_every_n_batches=10, n_critic=5,
using_generator=False):
for epoch in range(self.epoch, epochs + epochs):
if epoch % 100 == 0:
critic_loops = 5
else:
critic_loops = n_critic
for _ in range(critic_loops):
d_loss = self.train_critic(x_train, batch_size, using_generator)
g_loss = self.train_generator(batch_size)
print("%d (%d, %d) [D loss: (%.1f)(R %.1f, F %.1f, G %.1f)] [G loss: %.1f]" % (
epoch, critic_loops, 1, d_loss[0], d_loss[1], d_loss[2], d_loss[3], g_loss))
self.d_losses.append(d_loss)
self.g_losses.append(g_loss)
# If at save interval => save generated image samples
if epoch % print_every_n_batches == 0:
self.sample_images(run_folder)
self.model.save_weights(os.path.join(run_folder, 'weights/weights-%d.h5' % (epoch)))
self.model.save_weights(os.path.join(run_folder, 'weights/weights.h5'))
self.save_model(run_folder)
self.epoch += 1
def sample_images(self, run_folder):
r, c = 5, 5
noise = np.random.normal(0, 1, (r * c, self.z_dim))
gen_imgs = self.generator.predict(noise)
# Rescale images 0 - 1
gen_imgs = 0.5 * (gen_imgs + 1)
gen_imgs = np.clip(gen_imgs, 0, 1)
fig, axs = plt.subplots(r, c, figsize=(15, 15))
cnt = 0
for i in range(r):
for j in range(c):
axs[i, j].imshow(np.squeeze(gen_imgs[cnt, :, :, :]), cmap='gray_r')
axs[i, j].axis('off')
cnt += 1
fig.savefig(os.path.join(run_folder, "images/sample_%d.png" % self.epoch))
plt.close()
def plot_model(self, run_folder):
plot_model(self.model, to_file=os.path.join(run_folder, 'viz/model.png'), show_shapes=True,
show_layer_names=True)
plot_model(self.critic, to_file=os.path.join(run_folder, 'viz/critic.png'), show_shapes=True,
show_layer_names=True)
plot_model(self.generator, to_file=os.path.join(run_folder, 'viz/generator.png'), show_shapes=True,
show_layer_names=True)
def save(self, folder):
with open(os.path.join(folder, 'params.pkl'), 'wb') as f:
pickle.dump([
self.input_dim
, self.critic_conv_filters
, self.critic_conv_kernel_size
, self.critic_conv_strides
, self.critic_batch_norm_momentum
, self.critic_activation
, self.critic_dropout_rate
, self.critic_learning_rate
, self.generator_initial_dense_layer_size
, self.generator_upsample
, self.generator_conv_filters
, self.generator_conv_kernel_size
, self.generator_conv_strides
, self.generator_batch_norm_momentum
, self.generator_activation
, self.generator_dropout_rate
, self.generator_learning_rate
, self.optimiser
, self.grad_weight
, self.z_dim
, self.batch_size
], f)
self.plot_model(folder)
def save_model(self, run_folder):
self.model.save(os.path.join(run_folder, 'model.h5'))
self.critic.save(os.path.join(run_folder, 'critic.h5'))
self.generator.save(os.path.join(run_folder, 'generator.h5'))
pickle.dump(self, open(os.path.join(run_folder, "obj.pkl"), "wb"))
def load_weights(self, filepath):
self.model.load_weights(filepath)
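# Minimal instantiation sketch for 28x28 grayscale data (every size below is
# an illustrative assumption, not a configuration taken from this repository;
# the generator shapes are chosen so that (7,7,64) upsamples to 28x28x1):
#
#   gan = WGANGP(input_dim=(28, 28, 1),
#                critic_conv_filters=[32, 64, 128, 128],
#                critic_conv_kernel_size=[5, 5, 5, 5],
#                critic_conv_strides=[2, 2, 2, 1],
#                critic_batch_norm_momentum=None, critic_activation='leaky_relu',
#                critic_dropout_rate=None, critic_learning_rate=2e-4,
#                generator_initial_dense_layer_size=(7, 7, 64),
#                generator_upsample=[2, 2, 1, 1],
#                generator_conv_filters=[128, 64, 64, 1],
#                generator_conv_kernel_size=[5, 5, 5, 5],
#                generator_conv_strides=[1, 1, 1, 1],
#                generator_batch_norm_momentum=0.9, generator_activation='leaky_relu',
#                generator_dropout_rate=None, generator_learning_rate=2e-4,
#                optimizer='adam', grad_weight=10, z_dim=100, batch_size=64)
#   gan.train(x_train, batch_size=64, epochs=6000, run_folder='./run/')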
| [
"[email protected]"
] | |
fc87922135a12612cfa4e49ab3c08ee5f38e1ebe | af69497f9f5fb62ee894646a1804494f167f25a7 | /genbci/run_wgan_SSVEP.py | b6cead987f96a4be0902f71c396b33af585bd50e | [] | no_license | dpstart/genbci | afa1cacff63080d12337b98e066e5ebb2c733c03 | aeccce878a5d6ba668f65d4d181874059fe1b8f9 | refs/heads/master | 2022-12-28T02:00:23.647306 | 2020-10-09T13:32:27 | 2020-10-09T13:32:27 | 260,474,268 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,280 | py | import argparse
import numpy as np
import random
import os
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
import torch
from genbci.generate.model import (
SSVEP_Discriminator as Discriminator,
SSVEP_Generator as Generator,
)
# from genbci.scripts import ssvep_sample
from genbci.util import init_torch_and_get_device, weights_init, get_exo_data
torch.set_num_threads(8)
parser = argparse.ArgumentParser()
parser.add_argument(
"--modelpath", type=str, default="models/", help="Path to dave model"
)
parser.add_argument(
"--n_epochs", type=int, default=5000, help="number of epochs of training"
)
parser.add_argument("--lr", type=float, default=0.0001, help="adam: learning rate")
parser.add_argument(
"--b1",
type=float,
default=0.1,
help="adam: decay of first order momentum of gradient",
)
parser.add_argument(
"--b2",
type=float,
default=0.999,
help="adam: decay of first order momentum of gradient",
)
parser.add_argument(
"--n_cpu",
type=int,
default=4,
help="number of cpu threads to use during batch generation",
)
parser.add_argument(
"--latent_dim", type=int, default=32, help="dimensionality of the latent space"
)
parser.add_argument(
"--img_size", type=int, default=28, help="size of each image dimension"
)
parser.add_argument("--channels", type=int, default=1, help="number of image channels")
parser.add_argument(
"--n_critic",
type=int,
default=5,
help="number of training steps for discriminator per iter",
)
parser.add_argument(
"--clip_value",
type=float,
default=0.01,
help="lower and upper clip value for disc. weights",
)
parser.add_argument(
"--sample_interval", type=int, default=200, help="interval between image samples"
)
parser.add_argument(
"--nz",
type=int,
default=64,
help="size of the latent z vector used as the generator input.",
)
opt = parser.parse_args()
opt.device = init_torch_and_get_device()
### Setting some defaults
opt.batch_size = 16
opt.dropout_level = 0.05
# opt.img_shape = (9, 1500)
opt.plot_steps = 250
opt.jobid = 2
opt.modelname = "ssvep_wgan%s"
if not os.path.exists(opt.modelpath):
os.makedirs(opt.modelpath)
# dataloader = torch.utils.data.DataLoader(
# dataset=ssvep_sample.dataset, batch_size=opt.batch_size, shuffle=True
# )
epochs_exo = get_exo_data(
"/Users/daniele/Desktop/thesis/library/genbci/ssvep/data/dataset-ssvep-exoskeleton",
plot=False,
)
data = epochs_exo.get_data()
labels = epochs_exo.events[:, 2] - 1
data = data[labels == 1, :, :]
labels = labels[labels == 1]
# Electrodes 2 and 3 should be O1 and O2 thus occipital
datatrain = torch.from_numpy(data[:, 1:3, :728]).float()
labels = torch.from_numpy(labels)
dataset = torch.utils.data.TensorDataset(datatrain, labels)
dataloader = torch.utils.data.DataLoader(
dataset=dataset, batch_size=opt.batch_size, shuffle=True
)
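# Shape sketch of what the loader yields (follows the slicing above; the
# trial count depends on the recording):
#   datatrain.shape == (n_trials, 2, 728)   # 2 occipital channels, 728 samples
#   each batch: a (opt.batch_size, 2, 728) tensor plus a label vector
#   (the final batch may be smaller, since drop_last is not set)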
def train_fn(dataloader, generator, discriminator, opt):
losses_d, losses_g = [], []
for epoch in range(opt.n_epochs):
for i, (real_imgs, _) in enumerate(dataloader):
generator.train()
discriminator.train()
real_imgs = real_imgs.to(opt.device)
# ---------------------
# Train Discriminator
# ---------------------
# Sample noise for generator input
z = torch.randn(opt.batch_size, opt.nz).to(opt.device)
# Generate a batch of fake images
fake_imgs = generator(z)
# Let the discriminator judge and learn
loss_real_d, loss_fake_d = discriminator.train_batch(real_imgs, fake_imgs)
loss_d = loss_real_d + loss_fake_d
losses_d.append(loss_d)
# Train the generator every n_critic steps
if i % opt.n_critic == 0:
z = torch.randn(opt.batch_size, opt.nz).to(opt.device)
loss_g = generator.train_batch(z, discriminator)
losses_g.append(loss_g)
print(
"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]"
% (epoch, opt.n_epochs, i, len(dataloader), loss_d, loss_g)
)
eval_fn(
dataloader, generator, discriminator, epoch, opt, losses_d, losses_g
)
def eval_fn(dataloader, generator, discriminator, epoch, opt, losses_d, losses_g):
generator.eval()
discriminator.eval()
if epoch % opt.plot_steps == 0:
freqs_tmp = np.fft.rfftfreq(dataset.tensors[0].shape[2], d=1 / 250.0)
# Compute FFT frequencies
train_fft = np.fft.rfft(dataset.tensors[0], axis=2)
# Compute FFT on training data
train_amps = np.abs(train_fft).mean(axis=1).mean(axis=0)
# Noise for generator
z = torch.rand(opt.batch_size, opt.nz).to(opt.device)
# Get a batch of fake data and compute FFT
batch_fake = generator(z)
fake_fft = np.fft.rfft(batch_fake.data.cpu().numpy(), axis=2)
fake_amps = np.abs(fake_fft).mean(axis=1).mean(axis=0)
plt.figure()
plt.plot(freqs_tmp, np.log(fake_amps), label="Fake")
plt.plot(freqs_tmp, np.log(train_amps), label="Real")
plt.title("Frequency Spectrum")
plt.xlabel("Hz")
plt.legend()
plt.savefig(
os.path.join(
opt.modelpath, opt.modelname % opt.jobid + "_fft_%d.png" % epoch
)
)
plt.close()
batch_fake = batch_fake.data.cpu().numpy()
plt.figure(figsize=(10, 10))
for i in range(10):
plt.subplot(10, 1, i + 1)
# Working with 2 channels, plot only first one. A bit ugly.
plt.plot(batch_fake[i, 0, ...].squeeze())
plt.xticks((), ())
plt.yticks((), ())
plt.subplots_adjust(hspace=0)
plt.savefig(
os.path.join(
opt.modelpath, opt.modelname % opt.jobid + "_fakes_%d.png" % epoch
)
)
plt.close()
plt.figure(figsize=(10, 15))
plt.plot(np.asarray(losses_d))
plt.title("Loss Discriminator")
plt.savefig(
os.path.join(
opt.modelpath, opt.modelname % opt.jobid + "loss_disc_%d.png" % epoch
)
)
plt.close()
plt.figure(figsize=(10, 15))
plt.plot(np.asarray(losses_g))
plt.title("Loss generator")
plt.savefig(
os.path.join(
opt.modelpath, opt.modelname % opt.jobid + "loss_gen_%d.png" % epoch
)
)
plt.close()
discriminator.save_model(
os.path.join(opt.modelpath, opt.modelname % opt.jobid + ".disc")
)
generator.save_model(
os.path.join(opt.modelpath, opt.modelname % opt.jobid + ".gen")
)
# Initialize generator and discriminator
discriminator = Discriminator()
discriminator.apply(weights_init)
discriminator.train_init()
discriminator.to(opt.device)
generator = Generator(opt.nz)
generator.apply(weights_init)
generator.train_init()
generator.to(opt.device)
train_fn(dataloader, generator, discriminator, opt)
| [
"[email protected]"
] | |
1c848701cc7ee159f0646499637a78c5cdd12204 | 8337a3098a922b37185ab8bf3e1abe52d527c25e | /my_app/migrations/0002_auto_20151104_1503.py | f9d9637d9b77d8e1c662664586618f5272e3a86a | [] | no_license | dimejy2/StatsFootball | f1e72cf29a658985725774e6145802e2f2c1d713 | dce84f9a5c217243dba9e1f49efe959e5c90c695 | refs/heads/master | 2021-01-10T21:59:25.003768 | 2015-12-01T17:21:13 | 2015-12-01T17:21:13 | 47,205,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,222 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('my_app', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Balances',
fields=[
('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
('currency_id', models.CharField(max_length=3)),
('value', models.FloatField(default=0.0)),
],
),
migrations.CreateModel(
name='Investor',
fields=[
('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
('account_number', models.CharField(max_length=10)),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='balances',
name='account_holder',
field=models.ForeignKey(to='my_app.Investor'),
),
]
| [
"[email protected]"
] | |
601e04d3f95736775c8e3eee23c2ea0fc2a6192b | 216ddf61c5be758efde2b50fa476ada5354aced5 | /galaxy/gen_test.py | dddbbbb2c8f96cf24df4b8d0981a9c43604dbf60 | [] | no_license | cameronfabbri/ICGANs | 4600020238d6884b710ea0b035b84e86c73705f1 | d6be1a3e752959754be1dbf8af2ead8f75048b37 | refs/heads/master | 2021-01-16T18:11:38.596295 | 2017-11-26T22:35:16 | 2017-11-26T22:35:16 | 100,050,914 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,823 | py | '''
Generates a dataset of encodings from real images using the trained encoder.
'''
import matplotlib.pyplot as plt
from tqdm import tqdm
from matplotlib.pyplot import cm
import scipy.misc as misc
import tensorflow as tf
import tensorflow.contrib.layers as tcl
import cPickle as pickle
import numpy as np
import argparse
import random
import ntpath
import glob
import time
import sys
import cv2
import os
sys.path.insert(0, '../ops/')
from tf_ops import *
import data_ops
from nets import *
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--CHECKPOINT_DIR', required=True,help='checkpoint directory',type=str)
parser.add_argument('--DATASET', required=False,help='The DATASET to use', type=str,default='celeba')
parser.add_argument('--DATA_DIR', required=False,help='Directory where data is', type=str,default='./')
parser.add_argument('--OUTPUT_DIR', required=False,help='Directory to save data', type=str,default='./')
parser.add_argument('--ACTIVATION', required=False,help='Activation function', type=str,default='lrelu')
a = parser.parse_args()
CHECKPOINT_DIR = a.CHECKPOINT_DIR
DATASET = a.DATASET
DATA_DIR = a.DATA_DIR
OUTPUT_DIR = a.OUTPUT_DIR
ACTIVATION = a.ACTIVATION
try: os.makedirs(OUTPUT_DIR)
except: pass
# placeholders for data going into the network
global_step = tf.Variable(0, name='global_step', trainable=False)
images = tf.placeholder(tf.float32, shape=(1, 64, 64, 3), name='images')
encoded = encZ(images, ACTIVATION)
saver = tf.train.Saver(max_to_keep=1)
init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
sess = tf.Session()
sess.run(init)
# restore previous model if there is one
ckpt = tf.train.get_checkpoint_state(CHECKPOINT_DIR)
if ckpt and ckpt.model_checkpoint_path:
print "Restoring previous model..."
try:
saver.restore(sess, ckpt.model_checkpoint_path)
print "Model restored"
except:
print "Could not restore model"
pass
print 'Loading data...'
# images and annots: _, __
train_images, train_annots, test_images, test_annots, paths = data_ops.load_galaxy(DATA_DIR)
test_len = len(test_annots)
print 'test num:',test_len
info = {}
# want to write out a file with the image path and z vector
for p,img,label in tqdm(zip(paths, test_images, test_annots)):
img = data_ops.normalize(img)
batch_images = np.expand_dims(img, 0)
encoding = sess.run([encoded], feed_dict={images:batch_images})[0][0]
info[p] = [encoding, label]
# write out dictionary to pickle file
p = open(OUTPUT_DIR+'data.pkl', 'wb')
data = pickle.dumps(info)
p.write(data)
p.close()
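# The pickle written above maps image path -> [z_encoding, label]; it can be
# read back with the same cPickle-as-pickle import used above:
#   info = pickle.loads(open(OUTPUT_DIR + 'data.pkl', 'rb').read())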
| [
"[email protected]"
] | |
c8ae13ad78da181556b71e88b5de8e8f6e4b91ab | 38e622a5daafbf115c88e7a1def650e14eedc455 | /apps/users/adminx.py | 15a5d9dfdfcc6058c2809c0e8ee1c43dee8e9da1 | [] | no_license | sunysir/Django-1.10-xadmin-mooc | aa66ae39c4204d24cb472a706382b606b48f9b19 | 4188590daedff52d34e2155c78e68735f5cdf690 | refs/heads/master | 2020-03-21T15:57:25.423422 | 2018-07-29T14:28:36 | 2018-07-29T14:28:36 | 138,742,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,046 | py | # _*_ encoding: utf-8 _*_
import xadmin
from users.models import Banner, EmailVerifyRecord
from xadmin import views
__author__ = 'suny'
__date__ = '2018/7/20 16:27'
class BaseSetting(object):
enable_themes = True
use_bootswatch = True
class GlobalSetting(object):
site_title = '慕学在线后台管理系统'
site_footer = '慕学在线网'
menu_style = 'accordion'
class EmailVerifyRecordAdmin(object):
list_display = ['code', 'email', 'send_type', 'send_time']
list_filter = ['code', 'email', 'send_type', 'send_time']
search_fields = ['code', 'email', 'send_type']
class BannerAdmin(object):
list_display = ['titile', 'image', 'url', 'index', 'add_time']
list_filter = ['titile', 'image', 'url', 'index', 'add_time']
search_fields = ['titile', 'image', 'url', 'index']
xadmin.site.register(EmailVerifyRecord, EmailVerifyRecordAdmin)
xadmin.site.register(Banner, BannerAdmin)
xadmin.site.register(views.BaseAdminView, BaseSetting)
xadmin.site.register(views.CommAdminView, GlobalSetting)
| [
"[email protected]"
] | |
8426e077cebea70246a5e452b3422803b8c9272e | 4b2b6a8c2caf0f06e0ce47bbb9174ec4db07adaf | /code/stg_node.py | bcbc4a9401548f48edd347e32cd99be7b3057024 | [
"MIT"
] | permissive | StanfordASL/NHumanModeling | 052e89e8950b54a63a40655527ab29aeed5a48d9 | 0ae2297f563599601b1777a3fc825c43f1a461cd | refs/heads/master | 2021-03-27T20:53:13.930366 | 2019-02-10T01:11:42 | 2019-02-10T01:11:42 | 123,646,313 | 15 | 11 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | class STGNode(object):
def __init__(self, node_name, node_type):
self.name = node_name
self.type = node_type
def __eq__(self, other):
return (isinstance(other, self.__class__)
and self.name == other.name
and self.type == other.type)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.name, self.type))
def __repr__(self):
type_str = self.type.replace(' ', '')
name_str = self.name.replace(' ', '').replace("'", "")
return type_str + "/" + name_str
def convert_to_label_node(node):
return STGNode(node.name + '_label', node.type)
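# Example: convert_to_label_node(STGNode('Pedestrian 1', 'Pedestrian'))
# returns a node whose repr is "Pedestrian/Pedestrian1_label".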
| [
"[email protected]"
] | |
52a6526d5fb09f675ab48c251acde7cab3c2e1fc | 3329be589ec0376495948d52c234a31f8ed07cf8 | /utils/ckpt_utils.py | 0ba977f956bdac4fa96f168c7e709765e5746e2b | [] | no_license | arthurtibame/tensorflow_api_utils | 908a68a9c7524b91a340c6a7a02d8d12b8a69ae4 | d6b5fd42dd3445989b9db15431bda0f2e8358c02 | refs/heads/main | 2023-01-18T21:46:20.804311 | 2020-11-26T08:21:48 | 2020-11-26T08:21:48 | 316,155,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,365 | py | import json
import os
def chk_model(model_name):
return os.path.isdir(model_name)
def download_ckpt(model_name):
r"""
    If the model folder does not exist, download the checkpoint
    from the TensorFlow 2 model zoo.
    Valid model names are listed at the following url: https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf2_detection_zoo.md
Model name Speed (ms) COCO mAP Outputs
CenterNet HourGlass104 512x512 70 41.9 Boxes
CenterNet HourGlass104 Keypoints 512x512 76 40.0/61.4 Boxes/Keypoints
CenterNet HourGlass104 1024x1024 197 44.5 Boxes
CenterNet HourGlass104 Keypoints 1024x1024 211 42.8/64.5 Boxes/Keypoints
CenterNet Resnet50 V1 FPN 512x512 27 31.2 Boxes
CenterNet Resnet50 V1 FPN Keypoints 512x512 30 29.3/50.7 Boxes/Keypoints
CenterNet Resnet101 V1 FPN 512x512 34 34.2 Boxes
CenterNet Resnet50 V2 512x512 27 29.5 Boxes
CenterNet Resnet50 V2 Keypoints 512x512 30 27.6/48.2 Boxes/Keypoints
EfficientDet D0 512x512 39 33.6 Boxes
EfficientDet D1 640x640 54 38.4 Boxes
EfficientDet D2 768x768 67 41.8 Boxes
EfficientDet D3 896x896 95 45.4 Boxes
EfficientDet D4 1024x1024 133 48.5 Boxes
EfficientDet D5 1280x1280 222 49.7 Boxes
EfficientDet D6 1280x1280 268 50.5 Boxes
EfficientDet D7 1536x1536 325 51.2 Boxes
SSD MobileNet v2 320x320 19 20.2 Boxes
SSD MobileNet V1 FPN 640x640 48 29.1 Boxes
SSD MobileNet V2 FPNLite 320x320 22 22.2 Boxes
SSD MobileNet V2 FPNLite 640x640 39 28.2 Boxes
SSD ResNet50 V1 FPN 640x640 (RetinaNet50) 46 34.3 Boxes
SSD ResNet50 V1 FPN 1024x1024 (RetinaNet50) 87 38.3 Boxes
SSD ResNet101 V1 FPN 640x640 (RetinaNet101) 57 35.6 Boxes
SSD ResNet101 V1 FPN 1024x1024 (RetinaNet101) 104 39.5 Boxes
SSD ResNet152 V1 FPN 640x640 (RetinaNet152) 80 35.4 Boxes
SSD ResNet152 V1 FPN 1024x1024 (RetinaNet152) 111 39.6 Boxes
Faster R-CNN ResNet50 V1 640x640 53 29.3 Boxes
Faster R-CNN ResNet50 V1 1024x1024 65 31.0 Boxes
Faster R-CNN ResNet50 V1 800x1333 65 31.6 Boxes
Faster R-CNN ResNet101 V1 640x640 55 31.8 Boxes
Faster R-CNN ResNet101 V1 1024x1024 72 37.1 Boxes
Faster R-CNN ResNet101 V1 800x1333 77 36.6 Boxes
Faster R-CNN ResNet152 V1 640x640 64 32.4 Boxes
Faster R-CNN ResNet152 V1 1024x1024 85 37.6 Boxes
Faster R-CNN ResNet152 V1 800x1333 101 37.4 Boxes
Faster R-CNN Inception ResNet V2 640x640 206 37.7 Boxes
Faster R-CNN Inception ResNet V2 1024x1024 236 38.7 Boxes
Mask R-CNN Inception ResNet V2 1024x1024 301 39.0/34.6 Boxes/Masks
"""
with open ("./utils/models.json", "r") as f:
url = json.loads(f.read())[model_name]
model_name_tar = url.split("/")[-1]
model_name_dir = url.split("/")[-1].split(".")[0]
if not chk_model(model_name_dir):
try:
print('Downloading %s to %s...' % (url, model_name_tar))
os.system("wget {}".format(str(url)))
os.system("tar xvf {}".format(str(model_name_tar)))
os.remove(model_name_tar)
# assert os.path.exists()# check
return model_name_dir
except Exception as e:
print(e)
return model_name_dir
def customized_ckpt_cofig(
pipeline_config_path,
fine_tune_checkpoint,
train_record_fname,
test_record_fname,
label_map_pbtxt_fname,
batch_size,
num_steps,
num_classes
):
import re
print('writing custom configuration file')
with open(pipeline_config_path) as f:
s = f.read()
with open(pipeline_config_path, 'w') as f:
# fine_tune_checkpoint
s = re.sub('fine_tune_checkpoint: ".*?"',
'fine_tune_checkpoint: "{}"'.format(fine_tune_checkpoint), s)
# tfrecord files train and test.
s = re.sub(
'(input_path: ".*?)(PATH_TO_BE_CONFIGURED)(.*?")', 'input_path: "{}"'.format(train_record_fname), s, count=1)
s = re.sub(
'(input_path: ".*?)(PATH_TO_BE_CONFIGURED)(.*?")', 'input_path: "{}"'.format(test_record_fname), s, count=1)
# label_map_path
s = re.sub(
'label_map_path: ".*?"', 'label_map_path: "{}"'.format(label_map_pbtxt_fname), s)
# Set training batch_size.
s = re.sub('batch_size: [0-9]+',
'batch_size: {}'.format(batch_size), s)
# Set training steps, num_steps
s = re.sub('num_steps: [0-9]+',
'num_steps: {}'.format(num_steps), s)
s = re.sub('total_steps: [0-9]+',
'total_steps: {}'.format(num_steps), s)
# Set number of classes num_classes.
s = re.sub('num_classes: [0-9]+',
'num_classes: {}'.format(num_classes), s)
#fine-tune checkpoint type
s = re.sub(
'fine_tune_checkpoint_type: "classification"', 'fine_tune_checkpoint_type: "{}"'.format('detection'), s)
f.write(s)
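# Minimal usage sketch (the model key must exist in utils/models.json; the
# paths and hyperparameters below are illustrative assumptions):
#
#   model_dir = download_ckpt('EfficientDet D0 512x512')
#   customized_ckpt_cofig(
#       pipeline_config_path=os.path.join(model_dir, 'pipeline.config'),
#       fine_tune_checkpoint=os.path.join(model_dir, 'checkpoint', 'ckpt-0'),
#       train_record_fname='train.record', test_record_fname='test.record',
#       label_map_pbtxt_fname='label_map.pbtxt',
#       batch_size=8, num_steps=20000, num_classes=3)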
| [
"[email protected]"
] | |
4fed9fbfe9b53e716ee5f6989ea66c9f22c91f8b | 2c311a5ce7dd08fd56de05dfcbcf2717406e12a3 | /examples/bootswatch_example/bootswatch_example/urls.py | 21479c51d35af7cf6f0a144ed704deb4bde8fc7a | [
"BSD-2-Clause"
] | permissive | nschlemm/django-themeswitch | b2e40c067fa008cb32f02a6114b2c374e644a1cc | 371495b05abe14fddc2cabc9468f4fb2aedf77f7 | refs/heads/master | 2021-01-22T10:19:36.535062 | 2014-04-08T15:19:48 | 2014-04-08T15:19:48 | 17,511,440 | 11 | 1 | null | 2014-03-07T16:19:16 | 2014-03-07T11:00:24 | Python | UTF-8 | Python | false | false | 192 | py | from django.conf.urls import patterns, include, url
urlpatterns = patterns(
'',
url(r'', include('themeswitch.urls')),
url(r'^$', 'bootswatch_example.views.home', name='home'),
)
| [
"[email protected]"
] | |
abe1005bd1d0c5882d3e588d9d3a1e4a7486c579 | 44197b58b52349b0557f4d2327be292d1c01ea50 | /test/test_data_62.py | 2dd9de7ce8a273d4da81d28b4534861d76aaff37 | [] | no_license | jonpurdy/netbox-swagger-python-client | 58b2b7984ea24a690d8910f6a6a496b99e5098f9 | 6bfe8cf3bb753c4d293dd56a541fac026642207f | refs/heads/master | 2021-06-28T03:16:09.670793 | 2017-09-17T18:15:54 | 2017-09-17T18:15:54 | 103,851,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 861 | py | # coding: utf-8
"""
NetBox API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.data_62 import Data62
class TestData62(unittest.TestCase):
""" Data62 unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testData62(self):
"""
Test Data62
"""
# FIXME: construct object with mandatory attributes with example values
#model = swagger_client.models.data_62.Data62()
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
6d11a2a08e99746fcf09d5f7a1e8b2a1c35a11e3 | 9716316eb0c5b5a1487866d37b58efc116511d22 | /charmdet/runReconstruction.py | 7e60609548ccbac61bb0a6d7f587dec8d911689f | [] | no_license | klleung/FairShip | 68245fcd042f47a5ed2feeaad1c2e84e5aa21241 | a7e67ac58387f651722068e8325513b5e0d6832a | refs/heads/master | 2020-06-06T23:02:00.991756 | 2019-07-19T12:23:35 | 2019-07-19T12:23:35 | 192,870,986 | 2 | 0 | null | 2019-06-20T07:26:44 | 2019-06-20T07:26:44 | null | UTF-8 | Python | false | false | 15,856 | py |
import os,subprocess,ROOT,time,multiprocessing
from rootpyPickler import Unpickler
from rootpyPickler import Pickler
import pwd
ncpus = int(multiprocessing.cpu_count()*3./4.)
pathToMacro = '$FAIRSHIP/charmdet/'
def count_python_processes(macroName):
username = pwd.getpwuid(os.getuid()).pw_name
callstring = "ps -f -u " + username
# only works if screen is wide enough to print full name!
status = subprocess.check_output(callstring,shell=True)
n=0
for x in status.split('\n'):
if not x.find(macroName)<0 and not x.find('python') <0: n+=1
return n
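# Hypothetical usage of the helper above: throttle job submission until a
# worker slot frees up (this is the pattern used throughout this script).
#
#   while count_python_processes('drifttubeMonitoring') >= ncpus:
#       time.sleep(60)
#
# Since it parses `ps -f` output, the count misses processes whose command
# line is truncated (see the comment inside the function).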
fileList = {}
badFiles = []
run = "RUN_8000_2395" # "RUN_8000_2396"
eospath='/eos/experiment/ship/data/muflux/DATA_Rebuild_8000/rootdata/'+run
def getFilesFromEOS():
# list of files
temp = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eospath,shell=True)
for x in temp.split('\n'):
if x.find('.root')<0: continue
if not x.find('START')<0: continue
fname = x[x.find('/eos'):]
nentries = 0
try:
f=ROOT.TFile.Open(os.environ['EOSSHIP']+fname)
nentries=f.cbmsim.GetEntries()
fileList[fname]=nentries
except:
print "problem accessing file",fname
badFiles.append(fname)
Nfiles = len(fileList)
tmp = {}
for fname in fileList:
newName = fname[fname.rfind('/')+1:]
rc = os.system("xrdcp -f $EOSSHIP"+fname+" "+newName)
tmp[newName]=fileList[fname]
fnames = tmp.keys()
fnames.sort()
return tmp,fnames
def getFilesLocal():
# list of files
for fname in os.listdir('.'):
if fname.find('.root')<0: continue
if not fname.find('_RT')<0: continue
test = fname.replace('.root','_RT.root')
if os.path.isfile(test): continue
nentries = 0
try:
f=ROOT.TFile.Open(fname)
nentries=f.cbmsim.GetEntries()
fileList[fname]=nentries
except:
print "problem accessing file",fname
badFiles.append(fname)
Nfiles = len(fileList)
fnames = fileList.keys()
fnames.sort()
return fileList,fnames
def recoStep0(local=False):
if local: tmp,fnames = getFilesLocal()
else: tmp,fnames = getFilesFromEOS()
Nfiles = len(fnames)
print "fileList established ",Nfiles
Ndone = 0
while Ndone < Nfiles:
cmd = "python "+pathToMacro+"drifttubeMonitoring.py -c recoStep0 -f "
# group files to get better stats
Ntot = 0
sample = []
i = 0
for k in range(Ndone,Nfiles):
Ntot += tmp[fnames[k]]
sample.append(fnames[k])
i+=1
if Ntot>350000: break
Ndone += i
# check that enough files remain
Nextsample = []
Ntot = 0
for k in range(Ndone,Nfiles):
Ntot += tmp[fnames[k]]
Nextsample.append(fnames[k])
if Ntot>350000: break
if Ntot < 350000:
for s in Nextsample: sample.append(s)
Ndone += len(Nextsample)
if len(sample)==0: break
for s in sample: cmd+=s+','
print 'step 0:',cmd[:cmd.rfind(',')],Ndone,Nfiles
os.system(cmd[:cmd.rfind(',')]+" &")
while 1>0:
if count_python_processes('drifttubeMonitoring')<ncpus: break
time.sleep(200)
if Ndone%100==0: cleanUp()
while count_python_processes('drifttubeMonitoring')>0: time.sleep(200)
print "files created with RT relations "
cleanUp()
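# Worked illustration of the batching in recoStep0, with hypothetical entry
# counts: files are accumulated until ~350k events so each RT-calibration job
# has enough statistics.
#
#   tmp = {'a.root': 200000, 'b.root': 200000, 'c.root': 100000}
#   # a+b reach 400k > 350k, and the 100k remainder in c is below the
#   # threshold, so c is folded into the same job rather than run alone.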
def checkFilesWithRT():
fok = []
fNotok = []
fRaw = []
for fname in os.listdir('.'):
if not fname.find('histo')<0: continue
if not fname.find('_RT')<0:
f=ROOT.TFile(fname)
RT = f.Get('tMinAndTmax')
if RT:
fok.append(fname)
else:
fNotok.append(fname)
elif fname.find('root')>0 and not fname.find('SPILL')<0:
fRaw.append(fname)
print len(fok),len(fNotok),len(fRaw)
return fok,fNotok,fRaw
def checkMinusTwo():
fok,fNotok,fRaw = checkFilesWithRT()
for fname in fRaw:
if fname in fok: continue
N=0
f=ROOT.TFile(fname)
sTree = f.cbmsim
for n in range(sTree.GetEntries()):
rc = sTree.GetEvent(n)
for m in sTree.Digi_MufluxSpectrometerHits:
if m.GetDetectorID()<0: N+=1
print sTree.GetCurrentFile(),N
def recoStep1():
fileList=[]
# all RT files
for x in os.listdir('.'):
if x.find('_RT')>0 and x.find('histos')<0:
test = ROOT.TFile(x)
if test.cbmsim.GetBranch("FitTracks"): continue
fileList.append(x)
fileList.sort()
for fname in fileList:
cmd = "python "+pathToMacro+"drifttubeMonitoring.py -c recoStep1 -u 1 -f "+fname+' &'
print 'step 1:', cmd
os.system(cmd)
time.sleep(100)
while 1>0:
if count_python_processes('drifttubeMonitoring')<ncpus: break
time.sleep(100)
print "finished all the tasks."
def checkAlignment(fileList=[]):
# all RT files
if len(fileList)==0:
for x in os.listdir('.'):
if x.find('_RT')>0 and x.find('histos-residuals')<0:
fileList.append(x)
fileList.sort()
for fname in fileList:
cmd = "python "+pathToMacro+"drifttubeMonitoring.py -c alignment -f "+fname+' &'
print 'make residual plots:', cmd
os.system(cmd)
time.sleep(10)
while 1>0:
if count_python_processes('drifttubeMonitoring')<ncpus: break
time.sleep(100)
print "finished all the tasks."
def runMC():
# fast MC
inputFile = "/eos/experiment/ship/data/Mbias/background-prod-2018/pythia8_Geant4_10.0_withCharmandBeauty0_mu.root" # entries 13450391L
os.system("python $FAIRSHIP/macro/run_simScript.py -n 100000 --MuonBack --charm=1 --CharmdetSetup=0 -f "+inputFile)
# full simulation
os.system("python $SHIPBUILD/FairShip/macro/run_simScript.py --Muflux -n 1000 --charm=1 --CharmdetSetup=0 --charm=1 --CharmdetSetup=0")
def checkFilesWithTracks(D='.'):
fileList=[]
rest=[]
zombie=[]
# all RT files
if D.find('eos')<0:
for x in os.listdir(D):
if x.find('_RT')>0 and x.find('histos')<0:
test = ROOT.TFile(D+'/'+x)
if not test.GetKey('cbmsim'):
zombie.append(x)
elif test.cbmsim.GetBranch("FitTracks"): fileList.append(x)
else: rest.append(x)
else:
temp = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+D,shell=True)
for x in temp.split('\n'):
if x.find('.root')<0: continue
fname = x[x.find('/eos'):]
try:
test=ROOT.TFile.Open(os.environ['EOSSHIP']+fname)
if not test.GetKey('cbmsim'):
zombie.append(fname)
elif test.cbmsim.GetBranch("FitTracks"): fileList.append(fname)
else: rest.append(fname)
except:zombie.append(fname)
fileList.sort()
print "n with tracks",len(fileList),' rest:',len(rest),' zombies:',zombie
return fileList
def checkFilesWithTracks2(D='.'):
badFile=[]
# all RT files
for x in os.listdir(D):
if x.find('_RT')>0 and x.find('histos')<0:
test = ROOT.TFile(D+'/'+x)
sTree = test.cbmsim
if not sTree: badFile.append(x+"?")
elif sTree.GetBranch("FitTracks"):
prev = 0
for n in range(min(20000,sTree.GetEntries())):
rc = sTree.GetEvent(n)
if sTree.FitTracks.GetEntries()>0:
st = sTree.FitTracks[0].getFitStatus()
if not st.isFitConverged(): continue
if prev==st.getChi2():
badFile.append(x)
break
else: prev=st.getChi2()
return badFile
def checkFilesWithTracks3(D='.'):
badFile={}
# all RT files
for x in os.listdir(D):
if x.find('_RT')>0 and x.find('histos')<0:
test = ROOT.TFile(D+'/'+x)
sTree = test.cbmsim
if not sTree:
                badFile[x+"?"] = 0.  # badFile is a dict here, not a list as in checkFilesWithTracks2
continue
b = sTree.GetBranch("FitTracks")
if b:
if b.GetZipBytes()/1.E6 < 1.: badFile[x]= b.GetZipBytes()/1.E6
return badFile
# for f in bf: os.system('cp ../../ship-ubuntu-1710-64/RUN_8000_2395/'+f+' .')
def cleanUp(D='.'):
# remove raw data files for files with RT relations
fok,fNotok,fRaw = checkFilesWithRT()
for x in fok:
r = x.replace('_RT','')
cmd = 'rm '+r
os.system(cmd)
def copyMissingFiles(remote="../../ship-ubuntu-1710-64/RUN_8000_2395",exclude=[]):
toCopy=[]
allFilesR = os.listdir(remote)
allFilesL = os.listdir(".")
for fname in allFilesR:
if not fname.find('histos')<0: continue
if fname.find('RT')<0: continue
if fname in exclude: continue
if not fname in allFilesL: toCopy.append(fname)
print "len",len(toCopy)
for fname in toCopy: os.system('cp '+remote+"/"+fname+' .')
def importRTFiles(local='.',remote='/home/truf/ship-ubuntu-1710-32/home/truf/muflux/Jan08'):
# mkdir /media/truf/disk2/home/truf/ShipSoft/ship-ubuntu-1710-32
# sshfs ship-ubuntu-1710-32.cern.ch:/home/truf/muflux /media/truf/disk2/home/truf/ShipSoft/ship-ubuntu-1710-32
fileWithTracks = checkFilesWithTracks(local)
allFiles = os.listdir(remote)
for x in allFiles:
if x.find('_RT')>0 and x.find('histos')<0 and not x in fileWithTracks:
os.system('cp '+remote+'/'+x+' .')
def importRecoFiles(local='.',remote='/media/truf/disk2/home/truf/ShipSoft/ship-ubuntu-1710-16/home/truf/muflux/Jan08'):
fileWithTracks = checkFilesWithTracks(remote)
for x in fileWithTracks: os.system('cp '+remote+'/'+x+' .')
def mergeHistos(local='.',case='residuals'):
allFiles = os.listdir(local)
if case == 'residuals':
dest = 'residuals.root'
tag = 'histos-residuals'
else:
dest = 'momDistributions.root'
tag = 'histos-analysis'
cmd = "hadd -f "+dest+' '
N=0
for x in allFiles:
if not x.find(tag)<0 :
cmd += (local+'/'+x+' ')
N+=1
if N>500:
os.system(cmd)
os.system('cp '+dest+' tmp.root')
cmd = "hadd -f "+dest+' tmp.root '
N=0
os.system(cmd)
def checkRecoRun(eosLocation=eospath,local='.'):
temp = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eosLocation,shell=True)
for x in temp.split('\n'):
if x.find('.root')<0: continue
if not x.find('START')<0: continue
fname = x[x.rfind('/')+1:]
RTname = fname.replace('.root','_RT.root')
histosName = "histos-residuals-"+RTname
if not os.path.isfile(RTname):
print "missing RT file",fname
if not os.path.isfile(histosName):
print "missing histogram file",fname
def exportRunToEos(eosLocation="/eos/experiment/ship/user/truf/muflux-reco",run=run,local="."):
temp = os.system("xrdfs "+os.environ['EOSSHIP']+" mkdir "+eosLocation+"/"+run)
failures = []
for x in os.listdir(local):
if x.find('.root')<0: continue
cmd = "xrdcp -f "+x+" $EOSSHIP/"+eosLocation+"/"+run+"/"+x
rc = os.system(cmd)
if rc != 0: failures.append(x)
if len(failures)!=0: print failures
def makeMomDistributions(run=0):
if run==0: fileList = checkFilesWithTracks(D='.')
else:
eospathReco = '/eos/experiment/ship/user/odurhan/muflux-recodata/'+run
fileList = []
temp = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eospathReco,shell=True)
for x in temp.split('\n'):
if x.find('.root')<0: continue
fileList.append( os.environ['EOSSHIP'] + x[x.find('/eos'):])
# all RT files with tracks
for fname in fileList:
if not fname.find('sys')<0: continue
if os.path.isfile('histos-analysis-'+fname[fname.rfind('/')+1:]): continue
cmd = "python "+pathToMacro+"drifttubeMonitoring.py -c anaResiduals -f "+fname+' &'
print 'momentum analysis:', cmd
os.system(cmd)
time.sleep(10)
while 1>0:
if count_python_processes('drifttubeMonitoring')<ncpus: break
time.sleep(10)
print "finished all the tasks."
zeroField = ['2199','2200','2201']
noRPC = ['2144','2154','2192','2210','2217','2218','2235','2236','2237','2240','2241','2243','2291','2345','2359']
def massProduction(keyword = 'RUN_8000_23',fnames=[],merge=False):
pathToMacro = "$FAIRSHIP/charmdet/"
eospathReco = '/eos/experiment/ship/user/odurhan/muflux-recodata/'
if merge:
for run in os.listdir('.'):
if run.find(keyword)<0: continue
os.chdir(run)
mergeHistos(local='.',case='momDistributions')
os.chdir('../')
else:
if len(fnames)==0:
temp = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eospathReco,shell=True)
fnames = temp.split('\n')
for x in fnames:
if x.find(keyword)<0: continue
run = x[x.rfind('/')+1:]
if not run in os.listdir('.'): os.system('mkdir '+run)
temp2 = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eospathReco+run,shell=True)
if temp2.find('.root')<0: continue
skip = False
for x in zeroField:
if not run.find(x)<0: skip = True
if skip: continue
os.chdir(run)
makeMomDistributions(run)
os.chdir('../')
def massProductionAlignment(keyword = 'RUN_8000_2395',fnames=[],merge=False):
pathToMacro = "$FAIRSHIP/charmdet/"
eospathReco = '/eos/experiment/ship/user/odurhan/muflux-recodata/'
if merge:
for run in os.listdir('.'):
if run.find(keyword)<0: continue
os.chdir(run)
mergeHistos(local='.')
os.chdir('../')
else:
if len(fnames)==0:
temp = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eospathReco,shell=True)
fnames = temp.split('\n')
for x in fnames:
if x.find(keyword)<0: continue
run = x[x.rfind('/')+1:]
if not run in os.listdir('.'):
print "directory for this run does not exist",run
# os.system('mkdir '+run)
continue
temp2 = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eospathReco+run,shell=True)
if temp2.find('.root')<0: continue
os.chdir(run)
fileList = []
for x in temp2.split('\n'):
if x.find('.root')<0: continue
fileList.append( os.environ['EOSSHIP'] + x[x.find('/eos'):])
checkAlignment(fileList)
os.chdir('../')
def redoMuonTracks():
fileList = checkFilesWithTracks(D='.')
for fname in fileList:
cmd = "python "+pathToMacro+"drifttubeMonitoring.py -c recoMuonTaggerTracks -u 1 -f "+fname+' &'
print 'redo muonTracks:', cmd
os.system(cmd)
time.sleep(10)
while 1>0:
if count_python_processes('drifttubeMonitoring')<ncpus: break
time.sleep(10)
print "finished all the tasks."
def reRunReco(r,fname):
fRT = fname.replace('.root','_RT2.root')
os.system('xrdcp -f $EOSSHIP/eos/experiment/ship/data/muflux/DATA_Rebuild_8000/rootdata/'+r+'/'+fname+' '+fRT)
f = ROOT.TFile.Open(os.environ['EOSSHIP']+'/eos/experiment/ship/user/odurhan/muflux-recodata/'+r+'/'+fname.replace('.root','_RT.root'))
ftemp = ROOT.TFile(fRT,'update')
ftemp.cd('')
upkl = Unpickler(f)
tMinAndTmax = upkl.load('tMinAndTmax')
pkl = Pickler(ftemp)
pkl.dump(tMinAndTmax,'tMinAndTmax')
ftemp.mkdir('histos')
ftemp.histos.cd('')
for tc in ['TDCMapsX','hitMapsX']:
tmp = f.histos.Get(tc)
X = tmp.Clone()
X.Write()
ftemp.Write("",ROOT.TFile.kOverwrite)
ftemp.Close()
cmd = "python "+pathToMacro+"drifttubeMonitoring.py -c recoStep1 -u 1 -f "+fRT+' &'
os.system(cmd)
print 'step 1:', cmd
def pot():
fileList=[]
# all RT files
for x in os.listdir('.'):
if x.find('_RT')>0 and x.find('histos')<0:
fileList.append(x)
fileList.sort()
scalerStat = {}
for fname in fileList:
f=ROOT.TFile(fname)
if not f.FindKey("scalers"):
print "no scalers in this file",fname
continue
scalers = f.scalers
scalers.GetEntry(0)
for x in scalers.GetListOfBranches():
name = x.GetName()
s = eval('scalers.'+name)
if name!='slices':
if not scalerStat.has_key(name):scalerStat[name]=0
scalerStat[name]+=s
keys = scalerStat.keys()
keys.sort()
for k in keys: print k,':',scalerStat[k]
def makeDTEfficiency(merge=False):
cmd = "hadd -f DTEff.root "
for fname in os.listdir('.'):
if not merge and fname.find('SPILL')==0:
cmd = "python "+pathToMacro+"drifttubeMonitoring.py -c DTeffWithRPCTracks -f "+fname+' &'
os.system(cmd)
time.sleep(10)
while 1>0:
if count_python_processes('drifttubeMonitoring')<ncpus: break
time.sleep(10)
elif merge and fname.find('histos-DTEff')==0:
cmd+=fname+' '
if merge: os.system(cmd)
print "finished all the tasks."
def importMomDistr(keyword = 'RUN_8000_2'):
pathHistos = '/media/truf/disk2/home/truf/ShipSoft/ship-ubuntu-1710-64/'
temp = os.listdir(pathHistos)
for x in temp:
if x.find(keyword)<0: continue
run = x
if not run in os.listdir('.'):
os.system('mkdir '+run)
os.system('cp '+pathHistos+run+'/momDistributions.root '+run)
| [
"[email protected]"
] | |
8716e411d7d36e6f03e049d7e4f758924e44cc44 | 169d68e02baceb04ce672c7771db351f71a1e47e | /untitled1/venv/Scripts/easy_install-script.py | bf6482875831339e7b62872ab6bf7d1f6f64f1fc | [] | no_license | yycarry1994/Pycharm_Project | 056959b2b14c171d1d5d87388ce0bd9bc97bd7d5 | da0d6176cccc365554aa46d47f33e7e1120e6e33 | refs/heads/master | 2020-06-28T10:12:29.711669 | 2019-08-02T09:23:41 | 2019-08-02T09:23:41 | 200,204,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | #!C:\Users\86181\PycharmProjects\untitled1\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"[email protected]"
] | |
94fa74e2b0d75f11f63daaf66cc8d206618c6999 | a9dd70520385b155d2f929127616d496fd89cd53 | /onecollect.py | 4ccf00cf62c39d3e08e3b1e5a884cf93f6fa30d1 | [] | no_license | singer0503/auto_show_mac_address | ce6917e1d05fab40f3d2a9c1017003eb30096580 | 9cce22d167a734335de04dde65d07359c8b8a587 | refs/heads/main | 2023-01-01T14:26:07.620655 | 2020-10-20T10:07:51 | 2020-10-20T10:07:51 | 305,604,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,659 | py | import time,multiprocessing,os
import paramiko #Note: module providing SSH functionality
import re #Note: regular-expression module
import sys #Note: modules used for error inspection
import traceback #Note: modules used for error inspection
try:
    from config import * #Note: pull the settings in from config.py
except:
hosts = ''
username = ''
password = ''
cmds = ''
stdmore = re.compile(r"-[\S\s]*[Mm]ore[\S\s]*-")
hostname_endcondition = re.compile(r"\S+[#>\]]\s*$")
class ssh_comm(object):
def __init__(self,address,username,password,port=22):
self.client = paramiko.SSHClient()
        self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) #Note: automatically add the host key to the local known_hosts without relying on load_system_host_keys(); if the remote key is unknown, the usual yes/no prompt is answered "yes" automatically
print('ssh_comm connection...')
self.client.connect(address, port=port, username=username, password=password, timeout=10, look_for_keys=True,allow_agent=False)
self.shell = self.client.invoke_shell()
while True:
time.sleep(0.5)
if self.shell.recv_ready() or self.shell.recv_stderr_ready():
break
self.shell.recv(4096).decode('utf-8')
self.shell.send('\n')
output = self.shell.recv(4096).decode('utf-8')
output = output
while True:
if hostname_endcondition.findall(output):
self.hostname = hostname_endcondition.findall(output)[0].strip().strip('<>[]#')
break
while True:
time.sleep(0.1)
if self.shell.recv_ready() or self.shell.recv_stderr_ready():
break
output += self.shell.recv(4096).decode('utf-8')
def recv_all(self,interval,stdjudge,stdconfirm):
endcondition = re.compile(r"%s\S*[#>\]]\s*$"%self.hostname)
while True:
time.sleep(interval)
if self.shell.recv_ready() or self.shell.recv_stderr_ready():
break
output = self.shell.recv(99999).decode('utf-8')
if (stdjudge != '') and (stdjudge in output):
self.shell.send(stdconfirm+'\n')
while True:
if stdmore.findall(output.split('\n')[-1]):
break
elif endcondition.findall(output):
break
while True:
time.sleep(interval)
if self.shell.recv_ready() or self.shell.recv_stderr_ready():
break
output += self.shell.recv(99999).decode('utf-8')
return output
def send_command(self,command_interval,command,stdjudge,stdconfirm):
command += "\n"
self.shell.send(command)
if ('hostname' in command) or ('sysname' in command):
while True:
time.sleep(0.5)
if self.shell.recv_ready() or self.shell.recv_stderr_ready():
break
stdout = self.shell.recv(4096).decode('utf-8')
self.hostname = hostname_endcondition.findall(stdout)[-1].strip().strip('<>[]#')
else:
stdout = self.recv_all(interval=command_interval,stdjudge=stdjudge,stdconfirm=stdconfirm)
data = stdout.split('\n')
while stdmore.findall(data[-1]):
self.shell.send(" ")
tmp = self.recv_all(interval=command_interval,stdjudge=stdjudge,stdconfirm=stdconfirm)
data = tmp.split('\n')
stdout += tmp
return stdout
def close(self):
if self.client is not None:
self.client.close()
def run(self,cmds,command_interval,stdjudge,stdconfirm):
stdout = ''
rc = 'success'
for cmd in cmds.split('\n'):
if cmd.strip():
stdout += self.send_command(command=cmd,command_interval=command_interval,stdjudge=stdjudge,stdconfirm=stdconfirm)
return rc, stdout
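# A minimal driver sketch for the class above (hypothetical device/credentials):
#
#   conn = ssh_comm('192.0.2.10', 'admin', 'secret')
#   rc, out = conn.run(cmds='show version', command_interval=0.1,
#                      stdjudge='Y/N', stdconfirm='Y')
#   conn.close()
#
# stdjudge/stdconfirm auto-answer confirmation prompts, and paging is handled
# by sending spaces whenever a "--More--"-style line is detected.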
def writeoutput(address,username,password,cmds):
try:
connection = ssh_comm(address=address, username=username, password=password, port=22)
except Exception as e:
error_class = e.__class__.__name__ #取得錯誤類型
detail = e.args[0] #取得詳細內容
cl, exc, tb = sys.exc_info() #取得Call Stack
lastCallStack = traceback.extract_tb(tb)[-1] #取得Call Stack的最後一筆資料
fileName = lastCallStack[0] #取得發生的檔案名稱
lineNum = lastCallStack[1] #取得發生的行號
funcName = lastCallStack[2] #取得發生的函數名稱
errMsg = "File \"{}\", line {}, in {}: [{}] {}".format(fileName, lineNum, funcName, error_class, detail)
print(errMsg)
rc = 'connection failed'
return address,rc
stdjudge = 'Y/N'
stdconfirm = 'Y'
rc,stdout = connection.run(cmds=cmds,command_interval=0.1,stdjudge=stdjudge,stdconfirm=stdconfirm)
connection.close()
hostname = connection.hostname.split('/')[-1].split(':')[-1]
    #Note: if needed, the hostname could be fetched via SNMP v2c instead
    #hostname = os.popen('/usr/local/net-snmp/bin/snmpwalk -v 2c -c tcnw %s sysname -Oqv'%address).read().strip()
    #Note: create one directory per device hostname
if not os.path.exists(hostname):
os.makedirs(hostname)
filename = hostname+'-'+time.strftime('%Y-%m-%d-%H-%M-%S',time.localtime(time.time()))
    #Note: save the collected output to a txt file
with open ('%s/%s.txt'%(hostname,filename),'w') as f:
f.write(stdout)
return address,rc
#Note: main routine invoked by the entry point
def main(username,password,hosts,cmds):
print('main('+username+','+password+','+hosts+','+cmds+')')
    #Note: prompt for any settings missing from config.py
    if username == '':
        username = raw_input('Enter username: ')
    if password == '':
        password = raw_input('Enter password: ')
    if hosts == '':
        hosts = raw_input('Enter host addresses: ')
    if cmds == '':
        cmds = raw_input('Enter collection commands: ')
host_list = hosts.split('\n')
if len(host_list) < 5:
processnum = len(host_list)
else:
processnum = 5
    #Note: tune the worker count here; at most 5 processes run concurrently by default
pool = multiprocessing.Pool(processes=processnum )
process = []
for host in host_list:
if host:
process.append(pool.apply_async(writeoutput, (host.strip(),username,password,cmds)))
pool.close()
pool.join()
outs = ''
for o in process:
rc,ip = o.get()
print('[ '+ip+' : '+rc+' ]')
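# A hypothetical config.py consumed by the `from config import *` at the top:
#
#   hosts = '192.0.2.1\n192.0.2.2'
#   username = 'admin'
#   password = 'secret'
#   cmds = 'terminal length 0\nshow version'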
#Note: program entry point
if __name__== '__main__':
main(username,password,hosts,cmds)
| [
"[email protected]"
] | |
072edfb2aef8eea1bcec97a890b4d476ea0fb2ba | e3fbb84151f39c3e576bd857a94256e9ecfc2817 | /0x07-python-test_driven_development/tests/6-max_integer_test.py | 73b68c8c52f56476a1d8122a16c47d872dda234f | [] | no_license | Myduzo/holbertonschool-higher_level_programming | 1a6ed5eb499b16f968aed4522c464e5c9f89a41e | aee7350473a8f740f6eb8e3f1abbb359d88cae0f | refs/heads/master | 2022-12-19T18:59:29.672269 | 2020-09-24T15:18:24 | 2020-09-24T15:18:24 | 259,302,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,044 | py | #!/usr/bin/python3
"""Unittest for max_integer([..])
"""
import unittest
max_integer = __import__('6-max_integer').max_integer
class TestMaxInteger(unittest.TestCase):
def test_list(self):
"""Empty list test"""
self.assertEqual(max_integer([5, 0, 2, 9]), 9)
self.assertEqual(max_integer([0, 0, 0]), 0)
self.assertEqual(max_integer([-5, -1, 0]), 0)
self.assertEqual(max_integer([-2, -3, -7]), -2)
self.assertEqual(max_integer([3]), 3)
self.assertEqual(max_integer([-4]), -4)
self.assertEqual(max_integer([1.5, 3.9, 4.25, 2.7]), 4.25)
def test_empty(self):
"""Empty list test"""
self.assertIsNone(max_integer([]), None)
def test_error(self):
with self.assertRaises(TypeError):
max_integer(7)
with self.assertRaises(TypeError):
max_integer(None)
with self.assertRaises(TypeError):
max_integer([4, "", 1, 6])
with self.assertRaises(TypeError):
max_integer([4, "x", 1, 6])
| [
"[email protected]"
] | |
3d9f60873d750960d9d30489e80d15c58a53b641 | 152d5204595c4f38be21d85907af54012fb3fece | /python/A1014280203/0/0.py | d1aa0de267004157929207e14a6341ce63895ea3 | [] | no_license | Fayostyle/spiderstudy | 09f7dc0792c64083cbdb7e5b19a025dc05d5d1f2 | 6687e9a192f83f125d532b5df898790acc47f965 | refs/heads/master | 2022-11-30T10:45:21.260739 | 2019-09-19T07:31:50 | 2019-09-19T07:31:50 | 132,615,459 | 0 | 0 | null | 2022-11-22T00:37:43 | 2018-05-08T13:47:01 | HTML | UTF-8 | Python | false | false | 252 | py | from PIL import Image, ImageFont, ImageDraw
image = Image.open('0.png')
w, h = image.size
font = ImageFont.truetype('arial.ttf', 50)
draw = ImageDraw.Draw(image)
draw.text((4*w/5, h/5), '5', fill=(255, 10, 10), font=font)
image.save('0.0.png', 'png')
| [
"[email protected]"
] | |
1ec736c4ee5667fdac6c32e4c815c223c8fd9563 | 183892abe5d5481d037e5568c4505f0fc3370a23 | /nn_sa.py | 125440347ced3cad9e15a827a21f423212b28fd8 | [] | no_license | nani67/SportsAnalytics_ML_DL | 0c921b384a6f6c1f0a4de57ff1a556979f4be310 | 3c0f492130324e813e33444e7ee1a197b2e5697c | refs/heads/master | 2020-08-28T10:49:39.629789 | 2019-10-27T12:47:27 | 2019-10-27T12:47:27 | 217,677,816 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | py | import keras
from keras.models import Sequential
from keras.layers import Dense
import pandas as pd
import numpy as np
dataset = pd.read_csv("sample_data.csv")
# Drop non-numeric columns and the target itself from the feature matrix
X = dataset.drop(["Date", "Referee", "FTR"], axis=1)
# Encode the full-time result (H/D/A in typical football results data) numerically
Y = dataset["FTR"].map({"H": 0, "D": 1, "A": 2})
X_train = X[0:15]
X_test = X[15:]
Y_train = Y[0:15]
Y_test = Y[15:]
model = Sequential();
firstLayer = Dense(units = 16, activation='relu', kernel_initializer='uniform',input_dim = 20)
model.add(firstLayer);
secondLayer = Dense(units = 1, activation = 'relu', kernel_initializer = 'uniform')
model.add(secondLayer);
model.compile(optimizer = 'adam', loss = 'mse', metrics=['accuracy'])
history = model.fit(X_train, Y_train, epochs = 300, batch_size = 50)
import matplotlib.pyplot as plt
plt.plot(history.history['accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train'], loc='upper left')
plt.show()
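# A minimal evaluation sketch on the held-out rows (assuming the numeric
# encoding of FTR above):
#
#   loss, acc = model.evaluate(X_test, Y_test)
#   print('test accuracy:', acc)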
| [
"[email protected]"
] | |
6dee1b5890f0ba2108624a4a64c8378c3f83a85a | 1f5d98c97ac9ff75b1d6b81f0a4a5110b05d4284 | /posts/api/views.py | a71629961d4f008f12bdb8916786235c9f681cc8 | [] | no_license | DukhDmytro/social_network | 70cdd4aeb1448fdbacce6d32f627b421b8614a8c | a8d563b17ffc90dc467c67150fd4f0e7aa5f3992 | refs/heads/master | 2022-12-12T21:04:17.354395 | 2020-03-03T13:34:45 | 2020-03-03T13:34:45 | 241,352,402 | 0 | 0 | null | 2022-12-08T03:38:56 | 2020-02-18T12:12:34 | Python | UTF-8 | Python | false | false | 1,764 | py | from django.shortcuts import get_object_or_404
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from rest_framework.decorators import action
from rest_framework.pagination import PageNumberPagination
from posts.models import Post
from .serializers import PostSerializer
from .permissions import IsOwnerOrReadOnly
class PostViewSet(ModelViewSet):
serializer_class = PostSerializer
queryset = Post.objects.all()
lookup_field = 'slug'
pagination_class = PageNumberPagination
def get_permissions(self):
if self.action in ['update', 'partial_update', 'destroy']:
self.permission_classes = IsAuthenticated, IsOwnerOrReadOnly
else:
self.permission_classes = IsAuthenticated,
return super().get_permissions()
    def create(self, request, *args, **kwargs):
        serializer = PostSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save(author=request.user)
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@action(detail=True)
def like(self, request, slug):
post = get_object_or_404(Post, slug=slug)
post.like.add(request.user)
post.unlike.remove(request.user)
post.save()
return Response({'response': 'you like this post'}, status=status.HTTP_200_OK)
@action(detail=True)
def unlike(self, request, slug):
post = get_object_or_404(Post, slug=slug)
post.unlike.add(request.user)
post.like.remove(request.user)
post.save()
return Response({'response': 'you unlike this post'}, status=status.HTTP_200_OK)
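# Hypothetical routing for this viewset, e.g. with a DRF router
# (router.register('posts', PostViewSet)):
#
#   GET /posts/<slug>/like/   -> adds request.user to post.like
#   GET /posts/<slug>/unlike/ -> adds request.user to post.unlike
#
# The custom actions respond to GET because @action defaults to methods=['get'].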
| [
"[email protected]"
] | |
49d91b038609858a956b8fc002568e272efc77f6 | f7c3b1f65cc4d6dba1dc9dcf70f523497a14d791 | /mujoco/tf_commons/ops.py | ff3b576def5ff27fcc5369b938fb8a9e04891c09 | [
"MIT"
] | permissive | hiwonjoon/ICML2019-TREX | b9cac1ac5d97b22374a92f2f3cf5d8956cdb2482 | 44f92b61ca6c79ac22d468382d4f2fbee164fb7a | refs/heads/master | 2021-06-16T15:52:33.325054 | 2021-04-10T04:57:24 | 2021-04-10T04:57:24 | 184,654,702 | 76 | 24 | MIT | 2020-01-28T22:11:19 | 2019-05-02T21:36:24 | Python | UTF-8 | Python | false | false | 19,966 | py | import tensorflow as tf
class Conv2d(object) :
def __init__(self,name,input_dim,output_dim,k_h=4,k_w=4,d_h=2,d_w=2,
stddev=0.02, data_format='NCHW',padding='SAME') :
with tf.variable_scope(name) :
assert(data_format == 'NCHW' or data_format == 'NHWC')
self.w = tf.get_variable('w', [k_h, k_w, input_dim, output_dim],
initializer=tf.truncated_normal_initializer(stddev=stddev))
self.b = tf.get_variable('b',[output_dim], initializer=tf.constant_initializer(0.0))
if( data_format == 'NCHW' ) :
self.strides = [1, 1, d_h, d_w]
else :
self.strides = [1, d_h, d_w, 1]
self.data_format = data_format
self.padding = padding
def __call__(self,input_var,name=None,w=None,b=None,**kwargs) :
w = w if w is not None else self.w
b = b if b is not None else self.b
if( self.data_format =='NCHW' ) :
return tf.nn.bias_add(
tf.nn.conv2d(input_var, w,
use_cudnn_on_gpu=True,data_format='NCHW',
strides=self.strides, padding=self.padding),
b,data_format='NCHW',name=name)
else :
return tf.nn.bias_add(
tf.nn.conv2d(input_var, w,data_format='NHWC',
strides=self.strides, padding=self.padding),
b,data_format='NHWC',name=name)
def get_variables(self):
return {'w':self.w,'b':self.b}
class WeightNormConv2d(object):
def __init__(self,name,input_dim,output_dim,k_h=4,k_w=4,d_h=2,d_w=2,
stddev=0.02, data_format='NHWC',padding='SAME',epsilon=1e-9) :
with tf.variable_scope(name) :
assert data_format == 'NHWC'
self.v = tf.get_variable('v', [k_h, k_w, input_dim, output_dim],
initializer=tf.truncated_normal_initializer(stddev=stddev))
self.g = tf.get_variable('g',[output_dim],
initializer=tf.constant_initializer(float('nan')))
self.b = tf.get_variable('b',[output_dim],
initializer=tf.constant_initializer(float('nan')))
self.strides = [1, d_h, d_w, 1]
self.padding = padding
self.epsilon = epsilon
def __call__(self,input_var,name=None,**kwargs) :
def _init():
v_norm = tf.nn.l2_normalize(self.v,axis=[0,1,2])
t = tf.nn.conv2d(input_var,v_norm,self.strides,self.padding,data_format='NHWC')
mu,var = tf.nn.moments(t,axes=[0,1,2])
std = tf.sqrt(var+self.epsilon)
return [tf.assign(self.g,1/std),tf.assign(self.b,-1.*mu/std)]
require_init = tf.reduce_any(tf.is_nan(self.g))
init_ops = tf.cond(require_init,_init,lambda : [self.g,self.b])
with tf.control_dependencies(init_ops):
w = tf.reshape(self.g,[1,1,1,tf.shape(self.v)[-1]]) * tf.nn.l2_normalize(self.v,axis=[0,1,2])
return tf.nn.bias_add(
tf.nn.conv2d(input_var, w,data_format='NHWC',
strides=self.strides, padding=self.padding),
self.b,data_format='NHWC',name=name)
def get_variables(self):
#TODO: self.v should be l2-normalized or not? / currently not.
return {'v':self.v,'b':self.b,'g':self.g}
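# Minimal usage sketch for the weight-normalized conv above (shapes assumed):
# g and b start as NaN sentinels, so the first call runs the data-dependent
# init (unit variance, zero mean over the first batch); later calls reuse them.
#
#   conv = WeightNormConv2d('wn_conv', input_dim=3, output_dim=16)
#   y = conv(x)   # x: float32 [N, H, W, 3] tensor, NHWC only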
class DepthConv2d(object) :
def __init__(self,name,input_dim,channel_multiplier,k_h=4,k_w=4,d_h=2,d_w=2,
stddev=0.02, data_format='NCHW', padding='SAME') :
with tf.variable_scope(name) :
assert(data_format == 'NCHW' or data_format == 'NHWC')
self.w = tf.get_variable('w', [k_h, k_w, input_dim, channel_multiplier],
initializer=tf.truncated_normal_initializer(stddev=stddev))
self.b = tf.get_variable('b',[input_dim*channel_multiplier], initializer=tf.constant_initializer(0.0))
if( data_format == 'NCHW' ) :
self.strides = [1, 1, d_h, d_w]
else :
self.strides = [1, d_h, d_w, 1]
self.data_format = data_format
self.padding = padding
def __call__(self,input_var,name=None,**xargs) :
return tf.nn.bias_add(
tf.nn.depthwise_conv2d(input_var, self.w,
data_format=self.data_format,
strides=self.strides, padding=self.padding),
self.b,data_format=self.data_format,name=name)
class Conv3d(object) :
def __init__(self,name,input_dim,output_dim,k_t=2,k_h=4,k_w=4,d_t=1,d_h=1,d_w=1,
stddev=0.02, data_format='NDHWC') :
with tf.variable_scope(name) :
assert(data_format == 'NDHWC')
self.w = tf.get_variable('w', [k_t, k_h, k_w, input_dim, output_dim],
initializer=tf.truncated_normal_initializer(stddev=stddev))
self.b = tf.get_variable('b',[output_dim], initializer=tf.constant_initializer(0.0))
self.strides = [d_t,d_h,d_w]
def __call__(self,input_var,name=None,w=None,b=None,**kwargs) :
w = w if w is not None else self.w
b = b if b is not None else self.b
#k_t,k_h,k_w,_,_ = self.w.get_shape().as_list()
#_t = tf.pad(input_var, [[0,0],[0,0],[k_h//2,k_h//2],[k_w//2,k_w//2],[0,0]], "SYMMETRIC")
return tf.nn.bias_add(
tf.nn.convolution(input_var, w,
strides=self.strides,
data_format='NDHWC',
padding='SAME'),
b,name=name)
def get_variables(self):
return {'w':self.w,'b':self.b}
class DilatedConv3D(object) :
def __init__(self,name,input_dim,output_dim,k_t=2,k_h=3,k_w=3,d_t=2,d_h=1,d_w=1,
stddev=0.02, data_format='NDHWC') :
with tf.variable_scope(name) :
assert(data_format == 'NDHWC')
self.w = tf.get_variable('w', [k_t, k_h, k_w, input_dim, output_dim],
initializer=tf.truncated_normal_initializer(stddev=stddev))
self.b = tf.get_variable('b',[output_dim], initializer=tf.constant_initializer(0.0))
self.strides = [1,1,1]
self.dilates = [d_t, d_h, d_w]
def __call__(self,input_var,name=None) :
k_t,k_h,k_w,_,_ = self.w.get_shape().as_list()
_t = tf.pad(input_var, [[0,0],[0,0],[k_h//2,k_h//2],[k_w//2,k_w//2],[0,0]], "SYMMETRIC")
return tf.nn.bias_add(
tf.nn.convolution(_t, self.w,
strides=self.strides, dilation_rate=self.dilates,
padding='VALID'),
self.b,name=name)
class Linear(object) :
def __init__(self,name,input_dim,output_dim,stddev=0.02) :
with tf.variable_scope(name) :
self.w = tf.get_variable('w',[input_dim, output_dim],
initializer=tf.random_normal_initializer(stddev=stddev))
self.b = tf.get_variable('b',[output_dim],
initializer=tf.constant_initializer(0.0))
def __call__(self,input_var,name=None,w=None,b=None,**kwargs) :
w = w if w is not None else self.w
b = b if b is not None else self.b
if( input_var.shape.ndims > 2 ) :
dims = tf.reduce_prod(tf.shape(input_var)[1:])
return tf.matmul(tf.reshape(input_var,[-1,dims]),w) + b
else :
return tf.matmul(input_var,w)+b
def get_variables(self):
return {'w':self.w,'b':self.b}
class WeightNormLinear(object):
def __init__(self,name,input_dim,output_dim,stddev=0.02,epsilon=1e-10) :
with tf.variable_scope(name) :
self.v = tf.get_variable('v',[input_dim, output_dim],
initializer=tf.random_normal_initializer(stddev=stddev))
self.g = tf.get_variable('g',[output_dim],
initializer=tf.constant_initializer(float('nan')))
self.b = tf.get_variable('b',[output_dim],
initializer=tf.constant_initializer(float('nan')))
self.epsilon = epsilon
def __call__(self,input_var,name=None,**kwargs) :
if( input_var.shape.ndims > 2 ) :
dims = tf.reduce_prod(tf.shape(input_var)[1:])
input_var = tf.reshape(input_var,[-1,dims])
def _init():
v_norm = tf.nn.l2_normalize(self.v,axis=0)
t = tf.matmul(input_var,v_norm)
mu,var = tf.nn.moments(t,axes=[0])
std = tf.sqrt(var+self.epsilon)
return [tf.assign(self.g,1/std),tf.assign(self.b,-1.*mu/std)]
require_init = tf.reduce_any(tf.is_nan(self.g))
init_ops = tf.cond(require_init,_init,lambda : [self.g,self.b])
with tf.control_dependencies(init_ops):
w = tf.expand_dims(self.g,axis=0) * tf.nn.l2_normalize(self.v,axis=0)
return tf.matmul(input_var,w)+self.b
def get_variables(self):
#TODO: self.v should be l2-normalized or not? / currently not.
return {'v':self.v,'b':self.b,'g':self.g}
class SymPadConv2d(object): #Resize and Convolution(upsacle by 2)
def __init__(self,name,input_dim,output_dim,
k_h=3,k_w=3,stddev=0.02) :
assert k_h%2==1 and k_w%2==1, 'kernel size should be odd numbers to ensure exact size'
with tf.variable_scope(name) :
self.w = tf.get_variable('w', [k_h, k_w, input_dim, output_dim],
initializer=tf.random_normal_initializer(stddev=stddev))
self.b = tf.get_variable('b',[output_dim], initializer=tf.constant_initializer(0.0))
self.padding = [ [0,0],[k_h//2,k_h//2],[k_w//2,k_w//2],[0,0] ]
def __call__(self,input_var,name=None,**kwargs):
_,h,w,c = input_var.shape.as_list()
_t = tf.image.resize_nearest_neighbor(input_var, [h*2, w*2])
_t = tf.pad(_t,self.padding, mode='SYMMETRIC')
return tf.nn.bias_add(
tf.nn.conv2d(_t, self.w,
data_format='NHWC', #we can't use cudnn due to resize method...
strides=[1,1,1,1], padding="VALID"),
self.b,data_format='NHWC',name=name)
def get_variables(self):
return {'w':self.w,'b':self.b}
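# Upsample-by-2 sketch (NHWC input assumed): nearest-neighbour resize followed
# by a symmetrically padded odd-sized conv, a common checkerboard-free
# alternative to a strided transposed convolution.
#
#   up = SymPadConv2d('up1', input_dim=32, output_dim=16)
#   y = up(x)   # spatial dims doubled, 16 output channels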
class WeightNormSymPadConv2d(object): #Resize and Convolution(upsacle by 2)
def __init__(self,name,input_dim,output_dim,
k_h=3,k_w=3,stddev=0.02) :
assert k_h%2==1 and k_w%2==1, 'kernel size should be odd numbers to ensure exact size'
with tf.variable_scope(name) :
self.conv2d = WeightNormConv2d('conv',input_dim,output_dim,k_h,k_w,1,1,data_format='NHWC',padding='VALID')
self.padding = [ [0,0],[k_h//2,k_h//2],[k_w//2,k_w//2],[0,0] ]
def __call__(self,input_var,name=None,**kwargs):
_,h,w,c = input_var.shape.as_list()
_t = tf.image.resize_nearest_neighbor(input_var, [h*2, w*2])
_t = tf.pad(_t,self.padding, mode='SYMMETRIC')
return self.conv2d(_t)
def get_variables(self):
return self.conv2d.get_variables()
class TransposedConv2d(object):
def __init__(self,name,input_dim,out_dim,
k_h=4,k_w=4,d_h=2,d_w=2,stddev=0.02,data_format='NCHW') :
with tf.variable_scope(name) :
self.w = tf.get_variable('w', [k_h, k_w, out_dim, input_dim],
initializer=tf.random_normal_initializer(stddev=stddev))
self.b = tf.get_variable('b',[out_dim], initializer=tf.constant_initializer(0.0))
self.data_format = data_format
if( data_format =='NCHW' ):
self.strides = [1, 1, d_h, d_w]
else:
self.strides = [1, d_h, d_w, 1]
def __call__(self,input_var,name=None,**xargs):
shapes = tf.shape(input_var)
if( self.data_format == 'NCHW' ):
shapes = tf.stack([shapes[0],tf.shape(self.b)[0],shapes[2]*self.strides[2],shapes[3]*self.strides[3]])
else:
shapes = tf.stack([shapes[0],shapes[1]*self.strides[1],shapes[2]*self.strides[2],tf.shape(self.b)[0]])
return tf.nn.bias_add(
tf.nn.conv2d_transpose(input_var,self.w,output_shape=shapes,
data_format=self.data_format,
strides=self.strides,padding='SAME'),
self.b,data_format=self.data_format,name=name)
def get_variables(self):
return {'w':self.w,'b':self.b}
class WeightNormTransposedConv2d(object):
def __init__(self,name,input_dim,out_dim,
k_h=4,k_w=4,d_h=2,d_w=2,stddev=0.02,data_format='NHWC',epsilon=1e-9) :
with tf.variable_scope(name) :
assert data_format == 'NHWC'
self.v = tf.get_variable('v', [k_h, k_w, out_dim, input_dim],
initializer=tf.truncated_normal_initializer(stddev=stddev))
self.g = tf.get_variable('g',[out_dim],
initializer=tf.constant_initializer(float('nan')))
self.b = tf.get_variable('b',[out_dim],
initializer=tf.constant_initializer(float('nan')))
self.strides = [1, d_h, d_w, 1]
self.epsilon = epsilon
def __call__(self,input_var,name=None,**kwargs) :
shapes = tf.shape(input_var)
shapes = tf.stack([shapes[0],shapes[1]*self.strides[1],shapes[2]*self.strides[2],tf.shape(self.b)[0]])
def _init():
v_norm = tf.nn.l2_normalize(self.v,axis=[0,1,3])
t = tf.nn.conv2d_transpose(input_var,v_norm,
output_shape=shapes,
strides=self.strides,
padding='SAME',
data_format='NHWC')
mu,var = tf.nn.moments(t,axes=[0,1,2])
std = tf.sqrt(var+self.epsilon)
return [tf.assign(self.g,1/std),tf.assign(self.b,-1.*mu/std)]
require_init = tf.reduce_any(tf.is_nan(self.g))
init_ops = tf.cond(require_init,_init,lambda : [self.g,self.b])
with tf.control_dependencies(init_ops):
w = tf.reshape(self.g,[1,1,tf.shape(self.v)[2],1]) * tf.nn.l2_normalize(self.v,axis=[0,1,3])
return tf.nn.bias_add(
tf.nn.conv2d_transpose(input_var,w,
output_shape=shapes,
strides=self.strides,
padding='SAME',
data_format='NHWC'),
self.b,data_format='NHWC',name=name)
def get_variables(self):
#TODO: self.v should be l2-normalized or not? / currently not.
return {'v':self.v,'b':self.b,'g':self.g}
class LayerNorm():
def __init__(self,name,axis,out_dim=None,epsilon=1e-7,data_format='NHWC') :
"""
out_dim: Recentering by adding bias again.
The previous bias can be ignored while normalization.
(when you normalize over channel only)
"""
assert data_format=='NCHW' or data_format=='NHWC'
assert len(axis) != 1 or (len(axis) == 1 and out_dim != None)
"""
TODO: Track Moving mean and variance, and use this statistics.
with tf.variable_scope(name):
self.moving_mean = tf.get_variable('moving_mean',[dims], initializer=tf.constant_initializer(0.0), trainable=False)
self.moving_variance = tf.get_variable('moving_variance',[dims], initializer=tf.constant_initializer(1.0), trainable=False)
"""
if out_dim is not None:
with tf.variable_scope(name) :
self.gamma= tf.get_variable('gamma',[1,1,1,out_dim], initializer=tf.constant_initializer(1.0))
self.beta = tf.get_variable('beta',[out_dim], initializer=tf.constant_initializer(0.0))
else:
self.gamma = None
self.beta = None
self.axis = axis
self.epsilon = epsilon
self.data_format = data_format
self.name = name
def __call__(self,input_var,**kwargs) :
mean, var = tf.nn.moments(input_var, self.axis, keep_dims=True)
ret = (input_var - mean) / tf.sqrt(var+self.epsilon)
if self.gamma is None :
return ret
else:
return tf.nn.bias_add(ret*self.gamma,
self.beta,data_format=self.data_format)
def get_variables(self):
return {'gamma':self.gamma,'beta':self.beta} if self.gamma is not None else {}
class InstanceNorm():
def __init__(self,name,format='NCHW',epsilon=1e-5) :
assert(format=='NCHW' or format=='NHWC')
self.axis = [2,3] if format == 'NCHW' else [1,2]
self.epsilon = epsilon
self.name = name
def __call__(self,input_var) :
mean, var = tf.nn.moments(input_var, self.axis, keep_dims=True)
return (input_var - mean) / tf.sqrt(var+self.epsilon)
class BatchNorm(object):
def __init__(self,name,dims,axis=1,epsilon=1e-3,momentum=0.999,center=True,scale=True) :
self.momentum = momentum
self.epsilon = epsilon
self.axis = axis
self.center=center
self.scale=scale
with tf.variable_scope(name) as scope:
with tf.variable_scope('bn') :
self.gamma= tf.get_variable('gamma',[dims], initializer=tf.constant_initializer(1.0))
self.beta = tf.get_variable('beta',[dims], initializer=tf.constant_initializer(0.0))
self.moving_mean = tf.get_variable('moving_mean',[dims], initializer=tf.constant_initializer(0.0), trainable=False)
self.moving_variance = tf.get_variable('moving_variance',[dims], initializer=tf.constant_initializer(1.0), trainable=False)
self.scope = scope
def __call__(self,input_var,is_training,**xargs) :
with tf.variable_scope(self.scope) :
return tf.layers.batch_normalization(
input_var,
axis=self.axis,
momentum=self.momentum,
epsilon=self.epsilon,
center=self.center,
scale=self.scale,
training=is_training,
reuse=True,
name='bn')
"""
---Do NOT forget to add update_ops dependencies for your loss function.---
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,tf.get_default_graph().get_name_scope())
#And, do not make any scope inside map_fn, since scope.name will not work...(it is corrupted by map_fn.)
print(update_ops)
with tf.control_dependencies(update_ops):
"""
def get_variables(self):
return {}
class Lrelu(object):
def __init__(self,leak=0.2,name='lrelu') :
self.leak = leak
self.name = name
def __call__(self, x, **kwargs) :
return tf.maximum(x, self.leak*x, name=self.name)
def get_variables(self):
return {}
class ResidualBlock() :
def __init__(self,name,filters,filter_size=3,non_linearity=Lrelu,normal_method=InstanceNorm) :
self.conv_1 = Conv2d(name+'_1',filters,filters,filter_size,filter_size,1,1)
self.normal = normal_method(name+'_norm')
self.nl = non_linearity()
self.conv_2 = Conv2d(name+'_2',filters,filters,filter_size,filter_size,1,1)
def __call__(self,input_var) :
_t = self.conv_1(input_var)
_t = self.normal(_t)
_t = self.nl(_t)
_t = self.conv_2(_t)
return input_var + _t
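# Minimal composition sketch (assumes float input whose channel count matches
# `filters`, since the residual sum requires identical shapes):
#
#   block = ResidualBlock('res1', filters=64)
#   y = block(x)   # y = x + conv(lrelu(norm(conv(x)))), same shape as x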
| [
"[email protected]"
] | |
6cc605df33d06fc2f8575460a8beca5e972c9fe6 | f65b633d0760e20ef5e0066be10aa18168f5659e | /documents/views.py | 448b7e5a09e37e05fa8e98adc18b108ace1694e2 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | boxed/curia | a2b5ce5feda114bac1637de112b20026dabee5ae | c19f12f77b570b180acf4ec1ee05ea77b87b5fc9 | refs/heads/master | 2021-03-12T22:53:36.748160 | 2014-10-20T16:56:45 | 2014-10-20T16:56:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,834 | py | import re
from datetime import datetime
from django.contrib.auth.views import login_required
from django.http import HttpResponse, HttpResponseRedirect
from django.utils.simplejson import dumps
from django.contrib.auth.models import User,Group
from django.utils.encoding import smart_unicode
from curia.documents.models import Document, Version
from curia.shortcuts import *
from curia import *
from django.utils.translation import ugettext as _
from sets import Set
from curia.labels import get_labels
from curia.labels.models import Label
from curia.labels import handle_labels, mark_labels_as_deleted
# helper
def get_latest(document_id):
try: return Version.objects.filter(document=document_id).order_by('-id')[0]
except Version.DoesNotExist: return None;
def validate_wiki_links(owner_user, owner_group, form, contents_name = 'contents'):
from django.utils.encoding import smart_unicode
contents = smart_unicode(form.data[contents_name])
links = list(re.finditer(r'(\[(.*?)\])', contents))
errors = []
link_targets = {}
# examples of use:
# [images/groups/1/sets/3]
for link in links:
title = link.groups()[1]
if ';' in title:
group_name, title = title.split(u';')
group = get_objects_from(Group, name=group_name)
if len(group) == 1:
owner_group = group[0]
else:
user = get_objects_from(User, username=group_name)
if len(user) == 1:
owner_user = user[0]
else:
errors.append(_('%s is not a valid group or user name') % group_name)
continue
documents = get_objects_from(Document, owner_user=owner_user, owner_group=owner_group, title=title, deleted=False)
if len(documents) != 1:
errors.append(_('Could not find document %s') % link.groups()[1])
else:
link_targets[link.groups()[1]] = documents[0]
if len(errors) != 0:
form.errors[contents_name] = errors
else:
# replace from the end as to not change the string in a way that interferes with the following replace operation
links.reverse()
for link in links:
target = link_targets[link.groups()[1]]
contents = contents.replace(link.groups()[0], '<a href="'+target.get_absolute_url()+'">'+smart_unicode(target)+'</a>')
return contents
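# Illustrative behaviour with hypothetical data: "[mygroup;Home]" resolves to
# an <a href="..."> link when a Document titled "Home" exists for the group
# "mygroup"; unresolved titles are appended to form.errors instead, and links
# are substituted back-to-front so earlier match offsets stay valid.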
# views
def version_response(request, v):
return render_to_response(request, 'documents/version.html', {'version': v, 'document': v.document, 'owner':get_owner(v.document)})
def view_latest(request, document_id):
v = get_latest(document_id)
check_access(request.user, obj=v.document, command='view')
if v == None:
raise Http404
return version_response(request, v)
def view_version(request, version_id, document_id):
v = get_object_or_404_and_check_access(request, Version, pk=version_id, command='view')
check_access(request.user, obj=v.document, command='view')
#if v.document.id != document_id:
# raise something
return version_response(request, v)
def view_version_list(request, document_id):
document = get_object_or_404_and_check_access(request, Document, pk=document_id, command='view')
return render_to_response(request, 'documents/version_list.html', {'version_list': Version.objects.filter(document=document_id), 'document': Document.objects.get(pk=document_id)})
def add_document(request):
is_presentation = get_boolean(request,'is_presentation')
owner_group = None
owner_user = None
class DocumentForm(django.forms.Form):
title = django.forms.CharField(max_length=1024, label=_('Title'))
#labels = django.forms.CharField(required=False, label=_('Labels'))
contents = django.forms.CharField(required=False, widget = django.forms.Textarea, label=_('Contents'))
group_id = get_integer(request,'group_id')
user_id = get_integer(request,'user_id')
if group_id:
owner_group = get_object_or_404_and_check_access(request, Group, pk=group_id, command='add')
check_access(request.user, obj=owner_group, command='add document')
else:
owner_user = get_object_or_404_and_check_access(request, User, pk=user_id, command='add')
check_access(request.user, obj=owner_user, command='add document')
if request.POST:
form = DocumentForm(request.POST)
if form.is_valid():
#Handle the document
if owner_group != None:
document = Document.objects.create(owner_group=owner_group, owner_user=owner_user, is_presentation=is_presentation)
else:
document = Document.objects.create(owner_user=owner_user, is_presentation=is_presentation)
if document.is_presentation:
            if owner_group is None:
                title = owner_user.username + 's Presentation'
            else:
                title = owner_group.name + 's Presentation'
else:
title = form.cleaned_data['title']
new_version = Version(document=document,title=title, contents=strip_p(form.cleaned_data['contents']), owner=request.user)
new_version.save()
#Handle the labels
#handle_labels(request,document)
if document.is_presentation:
if document.owner_group:
return HttpResponseRedirect(document.owner_group.get_absolute_url())
else:
return HttpResponseRedirect(document.owner_user.get_absolute_url())
return HttpResponseRedirect(document.get_absolute_url())
else:
form = DocumentForm()
return render_to_response(request, 'documents/add.html', {'form':form})
def edit_document(request, document_id, is_creating=False):
group_id = get_integer(request, 'group_id')
document = get_object_or_404_and_check_access(request, Document, pk=document_id, command='edit')
user = request.user
class DocumentForm(django.forms.Form):
if not document.is_presentation:
title = django.forms.CharField(max_length=1024, label=_('Title'))
#labels = django.forms.CharField(required=False, label=_('Labels'))
contents = django.forms.CharField(required=False, widget = django.forms.Textarea, label=_('Contents'))
edit_version = django.forms.IntegerField(widget = django.forms.HiddenInput, required=True)
if request.POST:
form = DocumentForm(request.POST)
if int(request.POST['edit_version']) != document.get_latest_version().id:
post = request.POST.copy()
post['edit_version'] = document.get_latest_version().id
form = DocumentForm(post)
form.errors['contents'] = [_('Document was changed after you began editing it, please review the changes and then press save again')]
if form.is_valid():
#Handle the labels
#handle_labels(request,document)
#Handle the document
if not document.is_presentation:
if form.cleaned_data.has_key('title'):
title = form.cleaned_data['title']
else:
title = document.get_latest_version().title
else:
if user.first_name.endswith('s'):
title=user.first_name+' presentation'
else:
title = user.first_name+'s presentation'
new_version = Version(document=document,title=title, contents=strip_p(form.cleaned_data['contents']), owner=request.user)
new_version.save()
if request.external:
from curia.homepage.models import MenuItem
try:
menu = MenuItem.objects.get(content_type=get_content_type(document), object_id=document.id)
menu.title = title
menu.save()
except MenuItem.DoesNotExist:
pass
if document.is_presentation:
if document.owner_group:
return HttpResponseRedirect(document.owner_group.get_absolute_url())
else:
return HttpResponseRedirect(document.owner_user.get_absolute_url())
return HttpResponseRedirect(document.get_absolute_url())
else:
latest_version = document.get_latest_version()
form = DocumentForm(initial={'title': latest_version.title, 'contents': latest_version.contents, 'edit_version':latest_version.id})
return render_to_response(request, 'documents/edit.html', {'form':form, 'document':document})
def delete_document(request, document_id):
document = get_object_or_404_and_check_access(request, Document, pk=document_id, command='delete')
from curia import delete_objects
delete_objects(document)
if request.external:
from curia.homepage.models import MenuItem
try:
menu = MenuItem.objects.get(content_type=get_content_type(document), object_id=document.id)
menu.delete()
except MenuItem.DoesNotExist:
pass
return HttpResponse(dumps(document_id, ensure_ascii=False), content_type="text/json; charset=UTF-8")
def view_documents_of_user(request, user_id):
user = get_object_or_404_and_check_access(request, User, pk=user_id, command='view')
objects = get_objects_from(Document, deleted=False, owner_user=user, owner_group=None, is_presentation=False)
return render_to_response(request, 'documents/document_list.html', {'owner':user, 'objects':objects, 'type':'users'})
def view_documents_of_group(request, group_id=None):
if group_id != None:
group = get_object_or_404_and_check_access(request, Group, pk=group_id, command='view')
else:
group = get_current_community()
objects = get_objects_from(Document, deleted=False, owner_group=group, is_presentation=False)
return render_to_response(request, 'documents/document_list.html', {'owner':group, 'objects':objects, 'type':'groups'})
def revert_to_version(request, document_id, version_id):
old_version = Version.objects.get(pk = version_id)
document = Document.objects.get(pk = document_id)
new_version = Version(document=document,title=old_version.title, contents=old_version.contents, owner=request.user)
new_version.save()
return version_response(request, new_version)
| [
"[email protected]"
] | |
6e42e56cfec64fcecc126ae040f1964ab67867ca | adde969450333a4a459e7122521eb20fea9659f7 | /python-flask/openapi_server/test/test_default_controller.py | 16f79d5d61294677c3ddcdee117e032d41c016dd | [] | no_license | mishin/money-tracker-api-docs | 49c480765ecaacd8ff448fa7d264de590239b572 | 9b0f03fd054f54f136d1d28f2d20ecec9515210a | refs/heads/master | 2022-04-12T23:14:33.557855 | 2019-12-07T15:08:19 | 2019-12-07T15:08:19 | 271,111,873 | 1 | 0 | null | 2020-06-09T21:18:06 | 2020-06-09T21:18:05 | null | UTF-8 | Python | false | false | 2,819 | py | # coding: utf-8
from __future__ import absolute_import
import unittest
from flask import json
from six import BytesIO
from openapi_server.models.expense_request import ExpenseRequest # noqa: E501
from openapi_server.models.expense_response import ExpenseResponse # noqa: E501
from openapi_server.models.expenses_response import ExpensesResponse # noqa: E501
from openapi_server.models.not_found import NotFound # noqa: E501
from openapi_server.models.result_response import ResultResponse # noqa: E501
from openapi_server.test import BaseTestCase
class TestDefaultController(BaseTestCase):
"""DefaultController integration test stubs"""
def test_add_expense(self):
"""Test case for add_expense
adds a expense
"""
expense_request = {
"date" : "2019-10-22T00:00:00.000+0000",
"amount" : 100,
"content" : "おにぎり"
}
headers = {
'Content-Type': 'application/json',
}
response = self.client.open(
'/v1/expenses',
method='POST',
headers=headers,
data=json.dumps(expense_request),
content_type='application/json')
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_get_expense(self):
"""Test case for get_expense
gets a expense
"""
headers = {
'Accept': 'application/json',
}
response = self.client.open(
'/v1/expenses/{expense_id}'.format(expense_id='expense_id_example'),
method='GET',
headers=headers)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_get_expenses(self):
"""Test case for get_expenses
gets expenses
"""
        query_string = [('beginDate', '2019-10-01'),
                        ('endDate', '2019-10-31')]
headers = {
'Accept': 'application/json',
}
response = self.client.open(
'/v1/expenses',
method='GET',
headers=headers,
query_string=query_string)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_get_result(self):
"""Test case for get_result
gets Result
"""
headers = {
'Accept': 'application/json',
}
response = self.client.open(
'/v1/results/{result_id}'.format(result_id=2019-10),
method='GET',
headers=headers)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
5c4f26641289e95cd612cadb24473c07e4a66f3e | 94551b3b0513f4f2b3930d9c28b36de079fac487 | /config_param_creator.py | 47c0c32a11d039deac4b9fd6858cc6a93e1a3bf1 | [] | no_license | KWiecko/su2_gui_pub | 92b0f10f0a208c57cad62b216b0f1d1e4e1e1566 | 8d781f1a3c1c231527c47f1289277a7851a64329 | refs/heads/master | 2020-07-13T07:19:52.893502 | 2019-09-25T07:36:55 | 2019-09-25T07:36:55 | 205,030,690 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,470 | py | import pyforms
from pyforms.basewidget import BaseWidget
from pyforms.controls import ControlButton, ControlBase
from su2_basic_widget import SU2BasicWidget
from su2_config_creator import SU2Config
from config_param_creator_ctrl import ConfigParamCreatorCtrl
class ConfigParamCreator(SU2BasicWidget):
# label = '', initial_max_width = None, initial_max_height = None,
# initial_min_width = None, initial_min_height = None)
# @property
# def param_name_ctrl(self) -> ControlBase:
# return self._param_name_ctrl
#
# @param_name_ctrl.setter
# def param_name_ctrl(self, new_val: ControlBase):
# self._param_name_ctrl = new_val
@property
def param_name_ctrl_grpbx(self) -> ControlBase:
return self._param_name_ctrl_grpbx
@param_name_ctrl_grpbx.setter
def param_name_ctrl_grpbx(self, new_val: ControlBase):
self._param_name_ctrl_grpbx = new_val
# @property
# def allwd_vals_ctrl(self) -> ControlBase:
# return self._allwd_vals_ctrl
#
# @allwd_vals_ctrl.setter
# def allwd_vals_ctrl(self, new_val: ControlBase):
# self._allwd_vals_ctrl = new_val
@property
def allwd_vals_ctrl_grpbx(self) -> ControlBase:
return self._allwd_vals_ctrl_grpbx
@allwd_vals_ctrl_grpbx.setter
def allwd_vals_ctrl_grpbx(self, new_val: ControlBase):
self._allwd_vals_ctrl_grpbx = new_val
# @property
# def default_val_ctrl(self) -> ControlBase:
# return self._default_val_ctrl
#
# @default_val_ctrl.setter
# def default_val_ctrl(self, new_val: ControlBase):
# self._default_val_ctrl = new_val
@property
def default_val_ctrl_grpbx(self) -> ControlBase:
return self._default_val_ctrl_grpbx
@default_val_ctrl_grpbx.setter
def default_val_ctrl_grpbx(self, new_val: ControlBase):
self._default_val_ctrl_grpbx = new_val
# @property
# def tooltip_ctrl(self) -> ControlBase:
# return self._tooltip_ctrl
#
# @tooltip_ctrl.setter
# def tooltip_ctrl(self, new_val: ControlBase):
# self._tooltip_ctrl = new_val
@property
def tooltip_ctrl_grpbx(self) -> ControlBase:
return self._tooltip_ctrl_grpbx
@tooltip_ctrl_grpbx.setter
def tooltip_ctrl_grpbx(self, new_val: ControlBase):
self._tooltip_ctrl_grpbx = new_val
@property
def ctrld_cfg_f_creator(self) -> object:
return self._ctrld_cfg_f_creator
@ctrld_cfg_f_creator.setter
def ctrld_cfg_f_creator(self, new_val: object):
self._ctrld_cfg_f_creator = new_val
@property
def config_field_creator_ctr(self) -> ConfigParamCreatorCtrl:
return self._config_field_creator_ctr
@config_field_creator_ctr.setter
def config_field_creator_ctr(self, new_val: ConfigParamCreatorCtrl):
self._config_field_creator_ctr = new_val
@property
def set_param_button(self) -> ControlButton:
return self._set_param_button
@set_param_button.setter
def set_param_button(self, new_val: ControlButton):
self._set_param_button = new_val
@property
def cancel_button(self) -> ControlButton:
return self._cancel_button
@cancel_button.setter
def cancel_button(self, new_val: ControlButton):
self._cancel_button = new_val
def __init__(
self, tabs_ctrl: object, label='Config param creator',
initial_max_width: int = 400,
initial_max_height: int = 700, initial_min_width: int = 200,
initial_min_height: int = 500,
su2_cfg_obj: SU2Config = None, # {'example_sect': {}},
des_cfg_section: str = 'INPUT_OUTPUT_INFORMATION'):
super(ConfigParamCreator, self).__init__(
label=label, initial_max_width=initial_max_width,
initial_max_height=initial_max_height,
initial_min_width=initial_min_width,
initial_min_height=initial_min_height)
if not su2_cfg_obj:
print('SU2 cfg was not found')
su2_cfg_obj = SU2Config()
input('SU2 cfg was not found')
self.config_field_creator_ctr = \
ConfigParamCreatorCtrl(
su2_cfg_obj=su2_cfg_obj, des_cfg_section=des_cfg_section,
ctrld_cfg_f_creator=self, tabs_ctrl=tabs_ctrl)
if __name__ == '__main__':
pyforms.start_app(ConfigParamCreator, geometry=(400, 500, 500, 500))
# test_cfc = ConfigFieldCreator() | [
"[email protected]"
] | |
5738640214bb8de04103ec491a2e51acea88a2f5 | 4182189a2624d4aa36619915b7dba42fc5ecdaad | /xxxRekt/urls.py | e179cc2dd01d6dffeacffb8380cffad8d245d708 | [] | no_license | HackerProjects/lindahearts | 3b74e187176404b587bb55b03301f285966d393f | 37492d88ebd28d65b225125b9c75547cb8687a10 | refs/heads/master | 2020-03-21T15:34:51.570455 | 2018-06-26T11:08:07 | 2018-06-26T11:08:07 | 138,721,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | from django.contrib import admin
from django.urls import path, re_path, include
from django.contrib.staticfiles.urls import static, staticfiles_urlpatterns
from . import settings
urlpatterns = [
path('admin/', admin.site.urls),
re_path(r'', include('home.urls'))
]
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
] | |
0c61ce225d80072549a004ed2591a718c5672896 | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flashblade/FB_2_8/models/target_get_response.py | ac033a0a864676d3ec597b61877bb7714e0e01c8 | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 4,220 | py | # coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.8, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.8
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_8 import models
class TargetGetResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'continuation_token': 'str',
'total_item_count': 'int',
'items': 'list[Target]'
}
attribute_map = {
'continuation_token': 'continuation_token',
'total_item_count': 'total_item_count',
'items': 'items'
}
required_args = {
}
def __init__(
self,
continuation_token=None, # type: str
total_item_count=None, # type: int
items=None, # type: List[models.Target]
):
"""
Keyword args:
continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the `continuation_token` to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The `continuation_token` is generated if the `limit` is less than the remaining number of items, and the default sort is used (no sort is specified).
total_item_count (int): Total number of items after applying `filter` params.
items (list[Target]): A list of target objects.
"""
if continuation_token is not None:
self.continuation_token = continuation_token
if total_item_count is not None:
self.total_item_count = total_item_count
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `TargetGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TargetGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TargetGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
048f1454876a4040818f971ff05431b5505e1f2b | 4e31845d08ae3d8a525ce67bb1abab271e037eca | /FirefoxBotAD/fb.sikuli/sikuli.puller.v1.1/puller.py | f1a63ecf8fb0612ea8c16c81a1b1770b513c721d | [] | no_license | AdCrushMedia/sikuli | d1a17b814ec5b80c6107828dd0d8c2c89ae24995 | 7dd03f7653a3ecef25eb1ceb1970baca4e4cfd9b | refs/heads/master | 2021-06-15T22:08:46.487573 | 2017-05-02T19:23:33 | 2017-05-02T19:23:33 | 73,114,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,196 | py | import os
import sys
import json
import time
import getopt
import shutil
import ctypes
import zipfile
import hashlib
import paramiko
import functools
import subprocess
from collections import defaultdict
_NEW_ = "*new*"
_UPD_ = "*upd*"
_DEL_ = "*del*"
_ZIP_ = '.zip'
_JSON_ = '.json'
_PKGVER_ = 'version'+ _JSON_
_CHANGES_ = 'changes'+ _JSON_
_FILE_MAP_ = 'filemap'+ _JSON_
_SETTINGS_ = 'settings'+ _JSON_
__home = os.getcwd()
__temp_path = os.path.join(__home, 'temp')
__pkgver_path = os.path.join(__home, _PKGVER_)
__settings_path = os.path.join(__home, _SETTINGS_)
class AllowAnythingPolicy(paramiko.MissingHostKeyPolicy):
def missing_host_key(self, client, hostname, key):
return
def sftp_transcb(filepath, trans, spinner, bytes_so_far, bytes_total):
sys.stdout.write('> sftp %s: %r %s (%d/%d) \r' %
(trans, os.path.basename(filepath), spinner.next(), bytes_so_far, bytes_total))
sys.stdout.flush()
def sftp_rexists(sftp, path):
try:
sftp.stat(path)
return True
except IOError, e:
if e.errno == 2:
return False
return True
def sftp_upload(sftp, local_path, remote_path):
spinner = spinning_cursor()
cb = functools.partial(sftp_transcb, remote_path, 'put', spinner)
remote_stat = sftp.put(local_path, remote_path, callback=cb, confirm=True)
sys.stdout.write("\n")
local_stat = os.stat(local_path)
if remote_stat.st_size == local_stat.st_size:
print '> transfer complete! remote file last modified: %s\n' % (
time.ctime(remote_stat.st_mtime))
return True
else:
        print '> An error occurred during the transfer..'
return False
def sftp_download(sftp, remote_path, local_path):
spinner = spinning_cursor()
cb = functools.partial(sftp_transcb, remote_path, 'get', spinner)
try:
sftp.get(remote_path, local_path, callback=cb)
sys.stdout.write("\n")
return True
except Exception, e:
print "\n> An error occured during donwload"
return False
def ask(question):
# raw_input returns the empty string for "enter"
yes = set(['yes','y', 'ye', ''])
no = set(['no','n'])
sys.stdout.write(question + ' [Y/n] ')
while True:
choice = raw_input().lower()
if choice in yes:
return True
elif choice in no:
return False
else:
sys.stdout.write('Please respond with [Y/n] ')
def Mbox(title, text, style=1):
return ctypes.windll.user32.MessageBoxA(0, text, title, style)
## Styles:
## 0 : OK
## 1 : OK | Cancel
## 2 : Abort | Retry | Ignore
## 3 : Yes | No | Cancel
## 4 : Yes | No
## 5 : Retry | No
## 6 : Cancel | Try Again | Continue
def spinning_cursor():
while True:
for cursor in '|/-\\':
yield cursor
def checksum(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def get_hasver(sftp, rmt_ver_path):
if not sftp_rexists(sftp, rmt_ver_path):
return False
else:
return True
def rmvfile(filepath):
if os.path.isfile(filepath):
os.remove(filepath)
def rmvdir_contents(dirpath):
for the_file in os.listdir(dirpath):
file_path = os.path.join(dirpath, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print "> READ ONLY folder cant be deleted"
print "> %S" % file_path
def reset():
rmvfile(__pkgver_path)
with open(__settings_path) as settings_file:
settings = json.load(settings_file)
watch_folder = settings['watch_folder']
rmvdir_contents(watch_folder)
rmvdir_contents(__temp_path)
def extract(zipath, dest):
filename = os.path.basename(zipath)
print "> Extracting package '%s'" % filename
with zipfile.ZipFile(zipath, "r") as z:
try:
z.extractall(dest)
z.close()
return True
except:
print "> Error on zip extract. \n>'%s'" % filename
return False
def check_integrity(watch_folder):
print '# Checking for file integrity'
fmap = defaultdict(dict)
spinner = spinning_cursor()
filemap_path = os.path.join(watch_folder, _FILE_MAP_)
with open(filemap_path) as fmout:
fmap = json.load(fmout)
for path, csum in fmap['map'].items():
target = os.path.join(watch_folder, path)
if os.path.isfile(target):
localcsum = checksum(target)
same = localcsum == csum
if same:
sys.stdout.write('> %s %s\r' % (csum, spinner.next()))
sys.stdout.flush()
time.sleep(0.002)
else:
print "> Bad checksum: %s %s" % (csum, path)
print "\n> done.."
def checkploy(tempver_path, watch_folder):
changes = defaultdict(dict)
fmap = defaultdict(dict)
changes_path = os.path.join(tempver_path, _CHANGES_)
with open(changes_path) as fileout:
changes = json.load(fileout)
csum = changes['checksum']
filemap_path = os.path.join(tempver_path, _FILE_MAP_)
with open(filemap_path) as fmout:
fmap = json.load(fmout)
pkgzip_path = os.path.join(tempver_path, csum + _ZIP_)
extr_dest = os.path.join(tempver_path, csum)
if not os.path.exists(extr_dest):
os.makedirs(extr_dest)
if extract(pkgzip_path, extr_dest):
print '> Checking for file integrity at temp level'
for path, csum in fmap['map'].items():
target = os.path.join(extr_dest, path)
if os.path.isfile(target):
localcsum = checksum(target)
same = localcsum == csum
if same:
sys.stdout.write('> %s\r' % csum)
sys.stdout.flush()
else:
print "> Bad checksum: local: %s >> %s %s" % (csum, localcsum, path)
return False
print '> checksum passed.. '
rmvdir_contents(extr_dest)
os.rmdir(extr_dest)
print "\n# Deploying version package"
if not extract(pkgzip_path, watch_folder):
return False
for path, tag in changes['pkg'].items():
if tag == _DEL_:
abspath = os.path.join(watch_folder, path)
rmvfile(abspath)
return True
def deploy_pkg(pkgpath, watch_folder, version):
filename = os.path.basename(pkgpath)
fn, fe = os.path.splitext(filename)
extr_dest = os.path.join(__temp_path, fn)
if not os.path.exists(extr_dest):
os.makedirs(extr_dest)
if not extract(pkgpath, extr_dest):
return False
if not checkploy(extr_dest, watch_folder):
return False
else:
pkgver = defaultdict(dict)
pkgver['version'] = version
with open(__pkgver_path, 'w') as pvout:
json.dump(pkgver, pvout)
print "> Package deployed. Current version: %d\n" % version
return True
def analyze(watch_folder, ver):
changes = defaultdict(dict)
changes_path = os.path.join(watch_folder, _CHANGES_)
with open(changes_path) as fileout:
changes = json.load(fileout)
if ver != changes['version']:
print '> package version not the same [%d.zip]' % ver
return False
zipname = changes['checksum']+ _ZIP_
pkgzip = os.path.join(watch_folder, zipname)
if not os.path.isfile(pkgzip):
print "> package %s not found in '%s.zip'" % (changes['checksum'], ver)
return False
    extract(pkgzip, watch_folder)  # extract() takes (zipath, dest); the extra zipname argument was a bug
rmvfile(pkgzip)
print '> root folder: %s' % watch_folder
for path, tag in changes['pkg'].items():
print "> %s %s" % (tag, path)
if tag == _DEL_:
abspath = os.path.join(watch_folder, path)
rmvfile(abspath)
rmvfile(changes_path)
def run_cmd(exe, arg):
print "> running defined command.."
if os.path.isfile(exe):
p = subprocess.Popen([exe, arg])
else:
print "> Cant find defined 'exe_to_run' path"
print '> Please check your settings'
# --- main
def main_puller():
_pkgver = defaultdict(dict)
if not os.path.isfile(__settings_path):
        print "> Can't find required file '%s'" % _SETTINGS_
sys.exit()
# -- load settings variables
with open(__settings_path) as settings_file:
_settings = json.load(settings_file)
_address = _settings['ftp']['address']
_username = _settings['ftp']['username']
_password = _settings['ftp']['password']
_keep_pkg = _settings['keep_pkg']
_ftp_home = _settings['ftp_home']
_app_title = _settings['app_title']
_watch_folder = _settings['watch_folder']
_exe_to_run = _settings['exe_to_run']
_exe_argument = _settings['exe_argument']
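    # For reference, the settings.json this reads is expected to look like the
    # following (structure inferred from the keys above; values are placeholders):
    # {
    #     "ftp": {"address": "...", "username": "...", "password": "..."},
    #     "keep_pkg": true,
    #     "ftp_home": "/remote/package/dir",
    #     "app_title": "My App",
    #     "watch_folder": "C:/path/to/deploy",
    #     "exe_to_run": "C:/path/to/app.exe",
    #     "exe_argument": "--flag"
    # }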
if not os.path.exists(_watch_folder):
print "> Defined watch folder not found!"
print "> %s" % _watch_folder
print "\n> Please check your settings"
errmsg = "> Defined watch folder not found!\n> %s\n> Please check your settings" % _watch_folder
Mbox( "Settings Problem", str(errmsg), 0)
sys.exit()
if not os.path.exists(__temp_path):
os.makedirs(__temp_path)
if not os.path.isfile(__pkgver_path):
print "> Cant find '%s'. Assumed first pull setting version to 0" % _PKGVER_
_pkgver['version'] = 0
else:
with open(__pkgver_path) as pkgver_file:
_pkgver = json.load(pkgver_file)
print "# Current version: %d" % _pkgver['version']
user_ok = False
box_title = "Package Update for '%s'" % _app_title
box_msg = "An update has been detected. Click 'OK' to download or 'Cancel' to ingore."
print '# Connecting to server \'%s\'' % _address
client = paramiko.SSHClient()
client.set_missing_host_key_policy(AllowAnythingPolicy())
client.connect(_address, username=_username, password=_password)
_sftp = client.open_sftp()
print '# Checking for updates..'
version = _pkgver['version']
while True:
version += 1
verzip = str(version)+ _ZIP_
remote_verpkg = os.path.join(_ftp_home, verzip)
if not get_hasver(_sftp, remote_verpkg):
print '> No updates detected..'
break
else:
if not user_ok:
if Mbox( str(box_title), box_msg) == 1:
user_ok = True
else:
sys.exit()
pkgver_zip = os.path.join(__temp_path, verzip)
if not sftp_download(_sftp, remote_verpkg, pkgver_zip):
sys.exit()
if not deploy_pkg(pkgver_zip, _watch_folder, version):
sys.exit()
_sftp.close()
client.close()
run_cmd(_exe_to_run, _exe_argument)
def print_help():
print 'puller.py [-h]'
def main(argv):
try:
opts,args = getopt.getopt(argv,"hxp", ['help', 'reset', 'pull'])
except getopt.GetoptError:
print_help()
for opt, arg in opts:
if opt == '-h':
print_help()
sys.exit()
elif opt in ("-x", "--reset"):
reset() # for debugging only
sys.exit()
elif opt in ("-p", "--pull"):
print ''
main_puller()
if __name__ == "__main__":
main(sys.argv[1:]) | [
"[email protected]"
] | |
c9baa64f4b82950354c16fb73ca806ec1d27be5f | a9c3be4041c2a93fcdf3407a03ca33ceb839dc12 | /Hostel Management Project/hostel_mgmt/right_frame.py | 25d8a4c86dbe4ae8f65cebd1bd6f2edf351c440e | [] | no_license | Soham-Chatterjee/Hostel_mgmt | 082bcbeb76fe06ddf606b517c8d690ebb7184014 | 8225181ff8cc1fbcf6c9cf35087d9bd6123ce5eb | refs/heads/main | 2023-02-20T17:15:36.119611 | 2021-01-23T16:25:35 | 2021-01-23T16:25:35 | 317,414,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,947 | py | import tkinter
from hostel_mgmt import root
from PIL import Image, ImageTk
from hostel_mgmt.design import right_frame
def on_enter(btn):
btn['bg'] = "bisque"
def on_leave(btn):
btn['bg'] = "bisque3"
image_load = Image.open("./Fee Chart.png")
image_render = ImageTk.PhotoImage(image_load)
pos = {'x': 12, 'btn1': 10, 'btn2': 60, 'btn3': 110, 'btn4': 160, 'btn5': 210, 'btn6': 260, 'btn7':310}
btn1 = tkinter.Button(right_frame, text="Button 1", bg="bisque3", activebackground="bisque3", relief="flat", font=("Courier New", 12), cursor="hand2", width=26)
btn2 = tkinter.Button(right_frame, text="Button 2", bg="bisque3", activebackground="bisque3", relief="flat", font=("Courier New", 12), cursor="hand2", width=26)
btn3 = tkinter.Button(right_frame, text="Button 3", bg="bisque3", activebackground="bisque3", relief="flat", font=("Courier New", 12), cursor="hand2", width=26)
btn4 = tkinter.Button(right_frame, text="Button 4", bg="bisque3", activebackground="bisque3", relief="flat", font=("Courier New", 12), cursor="hand2", width=26)
btn5 = tkinter.Button(right_frame, text="Button 4", bg="bisque3", activebackground="bisque3", relief="flat", font=("Courier New", 12), cursor="hand2", width=26)
img = tkinter.Label(right_frame, image=image_render)
img.image = image_render
img.config(bg='bisque4')
img.place(x=5, y=435)
btn_list = [btn1, btn2, btn3, btn4, btn5]
btn1.bind("<Enter>", lambda event: on_enter(btn1))
btn1.bind("<Leave>", lambda event: on_leave(btn1))
btn2.bind("<Enter>", lambda event: on_enter(btn2))
btn2.bind("<Leave>", lambda event: on_leave(btn2))
btn3.bind("<Enter>", lambda event: on_enter(btn3))
btn3.bind("<Leave>", lambda event: on_leave(btn3))
btn4.bind("<Enter>", lambda event: on_enter(btn4))
btn4.bind("<Leave>", lambda event: on_leave(btn4))
btn5.bind("<Enter>", lambda event: on_enter(btn5))
btn5.bind("<Leave>", lambda event: on_leave(btn5))
| [
"[email protected]"
] | |
36fd9544ce7e5544264fcd2e31e7b8e4bac2fe1b | 70caddd0199b026b9fae11bc8da4bbc875b3a737 | /test1/sentence_test.py | 1b40fd4ee85bc018354c8ad8ba5cfc080b841880 | [] | no_license | zzxzzg/python | 997e36fc9e8676dfb108082d273c03eb40eb2fae | d58d142ea5d5fb7f1197cf2981ece85909ddb0ba | refs/heads/master | 2020-06-13T12:39:53.612822 | 2016-12-20T10:35:18 | 2016-12-20T10:35:18 | 75,379,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,042 | py | #! /usr/bin/env python3
# use elif instead of "else if"; braces are dropped and indentation delimits code blocks
# if condition_1:
#     statement_block_1
# elif condition_2:
#     statement_block_2
# else:
#     statement_block_3
# note the colons!!!
# while condition:
#     statements
# also, there is no do..while loop in Python
# while can also be paired with an else clause, which runs once when the condition stops holding
count = 0
while count < 5:
print (count, " 小于 5")
count = count + 1
else:
print (count, " 大于或等于 5")
# a Python for loop can iterate over the items of any sequence, such as a list or a string
# the general form of a for loop is (a short demo follows below):
# for <variable> in <sequence>:
#     <statements>
# else:
#     <statements>
# to iterate over a sequence of numbers, use the built-in range(): for i in range(5) or for i in range(5,9)
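# a small demonstration of for/range with the else clause described above
for i in range(5, 9):
    print(i, "is in the range")
else:
    print("the loop finished without hitting a break")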
# pass is Python's empty statement; it exists to keep the program structurally complete
# pass does nothing and is typically used as a placeholder, as in the example below
while True:
pass
| [
"[email protected]"
] | |
445508632ba9ca0cb62f6858ef1bb3b3b3ba0625 | f98dc868a74dc7a9b128a2d4ce2c53224dd11f63 | /intermediate_source/pipeline_tutorial.py | 49b37b1f564e74c3f8bc8ff8d8be0e03ec5f33b8 | [
"BSD-3-Clause"
] | permissive | walkingpanda/tutorials | ebe69e3fa8818b596922136273c74f6b0ab3b479 | 973193b4e28f4c277e14dcd93fd199f26cd1038b | refs/heads/master | 2023-05-29T23:34:27.635650 | 2021-06-14T21:05:10 | 2021-06-14T21:05:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,273 | py | """
Training Transformer models using Pipeline Parallelism
======================================================
**Author**: `Pritam Damania <https://github.com/pritamdamania87>`_
This tutorial demonstrates how to train a large Transformer model across
multiple GPUs using pipeline parallelism. This tutorial is an extension of the
`Sequence-to-Sequence Modeling with nn.Transformer and TorchText <https://pytorch.org/tutorials/beginner/transformer_tutorial.html>`__ tutorial
and scales up the same model to demonstrate how pipeline parallelism can be
used to train Transformer models.
Prerequisites:
* `Pipeline Parallelism <https://pytorch.org/docs/stable/pipeline.html>`__
* `Sequence-to-Sequence Modeling with nn.Transformer and TorchText <https://pytorch.org/tutorials/beginner/transformer_tutorial.html>`__
"""
######################################################################
# Define the model
# ----------------
#
######################################################################
# In this tutorial, we will split a Transformer model across two GPUs and use
# pipeline parallelism to train the model. The model is exactly the same model
# used in the `Sequence-to-Sequence Modeling with nn.Transformer and TorchText
# <https://pytorch.org/tutorials/beginner/transformer_tutorial.html>`__ tutorial,
# but is split into two stages. The largest number of parameters belongs to the
# `nn.TransformerEncoder <https://pytorch.org/docs/stable/generated/torch.nn.TransformerEncoder.html>`__ layer.
# The `nn.TransformerEncoder <https://pytorch.org/docs/stable/generated/torch.nn.TransformerEncoder.html>`__
# itself consists of ``nlayers`` of `nn.TransformerEncoderLayer <https://pytorch.org/docs/stable/generated/torch.nn.TransformerEncoderLayer.html>`__.
# As a result, our focus is on ``nn.TransformerEncoder`` and we split the model
# such that half of the ``nn.TransformerEncoderLayer`` are on one GPU and the
# other half are on another. To do this, we pull out the ``Encoder`` and
# ``Decoder`` sections into separate modules and then build an nn.Sequential
# representing the original Transformer module.
import sys
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import tempfile
from torch.nn import TransformerEncoder, TransformerEncoderLayer
if sys.platform == 'win32':
print('Windows platform is not supported for pipeline parallelism')
sys.exit(0)
if torch.cuda.device_count() < 2:
print('Need at least two GPU devices for this tutorial')
sys.exit(0)
class Encoder(nn.Module):
def __init__(self, ntoken, ninp, dropout=0.5):
super(Encoder, self).__init__()
self.pos_encoder = PositionalEncoding(ninp, dropout)
self.encoder = nn.Embedding(ntoken, ninp)
self.ninp = ninp
self.init_weights()
def init_weights(self):
initrange = 0.1
self.encoder.weight.data.uniform_(-initrange, initrange)
def forward(self, src):
# Need (S, N) format for encoder.
src = src.t()
src = self.encoder(src) * math.sqrt(self.ninp)
return self.pos_encoder(src)
class Decoder(nn.Module):
def __init__(self, ntoken, ninp):
super(Decoder, self).__init__()
self.decoder = nn.Linear(ninp, ntoken)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.decoder.bias.data.zero_()
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, inp):
# Need batch dimension first for output of pipeline.
return self.decoder(inp).permute(1, 0, 2)
######################################################################
# ``PositionalEncoding`` module injects some information about the
# relative or absolute position of the tokens in the sequence. The
# positional encodings have the same dimension as the embeddings so that
# the two can be summed. Here, we use ``sine`` and ``cosine`` functions of
# different frequencies.
class PositionalEncoding(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + self.pe[:x.size(0), :]
return self.dropout(x)
######################################################################
# Load and batch data
# -------------------
#
######################################################################
# The training process uses Wikitext-2 dataset from ``torchtext``. The
# vocab object is built based on the train dataset and is used to numericalize
# tokens into tensors. Starting from sequential data, the ``batchify()``
# function arranges the dataset into columns, trimming off any tokens remaining
# after the data has been divided into batches of size ``batch_size``.
# For instance, with the alphabet as the sequence (total length of 26)
# and a batch size of 4, we would divide the alphabet into 4 sequences of
# length 6:
#
# .. math::
# \begin{bmatrix}
# \text{A} & \text{B} & \text{C} & \ldots & \text{X} & \text{Y} & \text{Z}
# \end{bmatrix}
# \Rightarrow
# \begin{bmatrix}
# \begin{bmatrix}\text{A} \\ \text{B} \\ \text{C} \\ \text{D} \\ \text{E} \\ \text{F}\end{bmatrix} &
# \begin{bmatrix}\text{G} \\ \text{H} \\ \text{I} \\ \text{J} \\ \text{K} \\ \text{L}\end{bmatrix} &
# \begin{bmatrix}\text{M} \\ \text{N} \\ \text{O} \\ \text{P} \\ \text{Q} \\ \text{R}\end{bmatrix} &
# \begin{bmatrix}\text{S} \\ \text{T} \\ \text{U} \\ \text{V} \\ \text{W} \\ \text{X}\end{bmatrix}
# \end{bmatrix}
#
# These columns are treated as independent by the model, which means that
# the dependence of ``G`` and ``F`` cannot be learned, but allows more
# efficient batch processing.
#
import io
import torch
from torchtext.utils import download_from_url, extract_archive
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
url = 'https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-v1.zip'
test_filepath, valid_filepath, train_filepath = extract_archive(download_from_url(url))
tokenizer = get_tokenizer('basic_english')
vocab = build_vocab_from_iterator(map(tokenizer,
iter(io.open(train_filepath,
encoding="utf8"))))
def data_process(raw_text_iter):
data = [torch.tensor([vocab[token] for token in tokenizer(item)],
dtype=torch.long) for item in raw_text_iter]
return torch.cat(tuple(filter(lambda t: t.numel() > 0, data)))
train_data = data_process(iter(io.open(train_filepath, encoding="utf8")))
val_data = data_process(iter(io.open(valid_filepath, encoding="utf8")))
test_data = data_process(iter(io.open(test_filepath, encoding="utf8")))
device = torch.device("cuda")
def batchify(data, bsz):
# Divide the dataset into bsz parts.
nbatch = data.size(0) // bsz
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, nbatch * bsz)
# Evenly divide the data across the bsz batches.
data = data.view(bsz, -1).t().contiguous()
return data.to(device)
batch_size = 20
eval_batch_size = 10
train_data = batchify(train_data, batch_size)
val_data = batchify(val_data, eval_batch_size)
test_data = batchify(test_data, eval_batch_size)
######################################################################
# Functions to generate input and target sequence
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
######################################################################
# ``get_batch()`` function generates the input and target sequence for
# the transformer model. It subdivides the source data into chunks of
# length ``bptt``. For the language modeling task, the model needs the
# following words as ``Target``. For example, with a ``bptt`` value of 2,
# we'd get the following two Variables for ``i`` = 0:
#
# .. image:: ../_static/img/transformer_input_target.png
#
# It should be noted that the chunks are along dimension 0, consistent
# with the ``S`` dimension in the Transformer model. The batch dimension
# ``N`` is along dimension 1.
#
bptt = 35
def get_batch(source, i):
seq_len = min(bptt, len(source) - 1 - i)
data = source[i:i+seq_len]
target = source[i+1:i+1+seq_len].view(-1)
# Need batch dimension first for pipeline parallelism.
return data.t(), target
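# For instance (illustrative, following the alphabet example above): with
# ``bptt = 2`` and ``i = 0`` on a column [A, B, C, ...], get_batch returns
# data = [A, B] and target = [B, C], i.e. each position's target is the next
# token in the sequence.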
######################################################################
# Model scale and Pipe initialization
# -----------------------------------
#
######################################################################
# To demonstrate training large Transformer models using pipeline parallelism,
# we scale up the Transformer layers appropriately. We use an embedding
# dimension of 4096, hidden size of 4096, 16 attention heads and 12 total
# transformer layers (``nn.TransformerEncoderLayer``). This creates a model with
# **~1.4 billion** parameters.
#
# We need to initialize the `RPC Framework <https://pytorch.org/docs/stable/rpc.html>`__
# since Pipe depends on the RPC framework via `RRef <https://pytorch.org/docs/stable/rpc.html#rref>`__
# which allows for future expansion to cross host pipelining. We need to
# initialize the RPC framework with only a single worker since we're using a
# single process to drive multiple GPUs.
#
# The pipeline is then initialized with 8 transformer layers on one GPU and 8
# transformer layers on the other GPU.
#
# .. note::
# For efficiency purposes we ensure that the ``nn.Sequential`` passed to
# ``Pipe`` only consists of two elements (corresponding to two GPUs), this
# allows the Pipe to work with only two partitions and avoid any
# cross-partition overheads.
ntokens = len(vocab.stoi) # the size of vocabulary
emsize = 4096 # embedding dimension
nhid = 4096 # the dimension of the feedforward network model in nn.TransformerEncoder
nlayers = 12 # the number of nn.TransformerEncoderLayer in nn.TransformerEncoder
nhead = 16 # the number of heads in the multiheadattention models
dropout = 0.2 # the dropout value
from torch.distributed import rpc
tmpfile = tempfile.NamedTemporaryFile()
rpc.init_rpc(
name="worker",
rank=0,
world_size=1,
rpc_backend_options=rpc.TensorPipeRpcBackendOptions(
init_method="file://{}".format(tmpfile.name),
# Specifying _transports and _channels is a workaround and we no longer
# will have to specify _transports and _channels for PyTorch
# versions >= 1.8.1
_transports=["ibv", "uv"],
_channels=["cuda_ipc", "cuda_basic"],
)
)
num_gpus = 2
partition_len = ((nlayers - 1) // num_gpus) + 1
# Add encoder in the beginning.
tmp_list = [Encoder(ntokens, emsize, dropout).cuda(0)]
module_list = []
# Add all the necessary transformer blocks.
for i in range(nlayers):
transformer_block = TransformerEncoderLayer(emsize, nhead, nhid, dropout)
if i != 0 and i % (partition_len) == 0:
module_list.append(nn.Sequential(*tmp_list))
tmp_list = []
device = i // (partition_len)
tmp_list.append(transformer_block.to(device))
# Add decoder in the end.
tmp_list.append(Decoder(ntokens, emsize).cuda(num_gpus - 1))
module_list.append(nn.Sequential(*tmp_list))
from torch.distributed.pipeline.sync import Pipe
# Build the pipeline.
chunks = 8
model = Pipe(torch.nn.Sequential(*module_list), chunks = chunks)
def get_total_params(module: torch.nn.Module):
total_params = 0
for param in module.parameters():
total_params += param.numel()
return total_params
print ('Total parameters in model: {:,}'.format(get_total_params(model)))
######################################################################
# Run the model
# -------------
#
######################################################################
# `CrossEntropyLoss <https://pytorch.org/docs/master/nn.html?highlight=crossentropyloss#torch.nn.CrossEntropyLoss>`__
# is applied to track the loss and
# `SGD <https://pytorch.org/docs/master/optim.html?highlight=sgd#torch.optim.SGD>`__
# implements stochastic gradient descent method as the optimizer. The initial
# learning rate is set to 5.0. `StepLR <https://pytorch.org/docs/master/optim.html?highlight=steplr#torch.optim.lr_scheduler.StepLR>`__ is
# applied to adjust the learning rate through epochs. During the
# training, we use
# `nn.utils.clip_grad_norm\_ <https://pytorch.org/docs/master/nn.html?highlight=nn%20utils%20clip_grad_norm#torch.nn.utils.clip_grad_norm_>`__
# function to scale all the gradient together to prevent exploding.
#
criterion = nn.CrossEntropyLoss()
lr = 5.0 # learning rate
optimizer = torch.optim.SGD(model.parameters(), lr=lr)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.95)
import time
def train():
model.train() # Turn on the train mode
total_loss = 0.
start_time = time.time()
ntokens = len(vocab.stoi)
# Train only for 50 batches to keep script execution time low.
nbatches = min(50 * bptt, train_data.size(0) - 1)
for batch, i in enumerate(range(0, nbatches, bptt)):
data, targets = get_batch(train_data, i)
optimizer.zero_grad()
# Since the Pipe is only within a single host and process the ``RRef``
# returned by forward method is local to this node and can simply
# retrieved via ``RRef.local_value()``.
output = model(data).local_value()
# Need to move targets to the device where the output of the
# pipeline resides.
loss = criterion(output.view(-1, ntokens), targets.cuda(1))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optimizer.step()
total_loss += loss.item()
log_interval = 10
if batch % log_interval == 0 and batch > 0:
cur_loss = total_loss / log_interval
elapsed = time.time() - start_time
print('| epoch {:3d} | {:5d}/{:5d} batches | '
'lr {:02.2f} | ms/batch {:5.2f} | '
'loss {:5.2f} | ppl {:8.2f}'.format(
epoch, batch, nbatches // bptt, scheduler.get_lr()[0],
elapsed * 1000 / log_interval,
cur_loss, math.exp(cur_loss)))
total_loss = 0
start_time = time.time()
def evaluate(eval_model, data_source):
eval_model.eval() # Turn on the evaluation mode
total_loss = 0.
ntokens = len(vocab.stoi)
# Evaluate only for 50 batches to keep script execution time low.
nbatches = min(50 * bptt, data_source.size(0) - 1)
with torch.no_grad():
for i in range(0, nbatches, bptt):
data, targets = get_batch(data_source, i)
output = eval_model(data).local_value()
output_flat = output.view(-1, ntokens)
# Need to move targets to the device where the output of the
# pipeline resides.
total_loss += len(data) * criterion(output_flat, targets.cuda(1)).item()
return total_loss / (len(data_source) - 1)
######################################################################
# Loop over epochs. Save the model if the validation loss is the best
# we've seen so far. Adjust the learning rate after each epoch.
best_val_loss = float("inf")
epochs = 3 # The number of epochs
best_model = None
for epoch in range(1, epochs + 1):
epoch_start_time = time.time()
train()
val_loss = evaluate(model, val_data)
print('-' * 89)
print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
val_loss, math.exp(val_loss)))
print('-' * 89)
if val_loss < best_val_loss:
best_val_loss = val_loss
best_model = model
scheduler.step()
######################################################################
# Evaluate the model with the test dataset
# -----------------------------------------
#
######################################################################
# Apply the best model to check the result with the test dataset.
test_loss = evaluate(best_model, test_data)
print('=' * 89)
print('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(
test_loss, math.exp(test_loss)))
print('=' * 89)
######################################################################
# Output
# ------
#
######################################################################
#.. code-block:: py
#
# Total parameters in model: 1,847,087,215
# | epoch 1 | 10/ 50 batches | lr 5.00 | ms/batch 2387.45 | loss 42.16 | ppl 2036775646369743616.00
# | epoch 1 | 20/ 50 batches | lr 5.00 | ms/batch 2150.93 | loss 48.24 | ppl 891334049215401558016.00
# | epoch 1 | 30/ 50 batches | lr 5.00 | ms/batch 2155.23 | loss 34.66 | ppl 1125676483188404.62
# | epoch 1 | 40/ 50 batches | lr 5.00 | ms/batch 2158.42 | loss 38.87 | ppl 76287208340888368.00
# -----------------------------------------------------------------------------------------
# | end of epoch 1 | time: 119.65s | valid loss 2.95 | valid ppl 19.15
# -----------------------------------------------------------------------------------------
# | epoch 2 | 10/ 50 batches | lr 4.51 | ms/batch 2376.16 | loss 34.92 | ppl 1458001430957104.00
# | epoch 2 | 20/ 50 batches | lr 4.51 | ms/batch 2160.96 | loss 34.75 | ppl 1232463826541886.50
# | epoch 2 | 30/ 50 batches | lr 4.51 | ms/batch 2160.66 | loss 28.10 | ppl 1599598251136.51
# | epoch 2 | 40/ 50 batches | lr 4.51 | ms/batch 2160.07 | loss 20.25 | ppl 621174306.77
# -----------------------------------------------------------------------------------------
# | end of epoch 2 | time: 119.76s | valid loss 0.87 | valid ppl 2.38
# -----------------------------------------------------------------------------------------
# | epoch 3 | 10/ 50 batches | lr 4.29 | ms/batch 2376.49 | loss 13.20 | ppl 537727.23
# | epoch 3 | 20/ 50 batches | lr 4.29 | ms/batch 2160.12 | loss 10.98 | ppl 58548.58
# | epoch 3 | 30/ 50 batches | lr 4.29 | ms/batch 2160.05 | loss 12.01 | ppl 164152.79
# | epoch 3 | 40/ 50 batches | lr 4.29 | ms/batch 2160.03 | loss 10.63 | ppl 41348.00
# -----------------------------------------------------------------------------------------
# | end of epoch 3 | time: 119.76s | valid loss 0.78 | valid ppl 2.17
# -----------------------------------------------------------------------------------------
# =========================================================================================
# | End of training | test loss 0.69 | test ppl 1.99
# =========================================================================================
| [
"[email protected]"
] | |
bb4bfb31dbd4a51179b6d3da13cc9b01c3d47928 | a691ae93f31f7fac56027c9f5205731ea1d63c66 | /euler/euler1/euler1.py | b6bc320e2955d56cfc1ca2eb988f97a3f83d111c | [] | no_license | KnoxMakers/CodingWorkshops | c5cbb4ac0dd8abd7a68efc3b8eb3ad1f6b107658 | 12a0698f31d05c4e63d964dd3f9bc134086636e5 | refs/heads/master | 2021-01-19T07:56:27.199119 | 2018-12-05T23:57:33 | 2018-12-05T23:57:33 | 7,244,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | #!/usr/bin/python
x = range(1,1000)
woot = 0
for i in x:
if not i%3 or not i%5:
print i, "is a multiple of 3 or 5"
#woot += i
woot = woot + i
print "Total:",woot
| [
"[email protected]"
] | |
20f03c830a4454b43d9ab80e86ffc4f75dbfde63 | 370090ab025824b5d38618a6c845fb5f293efe38 | /manage.py | 070e489cdb724e330a0ddd103793cf82b814ab50 | [
"MIT"
] | permissive | Antavio/Moringa-Project-Awards | 4bcb1d325226cc9dcd239556c085e8d8d47491ab | 941d6ca0e8978cbb54bb8858ac02bed34fd73306 | refs/heads/master | 2021-09-08T04:06:14.895597 | 2019-05-29T07:37:46 | 2019-05-29T07:37:46 | 188,390,930 | 0 | 1 | MIT | 2021-09-08T01:00:56 | 2019-05-24T09:12:00 | Python | UTF-8 | Python | false | false | 812 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Project_Awards.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
d1e5565535aa6454248bea2abdc3892524a84c42 | 36c95182453f97bc5587ac8f5f763641b9739dce | /src/video/urls.py | a32614ee211a03c78eb98a09f2c48450c2ca6018 | [] | no_license | DreamfoxTeam/MyDream.API.Video | ea8d4194ccbc2f4f247645cf116070bc44b4c4b8 | 72abd0258a8e0a46cc337210b32c69720f81a858 | refs/heads/master | 2020-04-05T02:21:45.808768 | 2018-11-25T13:37:15 | 2018-11-25T13:37:15 | 156,474,229 | 0 | 0 | null | 2018-11-25T13:37:16 | 2018-11-07T01:44:14 | Python | UTF-8 | Python | false | false | 825 | py | """video URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url,include
urlpatterns = [
url(r'^', include('api.urls')),
url(r'^admin/', admin.site.urls),
]
| [
"[email protected]"
] | |
7ea45cbd4ae13f3f6d4ca94d080968135b93965b | af9c3ca608bd3c0b9aa664756fad5f4da495a163 | /second.py | af911857c52401f822e540b0d1b302e42872dfc8 | [] | no_license | 1186141415/COVID-19 | 6f3e397fa73f1b6e845a8db69c02300aa5157e7e | 52f4788b563dae776059a621125e605051175cdf | refs/heads/master | 2022-09-27T16:58:27.788021 | 2020-06-06T04:21:54 | 2020-06-06T04:21:54 | 255,866,776 | 1 | 3 | null | 2020-06-06T03:23:58 | 2020-04-15T09:23:55 | Jupyter Notebook | UTF-8 | Python | false | false | 10,964 | py | import pandas as pd
# import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as dates
import matplotlib.ticker as ticker
import datetime
# load the data
today_world = pd.read_csv("today_world_2020_04_03.csv")
name_dict = {'date':'日期','name':'名称','id':'编号','lastUpdateTime':'更新时间',
'today_confirm':'当日新增确诊','today_suspect':'当日新增疑似',
'today_heal':'当日新增治愈','today_dead':'当日新增死亡',
'today_severe':'当日新增重症','today_storeConfirm':'当日现存确诊',
'total_confirm':'累计确诊','total_suspect':'累计疑似',
'total_heal':'累计治愈','total_dead':'累计死亡','total_severe':'累计重症',
'total_input':'累计输入病例','today_input':'当日输入病例'}
# rename the columns to their Chinese labels
today_world.rename(columns=name_dict,inplace=True) # inplace=True modifies the DataFrame in place
# handle missing values: recompute current confirmed cases from the cumulative columns
today_world['当日现存确诊'] = today_world['累计确诊']-today_world['累计治愈']-today_world['累计死亡']
print(today_world.head())
print(today_world.info())  # basic information about the data
print(today_world.describe())  # by default only numeric columns are summarized
# proportion of missing values per column
today_world_nan = today_world.isnull().sum()/len(today_world)
# format as percentages
print(today_world_nan.apply(lambda x: format(x, '.1%')) )
# compute the case fatality rate, keeping two decimal places
today_world['病死率'] = (today_world['累计死亡']/today_world['累计确诊']).apply(lambda x: format(x, '.2f'))
# convert the fatality-rate column to float
today_world['病死率'] = today_world['病死率'].astype('float')
# sort by fatality rate in descending order
today_world.sort_values('病死率',ascending=False,inplace=True)
print(today_world.head(10))  # the ten countries with the highest fatality rate
# use the country name as the index
today_world.set_index('名称',inplace=True)
print(today_world.head(3))
print(today_world.loc['中国'])  # a list of names can be passed to select several countries at once
# current top-10 countries by cumulative confirmed cases
world_top10 = today_world.sort_values(['累计确诊'],ascending=False)[:10]
world_top10 = world_top10[['累计确诊','累计死亡','病死率']]
print(world_top10)
# plotting setup
plt.rcParams['font.sans-serif']=['SimHei'] # render Chinese characters correctly
plt.rcParams['figure.dpi'] = 120 # resolution for all figures
# bar charts
world_top10.sort_values('累计确诊').plot.barh(subplots=True,layout=(1,3),sharex=False,
figsize=(7,4),legend=False,sharey=True)
plt.tight_layout() # adjust subplot spacing
plt.show()
# load the data
today_province = pd.read_csv("today_province_2020_04_03.csv")
# dictionary mapping column names to Chinese labels
name_dict = {'date':'日期','name':'名称','id':'编号','lastUpdateTime':'更新时间',
'today_confirm':'当日新增确诊','today_suspect':'当日新增疑似',
'today_heal':'当日新增治愈','today_dead':'当日新增死亡',
'today_severe':'当日新增重症','today_storeConfirm':'当日现存确诊',
'total_confirm':'累计确诊','total_suspect':'累计疑似',
'total_heal':'累计治愈','total_dead':'累计死亡','total_severe':'累计重症',
'total_input':'累计输入病例','today_input':'当日输入病例'}
# rename the columns
today_province.rename(columns=name_dict,inplace=True) # inplace=True modifies the original object
print(today_province.head())
print(today_province.info())  # basic information about the data
print(today_province.describe())  # summary statistics of the numeric columns
# compute each province's current confirmed cases
today_province['当日现存确诊'] = today_province['累计确诊']-today_province['累计治愈']-today_province['累计死亡']
# use the province name as the index
today_province.set_index('名称',inplace=True)
print(today_province.info())
# top-10 regions nationwide by new confirmed cases (the variable new_top6 actually holds these ten regions)
new_top6 = today_province['当日新增确诊'].sort_values(ascending=False)[:10]
print(new_top6)
# draw a bar chart and a pie chart
fig,ax = plt.subplots(1,2,figsize=(10,5))
new_top6.sort_values(ascending=True).plot.barh(fontsize=10,ax=ax[0])
new_top6.plot.pie(autopct='%.1f%%',fontsize=10,ax=ax[1])
plt.ylabel('')
plt.title('全国新增确诊top10地区',size=15)
plt.show()
# top-10 provinces nationwide by current confirmed cases
store_top10 = today_province['当日现存确诊'].sort_values(ascending=False)[:10]
print(store_top10)
# draw a bar chart
store_top10.sort_values(ascending=True).plot.barh(fontsize=10)
plt.title('全国现存确诊top10地区',size=15)
plt.show()
# exploratory analysis of nationwide historical data
# load the data
alltime_china = pd.read_csv("alltime_China_2020_04_03.csv")
# dictionary mapping column names to Chinese labels
name_dict = {'date':'日期','name':'名称','id':'编号','lastUpdateTime':'更新时间',
'today_confirm':'当日新增确诊','today_suspect':'当日新增疑似',
'today_heal':'当日新增治愈','today_dead':'当日新增死亡',
'today_severe':'当日新增重症','today_storeConfirm':'当日现存确诊',
'total_confirm':'累计确诊','total_suspect':'累计疑似',
'total_heal':'累计治愈','total_dead':'累计死亡','total_severe':'累计重症',
'total_input':'累计输入病例','today_input':'当日输入病例'}
# rename the columns
alltime_china.rename(columns=name_dict,inplace=True)
print(alltime_china.head())
print(alltime_china.info())
print(alltime_china.describe())
# handle missing values
# compute the current confirmed cases for each day
alltime_china['当日现存确诊'] = alltime_china['累计确诊']-alltime_china['累计治愈']-alltime_china['累计死亡']
# drop the update-time and new-severe-cases columns
alltime_china.drop(['更新时间','当日新增重症'],axis=1,inplace=True)
print(alltime_china.info())
# convert the date column to datetime
alltime_china['日期'] = pd.to_datetime(alltime_china['日期'])
# set the date as the index
alltime_china.set_index('日期',inplace=True) # alternatively: pd.read_csv("./input/alltime_China_2020_03_27.csv",parse_dates=['date'],index_col='date')
print(alltime_china.index)
# example: select all of January 2020
print(alltime_china.loc['2020-01'])
# line plot of the time-series data
fig, ax = plt.subplots(figsize=(8,4))
alltime_china.plot(marker='o',ms=2,lw=1,ax=ax)
ax.xaxis.set_major_locator(dates.MonthLocator()) # tick spacing
ax.xaxis.set_major_formatter(dates.DateFormatter('%b')) # date format
fig.autofmt_xdate() # auto-rotate the date labels
# adjust the legend position
plt.legend(bbox_to_anchor = [1,1])
plt.title('全国新冠肺炎数据折线图',size=15)
plt.ylabel('人数')
plt.grid(axis='y')
plt.box(False)
plt.show()
# line plot of daily new confirmed cases
fig, ax = plt.subplots(figsize=(8,4))
alltime_china['当日新增确诊'].plot(ax=ax, style='-',lw=1,color='c',marker='o',ms=3)
ax.xaxis.set_major_locator(dates.MonthLocator()) # tick spacing
ax.xaxis.set_major_formatter(dates.DateFormatter('%b')) # date format
fig.autofmt_xdate() # auto-rotate the date labels
plt.title('全国新冠肺炎新增确诊病例折线图',size=15)
plt.ylabel('人数')
plt.grid(axis='y')
plt.box(False)
plt.show()
# exploratory analysis of per-country historical data
# load the data
alltime_world = pd.read_csv("alltime_world_2020_04_04.csv")
# dictionary mapping column names to Chinese labels
name_dict = {'date':'日期','name':'名称','id':'编号','lastUpdateTime':'更新时间',
'today_confirm':'当日新增确诊','today_suspect':'当日新增疑似',
'today_heal':'当日新增治愈','today_dead':'当日新增死亡',
'today_severe':'当日新增重症','today_storeConfirm':'当日现存确诊',
'total_confirm':'累计确诊','total_suspect':'累计疑似',
'total_heal':'累计治愈','total_dead':'累计死亡','total_severe':'累计重症',
'total_input':'累计输入病例','today_input':'当日输入病例'}
# rename the columns
alltime_world.rename(columns=name_dict,inplace=True)
print(alltime_world.head())
print(alltime_world.info())  # basic information about the data
print(alltime_world.describe())
# convert the date column to datetime
alltime_world['日期'] = pd.to_datetime(alltime_world['日期'])
# compute current confirmed cases
alltime_world['当日现存确诊'] = alltime_world['累计确诊']-alltime_world['累计治愈']-alltime_world['累计死亡']
print(alltime_world.info())
# unique country names; wrap in len() to count them
alltime_world['名称'].unique()
# count how many countries report data on each day, i.e. the 20 days with the most reporting countries
alltime_world['日期'].value_counts().head(20)
# set the date as the index
alltime_world.set_index('日期',inplace=True)
# the statistics for the most recent days are incomplete, so keep only data through 2020-03-31
alltime_world = alltime_world.loc[:'2020-03-31']
# groupby creates a hierarchical (multi-level) index
data = alltime_world.groupby(['日期','名称']).mean()
print(data.head())
# select a subset of countries
data_part = data.loc(axis=0)[:,['中国','日本','韩国','美国','意大利','英国','西班牙','德国']]
print(data_part.head())
# move the country level of the index back to a column
data_part.reset_index('名称',inplace=True)
print(data_part.head())
# line plot of cumulative confirmed cases for several countries
fig, ax = plt.subplots(figsize=(8,4))
data_part['2020-02':].groupby('名称')['累计确诊'].plot(legend=True,marker='o',ms=3,lw=1)
ax.xaxis.set_major_locator(dates.MonthLocator()) # tick spacing
ax.xaxis.set_major_formatter(dates.DateFormatter('%b')) # date format
fig.autofmt_xdate() # auto-rotate the date labels
plt.title('各国新冠肺炎累计确诊病例折线图',size=15)
plt.ylabel('人数')
plt.grid(axis='y')
plt.box(False)
plt.legend(bbox_to_anchor = [1,1])
plt.show()
# line plot of daily new confirmed cases by country
fig, ax = plt.subplots(figsize=(8,4))
data_part['2020-03':'2020-03-29'].groupby('名称')['当日新增确诊'].plot(legend=True,marker='o',ms=3,lw=1)
ax.xaxis.set_major_locator(dates.MonthLocator()) # tick spacing
ax.xaxis.set_major_formatter(dates.DateFormatter('%b')) # date format
fig.autofmt_xdate() # auto-rotate the date labels
plt.title('各国新冠肺炎新增确诊病例折线图',size=15)
plt.ylabel('人数')
plt.grid(axis='y')
plt.box(False)
plt.legend(bbox_to_anchor = [1,1])
plt.show()
# line plot of Japan's COVID-19 data
japan = alltime_world[alltime_world['名称']=='日本']
fig, ax = plt.subplots(figsize=(8,4))
japan['累计确诊'].plot(ax=ax, fontsize=10, style='-',lw=1,color='c',marker='o',ms=3,legend=True)
ax.set_ylabel('人数', fontsize=10)
ax1 = ax.twinx()
ax1.bar(japan.index, japan['当日新增确诊'])
ax1.xaxis.set_major_locator(dates.DayLocator(interval = 5))
ax1.xaxis.set_major_formatter(dates.DateFormatter('%b %d'))
ax1.legend(['当日新增确诊'],loc='upper left',bbox_to_anchor=(0.001, 0.9))
plt.grid(axis='y')
plt.box(False)
plt.title('日本新冠肺炎疫情折线图',size=15)
plt.show()
| [
"[email protected]"
] | |
419e7e0ab8339ade64f7a829744f7147b5e4a0f3 | 72e3cb4dbc09423433040bf0ef56c07b8539c136 | /6/7.py | c39351cdf3d3ae53bf24f4ced3a453606d6ef94b | [] | no_license | refanr/2020 | 50cefe20519f2693d2133181a718046734f2ca3d | 51ae536f21f0c5b20dc067180d54392a09b3d5cb | refs/heads/master | 2023-01-04T21:38:08.327450 | 2020-10-20T14:48:30 | 2020-10-20T14:48:30 | 292,705,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | my_int = int(input('Give me an int >= 0: '))
# Fill in the missing code
working_int = my_int
quotient = 1
bin_str = ''
while quotient > 0:
if working_int % 2:
bin_str += '1'
else:
bin_str += '0'
quotient = working_int // 2
working_int = quotient
bin_str = bin_str[::-1]
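# optional sanity check: Python's built-in bin() yields the same digits
assert bin_str == bin(my_int)[2:]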
print("The binary of {} is {}".format(my_int,bin_str)) | [
"[email protected]"
] | |
6fe67275ed3c74ae869826a226fcfe2957de6f5b | 0aeb4ce84990053523a95b7d260bb89ea8958de4 | /Python_Labs/Remote File Inclusion/app/RFI.py | 5dba120b7f28b83d3f554958710598fa90ba9d66 | [] | no_license | Helmanso/Lplatform_All_Labs | 52474256b73662edf3e72c2be75345ac79162764 | 963438e7f0f55130a2241bd7124b8b9c3563277a | refs/heads/master | 2022-12-08T02:43:10.359270 | 2020-08-29T14:11:48 | 2020-08-29T14:11:48 | 288,520,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | from flask import Flask, request, url_for, render_template, redirect
import datetime, requests, os, validators
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
app = Flask(__name__, static_url_path='/static', template_folder='.', static_folder='static')
app.config['DEBUG'] = True
@app.route("/")
def start():
return render_template("index.html")
@app.route("/cmd", methods=['POST'])
def cmd():
filename = request.form['filename']
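    # NOTE: fetching a user-controlled URL and eval()-ing the response below is
    # the intentional Remote File Inclusion vulnerability this lab demonstrates;
    # never do this outside a training environment.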
try:
if "http" not in str(urlparse(filename).scheme):
host = request.url[:-4]
filename = host+"/static/" + filename
result = eval(requests.get(filename).text)
return render_template("index.html", result=result)
else:
result = eval(requests.get(filename).text)
return render_template("index.html", result=result)
except Exception:
return render_template("index.html", result="Unexpected error during the execution of the predefined command.")
@app.errorhandler(404)
def page_not_found(e):
return render_template("404.html")
if __name__ == "__main__":
app.run(host='0.0.0.0', threaded=True)
| [
"[email protected]"
] | |
fd282217f2a3abbd3385cb2e72791deea7441668 | 771ed599c431f7cc05b4b90895cea24dac3895ba | /utils/edit_misc.py | d3baf8250121fd7a05b544a6510ad407f7a5672b | [] | no_license | baaaad/coco-edit | 8e5395a52f1f1b85db62393c2ac2ab1d00230c7a | b70e36bf05be5af16923f5ca0b8cce91ec9c29d1 | refs/heads/main | 2023-07-14T23:40:54.684513 | 2021-08-13T07:22:57 | 2021-08-13T07:22:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,978 | py | import numpy as np
def edit_distance(sent1, sent2):
# edit from sent1 to sent2
# Create a table to store results of subproblems
m = len(sent1)
n = len(sent2)
dp = [[0 for x in range(n+1)] for x in range(m+1)]
# Fill d[][] in bottom up manner
for i in range(m+1):
for j in range(n+1):
# If first string is empty, only option is to
            # insert all characters of second string
if i == 0:
dp[i][j] = j # Min. operations = j
# If second string is empty, only option is to
# remove all characters of second string
elif j == 0:
dp[i][j] = i # Min. operations = i
# If last characters are same, ignore last char
# and recur for remaining string
elif sent1[i-1] == sent2[j-1]:
dp[i][j] = dp[i-1][j-1]
            # If the last characters differ, only insertion and deletion are
            # considered (no substitution), so edits map onto KEEP/DEL/INS labels
else:
dp[i][j] = 1 + min(
dp[i][j-1], # Insert
dp[i-1][j] # Remove
)
return dp
def sent2edit(sent1, sent2):
dp = edit_distance(sent1, sent2)
edits = []
pos = []
m, n = len(sent1), len(sent2)
while m != 0 or n != 0:
curr = dp[m][n]
if m==0: #have to insert all here
while n>0:
left = dp[1][n-1]
edits.append(sent2[n-1])
pos.append(left)
n-=1
elif n==0:
while m>0:
top = dp[m-1][n]
edits.append('DEL')
pos.append(top)
m -=1
else: # we didn't reach any special cases yet
diag = dp[m-1][n-1]
left = dp[m][n-1]
top = dp[m-1][n]
if sent2[n-1] == sent1[m-1]: # keep
edits.append('KEEP')
pos.append(diag)
m -= 1
n -= 1
elif curr == top+1: # INSERT preferred before DEL
edits.append('DEL')
pos.append(top) # (sent2[n-1])
m -= 1
else: #insert
edits.append(sent2[n - 1])
pos.append(left) # (sent2[n-1])
n -= 1
edits = edits[::-1]
# replace the keeps at the end to stop, this helps a bit with imbalanced classes (KEEP,INS,DEL,STOP)
# for i in range(len(edits))[::-1]: #reversely checking
# if edits[i] == 'KEEP':
# if edits[i-1] =='KEEP':
# edits.pop(i)
# else:
# edits[i] = 'STOP'
# break
# if edits == []: # do we learn edits if input and output are the same?
# edits.append('STOP') #in the case that input and output sentences are the same
return edits
def edit2sent(sent, edits, last=False):
"""
Edit the sentence given the edit operations.
:param sent: sentence to edit, list of string
:param edits: a sequence of edits in ['KEEP','DEL','STOP']+INS_vocab_set
:return: the new sentence, as the edit sequence is deterministic based on the edits labels
"""
new_sent = []
    sent_pointer = 0 # counts the total of KEEP and DEL, to stay aligned with the original sentence
if len(edits) == 0 or len(sent) ==0: # edit_list empty, return original sent
return sent
for i, edit in enumerate(edits):
if len(sent) > sent_pointer: #there are tokens left for editing
if edit =="KEEP":
new_sent.append(sent[sent_pointer])
sent_pointer += 1
elif edit =="DEL":
sent_pointer += 1
elif edit == 'STOP':
break # go outside the loop and copy everything after current sent_pointer into the new sentence
else: #insert the word in
new_sent.append(edit)
if sent_pointer < len(sent):
for i in range(sent_pointer,len(sent)):
new_sent.append(sent[i])
return new_sent
def decode_edit(p_pos, p_edit, p_wins, p_wsub):
'''
    Decode the model's predictions into an edit type, an edit position, and
    the insert/substitute words predicted at that position
'''
bsz = p_wins.shape[0]
edit = np.argmax(p_edit, axis=-1)
pos = np.argmax(p_pos, axis=-1)
wins = np.argmax(p_wins, axis=-1)[np.arange(bsz), pos]
wsub = np.argmax(p_wsub, axis=-1)[np.arange(bsz), pos]
#print(edit.shape, pos.shape, wins.shape, wsub.shape)
return edit, pos, wins, wsub
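# Expected shapes for decode_edit (assumed example: batch of 2, sequence
# length 5, vocabulary of 100 words, 4 edit types):
#   p_pos:  (2, 5)       position scores
#   p_edit: (2, 4)       edit-type scores
#   p_wins: (2, 5, 100)  insert-word scores per position
#   p_wsub: (2, 5, 100)  substitute-word scores per position
# It returns four length-2 arrays: the argmax edit type, the argmax position,
# and the insert/substitute word ids at that position.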
def main():
#this prints an example of doing minimum editing distance
sent1 = "Military experts say the line between combat is getting blurry .".split()
sent2 = "Military experts say war is changing .".split()
A = edit_distance(sent1, sent2)
print('\n'.join([''.join(['{:4}'.format(item) for item in row])
for row in A]))
B = sent2edit(sent1, sent2)
print(B)
print(edit2sent(sent1,B))
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
a8aed15c162e96815fa1961c2bdd1299d9c3454d | a29a73de4df917da642adec96286d7ed3b2a0a42 | /TankDPPO/kankan.py | 0315d142c18ecb16f26c20009d72880e4e7f967d | [] | no_license | tankche1/Learn-To-Run | 9f0546f2d2c74cf18879579a3ccb2aeb3bea2765 | 27a48c8e1ec5864ab58caa9df4098a1089641cc0 | refs/heads/master | 2021-03-24T11:07:15.949621 | 2017-10-18T14:43:41 | 2017-10-18T14:43:41 | 101,266,609 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,281 | py | """
A simple version of OpenAI's Proximal Policy Optimization (PPO). [https://arxiv.org/abs/1707.06347]
Distributing workers in parallel to collect data, then stop worker's roll-out and train PPO on collected data.
Restart workers once PPO is updated.
The global PPO updating rule is adopted from DeepMind's paper (DPPO):
Emergence of Locomotion Behaviours in Rich Environments (Google Deepmind): [https://arxiv.org/abs/1707.02286]
View more on my tutorial website: https://morvanzhou.github.io/tutorials
Dependencies:
tensorflow r1.3
gym 0.9.2
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import gym, threading, queue
EP_MAX = 1000
EP_LEN = 200
N_WORKER = 4 # parallel workers
GAMMA = 0.9 # reward discount factor
A_LR = 0.0001 # learning rate for actor
C_LR = 0.001 # learning rate for critic
MIN_BATCH_SIZE = 64 # minimum batch size for updating PPO
UPDATE_STEP = 5 # loop update operation n-steps
EPSILON = 0.2 # for clipping surrogate objective
GAME = 'Pendulum-v0'
S_DIM, A_DIM = 3, 1 # state and action dimension
class PPO(object):
def __init__(self):
self.sess = tf.Session()
self.tfs = tf.placeholder(tf.float32, [None, S_DIM], 'state')
# critic
l1 = tf.layers.dense(self.tfs, 100, tf.nn.relu)
self.v = tf.layers.dense(l1, 1)
self.tfdc_r = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
self.advantage = self.tfdc_r - self.v
self.closs = tf.reduce_mean(tf.square(self.advantage))
self.ctrain_op = tf.train.AdamOptimizer(C_LR).minimize(self.closs)
# actor
pi, pi_params = self._build_anet('pi', trainable=True)
oldpi, oldpi_params = self._build_anet('oldpi', trainable=False)
self.sample_op = tf.squeeze(pi.sample(1), axis=0) # operation of choosing action
self.update_oldpi_op = [oldp.assign(p) for p, oldp in zip(pi_params, oldpi_params)]
self.tfa = tf.placeholder(tf.float32, [None, A_DIM], 'action')
self.tfadv = tf.placeholder(tf.float32, [None, 1], 'advantage')
# ratio = tf.exp(pi.log_prob(self.tfa) - oldpi.log_prob(self.tfa))
ratio = pi.prob(self.tfa) / (oldpi.prob(self.tfa) + 1e-5)
surr = ratio * self.tfadv # surrogate loss
self.aloss = -tf.reduce_mean(tf.minimum( # clipped surrogate objective
surr,
tf.clip_by_value(ratio, 1. - EPSILON, 1. + EPSILON) * self.tfadv))
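        # The expression above is PPO's clipped surrogate objective:
        #   L_CLIP = E[ min(r_t * A_t, clip(r_t, 1 - eps, 1 + eps) * A_t) ]
        # with probability ratio r_t = pi(a|s) / pi_old(a|s); clipping removes
        # the incentive to push the new policy far from the old one in a
        # single update.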
self.atrain_op = tf.train.AdamOptimizer(A_LR).minimize(self.aloss)
self.sess.run(tf.global_variables_initializer())
def update(self):
global GLOBAL_UPDATE_COUNTER
while not COORD.should_stop():
if GLOBAL_EP < EP_MAX:
UPDATE_EVENT.wait() # wait until get batch of data
self.sess.run(self.update_oldpi_op) # copy pi to old pi
data = [QUEUE.get() for _ in range(QUEUE.qsize())] # collect data from all workers
data = np.vstack(data)
s, a, r = data[:, :S_DIM], data[:, S_DIM: S_DIM + A_DIM], data[:, -1:]
adv = self.sess.run(self.advantage, {self.tfs: s, self.tfdc_r: r})
# update actor and critic in a update loop
[self.sess.run(self.atrain_op, {self.tfs: s, self.tfa: a, self.tfadv: adv}) for _ in range(UPDATE_STEP)]
[self.sess.run(self.ctrain_op, {self.tfs: s, self.tfdc_r: r}) for _ in range(UPDATE_STEP)]
UPDATE_EVENT.clear() # updating finished
GLOBAL_UPDATE_COUNTER = 0 # reset counter
ROLLING_EVENT.set() # set roll-out available
def _build_anet(self, name, trainable):
with tf.variable_scope(name):
l1 = tf.layers.dense(self.tfs, 200, tf.nn.relu, trainable=trainable)
mu = 2 * tf.layers.dense(l1, A_DIM, tf.nn.tanh, trainable=trainable)
sigma = tf.layers.dense(l1, A_DIM, tf.nn.softplus, trainable=trainable)
norm_dist = tf.distributions.Normal(loc=mu, scale=sigma)
params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
return norm_dist, params
def choose_action(self, s):
s = s[np.newaxis, :]
a = self.sess.run(self.sample_op, {self.tfs: s})[0]
return np.clip(a, -2, 2)
def get_v(self, s):
if s.ndim < 2: s = s[np.newaxis, :]
return self.sess.run(self.v, {self.tfs: s})[0, 0]
class Worker(object):
def __init__(self, wid):
self.wid = wid
self.env = gym.make(GAME).unwrapped
self.ppo = GLOBAL_PPO
def work(self):
global GLOBAL_EP, GLOBAL_RUNNING_R, GLOBAL_UPDATE_COUNTER
while not COORD.should_stop():
s = self.env.reset()
ep_r = 0
buffer_s, buffer_a, buffer_r = [], [], []
for t in range(EP_LEN):
if not ROLLING_EVENT.is_set(): # while global PPO is updating
ROLLING_EVENT.wait() # wait until PPO is updated
buffer_s, buffer_a, buffer_r = [], [], [] # clear history buffer, use new policy to collect data
a = self.ppo.choose_action(s)
s_, r, done, _ = self.env.step(a)
buffer_s.append(s)
buffer_a.append(a)
                buffer_r.append((r + 8) / 8)    # normalize reward, found to be useful
s = s_
ep_r += r
GLOBAL_UPDATE_COUNTER += 1 # count to minimum batch size, no need to wait other workers
if t == EP_LEN - 1 or GLOBAL_UPDATE_COUNTER >= MIN_BATCH_SIZE:
v_s_ = self.ppo.get_v(s_)
discounted_r = [] # compute discounted reward
for r in buffer_r[::-1]:
v_s_ = r + GAMMA * v_s_
discounted_r.append(v_s_)
discounted_r.reverse()
bs, ba, br = np.vstack(buffer_s), np.vstack(buffer_a), np.array(discounted_r)[:, np.newaxis]
buffer_s, buffer_a, buffer_r = [], [], []
QUEUE.put(np.hstack((bs, ba, br))) # put data in the queue
if GLOBAL_UPDATE_COUNTER >= MIN_BATCH_SIZE:
ROLLING_EVENT.clear() # stop collecting data
UPDATE_EVENT.set() # globalPPO update
if GLOBAL_EP >= EP_MAX: # stop training
COORD.request_stop()
break
# record reward changes, plot later
if len(GLOBAL_RUNNING_R) == 0: GLOBAL_RUNNING_R.append(ep_r)
else: GLOBAL_RUNNING_R.append(GLOBAL_RUNNING_R[-1]*0.9+ep_r*0.1)
GLOBAL_EP += 1
print('{0:.1f}%'.format(GLOBAL_EP/EP_MAX*100), '|W%i' % self.wid, '|Ep_r: %.2f' % ep_r,)
if __name__ == '__main__':
GLOBAL_PPO = PPO()
UPDATE_EVENT, ROLLING_EVENT = threading.Event(), threading.Event()
UPDATE_EVENT.clear() # not update now
ROLLING_EVENT.set() # start to roll out
workers = [Worker(wid=i) for i in range(N_WORKER)]
GLOBAL_UPDATE_COUNTER, GLOBAL_EP = 0, 0
GLOBAL_RUNNING_R = []
COORD = tf.train.Coordinator()
QUEUE = queue.Queue() # workers putting data in this queue
threads = []
for worker in workers: # worker threads
t = threading.Thread(target=worker.work, args=())
t.start() # training
threads.append(t)
# add a PPO updating thread
threads.append(threading.Thread(target=GLOBAL_PPO.update,))
threads[-1].start()
COORD.join(threads)
# plot reward change and test
plt.plot(np.arange(len(GLOBAL_RUNNING_R)), GLOBAL_RUNNING_R)
plt.xlabel('Episode'); plt.ylabel('Moving reward'); plt.ion(); plt.show()
env = gym.make('Pendulum-v0')
while True:
s = env.reset()
for t in range(300):
env.render()
s = env.step(GLOBAL_PPO.choose_action(s))[0]
| [
"[email protected]"
] | |
c99a631a7811780613dfb8c523090511e72b47ed | 7e2198e218bbec882cce3d2db246e3e41db70f51 | /getIamgeStatsPytorch.py | 54ba8af415ee43a98282091f7ed4e3272c26d3be | [
"MIT"
] | permissive | adityag6994/pytorch_ssd_training | 72445d1150f0115aac4c978b6cf9c28c1e4cdab4 | 404f3cbef815e314337ec2c1b4f06a2403a7ce03 | refs/heads/master | 2023-05-08T21:13:34.538207 | 2021-05-25T02:06:06 | 2021-05-25T02:06:06 | 368,198,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,363 | py | import torch.utils.data
from datasets import *
from utils import *
# Data parameters
data_folder = 'data/rafeeq/' # folder with data files
keep_difficult = True # use objects considered difficult to detect?
# Model parameters
# Not too many here since the SSD300 has a very specific structure
n_classes = len(label_map) # number of different types of objects
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Learning parameters
batch_size = 1 # batch size
workers = 1 # number of workers for loading data in the DataLoader
# Custom dataloaders
train_dataset = PascalVOCDataset(data_folder,
split='test',
keep_difficult=keep_difficult)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
collate_fn=train_dataset.collate_fn, num_workers=workers,
pin_memory=True) # note that we're passing the collate function here
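# For reference, a running per-channel mean/std over a loader can be computed
# like this (a sketch only -- the imported zzzf_mean_and_std may differ):
def mean_and_std_sketch(loader):
    n_pixels = 0
    channel_sum = torch.zeros(3)
    channel_sq_sum = torch.zeros(3)
    for images, *_ in loader:  # collate_fn yields (images, boxes, labels, ...)
        b, c, h, w = images.shape
        n_pixels += b * h * w
        channel_sum += images.sum(dim=[0, 2, 3])
        channel_sq_sum += (images ** 2).sum(dim=[0, 2, 3])
    mean = channel_sum / n_pixels
    std = (channel_sq_sum / n_pixels - mean ** 2).sqrt()
    return mean, std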
# find the per-channel mean and std of the dataset
mean, std = zzzf_mean_and_std(train_loader)
# mean, std = xwkuang_mean_and_std(train_loader)
print(mean, std)
## test hard | zzzf || xwkuang
# tensor([0.5881, 0.5617, 0.4820])
# tensor([0.2968, 0.3004, 0.2938])
## train
# mean = [0.4898, 0.4867, 0.4050]
# std = [0.2774, 0.2832, 0.2501] | [
"[email protected]"
] | |
d9e0220046732ed6ddb31fb31a5d9bdbd1e08ecf | a28df6dc7901e0799ddbcdd43dc17f1f966e5eb5 | /interview_preperation_kit/arrays/array_manipulation/editorial.py | 586037ec6827623696fb06fc01fac1bee8c27853 | [] | no_license | jlucasldm/hackerrank | ede7aafa0131171a358c1601a0ccb26da4f3b5dc | 3189c3b9844eaff5873f4d4cf6c94aaf3b88b864 | refs/heads/master | 2023-07-30T22:18:30.166508 | 2021-09-12T20:15:00 | 2021-09-12T20:15:00 | 396,453,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | def arrayManipulation(n, queries):
arr = [0] * (n+1)
# add the value at first index
# subtract the value at last index + 1
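    # Difference-array trick: each query touches only two cells, and a single
    # prefix-sum pass afterwards recovers the real values.  For example, n=5
    # with query (1, 3, 2):
    #   diff: [2, 0, 0, -2, 0, 0]  ->  prefix sums: [2, 2, 2, 0, 0]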
for q in queries:
start, end, amt = q
arr[start-1] += amt
arr[end] -= amt
# max value and running sum
mv = -1
running = 0
for a in arr:
running += a
if running > mv:
mv = running
return mv | [
"[email protected]"
] | |
5c8fe94773b018fec08156dff5a0e0e776f1b94c | 159a08aeb28fecfaeaa85cd253d1435255aad466 | /HW13/HW13.py | 6a47e237671b35e83c0ac065e22a314261858c4b | [
"MIT"
] | permissive | ji3g4aunaun/assignment-speech-recognition | ee5220fb8588fee11316d63545935f562a1531c1 | 794907a417d054477812c1f50695312601eae929 | refs/heads/master | 2020-03-24T07:33:05.884142 | 2018-07-25T20:49:48 | 2018-07-25T20:49:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,926 | py | import numpy as np
from dspBox import str2ndar
obs1 = str2ndar(open('observation1.txt', 'r').read())
obs2 = str2ndar(open('observation2.txt', 'r').read())
obs3 = str2ndar(open('observation3.txt', 'r').read())
# The observation3.txt the TA provided has no trailing newline (same as last time), so only 49 states are read; adding a newline yields all 50.
a1 = np.array([[0.2, 0.7, 0.1], [0.1, 0.2, 0.7], [0.7, 0.1, 0.2]])
b1 = np.array([[0.5, 0.4, 0.1], [0.7, 0.2, 0.1], [0.7, 0.1, 0.2]])
pi1 = np.array([0.7, 0.2, 0.1])
a2 = np.array([[0.7, 0.2, 0.1], [0.3, 0.6, 0.1], [0.1, 0.2, 0.7]])
b2 = np.array([[0.1, 0.8, 0.1], [0.2, 0.7, 0.1], [0.4, 0.5, 0.1]])
pi2 = np.array([0.1, 0.7, 0.2])
a3 = np.array([[0.2, 0.7, 0.1], [0.6, 0.3, 0.1], [0.2, 0.7, 0.1]])
b3 = np.array([[0.1, 0.2, 0.7], [0.2, 0.2, 0.6], [0.3, 0.1, 0.6]])
pi3 = np.array([0.2, 0.2, 0.6])
obs, a, b, pi = [obs1, obs2, obs3], [a1, a2, a3], [b1, b2, b3], [pi1, pi2, pi3]
# -------------------
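# Viterbi recursion used below, for reference:
#   delta_1(j) = pi_j * b_j(o_1)
#   delta_t(j) = max_i[ delta_{t-1}(i) * a_ij ] * b_j(o_t)
# p[t, j] stores delta and s[t, j] stores the argmax backpointer, which the
# final loop follows backwards to recover the most likely state sequence.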
for obsi in range(3): # Index of observation
size = len(obs[obsi])
print("\nobser" + str(obsi + 1))
for mi in range(3): # Index of model
# =====viterbi algorithm=====
        p = np.zeros((size, 3))             # delta: max path probability per state
        s = np.zeros((size, 3), dtype=int)  # backpointers: argmax predecessor state
p[0] = [pi[mi][state] * b[mi][state, obs[obsi][0]] for state in range(3)]
s[0] = [state for state in range(3)]
for i in range(1, size):
for state in range(3):
evaluate = [p[i-1, from_] * a[mi][from_, state] * b[mi][state, obs[obsi][i]] for from_ in range(3)]
p[i, state] = np.max(evaluate)
s[i, state] = np.argmax(evaluate)
seq = np.zeros((size), dtype=int)
seq[-1] = np.argmax(p[-1])
for i in range(size-2, -1, -1):
seq[i] = s[i+1, seq[i+1]]
print("viterbi max state sequence", np.array_str(seq, 100))
print('model_{:d} probability:{:.6e}'.format(mi+1, np.max(p[-1])))
| [
"[email protected]"
] | |
9ff1afa9d84a4c4fd731361a64cad925d42d083b | b69314d436bc921289a418a373ef6035d40886d5 | /Top.py | a2396ba726fcc614f1215667d07bccc3a0f51ab0 | [] | no_license | Ashfaaq98/SDN_POX_Controller-Load_Balancer | c4329d18b6e1dc1d85ec62c9fbfaf2b371c083cb | 0e1b96120264e574393c964f051bd366c21767ca | refs/heads/main | 2023-08-26T00:11:48.368747 | 2021-10-17T18:09:29 | 2021-10-17T18:09:29 | 413,499,988 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,055 | py | #! /usr/bin/python
from mininet.net import Mininet
from mininet.node import Controller, OVSKernelSwitch, RemoteController
from mininet.cli import CLI
from mininet.log import setLogLevel, info
def emptyNet():
net = Mininet(controller=RemoteController, switch=OVSKernelSwitch)
c1 = net.addController('c1', controller=RemoteController, ip="127.0.0.1", port=6633)
h1 = net.addHost( 'h1', ip='10.0.0.1' )
h2 = net.addHost( 'h2', ip='10.0.0.2' )
# h3 = net.addHost( 'h3', ip='10.0.0.3')
# h4 = net.addHost( 'h4', ip='10.0.0.4')
# h5 = net.addHost( 'h5', ip='10.0.0.5')
# h6 = net.addHost( 'h6', ip='10.0.0.6')
s1 = net.addSwitch( 's1' )
# s2 = net.addSwitch( 's2' )
# s3 = net.addSwitch( 's3' )
s1.linkTo( h1 )
s1.linkTo( h2 )
# s2.linkTo( h3 )
# s2.linkTo( h4 )
# s3.linkTo( h5 )
# s3.linkTo( h6 )
net.build()
c1.start()
s1.start([c1])
# s2.start([c1])
# s3.start([c1])
CLI( net )
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
emptyNet()
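# Usage sketch (an assumption -- adjust to your setup): start a POX controller
# listening on 127.0.0.1:6633 first, e.g. `./pox.py forwarding.l2_learning`,
# then run `sudo python Top.py` and try `pingall` at the mininet> prompt.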
| [
"[email protected]"
] | |
33cb9493d7550749594d94675bba7dd68faf783d | d92cd1bce2af9d86e83a732a26419be0d76ae8eb | /demo_final/mvc/controllers/alumnos/delete.py | b4f77f02691bb3d8c541d728780cb34b2d8396fd | [] | no_license | MariaFernandaReyesLopez/demo_web_page | fd0973d0cc362dc3cc24784c0fdf10cb58154e2a | 40879f2265c068fed226b4bc560efd975d56974d | refs/heads/master | 2022-11-29T13:03:08.334216 | 2020-08-13T21:18:04 | 2020-08-13T21:18:04 | 287,380,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | import web
import mvc.models.alumnos as alumnos
model_alumnos = alumnos.Alumnos()
render = web.template.render("mvc/views/alumnos/", base="template")
class Delete():
def GET(self, id_alumno):
try:
result = model_alumnos.view(id_alumno)[0]
            return render.delete(result) # render delete.html
except Exception as e:
print(e)
return "Error"
def POST(self, id_alumno):
try:
form = web.input()
id_alumno = form.id_alumno #hidden
result = model_alumnos.delete(id_alumno)
            raise web.seeother('/alumnos_list')  # web.py redirects are raised, not just called
except Exception as e:
print(e)
return "Error"
| [
"[email protected]"
] | |
5a346858f79c757e13d5570ec88addd3bfc652ac | 0ae6e24fa95b8a22d9fe9d36f8786cdc4adbceac | /tests/test_api.py | 1c95830966214ef48771a702eba86858d69f7bfe | [] | no_license | lopezjimenezjesus/flasktaskr | cd0c407d7dec0920a1bf2eabb39f1b40ea04fa03 | f7167092a0a99969a730c3d80290ce0919de8ddb | refs/heads/master | 2022-11-10T00:31:22.165929 | 2016-12-18T01:15:41 | 2016-12-18T01:15:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,473 | py | import os
import unittest
from datetime import date
from project import app, db
from project._config import basedir
from project.models import Task
TEST_DB = 'test.db'
class APITest(unittest.TestCase):
#######################
# setup and tear down #
#######################
def setUp(self):
app.config['TESTING'] = True
app.config['WTF_CSRF_ENABLED'] = False
app.config['DEBUG'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + \
os.path.join(basedir, TEST_DB)
self.app = app.test_client()
db.create_all()
self.assertEquals(app.debug, False)
# executed after each test
def tearDown(self):
db.session.remove()
db.drop_all()
##################
# helper methods #
##################
def add_tasks(self):
db.session.add(
Task(
"Run around in circles",
date(2015, 10, 5),
'10',
date(2015, 10, 5),
1,
1
)
)
db.session.commit()
db.session.add(
Task(
"Purchase Real Python",
date(2016, 2, 23),
10,
date(2016, 2, 7),
1,
1
)
)
db.session.commit()
def login(self, name, password):
return self.app.post('/', data=dict(
name=name, password=password), follow_redirects=True)
def register(self, name, email, password, confirm):
return self.app.post(
'register/',
data=dict(name=name, email=email, password=password,
confirm=confirm),
follow_redirects=True
)
#########
# tests #
#########
def test_collection_endpoint_returns_correct(self):
self.register('Michael', '[email protected]',
'python', 'python')
self.login('Michael', 'python')
self.add_tasks()
response = self.app.get('api/v1/tasks/',
follow_redirects=True)
self.assertEquals(response.status_code, 200)
self.assertEquals(response.mimetype, 'application/json')
self.assertIn(b'Run around in circles', response.data)
self.assertIn(b'Purchase Real Python', response.data)
def test_resource_endpoint_returns_correct_data(self):
self.register('Michael', '[email protected]',
'python', 'python')
self.login('Michael', 'python')
self.add_tasks()
response = self.app.get('api/v1/tasks/2', follow_redirects=True)
self.assertEquals(response.status_code, 200)
self.assertEquals(response.mimetype, 'application/json')
self.assertIn(b'Purchase Real Python', response.data)
self.assertNotIn(b'Run around in circles', response.data)
def test_invalid_resource_endpoint_returns_error(self):
self.register('Michael', '[email protected]',
'python', 'python')
self.login('Michael', 'python')
self.add_tasks()
response = self.app.get('api/v1/tasks/209', follow_redirects=True)
self.assertEquals(response.status_code, 404)
self.assertEquals(response.mimetype, 'application/json')
self.assertIn(b'Element does not exists', response.data)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
b91ee62ab15974dcb724ceeb00d00689410e332f | 7bf1dc58ba0884ed957efdb5459ae44851b2b36e | /practice_450/greedy/33_rearrange_characters.py | 858f75ba242070202848f0f4f1146c91f0ceea28 | [] | no_license | ksaubhri12/ds_algo | 672260f07f41bcfc33f8ac23a64085a1f27ab4a5 | 46505b89371cae3321f48609dd755c7e5cfed302 | refs/heads/master | 2023-05-12T08:37:06.789111 | 2023-05-03T03:06:49 | 2023-05-03T03:06:49 | 211,793,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | def rearrange_characters(input_string: str):
n = len(input_string)
dict_value = {}
for i in range(0, n):
if input_string[i] in dict_value:
dict_value[input_string[i]] = dict_value[input_string[i]] + 1
else:
dict_value[input_string[i]] = 1
sorted_count_list = sorted(dict_value, key=dict_value.get, reverse=True)
i = 0
char_list = list(input_string)
while len(sorted_count_list) > 0:
char = sorted_count_list.pop(0)
count = dict_value[char]
        if count > (n + 1) // 2:  # more than ceil(n/2) copies can never be kept apart
            return -1
for k in range(0, count):
char_list[i] = char
i = i + 2
if i >= n:
i = 1
return ''.join(char_list)
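# Strategy note: characters are placed most-frequent-first at even indices
# (0, 2, 4, ...) and wrap around to the odd indices once those run out, which
# keeps equal characters non-adjacent whenever a valid arrangement exists.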
if __name__ == '__main__':
print(rearrange_characters('geeksforgeeks'))
print(rearrange_characters('bbbbb'))
print(rearrange_characters('kkk'))
| [
"[email protected]"
] | |
ab7a78b9db6f60371ee1fac74f8b8411ff23aa43 | a179d2abea58ee4d987bf05729a5e7df727af3cd | /instaclone/settings.py | 6e13a5e35aa00ba74ca16a19dd70fe50c0cb34ee | [
"MIT"
] | permissive | Derrick-Nyongesa/instagram-clone | ced05a4c334c9e95e96bec9a3883b448c5fa95c6 | 2f3c018c33aa440160401f0c1878a2670f2f0081 | refs/heads/main | 2023-05-14T01:32:36.211904 | 2021-05-26T13:42:26 | 2021-05-26T13:42:26 | 369,403,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,796 | py | """
Django settings for instaclone project.
Generated by 'django-admin startproject' using Django 3.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
from decouple import config, Csv
import cloudinary
import cloudinary.uploader
import cloudinary.api
import django_heroku
import dj_database_url
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)
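# Sample .env consumed by python-decouple (values are illustrative
# placeholders, not the project's real settings):
#   SECRET_KEY=change-me
#   DEBUG=True
#   MODE=dev
#   DB_NAME=instaclone
#   DB_USER=postgres
#   DB_PASSWORD=postgres
#   DB_HOST=127.0.0.1
#   ALLOWED_HOSTS=.localhost,.herokuapp.com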
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'instagram',
'bootstrap3',
'cloudinary'
]
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'instaclone.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'instaclone.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
if config('MODE')=="dev":
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': '',
}
}
# production
else:
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = '/accounts/login/'
#AUTH_PROFILE_MODULE = 'accounts.Profile'
EMAIL_USE_TLS = config('EMAIL_USE_TLS')
EMAIL_HOST = config('EMAIL_HOST')
EMAIL_PORT = config('EMAIL_PORT')
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')
cloudinary.config(
cloud_name = config("CLOUDINARY_NAME"),
api_key = config("CLOUDINARY_KEY"),
api_secret = config("CLOUDINARY_SECRET")
)
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
django_heroku.settings(locals()) | [
"[email protected]"
] | |
acd62ee375dff59ca59e70a02bc32374c22f0693 | 02f25642ee890ef749e29ead0009309519412906 | /serverCS.py | 58c4bc85c86c48f3550e3d72e54eb0ded880163f | [] | no_license | romantikatara/Analytics_Model_Deployment | c753da09b8df26f6a20780316fbcf5f36668a4cf | 6e70c3cc8fe095a97350483ec7e3321a4f8967d5 | refs/heads/master | 2020-07-30T19:05:05.015626 | 2019-09-23T14:14:12 | 2019-09-23T14:14:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,185 | py | # Membuat API dari model Machine Learning dengan menggunakan flask
# Load the .pkl model into a server process
'''
This code receives JSON data via a POST request, produces a prediction with
the loaded model, and returns the result in JSON format.
'''
# Import libraries
import numpy as np
from flask import Flask, request, jsonify
import pickle
app = Flask(__name__)
# Load Model Random Forest
# De-serialization
model = pickle.load(open('modelcreditscoring.pkl','rb'))
@app.route('/api',methods=['POST']) # accepts POST input
def predict():
# Get the data from the POST request.
data = request.get_json(force=True)
# Make prediction using model loaded from disk as per the data.
prediction = model.predict([np.array([data['AGE'],
data['MARRIAGE'],
data['PAY_1'],
data['PAY_2'],
data['PAY_3']])])
# Take the first value of prediction
output = int(prediction[0])
return jsonify(output)
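# Example request once the server is running (localhost:5000 per app.run below;
# the field values here are illustrative only):
#   curl -X POST http://localhost:5000/api -H "Content-Type: application/json" \
#        -d '{"AGE": 35, "MARRIAGE": 1, "PAY_1": 0, "PAY_2": 0, "PAY_3": 0}'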
if __name__ == '__main__':
app.run(port=5000, debug=True) | [
"[email protected]"
] | |
ec491a079245a58e14d42601f0fca085af3e714e | 12738d4511f72cf8ac2f8a63df635f74698d5f63 | /configs/fpn_resnext50_32x4d_cityscapes_2gpu_f.py | 77269bafb8deda39295a9c1c77835019749bb104 | [
"MIT"
] | permissive | ternaus/iglovikov_segmentation | 5731bbd0cbf7125bdc121e7357820b8bec8297ae | 5a9463031e5da7c2cf34c967a4f2657416c11bd2 | refs/heads/master | 2020-08-09T12:26:44.779660 | 2019-12-15T21:59:42 | 2019-12-15T21:59:42 | 214,087,415 | 18 | 1 | MIT | 2020-04-03T02:16:36 | 2019-10-10T04:35:44 | Python | UTF-8 | Python | false | false | 3,589 | py | # Cityscapes. 2 x TeslaV100
from pathlib import Path
import albumentations as albu
import cv2
import segmentation_models_pytorch as smp
import torch
from torch.optim import Adam
from src.loss import CCE
ignore_index = 255
num_classes = 19
encoder_type = "resnext50_32x4d"
preprocess_parameters = smp.encoders.get_preprocessing_params(encoder_type)
mean = preprocess_parameters["mean"]
std = preprocess_parameters["std"]
num_gpu = 2
num_samples = None
train_parameters = dict(
lr=0.001,
train_batch_size=8 * num_gpu,
val_batch_size=num_gpu,
fp16=False,
num_epochs=300,
height_crop_size=512,
width_crop_size=512,
ignore_index=ignore_index,
tta=None, # can be None, d4 or lr
downsample_mask_factor=None, # can be 4 for FPN
)
if train_parameters["downsample_mask_factor"] is not None:
if not train_parameters["height_crop_size"] / train_parameters["downsample_mask_factor"]:
raise ValueError(
f"Height crop size ({train_parameters['height_crop_size']}) "
f"should be divisible by the downsample_mask_factor "
f"({train_parameters['downsample_mask_factor']})"
)
if not train_parameters["width_crop_size"] / train_parameters["downsample_mask_factor"]:
raise ValueError(
f"Width crop size ({train_parameters['width_crop_size']}) "
f"should be divisible by the downsample_mask_factor"
f"({train_parameters['downsample_mask_factor']})"
)
final_upsampling = None
else:
final_upsampling = 4
model = smp.FPN(
encoder_type,
encoder_weights="imagenet",
classes=num_classes,
activation=None,
final_upsampling=final_upsampling,
dropout=0.5,
decoder_merge_policy="cat",
)
pad_factor = 64
imread_library = "cv2" # can be cv2 or jpeg4py
optimizer = Adam(
[
{"params": model.decoder.parameters(), "lr": train_parameters["lr"]},
# decrease lr for encoder in order not to permute
# pre-trained weights with large gradients on training start
{"params": model.encoder.parameters(), "lr": train_parameters["lr"] / 100},
],
weight_decay=1e-3,
)
normalization = albu.Normalize(mean=mean, std=std, p=1)
train_augmentations = albu.Compose(
[
albu.RandomSizedCrop(
min_max_height=(
int(0.5 * (train_parameters["height_crop_size"])),
int(2 * (train_parameters["height_crop_size"])),
),
height=train_parameters["height_crop_size"],
width=train_parameters["width_crop_size"],
w2h_ratio=1.0,
p=1,
),
albu.ShiftScaleRotate(rotate_limit=20, scale_limit=0, p=0.5, mask_value=ignore_index),
albu.RandomBrightnessContrast(p=0.5),
albu.RandomGamma(p=0.5),
albu.HueSaturationValue(p=0.5),
albu.HorizontalFlip(p=0.5),
normalization,
],
p=1,
)
val_augmentations = albu.Compose(
[
albu.PadIfNeeded(
min_height=1024, min_width=2048, border_mode=cv2.BORDER_CONSTANT, mask_value=ignore_index, p=1
),
normalization,
],
p=1,
)
test_augmentations = albu.Compose([normalization], p=1)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[200, 250, 280], gamma=0.1)
train_image_path = Path("data/train/images")
train_mask_path = Path("data/train/masks")
val_image_path = Path("data/val/images")
val_mask_path = Path("data/val/masks")
loss = CCE(ignore_index=ignore_index)
callbacks = []
logdir = f"runs/2gpu_{model.name}_f/baseline"
| [
"[email protected]"
] | |
298c5334b8cb8458e240533cc78ba90bc20dd8b5 | 5964e11c3d1ea543e139c6a4a66751a611fd0ac5 | /test/test_forest_delete.py | 63d8f63b4002dea380136151df3c75eb74080640 | [
"Apache-2.0"
] | permissive | codycollier/booster | 333a60caeb4df554d49d1cd8251fc4c58b57c76c | 8639d74cc98c4276fba281bbe244795e8f44a833 | refs/heads/master | 2020-04-16T17:42:04.494735 | 2019-07-12T03:25:21 | 2019-07-12T03:25:21 | 2,858,765 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,131 | py | #!/usr/bin/env python
import unittest
import boostertest
class TestForestDelete(boostertest.BoosterTestCase):
""" Test the forest-delete action """
def setUp(self):
""" Set the action and other commonly used fixture data """
self.params = {}
self.params['action'] = "forest-delete"
self.params['forest-name'] = "pinecone-a"
self.params['delete-data'] = "true"
# collect forest names for later teardown
self.teardown_forests = []
def tearDown(self):
""" Remove items from server created during tests """
params = {}
params['action'] = "forest-delete"
params['delete-data'] = "true"
for forest in self.teardown_forests:
params['forest-name'] = forest
response, body = self.booster.request(params)
self.assertTrue(response.status in (404, 200))
def test_basic_forest_deletion_results_in_200(self):
""" A successful forest deletion should result in 200 """
# create the forest
params = {}
params['action'] = "forest-create"
params['forest-name'] = "firs"
params['host-name'] = "localhost"
params['data-directory'] = ""
self.teardown_forests.append(params['forest-name'])
response, body = self.booster.request(params)
err = response.get("x-booster-error", "none")
self.assertEqual(response.status, 201)
self.assertEqual(err, "none")
# delete and assert
params = self.params
params['forest-name'] = "firs"
response, body = self.booster.request(params)
err = response.get("x-booster-error", "none")
self.assertEqual(response.status, 200)
self.assertEqual(err, "none")
def test_delete_nonexistent_forest_results_in_404(self):
""" Attempting to delete a non-existent forest should return 404 """
params = self.params
params['forest-name'] = "no-such-forest-exists-here"
response, body = self.booster.request(params)
err = response.get("x-booster-error", "")
self.assertEqual(response.status, 404)
self.assertTrue(err.find("does not exist") != 1)
def test_empty_forest_name_results_in_404(self):
""" A forest-delete with empty forest-name value should result in 404 """
params = self.params
params['forest-name'] = ""
response, body = self.booster.request(params)
err = response.get("x-booster-error", "none")
self.assertEqual(response.status, 404)
self.assertTrue(err.find("Forest '' does not exist") != 1)
def test_delete_forest_with_no_forest_name_results_in_400(self):
""" A forest-delete with missing forest-name should result in 400 """
params = self.params
del params['forest-name']
response, body = self.booster.request(self.params)
err = response.get("x-booster-error", "")
self.assertEqual(response.status, 400)
self.assertTrue(err.find("valid set of arguments was not provided") != 1)
if __name__=="__main__":
unittest.main()
| [
"[email protected]"
] | |
b2db820fc500dacc8d7771ff92eaa649fc902760 | cb28de2d461cb7f099ca8b61bb5b2015dff0c776 | /spyder_scripts/jan_workshop/ex4.py | 27323317afdf37ef6f16d56f2e58bb6d7681a0bc | [] | no_license | chuymtz/python_scripts | 04082241ae3e3c3a4772aa45f6d667e665f7200c | 6b06a44671ddbafd073764e50bab9117ee72d2cc | refs/heads/master | 2021-01-17T17:37:03.486857 | 2016-06-11T03:11:22 | 2016-06-11T03:11:22 | 60,887,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | # -*- coding: utf-8 -*-
"""
Python Workshop 1/30/2015
Exercise 4
1. Write a function that returns its GC-content when a sequence is given
GC_Content = (#G + #C) / length(seq)
2. SeqForEX4.txt contains 5 different DNA sequences with their corresponding
headers, find a way to read in the 5 sequences alone and print them out,
respectively
3. Calculate the GC-content for all 5 sequences
* Find the sequence with the highest GC-content, write its ID from header
and its corresponding GC-content to a file named 'result.txt'
@author: Yuan
"""
| [
"[email protected]"
] | |
dc764acc87abd223ec64716f693f78733a9ce009 | acdbaf53db693f63313ee5811b15c810b6f2df60 | /backend/test_flaskr.py | 763983588893e2501ddf8728dcbbe0672423c72d | [] | no_license | fanchenjie/Trivia | 5de5482b163496f6126cadd51cae8b4b797a4499 | 6fe9fc8175fd6d651a582f40a1ad67cbf68425e8 | refs/heads/main | 2023-01-15T11:26:08.678506 | 2020-11-20T23:10:32 | 2020-11-20T23:10:32 | 302,809,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,213 | py | import os
import unittest
import json
from flask_sqlalchemy import SQLAlchemy
from flaskr import create_app
from models import setup_db, Question, Category
class TriviaTestCase(unittest.TestCase):
"""This class represents the trivia test case"""
def setUp(self):
"""Define test variables and initialize app."""
self.app = create_app()
self.client = self.app.test_client
self.database_name = "triviatest"
self.database_path = "postgres://{}/{}".format('xiaofan@localhost:5432', self.database_name)
setup_db(self.app, self.database_path)
# binds the app to the current context
with self.app.app_context():
self.db = SQLAlchemy()
self.db.init_app(self.app)
# create all tables
self.db.create_all()
self.new_question = {
'question':'question1',
'answer':'answer1',
'difficulty':1,
'category':1
}
# edit to test serch by term
self.search_term_with_result = {
'searchTerm':'question'
}
# self.total_questions_with_search_term = 6
self.search_term_without_result = {
'searchTerm':'xxxxxxxxxx'
}
# edit these to test delete
# res = self.client().post('/questions', json = self.new_question)
# id = json.loads(res.data)['question_id']
self.exist_question_ID_to_delete = 11
self.non_exist_question_ID_to_delete = 1000
# edit these to test non_valid page
self.non_valid_page = 1000
# edit these to test get question by category
self.exist_category_ID = 1
# edit these to test post quiz type and previous question
self.quiz_type_previous_questions = {
'quiz_category':{'id':2},
'previous_questions':[16]
}
def tearDown(self):
"""Executed after reach test"""
pass
"""
TODO
Write at least one test for each test for successful operation and for expected errors.
"""
def test_get_categories(self):
res = self.client().get('/categories')
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertTrue(len(data['categories']))
# get questions
def test_get_paginated_questions(self):
res = self.client().get('/questions?page=1')
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertTrue(data['total_questions'])
self.assertTrue(len(data['questions']))
self.assertTrue(len(data['categories']))
self.assertFalse(data['current_category'])
def test_404_sent_requesting_beyond_valid_page(self):
res = self.client().get('/questions?page={}'.format(self.non_valid_page))
data = json.loads(res.data)
self.assertEqual(res.status_code, 404)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'],'Not Found')
# search by term
def test_search_question_by_searchTerm_with_result(self):
res = self.client().post('/questions/searchTerm?page=1', json=self.search_term_with_result)
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertTrue(data['success'])
self.assertTrue(len(data['questions']))
self.assertTrue(data['total_questions'])
self.assertFalse(data['current_category'])
self.assertTrue(data['search_term'])
# self.assertEqual(data['total_questions'], self.total_questions_with_search_term)
def test_search_question_by_searchTerm_without_result(self):
res = self.client().post('/questions/searchTerm?page=1', json=self.search_term_without_result)
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertTrue(data['success'])
self.assertEqual(len(data['questions']), 0)
self.assertFalse(data['current_category'])
self.assertTrue(data['search_term'])
self.assertEqual(data['total_questions'], 0)
def test_400_bad_search_post_without_body(self):
res = self.client().post('/questions/searchTerm')
data = json.loads(res.data)
self.assertEqual(res.status_code, 400)
self.assertFalse(data['success'])
self.assertEqual(data['message'], 'Bad Request')
# get questions by category
def test_get_questions_by_category(self):
res = self.client().get('/categories/{}/questions'.format(self.exist_category_ID))
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertTrue(data['success'])
self.assertTrue(len(data['questions']))
self.assertTrue(data['total_questions'])
# self.assertTrue(data['current_category'])
self.assertFalse(data['search_term'])
def test_404_questions_by_category_beyond_valid_page(self):
res = self.client().get('/categories/{}/questions?page={}'.format(self.exist_category_ID, self.non_valid_page))
data = json.loads(res.data)
self.assertEqual(res.status_code, 404)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'],'Not Found')
# quizzes
def test_post_quiz_type_previous_questions(self):
res = self.client().post('/quizzes', json=self.quiz_type_previous_questions)
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertTrue(data['success'])
self.assertTrue(data['question'])
def test_400_bad_quiz_post_without_body(self):
res = self.client().post('/quizzes')
data = json.loads(res.data)
self.assertEqual(res.status_code, 400)
self.assertFalse(data['success'])
self.assertEqual(data['message'], 'Bad Request')
# post question
def test_post_question(self):
res = self.client().post('/questions', json=self.new_question)
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertTrue(data['success'])
def test_400_bad_post_request_without_body(self):
res = self.client().post('/questions')
data = json.loads(res.data)
self.assertEqual(res.status_code, 400)
self.assertFalse(data['success'])
self.assertEqual(data['message'], 'Bad Request')
# delete question
# def test_delete_question_by_id(self):
# res = self.client().delete('/questions/{}'.format(self.exist_question_ID_to_delete))
# data = json.loads(res.data)
# self.assertEqual(res.status_code, 200)
# self.assertEqual(data['success'], True)
def test_404_if_question_does_not_exist(self):
res = self.client().delete('/questions/{}'.format(self.non_exist_question_ID_to_delete))
data = json.loads(res.data)
self.assertEqual(res.status_code, 404)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'],'Not Found')
# Make the tests conveniently executable
if __name__ == "__main__":
unittest.main() | [
"[email protected]"
] | |
ee760f49be6ef89ccf152093972a09626ddee30f | 6d0eb72dfbc165b6129d955b9fe5e4a1d8b1681e | /rules/file_dialogue.py | ca404e0aed146b539dd034ef7b3ef3ab64df350a | [] | no_license | seekM/.caster | 4ee55974e75ef8cab04b21866959664d551e14a5 | f36152e6d60a2b9a486493c3271d204388b1bfaf | refs/heads/master | 2020-05-17T14:31:23.156785 | 2019-06-18T22:02:17 | 2019-06-18T22:02:17 | 183,766,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,700 | py | from dragonfly import (AppContext, Dictation, Grammar, IntegerRef, Key, MappingRule,
Pause, Repeat, Text)
from dragonfly.actions.action_mimic import Mimic
from castervoice.lib import control, settings
from castervoice.lib.dfplus.additions import IntegerRefST
from castervoice.lib.dfplus.merge import gfilter
from castervoice.lib.dfplus.merge.mergerule import MergeRule
from castervoice.lib.dfplus.state.short import R
class FileDialogueRule(MergeRule):
pronunciation = "file dialogue"
mapping = {
"get up [<n>]":
R(Key("a-up"))*Repeat(extra="n"),
"get back [<n>]":
R(Key("a-left"))*Repeat(extra="n"),
"get forward [<n>]":
R(Key("a-right"))*Repeat(extra="n"),
"new folder":
R(Key("cs-n")),
"address bar":
R(Key("c-l")),
"search":
R(Key("c-l, tab")),
"left pane":
R(Key("c-l, tab:3")),
"center pane":
R(Key("c-l, tab:4")),
"sort":
R(Key("c-l, tab:5")),
"organize":
R(Key("c-l, tab:2")),
"(dateiname | filename)":
R(Key("c-l, tab:6")),
"(dateityp | file type)":
R(Key("c-l, tab:7")),
}
extras = [IntegerRefST("n", 1, 10)]
defaults = {
"n": 1,
}
dialogue_names = [
"open",
"ffnen",
"speichern",
"select",
]
context = AppContext(title="save")
for name in dialogue_names:
context = context | AppContext(title=name)
grammar = Grammar("FileDialogue", context=context)
rule = FileDialogueRule(name="filedialogue")
gfilter.run_on(rule)
grammar.add_rule(rule)  # add the filtered rule rather than a fresh, unfiltered instance
grammar.load()
| [
"[email protected]"
] | |
ce74370ba73cd08e5d80892ce3594a66fbe400e2 | 1496fd578daaf13bebccd68057c455b74481b8de | /week3/hangman/ps3_hangman.py | 89ffb62c683272716b7e0678921ac1462f6ae894 | [] | no_license | leahfrye/MITx-6.00.1x | 274c58a6d7ca4c64dbd1bda5c2f102012f4a4f17 | 41e26ded3606b83b21998777ff32cf862e2b7f1d | refs/heads/master | 2021-09-03T14:43:04.654759 | 2018-01-09T21:43:45 | 2018-01-09T21:43:45 | 115,056,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,980 | py | # Hangman game
#
# -----------------------------------
# Helper code
# You don't need to understand this helper code,
# but you will have to know how to use the functions
# (so be sure to read the docstrings!)
import random
import string
WORDLIST_FILENAME = r"C:\projects\mitCourse6.00.1x\week3\hangman\words.txt"
def loadWords():
"""
Returns a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
print("Loading word list from file...")
# inFile: file
inFile = open(WORDLIST_FILENAME, 'r')
# line: string
line = inFile.readline()
# wordlist: list of strings
wordlist = line.split()
print(" ", len(wordlist), "words loaded.")
return wordlist
def chooseWord(wordlist):
"""
wordlist (list): list of words (strings)
Returns a word from wordlist at random
"""
return random.choice(wordlist)
wordlist = loadWords()
# end of helper code
# -----------------------------------
def isWordGuessed(secretWord, lettersGuessed):
'''
secretWord: string, the word the user is guessing
lettersGuessed: list, what letters have been guessed so far
returns: boolean, True if all the letters of secretWord are in lettersGuessed;
False otherwise
'''
# FILL IN YOUR CODE HERE...
guessedString = ""
for letter in secretWord:
if letter in lettersGuessed:
guessedString += letter
else:
guessedString += "_"
if secretWord == guessedString:
return True
else:
return False
def getGuessedWord(secretWord, lettersGuessed):
'''
secretWord: string, the word the user is guessing
lettersGuessed: list, what letters have been guessed so far
returns: string, comprised of letters and underscores that represents
what letters in secretWord have been guessed so far.
'''
# FILL IN YOUR CODE HERE...
string = ""
for letter in secretWord:
if letter in lettersGuessed:
string += letter
else:
string += "_"
return string
def getAvailableLetters(lettersGuessed):
'''
lettersGuessed: list, what letters have been guessed so far
returns: string, comprised of letters that represents what letters have not
yet been guessed.
'''
# FILL IN YOUR CODE HERE...
alphabet = string.ascii_lowercase
for letter in lettersGuessed:
alphabet = alphabet.replace(letter, "")
return alphabet
def hangman(secretWord):
'''
secretWord: string, the secret word to guess.
Starts up an interactive game of Hangman.
* At the start of the game, let the user know how many
letters the secretWord contains.
* Ask the user to supply one guess (i.e. letter) per round.
* The user should receive feedback immediately after each guess
      about whether their guess appears in the computer's word.
* After each round, you should also display to the user the
partially guessed word so far, as well as letters that the
user has not yet guessed.
Follows the other limitations detailed in the problem write-up.
'''
# FILL IN YOUR CODE HERE...
    # Track the word length and the guesses made so far
letterLength = len(secretWord)
correctGuesses = []
incorrectGuesses = []
print("Welcome to the game Hangman!")
print("I am thinking of a word that is " + str(letterLength) + " letters long")
print("-------------")
    turn = 8
while turn > 0:
print("You have " + str(turn) + " guesses left")
print("Available letters: " + getAvailableLetters(correctGuesses + incorrectGuesses))
guess = input("Please guess a letter: ")
guess = guess.lower()
wordSoFar = getGuessedWord(secretWord, correctGuesses)
if isWordGuessed(secretWord, correctGuesses):
print("Congratulations, you won!")
else:
if guess in incorrectGuesses or guess in correctGuesses:
print("Oops! You've already guessed that letter: " + wordSoFar)
elif guess not in secretWord:
print("Oops! That letter is not in my word: " + wordSoFar);
turn -= 1
incorrectGuesses.append(guess)
else:
correctGuesses.append(guess)
wordSoFar = getGuessedWord(secretWord, correctGuesses)
print("Good guess: " + wordSoFar)
if isWordGuessed(secretWord, correctGuesses):
print("-------------")
print("Congratulations, you won!")
print("-------------")
if "_" in wordSoFar:
print("Sorry, you ran out of guesses. The word was " + secretWord + ".")
# When you've completed your hangman function, uncomment these two lines
# and run this file to test! (hint: you might want to pick your own
# secretWord while you're testing)
secretWord = chooseWord(wordlist).lower()
hangman(secretWord)
| [
"[email protected]"
] | |
2f43fcb6f336490af1bbc1d36b02d53bc911db08 | 78cff47ad6e4c8dc600249915aff2a3c54b723a2 | /AntiAlias.py | c2d25ee7bdcaf3239d2397ab1d5d6b0e1b66e246 | [] | no_license | willstem/Research_Tools | c6e7e526863abbe048a1c973430211a2bfa36b5b | 42e5dbed7ed786e212a1115974c2235de26cf63a | refs/heads/master | 2020-03-22T05:57:43.184438 | 2018-07-03T15:17:08 | 2018-07-03T15:17:08 | 139,602,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,083 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Jun 30 11:24:23 2018
@author: will
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import butter, lfilter
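# Aliasing refresher for what main() demonstrates: a tone at frequency f
# sampled at rate fs shows up at the image frequency f_alias = |f - k*fs|
# for the integer k that folds it into [0, fs/2]; the second figure below
# visualizes exactly that fold-over for a 50 Hz cosine sampled below Nyquist.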
def cosine(t, amp, freq, phase = 0):
return amp*np.cos(2*np.pi*freq*t + phase)
def view_section(datax, datay, samp_density, osc_win, Nosc = 10):
#Just take a section for viewing: first Nosc oscillations
Psamp = samp_density/osc_win #number of samples per period
sec = int(Psamp*Nosc)
return datax[0:sec], datay[0:sec]
def no_off_spectrum(sig, t):
f = np.fft.rfftfreq(len(sig), d = t[1]-t[0])
sig -= np.mean(sig)
y = np.fft.rfft(sig)
return f, y
class anti_alias:
def __init__(self, tdata, ydata):
self.ydata = ydata
self.tdata = tdata
def butter_bandpass(self, lowcut, highcut, fs, order=5):
nyq = 0.5*fs
low = lowcut/nyq
high = highcut/nyq
b, a = butter(order, [low, high], btype='band')
return b, a
def butter_bandpass_filter(self, lowcut, highcut, fs, order=5):
b, a = self.butter_bandpass(lowcut, highcut, fs, order=order)
y = lfilter(b, a, self.ydata)
return y
def sig_shift(self, freq):
return self.tdata*freq
def sample(self, samp_f):
return self.ydata[0::samp_f]
    def aa_filt(self, freq, fs):
        # The original left this method unfinished ("STILL NEED TO FINISH
        # THIS!!").  As a minimal, assumed completion, band-pass around the
        # frequency of interest and return the shifted time base together
        # with the filtered trace; any downstream sampling is left to the caller.
        yfilt = self.butter_bandpass_filter(freq - 0.1*freq, freq + 0.1*freq, fs)
        yshift = self.sig_shift(freq)
        return yshift, yfilt
def main():
#define some parameters
Amp = 2.
freq = 50.
nyq = 2*freq
window = 100
osc_win = window*freq #number of oscillations in the window
samp_dense = 1e6
t = np.linspace(0, window, samp_dense)
y = cosine(t, Amp, freq)
t_sec, y_sec = view_section(t, y, samp_dense, osc_win)
#samp_sparse
eps = 0.01*osc_win
samp_sparse = 1.9*osc_win
#samp_sparse = 2*osc_win - eps
ts = np.linspace(0, window, samp_sparse)
ys = cosine(ts, Amp, freq)
ts_sec, ys_sec = view_section(ts, ys, samp_sparse, osc_win)
#Now use anti-aliasing BPF before sampling
T = 1/freq
fs = samp_dense/T
filt = anti_alias(t, y)
#Take some FFTs
fd, yd = no_off_spectrum(y, t)
fs, ys = no_off_spectrum(ys, ts)
#plot formatting
plt.rc('text', usetex = False)
plt.rc('font', family = 'serif')
plt.rc('font', size = 22)
plt.rc('axes', linewidth = 2)
plt.rc('lines', linewidth = 3)
plt.rc('legend', fontsize = 16)
plt.rc('figure', figsize = (10, 6))
plt.rc('lines', markersize = 15)
#plots
plt.figure(1)
plt.plot(t_sec, y_sec)
plt.plot(ts_sec, ys_sec, '.', color = 'red')
plt.ylim(-2*Amp, 2*Amp)
#plot FFTs
plt.figure(2)
plt.plot(fd, abs(yd)/max(abs(yd)))
plt.plot((fs-1.9*freq)*-1, abs(ys)/max(abs(ys)))
plt.xlim(-freq-eps, freq+eps)
plt.ylim(-0.5, 1.5)
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
e3baf698b803e39d4869c69af482d97836496848 | 91d96fc4084a55a74f761ed7bc7d0adba533618a | /projects/pset2.0_Forkable_Difficulty_Adjusting/blockchain-visualizer/visualize.py | 352b2f7230f8cd77c28efa64538cda9744295698 | [
"MIT"
] | permissive | Averylamp/mas.s62 | 169bb76f1289a3d4569a952075bfb8e7842e1dca | 382dc036ae014785be4c464ed8c4aef533fd52ab | refs/heads/master | 2020-03-17T16:14:56.613227 | 2018-05-17T03:56:09 | 2018-05-17T03:56:09 | 133,741,785 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,143 | py | import pickle
from datetime import datetime
from datetime import timedelta
import matplotlib.pyplot as plt
import graphviz
def mine_rate_info(endpoint_block, origin_block, block_information, time_interval):
    endpoint_dt = datetime.fromtimestamp(block_information[endpoint_block]['timestamp'])  # use the endpoint argument, not the global highest_block
origin_dt = datetime.fromtimestamp(block_information[origin_block]['timestamp'])
block_hash = endpoint_block
num_buckets = int((endpoint_dt - origin_dt).total_seconds() / time_interval) + 5
mined_buckets = [0]*num_buckets
times_list = [origin_dt + timedelta(seconds=x*time_interval) for x in range(0, num_buckets)]
assert len(times_list) == len(mined_buckets)
while block_hash != '':
block_info = block_information[block_hash]
timestamp = block_information[block_hash]['timestamp']
dt = datetime.fromtimestamp(timestamp)
bucket_ind = int((dt - origin_dt).total_seconds() / time_interval)
mined_buckets[bucket_ind] += 1
block_hash = block_info['blockInformation']['previousHash']
return times_list, mined_buckets
def aggregate_info(mined_buckets):
num_buckets = len(mined_buckets)
aggregate_buckets = [0]*num_buckets
for i in range(num_buckets):
if i == 0:
aggregate_buckets[0] = mined_buckets[0]
else:
aggregate_buckets[i] = aggregate_buckets[i-1] + mined_buckets[i]
return aggregate_buckets
def generate_graphviz(block_information):
g = graphviz.Digraph('G', filename='block_information.gv')
g.node("origin", "")
for block_hash in block_information:
g.node(block_hash, "")
prev_hash = block_information[block_hash]['blockInformation']['previousHash']
if prev_hash == '':
prev_hash = "origin"
g.edge(prev_hash, block_hash)
g.view()
block_information = pickle.load(open("../server-python/block_information.pickle", 'rb'))
highest_block = pickle.load(open("../server-python/highest_block.pickle", 'rb'))
print("Creating graphviz...")
# generate_graphviz(block_information)
print("Done.")
# exit()
# block height 0: 6c179f21e6f62b629055d8ab40f454ed02e48b68563913473b857d3638e23b28
origin_block = "6c179f21e6f62b629055d8ab40f454ed02e48b68563913473b857d3638e23b28"
forked_block = "00001d87846888b85e4b9b757b59a936b0ff33d8128518c78efaa092572efbfd" # Put the hash of another tip here to graph it as well
endpoint_block = highest_block[0]['blockHash']
print(endpoint_block)
time_interval = 0.5 # seconds
times_list, mined_buckets = mine_rate_info(endpoint_block, origin_block, block_information, time_interval)
forked_times_list, forked_mined_buckets = mine_rate_info(forked_block, origin_block, block_information, time_interval)
aggregate_buckets = aggregate_info(mined_buckets)
forked_aggregate_buckets = aggregate_info(forked_mined_buckets)
print("Plotting data...")
# line1, = plt.plot(times_list, mined_buckets, label="blocks mined / {}s".format(time_interval))
line2, = plt.plot(times_list, aggregate_buckets, label="total blocks mined")
# line3, = plt.plot(forked_times_list, forked_mined_buckets, label="attacker blocks mined / {}s".format(time_interval))
line4, = plt.plot(forked_times_list, forked_aggregate_buckets, label="attacker total blocks mined")  # the fork's own time axis keeps the lengths matched
plt.legend(handles=[line2, line4])
plt.show()
print("Done")
| [
"[email protected]"
] | |
8c08e9331e2a444c62e36c3a44d664c13b4c40df | 122cef3b7e5c65f4df2b5d17b88bbb6986c16b3f | /script_1.py | 462eafb9a1a567ee22ba559cf0538723cbbe4ca1 | [
"MIT"
] | permissive | markregine/FHIR_Python_fhirclient_testing_open | 98d843c6d2c223256afbb948c256d09fa7276e84 | 44061ee1522bfc7de5fe553683d6cb778c4b5f4e | refs/heads/master | 2020-04-30T02:44:19.943085 | 2019-03-19T18:13:00 | 2019-03-19T18:13:00 | 176,568,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 772 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
from fhirclient import client
import fhirclient.models.patient as p
import fhirclient.models.bundle as bundle
settings = {'app_id':'xxxx',
'api_base': 'https://r2.smarthealthit.org',
'patient_id': 'smart-1137192'}
# In[2]:
settings = {'app_id': 'my-app',
'api_base': 'https://fhir.sitenv.org/open/fhir',
'app_secret':'my-app-secret-123',
'launch_token': 'bXktYXBwOm15LWFwcC1zZWNyZXQtMTIz'
}
# In[3]:
smart = client.FHIRClient(settings=settings)
# In[4]:
#smart.ready
#smart.prepare()
#smart.ready
#smart.authorize_url
# In[5]:
patient = p.Patient.read('?_id=1&_format=json', smart.server)
patient.birthDate.isostring
# In[ ]:
| [
"[email protected]"
] | |
447215391bd91ac4d5a721c47f8d0298d4eb5b3f | c001d8cff7e634bfa19d682ccdcf5261bc7bf397 | /cotizacionweb/migrations/0005_auto_20160420_1104.py | f9051f65ba22309b3fc40fa1bad989072d8ebdc8 | [] | no_license | yusnelvy/mtvmcotizacionv02 | 4053a6883519901e3652a141ef83c297c5aa0ccd | f0d94faff9c721f25018b7db12a07786508da565 | refs/heads/master | 2021-01-21T12:58:49.014716 | 2016-05-06T20:49:59 | 2016-05-06T20:49:59 | 50,135,715 | 0 | 0 | null | 2016-05-25T12:32:34 | 2016-01-21T20:48:27 | CSS | UTF-8 | Python | false | false | 2,464 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contenedor', '0005_contenedortipicopormueble_tipo_de_contenido'),
('cotizacionweb', '0004_auto_20160414_1529'),
]
operations = [
migrations.RenameField(
model_name='serviciomueble',
old_name='porcentaje_complejidad',
new_name='cantidad',
),
migrations.RenameField(
model_name='serviciomueble',
old_name='descripcion_monto_servicio',
new_name='descripcion_cantidad',
),
migrations.RemoveField(
model_name='cotizacionhistoricofecha',
name='fecha_actual',
),
migrations.RemoveField(
model_name='cotizacionhistoricofecha',
name='hora_actual',
),
migrations.RemoveField(
model_name='serviciomueble',
name='complejidad_servicio',
),
migrations.RemoveField(
model_name='serviciomueble',
name='incluido',
),
migrations.RemoveField(
model_name='serviciomueble',
name='monto_servicio',
),
migrations.RemoveField(
model_name='serviciomueble',
name='monto_servicio_asignado',
),
migrations.AddField(
model_name='contenedormueble',
name='tipo_de_contenido',
field=models.ForeignKey(to='contenedor.TipoDeContenido', default=1),
preserve_default=False,
),
migrations.AddField(
model_name='cotizacionhistoricofecha',
name='fecha',
field=models.DateTimeField(default='2016-04-01 00:00:00'),
preserve_default=False,
),
migrations.AddField(
model_name='cotizacionservicio',
name='cantidad_servicio',
field=models.DecimalField(max_digits=7, decimal_places=2, default=1),
preserve_default=False,
),
migrations.AddField(
model_name='fechadecotizacion',
name='obligatoria',
field=models.BooleanField(default=None),
),
migrations.AlterField(
model_name='cotizacionestado',
name='fecha_registro',
field=models.DateTimeField(auto_now_add=True),
),
]
| [
"[email protected]"
] | |
5957173b293dfaea06fcdce0d133e9d754b9f80c | c030e3de3b6822c859f453456dc5f34845ae86da | /2.add-two-numbers.py | 511ca1d156495ca20cf1d8ba552cce7057e77629 | [] | no_license | xxli807/PythonForFun | 5ca07e5728d04e08019efe33f8bc126256ec05e6 | 5c980466146450309949388ca3ab0cedcf5f12fc | refs/heads/master | 2020-05-16T23:24:57.798569 | 2019-05-11T12:28:38 | 2019-05-11T12:28:38 | 183,364,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | #
# @lc app=leetcode id=2 lang=python3
#
# [2] Add Two Numbers
#
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
        # Digits are stored in reverse order, so addition proceeds head-first.
        root = ListNode((l1.val + l2.val) % 10)
        carry = (l1.val + l2.val) // 10
        node = root
        while l1.next or l2.next:
            # A missing node on the shorter list contributes digit 0.
            s = (l1.next.val if l1.next else 0) + (l2.next.val if l2.next else 0) + carry
            node.next = ListNode(s % 10)
            if l1.next:
                l1 = l1.next
            if l2.next:
                l2 = l2.next
            node = node.next
            carry = s // 10
        if carry:
            # A leftover carry appends one final digit (e.g. 5 + 5 = 10).
            node.next = ListNode(carry)
        return root
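
# Quick usage sketch (added; not part of the original submission):
# 342 + 465 = 807, with digits stored in reverse order.
if __name__ == "__main__":
    def build(digits):
        head = ListNode(digits[0])
        node = head
        for d in digits[1:]:
            node.next = ListNode(d)
            node = node.next
        return head

    result = Solution().addTwoNumbers(build([2, 4, 3]), build([5, 6, 4]))
    out = []
    while result:
        out.append(result.val)
        result = result.next
    print(out)  # expected: [7, 0, 8]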
| [
"[email protected]"
] | |
8ffd467913fac03b01efbb61d71509207cf1337c | c5cee987dd9fc79778ae0804048af13f76d7567f | /perTreatment.py | 9b3b7a6b280e3e55d11461e487913dfcd2ed9ed5 | [] | no_license | LingGuguang/new-word-discover | 063dcdf0cc3d95be2306ed8405e3c8cf43d8e829 | 37640afeb8c10216fb44d05d7ed7a1171910f710 | refs/heads/master | 2020-06-01T16:25:11.812212 | 2019-06-08T06:07:35 | 2019-06-08T06:07:35 | 190,849,114 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | from Delete import Delete
import pandas as pd
import jieba
import re
# Deduplicate the danmu (bullet-screen comments) below
txt = 'danmu.txt'
save_txt = 'save.txt'
ammos = []
with open(txt, 'r', encoding='utf-8') as t:
for ammo in t.readlines():
ammos.append(ammo[:-1])
#print(len(ammos)) = (48000)
dele = Delete(ammos)
ammos = dele.delete()
ammos = [re.sub(r"[0-9\s+\.\!\/_,$%^*()?;;:-【】+\"\']+|[+——!,;:。?、~@#¥%……&*()]+", " ", ammo)
for ammo in ammos]
ammos = [ammo.replace(' ','') for ammo in ammos]
with open(save_txt, 'w', encoding='utf-8') as f:
for ammo in ammos:
if(len(ammo)>2):
f.write(ammo + '\n')
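
# jieba is imported above but never used; a follow-up word-segmentation pass
# (a sketch, assuming that was the intent) could look like:
# words = [jieba.lcut(ammo) for ammo in ammos]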
| [
"[email protected]"
] | |
c856a237edfe16a2af62af7f31bb6f4ea80ef390 | c96a5c821a24cdb41095d33ee734f94f35e81fb2 | /octoprint_rgbcontrol/__init__.py | 055ce2c4304023baf7719c59ff01a1c596c927cd | [] | no_license | mikedmor/Octoprint_RGBControl | 7915c806942f53c930ed672be71bdd7a2784699b | 0711896004079cc2efcdf74775a44f2892c16d0f | refs/heads/master | 2020-03-16T23:10:44.868686 | 2018-05-12T20:31:27 | 2018-05-12T20:31:27 | 133,068,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,607 | py | # coding=utf-8
from __future__ import absolute_import
import octoprint.plugin
import octoprint.settings
class RGBControlPlugin(octoprint.plugin.StartupPlugin,
octoprint.plugin.TemplatePlugin,
octoprint.plugin.SettingsPlugin,
octoprint.plugin.AssetPlugin):
def get_assets(self):
return dict(
js=["js/jscolor/jscolor.js","js/rgbcontrol.js"],
css=["css/rgbcontrol.css","css/iris.css"]
)
def get_template_configs(self):
return [
dict(type="generic", template="rgbcontrol.jinja2", custom_bindings=True)
]
##~~ Softwareupdate hook
def get_version(self):
return self._plugin_version
def get_update_information(self):
return dict(
multicam=dict(
displayName="RGBControl",
displayVersion=self._plugin_version,
# version check: github repository
type="github_release",
user="mikedmor",
repo="OctoPrint_RGBControl",
current=self._plugin_version,
# update method: pip
pip="https://github.com/mikedmor/OctoPrint_RGBControl/archive/{target_version}.zip"
)
)
__plugin_name__ = "RGBControl"
def __plugin_load__():
global __plugin_implementation__
__plugin_implementation__ = RGBControlPlugin()
global __plugin_hooks__
__plugin_hooks__ = {
"octoprint.plugin.softwareupdate.check_config": __plugin_implementation__.get_update_information
}
| [
"[email protected]"
] | |
dd9b515004d1932557315b736706692edce7c9f4 | a7a11639a6c7fddc702b55530b7a86e77796813c | /green_rose_shop/settings.py | 43f273257b1b5a1fa5f4fca49f6f37292fd1e069 | [] | no_license | venindobhasa/green_rose_shop | 2f9f9ec7eaa25e7e9f85d7d5da8c9dcf15e195c3 | 44ae92c29fe9eebbdc5c63177292907697263a17 | refs/heads/master | 2022-11-05T04:30:14.459444 | 2020-06-19T02:50:21 | 2020-06-19T02:50:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,458 | py | """
Django settings for green_rose_shop project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'dsiim%c=7h#lup128-qn+v^6j3qv!9&3d*e%%1s0jn**-2qmzc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#custom apps
'user_login',
'rest_framework',
'rest_framework.authtoken',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'green_rose_shop.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'green_rose_shop.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'green_rose',
'USER': 'mysql_username',
'PASSWORD': 'mysql_password',
'HOST': '127.0.0.1',
'PORT' : '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
f25e7844fc3f123aff20df8ed51e37a677b10356 | 1107279f4447455a2fea8ff4341e856845cf8e57 | /testes/I2C_sniffer.py | faa21308ec2891c94910ec8d4369c332d9d2c356 | [] | no_license | rmborges/Distributed-Lighting-Control | a0d9cea26b5c538bc41d328e94bc805c97b56a9f | e59a8c5f299cbf739352d1db98787e934e9c4c16 | refs/heads/master | 2021-03-30T18:03:04.574228 | 2018-01-07T15:59:11 | 2018-01-07T15:59:11 | 113,502,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,913 | py | #!/usr/bin/env python
import time
import pigpio
class sniffer:
"""
A class to passively monitor activity on an I2C bus. This should
work for an I2C bus running at 100kbps or less. You are unlikely
to get any usable results for a bus running any faster.
"""
def __init__(self, pi, SCL, SDA, set_as_inputs=False):
"""
Instantiate with the Pi and the gpios for the I2C clock
and data lines.
If you are monitoring one of the Raspberry Pi buses you
must set set_as_inputs to False so that they remain in
I2C mode.
The pigpio daemon should have been started with a higher
than default sample rate.
For an I2C bus rate of 100Kbps sudo pigpiod -s 2 should work.
A message is printed for each I2C transaction formatted with
"[" for the START
"XX" two hex characters for each data byte
"+" if the data is ACKd, "-" if the data is NACKd
"]" for the STOP
E.g. Reading the X, Y, Z values from an ADXL345 gives:
[A6+32+]
[A7+01+FF+F2+FF+06+00-]
"""
self.pi = pi
self.gSCL = SCL
self.gSDA = SDA
self.FALLING = 0
self.RISING = 1
self.STEADY = 2
self.in_data = False
self.byte = 0
self.bit = 0
self.oldSCL = 1
self.oldSDA = 1
self.transact = ""
if set_as_inputs:
self.pi.set_mode(SCL, pigpio.INPUT)
self.pi.set_mode(SDA, pigpio.INPUT)
self.cbA = self.pi.callback(SCL, pigpio.EITHER_EDGE, self._cb)
self.cbB = self.pi.callback(SDA, pigpio.EITHER_EDGE, self._cb)
def _parse(self, SCL, SDA):
"""
Accumulate all the data between START and STOP conditions
into a string and output when STOP is detected.
"""
if SCL != self.oldSCL:
self.oldSCL = SCL
if SCL:
xSCL = self.RISING
else:
xSCL = self.FALLING
else:
xSCL = self.STEADY
if SDA != self.oldSDA:
self.oldSDA = SDA
if SDA:
xSDA = self.RISING
else:
xSDA = self.FALLING
else:
xSDA = self.STEADY
if xSCL == self.RISING:
if self.in_data:
if self.bit < 8:
self.byte = (self.byte << 1) | SDA
self.bit += 1
else:
self.transact += '{:02X}'.format(self.byte)
if SDA:
self.transact += '-'
else:
self.transact += '+'
self.bit = 0
self.byte = 0
elif xSCL == self.STEADY:
if xSDA == self.RISING:
if SCL:
self.in_data = False
self.byte = 0
self.bit = 0
self.transact += ']' # STOP
print (self.transact)
self.transact = ""
if xSDA == self.FALLING:
if SCL:
self.in_data = True
self.byte = 0
self.bit = 0
self.transact += '[' # START
def _cb(self, gpio, level, tick):
"""
Check which line has altered state (ignoring watchdogs) and
call the parser with the new state.
"""
SCL = self.oldSCL
SDA = self.oldSDA
if gpio == self.gSCL:
if level == 0:
SCL = 0
elif level == 1:
SCL = 1
if gpio == self.gSDA:
if level == 0:
SDA = 0
elif level == 1:
SDA = 1
self._parse(SCL, SDA)
def cancel(self):
"""Cancel the I2C callbacks."""
self.cbA.cancel()
self.cbB.cancel()
if __name__ == "__main__":
    # time and pigpio are already imported at the top of this module, and the
    # self-import of I2C_sniffer is unnecessary when running this file directly.
    pi = pigpio.pi()
    s = sniffer(pi, 3, 2, False)  # leave gpios 2/3 in I2C mode
time.sleep(60000)
s.cancel()
pi.stop()
| [
"[email protected]"
] | |
40723c472ada5bfb1875a6690c1f32bc5eb18c60 | ee193c0df3494673769e107094402a28bfbeba74 | /remove_topics.py | 49749093c06a9e673d5cc0b266c106b56ab9ef08 | [] | no_license | always-akshat/summarization | a41ad675989712def846af32d49ec87a65fe826d | 25f65abfcd2301c4879e8f21e5f5d0e4973aa31c | refs/heads/master | 2020-04-14T09:25:03.713117 | 2014-10-28T05:50:40 | 2014-10-28T05:50:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22 | py | __author__ = 'akshat'
| [
"[email protected]"
] | |
77b98aa829530524b65763daa60caaacde33ebbb | 6a0ae86bca2d2ece6c92efd5594c0e3b1777ead7 | /EDBRCommon/python/datasets/summer12_MWp_350_gg_cff.py | 990eb65ae0bde2bdcbdbdfc05e23b0bd86d0be75 | [] | no_license | wangmengmeng/ExoDiBosonResonances | c4b5d277f744e1b1986df9317ac60b46d202a29f | bf5d2e79f59ad25c7a11e7f97552e2bf6a283428 | refs/heads/master | 2016-09-06T14:54:53.245508 | 2014-06-05T15:02:37 | 2014-06-05T15:02:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,971 | py | import FWCore.ParameterSet.Config as cms
readFiles = cms.untracked.vstring()
source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
duplicateCheckMode = cms.untracked.string("noDuplicateCheck"),
fileNames = readFiles
)
readFiles.extend([
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_10_1_BYR.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_11_1_mnN.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_12_1_p9d.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_13_1_mZc.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_14_1_I6o.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_15_1_NBO.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_16_1_L2E.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_17_1_WWV.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_18_1_O71.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_19_1_gsK.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_1_1_WXk.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_20_1_J7a.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_21_1_mmN.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_22_1_I5O.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_23_1_Viu.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_24_1_Voj.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_25_1_uHN.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_26_1_uew.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_27_1_Gaa.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_28_1_e7z.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_29_1_FfO.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_2_1_HVa.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_30_1_gOp.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_31_1_V1o.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_32_1_c1S.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_33_1_rI6.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_34_1_JGi.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_35_1_agq.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_36_1_Yfb.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_37_1_TY7.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_38_1_zd0.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_39_1_L7L.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_3_1_Scl.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_40_1_cVm.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_41_1_Wg0.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_42_1_ffs.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_43_1_heS.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_44_1_46A.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_45_1_YhP.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_46_1_OZD.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_47_1_f6v.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_48_1_A3s.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_49_1_bqW.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_4_1_dUY.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_50_1_GNz.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_51_1_bYJ.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_5_1_QW1.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_6_1_U9k.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_7_1_KVy.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_8_1_OQf.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_9_1_mjs.root',
] )
| [
"[email protected]"
] | |
d8740f4f41ffea793ecdef962f03b008182d8ec7 | c07ecb7b177d3296580dec755e4c5a34c01f4132 | /120_Triangle.py | 3b5add816517340c3027d281b3ef7a59950e827b | [] | no_license | bokveizen/leetcode | 78c6e00a97ab9b658752dfcbf6380918ce5611d7 | 7c7eb77e23310563dbbabae4ff9c726620a8dd6d | refs/heads/master | 2021-07-09T19:59:15.338846 | 2021-03-18T06:28:14 | 2021-03-18T06:28:14 | 231,543,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | # https://leetcode-cn.com/problems/triangle/
class Solution:
def minimumTotal(self, triangle: List[List[int]]) -> int:
n = len(triangle)
temp = triangle[-1] # triangle[n-1]
while True:
if n == 1:
return temp[0]
temp = [min(temp[i], temp[i + 1]) for i in range(n - 1)]
n -= 1
for i in range(n):
temp[i] += triangle[n - 1][i]
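
# Usage sketch (added; the sample triangle is the standard LeetCode example):
# [[2],[3,4],[6,5,7],[4,1,8,3]] has minimum path sum 2 + 3 + 5 + 1 = 11.
if __name__ == "__main__":
    print(Solution().minimumTotal([[2], [3, 4], [6, 5, 7], [4, 1, 8, 3]]))  # 11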
| [
"[email protected]"
] | |
edb0dd2db6e36653e6d52a2316956c6790eed846 | e3fe44de92acb664c65dfc53c4dc7450ea994c81 | /utils/fast_inverse.py | 1ede5e863c4570d8c6bc7af2a213ff1c3677bc66 | [] | no_license | chixma/Visual-Template-Free-Form-Parsing | a723022857b1a9adb641d1f9c9f39564ed65f67f | b05e80f5fbc5b1296ab1e4b968ae206b7e556e4b | refs/heads/master | 2020-07-22T13:27:39.873315 | 2019-09-06T20:42:32 | 2019-09-06T20:42:32 | 207,217,082 | 1 | 0 | null | 2019-09-09T03:33:43 | 2019-09-09T03:33:43 | null | UTF-8 | Python | false | false | 1,420 | py | import numpy as np
import torch
def adjoint(A):
"""compute inverse without division by det; ...xv3xc3 input, or array of matrices assumed"""
AI = np.empty_like(A)
    for i in range(3):
AI[...,i,:] = np.cross(A[...,i-2,:], A[...,i-1,:])
return AI
def inverse_transpose(A):
"""
efficiently compute the inverse-transpose for stack of 3x3 matrices
"""
I = adjoint(A)
det = dot(I, A).mean(axis=-1)
return I / det[...,None,None]
def inverse(A):
"""inverse of a stack of 3x3 matrices"""
return np.swapaxes( inverse_transpose(A), -1,-2)
def dot(A, B):
"""dot arrays of vecs; contract over last indices"""
return np.einsum('...i,...i->...', A, B)
def adjoint_torch(A):
AI = A.clone()
    for i in range(3):
AI[...,i,:] = torch.cross(A[...,i-2,:], A[...,i-1,:])
return AI
def inverse_transpose_torch(A):
I = adjoint_torch(A)
det = dot_torch(I, A).mean(dim=-1)
return I / det[:,None,None]
def inverse_torch(A):
return inverse_transpose_torch(A).transpose(1, 2)
def dot_torch(A, B):
A_view = A.view(-1,1,3)
B_view = B.contiguous().view(-1,3,1)
out = torch.bmm(A_view, B_view)
out_view = out.view(A.size()[:-1])
return out_view
if __name__ == "__main__":
A = np.random.rand(2,3,3)
I = inverse(A)
A_torch = torch.from_numpy(A)
I_torch = inverse_torch(A_torch)
print(I)
print(I_torch)
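    # Verification sketch (added; not in the original): both implementations
    # should agree with numpy's reference batched inverse.
    assert np.allclose(I, np.linalg.inv(A))
    assert np.allclose(I_torch.numpy(), np.linalg.inv(A))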
| [
"[email protected]"
] | |
ddebfadd0c2daa17807f8177153eeb52ef49dbd9 | df2499ddc1e44aa36cf82d652a3d4501efdeea14 | /burnerbotserver/admin.py | 3ae9dd6521daad321efcb966d5637ac8d4d5ae87 | [] | no_license | johngrantuk/burnerbotserver | 5e232c85df58076818c4d5353440bfa0b82545c2 | 3a102d46073a6ed25ca8cca8a4cea465f6d65cdc | refs/heads/master | 2020-05-17T00:19:01.545866 | 2019-04-25T09:04:06 | 2019-04-25T09:04:06 | 183,393,293 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | from django.contrib import admin
from .models import UserDetail
admin.site.register(UserDetail)
| [
"[email protected]"
] | |
c869b985f6efcc06cb7b2c9d71bff7a64242bbeb | 856c2ce82997975f747f04b823819e6c65624477 | /blog/urls.py | f69c1a3a567d53eeb5703fa1058d4968a4795da9 | [] | no_license | leadmeout/simple_blog | 348f83db921d6b0a9cf46d24cdb87b160552d61f | edfef1295dbaa9a35d60e6d7fa4cfb0c9a267da0 | refs/heads/master | 2023-01-04T10:30:15.605104 | 2020-10-28T20:14:49 | 2020-10-28T20:14:49 | 308,122,152 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | """blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('blog_posts.urls')),
]
| [
"[email protected]"
] | |
2a46dd981ecb739e9e46f9c71da3e33492974bb5 | a075ee186532121edbe5febd9978d9c98fb7d888 | /app/test/test_service.py | 2d99be24b353940df520ed27d15ce723d6f85acc | [] | no_license | HoonJin/fastapi-sample | ef68c77b4129bf6d855ef23d19089aba4e81f3ee | 0b535711e47bf0c0b2d2b2e984859223d0bfb502 | refs/heads/master | 2022-12-24T00:27:41.655204 | 2020-09-29T10:10:52 | 2020-09-29T10:10:52 | 249,173,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | from config.exceptions import NotFoundException
from database import db
from .test_dao import TestDao
class TestService:
@staticmethod
async def get_all_pagination(page: int, per_page: int) -> dict:
total_cnt = await TestDao.get_all_count()
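        # Ceiling division: any remainder adds one more page.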
total_page = int(total_cnt / per_page) + (0 if total_cnt % per_page == 0 else 1)
offset = (page - 1) * per_page
result = await TestDao.get_all_with_offset_and_limit(offset, per_page)
return {
'content': result,
'total_cnt': total_cnt,
'total_page': total_page
}
@staticmethod
async def delete(t_id: int) -> None:
row = await TestDao.find_by_id(t_id)
if row is not None:
async with db.transaction():
await TestDao.delete_by_id(t_id)
else:
raise NotFoundException
| [
"[email protected]"
] | |
7e6327db88706423541fdcd736a385bd77c5cb65 | 4cbe1145ef29aab3ef17c5ebd74dd94c2fece7c7 | /Guess_the_country_name.py | 90fcc438f7e08aaa6de061bf0db40eacbdddfe9e | [
"MIT"
] | permissive | jyothi1910/Guess_the_country_name | 2903b4d943b8d9495559c8d68756fbc9ce88e0d8 | fc154488a4f3ff6aa7e27a127963f2c5a4c42628 | refs/heads/main | 2023-06-06T09:53:28.414026 | 2021-06-21T07:00:16 | 2021-06-21T07:00:16 | 378,835,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py |
import random
Country_names=["India","Korea","Japan","China","USA"]
print(Country_names)
randomName = random.choice(Country_names)
for guessTaken in range(1,4):
guess=input("Take a guess:")
if guess != randomName:
print("Your guess is wrong, try another guess")
elif guess == randomName:
print("Well done you guess is correct, you guessed country name in ",end="")
print(str(guessTaken)+" guess")
break
else:
    # for-else: runs only when the loop ends without a correct guess
    print("Nope, the correct country name is " + randomName)
| [
"[email protected]"
] | |
f956625b352f998eb26833b0e51195907af6cf82 | b3bac39420c4864de4a9f53a27845ba71e4f08fa | /App.py | 864555395f640864608ca50ba8d233c23cbcf07b | [] | no_license | nickbetke/Music-Mood-Recognition | bb2c6377de99b47b3e629bb02853412e63494e80 | 3811e0d3bb28ae4e06039f7d2ab463cbcc7a667f | refs/heads/master | 2021-02-10T02:55:55.878712 | 2020-10-17T14:35:53 | 2020-10-17T14:35:53 | 244,346,711 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,490 | py | import tkinter as tk # python 3
from tkinter import *
from tkinter.ttk import *
from tkinter import font as tkFont
from tkinter.filedialog import askopenfilename
from tkinter import messagebox
import numpy as np
from pydub import AudioSegment as pas
import librosa as lr
import joblib
global do
class SampleApp(tk.Tk):
def __init__(self, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs)
self.title_font = tkFont.Font(family='Helvetica', size=20, weight="bold", slant="italic")
self.mood_font = tkFont.Font(family='Helvetica', size=16, slant="italic")
container = tk.Frame(self)
container.pack(side="top", fill="both", expand=True)
container.grid_rowconfigure(0, weight=1)
container.grid_columnconfigure(0, weight=1)
self.frames = {}
for F in (StartPage, PageOne):
page_name = F.__name__
frame = F(parent=container, controller=self)
self.frames[page_name] = frame
frame.grid(row=0, column=0, sticky="nsew")
self.show_frame("StartPage")
def show_frame(self, page_name):
frame = self.frames[page_name]
frame.tkraise()
class StartPage(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent,bg = "crimson")
self.controller = controller
label = tk.Label(self, text="Music Mood Recogniser", font=controller.title_font)
label.pack(side="top", fill="x", pady=20)
#for sending filename to other page
self.filename = ""
self.lbl = tk.Label(self, text="Select an Audio file", font=controller.mood_font, bg = "bisque", fg="black")
self.lbl.pack(side="top", fill="x", pady=200)
#==================================================
# style = Style()
# style.configure('TButton', font =
# ('calibri', 20, 'bold'),
# borderwidth = '4')
# style.configure('TButton', font =
# ('calibri', 10, 'bold', 'underline'),
# foreground = 'red')
#================================================
self.button1 = tk.Button(self, text="Browse", bg="greenyellow", command=self.clicked)
self.button1.place(x =250, y = 400)
self.button2 = tk.Button(self, text="Next", bg="chartreuse",command=lambda: controller.show_frame("PageOne"))
self.button2["state"] = "disabled"
self.button2.place(x =400, y = 400)
    def lop(self):
        # Unused helper; calling predict on the class rather than an instance
        # would raise a TypeError if this were ever invoked.
        PageOne.predict()
def clicked(self):
global do
self.filename = askopenfilename(title = "Select an audio file", filetypes = (("mp3 files", "*.mp3*"),
("m4a files", "*.m4a*"),
("All files", "*.*")))
if self.filename:
do.set(self.filename)
self.button2["state"] = "normal"
self.lbl.configure(text=self.filename)
print(self.filename)
#Frame 2
class PageOne(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent, bg = "crimson")
self.controller = controller
# global do
# k1 = do.get()
label = tk.Label(self, text="Music Mood Recogniser", font=controller.title_font)
label.pack(side="top", fill="x", pady=20)
button = tk.Button(self, text="Go Back", bg="chartreuse", command=lambda: controller.show_frame("StartPage"))
button.pack()
button2 = tk.Button(self, text="Recognize", bg="chartreuse", command = self.testg)
button2.pack()
#mood Label
label2 = tk.Label(self, text="mood", font=controller.title_font, bg = "cornsilk", fg = "black")
label2.pack(side="top", fill="x", pady=50)
# label2.place(x=100 ,y=250)
#self.predict()
def testg(self):
global do
f1 = do.get()
print("mooded")
print(f1)
self.predict()
def predict(self):
global do
ad = do.get()
n = ad.split('/')
i = 'cutpiece.mp3'
i = n[-1]
if 'mp3' in i:
song = pas.from_mp3(ad)
if 'm4a' in i:
song = pas.from_file(ad)
ii = i[:-3] + 'wav'
op = song[:30000]
op.export('/home/nick/1PROjectX/trash/' + ii, format = 'wav')
feat1 = np.empty((0,181))
        audio, freq = lr.load('/home/nick/1PROjectX/trash/' + ii)  # same directory the clip was exported to above
stft = np.abs(lr.stft(audio))
mfcc = np.mean(lr.feature.mfcc(y = audio, sr = freq, n_mfcc=40).T, axis=0)
mel = np.mean(lr.feature.melspectrogram(audio, sr = freq).T, axis=0)
contrast = np.mean(lr.feature.spectral_contrast(S = stft, sr = freq).T, axis=0)
tonnetz = np.mean(lr.feature.tonnetz(y = lr.effects.harmonic(audio), sr = freq).T, axis=0)
ext_feat = np.hstack([mfcc, mel, contrast, tonnetz])
feat1 = np.vstack([feat1, ext_feat])
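        # Dimension note (added): 40 MFCCs + 128 mel bands (librosa's default)
        # + 7 spectral-contrast bands + 6 tonnetz dims = 181, matching feat1.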
filename = '/46/decison-tree-model36.sav'
x = joblib.load(filename)
ww = x.predict(feat1)[0]
print(x.predict(feat1)[0])
label3 = tk.Label(self, text="DTree: " + str(ww), font='Helvetica 12 bold', bg = "lightsteelblue4", fg = "white")
label3.pack(side="top", fill="x", pady=20)
filename = '/46/linear-svm-model36.sav'
x = joblib.load(filename)
ww = x.predict(feat1)[0]
label4 = tk.Label(self, text="SVM: " + str(ww), font='Helvetica 12 bold', bg = "lightsteelblue4", fg = "white")
label4.pack(side="top", fill="x", pady=20)
filename = '/46/naive-bayes-model36.sav'
x = joblib.load(filename)
ww = x.predict(feat1)[0]
label5 = tk.Label(self, text="NaiveBayes: " + str(ww), font='Helvetica 12 bold', bg = "lightsteelblue4", fg = "white")
label5.pack(side="top", fill="x", pady=20)
filename = '/46/random-forest-model36.sav'
x = joblib.load(filename)
ww = x.predict(feat1)[0]
label6 = tk.Label(self, text="Randomforest: " + str(ww), font='Helvetica 12 bold', bg = "lightsteelblue4", fg = "white")
label6.pack(side="top", fill="x", pady=20)
# label2['text'] = 'mood: ' + str(ww)
if __name__ == "__main__":
app = SampleApp()
app.title("Music Mood Recogniser")
app.geometry("800x720+0+0")
app.resizable(True, True)
do = StringVar()
app.mainloop()
| [
"[email protected]"
] | |
da05f206093955bc97ef19a62bc0a70e69711fc6 | 5e9dacbb7a9613b7c8d8c92398bb66926a314c38 | /script.py | ecff88305875f987118660b170ce2849290d9f87 | [] | no_license | pol9111/tencent_WechatOffAcc_auto | 645b95bfd893706df4651f1e8f67ea1dc57a03de | 3aa2a9a8a78796d5b829f9bf49cc849713ed41b7 | refs/heads/master | 2020-03-24T16:24:08.783424 | 2018-07-30T04:07:14 | 2018-07-30T04:07:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,250 | py | import json
import pymongo
import re
from config import *
def response(flow):
global like_num, title, pub_time, read_num, comment_num
client = pymongo.MongoClient(MONGO_URL)
db = client[MONGO_DB]
table = db[MONGO_COLLECTION]
    # Capture the WeChat appmsgext JSON, which carries the read and like counts
url_msg = 'mp.weixin.qq.com/mp/getappmsgext?'
if url_msg in flow.request.url:
text_msg = flow.response.text
data_py = json.loads(text_msg)
content = data_py.get('appmsgstat')
like_num = content.get('like_num')
read_num = content.get('read_num')
comment_num = data_py.get('comment_count')
    # Capture the article response and extract the title and publish time
url_article = 'mp.weixin.qq.com/s?'
if url_article in flow.request.url:
text_arti = flow.response.text
pub_time = re.findall(r'publish_time.*"(\d+-\d+-\d+)".*', text_arti)[0]
title = re.findall(r'msg_title\s=\s"(.*?)";', text_arti)[0]
data = {
'文章标题': title,
'发布时间': pub_time,
'阅读数': read_num,
'点赞数': like_num,
'评论数': comment_num,
}
print(data)
table.update({'文章标题': title}, {'$set': data}, True)
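
# Run sketch (added; flags assumed): load this file as a mitmproxy addon,
# e.g. `mitmdump -s script.py`, with the phone's proxy pointed at this host.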
| [
"[email protected]"
] | |
f9da954cdcb17cee51e9d873568d288fdf2c9cdb | f6f29c2fa719c53eee73de2acd86db9e1278182e | /code_wars/calculating_with_functions.py | a3f2c7e84a244f5b3dd4d6052494c5ab40d538cb | [] | no_license | byt3-m3/python_code_practice | ca08320e1778449d30204b65f15903d5830b7975 | 40e215c4d4ab62cf7d55d2456d94550335825906 | refs/heads/master | 2023-07-24T08:29:06.624850 | 2021-09-04T02:39:32 | 2021-09-04T02:39:32 | 256,984,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,290 | py | '''
This time we want to write calculations using functions and get the results. Let's have a look at some examples:
seven(times(five())) # must return 35
four(plus(nine())) # must return 13
eight(minus(three())) # must return 5
six(divided_by(two())) # must return 3
Requirements:
There must be a function for each number from 0 ("zero") to 9 ("nine")
There must be a function for each of the following mathematical operations: plus, minus, times, dividedBy (divided_by in Ruby and Python)
Each calculation consists of exactly one operation and two numbers
The outermost function represents the left operand, the innermost function represents the right operand
Division should be integer division. For example, this should return 2, not 2.666666...:
'''
def _process(data, base):
num = data[0]
oper = data[1]
if oper == "*":
return base * num
if oper == "/":
return base // num
if oper == "+":
return base + num
if oper == "-":
return base - num
def zero(data=None):
if isinstance(data, tuple):
return _process(data, 0)
return 0
def one(data=None):
if isinstance(data, tuple):
return _process(data, 1)
return 1
def two(data=None):
if isinstance(data, tuple):
return _process(data, 2)
return 2
def three(data=None):
if isinstance(data, tuple):
return _process(data, 3)
return 3
def four(data=None):
if isinstance(data, tuple):
return _process(data, 4)
return 4
def five(data=None):
if isinstance(data, tuple):
return _process(data, 5)
return 5
def six(data=None):
if isinstance(data, tuple):
return _process(data, 6)
return 6
def seven(data=None):
if isinstance(data, tuple):
return _process(data, 7)
return 7
def eight(data=None):
if isinstance(data, tuple):
return _process(data, 8)
return 8
def nine(data=None):
if isinstance(data, tuple):
return _process(data, 9)
return 9
def plus(num):
return (num, "+")
def minus(num):
return (num, "-")
def times(num):
return (num, "*")
def divided_by(num):
return (num, "/")
result_1 = one(minus(five()))
result_2 = five(times(seven()))
print(result_1)
print(result_2)
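
# The kata examples from the docstring above, as quick self-checks:
assert seven(times(five())) == 35
assert four(plus(nine())) == 13
assert eight(minus(three())) == 5
assert six(divided_by(two())) == 3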
| [
"[email protected]"
] | |
a901c6830b2d98e5814d5c7d028e4744d7464039 | fa8344a5f37ffd5f10bef00a6f7cab6acc6382f7 | /apps/family/views.py | 5b69f137536d5f86284785fb0032fdfff6f28e77 | [] | no_license | hustels/django-crud-ajax-demo | 2fcd8d3e1c050a76ec1d7e16cfd04fbf1175d8fa | 06de79bf60918b7e270491e4d5cbc7e1422ffd23 | refs/heads/master | 2021-01-12T19:43:44.480199 | 2011-11-16T01:07:31 | 2011-11-16T01:07:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,466 | py | # -*- coding: utf-8 -*-
from django.views.generic.simple import direct_to_template
from django.http import HttpResponse
from django.db.models import Q
from family.models import People
from family.forms import PeopleForm, ImporterForm
def index(request, form=None, peoples=None, importer_form=None):
if form is None:
form = PeopleForm()
if peoples is None:
peoples = People.objects.all()
if importer_form is None:
importer_form = ImporterForm()
context = {'form': form, 'importer_form': importer_form, 'peoples': peoples}
return direct_to_template(request, 'family_tree.html', context)
def add(request):
if request.POST:
form = PeopleForm(request.POST)
if form.is_valid():
form.save()
form = PeopleForm()
return index(request, form)
return HttpResponse('Fill the form')
def filter(request):
if request.GET:
s = request.GET.get('s')
peoples = People.objects.filter(Q(first_name__icontains=s) | Q(last_name__icontains=s))
return index(request, None, peoples)
return HttpResponse('Enter a valid string to search')
def importer(request):
if request.POST:
form = ImporterForm(request.POST, request.FILES)
if form.is_valid():
form = ImporterForm()
return index(request, None, None, form)
return HttpResponse('Select the file')
| [
"[email protected]"
] | |
ec9c7d2a3db1de8e236b53724f1cbf8c028163a0 | 8638bf84783732f87d88b02ad0a41f68684fbe1f | /plotsprecht.py | 4bb13dda0e82bafc31cdb209b2b5a4505d07baeb | [] | no_license | JLammering/V-351 | fa7f43d025e44426e143b12e4517a4bd734fe559 | 2532300f9ae01a3acc68afc2061efacf349b12e4 | refs/heads/master | 2021-01-10T16:30:20.834091 | 2016-01-13T12:43:44 | 2016-01-13T12:45:33 | 48,593,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,216 | py | import matplotlib.pyplot as plt
import numpy as np
#from itertools import chain
# Load the measurement data
# k, U = np.genfromtxt('datenb_1.txt', unpack = True)
# #plt.plot(f, U, 'k.', label = r'Messdaten')
#
# N=9
#
# ind = np.arange(N)
# width = 0.3
# ax = plt.subplots()
#
#
# rects = ax.bar(ind, U, width, color ='r')
# ax.set_ylabel(r'$U\:/\:\si{\milli\volt}$')
# ax.set_xlabel(r'$\text{Frequenz}$')
# ax.set_xticks(ind)
# ax.set_xticklabels((r'$\nu$', r'$3\nu$', r'$5\nu$', r'$7\nu$', r'$9\nu$', r'$11\nu$',
# r'$13\nu$', r'$15\nu$', r'$17\nu$'))
#
# ax.legend((rects[0]),('Linienspektrum'))
#
# autolabel(rects)
#
#
# plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
# plt.savefig('build/plotsprecht.pdf')
k, U = np.genfromtxt('datenb_1.txt', unpack = True)
#plt.plot(k, U, 'k.', label = r'Messdaten')
#k_1 = list (chain.from_iterable((x, x)for x in k))
#U_1 = list (chain.from_iterable((0, x)for x in U))
#plt.plot(k_1, U_1, 'r-', label = r'Linien')
for keins, Ueins in zip(k, U):
    plt.plot((keins, keins), (0, Ueins), label=r'Amplitudes')
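# A stem plot would be the idiomatic one-liner here (a suggestion, not in the
# original): plt.stem(k, U, label=r'Amplitudes')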
plt.xlabel(r'$\text{Frequency}$')
plt.ylabel(r'$U\:/\:\si{\milli\volt}$')
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('build/plotsprecht.pdf')
| [
"[email protected]"
] |