blob_id (stringlengths 40–40) | directory_id (stringlengths 40–40) | path (stringlengths 3–616) | content_id (stringlengths 40–40) | detected_licenses (sequencelengths 0–112) | license_type (stringclasses 2 values) | repo_name (stringlengths 5–115) | snapshot_id (stringlengths 40–40) | revision_id (stringlengths 40–40) | branch_name (stringclasses 777 values) | visit_date (timestamp[us], 2015-08-06 10:31:46 – 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 – 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 – 2023-09-06 01:08:06) | github_id (int64, 4.92k – 681M, ⌀) | star_events_count (int64, 0 – 209k) | fork_events_count (int64, 0 – 110k) | gha_license_id (stringclasses 22 values) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 – 2023-09-14 21:59:50, ⌀) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 – 2023-08-21 12:35:19, ⌀) | gha_language (stringclasses 149 values) | src_encoding (stringclasses 26 values) | language (stringclasses 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 3 – 10.2M) | extension (stringclasses 188 values) | content (stringlengths 3 – 10.2M) | authors (sequencelengths 1–1) | author_id (stringlengths 1–132)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0b9a88c391878412fc429f3a75e2414c760997cf | 620323fc090cebaf7aca456ff3f7fbbe1e210394 | /flask__webservers/get_size_upload_file_with_progress/main.py | 35ae0b78ba57d5a84ea62acf77d081e6e19a12bf | [
"CC-BY-4.0"
] | permissive | gil9red/SimplePyScripts | bd2733372728bf9b9f00570e90316fa12116516b | 773c2c9724edd8827a1dbd91694d780e03fcb05a | refs/heads/master | 2023-08-31T04:26:09.120173 | 2023-08-30T17:22:59 | 2023-08-30T17:22:59 | 22,650,442 | 157 | 46 | null | 2023-09-08T17:51:33 | 2014-08-05T16:19:52 | Python | UTF-8 | Python | false | false | 4,527 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "ipetrash"
import logging
from flask import Flask, request, redirect, render_template_string, jsonify
# pip install humanize
from humanize import naturalsize as sizeof_fmt
app = Flask(__name__)
logging.basicConfig(level=logging.DEBUG)
@app.route("/")
def index():
return render_template_string(
"""\
<html>
<head>
<meta content='text/html; charset=UTF-8' http-equiv='Content-Type'/>
<title>get_size_upload_file</title>
<script type="text/javascript" src="{{ url_for('static', filename='js/jquery-3.1.1.min.js') }}"></script>
</head>
<body>
<br>
<form class="form__upload_file" action="/get_file_size" method="post" enctype="multipart/form-data">
<p>Узнайте размер файла:</p>
<p><input type="file" name="file"></p>
<p><input type="submit"></p>
</form>
<div class="block progress" style="display: none">
<p>Пожалуйста, подождите, файл загружаются.</p>
<progress class="progress upload" max="100" value="0"></progress>
</div>
<br><br>
<div class="info size" style="display: none">
<div class="show_size" style="display: inline-block;"></div><div style="display: inline-block;"> Bytes</div>
<div class="show_size_human"></div>
</div>
<script>
$(document).ready(function() {
function progress(e) {
if(e.lengthComputable) {
var max = e.total;
var current = e.loaded;
var percentage = (current * 100) / max;
console.log(percentage);
$('.progress.upload').val(percentage);
}
}
$(".form__upload_file").submit(function() {
$('.block.progress').show();
$('.info.size').hide();
var thisForm = this;
var url = $(this).attr("action");
var method = $(this).attr("method");
if (method === undefined) {
method = "get";
}
// var data = $(this).serialize();
//
// For send file object:
var input = $(".form__upload_file > input[type=file]");
var data = new FormData(thisForm);
$.ajax({
url: url,
method: method, // HTTP method, GET by default
data: data,
dataType: "json", // the data type expected from the server
// Without these options the file cannot be uploaded
processData: false,
contentType: false,
xhr: function() {
var myXhr = $.ajaxSettings.xhr();
if (myXhr.upload) {
myXhr.upload.addEventListener('progress', progress, false);
}
return myXhr;
},
cache:false,
success: function(data) {
console.log(data);
console.log(JSON.stringify(data));
$('.info.size > .show_size').text(data.length);
$('.info.size > .show_size_human').text(data.length_human);
$('.block.progress').hide();
$('.info.size').show();
},
});
return false;
});
});
</script>
</body>
</html>
"""
)
@app.route("/get_file_size", methods=["POST"])
def get_file_size():
print(request.files)
# check if the post request has the file part
if "file" not in request.files:
return redirect("/")
length = 0
file = request.files["file"]
if file:
data = file.stream.read()
length = len(data)
return jsonify({"length": length, "length_human": sizeof_fmt(length)})
if __name__ == "__main__":
app.debug = True
# Localhost
# port=0 -- random free port
# app.run(port=0)
app.run(port=5000)
# # Public IP
# app.run(host='0.0.0.0')
| [
"[email protected]"
] | |
d9febf29c060feccd2c30acdf80550e51cdf5573 | 6a32cba18ed153b7a7611804223c74dbc923c761 | /5.py | aea6c3c163bf391016e8706b78860f5c4fd2bb65 | [] | no_license | Mrhairui/leetcode | 8cfea60868c37f2a7d0675c4ee1f6d431c75dd37 | a0884db8fe70e63707cc0fa06c6367e42857e4de | refs/heads/master | 2022-03-29T16:18:42.827912 | 2019-11-13T14:48:36 | 2019-11-13T14:48:36 | 197,877,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 973 | py | class Solution:
def longestPalindrome(self, s: str) -> str:
size = len(s)
if size <= 1:
return s
longest_l = 1
curlen = 0
res = s[0]
dp = [[False for _ in range(size)] for _ in range(size)] # DP table: dp[l][r] means s[l..r] is a palindrome
for r in range(1, size):
for l in range(r): # Brute force needs this same double loop; the difference is that
# checking whether a substring is a palindrome there costs O(n) each time, while DP avoids it
if s[l] == s[r] and (r - l <= 2 or dp[l+1][r-1]): # state transition equation and boundary case
dp[l][r] = True
curlen = r - l + 1 # length of the current palindrome
if curlen > longest_l: # track the maximum length and record the substring
longest_l = curlen
res = s[l:r+1]
return res
solution = Solution()
m = solution.longestPalindrome('fdfgabcbad')
print(m)
| [
"[email protected]"
] | |
267fa3c0d0f216b97c4faa8db8e6ac57b8ec92c9 | 3a4fbde06794da1ec4c778055dcc5586eec4b7d2 | /_google_app_engine-projects/django-gae2django/django/dispatch/dispatcher.py | eb183786df321fede1a3d546f0c5e5ba1822fede | [
"Apache-2.0"
] | permissive | raychorn/svn_python-django-projects | 27b3f367303d6254af55c645ea003276a5807798 | df0d90c72d482b8a1e1b87e484d7ad991248ecc8 | refs/heads/main | 2022-12-30T20:36:25.884400 | 2020-10-15T21:52:32 | 2020-10-15T21:52:32 | 304,455,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,639 | py | import weakref
try:
set
except NameError:
from sets import Set as set # Python 2.3 fallback
from django.dispatch import saferef
WEAKREF_TYPES = (weakref.ReferenceType, saferef.BoundMethodWeakref)
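# A bound method is identified by the pair (id(instance), id(function)) because
# a fresh bound-method object is created on every attribute access; any other
# callable is identified by its own id().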
def _make_id(target):
if hasattr(target, 'im_func'):
return (id(target.im_self), id(target.im_func))
return id(target)
class Signal(object):
"""
Base class for all signals
Internal attributes:
receivers
{ receiverkey (id) : weakref(receiver) }
"""
def __init__(self, providing_args=None):
"""
Create a new signal.
providing_args
A list of the arguments this signal can pass along in a send() call.
"""
self.receivers = []
if providing_args is None:
providing_args = []
self.providing_args = set(providing_args)
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
"""
Connect receiver to sender for signal.
Arguments:
receiver
A function or an instance method which is to receive signals.
Receivers must be hashable objects.
if weak is True, then receiver must be weak-referenceable (more
precisely saferef.safeRef() must be able to create a reference
to the receiver).
Receivers must be able to accept keyword arguments.
If receivers have a dispatch_uid attribute, the receiver will
not be added if another receiver already exists with that
dispatch_uid.
sender
The sender to which the receiver should respond. Must either be
of type Signal, or None to receive events from any sender.
weak
Whether to use weak references to the receiver. By default, the
module will attempt to use weak references to the receiver
objects. If this parameter is false, then strong references will
be used.
dispatch_uid
An identifier used to uniquely identify a particular instance of
a receiver. This will usually be a string, though it may be
anything hashable.
"""
from django.conf import settings
# If DEBUG is on, check that we got a good receiver
if settings.DEBUG:
import inspect
assert callable(receiver), "Signal receivers must be callable."
# Check for **kwargs
# Not all callables are inspectable with getargspec, so we'll
# try a couple different ways but in the end fall back on assuming
# it is -- we don't want to prevent registration of valid but weird
# callables.
try:
argspec = inspect.getargspec(receiver)
except TypeError:
try:
argspec = inspect.getargspec(receiver.__call__)
except (TypeError, AttributeError):
argspec = None
if argspec:
assert argspec[2] is not None, \
"Signal receivers must accept keyword arguments (**kwargs)."
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
if weak:
receiver = saferef.safeRef(receiver, onDelete=self._remove_receiver)
for r_key, _ in self.receivers:
if r_key == lookup_key:
break
else:
self.receivers.append((lookup_key, receiver))
def disconnect(self, receiver=None, sender=None, weak=True, dispatch_uid=None):
"""
Disconnect receiver from sender for signal.
If weak references are used, disconnect need not be called. The receiver
will be removed from dispatch automatically.
Arguments:
receiver
The registered receiver to disconnect. May be none if
dispatch_uid is specified.
sender
The registered sender to disconnect
weak
The weakref state to disconnect
dispatch_uid
the unique identifier of the receiver to disconnect
"""
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
for index in xrange(len(self.receivers)):
(r_key, _) = self.receivers[index]
if r_key == lookup_key:
del self.receivers[index]
break
def send(self, sender, **named):
"""
Send signal from sender to all connected receivers.
If any receiver raises an error, the error propagates back through send,
terminating the dispatch loop, so it is quite possible to not have all
receivers called if a receiver raises an error.
Arguments:
sender
The sender of the signal. Either a specific object or None.
named
Named arguments which will be passed to receivers.
Returns a list of tuple pairs [(receiver, response), ... ].
"""
responses = []
if not self.receivers:
return responses
for receiver in self._live_receivers(_make_id(sender)):
response = receiver(signal=self, sender=sender, **named)
responses.append((receiver, response))
return responses
def send_robust(self, sender, **named):
"""
Send signal from sender to all connected receivers catching errors.
Arguments:
sender
The sender of the signal. Can be any Python object (normally one
registered with a connect if you actually want something to
occur).
named
Named arguments which will be passed to receivers. These
arguments must be a subset of the argument names defined in
providing_args.
Return a list of tuple pairs [(receiver, response), ... ]. May raise
DispatcherKeyError.
If any receiver raises an error (specifically any subclass of
Exception), the error instance is returned as the result for that
receiver.
"""
responses = []
if not self.receivers:
return responses
# Call each receiver with whatever arguments it can accept.
# Return a list of tuple pairs [(receiver, response), ... ].
for receiver in self._live_receivers(_make_id(sender)):
try:
response = receiver(signal=self, sender=sender, **named)
except Exception, err:
responses.append((receiver, err))
else:
responses.append((receiver, response))
return responses
def _live_receivers(self, senderkey):
"""
Filter sequence of receivers to get resolved, live receivers.
This checks for weak references and resolves them, returning only the
live receivers.
"""
none_senderkey = _make_id(None)
receivers = []
for (receiverkey, r_senderkey), receiver in self.receivers:
if r_senderkey == none_senderkey or r_senderkey == senderkey:
if isinstance(receiver, WEAKREF_TYPES):
# Dereference the weak reference.
receiver = receiver()
if receiver is not None:
receivers.append(receiver)
else:
receivers.append(receiver)
return receivers
def _remove_receiver(self, receiver):
"""
Remove dead receivers from connections.
"""
to_remove = []
for key, connected_receiver in self.receivers:
if connected_receiver == receiver:
to_remove.append(key)
for key in to_remove:
for idx, (r_key, _) in enumerate(self.receivers):
if r_key == key:
del self.receivers[idx]
| [
"[email protected]"
] | |
99a157c10331ba134de0da50bb8c9272687b2a54 | c6eee54ffef7f99f2825cc332a649d9a6e9e181d | /matrixscreener/imagej.py | 944d1b559197f620ac9c63be68652944f0c5ee99 | [
"MIT"
] | permissive | imcf/matrixscreener | 5d3fa6040e62c14ff504dfcbb3818e405d9e9254 | 727711654269d93e528ae9a604ce5ac5b24fa816 | refs/heads/master | 2021-01-17T04:25:14.575654 | 2015-02-08T15:23:39 | 2015-02-08T16:11:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,754 | py | # encoding: utf-8
"""
Stitch images with ImageJ.
* ``matrixscreener.imagej._bin`` should be set if you are on Windows or Linux.
"""
import pydebug, subprocess, os, fijibin
from tempfile import NamedTemporaryFile
# debug with DEBUG=matrixscreener python script.py
debug = pydebug.debug('matrixscreener')
_bin = fijibin.BIN
def stitch_macro(folder, filenames, x_size, y_size, output_filename,
x_start=0, y_start=0, overlap=10):
"""
Creates a ImageJ Grid/Collection stitching macro. Parameters are the same as
in the plugin and are described in further detail here:
http://fiji.sc/Image_Stitching#Grid.2FCollection_Stitching.
**Default stitch parameters:**
* Filename defined positions
* Compute overlap
* Subpixel accuracy
* Save computation time (but use more RAM)
* Fusion method: Linear blending
* Regression threshold: 0.30
* Max/avg displacement threshold: 2.50
* Absolute displacement threshold: 3.50
Parameters
----------
folder : string
Path to folder with images or folders with images.
Example: */path/to/slide--S00/chamber--U01--V02/*
filenames : string
Filenames of images.
Example: *field-X{xx}-Y{yy}/image-X{xx}-Y{yy}.ome.tif*
x_size : int
Size of grid, number of images in x direction.
y_size : int
Size of grid, number of images in y direction.
output_filename : string
Where to store fused image. Should be `.png`.
x_start : int
Which x position grid start with.
y_start : int
Which y position grid start with.
overlap : number
Tile overlap in percent. ImageJ will find the optimal overlap, but a
precise overlap assumption will decrase computation time.
Returns
-------
string
IJM-macro.
"""
macro = []
macro.append('run("Grid/Collection stitching",')
macro.append('"type=[Filename defined position]')
macro.append('order=[Defined by filename ]')
macro.append('grid_size_x={}'.format(x_size))
macro.append('grid_size_y={}'.format(y_size))
macro.append('tile_overlap={}'.format(overlap))
macro.append('first_file_index_x={}'.format(x_start))
macro.append('first_file_index_y={}'.format(y_start))
macro.append('directory=[{}]'.format(folder))
macro.append('file_names={}'.format(filenames))
macro.append('output_textfile_name=TileConfiguration.txt')
macro.append('fusion_method=[Linear Blending]')
macro.append('regression_threshold=0.30')
macro.append('max/avg_displacement_threshold=2.50')
macro.append('absolute_displacement_threshold=3.50')
macro.append('compute_overlap')
macro.append('subpixel_accuracy')
macro.append('computation_parameters=[Save computation time (but use more RAM)]')
# use display, such that we can specify output filename
# this is 'Fused and display' for previous stitching version!!
macro.append('image_output=[Fuse and display]");')
# save to png
macro.append('selectWindow("Fused");')
macro.append('run("PNG ...", "save={}'.format(output_filename))
macro.append('imageiosaveas.codecname=png')
macro.append('imageiosaveas.filename={}");'.format(output_filename))
macro.append('close();')
return ' '.join(macro)
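# Example usage (illustrative only -- the paths and filename pattern below are
# hypothetical, mirroring the docstring examples):
# macro = stitch_macro('/path/to/slide--S00/chamber--U01--V02',
#                      'field-X{xx}-Y{yy}/image-X{xx}-Y{yy}.ome.tif',
#                      x_size=3, y_size=3, output_filename='/tmp/fused.png')
# run_imagej(macro)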
def run_imagej(macro):
"""
Runs ImageJ with the suplied macro. Output of ImageJ can be viewed by
running python script with environment variable DEBUG=matrixscreener.
Parameters
----------
macro : string
IJM-macro to run.
Returns
-------
int
ImageJ exit code.
"""
# avoid verbose output of ImageJ when DEBUG environment variable set
env = os.environ.copy()
debugging = False
if 'DEBUG' in env:
if env['DEBUG'] == 'matrixscreener' or env['DEBUG'] == '*':
debugging = True
del env['DEBUG']
with NamedTemporaryFile(mode='w', suffix='.ijm') as m:
m.write(macro)
m.flush() # make sure macro is written before running ImageJ
cmd = [_bin, '--headless', '-macro', m.name]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
out, err = proc.communicate()
for line in out.decode('latin1', errors='ignore').splitlines():
debug('stdout:' + line)
for line in err.decode('latin1', errors='ignore').splitlines():
debug('stderr:' + line)
if proc.returncode != 0 and not debugging:
print('matrixscreener ERROR: ImageJ exited with code {}.'.format(proc.returncode))
print('matrixscreener Try running script with `DEBUG=matrixscreener python script.py`')
return proc.returncode
| [
"[email protected]"
] | |
1eabae2d19ec646f5caa21ad2542291a6db58275 | 5c842a91854b0061bdec96a36e30860fb1e1321e | /Chapter3_MCMC/github_pull.py | dff8070e7791d77ff97664dc4e3c8ebedbf3adc3 | [
"MIT"
] | permissive | bzillins/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers | fe2be77b9859566e2a923c8022a85925aa190d1d | c08a6344b8d0e39fcdb9702913b46e1b4e33fb9a | refs/heads/master | 2020-12-30T22:09:05.936082 | 2013-02-27T04:21:05 | 2013-02-27T04:21:05 | 19,747,037 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,348 | py | # github data scraper
"""
variables of interest:
indp. variables
- language, given as a binary variable. Need 4 positions for 5 langagues
- #number of days created ago, 1 position
- has wiki? Boolean, 1 position
- followers, 1 position
- following, 1 position
- constant
dep. variables
-stars/watchers
-forks
"""
from requests import get
from json import loads
import datetime
import numpy as np
MAX = 8000000
today = datetime.datetime.today()
randint = np.random.randint
N = 120 #sample size.
auth = ("username", "password" )
language_mappings = {"Python": 0, "JavaScript": 1, "Ruby": 2, "Java":3, "Shell":4, "PHP":5}
#define data matrix:
X = np.zeros( (N , 12), dtype = int )
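# Rejection sampling: keep drawing random repository IDs until we get one that
# is not a fork and whose language is listed in language_mappings.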
for i in xrange(N):
is_fork = True
is_valid_language = False
while is_fork == True or is_valid_language == False:
is_fork = True
is_valid_language = False
params = {"since":randint(0, MAX ) }
r = get("https://api.github.com/repositories", params = params, auth=auth )
results = loads( r.text )[0]
# I'm only interested in the first one, and only if it is not a fork.
is_fork = results["fork"]
r = get( results["url"], auth = auth)
#check the language
repo_results = loads( r.text )
try:
language_mappings[ repo_results["language" ] ]
is_valid_language = True
except:
pass
#languages
X[ i, language_mappings[ repo_results["language" ] ] ] = 1
#delta time
X[ i, 6] = ( today - datetime.datetime.strptime( repo_results["created_at"][:10], "%Y-%m-%d" ) ).days
#haswiki
X[i, 7] = repo_results["has_wiki"]
#get user information
r = get( results["owner"]["url"] , auth = auth)
user_results = loads( r.text )
X[i, 8] = user_results["following"]
X[i, 9] = user_results["followers"]
#get dep. data
X[i, 10] = repo_results["watchers_count"]
X[i, 11] = repo_results["forks_count"]
print
print " -------------- "
print i, ": ", results["full_name"], repo_results["language" ], repo_results["watchers_count"], repo_results["forks_count"]
print " -------------- "
print
np.savetxt("data/github_data.csv", X, delimiter=",", fmt="%d" )
| [
"[email protected]"
] | |
cca8d7bb1e4c795995e6db7675bd3f7bfad39018 | e9d139f5108ca115d6254763438dd8855fc4454d | /view/__init__.py | fc9912fe0a91e8e2066ec8e866c29d702fb6cd05 | [] | no_license | Letractively/simulation-modeling | 119d1376a75ff825903a0dd4bbbbc161e1d19e05 | aca18bf1f50b1083bbc9cbd97b87d3df1c71000b | refs/heads/master | 2016-08-12T18:44:07.605687 | 2011-12-14T11:04:29 | 2011-12-14T11:04:29 | 45,956,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,531 | py | # -*- coding: utf-8 -*-
# Import and configure Jinja2
from jinja2 import Environment, PackageLoader
templates = Environment(loader=PackageLoader('view', 'templates'), extensions = ['jinja2.ext.with_'])
from GChartWrapper import Pie, Line, LineXY
from settings import app
def index(names):
'Home page'
template = templates.get_template('index.html')
return template.render(app = app, models = names)
def help(name, title):
'Help page'
template = templates.get_template('help/%s.html' % name)
return template.render(app = app, name = name, title = title)
def mss(*a, **kw):
'Queueing system model page'
if 'output' in kw and kw['output']['requests']['total']:
# Generate the request balance diagram
requests = kw['output']['requests']
# Source data
data = []
for key in ('timeout', 'sizeout', 'shutdown'):
data.append(round(requests['denied'][key], 2))
data.append(round(requests['accepted'], 2))
# Colors
colors = ('FBFF53', '1560BD', 'FF4D6F', '008090')
# Labels
labels = [str(round(value / requests['total'] * 100, 2)) + '%' for value in data]
# Size
sizes = [250, 160]
kw['output']['requests_diagram'] = Pie(data).color(*colors).label(*labels).size(*sizes).scale(0, sum(data)).img(alt=u'Количество заявок')
if kw['output']['balance']['accepted']:
# Generate the revenue diagram
balance = kw['output']['balance']
data = [balance['income'], balance['repairs'], balance['costs']]
colors = ['8F007E', '000000', '999999']
labels = [str(round(value / balance['accepted'] * 100, 2)) + '%' for value in data]
sizes[1] = 80
kw['output']['income_diagram'] = Pie(data).color(*colors).label(*labels).size(*sizes).scale(0, sum(data)).img(alt=u'Распределение выручки')
# Generate the workload diagram
data = kw['output']['workload']
sizes = [560, 180]
diagram = Line(data).scale(0, round(max(data), 3)).size(sizes).color('1560BD')
diagram['chm'] = 'B,008090,0,0,0|B,FF4D6F,0,%s:,0' % (kw['form'].data['channels_count'])
diagram.axes.type('xxy')
diagram.axes.label(1, '', 'Количество заявок в системе', '')
y_labels_count = 4
diagram.axes.label(2, *(str(round(max(data) / (y_labels_count - 1) * n, 2)) + '%' for n in range(y_labels_count)))
diagram.axes.range(0, 0, len(data) - 1)
kw['output']['workload_diagram'] = diagram.img(alt=u'Вероятностное распределение количества заявок в системе', style='margin-top: 18px')
return model(*a, **kw)
def warehouse(*args, **kwargs):
'Warehouse model'
if not kwargs.get('output', False):
return model(*args, **kwargs)
# Generate the balance diagram
if kwargs['output']['balance']['sales'] > 0 and kwargs['output']['balance']['profit'] > 0:
balance = kwargs['output']['balance']
data = [round(balance[field], 2) for field in ('supplies', 'storage', 'fines', 'profit')]
colors = ('FBFF53', '1560BD', 'FF4D6F', '008090')
labels = [str(round(value / balance['sales'] * 100, 2)) + '%' for value in data]
sizes = [250, 160]
kwargs['output']['balance_diagram'] = Pie(data).color(*colors).label(*labels).size(*sizes).scale(0, balance['sales']).img(alt=u'Диаграмма баланса')
history = kwargs['output']['history']
if history:
# Generate the history diagram
# Abscissas and ordinates
x, y = zip(*history)
sizes = [560, 180]
diagram = LineXY([x, y]).size(sizes).scale(0, max(x), 0, max(y)).color('1560BD')
diagram['chm'] = 'B,1560BD,0,:,0'
diagram.axes.type('xxy')
diagram.axes.range(0, 0, max(x))
diagram.axes.range(2, 0, max(y))
diagram.axes.label(1, '', 'Время, ч', '')
kwargs['output']['history_diagram'] = diagram.img()
return model(*args, **kwargs)
def model(name, title, form=None, output={}, query=''):
'Model page'
template = templates.get_template(name + '.html')
# Link
shorten = '/url/%s?%s' % (name, query) if query else None
return template.render(app=app, name=name, title=title, form=form, shorten=shorten, output=bool(output), **output)
def shorten(url):
'URL shortening'
template = templates.get_template('shorten.html')
return template.render(url=url)
def notfound():
'Page not found'
template = templates.get_template('notfound.html')
return template.render(app=app, title=u'Страница не найдена')
def internal_error():
'Internal error'
template = templates.get_template('internal_error.html')
return template.render(app=app, title=u'Внутренняя ошибка')
| [
"[email protected]"
] | |
a0d3c2bd42f70d32b9c784462fa44007a8f0adf7 | 10dcfd809f342fd822b6df198f4045f92b157124 | /bin/po2ini | 1a43bcf93206ef9589e290b07fe2adcdcf2ee58f | [] | no_license | mozilla/affiliates-lib | c731c910c8d9fe04e211541e54f304a127a0b829 | 1000f98d9df217ed66a0ecd07e1e0a1d822a712a | refs/heads/master | 2023-07-03T17:18:49.841809 | 2016-02-01T10:49:47 | 2016-02-01T10:49:47 | 2,291,186 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 989 | #!/Users/fred/.virtualenvs/playdoh/bin/python2.6
#
# Copyright 2007 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""simple script to convert a gettext .po localization file to a .ini file"""
from translate.convert import po2ini
if __name__ == '__main__':
po2ini.main()
| [
"[email protected]"
] | ||
313c33cbac5a657dac8e135d547bd7a34207608b | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/pg_1532+522/sdB_pg_1532+522_lc.py | 8187be807799c8b0ee1f61b7c1dc1a03cbbd8db0 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[233.374333,52.113889], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_pg_1532+522/sdB_pg_1532+522_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
c977864e64c62b6e7d106375290362fa31ff27ed | da55b6cb2b7356f589c56bb647e49f79aedfc4f1 | /python-fabric/interface.test/example2_app/app/DemoComponent.py | b1b8f58cdd0340fe06bec5703129b2ed628bc745 | [] | no_license | eiselesr/sandbox | 2d0cbb16e2601dfda9d9e5ea5a5e96a053fdf72f | 0b3fc878a613600c6071019c820ba79e2d2a9a2d | refs/heads/master | 2023-09-01T00:40:37.222840 | 2023-08-24T19:20:27 | 2023-08-24T19:20:27 | 151,609,265 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | # riaps:keep_import:begin
from riaps.run.comp import Component
# riaps:keep_import:end
class DemoComponent(Component):
# riaps:keep_constr:begin
def __init__(self):
super().__init__()
# riaps:keep_constr:end
# riaps:keep_tick:begin
def on_tick(self):
now = self.tick.recv_pyobj()
self.logger.info(f"DemoComponent | on_tick | now: {now}")
# riaps:keep_tick:end
# riaps:keep_impl:begin
# riaps:keep_impl:end
| [
"[email protected]"
] | |
a4ddf2709fce33cd5aab0418f9028ac97efa25d6 | fab39aa4d1317bb43bc11ce39a3bb53295ad92da | /tools/clip_dot.py | e0123db1d5a9fae276ffc01267f7a1be6649f5f5 | [
"Apache-2.0"
] | permissive | dupeljan/nncf | 8cdce27f25f01ce8e611f15e1dc3036fb8548d6e | 0abfd7103ca212888a946ba4d0fbdb9d436fdaff | refs/heads/develop | 2023-06-22T00:10:46.611884 | 2021-07-22T10:32:11 | 2021-07-22T10:32:11 | 388,719,455 | 0 | 0 | Apache-2.0 | 2021-07-23T07:46:15 | 2021-07-23T07:43:43 | null | UTF-8 | Python | false | false | 2,144 | py | #pylint:skip-file
import sys
from argparse import ArgumentParser
import networkx as nx
def main(argv):
parser = ArgumentParser()
parser.add_argument('-i', '--input_file', help='Input .dot file',
required=True)
parser.add_argument('-s', '--start_id', help='Start ID (inclusive)',
required=True)
parser.add_argument('-f', '--finish_id', help='Finish ID (inclusive)', required=True)
parser.add_argument('-o', '--output_file', help='Output .dot file', required=True)
args = parser.parse_args(args=argv)
graph = nx.DiGraph(nx.drawing.nx_pydot.read_dot(args.input_file))
new_graph = nx.DiGraph()
start_key = None
for node_key in nx.lexicographical_topological_sort(graph):
id_portion = node_key.split()[0]
has_id = id_portion.isdigit()
if has_id:
curr_id = int(id_portion)
if curr_id == int(args.start_id):
start_key = node_key
break
if start_key is None:
raise RuntimeError("Could not find the node with ID {} to start from!".format(args.start_id))
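    # Walk edges outward from the start node (ignoring edge direction) and copy
    # nodes and edges into the new graph until a node with ID >= finish_id is hit.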
for edge in nx.edge_bfs(graph, start_key, orientation='ignore'):
from_key, to_key, _ = edge
id_portion = from_key.split()[0]
has_id = id_portion.isdigit()
end_key = from_key
if has_id:
curr_id = int(id_portion)
if curr_id >= int(args.finish_id):
break
node_data = graph.nodes[from_key]
new_graph.add_node(from_key, **node_data)
edge_data = graph.edges[from_key, to_key]
new_graph.add_edge(from_key, to_key, **edge_data)
# for edge in nx.edge_bfs(graph, end_key, reverse=True):
# from_key, to_key = edge
# if from_key == start_key:
# break
# node_data = graph.nodes[from_key]
# new_graph.add_node(from_key, **node_data)
# edge_data = graph.edges[from_key, to_key]
# new_graph.add_edge(from_key, to_key, **edge_data)
nx.drawing.nx_pydot.write_dot(new_graph, args.output_file)
if __name__ == '__main__':
main(sys.argv[1:])
| [
"[email protected]"
] | |
997cfed7e90d02a247f84a51c5934988fa32ac75 | d3cfa86c22ab3bd6f2dcbbd72f9cf5f6bf574cd1 | /gridappsd/utils.py | b455a1f97896802d08297150dfac9078f62c104b | [] | no_license | ariwoolar/gridappsd-python | 30fbc9a36b0d7be43f6fb0ff8b0f39d76fa6f4ed | 1e8ddc88de7fc9cabb17b1ab34f2756e8c37127c | refs/heads/master | 2023-02-02T20:09:46.491618 | 2020-09-10T20:09:34 | 2020-09-10T20:09:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,691 | py | import datetime, time
from dateutil import parser
import os
try: # python2.7
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
__GRIDAPPSD_URI__ = os.environ.get("GRIDAPPSD_URI", "localhost:61613")
__GRIDAPPSD_USER__ = os.environ.get("GRIDAPPSD_USER", "system")
__GRIDAPPSD_PASS__ = os.environ.get("GRIDAPPSD_PASS", "manager")
if not __GRIDAPPSD_URI__.startswith("tcp://"):
__GRIDAPPSD_URI__ = "tcp://" + __GRIDAPPSD_URI__
__GRIDAPPSD_URI_PARSED__ = urlparse(__GRIDAPPSD_URI__)
def datetime_to_epoche(dt):
return int(time.mktime(dt.timetuple()) * 1000)
def datestr_to_epoche(dt_str):
dt = parser.parse(dt_str)
return datetime_to_epoche(dt)
def epoche_to_datetime(epoche):
return datetime.datetime.fromtimestamp(epoche)
def utc_timestamp():
return datetime_to_epoche(datetime.datetime.utcnow())
def validate_gridappsd_uri():
problems = []
gridapspd_uri = __GRIDAPPSD_URI__
if not gridapspd_uri.startswith("tcp://"):
gridapspd_uri = "tcp://" + gridapspd_uri
gridapspd_parsed_uri = urlparse(gridapspd_uri)
if not gridapspd_parsed_uri.port:
problems.append("Invalid port specified in URI modify environment GRIDAPPSD_URI")
if not gridapspd_parsed_uri.hostname:
problems.append("Invalid hostname not specified!")
return problems
def get_gridappsd_address():
"""
Returns the address in such a way that the response will be
able to be passed directly to a socket and/or the stomp library.
:return: tuple(address, port)
"""
return (__GRIDAPPSD_URI_PARSED__.hostname,
__GRIDAPPSD_URI_PARSED__.port)
def get_gridappsd_user():
return __GRIDAPPSD_USER__
def get_gridappsd_pass():
return __GRIDAPPSD_PASS__
def get_gridappsd_application_id():
""" Retrieve the application_id from the environment.
In order to use this function an environmental variable `GRIDAPPSD_APPLICATION_ID`
must have been set. For docker containers this is done in the
`gridappsd.app_registration` callback when the application is started. If the
environmental variable is not set an AttributeError will be raised.
"""
app_id = os.environ.get("GRIDAPPSD_APPLICATION_ID")
if not app_id:
raise AttributeError("environmental variable for GRIDAPPSD_APPLICATION_ID is not set")
return app_id
def get_gridappsd_simulation_id():
""" Retrieve simulation_id from environment.
This method will return a `None` if the GRIDAPPSD_SIMULATION_ID environmental
variable is not set.
"""
simulation_id = os.environ.get("GRIDAPPSD_SIMULATION_ID")
return simulation_id
| [
"[email protected]"
] | |
d4244e67b3754369184180c6aee3052921518a90 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_315/ch40_2020_09_19_02_31_10_320887.py | 4062bfd4d8115f73c346ece53c0c8d84dafed63f | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | def soma_valores(a):
soma = 0
for x in a:  # assumes a is an iterable of numbers
soma += x
return soma
"[email protected]"
] | |
73f77c0541a623faed3445fd42c78e95a3f43de3 | b7a98a761007cf8b913b75152a278c1693f2d00d | /code/ortools/simple.py | a22169600415ebac07b2a7f71ee7fba972372f13 | [] | no_license | VuHoangvn/descrete_optimize | be18fcfc230a9d1133ec94a49b1fc5cfcb50b449 | 7a17d472af7d54624860aeff590db423a7e47d59 | refs/heads/master | 2022-10-14T00:22:50.368095 | 2020-06-05T09:48:31 | 2020-06-05T09:48:31 | 261,913,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 829 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ortools.sat.python import cp_model
def SimpleSatProgram():
"""Minimal CP-SAT example to showcase calling the solver."""
# Creates the model
model = cp_model.CpModel()
# Creates the variables.
num_vals = 3
x = model.NewIntVar(0, num_vals - 1, 'x')
y = model.NewIntVar(0, num_vals - 1, 'y')
z = model.NewIntVar(0, num_vals - 1, 'z')
# Creates the constraints.
model.Add(x != y)
# Creates a solver and solves the model.
solver = cp_model.CpSolver()
status = solver.Solve(model)
if status == cp_model.FEASIBLE:
print('x = %i' % solver.Value(x))
print('y = %i' % solver.Value(y))
print('z = %i' % solver.Value(z))
SimpleSatProgram() | [
"[email protected]"
] | |
0657eee5800ac668b250193a08ebbb5b92cfbdb1 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /edifact/D09B/SANCRTD09BUN.py | b94d0510dd191e38d7227ae909cae41f4e766c11 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 4,024 | py | #Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD09BUN import recorddefs
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGM', MIN: 1, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 99},
{ID: 'STS', MIN: 0, MAX: 99},
{ID: 'LOC', MIN: 0, MAX: 99},
{ID: 'RFF', MIN: 0, MAX: 99},
{ID: 'FTX', MIN: 0, MAX: 99},
{ID: 'MEA', MIN: 0, MAX: 99},
{ID: 'MOA', MIN: 0, MAX: 99},
{ID: 'GEI', MIN: 0, MAX: 99},
{ID: 'CST', MIN: 0, MAX: 1},
{ID: 'DOC', MIN: 0, MAX: 99, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 9},
{ID: 'LOC', MIN: 0, MAX: 9},
]},
{ID: 'PNA', MIN: 0, MAX: 99, LEVEL: [
{ID: 'ADR', MIN: 0, MAX: 9},
{ID: 'CTA', MIN: 0, MAX: 9, LEVEL: [
{ID: 'COM', MIN: 0, MAX: 9},
]},
]},
{ID: 'TDT', MIN: 0, MAX: 99, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 9},
{ID: 'LOC', MIN: 0, MAX: 9},
{ID: 'RFF', MIN: 0, MAX: 9},
]},
{ID: 'PAC', MIN: 0, MAX: 99, LEVEL: [
{ID: 'PCI', MIN: 0, MAX: 9},
{ID: 'MEA', MIN: 0, MAX: 9},
]},
{ID: 'EQD', MIN: 0, MAX: 99, LEVEL: [
{ID: 'TMP', MIN: 0, MAX: 9},
{ID: 'SEL', MIN: 0, MAX: 99, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 9},
{ID: 'LOC', MIN: 0, MAX: 9},
]},
]},
{ID: 'PRC', MIN: 0, MAX: 99, LEVEL: [
{ID: 'IMD', MIN: 0, MAX: 9},
{ID: 'MEA', MIN: 0, MAX: 9},
{ID: 'DTM', MIN: 0, MAX: 9},
{ID: 'LOC', MIN: 0, MAX: 9},
{ID: 'DOC', MIN: 0, MAX: 9},
{ID: 'RFF', MIN: 0, MAX: 9},
{ID: 'TMP', MIN: 0, MAX: 9},
{ID: 'GEI', MIN: 0, MAX: 99},
{ID: 'FTX', MIN: 0, MAX: 99},
{ID: 'PNA', MIN: 0, MAX: 99, LEVEL: [
{ID: 'ADR', MIN: 0, MAX: 9},
{ID: 'CTA', MIN: 0, MAX: 9, LEVEL: [
{ID: 'COM', MIN: 0, MAX: 9},
]},
]},
]},
{ID: 'LIN', MIN: 0, MAX: 9999, LEVEL: [
{ID: 'CST', MIN: 0, MAX: 9},
{ID: 'MEA', MIN: 0, MAX: 9},
{ID: 'PIA', MIN: 0, MAX: 9},
{ID: 'IMD', MIN: 0, MAX: 9},
{ID: 'GIN', MIN: 0, MAX: 9999},
{ID: 'RFF', MIN: 0, MAX: 9},
{ID: 'ATT', MIN: 0, MAX: 9},
{ID: 'DTM', MIN: 0, MAX: 9},
{ID: 'LOC', MIN: 0, MAX: 9},
{ID: 'FTX', MIN: 0, MAX: 9},
{ID: 'QTY', MIN: 0, MAX: 9},
{ID: 'MOA', MIN: 0, MAX: 9},
{ID: 'DOC', MIN: 0, MAX: 99, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 9},
{ID: 'LOC', MIN: 0, MAX: 9},
]},
{ID: 'PNA', MIN: 0, MAX: 99, LEVEL: [
{ID: 'ADR', MIN: 0, MAX: 9},
{ID: 'CTA', MIN: 0, MAX: 9, LEVEL: [
{ID: 'COM', MIN: 0, MAX: 9},
]},
]},
{ID: 'PAC', MIN: 0, MAX: 99, LEVEL: [
{ID: 'PCI', MIN: 0, MAX: 9},
{ID: 'MEA', MIN: 0, MAX: 9},
]},
{ID: 'EQD', MIN: 0, MAX: 99, LEVEL: [
{ID: 'TMP', MIN: 0, MAX: 9},
{ID: 'SEL', MIN: 0, MAX: 99, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 9},
{ID: 'LOC', MIN: 0, MAX: 9},
]},
]},
{ID: 'PRC', MIN: 0, MAX: 99, LEVEL: [
{ID: 'IMD', MIN: 0, MAX: 9},
{ID: 'MEA', MIN: 0, MAX: 9},
{ID: 'DTM', MIN: 0, MAX: 9},
{ID: 'LOC', MIN: 0, MAX: 9},
{ID: 'DOC', MIN: 0, MAX: 9},
{ID: 'RFF', MIN: 0, MAX: 9},
{ID: 'TMP', MIN: 0, MAX: 9},
{ID: 'GEI', MIN: 0, MAX: 99},
{ID: 'FTX', MIN: 0, MAX: 99},
{ID: 'PNA', MIN: 0, MAX: 99, LEVEL: [
{ID: 'ADR', MIN: 0, MAX: 9},
{ID: 'CTA', MIN: 0, MAX: 9, LEVEL: [
{ID: 'COM', MIN: 0, MAX: 9},
]},
]},
]},
]},
{ID: 'CNT', MIN: 0, MAX: 9},
{ID: 'AUT', MIN: 0, MAX: 99, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 9},
]},
{ID: 'UNT', MIN: 1, MAX: 1},
]},
]
| [
"[email protected]"
] | |
d1c16ded968ad2ed9bdec10b3979d2cc493cc4d1 | e835059c8084d3bd92d8f24a748136617cf8a2a3 | /8/radd_test.py | 87352539fc8175af3eeab0c1c908d923ecdf9d3a | [] | no_license | Carlzkh/CrazyPythonNotes | 7141b062722e2b8354ce3566f0b8d086bbfad9b1 | 04a8648ac2150e07520252d882c5cbc81cc3b9f9 | refs/heads/master | 2021-07-11T22:57:06.610728 | 2021-06-29T07:37:16 | 2021-06-29T07:37:16 | 247,253,027 | 0 | 0 | null | 2021-01-05T08:48:45 | 2020-03-14T10:08:34 | Python | UTF-8 | Python | false | false | 2,076 | py | """
object.__radd__(self, other): when y provides this method, x + y can run
object.__rsub__(self, other): when y provides this method, x - y can run
object.__rmul__(self, other): when y provides this method, x * y can run
object.__rmatmul__(self, other): when y provides this method, x @ y can run
object.__rtruediv__(self, other): when y provides this method, x / y can run
object.__rfloordiv__(self, other): when y provides this method, x // y can run
object.__rmod__(self, other): when y provides this method, x % y can run
object.__rdivmod__(self, other): when y provides this method, divmod(x, y) can run
object.__rpow__(self, other[, modulo]): when y provides this method, x ** y can run
object.__rlshift__(self, other): when y provides this method, x << y can run
object.__rrshift__(self, other): when y provides this method, x >> y can run
object.__rand__(self, other): when y provides this method, x & y can run
object.__rxor__(self, other): when y provides this method, x ^ y can run
object.__ror__(self, other): when y provides this method, x | y can run
In short, if a custom class provides one of the rxxx() methods above, then
objects of that class can appear on the right-hand side of the corresponding operator.
"""
class Rectangle:
def __init__(self, width, height):
self.width = width
self.height = height
# Define the set_size() function
def set_size(self, size):
self.width, self.height = size
# Define the get_size() function
def get_size(self):
return self.width, self.height
# Use property to define the size attribute
size = property(get_size, set_size)
# Define the __radd__ method so this object can be the right operand of "+"
def __radd__(self, other):
if not (isinstance(other, int) or isinstance(other, float)):
raise TypeError('+ requires the other operand to be a number')
return Rectangle(self.width + other, self.height + other)
def __repr__(self):
return 'Rectangle(width=%g, height=%g)' % (self.width, self.height)
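# 4 + r1 works even though int.__add__ cannot handle Rectangle: Python falls
# back to the right operand's __radd__ method defined above.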
r1 = Rectangle(4, 5)
r = 4 + r1
print(r) # Rectangle(width=8, height=9)
| [
"[email protected]"
] | |
efa0e5ddb6154a7a27511647626b98da91f3bf51 | 1b57d1ce3baa5484cb517e916e2b3c7b66196672 | /tests/test_config.py | fe63aa3f70ced5c38ca198ca7e118ae12d80d2b3 | [
"MIT"
] | permissive | cfhamlet/os-config | 84e3b03456c554408148b448ee934a74a50b0bb0 | 5a875ac07972ef9e4d1d6887ea48c974363f2438 | refs/heads/master | 2020-04-09T07:43:36.575488 | 2019-01-08T04:43:20 | 2019-01-08T04:43:20 | 160,168,165 | 3 | 0 | MIT | 2019-01-08T04:43:21 | 2018-12-03T09:47:25 | Python | UTF-8 | Python | false | false | 4,513 | py | import pytest
from os_config import Config
def test_create():
with pytest.raises(TypeError):
c = Config()
c = Config.create(a=1, b=2)
assert c.a == 1
assert c.b == 2
def test_create_from_dict():
d = {'a': 3, 'b': 4}
c = Config.from_dict(d)
assert c.a == 3
assert c.b == 4
def test_simple_recursion():
c = Config.create()
with pytest.raises(AttributeError):
c.c = c
def test_tuple_recursion():
c = Config.create()
with pytest.raises(AttributeError):
c.c = (c, )
def test_deep_recursion():
a = Config.create()
b = Config.create()
c = Config.create()
b.c = c
c.a = a
with pytest.raises(AttributeError):
a.b = b
def test_invalid_attribute_name():
c = Config.create()
for k in ['1', '_a', '*']:
with pytest.raises(AttributeError):
setattr(c, k, None)
def test_valid_type():
c = Config.create()
for v in [1, False, (1, 2), None, 1.1, Config.create()]:
setattr(c, 'test_key', v)
assert getattr(c, 'test_key') == v
def test_invalid_type():
class TestClass(object):
pass
def test_method():
pass
c = Config.create()
for v in [TestClass, TestClass(), test_method, ]:
with pytest.raises(AttributeError):
c.c = v
def test_update_from_config_01():
c = Config.create(a=1, b=2)
d = Config.create()
Config.update(d, c)
assert d.a == 1
assert d.b == 2
def test_update_from_config_02():
c = Config.create(a=1, b=2)
d = Config.create()
d.a = 2
Config.update(d, c)
assert d.a == 1
def test_udpate_from_config_recursive_01():
c = Config.create()
d = Config.create()
d.m = c
with pytest.raises(AttributeError):
Config.update(c, d)
def test_udpate_from_config_recursive_02():
c = Config.create()
d = Config.create()
d.m = (c,)
with pytest.raises(AttributeError):
Config.update(c, d)
def test_udpate_from_config_recursive_03():
c = Config.create()
d = Config.create()
e = Config.create()
e.m = c
d.m = (e,)
with pytest.raises(AttributeError):
Config.update(c, d)
def test_update_from_dict_01():
c = Config.create()
Config.update(c, {'a': 1, 'b': 2})
assert c.a == 1
assert c.b == 2
def test_update_from_dict_02():
c = Config.create()
d = {'a': {'b': 1}}
Config.update(c, d)
assert c.a.b == 1
def test_update_from_dict_03():
c = Config.create()
b = Config.create(b=1)
d = {'a': b}
Config.update(c, d)
assert c.a.b == 1
def test_update_from_dict_04():
c = Config.create(a=1)
assert c.a == 1
b = Config.create(b=1)
d = {'a': b}
Config.update(c, d)
assert c.a.b == 1
def test_update_from_dict_05():
c = Config.create()
b = Config.create(b=1)
d = {'a': b}
Config.update(c, d)
d = {'a': 1}
Config.update(c, d)
assert c.a == 1
def test_create_from_json_01():
d = {'a': 1}
import json
j = json.dumps(d)
c = Config.from_json(j)
assert c.a == 1
def test_dump_to_json_01():
c = Config.create(a=1)
j = Config.to_json(c)
import json
d = json.loads(j)
assert d['a'] == 1
def test_tuple_with_list():
d = {'a': (1, 2, [1, 2, 3])}
c = Config.from_dict(d)
assert c.a == (1, 2, (1, 2, 3))
def test_tuple_with_dict():
d = {'a': (1, {'b': 2}, [3, 4, 5])}
c = Config.from_dict(d)
assert c.a[1].b == 2
def test_create_from_object():
class A(object):
a = 1
c = Config.from_object(A)
assert c.a == 1
def test_sub_config():
c = Config.create()
a = Config.create()
c.a = a
c.b = a
c.a = 1
with pytest.raises(AttributeError):
a.c = c
def test_pop():
c = Config.create(a=1)
Config.pop(c, 'a')
assert len(c) == 0
def test_get():
c = Config.create(a=1)
assert Config.get(c, 'a') == 1
assert Config.get(c, 'b') is None
assert Config.get(c, 'c', 2) == 2
def test_from_pyfile(tmpdir):
txt = r'''
a = 1
b = [1,2,3]
'''
f = tmpdir.join('test_config.py')
f.write(txt)
c = Config.from_pyfile(f.strpath)
assert c.a == 1
assert c.b == (1, 2, 3)
def test_to_json():
import json
c = Config.create(a=1)
d = json.loads(Config.to_json(c))
d['a'] == 1
def test_to_dict():
d = {'a': 1, 'b': (1, 2, 3), 'c': {'e': (4, 5, 6)}}
c = Config.from_dict(d)
dd = Config.to_dict(c)
assert d == dd
| [
"[email protected]"
] | |
ccfe4b8ffb05eaba3eeba44922978edf51809bda | 3aab11d445011f4a0de1376886dd3899aba44e68 | /opps/db/conf.py | eedef8cb2db0ef10333332563c15c6b0d5c6b5ca | [
"MIT"
] | permissive | opps/opps | 4ba6a08ac5aa31be48c245b2e8f9d9a714a5e473 | 5552924fa34ea40d24febeac5046bd59f62e0e4f | refs/heads/master | 2023-08-24T21:09:23.489540 | 2023-05-22T20:07:33 | 2023-05-22T20:07:33 | 7,712,379 | 166 | 76 | MIT | 2022-01-06T22:53:23 | 2013-01-20T03:56:15 | Python | UTF-8 | Python | false | false | 572 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf import settings
from appconf import AppConf
class OppsDataBaseConf(AppConf):
HOST = getattr(settings, 'OPPS_DB_HOST', None)
USER = getattr(settings, 'OPPS_DB_USER', None)
PASSWORD = getattr(settings, 'OPPS_DB_PASSWORD', None)
PORT = getattr(settings, 'OPPS_DB_PORT', None)
NAME = getattr(settings, 'OPPS_DB_NAME', 'opps_db')
ENGINE = getattr(settings, 'OPPS_DB_ENGINE', 'opps.db.')
OPTION = getattr(settings, 'OPPS_BD_OPTION', None)
class Meta:
prefix = 'opps_db'
| [
"[email protected]"
] | |
c618f6eb814a4c418adb0624e5c2dc54c47b4cc3 | 542c040b9b2150d789096f031dcf7a4362b034fe | /training/migrations/0003_auto_20210501_1721.py | bb92ba559a946476499ee525d0edc0012c29ba25 | [
"Unlicense"
] | permissive | rafimuhammad01/mtf-hackathon | 3115412f673774cc3991bd2a67854bfa645966d1 | 83ab410239a93ff04e57d7ceb2d1d292ba365866 | refs/heads/main | 2023-04-12T15:28:41.506450 | 2021-05-22T11:27:05 | 2021-05-22T11:27:05 | 360,573,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 765 | py | # Generated by Django 3.2 on 2021-05-01 10:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('training', '0002_auto_20210501_1607'),
]
operations = [
migrations.AddField(
model_name='training',
name='linkUrl',
field=models.URLField(blank=True),
),
migrations.AddField(
model_name='training',
name='location',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AddField(
model_name='training',
name='method',
field=models.IntegerField(blank=True, choices=[(0, 'Online'), (1, 'Offline')], null=True),
),
]
| [
"[email protected]"
] | |
abeb6300b115c2987444589968634de355eca323 | 16819f2d2a924a8df8c24754241f94247d202141 | /backend/advyon_24955/wsgi.py | 02ac84499c7199934b67c6b4cab2b07b0c264618 | [] | no_license | crowdbotics-apps/advyon-24955 | 922dea646dc87c5bd786ff01aa2a8ed4d19636a5 | a153d936b8cba32003c66bccf95d617c1a63d869 | refs/heads/master | 2023-03-16T07:40:45.604601 | 2021-03-10T20:47:22 | 2021-03-10T20:47:22 | 346,487,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for advyon_24955 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'advyon_24955.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
a5fe8e2a5902cb702f9b7dbe2ca631eb86704b49 | da459e1380b18fbac9d8868dc26b3b791547ed63 | /replacesum.py | 46904922d98a9b9badbe13c6aae58249985478b7 | [] | no_license | tchelmella/dictionary | a2cc50cacf61bcb3d6af15b1125e90fd5abd1982 | ef95e559142944c3d067991ad9a521e11bb6d0cd | refs/heads/master | 2020-07-20T18:58:24.100373 | 2019-09-06T02:20:16 | 2019-09-06T02:20:16 | 206,695,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | d = {'a': [3, 4, 5], 'b': [1, 2, 3]}
for k, v in d.items():
d[k] = sum(v) / len(v)
print(d)
| [
"[email protected]"
] | |
7ec138bc879d91a44e191223ca877c75087340d8 | 232fc2c14942d3e7e28877b502841e6f88696c1a | /dizoo/pomdp/entry/pomdp_ppo_default_config.py | 8b4bbde7287f6f31890d1be86bf31692489890e4 | [
"Apache-2.0"
] | permissive | shengxuesun/DI-engine | ebf84221b115b38b4b3fdf3079c66fe81d42d0f7 | eb483fa6e46602d58c8e7d2ca1e566adca28e703 | refs/heads/main | 2023-06-14T23:27:06.606334 | 2021-07-12T12:36:18 | 2021-07-12T12:36:18 | 385,454,483 | 1 | 0 | Apache-2.0 | 2021-07-13T02:56:27 | 2021-07-13T02:56:27 | null | UTF-8 | Python | false | false | 2,413 | py | from ding.entry import serial_pipeline
from easydict import EasyDict
pong_ppo_config = dict(
env=dict(
collector_env_num=16,
evaluator_env_num=4,
n_evaluator_episode=8,
stop_value=20,
env_id='Pong-ramNoFrameskip-v4',
frame_stack=4,
warp_frame=False,
use_ram=True,
pomdp=dict(noise_scale=0.01, zero_p=0.2, reward_noise=0.01, duplicate_p=0.2),
manager=dict(
shared_memory=False,
)
),
policy=dict(
cuda=True,
on_policy=False,
# (bool) whether use on-policy training pipeline(behaviour policy and training policy are the same)
model=dict(
obs_shape=[512, ],
action_shape=6,
encoder_hidden_size_list=[512, 512, 256],
actor_head_hidden_size=256,
actor_head_layer_num=2,
critic_head_hidden_size=256,
critic_head_layer_num=2,
),
learn=dict(
update_per_collect=16,
batch_size=128,
# (bool) Whether to normalize advantage. Default to False.
normalize_advantage=False,
learning_rate=0.0001,
weight_decay=0,
# (float) loss weight of the value network, the weight of policy network is set to 1
value_weight=0.5,
# (float) loss weight of the entropy regularization, the weight of policy network is set to 1
entropy_weight=0.03,
clip_ratio=0.1,
),
collect=dict(
# (int) collect n_sample data, train model n_iteration times
n_sample=1024,
# (float) the trade-off factor lambda to balance 1step td and mc
gae_lambda=0.97,
discount_factor=0.99,
),
eval=dict(evaluator=dict(eval_freq=200, )),
other=dict(
replay_buffer=dict(
replay_buffer_size=100000,
max_use=3,
min_sample_ratio=1,
),
),
),
)
main_config = EasyDict(pong_ppo_config)
pong_ppo_create_config = dict(
env=dict(
type='pomdp',
import_names=['app_zoo.pomdp.envs.atari_env'],
),
env_manager=dict(type='subprocess'),
policy=dict(type='ppo'),
)
create_config = EasyDict(pong_ppo_create_config)
if __name__ == '__main__':
serial_pipeline((main_config, create_config), seed=0)
| [
"[email protected]"
] | |
e257465739f45cb8c41e0db62fca6e5c5961ffa1 | adbf09a31415e6cf692ff349bd908ea25ded42a8 | /revision/imports/at_decorators.py | 2aabeeb6b8c5d049d7838859f8ed33c4e323cd5f | [] | no_license | cmulliss/gui_python | 53a569f301cc82b58880c3c0b2b415fad1ecc3f8 | 6c83d8c2e834464b99024ffd8cf46ac4e734e7a4 | refs/heads/main | 2023-08-12T22:33:01.596005 | 2021-10-11T12:35:41 | 2021-10-11T12:35:41 | 408,176,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | import functools
user = {"username": "jose", "access_level": "guest"}
def make_secure(func):
@functools.wraps(func)
def secure_function():
if user["access_level"] == "admin":
return func()
else:
return f"No admin permissions for {user['username']}."
return secure_function
# @make_secure on top of a fn definition, that will prevent the fn from being created as is, and instead it will create it and pass it through the decorator in one go.
@make_secure
def get_admin_password():
return "1234"
# user = {"username": "bob", "access_level": "admin"}
print(get_admin_password())
| [
"[email protected]"
] | |
8ecbea64502b0476e4dd3ec28e53802d98d8344c | 5945903ff7b3c0be799d8b228aa96309e8d6b68a | /LeetCode_Offer_II_004.py | 3d9937377035c818327328a19057cccb76588d80 | [] | no_license | freesan44/LeetCode | 44fd01fa37e2d7e729ae947da2350b1649c163ae | 2ed9f1955c527d43fe1a02e5bebf5a6f981ef388 | refs/heads/master | 2021-12-07T20:07:02.308097 | 2021-11-01T23:58:11 | 2021-11-01T23:58:11 | 245,178,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | class Solution:
def singleNumber(self, nums: List[int]) -> int:
from collections import Counter
counter = Counter(nums).most_common()
x,y = counter[-1]
# print(x,y)
return x
if __name__ == '__main__':
# nums = [2,2,3,2]
nums = [0, 1, 0, 1, 0, 1, 100]
ret = Solution().singleNumber(nums)
print(ret) | [
"[email protected]"
] | |
cf6a17b5b27c54829138ed5b6d13c654a57a13d9 | 03869888260ab1b28c5912f6019a44b1fcbb6c19 | /acrylamid/templates/jinja2.py | 95c8c1f7f305e42f346aaed4e42d7d357029850f | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | kenkeiras/acrylamid | 515971b5385a554f310683d993eb34e38611d50a | 792298eb32daa0e703afdb2894ee121dc3861d43 | refs/heads/master | 2020-03-27T12:33:54.176728 | 2013-03-26T11:35:09 | 2013-03-26T11:35:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,374 | py | # -*- encoding: utf-8 -*-
#
# Copyright 2012 Martin Zimmermann <[email protected]>. All rights reserved.
# License: BSD Style, 2 clauses -- see LICENSE.
from __future__ import absolute_import
from io import StringIO
from os.path import exists, getmtime
from jinja2 import Environment as J2Environemnt, FileSystemBytecodeCache
from jinja2 import FileSystemLoader, meta
from acrylamid.templates import AbstractEnvironment, AbstractTemplate
class ExtendedFileSystemLoader(FileSystemLoader):
"""A custom :class:`jinja2.FileSystemLoader` to work with Acrylamid's
caching requirements. Jinja2 does track template changes using the
modification timestamp of the compiled but we need template dependencies
as well as consistent modified values over the whole compilation
process."""
# remember already resolved templates
resolved = {}
# used templates
used = set(['macros.html', ])
def load(self, environment, name, globals=None):
"""patched `load` to add a modified attribute providing information
whether the template or its parents have changed."""
def resolve(parent):
"""We check whether any dependency (extend-block) has changed and
update the bucket -- recursively. Returns True if the template
itself or any parent template has changed. Otherwise False."""
self.used.add(parent)
if parent in self.resolved:
return self.resolved[parent]
source, filename, uptodate = self.get_source(environment, parent)
bucket = bcc.get_bucket(environment, parent, filename, source)
p = bcc._get_cache_filename(bucket)
modified = getmtime(filename) > getmtime(p) if exists(p) else True
if modified:
# updating cached template if timestamp has changed
code = environment.compile(source, parent, filename)
bucket.code = code
bcc.set_bucket(bucket)
self.resolved[parent] = True
return True
ast = environment.parse(source)
for name in meta.find_referenced_templates(ast):
rv = resolve(name)
if rv:
# XXX double-return to break this recursion?
return True
if globals is None:
globals = {}
source, filename, uptodate = self.get_source(environment, name)
bcc = environment.bytecode_cache
bucket = bcc.get_bucket(environment, name, filename, source)
modified = bool(resolve(name))
code = bucket.code
if code is None:
code = environment.compile(source, name, filename)
tt = environment.template_class.from_code(environment, code,
globals, uptodate)
tt.modified = modified
return tt
class Environment(AbstractEnvironment):
def init(self, layoutdir, cachedir):
self.jinja2 = J2Environemnt(loader=ExtendedFileSystemLoader(layoutdir),
bytecode_cache=FileSystemBytecodeCache(cachedir))
# jinja2 is stupid and can't import any module during runtime
import time, datetime, urllib
for module in (time, datetime, urllib):
self.jinja2.globals[module.__name__] = module
for name in dir(module):
if name.startswith('_'):
continue
obj = getattr(module, name)
if hasattr(obj, '__class__') and callable(obj):
self.jinja2.filters[module.__name__ + '.' + name] = obj
def register(self, name, func):
self.jinja2.filters[name] = func
def fromfile(self, path):
return Template(self.jinja2.get_template(path))
def extend(self, path):
self.jinja2.loader.searchpath.append(path)
@property
def templates(self):
return self.jinja2.loader.used
@property
def extension(self):
return ['.html', '.j2']
class Template(AbstractTemplate):
def __init__(self, template):
self.template = template
def render(self, **kw):
buf = StringIO()
self.template.stream(**kw).dump(buf)
return buf
@property
def modified(self):
return self.template.modified
| [
"[email protected]"
] | |
48a2ca87abfdda05840e297a3a45819b20ef60d0 | c3a84a07539c33040376f2c1e140b1a1041f719e | /wagtail-stubs/contrib/postgres_search/models.pyi | c9990209da1b7ca59c1ff89fcdab8dbd624dd3d4 | [] | no_license | tm-kn/tmp-wagtail-stubs | cc1a4434b7142cb91bf42efb7daad006c4a7dbf4 | 23ac96406610b87b2e7751bc18f0ccd27f17eb44 | refs/heads/master | 2023-01-20T14:41:33.962460 | 2020-11-30T23:15:38 | 2020-11-30T23:15:38 | 317,332,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 943 | pyi | from .utils import get_descendants_content_types_pks as get_descendants_content_types_pks
from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from typing import Any
from wagtail.search.index import class_is_indexed as class_is_indexed
class TextIDGenericRelation(GenericRelation):
auto_created: bool = ...
def get_content_type_lookup(self, alias: Any, remote_alias: Any): ...
def get_object_id_lookup(self, alias: Any, remote_alias: Any): ...
def get_extra_restriction(self, where_class: Any, alias: Any, remote_alias: Any): ...
def resolve_related_fields(self): ...
class IndexEntry(models.Model):
content_type: Any = ...
object_id: Any = ...
content_object: Any = ...
autocomplete: Any = ...
title: Any = ...
title_norm: Any = ...
body: Any = ...
@property
def model(self): ...
@classmethod
def add_generic_relations(cls) -> None: ...
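# Usage sketch (hypothetical model, not part of these stubs): the search
# backend gives every indexed model a reverse relation to IndexEntry,
# roughly equivalent to declaring
#
#   class BlogPage(Page):
#       index_entries = TextIDGenericRelation(IndexEntry)
#
# add_generic_relations() is the classmethod that wires this up for each
# indexed model.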
| [
"[email protected]"
] | |
4526c6ad6240b783d55680571665a34fb0c9ab6b | c934e7c27f0e72385218a14b4e2a7e94a747a360 | /google-cloud-sdk/lib/surface/workspace_add_ons/deployments/__init__.py | c882b0f970690ca156c971efa50fc7708fcc45e7 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | PrateekKhatri/gcloud_cli | 5f74b97494df4f61816026af9460b9c4d8e89431 | 849d09dd7863efecbdf4072a504e1554e119f6ae | refs/heads/master | 2023-03-27T05:53:53.796695 | 2021-03-10T04:08:14 | 2021-03-10T04:08:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 965 | py | # -*- coding: utf-8 -*- #
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package for the api_keys CLI subcommands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
@base.Hidden
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Deployments(base.Group):
"""Manage Google Workspace Add-ons Deployments."""
| [
"[email protected]"
] | |
df1c8a26e0becbc5ddede3a5087948f61aaf7f2e | d3efc82dfa61fb82e47c82d52c838b38b076084c | /crossmarketetf_bak/crossmarket_creation_HA/YW_CETFSS_SHSG_020.py | 5d28365fc63ea90b37a0dd3fec3d52f5c0307129 | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,323 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test")
from crossmarketetf.cetfservice.cetf_main_service import *
from crossmarketetf.cetfservice.cetf_get_components_asset import *
from crossmarketetf.cetfservice.cetf_utils import *
from mysql.QueryOrderErrorMsg import queryOrderErrorMsg
from service.mainService import *
from mysql.getUpOrDownPrice import getUpPrice
class YW_CETFSS_SHSG_020(xtp_test_case):
def test_YW_CETFSS_SHSG_020(self):
# ----------- ETF creation (subscription) -------------
title = '上海ETF申购--可现金替代:T-1日无成分股&资金不足&计算现金比例=最大现金比例→T日申购ETF'
# Define the expected results of the current test case.
# Expected status: initial, unfilled, fully filled, rejected, cancel-of-rejected, internal cancel.
# xtp_ID and cancel_xtpID default to 0 and need not be changed.
case_goal = {
'期望状态': '废单',
'errorID': 11010120,
'errorMSG': queryOrderErrorMsg(11010120),
'是否生成报单': '是',
'是否是撤废': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
unit_info = {
'ticker': '550320',  # ETF ticker
'etf_unit': 1.0,  # number of ETF creation units to subscribe
'etf_unit_sell': 1.0,  # number of ETF units to sell
'component_unit_sell': 1.0  # component-stock sell units
}
# ----------- Query component-stock positions before the ETF creation -------------
component_stk_info = cetf_get_all_component_stk(Api,unit_info['ticker'])
# query the ETF's minimum creation/redemption unit
unit_number = query_creation_redem_unit(unit_info['ticker'])
# ETF creation quantity
quantity = int(unit_info['etf_unit'] * unit_number)
# define the order parameters ------------------------------------------
wt_reqs = {
'business_type':
Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_ETF'],
'order_client_id':
2,
'market':
Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
'ticker':
unit_info['ticker'],
'side':
Api.const.XTP_SIDE_TYPE['XTP_SIDE_PURCHASE'],
'price_type':
Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
'quantity':
quantity
}
g_func.cetf_parm_init(case_goal['期望状态'])
rs1 = cetf_service_test(Api, case_goal, wt_reqs,component_stk_info)
etf_creation_log(case_goal, rs1)
self.assertEqual(rs1['用例测试结果'], True)
# -------- Secondary market: sell the ETF -----------
case_goal['期望状态'] = '废单'
case_goal['errorID'] = 11010121
case_goal['errorMSG'] = queryOrderErrorMsg(11010121)
# quantity of ETF units to sell on the secondary market
quantity = int(unit_info['etf_unit_sell'] * unit_number)
quantity_list = split_etf_quantity(quantity)
# query the limit-up price
limitup_px = getUpPrice(unit_info['ticker'])
rs2 = {}
for etf_quantity in quantity_list:
wt_reqs_etf = {
'business_type':
Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'order_client_id':
2,
'market':
Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
'ticker':
unit_info['ticker'],
'side':
Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type':
Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_CANCEL'],
'price':
limitup_px,
'quantity':
etf_quantity
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
rs2 = serviceTest(Api, case_goal, wt_reqs_etf)
if rs2['用例测试结果'] is False:
etf_sell_log(case_goal, rs2)
self.assertEqual(rs2['用例测试结果'], True)
return
etf_sell_log(case_goal, rs2)
# ------------ Secondary market: sell the component stocks -----------
case_goal['期望状态'] = '废单'
case_goal['errorID'] = 11010121
case_goal['errorMSG'] = queryOrderErrorMsg(11010121)
# query the ETF component-stock codes and share counts
etf_components = query_cetf_component_share(unit_info['ticker'])
# a sell unit >= 100 is an absolute share quantity; < 100 is a fraction of the holding
rs3 = {}
for stk_code in etf_components:
# creation cases 1-43 hold one Shanghai and one Shenzhen component each; the Shenzhen one is '008000', so sell only the Shanghai stock
if stk_code != '008000':
components_share = etf_components[stk_code]
quantity = (int(unit_info['component_unit_sell'])
if unit_info['component_unit_sell'] >= 100
else int(components_share * unit_info['component_unit_sell']))
limitup_px = getUpPrice(stk_code)
wt_reqs = {
'business_type':
Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'order_client_id':
2,
'market':
Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
'ticker':
stk_code,
'side':
Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type':
Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_CANCEL'],
'price':
limitup_px,
'quantity':
quantity
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
rs3 = serviceTest(Api, case_goal, wt_reqs)
if rs3['用例测试结果'] is False:
etf_components_sell_log(case_goal, rs3)
self.assertEqual(rs3['用例测试结果'], True)
etf_components_sell_log(case_goal, rs3)
self.assertEqual(rs3['用例测试结果'], True)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
09920af61b71677948e7bed9fa77cabae508c78f | 87e62af4768c0f594e675551f4c7c1c81ce7f7d9 | /lawyer/spiders/legislation/caipan_wenshu_spider.py | 09b385d1e6eb1bcee041a82c399db8cb43aa3e79 | [] | no_license | dulei001/Spider | 78d12adbef1d865da6978704fe146cc21a8d2d3e | 628d468501c6502763ce453a58a09813b2a46b8c | refs/heads/master | 2021-01-18T17:27:11.771434 | 2019-05-14T02:14:33 | 2019-05-14T02:14:33 | 86,802,130 | 12 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,078 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import scrapy
import re
# Spider for court judgment documents ("裁判文书") on jufaanli.com (Jufa)
class CaipanInfoSpider(scrapy.spiders.Spider):
name = "jufa"
allowed_domains = ['www.jufaanli.com']
start_urls = [
"http://www.jufaanli.com/search2?TypeKey=1%253A%25E6%25A1%2588"
]
pagesize = 20
# Spider entry point: iterate over every paginated result page
def parse(self, response):
pageurl = 'http://www.jufaanli.com/home/search/searchJson'
# use integer division so range() receives an int (the site reports ~26,694,941 documents)
totalPage = (26694941 - 1) // (self.pagesize + 1)
#totalPage = 2
for page in range(1,totalPage):
yield scrapy.FormRequest(url=pageurl,
method="POST",
headers={'X-Requested-With': 'XMLHttpRequest'},
dont_filter=True,
callback=self.parseAjaxList,
errback=self.handle_error,
formdata={"page":str(page),"searchNum":str(self.pagesize)})
# paginated search-result list (AJAX JSON response)
def parseAjaxList(self,response):
data = json.loads(response.body_as_unicode())
detailUrl='http://www.jufaanli.com/detail/'
for item in data['info']["searchList"]['list']:
yield scrapy.Request(url=detailUrl+ item['uuid'],
method="GET",
dont_filter=True,
callback=self.parseAjaxDetail,
errback=self.handle_error,
)
# judgment detail page
def parseAjaxDetail(self,response):
item={}
# title (the character-class regex strips "指导案例…号:" prefix characters)
item['title']= ''.join(response.css('.text-center.text-black::text').re(u'[^指导案例\d号:]'))
# court
item['fanyuan']=''.join(response.xpath(u'//div[@class="col-lg-4 title-area"]/span[text()="审理法院"]/parent::div/following-sibling::div/a/text()').extract())
# case number
item['anhao'] = ''.join(response.xpath(u'//div[@class="col-lg-4 title-area"]/span[text()="案号"]/parent::div/following-sibling::div/span/text()').extract())
# cause of action
item['anyou'] = ''.join(response.xpath(u'//div[@class="col-lg-4 title-area"]/span[text()="案由"]/parent::div/following-sibling::div/a/text()').extract())
# case type
item['type'] = ','.join(response.xpath(u'//div[@class="col-lg-4 title-area"]/span[text()="案件类型"]/parent::div/following-sibling::div/span/text()').extract()).rstrip(',')
# judgment date
item['stime'] = ''.join(response.xpath(u'//div[@class="col-lg-4 title-area"]/span[text()="审判日期"]/parent::div/following-sibling::div/span/text()').extract())
# trial procedure
item['slcx'] = ''.join(response.xpath(u'//div[@class="col-lg-4 title-area"]/span[text()="审理程序"]/parent::div/following-sibling::div/span/text()').extract())
# keywords
item['keywords'] = ','.join(response.xpath(u'//div[@class="col-lg-4 title-area"]/span[text()="关键词"]/parent::div/following-sibling::div').css('.info-item.info-item-gray a::text').extract()).rstrip(',')
# lawyers and their law firms
lvshidic=[];
for i in response.xpath(u'//div[@class="col-lg-4 title-area"]/span[text()="律师"]/parent::div/following-sibling::div').css('.legislation-info'):
lvshidic.append({"lvsuo":''.join(i.css('a::text').extract()),'names':''.join(i.css('span::text').extract())})
item['fagui']=response.css('.eachCiteLaw a::text').extract()
# law-firm / lawyer pairs
item['lvsuolvshi'] =lvshidic
# judgment body text (strip class/name/id attributes from the HTML)
item['content'] = re.sub('(class|name|id)="[^"]*?"','', ''.join(response.xpath('//*[@id="caseText"]').extract()))
item['collection'] = 'caipan'
item['source'] = '聚法网'
item["url"] = response.url
#item["html"] = response.text
return item
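# Usage sketch (standard Scrapy invocation, assuming the spider is part of a
# Scrapy project; the output file name is illustrative):
#
#   scrapy crawl jufa -o caipan.jl
#
# Each yielded item is one judgment document: title, court, case number,
# cause of action, dates, lawyers/firms, cited statutes and the raw HTML body.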
def handle_error(self, result, *args, **kw):
self.logger.error("error url is :%s" % result.request.url) | [
"[email protected]"
] | |
60c1b6630661e4beab1e47ea2c1e71f35603c745 | cb1f4f794b9efe7fcf90726cfff12e0051af7702 | /timeserio/batches/single/base.py | ad6fa65aebf095a5aaffce294cf272284456bceb | [
"MIT"
] | permissive | valeman/timeserio | 67cd4e9da977decbfe99a7a5324f08ab3d88e75a | b52576b24a5643a2a21490b9f7ca63f8566a88ce | refs/heads/master | 2022-04-12T00:52:26.368003 | 2020-03-11T10:53:37 | 2020-03-11T10:53:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | import abc
from keras.utils import Sequence
class BatchGenerator(Sequence, abc.ABC):
pass
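# A minimal concrete generator implements the keras Sequence protocol; sketch
# (hypothetical, not part of the package API):
#
#   class ArrayBatchGenerator(BatchGenerator):
#       def __init__(self, x, y, batch_size=32):
#           self.x, self.y, self.batch_size = x, y, batch_size
#
#       def __len__(self):
#           # number of batches per epoch
#           return (len(self.x) + self.batch_size - 1) // self.batch_size
#
#       def __getitem__(self, idx):
#           sl = slice(idx * self.batch_size, (idx + 1) * self.batch_size)
#           return self.x[sl], self.y[sl]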
| [
"[email protected]"
] | |
fbd1a036d4757c1093524dbb514fd4f26baeb738 | 3ee747ca63eb16997401ef88fadf0a646af2ca1e | /tests/test_utils.py | 95e397b43d9ba94716bf110b1fe3e00138d65602 | [
"MIT",
"Python-2.0"
] | permissive | riverflowo/NURBS-Python | 5f382f7a24b34f7b09e26ce4d64136fbba95481f | b564eaceef72f00b385981dac3c7b93b5490f4fe | refs/heads/master | 2020-03-29T13:57:39.647404 | 2018-09-12T02:05:34 | 2018-09-12T02:05:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,580 | py | """
Tests for the NURBS-Python package
Released under The MIT License. See LICENSE file for details.
Copyright (c) 2018 Onur Rauf Bingol
Tests geomdl.utilities module. Requires "pytest" to run.
"""
import pytest
from geomdl import utilities
GEOMDL_DELTA = 10e-8
def test_generate_knot_vector1():
with pytest.raises(ValueError):
degree = 0
num_ctrlpts = 12
utilities.generate_knot_vector(degree, num_ctrlpts)
def test_generate_knot_vector2():
with pytest.raises(ValueError):
degree = 4
num_ctrlpts = 0
utilities.generate_knot_vector(degree, num_ctrlpts)
def test_generate_knot_vector3():
with pytest.raises(ValueError):
degree = 0
num_ctrlpts = 0
utilities.generate_knot_vector(degree, num_ctrlpts)
def test_generate_knot_vector4():
degree = 4
num_ctrlpts = 12
autogen_kv = utilities.generate_knot_vector(degree, num_ctrlpts)
result = [0.0, 0.0, 0.0, 0.0, 0.0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1.0, 1.0, 1.0, 1.0, 1.0]
assert autogen_kv == result
def test_generate_knot_vector5():
# testing auto-generated unclamped knot vector
degree = 3
num_ctrlpts = 5
result = [0.0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1.0]
autogen_kv = utilities.generate_knot_vector(degree, num_ctrlpts, clamped=False)
assert autogen_kv == result
def test_check_knot_vector1():
with pytest.raises(ValueError):
utilities.check_knot_vector(4, tuple(), 12)
def test_check_knot_vector2():
to_check = utilities.check_knot_vector(4, (1, 2, 3, 4), 12)
result = False
assert to_check == result
def test_check_knot_vector3():
to_check = utilities.check_knot_vector(3, (5, 3, 6, 5, 4, 5, 6), 3)
result = False
assert to_check == result
def test_check_knot_vector4():
degree = 4
num_ctrlpts = 12
autogen_kv = utilities.generate_knot_vector(degree, num_ctrlpts)
check_result = utilities.check_knot_vector(degree=degree, num_ctrlpts=num_ctrlpts, knot_vector=autogen_kv)
assert check_result
def test_check_knot_vector5():
degree = 4
num_ctrlpts = 12
with pytest.raises(TypeError):
utilities.check_knot_vector(degree=degree, num_ctrlpts=num_ctrlpts, knot_vector=5)
def test_normalize_knot_vector1():
# check for empty list/tuple
with pytest.raises(ValueError):
utilities.normalize_knot_vector(tuple())
def test_normalize_knot_vector2():
input_kv = (-5, -5, -3, -2, 2, 3, 5, 5)
output_kv = [0.0, 0.0, 0.2, 0.3, 0.7, 0.8, 1.0, 1.0]
to_check = utilities.normalize_knot_vector(input_kv)
assert to_check == output_kv
def test_normalize_knot_vector3():
with pytest.raises(TypeError):
utilities.normalize_knot_vector(5)
def test_check_uv1():
with pytest.raises(ValueError):
u = -0.1
v = 0.1
utilities.check_uv(u, v)
def test_check_uv2():
with pytest.raises(ValueError):
u = 2
v = 0.1
utilities.check_uv(u, v)
def test_check_uv3():
with pytest.raises(ValueError):
v = -0.1
u = 0.1
utilities.check_uv(u, v)
def test_check_uv4():
with pytest.raises(ValueError):
v = 2
u = 0.1
utilities.check_uv(u, v)
def test_linspace():
start = 5
stop = 11
num = 4
result = [5.0, 7.0, 9.0, 11.0]
to_check = utilities.linspace(start, stop, num)
assert to_check == result
def test_vector_dot1():
with pytest.raises(ValueError):
vec1 = ()
vec2 = ()
utilities.vector_dot(vec1, vec2)
def test_vector_dot2():
result = 32
vec1 = (1, 2, 3)
vec2 = (1, 5, 7)
to_check = utilities.vector_dot(vec1, vec2)
assert to_check == result
def test_vector_dot3():
with pytest.raises(TypeError):
utilities.vector_dot(5, 9.7)
def test_vector_cross1():
with pytest.raises(ValueError):
vec1 = ()
vec2 = ()
utilities.vector_cross(vec1, vec2)
def test_vector_cross2():
with pytest.raises(ValueError):
vec1 = (1, 2, 3, 4)
vec2 = (1, 5, 7, 9)
utilities.vector_cross(vec1, vec2)
def test_vector_cross3():
result = (-1.0, -4.0, 3.0)
vec1 = (1, 2, 3)
vec2 = (1, 5, 7)
to_check = utilities.vector_cross(vec1, vec2)
assert to_check == result
def test_vector_cross4():
with pytest.raises(TypeError):
utilities.vector_cross(5, 9.7)
def test_vector_normalize1():
with pytest.raises(ValueError):
vec = ()
utilities.vector_normalize(vec)
def test_vector_normalize2():
with pytest.raises(ValueError):
vec = (0, 0)
utilities.vector_normalize(vec)
def test_vector_normalize3():
with pytest.raises(TypeError):
utilities.vector_normalize(5)
def test_vector3_normalize():
vec = (5, 2.5, 5)
result = (0.667, 0.333, 0.667)
to_check = utilities.vector_normalize(vec, decimals=3)
assert to_check == result
def test_vector4_normalize():
vec = (5, 2.5, 5, 10)
result = (0.4, 0.2, 0.4, 0.8)
to_check = utilities.vector_normalize(vec)
assert to_check == result
def test_vector_generate1():
with pytest.raises(ValueError):
pt1 = ()
pt2 = (1, 2, 3)
utilities.vector_generate(pt1, pt2)
def test_vector_generate2():
pt1 = (0, 0, 0)
pt2 = (5, 3, 4)
result = (5, 3, 4)
result_normalized = (0.707107, 0.424264, 0.565685)
to_check = utilities.vector_generate(pt1, pt2)
to_check_normalized = utilities.vector_generate(pt1, pt2, normalize=True)
assert to_check == result
assert to_check_normalized == result_normalized
def test_vector_generate3():
with pytest.raises(TypeError):
utilities.vector_generate(5, 9.7)
def test_point_translate1():
with pytest.raises(ValueError):
pt1 = ()
pt2 = (1, 2, 3)
utilities.point_translate(pt1, pt2)
def test_point_translate2():
pt = (1, 0, 0)
vec = (5, 5, 5)
result = (6, 5, 5)
to_check = utilities.point_translate(pt, vec)
assert to_check == result
def test_point_translate3():
with pytest.raises(TypeError):
utilities.point_translate(5, 9.7)
def test_binomial_coefficient1():
result = 0.0
to_check = utilities.binomial_coefficient(13, 14)
assert to_check == result
def test_binomial_coefficient2():
result = 1.0
to_check = utilities.binomial_coefficient(13, 13)
assert to_check == result
def test_binomial_coefficient3():
result = 680.0
to_check = utilities.binomial_coefficient(17, 3)
assert to_check == result
def test_frange1():
start = 5
stop = 11
step = 2
to_check = []
for fr in utilities.frange(start, stop, step):
to_check.append(fr)
result = [5.0, 7.0, 9.0, 11.0]
assert to_check == result
def test_frange2():
check = list(utilities.frange(0, 1, 0.1))
result = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
check_flag = True
for c, r in zip(check, result):
if abs(c - r) > GEOMDL_DELTA:
check_flag = False
assert check_flag
def test_color_generator():
seed = 17 # some number to be used as the random seed
result = utilities.color_generator(seed)
to_check = utilities.color_generator(seed)
assert to_check == result
def test_init_var1():
test_var_type = list
assert test_var_type() == utilities.init_var(test_var_type)
def test_init_var2():
test_var_type = None
assert test_var_type == utilities.init_var(test_var_type)
def test_vector_multiply():
result = (2, 4, 6)
computed = utilities.vector_multiply((1, 2, 3), 2)
assert result == computed
def test_vector_mean():
vector_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
result = (4, 5, 6)
computed = utilities.vector_mean(*vector_list)
assert result == computed
def test_vector_angle_between():
computed_deg = utilities.vector_angle_between((1, 2, 3), (3, 2, 1), degrees=True)
computed_rad = utilities.vector_angle_between((1, 2, 3), (3, 2, 1), degrees=False)
result_deg = 44.415308597193
result_rad = 0.775193373310361
assert abs(computed_deg - result_deg) < GEOMDL_DELTA
assert abs(computed_rad - result_rad) < GEOMDL_DELTA
def test_point_distance():
result = 17.691806
computed = utilities.point_distance((5, 7, 9), (-7, -5, 4))
assert abs(result - computed) < GEOMDL_DELTA
def test_point_mid():
result = (2.5, 3.5, 4.5)
computed = utilities.point_mid((1, 2, 3), (4, 5, 6))
assert result == computed
| [
"[email protected]"
] | |
fa41413edc689db57f3afe37347b2bb07b49f3a1 | b9e4bf5c00ac0d6c1a6e6038e8dc18041819ff99 | /Python3/0716_Max_Stack.py | 45a35d6d09c45a3a9630134e1a1c123ef683ea60 | [] | no_license | kiranani/playground | 98fdb70a3ca651436cc1eede0d2ba1b1ea9aba1d | 12f62a218e827e6be2578b206dee9ce256da8d3d | refs/heads/master | 2021-06-03T12:43:29.388589 | 2020-06-12T15:43:45 | 2020-06-12T15:43:45 | 149,614,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 884 | py | class MaxStack:
def __init__(self):
"""
initialize your data structure here.
"""
self.l = [(float("inf"), -float("inf"))]
def push(self, x: int) -> None:
self.l.append((x, max(x, self.l[-1][1])))
def pop(self) -> int:
return self.l.pop()[0]
def top(self) -> int:
return self.l[-1][0]
def peekMax(self) -> int:
return self.l[-1][1]
def popMax(self) -> int:
mx, s = self.peekMax(), []
while self.l[-1][0] != mx:
s.append(self.l.pop()[0])
self.l.pop()
while s:
self.push(s.pop())
return mx
# Your MaxStack object will be instantiated and called as such:
# obj = MaxStack()
# obj.push(x)
# param_2 = obj.pop()
# param_3 = obj.top()
# param_4 = obj.peekMax()
# param_5 = obj.popMax()
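# Quick self-check (illustrative; push/pop/top/peekMax are O(1), popMax is O(n)):
if __name__ == "__main__":
    s = MaxStack()
    for v in (5, 1, 5):
        s.push(v)
    assert s.top() == 5
    assert s.popMax() == 5  # removes the topmost of the two maximums
    assert s.top() == 1
    assert s.peekMax() == 5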
| [
"[email protected]"
] | |
4386d9bd629660635a27a3b219d035be73a9ac41 | 79babd1502ea1bd701ce021cfa75dc25ca35a700 | /python/rootTools/Utils.py | a4e4d3433c4ff2b0999a0502997f62013d960800 | [] | no_license | RazorCMS/RazorAnalyzer | 99b89b33d2ec2be1d42e3705569d49cd3346d40a | 2e2adff5ba5d2306c9f0b40c2a5297782fae3158 | refs/heads/master | 2021-01-23T21:18:53.534772 | 2019-03-22T14:21:41 | 2019-03-22T14:21:41 | 24,916,087 | 1 | 9 | null | 2017-09-11T15:51:28 | 2014-10-07T23:27:09 | C++ | UTF-8 | Python | false | false | 3,724 | py | #$Revision:$
#the following is from http://stackoverflow.com/questions/1006289/how-to-find-out-the-number-of-cpus-in-python
def determineNumberOfCPUs():
""" Number of virtual or physical CPUs on this system, i.e.
user/real as output by time(1) when called with an optimally scaling userspace-only program"""
import os,re,subprocess
# Python 2.6+
try:
import multiprocessing
return multiprocessing.cpu_count()
except (ImportError,NotImplementedError):
pass
# POSIX
try:
res = int(os.sysconf('SC_NPROCESSORS_ONLN'))
if res > 0:
return res
except (AttributeError,ValueError):
pass
# Windows
try:
res = int(os.environ['NUMBER_OF_PROCESSORS'])
if res > 0:
return res
except (KeyError, ValueError):
pass
# jython
try:
from java.lang import Runtime
runtime = Runtime.getRuntime()
res = runtime.availableProcessors()
if res > 0:
return res
except ImportError:
pass
# BSD
try:
sysctl = subprocess.Popen(['sysctl', '-n', 'hw.ncpu'], stdout=subprocess.PIPE)
scStdout = sysctl.communicate()[0]
res = int(scStdout)
if res > 0:
return res
except (OSError, ValueError):
pass
# Linux
try:
res = open('/proc/cpuinfo').read().count('processor\t:')
if res > 0:
return res
except IOError:
pass
# Solaris
try:
pseudoDevices = os.listdir('/devices/pseudo/')
expr = re.compile('^cpuid@[0-9]+$')
res = 0
for pd in pseudoDevices:
if expr.match(pd) != None:
res += 1
if res > 0:
return res
except OSError:
pass
# Other UNIXes (heuristic)
try:
try:
dmesg = open('/var/run/dmesg.boot').read()
except IOError:
dmesgProcess = subprocess.Popen(['dmesg'], stdout=subprocess.PIPE)
dmesg = dmesgProcess.communicate()[0]
res = 0
while '\ncpu' + str(res) + ':' in dmesg:
res += 1
if res > 0:
return res
except OSError:
pass
raise Exception('Can not determine number of CPUs on this system')
def importToWS(workspace, *args):
"""Utility function to call the RooWorkspace::import methods"""
return getattr(workspace,'import')(*args)
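# Usage sketch (assumes a ROOT/RooFit session; names are illustrative). The
# helper exists because `import` is a Python keyword, so ws.import(...) cannot
# be written directly:
#
#   import ROOT
#   ws = ROOT.RooWorkspace("w")
#   importToWS(ws, pdf)   # same as calling the workspace's import() method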
#------------------------------------------------------------------------------
# File: Color.py
# Description: colors
# Created: 22 Sep 2010 Harrison B. Prosper
#------------------------------------------------------------------------------
RED ="\x1b[0;31;48m"
GREEN ="\x1b[0;32;48m"
YELLOW ="\x1b[0;33;48m"
BLUE ="\x1b[0;34;48m"
MAGENTA="\x1b[0;35;48m"
CYAN ="\x1b[0;36;48m"
BOLDRED ="\x1b[1;31;48m"
BOLDGREEN ="\x1b[1;32;48m"
BOLDYELLOW ="\x1b[1;33;48m"
BOLDBLUE ="\x1b[1;34;48m"
BOLDMAGENTA="\x1b[1;35;48m"
BOLDCYAN ="\x1b[1;36;48m"
RESETCOLOR ="\x1b[0m" # reset to default foreground color
#------------------------------------------------------------------------------
def nameonly(s):
import os
return os.path.splitext(os.path.basename(s))[0]
def scream(message):
import sys
from random import randint
i = randint(0,4)
random_phrases = {0: 'Twas brillig and the slithy tothes',
1: 'Let all the evil that lurks in the mud hatch out',
2: 'Alas poor CMS I knew them well!',
3: 'Lies, damned lies, and statistics',
4: 'Speak severely to your little boy and beat him '\
'when he sneezes'}
# parenthesized so the call also works under Python 3
print("\n** %s\n** %s%s%s\n" % (random_phrases[i], BLUE, message,
RESETCOLOR))
sys.exit(0)
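# Usage sketch for the color constants (illustrative):
#
#   print("%s%s%s" % (BOLDRED, "fit failed", RESETCOLOR))
#   print("%sstage: %s%s" % (CYAN, nameonly("/a/b/fit.root"), RESETCOLOR))  # -> stage: fit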
| [
"[email protected]"
] | |
a2aa0983b8a49972d004006dd2709b75fd1ab70d | 7c15f211adc9e9eb9f66ccdd570c9f38dff7ea8d | /packages/autorest.python/test/azure/version-tolerant/Expected/AcceptanceTests/SubscriptionIdApiVersionVersionTolerant/subscriptionidapiversionversiontolerant/operations/__init__.py | c58de146570db0acc07d6828002cccd80c37cc94 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/autorest.python | cc4bfbf91ae11535731cad37cedd6b733edf1ebd | a00d7aaa3753ef05cb5a0d38c664a90869478d44 | refs/heads/main | 2023-09-03T06:58:44.246200 | 2023-08-31T20:11:51 | 2023-08-31T20:11:51 | 100,315,955 | 47 | 40 | MIT | 2023-09-14T21:00:21 | 2017-08-14T22:58:33 | Python | UTF-8 | Python | false | false | 771 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._operations import GroupOperations
from ._patch import __all__ as _patch_all
from ._patch import * # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
__all__ = [
"GroupOperations",
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
| [
"[email protected]"
] | |
bdcbaa97c7e2dc4010390b06ac580bfda23aa946 | 71c331e4b1e00fa3be03b7f711fcb05a793cf2af | /QA-System-master/SpeechToText_test/google-cloud-sdk/lib/surface/run/deploy.py | 32cd25c00b8387be0cb01911f797495b0301f3b6 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | iofh/QA-System | 568228bb0c0adf9ec23b45cd144d61049e720002 | af4a8f1b5f442ddf4905740ae49ed23d69afb0f6 | refs/heads/master | 2022-11-27T23:04:16.385021 | 2020-08-12T10:11:44 | 2020-08-12T10:11:44 | 286,980,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,853 | py | # -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Deploy a container to Cloud Run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import uuid
from googlecloudsdk.api_lib.cloudbuild import cloudbuild_util
from googlecloudsdk.api_lib.run import traffic
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.builds import flags as build_flags
from googlecloudsdk.command_lib.builds import submit_util
from googlecloudsdk.command_lib.run import config_changes as config_changes_mod
from googlecloudsdk.command_lib.run import connection_context
from googlecloudsdk.command_lib.run import flags
from googlecloudsdk.command_lib.run import messages_util
from googlecloudsdk.command_lib.run import pretty_print
from googlecloudsdk.command_lib.run import resource_args
from googlecloudsdk.command_lib.run import serverless_operations
from googlecloudsdk.command_lib.run import stages
from googlecloudsdk.command_lib.util.concepts import concept_parsers
from googlecloudsdk.command_lib.util.concepts import presentation_specs
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
from googlecloudsdk.core.console import progress_tracker
def GetAllowUnauth(args, operations, service_ref, service_exists):
"""Returns allow_unauth value for a service change.
Args:
args: argparse.Namespace, Command line arguments
operations: serverless_operations.ServerlessOperations, Serverless client.
service_ref: protorpc.messages.Message, A resource reference object
for the service See googlecloudsdk.core.resources.Registry.ParseResourceId
for details.
service_exists: True if the service being changed already exists.
Returns:
allow_unauth value where
True means to enable unauthenticated access for the service.
False means to disable unauthenticated access for the service.
None means to retain the current value for the service.
"""
allow_unauth = None
if flags.GetPlatform() == flags.PLATFORM_MANAGED:
allow_unauth = flags.GetAllowUnauthenticated(args, operations, service_ref,
not service_exists)
# Avoid failure removing a policy binding for a service that
# doesn't exist.
if not service_exists and not allow_unauth:
allow_unauth = None
return allow_unauth
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Deploy(base.Command):
"""Deploy a container to Cloud Run."""
detailed_help = {
'DESCRIPTION': """\
Deploys container images to Google Cloud Run.
""",
'EXAMPLES': """\
To deploy a container to the service `my-backend` on Cloud Run:
$ {command} my-backend --image=gcr.io/my/image
You may also omit the service name. Then a prompt will be displayed
with a suggested default value:
$ {command} --image=gcr.io/my/image
To deploy to Cloud Run on Kubernetes Engine, you need to specify a cluster:
$ {command} --image=gcr.io/my/image --cluster=my-cluster
""",
}
@staticmethod
def CommonArgs(parser):
# Flags specific to managed CR
managed_group = flags.GetManagedArgGroup(parser)
flags.AddAllowUnauthenticatedFlag(managed_group)
flags.AddCloudSQLFlags(managed_group)
flags.AddRevisionSuffixArg(managed_group)
# Flags specific to connecting to a cluster
cluster_group = flags.GetClusterArgGroup(parser)
flags.AddEndpointVisibilityEnum(cluster_group)
flags.AddSecretsFlags(cluster_group)
flags.AddConfigMapsFlags(cluster_group)
flags.AddHttp2Flag(cluster_group)
# Flags not specific to any platform
service_presentation = presentation_specs.ResourcePresentationSpec(
'SERVICE',
resource_args.GetServiceResourceSpec(prompt=True),
'Service to deploy to.',
required=True,
prefixes=False)
flags.AddFunctionArg(parser)
flags.AddMutexEnvVarsFlags(parser)
flags.AddMemoryFlag(parser)
flags.AddConcurrencyFlag(parser)
flags.AddTimeoutFlag(parser)
flags.AddAsyncFlag(parser)
flags.AddLabelsFlags(parser)
flags.AddMaxInstancesFlag(parser)
flags.AddCommandFlag(parser)
flags.AddArgsFlag(parser)
flags.AddPortFlag(parser)
flags.AddCpuFlag(parser)
flags.AddNoTrafficFlag(parser)
concept_parsers.ConceptParser([service_presentation]).AddToParser(parser)
@staticmethod
def Args(parser):
Deploy.CommonArgs(parser)
flags.AddImageArg(parser)
# Flags specific to managed CR
managed_group = flags.GetManagedArgGroup(parser)
flags.AddServiceAccountFlag(managed_group)
# Flags only supported on GKE and Knative
cluster_group = flags.GetClusterArgGroup(parser)
flags.AddMinInstancesFlag(cluster_group)
def Run(self, args):
"""Deploy a container to Cloud Run."""
service_ref = flags.GetService(args)
build_op_ref = None
messages = None
build_log_url = None
image = args.image
include_build = flags.FlagIsExplicitlySet(args, 'source')
# Build an image from source if source specified.
if include_build:
# Create a tag for the image creation
if image is None and not args.IsSpecified('config'):
image = 'gcr.io/{projectID}/cloud-run-source-deploy/{service}:{tag}'.format(
projectID=properties.VALUES.core.project.Get(required=True),
service=service_ref.servicesId,
tag=uuid.uuid4().hex)
messages = cloudbuild_util.GetMessagesModule()
build_config = submit_util.CreateBuildConfig(
image, args.no_cache, messages, args.substitutions, args.config,
args.IsSpecified('source'), False, args.source,
args.gcs_source_staging_dir, args.ignore_file, args.gcs_log_dir,
args.machine_type, args.disk_size)
build, build_op = submit_util.Build(messages, True, build_config, True)
build_op_ref = resources.REGISTRY.ParseRelativeName(
build_op.name, 'cloudbuild.operations'
)
build_log_url = build.logUrl
# Deploy a container with an image
conn_context = connection_context.GetConnectionContext(
args, flags.Product.RUN, self.ReleaseTrack())
config_changes = flags.GetConfigurationChanges(args)
with serverless_operations.Connect(conn_context) as operations:
image_change = config_changes_mod.ImageChange(image)
changes = [image_change]
if config_changes:
changes.extend(config_changes)
service = operations.GetService(service_ref)
allow_unauth = GetAllowUnauth(args, operations, service_ref, service)
pretty_print.Info(
messages_util.GetStartDeployMessage(conn_context, service_ref))
has_latest = (service is None or
traffic.LATEST_REVISION_KEY in service.spec_traffic)
deployment_stages = stages.ServiceStages(
include_iam_policy_set=allow_unauth is not None,
include_route=has_latest,
include_build=include_build)
header = 'Deploying'
if include_build:
header += ' and building'
if service is None:
header += ' new service'
header += '...'
with progress_tracker.StagedProgressTracker(
header,
deployment_stages,
failure_message='Deployment failed',
suppress_output=args.async_) as tracker:
operations.ReleaseService(
service_ref,
changes,
tracker,
asyn=args.async_,
allow_unauthenticated=allow_unauth,
prefetch=service,
build_op_ref=build_op_ref,
build_log_url=build_log_url)
if args.async_:
pretty_print.Success(
'Service [{{bold}}{serv}{{reset}}] is deploying '
'asynchronously.'.format(serv=service_ref.servicesId))
else:
pretty_print.Success(
messages_util.GetSuccessMessageForSynchronousDeploy(
operations, service_ref))
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class BetaDeploy(Deploy):
"""Deploy a container to Cloud Run."""
@staticmethod
def Args(parser):
Deploy.Args(parser)
# Flags specific to VPCAccess
flags.AddVpcConnectorArg(parser)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class AlphaDeploy(Deploy):
"""Deploy a container to Cloud Run."""
@staticmethod
def Args(parser):
Deploy.CommonArgs(parser)
# Flags specific to VPCAccess
flags.AddVpcConnectorArg(parser)
# Flags not specific to any platform
flags.AddMinInstancesFlag(parser)
flags.AddServiceAccountFlagAlpha(parser)
flags.AddDeployTagFlag(parser)
# Flags inherited from gcloud builds submit
flags.AddConfigFlags(parser)
flags.AddSourceFlag(parser)
flags.AddBuildTimeoutFlag(parser)
build_flags.AddGcsSourceStagingDirFlag(parser, True)
build_flags.AddGcsLogDirFlag(parser, True)
build_flags.AddMachineTypeFlag(parser, True)
build_flags.AddDiskSizeFlag(parser, True)
build_flags.AddSubstitutionsFlag(parser, True)
build_flags.AddNoCacheFlag(parser, True)
build_flags.AddIgnoreFileFlag(parser, True)
AlphaDeploy.__doc__ = Deploy.__doc__
| [
"[email protected]"
] | |
67e504ae45a28ad46c3633d6603215e04e77ce66 | 1cb0cc435061b6a0156b37813343ae46b1f7346e | /1_learn_step/try_second/glorot_normal-RMSprop-16.py | eb6e65fdeee43652b530579008e195c2f09d190b | [] | no_license | youthliuxi/keras | 6370a9de11e152d8ba96e68e9ff02337203b7e66 | 60a367442f74313d0bd9af01f76068d56e23bec0 | refs/heads/master | 2020-04-30T19:54:16.628943 | 2019-08-21T09:47:13 | 2019-08-21T09:47:13 | 177,051,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,677 | py | # -*- coding:utf-8 -*-
from keras import backend as K
K.set_image_dim_ordering('th')
import numpy as np
np.random.seed(123)
from keras.layers import *
from keras.models import Sequential
from keras.utils import np_utils
from keras.datasets import mnist
# (X_train, y_train), (X_test, y_test) = mnist.load_data()
path = "./mnist.npz"
f = np.load(path)
X_train, y_train = f['x_train'],f['y_train']
X_test, y_test = f['x_test'],f['y_test']
X_train = X_train.reshape(X_train.shape[0], 1, 28, 28)
X_test = X_test.reshape(X_test.shape[0], 1, 28, 28)
import pylab
from matplotlib import pyplot as plt
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
Y_train = np_utils.to_categorical(y_train, 10)
Y_test = np_utils.to_categorical(y_test, 10)
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='glorot_normal', input_shape=(1,28,28)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',optimizer='RMSprop',metrics=['accuracy'])
hist = model.fit(X_train, Y_train, batch_size=16, epochs=100, verbose=1, validation_data=(X_test, Y_test))
log_file_name = "try_second/txt/glorot_normal-RMSprop-16.txt"
with open(log_file_name,'w') as f:
f.write(str(hist.history))
# score = model.evaluate(X_test, Y_test, verbose=0, batch_size=16)
# print(score[0])
# print(score[1])
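# To inspect the saved history later (the file holds a Python dict literal,
# not JSON, so use ast.literal_eval; the exact metric key depends on the
# Keras version):
#
#   import ast
#   with open(log_file_name) as f:
#       history = ast.literal_eval(f.read())
#   print(max(history['val_acc']))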
| [
"[email protected]"
] | |
5257553d3c163205f228efbd85550aedd5fa8e8e | 6adf334dd2a074686447e15898ed3fff793aab48 | /03_Fast_and_Slow_Pointers/08_circular_array_loop_exists.py | a679195ddcea8fbbc7f8af0248a3d958cd5cb1c4 | [] | no_license | satyapatibandla/Patterns-for-Coding-Interviews | 29ac1a15d5505293b83a8fb4acf12080851fe8d6 | b3eb2ac82fd640ecbdf3654a91a57a013be1806f | refs/heads/main | 2023-05-07T07:56:01.824272 | 2021-06-01T04:02:50 | 2021-06-01T04:02:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,136 | py | # Time O(N) | Space O(1)
def circular_array_loop_exists(nums):
for i in range(len(nums)):
if nums[i] == 0:
continue
direction = nums[i] > 0
slow = fast = i
while True:
slow = get_next_idx(nums, slow, direction)
fast = get_next_idx(nums, get_next_idx(nums, fast, direction), direction)
if slow == -1 or fast == -1:
break
elif slow == fast:
return True
slow = i
while get_next_idx(nums, slow, direction) != -1:
temp_slow = slow
slow = get_next_idx(nums, slow, direction)
nums[temp_slow] = 0
return False
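# Note on get_next_idx below: -1 doubles as a sentinel for two invalid moves --
# stepping onto an element whose sign differs from the walk's direction, and a
# self-loop of length one (next_idx == idx), which the problem disallows.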
def get_next_idx(nums, idx, direction):
if idx == -1:
return -1
elif (nums[idx] > 0) != direction:
return -1
next_idx = (idx + nums[idx]) % len(nums)
return -1 if next_idx == idx else next_idx
def main():
print(circular_array_loop_exists([1, 2, -1, 2, 2]))
print(circular_array_loop_exists([2, 2, -1, 2]))
print(circular_array_loop_exists([2, 1, -1, -2]))
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
8b8498e424f3ba6a5662bd3a5d6401e4d2ca6e12 | 81adc22ee20698506397135b916903936837db3b | /examples/cuda-c++/vector_add.py | e5a70e49b2240c3b90135d9c9c037d7034622217 | [
"Apache-2.0"
] | permissive | KernelTuner/kernel_tuner | 6c25ca551795cc49a7754f2957de4e59aa98578c | b3ff4cdecb12655009b356e3b1840e25b1dd1421 | refs/heads/master | 2023-08-10T00:34:53.984541 | 2023-06-01T16:31:47 | 2023-06-01T16:31:47 | 54,894,320 | 59 | 6 | Apache-2.0 | 2023-09-08T19:28:24 | 2016-03-28T13:32:17 | Python | UTF-8 | Python | false | false | 904 | py | #!/usr/bin/env python
"""This is the minimal example from the README converted to C++11"""
import json
import numpy
from kernel_tuner import tune_kernel
def tune():
kernel_string = """
template<typename T>
__global__ void vector_add(T *c, T *a, T *b, int n) {
auto i = blockIdx.x * block_size_x + threadIdx.x;
if (i<n) {
c[i] = a[i] + b[i];
}
}
"""
size = 10000000
a = numpy.random.randn(size).astype(numpy.float32)
b = numpy.random.randn(size).astype(numpy.float32)
c = numpy.zeros_like(b)
n = numpy.int32(size)
args = [c, a, b, n]
tune_params = dict()
tune_params["block_size_x"] = [128+64*i for i in range(15)]
result, env = tune_kernel("vector_add<float>", kernel_string, size, args, tune_params)
with open("vector_add.json", 'w') as fp:
json.dump(result, fp)
return result
if __name__ == "__main__":
tune()
| [
"[email protected]"
] | |
9316183ffc05549b868b860274aaaebaf7e4fa7d | cfe18377cf7823658f38a9ffd832a2026004fc98 | /swav/vissl/vissl/models/model_helpers.py | bb4445a792adc7257124b8c269a177e1489c0275 | [
"CC-BY-NC-4.0",
"Apache-2.0"
] | permissive | yhn112/DeDLOC | ec26d28a0bfcdb4fc33db730e87f669adc8568ee | 0dc65b6eed8b3f842303fe00601335b3b9e2d7c5 | refs/heads/main | 2023-07-14T06:04:03.676662 | 2021-07-15T11:10:32 | 2021-07-15T11:10:32 | 379,111,549 | 0 | 0 | Apache-2.0 | 2021-06-22T01:50:47 | 2021-06-22T01:50:46 | null | UTF-8 | Python | false | false | 19,952 | py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import math
import warnings
from enum import Enum
from typing import Dict, List, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _ntuple
from torch.utils.checkpoint import checkpoint
from vissl.utils.activation_checkpointing import checkpoint_trunk
from vissl.utils.misc import is_apex_available
# Tuple of classes of BN layers.
_bn_cls = (nn.BatchNorm2d, torch.nn.modules.batchnorm.SyncBatchNorm)
if is_apex_available():
import apex
try:
# try importing the optimized version directly
_bn_cls = _bn_cls + (apex.parallel.optimized_sync_batchnorm.SyncBatchNorm,)
except AttributeError:
_bn_cls = _bn_cls + (apex.parallel.SyncBatchNorm,)
def transform_model_input_data_type(model_input, model_config):
"""
Model input follows the RGB format by default. Based on the input type
specified in the model config, convert the data. Supported types: RGB, BGR, LAB
"""
model_output = model_input
# In case the model takes BGR input type, we convert the RGB to BGR
if model_config.INPUT_TYPE == "bgr":
model_output = model_input[:, [2, 1, 0], :, :]
# In case of LAB image, we take only "L" channel as input. Split the data
# along the channel dimension into [L, AB] and keep only L channel.
if model_config.INPUT_TYPE == "lab":
model_output = torch.split(model_input, [1, 2], dim=1)[0]
return model_output
def is_feature_extractor_model(model_config):
"""
If the model is a feature extractor model:
- evaluation model is on
- trunk is frozen
- number of features specified for features extraction > 0
"""
if (
model_config.FEATURE_EVAL_SETTINGS.EVAL_MODE_ON
and model_config.FEATURE_EVAL_SETTINGS.FREEZE_TRUNK_ONLY
and len(model_config.FEATURE_EVAL_SETTINGS.LINEAR_EVAL_FEAT_POOL_OPS_MAP) > 0
):
return True
return False
def get_trunk_output_feature_names(model_config):
"""
Get the feature names which we will use to associate the features with.
If Feature eval mode is set, we get feature names from
config.FEATURE_EVAL_SETTINGS.LINEAR_EVAL_FEAT_POOL_OPS_MAP.
"""
feature_names = []
if is_feature_extractor_model(model_config):
feat_ops_map = model_config.FEATURE_EVAL_SETTINGS.LINEAR_EVAL_FEAT_POOL_OPS_MAP
feature_names = [item[0] for item in feat_ops_map]
return feature_names
class Wrap(nn.Module):
"""
Wrap a free function into a nn.Module.
Can be useful to build a model block, and include activations or light tensor alterations
"""
def __init__(self, function):
super().__init__()
self.function = function
def forward(self, x):
return self.function(x)
class SyncBNTypes(str, Enum):
"""
Supported SyncBN types
"""
apex = "apex"
pytorch = "pytorch"
def convert_sync_bn(config, model):
"""
Convert the BatchNorm layers in the model to the SyncBatchNorm layers.
For SyncBatchNorm, we support two sources: Apex and PyTorch. The optimized
SyncBN kernels provided by apex run faster.
Args:
config (AttrDict): configuration file
model: Pytorch model whose BatchNorm layers should be converted to SyncBN
layers.
NOTE: Since SyncBatchNorm layers synchronize the BN stats across machines, using
the syncBN layer can be slow. In order to speed up training while using
syncBN, we recommend using process_groups which are very well supported
for Apex.
To set the process groups, set SYNC_BN_CONFIG.GROUP_SIZE following below:
1) if group_size=-1 -> use the VISSL default setting. We synchronize within a
machine and hence will set group_size=num_gpus per node. This gives the best
speedup.
2) if group_size>0 -> will set group_size=value set by user.
3) if group_size=0 -> no groups are created and process_group=None. This means
global sync is done.
"""
sync_bn_config = config.MODEL.SYNC_BN_CONFIG
def get_group_size():
world_size = config.DISTRIBUTED.NUM_PROC_PER_NODE * config.DISTRIBUTED.NUM_NODES
if sync_bn_config["GROUP_SIZE"] > 0:
# if the user specifies group_size to create, we use that.
# we also make sure additionally that the group size doesn't exceed
# the world_size. This is beneficial to handle especially in case
# of 1 node training where num_gpu <= 8
group_size = min(world_size, sync_bn_config["GROUP_SIZE"])
elif sync_bn_config["GROUP_SIZE"] == 0:
# group_size=0 is considered as world_size and no process group is created.
group_size = None
else:
# by default, we set it to number of gpus in a node. Within gpu, the
# interconnect is fast and syncBN is cheap.
group_size = config.DISTRIBUTED.NUM_PROC_PER_NODE
logging.info(f"Using SyncBN group size: {group_size}")
return group_size
def to_apex_syncbn(group_size):
logging.info("Converting BN layers to Apex SyncBN")
if group_size is None:
process_group = None
logging.info("Not creating process_group for Apex SyncBN...")
else:
process_group = apex.parallel.create_syncbn_process_group(
group_size=group_size
)
return apex.parallel.convert_syncbn_model(model, process_group=process_group)
def to_pytorch_syncbn(group_size):
logging.info("Converting BN layers to PyTorch SyncBN")
if group_size is None:
process_group = None
logging.info("Not creating process_group for PyTorch SyncBN...")
else:
logging.warning(
"Process groups not supported with PyTorch SyncBN currently. "
"Traning will be slow. Please consider installing Apex for SyncBN."
)
process_group = None
# TODO (prigoyal): process groups don't work well with pytorch.
# import os
# num_gpus_per_node = config.DISTRIBUTED.NUM_PROC_PER_NODE
# node_id = int(os.environ["RANK"]) // num_gpus_per_node
# assert (
# group_size == num_gpus_per_node
# ), "Use group_size=num_gpus per node as interconnect is cheap in a machine"
# process_ids = list(
# range(
# node_id * num_gpus_per_node,
# (node_id * num_gpus_per_node) + group_size,
# )
# )
# logging.info(f"PyTorch SyncBN Node: {node_id} process_ids: {process_ids}")
# process_group = torch.distributed.new_group(process_ids)
return nn.SyncBatchNorm.convert_sync_batchnorm(
model, process_group=process_group
)
group_size = get_group_size()
# Apply the correct transform, make sure that any other setting raises an error
return {SyncBNTypes.apex: to_apex_syncbn, SyncBNTypes.pytorch: to_pytorch_syncbn}[
sync_bn_config["SYNC_BN_TYPE"]
](group_size)
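# Usage sketch (illustrative config values; the AttrDict fields are the ones
# read above):
#
#   config.MODEL.SYNC_BN_CONFIG.SYNC_BN_TYPE = "apex"   # or "pytorch"
#   config.MODEL.SYNC_BN_CONFIG.GROUP_SIZE = -1         # VISSL default: sync per node
#   model = convert_sync_bn(config, model)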
class Flatten(nn.Module):
"""
Flatten module attached in the model. It basically flattens the input tensor.
"""
def __init__(self, dim=-1):
super(Flatten, self).__init__()
self.dim = dim
def forward(self, feat):
"""
flatten the input feat
"""
return torch.flatten(feat, start_dim=self.dim)
def flops(self, x):
"""
number of floating point operations performed. 0 for this module.
"""
return 0
class Identity(nn.Module):
"""
A helper module that outputs the input as is
"""
def __init__(self, args=None):
super().__init__()
def forward(self, x):
"""
Return the input as the output
"""
return x
class LayerNorm2d(nn.GroupNorm):
"""
Use GroupNorm to construct LayerNorm as pytorch LayerNorm2d requires
specifying input_shape explicitly which is inconvenient. Set num_groups=1 to
convert GroupNorm to LayerNorm.
"""
def __init__(self, num_channels, eps=1e-5, affine=True):
super(LayerNorm2d, self).__init__(
num_groups=1, num_channels=num_channels, eps=eps, affine=affine
)
class RESNET_NORM_LAYER(str, Enum):
"""
Types of Norms supported in ResNe(X)t trainings. can be easily set and modified
from the config file.
"""
BatchNorm = "BatchNorm"
LayerNorm = "LayerNorm"
GroupNorm = "GroupNorm"
def _get_norm(trunk_config):
"""
return the normalization layer to use in the model based on the layer name
"""
layer_name = trunk_config.NORM
n_groups = trunk_config.GROUPNORM_GROUPS
def group_norm(num_channels):
return nn.GroupNorm(num_groups=n_groups, num_channels=num_channels)
return {
RESNET_NORM_LAYER.BatchNorm: nn.BatchNorm2d,
RESNET_NORM_LAYER.LayerNorm: LayerNorm2d,
RESNET_NORM_LAYER.GroupNorm: group_norm,
}[layer_name]
def parse_out_keys_arg(
out_feat_keys: List[str], all_feat_names: List[str]
) -> Tuple[List[str], int]:
"""
Checks if all out_feature_keys are mapped to a layer in the model.
Returns the last layer to forward pass through for efficiency.
Allow duplicate features also to be evaluated.
Adapted from (https://github.com/gidariss/FeatureLearningRotNet).
"""
# By default return the features of the last layer / module.
if out_feat_keys is None or (len(out_feat_keys) == 0):
out_feat_keys = [all_feat_names[-1]]
if len(out_feat_keys) == 0:
raise ValueError("Empty list of output feature keys.")
for _, key in enumerate(out_feat_keys):
if key not in all_feat_names:
raise ValueError(
f"Feature with name {key} does not exist. "
f"Existing features: {all_feat_names}."
)
# Find the highest output feature in `out_feat_keys
max_out_feat = max(all_feat_names.index(key) for key in out_feat_keys)
return out_feat_keys, max_out_feat
def get_trunk_forward_outputs_module_list(
feat: torch.Tensor,
out_feat_keys: List[str],
feature_blocks: nn.ModuleList,
all_feat_names: List[str] = None,
) -> List[torch.Tensor]:
"""
Args:
feat: model input.
out_feat_keys: a list/tuple with the feature names of the features that
the function should return. By default the last feature of the network
is returned.
feature_blocks: list of feature blocks in the model
feature_mapping: name of the layers in the model
Returns:
out_feats: a list with the asked output features placed in the same order as in
`out_feat_keys`.
"""
out_feat_keys, max_out_feat = parse_out_keys_arg(out_feat_keys, all_feat_names)
out_feats = [None] * len(out_feat_keys)
for f in range(max_out_feat + 1):
feat = feature_blocks[f](feat)
key = all_feat_names[f]
if key in out_feat_keys:
out_feats[out_feat_keys.index(key)] = feat
return out_feats
def get_trunk_forward_outputs(
feat: torch.Tensor,
out_feat_keys: List[str],
feature_blocks: nn.ModuleDict,
feature_mapping: Dict[str, str] = None,
use_checkpointing: bool = True,
checkpointing_splits: int = 2,
) -> List[torch.Tensor]:
"""
Args:
feat: model input.
out_feat_keys: a list/tuple with the feature names of the features that
the function should return. By default the last feature of the network
is returned.
feature_blocks: ModuleDict containing feature blocks in the model
feature_mapping: an optional correspondence table in between the requested
feature names and the model's.
Returns:
out_feats: a list with the asked output features placed in the same order as in
`out_feat_keys`.
"""
# Sanitize inputs
if feature_mapping is not None:
out_feat_keys = [feature_mapping[f] for f in out_feat_keys]
out_feat_keys, max_out_feat = parse_out_keys_arg(
out_feat_keys, list(feature_blocks.keys())
)
# Forward pass over the trunk
unique_out_feats = {}
unique_out_feat_keys = list(set(out_feat_keys))
# FIXME: Ideally this should only be done once at construction time
if use_checkpointing:
feature_blocks = checkpoint_trunk(
feature_blocks, unique_out_feat_keys, checkpointing_splits
)
# If feat is the first input to the network, it doesn't have requires_grad,
# which would prevent checkpoint's backward function from being called. So we
# need to set it to True here.
feat.requires_grad = True
# Go through the blocks, and save the features as we go
# NOTE: we are not doing several forward passes but instead just checking
# whether each feature is requested to be returned.
for i, (feature_name, feature_block) in enumerate(feature_blocks.items()):
# The last chunk has to be non-volatile
if use_checkpointing and i < len(feature_blocks) - 1:
# Un-freeze the running stats in any BN layer
for m in filter(lambda x: isinstance(x, _bn_cls), feature_block.modules()):
m.track_running_stats = m.training
feat = checkpoint(feature_block, feat)
# Freeze the running stats in any BN layer
# the checkpointing process will have to do another FW pass
for m in filter(lambda x: isinstance(x, _bn_cls), feature_block.modules()):
m.track_running_stats = False
else:
feat = feature_block(feat)
# This feature is requested, store. If the same feature is requested several
# times, we return the feature several times.
if feature_name in unique_out_feat_keys:
unique_out_feats[feature_name] = feat
# Early exit if all the features have been collected
if i == max_out_feat and not use_checkpointing:
break
# now return the features as requested by the user. If there are no duplicate keys,
# return as is.
if len(unique_out_feat_keys) == len(out_feat_keys):
return list(unique_out_feats.values())
output_feats = []
for key_name in out_feat_keys:
output_feats.append(unique_out_feats[key_name])
return output_feats
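# Illustrative call (hypothetical tensors; the available feature names depend
# on the trunk -- a ResNet trunk exposes names such as "res4", "res5",
# "avgpool"):
#
#   feats = get_trunk_forward_outputs(
#       images,
#       out_feat_keys=["res5", "avgpool"],
#       feature_blocks=self._feature_blocks,
#       use_checkpointing=False)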
def lecun_normal_init(tensor, fan_in):
trunc_normal_(tensor, std=math.sqrt(1 / fan_in))
# Contains code from https://github.com/rwightman/pytorch-image-models
# and https://github.com/facebookresearch/deit/blob/main/models.py, modified by
# Matthew Leavitt ([email protected], [email protected]) and Vedanuj
# Goswami ([email protected]).
# trunc_normal_ and _no_grad_trunc_normal_ from:
# https://github.com/rwightman/pytorch-image-models/blob/678ba4e0a2c0b52c5e7b2ec0ba689399840282ee/timm/models/layers/weight_init.py # NOQA
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
r"""Supposedly should be available in PyTorch soon. Replace when available.
Fills the input Tensor with values drawn
from a truncated normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn(
"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2,
)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
# Contains code from https://github.com/rwightman/pytorch-image-models
# and https://github.com/facebookresearch/deit/blob/main/models.py, modified by
# Matthew Leavitt ([email protected], [email protected]) and Vedanuj
# Goswami ([email protected]).
# Standardized convolution (Conv2d with Weight Standardization), as used in
# the paper, Big Transfer (BiT): General Visual Representation Learning -
# https://arxiv.org/abs/1912.11370
class StandardizedConv2d(nn.Conv2d):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
):
super(StandardizedConv2d, self).__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
groups,
bias,
)
def forward(self, x):
weight = self.weight
weight_mean = (
weight.mean(dim=1, keepdim=True)
.mean(dim=2, keepdim=True)
.mean(dim=3, keepdim=True)
)
weight = weight - weight_mean
std = weight.view(weight.size(0), -1).std(dim=1).view(-1, 1, 1, 1) + 1e-5
weight = weight / std.expand_as(weight)
return F.conv2d(
x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups
)
# drop_path and DropPath modified from
# https://github.com/facebookresearch/deit/blob/main/models.py
def drop_path(x, drop_prob: float = 0.0, training: bool = False):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of
residual blocks).
"""
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
# work with diff dim tensors, not just 2D ConvNets
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path
of residual blocks)."""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
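# Minimal sketch (assumed values): with drop_prob=0.5 in training mode roughly
# half of the samples have their residual branch zeroed, and the survivors are
# scaled by 1/keep_prob so the expected output is unchanged.
#
# dp = DropPath(drop_prob=0.5)
# dp.train()
# out = dp(torch.ones(8, 16))  # rows are either all zeros or all 2.0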
to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple
| [
"[email protected]"
] | |
6496385a65adfdf5d6dd2990cf9ca6dc390ce0a4 | e3ec5f1898ae491fa0afcdcc154fb306fd694f83 | /src/components/opPicker/onFilterTextEvent.py | 4e4fb7d55e5df26b7e2595dd237d87b347bae0e8 | [
"CC-BY-4.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | phoebezhung/raytk | 42397559a76a9ba39308ac03344b4446f64ea04d | b91483ce88b2956d7b23717b11e223d332ca8395 | refs/heads/master | 2023-08-27T05:20:38.062360 | 2021-10-21T04:33:18 | 2021-10-21T04:33:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | # noinspection PyUnreachableCode
if False:
# noinspection PyUnresolvedReferences
from _stubs import *
from .opPicker import OpPicker
ext.opPicker = OpPicker(COMP())
def onValueChange(panelValue: 'PanelValue', prev):
ext.opPicker.setFilterText(panelValue.val)
| [
"[email protected]"
] | |
5d7ecd12aac912be773a379df1d6f109317b84c0 | de392462a549be77e5b3372fbd9ea6d7556f0282 | /operations_9001/migrations/0011_auto_20200806_1350.py | 5305a4a2d7c547e06c938388a8dbfece7f2c931d | [] | no_license | amutebe/AMMS_General | 2830770b276e995eca97e37f50a7c51f482b2405 | 57b9b85ea2bdd272b44c59f222da8202d3173382 | refs/heads/main | 2023-07-17T02:06:36.862081 | 2021-08-28T19:07:17 | 2021-08-28T19:07:17 | 400,064,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,147 | py | # Generated by Django 3.0.2 on 2020-08-06 10:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('operations_9001', '0010_auto_20200806_1252'),
]
operations = [
migrations.AlterField(
model_name='maintenance',
name='maintenance_number',
field=models.CharField(default='TEGA-M-06082020125', max_length=200, primary_key=True, serialize=False, verbose_name='Maintenance no.:'),
),
migrations.AlterField(
model_name='mod9001_calibration',
name='calibration_number',
field=models.CharField(default='TEGA-C-06082020249', max_length=200, primary_key=True, serialize=False, verbose_name='Calibration no.:'),
),
migrations.AlterField(
model_name='mod9001_document_manager',
name='document_number',
field=models.CharField(default='TEGA-Q-06082020119', max_length=200, primary_key=True, serialize=False, verbose_name='Document no.:'),
),
migrations.AlterField(
model_name='mod9001_processtable',
name='process_number',
field=models.CharField(default='Comp-Pr-06082020218', max_length=200, primary_key=True, serialize=False, verbose_name='Process ID:'),
),
migrations.AlterField(
model_name='mod9001_qmsplanner',
name='planner_number',
field=models.CharField(default='Comp-QP-06082020151', max_length=200, primary_key=True, serialize=False, verbose_name='Planner no.:'),
),
migrations.AlterField(
model_name='mod9001_trainingplanner',
name='plan_number',
field=models.CharField(default='Comp-TP-06082020133', max_length=200, primary_key=True, serialize=False, verbose_name='Plan no.:'),
),
migrations.AlterField(
model_name='mod9001_trainingregister',
name='training_number',
field=models.CharField(default='Comp-TR-06082020131', max_length=200, primary_key=True, serialize=False, verbose_name='Training no.:'),
),
]
| [
"[email protected]"
] | |
01f324c6bcbb1c9a274932a8fafb8cbc266973f2 | e384f5467d8bcfd70845997bcbd68d950e874a61 | /example/python/mesh/mesh_007_cube_color_per_triangle_Tex1D/cube.py | f445866308c763ea6d3dbaf04fc7e72b93a6e0b5 | [] | no_license | Rabbid76/graphics-snippets | ee642f1ed9ceafc6d320e467d3a084d2446d22c2 | fa187afeabb9630bc1d988304fb5787e95a91385 | refs/heads/master | 2023-08-04T04:32:06.884318 | 2023-07-21T09:15:43 | 2023-07-21T09:15:43 | 109,126,544 | 177 | 12 | null | 2023-04-11T20:05:52 | 2017-11-01T12:05:56 | C++ | UTF-8 | Python | false | false | 3,947 | py | import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir, os.pardir))
os.chdir(os.path.dirname(os.path.abspath(__file__)))
import numpy
# PyOpenGL import
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
# MyLibOGL import
from MyLibOGL.math import mat
from MyLibOGL.math import cam
from MyLibOGL.ogl import shader
from MyLibOGL.ogl import vertex
from MyLibOGL.ogl import uniform
from MyLibOGL.glut import window
class MyWindow(window.CameraWindow):
def __init__( self, cx, cy, multisample=True ):
super().__init__(cx, cy, multisample)
def _InitCamera_(self):
camera = super()._InitCamera_()
#camera.fov_y = 120
camera.pos = (0, -3, 0)
return camera
# draw event
def OnDraw(self):
# set up projection matrix
prjMat = self.Perspective()
# set up view matrix
viewMat = self.LookAt()
# set up attributes and shader program
glEnable( GL_DEPTH_TEST )
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT )
progDraw.Use()
modelMat = mat.IdentityMat44()
modelMat = self.AutoModelMatrix()
#modelMat = mat.RotateX( modelMat, self.CalcAng( 13.0 ) )
#modelMat = mat.RotateY( modelMat, self.CalcAng( 17.0 ) )
progDraw.SetUniforms( {
b"u_projectionMat44" : self.Perspective(),
b"u_viewMat44" : self.LookAt(),
b"u_modelMat44" : modelMat,
b"u_lightDir" : [-1.0, -0.5, -2.0],
b"u_ambient" : 0.2,
b"u_diffuse" : 0.8,
b"u_specular" : 0.8,
b"u_shininess" : 10.0 } )
# draw object
cubeVAO.Draw()
def AddToBuffer( buffer, data, count=1 ):
for inx_c in range(0, count):
for inx_s in range(0, len(data)): buffer.append( data[inx_s] )
# create window
wnd = MyWindow( 800, 600, True )
# define cube vertex array object
cubePts = [
(-1.0, -1.0, 1.0), ( 1.0, -1.0, 1.0), ( 1.0, 1.0, 1.0), (-1.0, 1.0, 1.0),
(-1.0, -1.0, -1.0), ( 1.0, -1.0, -1.0), ( 1.0, 1.0, -1.0), (-1.0, 1.0, -1.0) ]
cubeCol = [ [1.0, 0.0, 0.0, 1.0], [1.0, 0.5, 0.0, 1.0], [1.0, 0.0, 1.0, 1.0], [1.0, 1.0, 0.0, 1.0], [0.0, 1.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0] ]
cubeHlpInx = [ 0, 1, 2, 3, 1, 5, 6, 2, 5, 4, 7, 6, 4, 0, 3, 7, 3, 2, 6, 7, 1, 0, 4, 5 ]
cubePosData = []
for inx in cubeHlpInx: AddToBuffer( cubePosData, cubePts[inx] )
cubeNVData = []
for inx_nv in range(len(cubeHlpInx) // 4):
nv = [0.0, 0.0, 0.0]
for inx_p in range(4):
for inx_s in range(0, 3): nv[inx_s] += cubePts[ cubeHlpInx[inx_nv*4 + inx_p] ][inx_s]
AddToBuffer( cubeNVData, nv, 4 )
cubeColFaceData = []
for inx_col in range(6):
for inx_c in range(0, 4): cubeColFaceData.append( cubeCol[inx_col][inx_c] )
for inx_c in range(0, 4): cubeColFaceData.append( cubeCol[inx_col][inx_c] )
cubeIndices = []
for inx in range(6):
for inx_s in [0, 1, 2, 0, 2, 3]: cubeIndices.append( inx * 4 + inx_s )
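# Sanity check (derived from the loops above): 6 faces split into 2 triangles
# give 12 triangles (36 indices) and one RGBA texel per triangle for the
# 1D texture created below.
assert len(cubeIndices) == 36
assert len(cubeColFaceData) // 4 == 12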
cubeVAO = vertex.VAObject( [ (3, cubePosData), (3, cubeNVData) ], cubeIndices )
# 1D texture
color_texture_unit = 1
glActiveTexture( GL_TEXTURE0+color_texture_unit )
color_texture = glGenTextures( 1 )
glBindTexture( GL_TEXTURE_1D, color_texture )
glTexImage1D( GL_TEXTURE_1D, 0, GL_RGBA, len(cubeColFaceData) // 4, 0, GL_RGBA, GL_FLOAT, numpy.matrix(cubeColFaceData, dtype=numpy.float32))
glTexParameteri( GL_TEXTURE_1D, GL_TEXTURE_MAG_FILTER, GL_NEAREST )
glTexParameteri( GL_TEXTURE_1D, GL_TEXTURE_MIN_FILTER, GL_NEAREST )
glTexParameteri( GL_TEXTURE_1D, GL_TEXTURE_WRAP_S, GL_REPEAT )
glActiveTexture( GL_TEXTURE0 )
# load, compile and link shader
progDraw = shader.ShaderProgram(
[ ('resource/shader/blinn_phong.vert', GL_VERTEX_SHADER),
('resource/shader/blinn_phong.frag', GL_FRAGMENT_SHADER) ] )
# start main loop
wnd.Run()
| [
"[email protected]"
] | |
a75c23d732db2e10115e15e929fd78eef4a1c8ab | 26c02b95916f8766a3c0c10d8c8552982b110332 | /auto_process_ngs/qc/utils.py | 7617de2f9d99aca028b7d5f4084464114a5818bf | [
"AFL-3.0",
"LicenseRef-scancode-unknown-license-reference",
"AFL-2.1"
] | permissive | fls-bioinformatics-core/auto_process_ngs | 410a4226375b76ede239b9564527199a8fdc734a | d3da61b858560d87ff070c34d906ec12717bd81d | refs/heads/devel | 2023-08-17T11:59:09.603274 | 2023-08-11T11:47:57 | 2023-08-11T11:47:57 | 40,700,431 | 8 | 7 | NOASSERTION | 2023-08-24T10:12:47 | 2015-08-14T06:53:01 | Python | UTF-8 | Python | false | false | 19,605 | py | #!/usr/bin/env python
#
# utils: utility classes and functions for QC
# Copyright (C) University of Manchester 2018-2022 Peter Briggs
#
"""
Provides utility classes and functions for analysis project QC.
Provides the following functions:
- verify_qc: verify the QC run for a project
- report_qc: generate report for the QC run for a project
- get_bam_basename: return the BAM file basename from a Fastq filename
- get_seq_data_samples: identify samples with biological (sequencing)
data
- set_cell_count_for_project: sets total number of cells for a project
"""
#######################################################################
# Imports
#######################################################################
import os
import logging
from ..analysis import AnalysisFastq
from ..analysis import AnalysisProject
from ..command import Command
from ..metadata import AnalysisProjectQCDirInfo
from ..conda import CondaWrapper
from ..conda import CondaWrapperError
from ..conda import make_conda_env_name
from ..settings import Settings
from ..simple_scheduler import SchedulerJob
from ..tenx.cellplex import CellrangerMultiConfigCsv
from .cellranger import CellrangerCount
from .cellranger import CellrangerMulti
# Module-specific logger
logger = logging.getLogger(__name__)
#######################################################################
# Functions
#######################################################################
def verify_qc(project,qc_dir=None,fastq_dir=None,qc_protocol=None,
runner=None,log_dir=None):
"""
Verify the QC run for a project
Arguments:
project (AnalysisProject): analysis project
to verify the QC for
qc_dir (str): optional, specify the subdir with
the QC outputs being verified
fastq_dir (str): optional, specify a non-default
directory with Fastq files being verified
qc_protocol (str): optional, QC protocol to
verify against
runner (JobRunner): optional, job runner to use
for running the verification
log_dir (str): optional, specify a directory to
write logs to
Returns:
Boolean: True if QC passes verification, otherwise
False.
"""
# Sort out runners
if runner is None:
runner = Settings().general.default_runner
# Construct command for QC verification
verify_cmd = Command(
"reportqc.py",
"--verify")
if qc_protocol is not None:
verify_cmd.add_args("--protocol",qc_protocol)
if qc_dir is not None:
verify_cmd.add_args("--qc_dir",qc_dir)
if fastq_dir is not None:
verify_cmd.add_args("--fastq_dir",fastq_dir)
verify_cmd.add_args(project.dirn)
# Run the command
verify = SchedulerJob(runner,
verify_cmd.command_line,
name="verify_qc.%s" % project.name,
working_dir=project.dirn,
log_dir=log_dir)
verify.start()
try:
verify.wait()
except KeyboardInterrupt as ex:
logger.warning("Keyboard interrupt, terminating QC verification")
verify.terminate()
raise ex
# Return boolean based on the exit code
return (verify.exit_code == 0)
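# Sketch of intended use (the 'project' object and the protocol name here are
# assumptions for illustration, not taken from a real run):
#
# if verify_qc(project, qc_protocol="standardPE"):
#     report_qc(project, zip_outputs=True)
# else:
#     logger.warning("QC verification failed for %s" % project.name)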
def report_qc(project,qc_dir=None,fastq_dir=None,qc_protocol=None,
report_html=None,zip_outputs=True,multiqc=False,
force=False,runner=None,log_dir=None,
suppress_warning=False):
"""
Generate report for the QC run for a project
Arguments:
project (AnalysisProject): analysis project
to report the QC for
qc_dir (str): optional, specify the subdir with
the QC outputs being reported
fastq_dir (str): optional, specify a non-default
directory with Fastq files being verified
qc_protocol (str): optional, QC protocol to
verify against
report_html (str): optional, path to the name of
the output QC report
zip_outputs (bool): if True then also generate ZIP
archive with the report and QC outputs
multiqc (bool): if True then also generate MultiQC
report
force (bool): if True then force generation of
QC report even if verification fails
runner (JobRunner): optional, job runner to use
for running the reporting
log_dir (str): optional, specify a directory to
write logs to
suppress_warning (bool): if True then don't show the
warning message even when there are missing metrics
(default: show the warning if there are missing
metrics)
Returns:
Integer: exit code from reporting job (zero indicates
success, non-zero indicates a problem).
"""
# Sort out runners
if runner is None:
runner = Settings().general.default_runner
# Basename for the outputs
if qc_dir is None:
qc_base = os.path.basename(project.qc_dir)
else:
qc_base = os.path.basename(qc_dir)
# Report HTML file name
if report_html is None:
out_file = '%s_report.html' % qc_base
else:
out_file = report_html
if not os.path.isabs(out_file):
out_file = os.path.join(project.dirn,out_file)
# Report title
if project.info.run is None:
title = "%s" % project.name
else:
title = "%s/%s" % (project.info.run,
project.name)
if fastq_dir is not None:
title = "%s (%s)" % (title,fastq_dir)
title = "%s: QC report" % title
# Construct command for reporting
report_cmd = Command(
"reportqc.py",
"--filename",out_file,
"--title",title)
if qc_protocol is not None:
report_cmd.add_args("--protocol",qc_protocol)
if qc_dir is not None:
report_cmd.add_args("--qc_dir",qc_dir)
if fastq_dir is not None:
report_cmd.add_args("--fastq_dir",fastq_dir)
if multiqc:
report_cmd.add_args("--multiqc")
if zip_outputs:
report_cmd.add_args("--zip")
if force:
report_cmd.add_args("--force")
if suppress_warning:
report_cmd.add_args("--suppress-warning")
report_cmd.add_args(project.dirn)
# Check if environment modules are defined
module_load_cmds = None
if Settings().modulefiles['report_qc']:
print("Attempting to acquire environment modules for reporting")
module_load_cmds = []
try:
modulepath = os.environ['MODULEPATH']
if modulepath:
module_load_cmds.append("export MODULEPATH=%s" % modulepath)
except KeyError:
pass
try:
envmodules = Settings().modulefiles['report_qc'].split(',')
for envmodule in envmodules:
module_load_cmds.append("module load %s" % envmodule)
except Exception as ex:
logger.warning("couldn't acquire env modules?: %s" % ex)
module_load_cmds = '\n'.join(module_load_cmds)
# Check if conda environments are enabled
conda_activate_cmd = None
if Settings().conda.enable_conda:
print("Attempting to acquire conda environment for reporting")
# Get location for conda environments
conda_env_dir = Settings().conda.env_dir
# Set up conda wrapper
conda = CondaWrapper(env_dir=conda_env_dir)
# Get environment for QC reporting
report_qc_conda_pkgs = ("multiqc=1.8",
"pillow",
"python=3.8")
env_name = make_conda_env_name(*report_qc_conda_pkgs)
try:
conda.create_env(env_name,*report_qc_conda_pkgs)
conda_env = os.path.join(conda_env_dir,env_name)
# Script fragment to activate the environment
conda_activate_cmd = conda.activate_env_cmd(conda_env)
except CondaWrapperError as ex:
# Failed to acquire the environment
logger.warning("failed to acquire conda environment '%s': %s" %
(env_name,ex))
# Wrap the command in a script
scripts_dir = os.path.join(project.dirn,"ScriptCode")
if not os.path.isdir(scripts_dir):
logger.warning("no ScriptCode directory found in '%s'" %
project.name)
scripts_dir = project.dirn
report_script = os.path.join(scripts_dir,
"report_qc.%s.sh" % project.name)
prologue = []
if module_load_cmds:
prologue.append(module_load_cmds)
if conda_activate_cmd:
prologue.append(str(conda_activate_cmd))
if prologue:
prologue = '\n'.join(prologue)
else:
prologue = None
report_cmd.make_wrapper_script(filen=report_script,
prologue=prologue,
quote_spaces=True)
# Locate log dir
if log_dir is None:
log_dir = os.path.join(project.dirn,"logs")
if not os.path.isdir(log_dir):
log_dir = None
# Run the command
report = SchedulerJob(runner,
Command('/bin/bash','-l',report_script).command_line,
name="report_qc.%s" % project.name,
working_dir=project.dirn,
log_dir=log_dir)
report.start()
try:
report.wait()
except KeyboardInterrupt as ex:
logger.warning("Keyboard interrupt, terminating QC reporting")
report.terminate()
raise ex
# Return the exit code
return report.exit_code
def get_bam_basename(fastq,fastq_attrs=None):
"""
Return basename for BAM file from Fastq filename
Typically this will be the Fastq basename with the
read ID removed, for example the Fastq filename
'SM1_S1_L001_R1_001.fastq.gz' will result in the
BAM basename of 'SM1_S1_L001_001'.
Arguments:
fastq (str): Fastq filename; can include leading
path and extensions (both will be ignored)
fastq_attrs (BaseFastqAttrs): class for extracting
data from Fastq names (defaults to 'AnalysisFastq')
Returns:
String: basename for BAM file.
"""
if fastq_attrs is None:
fastq_attrs = AnalysisFastq
bam_basename = fastq_attrs(fastq)
bam_basename.read_number = None
return str(bam_basename)
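# Illustrative call (hypothetical filename):
#
# >>> get_bam_basename("SM1_S1_L001_R1_001.fastq.gz")
# 'SM1_S1_L001_001'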
def get_seq_data_samples(project_dir,fastq_attrs=None):
"""
Identify samples with biological (sequencing) data
Arguments:
project_dir (str): path to the project directory
fastq_attrs (BaseFastqAttrs): class for extracting
data from Fastq names (defaults to 'AnalysisFastq')
Returns:
List: list with subset of samples with biological
data
"""
# Set up
if fastq_attrs is None:
fastq_attrs = AnalysisFastq
project = AnalysisProject(project_dir,
fastq_attrs=fastq_attrs)
# Initial sample list
samples = sorted([s.name for s in project.samples])
# If biological samples explicitly defined in
# project metadata then use those
if project.info.biological_samples:
bio_samples = []
for s in [str(s).strip()
for s in project.info.biological_samples.split(',')]:
if s not in samples:
logger.warning("Sample '%s' defined as biological data "
"but no sample found with that name?" % s)
else:
bio_samples.append(s)
return bio_samples
# 10x Genomics CellPlex
single_cell_platform = project.info.single_cell_platform
if single_cell_platform:
if single_cell_platform.startswith("10xGenomics Chromium 3'") and \
project.info.library_type == "CellPlex":
# Check for config file
config_file = os.path.join(project.dirn,
"10x_multi_config.csv")
if os.path.exists(config_file):
config_csv = CellrangerMultiConfigCsv(config_file)
samples = sorted([s for s in config_csv.gex_libraries
if s in samples])
return samples
def set_cell_count_for_project(project_dir,qc_dir=None,
source="count"):
"""
Set the total number of cells for a project
Depending on the specified 'source', sums the number
of cells for each sample in a project as determined
from either 'cellranger* count' or 'cellranger multi'.
Depending the 10x Genomics package and analysis type
the cell count for individual samples is extracted
from the 'metrics_summary.csv' file for scRNA-seq
(i.e. 'cellranger count' or 'cellranger multi'), or
from the 'summary.csv' file for scATAC (ie.
'cellranger-atac count').
The final count is written to the 'number_of_cells'
metadata item for the project.
Arguments:
project_dir (str): path to the project directory
qc_dir (str): path to QC directory (if not the default
QC directory for the project)
source (str): either 'count' or 'multi' (default is
'count')
Returns:
Integer: exit code, non-zero values indicate problems
were encountered.
"""
# Set up basic info
project = AnalysisProject(project_dir)
if qc_dir is None:
qc_dir = project.qc_dir
qc_dir = os.path.abspath(qc_dir)
print("QC dir: %s" % qc_dir)
number_of_cells = None
# Determine which 10x pipeline was used
pipeline = None
single_cell_platform = project.info.single_cell_platform
if single_cell_platform:
if single_cell_platform.startswith("10xGenomics Chromium 3'"):
pipeline = "cellranger"
elif single_cell_platform == "10xGenomics Single Cell ATAC":
pipeline = "cellranger-atac"
elif single_cell_platform == "10xGenomics Single Cell Multiome":
pipeline = "cellranger-arc"
if not pipeline:
raise NotImplementedError("Not implemented for platform '%s'"
% single_cell_platform)
# Fetch information on version and reference data
cellranger_refdata = None
cellranger_version = None
qc_info_file = os.path.join(qc_dir,"qc.info")
if os.path.exists(qc_info_file):
qc_info = AnalysisProjectQCDirInfo(filen=qc_info_file)
try:
cellranger_refdata = qc_info['cellranger_refdata']
except KeyError:
pass
try:
cellranger_version = qc_info['cellranger_version']
except KeyError:
pass
else:
print("%s: not found" % qc_info_file)
# Determine whether we're handling output from 'multi'
# or from 'count'
if source == "multi":
print("Looking for '%s multi' outputs" % pipeline)
if not os.path.exists(os.path.join(qc_dir,"cellranger_multi")):
logger.warning("Unable to set cell count: no data found")
            return 1
# Handle outputs from 'multi'
number_of_cells = 0
try:
multi_outs = CellrangerMulti(
os.path.join(qc_dir,
"cellranger_multi",
cellranger_version,
os.path.basename(
cellranger_refdata)),
cellranger_exe=pipeline)
if multi_outs.sample_names:
for sample in multi_outs.sample_names:
print("- %s" % sample)
try:
ncells = multi_outs.metrics(sample).cells
print(" %d cells" % ncells)
number_of_cells += ncells
except Exception as ex:
raise Exception("Failed to add cell count for sample "
"'%s': %s" % (sample,ex))
else:
raise Exception("No samples found under %s" %
os.path.join(qc_dir,"cellranger_multi"))
except Exception as ex:
number_of_cells = None
logger.warning("Unable to set cell count from data in "
"%s: %s" %
(os.path.join(qc_dir,"cellranger_multi"),ex))
elif source == "count":
print("Looking for '%s count' outputs" % pipeline)
if not os.path.exists(os.path.join(qc_dir,"cellranger_count")):
logger.warning("Unable to set cell count: no data found")
            return 1
# Handle outputs from 'count'
# Determine possible locations for outputs
count_dirs = []
# New-style with 'version' and 'reference' subdirectories
if cellranger_version and cellranger_refdata:
count_dirs.append(os.path.join(qc_dir,
"cellranger_count",
cellranger_version,
os.path.basename(
cellranger_refdata)))
# Old-style without additional subdirectories
count_dirs.append(os.path.join(qc_dir,
"cellranger_count"))
# Check each putative output location in turn
for count_dir in count_dirs:
print("Examining %s" % count_dir)
# Check that the directory exists
if os.path.exists(count_dir):
number_of_cells = 0
try:
# Loop over samples and collect cell numbers for
# each sample
for sample in project.samples:
print("- %s" % sample)
ncells = None
sample_dir = os.path.join(count_dir,
sample.name)
sample_outs = CellrangerCount(sample_dir,
cellranger_exe=pipeline)
for metric in ('Estimated Number of Cells',
'Estimated number of cells',
'annotated_cells',):
# Try to fetch metric for cell count
try:
ncells = sample_outs.metrics.fetch(metric)
break
except KeyError:
pass
if ncells is None:
number_of_cells = None
raise Exception("Failed to add cell count "
"for sample '%s'" % sample.name)
else:
print(" %d cells" % ncells)
number_of_cells += ncells
# Extracted cell numbers so break out
break
except Exception as ex:
logger.warning("Unable to get cell counts from '%s': %s"
% (count_dir,ex))
else:
# No known outputs to get cell counts from
raise Exception("Unknown source type: '%s'" % source)
if number_of_cells is not None:
# Report
print("Total number of cells: %d" % number_of_cells)
# Store in the project metadata
project.info['number_of_cells'] = number_of_cells
project.info.save()
return 0
else:
# Cell count wasn't set
return 1
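# Sketch of a typical call (path and source are assumptions):
#
# set_cell_count_for_project('/data/analysis/PROJ', source='multi')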
| [
"[email protected]"
] | |
cb3b99330dec69408592872eba11b0a5f54912fe | e82d49a32b843d02019fe770824d10bbdfc16c1b | /Misc/args.py | 219c767f303ffbf718eb0d407394684e9b33f549 | [] | no_license | deesaw/PythonD-005 | 142bfdfd6515aa4d570509cab5e6b6008ccae999 | 65b7423b5251b12d06cd64a5135dd0afabde60a1 | refs/heads/master | 2023-03-10T02:51:05.967446 | 2021-03-02T14:09:33 | 2021-03-02T14:09:33 | 343,795,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | #This program adds two numbers given on the command line.
#At the OS command prompt call this program as follows
#args.py 3 4
#It should return 7
########################################
import sys
print ("You have entered ",len(sys.argv)-1," arguments")
print (sys.argv[0])
sum=0
for x in sys.argv[1:]:
sum=sum+int(x)
print (sum)
############################################
#run it as below on windows
#c:\python27\python args.py 3 4
| [
"[email protected]"
] | |
598e7e1951f92af159f2e76bc02bd394360df3cd | be0a3aa7b83b87c5d2c257b538545bdded39c051 | /Chatbot_Web/impl/view/sp_view.py | 48c5784591afdbd8ef823393a0b3a239e1eeeb6b | [
"Apache-2.0"
] | permissive | water123li/Chatbot_CN | 480e3bc6d6c0d8b6b0823452556acef14df1c2c3 | e63808030c6cc516020075cdcd0c332120a998fc | refs/heads/master | 2022-01-25T10:34:34.726243 | 2019-06-13T10:44:44 | 2019-06-13T10:44:44 | 192,504,292 | 1 | 0 | Apache-2.0 | 2019-06-18T09:01:55 | 2019-06-18T09:01:55 | null | UTF-8 | Python | false | false | 557 | py | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: sp_view.py
   Description :  Semantic parsing view routing
Author : charl
date: 2018/11/2
-------------------------------------------------
Change Activity: 2018/11/2:
-------------------------------------------------
"""
from django.shortcuts import render
def sp_view(request):  # content that the index page needs to load up-front goes here
context = {}
    return render(request, 'semantic_parsing/semantic_parsing.html', context)
| [
"[email protected]"
] | |
0719c05250e74207d69b6469b6281bd629a2d5d8 | 8f1996c1b5a0211474c7fa287be7dc20a517f5f0 | /hail/python/hail/vds/combiner/__init__.py | 66f14a1f2905d37375d28189b406329ce94c335f | [
"MIT"
] | permissive | johnc1231/hail | 9568d6effe05e68dcc7bf398cb32df11bec061be | 3dcaa0e31c297e8452ebfcbeda5db859cd3f6dc7 | refs/heads/main | 2022-04-27T10:51:09.554544 | 2022-02-08T20:05:49 | 2022-02-08T20:05:49 | 78,463,138 | 0 | 0 | MIT | 2022-03-01T15:55:25 | 2017-01-09T19:52:45 | Python | UTF-8 | Python | false | false | 238 | py | from .combine import transform_gvcf, combine_variant_datasets
from .variant_dataset_combiner import new_combiner, load_combiner
__all__ = [
'combine_variant_datasets',
'transform_gvcf',
'new_combiner',
'load_combiner',
]
| [
"[email protected]"
] | |
e76ae7afc9085bd4469750939c331cf04a22eae6 | 61ff94d2987b3bc95f82c5a58897f50d1efa1db8 | /hive/db/adapter.py | 88c60225e4070ae7cd7f3e645bf073894c30e7e3 | [
"MIT"
] | permissive | arpwv/hivemind | ee77c9805731fda2bb95e1127a56152fe53b707a | a87e5578f9020be02c867021a8acdfff41f06777 | refs/heads/master | 2021-01-24T03:43:46.507207 | 2018-02-23T22:18:56 | 2018-02-23T22:18:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,292 | py | import logging
import collections
from funcy.seqs import first
import sqlalchemy
from sqlalchemy import text
from hive.conf import Conf
from hive.db.query_stats import QueryStats
logger = logging.getLogger(__name__)
class Db:
_instance = None
@classmethod
def instance(cls):
if not cls._instance:
cls._instance = Db()
return cls._instance
def __init__(self):
self._conn = None
self._trx_active = False
def conn(self):
if not self._conn:
self._conn = Db.create_engine(echo=False).connect()
# It seems as though sqlalchemy tries to take over transactions
# and handle them itself; seems to issue a START TRANSACTION on
# connect, which makes postgres complain when we start our own:
# > WARNING: there is already a transaction in progress
# TODO: handle this behavior properly. In the meantime:
self._conn.execute(text("COMMIT"))
return self._conn
@staticmethod
def create_engine(echo=False):
engine = sqlalchemy.create_engine(
Conf.get('database_url'),
isolation_level="READ UNCOMMITTED", # only works in mysql
pool_recycle=3600,
echo=echo)
return engine
def is_trx_active(self):
return self._trx_active
# any non-SELECT queries
def query(self, sql, **kwargs):
# if prepared tuple, unpack
if isinstance(sql, tuple):
assert not kwargs
kwargs = sql[1]
sql = sql[0]
assert isinstance(sql, str)
assert isinstance(kwargs, dict)
# this method is reserved for anything but SELECT
assert self._is_write_query(sql), sql
return self._query(sql, **kwargs)
# SELECT n*m
def query_all(self, sql, **kwargs):
res = self._query(sql, **kwargs)
return res.fetchall()
# SELECT 1*m
def query_row(self, sql, **kwargs):
res = self._query(sql, **kwargs)
return first(res)
# SELECT n*1
def query_col(self, sql, **kwargs):
res = self._query(sql, **kwargs).fetchall()
return [r[0] for r in res]
# SELECT 1*1
def query_one(self, sql, **kwargs):
row = self.query_row(sql, **kwargs)
if row:
return first(row)
def db_engine(self):
engine = self.conn().dialect.name
if engine not in ['postgresql', 'mysql']:
raise Exception("db engine %s not supported" % engine)
return engine
@staticmethod
def build_upsert(table, pk, values):
pks = [pk] if isinstance(pk, str) else pk
values = collections.OrderedDict(values)
fields = list(values.keys())
pks_blank = [values[k] is None for k in pks]
if all(pks_blank):
cols = ', '.join([k for k in fields if k not in pks])
params = ', '.join([':'+k for k in fields if k not in pks])
sql = "INSERT INTO %s (%s) VALUES (%s)"
sql = sql % (table, cols, params)
else:
update = ', '.join([k+" = :"+k for k in fields if k not in pks])
where = ' AND '.join([k+" = :"+k for k in fields if k in pks])
sql = "UPDATE %s SET %s WHERE %s"
sql = sql % (table, update, where)
return (sql, values)
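    # Example of both branches (hypothetical table and values): a set pk
    # builds an UPDATE, a blank pk falls back to an INSERT.
    #
    # >>> Db.build_upsert('posts', 'id', {'id': 1, 'title': 'hi'})
    # ('UPDATE posts SET title = :title WHERE id = :id', OrderedDict(...))
    # >>> Db.build_upsert('posts', 'id', {'id': None, 'title': 'hi'})
    # ('INSERT INTO posts (title) VALUES (:title)', OrderedDict(...))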
@QueryStats()
def _query(self, sql, **kwargs):
if sql == 'START TRANSACTION':
assert not self._trx_active
self._trx_active = True
elif sql == 'COMMIT':
assert self._trx_active
self._trx_active = False
query = text(sql).execution_options(autocommit=False)
try:
return self.conn().execute(query, **kwargs)
except Exception as e:
print("[SQL] Error in query {} ({})".format(sql, kwargs))
#self.conn.close() # TODO: check if needed
logger.exception(e)
raise e
@staticmethod
def _is_write_query(sql):
action = sql.strip()[0:6].strip()
if action == 'SELECT':
return False
if action in ['DELETE', 'UPDATE', 'INSERT', 'COMMIT', 'START', 'ALTER']:
return True
raise Exception("unknown action: {}".format(sql))
| [
"[email protected]"
] | |
1dad10d4f022b45a885361c1ef7cad694f8b1ae6 | 4099891546014e49b74f28987d26f93e77559471 | /app/models.py | 2b60aa9f5ce76532d09b9a3ed3443348f9d2b2da | [
"MIT"
] | permissive | leezichanga/Newshighlights | 519ecac73341adcf90b364024d335fe3574a12c6 | abaca8891fe0d62e624e8c83ca4ba65f5ad6fe0f | refs/heads/master | 2020-03-08T21:12:27.616700 | 2018-04-10T13:24:19 | 2018-04-10T13:24:19 | 128,401,925 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | class Source:
'''
    Source class to define news source objects
'''
def __init__(self,id,name,description,url,category,language,country):
self.id = id
self.name = name
self.description = description
self.url = url
self.category = category
self.language = language
self.country = country
class Article:
'''
Article class to define article objects
'''
def __init__(self,id,name,author,title,description,url,urlToImage,publishedAt):
self.id = id
self.name = name
self.author = author
self.title = title
self.description = description
self.url = url
self.urlToImage = urlToImage
self.publishedAt = publishedAt
| [
"[email protected]"
] | |
3b876b254779a8a51c619573dd173dba1daf235b | fcf870abec4a3fe936668ed14afcded9c10e4aa3 | /featureselection/CHI2.py | 3dbd1809a6a48214769cfd116f2e38f093ac09ef | [] | no_license | sirpan/iLearn | f8d81523720245cc1ab8368aeb609511fc93af5a | 507aae17d9fea3d74a7c77984f1f1750eb734f53 | refs/heads/master | 2023-03-22T06:55:48.791894 | 2021-03-17T07:23:15 | 2021-03-17T07:23:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,092 | py | #!/usr/bin/env python
# _*_coding:utf-8_*_
import numpy as np
import pandas as pd
binBox = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
def CHI2(encodings, labels):
features = encodings[0][1:]
encodings = np.array(encodings)[1:]
data = encodings[:, 1:]
shape = data.shape
data = np.reshape(data, shape[0] * shape[1])
data = np.reshape([float(i) for i in data], shape)
e = ''
if shape[0] < 5 or shape[1] < 2:
return 0, e
dataShape = data.shape
if dataShape[1] != len(features):
print('Error: inconsistent data shape with feature number.')
return 0, 'Error: inconsistent data shape with feature number.'
if dataShape[0] != len(labels):
print('Error: inconsistent data shape with sample number.')
return 0, 'Error: inconsistent data shape with sample number.'
sampleNumber = len(data)
labelClass = set(labels)
myFea = {}
for i in range(len(features)):
array = data[:, i]
newArray = list(pd.cut(array, len(binBox), labels=binBox))
binBoxClass = set(newArray)
myObservation = {}
for j in range(len(labels)):
# print(labels[j], newArray[j])
myObservation[str(labels[j]) + str(newArray[j])] = myObservation.get(str(labels[j]) + str(newArray[j]),
0) + 1
myExpect = {}
for j in labelClass:
for k in binBox:
myExpect[str(j) + str(k)] = labels.count(j) * newArray.count(k) / sampleNumber
chiValue = 0
for j in labelClass:
for k in binBoxClass:
chiValue = chiValue + pow(((myObservation.get(str(j) + str(k), 0)) - myExpect.get(str(j) + str(k), 0)),
2) / myExpect[str(j) + str(k)]
myFea[features[i]] = chiValue
res = []
res.append(['feature', 'CHI-value'])
for key in sorted(myFea.items(), key=lambda item: item[1], reverse=True):
res.append([key[0], '{0:.3f}'.format(myFea[key[0]])])
return res, e
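# Minimal usage sketch (synthetic data, shapes assumed): 'encodings' is a
# header row followed by [sample_id, feature values ...] rows, and 'labels'
# must be a plain list because the code calls labels.count().
#
# encodings = [['#', 'f1', 'f2']] + [['s%d' % i, i % 3, (i * 7) % 5] for i in range(20)]
# labels = [i % 2 for i in range(20)]
# ranking, err = CHI2(encodings, labels)  # ranking[0] == ['feature', 'CHI-value']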
| [
"[email protected]"
] | |
8f611b94b68f4bdb69f9435b97e40be3767a17b3 | dcd3f61b47de7cc3e27be76c54c38de1d1fb4261 | /leap_year.py | 1bbe42a169cec9b725a81d7b699fd257a872cf28 | [] | no_license | Somi-Singh/if-else | 853b06b8f64ba1ac4ae879783cd749a5081aac34 | 05422c0a508b377a01f2851899c204ff350099d2 | refs/heads/main | 2023-06-08T12:37:55.160487 | 2021-07-01T09:34:36 | 2021-07-01T09:34:36 | 381,979,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | user=int(input("enter the year"))
if user%400==0 or (user%4==0 and user%100!=0):
    print("leap year")
else:
    print("not a leap year")
"[email protected]"
] | |
c72949e7eb1456018af8dbaec4339fb7adc37b26 | 0a3322707e58aae8abe27c92f63164f53dc6a4ac | /ecogdata/expconfig/exp_descr/__init__.py | 1a21126ca584798ebde2f8ce3a0579e69b67891e | [
"BSD-3-Clause"
] | permissive | miketrumpis/ecogdata | 740f9a61cad7c3fea380ac3670f2b513cfd407f8 | 2b6af886ae1dfbcd0640661138a3ed2f927fba57 | refs/heads/master | 2022-11-05T06:27:22.497965 | 2022-10-13T21:31:52 | 2022-10-13T21:31:52 | 133,874,922 | 0 | 0 | BSD-3-Clause | 2021-11-03T21:58:04 | 2018-05-17T22:32:17 | Python | UTF-8 | Python | false | false | 97 | py | from .base_exp import *
from .audio_exp import *
from .expo_exp import *
from .builders import *
| [
"[email protected]"
] | |
7b77ba86eb35c3ed786fe7a7e707898bd1163f50 | 45e49a395fe58783cdc662ba6cf3805ef499190e | /raiden/tests/unit/test_notifyingqueue.py | 24caa4ff4e423841f70bbc59687e829245155fe6 | [
"MIT"
] | permissive | mat7ias/raiden | 862708d7e2f1f84ade6623c626daf3578a948c10 | 7463479ffde4f48577b74421f3c47a097e95a36f | refs/heads/master | 2020-03-28T06:17:30.899834 | 2018-09-07T10:24:37 | 2018-09-07T11:44:18 | 146,172,440 | 0 | 0 | MIT | 2018-08-26T10:50:17 | 2018-08-26T10:50:17 | null | UTF-8 | Python | false | false | 925 | py | import gevent
from gevent.event import Event
from raiden.utils.notifying_queue import NotifyingQueue
from raiden.network.transport.udp.udp_utils import event_first_of
def add_element_to_queue(queue, element):
queue.put(element)
def test_copy():
queue = NotifyingQueue()
assert queue.copy() == []
queue.put(1)
assert queue.copy() == [1]
assert queue.peek() == 1, 'copy must preserve the queue'
queue.put(2)
assert queue.copy() == [1, 2], 'copy must preserve the items order'
def test_event_must_be_set():
queue = NotifyingQueue()
event_stop = Event()
data_or_stop = event_first_of(
queue,
event_stop,
)
spawn_after_seconds = 1
element = 1
gevent.spawn_later(spawn_after_seconds, add_element_to_queue, queue, element)
assert data_or_stop.wait()
def test_not_empty():
queue = NotifyingQueue(items=[1, 2])
assert queue.is_set()
| [
"[email protected]"
] | |
ebb258466ca8a94b44c533eca60e68a2ab3edd10 | 2347a00aa41c023924de6bc4ffe0e8bc244a0f3f | /application_form/migrations/0030_auto_20150908_1132.py | 937d76c617b12ba3080d51e9f999a4d0ef15307c | [] | no_license | Dean-Christian-Armada/prod-people | 2ac20d16aecb0cf1ae50a08e456060eee270b518 | fb8d99394d78bbf4d1831223fce2d7ac4a04f34d | refs/heads/master | 2021-01-01T16:19:36.904967 | 2016-01-26T09:20:36 | 2016-01-26T09:20:36 | 42,503,579 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('application_form', '0029_applicationformflagdocuments'),
]
operations = [
migrations.AlterField(
model_name='applicationformflagdocuments',
name='user',
field=models.ForeignKey(default=None, to='login.UserProfile'),
),
]
| [
"[email protected]"
] | |
0458b01249ba1787f2bee6dbad3d2c2a9e97c9d8 | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/smsdroid/testcase/firstcases/testcase2_012.py | 700a6ce30d93469cae97356efa5d69b55ac3671f | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,924 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'de.ub0r.android.smsdroid',
'appActivity' : 'de.ub0r.android.smsdroid.ConversationListActivity',
'resetKeyboard' : True,
'androidCoverage' : 'de.ub0r.android.smsdroid/de.ub0r.android.smsdroid.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=1000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=1000)
return
def scrollToFindElement(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
elements = driver.find_elements_by_android_uiautomator(str)
if (len(elements) > 1) :
for temp in elements :
if temp.get_attribute("enabled") == "true" :
element = temp
break
except NoSuchElementException:
swipe(driver, 0.5, 0.55, 0.5, 0.2)
else :
return element
for i in range(0, 4, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
elements = driver.find_elements_by_android_uiautomator(str)
if (len(elements) > 1):
for temp in elements:
if temp.get_attribute("enabled") == "true":
element = temp
break
except NoSuchElementException:
swipe(driver, 0.5, 0.2, 0.5, 0.55)
else :
return element
return
def scrollToClickElement(driver, str) :
element = scrollToFindElement(driver, str)
if element is None :
return
else :
element.click()
def clickInList(driver, str) :
element = None
if (str is None) :
candidates = driver.find_elements_by_class_name("android.widget.CheckedTextView")
if len(candidates) >= 1 and checkWindow(driver):
element = candidates[len(candidates)-1]
else :
element = scrollToFindElement(driver, str)
if element is not None :
element.click()
else :
if checkWindow(driver) :
driver.press_keycode(4)
def clickOnCheckable(driver, str, value = "true") :
parents = driver.find_elements_by_class_name("android.widget.LinearLayout")
for parent in parents:
try :
parent.find_element_by_android_uiautomator(str)
lists = parent.find_elements_by_class_name("android.widget.LinearLayout")
if len(lists) == 1 :
innere = parent.find_element_by_android_uiautomator("new UiSelector().checkable(true)")
nowvalue = innere.get_attribute("checked")
if (nowvalue != value) :
innere.click()
break
except NoSuchElementException:
continue
def typeText(driver, value) :
element = getElememt(driver, "new UiSelector().className(\"android.widget.EditText\")")
element.clear()
element.send_keys(value)
enterelement = getElememt(driver, "new UiSelector().text(\"OK\")")
if (enterelement is None) :
if checkWindow(driver):
driver.press_keycode(4)
else :
enterelement.click()
def checkWindow(driver) :
dsize = driver.get_window_size()
nsize = driver.find_element_by_class_name("android.widget.FrameLayout").size
if dsize['height'] > nsize['height']:
return True
else :
return False
def testingSeekBar(driver, str, value):
try :
if(not checkWindow(driver)) :
element = seekForNearestSeekBar(driver, str)
else :
element = driver.find_element_by_class_name("android.widget.SeekBar")
if (None != element):
settingSeekBar(driver, element, value)
driver.find_element_by_android_uiautomator("new UiSelector().text(\"OK\")").click()
except NoSuchElementException:
time.sleep(1)
def seekForNearestSeekBar(driver, str):
parents = driver.find_elements_by_class_name("android.widget.LinearLayout")
for parent in parents:
try :
parent.find_element_by_android_uiautomator(str)
lists = parent.find_elements_by_class_name("android.widget.LinearLayout")
if len(lists) == 1 :
innere = parent.find_element_by_class_name("android.widget.SeekBar")
return innere
break
except NoSuchElementException:
continue
def settingSeekBar(driver, element, value) :
x = element.rect.get("x")
y = element.rect.get("y")
width = element.rect.get("width")
height = element.rect.get("height")
TouchAction(driver).press(None, x + 10, y + height/2).move_to(None, x + width * value,y + height/2).release().perform()
y = value
def clickInMultiList(driver, str) :
element = None
if (str is None) :
candidates = driver.find_elements_by_class_name("android.widget.CheckedTextView")
if len(candidates) >= 1 and checkWindow(driver):
element = candidates[len(candidates)-1]
else :
element = scrollToFindElement(driver, str)
if element is not None :
nowvalue = element.get_attribute("checked")
if (nowvalue != "true") :
element.click()
if checkWindow(driver) :
driver.find_element_by_android_uiautomator("new UiSelector().text(\"OK\")").click()
# testcase2_012
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememtBack(driver, "new UiSelector().text(\"66560866\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"de.ub0r.android.smsdroid:id/inout\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
driver.press_keycode(4)
element = getElememt(driver, "new UiSelector().resourceId(\"de.ub0r.android.smsdroid:id/item_answer\").className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Me\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Call 12312312\")", "new UiSelector().className(\"android.widget.TextView\").instance(2)")
TouchAction(driver).tap(element).perform()
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"2_012\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'de.ub0r.android.smsdroid'):
cpackage = "adb shell am force-stop " + cpackage
        os.popen(cpackage)
| [
"[email protected]"
] | |
c39e5331ecbec1272a0f70f65618ae44e1ef8ff4 | 86c5360e5a98088c76bbbcf93e3180b825744708 | /yolo_v3/video_test.py | 31082b2b1f160853466a29120a891091b9470756 | [] | no_license | FenHua/yolo | 586b154f77f6855c2b8f731f101c92dd07840b39 | 6da4aa7c2ad2656182b6694b44d3c8e7cd6f3aa8 | refs/heads/master | 2020-04-26T11:37:37.092504 | 2019-03-03T02:51:23 | 2019-03-03T02:51:23 | 173,523,090 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,823 | py | # coding: utf-8
from __future__ import division, print_function
import tensorflow as tf
import numpy as np
import argparse
import cv2
import time
from utils.misc_utils import parse_anchors, read_class_names
from utils.nms_utils import gpu_nms
from utils.plot_utils import get_color_table, plot_one_box
from model import yolov3
parser = argparse.ArgumentParser(description="YOLO-V3 video test procedure.")
parser.add_argument("input_video", type=str,
help="The path of the input video.")
parser.add_argument("--anchor_path", type=str, default="./data/yolo_anchors.txt",
help="The path of the anchor txt file.")
parser.add_argument("--new_size", nargs='*', type=int, default=[416, 416],
help="Resize the input image with `new_size`, size format: [width, height]")
parser.add_argument("--class_name_path", type=str, default="./data/coco.names",
help="The path of the class names.")
parser.add_argument("--restore_path", type=str, default="./data/darknet_weights/yolov3.ckpt",
help="The path of the weights to restore.")
parser.add_argument("--save_video", type=lambda x: (str(x).lower() == 'true'), default=False,
help="Whether to save the video detection results.")
args = parser.parse_args()
args.anchors = parse_anchors(args.anchor_path)
args.classes = read_class_names(args.class_name_path)
args.num_class = len(args.classes)
color_table = get_color_table(args.num_class)
vid = cv2.VideoCapture(args.input_video)
video_frame_cnt = int(vid.get(7))
video_width = int(vid.get(3))
video_height = int(vid.get(4))
video_fps = int(vid.get(5))
if args.save_video:
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
videoWriter = cv2.VideoWriter('video_result.mp4', fourcc, video_fps, (video_width, video_height))
with tf.Session() as sess:
input_data = tf.placeholder(tf.float32, [1, args.new_size[1], args.new_size[0], 3], name='input_data')
yolo_model = yolov3(args.num_class, args.anchors)
with tf.variable_scope('yolov3'):
pred_feature_maps = yolo_model.forward(input_data, False)
pred_boxes, pred_confs, pred_probs = yolo_model.predict(pred_feature_maps)
pred_scores = pred_confs * pred_probs
boxes, scores, labels = gpu_nms(pred_boxes, pred_scores, args.num_class, max_boxes=30, score_thresh=0.5, iou_thresh=0.5)
saver = tf.train.Saver()
saver.restore(sess, args.restore_path)
for i in range(video_frame_cnt):
ret, img_ori = vid.read()
height_ori, width_ori = img_ori.shape[:2]
img = cv2.resize(img_ori, tuple(args.new_size))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = np.asarray(img, np.float32)
img = img[np.newaxis, :] / 255.
start_time = time.time()
boxes_, scores_, labels_ = sess.run([boxes, scores, labels], feed_dict={input_data: img})
end_time = time.time()
        # rescale the box coordinates from the resized input back to the original image size
boxes_[:, 0] *= (width_ori/float(args.new_size[0]))
boxes_[:, 2] *= (width_ori/float(args.new_size[0]))
boxes_[:, 1] *= (height_ori/float(args.new_size[1]))
boxes_[:, 3] *= (height_ori/float(args.new_size[1]))
        for j in range(len(boxes_)):
            x0, y0, x1, y1 = boxes_[j]
            plot_one_box(img_ori, [x0, y0, x1, y1], label=args.classes[labels_[j]], color=color_table[labels_[j]])
cv2.putText(img_ori, '{:.2f}ms'.format((end_time - start_time) * 1000), (40, 40), 0,
fontScale=1, color=(0, 255, 0), thickness=2)
cv2.imshow('image', img_ori)
if args.save_video:
videoWriter.write(img_ori)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
vid.release()
if args.save_video:
videoWriter.release()
| [
"[email protected]"
] | |
6f539cd8fba94c719c7cdbc20951e989a1caeba5 | 279a141a3d4451b53f24bd369d80ac471da6fd95 | /helloworld.py | fac8390bc075e79093fe9d45c734a6dca7cea8bc | [] | no_license | emilyjennings/python-practice | 4f010878263174487ab9ed5ae8c30c3b9ae2e1ca | b98b9ed354999fe3d2286bdd27e83ffd43807f20 | refs/heads/master | 2020-05-24T23:31:43.645103 | 2019-05-20T21:33:45 | 2019-05-20T21:33:45 | 187,516,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | #My first python code - in a while, anyway
print("Hello World")
if 5 > 2:
print("good work")
x = "me"
y = 10
print(x)
print(y)
# Codewars
def xo(s):
    xs = 0
    os = 0
    for ch in s.lower():
        if ch == "o":
            os += 1
        elif ch == "x":
            xs += 1
    return xs == os
def xo(s):
s = s.lower()
return s.count('x') == s.count('o')
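# Quick checks for the one-liner version:
print(xo("ooxx"))   # True
print(xo("xooxx"))  # False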
| [
"github email address"
] | github email address |
f7ff1126916a332306e7385b85ada97ec2c7f820 | e32ee307e4c59cc18f9dea18d797784a1b23148f | /Quinton-t a single line, the minimum subarray sum..py | f1c7da01a7b41f0859e383b1d01ebc66770c9e60 | [] | no_license | GuhanSGCIT/SGCIT | f4ab44346186d45129c74cbad466c6614f9f0f08 | 8b2e5ccf693384aa22aa9d57f39b63e4659f6261 | refs/heads/master | 2020-07-11T05:47:54.033120 | 2020-07-07T05:02:41 | 2020-07-07T05:02:41 | 204,459,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | n = int(input())
l = [int(x) for x in input().split()]
lst = []
for i in range(n):
for j in range(i, n):
lst.append(l[i:j+1])
sm = sum(lst[0])
for i in range(1, len(lst)):
if sm > sum(lst[i]): sm = sum(lst[i])
print(sm)
| [
"[email protected]"
] | |
874d3a461033beb96e17976e837e5e249cbfeb4b | 935672cfefee4a8fe861f3247a6e27b7d1d0669a | /hoer/models/cifar/rezero/preactresnet.py | f79f59dd0a14b959453c3f9f64789b35d6ffefdf | [] | no_license | sbl1996/hoer | 533b10f047a4175a95f8a7cb94430002aef9a39d | 8ced31d49ebe627eb0932f896484a8b2b2c223ce | refs/heads/main | 2023-02-02T00:43:42.716916 | 2020-12-13T08:14:12 | 2020-12-13T08:14:12 | 321,008,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,583 | py | import tensorflow as tf
from tensorflow.keras import Sequential, Model
from tensorflow.keras.initializers import Constant
from tensorflow.keras.layers import Layer
from hanser.models.layers import Act, Conv2d, Norm, GlobalAvgPool, Linear, Identity
class PreActResBlock(Layer):
def __init__(self, in_channels, out_channels, stride):
super().__init__()
self.norm1 = Norm(in_channels)
self.act1 = Act()
self.conv1 = Conv2d(in_channels, out_channels, kernel_size=3, stride=stride)
self.norm2 = Norm(out_channels)
self.act2 = Act()
self.conv2 = Conv2d(out_channels, out_channels, kernel_size=3)
if stride != 1 or in_channels != out_channels:
self.shortcut = Conv2d(in_channels, out_channels, kernel_size=1, stride=stride)
self.res_weight = self.add_weight(
name='res_weight', shape=(), dtype=tf.float32,
trainable=True, initializer=Constant(0.))
def call(self, x):
out = self.norm1(x)
out = self.act1(out)
shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
out = self.conv1(out)
out = self.norm2(out)
out = self.act2(out)
out = self.conv2(out)
return shortcut + self.res_weight * out
class ResNet(Model):
stages = [16, 16, 32, 64]
def __init__(self, depth, k, num_classes=10):
super().__init__()
num_blocks = (depth - 4) // 6
self.conv = Conv2d(3, self.stages[0], kernel_size=3)
self.layer1 = self._make_layer(
self.stages[0] * 1, self.stages[1] * k, num_blocks, stride=1)
self.layer2 = self._make_layer(
self.stages[1] * k, self.stages[2] * k, num_blocks, stride=2)
self.layer3 = self._make_layer(
self.stages[2] * k, self.stages[3] * k, num_blocks, stride=2)
self.norm = Norm(self.stages[3] * k)
self.act = Act()
self.avgpool = GlobalAvgPool()
self.fc = Linear(self.stages[3] * k, num_classes)
def _make_layer(self, in_channels, out_channels, blocks, stride):
layers = [PreActResBlock(in_channels, out_channels, stride=stride)]
for i in range(1, blocks):
layers.append(
PreActResBlock(out_channels, out_channels, stride=1))
return Sequential(layers)
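    # Construction sketch (assumed WRN-style arguments, depth = 6*n + 4):
    #
    # model = ResNet(depth=28, k=10, num_classes=10)
    # logits = model(tf.random.normal([2, 32, 32, 3]))  # -> shape (2, 10)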
def call(self, x):
x = self.conv(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.norm(x)
x = self.act(x)
x = self.avgpool(x)
x = self.fc(x)
        return x
| [
"[email protected]"
] | |
1c56c4804e9cce72cdbf8787317d9f8f9609f5de | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part003278.py | 27bfeab472a10b41ed9fe95e81801cbb8866f1db | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,655 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher88509(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({0: 1}), [
(VariableWithCount('i2.2.1.2.1.0', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher88509._instance is None:
CommutativeMatcher88509._instance = CommutativeMatcher88509()
return CommutativeMatcher88509._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 88508
if len(subjects) >= 1 and isinstance(subjects[0], Pow):
tmp1 = subjects.popleft()
subjects2 = deque(tmp1._args)
# State 88510
if len(subjects2) >= 1:
tmp3 = subjects2.popleft()
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.0_1', tmp3)
except ValueError:
pass
else:
pass
# State 88511
if len(subjects2) >= 1:
tmp5 = subjects2.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2.1.2', tmp5)
except ValueError:
pass
else:
pass
# State 88512
if len(subjects2) == 0:
pass
# State 88513
if len(subjects) == 0:
pass
# 0: x**n
yield 0, subst2
subjects2.appendleft(tmp5)
subjects2.appendleft(tmp3)
subjects.appendleft(tmp1)
return
yield
from collections import deque
| [
"[email protected]"
] | |
8367a3278f4af2f004b3b831acb7ec551b603b1f | 053ad96d19c562c44e3fad53db37b24f4ec7134d | /torchelastic/tsm/driver/test/standalone_session_test.py | 5e48f14e4a023c1330e6c9a25758fa33a742de50 | [
"BSD-3-Clause"
] | permissive | kuikuikuizzZ/elastic | 7ef0ab7b1a4a3510e91eeb6b91b6f94f863940c2 | cf2fb9c153cc371e6d6b341f15122c26965b7461 | refs/heads/master | 2022-12-31T20:11:38.957121 | 2020-09-23T18:38:11 | 2020-09-23T18:39:31 | 298,498,639 | 0 | 0 | BSD-3-Clause | 2020-09-25T07:20:59 | 2020-09-25T07:20:58 | null | UTF-8 | Python | false | false | 6,924 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import shutil
import tempfile
import unittest
from unittest.mock import MagicMock
from torchelastic.tsm.driver.api import (
Application,
AppNotReRunnableException,
AppState,
Container,
DescribeAppResponse,
Resources,
Role,
RunMode,
UnknownAppException,
)
from torchelastic.tsm.driver.local_scheduler import (
LocalDirectoryImageFetcher,
LocalScheduler,
)
from torchelastic.tsm.driver.standalone_session import StandaloneSession
from .test_util import write_shell_script
class Resource:
SMALL = Resources(cpu=1, gpu=0, memMB=1024)
MEDIUM = Resources(cpu=4, gpu=0, memMB=(4 * 1024))
LARGE = Resources(cpu=16, gpu=0, memMB=(16 * 1024))
class StandaloneSessionTest(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp("StandaloneSessionTest")
write_shell_script(self.test_dir, "touch.sh", ["touch $1"])
write_shell_script(self.test_dir, "fail.sh", ["exit 1"])
write_shell_script(self.test_dir, "sleep.sh", ["sleep $1"])
self.image_fetcher = LocalDirectoryImageFetcher()
self.scheduler = LocalScheduler(self.image_fetcher)
# resource ignored for local scheduler; adding as an example
self.test_container = Container(image=self.test_dir).require(Resource.SMALL)
def tearDown(self):
shutil.rmtree(self.test_dir)
def test_run(self):
test_file = os.path.join(self.test_dir, "test_file")
session = StandaloneSession(
name="test_session", scheduler=self.scheduler, wait_interval=1
)
role = Role(name="touch").runs("touch.sh", test_file).on(self.test_container)
app = Application("name").of(role)
app_id = session.run(app)
self.assertEqual(AppState.SUCCEEDED, session.wait(app_id).state)
def test_attach(self):
session1 = StandaloneSession(name="test_session1", scheduler=self.scheduler)
role = Role(name="sleep").runs("sleep.sh", "60").on(self.test_container)
app = Application("sleeper").of(role)
app_id = session1.run(app)
session2 = StandaloneSession(name="test_session2", scheduler=self.scheduler)
session2.attach(app_id)
self.assertEqual(AppState.RUNNING, session2.status(app_id).state)
session2.stop(app_id)
self.assertEqual(AppState.CANCELLED, session2.status(app_id).state)
def test_attach_and_run(self):
session1 = StandaloneSession(name="test_session1", scheduler=self.scheduler)
test_file = os.path.join(self.test_dir, "test_file")
role = Role(name="touch").runs("touch.sh", test_file).on(self.test_container)
app = Application("touch_test_file").of(role)
app_id = session1.run(app)
session2 = StandaloneSession(name="test_session2", scheduler=self.scheduler)
attached_app = session2.attach(app_id)
with self.assertRaises(AppNotReRunnableException):
session2.run(attached_app)
def test_list(self):
session = StandaloneSession(
name="test_session", scheduler=self.scheduler, wait_interval=1
)
role = Role(name="touch").runs("sleep.sh", "1").on(self.test_container)
app = Application("sleeper").of(role)
num_apps = 4
for _ in range(num_apps):
# since this test validates the list() API,
# we do not wait for the apps to finish so run the apps
# in managed mode so that the local scheduler reaps the apps on exit
session.run(app, mode=RunMode.MANAGED)
apps = session.list()
self.assertEqual(num_apps, len(apps))
def test_evict_non_existent_app(self):
# tests that apps previously run with this session that are finished and eventually
# removed by the scheduler also get removed from the session after a status() API has been
# called on the app
scheduler = LocalScheduler(self.image_fetcher, cache_size=1)
session = StandaloneSession(
name="test_session", scheduler=scheduler, wait_interval=1
)
test_file = os.path.join(self.test_dir, "test_file")
role = Role(name="touch").runs("touch.sh", test_file).on(self.test_container)
app = Application("touch_test_file").of(role)
# local scheduler was setup with a cache size of 1
# run the same app twice (the first will be removed from the scheduler's cache)
# then validate that the first one will drop from the session's app cache as well
app_id1 = session.run(app)
session.wait(app_id1)
app_id2 = session.run(app)
session.wait(app_id2)
apps = session.list()
self.assertEqual(1, len(apps))
self.assertFalse(app_id1 in apps)
self.assertTrue(app_id2 in apps)
def test_status(self):
session = StandaloneSession(
name="test_session", scheduler=self.scheduler, wait_interval=1
)
role = Role(name="sleep").runs("sleep.sh", "60").on(self.test_container)
app = Application("sleeper").of(role)
app_id = session.run(app)
self.assertEqual(AppState.RUNNING, session.status(app_id).state)
session.stop(app_id)
self.assertEqual(AppState.CANCELLED, session.status(app_id).state)
def test_status_unknown_app(self):
session = StandaloneSession(
name="test_session", scheduler=self.scheduler, wait_interval=1
)
with self.assertRaises(UnknownAppException):
session.status("unknown_app_id")
def test_status_ui_url(self):
app_id = "test_app"
mock_scheduler = MagicMock()
resp = DescribeAppResponse()
resp.ui_url = "https://foobar"
mock_scheduler.submit.return_value = app_id
mock_scheduler.describe.return_value = resp
session = StandaloneSession(
name="test_ui_url_session", scheduler=mock_scheduler
)
role = Role("ignored").runs("/bin/echo").on(self.test_container)
session.run(Application(app_id).of(role))
status = session.status(app_id)
        self.assertEqual(resp.ui_url, status.ui_url)
def test_wait_unknown_app(self):
session = StandaloneSession(
name="test_session", scheduler=self.scheduler, wait_interval=1
)
with self.assertRaises(UnknownAppException):
session.wait("unknown_app_id")
def test_stop_unknown_app(self):
session = StandaloneSession(
name="test_session", scheduler=self.scheduler, wait_interval=1
)
with self.assertRaises(UnknownAppException):
session.stop("unknown_app_id")
| [
"[email protected]"
] | |
48f6aa788d0e6b6bd8bc7a621ccc3b855d38bbdc | 31ba27461d50fcd85027318eacefa04d828feb4b | /addons/app-trobz-hr/it_equipment_bonus/security/post_object_security.py | 1075bbfb89ee07b0ebcb2d990bff159c5f43d26d | [] | no_license | TinPlusIT05/tms | 5f258cec903d5bf43c26b93fc112fce0b32de828 | 673dd0f2a7c0b69a984342b20f55164a97a00529 | refs/heads/master | 2022-12-04T02:11:54.770745 | 2019-09-23T07:18:11 | 2019-09-23T07:18:11 | 210,278,672 | 0 | 0 | null | 2022-11-22T00:30:37 | 2019-09-23T06:18:48 | Python | UTF-8 | Python | false | false | 1,091 | py | # -*- encoding: utf-8 -*-
from openerp import models, api
group_user = 'Human Resources / Employee'
group_hr_manager = 'Human Resources / Manager'
class it_equipment_bonus_post_object_security(models.TransientModel):
_name = "it.equipment.bonus.post.object.security"
@api.model
def start(self):
self.create_model_access_rights()
return True
@api.model
def create_model_access_rights(self):
MODEL_ACCESS_RIGHTS = {
('hr.equipment.category'): {
(group_hr_manager,): [1, 1, 1, 1],
(group_user,): [1, 0, 0, 0],
},
('employee.it.bonus'): {
(group_hr_manager,): [1, 1, 1, 1],
(group_user,): [1, 0, 0, 0],
},
('hr.equipment.request'): {
(group_hr_manager,): [1, 1, 1, 1],
(group_user,): [1, 1, 1, 0],
},
}
return self.env['trobz.base'].with_context(
{'module_name': 'it_equipment_bonus'}).create_model_access_rights(
MODEL_ACCESS_RIGHTS)
| [
"[email protected]"
] | |
432097dea145bd35db9bfcab0f20d4ad2f970c45 | fc8137f6a4df69640657a0af5d7201de3c6eb261 | /accepted/Valid Palindrome.py | e791233777023d30d7f38d6a0df11bc4c02b9fd2 | [] | no_license | hustlrr/leetcode | 68df72b49ee3bbb9f0755028e024cc9fea2c21aa | 56e33dff3918e371f14d6f7ef03f8951056cc273 | refs/heads/master | 2020-04-12T08:14:25.371761 | 2017-01-01T12:19:34 | 2017-01-01T12:19:34 | 77,119,341 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | # coding=utf-8
class Solution(object):
def isPalindrome(self, s):
"""
:type s: str
:rtype: bool
"""
leftcnt,rightcnt=0,0
leftidx,rightidx=0,len(s)-1
s=s.lower()
while leftidx<rightidx:
while not s[leftidx].isalnum() and leftidx<rightidx:
leftidx+=1
while not s[rightidx].isalnum() and leftidx<rightidx:
rightidx-=1
if leftidx<rightidx:
if s[leftidx]!=s[rightidx]:
return False
if s[leftidx].isalnum():
leftcnt+=1
if s[rightidx].isalnum():
rightcnt+=1
leftidx+=1
rightidx-=1
return leftcnt==rightcnt
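# e.g. Solution().isPalindrome("A man, a plan, a canal: Panama") -> True,
# while Solution().isPalindrome("race a car") -> False (the classic examples).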
| [
"[email protected]"
] | |
c01a223e3df44056ff29c6a04a6b554b73afe3f5 | 89c6895a0d71d4ce1fa6ca9e415649625ba2d1c6 | /babybuddy/__init__.py | a0ff099532963b2e40c164385a3203065ae80f3e | [
"BSD-2-Clause",
"BSD-2-Clause-Views"
] | permissive | bry-c/babybuddy | 18f72a3f6480abaafe34250bf82828567fe05a23 | 49156c1d80568a8c052a6788af9a63ea658b7452 | refs/heads/master | 2020-12-20T18:24:29.053433 | 2020-01-25T05:24:43 | 2020-01-25T05:24:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,979 | py | """
.----------------.
| .--------------. |
| | ______ | |
| | |_ _ \ | |
| | | |_) | | |
| | | __'. | |
| | _| |__) | | |
| | |_______/ | |
| | | |
| '--------------' |
'----------------'
.----------------. .----------------.
| .--------------. | .--------------. |
| | __ | | | ______ | |
| | / \ | | | |_ _ \ | |
| | / /\ \ | | | | |_) | | |
| | / ____ \ | | | | __'. | |
| | _/ / \ \_ | | | _| |__) | | |
| ||____| |____|| | | |_______/ | |
| | | | | | |
| '--------------' | '--------------' |
'----------------' '----------------'
.----------------. .----------------. .----------------.
| .--------------. | .--------------. | .--------------. |
| | ____ ____ | | | ______ | | | _____ _____ | |
| | |_ _||_ _| | | | |_ _ \ | | ||_ _||_ _|| |
| | \ \ / / | | | | |_) | | | | | | | | | |
| | \ \/ / | | | | __'. | | | | ' ' | | |
| | _| |_ | | | _| |__) | | | | \ `--' / | |
| | |______| | | | |_______/ | | | `.__.' | |
| | | | | | | | | |
| '--------------' | '--------------' | '--------------' |
'----------------' '----------------' '----------------'
.----------------. .----------------. .----------------. .----------------.
| .--------------. | .--------------. | .--------------. | .--------------. |
| | ________ | | | ________ | | | ____ ____ | | | _ | |
| | |_ ___ `. | | | |_ ___ `. | | | |_ _||_ _| | | | | | | |
| | | | `. \ | | | | | `. \ | | | \ \ / / | | | | | | |
| | | | | | | | | | | | | | | | \ \/ / | | | | | | |
| | _| |___.' / | | | _| |___.' / | | | _| |_ | | | | | | |
| | |________.' | | | |________.' | | | |______| | | | |_| | |
| | | | | | | | | | | (_) | |
| '--------------' | '--------------' | '--------------' | '--------------' |
'----------------' '----------------' '----------------' '----------------'
""" # noqa
__title__ = 'Baby Buddy'
__version__ = '1.2.4'
__license__ = 'BSD 2-Clause'
VERSION = __version__
default_app_config = 'babybuddy.apps.BabyBuddyConfig'
| [
"[email protected]"
] | |
66ee5476b811b7932d971e889a6b0e8fa585e838 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /jozLzME3YptxydiQm_24.py | 8b1f7fcb54f2b79c2a1a818e7a9f5e7adc43b9b8 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 784 | py | """
Create a function that takes in a word and determines whether or not it is
plural. A plural word is one that ends in "s".
### Examples
is_plural("changes") ➞ True
is_plural("change") ➞ False
is_plural("dudes") ➞ True
is_plural("magic") ➞ False
### Notes
* Don't forget to `return` the result.
* Remember that return `True` ( _boolean_ ) is not the same as return `"True"` ( _string_ ).
* This is an oversimplification of the English language. We are ignoring edge cases like "goose" and "geese", "fungus" and "fungi", etc.
* If you get stuck on a challenge, find help in the **Resources** tab.
* If you're _really_ stuck, unlock solutions in the **Solutions** tab.
"""
def is_plural(word):
return word.endswith("s")
| [
"[email protected]"
] | |
1c847a515d2da1b0eab37823f7addbb578edaf55 | 0359f840efdaa2849594dd1f498cb1533a61b17e | /pretrain_visual.py | 521ad2edcdd8c9444b49c590e519ca2cdcb7e9f6 | [] | no_license | qiuchili/MMSentiReader | e21026b2a8333c921dbaa29933069c6444117bd4 | ecda45099159d47cb488961d4b5ed7b8f6b494cd | refs/heads/master | 2023-02-04T12:44:06.330910 | 2020-12-26T03:41:46 | 2020-12-26T03:41:46 | 281,270,212 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,401 | py | # -*- coding: utf-8 -*-
import torch.nn as nn
import pickle
import os
import numpy as np
import torch.nn.functional as F
import cv2
import torch
import time
from tqdm import tqdm
from torch.utils.data import DataLoader, TensorDataset,random_split
from torch.utils.data import Dataset
from torch.nn.utils.rnn import pad_sequence
import argparse
from utils.params import Params
from torchvision.io import read_video
import random
class Pretrain(nn.Module):
def __init__(self, input_shape, output_dim):
super(Pretrain, self).__init__()
self.input_shape = input_shape
self.output_dim = output_dim
self.conv = nn.Conv3d(in_channels = 3, out_channels = 16, kernel_size = (5,5,5))
self.max_pool = nn.MaxPool3d(kernel_size = (3,3,3))
self.max_output_dims = [int((self.input_shape[0]-5+1)/3), int((self.input_shape[1]-5+1)/3), int((self.input_shape[2]-5+1)/3)]
self.batch_norm = nn.BatchNorm3d(16)
self.conv2 = nn.Conv3d(in_channels = 16, out_channels = 4, kernel_size = (5,5,5))
self.sigmoid = nn.Sigmoid()
self.tanh = nn.Tanh()
self.max_output_dims = [int((_dim-5+1)/3) for _dim in self.max_output_dims]
self.max_output_dim = 4
for _dim in self.max_output_dims:
self.max_output_dim = self.max_output_dim*_dim
self.dense = nn.Linear(in_features = self.max_output_dim,out_features = 300)
self.dropout = nn.Dropout(0.8)
self.dense1 = nn.Linear(in_features = 300,out_features = 300)
self.dense2 = nn.Linear(in_features = 300,out_features = 64)
self.softmax = nn.Softmax(dim=-1)
self.dense3 = nn.Linear(in_features = 64, out_features = output_dim)
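        # Shape sketch: each Conv3d(kernel=5) shrinks a dimension by 4 and each
        # MaxPool3d(3) floor-divides it by 3, e.g. depth 32 -> (32-4)//3 = 9 -> (9-4)//3 = 1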
def get_representation(self, x):
x = self.conv(x)
x = self.sigmoid(x)
x = self.max_pool(x)
x = self.conv2(x)
x = self.sigmoid(x)
x = self.max_pool(x)
x = x.reshape(-1, self.max_output_dim)
x = self.dropout(x)
x = self.dense(x)
x = self.sigmoid(x)
x = self.dense1(x)
x = self.tanh(x)
return(x)
def forward(self, x):
x = self.conv(x)
x = self.sigmoid(x)
x = self.max_pool(x)
x = self.conv2(x)
x = self.sigmoid(x)
x = self.max_pool(x)
x = x.reshape(-1, self.max_output_dim)
x = self.dropout(x)
x = self.dense(x)
x = self.sigmoid(x)
x = self.dense1(x)
x = self.tanh(x)
x = self.dense2(x)
x = self.sigmoid(x)
x = self.dense3(x)
x = F.log_softmax(x, dim = -1)
return(x)
def _load_file(file_name):
feature, label = pickle.load(open(file_name,'rb'))
return feature, label
class MELDDataset(Dataset):
def __init__(self, data_dir):
files = os.listdir(data_dir)
self.data_files = [os.path.join(data_dir,_file) for _file in files]
self.data_files = sorted(self.data_files)
def __getitem__(self, idx):
return _load_file(self.data_files[idx])
def __len__(self):
return len(self.data_files)
def get_raw_feature(video_path):
vframes, _, _ = read_video(video_path)
vframes = vframes.permute(3,0,1,2)
    # avoid the torch.tensor(tensor) copy warning and scale uint8 frames to [0, 1]
    return (vframes / 255.0).to(torch.float32)
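# Usage sketch (file name illustrative): get_raw_feature('dia0_utt0.mp4')
# returns a float32 tensor shaped (3, T, H, W) with values scaled to [0, 1].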
def train(params):
dataset = MELDDataset(params.stored_path)
print('training model...')
total_num = dataset.__len__()
train_num = int(total_num*params.ratio)
val_num = total_num - train_num
train_set, val_set = random_split(dataset, [train_num, val_num])
train_loader = DataLoader(train_set, batch_size = params.batch_size, shuffle = True)
val_loader = DataLoader(val_set, batch_size = params.batch_size, shuffle = False)
output_dim = 7
model = Pretrain((params.input_d, params.input_h, params.input_w), output_dim)
model = model.to(params.device)
criterion = nn.NLLLoss()
optimizer = torch.optim.RMSprop(model.parameters(),lr = params.lr)
# Temp file for storing the best model
epochs = 100
best_val_loss = 99999.0
# best_val_loss = -1.0
for i in range(epochs):
print('epoch: ', i)
model.train()
with tqdm(total = train_num) as pbar:
time.sleep(0.05)
for _i,data in enumerate(train_loader,0):
# For debugging, please run the line below
# _i,data = next(iter(enumerate(train_loader,0)))
b_inputs = data[0].to(params.device)
b_targets = data[-1].to(params.device)
# Does not train if batch_size is 1, because batch normalization will crash
if b_inputs[0].shape[0] == 1:
continue
optimizer.zero_grad()
outputs = model(b_inputs)
loss = criterion(outputs, b_targets.argmax(dim = -1))
if np.isnan(loss.item()):
print('loss value overflow!')
break
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), 0.8)
optimizer.step()
# Compute Training Accuracy
n_total = len(outputs)
n_correct = (outputs.argmax(dim = -1) == b_targets.argmax(dim = -1)).sum().item()
train_acc = n_correct/n_total
#Update Progress Bar
pbar.update(params.batch_size)
ordered_dict={'acc': train_acc, 'loss':loss.item()}
pbar.set_postfix(ordered_dict=ordered_dict)
model.eval()
#Validation Set
outputs = []
targets = []
for _ii,data in enumerate(val_loader,0):
data_x = data[0].to(params.device)
data_t = data[-1].to(params.device)
data_o = model(data_x)
outputs.append(data_o.detach())
targets.append(data_t.detach())
outputs = torch.cat(outputs)
targets = torch.cat(targets)
val_loss = criterion(outputs, targets.argmax(dim = -1))
n_total = len(outputs)
n_correct = (outputs.argmax(dim = -1) == targets.argmax(dim = -1)).sum().item()
val_acc = n_correct/n_total
print('val loss = {}, val acc = {}'.format(val_loss,val_acc))
if val_loss < best_val_loss:
torch.save(model,'dummy_files/best_model.pt')
print('The best model up till now. Saved to File.')
best_val_loss = val_loss
return model
def extract_raw_features(params):
data = pickle.load(open(params.file_path,'rb'))
video_ids, video_speakers, video_labels,video_text, video_audio, video_sentence, video_act, train_vid, test_vid, _ = data
if not os.path.exists(params.stored_path):
os.mkdir(params.stored_path)
train_dia_num = 1038
val_dia_num = len(train_vid) - train_dia_num
for dia_id, sen_ids in video_ids.items():
print('extracting raw features of dia {}'.format(dia_id))
dia_labels = video_labels[dia_id]
split_dir_path = os.path.join(params.videos_dir, 'train_splits')
if dia_id >= (train_dia_num+1):
split_dir_path = os.path.join(params.videos_dir, 'dev_splits')
dia_id = dia_id - (train_dia_num+1)
if dia_id >= val_dia_num:
continue
for _index, _id in enumerate(sen_ids):
print('utterance id {}'.format(_id))
video_fname = os.path.join(split_dir_path,'dia{}_utt{}.mp4'.format(dia_id, _id))
label = dia_labels[_index]
raw_video_feature = get_raw_feature(video_fname)
raw_video_feature = F.interpolate(raw_video_feature.unsqueeze(dim = 0), (params.input_d, params.input_h, params.input_w))[0]
one_hot_index = np.zeros((7))
one_hot_index[label] = 1
one_hot_index = torch.tensor(one_hot_index,dtype=torch.float64)
print('save utterance data to pickle file...')
pickle.dump([raw_video_feature,one_hot_index],open(os.path.join(params.stored_path,'dia_{}_{}.pkl'.format(dia_id, _id)),'wb'))
print('Done.')
def generate_visual_rep(model, params):
print('generate visual representation...')
data = pickle.load(open(params.file_path,'rb'))
video_ids, video_speakers, video_labels_7,video_text, video_audio, video_sentence, video_act, train_vid, test_vid, video_labels_3 = data
train_dia_num = 1038
val_dia_num = len(train_vid) - train_dia_num
video_visual = {}
for dia_id, sen_ids in video_ids.items():
dia_visual = []
split_dir_path = os.path.join(params.videos_dir, 'train_splits')
if dia_id >= (train_dia_num+1):
split_dir_path = os.path.join(params.videos_dir, 'dev_splits')
dia_id = dia_id - (train_dia_num+1)
if dia_id >= val_dia_num:
split_dir_path = os.path.join(params.videos_dir, 'test_splits')
dia_id = dia_id - val_dia_num
for _index, _id in enumerate(sen_ids):
video_fname = os.path.join(split_dir_path,'dia{}_utt{}.mp4'.format(dia_id, _id))
raw_video_feature = get_raw_feature(video_fname)
raw_video_feature = F.interpolate(raw_video_feature.unsqueeze(dim = 0), (params.input_d, params.input_h, params.input_w))
video_rep = model.get_representation(raw_video_feature.to(params.device))[0].detach().cpu().numpy()
dia_visual.append(video_rep)
video_visual[dia_id] = dia_visual
data = video_ids, video_speakers, video_labels_7,video_text, video_audio, video_visual, video_sentence, video_act, train_vid, test_vid, video_labels_3
pickle.dump(data,open(params.output_path,'wb'))
def set_seed(params):
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
random.seed(params.seed)
os.environ['PYTHONHASHSEED'] = str(params.seed)
np.random.seed(params.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(params.seed)
torch.cuda.manual_seed_all(params.seed)
else:
torch.manual_seed(params.seed)
def run(params):
train(params)
print('loading the pretraining model...')
model = torch.load('dummy_files/best_model.pt')
generate_visual_rep(model, params)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='running experiments on multimodal datasets.')
parser.add_argument('-config', action = 'store', dest = 'config_file', help = 'please enter configuration file.',default = 'config/pretrain_visual.ini')
args = parser.parse_args()
params = Params()
params.parse_config(args.config_file)
params.config_file = args.config_file
mode = 'run'
if 'mode' in params.__dict__:
mode = params.mode
set_seed(params)
params.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
if mode == 'extract_raw_feature':
extract_raw_features(params)
elif mode == 'run':
run(params)
| [
"[email protected]"
] | |
2fa0f6f2d17f04b73bcf80ec13cb4906693bb7f3 | a56623649791b945ae07d407533511902b67ebad | /src/installer/src/tortuga/db/dbManager.py | 1f81e2d146ecdf6855c4d2f51897b0247a325ef2 | [
"Apache-2.0"
] | permissive | joedborg/tortuga | 9792bc0ff1a9d7fa335ac41df9324dc502b80e0b | 5690e41c0c78602c195f699bf314c6c94ca7b619 | refs/heads/master | 2021-04-26T22:56:05.646538 | 2018-11-14T16:33:00 | 2018-11-14T16:33:00 | 123,898,555 | 0 | 0 | null | 2018-03-05T09:48:11 | 2018-03-05T09:48:11 | null | UTF-8 | Python | false | false | 7,768 | py | # Copyright 2008-2018 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=multiple-statements,no-member,no-name-in-module
# pylint: disable=not-callable
import configparser
from logging import getLogger
import os
import sqlalchemy
import sqlalchemy.orm
from tortuga.config.configManager import ConfigManager
from tortuga.exceptions.dbError import DbError
from tortuga.kit.registry import get_all_kit_installers
from tortuga.objects.tortugaObjectManager import TortugaObjectManager
# from .tables import get_all_table_mappers
from .sessionContextManager import SessionContextManager
from .models.base import ModelBase
from . import models # noqa pylint: disable=unused-import
logger = getLogger(__name__)
class DbManager(TortugaObjectManager):
"""
Class for db management.
:param engine: a SQLAlchemy database engine instance
:param init: a flag that is set when the database has not yet
been initialized. If this flag is set, not attempts
will be made to load/map kit tables. This flag is
cleared once the database has been initialized.
"""
def __init__(self, engine=None):
super(DbManager, self).__init__()
if not engine:
self._cm = ConfigManager()
self._dbConfig = self._refreshDbConfig()
engineURI = self.__getDbEngineURI()
if self._dbConfig['engine'] == 'sqlite' and \
not os.path.exists(self._dbConfig['path']):
# Ensure SQLite database file is created with proper permissions
fd = os.open(
self._dbConfig['path'], os.O_CREAT, mode=0o600)
os.close(fd)
self._engine = sqlalchemy.create_engine(engineURI)
else:
self._engine = engine
self.Session = sqlalchemy.orm.scoped_session(
sqlalchemy.orm.sessionmaker(bind=self.engine))
def _map_db_tables(self):
#
# Make sure all kit table mappers have been registered
#
for kit_installer_class in get_all_kit_installers():
kit_installer = kit_installer_class()
kit_installer.register_database_table_mappers()
#
# Map all tables that haven't yet been mapped
#
# for table_mapper in get_all_table_mappers():
# key = table_mapper.__name__
# if key not in self._mapped_tables.keys():
# logger.debug('Mapping table: {}'.format(key))
# self._mapped_tables[key] = table_mapper()
# self._mapped_tables[key].map(self)
pass
@property
def engine(self):
"""
SQLAlchemy Engine object property
"""
self._map_db_tables()
return self._engine
def session(self):
"""
Database session context manager
"""
return SessionContextManager(self)
def init_database(self):
#
# Create tables
#
self._map_db_tables()
try:
ModelBase.metadata.create_all(self.engine)
except Exception:
self.getLogger().exception('SQLAlchemy raised exception')
raise DbError('Check database settings or credentials')
@property
def metadata(self):
return self._metadata
def __getDbEngineURI(self):
dbPort = self._dbConfig['port']
dbHost = self._dbConfig['host']
engine = self._dbConfig['engine']
dbUser = self._dbConfig['username']
dbPassword = self._dbConfig['password']
if engine == 'sqlite':
engineURI = 'sqlite:///%s' % (self._dbConfig['path'])
else:
if dbUser is not None:
if dbPassword is not None:
userspec = '%s:%s' % (dbUser, dbPassword)
else:
userspec = dbUser
else:
userspec = None
if dbPort is not None:
hostspec = '%s:%s' % (dbHost, dbPort)
else:
hostspec = dbHost
engineURI = f'{engine}+pymysql' if engine == 'mysql' else engine
engineURI += '://'
if userspec is not None:
engineURI += f'{userspec}@'
            engineURI += f'{hostspec}/{self._cm.getDbSchema()}'
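        # Illustrative result for this branch (credentials/host made up):
        #   'mysql+pymysql://user:secret@dbhost:3306/<schema>'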
return engineURI
def _getDefaultDbEngine(self): \
# pylint: disable=no-self-use
return 'sqlite'
def _getDefaultDbHost(self): \
# pylint: disable=no-self-use
return 'localhost'
def _getDefaultDbPort(self, engine): \
# pylint: disable=no-self-use
# MySQL default port
if engine == 'mysql':
return 3306
return None
def _getDefaultDbUserName(self):
return self._cm.getDbUser()
def _getDefaultDbPassword(self):
if os.path.exists(self._cm.getDbPasswordFile()):
with open(self._cm.getDbPasswordFile()) as fp:
dbPassword = fp.read()
else:
dbPassword = None
return dbPassword
def _refreshDbConfig(self, cfg=None):
dbConfig = {}
if cfg is None:
cfg = configparser.ConfigParser()
cfg.read(os.path.join(self._cm.getKitConfigBase(), 'tortuga.ini'))
# Database engine
val = cfg.get('database', 'engine').strip().lower() \
if cfg.has_option('database', 'engine') else \
self._getDefaultDbEngine()
dbConfig['engine'] = val
if dbConfig['engine'] == 'sqlite':
# If database is sqlite, read the path
dbConfig['path'] = cfg.get('database', 'path') \
if cfg.has_section('database') and \
cfg.has_option('database', 'path') else \
os.path.join(self._cm.getEtcDir(),
self._cm.getDbSchema() + '.sqlite')
# Database host
val = cfg.get('database', 'host') \
if cfg.has_option('database', 'host') else \
self._getDefaultDbHost()
dbConfig['host'] = val
# Database port
val = cfg.get('database', 'port') \
if cfg.has_option('database', 'port') else None
dbConfig['port'] = val if val else self._getDefaultDbPort(
engine=dbConfig['engine'])
# Database username
val = cfg.get('database', 'username') \
if cfg.has_option('database', 'username') \
else self._getDefaultDbUserName()
dbConfig['username'] = val
# Database password
val = cfg.get('database', 'password') \
if cfg.has_option('database', 'password') \
else self._getDefaultDbPassword()
dbConfig['password'] = val
return dbConfig
def get_backend_opts(self): \
# pylint: disable=no-self-use
return {
'mysql_engine': 'InnoDB',
}
def getMetadataTable(self, table):
return self._metadata.tables[table]
def openSession(self):
""" Open db session. """
return self.Session()
def closeSession(self):
"""Close scoped_session."""
self.Session.remove()
| [
"[email protected]"
] | |
ab958f2bbc272631ae086eaa0bf5cfab4ad7ed6b | 747755833862b8e9d0f58ebc62879d6ef47c23c8 | /python-master (5)/python-master/test/tree.py | 2b41e05f60a4593aa20574d59fedcd8f65037cf4 | [] | no_license | tangsong41/stu_py | 98a06730dbca6e158cf81c18d98fe1317c1ae512 | d41507cd8dd9e8a54084872dfa15c36da443c02b | refs/heads/master | 2022-12-11T23:53:57.530946 | 2019-01-15T18:29:19 | 2019-01-15T18:29:19 | 163,953,100 | 3 | 0 | null | 2022-12-07T23:24:01 | 2019-01-03T09:41:29 | Jupyter Notebook | UTF-8 | Python | false | false | 2,287 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: tree.py
@time: 2017/5/22 6:57 PM
"""
import json
from collections import defaultdict
def tree():
"""
    Define a tree.
    A plain Python dict requires a key to be assigned before it can be used;
    collections.defaultdict conveniently gives every missing key a default value.
:return:
"""
return defaultdict(tree)
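# e.g. t = tree(); t['a']['b']['c'] = {} builds the intermediate dicts on the fly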
def test_users():
users = tree()
users['jack_1']['jack_2_1']['jack_3_1'] = {}
users['jack_1']['jack_2_1']['jack_3_2'] = {}
users['jack_1']['jack_2_2'] = {}
users['jack_1']['jack_2_2']['jack_3_1'] = {}
users['lily_1']['lily_2_1']['lily_3_1'] = {}
users['lily_1']['lily_2_2']['lily_3_2'] = {}
users['lily_1']['lily_2_3']['lily_3_3'] = {}
users['emma_1']['emma_2_1'] = {}
    # Print the raw structure of users
print users
    # Print users as formatted JSON
print(json.dumps(users, indent=4))
    # First level (keys of users)
print [i for i in users]
    # Second level (keys of users' child nodes)
print [i for i in users['jack_1']]
    # Third level (keys of users' grandchild nodes)
print [i for i in users['jack_1']['jack_2_1']]
l = [
{'u': 4, 'p': 1},
{'u': 10, 'p': 1},
{'u': 5, 'p': 1},
{'u': 6, 'p': 2},
{'u': 7, 'p': 2},
{'u': 8, 'p': 3},
{'u': 9, 'p': 3},
{'u': 11, 'p': 3},
{'u': 12, 'p': 3},
{'u': 13, 'p': 5},
{'u': 14, 'p': 6},
{'u': 15, 'p': 10},
{'u': 17, 'p': 10},
{'u': 19, 'p': 10},
{'u': 20, 'p': 15},
{'u': 21, 'p': 15},
{'u': 22, 'p': 17},
{'u': 23, 'p': 22},
]
def get_child_users(uid):
"""
获取子节点
:param uid:
:return:
"""
r = []
for i in l:
if i['p'] == uid:
r.append(i['u'])
return r
def test_team(uid):
"""
测试
:return:
"""
team = tree()
child_users = get_child_users(uid)
for uid1 in child_users:
team[uid1] = {}
child_users2 = get_child_users(uid1)
for uid2 in child_users2:
team[uid1][uid2] = {}
child_users3 = get_child_users(uid2)
for uid3 in child_users3:
team[uid1][uid2][uid3] = {}
print json.dumps(team, indent=4)
if __name__ == '__main__':
# test_users()
test_team(1)
| [
"[email protected]"
] | |
2660bada0256d0536dc0fda5e7fc399b226a73b2 | f360c6fe06fb9859039a5d39fad5815fd4aff372 | /community/community/urls.py | 12350ffdfa3f3e2714b7b85462baef166ec94221 | [] | no_license | gwjczwy/Django-CMS | d6297055957548997e86d383d54ae051062c8854 | f1a00d637c65809d606df3d4b96bcc594af09bd8 | refs/heads/master | 2020-04-24T21:57:44.818864 | 2019-03-03T08:41:50 | 2019-03-03T08:41:50 | 172,295,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | """community URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import include,url
urlpatterns = [
path('', include('accounts.urls')),
path('', include('posting.urls')),
path('admin/', admin.site.urls),
]
| [
"[email protected]"
] | |
a83ace36cf0328a163b1d22f951b524039dc30db | db1b327c4913c453b2fdd9dda661938c4abc5c0e | /abc/89/C.py | 1dad6e66a1695248dab6a8d662bbaa2d771d8383 | [] | no_license | oamam/atcoder | 0c129aab72e3c7090c9799fdf52f6e8119ef5238 | 658054b69b7586eed896484535dcfa1fef498e43 | refs/heads/master | 2021-06-26T09:01:12.389266 | 2020-10-30T02:01:11 | 2020-10-30T02:01:11 | 165,225,322 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | import itertools
def main():
N = int(input())
d = {'M': 0, 'A': 0, 'R': 0, 'C': 0, 'H': 0}
for _ in range(N):
S = input()
if S[0] in d:
d[S[0]] += 1
ans = 0
for a, b, c in list(itertools.combinations(d.keys(), 3)):
ans += d[a] * d[b] * d[c]
print(ans)
main()
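# Sanity check: names MASHIKE, RUMOI, HABORO, HOROKANAI give counts
# M=1, R=1, H=2, so the only valid letter triple {M, R, H} yields 1*1*2 = 2.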
| [
"[email protected]"
] | |
a2921655da4108ff25537002abd8ac828267b205 | 461bb1cd322c381be77cafdd2deb78223abfe79b | /tests/test_config_locator.py | 82611972aa5315bb85e8d38d511aa66e7c601536 | [
"MIT"
] | permissive | ryankanno/py-configurator | 7a3b205cae2d424b4671c1154ba97d5afa8809a6 | 749a4dc329d23d976712d241da13c1d942ad3d01 | refs/heads/master | 2020-06-02T10:38:51.554795 | 2015-03-16T22:45:56 | 2015-03-16T22:45:56 | 17,777,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,435 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from nose.tools import eq_
from nose.tools import ok_
import os
from py_configurator import Locator
import unittest
class TestLocator(unittest.TestCase):
def test_locator_construction(self):
config_name = 'foo.bar'
local_dir = './'
system_dir = '/foo/bar/tmp'
env_key = 'CONFIG_KEY'
locator = Locator(
env_key=env_key,
config_name=config_name,
local_dir=local_dir,
system_dir=system_dir)
eq_(locator.config_name, config_name)
eq_(locator.local_dir, local_dir)
eq_(locator.env_key, env_key)
config_name_2 = 'foo.bar.2'
local_dir_2 = '/foo/bar/tmp/2'
env_key_2 = 'CONFIG_KEY_2'
locator.config_name = config_name_2
locator.local_dir = local_dir_2
locator.env_key = env_key_2
eq_(locator.config_name, config_name_2)
eq_(locator.local_dir, local_dir_2)
eq_(locator.env_key, env_key_2)
def test_config_locator_get_config_search_paths(self):
config_name = 'foo.bar'
local_dir = './'
system_dir = '/foo/bar/tmp'
env_key = 'CONFIG_KEY'
env_key_path = '/bar/config.path'
os.environ[env_key] = env_key_path
locator = Locator(
env_key=env_key,
config_name=config_name,
local_dir=local_dir,
system_dir=system_dir)
config_search_paths = locator.get_config_paths()
ok_(env_key_path in config_search_paths)
ok_(os.path.join('./', config_name) in config_search_paths)
ok_('/foo/bar/tmp/foo.bar' in config_search_paths)
ok_(os.path.join(os.path.expanduser("~"), config_name) in
config_search_paths)
def test_config_locator_get_config(self):
config_name = 'foobar.ini'
local_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'data')
system_dir = '/foo/bar/tmp'
env_key = 'CONFIG_KEY'
env_key_path = '/bar/config.path'
os.environ[env_key] = env_key_path
locator = Locator(
env_key=env_key,
config_name=config_name,
local_dir=local_dir,
system_dir=system_dir)
config = locator.get_config()
ok_(config is not None)
ok_(config.get('Foo.Bar') == "1")
# vim: filetype=python
| [
"[email protected]"
] | |
bdffbc8830f8b59e9e1ff61aa4da2822cdd77638 | a1c5e68d93cd7d7a5620c34c0567b006fa33cd38 | /.eggs/PyScaffold-3.0.3-py3.6.egg/pyscaffold/extensions/travis.py | 3d97d584f36aa204824901ac55d8ded1f6778621 | [
"MIT"
] | permissive | csm-adapt/citrine_converters | 4f4e3e57379460f0e1205bf643e6251b36ade772 | 32eef5f5e733e7ab9031b2f129bb23b90cedc6bf | refs/heads/master | 2021-01-21T19:13:50.156881 | 2020-01-05T05:48:07 | 2020-01-05T05:48:07 | 92,125,603 | 0 | 2 | BSD-2-Clause | 2018-08-13T22:39:56 | 2017-05-23T03:36:09 | Python | UTF-8 | Python | false | false | 1,367 | py | # -*- coding: utf-8 -*-
"""
Extension that generates configuration and script files for Travis CI.
"""
from __future__ import absolute_import
from ..templates import travis, travis_install
from ..api import Extension
from ..api import helpers
class Travis(Extension):
"""Generate Travis CI configuration files"""
def activate(self, actions):
"""Activate extension
Args:
actions (list): list of actions to perform
Returns:
list: updated list of actions
"""
return self.register(
actions,
self.add_files,
after='define_structure')
def add_files(self, struct, opts):
"""Add some Travis files to structure
Args:
struct (dict): project representation as (possibly) nested
:obj:`dict`.
opts (dict): given options, see :obj:`create_project` for
an extensive list.
Returns:
struct, opts: updated project representation and options
"""
files = {
'.travis.yml': (travis(opts), helpers.NO_OVERWRITE),
'tests': {
'travis_install.sh': (travis_install(opts),
helpers.NO_OVERWRITE)
}
}
return helpers.merge(struct, {opts['project']: files}), opts
| [
"[email protected]"
] | |
d8bdb2f6559ca6362e0ef57c900953ab34883af0 | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/ThirteenTeV/GluGluSpin0/GluGluSpin0ToZGamma_ZToLL_W_0p014_M_750_TuneCUEP8M1_13TeV_pythia8_cfi.py | f60fb1b53fc8fc0548de2a77ed013835f321393b | [] | no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 1,393 | py |
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
comEnergy = cms.double(13000.0),
crossSection = cms.untracked.double(1.095e-3),
filterEfficiency = cms.untracked.double(1),
maxEventsToPrint = cms.untracked.int32(0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(1),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'HiggsSM:gg2H = on',
'25:m0 = 750',
'25:mWidth = 0.105',
'25:onMode = off',
'25:OnIfMatch = 23 22',
'25:doForceWidth = on',
'Higgs:clipWings = on',
'Higgs:clipWings = 10',
'23:onMode = off',
'23:OnIfMatch = 11 11',
'23:OnIfMatch = 13 13',
'23:OnIfMatch = 15 15',
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters',
)
)
)
ProductionFilterSequence = cms.Sequence(generator)
| [
"[email protected]"
] | |
4a2a7043ddff842f7ad9b18905ebc72ba0379d26 | 9975b2681a079b46d901b994d2bb50609d62791a | /StringSplitAndJoin.py | 5a97e7a31950412b8579f1742aeb40604a4a44f8 | [] | no_license | elvinyeka/Hakker_Rank | 1b0d4aae7a6f4c9ac08f8948be4e5740950057c9 | ab8c42a3e373d4e4460a6c261b77bde65cf56bfb | refs/heads/master | 2022-12-17T00:32:41.279518 | 2020-09-14T12:09:31 | 2020-09-14T12:09:31 | 294,422,920 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | def split_and_join(line):
result = ""
for i in line:
if i == " ":
result += "-"
else:
result += i
return result
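    # Behavior-equivalent one-liner: return "-".join(line.split(" "))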
if __name__ == '__main__':
line = input()
result = split_and_join(line)
print(result)
# https://www.hackerrank.com/challenges/python-string-split-and-join/problem | [
"[email protected]"
] | |
a1e65e5164610be033d72ac8d03ab0a25093dbfa | 3851d5eafcc5fd240a06a7d95a925518412cafa0 | /Django_Code/gs129/gs129/asgi.py | eba9818d2ad8b07e93af3c77180c5cc2d4df5d63 | [] | no_license | Ikshansaleem/DjangoandRest | c0fafaecde13570ffd1d5f08019e04e1212cc2f3 | 0ccc620ca609b4ab99a9efa650b5893ba65de3c5 | refs/heads/master | 2023-01-31T04:37:57.746016 | 2020-12-10T06:27:24 | 2020-12-10T06:27:24 | 320,180,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | """
ASGI config for gs129 project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gs129.settings')
application = get_asgi_application()
| [
"[email protected]"
] | |
e46bc6e3189a1504a348b21db1fec3ed510eeda1 | ab1d659a36a7af22bd65b8a91f5059cc24eb7a01 | /bigfish_functions/Lowest.py | a19bab32e7d4762357a0df46ccca153cec09d610 | [] | no_license | xingetouzi/Bigfish | e1afc23c59dc139418678611bf44d00e2e67a09d | 7b18192149f0e2c42c8491f9bc7ea5dede11398b | refs/heads/master | 2021-01-21T04:55:34.255633 | 2016-06-06T03:18:15 | 2016-06-06T03:18:15 | 48,084,560 | 4 | 3 | null | 2016-06-06T03:18:15 | 2015-12-16T03:18:38 | Python | UTF-8 | Python | false | false | 730 | py | # -*- coding:utf-8 -*-
# Compute the lowest value of a time series (e.g. open, close, high, low, returns)
# Input parameters:
#   length  window length, int
#   price   time-series data (sequence array), defaults to the Low price series
#   offset  shift, i.e. how many bars back to start from, int, default 0
# If the current bar count cannot support the computation (it requires BarNum > length + offset), return None
def Lowest(length, price=None, offset=0):
if length <= 0:
return None
if price is None:
price = Low
if BarNum <= length + offset:
return None
else:
min_ = price[offset]
for i in range(length - 1):
min_ = min(price[i + offset + 1], min_)
return min_
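# Illustrative strategy usage (Low and BarNum are Bigfish platform globals):
#     ll = Lowest(20)               # lowest Low over the last 20 bars
#     ll_prev = Lowest(20, Low, 5)  # same window, ending 5 bars back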
| [
"[email protected]"
] | |
bb38a9043b0745fa075e2646e3f8c1a003e7c6a5 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-network/azure/mgmt/network/v2018_11_01/models/ddos_protection_plan_py3.py | b29926c6d258440a7323c347d31c5c0c172bfb8d | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 3,110 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DdosProtectionPlan(Model):
"""A DDoS protection plan in a resource group.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:ivar resource_guid: The resource GUID property of the DDoS protection
plan resource. It uniquely identifies the resource, even if the user
changes its name or migrate the resource across subscriptions or resource
groups.
:vartype resource_guid: str
:ivar provisioning_state: The provisioning state of the DDoS protection
plan resource. Possible values are: 'Succeeded', 'Updating', 'Deleting',
and 'Failed'.
:vartype provisioning_state: str
:ivar virtual_networks: The list of virtual networks associated with the
DDoS protection plan resource. This list is read-only.
:vartype virtual_networks:
list[~azure.mgmt.network.v2018_11_01.models.SubResource]
:ivar etag: A unique read-only string that changes whenever the resource
is updated.
:vartype etag: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'resource_guid': {'readonly': True},
'provisioning_state': {'readonly': True},
'virtual_networks': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'virtual_networks': {'key': 'properties.virtualNetworks', 'type': '[SubResource]'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, location: str=None, tags=None, **kwargs) -> None:
super(DdosProtectionPlan, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = location
self.tags = tags
self.resource_guid = None
self.provisioning_state = None
self.virtual_networks = None
self.etag = None
| [
"[email protected]"
] | |
80a431819c1316ffedc32848b1ea33f987e580a9 | ba54ce473e9748f9c85e88142e749efe36e2a858 | /otp/ai/Original_MagicWordManager.py | b4667724caadf42390baafe5433d9cd7ea299046 | [
"BSD-3-Clause"
] | permissive | C0MPU73R/pirates-online-classic | 8256747a464160bd138a09d54b3d5ec6aee930a1 | 2a0b69e2b0d2eb83643f9ff0fbeccca28fdd2e5c | refs/heads/master | 2023-03-24T07:45:41.722943 | 2021-03-21T01:49:10 | 2021-03-21T01:49:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,114 | py | # THIS IS DISNEYS MAGICWORDMANAGER.
# DO NOT DELETE.
from pandac.PandaModules import *
from direct.showbase import GarbageReport, ContainerReport, MessengerLeakDetector
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from direct.showbase.InputStateGlobal import inputState
from direct.task import Task
from otp.avatar import Avatar
import string
from direct.showbase import PythonUtil
from direct.showbase.PythonUtil import Functor, DelayedCall, ScratchPad
from otp.otpbase import OTPGlobals
from direct.distributed.ClockDelta import *
class MagicWordManager(DistributedObject.DistributedObject):
notify = DirectNotifyGlobal.directNotify.newCategory('MagicWordManager')
neverDisable = 1
GameAvatarClass = None
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, cr)
self.shownFontNode = None
self.csShown = 0
self.guiPopupShown = 0
def generate(self):
DistributedObject.DistributedObject.generate(self)
self.accept('magicWord', self.b_setMagicWord)
self.autoMagicWordEvent = localAvatar.getArrivedOnDistrictEvent()
if localAvatar.isGeneratedOnDistrict():
self.doLoginMagicWords()
else:
self.accept(self.autoMagicWordEvent, self.doLoginMagicWords)
def doLoginMagicWords(self):
pass
def disable(self):
self.ignore(self.autoMagicWordEvent)
del self.autoMagicWordEvent
self.ignore('magicWord')
self.hidefont()
DistributedObject.DistributedObject.disable(self)
def setMagicWord(self, word, avId, zoneId):
try:
self.doMagicWord(word, avId, zoneId)
except:
response = PythonUtil.describeException(backTrace = 1)
self.notify.warning('Ignoring error in magic word:\n%s' % response)
self.setMagicWordResponse(response)
def wordIs(self, word, w):
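        # True when word is exactly w or starts with 'w ' plus arguments,
        # e.g. wordIs('~sleep 0.5', '~sleep') -> True.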
return word == w or word[:len(w) + 1] == '%s ' % w
def getWordIs(self, word):
return Functor(self.wordIs, word)
def doMagicWord(self, word, avId, zoneId):
wordIs = self.getWordIs(word)
print word
if wordIs('~oobe'):
base.oobe()
elif wordIs('~oobeCull'):
base.oobeCull()
elif wordIs('~tex'):
base.toggleTexture()
elif wordIs('~wire'):
base.toggleWireframe()
elif wordIs('~showfont'):
self.showfont(word[9:])
elif wordIs('~hidefont'):
self.hidefont()
elif wordIs('~guiPopup'):
self.toggleGuiPopup()
elif wordIs('~showCS') or wordIs('~showcs'):
bitmask = self.getCSBitmask(word[7:])
render.showCS(bitmask)
self.csShown = 1
elif wordIs('~hideCS') or wordIs('~hidecs'):
bitmask = self.getCSBitmask(word[7:])
render.hideCS(bitmask)
self.csShown = 0
elif wordIs('~cs'):
bitmask = self.getCSBitmask(word[3:])
if self.csShown:
render.hideCS(bitmask)
self.csShown = 0
else:
render.showCS(bitmask)
self.csShown = 1
elif wordIs('~showShadowCollisions'):
self.showShadowCollisions()
elif wordIs('~hideShadowCollisions'):
self.hideShadowCollisions()
elif wordIs('~showCollisions'):
self.showCollisions()
elif wordIs('~hideCollisions'):
self.hideCollisions()
elif wordIs('~showCameraCollisions'):
self.showCameraCollisions()
elif wordIs('~hideCameraCollisions'):
self.hideCameraCollisions()
elif wordIs('~collidespam'):
n = Notify.ptr().getCategory(':collide')
if hasattr(self, '_collideSpamSeverity'):
n.setSeverity(self._collideSpamSeverity)
del self._collideSpamSeverity
else:
self._collideSpamSeverity = n.getSeverity()
n.setSeverity(NSSpam)
elif wordIs('~notify'):
args = word.split()
n = Notify.ptr().getCategory(args[1])
n.setSeverity({
'error': NSError,
'warning': NSWarning,
'info': NSInfo,
'debug': NSDebug,
'spam': NSSpam }[args[2]])
elif wordIs('~stress'):
factor = word[7:]
if factor:
factor = float(factor)
LOD.setStressFactor(factor)
response = 'Set LOD stress factor to %s' % factor
else:
factor = LOD.getStressFactor()
response = 'LOD stress factor is %s' % factor
self.setMagicWordResponse(response)
elif wordIs('~for'):
self.forAnother(word, avId, zoneId)
elif wordIs('~badname'):
word = '~for %s ~badname' % word[9:]
print 'word is %s' % word
self.forAnother(word, avId, zoneId)
elif wordIs('~avId'):
self.setMagicWordResponse(str(localAvatar.doId))
elif wordIs('~doId'):
name = string.strip(word[6:])
objs = self.identifyDistributedObjects(name)
if len(objs) == 0:
response = '%s is unknown.' % name
else:
response = ''
for (name, obj) in objs:
response += '\n%s %d' % (name, obj.doId)
response = response[1:]
self.setMagicWordResponse(response)
elif wordIs('~exec'):
from otp.chat import ChatManager
ChatManager.ChatManager.execChat = 1
elif wordIs('~run'):
self.toggleRun()
elif wordIs('~runFaster'):
if config.GetBool('want-running', 1):
args = word.split()
if len(args) > 1:
base.debugRunningMultiplier = float(args[1])
else:
base.debugRunningMultiplier = 10
inputState.set('debugRunning', True)
elif wordIs('~who'):
avIds = []
for av in Avatar.Avatar.ActiveAvatars:
if hasattr(av, 'getFriendsList'):
avIds.append(av.doId)
self.d_setWho(avIds)
elif wordIs('~sync'):
tm = self.cr.timeManager
if tm == None:
response = 'No TimeManager.'
self.setMagicWordResponse(response)
else:
tm.extraSkew = 0.0
skew = string.strip(word[5:])
if skew != '':
tm.extraSkew = float(skew)
globalClockDelta.clear()
tm.handleHotkey()
elif wordIs('~period'):
timeout = string.strip(word[7:])
if timeout != '':
seconds = int(timeout)
self.cr.stopPeriodTimer()
self.cr.resetPeriodTimer(seconds)
self.cr.startPeriodTimer()
if self.cr.periodTimerExpired:
response = 'Period timer has expired.'
elif self.cr.periodTimerStarted:
elapsed = globalClock.getFrameTime() - self.cr.periodTimerStarted
secondsRemaining = self.cr.periodTimerSecondsRemaining - elapsed
response = 'Period timer expires in %s seconds.' % int(secondsRemaining)
else:
response = 'Period timer not set.'
self.setMagicWordResponse(response)
elif wordIs('~DIRECT'):
args = word.split()
fEnableLight = 0
if len(args) > 1:
if direct and args[1] == 'CAM':
direct.enable()
taskMgr.removeTasksMatching('updateSmartCamera*')
camera.wrtReparentTo(render)
direct.cameraControl.enableMouseFly()
self.setMagicWordResponse('Enabled DIRECT camera')
return None
elif args[1] == 'LIGHT':
fEnableLight = 1
base.startTk()
from direct.directtools import DirectSession
if fEnableLight:
direct.enableLight()
else:
direct.enable()
self.setMagicWordResponse('Enabled DIRECT')
elif wordIs('~TT'):
if not direct:
return None
args = word.split()
if len(args) > 1:
if args[1] == 'CAM':
direct.cameraControl.disableMouseFly()
camera.wrtReparentTo(base.localAvatar)
base.localAvatar.startUpdateSmartCamera()
self.setMagicWordResponse('Disabled DIRECT camera')
return None
direct.disable()
camera.wrtReparentTo(base.localAvatar)
base.localAvatar.startUpdateSmartCamera()
self.setMagicWordResponse('Disabled DIRECT')
elif wordIs('~net'):
if self.cr.networkPlugPulled():
self.cr.restoreNetworkPlug()
self.cr.startHeartbeat()
response = 'Network restored.'
else:
self.cr.pullNetworkPlug()
self.cr.stopHeartbeat()
response = 'Network disconnected.'
self.setMagicWordResponse(response)
elif wordIs('~disconnect'):
base.cr.distributedDistrict.sendUpdate('broadcastMessage')
elif wordIs('~model'):
args = word.split()
path = args[1]
model = loader.loadModel(path)
model.reparentTo(localAvatar)
model.wrtReparentTo(render)
self.setMagicWordResponse('loaded %s' % path)
elif wordIs('~axis'):
axis = loader.loadModel('models/misc/xyzAxis.bam')
axis.reparentTo(render)
axis.setPos(base.localAvatar, 0, 0, 0)
axis.setHpr(render, 0, 0, 0)
axis10 = loader.loadModel('models/misc/xyzAxis.bam')
axis10.reparentTo(render)
axis10.setPos(base.localAvatar, 0, 0, 0)
axis10.setScale(10)
axis10.setHpr(render, 0, 0, 0)
axis10.setColorScale(1, 1, 1, 0.4)
axis10.setTransparency(1)
elif wordIs('~clearAxes') or wordIs('~clearAxis'):
render.findAllMatches('**/xyzAxis.egg').detach()
elif wordIs('~myAxis'):
if hasattr(self, 'myAxis'):
self.myAxis.detachNode()
del self.myAxis
else:
self.myAxis = loader.loadModel('models/misc/xyzAxis.bam')
self.myAxis.reparentTo(localAvatar)
elif wordIs('~osd'):
onScreenDebug.enabled = not onScreenDebug.enabled
elif wordIs('~osdScale'):
args = word.split()
defScale = 0.05
if len(args) > 1:
scale = float(args[1])
else:
scale = 1.0
onScreenDebug.onScreenText.setScale(defScale * scale)
elif wordIs('~osdTaskMgr'):
if taskMgr.osdEnabled():
taskMgr.stopOsd()
else:
if not onScreenDebug.enabled:
onScreenDebug.enabled = True
taskMgr.startOsd()
elif wordIs('~fps'):
self.doFps(word, avId, zoneId)
elif wordIs('~sleep'):
args = word.split()
if len(args) > 1:
s = float(args[1])
base.setSleep(s)
response = 'sleeping %s' % s
else:
base.setSleep(0.0)
response = 'not sleeping'
self.setMagicWordResponse(response)
elif wordIs('~objects'):
args = word.split()
from direct.showbase import ObjectReport
report = ObjectReport.ObjectReport('client ~objects')
if 'all' in args:
self.notify.info('printing full object set...')
report.getObjectPool().printObjsByType(printReferrers = 'ref' in args)
if hasattr(self, 'baselineObjReport'):
self.notify.info('calculating diff from baseline ObjectReport...')
self.lastDiff = self.baselineObjReport.diff(report)
self.lastDiff.printOut(full='diff' in args or 'dif' in args)
if 'baseline' in args or not hasattr(self, 'baselineObjReport'):
self.notify.info('recording baseline ObjectReport...')
if hasattr(self, 'baselineObjReport'):
self.baselineObjReport.destroy()
self.baselineObjReport = report
self.setMagicWordResponse('objects logged')
elif wordIs('~containers'):
args = word.split()
limit = 30
if 'full' in args:
limit = None
ContainerReport.ContainerReport('~containers', log = True, limit = limit, threaded = True)
elif wordIs('~garbage'):
args = word.split()
full = 'full' in args
safeMode = 'safe' in args
GarbageReport.GarbageLogger('~garbage', fullReport = full, threaded = True, safeMode = safeMode)
elif wordIs('~guicreates'):
base.printGuiCreates = True
self.setMagicWordResponse('printing gui creation stacks')
elif wordIs('~creategarbage'):
GarbageReport._createGarbage()
elif wordIs('~leakTask'):
def leakTask(task):
return task.cont
taskMgr.add(leakTask, uniqueName('leakedTask'))
leakTask = None
elif wordIs('~leakmessage'):
MessengerLeakDetector._leakMessengerObject()
self.down_setMagicWordResponse(senderId, 'messenger leak object created')
elif wordIs('~pstats'):
args = word.split()
hostname = None
port = None
if len(args) > 1:
hostname = args[1]
if len(args) > 2:
port = int(args[2])
base.wantStats = 1
Task.TaskManager.pStatsTasks = 1
result = base.createStats(hostname, port)
connectionName = '%s' % hostname
if port is not None:
connectionName += ':%s' % port
if result:
response = 'connected client pstats to %s' % connectionName
else:
response = 'could not connect pstats to %s' % connectionName
self.setMagicWordResponse(response)
elif wordIs('~profile'):
args = word.split()
if len(args) > 1:
num = int(args[1])
else:
num = 5
base.profileFrames(num)
self.setMagicWordResponse('profiling %s client frames...' % num)
elif wordIs('~taskprofile'):
args = word.split()
wasOn = bool(taskMgr.getProfileTasks())
if len(args) > 1:
setting = bool(int(args[1]))
else:
setting = not taskMgr.getProfileTasks()
taskMgr.setProfileTasks(setting)
self.setMagicWordResponse('task profiling %s' % (choice(setting, 'ON', 'OFF'), choice(wasOn == setting, ' already', '')))
elif wordIs('~logtaskprofiles'):
args = word.split()
if len(args) > 1:
name = args[1]
else:
name = None
taskMgr.logTaskProfiles(name)
response = 'logged task profiles%s' % choice(name, ' for %s' % name, '')
self.setMagicWordResponse(response)
elif wordIs('~taskprofileflush'):
args = word.split()
if len(args) > 1:
name = args[1]
else:
name = None
taskMgr.flushTaskProfiles(name)
response = 'flushed AI task profiles%s' % choice(name, ' for %s' % name, '')
self.setMagicWordResponse(response)
elif wordIs('~objectcount'):
base.cr.printObjectCount()
self.setMagicWordResponse('logging client distributed object count...')
elif wordIs('~taskmgr'):
print taskMgr
self.setMagicWordResponse('logging client taskMgr...')
elif wordIs('~jobmgr'):
print jobMgr
self.setMagicWordResponse('logging client jobMgr...')
elif wordIs('~jobtime'):
args = word.split()
if len(args) > 1:
time = float(args[1])
else:
time = None
response = ''
if time is None:
time = jobMgr.getDefaultTimeslice()
response = 'reset client jobMgr timeslice to %s ms' % time
else:
response = 'set client jobMgr timeslice to %s ms' % time
time = time / 1000.0
jobMgr.setTimeslice(time)
self.setMagicWordResponse(response)
elif wordIs('~detectleaks'):
started = self.cr.startLeakDetector()
self.setMagicWordResponse(choice(started, 'leak detector started', 'leak detector already started'))
elif wordIs('~taskthreshold'):
args = word.split()
            if len(args) > 1:
threshold = float(args[1])
else:
threshold = None
response = ''
if threshold is None:
threshold = taskMgr.DefTaskDurationWarningThreshold
response = 'reset task duration warning threshold to %s' % threshold
else:
response = 'set task duration warning threshold to %s' % threshold
taskMgr.setTaskDurationWarningThreshold(threshold)
self.setMagicWordResponse(response)
elif wordIs('~messenger'):
print messenger
self.setMagicWordResponse('logging client messenger...')
elif wordIs('~clientcrash'):
DelayedCall(Functor(self.notify.error, '~clientcrash: simulating a client crash'))
elif wordIs('~badDelete'):
doId = 0
while doId in base.cr.doId2do:
doId += 1
DelayedCall(Functor(base.cr.deleteObjectLocation, ScratchPad(doId = doId), 1, 1))
self.setMagicWordResponse('doing bad delete')
elif wordIs('~idTags'):
messenger.send('nameTagShowAvId', [])
base.idTags = 1
elif wordIs('~nameTags'):
messenger.send('nameTagShowName', [])
base.idTags = 0
elif wordIs('~flush'):
base.cr.doDataCache.flush()
base.cr.cache.flush()
self.setMagicWordResponse('client object and data caches flushed')
else:
return 0
return 1
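    # Note added for clarity: the handler above returns 1 when the magic word
    # was recognized and processed, and 0 so the caller can try other handlers.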
def toggleRun(self):
if config.GetBool('want-running', 1):
            inputState.set('debugRunning', not inputState.isSet('debugRunning'))
def d_setMagicWord(self, magicWord, avId, zoneId):
self.sendUpdate('setMagicWord', [
magicWord,
avId,
zoneId,
base.cr.userSignature])
def b_setMagicWord(self, magicWord, avId = None, zoneId = None):
if self.cr.wantMagicWords:
if avId == None:
avId = base.localAvatar.doId
if zoneId == None:
try:
zoneId = self.cr.playGame.getPlace().getZoneId()
except:
pass
if zoneId == None:
zoneId = 0
self.d_setMagicWord(magicWord, avId, zoneId)
if magicWord.count('~crash'):
args = magicWord.split()
if len(args) > 1:
base.launcher.setPandaErrorCode(args[1])
self.notify.info('Simulating client crash: exit error = %s' % args[1])
else:
self.notify.info('Simulating client crash')
import sys
sys.exit()
self.setMagicWord(magicWord, avId, zoneId)
def setMagicWordResponse(self, response):
base.localAvatar.setChatAbsolute(response, CFSpeech | CFTimeout)
def d_setWho(self, avIds):
self.sendUpdate('setWho', [avIds])
def forAnother(self, word, avId, zoneId):
b = 5
while word[b:b + 2] != ' ~':
b += 1
if b >= len(word):
self.setMagicWordResponse('No next magic word!')
return
nextWord = word[b + 1:]
name = string.strip(word[5:b])
id = self.identifyAvatar(name)
if id == None:
self.setMagicWordResponse("Don't know who %s is." % name)
return
self.d_setMagicWord(nextWord, id, zoneId)
def identifyAvatar(self, name):
self.notify.error('Pure virtual - please override me.')
def identifyDistributedObjects(self, name):
result = []
lowerName = string.lower(name)
for obj in self.cr.doId2do.values():
className = obj.__class__.__name__
try:
name = obj.getName()
except:
name = className
if string.lower(name) == lowerName or string.lower(className) == lowerName or string.lower(className) == 'distributed' + lowerName:
result.append((name, obj))
return result
def getCSBitmask(self, str):
words = string.lower(str).split()
if len(words) == 0:
return None
invalid = ''
bitmask = BitMask32.allOff()
for w in words:
if w == 'wall':
bitmask |= OTPGlobals.WallBitmask
elif w == 'floor':
bitmask |= OTPGlobals.FloorBitmask
elif w == 'cam':
bitmask |= OTPGlobals.CameraBitmask
elif w == 'catch':
bitmask |= OTPGlobals.CatchBitmask
elif w == 'ghost':
bitmask |= OTPGlobals.GhostBitmask
elif w == 'pet':
bitmask |= OTPGlobals.PetBitmask
elif w == 'furniture':
bitmask |= OTPGlobals.FurnitureSideBitmask | OTPGlobals.FurnitureTopBitmask | OTPGlobals.FurnitureDragBitmask
elif w == 'furnitureside':
bitmask |= OTPGlobals.FurnitureSideBitmask
elif w == 'furnituretop':
bitmask |= OTPGlobals.FurnitureTopBitmask
elif w == 'furnituredrag':
bitmask |= OTPGlobals.FurnitureDragBitmask
elif w == 'pie':
bitmask |= OTPGlobals.PieBitmask
else:
try:
bitmask |= BitMask32.bit(int(w))
print bitmask
except ValueError:
invalid += ' ' + w
if invalid:
self.setMagicWordResponse('Unknown CS keyword(s): %s' % invalid)
return bitmask
def getFontByName(self, fontname):
if fontname == 'default':
return TextNode.getDefaultFont()
elif fontname == 'interface':
return OTPGlobals.getInterfaceFont()
elif fontname == 'sign':
return OTPGlobals.getSignFont()
else:
return None
def showfont(self, fontname):
fontname = string.strip(string.lower(fontname))
font = self.getFontByName(fontname)
if font == None:
self.setMagicWordResponse('Unknown font: %s' % fontname)
return None
if not isinstance(font, DynamicTextFont):
self.setMagicWordResponse('Font %s is not dynamic.' % fontname)
return None
self.hidefont()
self.shownFontNode = aspect2d.attachNewNode('shownFont')
tn = TextNode('square')
tn.setCardActual(0.0, 1.0, -1.0, 0.0)
tn.setFrameActual(0.0, 1.0, -1.0, 0.0)
tn.setCardColor(1, 1, 1, 0.5)
tn.setFrameColor(1, 1, 1, 1)
tn.setFont(font)
tn.setText(' ')
numXPages = 2
numYPages = 2
pageScale = 0.8
pageMargin = 0.1
numPages = font.getNumPages()
x = 0
y = 0
for pi in range(numPages):
page = font.getPage(pi)
tn.setCardTexture(page)
np = self.shownFontNode.attachNewNode(tn.generate())
np.setScale(pageScale)
            np.setPos((float(x) / numXPages) * 2 - 1 + pageMargin, 0, 1 - (float(y) / numYPages) * 2 - pageMargin)
x += 1
if x >= numXPages:
y += 1
x = 0
def hidefont(self):
if self.shownFontNode != None:
self.shownFontNode.removeNode()
self.shownFontNode = None
def showShadowCollisions(self):
try:
base.shadowTrav.showCollisions(render)
except:
self.setMagicWordResponse('CollisionVisualizer is not compiled in.')
def hideShadowCollisions(self):
base.shadowTrav.hideCollisions()
def showCollisions(self):
try:
base.cTrav.showCollisions(render)
except:
self.setMagicWordResponse('CollisionVisualizer is not compiled in.')
def hideCollisions(self):
base.cTrav.hideCollisions()
def showCameraCollisions(self):
try:
localAvatar.ccTrav.showCollisions(render)
except:
self.setMagicWordResponse('CollisionVisualizer is not compiled in.')
def hideCameraCollisions(self):
localAvatar.ccTrav.hideCollisions()
def doFps(self, word, avId, zoneId):
args = word.split()
response = None
if len(args) == 1 or args[1] == 'normal':
if globalClock.getMode() != ClockObject.MNormal:
globalClock.setMode(ClockObject.MNormal)
response = 'Normal frame rate set.'
else:
base.setFrameRateMeter(not base.frameRateMeter)
elif args[1] == 'forced':
fps = float(args[2])
globalClock.setMode(ClockObject.MForced)
globalClock.setDt(1.0 / fps)
response = 'Frame rate forced to %s fps.' % fps
base.setFrameRateMeter(1)
elif args[1] == 'degrade':
factor = float(args[2])
globalClock.setMode(ClockObject.MDegrade)
globalClock.setDegradeFactor(factor)
response = 'Frame rate degraded by factor of %s.' % factor
base.setFrameRateMeter(1)
elif args[1][-1] == '%':
percent = float(args[1][:-1])
if percent == 100:
globalClock.setMode(ClockObject.MNormal)
response = 'Normal frame rate set.'
else:
globalClock.setMode(ClockObject.MDegrade)
globalClock.setDegradeFactor(100.0 / percent)
response = 'Frame rate degraded to %s percent.' % percent
base.setFrameRateMeter(1)
else:
try:
fps = float(args[1])
except:
fps = None
if fps != None:
globalClock.setMode(ClockObject.MForced)
globalClock.setDt(1.0 / fps)
response = 'Frame rate forced to %s fps.' % fps
base.setFrameRateMeter(1)
else:
                response = 'Unknown fps command: %s' % args[1]
if base.frameRateMeter:
globalClock.setAverageFrameRateInterval(ConfigVariableDouble('average-frame-rate-interval').getValue())
if response != None:
self.setMagicWordResponse(response)
def identifyAvatar(self, name):
for av in Avatar.Avatar.ActiveAvatars:
if isinstance(av, self.GameAvatarClass) and av.getName() == name:
return av.doId
lowerName = string.lower(name)
for av in Avatar.Avatar.ActiveAvatars:
if isinstance(av, self.GameAvatarClass) and string.strip(string.lower(av.getName())) == lowerName:
return av.doId
try:
avId = int(name)
return avId
except:
pass
return None
def toggleGuiPopup(self):
if self.guiPopupShown:
base.mouseWatcherNode.hideRegions()
self.guiPopupShown = 0
else:
base.mouseWatcherNode.showRegions(render2d, 'gui-popup', 0)
self.guiPopupShown = 1
def magicWord(mw):
messenger.send('magicWord', [mw])
import __builtin__
__builtin__.magicWord = magicWord
| [
"[email protected]"
] | |
baa63fd29bd69bce20182d81ba8ea10fc20aeaef | 55c24645dd63a1c41037dcfb9fb45bc7bcdea4be | /venv/lib/python3.7/site-packages/dotenv/__init__.py | d412cb7ab9ae9061a2a73220df6a6d545add030c | [] | no_license | abdullah-nawaz/flask-boilerplate | 7c42801a21ee3e6a647cc8a7d92e0285f8e86cad | 01bc7fe1140e8ec613de4a38546a07ddfbdbd254 | refs/heads/master | 2022-12-02T05:06:08.297759 | 2020-06-24T21:36:32 | 2020-06-24T21:36:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | from .compat import IS_TYPE_CHECKING
from .main import load_dotenv, get_key, set_key, unset_key, find_dotenv, dotenv_values
if IS_TYPE_CHECKING:
from typing import Any, Optional
def load_ipython_extension(ipython):
# type: (Any) -> None
from .ipython import load_ipython_extension
load_ipython_extension(ipython)
def get_cli_string(path=None, action=None, key=None, value=None, quote=None):
# type: (Optional[str], Optional[str], Optional[str], Optional[str], Optional[str]) -> str
"""Returns a string suitable for running as a shell script.
    Useful for converting arguments passed to a fabric task
    into a string to be passed to a `local` or `run` command.
"""
command = ["dotenv"]
if quote:
command.append("-q %s" % quote)
if path:
command.append("-f %s" % path)
if action:
command.append(action)
if key:
command.append(key)
if value:
if " " in value:
command.append('"%s"' % value)
else:
command.append(value)
return " ".join(command).strip()
__all__ = [
"get_cli_string",
"load_dotenv",
"dotenv_values",
"get_key",
"set_key",
"unset_key",
"find_dotenv",
"load_ipython_extension",
]
| [
"[email protected]"
] | |
3074414644d1dd7e52c820a7a85ceea77e7b7715 | c137d7fb6eaa1c1900a63b8dae6b027176a98b6f | /MxShop/MxShop/settings.py | 9006c19e5c0acd8bbeaca3301b28ae570a00c216 | [] | no_license | LasterSmithKim/vuedjango | 22220414ad2f928f0a0df1a0e68c9083e90c1cc7 | 4a5b7fee4dd3f2d31255d7dc9188ea977a75db29 | refs/heads/master | 2022-12-10T19:52:25.014956 | 2019-12-23T16:23:01 | 2019-12-23T16:23:01 | 225,315,491 | 0 | 0 | null | 2022-11-22T04:52:05 | 2019-12-02T07:47:12 | JavaScript | UTF-8 | Python | false | false | 4,922 | py | """
Django settings for MxShop project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, BASE_DIR)
sys.path.insert(0, os.path.join(BASE_DIR, 'apps'))
sys.path.insert(0, os.path.join(BASE_DIR, 'extra_apps'))
import datetime
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'x*wj-nly56z6q5_9c67tg-q6ma$(+c)sp4b!^2sqe-a_ak683w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
AUTH_USER_MODEL = 'users.UserProfile'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'DjangoUeditor',
'users.apps.UsersConfig',
'goods.apps.GoodsConfig',
'trade.apps.TradeConfig',
'user_operation.apps.UserOperationConfig',
'xadmin',
'crispy_forms',
'rest_framework',
'reversion',
'django_filters',
    'corsheaders',  # resolves cross-origin (CORS) issues
'rest_framework.authtoken',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ORIGIN_ALLOW_ALL = True
ROOT_URLCONF = 'MxShop.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'MxShop.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'vuedjango',
'USER': 'vuedjango',
'PASSWORD': 'smith123',
'HOST': '192.168.56.101',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Custom user authentication backends
AUTHENTICATION_BACKENDS = [
'users.views.CustomBackend',
]
JWT_AUTH = {
'JWT_EXPIRATION_DELTA': datetime.timedelta(days=7),
'JWT_AUTH_HEADER_PREFIX': 'JWT',
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
# Default static files root directory (used when collecting static files)
STATIC_ROOT = os.path.join(BASE_DIR, "static_all")
# Path settings for uploaded files
MEDIA_URL="/media/"
MEDIA_ROOT=os.path.join(BASE_DIR, "media")
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
        # 'rest_framework.authentication.TokenAuthentication',  # token-based authentication
        # 'rest_framework_jwt.authentication.JSONWebTokenAuthentication',  # apply the JSON Web Token authentication method on specific app models instead of configuring it globally
),
    # API schema for auto-generated API documentation
'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.coreapi.AutoSchema'
}
# Regular expression for validating mobile phone numbers
REGEX_MOBILE = "^1[358]\d{9}$|^147\d{8}$|^176\d{8}$"
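# Illustrative matches (for documentation only): "13912345678", "14712345678",
# "17612345678"; strings that match none of the three alternatives are rejected.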
# ihuyi.com (SMS provider) configuration
APIUSER = "********"
APIKEY = "********"
| [
"[email protected]"
] | |
2275f0288c637137c69388bc3029ea244a9b7fb4 | 0b53826167ae4337a92360ef0a8b37f0a30e1aef | /plan_b.py | 2e84b654e6fd23e18e9a743260ff6e065227906e | [] | no_license | nyghtowl/Evergreen_Competition | 712ac2c885e1622e12bce178e868c00aefd6fa2d | 456c6342cab250f61e2c02ee9d1199864342d375 | refs/heads/master | 2021-01-20T00:41:23.141388 | 2014-07-19T21:48:55 | 2014-07-19T21:48:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,678 | py | """
beating the benchmark @StumbleUpon Evergreen Challenge
__author__ : Abhishek Thakur
"""
# -*- coding: utf-8 -*-
import numpy as np
from sklearn import metrics,preprocessing,cross_validation
from sklearn.feature_extraction.text import TfidfVectorizer
import sklearn.linear_model as lm
import pandas as p
loadData = lambda f: np.genfromtxt(open(f,'r'), delimiter=' ')
def main():
print "loading data.."
traindata = list(np.array(p.read_table('../data/train.tsv'))[:,2])
testdata = list(np.array(p.read_table('../data/test.tsv'))[:,2])
y = np.array(p.read_table('../data/train.tsv'))[:,-1]
tfv = TfidfVectorizer(min_df=3, max_features=None, strip_accents='unicode',
analyzer='word',token_pattern=r'\w{1,}',ngram_range=(1, 2), use_idf=1,smooth_idf=1,sublinear_tf=1)
rd = lm.LogisticRegression(penalty='l2', dual=True, tol=0.0001,
C=1, fit_intercept=True, intercept_scaling=1.0,
class_weight=None, random_state=None)
X_all = traindata + testdata
lentrain = len(traindata)
print "fitting pipeline"
tfv.fit(X_all)
print "transforming data"
X_all = tfv.transform(X_all)
X = X_all[:lentrain]
X_test = X_all[lentrain:]
print "20 Fold CV Score: ", np.mean(cross_validation.cross_val_score(rd, X, y, cv=20, scoring='roc_auc'))
print "training on full data"
rd.fit(X,y)
pred = rd.predict_proba(X_test)[:,1]
testfile = p.read_csv('../data/test.tsv', sep="\t", na_values=['?'], index_col=1)
pred_df = p.DataFrame(pred, index=testfile.index, columns=['label'])
pred_df.to_csv('benchmark.csv')
print "submission file created.."
if __name__=="__main__":
main()
| [
"[email protected]"
] | |
5af284d0fe6592007bb1978462f2154dfd926980 | 9673935b03b25683b79e2e28a4584ebccd74cda9 | /closed/QCT/code/dlrm/tensorrt/infer.py | ae0dbe685a2d63f1e731a74d0cc78e3da802a9ec | [
"Apache-2.0"
] | permissive | wilderfield/inference_results_v0.7 | 7e737acec72ab0e79cf95a63987184f86c2cb0a2 | d63bb28a2919c79b69460005e686688f3fa033f1 | refs/heads/master | 2023-01-03T03:24:28.608820 | 2020-10-29T18:08:46 | 2020-10-29T18:08:46 | 306,124,077 | 0 | 0 | Apache-2.0 | 2020-10-21T19:15:17 | 2020-10-21T19:15:12 | null | UTF-8 | Python | false | false | 4,431 | py | #! /usr/bin/env python3
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys
import ctypes
sys.path.insert(0, os.getcwd())
# The plugin .so file has to be loaded at global scope and before `import torch` to avoid cuda version mismatch.
DLRM_INTERACTIONS_PLUGIN_LIBRARY="build/plugins/DLRMInteractionsPlugin/libdlrminteractionsplugin.so"
if not os.path.isfile(DLRM_INTERACTIONS_PLUGIN_LIBRARY):
raise IOError("{}\n{}\n".format(
"Failed to load library ({}).".format(DLRM_INTERACTIONS_PLUGIN_LIBRARY),
"Please build the DLRM Interactions plugin."
))
ctypes.CDLL(DLRM_INTERACTIONS_PLUGIN_LIBRARY)
DLRM_BOTTOM_MLP_PLUGIN_LIBRARY="build/plugins/DLRMBottomMLPPlugin/libdlrmbottommlpplugin.so"
if not os.path.isfile(DLRM_BOTTOM_MLP_PLUGIN_LIBRARY):
raise IOError("{}\n{}\n".format(
"Failed to load library ({}).".format(DLRM_BOTTOM_MLP_PLUGIN_LIBRARY),
"Please build the DLRM Bottom MLP plugin."
))
ctypes.CDLL(DLRM_BOTTOM_MLP_PLUGIN_LIBRARY)
from code.common.runner import EngineRunner, get_input_format
from code.common import logging
import code.common.arguments as common_args
import json
import numpy as np
from sklearn.metrics import roc_auc_score
import tensorrt as trt
import torch
import time
def evaluate(ground_truths, predictions):
assert len(ground_truths) == len(predictions), "Number of ground truths are different from number of predictions"
return roc_auc_score(ground_truths, predictions)
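# Illustrative example: evaluate([0, 1, 1], [0.1, 0.8, 0.4]) == 1.0, since every
# positive example is scored above the negative one.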
def run_dlrm_accuracy(engine_file, batch_size, num_pairs=10000000, verbose=False):
if verbose:
logging.info("Running DLRM accuracy test with:")
logging.info(" engine_file: {:}".format(engine_file))
logging.info(" batch_size: {:}".format(batch_size))
logging.info(" num_pairs: {:}".format(num_pairs))
runner = EngineRunner(engine_file, verbose=verbose)
pair_dir = os.path.join(os.getenv("PREPROCESSED_DATA_DIR", "build/preprocessed_data"), "criteo", "full_recalib")
input_dtype, input_format = get_input_format(runner.engine)
if input_dtype == trt.DataType.FLOAT:
format_string = "fp32"
elif input_dtype == trt.DataType.HALF:
format_string = "fp16"
elif input_dtype == trt.DataType.INT8:
format_string = "int8"
if input_format == trt.TensorFormat.CHW4:
format_string += "_chw4"
else:
raise NotImplementedError("Unsupported DataType {:}".format(input_dtype))
numerical_inputs = np.load(os.path.join(pair_dir, "numeric_{:}.npy".format(format_string)))
categ_inputs = np.load(os.path.join(pair_dir, "categorical_int32.npy"))
predictions = []
refs = []
batch_idx = 0
for pair_idx in range(0, int(num_pairs), batch_size):
actual_batch_size = batch_size if pair_idx + batch_size <= num_pairs else num_pairs - pair_idx
numerical_input = np.ascontiguousarray(numerical_inputs[pair_idx:pair_idx + actual_batch_size])
categ_input = np.ascontiguousarray(categ_inputs[pair_idx:pair_idx + actual_batch_size])
start_time = time.time()
outputs = runner([numerical_input, categ_input], actual_batch_size)
if verbose:
logging.info("Batch {:d} (Size {:}) >> Inference time: {:f}".format(batch_idx, actual_batch_size, time.time() - start_time))
predictions.extend(outputs[0][:actual_batch_size])
batch_idx += 1
ground_truths = np.load(os.path.join(pair_dir, "ground_truth.npy"))[:num_pairs].tolist()
return evaluate(ground_truths, predictions)
def main():
args = common_args.parse_args(common_args.ACCURACY_ARGS)
logging.info("Running accuracy test...")
acc = run_dlrm_accuracy(args["engine_file"], args["batch_size"], args["num_samples"],
verbose=args["verbose"])
logging.info("Accuracy: {:}".format(acc))
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
497cc3f06a4891c34917d8b345fd83fd16cc7af6 | 5d0d3cfac10e144468cc25d948e7994c5f968fd0 | /src/T2D23D.py | 930dfe4b34f879e793ca67eeaa01c43a64b3839b | [
"BSD-3-Clause",
"MIT"
] | permissive | WYGNG/USTC_SSE_Project | 1aff71631fd14dc26a0dd9190b76f97c5367d306 | 1c0cd4056f40445aed13ec1ae584608d625b9127 | refs/heads/master | 2022-12-26T13:53:48.543988 | 2020-09-27T08:02:08 | 2020-09-27T08:02:08 | 298,983,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,229 | py | import numpy as np
import matplotlib.pyplot as plt
import xlrd
import math
from scipy import optimize
# Compute the angle at vertex (x1, y1, z1), in degrees
def get_angle1(x1,y1,z1,x2,y2,z2,x3,y3,z3):
a=math.sqrt((x2-x3)**2+(y2-y3)**2+(z2-z3)**2)
b=math.sqrt((x1-x3)**2+(y1-y3)**2+(z1-z3)**2)
c=math.sqrt((x2-x1)**2+(y2-y1)**2+(z2-z1)**2)
if c*b==0:
cosA=1
else:
cosA=(a**2-c**2-b**2)/(-2*c*b)
if cosA < -1.0:
cosA=-1.0
elif cosA>1.0:
cosA=1.0
A=math.acos(cosA)
deg=math.degrees(A)
return deg
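# Worked example (illustrative): the angle at the origin between points on the
# x- and y-axes is a right angle: get_angle1(0, 0, 0, 1, 0, 0, 0, 1, 0) -> 90.0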
# Definition of the 12 body link segments of the torso and limbs
# L = [40, 34, 34, 29, 29, 58, 58, 40, 50, 50, 42, 42]
# Compute the initial value of the scale factor from the 2D joint coordinates
def get_s(point,L):
s = []
s.append(math.sqrt((point[0] - point[2]) ** 2 + (point[1] - point[3]) ** 2) / L[0])
s.append(math.sqrt((point[2] - point[6]) ** 2 + (point[3] - point[7]) ** 2) / L[1])
s.append(math.sqrt((point[0] - point[4]) ** 2 + (point[1] - point[5]) ** 2) / L[2])
s.append(math.sqrt((point[6] - point[10]) ** 2 + (point[7] - point[11]) ** 2) / L[3])
s.append(math.sqrt((point[4] - point[8]) ** 2 + (point[5] - point[9]) ** 2) / L[4])
s.append(math.sqrt((point[2] - point[14]) ** 2 + (point[3] - point[15]) ** 2) / L[5])
s.append(math.sqrt((point[0] - point[12]) ** 2 + (point[1] - point[13]) ** 2) / L[6])
s.append(math.sqrt((point[12] - point[14]) ** 2 + (point[13] - point[15]) ** 2) / L[7])
s.append(math.sqrt((point[14] - point[18]) ** 2 + (point[15] - point[19]) ** 2) / L[8])
s.append(math.sqrt((point[12] - point[16]) ** 2 + (point[13] - point[17]) ** 2) / L[9])
s.append(math.sqrt((point[18] - point[22]) ** 2 + (point[19] - point[23]) ** 2) / L[10])
s.append(math.sqrt((point[16] - point[20]) ** 2 + (point[17] - point[21]) ** 2) / L[11])
s_target = max(s)
#print("&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&",s_target)
return s_target
# Compute the 3D joint coordinates from the 2D joint coordinates and scale factor s
def get_point_3d(point, s_target,L):
z0 = 525 / s_target
point_3d = []
point_3d.append([point[22] / s_target, point[23] / s_target, z0]) # 0
dz11 = math.sqrt(
max(L[10] ** 2 - ((point[18] - point[22]) ** 2 + (point[19] - point[23]) ** 2) / (s_target ** 2), 0))
if point[33]<point[35]:
dz11=-dz11
z14 = z0 + dz11
point_3d.append([point[18] / s_target, point[19] / s_target, z14]) # 1
dz9 = math.sqrt(max(L[8] ** 2 - ((point[14] - point[18]) ** 2 + (point[15] - point[19]) ** 2) / (s_target ** 2), 0))
if point[31]<point[33]:
dz9=-dz9
z12 = z14 + dz9
point_3d.append([point[14] / s_target, point[15] / s_target, z12]) # 2
dz8 = math.sqrt(max(L[7] ** 2 - ((point[12] - point[14]) ** 2 + (point[13] - point[15]) ** 2) / (s_target ** 2), 0))
if point[30]<point[31]:
dz8=-dz8
z11 = z12 + dz8
point_3d.append([point[12] / s_target, point[13] / s_target, z11]) # 3
dz10 = math.sqrt(
max(L[9] ** 2 - ((point[12] - point[16]) ** 2 + (point[13] - point[17]) ** 2) / (s_target ** 2), 0))
if point[32]<point[30]:
dz10=-dz10
z13 = z11 + dz10
point_3d.append([point[16] / s_target, point[17] / s_target, z13]) # 4
dz12 = math.sqrt(
max(L[11] ** 2 - ((point[16] - point[20]) ** 2 + (point[17] - point[21]) ** 2) / (s_target ** 2), 0))
if point[34]<point[32]:
dz12=-dz12
z15 = z13 + dz12
point_3d.append([point[20] / s_target, point[21] / s_target, z15]) # 5
dz6 = math.sqrt(max(L[5] ** 2 - ((point[2] - point[14]) ** 2 + (point[3] - point[15]) ** 2) / (s_target ** 2), 0))
if point[25]<point[31]:
dz6=-dz6
z6 = z12 + dz6
point_3d.append([point[2] / s_target, point[3] / s_target, z6]) # 6
dz2 = math.sqrt(max(L[1] ** 2 - ((point[2] - point[6]) ** 2 + (point[3] - point[7]) ** 2) / (s_target ** 2), 0))
if point[27]<point[25]:
dz2=-dz2
z8 = z6 + dz2
point_3d.append([point[6] / s_target, point[7] / s_target, z8]) # 7
dz4 = math.sqrt(max(L[3] ** 2 - ((point[6] - point[10]) ** 2 + (point[7] - point[11]) ** 2) / (s_target ** 2), 0))
if point[29]<point[27]:
dz4=-dz4
z10 = z8 + dz4
point_3d.append([point[10] / s_target, point[11] / s_target, z10]) # 8
dz1 = math.sqrt(max(L[0] ** 2 - ((point[0] - point[2]) ** 2 + (point[1] - point[3]) ** 2) / (s_target ** 2), 0))
if point[24]<point[25]:
dz1=-dz1
z5 = z6 + dz1
point_3d.append([point[0] / s_target, point[1] / s_target, z5]) # 9
dz3 = math.sqrt(max(L[2] ** 2 - ((point[0] - point[4]) ** 2 + (point[1] - point[5]) ** 2) / (s_target ** 2), 0))
if point[26]<point[24]:
dz3=-dz3
z7 = z5 + dz3
    point_3d.append([point[4] / s_target, point[5] / s_target, z7])  # 10
dz5 = math.sqrt(max(L[4] ** 2 - ((point[4] - point[8]) ** 2 + (point[5] - point[9]) ** 2) / (s_target ** 2), 0))
if point[28]<point[26]:
dz5=-dz5
z9 = z7 + dz5
point_3d.append([point[8] / s_target, point[9] / s_target, z9]) # 11
return point_3d
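# Note added for clarity: each depth offset dz above comes from the link-length
# constraint dz = sqrt(max(L_i**2 - d2d**2 / s**2, 0)), where d2d is the observed
# 2D link length; its sign is flipped according to the ordering of the auxiliary
# values in point[24:], which appear to encode the joints' relative depths.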
# Objective function for single-frame optimization
def f(s, point, s_target,L):
dz1 = math.sqrt(max(L[0] ** 2 - ((point[0] - point[2]) ** 2 + (point[1] - point[3]) ** 2) / (s_target ** 2), 0))
dz2 = math.sqrt(max(L[1] ** 2 - ((point[2] - point[6]) ** 2 + (point[3] - point[7]) ** 2) / (s_target ** 2), 0))
dz3 = math.sqrt(max(L[2] ** 2 - ((point[0] - point[4]) ** 2 + (point[1] - point[5]) ** 2) / (s_target ** 2), 0))
dz4 = math.sqrt(max(L[3] ** 2 - ((point[6] - point[10]) ** 2 + (point[7] - point[11]) ** 2) / (s_target ** 2), 0))
dz5 = math.sqrt(max(L[4] ** 2 - ((point[4] - point[8]) ** 2 + (point[5] - point[9]) ** 2) / (s_target ** 2), 0))
dz6 = math.sqrt(max(L[5] ** 2 - ((point[2] - point[14]) ** 2 + (point[3] - point[15]) ** 2) / (s_target ** 2), 0))
dz8 = math.sqrt(max(L[7] ** 2 - ((point[12] - point[14]) ** 2 + (point[13] - point[15]) ** 2) / (s_target ** 2), 0))
dz9 = math.sqrt(max(L[8] ** 2 - ((point[14] - point[18]) ** 2 + (point[15] - point[19]) ** 2) / (s_target ** 2), 0))
dz10 = math.sqrt(
max(L[9] ** 2 - ((point[12] - point[16]) ** 2 + (point[13] - point[17]) ** 2) / (s_target ** 2), 0))
dz11 = math.sqrt(
max(L[10] ** 2 - ((point[18] - point[22]) ** 2 + (point[19] - point[23]) ** 2) / (s_target ** 2), 0))
dz12 = math.sqrt(
max(L[11] ** 2 - ((point[16] - point[20]) ** 2 + (point[17] - point[21]) ** 2) / (s_target ** 2), 0))
y = 0
y += (s * math.sqrt(L[0] ** 2 - dz1 ** 2) - math.sqrt((point[0] - point[2]) ** 2 + (point[1] - point[3]) ** 2)) ** 2 +\
(s * math.sqrt(L[1] ** 2 - dz2 ** 2) - math.sqrt((point[2] - point[6]) ** 2 + (point[3] - point[7]) ** 2)) ** 2 +\
(s * math.sqrt(L[2] ** 2 - dz3 ** 2) - math.sqrt((point[0] - point[4]) ** 2 + (point[1] - point[5]) ** 2)) ** 2 +\
(s * math.sqrt(L[3] ** 2 - dz4 ** 2) - math.sqrt((point[6] - point[10]) ** 2 + (point[7] - point[11]) ** 2)) ** 2 +\
(s * math.sqrt(L[4] ** 2 - dz5 ** 2) - math.sqrt((point[4] - point[8]) ** 2 + (point[5] - point[9]) ** 2)) ** 2 +\
(s * math.sqrt(L[5] ** 2 - dz6 ** 2) - math.sqrt((point[2] - point[14]) ** 2 + (point[3] - point[15]) ** 2)) ** 2 +\
(s * math.sqrt(L[7] ** 2 - dz8 ** 2) - math.sqrt((point[12] - point[14]) ** 2 + (point[13] - point[15]) ** 2)) ** 2 +\
(s * math.sqrt(L[8] ** 2 - dz9 ** 2) - math.sqrt((point[14] - point[18]) ** 2 + (point[15] - point[19]) ** 2)) ** 2 +\
(s * math.sqrt(L[9] ** 2 - dz10 ** 2) - math.sqrt((point[12] - point[16]) ** 2 + (point[13] - point[17]) ** 2)) ** 2 +\
(s * math.sqrt(L[10] ** 2 - dz11 ** 2) - math.sqrt((point[18] - point[22]) ** 2 + (point[19] - point[23]) ** 2)) ** 2 +\
(s * math.sqrt(L[11] ** 2 - dz12 ** 2) - math.sqrt((point[16] - point[20]) ** 2 + (point[17] - point[21]) ** 2)) ** 2
# print("dz!!!!!!!!!!!!!!!!!!!!!!!",dz1,dz2,dz3,dz4,dz5,dz6,dz8,dz9,dz10,dz11,dz12)
# print("\n")
return y
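# Note added for clarity: f(s, ...) sums squared residuals between the scaled
# link lengths s * sqrt(L_i**2 - dz_i**2) and the observed 2D link lengths, so
# minimizing it over s fits the global scale for one frame.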
# Objective function for multi-frame optimization
def f_s(s, begin, end,worksheet1, L):
z = 0
for i in range(end - begin + 1):
point = worksheet1.row_values(begin + i)
point.remove(point[0])
# s_target = get_s(point)
z += f(s[i], point, s[i], L)
return z
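# Minimal usage sketch (hypothetical frame range and worksheet; not part of the
# original file):
# s0 = np.ones(end - begin + 1)
# res = optimize.minimize(f_s, s0, args=(begin, end, worksheet1, L))
# per_frame_scales = res.x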
| [
"[email protected]"
] | |
12f5298b94e2213e4c9b120eec42f9982a07c04b | 96328f51c177bd53ca1d89199684af04008b0ba9 | /wiki_graph/util.py | c48945b102b570f0b07df5e5f23f8b6aa844ca7a | [
"MIT"
] | permissive | mvwicky/wiki-graph | b4045bf8200d579e99f9f58e77672d4dfac93c50 | a88e2f37e7d5b5ba93bcca67544746d682936f41 | refs/heads/master | 2020-03-11T16:52:19.213832 | 2018-05-14T16:23:00 | 2018-05-14T16:23:00 | 130,130,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,511 | py | import collections
import itertools
import sys
def rec_sizeof(o, handlers={}, verbose=False):
"""Returns the approximate memory footprint an object and all of its contents.
Automatically finds the contents of the following builtin containers and
their subclasses: tuple, list, deque, dict, set and frozenset.
To search other containers, add handlers to iterate over their contents:
handlers = {SomeContainerClass: iter,
OtherContainerClass: OtherContainerClass.get_elements}
"""
def dict_handler(d):
return itertools.chain.from_iterable(d.items())
all_handlers = {tuple: iter,
list: iter,
collections.deque: iter,
dict: dict_handler,
set: iter,
frozenset: iter}
# user handlers take precedence
all_handlers.update(handlers)
# track which object id's have already been seen
seen = set()
# estimate sizeof object without __sizeof__
default_size = sys.getsizeof(0)
def sizeof(o):
# do not double count the same object
if id(o) in seen:
return 0
seen.add(id(o))
s = sys.getsizeof(o, default_size)
if verbose:
print(s, type(o), repr(o), file=sys.stderr)
for typ, handler in all_handlers.items():
if isinstance(o, typ):
s += sum(map(sizeof, handler(o)))
break
return s
return sizeof(o)
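# Illustrative usage: unlike a bare sys.getsizeof call, this also counts the
# contents of nested containers, e.g. rec_sizeof({"a": [1, 2, 3], "b": (4, 5)}).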
| [
"[email protected]"
] | |
98a07a8d052ae1d745b923459e18cd64001ebadb | d5a576c0b766124bd756922f818226d20867c6ef | /setup.py | a9c0a67377aef18bfcf1a11ca854655ec32fcda9 | [
"CC0-1.0",
"BSD-3-Clause"
] | permissive | fflewddur/python-phash | f6b32d4493858c2e9658d6b843dd816a5dcbfeb5 | 27152fd3c8b7a2cd032a33c25abeb423c582df65 | refs/heads/master | 2021-01-15T18:15:01.264905 | 2014-06-30T03:44:39 | 2014-06-30T03:44:39 | 21,335,530 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,377 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='phash',
version='0.1.0',
description='ctypes interface to libphash',
long_description=readme + '\n\n' + history,
author='Chris Adams',
author_email='[email protected]',
url='https://github.com/acdha/python-phash',
packages=[
'phash',
],
package_dir={'phash': 'phash'},
scripts=['scripts/compare-images.py'],
include_package_data=True,
install_requires=[
'more-itertools',
],
license="BSD",
zip_safe=False,
keywords='phash',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
],
test_suite='tests',
) | [
"[email protected]"
] | |
946673c401ad39509fd4681ff3e7bc0ff420ce49 | 3eacb8aa1e5e92dc354e61e3140009065257d643 | /tests/test__order.py | 0bb6ab3f4dddaf55af7934eafe1350d07e2e7f4b | [
"BSD-3-Clause"
] | permissive | dask-image/dask-ndfilters | d0db13802185ad719b07752074e57bd8d8dd8529 | 3e947e791e2b3dd3a59de04b9cb70987a75f2446 | refs/heads/master | 2021-01-20T00:45:51.188492 | 2018-08-30T21:59:18 | 2018-08-30T21:59:18 | 89,183,177 | 5 | 2 | BSD-3-Clause | 2018-08-30T21:36:20 | 2017-04-24T00:56:22 | Python | UTF-8 | Python | false | false | 5,669 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import pytest
import numpy as np
import scipy.ndimage.filters as sp_ndf
import dask
import dask.array as da
import dask.array.utils as dau
import dask_ndfilters as da_ndf
assert dask
@pytest.mark.parametrize(
"da_func, extra_kwargs",
[
(da_ndf.minimum_filter, {}),
(da_ndf.median_filter, {}),
(da_ndf.maximum_filter, {}),
(da_ndf.rank_filter, {"rank": 0}),
(da_ndf.percentile_filter, {"percentile": 0}),
]
)
@pytest.mark.parametrize(
"err_type, size, footprint, origin",
[
(RuntimeError, None, None, 0),
(TypeError, 1.0, None, 0),
(RuntimeError, (1,), None, 0),
(RuntimeError, [(1,)], None, 0),
(RuntimeError, 1, np.ones((1,)), 0),
(RuntimeError, None, np.ones((1,)), 0),
(RuntimeError, None, np.ones((1, 0)), 0),
(RuntimeError, 1, None, (0,)),
(RuntimeError, 1, None, [(0,)]),
(ValueError, 1, None, 1),
(TypeError, 1, None, 0.0),
(TypeError, 1, None, (0.0, 0.0)),
(TypeError, 1, None, 1+0j),
(TypeError, 1, None, (0+0j, 1+0j)),
]
)
def test_order_filter_params(da_func,
extra_kwargs,
err_type,
size,
footprint,
origin):
a = np.arange(140.0).reshape(10, 14)
d = da.from_array(a, chunks=(5, 7))
with pytest.raises(err_type):
da_func(d,
size=size,
footprint=footprint,
origin=origin,
**extra_kwargs)
@pytest.mark.parametrize(
"da_func, extra_kwargs",
[
(da_ndf.minimum_filter, {}),
(da_ndf.median_filter, {}),
(da_ndf.maximum_filter, {}),
(da_ndf.rank_filter, {"rank": 0}),
(da_ndf.percentile_filter, {"percentile": 0}),
]
)
def test_ordered_filter_shape_type(da_func,
extra_kwargs):
size = 1
a = np.arange(140.0).reshape(10, 14)
d = da.from_array(a, chunks=(5, 7))
assert all([(type(s) is int) for s in d.shape])
d2 = da_func(d, size=size, **extra_kwargs)
assert all([(type(s) is int) for s in d2.shape])
@pytest.mark.parametrize(
"sp_func, da_func, extra_kwargs",
[
(sp_ndf.minimum_filter, da_ndf.minimum_filter, {}),
(sp_ndf.median_filter, da_ndf.median_filter, {}),
(sp_ndf.maximum_filter, da_ndf.maximum_filter, {}),
(sp_ndf.rank_filter, da_ndf.rank_filter, {"rank": 0}),
(sp_ndf.percentile_filter, da_ndf.percentile_filter, {"percentile": 0}),
]
)
@pytest.mark.parametrize(
"size, footprint",
[
(1, None),
((1, 1), None),
(None, np.ones((1, 1))),
]
)
def test_ordered_filter_identity(sp_func,
da_func,
extra_kwargs,
size,
footprint):
a = np.arange(140.0).reshape(10, 14)
d = da.from_array(a, chunks=(5, 7))
dau.assert_eq(
d, da_func(d, size=size, footprint=footprint, **extra_kwargs)
)
dau.assert_eq(
sp_func(a, size=size, footprint=footprint, **extra_kwargs),
da_func(d, size=size, footprint=footprint, **extra_kwargs)
)
@pytest.mark.parametrize(
"da_func, kwargs",
[
(da_ndf.minimum_filter, {"size": 1}),
(da_ndf.median_filter, {"size": 1}),
(da_ndf.maximum_filter, {"size": 1}),
(da_ndf.rank_filter, {"size": 1, "rank": 0}),
(da_ndf.percentile_filter, {"size": 1, "percentile": 0}),
]
)
def test_order_comprehensions(da_func, kwargs):
np.random.seed(0)
a = np.random.random((3, 12, 14))
d = da.from_array(a, chunks=(3, 6, 7))
l2s = [da_func(d[i], **kwargs) for i in range(len(d))]
l2c = [da_func(d[i], **kwargs)[None] for i in range(len(d))]
dau.assert_eq(np.stack(l2s), da.stack(l2s))
dau.assert_eq(np.concatenate(l2c), da.concatenate(l2c))
@pytest.mark.parametrize(
"sp_func, da_func, extra_kwargs",
[
(sp_ndf.minimum_filter, da_ndf.minimum_filter, {}),
(sp_ndf.median_filter, da_ndf.median_filter, {}),
(sp_ndf.maximum_filter, da_ndf.maximum_filter, {}),
(sp_ndf.rank_filter, da_ndf.rank_filter, {"rank": 1}),
(sp_ndf.percentile_filter, da_ndf.percentile_filter, {"percentile": 10}),
]
)
@pytest.mark.parametrize(
"size, footprint, origin",
[
(2, None, 0),
(None, np.ones((2, 3)), 0),
(None, np.ones((2, 3)), (0, 1)),
(None, np.ones((2, 3)), (0, -1)),
(None, (np.mgrid[-2: 2+1, -2: 2+1]**2).sum(axis=0) < 2.5**2, 0),
(None, (np.mgrid[-2: 2+1, -2: 2+1]**2).sum(axis=0) < 2.5**2, (1, 2)),
(None, (np.mgrid[-2: 2+1, -2: 2+1]**2).sum(axis=0) < 2.5**2, (-1, -2)),
(5, None, 0),
(7, None, 0),
(8, None, 0),
(10, None, 0),
(5, None, 2),
(5, None, -2),
]
)
def test_ordered_filter_compare(sp_func,
da_func,
extra_kwargs,
size,
footprint,
origin):
a = np.arange(140.0).reshape(10, 14)
d = da.from_array(a, chunks=(5, 7))
dau.assert_eq(
sp_func(
a, size=size, footprint=footprint, origin=origin, **extra_kwargs
),
da_func(
d, size=size, footprint=footprint, origin=origin, **extra_kwargs
)
)
| [
"[email protected]"
] | |
f8b4ee7a035e6157e5582a93bf49c3a9b6ad803d | f1e11f9e50d061d05d581efa8805ab28b25d7b24 | /climi/pppp/hw_spatial_pattern_check___ | c14cab5e65815e0ec1c3c88f260e677acf70a59b | [
"MIT"
] | permissive | ahheo/climi | bdccb5116046dfbf0aa6e38e7400447e427eeae0 | 2043a23876a9e620d44b2e9dd34d487ee3e0fc4b | refs/heads/main | 2023-07-09T01:24:17.514954 | 2023-07-07T05:13:16 | 2023-07-07T05:13:16 | 302,750,076 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,876 | #!/usr/bin/env python3
from climi.uuuu import *
from climi.pppp import *
import os
import warnings
import matplotlib as mpl
mpl.use('pdf', force=True)
import matplotlib.pyplot as plt
import numpy as np
import iris
_here_ = get_path_(__file__)
sites = dict(
SE = (19, 45),
W1 = (7, 51.5),
W2 = (-1.5, 53),
W3 = (8, 60)
)
dataroot = '/nobackup/rossby22/sm_chali/DATA/hc/med/'
outdir = '/nobackup/rossby24/users/sm_chali/DATA/hw2018/fig/'
ffmt = 'thr_tx_{}_ALL_1989-2008_90.nc'
datasets = ['EOBS20', 'ERA-Interim']
colors = plt.get_cmap('tab10').colors
y0y1 = (1989, 2008)
pp = 95
_djn = os.path.join
def _get_data1(dataset):
if dataset == datasets[0]:
return extract_period_cube( iris.load_cube(
'/home/rossby/imports/obs/EOBS/EOBS20/orig/'
'tx_ens_mean_0.1deg_reg_v20.0e.nc'
), *y0y1)
elif dataset == datasets[1]:
return extract_period_cube( concat_cube_( iris.load(
'/nobackup/rossby22/sm_chali/DATA/'
'hw2018/iii/obs/ERAI/tasmax*'
)), *y0y1)
def main():
warnings.filterwarnings('ignore', category=UserWarning)
#data
for dataset in datasets:
fig, ax = plt.subplots(figsize=(7.5, 4), tight_layout=True)
#fig = init_fig_(fx=7.5, fy=4, l=.09, r=.98, t=.965, b=.15)
#ax = fig.add_subplot(1, 1, 1)
data0 = iris.load_cube(_djn(dataroot, ffmt.format(dataset)))
data1 = _get_data1(dataset)
for site, c in zip(sites.keys(), colors):
data0_ = nearest_point_cube(data0, *sites[site])
            data1_ = nearest_point_cube(data1, *sites[site])
data1__ = doy_f_cube(
data1_,
np.nanpercentile, f_Args=(pp,),
ws=15,
mF=np.nan)
data1___ = np.diff(data1_.collapsed(
'time',
iris.analysis.PERCENTILE, percent=[25, 75]).data)
iqr_ = data1___[0]
data = data0_.copy((data1__ - data0_).data/data1___)
mjjas = np.sum(data.data[120:272])
rm_t_aux_cube(data)
iris.coord_categorisation.add_day_of_year(data, 'time', name='doy')
doy = data.coord('doy').points
ax.plot(doy, data.data,
color=c,
lw=1.5 if site == list(sites.keys())[0] else .75,
label='{} ({:.1f})'.format(site, mjjas))
ax.axvspan(120, 272, fc='0.8', alpha=.5, zorder=-1)
ax.set_xlabel('Day of year')
ax.set_ylabel('Normalized $T_{95th} - T_{90th}$')
ax.set_xlim([min(doy), max(doy)])
ax.set_ylim([0,.4])
ax.legend(frameon=False)
fn = _djn(outdir, '{}-90_{}_clm.pdf'.format(pp, dataset))
plt.savefig(fn, dpi=300)
plt.close(fig)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | ||
b4a8594c676f389559ad8b7b703c3698c7687ed2 | 7e157b3bffb170ff87e29880611be9b040d61a27 | /purano/annotator/processors/tfidf.py | a941fe233977a850b27bd0b8b8c0de80ed1d253d | [
"Apache-2.0"
] | permissive | IlyaGusev/purano | 8906d5e35e8a4f0f31144d874721d4e9e5f76359 | 07234a55e8c80d1e9d8aeb8197c58e36dd26da54 | refs/heads/master | 2022-06-21T09:57:45.624005 | 2021-07-27T18:41:37 | 2021-07-27T18:41:37 | 212,185,927 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,588 | py | from typing import List, Optional
import numpy as np
import torch
from purano.annotator.processors import Processor
from purano.models import Document
from purano.proto.info_pb2 import Info as InfoPb
from purano.training.models.tfidf import load_idfs, get_tfidf_vector, SVDEmbedder
@Processor.register("tfidf")
class TfIdfProcessor(Processor):
def __init__(
self,
idfs_vocabulary: str,
svd_torch_model_path: str
):
word2idf, word2idx = load_idfs(idfs_vocabulary)
self.word2idf = word2idf
self.word2idx = word2idx
self.svd_torch_model = None # type: Optional[SVDEmbedder]
if svd_torch_model_path:
self.svd_torch_model = torch.load(svd_torch_model_path)
def __call__(
self,
docs: List[Document],
infos: List[InfoPb],
input_fields: List[str],
output_field: str,
):
embeddings = np.zeros((len(docs), len(self.word2idf)), dtype=np.float32)
for doc_num, (doc, info) in enumerate(zip(docs, infos)):
text = " ".join([getattr(doc, field) for field in input_fields])
data, indices = get_tfidf_vector(text, self.word2idf, self.word2idx)
for index, value in zip(indices, data):
embeddings[doc_num][index] = value
final_embeddings = embeddings
if self.svd_torch_model:
final_embeddings = self.svd_torch_model(torch.FloatTensor(final_embeddings))
for doc_num, info in enumerate(infos):
getattr(info, output_field).extend(final_embeddings[doc_num])
| [
"[email protected]"
] | |
0d339371d555fa7f40da404b5092acbd841c381b | 08cfc4fb5f0d2f11e4e226f12520a17c5160f0a2 | /kubernetes/test/test_v1_namespace_list.py | eecef48f123f70650b3c855d6413abfa52021b87 | [
"Apache-2.0"
] | permissive | ex3cv/client-python | 5c6ee93dff2424828d064b5a2cdbed3f80b74868 | 2c0bed9c4f653472289324914a8f0ad4cbb3a1cb | refs/heads/master | 2021-07-12T13:37:26.049372 | 2017-10-16T20:19:01 | 2017-10-16T20:19:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 953 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_namespace_list import V1NamespaceList
class TestV1NamespaceList(unittest.TestCase):
""" V1NamespaceList unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1NamespaceList(self):
"""
Test V1NamespaceList
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1_namespace_list.V1NamespaceList()
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
4fa7a1d1958a21a196156bb7fb162e220b5a4c42 | 157d2a2f4031c58e5504bcbac5348ff53883facc | /rDj63/rDj63/urls.py | c4a78403fddc9db36bfb4f94aefa067e840b972c | [] | no_license | optirg-39/Django_gekSh | d78b635fd3ee88addd084b68ec35c6284adfb55c | 1129a6df35c110dfeeeaaf1a76b2ebc192a5f1ce | refs/heads/master | 2023-04-15T13:09:03.067099 | 2021-04-26T12:15:35 | 2021-04-26T12:15:35 | 352,018,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | """rDj63 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from enroll import views
urlpatterns = [
path('admin/', admin.site.urls),
path('sign_up/', views.sign_up, name = 'signin1'),
path('log_in/', views.log_in, name = 'login1'),
path('user_profile/', views.user_profile, name = 'profile1'),
path('logout_user/', views.user_logout, name = 'logout1'),
]
| [
"[email protected]"
] | |
ba7c90fd43394d63878f354d584a396087cd06f9 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p2DJ/New/R2/benchmark/startPyquil122.py | 3fd855f9728d33e5489e4eb157beb0e6dabf2e1b | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,136 | py | # qubit number=2
# total number=10
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=1
prog += H(1) # number=4
prog += CNOT(0,1) # number=7
prog += X(1) # number=8
prog += CNOT(0,1) # number=9
prog += X(1) # number=3
prog += CNOT(1,0) # number=5
prog += CNOT(1,0) # number=6
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('1q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil122.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| [
"[email protected]"
] | |
270375ee24a54029cec52788cd572a47ddcc8f30 | 0804c9f4d46e15a93a4322808b487eb00c28da95 | /Chapter 10/BPApp/HiTech/engg/routes.py | 0a37098872ecb6b648d051b9c3b2fa6c8071f0b3 | [
"MIT"
] | permissive | bpbpublications/Building-Web-Apps-with-Python-and-Flask | 99b6e9312a41bea1ba6c6c5dce70f958b86ad768 | 4fbbe75fad9629f16ff5bf8bd603aa09dd04f9eb | refs/heads/main | 2023-03-27T05:55:23.170813 | 2021-03-23T10:27:59 | 2021-03-23T10:27:59 | 339,637,598 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | from flask import Blueprint, render_template
engg=Blueprint('engg', __name__,template_folder='templates',static_folder='static')
@engg.route('/')
def index():
return render_template('engindex.html')
@engg.route('/courses')
def courses():
    return '<h1>list of courses in Engineering</h1>'
@engg.route('/faculty')
def faculty():
return '<h1>list of Engineering faculty members</h1>'
@engg.route('/form')
def form():
return render_template('form.html')
from flask import Flask
app=Flask(__name__)
app.register_blueprint(engg)
if __name__=='__main__':
app.run(debug=True)
| [
"[email protected]"
] | |
e62d95154c55d2a4f3766e12e5274d5f6283c5f7 | 213682d70d45739b8a4fd7c0fcf05437a0704c4d | /pipeline/ve/share/radical.pilot/examples/00_getting_started.py | f4a0df0d42b456b1edfef3b2a02e65281648c08e | [] | no_license | ATLAS-Titan/misc | d272adfe13fcbbea1562ca98c718bc1465032421 | 0a20d158d0d9a95ef72b6a8d0bccbb68193e98c0 | refs/heads/master | 2020-12-24T06:31:10.696708 | 2017-07-18T14:55:41 | 2017-07-18T14:55:41 | 73,486,659 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,381 | py | #!/usr/bin/env python
__copyright__ = 'Copyright 2013-2014, http://radical.rutgers.edu'
__license__ = 'MIT'
import os
import sys
import radical.pilot as rp
import radical.utils as ru
# ------------------------------------------------------------------------------
#
# READ the RADICAL-Pilot documentation: http://radicalpilot.readthedocs.org/
#
# ------------------------------------------------------------------------------
#------------------------------------------------------------------------------
#
if __name__ == '__main__':
# we use a reporter class for nicer output
report = ru.LogReporter(name='radical.pilot')
report.title('Getting Started (RP version %s)' % rp.version)
# use the resource specified as argument, fall back to localhost
if len(sys.argv) > 2: report.exit('Usage:\t%s [resource]\n\n' % sys.argv[0])
elif len(sys.argv) == 2: resource = sys.argv[1]
else : resource = 'local.localhost'
# Create a new session. No need to try/except this: if session creation
# fails, there is not much we can do anyways...
session = rp.Session()
# all other pilot code is now tried/excepted. If an exception is caught, we
# can rely on the session object to exist and be valid, and we can thus tear
# the whole RP stack down via a 'session.close()' call in the 'finally'
# clause...
try:
# read the config used for resource details
report.info('read config')
config = ru.read_json('%s/config.json' % os.path.dirname(os.path.abspath(__file__)))
report.ok('>>ok\n')
report.header('submit pilots')
# Add a Pilot Manager. Pilot managers manage one or more ComputePilots.
pmgr = rp.PilotManager(session=session)
# Define an [n]-core local pilot that runs for [x] minutes
# Here we use a dict to initialize the description object
pd_init = {
'resource' : resource,
'runtime' : 15, # pilot runtime (min)
'exit_on_error' : True,
'project' : config[resource]['project'],
'queue' : config[resource]['queue'],
'access_schema' : config[resource]['schema'],
'cores' : config[resource]['cores'],
}
pdesc = rp.ComputePilotDescription(pd_init)
# Launch the pilot.
pilot = pmgr.submit_pilots(pdesc)
report.header('submit units')
# Register the ComputePilot in a UnitManager object.
umgr = rp.UnitManager(session=session)
umgr.add_pilots(pilot)
# Create a workload of ComputeUnits.
# Each compute unit runs '/bin/date'.
n = 128 # number of units to run
report.info('create %d unit description(s)\n\t' % n)
cuds = list()
for i in range(0, n):
# create a new CU description, and fill it.
# Here we don't use dict initialization.
cud = rp.ComputeUnitDescription()
cud.executable = '/bin/date'
cuds.append(cud)
report.progress()
report.ok('>>ok\n')
# Submit the previously created ComputeUnit descriptions to the
# PilotManager. This will trigger the selected scheduler to start
# assigning ComputeUnits to the ComputePilots.
umgr.submit_units(cuds)
# Wait for all compute units to reach a final state (DONE, CANCELED or FAILED).
report.header('gather results')
umgr.wait_units()
except Exception as e:
# Something unexpected happened in the pilot code above
report.error('caught Exception: %s\n' % e)
raise
except (KeyboardInterrupt, SystemExit) as e:
# the callback called sys.exit(), and we can here catch the
# corresponding KeyboardInterrupt exception for shutdown. We also catch
# SystemExit (which gets raised if the main threads exits for some other
# reason).
report.warn('exit requested\n')
finally:
# always clean up the session, no matter if we caught an exception or
# not. This will kill all remaining pilots.
report.header('finalize')
session.close(cleanup=False)
report.header()
#-------------------------------------------------------------------------------
| [
"[email protected]"
] | |
a6abb9c81b02af705e611a0bfd48a2fb56fa2acd | 3784495ba55d26e22302a803861c4ba197fd82c7 | /venv/lib/python3.6/site-packages/markdown/extensions/toc.py | dc80c7e8ecd3ee4e9f5d7702a9b13082a83e0314 | [
"MIT"
] | permissive | databill86/HyperFoods | cf7c31f5a6eb5c0d0ddb250fd045ca68eb5e0789 | 9267937c8c70fd84017c0f153c241d2686a356dd | refs/heads/master | 2021-01-06T17:08:48.736498 | 2020-02-11T05:02:18 | 2020-02-11T05:02:18 | 241,407,659 | 3 | 0 | MIT | 2020-02-18T16:15:48 | 2020-02-18T16:15:47 | null | UTF-8 | Python | false | false | 13,174 | py | """
Table of Contents Extension for Python-Markdown
===============================================
See <https://Python-Markdown.github.io/extensions/toc>
for documentation.
Oringinal code Copyright 2008 [Jack Miller](https://codezen.org/)
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](https://opensource.org/licenses/bsd-license.php)
"""
from . import Extension
from ..treeprocessors import Treeprocessor
from ..util import parseBoolValue, AMP_SUBSTITUTE, HTML_PLACEHOLDER_RE
from ..postprocessors import UnescapePostprocessor
import re
import unicodedata
import xml.etree.ElementTree as etree
def slugify(value, separator):
""" Slugify a string, to make it URL friendly. """
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = re.sub(r'[^\w\s-]', '', value.decode('ascii')).strip().lower()
return re.sub(r'[%s\s]+' % separator, separator, value)
IDCOUNT_RE = re.compile(r'^(.*)_([0-9]+)$')
def unique(id, ids):
""" Ensure id is unique in set of ids. Append '_1', '_2'... if not """
while id in ids or not id:
m = IDCOUNT_RE.match(id)
if m:
id = '%s_%d' % (m.group(1), int(m.group(2))+1)
else:
id = '%s_%d' % (id, 1)
ids.add(id)
return id
def stashedHTML2text(text, md, strip_entities=True):
""" Extract raw HTML from stash, reduce to plain text and swap with placeholder. """
def _html_sub(m):
""" Substitute raw html with plain text. """
try:
raw = md.htmlStash.rawHtmlBlocks[int(m.group(1))]
except (IndexError, TypeError): # pragma: no cover
return m.group(0)
# Strip out tags and/or entities - leaving text
res = re.sub(r'(<[^>]+>)', '', raw)
if strip_entities:
res = re.sub(r'(&[\#a-zA-Z0-9]+;)', '', res)
return res
return HTML_PLACEHOLDER_RE.sub(_html_sub, text)
def unescape(text):
""" Unescape escaped text. """
c = UnescapePostprocessor()
return c.run(text)
def nest_toc_tokens(toc_list):
"""Given an unsorted list with errors and skips, return a nested one.
[{'level': 1}, {'level': 2}]
=>
[{'level': 1, 'children': [{'level': 2, 'children': []}]}]
A wrong list is also converted:
[{'level': 2}, {'level': 1}]
=>
[{'level': 2, 'children': []}, {'level': 1, 'children': []}]
"""
ordered_list = []
if len(toc_list):
# Initialize everything by processing the first entry
last = toc_list.pop(0)
last['children'] = []
levels = [last['level']]
ordered_list.append(last)
parents = []
# Walk the rest nesting the entries properly
while toc_list:
t = toc_list.pop(0)
current_level = t['level']
t['children'] = []
# Reduce depth if current level < last item's level
if current_level < levels[-1]:
# Pop last level since we know we are less than it
levels.pop()
# Pop parents and levels we are less than or equal to
to_pop = 0
for p in reversed(parents):
if current_level <= p['level']:
to_pop += 1
else: # pragma: no cover
break
if to_pop:
levels = levels[:-to_pop]
parents = parents[:-to_pop]
# Note current level as last
levels.append(current_level)
# Level is the same, so append to
# the current parent (if available)
if current_level == levels[-1]:
(parents[-1]['children'] if parents
else ordered_list).append(t)
# Current level is > last item's level,
# So make last item a parent and append current as child
else:
last['children'].append(t)
parents.append(last)
levels.append(current_level)
last = t
return ordered_list
class TocTreeprocessor(Treeprocessor):
def __init__(self, md, config):
super().__init__(md)
self.marker = config["marker"]
self.title = config["title"]
self.base_level = int(config["baselevel"]) - 1
self.slugify = config["slugify"]
self.sep = config["separator"]
self.use_anchors = parseBoolValue(config["anchorlink"])
self.anchorlink_class = config["anchorlink_class"]
self.use_permalinks = parseBoolValue(config["permalink"], False)
if self.use_permalinks is None:
self.use_permalinks = config["permalink"]
self.permalink_class = config["permalink_class"]
self.permalink_title = config["permalink_title"]
self.header_rgx = re.compile("[Hh][123456]")
if isinstance(config["toc_depth"], str) and '-' in config["toc_depth"]:
self.toc_top, self.toc_bottom = [int(x) for x in config["toc_depth"].split('-')]
else:
self.toc_top = 1
self.toc_bottom = int(config["toc_depth"])
def iterparent(self, node):
''' Iterator wrapper to get allowed parent and child all at once. '''
# We do not allow the marker inside a header as that
# would causes an enless loop of placing a new TOC
# inside previously generated TOC.
for child in node:
if not self.header_rgx.match(child.tag) and child.tag not in ['pre', 'code']:
yield node, child
yield from self.iterparent(child)
def replace_marker(self, root, elem):
''' Replace marker with elem. '''
for (p, c) in self.iterparent(root):
text = ''.join(c.itertext()).strip()
if not text:
continue
# To keep the output from screwing up the
# validation by putting a <div> inside of a <p>
# we actually replace the <p> in its entirety.
if c.text and c.text.strip() == self.marker:
for i in range(len(p)):
if p[i] == c:
p[i] = elem
break
def set_level(self, elem):
''' Adjust header level according to base level. '''
level = int(elem.tag[-1]) + self.base_level
if level > 6:
level = 6
elem.tag = 'h%d' % level
def add_anchor(self, c, elem_id): # @ReservedAssignment
anchor = etree.Element("a")
anchor.text = c.text
anchor.attrib["href"] = "#" + elem_id
anchor.attrib["class"] = self.anchorlink_class
c.text = ""
for elem in c:
anchor.append(elem)
while len(c):
c.remove(c[0])
c.append(anchor)
def add_permalink(self, c, elem_id):
permalink = etree.Element("a")
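        # AMP_SUBSTITUTE is markdown's protected-ampersand placeholder, so the
        # "%spara;" text below survives serialization as the &para; (pilcrow) entity.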
permalink.text = ("%spara;" % AMP_SUBSTITUTE
if self.use_permalinks is True
else self.use_permalinks)
permalink.attrib["href"] = "#" + elem_id
permalink.attrib["class"] = self.permalink_class
if self.permalink_title:
permalink.attrib["title"] = self.permalink_title
c.append(permalink)
def build_toc_div(self, toc_list):
""" Return a string div given a toc list. """
div = etree.Element("div")
div.attrib["class"] = "toc"
# Add title to the div
if self.title:
header = etree.SubElement(div, "span")
header.attrib["class"] = "toctitle"
header.text = self.title
def build_etree_ul(toc_list, parent):
ul = etree.SubElement(parent, "ul")
for item in toc_list:
# List item link, to be inserted into the toc div
li = etree.SubElement(ul, "li")
link = etree.SubElement(li, "a")
link.text = item.get('name', '')
link.attrib["href"] = '#' + item.get('id', '')
if item['children']:
build_etree_ul(item['children'], li)
return ul
build_etree_ul(toc_list, div)
if 'prettify' in self.md.treeprocessors:
self.md.treeprocessors['prettify'].run(div)
return div
def run(self, doc):
# Get a list of id attributes
used_ids = set()
for el in doc.iter():
if "id" in el.attrib:
used_ids.add(el.attrib["id"])
toc_tokens = []
for el in doc.iter():
if isinstance(el.tag, str) and self.header_rgx.match(el.tag):
self.set_level(el)
if int(el.tag[-1]) < self.toc_top or int(el.tag[-1]) > self.toc_bottom:
continue
text = ''.join(el.itertext()).strip()
# Do not override pre-existing ids
if "id" not in el.attrib:
innertext = unescape(stashedHTML2text(text, self.md))
el.attrib["id"] = unique(self.slugify(innertext, self.sep), used_ids)
toc_tokens.append({
'level': int(el.tag[-1]),
'id': el.attrib["id"],
'name': unescape(stashedHTML2text(
el.attrib.get('data-toc-label', text), self.md, strip_entities=False
))
})
# Remove the data-toc-label attribute as it is no longer needed
if 'data-toc-label' in el.attrib:
del el.attrib['data-toc-label']
if self.use_anchors:
self.add_anchor(el, el.attrib["id"])
if self.use_permalinks not in [False, None]:
self.add_permalink(el, el.attrib["id"])
toc_tokens = nest_toc_tokens(toc_tokens)
div = self.build_toc_div(toc_tokens)
if self.marker:
self.replace_marker(doc, div)
# serialize and attach to markdown instance.
toc = self.md.serializer(div)
for pp in self.md.postprocessors:
toc = pp.run(toc)
self.md.toc_tokens = toc_tokens
self.md.toc = toc
class TocExtension(Extension):
TreeProcessorClass = TocTreeprocessor
def __init__(self, **kwargs):
self.config = {
"marker": ['[TOC]',
'Text to find and replace with Table of Contents - '
'Set to an empty string to disable. Defaults to "[TOC]"'],
"title": ["",
"Title to insert into TOC <div> - "
"Defaults to an empty string"],
"anchorlink": [False,
"True if header should be a self link - "
"Defaults to False"],
"anchorlink_class": ['toclink',
'CSS class(es) used for the link. '
'Defaults to "toclink"'],
"permalink": [0,
"True or link text if a Sphinx-style permalink should "
"be added - Defaults to False"],
"permalink_class": ['headerlink',
'CSS class(es) used for the link. '
'Defaults to "headerlink"'],
"permalink_title": ["Permanent link",
"Title attribute of the permalink - "
"Defaults to 'Permanent link'"],
"baselevel": ['1', 'Base level for headers.'],
"slugify": [slugify,
"Function to generate anchors based on header text - "
"Defaults to the headerid ext's slugify function."],
'separator': ['-', 'Word separator. Defaults to "-".'],
"toc_depth": [6,
'Define the range of section levels to include in'
'the Table of Contents. A single integer (b) defines'
'the bottom section level (<h1>..<hb>) only.'
'A string consisting of two digits separated by a hyphen'
'in between ("2-5"), define the top (t) and the'
'bottom (b) (<ht>..<hb>). Defaults to `6` (bottom).'],
}
super().__init__(**kwargs)
def extendMarkdown(self, md):
md.registerExtension(self)
self.md = md
self.reset()
tocext = self.TreeProcessorClass(md, self.getConfigs())
        # Headerid ext is set to '>prettify'. With this set to '_end',
        # it should always come after headerid ext (and honor ids assigned
        # by the header id extension) if both are used. Same goes for the
        # attr_list extension. This must come last because we don't want
        # to redefine ids after the toc is created. But we do want the toc prettified.
md.treeprocessors.register(tocext, 'toc', 5)
def reset(self):
self.md.toc = ''
self.md.toc_tokens = []
def makeExtension(**kwargs): # pragma: no cover
return TocExtension(**kwargs)
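# Usage sketch (illustrative, not part of the original module; assumes the
# Python-Markdown package this extension belongs to is importable):
#
#     import markdown
#     md = markdown.Markdown(extensions=[TocExtension(baselevel=2, toc_depth="2-4")])
#     html = md.convert("[TOC]\n\n# Title\n\n## Section")
#     print(md.toc)         # the rendered <div class="toc">...</div>
#     print(md.toc_tokens)  # the nested token list built by nest_toc_tokens()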
| [
"[email protected]"
] | |
644fab70a5b3af9bb5dfa778fe89ab854f219ed8 | 63d3a6255f2677f9d92205d62163b9d22a74c5c7 | /modules/dynadb/migrations/0058_auto_20161124_1741.py | 89a757a370f4bc40d44bddae34790dc82e409e84 | [
"Apache-2.0"
] | permissive | GPCRmd/GPCRmd | 9204f39b1bfbc800b13512b316e05e54ddd8af23 | 47d7a4e71025b70e15a0f752760873249932c54e | refs/heads/main | 2023-09-04T11:13:44.285629 | 2023-08-29T13:43:01 | 2023-08-29T13:43:01 | 260,036,875 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 616 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-11-24 16:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dynadb', '0057_auto_20161116_2004'),
]
operations = [
migrations.AlterField(
model_name='dyndbsubmissionmolecule',
name='type',
field=models.SmallIntegerField(blank=True, choices=[(0, 'Orthosteric ligand'), (1, 'Allosteric ligand'), (2, 'Crystallographic waters, lipids or ions'), (3, 'Other')], default=0, null=True),
),
]
| [
"[email protected]"
] | |
03c962b553ec91380beb9b3231464bb8fae29bc0 | fccb35b69307ae4848aeee484995100de624dedf | /toolbox/bulk_processing/invalid_address_processor.py | b00f886d424ecf8dbac31af077a0420d5a05d53a | [] | no_license | ONSdigital/census-rm-toolbox | 468d1cb8e901bc8ae87a693495b85080c9d85e9f | 17eeae06f859091805e3fd3d5f8e620500af6741 | refs/heads/master | 2023-08-21T13:23:55.614636 | 2021-05-18T07:44:31 | 2021-05-18T07:44:31 | 207,325,373 | 0 | 1 | null | 2023-07-25T17:05:59 | 2019-09-09T14:12:11 | Python | UTF-8 | Python | false | false | 1,827 | py | import logging
import uuid
from datetime import datetime
from structlog import wrap_logger
from toolbox.bulk_processing.bulk_processor import BulkProcessor
from toolbox.bulk_processing.processor_interface import Processor
from toolbox.bulk_processing.validators import case_exists_by_id, is_uuid, max_length, mandatory
from toolbox.config import Config
from toolbox.logger import logger_initial_config
class InvalidAddressProcessor(Processor):
file_prefix = Config.BULK_INVALID_ADDRESS_FILE_PREFIX
routing_key = Config.INVALID_ADDRESS_EVENT_ROUTING_KEY
exchange = Config.EVENTS_EXCHANGE
bucket_name = Config.BULK_INVALID_ADDRESS_BUCKET_NAME
project_id = Config.BULK_INVALID_ADDRESS_PROJECT_ID
schema = {
"case_id": [is_uuid(), case_exists_by_id()],
"reason": [mandatory(), max_length(255)]
}
def build_event_messages(self, row):
address_resolution = "AR"
return [{
"event": {
"type": "ADDRESS_NOT_VALID",
"source": "RM_BULK_INVALID_ADDRESS_PROCESSOR",
"channel": address_resolution,
"dateTime": datetime.utcnow().isoformat() + 'Z',
"transactionId": str(uuid.uuid4())
},
"payload": {
"invalidAddress": {
"reason": row['reason'],
"collectionCase": {
"id": row['case_id']
}
}
}
}]
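# For reference, a single CSV row such as
#     {'case_id': '6b29f9a6-...', 'reason': 'Demolished'}
# yields one ADDRESS_NOT_VALID message nesting the reason and case id under
# 'invalidAddress'; the id above is an illustrative placeholder, and the
# transactionId/dateTime fields are generated fresh for every message.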
def main():
logger_initial_config()
logger = wrap_logger(logging.getLogger(__name__))
logger.info('Started bulk processing invalid addresses', app_log_level=Config.LOG_LEVEL,
environment=Config.ENVIRONMENT)
BulkProcessor(InvalidAddressProcessor()).run()
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
29a6f8130b12ecd4a5d0cc05a93f14e402d00970 | fb8c10b50bd42db139330a8ed596e864bb8ae440 | /Tkinter_GUI_Python/7941OT_8_code/8.03 validation mode demo.py | 34ce49915abce5b3c1d9238126e6942966dd0de9 | [] | no_license | masb01/test | 628c9bc4297e6c1b745503e297682258553d87cf | 7e402d5744d4395ebd660f3c05044bf3df16ce7c | refs/heads/master | 2020-03-18T20:59:38.142095 | 2018-06-10T15:15:16 | 2018-06-10T15:15:16 | 135,251,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,876 | py | """
Code illustration: 8.03
Validation Modes Demo
Tkinter GUI Application Development Hotshot
"""
import Tkinter as tk
class ValidateModeDemo():
def __init__(self):
self.root = tk.Tk()
vcmd = (self.root.register(self.validate), '%V')
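        # '%V' asks Tk to pass the condition that fired the callback
        # ('focusin', 'focusout', 'key' or 'forced') as the argument.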
# validate = none mode - will not call validate method ever.
tk.Label (text='None').pack()
tk.Entry(self.root, validate="none", validatecommand=vcmd).pack()
# validate = focus mode - will call validate method on focusin and focusout
tk.Label (text='Focus').pack()
tk.Entry(self.root, validate="focus", validatecommand=vcmd).pack()
# validate = focusin mode - - will call validate method on focusin
tk.Label (text='Focusin').pack()
tk.Entry(self.root, validate="focusin", validatecommand=vcmd).pack()
# validate = focusout mode - will call validate method on focusout
tk.Label (text='Focus Out').pack()
tk.Entry(self.root, validate="focusout", validatecommand=vcmd).pack()
# validate = Key mode - will call validate method only when you type something or edit the entry
tk.Label (text='key').pack()
tk.Entry(self.root, validate="key", validatecommand=vcmd).pack()
# validate = all mode - will call validate method on focus and key events
tk.Label (text='all').pack()
tk.Entry(self.root, validate="all", validatecommand=vcmd).pack()
self.root.mainloop()
    def validate(self, v):
        print 'Called Just Now Via Mode %s' % v
        # This is where you would validate your data and return True or False
        # depending on whether the data is valid or not.
        # For now, let us just return True in all cases.
        return True
app = ValidateModeDemo() | [
"masb160119672gmail.com"
] | masb160119672gmail.com |
173c766edb003276de453cf1019773545a9f23ff | 4832856f115ef30fb9f611e92d7e473d531f6c4d | /setup.py | 19d11a618feed45d888db08e66dea88ba2d3197c | [
"Apache-2.0"
] | permissive | nycto-hackerone/OWASP-Nettacker | 9c2a227eaf5175ce42181650911ae795846637a3 | 33f7e4a53b4773c91be57bfb535baec3478ca85c | refs/heads/master | 2021-01-24T00:04:33.864977 | 2018-02-24T11:34:45 | 2018-02-24T11:34:45 | 122,752,364 | 1 | 1 | Apache-2.0 | 2018-02-24T15:26:53 | 2018-02-24T15:26:53 | null | UTF-8 | Python | false | false | 2,347 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
from setuptools import setup
from setuptools import find_packages
def package_files(directory):
"""
This function was created to crawl the directory and find files (none python files) using os.walk
Args:
directory: path to crawl
Returns:
list of package files in an array
"""
paths = []
for (path, directories, filenames) in os.walk(directory):
for filename in filenames:
paths.append(os.path.join('..', path, filename))
return paths
# read requirements from requirements.txt
requirements = open("requirements.txt").read().rsplit()
if sys.version_info[0] == 2:
# add scapy requirement name for python 2.x
requirements.append("scapy")
else:
# add scapy requirement name for python 3.x
requirements.append("scapy-python3")
setup(
name="OWASP-Nettacker",
version='0.0.1',
description='OWASP Nettacker - Automated Penetration Testing Framework',
packages=find_packages(),
package_data={"": package_files("web") + ["../api/database.sqlite3"]}, # package files + database file
include_package_data=True,
install_requires=requirements,
url="https://github.com/viraintel/OWASP-Nettacker",
license="Apache-2.0",
author="Ali Razmjoo",
author_email="[email protected]",
    long_description="Automated Penetration Testing Framework - OWASP Nettacker project is created to"
                     " automate information gathering, vulnerability scanning and eventually generating"
                     " a report for networks, including services, bugs, vulnerabilities, misconfigurations,"
                     " and other information. This software utilizes TCP SYN, ACK, ICMP and many other"
                     " protocols in order to detect and bypass Firewall/IDS/IPS devices. By leveraging a"
                     " unique method for discovering protected services and devices such as SCADA, it"
                     " gains a competitive edge over other scanners, making it one of the best.",
scripts=["scripts/nettacker.bat" if sys.platform == "win32" or sys.platform == "win64"
else "scripts/nettacker", "nettacker.py"] # script files for windows and other OS
)
| [
"[email protected]"
] | |
1c94ccbfa89e4eb8af6b2127e6d3be14f1a2962c | 3b1c660676da8459bc5587c7a2fb5b4df3446c8f | /arch2.py | 8b3b597a8c1441b21af7326f504bf1b19e335c49 | [] | no_license | mariogeiger/galaxy_zoo | b28930295023d2f0cb61c266e48438b8d1782875 | 2cda9c15fe29b982fcb754b29b9427400d647859 | refs/heads/master | 2021-06-14T17:47:43.431834 | 2017-03-09T07:44:15 | 2017-03-09T07:44:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,771 | py | # pylint: disable=C,R,no-member
import tensorflow as tf
import numpy as np
import math
def dihedral_fullyconnected(x, f_out=None, std=None):
f_in = x.get_shape().as_list()[1]
assert f_in % 8 == 0
if f_out is None:
f_out = f_in
assert f_out % 8 == 0
if std is None:
std = math.sqrt(2.0 / f_in)
with tf.name_scope("fc_8x{}_8x{}".format(f_in // 8, f_out // 8)):
ww = tf.Variable(tf.truncated_normal([f_in, f_out // 8], stddev=std), name="W")
b = tf.Variable(tf.constant(0.0, shape=[f_out // 8]), name="b")
mt = np.array([
[0, 1, 2, 3, 4, 5, 6, 7], [1, 0, 3, 2, 5, 4, 7, 6],
[2, 3, 0, 1, 6, 7, 4, 5], [3, 2, 1, 0, 7, 6, 5, 4],
[4, 6, 5, 7, 0, 2, 1, 3], [5, 7, 4, 6, 1, 3, 0, 2],
[6, 4, 7, 5, 2, 0, 3, 1], [7, 5, 6, 4, 3, 1, 2, 0]])
# tau[mt[a,b]] = tau[a] o tau[b]
iv = np.array([0, 1, 2, 3, 4, 6, 5, 7])
# tau[iv[a]] is the inverse of tau[a]
wws = tf.split(0, 8, ww)
W = tf.concat(1, [ # merge 8 part of the output
tf.concat(0, [ # merge 8 part of the input
wws[mt[iv[j], i]]
for i in range(8)])
for j in range(8)])
return tf.matmul(x, W) + tf.tile(b, [8])
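# Note on the layer above: only f_in x (f_out // 8) weights are stored; the
# block-permuted tiling of W ties the 8 dihedral copies together, so a batch
# of shape [N, f_in] maps to [N, f_out] with 8-fold weight sharing.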
def dihedral_convolution(x, f_out=None, s=1, w=3, first=False, std=None, padding='SAME'):
f_in = x.get_shape().as_list()[3]
if f_out is None:
f_out = f_in
assert f_out % 8 == 0
if std is None:
std = math.sqrt(2.0 / (w * w * f_in))
with tf.name_scope("conv_{}{}_8x{}".format('' if first else '8x', f_in if first else f_in//8, f_out//8)):
ww = tf.Variable(tf.random_normal([w, w, f_in, f_out // 8], stddev=std), name="W")
b = tf.Variable(tf.constant(0.0, shape=[f_out // 8]), name="b")
ws = [None] * 8
ws[0] = ww # tau[0]
ws[1] = tf.reverse(ww, [False, True, False, False]) # tau[1]
ws[2] = tf.reverse(ww, [True, False, False, False]) # tau[2]
ws[3] = tf.reverse(ww, [True, True, False, False]) # tau[3]
ws[4] = tf.transpose(ww, [1, 0, 2, 3]) # tau[4]
ws[5] = tf.reverse(ws[4], [False, True, False, False]) # tau[5]
ws[6] = tf.reverse(ws[4], [True, False, False, False]) # tau[6]
ws[7] = tf.reverse(ws[4], [True, True, False, False]) # tau[7]
# ws[j] = tau[j] F_all
if first:
W = tf.concat(3, ws)
else:
assert f_in % 8 == 0
mt = np.array([
[0, 1, 2, 3, 4, 5, 6, 7], [1, 0, 3, 2, 5, 4, 7, 6],
[2, 3, 0, 1, 6, 7, 4, 5], [3, 2, 1, 0, 7, 6, 5, 4],
[4, 6, 5, 7, 0, 2, 1, 3], [5, 7, 4, 6, 1, 3, 0, 2],
[6, 4, 7, 5, 2, 0, 3, 1], [7, 5, 6, 4, 3, 1, 2, 0]])
# tau[mt[a,b]] = tau[a] o tau[b]
iv = np.array([0, 1, 2, 3, 4, 6, 5, 7])
# tau[iv[a]] is the inverse of tau[a]
wws = [None] * 8
for j in range(8):
wws[j] = tf.split(2, 8, ws[j])
# wws[j][i] = tau[j] F_i
W = tf.concat(3, [ # merge 8 part of the output
tf.concat(2, [ # merge 8 part of the input
wws[j][mt[iv[j], i]]
for i in range(8)])
for j in range(8)])
# y = Conv(x, W)
return tf.nn.conv2d(x, W, [1, s, s, 1], padding) + tf.tile(b, [8])
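# Note on the layer above: ws[j] holds the filter bank transformed by the j-th
# dihedral element (flips/transposes), and the mt/iv block permutation wires the
# input and output groups so the convolution is equivariant: transforming the
# input image permutes the 8 output blocks instead of changing their content.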
def dihedral_pool(x):
shape = x.get_shape().as_list()
f_in = shape[-1]
assert f_in % 8 == 0
with tf.name_scope("dihedral_pool_8x{}".format(f_in // 8)):
xs = tf.split(len(shape) - 1, 8, x)
return tf.div(tf.add_n(xs), 8.0)
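# Averaging the 8 group copies above makes the pooled features invariant under
# the dihedral transformations the earlier layers are equivariant to.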
def dihedral_batch_normalization(x, acc):
depth = x.get_shape().as_list()[3]
assert depth % 8 == 0
with tf.name_scope("bn_8x{}".format(depth // 8)):
m, v = moments(dihedral_pool(x), axes=[0, 1, 2])
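        # 'acc' is the moving-average update rate for the running statistics:
        # acc=1.0 trusts only the current batch, while acc=0.0 freezes the
        # accumulators (the value used at prediction time below).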
acc_m = tf.Variable(tf.constant(0.0, shape=[depth // 8]), trainable=False, name="acc_m")
acc_v = tf.Variable(tf.constant(1.0, shape=[depth // 8]), trainable=False, name="acc_v")
acc_m = tf.assign(acc_m, (1.0 - acc) * acc_m + acc * m)
acc_v = tf.assign(acc_v, (1.0 - acc) * acc_v + acc * v)
m = tf.tile(acc_m, [8])
v = tf.tile(acc_v, [8])
m.set_shape([depth])
v.set_shape([depth])
beta = tf.tile(tf.Variable(tf.constant(0.0, shape=[depth // 8])), [8])
gamma = tf.tile(tf.Variable(tf.constant(1.0, shape=[depth // 8])), [8])
return tf.nn.batch_normalization(x, m, v, beta, gamma, 1e-3)
def pool22(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def moments(x, axes):
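    # Variance via E[x^2] - E[x]^2 in a single pass; cheap, though it can lose
    # precision when the mean is large relative to the spread.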
m = tf.reduce_mean(x, axes)
v = tf.reduce_mean(tf.square(x), axes) - tf.square(m)
return m, v
class CNN:
# pylint: disable=too-many-instance-attributes
def __init__(self):
self.tfx = None
self.tfl = None
self.tfp = None
self.tfy = None
self.tftrain_step = None
self.mse = None
self.tfkp = None
self.acc = None
self.train_counter = 0
self.test = None
def create_architecture(self):
self.tfkp = tf.placeholder(tf.float32)
self.acc = tf.placeholder(tf.float32)
x = self.tfx = tf.placeholder(tf.float32, [None, 424, 424, 3])
def augmentation(x):
x = tf.map_fn(lambda x: tf.image.random_brightness(x, max_delta=16. / 255.), x)
x = tf.map_fn(lambda x: tf.image.random_contrast(x, lower=0.75, upper=1.25), x)
return x
x = tf.cond(self.tfkp < 0.99, lambda: augmentation(x), lambda: x)
x = tf.nn.relu(dihedral_convolution(x, 8 * 4, w=6, s=2, first=True, padding='VALID'))
x = tf.nn.relu(dihedral_convolution(x))
x = dihedral_batch_normalization(x, self.acc)
assert x.get_shape().as_list() == [None, 210, 210, 8 * 4]
x = tf.nn.relu(dihedral_convolution(x, 8 * 8, w=4, s=2, padding='VALID'))
x = tf.nn.relu(dihedral_convolution(x, padding='VALID'))
x = dihedral_batch_normalization(x, self.acc)
assert x.get_shape().as_list() == [None, 102, 102, 8 * 8]
x = tf.nn.relu(dihedral_convolution(x, 8 * 16, w=4, s=2, padding='VALID'))
x = tf.nn.relu(dihedral_convolution(x))
x = dihedral_batch_normalization(x, self.acc)
assert x.get_shape().as_list() == [None, 50, 50, 8 * 16]
x = tf.nn.relu(dihedral_convolution(x, 8 * 32, w=4, s=2, padding='VALID'))
x = tf.nn.relu(dihedral_convolution(x, padding='VALID'))
x = dihedral_batch_normalization(x, self.acc)
assert x.get_shape().as_list() == [None, 22, 22, 8 * 32]
x = tf.nn.dropout(x, self.tfkp)
x = tf.nn.relu(dihedral_convolution(x, 8 * 64, w=4, s=2, padding='VALID'))
x = tf.nn.dropout(x, self.tfkp)
x = tf.nn.relu(dihedral_convolution(x, padding='VALID'))
x = dihedral_batch_normalization(x, self.acc)
assert x.get_shape().as_list() == [None, 8, 8, 8 * 64]
x = tf.nn.dropout(x, self.tfkp)
x = tf.nn.relu(dihedral_convolution(x, 8 * 128, w=4, s=2, padding='VALID'))
x = tf.nn.dropout(x, self.tfkp)
x = tf.nn.relu(dihedral_convolution(x, w=3, padding='VALID'))
x = dihedral_batch_normalization(x, self.acc)
assert x.get_shape().as_list() == [None, 1, 1, 8 * 128]
x = tf.reshape(x, [-1, 8 * 128])
x = tf.nn.dropout(x, self.tfkp)
x = dihedral_fullyconnected(x, 8 * 256)
x = tf.nn.dropout(x, self.tfkp)
x = dihedral_fullyconnected(x, 8 * 37)
self.test = x
x = dihedral_pool(x)
assert x.get_shape().as_list() == [None, 37]
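        # The chained multiplications below encode the Galaxy Zoo decision
        # tree: each softmax block answers one question and is scaled by the
        # probability of the parent answer that leads to it (e.g. Class 2 is
        # only reached when Class 1 answered "features/disk", hence c1[:, 1:2]).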
c1 = tf.nn.softmax(x[:, 0:3])
c2 = tf.nn.softmax(x[:, 3:5]) * c1[:, 1:2]
c3 = tf.nn.softmax(x[:, 5:7]) * c2[:, 1:2]
c4 = tf.nn.softmax(x[:, 7:9]) * c2[:, 1:2]
c5 = tf.nn.softmax(x[:, 9:13]) * c2[:, 1:2]
c6 = tf.nn.softmax(x[:, 13:15])
c7 = tf.nn.softmax(x[:, 15:18]) * c1[:, 0:1]
c8 = tf.nn.softmax(x[:, 18:25]) * c6[:, 0:1]
c9 = tf.nn.softmax(x[:, 25:28]) * c2[:, 0:1]
c10 = tf.nn.softmax(x[:, 28:31]) * c4[:, 0:1]
c11 = tf.nn.softmax(x[:, 31:37]) * c4[:, 0:1]
self.tfp = tf.concat(1, [c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11])
self.tfy = tf.placeholder(tf.float32, [None, 37])
self.mse = tf.reduce_mean(tf.square(self.tfp - self.tfy))
self.tftrain_step = tf.train.AdamOptimizer(0.001).minimize(self.mse)
@staticmethod
def prepare(images_path, labels_csv):
import csv
import os
with open(labels_csv) as f:
reader = csv.reader(f)
rows = [row for row in reader]
labels = np.array([[float(x) for x in r[1:]] for r in rows[1:]]).astype(np.float32)
files = [images_path + '/' + f for f in sorted(os.listdir(images_path))]
n = 2000 # for the test set
return (files[:n], labels[:n]), (files[n:], labels[n:])
@staticmethod
def load(files):
from scipy.ndimage import imread
n = len(files)
xs = np.zeros((n, 424, 424, 3), dtype=np.float32)
for i in range(n):
xs[i] = imread(files[i], mode='RGB').astype(np.float32) / 256.0
return xs
@staticmethod
def batch(files, labels):
ids = np.random.choice(len(files), 15, replace=False)
xs = CNN.load([files[i] for i in ids])
ys = labels[ids]
return xs, ys
def train(self, session, xs, ys, options=None, run_metadata=None):
acc = math.exp(-self.train_counter / 5000.0)
_, mse = session.run([self.tftrain_step, self.mse],
feed_dict={self.tfx: xs, self.tfy: ys, self.tfkp: 0.5, self.acc: acc},
options=options, run_metadata=run_metadata)
self.train_counter += 1
return mse
def train_timeline(self, session, xs, ys, filename='timeline.json'):
from tensorflow.python.client import timeline
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
mse = self.train(session, xs, ys, run_options, run_metadata)
# google chrome : chrome://tracing/
tl = timeline.Timeline(run_metadata.step_stats)
ctf = tl.generate_chrome_trace_format()
with open(filename, 'w') as f:
f.write(ctf)
return mse
def predict(self, session, xs):
return session.run(self.tfp,
feed_dict={self.tfx: xs, self.tfkp: 1.0, self.acc: 0.0})
def predict_mse(self, session, xs, ys):
return session.run([self.tfp, self.mse],
feed_dict={self.tfx: xs, self.tfy: ys, self.tfkp: 1.0, self.acc: 0.0})
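# Minimal training-loop sketch (illustrative only; the paths are placeholders
# and the initializer name depends on the TF version this old API targets):
#
#     cnn = CNN()
#     cnn.create_architecture()
#     (test_f, test_y), (train_f, train_y) = CNN.prepare('images/', 'labels.csv')
#     with tf.Session() as sess:
#         sess.run(tf.initialize_all_variables())  # tf.global_variables_initializer() on newer TF
#         for step in range(10000):
#             xs, ys = CNN.batch(train_f, train_y)
#             mse = cnn.train(sess, xs, ys)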
| [
"[email protected]"
] | |
db9f504a531a4656b87987f9b9f0af2db5399ace | 7896baeb297e131bab53cfbff712d1fd77bccede | /gombru/style_images_COCO.py | a633bfa3a8ce2a1b81682b5d1c45eaa1fa8824bc | [
"Apache-2.0"
] | permissive | gombru/magenta_styleTransfer | 599b85b24dd406a82df271bb769fe3dc1fa19f0b | bd41b0bf3bb18988653e4a355d95dac8632e814f | refs/heads/master | 2020-04-11T23:11:47.133793 | 2019-02-12T12:12:50 | 2019-02-12T12:12:50 | 162,159,299 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,049 | py | from magenta.models.image_stylization import image_stylization_transform
from PIL import Image
import os
import warnings
import random
warnings.filterwarnings("ignore")
results_path = "/home/Imatge/ssd2/ICDAR_2015_IndidentalSceneText/train/img_styled_icdar/"
num_styles = 96 # 9, 32, 34
# checkpoint = "/home/raulgomez/datasets/styleTransferMiro/models/multistyle-pastiche-generator-varied.ckpt"
# checkpoint = "/home/raulgomez/datasets/styleTransferMiro/models/miro"
checkpoint = "/home/Imatge/hd/datasets/styleTransferMiro/train/icdar"
# which_styles = []
# for i in range(num_styles): which_styles.append(i)
input_images_dir = "/home/Imatge/ssd2/ICDAR_2015_IndidentalSceneText/train/img/"
input_images = []
for file in os.listdir(input_images_dir): input_images.append(file.split('/')[-1])
# legible_ids_dir = "/home/Imatge/ssd2/COCO-Text/gt_COCO_format_legible/"
# legible_ids = []
# for file in os.listdir(legible_ids_dir): legible_ids.append(file.split('/')[-1].strip('.json'))
# final_ids = [id for id in input_images if id.strip('.jpg') in legible_ids]
# del input_images
# del legible_ids
# print("Number of images with legible text: " + str(len(final_ids)))
batch_size = 32
i=0
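# 'i' walks the file list in steps of batch_size; four of the 96 styles are
# re-sampled for every batch below.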
while True:
cur_styles = random.sample(range(0, 96), 4)
# cur_styles.remove(0)
# cur_styles.remove(6)
# cur_styles.remove(25)
print(" --> Starting batch from" + str(i) + " with styles " + str(cur_styles))
if i > len(input_images):
break
last_image = i + batch_size
if last_image > len(input_images):
last_image = len(input_images)
cur_input_images = input_images[i:last_image]
result_images = image_stylization_transform.multiple_input_images(checkpoint, num_styles, input_images_dir, cur_input_images, cur_styles)
for k, v in result_images.items():
v = v[0,:,:,:]
pil_image = Image.fromarray((v*255).astype('uint8'))
pil_image.save(results_path + k + '.png')
i+=batch_size
print(" --> " + str(i) + " out of " + str(len(input_images)))
print("DONE")
| [
"[email protected]"
] | |
42e9fb0e23fdf748d185ab5a38dff35ff23cb749 | 83048ab1abb6941ed0b19fb5e5ff4a9d14b48e8c | /CODEFORCES/park_light.py | 880f197a915fce16db419ae24c4356fa190e9c66 | [] | no_license | harshitalpha/Algorithms | ebad07cc77516ab5c35ae414462d10a38d5ef97e | 2f7dcf4c3bb4390267231c7c96f7e76399c0166e | refs/heads/master | 2021-07-14T17:34:02.546583 | 2020-06-25T06:38:39 | 2020-06-25T06:38:39 | 178,813,562 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | import math
for _ in range(int(input())):
    r, c = [int(s) for s in input().split()]
    # The answer is ceil(r * c / 2): an even r splits exactly in half, while
    # an odd r leaves one extra row that needs ceil(c / 2) more.
    if r % 2 == 0:
        ans = r // 2 * c
    else:
        ans = (r - 1) // 2 * c + math.ceil(c / 2)
    print(ans) | [
"[email protected]"
] | |
02ead0e966188d8db902b52401360de29dc3478e | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /smurf/smurf_end_to_end_test.py | f50fb3e7c366d7a6b332c231de7d1e830aa81c90 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 2,453 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that training and evaluation work as expected."""
# pylint:skip-file
import contextlib
import io
from absl import flags
from absl.testing import absltest
from smurf import smurf_flags
from smurf import smurf_trainer
FLAGS = flags.FLAGS
class SmurfEndToEndTest(absltest.TestCase):
def test_training_on_spoof(self):
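    # train_on/eval_on appear to take '<dataset>:<path>' strings; 'spoof'
    # generates synthetic data, so the path component is unused here.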
FLAGS.eval_on = ''
FLAGS.train_on = 'spoof:unused'
FLAGS.plot_dir = '/tmp/spoof_train'
FLAGS.check_data = True
FLAGS.num_train_steps = 1
FLAGS.epoch_length = 1
FLAGS.evaluate_during_train = False
FLAGS.height = 296
FLAGS.width = 296
f = io.StringIO()
with contextlib.redirect_stdout(f):
smurf_trainer.train_eval()
# Check that the relevant metrics are printed to stdout.
stdout_message = f.getvalue()
self.assertIn('total-loss: ', stdout_message)
self.assertIn('data-time: ', stdout_message)
self.assertIn('learning-rate: ', stdout_message)
self.assertIn('train-time: ', stdout_message)
def test_evaluating_on_spoof(self):
FLAGS.eval_on = 'spoof:unused'
FLAGS.check_data = False
FLAGS.train_on = ''
FLAGS.plot_dir = '/tmp/spoof_eval'
FLAGS.height = 296
FLAGS.width = 296
FLAGS.num_train_steps = 1
FLAGS.evaluate_during_train = True
f = io.StringIO()
with contextlib.redirect_stdout(f):
smurf_trainer.train_eval()
# Check that the relevant metrics are printed to stdout.
stdout_message = f.getvalue()
self.assertIn('spoof-EPE: ', stdout_message)
self.assertIn('spoof-occl-f-max: ', stdout_message)
self.assertIn('spoof-ER: ', stdout_message)
self.assertIn('spoof-best-occl-thresh: ', stdout_message)
self.assertIn('spoof-eval-time(s): ', stdout_message)
self.assertIn('spoof-inf-time(ms): ', stdout_message)
if __name__ == '__main__':
absltest.main()
| [
"[email protected]"
] |