Dataset schema (one record per source file):

| Column | Type | Range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3-616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0-112 |
| license_type | string | 2 classes |
| repo_name | string | length 5-115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3-10.2M |
| authors | list | length 1 |
| author_id | string | length 1-132 |

Each record below is a flattened row of this table; the `content` column carries the full source file.
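A record can be pulled and inspected with the `datasets` library. This is a minimal sketch, assuming the table above is published as a Hugging Face dataset; the repository ID `org/python-code-corpus` is a hypothetical placeholder, not the real name:

```python
# Minimal sketch (assumption: the schema above is served as a HF dataset;
# "org/python-code-corpus" is a placeholder ID, not a real repository).
from datasets import load_dataset

ds = load_dataset("org/python-code-corpus", split="train", streaming=True)
for record in ds.take(1):
    # metadata columns from the schema above
    print(record["repo_name"], record["path"], record["license_type"])
    # the `content` column holds the raw source text
    print(record["content"][:200])
```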
676a8d4121ad27fd5bfa82844f08c833b388178c | ffab02cf7e1213f91923cb1343cef4616a7de5a7 | /venv/bin/isort | 6d23f4819e78f95b81f0dc605acf081309c42fe5 | []
| no_license | mornville/flask_blog | 4e50d6c3f835274589b278ce14f2f445b691b087 | bf66060f3f519170e3d4865e6d85b6543359e9b0 | refs/heads/master | 2021-12-28T08:27:04.556959 | 2019-10-01T14:57:09 | 2019-10-01T14:57:09 | 203,522,537 | 0 | 0 | null | 2021-12-13T20:16:58 | 2019-08-21T06:37:56 | Python | UTF-8 | Python | false | false | 251 |
#!/Users/ashutoshjha/Desktop/flask_blog/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from isort.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| ["[email protected]"] | ||
1dffe5f62462692c17b0917a0d9f33174704c851 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_200/1215.py | 2727d86b5e032b798fe4ae7360d004c88b3cc807 | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 757 | py |
def tidy(n):
    a = list(str(n))
    if len(a) >= 2:
        for i in range(len(a)-1):
            if a[i] > a[i+1]:
                a[i] = str(int(a[i])-1)
                for j in range(i+1, len(a)):
                    a[j] = '9'
    a = ''.join(a)
    out = int(a)
    return out

def check_tidy(n):
    a = tidy(n)
    b = list(str(a))
    b.sort()
    b = ''.join(b)
    b = int(b)
    if a == b:
        return a
    else:
        return check_tidy(a)
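# Illustrative trace (added, not part of the original submission):
# check_tidy(132) -> tidy(132) resolves the descent 3>2 to 129, whose
# digits are already non-decreasing, so 129 is the final answer.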
in_f = open("i.in", 'r')
ou_f = open("o.out", 'w')
T = int(in_f.readline())
for i in range(T):
    s = in_f.readline().strip()
    k = int(s)
    out = check_tidy(k)
    j = "Case #" + str(i+1) + ": " + str(out) + "\n"
    ou_f.write(j)
in_f.close()
ou_f.close()
| ["[email protected]"] | |
8f58e730976b0a677b30bf61b7b99c9ee4cc60a3 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnaiddecamp.py | 19e17d66f304241ad72952f45ad26016da559867 | []
| no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 139 | py |
ii = [('MarrFDI3.py', 1), ('ClarGE2.py', 19), ('ClarGE.py', 2), ('WadeJEB.py', 1), ('SoutRD.py', 1), ('ThomGLG.py', 1), ('MackCNH2.py', 1)]
| ["[email protected]"] | |
93149bb3a6b9892081504d75a719a82d1a7fa2e1 | f0a44b63a385e1c0f1f5a15160b446c2a2ddd6fc | /examples/transform_cube.py | f9f45274bed9265c28b79a03dfd4e3ccccfa5ad1 | ["MIT"]
| permissive | triroakenshield/ezdxf | 5652326710f2a24652605cdeae9dd6fc58e4f2eb | 82e964a574bcb86febc677bd63f1626318f51caf | refs/heads/master | 2023-08-17T12:17:02.583094 | 2021-10-09T08:23:36 | 2021-10-09T08:23:36 | 415,426,069 | 1 | 0 | MIT | 2021-10-09T21:31:25 | 2021-10-09T21:31:25 | null | UTF-8 | Python | false | false | 1,407 | py |
# Copyright (c) 2020-2021 Manfred Moitzi
# License: MIT License
from pathlib import Path
import math
import ezdxf
from ezdxf import zoom
from ezdxf.math import UCS
DIR = Path("~/Desktop/Outbox").expanduser()
p = [
    (0, 0, 0),
    (1, 0, 0),
    (1, 1, 0),
    (0, 1, 0),
    (0, 0, 1),
    (1, 0, 1),
    (1, 1, 1),
    (0, 1, 1),
]
doc = ezdxf.new()
msp = doc.modelspace()
block = doc.blocks.new("block_4m3")
cube = block.add_mesh()
with cube.edit_data() as mesh_data:
    mesh_data.add_face([p[0], p[1], p[2], p[3]])
    mesh_data.add_face([p[4], p[5], p[6], p[7]])
    mesh_data.add_face([p[0], p[1], p[5], p[4]])
    mesh_data.add_face([p[1], p[2], p[6], p[5]])
    mesh_data.add_face([p[3], p[2], p[6], p[7]])
    mesh_data.add_face([p[0], p[3], p[7], p[4]])
    mesh_data.optimize()
# Place untransformed cube, don't use the rotation
# attribute unless you really need it, just
# transform the UCS.
blockref = msp.add_blockref(name="block_4m3", insert=(0, 0, 0))
# First rotation about the local x-axis
ucs = UCS().rotate_local_x(angle=math.radians(45))
# same as a rotation around the WCS x-axis:
# ucs = UCS().rotate(axis=(1, 0, 0), angle=math.radians(45))
# Second rotation about the WCS z-axis
ucs = ucs.rotate(axis=(0, 0, 1), angle=math.radians(45))
# Last step transform block reference from UCS to WCS
blockref.transform(ucs.matrix)
zoom.extents(msp)
doc.saveas(DIR / "cube.dxf")
| ["[email protected]"] | |
40c80298125d22d148038ffefb051f1a267a1a50 | 6e3b8a04a074c30cf4fc43abe7a208f772df795b | /Mid-Exam/2-task.py | 58c67fe8bae90b41f94cde4ab24bb1499bf056e6 | []
| no_license | majurski/Softuni_Fundamentals | dc0808fdaab942896eebfb208fb6b291df797752 | bf53a9efdcb45eb911624ab86d762a6281391fb8 | refs/heads/master | 2022-11-29T06:06:06.287984 | 2020-08-10T19:36:18 | 2020-08-10T19:36:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 730 | py |
arr = input().split()
new_arr = list(map(int, arr))
line = input()
while line != "end":
    value = line.split()
    command = value[0]
    if command == "swap":
        index_1 = int(value[1])
        index_2 = int(value[2])
        new_arr[index_1], new_arr[index_2] = new_arr[index_2], new_arr[index_1]
    elif command == "multiply":
        index_1 = int(value[1])
        index_2 = int(value[2])
        multiplied = new_arr[index_1] * new_arr[index_2]
        new_arr[index_1] = multiplied
    elif command == "decrease":
        # decrement every element in place; rebinding the loop variable
        # (val -= 1) would leave the list unchanged
        new_arr = [val - 1 for val in new_arr]
    line = input()
print(", ".join(map(str, new_arr)))
# print(', '.join([str(x) for x in new_arr]))
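# Example run (illustrative): input "1 2 3 4", then commands
# "swap 0 1" -> [2, 1, 3, 4], "multiply 1 2" -> [2, 3, 3, 4],
# "decrease" -> [1, 2, 2, 3], and "end" prints "1, 2, 2, 3".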
| ["[email protected]"] | |
72791e17e71456aade20cc9cc4e32de6523e144b | 34f5146e25144d4ceced8af38b5de2f8fff53fdd | /ui/mainwindow.py | 158a3002e7c464033c18697b89cd33491f8128a1 | []
| no_license | fadiga/mstock | 3271eeb0b8339b27347bbb70b96bc1f161ed6901 | a5f621ed58bd881d9a232498ef23762a5f9c186f | refs/heads/master | 2021-05-25T11:56:28.430965 | 2017-09-25T19:08:27 | 2017-09-25T19:08:27 | 39,653,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,419 | py |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
# maintainer: Fad
from __future__ import (
unicode_literals, absolute_import, division, print_function)
from PyQt4.QtGui import QIcon
from PyQt4.QtCore import Qt
from Common.ui.common import FMainWindow, QToolBadgeButton
from ui.menutoolbar import MenuToolBar
from ui.menubar import MenuBar
from Common.ui.statusbar import GStatusBar
from ui.dashboard import DashbordViewWidget
from configuration import Config
class MainWindow(FMainWindow):
    def __init__(self):
        FMainWindow.__init__(self)
        self.setWindowIcon(QIcon.fromTheme(
            'logo', QIcon(u"{}".format(Config.APP_LOGO))))
        self.menubar = MenuBar(self)
        self.setMenuBar(self.menubar)
        self.toolbar = MenuToolBar(self)
        self.addToolBar(Qt.LeftToolBarArea, self.toolbar)
        self.statusbar = GStatusBar(self)
        self.setStatusBar(self.statusbar)
        self.page = DashbordViewWidget
        self.change_context(self.page)

    def page_width(self):
        return self.width() - 100

    def add_badge(self, msg, count):
        b = QToolBadgeButton(self)
        b.setText(msg)
        b.setCounter(count)
        self.toolbar.addWidget(b)

    def exit(self):
        self.logout()
        self.close()

    def active_menu(self):
        self.menubar = MenuBar(self)
        self.setMenuBar(self.menubar)
| ["[email protected]"] | |
1e97caa9740ddd276af8721952d53c64e6237066 | de8b832a3c804837300b9974dc0151d9294fa573 | /code/experiment/GenderSoundNet/ex18_1_1_1_1_1_1_1_1_1_1_1_1_1_1/genderSoundNet.py | d318188bf1b8d63c08f77854ab0b089a4eff19a9 | []
| no_license | YuanGongND/Deep_Speech_Visualization | fcff2ac93e5adffd707b98eb7591f50fe77c1274 | 73a79e3596d9a5ee338eafb9a87b227696de25d1 | refs/heads/master | 2021-07-19T23:00:36.294817 | 2017-10-28T01:04:59 | 2017-10-28T01:04:59 | 105,332,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,377 | py |
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 26 21:03:04 2017
Conduct experiments on IEMOCAP, three labels:
96001: emotion(0-4, 5 = other emotions)
96002: speaker(0-9)
96003: gender(male=0, female=1)
@author: Kyle
"""
import os
from sys import argv
_, newFolderName, gpuI = argv
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpuI)
import sys
sys.path.append("../../model/")
import soundNet
import waveCNN
sys.path.append("../")
import expUtil
import numpy as np
import keras
from keras import backend as K
import matplotlib.pyplot as plt
import shutil
#%% creat folder to save model, the code, and model configuration
while os.path.isdir( newFolderName ):
    newFolderName = newFolderName + '_1'
    print( 'exist' )
os.mkdir( newFolderName )
shutil.copy( os.path.basename(__file__), newFolderName ) # copy this file to the new folder
shutil.copy( '../../model/soundNet.py', newFolderName )
shutil.copy( '../../model/waveCNN.py', newFolderName )
shutil.copy( '../expUtil.py', newFolderName )
# put all configuration here
thisTask = 'gender'
dataType = 'toyWaveform'
# define the model
model = soundNet.soundNet # define the model
#model = waveCNN.waveCNN
# according to the configuaration, change the coresponding setting
#if thisTask == 'emotion':
# trainNewFolderName = newFolderName
# load data
trainFeature, trainLabel, testFeature, testLabel = expUtil.loadData( testFolder = 4, testTask = thisTask, precision = 'original', sampleRate = 16000, dataType = dataType )
#%% grid search
#batch_sizeList = [ 32, 24, 16 ]
#learningRateList = [ 1e-3, 5e-4, 1e-4, 5e-5, 1e-5 ]
#initList = [ 'RandomUniform', 'lecun_normal', 'lecun_uniform', 'glorot_normal', 'glorot_uniform', 'he_normal', 'he_uniform' ]
batch_sizeList = [ 32 ]
learningRateList = [ 1e-4 ]
initList = [ 'glorot_normal' ]
for batch_size in batch_sizeList:
    resultList = [ ]
    for learningRate in learningRateList:
        for init in initList:
            tempFolderName = newFolderName + '/' + str( learningRate ) + '_' + str( batch_size ) + '_' + init
            os.mkdir( tempFolderName )
            # train the model
            resultOnTrain, resultOnTest = expUtil.train( testFeature, testLabel, trainFeature, trainLabel, iteration_num = 100, \
                lr_decay = 0.1, batch_size = batch_size, learningRate = learningRate, iterationNum = 100, \
                modelT = model, newFolderName = tempFolderName, init = keras.initializers.Constant(value=0.01), saveSign = True, denseUnitNum = 64, \
                dataType = dataType )
            resultList.append( resultOnTest[ -1 ] )
    np.savetxt( newFolderName + '\_' + str( batch_size ) +'_gridSearch.csv', resultList, delimiter = ',' )
    resultList = np.array( resultList )
    resultList.resize( [ len( learningRateList ), len( initList ) ] )
    np.savetxt( newFolderName + '\_' + str( batch_size ) +'_gridSearch.csv', resultList, delimiter = ',' )
#%% start test
testSamples = testFeature.shape[ 0 ]
trainSamples = trainFeature.shape[ 0 ]
log = 'testSample_num = ' + str( testSamples ) + '\n trainSample_num = ' + str( trainSamples )
with open( newFolderName + '/log.txt' , "w") as text_file:
    text_file.write( log )
| ["[email protected]"] | |
3eb21352e9d9a3dcc23572a98430da9b90e4f9aa | db818127b373da9d88583e717f184f483a1f844d | /instruction_env/Lib/site-packages/numpydoc/tests/test_validate.py | b7127ce2012e8419e79f0df9f71e724b944e2723 | ["MIT"]
| permissive | lfunderburk/Effective-Instructions | 4af5a763b5021668abd6d37f1d860eeff07bfee8 | ce40f890fb8623ff1ec9c3e9e1190505cbd1e6db | refs/heads/main | 2023-04-14T22:43:48.363281 | 2021-04-26T05:40:19 | 2021-04-26T05:40:19 | 331,163,652 | 0 | 0 | MIT | 2021-04-26T05:40:22 | 2021-01-20T01:58:52 | null | UTF-8 | Python | false | false | 32,871 | py |
import pytest
import numpydoc.validate
import numpydoc.tests
validate_one = numpydoc.validate.validate
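# Illustrative note (added): validate_one takes a fully qualified import path,
# e.g. validate_one("numpydoc.tests.test_validate.GoodDocStrings.plot"), and
# returns a dict whose "errors" entry is a list of (error_code, message) pairs.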
class GoodDocStrings:
"""
Collection of good doc strings.
This class contains a lot of docstrings that should pass the validation
script without any errors.
See Also
--------
AnotherClass : With its description.
Examples
--------
>>> result = 1 + 1
"""
def one_liner(self):
"""Allow one liner docstrings (including quotes)."""
# This should fail, but not because of the position of the quotes
pass
def plot(self, kind, color="blue", **kwargs):
"""
Generate a plot.
Render the data in the Series as a matplotlib plot of the
specified kind.
Parameters
----------
kind : str
Kind of matplotlib plot, e.g.::
'foo'
color : str, default 'blue'
Color name or rgb code.
**kwargs
These parameters will be passed to the matplotlib plotting
function.
See Also
--------
related : Something related.
Examples
--------
>>> result = 1 + 1
"""
pass
def swap(self, arr, i, j, *args, **kwargs):
"""
Swap two indicies on an array.
The extended summary can be multiple paragraphs, but just one
is enough to pass the validation.
Parameters
----------
arr : list
The list having indexes swapped.
i, j : int
The indexes being swapped.
*args, **kwargs
Extraneous parameters are being permitted.
See Also
--------
related : Something related.
Examples
--------
>>> result = 1 + 1
"""
pass
def sample(self):
"""
Generate and return a random number.
The value is sampled from a continuous uniform distribution between
0 and 1.
Returns
-------
float
Random number generated.
- Make sure you set a seed for reproducibility
See Also
--------
related : Something related.
Examples
--------
>>> result = 1 + 1
"""
pass
def random_letters(self):
"""
Generate and return a sequence of random letters.
The length of the returned string is also random, and is also
returned.
Returns
-------
length : int
Length of the returned string.
letters : str
String of random letters.
.. versionadded:: 0.1
See Also
--------
related : Something related.
Examples
--------
>>> result = 1 + 1
"""
pass
def sample_values(self):
"""
Generate an infinite sequence of random numbers.
The values are sampled from a continuous uniform distribution between
0 and 1.
Yields
------
float
Random number generated.
See Also
--------
related : Something related.
Examples
--------
>>> result = 1 + 1
"""
pass
def head(self):
"""
Return the first 5 elements of the Series.
This function is mainly useful to preview the values of the
Series without displaying the whole of it.
Returns
-------
int
Subset of the original series with the 5 first values.
See Also
--------
Series.tail : Return the last 5 elements of the Series.
Series.iloc : Return a slice of the elements in the Series,
which can also be used to return the first or last n.
Examples
--------
>>> 1 + 1
2
"""
return 1
def head1(self, n=5):
"""
Return the first elements of the Series.
This function is mainly useful to preview the values of the
Series without displaying the whole of it.
Parameters
----------
n : int
Number of values to return.
Returns
-------
int
Subset of the original series with the n first values.
See Also
--------
tail : Return the last n elements of the Series.
Examples
--------
>>> s = 10
>>> s
10
With the `n` parameter, we can change the number of returned rows:
>>> s + 1
11
"""
return 1
def summary_starts_with_number(self, n=5):
"""
2nd rule of summaries should allow this.
3 Starting the summary with a number instead of a capital letter.
Also in parameters, returns, see also...
Parameters
----------
n : int
4 Number of values to return.
Returns
-------
int
5 Subset of the original series with the n first values.
See Also
--------
tail : 6 Return the last n elements of the Series.
Examples
--------
>>> s = 10
>>> s
10
7 With the `n` parameter, we can change the number of returned rows:
>>> s + 1
11
"""
return 1
def contains(self, pat, case=True, na=float('NaN')):
"""
Return whether each value contains `pat`.
In this case, we are illustrating how to use sections, even
if the example is simple enough and does not require them.
Parameters
----------
pat : str
Pattern to check for within each element.
case : bool, default True
Whether check should be done with case sensitivity.
na : object, default np.nan
Fill value for missing data.
See Also
--------
related : Something related.
Examples
--------
>>> s = 25
>>> s
25
**Case sensitivity**
With `case_sensitive` set to `False` we can match `a` with both
`a` and `A`:
>>> s + 1
26
**Missing values**
We can fill missing values in the output using the `na` parameter:
>>> s * 2
50
"""
pass
def mode(self, axis, numeric_only):
"""
Ensure reST directives don't affect checks for leading periods.
The extended summary can be multiple paragraphs, but just one
is enough to pass the validation.
Parameters
----------
axis : str
Sentence ending in period, followed by single directive.
.. versionchanged:: 0.1.2
numeric_only : bool
Sentence ending in period, followed by multiple directives.
.. versionadded:: 0.1.2
.. deprecated:: 0.00.0
A multiline description,
which spans another line.
See Also
--------
related : Something related.
Examples
--------
>>> result = 1 + 1
"""
pass
def good_imports(self):
"""
Ensure import other than numpy and pandas are fine.
The extended summary can be multiple paragraphs, but just one
is enough to pass the validation.
See Also
--------
related : Something related.
Examples
--------
This example does not import pandas or import numpy.
>>> import datetime
>>> datetime.MAXYEAR
9999
"""
pass
def no_returns(self):
"""
Say hello and have no returns.
The extended summary can be multiple paragraphs, but just one
is enough to pass the validation.
See Also
--------
related : Something related.
Examples
--------
>>> result = 1 + 1
"""
pass
def empty_returns(self):
"""
Say hello and always return None.
Since this function never returns a value, this
docstring doesn't need a return section.
See Also
--------
related : Something related.
Examples
--------
>>> result = 1 + 1
"""
def say_hello():
return "Hello World!"
say_hello()
if True:
return
else:
return None
def multiple_variables_on_one_line(self, matrix, a, b, i, j):
"""
Swap two values in a matrix.
The extended summary can be multiple paragraphs, but just one
is enough to pass the validation.
Parameters
----------
matrix : list of list
A double list that represents a matrix.
a, b : int
The indicies of the first value.
i, j : int
The indicies of the second value.
See Also
--------
related : Something related.
Examples
--------
>>> result = 1 + 1
"""
pass
class BadGenericDocStrings:
"""Everything here has a bad docstring
"""
def func(self):
"""Some function.
With several mistakes in the docstring.
It has a blank like after the signature `def func():`.
The text 'Some function' should go in the line after the
opening quotes of the docstring, not in the same line.
There is a blank line between the docstring and the first line
of code `foo = 1`.
The closing quotes should be in the next line, not in this one."""
foo = 1
bar = 2
return foo + bar
def astype(self, dtype):
"""
Casts Series type.
Verb in third-person of the present simple, should be infinitive.
"""
pass
def astype1(self, dtype):
"""
Method to cast Series type.
Does not start with verb.
"""
pass
def astype2(self, dtype):
"""
Cast Series type
Missing dot at the end.
"""
pass
def astype3(self, dtype):
"""
Cast Series type from its current type to the new type defined in
the parameter dtype.
Summary is too verbose and doesn't fit in a single line.
"""
pass
def two_linebreaks_between_sections(self, foo):
"""
Test linebreaks message GL03.
Note 2 blank lines before parameters section.
Parameters
----------
foo : str
Description of foo parameter.
"""
pass
def linebreak_at_end_of_docstring(self, foo):
"""
Test linebreaks message GL03.
Note extra blank line at end of docstring.
Parameters
----------
foo : str
Description of foo parameter.
"""
pass
def plot(self, kind, **kwargs):
"""
Generate a plot.
Render the data in the Series as a matplotlib plot of the
specified kind.
Note the blank line between the parameters title and the first
parameter. Also, note that after the name of the parameter `kind`
and before the colon, a space is missing.
Also, note that the parameter descriptions do not start with a
capital letter, and do not finish with a dot.
Finally, the `**kwargs` parameter is missing.
Parameters
----------
kind: str
kind of matplotlib plot
"""
pass
def unknown_section(self):
"""
This section has an unknown section title.
Unknown Section
---------------
This should raise an error in the validation.
"""
def sections_in_wrong_order(self):
"""
This docstring has the sections in the wrong order.
Parameters
----------
name : str
This section is in the right position.
Examples
--------
>>> print('So far Examples is good, as it goes before Parameters')
So far Examples is good, as it goes before Parameters
See Also
--------
function : This should generate an error, as See Also needs to go
before Examples.
"""
def deprecation_in_wrong_order(self):
"""
This docstring has the deprecation warning in the wrong order.
This is the extended summary. The correct order should be
summary, deprecation warning, extended summary.
.. deprecated:: 1.0
This should generate an error as it needs to go before
extended summary.
"""
def method_wo_docstrings(self):
pass
def directives_without_two_colons(self, first, second):
"""
Ensure reST directives have trailing colons.
Parameters
----------
first : str
Sentence ending in period, followed by single directive w/o colons.
.. versionchanged 0.1.2
second : bool
Sentence ending in period, followed by multiple directives w/o
colons.
.. versionadded 0.1.2
.. deprecated 0.00.0
"""
pass
class BadSummaries:
def no_summary(self):
"""
Returns
-------
int
Always one.
"""
def heading_whitespaces(self):
"""
Summary with heading whitespaces.
Returns
-------
int
Always one.
"""
def wrong_line(self):
"""Quotes are on the wrong line.
Both opening and closing."""
pass
def no_punctuation(self):
"""
Has the right line but forgets punctuation
"""
pass
def no_capitalization(self):
"""
provides a lowercase summary.
"""
pass
def no_infinitive(self):
"""
Started with a verb that is not infinitive.
"""
def multi_line(self):
"""
Extends beyond one line
which is not correct.
"""
def two_paragraph_multi_line(self):
"""
Extends beyond one line
which is not correct.
Extends beyond one line, which in itself is correct but the
previous short summary should still be an issue.
"""
class BadParameters:
"""
Everything here has a problem with its Parameters section.
"""
def no_type(self, value):
"""
Lacks the type.
Parameters
----------
value
A parameter without type.
"""
def type_with_period(self, value):
"""
Has period after type.
Parameters
----------
value : str.
A parameter type should not finish with period.
"""
def no_description(self, value):
"""
Lacks the description.
Parameters
----------
value : str
"""
def missing_params(self, kind, **kwargs):
"""
Lacks kwargs in Parameters.
Parameters
----------
kind : str
Foo bar baz.
"""
def bad_colon_spacing(self, kind):
"""
Has bad spacing in the type line.
Parameters
----------
kind: str
Needs a space after kind.
"""
def no_description_period(self, kind):
"""
Forgets to add a period to the description.
Parameters
----------
kind : str
Doesn't end with a dot
"""
def no_description_period_with_directive(self, kind):
"""
Forgets to add a period, and also includes a directive.
Parameters
----------
kind : str
Doesn't end with a dot
.. versionadded:: 0.00.0
"""
def no_description_period_with_directives(self, kind):
"""
Forgets to add a period, and also includes multiple directives.
Parameters
----------
kind : str
Doesn't end with a dot
.. versionchanged:: 0.00.0
.. deprecated:: 0.00.0
"""
def parameter_capitalization(self, kind):
"""
Forgets to capitalize the description.
Parameters
----------
kind : str
this is not capitalized.
"""
def blank_lines(self, kind):
"""
Adds a blank line after the section header.
Parameters
----------
kind : str
Foo bar baz.
"""
pass
def integer_parameter(self, kind):
"""
Uses integer instead of int.
Parameters
----------
kind : integer
Foo bar baz.
"""
pass
def string_parameter(self, kind):
"""
Uses string instead of str.
Parameters
----------
kind : string
Foo bar baz.
"""
pass
def boolean_parameter(self, kind):
"""
Uses boolean instead of bool.
Parameters
----------
kind : boolean
Foo bar baz.
"""
pass
def list_incorrect_parameter_type(self, kind):
"""
Uses list of boolean instead of list of bool.
Parameters
----------
kind : list of boolean, integer, float or string
Foo bar baz.
"""
pass
def bad_parameter_spacing(self, a, b):
"""
The parameters on the same line have an extra space between them.
Parameters
----------
a, b : int
Foo bar baz.
"""
pass
class BadReturns:
def return_not_documented(self):
"""
Lacks section for Returns
"""
return "Hello world!"
def yield_not_documented(self):
"""
Lacks section for Yields
"""
yield "Hello world!"
def no_type(self):
"""
Returns documented but without type.
Returns
-------
Some value.
"""
return "Hello world!"
def no_description(self):
"""
Provides type but no descrption.
Returns
-------
str
"""
return "Hello world!"
def no_punctuation(self):
"""
Provides type and description but no period.
Returns
-------
str
A nice greeting
"""
return "Hello world!"
def named_single_return(self):
"""
Provides name but returns only one value.
Returns
-------
s : str
A nice greeting.
"""
return "Hello world!"
def no_capitalization(self):
"""
Forgets capitalization in return values description.
Returns
-------
foo : str
The first returned string.
bar : str
the second returned string.
"""
return "Hello", "World!"
def no_period_multi(self):
"""
Forgets period in return values description.
Returns
-------
foo : str
The first returned string
bar : str
The second returned string.
"""
return "Hello", "World!"
class BadSeeAlso:
def no_desc(self):
"""
Return the first 5 elements of the Series.
See Also
--------
Series.tail
"""
pass
def desc_no_period(self):
"""
Return the first 5 elements of the Series.
See Also
--------
Series.tail : Return the last 5 elements of the Series.
Series.iloc : Return a slice of the elements in the Series,
which can also be used to return the first or last n
"""
pass
def desc_first_letter_lowercase(self):
"""
Return the first 5 elements of the Series.
See Also
--------
Series.tail : return the last 5 elements of the Series.
Series.iloc : Return a slice of the elements in the Series,
which can also be used to return the first or last n.
"""
pass
def prefix_pandas(self):
"""
Have `pandas` prefix in See Also section.
See Also
--------
pandas.Series.rename : Alter Series index labels or name.
DataFrame.head : The first `n` rows of the caller object.
"""
pass
class BadExamples:
def missing_whitespace_around_arithmetic_operator(self):
"""
Examples
--------
>>> 2+5
7
"""
pass
def indentation_is_not_a_multiple_of_four(self):
"""
Examples
--------
>>> if 2 + 5:
... pass
"""
pass
def missing_whitespace_after_comma(self):
"""
Examples
--------
>>> import datetime
>>> value = datetime.date(2019,1,1)
"""
pass
class TestValidator:
def _import_path(self, klass=None, func=None):
"""
Build the required import path for tests in this module.
Parameters
----------
klass : str
Class name of object in module.
func : str
Function name of object in module.
Returns
-------
str
Import path of specified object in this module
"""
base_path = "numpydoc.tests.test_validate"
if klass:
base_path = ".".join([base_path, klass])
if func:
base_path = ".".join([base_path, func])
return base_path
def test_one_liner(self, capsys):
result = validate_one(self._import_path(klass="GoodDocStrings", func='one_liner'))
errors = " ".join(err[1] for err in result["errors"])
assert 'should start in the line immediately after the opening quotes' not in errors
assert 'should be placed in the line after the last text' not in errors
def test_good_class(self, capsys):
errors = validate_one(self._import_path(klass="GoodDocStrings"))["errors"]
assert isinstance(errors, list)
assert not errors
@pytest.mark.parametrize(
"func",
[
"plot",
"swap",
"sample",
"random_letters",
"sample_values",
"head",
"head1",
"summary_starts_with_number",
"contains",
"mode",
"good_imports",
"no_returns",
"empty_returns",
"multiple_variables_on_one_line",
],
)
def test_good_functions(self, capsys, func):
errors = validate_one(self._import_path(klass="GoodDocStrings", func=func))[
"errors"
]
assert isinstance(errors, list)
assert not errors
def test_bad_class(self, capsys):
errors = validate_one(self._import_path(klass="BadGenericDocStrings"))["errors"]
assert isinstance(errors, list)
assert errors
@pytest.mark.parametrize(
"func",
[
"func",
"astype",
"astype1",
"astype2",
"astype3",
"plot",
"directives_without_two_colons",
],
)
def test_bad_generic_functions(self, capsys, func):
errors = validate_one(
self._import_path(klass="BadGenericDocStrings", func=func) # noqa:F821
)["errors"]
assert isinstance(errors, list)
assert errors
@pytest.mark.parametrize(
"klass,func,msgs",
[
# See Also tests
(
"BadGenericDocStrings",
"unknown_section",
('Found unknown section "Unknown Section".',),
),
(
"BadGenericDocStrings",
"sections_in_wrong_order",
(
"Sections are in the wrong order. Correct order is: Parameters, "
"See Also, Examples",
),
),
(
"BadGenericDocStrings",
"deprecation_in_wrong_order",
("Deprecation warning should precede extended summary",),
),
(
"BadGenericDocStrings",
"directives_without_two_colons",
(
"reST directives ['versionchanged', 'versionadded', "
"'deprecated'] must be followed by two colons",
),
),
(
"BadSeeAlso",
"no_desc",
('Missing description for See Also "Series.tail" reference',),
),
(
"BadSeeAlso",
"desc_no_period",
('Missing period at end of description for See Also "Series.iloc"',),
),
(
"BadSeeAlso",
"desc_first_letter_lowercase",
('should be capitalized for See Also "Series.tail"',),
),
# Summary tests
(
"BadSummaries",
"no_summary",
("No summary found",),
),
(
"BadSummaries",
"heading_whitespaces",
("Summary contains heading whitespaces",),
),
(
"BadSummaries",
"wrong_line",
("should start in the line immediately after the opening quotes",
"should be placed in the line after the last text"),
),
("BadSummaries", "no_punctuation", ("Summary does not end with a period",)),
(
"BadSummaries",
"no_capitalization",
("Summary does not start with a capital letter",),
),
(
"BadSummaries",
"no_capitalization",
("Summary must start with infinitive verb",),
),
("BadSummaries", "multi_line", ("Summary should fit in a single line",)),
(
"BadSummaries",
"two_paragraph_multi_line",
("Summary should fit in a single line",),
),
# Parameters tests
(
"BadParameters",
"no_type",
('Parameter "value" has no type',),
),
(
"BadParameters",
"type_with_period",
('Parameter "value" type should not finish with "."',),
),
(
"BadParameters",
"no_description",
('Parameter "value" has no description',),
),
(
"BadParameters",
"missing_params",
("Parameters {'**kwargs'} not documented",),
),
(
"BadParameters",
"bad_colon_spacing",
(
'Parameter "kind" requires a space before the colon '
"separating the parameter name and type",
),
),
(
"BadParameters",
"no_description_period",
('Parameter "kind" description should finish with "."',),
),
(
"BadParameters",
"no_description_period_with_directive",
('Parameter "kind" description should finish with "."',),
),
(
"BadParameters",
"parameter_capitalization",
('Parameter "kind" description should start with a capital letter',),
),
(
"BadParameters",
"integer_parameter",
('Parameter "kind" type should use "int" instead of "integer"',),
),
(
"BadParameters",
"string_parameter",
('Parameter "kind" type should use "str" instead of "string"',),
),
(
"BadParameters",
"boolean_parameter",
('Parameter "kind" type should use "bool" instead of "boolean"',),
),
(
"BadParameters",
"list_incorrect_parameter_type",
('Parameter "kind" type should use "bool" instead of "boolean"',),
),
(
"BadParameters",
"list_incorrect_parameter_type",
('Parameter "kind" type should use "int" instead of "integer"',),
),
(
"BadParameters",
"list_incorrect_parameter_type",
('Parameter "kind" type should use "str" instead of "string"',),
),
(
"BadParameters",
"bad_parameter_spacing",
("Parameters {'b'} not documented", "Unknown parameters {' b'}"),
),
pytest.param(
"BadParameters",
"blank_lines",
("No error yet?",),
marks=pytest.mark.xfail,
),
# Returns tests
("BadReturns", "return_not_documented", ("No Returns section found",)),
("BadReturns", "yield_not_documented", ("No Yields section found",)),
pytest.param("BadReturns", "no_type", ("foo",), marks=pytest.mark.xfail),
("BadReturns", "no_description", ("Return value has no description",)),
(
"BadReturns",
"no_punctuation",
('Return value description should finish with "."',),
),
(
"BadReturns",
"named_single_return",
(
"The first line of the Returns section should contain only the "
"type, unless multiple values are being returned",
),
),
(
"BadReturns",
"no_capitalization",
("Return value description should start with a capital letter",),
),
(
"BadReturns",
"no_period_multi",
('Return value description should finish with "."',),
),
(
"BadGenericDocStrings",
"method_wo_docstrings",
("The object does not have a docstring",),
),
(
"BadGenericDocStrings",
"two_linebreaks_between_sections",
(
"Double line break found; please use only one blank line to "
"separate sections or paragraphs, and do not leave blank lines "
"at the end of docstrings",
),
),
(
"BadGenericDocStrings",
"linebreak_at_end_of_docstring",
(
"Double line break found; please use only one blank line to "
"separate sections or paragraphs, and do not leave blank lines "
"at the end of docstrings",
),
),
],
)
def test_bad_docstrings(self, capsys, klass, func, msgs):
with pytest.warns(None) as w:
result = validate_one(self._import_path(klass=klass, func=func))
if len(w):
assert all('Unknown section' in str(ww.message) for ww in w)
for msg in msgs:
assert msg in " ".join(err[1] for err in result["errors"])
class TestDocstringClass:
@pytest.mark.parametrize("invalid_name", ["unknown_mod", "unknown_mod.MyClass"])
def test_raises_for_invalid_module_name(self, invalid_name):
msg = 'No module can be imported from "{}"'.format(invalid_name)
with pytest.raises(ImportError, match=msg):
numpydoc.validate.Docstring(invalid_name)
@pytest.mark.parametrize(
"invalid_name", ["datetime.BadClassName", "datetime.bad_method_name"]
)
def test_raises_for_invalid_attribute_name(self, invalid_name):
name_components = invalid_name.split(".")
obj_name, invalid_attr_name = name_components[-2], name_components[-1]
msg = "'{}' has no attribute '{}'".format(obj_name, invalid_attr_name)
with pytest.raises(AttributeError, match=msg):
numpydoc.validate.Docstring(invalid_name)
| ["[email protected]"] | |
4e98cba026ffbfa488d602586ed1fb56b70a4b3e | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/117/usersdata/168/26292/submittedfiles/al2.py | 2636de789a2d58357263296f2d1affdab045e0ff | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py |
from __future__ import division
# START YOUR CODE HERE
n = float(input('Enter n: '))
n1 = int(n)   # integer part
n2 = n - n1   # fractional part
print('%.2f' % n1)
print('%.2f' % n2)
| ["[email protected]"] | |
1872c2b02787510ea089a882647d262201237e43 | e7f708af4b599ec6763e0d3b311e2cb47cc155d8 | /payments/admin.py | f69953885eba02d24436b82c8477468a8e0d0cfd | []
| no_license | dmontoya1/tu-licencia | d48bc8779d8cda50c7a382cb1c14e2ae3668ebc8 | d436d665ba797d7b90fcdcc58bcef3e79b917682 | refs/heads/master | 2023-05-14T16:38:52.408066 | 2020-06-08T20:24:39 | 2020-06-08T20:24:39 | 371,433,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
# from .models import Invoice
# class InvoiceAdmin(admin.ModelAdmin):
#     list_display = ('__unicode__', 'user', 'release_date', 'is_discharged', 'payment_status')
#     readonly_fields = ('release_date', )
#     search_fields = ('release_date', 'payu_reference_code')
# admin.site.register(Invoice, InvoiceAdmin)
| ["[email protected]"] | |
015f94220909b436deb31345160eebc80132c586 | 3ab1f37b4372d0796c85ef24343dd8c03accb6ef | /OddEvenLinkedList.py | 5004fdb3d7cf1ccbd577d641dc11c9e1fe6a488c | []
| no_license | Blossomyyh/leetcode | 2be6a99534801fc59fe9551317ca49c3704b1c3d | 38615779eb43d147587467e11dc22761ac0726cb | refs/heads/master | 2023-01-22T16:56:26.624677 | 2020-11-20T13:47:43 | 2020-11-20T13:47:43 | 266,845,278 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 649 | py |
# Definition for singly-linked list.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

class Solution:
    def oddEvenList(self, head: ListNode) -> ListNode:
        if not head or head.next is None:
            return head
        odd, even, second = head, head.next, head.next
        while odd and odd.next and even and even.next:
            odd.next = odd.next.next
            even.next = even.next.next
            odd = odd.next
            even = even.next
        odd.next = second
        return head
node = ListNode(1, ListNode(2, ListNode(3, ListNode(4))))
Solution().oddEvenList(node)
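# Quick check (added for illustration; not in the original solution):
# the list is reordered in place, so traversing `node` prints 1, 3, 2, 4.
cur = node
while cur:
    print(cur.val)
    cur = cur.next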
| ["[email protected]"] | |
b4aec3c94de4ef1c9d0804e58f30aa47e9eeb51c | 22c6303398fe9d3a01ea2e2dee56a7c51ffb8106 | /src/StyleVarPane.py | caec6f9ba7540e07c20e8041d2bf85d34f9bbcfc | []
| no_license | prrg/BEE2.4 | 07c3d96b58bda8d7b4383d46778d01bcf970a5e4 | ffd30eb140e04db781229b27992aaed4385b438b | refs/heads/master | 2020-04-01T18:45:12.625402 | 2018-10-04T05:28:13 | 2018-10-04T05:28:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,859 | py |
from tkinter import *
from tk_tools import TK_ROOT
from tkinter import ttk
from collections import namedtuple
import functools
import operator
import img as png
from BEE2_config import GEN_OPTS
from SubPane import SubPane
import packageLoader
import tooltip
import utils
import itemconfig
from typing import Union
stylevar = namedtuple('stylevar', 'id name default desc')
# Special StyleVars that are hardcoded into the BEE2
# These are effectively attributes of Portal 2 itself, and always work
# in every style.
styleOptions = [
# ID, Name, default value
stylevar(
id='MultiverseCave',
name=_('Multiverse Cave'),
default=1,
desc=_('Play the Workshop Cave Johnson lines on map start.')
),
stylevar(
id='FixFizzlerBump',
name=_('Prevent Portal Bump (fizzler)'),
default=0,
desc=_('Add portal bumpers to make it more difficult to portal across '
'fizzler edges. This can prevent placing portals in tight '
'spaces near fizzlers, or fizzle portals on activation.')
),
stylevar(
id='NoMidVoices',
name=_('Suppress Mid-Chamber Dialogue'),
default=0,
desc=_('Disable all voicelines other than entry and exit lines.')
),
stylevar(
id='UnlockDefault',
name=_('Unlock Default Items'),
default=0,
desc=_('Allow placing and deleting the mandatory Entry/Exit Doors and '
'Large Observation Room. Use with caution, this can have weird '
'results!')
),
stylevar(
id='AllowGooMist',
name=_('Allow Adding Goo Mist'),
default=1,
desc=_('Add mist particles above Toxic Goo in certain styles. This can '
'increase the entity count significantly with large, complex '
'goo pits, so disable if needed.')
),
stylevar(
id='FunnelAllowSwitchedLights',
name=_('Light Reversible Excursion Funnels'),
default=1,
desc=_('Funnels emit a small amount of light. However, if multiple funnels '
'are near each other and can reverse polarity, this can cause '
'lighting issues. Disable this to prevent that by disabling '
'lights. Non-reversible Funnels do not have this issue.'),
),
stylevar(
id='EnableShapeSignageFrame',
name=_('Enable Shape Framing'),
default=1,
desc=_('After 10 shape-type antlines are used, the signs repeat. '
'With this enabled, colored frames will be added to '
'distinguish them.'),
),
]
checkbox_all = {}
checkbox_chosen = {}
checkbox_other = {}
tk_vars = {}
VAR_LIST = []
STYLES = {}
window = None
UI = {}
def update_filter():
pass
def add_vars(style_vars, styles):
"""
Add the given stylevars to our list.
"""
VAR_LIST.clear()
VAR_LIST.extend(
sorted(style_vars, key=operator.attrgetter('id'))
)
for var in VAR_LIST: # type: packageLoader.StyleVar
var.enabled = GEN_OPTS.get_bool('StyleVar', var.id, var.default)
for style in styles:
STYLES[style.id] = style
def set_stylevar(var):
"""Save the value for a particular stylevar."""
val = str(tk_vars[var].get())
GEN_OPTS['StyleVar'][var] = val
if var == 'UnlockDefault':
update_filter()
def make_desc(var: Union[packageLoader.StyleVar, stylevar], is_hardcoded=False):
"""Generate the description text for a StyleVar.
This adds 'Default: on/off', and which styles it's used in.
"""
if var.desc:
desc = [var.desc, '']
else:
desc = []
desc.append(
_('Default: On')
if var.default else
_('Default: Off')
)
if is_hardcoded or var.styles is None:
desc.append(_('Styles: Unstyled'))
else:
app_styles = [
style
for style in
STYLES.values()
if var.applies_to_style(style)
]
if len(app_styles) == len(STYLES):
desc.append(_('Styles: All'))
else:
style_list = sorted(
style.selitem_data.short_name
for style in
app_styles
)
desc.append(
ngettext('Style: {}', 'Styles: {}', len(style_list)
).format(', '.join(style_list)))
return '\n'.join(desc)
def refresh(selected_style):
"""Move the stylevars to the correct position.
This depends on which apply to the current style.
"""
en_row = 0
dis_row = 0
for var in VAR_LIST:
if var.applies_to_all():
continue # Always visible!
if var.applies_to_style(selected_style):
checkbox_chosen[var.id].grid(
row=en_row,
sticky="W",
padx=3,
)
checkbox_other[var.id].grid_remove()
en_row += 1
else:
checkbox_chosen[var.id].grid_remove()
checkbox_other[var.id].grid(
row=dis_row,
sticky="W",
padx=3,
)
dis_row += 1
if en_row == 0:
UI['stylevar_chosen_none'].grid(sticky='EW')
else:
UI['stylevar_chosen_none'].grid_remove()
if dis_row == 0:
UI['stylevar_other_none'].grid(sticky='EW')
else:
UI['stylevar_other_none'].grid_remove()
def flow_stylevar(e=None):
UI['style_can']['scrollregion'] = UI['style_can'].bbox(ALL)
def make_pane(tool_frame):
"""Create the styleVar pane.
"""
global window
window = SubPane(
TK_ROOT,
options=GEN_OPTS,
title=_('Style/Item Properties'),
name='style',
resize_y=True,
tool_frame=tool_frame,
tool_img=png.png('icons/win_stylevar'),
tool_col=3,
)
UI['nbook'] = nbook = ttk.Notebook(window)
nbook.grid(row=0, column=0, sticky=NSEW)
window.rowconfigure(0, weight=1)
window.columnconfigure(0, weight=1)
nbook.enable_traversal()
stylevar_frame = ttk.Frame(nbook)
stylevar_frame.rowconfigure(0, weight=1)
stylevar_frame.columnconfigure(0, weight=1)
nbook.add(stylevar_frame, text=_('Styles'))
UI['style_can'] = Canvas(stylevar_frame, highlightthickness=0)
# need to use a canvas to allow scrolling
UI['style_can'].grid(sticky='NSEW')
window.rowconfigure(0, weight=1)
UI['style_scroll'] = ttk.Scrollbar(
stylevar_frame,
orient=VERTICAL,
command=UI['style_can'].yview,
)
UI['style_scroll'].grid(column=1, row=0, rowspan=2, sticky="NS")
UI['style_can']['yscrollcommand'] = UI['style_scroll'].set
utils.add_mousewheel(UI['style_can'], stylevar_frame)
canvas_frame = ttk.Frame(UI['style_can'])
frame_all = ttk.Labelframe(canvas_frame, text=_("All:"))
frame_all.grid(row=0, sticky='EW')
frm_chosen = ttk.Labelframe(canvas_frame, text=_("Selected Style:"))
frm_chosen.grid(row=1, sticky='EW')
ttk.Separator(
canvas_frame,
orient=HORIZONTAL,
).grid(row=2, sticky='EW', pady=(10, 5))
frm_other = ttk.Labelframe(canvas_frame, text=_("Other Styles:"))
frm_other.grid(row=3, sticky='EW')
UI['stylevar_chosen_none'] = ttk.Label(
frm_chosen,
text=_('No Options!'),
font='TkMenuFont',
justify='center',
)
UI['stylevar_other_none'] = ttk.Label(
frm_other,
text=_('None!'),
font='TkMenuFont',
justify='center',
)
all_pos = 0
for all_pos, var in enumerate(styleOptions):
# Add the special stylevars which apply to all styles
tk_vars[var.id] = IntVar(
value=GEN_OPTS.get_bool('StyleVar', var.id, var.default)
)
checkbox_all[var.id] = ttk.Checkbutton(
frame_all,
variable=tk_vars[var.id],
text=var.name,
command=functools.partial(set_stylevar, var.id)
)
checkbox_all[var.id].grid(row=all_pos, column=0, sticky="W", padx=3)
tooltip.add_tooltip(
checkbox_all[var.id],
make_desc(var, is_hardcoded=True),
)
for var in VAR_LIST:
tk_vars[var.id] = IntVar(value=var.enabled)
args = {
'variable': tk_vars[var.id],
'text': var.name,
'command': functools.partial(set_stylevar, var.id)
}
desc = make_desc(var)
if var.applies_to_all():
# Available in all styles - put with the hardcoded variables.
all_pos += 1
checkbox_all[var.id] = check = ttk.Checkbutton(frame_all, **args)
check.grid(row=all_pos, column=0, sticky="W", padx=3)
tooltip.add_tooltip(check, desc)
else:
# Swap between checkboxes depending on style.
checkbox_chosen[var.id] = ttk.Checkbutton(frm_chosen, **args)
checkbox_other[var.id] = ttk.Checkbutton(frm_other, **args)
tooltip.add_tooltip(
checkbox_chosen[var.id],
desc,
)
tooltip.add_tooltip(
checkbox_other[var.id],
desc,
)
UI['style_can'].create_window(0, 0, window=canvas_frame, anchor="nw")
UI['style_can'].update_idletasks()
UI['style_can'].config(
scrollregion=UI['style_can'].bbox(ALL),
width=canvas_frame.winfo_reqwidth(),
)
if utils.USE_SIZEGRIP:
ttk.Sizegrip(
window,
cursor=utils.CURSORS['stretch_vert'],
).grid(row=1, column=0)
UI['style_can'].bind('<Configure>', flow_stylevar)
item_config_frame = ttk.Frame(nbook)
nbook.add(item_config_frame, text=_('Items'))
itemconfig.make_pane(item_config_frame)
| [
"[email protected]"
]
| |
4e69526e2f22b9e6ead9e0673d893c9460e3b570 | fc1cc515a65e844705cc6262a70cd0a12ce1d1df | /math/0x00-linear_algebra/2-size_me_please.py | f9800d2c3bc375d8df09bd35e4d0cfd25402da82 | []
| no_license | yulyzulu/holbertonschool-machine_learning | 4f379a4d58da201e8125bd8d74e3c9a4dfcf8a57 | d078d9c1c5bd96730a08d52e4520eb380467fb48 | refs/heads/master | 2022-12-26T12:01:25.345332 | 2020-10-03T03:19:05 | 2020-10-03T03:19:05 | 279,392,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py |
#!/usr/bin/env python3
""" Module to execute function """
def matrix_shape(matrix):
    """Function that calculates the shape of a matrix"""
    shape = []
    shape.append(len(matrix))
    while type(matrix[0]) == list:
        matrix = matrix[0]
        shape.append(len(matrix))
    return shape
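# Illustrative usage (added, not part of the original file):
# matrix_shape([[1, 2], [3, 4], [5, 6]]) returns [3, 2]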
| ["[email protected]"] | |
9ca06377d8cd9fbe39082bfb3d7983c1eb7ddd2c | 6b27c39edc10b1353104043b7a523f4981c99ef2 | /pytype/tools/merge_pyi/test_data/stars.pep484.py | 56d6529efedb9cc54e761bdb5952bafc66cde7b3 | ["Apache-2.0", "MIT"]
| permissive | google/pytype | ad0ff0b6c1083b4f0a1af1747869d422f2b5f4d8 | bda0b9547af9a084bb2bd1427f58dcde968e48b5 | refs/heads/main | 2023-08-26T17:52:23.546035 | 2023-08-24T22:48:00 | 2023-08-24T22:48:00 | 32,483,713 | 4,595 | 367 | NOASSERTION | 2023-09-13T04:40:45 | 2015-03-18T20:52:08 | Python | UTF-8 | Python | false | false | 288 | py |
def f1(*a):
    pass

def f2(**a):
    pass

def f3(a, *b):
    pass

def f4(a, **b):
    pass

## arg with default after *args is valid python3, not python2
def f5(*a, b=1):
    pass

def f6(*a, b=1, **c):
    pass

def f7(x=1, *a, b=1, **c):
    pass

def f8(#asd
    *a):
    pass
| ["[email protected]"] | |
eacef7a09f8311f33624a9f5acb965d5ec877336 | 7d8022661a756f77f715ee4d099fb17cb9da671a | /engine/data_loader.py | 6e92e9d69aff6df248ddbe721e11db9904459073 | []
| no_license | lxj0276/Quant-Util | a7d70d88fc47eb16a08149faefa7b128c01c670e | 2706ecba72a293ee01105ad22508a8d6b20e1394 | refs/heads/master | 2020-04-25T13:40:36.700892 | 2018-10-15T04:35:54 | 2018-10-15T04:35:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,979 | py |
import os
from collections import OrderedDict
import pandas as pd
from engine.cons import *
from engine.utils import get_calendar
from feature.time import TimeRange
from mongoapi.get_data import get_day_trade_data
from redis_cache.rediscache import cache_it_pickle
class pricing_data_loader:
    def __init__(self, load_path=PRICING_DATA_PATH):
        self.load_path = load_path

    def load_data(self, instruments, feature, start_time, end_time):
        pass

    #
    # @cache_it_pickle()
    # def load_single_data(self, instrument, start_time, end_time, feature='close'):
    #     df = pd.read_csv(os.path.join(self.load_path, instrument + '.csv'))[['date', feature, 'rate']]
    #     df = df.rename(columns={feature: PRICE})
    #     # df[PRICE]=df[PRICE]/df['rate']
    #     del df['rate']
    #     carlender = get_calendar()
    #
    #     df = df.set_index('date')
    #     df_full = df.reindex(carlender).fillna(method='ffill')
    #
    #     pricing_data = df_full[(df_full.index >= start_time) & (df_full.index <= end_time)].to_dict()[PRICE]
    #     df = df.reindex(df_full.index)
    #     on_trading = (~df[(df.index >= start_time) & (df.index <= end_time)].isnull()).astype(int).to_dict()[PRICE]
    #     return OrderedDict(pricing_data), OrderedDict(on_trading)

    @cache_it_pickle()
    def load_single_data(self, instrument, start_time, end_time, feature='close'):
        trade_calendar = get_calendar(start_time, end_time)
        data = get_day_trade_data([instrument],
                                  start_time,
                                  end_time,
                                  [feature],
                                  return_df=True)[['date', feature]].set_index('date')
        data = data.reindex(trade_calendar)
        pricing_data = data.fillna(method='ffill').to_dict()[feature]
        on_trading = (~data.isnull()).astype(int).to_dict()[feature]
        return OrderedDict(pricing_data), OrderedDict(on_trading)
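# Illustrative usage (added; assumes the Mongo/Redis backends behind mongoapi
# and redis_cache are configured, and the instrument code is a placeholder):
# loader = pricing_data_loader()
# prices, on_trading = loader.load_single_data('600000', '2018-01-01', '2018-06-30')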
| ["[email protected]"] | |
7c38dda3f18562b3df7eecb78e86f2712b212369 | b5d72e68c3976766a7adfd1fa33f778a5268c84a | /Regex/ex1.py | 35628e29ded201a4fa05a049420dc39525ee29d4 | []
| no_license | LizinczykKarolina/Python | 4a2fb0e7fb130c86239b2666fb346bf35a8e655b | 7f4a3a9cba15fd2b4c7a104667461b3c49f8b757 | refs/heads/master | 2021-06-18T21:43:52.983557 | 2017-06-17T21:24:01 | 2017-06-17T21:24:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py |
#1. Write a Python program to check that a string contains only a certain set of characters (in this case a-z, A-Z and 0-9).
import re
email_address = '[email protected]'
# Anchor the pattern so the whole string must consist of allowed characters;
# an unanchored re.search would succeed if any single character matched.
searchObj = re.search(r'^[a-zA-Z0-9.]+$', email_address)
if searchObj:
    print True
else:
    print False
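# e.g. 'ABCdef123' matches the anchored pattern, while 'no reply!' does not.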
"sxdupa1" -match '^sx|1$' | [
"[email protected]"
]
| |
f3ae22f885b78d753d9dbc851fea38a065e80d88 | 9fc768c541145c1996f2bdb8a5d62d523f24215f | /code/HomeWork/ch5/H_5_4.py | a92df699ef2667ea9bcb8c973b7297b29db61309 | []
| no_license | jumbokh/pyclass | 3b624101a8e43361458130047b87865852f72734 | bf2d5bcca4fff87cb695c8cec17fa2b1bbdf2ce5 | refs/heads/master | 2022-12-25T12:15:38.262468 | 2020-09-26T09:08:46 | 2020-09-26T09:08:46 | 283,708,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 722 | py |
# H_5_4.py  Purpose: read a number and decide whether it is a multiple of 11
num_even = 0  # running sum of the digits in even positions
num_odd = 0   # running sum of the digits in odd positions
number = str(input('Please enter a number: '))
l = len(number)  # number of digits entered
x = int(number)  # convert to a numeric value
for n in range(l, 0, -1):
    y = x // (10**(n-1))  # extract the digit at this position
    x = x - (y * (10**(n-1)))
    if n % 2 == 0:  # add the digit to the even- or odd-position sum
        num_even = num_even + y
    else:
        num_odd = num_odd + y
# divisible by 11 when the two position sums differ by 0 or a multiple of 11
if abs(num_even - num_odd) == 0 or (abs(num_even - num_odd)) % 11 == 0:
    print('This number is a multiple of 11')
else:
    print('This number is not a multiple of 11')
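# Example: for 121 the odd-position digits sum to 1+1=2 and the even-position
# digit is 2; |2-2| = 0, so 121 is reported as a multiple of 11.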
"[email protected]"
]
| |
6eeb30afedb9faf5956630200d67199cef30815d | 2a4a17a67b9069c19396c0f8eabc8b7c4b6ff703 | /BGP3D/Chapter11/WorldClass_00.py | 2cd366287c64b1790280d87dd425c04951cc0271 | []
| no_license | kaz101/panda-book | 0fa273cc2df5849507ecc949b4dde626241ffa5e | 859a759c769d9c2db0d11140b0d04506611c2b7b | refs/heads/master | 2022-12-19T09:36:05.794731 | 2020-09-16T19:04:10 | 2020-09-16T19:04:10 | 295,784,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,569 | py |
''' World Class
This class is the launching point for the game.
This is the file that needs to run to start the game,
and this class creates all the pieces of the game.
'''
import direct.directbase.DirectStart
from direct.filter.CommonFilters import CommonFilters
from HUDClass_00 import HUD
from RaceClass_00 import Race
from InputManagerClass_00 import InputManager
from MenuClass_00 import Menu
class World:
    def __init__(self):
        base.disableMouse()
        # Turns off the default mouse-camera controls in Panda3D.
        base.setBackgroundColor(0, 0, 0)
        # Sets the background to black.
        self.inputManager = InputManager()
        # Creates an InputManager to handle all of the user input in the game.
        #taskMgr.doMethodLater(10, self.debugTask, "Debug Task")
        # Tells the debugTask to run once every ten seconds. The debug task is a good
        # place to put various data print outs about the game to help with debugging.
        self.filters = CommonFilters(base.win, base.cam)
        filterok = self.filters.setBloom(blend=(0,0,0,1),
            desat=-0.5, intensity=3.0, size=2)
        render.setShaderAuto()
        # Turns on Panda3D's automatic shader generation.
        self.menuGraphics = loader.loadModel(
            "../Models/MenuGraphics.egg")
        # Loads the egg that contains all the menu graphics.
        self.fonts = {
            "silver" : loader.loadFont("../Fonts/LuconSilver.egg"),
            "blue" : loader.loadFont("../Fonts/LuconBlue.egg"),
            "orange" : loader.loadFont("../Fonts/LuconOrange.egg")}
        # Loads the three custom fonts our game will use.
        hud = HUD(self.fonts)
        # Creates the HUD.
        self.race = Race(self.inputManager, hud)
        self.race.createDemoRace()
        # creates an instance of the race class and tells it to
        # start a demo race.
        self.createStartMenu()
        # creates the start menu.

    def createStartMenu(self):
        menu = Menu(self.menuGraphics, self.fonts, self.inputManager)
        menu.initMenu([0, None,
            ["New Game", "Quit Game"],
            [[self.race.createRace, self.createReadyDialogue],
             [base.userExit]],
            [[None, None], [None]]])

    def createReadyDialogue(self):
        menu = Menu(self.menuGraphics, self.fonts, self.inputManager)
        menu.initMenu([3, "Are you ready?",
            ["Yes", "Exit"],
            [[self.race.startRace], [self.race.createDemoRace]],
            [[3], [None]]])

    def debugTask(self, task):
        print(taskMgr)
        # prints all of the tasks in the task manager.
        return task.again
    # debugTask: Runs once every ten seconds to print out reports on the games status.

w = World()
run()
"[email protected]"
]
| |
93770b04b06cc4bee60b772d822f418ad8a272c9 | faca8866b3c8aca30a915d8cb2748766557ed808 | /object_detection_updata/metrics/tf_example_parser.py | 510e6917fa9838e9282ec6f561e50cb77a7b5ff9 | []
| no_license | yongqis/proposal_joint_retireval | 6899d80f8fb94569c7b60764f6e7de74bcfa9cc8 | 97b086c62473ab1a5baf45743535fce70c3f8c20 | refs/heads/master | 2020-05-25T19:07:22.946008 | 2019-06-03T07:09:04 | 2019-06-03T07:09:09 | 187,943,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,294 | py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensorflow Example proto parser for data loading.
A parser to decode data containing serialized tensorflow.Example
protos into materialized tensors (numpy arrays).
"""
import numpy as np
from object_detection_updata.core import data_parser
from object_detection_updata.core import standard_fields as fields
class FloatParser(data_parser.DataToNumpyParser):
"""Tensorflow Example float parser."""
def __init__(self, field_name):
self.field_name = field_name
def parse(self, tf_example):
return np.array(
tf_example.features.feature[self.field_name].float_list.value,
dtype=np.float).transpose() if tf_example.features.feature[
self.field_name].HasField("float_list") else None
class StringParser(data_parser.DataToNumpyParser):
"""Tensorflow Example string parser."""
def __init__(self, field_name):
self.field_name = field_name
def parse(self, tf_example):
return "".join(tf_example.features.feature[self.field_name]
.bytes_list.value) if tf_example.features.feature[
self.field_name].HasField("bytes_list") else None
class Int64Parser(data_parser.DataToNumpyParser):
"""Tensorflow Example int64 parser."""
def __init__(self, field_name):
self.field_name = field_name
def parse(self, tf_example):
return np.array(
tf_example.features.feature[self.field_name].int64_list.value,
dtype=np.int64).transpose() if tf_example.features.feature[
self.field_name].HasField("int64_list") else None
class BoundingBoxParser(data_parser.DataToNumpyParser):
"""Tensorflow Example bounding box parser."""
def __init__(self, xmin_field_name, ymin_field_name, xmax_field_name,
ymax_field_name):
self.field_names = [
ymin_field_name, xmin_field_name, ymax_field_name, xmax_field_name
]
def parse(self, tf_example):
result = []
parsed = True
for field_name in self.field_names:
result.append(tf_example.features.feature[field_name].float_list.value)
parsed &= (
tf_example.features.feature[field_name].HasField("float_list"))
return np.array(result).transpose() if parsed else None
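# Each parser above returns None when its field is missing from the example;
# TfExampleDetectionAndGTParser below relies on this to detect absent
# required fields via its `parsed` flag.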
class TfExampleDetectionAndGTParser(data_parser.DataToNumpyParser):
"""Tensorflow Example proto parser."""
def __init__(self):
self.items_to_handlers = {
fields.DetectionResultFields.key:
StringParser(fields.TfExampleFields.source_id),
# Object ground truth boxes and classes.
fields.InputDataFields.groundtruth_boxes: (BoundingBoxParser(
fields.TfExampleFields.object_bbox_xmin,
fields.TfExampleFields.object_bbox_ymin,
fields.TfExampleFields.object_bbox_xmax,
fields.TfExampleFields.object_bbox_ymax)),
fields.InputDataFields.groundtruth_classes: (
Int64Parser(fields.TfExampleFields.object_class_label)),
# Object detections.
fields.DetectionResultFields.detection_boxes: (BoundingBoxParser(
fields.TfExampleFields.detection_bbox_xmin,
fields.TfExampleFields.detection_bbox_ymin,
fields.TfExampleFields.detection_bbox_xmax,
fields.TfExampleFields.detection_bbox_ymax)),
fields.DetectionResultFields.detection_classes: (
Int64Parser(fields.TfExampleFields.detection_class_label)),
fields.DetectionResultFields.detection_scores: (
FloatParser(fields.TfExampleFields.detection_score)),
}
self.optional_items_to_handlers = {
fields.InputDataFields.groundtruth_difficult:
Int64Parser(fields.TfExampleFields.object_difficult),
fields.InputDataFields.groundtruth_group_of:
Int64Parser(fields.TfExampleFields.object_group_of),
fields.InputDataFields.groundtruth_image_classes:
Int64Parser(fields.TfExampleFields.image_class_label),
}
def parse(self, tf_example):
"""Parses tensorflow example and returns a tensor dictionary.
Args:
tf_example: a tf.Example object.
Returns:
A dictionary of the following numpy arrays:
fields.DetectionResultFields.source_id - string containing original image
id.
fields.InputDataFields.groundtruth_boxes - a numpy array containing
groundtruth boxes.
fields.InputDataFields.groundtruth_classes - a numpy array containing
groundtruth classes.
fields.InputDataFields.groundtruth_group_of - a numpy array containing
groundtruth group of flag (optional, None if not specified).
fields.InputDataFields.groundtruth_difficult - a numpy array containing
groundtruth difficult flag (optional, None if not specified).
fields.InputDataFields.groundtruth_image_classes - a numpy array
containing groundtruth image-level labels.
fields.DetectionResultFields.detection_boxes - a numpy array containing
detection boxes.
fields.DetectionResultFields.detection_classes - a numpy array containing
detection class labels.
fields.DetectionResultFields.detection_scores - a numpy array containing
detection scores.
Returns None if tf.Example was not parsed or non-optional fields were not
found.
"""
results_dict = {}
parsed = True
for key, parser in self.items_to_handlers.items():
results_dict[key] = parser.parse(tf_example)
parsed &= (results_dict[key] is not None)
for key, parser in self.optional_items_to_handlers.items():
results_dict[key] = parser.parse(tf_example)
return results_dict if parsed else None
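# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; the feature key and value below
# are assumptions, not part of this module):
#
#   import tensorflow as tf
#   example = tf.train.Example()
#   example.features.feature['image/source_id'].bytes_list.value.append(b'0')
#   parser = TfExampleDetectionAndGTParser()
#   result = parser.parse(example)  # None here: detection fields are absent
# ---------------------------------------------------------------------------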
| [
"[email protected]"
]
| |
d83ff9d35756aa203096778316d1f1840a266b4c | d9f6f439300d298246c37ccfb881e8e8af4fda22 | /cfp/migrations/0021_profile_name.py | 814cb2ed2021c677ece96c65e1671b44cb2a0824 | [
"MIT"
]
| permissive | ajlozier/speakers | e62b8d346a58a034998860d1b42a38b00cbdbd23 | d7d87c99b1cfa5f9df5455f737385115d9d5279c | refs/heads/master | 2021-09-08T19:33:08.894305 | 2018-03-12T00:54:10 | 2018-03-12T00:54:10 | 122,101,157 | 0 | 0 | null | 2018-02-19T18:08:18 | 2018-02-19T18:08:18 | null | UTF-8 | Python | false | false | 444 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cfp', '0020_auto_20150218_0802'),
]
operations = [
migrations.AddField(
model_name='profile',
name='name',
field=models.CharField(max_length=300, default=''),
preserve_default=False,
),
]
| [
"[email protected]"
]
| |
e5d51b840f9d3e61bbf612efe4cb4e6c26e84ce6 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_actor.py | 04e32c50a329bba627e107500de9e8bd1c6b493f | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py |
# class header
class _ACTOR():
    def __init__(self):
        self.name = "ACTOR"
        self.definitions = [u'someone who pretends to be someone else while performing in a film, play, or television or radio programme: ']
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.specie = 'nouns'
    def run(self, obj1=None, obj2=None):
        # obj1/obj2 are accepted but unused; None defaults avoid the
        # shared-mutable-default-argument pitfall of the original [] defaults.
        return self.jsondata
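# Minimal usage sketch (illustrative):
#   actor = _ACTOR()
#   actor.run()  # -> {} (the empty jsondata dict)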
| [
"[email protected]"
]
| |
1a89d48503fba5a5f596dc365fc645417e63c887 | 77311ad9622a7d8b88707d7cee3f44de7c8860cb | /res/scripts/client/gui/scaleform/battle.py | cb3cb78844254b812459a7a4b3f4ed98dac52a20 | []
| no_license | webiumsk/WOT-0.9.14-CT | 9b193191505a4560df4e872e022eebf59308057e | cfe0b03e511d02c36ce185f308eb48f13ecc05ca | refs/heads/master | 2021-01-10T02:14:10.830715 | 2016-02-14T11:59:59 | 2016-02-14T11:59:59 | 51,606,676 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 39,654 | py | # 2016.02.14 12:38:27 Central Europe (Standard Time)
# Embedded file name: scripts/client/gui/Scaleform/Battle.py
import weakref
import BigWorld
import GUI
import Math
import SoundGroups
import VOIP
import constants
import BattleReplay
import CommandMapping
from ConnectionManager import connectionManager
from account_helpers.settings_core.SettingsCore import g_settingsCore
from gui.Scaleform.LogitechMonitor import LogitechMonitor
from gui.Scaleform.daapi.view.battle.damage_info_panel import VehicleDamageInfoPanel
from gui.Scaleform.daapi.view.battle.gas_attack import GasAttackPlugin
from gui.Scaleform.daapi.view.battle.repair_timer import RepairTimerPlugin
from gui.Scaleform.daapi.view.battle.resource_points import ResourcePointsPlugin
from gui.Scaleform.daapi.view.battle.respawn_view import RespawnViewPlugin
from gui.Scaleform.daapi.view.battle.PlayersPanelsSwitcher import PlayersPanelsSwitcher
from gui.Scaleform.daapi.view.battle.RadialMenu import RadialMenu
from gui.Scaleform.daapi.view.battle.flag_notification import FlagNotificationPlugin
from gui.Scaleform.daapi.view.battle.players_panel import playersPanelFactory
from gui.Scaleform.daapi.view.battle.score_panel import scorePanelFactory
from gui.Scaleform.daapi.view.battle.ConsumablesPanel import ConsumablesPanel
from gui.Scaleform.daapi.view.battle.BattleRibbonsPanel import BattleRibbonsPanel
from gui.Scaleform.daapi.view.battle.TimersBar import TimersBar
from gui.Scaleform.daapi.view.battle.damage_panel import DamagePanel
from gui.Scaleform.daapi.view.battle.indicators import IndicatorsCollection
from gui.Scaleform.daapi.view.battle.messages import PlayerMessages, VehicleErrorMessages, VehicleMessages
from gui.Scaleform.daapi.view.battle.stats_form import statsFormFactory
from gui.Scaleform.daapi.view.battle.teams_bases_panel import TeamBasesPanel
from gui.Scaleform.daapi.view.battle.markers import MarkersManager
from gui.Scaleform.daapi.view.lobby.ReportBug import makeHyperLink, reportBugOpenConfirm
from gui.Scaleform.locale.ARENAS import ARENAS
from gui.Scaleform.locale.INGAME_GUI import INGAME_GUI
from gui.Scaleform.locale.MENU import MENU
import gui
from gui.battle_control import g_sessionProvider
from gui.battle_control.DynSquadViewListener import DynSquadViewListener, RecordDynSquadViewListener, ReplayDynSquadViewListener
from gui.battle_control.battle_arena_ctrl import battleArenaControllerFactory
from gui.battle_control.battle_constants import VEHICLE_VIEW_STATE
from gui.prb_control.formatters import getPrebattleFullDescription
from gui.shared import g_eventBus, events, EVENT_BUS_SCOPE
from gui.shared.formatters import text_styles
from gui.shared.utils.TimeInterval import TimeInterval
from gui.shared.utils.plugins import PluginsCollection
from messenger import MessengerEntry
from windows import BattleWindow
from SettingsInterface import SettingsInterface
from debug_utils import LOG_DEBUG, LOG_ERROR
from helpers import i18n, isPlayerAvatar
from gui import DEPTH_OF_Battle, GUI_SETTINGS, g_tankActiveCamouflage, g_guiResetters, g_repeatKeyHandlers, game_control
from gui.LobbyContext import g_lobbyContext
from gui.Scaleform import VoiceChatInterface, ColorSchemeManager, getNecessaryArenaFrameName
from gui.Scaleform.SoundManager import SoundManager
from gui.shared.denunciator import BattleDenunciator
from gui.shared.utils import toUpper
from gui.shared.utils.functions import makeTooltip, getArenaSubTypeName, isBaseExists, getBattleSubTypeWinText
from gui.Scaleform.windows import UIInterface
from gui.Scaleform.MovingText import MovingText
from gui.Scaleform.Minimap import Minimap
from gui.Scaleform.CursorDelegator import g_cursorDelegator
from gui.Scaleform.ingame_help import IngameHelp
from gui.Scaleform import SCALEFORM_SWF_PATH
from gui.battle_control.arena_info import getArenaIcon, hasFlags, hasRespawns, hasResourcePoints, isFalloutMultiTeam, hasRepairPoints, isFalloutBattle, hasGasAttack
from gui.battle_control import avatar_getter
def _isVehicleEntity(entity):
import Vehicle
return isinstance(entity, Vehicle.Vehicle)
def _getQuestsTipData(arena, arenaDP):
pqTipData = [None] * 3
serverSettings = g_lobbyContext.getServerSettings()
isPQEnabled = serverSettings is not None and serverSettings.isPotapovQuestEnabled()
if isPQEnabled and (arena.guiType == constants.ARENA_GUI_TYPE.RANDOM or arena.guiType == constants.ARENA_GUI_TYPE.TRAINING and constants.IS_DEVELOPMENT or isFalloutBattle()):
vehInfo = arenaDP.getVehicleInfo(arenaDP.getPlayerVehicleID(forceUpdate=True))
if isFalloutBattle():
pQuests = vehInfo.player.getFalloutPotapovQuests()
else:
pQuests = vehInfo.player.getRandomPotapovQuests()
if len(pQuests):
quest = pQuests[0]
pqTipData = [quest.getUserName(), _getQuestConditionsMessage(INGAME_GUI.POTAPOVQUESTS_TIP_MAINHEADER, quest.getUserMainCondition()), _getQuestConditionsMessage(INGAME_GUI.POTAPOVQUESTS_TIP_ADDITIONALHEADER, quest.getUserAddCondition())]
else:
pqTipData = [i18n.makeString(INGAME_GUI.POTAPOVQUESTS_TIP_NOQUESTS_BATTLETYPE if isFalloutBattle() else INGAME_GUI.POTAPOVQUESTS_TIP_NOQUESTS_VEHICLETYPE), None, None]
return pqTipData
def _getQuestConditionsMessage(header, text):
return i18n.makeString(text_styles.middleTitle(header) + '\n' + text_styles.main(text))
_CONTOUR_ICONS_MASK = '../maps/icons/vehicle/contour/%(unicName)s.png'
_SMALL_MAP_SOURCE = '../maps/icons/map/battleLoading/%s.png'
_SCOPE = EVENT_BUS_SCOPE.BATTLE
class Battle(BattleWindow):
teamBasesPanel = property(lambda self: self.__teamBasesPanel)
timersBar = property(lambda self: self.__timersBar)
consumablesPanel = property(lambda self: self.__consumablesPanel)
damagePanel = property(lambda self: self.__damagePanel)
markersManager = property(lambda self: self.__markersManager)
vErrorsPanel = property(lambda self: self.__vErrorsPanel)
vMsgsPanel = property(lambda self: self.__vMsgsPanel)
pMsgsPanel = property(lambda self: self.__pMsgsPanel)
minimap = property(lambda self: self.__minimap)
radialMenu = property(lambda self: self.__radialMenu)
damageInfoPanel = property(lambda self: self.__damageInfoPanel)
fragCorrelation = property(lambda self: self.__fragCorrelation)
statsForm = property(lambda self: self.__statsForm)
leftPlayersPanel = property(lambda self: self.__leftPlayersPanel)
rightPlayersPanel = property(lambda self: self.__rightPlayersPanel)
ribbonsPanel = property(lambda self: self.__ribbonsPanel)
ppSwitcher = property(lambda self: self.__ppSwitcher)
indicators = property(lambda self: self.__indicators)
VEHICLE_DESTROY_TIMER = {'ALL': 'all',
constants.VEHICLE_MISC_STATUS.VEHICLE_DROWN_WARNING: 'drown',
constants.VEHICLE_MISC_STATUS.VEHICLE_IS_OVERTURNED: 'overturn'}
VEHICLE_DEATHZONE_TIMER = {'ALL': 'all',
constants.DEATH_ZONES.STATIC: 'death_zone',
constants.DEATH_ZONES.GAS_ATTACK: 'gas_attack'}
VEHICLE_DEATHZONE_TIMER_SOUND = {constants.DEATH_ZONES.GAS_ATTACK: ({'warning': 'fallout_gaz_sphere_warning',
'critical': 'fallout_gaz_sphere_timer'}, {'warning': '/GUI/fallout/fallout_gaz_sphere_warning',
'critical': '/GUI/fallout/fallout_gaz_sphere_timer'})}
__cameraVehicleID = -1
__stateHandlers = {VEHICLE_VIEW_STATE.FIRE: '_setFireInVehicle',
VEHICLE_VIEW_STATE.SHOW_DESTROY_TIMER: '_showVehicleTimer',
VEHICLE_VIEW_STATE.HIDE_DESTROY_TIMER: '_hideVehicleTimer',
VEHICLE_VIEW_STATE.SHOW_DEATHZONE_TIMER: 'showDeathzoneTimer',
VEHICLE_VIEW_STATE.HIDE_DEATHZONE_TIMER: 'hideDeathzoneTimer',
VEHICLE_VIEW_STATE.OBSERVED_BY_ENEMY: '_showSixthSenseIndicator'}
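    # The values above are method names; __onVehicleStateUpdated resolves
    # them with getattr and calls the matching handler when the vehicle
    # state controller reports that state.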
def __init__(self, appNS):
self.__ns = appNS
self.__soundManager = None
self.__arena = BigWorld.player().arena
self.__plugins = PluginsCollection(self)
plugins = {}
if hasFlags():
plugins['flagNotification'] = FlagNotificationPlugin
if hasRepairPoints():
plugins['repairTimer'] = RepairTimerPlugin
if hasRespawns() and (constants.IS_DEVELOPMENT or not BattleReplay.g_replayCtrl.isPlaying):
plugins['respawnView'] = RespawnViewPlugin
if hasResourcePoints():
plugins['resources'] = ResourcePointsPlugin
if hasGasAttack():
plugins['gasAttack'] = GasAttackPlugin
self.__plugins.addPlugins(plugins)
self.__denunciator = BattleDenunciator()
self.__timerSounds = {}
for timer, sounds in self.VEHICLE_DEATHZONE_TIMER_SOUND.iteritems():
self.__timerSounds[timer] = {}
for level, sound in sounds[False].iteritems():
self.__timerSounds[timer][level] = SoundGroups.g_instance.getSound2D(sound)
self.__timerSound = None
BattleWindow.__init__(self, 'battle.swf')
self.__isHelpWindowShown = False
self.__cameraMode = None
self.component.wg_inputKeyMode = 1
self.component.position.z = DEPTH_OF_Battle
self.movie.backgroundAlpha = 0
self.addFsCallbacks({'battle.leave': self.onExitBattle})
self.addExternalCallbacks({'battle.showCursor': self.cursorVisibility,
'battle.tryLeaveRequest': self.tryLeaveRequest,
'battle.populateFragCorrelationBar': self.populateFragCorrelationBar,
'Battle.UsersRoster.Appeal': self.onDenunciationReceived,
'Battle.selectPlayer': self.selectPlayer,
'battle.helpDialogOpenStatus': self.helpDialogOpenStatus,
'battle.initLobbyDialog': self._initLobbyDialog,
'battle.reportBug': self.reportBug})
self.__dynSquadListener = None
BigWorld.wg_setRedefineKeysMode(False)
self.onPostmortemVehicleChanged(BigWorld.player().playerVehicleID)
return
@property
def appNS(self):
return self.__ns
@property
def soundManager(self):
return self.__soundManager
def attachCursor(self):
return g_cursorDelegator.activateCursor()
def detachCursor(self):
return g_cursorDelegator.detachCursor()
def getRoot(self):
return self.__battle_flashObject
def getCameraVehicleID(self):
return self.__cameraVehicleID
def populateFragCorrelationBar(self, _):
if self.__fragCorrelation is not None:
self.__fragCorrelation.populate()
return
def showAll(self, event):
self.call('battle.showAll', [event.ctx['visible']])
def showCursor(self, isShow):
self.cursorVisibility(-1, isShow)
def selectPlayer(self, cid, vehId):
player = BigWorld.player()
if isPlayerAvatar():
player.selectPlayer(int(vehId))
def onDenunciationReceived(self, _, uid, userName, topic):
self.__denunciator.makeAppeal(uid, userName, topic)
self.__arenaCtrl.invalidateGUI()
def onPostmortemVehicleChanged(self, id):
if self.__cameraVehicleID == id:
return
else:
self.__cameraVehicleID = id
self.__arenaCtrl.invalidateGUI(not g_sessionProvider.getCtx().isPlayerObserver())
g_sessionProvider.getVehicleStateCtrl().switchToAnother(id)
self._hideVehicleTimer('ALL')
self.hideDeathzoneTimer('ALL')
self.__vErrorsPanel.clear()
self.__vMsgsPanel.clear()
aim = BigWorld.player().inputHandler.aim
if aim is not None:
aim.updateAmmoState(True)
return
def onCameraChanged(self, cameraMode, curVehID = None):
LOG_DEBUG('onCameraChanged', cameraMode, curVehID)
if self.__cameraMode == 'mapcase':
self.setAimingMode(False)
elif cameraMode == 'mapcase':
self.setAimingMode(True)
self.__cameraMode = cameraMode
def setVisible(cname):
m = self.getMember(cname)
if m is not None:
m.visible = cameraMode != 'video'
return
if self.__isGuiShown():
self.damagePanel.showAll(cameraMode != 'video')
setVisible('vehicleErrorsPanel')
if cameraMode == 'video':
self.__cameraVehicleID = -1
self.__vErrorsPanel.clear()
self.__vMsgsPanel.clear()
self._hideVehicleTimer('ALL')
self.hideDeathzoneTimer('ALL')
aim = BigWorld.player().inputHandler.aim
if aim is not None:
aim.updateAmmoState(True)
aim = BigWorld.player().inputHandler.aim
if aim is not None:
aim.onCameraChange()
return
def __isGuiShown(self):
m = self.getMember('_root')
if m is not None and callable(m.isGuiVisible):
return m.isGuiVisible()
else:
return False
def _showVehicleTimer(self, value):
code, time, warnLvl = value
LOG_DEBUG('show vehicles destroy timer', code, time, warnLvl)
self.call('destroyTimer.show', [self.VEHICLE_DESTROY_TIMER[code], time, warnLvl])
def _hideVehicleTimer(self, code = None):
LOG_DEBUG('hide vehicles destroy timer', code)
if code is None:
code = 'ALL'
self.call('destroyTimer.hide', [self.VEHICLE_DESTROY_TIMER[code]])
return
def showDeathzoneTimer(self, value):
zoneID, time, warnLvl = value
if self.__timerSound is not None:
self.__timerSound.stop()
self.__timerSound = None
sound = self.__timerSounds.get(zoneID, {}).get(warnLvl)
if sound is not None:
self.__timerSound = sound
self.__timerSound.play()
LOG_DEBUG('show vehicles deathzone timer', zoneID, time, warnLvl)
self.call('destroyTimer.show', [self.VEHICLE_DEATHZONE_TIMER[zoneID], time, warnLvl])
return
def hideDeathzoneTimer(self, zoneID = None):
if self.__timerSound is not None:
self.__timerSound.stop()
self.__timerSound = None
if zoneID is None:
zoneID = 'ALL'
LOG_DEBUG('hide vehicles deathzone timer', zoneID)
self.call('destroyTimer.hide', [self.VEHICLE_DEATHZONE_TIMER[zoneID]])
return
def _showSixthSenseIndicator(self, isShow):
self.call('sixthSenseIndicator.show', [isShow])
def setVisible(self, bool):
LOG_DEBUG('[Battle] visible', bool)
self.component.visible = bool
def afterCreate(self):
event = events.AppLifeCycleEvent
g_eventBus.handleEvent(event(self.__ns, event.INITIALIZING))
player = BigWorld.player()
voice = VoiceChatInterface.g_instance
LOG_DEBUG('[Battle] afterCreate')
setattr(self.movie, '_global.wg_isShowLanguageBar', GUI_SETTINGS.isShowLanguageBar)
setattr(self.movie, '_global.wg_isShowServerStats', constants.IS_SHOW_SERVER_STATS)
setattr(self.movie, '_global.wg_isShowVoiceChat', GUI_SETTINGS.voiceChat)
setattr(self.movie, '_global.wg_voiceChatProvider', voice.voiceChatProvider)
setattr(self.movie, '_global.wg_isChina', constants.IS_CHINA)
setattr(self.movie, '_global.wg_isKorea', constants.IS_KOREA)
setattr(self.movie, '_global.wg_isReplayPlaying', BattleReplay.g_replayCtrl.isPlaying)
BattleWindow.afterCreate(self)
addListener = g_eventBus.addListener
addListener(events.GameEvent.HELP, self.toggleHelpWindow, scope=_SCOPE)
addListener(events.GameEvent.GUI_VISIBILITY, self.showAll, scope=_SCOPE)
player.inputHandler.onPostmortemVehicleChanged += self.onPostmortemVehicleChanged
player.inputHandler.onCameraChanged += self.onCameraChanged
g_settingsCore.onSettingsChanged += self.__accs_onSettingsChanged
g_settingsCore.interfaceScale.onScaleChanged += self.__onRecreateDevice
isMutlipleTeams = isFalloutMultiTeam()
isFallout = isFalloutBattle()
self.proxy = weakref.proxy(self)
self.__battle_flashObject = self.proxy.getMember('_level0')
if self.__battle_flashObject:
self.__battle_flashObject.resync()
voice.populateUI(self.proxy)
voice.onPlayerSpeaking += self.setPlayerSpeaking
voice.onVoiceChatInitFailed += self.onVoiceChatInitFailed
self.colorManager = ColorSchemeManager._ColorSchemeManager()
self.colorManager.populateUI(self.proxy)
self.movingText = MovingText()
self.movingText.populateUI(self.proxy)
self.__settingsInterface = SettingsInterface()
self.__settingsInterface.populateUI(self.proxy)
self.__soundManager = SoundManager()
self.__soundManager.populateUI(self.proxy)
self.__timersBar = TimersBar(self.proxy, isFallout)
self.__teamBasesPanel = TeamBasesPanel(self.proxy)
self.__debugPanel = DebugPanel(self.proxy)
self.__consumablesPanel = ConsumablesPanel(self.proxy)
self.__damagePanel = DamagePanel(self.proxy)
self.__markersManager = MarkersManager(self.proxy)
self.__ingameHelp = IngameHelp(self.proxy)
self.__minimap = Minimap(self.proxy)
self.__radialMenu = RadialMenu(self.proxy)
self.__ribbonsPanel = BattleRibbonsPanel(self.proxy)
self.__indicators = IndicatorsCollection()
self.__ppSwitcher = PlayersPanelsSwitcher(self.proxy)
isColorBlind = g_settingsCore.getSetting('isColorBlind')
self.__leftPlayersPanel = playersPanelFactory(self.proxy, True, isColorBlind, isFallout, isMutlipleTeams)
self.__rightPlayersPanel = playersPanelFactory(self.proxy, False, isColorBlind, isFallout, isMutlipleTeams)
self.__damageInfoPanel = VehicleDamageInfoPanel(self.proxy)
self.__damageInfoPanel.start()
self.__fragCorrelation = scorePanelFactory(self.proxy, isFallout, isMutlipleTeams)
self.__statsForm = statsFormFactory(self.proxy, isFallout, isMutlipleTeams)
self.__plugins.init()
self.isVehicleCountersVisible = g_settingsCore.getSetting('showVehiclesCounter')
self.__fragCorrelation.showVehiclesCounter(self.isVehicleCountersVisible)
self.__vErrorsPanel = VehicleErrorMessages(self.proxy)
self.__vMsgsPanel = VehicleMessages(self.proxy)
self.__pMsgsPanel = PlayerMessages(self.proxy)
self.__plugins.start()
self.__debugPanel.start()
self.__consumablesPanel.start()
self.__damagePanel.start()
self.__ingameHelp.start()
self.__vErrorsPanel.start()
self.__vMsgsPanel.start()
self.__pMsgsPanel.start()
self.__markersManager.start()
self.__markersManager.setMarkerDuration(GUI_SETTINGS.markerHitSplashDuration)
markers = {'enemy': g_settingsCore.getSetting('enemy'),
'dead': g_settingsCore.getSetting('dead'),
'ally': g_settingsCore.getSetting('ally')}
self.__markersManager.setMarkerSettings(markers)
MessengerEntry.g_instance.gui.invoke('populateUI', self.proxy)
g_guiResetters.add(self.__onRecreateDevice)
g_repeatKeyHandlers.add(self.component.handleKeyEvent)
self.__onRecreateDevice()
self.__statsForm.populate()
self.__leftPlayersPanel.populateUI(self.proxy)
self.__rightPlayersPanel.populateUI(self.proxy)
if BattleReplay.g_replayCtrl.isPlaying:
BattleReplay.g_replayCtrl.onBattleSwfLoaded()
self.__populateData()
self.__minimap.start()
self.__radialMenu.setSettings(self.__settingsInterface)
self.__radialMenu.populateUI(self.proxy)
self.__ribbonsPanel.start()
g_sessionProvider.setBattleUI(self)
self.__arenaCtrl = battleArenaControllerFactory(self, isFallout, isMutlipleTeams)
g_sessionProvider.addArenaCtrl(self.__arenaCtrl)
self.updateFlagsColor()
self.movie.setFocussed(SCALEFORM_SWF_PATH)
self.call('battle.initDynamicSquad', self.__getDynamicSquadsInitParams(enableButton=not BattleReplay.g_replayCtrl.isPlaying))
self.call('sixthSenseIndicator.setDuration', [GUI_SETTINGS.sixthSenseDuration])
g_tankActiveCamouflage[player.vehicleTypeDescriptor.type.compactDescr] = self.__arena.arenaType.vehicleCamouflageKind
keyCode = CommandMapping.g_instance.get('CMD_VOICECHAT_MUTE')
if not BigWorld.isKeyDown(keyCode):
VOIP.getVOIPManager().setMicMute(True)
ctrl = g_sessionProvider.getVehicleStateCtrl()
ctrl.onVehicleStateUpdated += self.__onVehicleStateUpdated
ctrl.onPostMortemSwitched += self.__onPostMortemSwitched
if BattleReplay.g_replayCtrl.isPlaying:
self.__dynSquadListener = ReplayDynSquadViewListener(self.proxy)
elif BattleReplay.g_replayCtrl.isRecording:
self.__dynSquadListener = RecordDynSquadViewListener(self.proxy)
else:
self.__dynSquadListener = DynSquadViewListener(self.proxy)
g_eventBus.handleEvent(event(self.__ns, event.INITIALIZED))
def beforeDelete(self):
LOG_DEBUG('[Battle] beforeDelete')
removeListener = g_eventBus.removeListener
removeListener(events.GameEvent.HELP, self.toggleHelpWindow, scope=_SCOPE)
removeListener(events.GameEvent.GUI_VISIBILITY, self.showAll, scope=_SCOPE)
ctrl = g_sessionProvider.getVehicleStateCtrl()
if ctrl is not None:
ctrl.onVehicleStateUpdated -= self.__onVehicleStateUpdated
ctrl.onPostMortemSwitched -= self.__onPostMortemSwitched
player = BigWorld.player()
if player and player.inputHandler:
player.inputHandler.onPostmortemVehicleChanged -= self.onPostmortemVehicleChanged
player.inputHandler.onCameraChanged -= self.onCameraChanged
if self.colorManager:
self.colorManager.dispossessUI()
voice = VoiceChatInterface.g_instance
if voice:
voice.dispossessUI(self.proxy)
voice.onPlayerSpeaking -= self.setPlayerSpeaking
voice.onVoiceChatInitFailed -= self.onVoiceChatInitFailed
if self.__plugins is not None:
self.__plugins.stop()
self.__plugins.fini()
self.__plugins = None
if self.movingText is not None:
self.movingText.dispossessUI()
self.movingText = None
if self.__timerSound is not None:
self.__timerSound.stop()
self.__timerSound = None
if self.__soundManager is not None:
self.__soundManager.dispossessUI()
self.__soundManager = None
if self.colorManager is not None:
self.colorManager.dispossessUI()
self.colorManager = None
if self.component:
g_repeatKeyHandlers.discard(self.component.handleKeyEvent)
g_settingsCore.onSettingsChanged -= self.__accs_onSettingsChanged
g_settingsCore.interfaceScale.onScaleChanged -= self.__onRecreateDevice
self.__timersBar.destroy()
self.__teamBasesPanel.destroy()
self.__debugPanel.destroy()
self.__consumablesPanel.destroy()
self.__damagePanel.destroy()
self.__markersManager.destroy()
self.__ingameHelp.destroy()
self.__vErrorsPanel.destroy()
self.__vMsgsPanel.destroy()
self.__pMsgsPanel.destroy()
self.__radialMenu.destroy()
self.__minimap.destroy()
self.__ribbonsPanel.destroy()
self.__fragCorrelation.destroy()
self.__statsForm.destroy()
self.__damageInfoPanel.destroy()
g_sessionProvider.clearBattleUI()
if self.__arenaCtrl is not None:
g_sessionProvider.removeArenaCtrl(self.__arenaCtrl)
self.__arenaCtrl.clear()
self.__arenaCtrl = None
self.__ppSwitcher.destroy()
self.__leftPlayersPanel.dispossessUI()
self.__rightPlayersPanel.dispossessUI()
MessengerEntry.g_instance.gui.invoke('dispossessUI')
self.__arena = None
self.__denunciator = None
g_guiResetters.discard(self.__onRecreateDevice)
self.__settingsInterface.dispossessUI()
self.__settingsInterface = None
if self.__dynSquadListener:
self.__dynSquadListener.destroy()
self.__dynSquadListener = None
BattleWindow.beforeDelete(self)
event = events.AppLifeCycleEvent
g_eventBus.handleEvent(event(self.__ns, event.DESTROYED))
return
def __onVehicleStateUpdated(self, state, value):
if state not in self.__stateHandlers:
return
else:
handler = getattr(self, self.__stateHandlers[state], None)
if handler and callable(handler):
if value is not None:
handler(value)
else:
handler()
return
def _setFireInVehicle(self, bool):
self.call('destroyTimer.onFireInVehicle', [bool])
def onVoiceChatInitFailed(self):
if GUI_SETTINGS.voiceChat:
self.call('VoiceChat.initFailed', [])
def clearCommands(self):
pass
def bindCommands(self):
self.__consumablesPanel.bindCommands()
self.__ingameHelp.buildCmdMapping()
def updateFlagsColor(self):
isColorBlind = g_settingsCore.getSetting('isColorBlind')
colorGreen = self.colorManager.getSubScheme('flag_team_green', isColorBlind=isColorBlind)['rgba']
colorRed = self.colorManager.getSubScheme('flag_team_red', isColorBlind=isColorBlind)['rgba']
arenaDP = g_sessionProvider.getArenaDP()
teamsOnArena = arenaDP.getTeamsOnArena()
for teamIdx in teamsOnArena:
color = colorGreen if arenaDP.isAllyTeam(teamIdx) else colorRed
BigWorld.wg_setFlagColor(teamIdx, color / 255)
for teamIdx in [0] + teamsOnArena:
BigWorld.wg_setFlagEmblem(teamIdx, 'system/maps/wg_emblem.dds', Math.Vector4(0.0, 0.1, 0.5, 0.9))
def setPlayerSpeaking(self, accountDBID, flag):
self.__callEx('setPlayerSpeaking', [accountDBID, flag])
vID = g_sessionProvider.getCtx().getVehIDByAccDBID(accountDBID)
if vID > 0:
self.__markersManager.showDynamic(vID, flag)
def isPlayerSpeaking(self, accountDBID):
return VoiceChatInterface.g_instance.isPlayerSpeaking(accountDBID)
def __onPostMortemSwitched(self):
LogitechMonitor.onScreenChange('postmortem')
if self.radialMenu is not None:
self.radialMenu.forcedHide()
if not g_sessionProvider.getCtx().isPlayerObserver():
self.__callEx('showPostmortemTips', [1.0, 5.0, 1.0])
return
def cursorVisibility(self, callbackId, visible, x = None, y = None, customCall = False, enableAiming = True):
if visible:
g_cursorDelegator.syncMousePosition(self, x, y, customCall)
else:
g_cursorDelegator.restoreMousePosition()
if BigWorld.player() is not None and isPlayerAvatar():
BigWorld.player().setForcedGuiControlMode(visible, False, enableAiming)
return
def tryLeaveRequest(self, _):
resStr = 'quitBattle'
replayCtrl = BattleReplay.g_replayCtrl
player = BigWorld.player()
isVehicleAlive = getattr(player, 'isVehicleAlive', False)
isNotTraining = self.__arena.guiType != constants.ARENA_GUI_TYPE.TRAINING
if not replayCtrl.isPlaying:
if constants.IS_KOREA and gui.GUI_SETTINGS.igrEnabled and self.__arena is not None and isNotTraining:
vehicleID = getattr(player, 'playerVehicleID', -1)
if vehicleID in self.__arena.vehicles:
vehicle = self.__arena.vehicles[vehicleID]
if isVehicleAlive and vehicle.get('igrType') != constants.IGR_TYPE.NONE:
resStr = 'quitBattleIGR'
else:
LOG_ERROR("Player's vehicle not found", vehicleID)
isDeserter = isVehicleAlive and isNotTraining
if isDeserter:
resStr += '/deserter'
else:
isDeserter = False
self.__callEx('tryLeaveResponse', [resStr, isDeserter])
return
def onExitBattle(self, _):
arena = getattr(BigWorld.player(), 'arena', None)
LOG_DEBUG('onExitBattle', arena)
if arena:
BigWorld.player().leaveArena()
return
def toggleHelpWindow(self, _):
self.__callEx('showHideHelp', [not self.__isHelpWindowShown])
def setAimingMode(self, isAiming):
self.__callEx('setAimingMode', [isAiming])
def helpDialogOpenStatus(self, cid, isOpened):
self.__isHelpWindowShown = isOpened
def _initLobbyDialog(self, cid):
if connectionManager.serverUserName:
tooltipBody = i18n.makeString('#tooltips:header/info/players_online_full/body')
tooltipFullData = makeTooltip('#tooltips:header/info/players_online_full/header', tooltipBody % {'servername': connectionManager.serverUserName})
self.__callEx('setServerStatsInfo', [tooltipFullData])
self.__callEx('setServerName', [connectionManager.serverUserName])
if constants.IS_SHOW_SERVER_STATS:
stats = game_control.g_instance.serverStats.getStats()
if 'clusterCCU' in stats and 'regionCCU' in stats:
self.__callEx('setServerStats', [stats['clusterCCU'], stats['regionCCU']])
else:
self.__callEx('setServerStats', [None, None])
else:
self.__callEx('setServerName', ['-'])
links = GUI_SETTINGS.reportBugLinks
if len(links):
reportBugButton = makeHyperLink('ingameMenu', MENU.INGAME_MENU_LINKS_REPORT_BUG)
self.__callEx('setReportBugLink', [reportBugButton])
return
def reportBug(self, _):
reportBugOpenConfirm(g_sessionProvider.getArenaDP().getVehicleInfo().player.accountDBID)
def __getDynamicSquadsInitParams(self, enableAlly = True, enableEnemy = False, enableButton = True):
return [self.__arena.guiType == constants.ARENA_GUI_TYPE.RANDOM and enableAlly, enableEnemy, enableButton]
def __populateData(self):
arena = avatar_getter.getArena()
arenaDP = g_sessionProvider.getArenaDP()
arenaData = ['',
0,
'',
'',
'']
if arena:
arenaData = [toUpper(arena.arenaType.name)]
descExtra = getPrebattleFullDescription(arena.extraData or {})
arenaSubType = getArenaSubTypeName(BigWorld.player().arenaTypeID)
if descExtra:
arenaData.extend([arena.guiType + 1, descExtra])
elif arena.guiType in [constants.ARENA_GUI_TYPE.RANDOM, constants.ARENA_GUI_TYPE.TRAINING]:
arenaTypeName = '#arenas:type/%s/name' % arenaSubType
arenaData.extend([getNecessaryArenaFrameName(arenaSubType, isBaseExists(BigWorld.player().arenaTypeID, BigWorld.player().team)), arenaTypeName])
elif arena.guiType in constants.ARENA_GUI_TYPE.RANGE:
arenaData.append(constants.ARENA_GUI_TYPE_LABEL.LABELS[arena.guiType])
arenaData.append('#menu:loading/battleTypes/%d' % arena.guiType)
else:
arenaData.extend([arena.guiType + 1, '#menu:loading/battleTypes/%d' % arena.guiType])
myTeamNumber = arenaDP.getNumberOfTeam()
getTeamName = g_sessionProvider.getCtx().getTeamName
arenaData.extend([getTeamName(myTeamNumber), getTeamName(arenaDP.getNumberOfTeam(enemy=True))])
teamHasBase = 1 if isBaseExists(BigWorld.player().arenaTypeID, myTeamNumber) else 2
if not isFalloutBattle():
typeEvent = 'normal'
winText = getBattleSubTypeWinText(BigWorld.player().arenaTypeID, teamHasBase)
else:
typeEvent = 'fallout'
if isFalloutMultiTeam():
winText = i18n.makeString(ARENAS.TYPE_FALLOUTMUTLITEAM_DESCRIPTION)
else:
winText = i18n.makeString('#arenas:type/%s/description' % arenaSubType)
arenaData.append(winText)
arenaData.append(typeEvent)
arenaData.extend(_getQuestsTipData(arena, arenaDP))
arenaData.extend([_SMALL_MAP_SOURCE % arena.arenaType.geometryName])
self.__callEx('arenaData', arenaData)
def __onRecreateDevice(self, scale = None):
params = list(GUI.screenResolution())
params.append(g_settingsCore.interfaceScale.get())
self.call('Stage.Update', params)
self.__markersManager.updateMarkersScale()
def invalidateGUI(self):
arenaCtrl = getattr(self, '_Battle__arenaCtrl', None)
if arenaCtrl is not None:
arenaCtrl.invalidateGUI()
return
def __callEx(self, funcName, args = None):
self.call('battle.' + funcName, args)
def __accs_onSettingsChanged(self, diff):
self.colorManager.update()
if 'isColorBlind' in diff:
isColorBlind = diff['isColorBlind']
self.__leftPlayersPanel.defineColorFlags(isColorBlind=isColorBlind)
self.__rightPlayersPanel.defineColorFlags(isColorBlind=isColorBlind)
self.updateFlagsColor()
self.__markersManager.updateMarkers()
self.__minimap.updateEntries()
if 'enemy' in diff or 'dead' in diff or 'ally' in diff:
markers = {'enemy': g_settingsCore.getSetting('enemy'),
'dead': g_settingsCore.getSetting('dead'),
'ally': g_settingsCore.getSetting('ally')}
self.__markersManager.setMarkerSettings(markers)
self.__markersManager.updateMarkerSettings()
if 'showVehiclesCounter' in diff:
self.isVehicleCountersVisible = diff['showVehiclesCounter']
self.__fragCorrelation.showVehiclesCounter(self.isVehicleCountersVisible)
if 'interfaceScale' in diff:
self.__onRecreateDevice()
self.__arenaCtrl.invalidateGUI()
self.__arenaCtrl.invalidateArenaInfo()
def setTeamValuesData(self, data):
if self.__battle_flashObject is not None:
self.__battle_flashObject.setTeamValues(data)
return
def setMultiteamValues(self, data):
if self.__battle_flashObject is not None:
self.__battle_flashObject.setMultiteamValues(data)
return
def getPlayerNameLength(self, isEnemy):
panel = self.rightPlayersPanel if isEnemy else self.leftPlayersPanel
return panel.getPlayerNameLength()
def getVehicleNameLength(self, isEnemy):
panel = self.rightPlayersPanel if isEnemy else self.leftPlayersPanel
return panel.getVehicleNameLength()
def getTeamBasesPanel(self):
return self.__teamBasesPanel
def getBattleTimer(self):
return self.__timersBar
def getPreBattleTimer(self):
return self.__timersBar
def getConsumablesPanel(self):
return self.__consumablesPanel
def getDamagePanel(self):
return self.__damagePanel
def getMarkersManager(self):
return self.__markersManager
def getVErrorsPanel(self):
return self.__vErrorsPanel
def getVMsgsPanel(self):
return self.__vMsgsPanel
def getPMsgsPanel(self):
return self.__pMsgsPanel
def getMinimap(self):
return self.__minimap
def getRadialMenu(self):
return self.__radialMenu
def getDamageInfoPanel(self):
return self.__damageInfoPanel
def getFragCorrelation(self):
return self.__fragCorrelation
def getStatsForm(self):
return self.__statsForm
def getLeftPlayersPanel(self):
return self.__leftPlayersPanel
def getRightPlayersPanel(self):
return self.__rightPlayersPanel
def getRibbonsPanel(self):
return self.__ribbonsPanel
def getPlayersPanelsSwitcher(self):
return self.__ppSwitcher
def getIndicators(self):
return self.__indicators
def getDebugPanel(self):
return self.__debugPanel
class DebugPanel(UIInterface):
__UPDATE_INTERVAL = 0.01
def __init__(self, parentUI):
UIInterface.__init__(self)
self.__ui = parentUI
self.__timeInterval = None
self.__performanceStats = _PerformanceStats()
self.__performanceStats.populateUI(parentUI)
return
def start(self):
self.__timeInterval = TimeInterval(self.__UPDATE_INTERVAL, self, '_DebugPanel__update')
self.__timeInterval.start()
self.__update()
def destroy(self):
self.__performanceStats.disposeUI()
self.__performanceStats = None
self.__timeInterval.stop()
return
def __update(self):
player = BigWorld.player()
if player is None or not hasattr(player, 'playerVehicleID'):
return
else:
fps = 0
recordedFps = -1
ping = 0
isLaggingNow = False
replayCtrl = BattleReplay.g_replayCtrl
if replayCtrl.isPlaying and replayCtrl.fps > 0:
fps = BigWorld.getFPS()[1]
recordedFps = replayCtrl.fps
ping = replayCtrl.ping
isLaggingNow = replayCtrl.isLaggingNow
else:
isLaggingNow = player.filter.isLaggingNow
if not isLaggingNow:
for v in BigWorld.entities.values():
if _isVehicleEntity(v):
if not v.isPlayerVehicle:
if v.isAlive() and isinstance(v.filter, BigWorld.WGVehicleFilter) and v.filter.isLaggingNow:
isLaggingNow = True
break
ping = min(BigWorld.LatencyInfo().value[3] * 1000, 999)
if ping < 999:
ping = max(1, ping - 500.0 * constants.SERVER_TICK_LENGTH)
fpsInfo = BigWorld.getFPS()
from helpers.statistics import g_statistics
g_statistics.update(fpsInfo, ping, isLaggingNow)
fps = fpsInfo[1]
if replayCtrl.isRecording:
replayCtrl.setFpsPingLag(fps, ping, isLaggingNow)
try:
self.__performanceStats.updateDebugInfo(int(fps), int(ping), isLaggingNow, int(recordedFps))
except:
pass
return
class _PerformanceStats(UIInterface):
def __init__(self):
UIInterface.__init__(self)
self.flashObject = None
return
def populateUI(self, proxy):
UIInterface.populateUI(self, proxy)
self.flashObject = self.uiHolder.getMember('_level0.debugPanel')
self.flashObject.script = self
def updateDebugInfo(self, fps, ping, lag, fpsReplay):
if fpsReplay != 0 and fpsReplay != -1:
fps = '{0}({1})'.format(fpsReplay, fps)
else:
fps = str(fps)
ping = str(ping)
self.flashObject.as_updateDebugInfo(fps, ping, lag)
def disposeUI(self):
self.flashObject.script = None
self.flashObject = None
return
# okay decompiling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\battle.pyc
# decompiled 1 file: 1 okay, 0 failed, 0 verify failed
# 2016.02.14 12:38:28 Central Europe (Standard Time)
| [
"[email protected]"
]
| |
6497fdc892b607d8e24d743723cfcec4d261b1f9 | 259cc507d97bfeff84d21de3a0ab56640676a9eb | /venv1/Lib/site-packages/tensorflow/contrib/gan/python/eval/python/sliced_wasserstein_impl.py | 7b2c9927975fe036db777a8f6ded43cb2aa7ca72 | [
"MIT",
"Apache-2.0"
]
| permissive | Soum-Soum/Tensorflow_Face_Finder | c3ef71b6f718f6720b80f8760d28b6ca6e11e6d2 | fec6c15d2df7012608511ad87f4b55731bf99478 | refs/heads/master | 2020-03-22T20:31:39.606644 | 2018-07-12T13:47:56 | 2018-07-12T13:47:56 | 140,607,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,341 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Sliced Wasserstein Distance.
Proposed in https://arxiv.org/abs/1710.10196 and the official Theano
implementation that we used as reference can be found here:
https://github.com/tkarras/progressive_growing_of_gans
Note: this is not an exact distance but an approximation through random
projections.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import script_ops
__all__ = ['sliced_wasserstein_distance']
_GAUSSIAN_FILTER = np.float32([[1, 4, 6, 4, 1], [4, 16, 24, 16, 4], [
6, 24, 36, 24, 6
], [4, 16, 24, 16, 4], [1, 4, 6, 4, 1]]).reshape([5, 5, 1, 1]) / 256.0
def _laplacian_pyramid(batch, num_levels):
"""Compute a Laplacian pyramid.
Args:
batch: (tensor) The batch of images (batch, height, width, channels).
num_levels: (int) Desired number of hierarchical levels.
Returns:
List of tensors from the highest to lowest resolution.
"""
gaussian_filter = constant_op.constant(_GAUSSIAN_FILTER)
def spatial_conv(batch, gain):
s = array_ops.shape(batch)
padded = array_ops.pad(batch, [[0, 0], [2, 2], [2, 2], [0, 0]], 'REFLECT')
xt = array_ops.transpose(padded, [0, 3, 1, 2])
xt = array_ops.reshape(xt, [s[0] * s[3], s[1] + 4, s[2] + 4, 1])
conv_out = nn_ops.conv2d(xt, gaussian_filter * gain, [1] * 4, 'VALID')
conv_xt = array_ops.reshape(conv_out, [s[0], s[3], s[1], s[2]])
conv_xt = array_ops.transpose(conv_xt, [0, 2, 3, 1])
return conv_xt
def pyr_down(batch): # matches cv2.pyrDown()
return spatial_conv(batch, 1)[:, ::2, ::2]
def pyr_up(batch): # matches cv2.pyrUp()
s = array_ops.shape(batch)
zeros = array_ops.zeros([3 * s[0], s[1], s[2], s[3]])
res = array_ops.concat([batch, zeros], 0)
res = array_ops.batch_to_space(res, crops=[[0, 0], [0, 0]], block_size=2)
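    # batch_to_space interleaves the three zero images with the originals in
    # 2x2 spatial blocks, i.e. zero-insertion 2x upsampling; the 4x filter
    # gain in spatial_conv below compensates for the inserted zeros.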
res = spatial_conv(res, 4)
return res
pyramid = [math_ops.to_float(batch)]
for _ in range(1, num_levels):
pyramid.append(pyr_down(pyramid[-1]))
pyramid[-2] -= pyr_up(pyramid[-1])
return pyramid
def _batch_to_patches(batch, patches_per_image, patch_size):
"""Extract patches from a batch.
Args:
batch: (tensor) The batch of images (batch, height, width, channels).
patches_per_image: (int) Number of patches to extract per image.
patch_size: (int) Size of the patches (size, size, channels) to extract.
Returns:
Tensor (batch*patches_per_image, patch_size, patch_size, channels) of
patches.
"""
def py_func_random_patches(batch):
"""Numpy wrapper."""
batch_size, height, width, channels = batch.shape
patch_count = patches_per_image * batch_size
hs = patch_size // 2
# Randomly pick patches.
patch_id, y, x, chan = np.ogrid[0:patch_count, -hs:hs + 1, -hs:hs + 1, 0:3]
img_id = patch_id // patches_per_image
# pylint: disable=g-no-augmented-assignment
# Need explicit addition for broadcast to work properly.
y = y + np.random.randint(hs, height - hs, size=(patch_count, 1, 1, 1))
x = x + np.random.randint(hs, width - hs, size=(patch_count, 1, 1, 1))
# pylint: enable=g-no-augmented-assignment
idx = ((img_id * height + y) * width + x) * channels + chan
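    # Flattens (image, row, col, channel) coordinates into row-major indices
    # into batch.flat, so all patches are gathered in one fancy-indexing read.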
patches = batch.flat[idx]
return patches
patches = script_ops.py_func(
py_func_random_patches, [batch], batch.dtype, stateful=False)
return patches
def _normalize_patches(patches):
"""Normalize patches by their mean and standard deviation.
Args:
patches: (tensor) The batch of patches (batch, size, size, channels).
Returns:
Tensor (batch, size, size, channels) of the normalized patches.
"""
patches = array_ops.concat(patches, 0)
mean, variance = nn.moments(patches, [1, 2, 3], keep_dims=True)
patches = (patches - mean) / math_ops.sqrt(variance)
return array_ops.reshape(patches, [array_ops.shape(patches)[0], -1])
def _sort_rows(matrix, num_rows):
"""Sort matrix rows by the last column.
Args:
matrix: a matrix of values (row,col).
num_rows: (int) number of sorted rows to return from the matrix.
Returns:
Tensor (num_rows, col) of the sorted matrix top K rows.
"""
tmatrix = array_ops.transpose(matrix, [1, 0])
sorted_tmatrix = nn_ops.top_k(tmatrix, num_rows)[0]
return array_ops.transpose(sorted_tmatrix, [1, 0])
def _sliced_wasserstein(a, b, random_sampling_count, random_projection_dim):
"""Compute the approximate sliced Wasserstein distance.
Args:
a: (matrix) Distribution "a" of samples (row, col).
b: (matrix) Distribution "b" of samples (row, col).
random_sampling_count: (int) Number of random projections to average.
random_projection_dim: (int) Dimension of the random projection space.
Returns:
Float containing the approximate distance between "a" and "b".
"""
s = array_ops.shape(a)
means = []
for _ in range(random_sampling_count):
# Random projection matrix.
proj = random_ops.random_normal(
[array_ops.shape(a)[1], random_projection_dim])
proj *= math_ops.rsqrt(
math_ops.reduce_sum(math_ops.square(proj), 0, keep_dims=True))
# Project both distributions and sort them.
proj_a = math_ops.matmul(a, proj)
proj_b = math_ops.matmul(b, proj)
proj_a = _sort_rows(proj_a, s[0])
proj_b = _sort_rows(proj_b, s[0])
# Pairwise Wasserstein distance.
wdist = math_ops.reduce_mean(math_ops.abs(proj_a - proj_b))
means.append(wdist)
return math_ops.reduce_mean(means)
def _sliced_wasserstein_svd(a, b):
"""Compute the approximate sliced Wasserstein distance using an SVD.
This is not part of the paper, it's a variant with possibly more accurate
measure.
Args:
a: (matrix) Distribution "a" of samples (row, col).
b: (matrix) Distribution "b" of samples (row, col).
Returns:
Float containing the approximate distance between "a" and "b".
"""
s = array_ops.shape(a)
# Random projection matrix.
sig, u = linalg_ops.svd(array_ops.concat([a, b], 0))[:2]
proj_a, proj_b = array_ops.split(u * sig, 2, axis=0)
proj_a = _sort_rows(proj_a[:, ::-1], s[0])
proj_b = _sort_rows(proj_b[:, ::-1], s[0])
# Pairwise Wasserstein distance.
wdist = math_ops.reduce_mean(math_ops.abs(proj_a - proj_b))
return wdist
def sliced_wasserstein_distance(real_images,
fake_images,
resolution_min=16,
patches_per_image=64,
patch_size=7,
random_sampling_count=1,
random_projection_dim=7 * 7 * 3,
use_svd=False):
"""Compute the Wasserstein distance between two distributions of images.
  Note that the measure varies with the number of images. Use 8192 images to get
numbers comparable to the ones in the original paper.
Args:
real_images: (tensor) Real images (batch, height, width, channels).
fake_images: (tensor) Fake images (batch, height, width, channels).
    resolution_min: (int) Minimum resolution for the Laplacian pyramid.
patches_per_image: (int) Number of patches to extract per image per
Laplacian level.
patch_size: (int) Width of a square patch.
random_sampling_count: (int) Number of random projections to average.
random_projection_dim: (int) Dimension of the random projection space.
use_svd: experimental method to compute a more accurate distance.
Returns:
List of tuples (distance_real, distance_fake) for each level of the
    Laplacian pyramid from the highest resolution to the lowest.
    distance_real is the Wasserstein distance between two independent sets
    of patches drawn from the real images (a self-distance baseline).
    distance_fake is the Wasserstein distance between real and fake images.
Raises:
ValueError: If the inputs shapes are incorrect. Input tensor dimensions
(batch, height, width, channels) are expected to be known at graph
construction time. In addition height and width must be the same and the
number of colors should be exactly 3. Real and fake images must have the
same size.
"""
height = real_images.shape[1]
real_images.shape.assert_is_compatible_with([None, None, height, 3])
fake_images.shape.assert_is_compatible_with(real_images.shape)
# Select resolutions.
resolution_full = int(height)
resolution_min = min(resolution_min, resolution_full)
resolution_max = resolution_full
# Base loss of detail.
resolutions = [
2**i
for i in range(
int(np.log2(resolution_max)),
int(np.log2(resolution_min)) - 1, -1)
]
# Gather patches for each level of the Laplacian pyramids.
patches_real, patches_fake, patches_test = (
[[] for _ in resolutions] for _ in range(3))
for lod, level in enumerate(
_laplacian_pyramid(real_images, len(resolutions))):
patches_real[lod].append(
_batch_to_patches(level, patches_per_image, patch_size))
patches_test[lod].append(
_batch_to_patches(level, patches_per_image, patch_size))
for lod, level in enumerate(
_laplacian_pyramid(fake_images, len(resolutions))):
patches_fake[lod].append(
_batch_to_patches(level, patches_per_image, patch_size))
for lod in range(len(resolutions)):
for patches in [patches_real, patches_test, patches_fake]:
patches[lod] = _normalize_patches(patches[lod])
# Evaluate scores.
scores = []
for lod in range(len(resolutions)):
if not use_svd:
scores.append(
(_sliced_wasserstein(patches_real[lod], patches_test[lod],
random_sampling_count, random_projection_dim),
_sliced_wasserstein(patches_real[lod], patches_fake[lod],
random_sampling_count, random_projection_dim)))
else:
scores.append(
(_sliced_wasserstein_svd(patches_real[lod], patches_test[lod]),
_sliced_wasserstein_svd(patches_real[lod], patches_fake[lod])))
return scores
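# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; tensor shapes are assumptions):
#
#   import tensorflow as tf
#   real = tf.random_normal([64, 64, 64, 3])
#   fake = tf.random_normal([64, 64, 64, 3])
#   scores = sliced_wasserstein_distance(real, fake, resolution_min=16)
#   with tf.Session() as sess:
#       print(sess.run(scores))  # [(distance_real, distance_fake), ...]
# ---------------------------------------------------------------------------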
| [
"[email protected]"
]
| |
75af3c206ddd4f8e25574cabd71067f19214176c | 4170ed62059b6898cc8914e7f23744234fc2f637 | /CD zum Buch "Einstieg in Python"/Programmbeispiele/GUI/gui_check.py | 5f2ffb98025c0f74ac7ab1c38d5a0295e3aeec43 | []
| no_license | Kirchenprogrammierer/Cheats | 9633debd31ab1df78dc639d1aef90d3ac4c1f069 | 0b71c150f48ad1f16d7b47a8532b1f94d26e148e | refs/heads/master | 2021-05-08T10:42:39.927811 | 2018-02-01T17:29:11 | 2018-02-01T17:29:11 | 119,858,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | py | import tkinter
def ende():
    main.destroy()
def anzeigen():
    lb["text"] = "Room " + du.get() + " " + mb.get()
main = tkinter.Tk()
# Display label
lb = tkinter.Label(main, text = "Room ", width=40)
lb.pack()
# Widget variables
du = tkinter.StringVar()
du.set("without shower")
mb = tkinter.StringVar()
mb.set("without minibar")
# Two checkbuttons
cb1 = tkinter.Checkbutton(main, text="Shower",
    variable=du, onvalue="with shower",
    offvalue="without shower", command=anzeigen)
cb1.pack()
cb2 = tkinter.Checkbutton(main, text="Minibar",
    variable=mb, onvalue="with minibar",
    offvalue="without minibar", command=anzeigen)
cb2.pack()
bende = tkinter.Button(main, text = "Quit",
    command = ende)
bende.pack()
main.mainloop()
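# Note: toggling a Checkbutton writes its onvalue/offvalue into the linked
# StringVar and then fires command=anzeigen, which rebuilds the label text
# from both variables.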
| [
"[email protected]"
]
| |
8f95dcb9c22025b63932cb30e24ae18203a9dc38 | ce48d74eb28ec153573cd42fe58d39adc784c85c | /jdcloud_sdk/services/xdata/models/DwDatabaseInfo.py | bedf36eee3a80637f1aaf4c87b6493744b5f6c2c | [
"Apache-2.0"
]
| permissive | oulinbao/jdcloud-sdk-python | 4c886cb5b851707d98232ca9d76a85d54c8ff8a8 | 660e48ec3bc8125da1dbd576f7868ea61ea21c1d | refs/heads/master | 2020-03-16T22:22:15.922184 | 2018-05-11T10:45:34 | 2018-05-11T10:45:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,049 | py | # coding=utf8
# Copyright 2018-2025 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class DwDatabaseInfo(object):
def __init__(self, owner=None, comments=None, databaseName=None):
"""
        :param owner: (Optional) Owner of the database
        :param comments: (Optional) Description
        :param databaseName: (Optional) Database name
"""
self.owner = owner
self.comments = comments
self.databaseName = databaseName
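# Example (illustrative values, not from the SDK documentation):
#   info = DwDatabaseInfo(owner='admin', comments='sales data mart',
#                         databaseName='dw_sales')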
| [
"[email protected]"
]
| |
9bbf035b5e759603277ab311b08da9172cf22559 | cd40fd66338bab16c3cac360ec68d0410daf85dc | /asyncio_study/event_loop/utils.py | 039087d9261d78679ca8a895731fe964988d7754 | []
| no_license | suhjohn/Asyncio-Study | c74a95c37d6ce1d0983b5626a4f68d2b80d7ec79 | d9c5a092924a32f18849787fd30cb322a0ff8b15 | refs/heads/master | 2021-05-12T12:28:15.749447 | 2018-01-14T17:25:22 | 2018-01-14T17:25:22 | 117,414,697 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 585 | py | from functools import wraps
from time import time
def log_execution_time(func):
"""
    Decorator that prints how long the wrapped function took to execute.
    :param func: the function to time
    :return: a wrapped version of ``func`` that reports its execution time
"""
@wraps(func)
def wrapper(*args, **kwargs):
start = time()
return_value = func(*args, **kwargs)
end = time()
delta = end - start
print(f"Executing {func.__name__} took {delta} seconds.")
return return_value
return wrapper
def fib(n):
return fib(n - 1) + fib(n - 2) if n > 1 else n
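# Example usage (illustrative): timed_fib(25) prints a line like
# "Executing fib took 0.01 seconds." and returns 75025. Inner recursive
# calls go to the undecorated fib, so only the top-level call is timed.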
timed_fib = log_execution_time(fib) | [
"[email protected]"
]
| |
0ba626c1cb0e67582804291760036684180e9aac | 3faeae950e361eb818830ad210f30a6232e5d7f1 | /wepppy/_scripts/lt_runs_low.py | efdeb5b5aecc5bf4b8561c6458f160bf05309335 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | rogerlew/wepppy | 401e6cee524073209a4445c680b43ea0c6102dfc | 1af4548d725b918b73ee022f2572a63b5194cce0 | refs/heads/master | 2023-07-21T12:56:26.979112 | 2023-07-13T23:26:22 | 2023-07-13T23:26:22 | 125,935,882 | 10 | 6 | NOASSERTION | 2023-03-07T20:42:52 | 2018-03-20T00:01:27 | HTML | UTF-8 | Python | false | false | 46,439 | py |
import os
import shutil
from os.path import exists as _exists
from pprint import pprint
from time import time
from time import sleep
import wepppy
from wepppy.nodb import *
from os.path import join as _join
from wepppy.wepp.out import TotalWatSed
from wepppy.export import arc_export
from osgeo import gdal, osr
gdal.UseExceptions()
if __name__ == '__main__':
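    # Each project dict below appears to define one WEPP run: wd (working
    # directory name), extent (lon/lat bounding box), map_center/map_zoom
    # (web-map view), outlet (lon/lat of the watershed outlet), landuse
    # (None = default), and cs/erod, which look like channel critical shear
    # and erodibility parameters. This reading is inferred from the names,
    # not from wepppy documentation.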
projects = [
# dict(wd='CurCond_Watershed_1',
# extent=[-120.25497436523439, 39.072244930479926, -120.0146484375, 39.25857565711887],
# map_center=[-120.1348114013672, 39.165471994238374],
# map_zoom=12,
# outlet=[-120.09757304843217, 39.19773527084747],
# landuse=None,
# cs=12, erod=0.000001),
# dict(wd='CurCond_Watershed_2',
# extent=[-120.25497436523439, 39.072244930479926, -120.0146484375, 39.25857565711887],
# map_center=[-120.1348114013672, 39.165471994238374],
# map_zoom=12,
# outlet=[-120.11460381632118, 39.18896973503106],
# landuse=None,
# cs=12, erod=0.000001),
# dict(wd='CurCond_Watershed_3',
# extent=[-120.25497436523439, 39.072244930479926, -120.0146484375, 39.25857565711887],
# map_center=[-120.1348114013672, 39.165471994238374],
# map_zoom=12,
# outlet=[-120.12165282292143, 39.18644160172608],
# landuse=None,
# cs=12, erod=0.000001),
# dict(wd='CurCond_Watershed_4',
# extent=[-120.20605087280275, 39.15083019711799, -120.08588790893556, 39.243953257043124],
# map_center=[-120.14596939086915, 39.19740715574304],
# map_zoom=13,
# outlet=[-120.12241504431637, 39.181379503672105],
# landuse=None,
# cs=12, erod=0.000001),
# dict(wd='CurCond_Watershed_5',
# extent=[-120.25222778320314, 39.102091011833686, -120.01190185546876, 39.28834275351453],
# map_center=[-120.13206481933595, 39.19527859633793],
# map_zoom=12,
# outlet=[-120.1402884859731, 39.175919130374645],
# landuse=None,
# cs=12, erod=0.000001),
# dict(wd='CurCond_Watershed_6',
# extent=[-120.25222778320314, 39.102091011833686, -120.01190185546876, 39.28834275351453],
# map_center=[-120.13206481933595, 39.19527859633793],
# map_zoom=12,
# outlet=[-120.14460408169862, 39.17224134827233],
# landuse=None,
# cs=12, erod=0.000001),
# dict(wd='CurCond_Watershed_7_Ward',
# extent=[-120.29445648193361, 39.06424830007589, -120.11867523193361, 39.20059987393997],
# map_center=[-120.20656585693361, 39.13245708812353],
# map_zoom=12,
# outlet=[-120.15993239840523, 39.13415744093873],
# landuse=None,
# cs=12, erod=0.000001),
# dict(wd='CurCond_Watershed_8',
# extent=[-120.29102325439453, 39.02451827974919, -120.11524200439455, 39.16094667321639],
# map_center=[-120.20313262939455, 39.09276546806873],
# map_zoom=12,
# outlet=[-120.16237493339143, 39.12864047715305],
# landuse=None,
# cs=12, erod=0.000001),
# dict(wd='CurCond_Watershed_9_Blackwood',
# extent=[-120.29102325439453, 39.02451827974919, -120.11524200439455, 39.16094667321639],
# map_center=[-120.20313262939455, 39.09276546806873],
# map_zoom=12,
# outlet=[-120.16359931397338, 39.10677866737716],
# landuse=None,
# cs=12, erod=0.000001),
# dict(wd='CurCond_Watershed_10',
# extent=[-120.29102325439453, 39.02451827974919, -120.11524200439455, 39.16094667321639],
# map_center=[-120.20313262939455, 39.09276546806873],
# map_zoom=12,
# outlet=[-120.14140904093959, 39.07218260362715],
# landuse=None,
# cs=12, erod=0.000001),
# dict(wd='CurCond_Watershed_11_General',
# extent=[-120.23197174072267, 38.9348437659246, -120.05619049072267, 39.07144530820888],
# map_center=[-120.14408111572267, 39.003177506910475],
# map_zoom=12,
# outlet=[-120.12006459240162, 39.05139598278608],
# landuse=None,
# cs=12, erod=0.000001),
# dict(wd='CurCond_Watershed_12_Meeks',
# extent=[-120.23197174072267, 38.9348437659246, -120.05619049072267, 39.07144530820888],
# map_center=[-120.14408111572267, 39.003177506910475],
# map_zoom=12,
# outlet=[-120.12452021800915, 39.036407051851995],
# landuse=None,
# cs=12, erod=0.000001),
# dict(wd='CurCond_Watershed_13',
# extent=[-120.23197174072267, 38.9348437659246, -120.05619049072267, 39.07144530820888],
# map_center=[-120.14408111572267, 39.003177506910475],
# map_zoom=12,
# outlet=[-120.11884807004954, 39.02163646138702],
# landuse=None,
# cs=12, erod=0.000001),
# dict(wd='CurCond_Watershed_14',
# extent=[-120.23197174072267, 38.9348437659246, -120.05619049072267, 39.07144530820888],
# map_center=[-120.14408111572267, 39.003177506910475],
# map_zoom=12,
# outlet=[-120.12066635447759, 39.01951924517021],
# landuse=None,
# cs=12, erod=0.000001),
# dict(wd='CurCond_Watershed_15',
# extent=[-120.15652656555177, 38.98636711600028, -120.09644508361818, 39.033052785617535],
# map_center=[-120.12648582458498, 39.00971380270266],
# map_zoom=14,
# outlet=[-120.10916060023823, 39.004865203316534],
# landuse=None,
# cs=12, erod=0.000001),
# dict(wd='CurCond_Watershed_16',
# extent=[-120.23197174072267, 38.9348437659246, -120.05619049072267, 39.07144530820888],
# map_center=[-120.14408111572267, 39.003177506910475],
# map_zoom=12,
# outlet=[-120.10472536830764, 39.002638030718146],
# landuse=None,
# cs=12, erod=0.000001),
# dict(wd='CurCond_Watershed_17',
# extent=[-120.23197174072267, 38.9348437659246, -120.05619049072267, 39.07144530820888],
# map_center=[-120.14408111572267, 39.003177506910475],
# map_zoom=12,
# outlet=[-120.10376442165887, 39.00072228304711],
# landuse=None,
# cs=12, erod=0.000001),
# dict(wd='CurCond_Watershed_18',
# extent=[-120.22579193115236, 38.826603057341515, -119.98546600341798, 39.01358193815758],
# map_center=[-120.10562896728517, 38.92015408680781],
# map_zoom=12,
# outlet=[-120.10700793337516, 38.95312733140358],
# landuse=None,
# cs=12, erod=0.000001),
# dict(wd='CurCond_Watershed_19',
# extent=[-120.22579193115236, 38.826603057341515, -119.98546600341798, 39.01358193815758],
# map_center=[-120.10562896728517, 38.92015408680781],
# map_zoom=12,
# outlet=[-120.09942499965612, 38.935371421937056],
# landuse=None,
# cs=12, erod=0.000001),
# dict(wd='CurCond_Watershed_20',
# extent=[-120.14305114746095, 38.877536817489165, -120.02288818359376, 38.97102081360566],
# map_center=[-120.08296966552736, 38.924294213302424],
# map_zoom=13,
# outlet=[-120.07227563388808, 38.940891230590054],
# landuse=None,
# cs=12, erod=0.000001),
#
#
# dict(wd='Thinn_Watershed_1',
# extent=[-120.25497436523439, 39.072244930479926, -120.0146484375, 39.25857565711887],
# map_center=[-120.1348114013672, 39.165471994238374],
# map_zoom=12,
# outlet=[-120.09757304843217, 39.19773527084747],
# landuse=107,
# cs=12, erod=0.000001),
# dict(wd='Thinn_Watershed_2',
# extent=[-120.25497436523439, 39.072244930479926, -120.0146484375, 39.25857565711887],
# map_center=[-120.1348114013672, 39.165471994238374],
# map_zoom=12,
# outlet=[-120.11460381632118, 39.18896973503106],
# landuse=107,
# cs=12, erod=0.000001),
# dict(wd='Thinn_Watershed_3',
# extent=[-120.25497436523439, 39.072244930479926, -120.0146484375, 39.25857565711887],
# map_center=[-120.1348114013672, 39.165471994238374],
# map_zoom=12,
# outlet=[-120.12165282292143, 39.18644160172608],
# landuse=107,
# cs=12, erod=0.000001),
# dict(wd='Thinn_Watershed_4',
# extent=[-120.20605087280275, 39.15083019711799, -120.08588790893556, 39.243953257043124],
# map_center=[-120.14596939086915, 39.19740715574304],
# map_zoom=13,
# outlet=[-120.12241504431637, 39.181379503672105],
# landuse=107,
# cs=12, erod=0.000001),
# dict(wd='Thinn_Watershed_5',
# extent=[-120.25222778320314, 39.102091011833686, -120.01190185546876, 39.28834275351453],
# map_center=[-120.13206481933595, 39.19527859633793],
# map_zoom=12,
# outlet=[-120.1402884859731, 39.175919130374645],
# landuse=107,
# cs=12, erod=0.000001),
# dict(wd='Thinn_Watershed_6',
# extent=[-120.25222778320314, 39.102091011833686, -120.01190185546876, 39.28834275351453],
# map_center=[-120.13206481933595, 39.19527859633793],
# map_zoom=12,
# outlet=[-120.14460408169862, 39.17224134827233],
# landuse=107,
# cs=12, erod=0.000001),
# dict(wd='Thinn_Watershed_7_Ward',
# extent=[-120.29445648193361, 39.06424830007589, -120.11867523193361, 39.20059987393997],
# map_center=[-120.20656585693361, 39.13245708812353],
# map_zoom=12,
# outlet=[-120.15993239840523, 39.13415744093873],
# landuse=107,
# cs=12, erod=0.000001),
# dict(wd='Thinn_Watershed_8',
# extent=[-120.29102325439453, 39.02451827974919, -120.11524200439455, 39.16094667321639],
# map_center=[-120.20313262939455, 39.09276546806873],
# map_zoom=12,
# outlet=[-120.16237493339143, 39.12864047715305],
# landuse=107,
# cs=12, erod=0.000001),
# dict(wd='Thinn_Watershed_9_Blackwood',
# extent=[-120.29102325439453, 39.02451827974919, -120.11524200439455, 39.16094667321639],
# map_center=[-120.20313262939455, 39.09276546806873],
# map_zoom=12,
# outlet=[-120.16359931397338, 39.10677866737716],
# landuse=107,
# cs=12, erod=0.000001),
# dict(wd='Thinn_Watershed_10',
# extent=[-120.29102325439453, 39.02451827974919, -120.11524200439455, 39.16094667321639],
# map_center=[-120.20313262939455, 39.09276546806873],
# map_zoom=12,
# outlet=[-120.14140904093959, 39.07218260362715],
# landuse=107,
# cs=12, erod=0.000001),
# dict(wd='Thinn_Watershed_11_General',
# extent=[-120.23197174072267, 38.9348437659246, -120.05619049072267, 39.07144530820888],
# map_center=[-120.14408111572267, 39.003177506910475],
# map_zoom=12,
# outlet=[-120.12006459240162, 39.05139598278608],
# landuse=107,
# cs=12, erod=0.000001),
# dict(wd='Thinn_Watershed_12_Meeks',
# extent=[-120.23197174072267, 38.9348437659246, -120.05619049072267, 39.07144530820888],
# map_center=[-120.14408111572267, 39.003177506910475],
# map_zoom=12,
# outlet=[-120.12452021800915, 39.036407051851995],
# landuse=107,
# cs=12, erod=0.000001),
# dict(wd='Thinn_Watershed_13',
# extent=[-120.23197174072267, 38.9348437659246, -120.05619049072267, 39.07144530820888],
# map_center=[-120.14408111572267, 39.003177506910475],
# map_zoom=12,
# outlet=[-120.11884807004954, 39.02163646138702],
# landuse=107,
# cs=12, erod=0.000001),
# dict(wd='Thinn_Watershed_14',
# extent=[-120.23197174072267, 38.9348437659246, -120.05619049072267, 39.07144530820888],
# map_center=[-120.14408111572267, 39.003177506910475],
# map_zoom=12,
# outlet=[-120.12066635447759, 39.01951924517021],
# landuse=107,
# cs=12, erod=0.000001),
# dict(wd='Thinn_Watershed_15',
# extent=[-120.15652656555177, 38.98636711600028, -120.09644508361818, 39.033052785617535],
# map_center=[-120.12648582458498, 39.00971380270266],
# map_zoom=14,
# outlet=[-120.10916060023823, 39.004865203316534],
# landuse=107,
# cs=12, erod=0.000001),
# dict(wd='Thinn_Watershed_16',
# extent=[-120.23197174072267, 38.9348437659246, -120.05619049072267, 39.07144530820888],
# map_center=[-120.14408111572267, 39.003177506910475],
# map_zoom=12,
# outlet=[-120.10472536830764, 39.002638030718146],
# landuse=107,
# cs=12, erod=0.000001),
# dict(wd='Thinn_Watershed_17',
# extent=[-120.23197174072267, 38.9348437659246, -120.05619049072267, 39.07144530820888],
# map_center=[-120.14408111572267, 39.003177506910475],
# map_zoom=12,
# outlet=[-120.10376442165887, 39.00072228304711],
# landuse=107,
# cs=12, erod=0.000001),
# dict(wd='Thinn_Watershed_18',
# extent=[-120.22579193115236, 38.826603057341515, -119.98546600341798, 39.01358193815758],
# map_center=[-120.10562896728517, 38.92015408680781],
# map_zoom=12,
# outlet=[-120.10700793337516, 38.95312733140358],
# landuse=107,
# cs=12, erod=0.000001),
# dict(wd='Thinn_Watershed_19',
# extent=[-120.22579193115236, 38.826603057341515, -119.98546600341798, 39.01358193815758],
# map_center=[-120.10562896728517, 38.92015408680781],
# map_zoom=12,
# outlet=[-120.09942499965612, 38.935371421937056],
# landuse=107,
# cs=12, erod=0.000001),
# dict(wd='Thinn_Watershed_20',
# extent=[-120.14305114746095, 38.877536817489165, -120.02288818359376, 38.97102081360566],
# map_center=[-120.08296966552736, 38.924294213302424],
# map_zoom=13,
# outlet=[-120.07227563388808, 38.940891230590054],
# landuse=107,
# cs=12, erod=0.000001),
dict(wd='LowSev_Watershed_1',
extent=[-120.25497436523439, 39.072244930479926, -120.0146484375, 39.25857565711887],
map_center=[-120.1348114013672, 39.165471994238374],
map_zoom=12,
outlet=[-120.09757304843217, 39.19773527084747],
landuse=106,
cs=12, erod=0.000001),
dict(wd='LowSev_Watershed_2',
extent=[-120.25497436523439, 39.072244930479926, -120.0146484375, 39.25857565711887],
map_center=[-120.1348114013672, 39.165471994238374],
map_zoom=12,
outlet=[-120.11460381632118, 39.18896973503106],
landuse=106,
cs=12, erod=0.000001),
dict(wd='LowSev_Watershed_3',
extent=[-120.25497436523439, 39.072244930479926, -120.0146484375, 39.25857565711887],
map_center=[-120.1348114013672, 39.165471994238374],
map_zoom=12,
outlet=[-120.12165282292143, 39.18644160172608],
landuse=106,
cs=12, erod=0.000001),
dict(wd='LowSev_Watershed_4',
extent=[-120.20605087280275, 39.15083019711799, -120.08588790893556, 39.243953257043124],
map_center=[-120.14596939086915, 39.19740715574304],
map_zoom=13,
outlet=[-120.12241504431637, 39.181379503672105],
landuse=106,
cs=12, erod=0.000001),
dict(wd='LowSev_Watershed_5',
extent=[-120.25222778320314, 39.102091011833686, -120.01190185546876, 39.28834275351453],
map_center=[-120.13206481933595, 39.19527859633793],
map_zoom=12,
outlet=[-120.1402884859731, 39.175919130374645],
landuse=106,
cs=12, erod=0.000001),
dict(wd='LowSev_Watershed_6',
extent=[-120.25222778320314, 39.102091011833686, -120.01190185546876, 39.28834275351453],
map_center=[-120.13206481933595, 39.19527859633793],
map_zoom=12,
outlet=[-120.14460408169862, 39.17224134827233],
landuse=106,
cs=12, erod=0.000001),
dict(wd='LowSev_Watershed_7_Ward',
extent=[-120.29445648193361, 39.06424830007589, -120.11867523193361, 39.20059987393997],
map_center=[-120.20656585693361, 39.13245708812353],
map_zoom=12,
outlet=[-120.15993239840523, 39.13415744093873],
landuse=106,
cs=12, erod=0.000001),
dict(wd='LowSev_Watershed_8',
extent=[-120.29102325439453, 39.02451827974919, -120.11524200439455, 39.16094667321639],
map_center=[-120.20313262939455, 39.09276546806873],
map_zoom=12,
outlet=[-120.16237493339143, 39.12864047715305],
landuse=106,
cs=12, erod=0.000001),
dict(wd='LowSev_Watershed_9_Blackwood',
extent=[-120.29102325439453, 39.02451827974919, -120.11524200439455, 39.16094667321639],
map_center=[-120.20313262939455, 39.09276546806873],
map_zoom=12,
outlet=[-120.16359931397338, 39.10677866737716],
landuse=106,
cs=12, erod=0.000001),
dict(wd='LowSev_Watershed_10',
extent=[-120.29102325439453, 39.02451827974919, -120.11524200439455, 39.16094667321639],
map_center=[-120.20313262939455, 39.09276546806873],
map_zoom=12,
outlet=[-120.14140904093959, 39.07218260362715],
landuse=106,
cs=12, erod=0.000001),
dict(wd='LowSev_Watershed_11_General',
extent=[-120.23197174072267, 38.9348437659246, -120.05619049072267, 39.07144530820888],
map_center=[-120.14408111572267, 39.003177506910475],
map_zoom=12,
outlet=[-120.12006459240162, 39.05139598278608],
landuse=106,
cs=12, erod=0.000001),
dict(wd='LowSev_Watershed_12_Meeks',
extent=[-120.23197174072267, 38.9348437659246, -120.05619049072267, 39.07144530820888],
map_center=[-120.14408111572267, 39.003177506910475],
map_zoom=12,
outlet=[-120.12452021800915, 39.036407051851995],
landuse=106,
cs=12, erod=0.000001),
dict(wd='LowSev_Watershed_13',
extent=[-120.23197174072267, 38.9348437659246, -120.05619049072267, 39.07144530820888],
map_center=[-120.14408111572267, 39.003177506910475],
map_zoom=12,
outlet=[-120.11884807004954, 39.02163646138702],
landuse=106,
cs=12, erod=0.000001),
dict(wd='LowSev_Watershed_14',
extent=[-120.23197174072267, 38.9348437659246, -120.05619049072267, 39.07144530820888],
map_center=[-120.14408111572267, 39.003177506910475],
map_zoom=12,
outlet=[-120.12066635447759, 39.01951924517021],
landuse=106,
cs=12, erod=0.000001),
dict(wd='LowSev_Watershed_15',
extent=[-120.15652656555177, 38.98636711600028, -120.09644508361818, 39.033052785617535],
map_center=[-120.12648582458498, 39.00971380270266],
map_zoom=14,
outlet=[-120.10916060023823, 39.004865203316534],
landuse=106,
cs=12, erod=0.000001),
dict(wd='LowSev_Watershed_16',
extent=[-120.23197174072267, 38.9348437659246, -120.05619049072267, 39.07144530820888],
map_center=[-120.14408111572267, 39.003177506910475],
map_zoom=12,
outlet=[-120.10472536830764, 39.002638030718146],
landuse=106,
cs=12, erod=0.000001),
dict(wd='LowSev_Watershed_17',
extent=[-120.23197174072267, 38.9348437659246, -120.05619049072267, 39.07144530820888],
map_center=[-120.14408111572267, 39.003177506910475],
map_zoom=12,
outlet=[-120.10376442165887, 39.00072228304711],
landuse=106,
cs=12, erod=0.000001),
dict(wd='LowSev_Watershed_18',
extent=[-120.22579193115236, 38.826603057341515, -119.98546600341798, 39.01358193815758],
map_center=[-120.10562896728517, 38.92015408680781],
map_zoom=12,
outlet=[-120.10700793337516, 38.95312733140358],
landuse=106,
cs=12, erod=0.000001),
dict(wd='LowSev_Watershed_19',
extent=[-120.22579193115236, 38.826603057341515, -119.98546600341798, 39.01358193815758],
map_center=[-120.10562896728517, 38.92015408680781],
map_zoom=12,
outlet=[-120.09942499965612, 38.935371421937056],
landuse=106,
cs=12, erod=0.000001),
dict(wd='LowSev_Watershed_20',
extent=[-120.14305114746095, 38.877536817489165, -120.02288818359376, 38.97102081360566],
map_center=[-120.08296966552736, 38.924294213302424],
map_zoom=13,
outlet=[-120.07227563388808, 38.940891230590054],
landuse=106,
cs=12, erod=0.000001),
#
# dict(wd='HighSev_Watershed_1',
# extent=[-120.25497436523439, 39.072244930479926, -120.0146484375, 39.25857565711887],
# map_center=[-120.1348114013672, 39.165471994238374],
# map_zoom=12,
# outlet=[-120.09757304843217, 39.19773527084747],
# landuse=105,
# cs=12, erod=0.000001),
# dict(wd='HighSev_Watershed_2',
# extent=[-120.25497436523439, 39.072244930479926, -120.0146484375, 39.25857565711887],
# map_center=[-120.1348114013672, 39.165471994238374],
# map_zoom=12,
# outlet=[-120.11460381632118, 39.18896973503106],
# landuse=105,
# cs=12, erod=0.000001),
# dict(wd='HighSev_Watershed_3',
# extent=[-120.25497436523439, 39.072244930479926, -120.0146484375, 39.25857565711887],
# map_center=[-120.1348114013672, 39.165471994238374],
# map_zoom=12,
# outlet=[-120.12165282292143, 39.18644160172608],
# landuse=105,
# cs=12, erod=0.000001),
# dict(wd='HighSev_Watershed_4',
# extent=[-120.20605087280275, 39.15083019711799, -120.08588790893556, 39.243953257043124],
# map_center=[-120.14596939086915, 39.19740715574304],
# map_zoom=13,
# outlet=[-120.12241504431637, 39.181379503672105],
# landuse=105,
# cs=12, erod=0.000001),
# dict(wd='HighSev_Watershed_5',
# extent=[-120.25222778320314, 39.102091011833686, -120.01190185546876, 39.28834275351453],
# map_center=[-120.13206481933595, 39.19527859633793],
# map_zoom=12,
# outlet=[-120.1402884859731, 39.175919130374645],
# landuse=105,
# cs=12, erod=0.000001),
# dict(wd='HighSev_Watershed_6',
# extent=[-120.25222778320314, 39.102091011833686, -120.01190185546876, 39.28834275351453],
# map_center=[-120.13206481933595, 39.19527859633793],
# map_zoom=12,
# outlet=[-120.14460408169862, 39.17224134827233],
# landuse=105,
# cs=12, erod=0.000001),
# dict(wd='HighSev_Watershed_7_Ward',
# extent=[-120.29445648193361, 39.06424830007589, -120.11867523193361, 39.20059987393997],
# map_center=[-120.20656585693361, 39.13245708812353],
# map_zoom=12,
# outlet=[-120.15993239840523, 39.13415744093873],
# landuse=105,
# cs=12, erod=0.000001),
# dict(wd='HighSev_Watershed_8',
# extent=[-120.29102325439453, 39.02451827974919, -120.11524200439455, 39.16094667321639],
# map_center=[-120.20313262939455, 39.09276546806873],
# map_zoom=12,
# outlet=[-120.16237493339143, 39.12864047715305],
# landuse=105,
# cs=12, erod=0.000001),
# dict(wd='HighSev_Watershed_9_Blackwood',
# extent=[-120.29102325439453, 39.02451827974919, -120.11524200439455, 39.16094667321639],
# map_center=[-120.20313262939455, 39.09276546806873],
# map_zoom=12,
# outlet=[-120.16359931397338, 39.10677866737716],
# landuse=105,
# cs=12, erod=0.000001),
# dict(wd='HighSev_Watershed_10',
# extent=[-120.29102325439453, 39.02451827974919, -120.11524200439455, 39.16094667321639],
# map_center=[-120.20313262939455, 39.09276546806873],
# map_zoom=12,
# outlet=[-120.14140904093959, 39.07218260362715],
# landuse=105,
# cs=12, erod=0.000001),
# dict(wd='HighSev_Watershed_11_General',
# extent=[-120.23197174072267, 38.9348437659246, -120.05619049072267, 39.07144530820888],
# map_center=[-120.14408111572267, 39.003177506910475],
# map_zoom=12,
# outlet=[-120.12006459240162, 39.05139598278608],
# landuse=105,
# cs=12, erod=0.000001),
# dict(wd='HighSev_Watershed_12_Meeks',
# extent=[-120.23197174072267, 38.9348437659246, -120.05619049072267, 39.07144530820888],
# map_center=[-120.14408111572267, 39.003177506910475],
# map_zoom=12,
# outlet=[-120.12452021800915, 39.036407051851995],
# landuse=105,
# cs=12, erod=0.000001),
# dict(wd='HighSev_Watershed_13',
# extent=[-120.23197174072267, 38.9348437659246, -120.05619049072267, 39.07144530820888],
# map_center=[-120.14408111572267, 39.003177506910475],
# map_zoom=12,
# outlet=[-120.11884807004954, 39.02163646138702],
# landuse=105,
# cs=12, erod=0.000001),
# dict(wd='HighSev_Watershed_14',
# extent=[-120.23197174072267, 38.9348437659246, -120.05619049072267, 39.07144530820888],
# map_center=[-120.14408111572267, 39.003177506910475],
# map_zoom=12,
# outlet=[-120.12066635447759, 39.01951924517021],
# landuse=105,
# cs=12, erod=0.000001),
# dict(wd='HighSev_Watershed_15',
# extent=[-120.15652656555177, 38.98636711600028, -120.09644508361818, 39.033052785617535],
# map_center=[-120.12648582458498, 39.00971380270266],
# map_zoom=14,
# outlet=[-120.10916060023823, 39.004865203316534],
# landuse=105,
# cs=12, erod=0.000001),
# dict(wd='HighSev_Watershed_16',
# extent=[-120.23197174072267, 38.9348437659246, -120.05619049072267, 39.07144530820888],
# map_center=[-120.14408111572267, 39.003177506910475],
# map_zoom=12,
# outlet=[-120.10472536830764, 39.002638030718146],
# landuse=105,
# cs=12, erod=0.000001),
# dict(wd='HighSev_Watershed_17',
# extent=[-120.23197174072267, 38.9348437659246, -120.05619049072267, 39.07144530820888],
# map_center=[-120.14408111572267, 39.003177506910475],
# map_zoom=12,
# outlet=[-120.10376442165887, 39.00072228304711],
# landuse=105,
# cs=12, erod=0.000001),
# dict(wd='HighSev_Watershed_18',
# extent=[-120.22579193115236, 38.826603057341515, -119.98546600341798, 39.01358193815758],
# map_center=[-120.10562896728517, 38.92015408680781],
# map_zoom=12,
# outlet=[-120.10700793337516, 38.95312733140358],
# landuse=105,
# cs=12, erod=0.000001),
# dict(wd='HighSev_Watershed_19',
# extent=[-120.22579193115236, 38.826603057341515, -119.98546600341798, 39.01358193815758],
# map_center=[-120.10562896728517, 38.92015408680781],
# map_zoom=12,
# outlet=[-120.09942499965612, 38.935371421937056],
# landuse=105,
# cs=12, erod=0.000001),
# dict(wd='HighSev_Watershed_20',
# extent=[-120.14305114746095, 38.877536817489165, -120.02288818359376, 38.97102081360566],
# map_center=[-120.08296966552736, 38.924294213302424],
# map_zoom=13,
# outlet=[-120.07227563388808, 38.940891230590054],
# landuse=105,
# cs=12, erod=0.000001),
#
#
#
#
# dict(wd='ModSev_Watershed_1',
# extent=[-120.25497436523439, 39.072244930479926, -120.0146484375, 39.25857565711887],
# map_center=[-120.1348114013672, 39.165471994238374],
# map_zoom=12,
# outlet=[-120.09757304843217, 39.19773527084747],
# landuse=118,
# cs=12, erod=0.000001),
# dict(wd='ModSev_Watershed_2',
# extent=[-120.25497436523439, 39.072244930479926, -120.0146484375, 39.25857565711887],
# map_center=[-120.1348114013672, 39.165471994238374],
# map_zoom=12,
# outlet=[-120.11460381632118, 39.18896973503106],
# landuse=118,
# cs=12, erod=0.000001),
# dict(wd='ModSev_Watershed_3',
# extent=[-120.25497436523439, 39.072244930479926, -120.0146484375, 39.25857565711887],
# map_center=[-120.1348114013672, 39.165471994238374],
# map_zoom=12,
# outlet=[-120.12165282292143, 39.18644160172608],
# landuse=118,
# cs=12, erod=0.000001),
# dict(wd='ModSev_Watershed_4',
# extent=[-120.20605087280275, 39.15083019711799, -120.08588790893556, 39.243953257043124],
# map_center=[-120.14596939086915, 39.19740715574304],
# map_zoom=13,
# outlet=[-120.12241504431637, 39.181379503672105],
# landuse=118,
# cs=12, erod=0.000001),
# dict(wd='ModSev_Watershed_5',
# extent=[-120.25222778320314, 39.102091011833686, -120.01190185546876, 39.28834275351453],
# map_center=[-120.13206481933595, 39.19527859633793],
# map_zoom=12,
# outlet=[-120.1402884859731, 39.175919130374645],
# landuse=118,
# cs=12, erod=0.000001),
# dict(wd='ModSev_Watershed_6',
# extent=[-120.25222778320314, 39.102091011833686, -120.01190185546876, 39.28834275351453],
# map_center=[-120.13206481933595, 39.19527859633793],
# map_zoom=12,
# outlet=[-120.14460408169862, 39.17224134827233],
# landuse=118,
# cs=12, erod=0.000001),
# dict(wd='ModSev_Watershed_7_Ward',
# extent=[-120.29445648193361, 39.06424830007589, -120.11867523193361, 39.20059987393997],
# map_center=[-120.20656585693361, 39.13245708812353],
# map_zoom=12,
# outlet=[-120.15993239840523, 39.13415744093873],
# landuse=118,
# cs=12, erod=0.000001),
# dict(wd='ModSev_Watershed_8',
# extent=[-120.29102325439453, 39.02451827974919, -120.11524200439455, 39.16094667321639],
# map_center=[-120.20313262939455, 39.09276546806873],
# map_zoom=12,
# outlet=[-120.16237493339143, 39.12864047715305],
# landuse=118,
# cs=12, erod=0.000001),
# dict(wd='ModSev_Watershed_9_Blackwood',
# extent=[-120.29102325439453, 39.02451827974919, -120.11524200439455, 39.16094667321639],
# map_center=[-120.20313262939455, 39.09276546806873],
# map_zoom=12,
# outlet=[-120.16359931397338, 39.10677866737716],
# landuse=118,
# cs=12, erod=0.000001),
# dict(wd='ModSev_Watershed_10',
# extent=[-120.29102325439453, 39.02451827974919, -120.11524200439455, 39.16094667321639],
# map_center=[-120.20313262939455, 39.09276546806873],
# map_zoom=12,
# outlet=[-120.14140904093959, 39.07218260362715],
# landuse=118,
# cs=12, erod=0.000001),
# dict(wd='ModSev_Watershed_11_General',
# extent=[-120.23197174072267, 38.9348437659246, -120.05619049072267, 39.07144530820888],
# map_center=[-120.14408111572267, 39.003177506910475],
# map_zoom=12,
# outlet=[-120.12006459240162, 39.05139598278608],
# landuse=118,
# cs=12, erod=0.000001),
# dict(wd='ModSev_Watershed_12_Meeks',
# extent=[-120.23197174072267, 38.9348437659246, -120.05619049072267, 39.07144530820888],
# map_center=[-120.14408111572267, 39.003177506910475],
# map_zoom=12,
# outlet=[-120.12452021800915, 39.036407051851995],
# landuse=118,
# cs=12, erod=0.000001),
# dict(wd='ModSev_Watershed_13',
# extent=[-120.23197174072267, 38.9348437659246, -120.05619049072267, 39.07144530820888],
# map_center=[-120.14408111572267, 39.003177506910475],
# map_zoom=12,
# outlet=[-120.11884807004954, 39.02163646138702],
# landuse=118,
# cs=12, erod=0.000001),
# dict(wd='ModSev_Watershed_14',
# extent=[-120.23197174072267, 38.9348437659246, -120.05619049072267, 39.07144530820888],
# map_center=[-120.14408111572267, 39.003177506910475],
# map_zoom=12,
# outlet=[-120.12066635447759, 39.01951924517021],
# landuse=118,
# cs=12, erod=0.000001),
# dict(wd='ModSev_Watershed_15',
# extent=[-120.15652656555177, 38.98636711600028, -120.09644508361818, 39.033052785617535],
# map_center=[-120.12648582458498, 39.00971380270266],
# map_zoom=14,
# outlet=[-120.10916060023823, 39.004865203316534],
# landuse=118,
# cs=12, erod=0.000001),
# dict(wd='ModSev_Watershed_16',
# extent=[-120.23197174072267, 38.9348437659246, -120.05619049072267, 39.07144530820888],
# map_center=[-120.14408111572267, 39.003177506910475],
# map_zoom=12,
# outlet=[-120.10472536830764, 39.002638030718146],
# landuse=118,
# cs=12, erod=0.000001),
# dict(wd='ModSev_Watershed_17',
# extent=[-120.23197174072267, 38.9348437659246, -120.05619049072267, 39.07144530820888],
# map_center=[-120.14408111572267, 39.003177506910475],
# map_zoom=12,
# outlet=[-120.10376442165887, 39.00072228304711],
# landuse=118,
# cs=12, erod=0.000001),
# dict(wd='ModSev_Watershed_18',
# extent=[-120.22579193115236, 38.826603057341515, -119.98546600341798, 39.01358193815758],
# map_center=[-120.10562896728517, 38.92015408680781],
# map_zoom=12,
# outlet=[-120.10700793337516, 38.95312733140358],
# landuse=118,
# cs=12, erod=0.000001),
# dict(wd='ModSev_Watershed_19',
# extent=[-120.22579193115236, 38.826603057341515, -119.98546600341798, 39.01358193815758],
# map_center=[-120.10562896728517, 38.92015408680781],
# map_zoom=12,
# outlet=[-120.09942499965612, 38.935371421937056],
# landuse=118,
# cs=12, erod=0.000001),
# dict(wd='ModSev_Watershed_20',
# extent=[-120.14305114746095, 38.877536817489165, -120.02288818359376, 38.97102081360566],
# map_center=[-120.08296966552736, 38.924294213302424],
# map_zoom=13,
# outlet=[-120.07227563388808, 38.940891230590054],
# landuse=118,
# cs=12, erod=0.000001)
]
failed = open('failed', 'w')
for proj in projects:
try:
wd = proj['wd']
extent = proj['extent']
map_center = proj['map_center']
map_zoom = proj['map_zoom']
outlet = proj['outlet']
default_landuse = proj['landuse']
print('cleaning dir')
if _exists(wd):
                print('removing existing project directory:', wd)
shutil.rmtree(wd)
os.mkdir(wd)
print('initializing project')
#ron = Ron(wd, "lt-fire.cfg")
ron = Ron(wd, "lt.cfg")
#ron = Ron(wd, "0.cfg")
ron.name = wd
ron.set_map(extent, map_center, zoom=map_zoom)
print('fetching dem')
ron.fetch_dem()
print('building channels')
topaz = Topaz.getInstance(wd)
topaz.build_channels(csa=5, mcl=60)
topaz.set_outlet(*outlet)
sleep(0.5)
print('building subcatchments')
topaz.build_subcatchments()
print('abstracting watershed')
wat = Watershed.getInstance(wd)
wat.abstract_watershed()
translator = wat.translator_factory()
topaz_ids = [top.split('_')[1] for top in translator.iter_sub_ids()]
print('building landuse')
landuse = Landuse.getInstance(wd)
landuse.mode = LanduseMode.Gridded
landuse.build()
landuse = Landuse.getInstance(wd)
# 105 - Tahoe High severity fire
# topaz_ids is a list of string ids e.g. ['22', '23']
if default_landuse is not None:
print('setting default landuse')
landuse.modify(topaz_ids, default_landuse)
print('building soils')
soils = Soils.getInstance(wd)
soils.mode = SoilsMode.Gridded
soils.build()
print('building climate')
climate = Climate.getInstance(wd)
stations = climate.find_closest_stations()
climate.input_years = 27
climate.climatestation = stations[0]['id']
climate.climate_mode = ClimateMode.Observed
climate.climate_spatialmode = ClimateSpatialMode.Multiple
climate.set_observed_pars(start_year=1990, end_year=2016)
climate.build(verbose=1)
print('prepping wepp')
wepp = Wepp.getInstance(wd)
wepp.prep_hillslopes()
print('running hillslopes')
wepp.run_hillslopes()
print('prepping watershed')
wepp = Wepp.getInstance(wd)
wepp.prep_watershed(erodibility=proj['erod'], critical_shear=proj['cs'])
print('running watershed')
wepp.run_watershed()
print('generating loss report')
loss_report = wepp.report_loss()
print('generating totalwatsed report')
fn = _join(ron.export_dir, 'totalwatsed.csv')
totwatsed = TotalWatSed(_join(ron.output_dir, 'totalwatsed.txt'),
wepp.baseflow_opts, wepp.phosphorus_opts)
totwatsed.export(fn)
assert _exists(fn)
print('exporting arcmap resources')
arc_export(wd)
        except Exception as e:
            # Record the failed project and continue with the rest of the batch.
            failed.write('%s: %s\n' % (proj.get('wd', 'unknown'), e))
            failed.flush()
    failed.close()
| [
"[email protected]"
]
| |
a414e93bf6a93d7ea9d9d9fafad47934f70567b8 | 271c7959a39f3d7ff63dddf285004fd5badee4d9 | /venv/Lib/site-packages/ncclient/devices/h3c.py | 17e98b5a38712d3469fac56fcd86aaac22fcbffa | [
"MIT"
]
| permissive | natemellendorf/configpy | b6b01ea4db1f2b9109fd4ddb860e9977316ed964 | 750da5eaef33cede9f3ef532453d63e507f34a2c | refs/heads/master | 2022-12-11T05:22:54.289720 | 2019-07-22T05:26:09 | 2019-07-22T05:26:09 | 176,197,442 | 4 | 1 | MIT | 2022-12-08T02:48:51 | 2019-03-18T03:24:12 | Python | UTF-8 | Python | false | false | 1,936 | py | """
Handler for H3C device specific information.
Note that for proper import, the classname has to be:
"<Devicename>DeviceHandler"
...where <Devicename> is something like "Default", "H3c", etc.
All device-specific handlers derive from the DefaultDeviceHandler, which implements the
generic information needed for interaction with a Netconf server.
"""
from ncclient.xml_ import BASE_NS_1_0
from .default import DefaultDeviceHandler
from ncclient.operations.third_party.h3c.rpc import *
class H3cDeviceHandler(DefaultDeviceHandler):
"""
H3C handler for device specific information.
In the device_params dictionary, which is passed to __init__, you can specify
the parameter "ssh_subsystem_name". That allows you to configure the preferred
SSH subsystem name that should be tried on your H3C switch. If connecting with
that name fails, or you didn't specify that name, the other known subsystem names
will be tried. However, if you specify it then this name will be tried first.
"""
_EXEMPT_ERRORS = []
def __init__(self, device_params):
super(H3cDeviceHandler, self).__init__(device_params)
def add_additional_operations(self):
        operations = {}
        operations["get_bulk"] = GetBulk
        operations["get_bulk_config"] = GetBulkConfig
        operations["cli"] = CLI
        operations["action"] = Action
        operations["save"] = Save
        operations["load"] = Load
        operations["rollback"] = Rollback
        return operations
def get_capabilities(self):
        # H3C uses the default base capabilities unchanged.
c = super(H3cDeviceHandler, self).get_capabilities()
return c
def get_xml_base_namespace_dict(self):
return {None: BASE_NS_1_0}
def get_xml_extra_prefix_kwargs(self):
d = {}
d.update(self.get_xml_base_namespace_dict())
return {"nsmap": d}
def perform_qualify_check(self):
return False
| [
"[email protected]"
]
| |
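As a usage sketch for the handler above (not from this repository; the host, credentials, and CLI command are placeholders): passing device_params={'name': 'h3c'} to ncclient's manager.connect() selects H3cDeviceHandler by the "<Devicename>DeviceHandler" naming rule, and the operations registered in add_additional_operations() then appear as methods on the manager. The exact signatures of the vendor operations may vary by ncclient version.

from ncclient import manager

# device_params={'name': 'h3c'} loads H3cDeviceHandler, which registers the
# vendor operations (get_bulk, cli, save, rollback, ...) shown above.
with manager.connect(host='192.0.2.1',          # placeholder address
                     port=830,
                     username='admin',          # placeholder credentials
                     password='admin',
                     hostkey_verify=False,
                     device_params={'name': 'h3c'}) as m:
    reply = m.cli('display version')            # vendor CLI passthrough (assumed signature)
    print(reply)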
07573412e81359a8769586309efc67f4597edaa7 | be4fc3aac9d41e52e799cc366376e611fe726f90 | /reading_comprehension/model/base_model.py | a99aeb0dc25f2c9e397569854c5077ed1086596b | [
"Apache-2.0"
]
| permissive | Hope247code/reading_comprehension_tf | 235fac5eee98183e021d8c315b8193f4a1a33ba3 | 6cd4ac78c6c93900458ac75c774766b56125891d | refs/heads/master | 2023-06-28T14:50:26.802641 | 2019-07-14T06:30:30 | 2019-07-14T06:30:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,095 | py | import collections
import os.path
import numpy as np
import tensorflow as tf
from util.default_util import *
from util.reading_comprehension_util import *
from util.layer_util import *
__all__ = ["TrainResult", "InferResult", "BaseModel"]
class TrainResult(collections.namedtuple("TrainResult",
("loss", "learning_rate", "global_step", "batch_size", "summary"))):
pass
class InferResult(collections.namedtuple("InferResult",
("predict", "predict_detail", "batch_size", "summary"))):
pass
class BaseModel(object):
"""reading comprehension base model"""
def __init__(self,
logger,
hyperparams,
data_pipeline,
external_data,
mode="train",
scope="base"):
"""initialize mrc base model"""
self.logger = logger
self.hyperparams = hyperparams
self.data_pipeline = data_pipeline
self.mode = mode
self.scope = scope
self.update_op = None
self.train_loss = None
self.learning_rate = None
self.global_step = None
self.train_summary = None
self.infer_answer_start = None
self.infer_answer_start_mask = None
self.infer_answer_end = None
self.infer_answer_end_mask = None
self.infer_summary = None
self.word_embedding = external_data["word_embedding"] if external_data is not None and "word_embedding" in external_data else None
self.batch_size = tf.size(tf.reduce_max(self.data_pipeline.input_answer_mask, axis=-2))
self.num_gpus = self.hyperparams.device_num_gpus
self.default_gpu_id = self.hyperparams.device_default_gpu_id
self.logger.log_print("# {0} gpus are used with default gpu id set as {1}"
.format(self.num_gpus, self.default_gpu_id))
if self.hyperparams.train_regularization_enable == True:
self.regularizer = create_weight_regularizer(self.hyperparams.train_regularization_type,
self.hyperparams.train_regularization_scale)
else:
self.regularizer = None
self.random_seed = self.hyperparams.train_random_seed if self.hyperparams.train_enable_debugging else None
def _create_fusion_layer(self,
input_unit_dim,
output_unit_dim,
fusion_type,
num_layer,
hidden_activation,
dropout,
num_gpus,
default_gpu_id,
regularizer,
random_seed,
trainable):
"""create fusion layer for mrc base model"""
with tf.variable_scope("fusion", reuse=tf.AUTO_REUSE):
if fusion_type == "concate":
fusion_layer_list = []
if input_unit_dim != output_unit_dim:
convert_layer = create_convolution_layer("1d", 1, input_unit_dim,
output_unit_dim, 1, 1, 1, "SAME", None, [0.0], None, False, False, False,
num_gpus, default_gpu_id, regularizer, random_seed, trainable)
fusion_layer_list.append(convert_layer)
elif fusion_type == "dense":
fusion_layer = create_dense_layer("single", num_layer, output_unit_dim, 1, hidden_activation,
[dropout] * num_layer, None, False, False, False, num_gpus, default_gpu_id, regularizer, random_seed, trainable)
fusion_layer_list = [fusion_layer]
elif fusion_type == "highway":
fusion_layer_list = []
if input_unit_dim != output_unit_dim:
convert_layer = create_convolution_layer("1d", 1, input_unit_dim,
output_unit_dim, 1, 1, 1, "SAME", None, [0.0], None, False, False, False,
num_gpus, default_gpu_id, regularizer, random_seed, trainable)
fusion_layer_list.append(convert_layer)
fusion_layer = create_highway_layer(num_layer, output_unit_dim, hidden_activation,
[dropout] * num_layer, num_gpus, default_gpu_id, regularizer, random_seed, trainable)
fusion_layer_list.append(fusion_layer)
elif fusion_type == "conv":
fusion_layer = create_convolution_layer("1d", num_layer, input_unit_dim,
output_unit_dim, 1, 1, 1, "SAME", hidden_activation, [dropout] * num_layer,
None, False, False, False, num_gpus, default_gpu_id, regularizer, random_seed, trainable)
fusion_layer_list = [fusion_layer]
else:
raise ValueError("unsupported fusion type {0}".format(fusion_type))
return fusion_layer_list
def _build_fusion_result(self,
input_data_list,
input_mask_list,
fusion_layer_list):
"""build fusion result for mrc base model"""
input_fusion = tf.concat(input_data_list, axis=-1)
input_fusion_mask = tf.reduce_max(tf.concat(input_mask_list, axis=-1), axis=-1, keepdims=True)
if fusion_layer_list != None:
for fusion_layer in fusion_layer_list:
input_fusion, input_fusion_mask = fusion_layer(input_fusion, input_fusion_mask)
return input_fusion, input_fusion_mask
def _get_exponential_moving_average(self,
num_steps):
decay_rate = self.hyperparams.train_ema_decay_rate
enable_debias = self.hyperparams.train_ema_enable_debias
enable_dynamic_decay = self.hyperparams.train_ema_enable_dynamic_decay
if enable_dynamic_decay == True:
ema = tf.train.ExponentialMovingAverage(decay=decay_rate, num_updates=num_steps, zero_debias=enable_debias)
else:
ema = tf.train.ExponentialMovingAverage(decay=decay_rate, zero_debias=enable_debias)
return ema
def _apply_learning_rate_warmup(self,
learning_rate):
"""apply learning rate warmup"""
warmup_mode = self.hyperparams.train_optimizer_warmup_mode
warmup_rate = self.hyperparams.train_optimizer_warmup_rate
warmup_end_step = self.hyperparams.train_optimizer_warmup_end_step
if warmup_mode == "exponential_warmup":
warmup_factor = warmup_rate ** (1 - tf.to_float(self.global_step) / tf.to_float(warmup_end_step))
warmup_learning_rate = warmup_factor * learning_rate
elif warmup_mode == "inverse_exponential_warmup":
warmup_factor = tf.log(tf.to_float(self.global_step + 1)) / tf.log(tf.to_float(warmup_end_step))
warmup_learning_rate = warmup_factor * learning_rate
else:
raise ValueError("unsupported warm-up mode {0}".format(warmup_mode))
warmup_learning_rate = tf.cond(tf.less(self.global_step, warmup_end_step),
lambda: warmup_learning_rate, lambda: learning_rate)
return warmup_learning_rate
def _apply_learning_rate_decay(self,
learning_rate):
"""apply learning rate decay"""
decay_mode = self.hyperparams.train_optimizer_decay_mode
decay_rate = self.hyperparams.train_optimizer_decay_rate
decay_step = self.hyperparams.train_optimizer_decay_step
decay_start_step = self.hyperparams.train_optimizer_decay_start_step
if decay_mode == "exponential_decay":
decayed_learning_rate = tf.train.exponential_decay(learning_rate=learning_rate,
global_step=(self.global_step - decay_start_step),
decay_steps=decay_step, decay_rate=decay_rate, staircase=True)
elif decay_mode == "inverse_time_decay":
decayed_learning_rate = tf.train.inverse_time_decay(learning_rate=learning_rate,
global_step=(self.global_step - decay_start_step),
decay_steps=decay_step, decay_rate=decay_rate, staircase=True)
else:
raise ValueError("unsupported decay mode {0}".format(decay_mode))
decayed_learning_rate = tf.cond(tf.less(self.global_step, decay_start_step),
lambda: learning_rate, lambda: decayed_learning_rate)
return decayed_learning_rate
def _initialize_optimizer(self,
learning_rate):
"""initialize optimizer"""
optimizer_type = self.hyperparams.train_optimizer_type
if optimizer_type == "sgd":
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
elif optimizer_type == "momentum":
optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,
momentum=self.hyperparams.train_optimizer_momentum_beta)
elif optimizer_type == "rmsprop":
optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate,
decay=self.hyperparams.train_optimizer_rmsprop_beta,
epsilon=self.hyperparams.train_optimizer_rmsprop_epsilon)
elif optimizer_type == "adadelta":
optimizer = tf.train.AdadeltaOptimizer(learning_rate=learning_rate,
rho=self.hyperparams.train_optimizer_adadelta_rho,
epsilon=self.hyperparams.train_optimizer_adadelta_epsilon)
elif optimizer_type == "adagrad":
optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate,
initial_accumulator_value=self.hyperparams.train_optimizer_adagrad_init_accumulator)
elif optimizer_type == "adam":
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
beta1=self.hyperparams.train_optimizer_adam_beta_1, beta2=self.hyperparams.train_optimizer_adam_beta_2,
epsilon=self.hyperparams.train_optimizer_adam_epsilon)
else:
raise ValueError("unsupported optimizer type {0}".format(optimizer_type))
return optimizer
def _minimize_loss(self,
loss):
"""minimize optimization loss"""
"""compute gradients"""
if self.num_gpus > 1:
grads_and_vars = self.optimizer.compute_gradients(loss, colocate_gradients_with_ops=True)
else:
grads_and_vars = self.optimizer.compute_gradients(loss, colocate_gradients_with_ops=False)
"""clip gradients"""
gradients = [x[0] for x in grads_and_vars]
variables = [x[1] for x in grads_and_vars]
clipped_gradients, gradient_norm = tf.clip_by_global_norm(gradients, self.hyperparams.train_clip_norm)
grads_and_vars = zip(clipped_gradients, variables)
"""update model based on gradients"""
update_model = self.optimizer.apply_gradients(grads_and_vars, global_step=self.global_step)
return update_model, clipped_gradients, gradient_norm
def train(self,
sess):
"""train model"""
_, loss, learning_rate, global_step, batch_size, summary = sess.run([self.update_op,
self.train_loss, self.decayed_learning_rate, self.global_step, self.batch_size, self.train_summary])
return TrainResult(loss=loss, learning_rate=learning_rate,
global_step=global_step, batch_size=batch_size, summary=summary)
def infer(self,
sess):
"""infer model"""
(answer_start, answer_end, answer_start_mask, answer_end_mask,
batch_size, summary) = sess.run([self.infer_answer_start, self.infer_answer_end,
self.infer_answer_start_mask, self.infer_answer_end_mask, self.batch_size, self.infer_summary])
max_context_length = self.hyperparams.data_max_context_length
max_answer_length = self.hyperparams.data_max_answer_length
        # Slice along the context axis, apply the padding masks, then score every
        # (start, end) pair with an outer product of the two probability vectors.
        predict_start = np.expand_dims(answer_start[:, :max_context_length], axis=-1)
        predict_start_mask = np.expand_dims(answer_start_mask[:, :max_context_length], axis=-1)
        predict_start = predict_start * predict_start_mask
        predict_end = np.expand_dims(answer_end[:, :max_context_length], axis=-1)
        predict_end_mask = np.expand_dims(answer_end_mask[:, :max_context_length], axis=-1)
        predict_end = predict_end * predict_end_mask
predict_span = np.matmul(predict_start, predict_end.transpose((0,2,1)))
predict_span_mask = np.matmul(predict_start_mask, predict_end_mask.transpose((0,2,1)))
predict_span = predict_span * predict_span_mask
        predict = np.full((batch_size, 2), -1)
        # Exhaustive search over valid spans (end >= start, at most
        # max_answer_length tokens) for the highest joint probability.
        for k in range(batch_size):
max_prob = float('-inf')
max_prob_start = -1
max_prob_end = -1
for i in range(max_context_length):
for j in range(i, min(max_context_length, i+max_answer_length)):
if predict_span[k, i, j] > max_prob:
max_prob = predict_span[k, i, j]
max_prob_start = i
max_prob_end = j
predict[k, 0] = max_prob_start
predict[k, 1] = max_prob_end
predict_detail = np.concatenate((predict_start, predict_end), axis=-1)
return InferResult(predict=predict, predict_detail=predict_detail, batch_size=batch_size, summary=summary)
def _get_train_summary(self):
"""get train summary"""
return tf.summary.merge([tf.summary.scalar("learning_rate", self.learning_rate),
tf.summary.scalar("train_loss", self.train_loss), tf.summary.scalar("gradient_norm", self.gradient_norm)])
def _get_infer_summary(self):
"""get infer summary"""
return tf.no_op()
| [
"[email protected]"
]
| |
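The double loop in infer() above scores every admissible (start, end) pair one at a time; the same selection can be vectorized, which makes it easy to sanity-check in isolation. A standalone numpy sketch (not code from the model):

import numpy as np

def best_span(start_prob, end_prob, max_answer_length):
    """argmax over pairs with start <= end < start + max_answer_length."""
    n = len(start_prob)
    span = np.outer(start_prob, end_prob)                   # score every (start, end) pair
    valid = np.triu(np.ones((n, n), dtype=bool))            # end >= start
    valid &= ~np.triu(np.ones((n, n), dtype=bool), k=max_answer_length)  # bounded length
    span = np.where(valid, span, -np.inf)
    i, j = np.unravel_index(np.argmax(span), span.shape)
    return int(i), int(j)

start = np.array([0.1, 0.7, 0.1, 0.1])
end = np.array([0.1, 0.2, 0.6, 0.1])
print(best_span(start, end, max_answer_length=3))           # -> (1, 2)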
33280c23ce326b75e21f7c1e6b655ba9c1896cde | 9680ba23fd13b4bc0fc3ce0c9f02bb88c6da73e4 | /Bernd Klein (520) ile Python/p_20404a.py | bc54c21d16cad7c304cceedabad9a88e4a32d34d | []
| no_license | mnihatyavas/Python-uygulamalar | 694091545a24f50a40a2ef63a3d96354a57c8859 | 688e0dbde24b5605e045c8ec2a9c772ab5f0f244 | refs/heads/master | 2020-08-23T19:12:42.897039 | 2020-04-24T22:45:22 | 2020-04-24T22:45:22 | 216,670,169 | 0 | 0 | null | null | null | null | ISO-8859-9 | Python | false | false | 683 | py | # coding:iso-8859-9 Türkçe
# p_20404a.py: example of checking connectivity to a range of internet IP addresses.
import os, re  # sequential per-address check, without threading...
alınanPaketler = re.compile (r"(\d) received")
durum = ("no response", "alive but losing packets", "alive")
for sonek in range (20,30):
    ip = "192.168.178." + str (sonek)
    kontrol = os.popen ("ping -q -c2 " + ip, "r")
    print ("...pinging ", ip)
    while True:
        satır = kontrol.readline()
        if not satır: break
        alınan = alınanPaketler.findall (satır)
        if alınan: print (ip + ": " + durum[int (alınan[0])])
# The internet connection must be up and the listed IP addresses reachable for this test... | [
"[email protected]"
]
| |
f0d53247787de483a8157978732687647973de62 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/cirq_new/cirq_program/startCirq_Class651.py | dd0dd8db9096c5bbae3a183ddb1975f869ce8a2d | [
"BSD-3-Clause"
]
| permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,289 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=24
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[1])) # number=7
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=18
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=19
c.append(cirq.H.on(input_qubit[0])) # number=20
c.append(cirq.H.on(input_qubit[0])) # number=10
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=11
c.append(cirq.H.on(input_qubit[0])) # number=12
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=8
c.append(cirq.H.on(input_qubit[0])) # number=13
c.append(cirq.CZ.on(input_qubit[2],input_qubit[0])) # number=14
c.append(cirq.H.on(input_qubit[0])) # number=15
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=21
c.append(cirq.X.on(input_qubit[2])) # number=22
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=23
c.append(cirq.X.on(input_qubit[2])) # number=17
# circuit end
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count = 0
info = cirq.final_state_vector(circuit)
qubits = round(log2(len(info)))
frequencies = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
writefile = open("../data/startCirq_Class651.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | [
"[email protected]"
]
| |
d26b48204806352ba4722be9cf5fc9324a9c0d14 | 54a0b86d4c3f731487ad4470fb365907970472e6 | /P1/studentparameters/Project1_Parameters_ad.py | abd32ecc8b79a7a3387eba925bcaeff4e5091637 | []
| no_license | samiurrahman98/ece458-computer-security | 26aa46e174b0bf77f748e6451dd2e0e4183feebd | cf79430b98e3679ffcd687a0c96b5e979187e1e3 | refs/heads/master | 2022-11-25T01:26:36.874094 | 2020-07-31T21:24:53 | 2020-07-31T21:24:53 | 280,979,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,771 | py | # Select the file name that matches your first two letters of your last name on Learn
# Read those parameters as your ECE458 project 1 parameters
# p,q,g are DSA domain parameters, sk_i (secret keys) are used in each signature and verification
p=16158504202402426253991131950366800551482053399193655122805051657629706040252641329369229425927219006956473742476903978788728372679662561267749592756478584653187379668070077471640233053267867940899762269855538496229272646267260199331950754561826958115323964167572312112683234368745583189888499363692808195228055638616335542328241242316003188491076953028978519064222347878724668323621195651283341378845128401263313070932229612943555693076384094095923209888318983438374236756194589851339672873194326246553955090805398391550192769994438594243178242766618883803256121122147083299821412091095166213991439958926015606973543
q=13479974306915323548855049186344013292925286365246579443817723220231
g=9891663101749060596110525648800442312262047621700008710332290803354419734415239400374092972505760368555033978883727090878798786527869106102125568674515087767296064898813563305491697474743999164538645162593480340614583420272697669459439956057957775664653137969485217890077966731174553543597150973233536157598924038645446910353512441488171918287556367865699357854285249284142568915079933750257270947667792192723621634761458070065748588907955333315440434095504696037685941392628366404344728480845324408489345349308782555446303365930909965625721154544418491662738796491732039598162639642305389549083822675597763407558360
sk1=11901278079431521417510182145794663264975691637509742253512537133135
sk2=5169869455587584420217628936034595597270984859924339992857774218592
sk3=270864169201384296687902210880717403152220842818569896183973535059
| [
"[email protected]"
]
| |
5db01072a7b1cfb52cc6a3b3c2a3401cc35537e6 | a5103b7d5066138ac1a9aabc273361491a5031cd | /course5/week1/rnn_utils.py | eafeffe54a31969af6913bbe0cbfc4a64948fa29 | []
| no_license | mckjzhangxk/deepAI | 0fa2f261c7899b850a4ec432b5a387e8c5f13e83 | 24e60f24b6e442db22507adddd6bf3e2c343c013 | refs/heads/master | 2022-12-13T18:00:12.839041 | 2021-06-18T03:01:10 | 2021-06-18T03:01:10 | 144,862,423 | 1 | 1 | null | 2022-12-07T23:31:01 | 2018-08-15T14:19:10 | Jupyter Notebook | UTF-8 | Python | false | false | 3,663 | py | import numpy as np
def _initial_parameters(n_x,n_a,n_y):
params={}
params['Waa'] = np.random.randn(n_a,n_a)
params['Wax'] = np.random.randn(n_a,n_x)
params['ba'] = np.random.randn(n_a,1)
params['Wya'] = np.random.randn(n_y,n_a)
params['by'] = np.random.randn(n_y,1)
return params
def _initial_gradients(params):
grads={'d'+key:np.zeros_like(value) for key,value in params.items()}
grads['da_next']=0
return grads
def softmax(x):
'''
x have shape [n,m]
:param x:
:return:
'''
x=x-np.max(x,axis=0,keepdims=True)
expx=np.exp(x)
expsum=np.sum(expx,axis=0,keepdims=True)
return expx/expsum
def _unpack(parameter):
return parameter['Waa'],parameter['Wax'],parameter['Wya'],parameter['ba'],parameter['by']
def _rnn_step_forward(xt,a_prev,parameter):
'''
    :param xt: shape [n_x, m]
    :param a_prev: shape [n_a, m]
    :param parameter: Waa, Wax, Wya, ba, by
:return: cache all input and parameter,and a(i+1)
and a y_predition
'''
Waa, Wax, Wya, ba, by=_unpack(parameter)
a_out=np.tanh(Waa.dot(a_prev)+Wax.dot(xt)+ba)
ypred=softmax(Wya.dot(a_out)+by)
cache=xt,a_prev,a_out,parameter
return a_out,ypred,cache
def _rnn_step_backward(dy,cache,gradients):
'''
:param dy:shape[n_y,m]
    :param cache: (xt, a_prev, a_out, parameter)
    :param gradients: dWaa, dWya, dWax, dba, dby, da_next
:return:gradients
'''
xt,a_prev,a_out,parameter=cache
Waa, Wax, Wya, ba, by = _unpack(parameter)
#from linear prediction
dWya=dy.dot(a_out.T)
dby=np.sum(dy,axis=1,keepdims=True)
da_next=Wya.T.dot(dy)+gradients['da_next']
#from rnn units
dz=da_next*(1-a_out**2)
dWaa=dz.dot(a_prev.T)
dWax=dz.dot(xt.T)
dba=np.sum(dz,axis=1,keepdims=True)
gradients['da_next']=Waa.T.dot(dz)
gradients['dWaa']+=dWaa
gradients['dWax'] += dWax
gradients['dba'] += dba
gradients['dWya'] += dWya
gradients['dby'] += dby
return gradients
def _rnn_forward(x,a_prev,parameter):
'''
:param x: shape [n_x,m,T]
:param a_prev: shape [n_a,m]
    :param parameter: Waa, Wax, Wya, ba, by
:return: y_pred shape:[n_y,m,T],
caches:a list of all cache
'''
n_x,m,T=x.shape
n_y,n_a=parameter['Wya'].shape
#the return value
y_pred=np.zeros((n_y,m,T))
a_out=np.zeros((n_a,m,T))
caches=[]
for t in range(T):
a_prev,yhat,cache=_rnn_step_forward(x[:,:,t],a_prev,parameter)
y_pred[:,:,t]=yhat
a_out[:,:,t]=a_prev
caches.append(cache)
return y_pred,a_out,caches
def _rnn_backward(dy,caches,param):
'''
:param dy:shape[n_c,m,T]
:param caches: cahces of rnn_forward
:return: gradients
'''
n_y,m,T=dy.shape
gradients=_initial_gradients(param)
for t in reversed(range(T)):
gradients=_rnn_step_backward(dy[:,:,t],caches[t],gradients)
return gradients
def _computeLoss(yhat,y):
'''
:param yhat:
:param y:[n_y,m,T]
:return:
'''
#shape mxT
prob_of_trueLabel=np.sum(yhat*y,axis=0)
prob_of_trueLabel=prob_of_trueLabel.ravel()
loss=np.mean(-np.log(prob_of_trueLabel))
return loss
if __name__ == '__main__':
    # Smoke test: run one forward and one backward pass over random data.
    m, n_x, n_a, n_y, T = 100, 27, 32, 10, 60
    x = np.random.randn(n_x, m, T)
    a_prev = np.random.randn(n_a, m)
    params = _initial_parameters(n_x, n_a, n_y)
    y_pred, a_out, caches = _rnn_forward(x, a_prev, params)
    dy = np.random.randn(n_y, m, T)
    gradients = _rnn_backward(dy, caches, params) | [
"[email protected]"
]
| |
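A quick way to validate the backward pass in rnn_utils.py above is a finite-difference check on a tiny problem. A standalone sketch using the functions defined in that file (the softmax/cross-entropy pairing behind dy and the epsilon value are assumptions):

# Numerical gradient check for _rnn_forward / _rnn_backward.
m, n_x, n_a, n_y, T = 4, 3, 5, 2, 6
np.random.seed(0)
x = np.random.randn(n_x, m, T)
a0 = np.random.randn(n_a, m)
y = np.eye(n_y)[np.random.randint(n_y, size=(m, T))].transpose(2, 0, 1)   # one-hot [n_y, m, T]

def loss_fn(params):
    yhat, _, _ = _rnn_forward(x, a0, params)
    return _computeLoss(yhat, y)

params = _initial_parameters(n_x, n_a, n_y)
yhat, _, caches = _rnn_forward(x, a0, params)
dy = (yhat - y) / (m * T)        # gradient of mean cross-entropy w.r.t. output logits
grads = _rnn_backward(dy, caches, params)

eps = 1e-6
w = params['Waa']                # perturb one weight in place
w[0, 0] += eps; loss_plus = loss_fn(params)
w[0, 0] -= 2 * eps; loss_minus = loss_fn(params)
w[0, 0] += eps                   # restore the original weight
numeric = (loss_plus - loss_minus) / (2 * eps)
print(numeric, grads['dWaa'][0, 0])   # the two values should agree closely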
e2690fdaa4d677a52afa36499234099fce9dcb9f | e3ac7c428aa5b60e021a9440262320ff80f5be88 | /Ui/welcomescreen.py | 12640c6e51d951c0941192071407924a8f52cfc2 | []
| no_license | turamant/PyQt5-PhoneBook | b3423f978d2a368e6a997889095d13c5cb47a875 | 522490439889d91a40a651574339e80af6b6db1d | refs/heads/main | 2023-07-11T00:50:10.109781 | 2021-08-10T14:24:54 | 2021-08-10T14:24:54 | 391,993,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,599 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'welcomescreen.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(1178, 798)
self.widget = QtWidgets.QWidget(Dialog)
self.widget.setGeometry(QtCore.QRect(0, 0, 1181, 801))
self.widget.setMinimumSize(QtCore.QSize(831, 731))
self.widget.setStyleSheet("background-color: rgb(232, 232, 232);")
self.widget.setObjectName("widget")
self.labelWelcom = QtWidgets.QLabel(self.widget)
self.labelWelcom.setGeometry(QtCore.QRect(370, 110, 551, 131))
self.labelWelcom.setStyleSheet("font: 28pt \"DejaVu Math TeX Gyre\";\n"
"color: black;")
self.labelWelcom.setObjectName("labelWelcom")
self.loginPushButton = QtWidgets.QPushButton(self.widget)
self.loginPushButton.setGeometry(QtCore.QRect(240, 480, 191, 51))
self.loginPushButton.setStyleSheet("\n"
"font: 18pt \"Cantarell\";\n"
"background-color: rgb(232, 232, 232);")
self.loginPushButton.setObjectName("loginPushButton")
self.signupPushButton = QtWidgets.QPushButton(self.widget)
self.signupPushButton.setGeometry(QtCore.QRect(490, 480, 211, 51))
self.signupPushButton.setStyleSheet("\n"
"font: 18pt \"Cantarell\";\n"
"background-color: rgb(232, 232, 232);")
self.signupPushButton.setObjectName("signupPushButton")
self.cancelPushButton = QtWidgets.QPushButton(self.widget)
self.cancelPushButton.setGeometry(QtCore.QRect(760, 480, 211, 51))
self.cancelPushButton.setStyleSheet("background-color: rgb(232, 232, 232);\n"
"\n"
"font: 18pt \"Cantarell\";\n"
"")
self.cancelPushButton.setObjectName("cancelPushButton")
self.nameuserLineEdit = QtWidgets.QLineEdit(self.widget)
self.nameuserLineEdit.setGeometry(QtCore.QRect(370, 260, 461, 71))
self.nameuserLineEdit.setObjectName("nameuserLineEdit")
self.passwordLineEdit = QtWidgets.QLineEdit(self.widget)
self.passwordLineEdit.setGeometry(QtCore.QRect(370, 360, 461, 71))
self.passwordLineEdit.setObjectName("passwordLineEdit")
self.saveMeCheckBox = QtWidgets.QCheckBox(self.widget)
self.saveMeCheckBox.setGeometry(QtCore.QRect(470, 550, 291, 71))
self.saveMeCheckBox.setStyleSheet("font: 18pt \"Cantarell\";")
self.saveMeCheckBox.setObjectName("saveMeCheckBox")
self.echoPasswordCheckBox = QtWidgets.QCheckBox(self.widget)
self.echoPasswordCheckBox.setGeometry(QtCore.QRect(470, 600, 291, 71))
self.echoPasswordCheckBox.setStyleSheet("font: 18pt \"Cantarell\";")
self.echoPasswordCheckBox.setObjectName("echoPasswordCheckBox")
self.forgotPasswordPushButton = QtWidgets.QPushButton(self.widget)
self.forgotPasswordPushButton.setGeometry(QtCore.QRect(490, 690, 231, 36))
self.forgotPasswordPushButton.setStyleSheet("color: blue;\n"
"font: 14pt \"Cantarell\";\n"
"background-color: rgb(235, 235, 235);\n"
"border:0px;")
self.forgotPasswordPushButton.setObjectName("forgotPasswordPushButton")
self.changePasswordPushButton = QtWidgets.QPushButton(self.widget)
self.changePasswordPushButton.setGeometry(QtCore.QRect(490, 740, 231, 36))
self.changePasswordPushButton.setStyleSheet("color: blue;\n"
"font: 14pt \"Cantarell\";\n"
"background-color: rgb(235, 235, 235);\n"
"border:0px;")
self.changePasswordPushButton.setObjectName("changePasswordPushButton")
self.helpPushButton = QtWidgets.QPushButton(self.widget)
self.helpPushButton.setGeometry(QtCore.QRect(1060, 10, 113, 36))
self.helpPushButton.setStyleSheet("background-color: rgb(232, 232, 232);")
self.helpPushButton.setObjectName("helpPushButton")
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.labelWelcom.setText(_translate("Dialog", "Окно авторизации"))
self.loginPushButton.setText(_translate("Dialog", "Войти"))
self.signupPushButton.setText(_translate("Dialog", "Регистрация"))
self.cancelPushButton.setText(_translate("Dialog", "Выход"))
self.nameuserLineEdit.setPlaceholderText(_translate("Dialog", " e-mail"))
self.passwordLineEdit.setWhatsThis(_translate("Dialog", "<html><head/><body><p>sdefesrgvesrgvegrvevgre</p><p>vrbge</p></body></html>"))
self.passwordLineEdit.setPlaceholderText(_translate("Dialog", " Пароль"))
self.saveMeCheckBox.setText(_translate("Dialog", "Запомнить меня"))
self.echoPasswordCheckBox.setText(_translate("Dialog", "Показать пароль"))
self.forgotPasswordPushButton.setText(_translate("Dialog", "Забыли пароль?"))
self.changePasswordPushButton.setText(_translate("Dialog", "Сменить пароль"))
self.helpPushButton.setText(_translate("Dialog", "Help"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog = QtWidgets.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
| [
"[email protected]"
]
| |
b6e77796d885e1ff2b153035ca5ccf756ae11b66 | c506e2707708f6b1d1c8b6a4761b3952cdf33c12 | /backend/www/test/upload_contacts_test.py | 1abc56c42b13ef091d47cfb8ba1b890375b3c5f1 | [
"Apache-2.0"
]
| permissive | xuantan/viewfinder | 69e17d50228dd34fa34d79eea1c841cc80a869ff | 992209086d01be0ef6506f325cf89b84d374f969 | refs/heads/master | 2021-08-26T00:58:18.180445 | 2021-08-10T03:06:48 | 2021-08-10T03:06:48 | 19,481,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,824 | py | # Copyright 2013 Viewfinder Inc. All Rights Reserved.
"""Test upload_contacts service method.
"""
__authors__ = ['[email protected] (Mike Purtell)']
import json
import mock
from copy import deepcopy
from viewfinder.backend.base import util
from viewfinder.backend.www.test import service_base_test
from viewfinder.backend.db.contact import Contact
from viewfinder.backend.db.db_client import DBKey
from viewfinder.backend.db.notification import Notification
from viewfinder.backend.db.operation import Operation
class UploadContactsTestCase(service_base_test.ServiceBaseTestCase):
def testUploadContacts(self):
"""Test successful upload_contacts."""
contacts = [{'identities': [{'identity': 'Email:[email protected]', 'description': 'work'}],
'contact_source': Contact.MANUAL,
'name': 'Mike Purtell',
'given_name': 'Mike',
'family_name': 'Purtell'},
{'identities': [{'identity': 'Phone:+13191231111', 'description': 'home'},
{'identity': 'Phone:+13191232222', 'description': 'mobile'},
{'identity': 'Phone:+13191233333', 'description': 'a'},
{'identity': 'Phone:+13195555555'},
{'identity': 'FacebookGraph:1232134234'}],
'contact_source': Contact.IPHONE,
'name': 'Mike Purtell',
'given_name': 'Mike',
'family_name': 'Purtell'}]
result_0 = self._tester.QueryContacts(self._cookie)
upload_result = self._tester.UploadContacts(self._cookie, contacts)
self.assertEqual(len(upload_result['contact_ids']), 2)
result_1 = self._tester.QueryContacts(self._cookie)
# Observe that the number of contacts increased by 2.
self.assertEqual(result_0['num_contacts'] + 2, result_1['num_contacts'])
self.assertEqual(upload_result['contact_ids'][1], result_1['contacts'][0]['contact_id'])
self.assertEqual(upload_result['contact_ids'][0], result_1['contacts'][1]['contact_id'])
# Try to upload the same contacts again and see that there's no error and no changes in contacts on server.
self._tester.UploadContacts(self._cookie, contacts)
result_2 = self._tester.QueryContacts(self._cookie)
self.assertEqual(result_1, result_2)
# Slightly modify one of the contacts and see that a new contact is added.
contacts[1]['name'] = 'John Purtell'
self._tester.UploadContacts(self._cookie, contacts)
# This should result in just one additional contact on the server.
result_3 = self._tester.QueryContacts(self._cookie)
self.assertEqual(result_2['num_contacts'] + 1, result_3['num_contacts'])
def testContactsWithRegisteredUsers(self):
"""Test interaction between user registration, contacts and notifications."""
def _RegisterUser(name, given_name, email):
user, _ = self._tester.RegisterFakeViewfinderUser({'name': name, 'given_name': given_name, 'email': email}, {})
return user
def _ValidateContactUpdate(expected_notification_name, expected_user_ids):
notification_list = self._tester._RunAsync(Notification.RangeQuery,
self._client,
self._user.user_id,
range_desc=None,
limit=1,
col_names=None,
scan_forward=False)
self.assertEqual(notification_list[0].name, expected_notification_name)
invalidation = json.loads(notification_list[0].invalidate)
start_key = invalidation['contacts']['start_key']
query_result = self._tester.QueryContacts(self._cookie, start_key=start_key)
found_user_ids = set()
for contact in query_result['contacts']:
for identity in contact['identities']:
if identity.has_key('user_id'):
found_user_ids.add(identity['user_id'])
self.assertEqual(expected_user_ids, found_user_ids)
contacts = [{'identities': [{'identity': 'Email:[email protected]'}],
'contact_source': Contact.MANUAL,
'name': 'Mike Boat'},
{'identities': [{'identity': 'Email:[email protected]'}, {'identity': 'Email:[email protected]'}],
'contact_source': Contact.MANUAL,
'name': 'Mike Cars'}]
util._TEST_TIME += 1
user_boat = _RegisterUser('Mike Purtell', 'Mike', '[email protected]')
self._tester.UploadContacts(self._cookie, contacts)
_ValidateContactUpdate('upload_contacts', set([user_boat.user_id]))
util._TEST_TIME += 1
user_vw = _RegisterUser('Mike VW', 'Mike', '[email protected]')
_ValidateContactUpdate('first register contact', set([user_vw.user_id]))
util._TEST_TIME += 1
user_porsche = _RegisterUser('Mike Porsche', 'Mike', '[email protected]')
_ValidateContactUpdate('first register contact', set([user_vw.user_id, user_porsche.user_id]))
@mock.patch.object(Operation, 'FAILPOINTS_ENABLED', True)
def testIdempotency(self):
"""Force op failure in order to test idempotency."""
self._tester.UploadContacts(self._cookie,
[{'identities': [{'identity': 'Email:[email protected]', 'description': 'work'}],
'contact_source': Contact.MANUAL,
'name': 'Mike Purtell',
'given_name': 'Mike',
'family_name': 'Purtell'}])
def testUploadContactsFailures(self):
"""ERROR: Test some failure cases."""
good_contact = {'identities': [{'identity': 'Email:[email protected]', 'description': 'work'}],
'contact_source': Contact.MANUAL,
'name': 'Mike Purtell',
'given_name': 'Mike',
'family_name': 'Purtell'}
# ERROR: Empty identities:
bad_contact = deepcopy(good_contact)
bad_contact['identities'] = [{}]
self.assertRaisesHttpError(400, self._tester.UploadContacts, self._cookie, [bad_contact])
# ERROR: Missing identities:
bad_contact = deepcopy(good_contact)
bad_contact.pop('identities')
self.assertRaisesHttpError(400, self._tester.UploadContacts, self._cookie, [bad_contact])
# ERROR: Missing contact_source:
bad_contact = deepcopy(good_contact)
bad_contact.pop('contact_source')
self.assertRaisesHttpError(400, self._tester.UploadContacts, self._cookie, [bad_contact])
# ERROR: Unknown contact source:
bad_contact = deepcopy(good_contact)
bad_contact['contact_source'] = 'x'
self.assertRaisesHttpError(400, self._tester.UploadContacts, self._cookie, [bad_contact])
# ERROR: Attempt to upload a contact as if it's from facebook:
bad_contact = deepcopy(good_contact)
bad_contact['contact_source'] = Contact.FACEBOOK
self.assertRaisesHttpError(400, self._tester.UploadContacts, self._cookie, [bad_contact])
# ERROR: Attempt to upload a contact as if it's from gmail:
bad_contact = deepcopy(good_contact)
bad_contact['contact_source'] = Contact.GMAIL
self.assertRaisesHttpError(400, self._tester.UploadContacts, self._cookie, [bad_contact])
# ERROR: Malformed identities field:
bad_contact = deepcopy(good_contact)
bad_contact['identities'] = ['invalid']
self.assertRaisesHttpError(400, self._tester.UploadContacts, self._cookie, [bad_contact])
# ERROR: Invalid identity properties field:
bad_contact = deepcopy(good_contact)
bad_contact['identities'] = [{'something': 'invalid'}]
self.assertRaisesHttpError(400, self._tester.UploadContacts, self._cookie, [bad_contact])
# ERROR: Unknown identity type:
bad_contact = deepcopy(good_contact)
bad_contact['identities'] = [{'identity': 'Blah:234'}]
self.assertRaisesHttpError(400, self._tester.UploadContacts, self._cookie, [bad_contact])
# ERROR: Invalid additional identity properties field:
bad_contact = deepcopy(good_contact)
bad_contact['identities'] = [{'identity': 'Email:[email protected]', 'bad': 'additional field'}]
self.assertRaisesHttpError(400, self._tester.UploadContacts, self._cookie, [bad_contact])
# ERROR: Extra/unknown field:
bad_contact = deepcopy(good_contact)
bad_contact['unknown'] = 'field'
self.assertRaisesHttpError(400, self._tester.UploadContacts, self._cookie, [bad_contact])
# ERROR: identity not in canonical form (upper case character):
bad_contact = deepcopy(good_contact)
bad_contact['identities'] = [{'identity': 'Email:[email protected]'}]
self.assertRaisesHttpError(400, self._tester.UploadContacts, self._cookie, [bad_contact])
# ERROR: too many contacts in a single request:
too_many_contacts = [good_contact for i in xrange(51)]
self.assertRaisesHttpError(400, self._tester.UploadContacts, self._cookie, too_many_contacts)
# ERROR: too many identities in a single contact:
bad_contact = deepcopy(good_contact)
bad_contact['identities'] = [{'identity': 'Email:[email protected]'} for i in xrange(51)]
self.assertRaisesHttpError(400, self._tester.UploadContacts, self._cookie, [bad_contact])
# ERROR: contact name too long:
bad_contact = deepcopy(good_contact)
bad_contact['name'] = 'a' * 1001
self.assertRaisesHttpError(400, self._tester.UploadContacts, self._cookie, [bad_contact])
# ERROR: contact given_name too long:
bad_contact = deepcopy(good_contact)
bad_contact['given_name'] = 'a' * 1001
self.assertRaisesHttpError(400, self._tester.UploadContacts, self._cookie, [bad_contact])
# ERROR: contact family_name too long:
bad_contact = deepcopy(good_contact)
bad_contact['family_name'] = 'a' * 1001
self.assertRaisesHttpError(400, self._tester.UploadContacts, self._cookie, [bad_contact])
# ERROR: contact identity key too long:
bad_contact = deepcopy(good_contact)
bad_contact['identities'] = [{'identity': 'Email:%s' % ('a' * 1001)}]
self.assertRaisesHttpError(400, self._tester.UploadContacts, self._cookie, [bad_contact])
# ERROR: contact description too long:
bad_contact = deepcopy(good_contact)
bad_contact['identities'] = [{'identity': 'Email:[email protected]', 'description': 'a' * 1001}]
self.assertRaisesHttpError(400, self._tester.UploadContacts, self._cookie, [bad_contact])
@mock.patch.object(Contact, 'MAX_CONTACTS_LIMIT', 2)
def testMaxContactLimit(self):
"""Test exceed limit error."""
# This should increase the total to 1 and succeed.
self._tester.UploadContacts(self._cookie,
[{'identities': [{'identity': 'Email:[email protected]'}],
'contact_source': Contact.IPHONE}])
# This should fail because it would bring the total above 2.
self.assertRaisesHttpError(403,
self._tester.UploadContacts,
self._cookie,
[{'identities': [{'identity': 'Email:[email protected]'}],
'contact_source': Contact.IPHONE},
{'identities': [{'identity': 'Email:[email protected]'}],
'contact_source': Contact.MANUAL}])
# This should increase the total to 2 and succeed.
self._tester.UploadContacts(self._cookie,
[{'identities': [{'identity': 'Email:[email protected]'}],
'contact_source': Contact.MANUAL}])
def testUploadContactWithNoIdentities(self):
"""Verify that a contact without any identities succeeds."""
contacts = [{'identities': [],
'contact_source': Contact.IPHONE,
'name': 'Mike Purtell',
'given_name': 'Mike',
'family_name': 'Purtell'}]
upload_result = self._tester.UploadContacts(self._cookie, contacts)
self.assertEqual(len(upload_result['contact_ids']), 1)
contact_id = upload_result['contact_ids'][0]
result = self._tester.QueryContacts(self._cookie)
# Observe that query_contacts returns contact with empty identities list.
self.assertEqual(len(result['contacts']), 1)
self.assertEqual(len(result['contacts'][0]['identities']), 0)
# Ensure that we can also remove this contact.
self._tester.RemoveContacts(self._cookie, [contact_id])
result = self._tester.QueryContacts(self._cookie)
self.assertEqual(len(result['contacts']), 1)
self.assertIn('removed', result['contacts'][0]['labels'])
def _TestUploadContacts(tester, user_cookie, request_dict):
"""Called by the ServiceTester in order to test upload_contacts
service API call.
"""
def _ValidateUploadOneContact(contact):
contact = deepcopy(contact)
# Transform into proper form for Contact.CalculateContactId()
identities_properties = [(identity_properties['identity'], identity_properties.get('description', None))
for identity_properties in contact['identities']]
contact.pop('identities')
contact_dict = Contact.CreateContactDict(user_id, identities_properties, op_dict['op_timestamp'], **contact)
predicate = lambda c: c.contact_id == contact_dict['contact_id']
existing_contacts = validator.QueryModelObjects(Contact, predicate=predicate)
# Create contact if it doesn't already exist or it's in a 'removed' state.
if len(existing_contacts) == 0 or existing_contacts[-1].IsRemoved():
if len(existing_contacts) != 0:
# Delete the 'removed' contact.
validator.ValidateDeleteDBObject(Contact, DBKey(user_id, existing_contacts[-1].sort_key))
validator.ValidateCreateContact(**contact_dict)
return contact_dict['contact_id']
validator = tester.validator
user_id, device_id = tester.GetIdsFromCookie(user_cookie)
request_dict = deepcopy(request_dict)
# Send upload_episode request.
actual_dict = tester.SendRequest('upload_contacts', user_cookie, request_dict)
op_dict = tester._DeriveNotificationOpDict(user_id, device_id, request_dict)
result_contact_ids = []
for contact in request_dict['contacts']:
contact_id = _ValidateUploadOneContact(contact)
result_contact_ids.append(contact_id)
# Validate that a notification was created for the upload of contacts.
invalidate = {'contacts': {'start_key': Contact.CreateSortKey(None, op_dict['op_timestamp'])}}
validator.ValidateNotification('upload_contacts', user_id, op_dict, invalidate)
# Increment time so that subsequent contacts will use later time.
util._TEST_TIME += 1
tester._CompareResponseDicts('upload_contacts', user_id, request_dict, {'contact_ids': result_contact_ids}, actual_dict)
return actual_dict
| [
"[email protected]"
]
| |
6127db8c4201697208fcbd427c8f2ff605b318ec | ea9aa0e93a7f264511ef10b5ccb90f65d958900f | /3rd_practice/blog/models.py | c0eca233fb31aebd733abc8ef197a867e51945e6 | []
| no_license | wayhome25/django_travel_blog_2 | 01cf4591e0aa69fb5a3144e0739bd43ce4bebc9c | 13e5d7ad2851febafb9a57e1fc93bb29c916c4c7 | refs/heads/master | 2020-06-01T08:58:24.492163 | 2017-08-26T13:19:45 | 2017-08-26T15:04:18 | 94,069,137 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,118 | py | from django.core.urlresolvers import reverse
from django.conf import settings
from django.db import models
class Post(models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
photo = models.ImageField()
is_public = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('blog:post_detail', args=[self.pk])
class Comment(models.Model):
post = models.ForeignKey(Post)
author = models.ForeignKey(settings.AUTH_USER_MODEL)
message = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
ordering = ['-id']
def __str__(self):
return self.message
def get_edit_url(self):
return reverse('blog:comment_edit', args=[self.post.pk, self.pk])
def get_delete_url(self):
return reverse('blog:comment_delete', args=[self.post.pk, self.pk])
| [
"[email protected]"
]
| |
3996a1c41e99afd8b6eabfd0864dad2c2f4c7187 | 13e1fb78955c03a75cf483725f4811abd1b51ac4 | /compiler/tests/03_wire_test.py | 61e21985e7422f475fa91e656c3f095e1c483963 | [
"BSD-3-Clause"
]
| permissive | ucb-cs250/OpenRAM | d7a88695ac6820d03b4b365245a1c4962cdc546d | 3c5e13f95c925a204cabf052525c3de07638168f | refs/heads/master | 2023-01-23T15:09:35.183103 | 2020-10-12T16:05:07 | 2020-10-12T16:05:07 | 318,904,763 | 0 | 0 | BSD-3-Clause | 2020-12-05T22:47:14 | 2020-12-05T22:47:13 | null | UTF-8 | Python | false | false | 2,212 | py | #!/usr/bin/env python3
# See LICENSE for licensing information.
#
# Copyright (c) 2016-2019 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
import unittest
from testutils import *
import sys
import os
sys.path.append(os.getenv("OPENRAM_HOME"))
import globals
class wire_test(openram_test):
def runTest(self):
config_file = "{}/tests/configs/config".format(os.getenv("OPENRAM_HOME"))
globals.init_openram(config_file)
import wire
import tech
import design
layer_stacks = [tech.poly_stack] + tech.beol_stacks
for reverse in [False, True]:
for stack in layer_stacks:
if reverse:
layer_stack = stack[::-1]
else:
layer_stack = stack
# Just make a conservative spacing. Make it wire pitch instead?
min_space = 2 * (tech.drc["minwidth_{}".format(layer_stack[0])] +
tech.drc["minwidth_{}".format(layer_stack[2])])
position_list = [[0, 0],
[0, 3 * min_space],
[1 * min_space, 3 * min_space],
[4 * min_space, 3 * min_space],
[4 * min_space, 0],
[7 * min_space, 0],
[7 * min_space, 4 * min_space],
[-1 * min_space, 4 * min_space],
[-1 * min_space, 0]]
position_list = [[x - min_space, y - min_space] for x, y in position_list]
w = design.design("wire_test_{}".format("_".join(layer_stack)))
wire.wire(w, layer_stack, position_list)
self.local_drc_check(w)
globals.end_openram()
# run the test from the command line
if __name__ == "__main__":
(OPTS, args) = globals.parse_args()
del sys.argv[1:]
header(__file__, OPTS.tech_name)
unittest.main(testRunner=debugTestRunner())
| [
"[email protected]"
]
| |
4df58b827efdb8b9e33d0f96ef24b7287e582d7b | 55668bc4e19fd9aa3caa4395add2fe73741eb844 | /206/main.py | 29930f8e53ae428345565b5f0a0b9b54dc5cbd5c | [
"MIT"
]
| permissive | pauvrepetit/leetcode | cb8674e10b0dc6beb29e9b64a49bede857b889fd | 2fda37371f1c5afcab80214580e8e5fd72b48a3b | refs/heads/master | 2023-08-30T07:39:08.982036 | 2023-08-25T08:52:57 | 2023-08-25T08:52:57 | 151,678,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 905 | py | #
# @lc app=leetcode.cn id=206 lang=python3
#
# [206] Reverse Linked List
#
from typing import Optional
# @lc code=start
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def rev(self, head: Optional[ListNode], prev: Optional[ListNode]) -> Optional[ListNode]:
if head == None:
return head
next = head.next
head.next = prev
if next == None:
return head
return self.rev(next, head)
def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]:
return self.rev(head, None)
# if head == None or head.next == None:
# return head
# return self.rev(head.next, head)
# @lc code=end
a = ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5, None)))))
Solution().reverseList(a) | [
"[email protected]"
]
| |
6b482961ceb846d151075be3e5574ce30b958fbb | 9c3c83007c5bf0f36635b0045b2aad7f8a11ac11 | /novice/03-02/variablerules.py | 04efe613839917730bde3cf315a1c00a8a16bb91 | [
"MIT"
]
| permissive | septiannurtrir/praxis-academy | bc58f9484db36b36c202bf90fdfd359482b72770 | 1ef7f959c372ae991d74ccd373123142c2fbc542 | refs/heads/master | 2021-06-21T17:04:58.379408 | 2019-09-13T16:46:08 | 2019-09-13T16:46:08 | 203,007,994 | 1 | 0 | MIT | 2021-03-20T01:43:24 | 2019-08-18T13:38:23 | Python | UTF-8 | Python | false | false | 481 | py | from flask import Flask
from markupsafe import escape
app = Flask(__name__)
@app.route('/user/<username>')
def show_user_profile(username):
# show the user profile for that user
return 'User %s' % escape(username)
@app.route('/post/<int:post_id>')
def show_post(post_id):
#show the post with the given id, the id is an integer
return 'Post %d' % post_id
@app.route('/path/<path:subpath>')
def show_subpath(subpath):
#show the subpath after /path/
return 'Subpath %s' % escape(subpath) | [
"[email protected]"
]
| |
b1bb1dfa462be18d9be81098971bfbdf1023cb30 | 7ce761781e7f5b57b2469adce459a71b4758694d | /env/lib/python2.7/site-packages/graphlab/toolkits/_internal/search/_search.py | 0870866d1721ce31a94ab4442179cea4425399d7 | []
| no_license | hophamtenquang/RecSys | c4fa18d1ba262670a284b2fba2ca97b882ef0f4c | 535472844a046cadd9230302da647a54afff95e8 | refs/heads/master | 2021-01-19T17:00:32.924064 | 2017-08-30T10:31:32 | 2017-08-30T10:31:32 | 101,031,687 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,660 | py | import sys as _sys
import graphlab as _gl
import graphlab.connect.main as glconnect
from graphlab.toolkits._internal_utils import _raise_error_if_not_sframe
from graphlab.toolkits._model import SDKModel as _SDKModel
from graphlab.toolkits._main import ToolkitError as _ToolkitError
from graphlab.toolkits._internal_utils import _toolkit_repr_print
from graphlab.util import _make_internal_url
from graphlab.util import _raise_error_if_not_of_type
def create(data, features=None,
bm25_k1=1.5,
bm25_b=0.75,
tfidf_threshold=0.01,
verbose=True):
"""
Create a searchable index of text columns in an SFrame.
Parameters
----------
data : SFrame
An SFrame containing at least one str column containing text that should
be indexed.
features : list of str
A list of column names that contain text that should be indexed.
Default: all str columns in the provided dataset.
bm25_k1 : float
Tuning parameter for the relative importance of term frequencies when
computing the BM25 score between a query token and a document.
bm25_b : float
Tuning parameter to downweight scores of long documents when
computing the BM25 score between a query token and a document.
tfidf_threshold : float
Tuning parameter to skip indexing words that have a TF-IDF score below
this value.
verbose : bool
Controls whether or not to print progress during model creation.
Returns
-------
out
SearchModel
See Also
--------
SearchModel.query
References
----------
Christopher D. Manning, Hinrich Schutze, and Prabhakar Raghavan.
Introduction to information retrieval.
http://nlp.stanford.edu/IR-book/pdf/irbookonlinereading.pdf
Examples
--------
>>> import graphlab as gl
>>> sf = gl.SFrame({'text': ['Hello my friend', 'I love this burrito']})
>>> m = gl.toolkits._internal.search.create(sf)
>>> print m.query('burrito')
"""
# Input validation on data and features
if features is None:
features = _get_str_columns(data)
_raise_error_if_not_of_type(data, [_gl.SFrame])
_raise_error_if_not_of_type(features, [list])
for f in features:
if data[f].dtype() != str:
raise _ToolkitError("Feature `%s` must be of type str" % f)
# Store options
options = {}
options['bm25_b'] = bm25_b
options['bm25_k1'] = bm25_k1
options['tfidf_threshold'] = tfidf_threshold
options['verbose'] = verbose
options['features'] = features
# Construct model
proxy = _gl.extensions._SearchIndex()
proxy.init_options(options)
proxy.index(data)
return SearchModel(proxy)
class SearchModel(_SDKModel):
"""
SearchModel objects can be used to search text data for a given query.
This model should not be constructed directly. Instead, use
:func:`graphlab.toolkits._internal.search.create` to create an
instance of this model.
"""
def __init__(self, model_proxy=None):
super(SearchModel, self).__init__(model_proxy)
self.__name__ = 'search'
def _get_wrapper(self):
_class = self.__proxy__.__class__
proxy_wrapper = self.__proxy__._get_wrapper()
def model_wrapper(unity_proxy):
model_proxy = proxy_wrapper(unity_proxy)
return SearchModel(model_proxy)
return model_wrapper
@classmethod
def _get_queryable_methods(cls):
'''Returns a list of method names that are queryable through Predictive
Service'''
return {'query': {}}
def get_current_options(self):
return self.__proxy__.get_current_options()
def __str__(self):
return self.__repr__()
def _get_summary_struct(self):
"""
Returns a structured description of the model, including (where relevant)
the schema of the training data, description of the training data,
training statistics, etc.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<label>','<field>')
section_titles: list
A list of section titles.
The order matches that of the 'sections' object.
"""
data_fields = [
('Number of documents', 'num_documents'),
('Average tokens/document', 'average_document_length')]
param_ranking_fields = [
('BM25 k1', 'bm25_k1'),
('BM25 b', 'bm25_b'),
('TF-IDF threshold', 'tfidf_threshold')]
index_fields = [
('Number of unique tokens indexed', 'num_tokens'),
('Preprocessing time (s)', 'elapsed_processing'),
('Indexing time (s)', 'elapsed_indexing')]
section_titles = ['Corpus',
'Indexing settings',
'Index']
return ([data_fields,
param_ranking_fields,
index_fields],
section_titles)
def __repr__(self):
(sections, section_titles) = self._get_summary_struct()
return _toolkit_repr_print(self, sections,
section_titles, width=32)
def query(self, query, num_results=10,
expansion_k=5,
expansion_epsilon=0.1,
expansion_near_match_weight=.5):
"""
Search for text.
Parameters
----------
query: str
A string of text.
num_results : int
The number of results to return.
expansion_k : int
Maximum number of nearest words to include from query token.
expansion_epsilon : float
Maximum distance to allow between query token and nearby word when
doing query expansion. Must be between 0 and 1.
expansion_near_match_weight : float
Multiplier to use on BM25 scores for documents indexed via an
approximate match with a given token. This will be used for each of
the `expansion_k` words that are considered an approximate match.
Must be between 0 and 1.
Returns
-------
out: SFrame
The rows of the original SFrame along with a `score` column
which contains the BM25 score between this query and the row.
Examples
--------
>>> import graphlab as gl
>>> sf = gl.SFrame({'text': ['Hello my friend', 'I love this burrito']})
>>> s = gl.search.create(sf, features=['text'])
>>> s.query('burrito')
"""
if _sys.version_info.major == 2:
_raise_error_if_not_of_type(query, [str, unicode])
else:
_raise_error_if_not_of_type(query, [str])
q = query.split(' ')
results = self.__proxy__.query_index(q,
expansion_k=expansion_k,
expansion_epsilon=expansion_epsilon,
expansion_near_match_weight=expansion_near_match_weight)
results = self.__proxy__.join_query_result(results, method='default',
num_results=num_results)
return results
def _get_str_columns(sf):
"""
Returns a list of names of columns that are string type.
"""
return [name for name in sf.column_names() if sf[name].dtype() == str]
| [
"[email protected]"
]
| |
01a4792e02f8e7a6911af7ab76554e70e0471d8b | 694d57c3e512ce916269411b51adef23532420cd | /leetcode/215kth_largest_element.py | 58f31f67f601ead30791f3de73429cab51b47b5f | []
| no_license | clovery410/mycode | 5541c3a99962d7949832a0859f18819f118edfba | e12025e754547d18d5bb50a9dbe5e725fd03fd9c | refs/heads/master | 2021-05-16T02:46:47.996748 | 2017-05-10T23:43:50 | 2017-05-10T23:43:50 | 39,235,141 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,139 | py | from random import randint
class Solution(object):
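    # Three-way partition of nums[start:end+1] around the pivot nums[p]:
    # afterwards nums[start:k] < pivot, nums[k:i] == pivot, nums[i:end+1] > pivot,
    # so the returned pair (k, i-1) brackets the pivot's final positions.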
def partition(self, start, end, p, nums):
pivot = nums[p]
nums[p], nums[start] = nums[start], nums[p]
i = j = k = start
while j <= end:
if nums[j] == pivot:
nums[i], nums[j] = nums[j], nums[i]
i += 1
elif nums[j] < pivot:
nums[i], nums[j] = nums[j], nums[i]
nums[k], nums[i] = nums[i], nums[k]
i += 1
k += 1
j += 1
return k, i-1
def findKthLargest(self, nums, k):
n = len(nums)
target = n - k
mid_h, mid_e = self.partition(0, n - 1, randint(0, n-1), nums)
s, e = 0, n-1
while True:
if target >= mid_h - s and target <= mid_e - s:
return nums[mid_h]
elif target > mid_e - s:
r = randint(mid_e + 1, e)
mid_h, mid_e = self.partition(mid_e + 1, e, r, nums)
else:
r = randint(s, mid_h - 1)
mid_h, mid_e = self.partition(s, mid_h - 1, r, nums)
| [
"[email protected]"
]
| |
29acead529edc72fe4dc197e2a785872c12c51e0 | 3cedc2e0867a53ed2f36e01624f369693d1a050d | /rnn/rnn88_females_original.py | f4f2cc56c161654c5daaec73a43be8329ccc3589 | []
| no_license | lkpiel/mastersthesis | a471d8c6a5881e13599b22965dd3f437c83fc967 | 71c723b435b347d2805e159b6e10828f89541e98 | refs/heads/master | 2023-02-20T11:57:45.266361 | 2018-05-06T11:17:43 | 2018-05-06T11:17:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,742 | py | #! /usr/bin/python3
import sys
print(sys.version)
import pandas
import numpy as np
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential, load_model
from keras.layers import Dense, Input, Dropout, Average, Merge, Layer, Conv2D, MaxPooling2D, GlobalAveragePooling2D, GlobalAveragePooling2D, AveragePooling2D, Reshape, BatchNormalization
from keras.optimizers import SGD, Adam
from keras import initializers
from keras import regularizers
from keras import constraints
from keras import backend as K
from IPython.core.debugger import Tracer
from keras.layers import Masking, LSTM, TimeDistributed, Bidirectional, Flatten
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from keras import callbacks
from keras.constraints import maxnorm, unitnorm
from sklearn.preprocessing import OneHotEncoder
from keras.models import Model
import pdb
import keras
#FORMAT DATA
#ONE HOT ENCODES A GIVEN COLUMN
def onehot(x): return np.array(OneHotEncoder().fit_transform(x.values.reshape(-1,1)).todense())
def format(data):
del data['Unnamed: 605']
mask = data['AgeGroup'] == 'ag1'
column_name = 'AgeGroup'
data.loc[mask, column_name] = 0
mask = data['AgeGroup'] == 'ag2'
column_name = 'AgeGroup'
data.loc[mask, column_name] = 1
mask = data['AgeGroup'] == 'ag3'
column_name = 'AgeGroup'
data.loc[mask, column_name] = 2
mask = data['Gender'] == 'm'
column_name = 'Gender'
data.loc[mask, column_name] = 0
mask = data['Gender'] == 'f'
column_name = 'Gender'
data.loc[mask, column_name] = 1
return data
def smooth_labels(y, smooth_factor):
'''Convert a matrix of one-hot row-vector labels into smoothed versions.
# Arguments
y: matrix of one-hot row-vector labels to be smoothed
smooth_factor: label smoothing factor (between 0 and 1)
# Returns
A matrix of smoothed labels.
'''
assert len(y.shape) == 2
if 0 <= smooth_factor <= 1:
# label smoothing ref: https://www.robots.ox.ac.uk/~vgg/rg/papers/reinception.pdf
y *= 1 - smooth_factor
y += smooth_factor / y.shape[1]
else:
raise Exception('Invalid label smoothing factor: ' + str(smooth_factor))
return y
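# Illustrative values (not part of the original training flow): with
# smooth_factor=0.1 a one-hot row [1, 0, 0] becomes [0.9333..., 0.0333...,
# 0.0333...], and each row still sums to 1.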
def dot_product(x, kernel):
"""
Wrapper for dot product operation, in order to be compatible with both
Theano and Tensorflow
Args:
x (): input
kernel (): weights
Returns:
"""
if K.backend() == 'tensorflow':
return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)
else:
return K.dot(x, kernel)
class AttentionWithContext(Layer):
"""
Attention operation, with a context/query vector, for temporal data.
Supports Masking.
Follows the work of Yang et al. [https://www.cs.cmu.edu/~diyiy/docs/naacl16.pdf]
"Hierarchical Attention Networks for Document Classification"
by using a context vector to assist the attention
# Input shape
3D tensor with shape: `(samples, steps, features)`.
# Output shape
2D tensor with shape: `(samples, features)`.
How to use:
Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.
The dimensions are inferred based on the output shape of the RNN.
Note: The layer has been tested with Keras 2.0.6
Example:
model.add(LSTM(64, return_sequences=True))
model.add(AttentionWithContext())
# next add a Dense layer (for classification/regression) or whatever...
"""
def __init__(self,
W_regularizer=None, u_regularizer=None, b_regularizer=None,
W_constraint=None, u_constraint=None, b_constraint=None,
bias=True, **kwargs):
self.supports_masking = True
self.init = initializers.get('glorot_uniform')
self.W_regularizer = regularizers.get(W_regularizer)
self.u_regularizer = regularizers.get(u_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.u_constraint = constraints.get(u_constraint)
self.b_constraint = constraints.get(b_constraint)
self.bias = bias
super(AttentionWithContext, self).__init__(**kwargs)
def build(self, input_shape):
print(input_shape)
assert len(input_shape) == 3
self.W = self.add_weight((input_shape[-1], input_shape[-1],),
initializer=self.init,
name='{}_W'.format(self.name),
regularizer=self.W_regularizer,
constraint=self.W_constraint)
if self.bias:
self.b = self.add_weight((input_shape[-1],),
initializer='zero',
name='{}_b'.format(self.name),
regularizer=self.b_regularizer,
constraint=self.b_constraint)
self.u = self.add_weight((input_shape[-1],),
initializer=self.init,
name='{}_u'.format(self.name),
regularizer=self.u_regularizer,
constraint=self.u_constraint)
super(AttentionWithContext, self).build(input_shape)
def compute_mask(self, input, input_mask=None):
# do not pass the mask to the next layers
return None
def call(self, x, mask=None):
uit = dot_product(x, self.W)
if self.bias:
uit += self.b
uit = K.tanh(uit)
ait = dot_product(uit, self.u)
a = K.exp(ait)
# apply mask after the exp. will be re-normalized next
if mask is not None:
# Cast the mask to floatX to avoid float64 upcasting in theano
a *= K.cast(mask, K.floatx())
# in some cases especially in the early stages of training the sum may be almost zero
# and this results in NaN's. A workaround is to add a very small positive number epsilon to the sum.
# a /= K.cast(K.sum(a, axis=1, keepdims=True), K.floatx())
a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
a = K.expand_dims(a)
weighted_input = x * a
return K.sum(weighted_input, axis=1)
def compute_output_shape(self, input_shape):
return input_shape[0], input_shape[-1]
import tensorflow as tf
import keras
from keras import backend as K
def age_group_accuracy(y_true, y_pred):
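    # The lookup array below buckets ages: 0-12 -> group 0, 13-14 -> group 1,
    # 15 and above -> group 2; accuracy is then computed on the bucket ids
    # rather than on the raw age values.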
array = np.array([0]*13 + [1]*2 + [2]*10000000)
age_to_group = K.variable(value=array, dtype='int32', name='age_to_group')
ages_true = tf.gather(age_to_group, tf.cast(tf.rint(y_true), tf.int32))
ages_pred = tf.gather(age_to_group, tf.cast(tf.rint(y_pred), tf.int32))
return K.mean(K.equal(ages_true, ages_pred), axis=-1)
train_data = pandas.read_csv("/storage/tanel/child_age_gender/exp/ivectors_2048/train/export.csv", sep=" ")
val_data = pandas.read_csv("/storage/tanel/child_age_gender/exp/ivectors_2048/dev/export.csv", sep=" ")
test_data = pandas.read_csv("/storage/tanel/child_age_gender/exp/ivectors_2048/test/export.csv", sep=" ")
train_data = format(train_data)
val_data = format(val_data)
test_data = format(test_data)
trainFemaleIndexes = train_data.index[train_data['Gender'] == 1].tolist()
valFemaleIndexes = val_data.index[val_data['Gender'] == 1].tolist()
testFemaleIndexes = test_data.index[test_data['Gender'] == 1].tolist()
train_data_females = train_data[train_data['Gender'] == 1]
val_data_females = val_data[val_data['Gender'] == 1]
test_data_females = test_data[test_data['Gender'] == 1]
test_data_males = test_data[test_data['Gender'] == 0]
train_labels_females = onehot(train_data_females['AgeGroup'])
val_labels_females = onehot(val_data_females['AgeGroup'])
test_labels_females = onehot(test_data_females['AgeGroup'])
test_labels_males = onehot(test_data_males['AgeGroup'])
train_i_vectors_females = train_data_females.iloc[:, 5:].as_matrix()
val_i_vectors_females = val_data_females.iloc[:, 5:].as_matrix()
test_i_vectors_females = test_data_females.iloc[:, 5:].as_matrix()
test_i_vectors_males = test_data_males.iloc[:, 5:].as_matrix()
#testMaleIndexes = test_data_i_vectors.index[test_data_i_vectors['Gender'] == 1].tolist()
print ("LABELS LOADED")
train_data_padded = np.load("/storage/hpc_lkpiel/data/fbank_train_data_padded.npy", encoding="bytes")[..., np.newaxis]
val_data_padded = np.load("/storage/hpc_lkpiel/data/fbank_val_data_padded.npy", encoding="bytes")[..., np.newaxis]
test_data_padded = np.load("/storage/hpc_lkpiel/data/fbank_test_data_padded.npy", encoding="bytes")[..., np.newaxis]
train_data_padded = train_data_padded[np.array(trainFemaleIndexes)]
val_data_padded = val_data_padded[np.array(valFemaleIndexes)]
test_data_padded = test_data_padded[np.array(testFemaleIndexes)]
print ("DATA LOADED")
################################################################################################
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.7,
patience=2, min_lr=0.0001, verbose=1)
kernel_regularizer = regularizers.l2(0.0001)
input_layer = Input(shape=(1107, 20, 1), name="lstm_input")
x = Conv2D(128, (3, 20), activation='relu', border_mode='valid')(input_layer)
x = Conv2D(128, (5, 1), strides=(3,1), activation='relu', border_mode='valid')(x)
x = Conv2D(128, (5, 1), strides=(3,1), activation='relu', border_mode='valid')(x)
x = Reshape((-1, 128))(x)
x = Bidirectional(LSTM(128, return_sequences=True))(x)
x = AttentionWithContext()(x)
output_layer_1 = Dense(3, activation='softmax', name='group_output')(x)
output_layer_2 = Dense(2, name='gender')(x)
print ("model_88 BUILT")
model_88 = Model(inputs=[input_layer], outputs=[output_layer_1, output_layer_2])
model_88.compile(loss={'group_output':'categorical_crossentropy', 'gender':'categorical_crossentropy'},
optimizer=SGD(0.01),
metrics={'group_output':'accuracy','gender':'accuracy'})
print ("model_88 COMPILED")
checkpoint = ModelCheckpoint(filepath='/models/model_88.hdf5', monitor='val_group_output_acc', save_best_only=True)
'''
history = model_88.fit([train_data_padded],
[train_labels_age_group, train_labels_gender],
validation_data=([val_data_padded], [val_labels_age_group, val_labels_gender]),
epochs=30,
verbose=1,
batch_size=64,
callbacks=[checkpoint]
)
np.save('../history/history_model_88.npy', history.history)
modelHistory = np.load('../history/history_model_88.npy').item()
print ("HISTORY: ")
print (modelHistory)
'''
model_88.load_weights('/models/model_88.hdf5')
val_predictions = model_88.predict(val_data_padded)
print ("VAL PREDICTED")
test_predictions = model_88.predict(test_data_padded)
print ("TEST PREDICTED")
np.save('/home/hpc_lkpiel/predictions/val/model_88_females_original_age_group.npy', val_predictions[0])
print ("VAL SAVED")
np.save('/home/hpc_lkpiel/predictions/test/model_88_females_original_age_group.npy', test_predictions[0])
print ("TEST SAVED")
'''
np.save('/home/hpc_lkpiel/predictions/val/model_88_gender.npy', val_predictions[1])
print ("VAL SAVED")
np.save('/home/hpc_lkpiel/predictions/test/model_88_gender.npy', test_predictions[1])
print ("TEST SAVED")
'''
'''
valResult = model_88.evaluate([val_data_padded, val_i_vectors], [val_labels_age_group, val_labels_gender])
testResult = model_88.evaluate([test_data_padded, test_i_vectors], [test_labels_age_group, test_labels_gender] )
print (str(valResult))
print (str(testResult))
print ("WROTE TO FILE model_88")
'''
######################################## | [
"[email protected]"
]
| |
d2441a8e7cd1131ccb87e1debc5c49e33fc62f90 | 2031771d8c226806a0b35c3579af990dd0747e64 | /pyobjc-framework-SearchKit/setup.py | cfce0b4f572de3e7d3cb839fb83d76b5f34634ee | [
"MIT"
]
| permissive | GreatFruitOmsk/pyobjc-mirror | a146b5363a5e39181f09761087fd854127c07c86 | 4f4cf0e4416ea67240633077e5665f5ed9724140 | refs/heads/master | 2018-12-22T12:38:52.382389 | 2018-11-12T09:54:18 | 2018-11-12T09:54:18 | 109,211,701 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | '''
Deprecated wrappers for the "SearchKit" framework on macOS.
Use the CoreServices package instead.
'''
from pyobjc_setup import setup
VERSION="5.1.1"
setup(
name='pyobjc-framework-SearchKit',
description = "Wrappers for the framework SearchKit on macOS",
min_os_level='10.5',
packages = [ "SearchKit" ],
version=VERSION,
install_requires = [
'pyobjc-core>='+VERSION,
'pyobjc-framework-CoreServices>='+VERSION,
],
long_description=__doc__,
)
| [
"[email protected]"
]
| |
98e494f9969bfbe0e38927f5f5a9e9da3f675862 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /tjMNAEgkNvM5eyEqJ_9.py | 009b0a6620c65bbe7a418ed56973cde1d60c4685 | []
| no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 901 | py | """
You are given two inputs:
1. An array of abbreviations.
2. An array of words.
Write a function that returns `True` if each abbreviation **uniquely
identifies** a word, and `False` otherwise.
### Examples
unique_abbrev(["ho", "h", "ha"], ["house", "hope", "happy"]) ➞ False
// "ho" and "h" are ambiguous and can identify either "house" or "hope"
unique_abbrev(["s", "t", "v"], ["stamina", "television", "vindaloo"]) ➞ True
unique_abbrev(["bi", "ba", "bat"], ["big", "bard", "battery"]) ➞ False
unique_abbrev(["mo", "ma", "me"], ["moment", "many", "mean"]) ➞ True
### Notes
Abbreviations will be a substring from `[0, n]` from the original string.
"""
def unique_abbrev(abbs, words):
  # Count how many candidate words each abbreviation matches; every
  # abbreviation must identify exactly one word (works for any list length,
  # unlike a hard-coded three-element check).
  matches = [sum(w.startswith(a) for w in words) for a in abbs]
  return all(m == 1 for m in matches)
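# Spot checks mirroring the docstring examples (kept as comments so the file
# stays side-effect free):
#   unique_abbrev(["ho", "h", "ha"], ["house", "hope", "happy"])          -> False
#   unique_abbrev(["s", "t", "v"], ["stamina", "television", "vindaloo"]) -> True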
| [
"[email protected]"
]
| |
78bb3e33b198664933fcf0bc38618fc403aed04b | 2393a8fabee3d39bf1623e26c0313e3351caf814 | /python/study/IO/示例代码/nonblocking/nonblocking-client.py | bb4e00884ca6c2f8d4878a852bc366caf746f668 | []
| no_license | shidg/note | 5d3aaff9d1c6cf87b89513b7712638c9b808653c | d46aceaed64e3e2f854149f71f18fa92d650dc37 | refs/heads/master | 2023-05-26T16:14:51.715966 | 2023-05-19T02:08:36 | 2023-05-19T02:08:36 | 27,533,612 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | #!/usr/local/bin/python3
# -*- coding: utf-8 -*-#
'''
author: -- shidegang --
Created Time: 2019-08-28 10:43:17
'''
import socket
sk = socket.socket()
server_addr = ('127.0.0.1',9000)
sk.connect(server_addr)
while True:
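    # Blocking request/response loop; the "nonblocking" in the path presumably
    # refers to the matching server script, not to this client.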
sk.sendall('hello'.encode(encoding='utf8'))
data = sk.recv(1024)
print(data.decode(encoding='utf8')) | [
"[email protected]"
]
| |
bdd26c536928ecc4169204488d28c7ea79fac6d1 | 6e0d8d91dd22e2275cd713822679d5cabbc9331a | /thespian/system/__init__.py | 2c69b0574347c752e0c9daedb76632e6daa45a22 | [
"MIT"
]
| permissive | kquick/Thespian | 711712eb0a9ad3370f1013c8393cc461b9541dfe | dfc6d3e865c05f929328b85e98671a5c8fc3a54a | refs/heads/master | 2023-05-26T15:51:57.959690 | 2023-05-22T15:08:00 | 2023-05-22T15:08:00 | 78,292,621 | 203 | 32 | MIT | 2021-06-22T14:42:09 | 2017-01-07T17:18:27 | Python | UTF-8 | Python | false | false | 636 | py | # This module contains the various ActorSystemBase implementations
# upon which the ActorSystem operates.
from thespian.actors import ActorAddress, ActorSystemMessage, PoisonMessage
from thespian.system.addressManager import *
from thespian.system.messages.status import *
from thespian.system.messages.convention import *
def isInternalActorSystemMessage(msg):
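    # Unwrap a PoisonMessage first so the check applies to the original payload;
    # status replies and poison wrappers themselves are not counted as internal.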
if isinstance(msg, PoisonMessage):
msg = msg.poisonMessage
return isinstance(msg, ActorSystemMessage) and \
not isinstance(msg, (Thespian_SystemStatus,
Thespian_ActorStatus,
PoisonMessage))
| [
"[email protected]"
]
| |
3e9fbcc82ac9735647bbbf58624023d9b3049086 | 7f368b275cd18a5b7b2eb22b822223252914c8ef | /tensorflow_gan/python/tpu/cross_replica_ops.py | 7ab35260b93b8be210c6cb4f9caf314cc746b313 | [
"Apache-2.0"
]
| permissive | nivedwho/gan | 176c624800378d9dfa9f74211c362b62953cc7f1 | 723ce1e3627778b979f048d817f834f253611ff4 | refs/heads/master | 2023-08-01T08:07:34.299917 | 2021-09-14T04:10:38 | 2021-09-14T04:11:37 | 396,680,181 | 0 | 0 | Apache-2.0 | 2021-08-16T07:44:33 | 2021-08-16T07:44:33 | null | UTF-8 | Python | false | false | 3,128 | py | # coding=utf-8
# Copyright 2021 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow operations specific to TPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow as tf
from tensorflow.python.tpu import tpu_function # pylint: disable=g-direct-tensorflow-import
__all__ = [
'cross_replica_mean',
'cross_replica_moments',
]
def cross_replica_mean(inputs, group_size=None):
"""Calculates the average value of inputs tensor across TPU replicas."""
num_replicas = tpu_function.get_tpu_context().number_of_shards
if not group_size:
group_size = num_replicas
if group_size == 1:
return inputs
if group_size != num_replicas:
group_assignment = []
assert num_replicas % group_size == 0
for g in range(num_replicas // group_size):
replica_ids = [g * group_size + i for i in range(group_size)]
group_assignment.append(replica_ids)
else:
group_assignment = None
return tf.compat.v1.tpu.cross_replica_sum(inputs, group_assignment) / tf.cast(
group_size, inputs.dtype)
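# For example, with num_replicas=8 and group_size=4 the loop above yields
# group_assignment=[[0, 1, 2, 3], [4, 5, 6, 7]], i.e. two independent
# averaging groups.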
def cross_replica_moments(inputs, axis, parallel=True, group_size=None):
"""Compute mean and variance of the inputs tensor across TPU replicas.
Args:
inputs: A tensor with 2 or more dimensions.
axis: Array of ints. Axes along which to compute mean and variance.
parallel: Use E[x^2] - (E[x])^2 to compute variance. This can be done
in parallel to computing the mean and reducing the communication overhead.
    group_size: Integer, the number of replicas to compute moments across.
None or 0 will use all replicas (global).
Returns:
Two tensors with mean and variance.
"""
# Compute local mean and then average across replicas.
mean = tf.math.reduce_mean(input_tensor=inputs, axis=axis)
mean = cross_replica_mean(mean)
if parallel:
# Compute variance using the E[x^2] - (E[x])^2 formula. This is less
# numerically stable than the E[(x-E[x])^2] formula, but allows the two
# cross-replica sums to be computed in parallel, saving communication
# overhead.
mean_of_squares = tf.reduce_mean(input_tensor=tf.square(inputs), axis=axis)
mean_of_squares = cross_replica_mean(mean_of_squares, group_size=group_size)
mean_squared = tf.square(mean)
variance = mean_of_squares - mean_squared
else:
variance = tf.math.reduce_mean(
input_tensor=tf.math.square(inputs - mean), axis=axis)
variance = cross_replica_mean(variance, group_size=group_size)
return mean, variance
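# Sketch of the identity behind the parallel path (illustrative numbers):
# for x = [1, 2, 3], mean(x**2) - mean(x)**2 == mean((x - mean(x))**2) == 2/3.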
| [
"[email protected]"
]
| |
c4ae49b5ef3dff9cda6859483ab61b793df6c6e4 | 90e6860b5370b742f01c0664ac84f14dc1272155 | /src/ziggurat/config/StandardConfigurator.py | fdce59892f864b753760a8508b5a30a278cc7f28 | []
| no_license | sernst/Ziggurat | e63f876b8f2cb3f78c7a7a4dcf79af810a540722 | 4ae09bbd9c467b2ad740e117ed00354c04951e22 | refs/heads/master | 2021-01-17T07:20:17.138440 | 2016-05-27T14:27:43 | 2016-05-27T14:27:43 | 9,278,283 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,944 | py | # StandardConfigurator.py
# (C)2013
# Scott Ernst
from __future__ import print_function, absolute_import, unicode_literals, division
import re
from pyaid.string.StringUtils import StringUtils
from pyramid.config import Configurator
from pyaid.file.FileUtils import FileUtils
#___________________________________________________________________________________________________ StandardConfigurator
class StandardConfigurator(Configurator):
"""A class for..."""
#===================================================================================================
# C L A S S
    _REST_PATTERN = re.compile(r'\*[A-Za-z0-9]+$')
_DEFAULT_SETTINGS = {
'host':'0.0.0.0',
'port':6543,
'pyramid.reload_templates':True,
'pyramid.debug_authorization':False,
'pyramid.debug_notfound':False,
'pyramid.debug_routematch':False,
'pyramid.debug_templates':True,
'pyramid.default_locale_name':'en',
'pyramid.includes':'pyramid_tm',
'mako.input_encoding':'utf-8' }
#___________________________________________________________________________________________________ __init__
def __init__(self, app, rootViewPackage =None, **kwargs):
"""Creates a new instance of StandardConfigurator."""
super(StandardConfigurator, self).__init__(**kwargs)
self._isPopulated = False
self._app = app
self._rootViewPackage = rootViewPackage
self.add_request_method(
self._getMyAppRequestProperty,
StringUtils.toStrStr('ziggurat'), reify=True)
#===================================================================================================
# G E T / S E T
#___________________________________________________________________________________________________ GS: rootViewPackage
@property
def rootViewPackage(self):
return self._rootViewPackage
#___________________________________________________________________________________________________ GS: makoRootTemplatePath
@property
def makoRootTemplatePath(self):
return FileUtils.createPath(self._app.rootPath, 'templates', 'mako', isDir=True)
#___________________________________________________________________________________________________ GS: makoModuleDirectory
@property
def makoModuleDirectory(self):
return FileUtils.createPath(self._app.rootPath, 'operations', 'mako', isDir=True)
#===================================================================================================
# P U B L I C
#___________________________________________________________________________________________________ populateConfigs
def populateConfigs(self):
if self._isPopulated:
return
self._isPopulated = True
self._populateRoutes()
settings = dict(self._DEFAULT_SETTINGS.items())
p = self.makoRootTemplatePath
if p:
settings['mako.directories'] = p
p = self.makoModuleDirectory
if p:
settings['mako.module_directory'] = p
self._populateSettings(settings)
self.add_settings(settings)
#___________________________________________________________________________________________________ addRouteItem
def addRouteItem(self, name, pattern, className, renderer =None, package =None, subpackage =None):
"""Adds a route to the registry."""
# Adds optional end slash argument to URLs that don't enforce an end slash themselves
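        # e.g. (illustrative) 'users/{id}' becomes 'users/{id}{endSlash:[/]*}',
        # which matches both 'users/7' and 'users/7/'.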
if not pattern.endswith('/'):
if self._REST_PATTERN.search(pattern) is None:
pattern += '{endSlash:[/]*}'
importDef = [className, className]
if subpackage:
importDef.insert(0, subpackage)
importDef.insert(0, package if package else self.rootViewPackage)
self.add_route(name, pattern)
self.add_view('.'.join(importDef), route_name=name, renderer=renderer)
#___________________________________________________________________________________________________ addStaticRouteItem
def addStaticRouteItem(self, name, path):
self.add_static_view(name=name, path=path)
#===================================================================================================
# P R O T E C T E D
#___________________________________________________________________________________________________ _getMyAppRequestProperty
def _getMyAppRequestProperty(self, request):
return self._app
#___________________________________________________________________________________________________ _populateSettings
def _populateSettings(self, settings):
pass
#___________________________________________________________________________________________________ _populateRoutes
def _populateRoutes(self):
"""Doc..."""
pass
#===================================================================================================
# I N T R I N S I C
#___________________________________________________________________________________________________ __repr__
def __repr__(self):
return self.__str__()
#___________________________________________________________________________________________________ __unicode__
def __unicode__(self):
return StringUtils.toUnicode(self.__str__())
#___________________________________________________________________________________________________ __str__
def __str__(self):
return '<%s>' % self.__class__.__name__
| [
"[email protected]"
]
| |
ddf8e310ace1ebb6773c14c882d812f973ffa1af | 4b4828d3c98d76d7bf38f90a015945acc408ddc5 | /PythonAI/Source/W2D3/src/bmi_web.py | 441f43d452b0cdebf5e3e9a87e8c79e84ae2551b | []
| no_license | Huh-jae-won/Study | cb5d32728e8dcded492e7edb054b500c91ec607c | e4dbc3fef69bb273b62b866fb5ef2a7250222f10 | refs/heads/main | 2023-06-20T13:06:26.691899 | 2021-07-11T07:43:41 | 2021-07-11T07:43:41 | 362,759,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,199 | py | """
# URL : http://localhost:8080/cgi-bin/bmi_web.py
"""
# Module loading ------------------------------------------------
import cgi, sys, codecs, os
import joblib
# WEB encoding setup ---------------------------------------------
sys.stdout=codecs.getwriter('utf-8')(sys.stdout.detach())
# Function definitions -------------------------------------------
# WEB page rendering ---------------------------------------------
def displayWEB(detect_msg):
print("Content-Type: text/html; charset=utf-8")
print("")
html="""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
    <title>Obesity Check</title>
</head>
<body align="center">
    <h2>[ BMI Status Check ]</h2>
<form>
<div style='text-align:center; background-color:#D5D5D5;border-radius:10px;width:60%; margin: auto;padding:50px;'>
<input id="height" type="text" placeholder="키" name="height">   
<input id="weight" type="text" placeholder="몸무게" name="weight">
<input type="submit" value="판정"></br>
<p><font color='blue'>{}</font></p>
</div>
</form></body></html>""".format(detect_msg)
print(html)
# Classification -------------------------------------------------
def detect_bmi(w, h):
w = int(w)
h = int(h)
    # Predict the BMI class
res = clf.predict([[w / 100, h / 200]])
return str(res[0])
# Main logic -----------------------------------------------------
# (1) Load the trained model
pklfile = os.path.dirname(__file__) + "/bmi.pkl"
clf = joblib.load(pklfile)
# (2) Read the <INPUT> values from the WEB page <Form>
form = cgi.FieldStorage()
height_value = form.getvalue('height')
weight_value = form.getvalue('weight')
# (3) Run the classification
if height_value is not None and weight_value is not None:
bmi_dic = {"fat": "과체중", "normal": "정상체중", "thin": "저체중"}
result = detect_bmi(weight_value, height_value)
    result = 'Height {}, weight {} => {}.'.format(height_value, weight_value, bmi_dic[result])
else:
    result = 'No measured result available.'
# (4) Render the WEB page
displayWEB(result)
| [
"[email protected]"
]
| |
43bd61d034275b4e72d5fd73ddf6e07f646548ed | 85f68b427bf9c4b8b5c3f8a70dccc217226e9706 | /gam_app/old_migrations/0012_auto_20180619_1641.py | 7de987d6f30f3cd7be49de7d4ff093f9f154c292 | []
| no_license | benrogboe/GAM | ffb87e76a87aa7eaf0d0d33d4df7ddc571399e49 | fbec7cb967252578d4669c5ff91a9b0b9cdfd9d5 | refs/heads/master | 2020-03-30T05:08:38.811097 | 2018-09-28T18:55:24 | 2018-09-28T18:55:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 737 | py | # Generated by Django 2.0.1 on 2018-06-19 16:41
import ckeditor.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('gam_app', '0011_auto_20180619_1527'),
]
operations = [
migrations.AlterField(
model_name='imagen',
name='archivo',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='gam_app.Archivo'),
),
migrations.AlterField(
model_name='imagen',
name='colección',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='gam_app.Colección'),
),
]
| [
"[email protected]"
]
| |
c4b0fa6a10dd0233f06a512fb6746c6c4f0b86d7 | b17fda8e3a9f360cbab8e8ed0ecd66b03787250a | /.venv/lib/python2.7/site-packages/planemo/templates.py | caa25724a0252f54837ada1ffbff3f78b82341b4 | [
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | maumauleon/galaxy-irri-dev | 7a4b824c90474da0a2f3a3b858777c5089b9d5cb | 063bf0dca5d465466aefa77edaf47df12c4ff932 | refs/heads/master | 2022-11-16T03:10:18.067196 | 2017-08-23T03:31:01 | 2017-08-23T03:31:01 | 98,497,124 | 1 | 2 | NOASSERTION | 2022-11-01T17:00:32 | 2017-07-27T05:25:40 | Python | UTF-8 | Python | false | false | 478 | py | try:
from jinja2 import Template
except ImportError:
Template = None
NO_JINJA2_MESSAGE = ("This functionality requires Jinja2 but this library is "
"unavailable. Install with `pip install jinja2`.")
def render(template_str, **kwds):
""" Use jinja2 to render a template
"""
if Template is None:
raise Exception(NO_JINJA2_MESSAGE)
template = Template(template_str)
contents = template.render(**kwds)
return contents
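# Example usage (illustrative): render("Hello {{ name }}!", name="world")
# returns "Hello world!" when Jinja2 is installed.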
| [
"[email protected]"
]
| |
dd722e77341a10ff56977e18b26a3b12366106a6 | 7729ddbb2e4eb03469cd19f2ac6b5670b831923b | /src/seraing/urban/dataimport/__init__.py | 626639e80ca9ee026c47dbf2d06065f7a7893534 | []
| no_license | IMIO/seraing.urban.dataimport_22 | 5cd7bb6e09debeb72145af107b99997ba54f96a3 | db2f3c596572159692fa6cb11050111d1cb0fca5 | refs/heads/master | 2021-05-05T17:01:27.788747 | 2017-09-12T13:45:01 | 2017-09-12T13:45:01 | 103,239,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | # -*- coding: utf-8 -*-
"""Init and utils."""
from zope.i18nmessageid import MessageFactory
_ = MessageFactory('seraing.urban.dataimport')
def initialize(context):
"""Initializer called when used as a Zope 2 product."""
| [
"[email protected]"
]
| |
a697246864c5f020df2a2b5b60c9e4a429c0d160 | 7f53c41182a6d9c5da0c58a15716f01725ac0316 | /2019_2_19_public_test/test.py | e79247064e8a5dfa2e00c40dbbe822ef17f75f3b | []
| no_license | 1286211699/2019_1_23_pub_test | f6b7ee089e78ad673c56b3cd4ccee9b2154581f6 | 3aed7f4941353d48bf3407e9d30ac85c83b0ed7b | refs/heads/master | 2022-12-19T14:41:15.264627 | 2019-03-21T09:46:08 | 2019-03-21T09:46:08 | 167,125,649 | 1 | 0 | null | 2022-12-08T01:33:30 | 2019-01-23T05:54:52 | HTML | UTF-8 | Python | false | false | 666 | py | import _thread
import time
# Define a function to run in each thread
def print_time( threadName, delay):
count = 0
while count < 5:
time.sleep(delay)
count += 1
print ("%s: %s" % ( threadName, time.ctime(time.time()) ))
# Create two threads.
# The _thread module is not recommended: its main thread does not wait
# for child threads to finish. The higher-level threading module is the
# recommended wrapper; _thread is shown here for reference only.
try:
_thread.start_new_thread( print_time,("Thread-1", 2, ) )
_thread.start_new_thread( print_time,("Thread-2", 4, ) )
except:
print ("Error: 无法启动线程")
# Busy-wait so the main thread does not exit and kill the child threads
while True:
    pass
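# A minimal sketch of the recommended threading-based equivalent, where
# join() replaces the busy-wait above:
#   import threading
#   t1 = threading.Thread(target=print_time, args=("Thread-1", 2))
#   t2 = threading.Thread(target=print_time, args=("Thread-2", 4))
#   t1.start(); t2.start()
#   t1.join(); t2.join()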
| [
"[email protected]"
]
| |
efeee94769b83f842bac96bd9d32030c907b7472 | 14be69d424c8f30cab70231d5509df50ccaa2f04 | /tensorflow/python/training/basic_session_run_hooks.py | 99f057e8371aa6d1d5420e5622a79426783ded4b | [
"Apache-2.0"
]
| permissive | siemanko/tensorflow | 739847a1b7d44e6d9291da63be0daf3ae8f2d17c | 66e0faf5f2391d8e1b3acf69afbfa0adf609596d | refs/heads/master | 2021-07-08T09:00:56.875610 | 2017-10-05T18:16:17 | 2017-10-05T18:16:17 | 105,937,165 | 0 | 1 | null | 2017-10-05T20:29:58 | 2017-10-05T20:29:58 | null | UTF-8 | Python | false | false | 28,652 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Some common SessionRunHook classes.
@@LoggingTensorHook
@@StopAtStepHook
@@CheckpointSaverHook
@@StepCounterHook
@@NanLossDuringTrainingError
@@NanTensorHook
@@SummarySaverHook
@@GlobalStepWaiterHook
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import six
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.training.summary_io import SummaryWriterCache
class _HookTimer(object):
"""Base timer for determining when Hooks should trigger.
Should not be instantiated directly.
"""
def __init__(self):
pass
def reset(self):
"""Resets the timer."""
pass
def should_trigger_for_step(self, step):
"""Return true if the timer should trigger for the specified step."""
raise NotImplementedError
def update_last_triggered_step(self, step):
"""Update the last triggered time and step number.
Args:
step: The current step.
Returns:
A pair `(elapsed_time, elapsed_steps)`, where `elapsed_time` is the number
of seconds between the current trigger and the last one (a float), and
`elapsed_steps` is the number of steps between the current trigger and
the last one. Both values will be set to `None` on the first trigger.
"""
raise NotImplementedError
def last_triggered_step(self):
"""Returns the last triggered time step or None if never triggered."""
raise NotImplementedError
class SecondOrStepTimer(_HookTimer):
"""Timer that triggers at most once every N seconds or once every N steps.
"""
def __init__(self, every_secs=None, every_steps=None):
self.reset()
self._every_secs = every_secs
self._every_steps = every_steps
if self._every_secs is None and self._every_steps is None:
raise ValueError("Either every_secs or every_steps should be provided.")
if (self._every_secs is not None) and (self._every_steps is not None):
raise ValueError("Can not provide both every_secs and every_steps.")
super(SecondOrStepTimer, self).__init__()
def reset(self):
self._last_triggered_step = None
self._last_triggered_time = None
def should_trigger_for_step(self, step):
"""Return true if the timer should trigger for the specified step.
Args:
step: Training step to trigger on.
Returns:
True if the difference between the current time and the time of the last
trigger exceeds `every_secs`, or if the difference between the current
step and the last triggered step exceeds `every_steps`. False otherwise.
"""
if self._last_triggered_step is None:
return True
if self._last_triggered_step == step:
return False
if self._every_secs is not None:
if time.time() >= self._last_triggered_time + self._every_secs:
return True
if self._every_steps is not None:
if step >= self._last_triggered_step + self._every_steps:
return True
return False
def update_last_triggered_step(self, step):
current_time = time.time()
if self._last_triggered_time is None:
elapsed_secs = None
elapsed_steps = None
else:
elapsed_secs = current_time - self._last_triggered_time
elapsed_steps = step - self._last_triggered_step
self._last_triggered_time = current_time
self._last_triggered_step = step
return (elapsed_secs, elapsed_steps)
def last_triggered_step(self):
return self._last_triggered_step
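# A minimal usage sketch of SecondOrStepTimer (illustrative):
#   timer = SecondOrStepTimer(every_steps=100)
#   if timer.should_trigger_for_step(step):
#     elapsed_secs, elapsed_steps = timer.update_last_triggered_step(step)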
class NeverTriggerTimer(_HookTimer):
"""Timer that never triggers."""
def should_trigger_for_step(self, step):
_ = step
return False
def update_last_triggered_step(self, step):
_ = step
return (None, None)
def last_triggered_step(self):
return None
class LoggingTensorHook(session_run_hook.SessionRunHook):
"""Prints the given tensors every N local steps, every N seconds, or at end.
The tensors will be printed to the log, with `INFO` severity. If you are not
seeing the logs, you might want to add the following line after your imports:
```python
tf.logging.set_verbosity(tf.logging.INFO)
```
Note that if `at_end` is True, `tensors` should not include any tensor
whose evaluation produces a side effect such as consuming additional inputs.
"""
def __init__(self, tensors, every_n_iter=None, every_n_secs=None,
at_end=False, formatter=None):
"""Initializes a `LoggingTensorHook`.
Args:
tensors: `dict` that maps string-valued tags to tensors/tensor names,
or `iterable` of tensors/tensor names.
every_n_iter: `int`, print the values of `tensors` once every N local
steps taken on the current worker.
every_n_secs: `int` or `float`, print the values of `tensors` once every N
seconds. Exactly one of `every_n_iter` and `every_n_secs` should be
provided.
at_end: `bool` specifying whether to print the values of `tensors` at the
end of the run.
formatter: function, takes dict of `tag`->`Tensor` and returns a string.
If `None` uses default printing all tensors.
Raises:
ValueError: if `every_n_iter` is non-positive.
"""
only_log_at_end = (
at_end and (every_n_iter is None) and (every_n_secs is None))
if (not only_log_at_end and
(every_n_iter is None) == (every_n_secs is None)):
raise ValueError(
"either at_end and/or exactly one of every_n_iter and every_n_secs "
"must be provided.")
if every_n_iter is not None and every_n_iter <= 0:
raise ValueError("invalid every_n_iter=%s." % every_n_iter)
if not isinstance(tensors, dict):
self._tag_order = tensors
tensors = {item: item for item in tensors}
else:
self._tag_order = tensors.keys()
self._tensors = tensors
self._formatter = formatter
self._timer = (
NeverTriggerTimer() if only_log_at_end else
SecondOrStepTimer(every_secs=every_n_secs, every_steps=every_n_iter))
self._log_at_end = at_end
def begin(self):
self._timer.reset()
self._iter_count = 0
# Convert names to tensors if given
self._current_tensors = {tag: _as_graph_element(tensor)
for (tag, tensor) in self._tensors.items()}
def before_run(self, run_context): # pylint: disable=unused-argument
self._should_trigger = self._timer.should_trigger_for_step(self._iter_count)
if self._should_trigger:
return SessionRunArgs(self._current_tensors)
else:
return None
def _log_tensors(self, tensor_values):
original = np.get_printoptions()
np.set_printoptions(suppress=True)
elapsed_secs, _ = self._timer.update_last_triggered_step(self._iter_count)
if self._formatter:
logging.info(self._formatter(tensor_values))
else:
stats = []
for tag in self._tag_order:
stats.append("%s = %s" % (tag, tensor_values[tag]))
if elapsed_secs is not None:
logging.info("%s (%.3f sec)", ", ".join(stats), elapsed_secs)
else:
logging.info("%s", ", ".join(stats))
np.set_printoptions(**original)
def after_run(self, run_context, run_values):
_ = run_context
if self._should_trigger:
self._log_tensors(run_values.results)
self._iter_count += 1
def end(self, session):
if self._log_at_end:
values = session.run(self._current_tensors)
self._log_tensors(values)
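# A minimal usage sketch (the tensor name "loss:0" is hypothetical):
#   hook = LoggingTensorHook({"loss": "loss:0"}, every_n_iter=100)
#   with tf.train.MonitoredTrainingSession(hooks=[hook]) as sess:
#     ...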
class StopAtStepHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step."""
def __init__(self, num_steps=None, last_step=None):
"""Initializes a `StopAtStepHook`.
This hook requests stop after either a number of steps have been
executed or a last step has been reached. Only one of the two options can be
specified.
if `num_steps` is specified, it indicates the number of steps to execute
after `begin()` is called. If instead `last_step` is specified, it
indicates the last step we want to execute, as passed to the `after_run()`
call.
Args:
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and last_step is None:
raise ValueError("One of num_steps or last_step must be specified.")
if num_steps is not None and last_step is not None:
raise ValueError("Only one of num_steps or last_step can be specified.")
self._num_steps = num_steps
self._last_step = last_step
def begin(self):
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError("Global step should be created to use StopAtStepHook.")
def after_create_session(self, session, coord):
if self._last_step is None:
global_step = session.run(self._global_step_tensor)
self._last_step = global_step + self._num_steps
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
global_step = run_values.results + 1
if global_step >= self._last_step:
# Check latest global step to ensure that the targeted last step is
# reached. global_step read tensor is the value of global step
# before running the operation. We're not sure whether current session.run
# incremented the global_step or not. Here we're checking it.
step = run_context.session.run(self._global_step_tensor)
if step >= self._last_step:
run_context.request_stop()
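# A minimal usage sketch: request stop after 1000 further steps.
#   hook = StopAtStepHook(num_steps=1000)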
class CheckpointSaverListener(object):
"""Interface for listeners that take action before or after checkpoint save.
`CheckpointSaverListener` triggers only in steps when `CheckpointSaverHook` is
triggered, and provides callbacks at the following points:
- before using the session
- before each call to `Saver.save()`
- after each call to `Saver.save()`
- at the end of session
To use a listener, implement a class and pass the listener to a
`CheckpointSaverHook`, as in this example:
```python
  class ExampleCheckpointSaverListener(CheckpointSaverListener):
def begin(self):
# You can add ops to the graph here.
print('Starting the session.')
self.your_tensor = ...
def before_save(self, session, global_step_value):
print('About to write a checkpoint')
def after_save(self, session, global_step_value):
print('Done writing checkpoint.')
def end(self, session, global_step_value):
print('Done with the session.')
...
  listener = ExampleCheckpointSaverListener()
saver_hook = tf.train.CheckpointSaverHook(
checkpoint_dir, listeners=[listener])
with tf.train.MonitoredTrainingSession(chief_only_hooks=[saver_hook]):
...
```
A `CheckpointSaverListener` may simply take some action after every
checkpoint save. It is also possible for the listener to use its own schedule
to act less frequently, e.g. based on global_step_value. In this case,
implementors should implement the `end()` method to handle actions related to
the last checkpoint save. But the listener should not act twice if
`after_save()` already handled this last checkpoint save.
"""
def begin(self):
pass
def before_save(self, session, global_step_value):
pass
def after_save(self, session, global_step_value):
pass
def end(self, session, global_step_value):
pass
class CheckpointSaverHook(session_run_hook.SessionRunHook):
"""Saves checkpoints every N steps or seconds."""
def __init__(self,
checkpoint_dir,
save_secs=None,
save_steps=None,
saver=None,
checkpoint_basename="model.ckpt",
scaffold=None,
listeners=None):
"""Initializes a `CheckpointSaverHook`.
Args:
checkpoint_dir: `str`, base directory for the checkpoint files.
save_secs: `int`, save every N secs.
save_steps: `int`, save every N steps.
saver: `Saver` object, used for saving.
checkpoint_basename: `str`, base name for the checkpoint files.
scaffold: `Scaffold`, use to get saver object.
listeners: List of `CheckpointSaverListener` subclass instances.
Used for callbacks that run immediately before or after this hook saves
the checkpoint.
Raises:
ValueError: One of `save_steps` or `save_secs` should be set.
ValueError: At most one of saver or scaffold should be set.
"""
logging.info("Create CheckpointSaverHook.")
if saver is not None and scaffold is not None:
raise ValueError("You cannot provide both saver and scaffold.")
self._saver = saver
self._checkpoint_dir = checkpoint_dir
self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
self._scaffold = scaffold
self._timer = SecondOrStepTimer(every_secs=save_secs,
every_steps=save_steps)
self._listeners = listeners or []
def begin(self):
self._summary_writer = SummaryWriterCache.get(self._checkpoint_dir)
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use CheckpointSaverHook.")
for l in self._listeners:
l.begin()
def before_run(self, run_context): # pylint: disable=unused-argument
if self._timer.last_triggered_step() is None:
# We do write graph and saver_def at the first call of before_run.
# We cannot do this in begin, since we let other hooks to change graph and
# add variables in begin. Graph is finalized after all begin calls.
training_util.write_graph(
ops.get_default_graph().as_graph_def(add_shapes=True),
self._checkpoint_dir,
"graph.pbtxt")
saver_def = self._get_saver().saver_def if self._get_saver() else None
graph = ops.get_default_graph()
meta_graph_def = meta_graph.create_meta_graph_def(
graph_def=graph.as_graph_def(add_shapes=True),
saver_def=saver_def)
self._summary_writer.add_graph(graph)
self._summary_writer.add_meta_graph(meta_graph_def)
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
stale_global_step = run_values.results
if self._timer.should_trigger_for_step(stale_global_step+1):
# get the real value after train op.
global_step = run_context.session.run(self._global_step_tensor)
if self._timer.should_trigger_for_step(global_step):
self._timer.update_last_triggered_step(global_step)
self._save(run_context.session, global_step)
def end(self, session):
last_step = session.run(self._global_step_tensor)
if last_step != self._timer.last_triggered_step():
self._save(session, last_step)
for l in self._listeners:
l.end(session, last_step)
def _save(self, session, step):
"""Saves the latest checkpoint."""
logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
for l in self._listeners:
l.before_save(session, step)
self._get_saver().save(session, self._save_path, global_step=step)
self._summary_writer.add_session_log(
SessionLog(
status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
step)
for l in self._listeners:
l.after_save(session, step)
def _get_saver(self):
if self._saver is not None:
return self._saver
elif self._scaffold is not None:
return self._scaffold.saver
# Get saver from the SAVERS collection if present.
collection_key = ops.GraphKeys.SAVERS
savers = ops.get_collection(collection_key)
if not savers:
raise RuntimeError(
"No items in collection {}. Please add a saver to the collection "
"or provide a saver or scaffold.".format(collection_key))
elif len(savers) > 1:
raise RuntimeError(
"More than one item in collection {}. "
"Please indicate which one to use by passing it to the constructor.".
format(collection_key))
self._saver = savers[0]
return savers[0]
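# A minimal usage sketch (the directory is hypothetical; assumes a Saver is
# reachable via a scaffold or the SAVERS collection):
#   hook = CheckpointSaverHook("/tmp/model_dir", save_steps=500)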
class StepCounterHook(session_run_hook.SessionRunHook):
"""Hook that counts steps per second."""
def __init__(self,
every_n_steps=100,
every_n_secs=None,
output_dir=None,
summary_writer=None):
if (every_n_steps is None) == (every_n_secs is None):
raise ValueError(
"exactly one of every_n_steps and every_n_secs should be provided.")
self._timer = SecondOrStepTimer(every_steps=every_n_steps,
every_secs=every_n_secs)
self._summary_writer = summary_writer
self._output_dir = output_dir
def begin(self):
if self._summary_writer is None and self._output_dir:
self._summary_writer = SummaryWriterCache.get(self._output_dir)
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use StepCounterHook.")
self._summary_tag = training_util.get_global_step().op.name + "/sec"
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
_ = run_context
stale_global_step = run_values.results
if self._timer.should_trigger_for_step(stale_global_step+1):
# get the real value after train op.
global_step = run_context.session.run(self._global_step_tensor)
if self._timer.should_trigger_for_step(global_step):
elapsed_time, elapsed_steps = self._timer.update_last_triggered_step(
global_step)
if elapsed_time is not None:
steps_per_sec = elapsed_steps / elapsed_time
if self._summary_writer is not None:
summary = Summary(value=[Summary.Value(
tag=self._summary_tag, simple_value=steps_per_sec)])
self._summary_writer.add_summary(summary, global_step)
logging.info("%s: %g", self._summary_tag, steps_per_sec)
class NanLossDuringTrainingError(RuntimeError):
def __str__(self):
return "NaN loss during training."
class NanTensorHook(session_run_hook.SessionRunHook):
"""Monitors the loss tensor and stops training if loss is NaN.
Can either fail with exception or just stop training.
"""
def __init__(self, loss_tensor, fail_on_nan_loss=True):
"""Initializes a `NanTensorHook`.
Args:
loss_tensor: `Tensor`, the loss tensor.
fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.
"""
self._loss_tensor = loss_tensor
self._fail_on_nan_loss = fail_on_nan_loss
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._loss_tensor)
def after_run(self, run_context, run_values):
if np.isnan(run_values.results):
failure_message = "Model diverged with loss = NaN."
if self._fail_on_nan_loss:
logging.error(failure_message)
raise NanLossDuringTrainingError
else:
logging.warning(failure_message)
# We don't raise an error but we request stop without an exception.
run_context.request_stop()
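# A minimal usage sketch (`loss` is the training loss tensor):
#   hook = NanTensorHook(loss, fail_on_nan_loss=True)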
class SummarySaverHook(session_run_hook.SessionRunHook):
"""Saves summaries every N steps."""
def __init__(self,
save_steps=None,
save_secs=None,
output_dir=None,
summary_writer=None,
scaffold=None,
summary_op=None):
"""Initializes a `SummarySaverHook`.
Args:
save_steps: `int`, save summaries every N steps. Exactly one of
`save_secs` and `save_steps` should be set.
save_secs: `int`, save summaries every N seconds.
output_dir: `string`, the directory to save the summaries to. Only used
if no `summary_writer` is supplied.
summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,
one will be created accordingly.
scaffold: `Scaffold` to get summary_op if it's not provided.
summary_op: `Tensor` of type `string` containing the serialized `Summary`
protocol buffer or a list of `Tensor`. They are most likely an output
by TF summary methods like `tf.summary.scalar` or
`tf.summary.merge_all`. It can be passed in as one tensor; if more
than one, they must be passed in as a list.
Raises:
ValueError: Exactly one of scaffold or summary_op should be set.
"""
if ((scaffold is None and summary_op is None) or
(scaffold is not None and summary_op is not None)):
raise ValueError(
"Exactly one of scaffold or summary_op must be provided.")
self._summary_op = summary_op
self._summary_writer = summary_writer
self._output_dir = output_dir
self._scaffold = scaffold
self._timer = SecondOrStepTimer(every_secs=save_secs,
every_steps=save_steps)
# TODO(mdan): Throw an error if output_dir and summary_writer are None.
def begin(self):
if self._summary_writer is None and self._output_dir:
self._summary_writer = SummaryWriterCache.get(self._output_dir)
self._next_step = None
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use SummarySaverHook.")
def before_run(self, run_context): # pylint: disable=unused-argument
self._request_summary = (
self._next_step is None or
self._timer.should_trigger_for_step(self._next_step))
requests = {"global_step": self._global_step_tensor}
if self._request_summary:
if self._get_summary_op() is not None:
requests["summary"] = self._get_summary_op()
return SessionRunArgs(requests)
def after_run(self, run_context, run_values):
_ = run_context
if not self._summary_writer:
return
stale_global_step = run_values.results["global_step"]
global_step = stale_global_step + 1
if self._next_step is None or self._request_summary:
global_step = run_context.session.run(self._global_step_tensor)
if self._next_step is None:
self._summary_writer.add_session_log(
SessionLog(status=SessionLog.START), global_step)
if self._request_summary:
self._timer.update_last_triggered_step(global_step)
if "summary" in run_values.results:
for summary in run_values.results["summary"]:
self._summary_writer.add_summary(summary, global_step)
self._next_step = global_step + 1
def end(self, session=None):
if self._summary_writer:
self._summary_writer.flush()
def _get_summary_op(self):
"""Fetches the summary op either from self._summary_op or self._scaffold.
Returns:
Returns a list of summary `Tensor`.
"""
summary_op = None
if self._summary_op is not None:
summary_op = self._summary_op
elif self._scaffold.summary_op is not None:
summary_op = self._scaffold.summary_op
if summary_op is None:
return None
if not isinstance(summary_op, list):
return [summary_op]
return summary_op
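# A minimal usage sketch (exactly one of scaffold/summary_op must be given):
#   hook = SummarySaverHook(save_steps=100, output_dir="/tmp/model_dir",
#                           summary_op=tf.summary.merge_all())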
class GlobalStepWaiterHook(session_run_hook.SessionRunHook):
"""Delays execution until global step reaches `wait_until_step`.
  This hook delays execution until the global step reaches `wait_until_step`. It
is used to gradually start workers in distributed settings. One example usage
would be setting `wait_until_step=int(K*log(task_id+1))` assuming that
task_id=0 is the chief.
"""
def __init__(self, wait_until_step):
"""Initializes a `GlobalStepWaiterHook`.
Args:
wait_until_step: an `int` shows until which global step should we wait.
"""
self._wait_until_step = wait_until_step
def begin(self):
self._worker_is_started = False
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use _GlobalStepWaiterHook.")
def before_run(self, run_context):
if self._worker_is_started:
return None
if self._wait_until_step <= 0:
self._worker_is_started = True
return None
logging.info("Waiting for global step %d before starting training.",
self._wait_until_step)
last_logged_step = 0
while True:
current_step = run_context.session.run(self._global_step_tensor)
if current_step >= self._wait_until_step:
self._worker_is_started = True
return None
if current_step - last_logged_step > 1000:
logging.info("Waiting for global step %d before starting training. "
"Current step is %d.", self._wait_until_step, current_step)
last_logged_step = current_step
time.sleep(0.5)
class FinalOpsHook(session_run_hook.SessionRunHook):
"""A hook which evaluates `Tensors` at the end of a session."""
def __init__(self, final_ops, final_ops_feed_dict=None):
"""Initializes `FinalOpHook` with ops to run at the end of the session.
Args:
final_ops: A single `Tensor`, a list of `Tensors` or a dictionary of
names to `Tensors`.
final_ops_feed_dict: A feed dictionary to use when running
`final_ops_dict`.
"""
self._final_ops = final_ops
self._final_ops_feed_dict = final_ops_feed_dict
self._final_ops_values = None
@property
def final_ops_values(self):
return self._final_ops_values
def end(self, session):
if self._final_ops is not None:
self._final_ops_values = session.run(self._final_ops,
feed_dict=self._final_ops_feed_dict)
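# A minimal usage sketch (`accuracy` is a hypothetical metric tensor):
#   hook = FinalOpsHook(final_ops={"accuracy": accuracy})
#   ... after the session ends, read hook.final_ops_values["accuracy"]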
class FeedFnHook(session_run_hook.SessionRunHook):
"""Runs `feed_fn` and sets the `feed_dict` accordingly."""
def __init__(self, feed_fn):
"""Initializes a `FeedFnHook`.
Args:
feed_fn: function that takes no arguments and returns `dict` of `Tensor`
to feed.
"""
self.feed_fn = feed_fn
def before_run(self, run_context): # pylint: disable=unused-argument
return session_run_hook.SessionRunArgs(
fetches=None, feed_dict=self.feed_fn())
def _as_graph_element(obj):
"""Retrieves Graph element."""
graph = ops.get_default_graph()
if not isinstance(obj, six.string_types):
if not hasattr(obj, "graph") or obj.graph != graph:
raise ValueError("Passed %s should have graph attribute that is equal "
"to current graph %s." % (obj, graph))
return obj
if ":" in obj:
element = graph.as_graph_element(obj)
else:
element = graph.as_graph_element(obj + ":0")
# Check that there is no :1 (e.g. it's single output).
try:
graph.as_graph_element(obj + ":1")
except (KeyError, ValueError):
pass
else:
raise ValueError("Name %s is ambiguous, "
"as this `Operation` has multiple outputs "
"(at least 2)." % obj)
return element
| [
"[email protected]"
]
| |
00f569ae77237d2d80518fa93f5d1e27c4d3b867 | f9632d4a822f6525a007598a1f67757ac9891749 | /rakuten/playground/extract_category.py | 7b023d96f6665916c58bdc27b25ebb3531856c12 | []
| no_license | rpryzant/marketing | ab055a5ae02ed287cb5a763d3e937efa72d057df | 9f463da0a2e7c9c48951e793d95534963cd19721 | refs/heads/master | 2021-09-07T01:20:31.720381 | 2018-02-14T22:13:40 | 2018-02-14T22:13:40 | 116,716,301 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,074 | py | """
extracts a category from the data
"""
import map_reduce
from collections import defaultdict
# make parent dicts
genre_tsv = '/Volumes/datasets/rakuten_dump/merchandize_data/genres/genres_with_english.tsv'
id_to_pid = {}
for l in open(genre_tsv):
l = l.strip()
try:
[id, ja_name, pid, en_name] = l.split('\t')
except:
continue
id_to_pid[id] = pid
# the category you want to extract
# from fashion_categories
CATEGORIES_TO_EXTRACT = [
'502456', '100454', '502427', '100472', '110933', '502368', '100939', '100433', '216129',
'550009', '111103', '207733', '205193', '551648', '551648', '206587', '303816', '206591',
'206590', '303803', '551441', '551409', '551433', '551403', '551445', '551413', '551682',
'551668', '551648', '551664', '551644', '551677', '551672', '551652', '205197', '200807',
    '207699', '100542', '100371', '558929', '204994', '402513', '402517', '402515', '508925',
'508930', '501642', '402087', '201780', '302242', '204982', '201794', '302464', '407933',
'502027', '402463', '402475', '501965', '501962', '501963', '501976', '506410', '200859'
]
def is_child(id):
# is id the child of CATEGORY_TO_EXTRACT?
while id in id_to_pid:
if id in CATEGORIES_TO_EXTRACT:
return True
id = id_to_pid[id]
return False
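# A minimal sketch of the ancestry walk (IDs are hypothetical): with
# id_to_pid = {"child": "parent", "parent": "root"} and
# CATEGORIES_TO_EXTRACT = ["parent"], is_child("child") returns True.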
def map_fn(path):
out = open(path + '.OUT', 'a')
for l in open(path):
parts = l.strip().split("\t")
genre_id = parts[-1]
if is_child(genre_id):
out.write(l.strip() + '\n')
return
def reduce_fn(result_list):
return ''
map_reduce.mapreduce(
map_fn=map_fn,
reduce_fn=reduce_fn,
# input_re='/scr/rpryzant/marketing/rakuten/data/products_tsv/*.tsv',
input_re='/Volumes/datasets/rakuten_dump/merchandize_data/products_tsv/*.tsv',
output_filename='/scr/rpryzant/marketing/rakuten/categories',
n_cores=2
)
| [
"[email protected]"
]
| |
21a03c5b4c4fdf5f65f8f33de569e2d41869d67b | 325fde42058b2b82f8a4020048ff910cfdf737d7 | /src/account/azext_account/vendored_sdks/subscription/operations/_subscription_operation_operations.py | ff4eaf571ec7c44d355033680ae253d44a157678 | [
"LicenseRef-scancode-generic-cla",
"MIT"
]
| permissive | ebencarek/azure-cli-extensions | 46b0d18fe536fe5884b00d7ffa30f54c7d6887d1 | 42491b284e38f8853712a5af01836f83b04a1aa8 | refs/heads/master | 2023-04-12T00:28:44.828652 | 2021-03-30T22:34:13 | 2021-03-30T22:34:13 | 261,621,934 | 2 | 5 | MIT | 2020-10-09T18:21:52 | 2020-05-06T01:25:58 | Python | UTF-8 | Python | false | false | 4,364 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class SubscriptionOperationOperations(object):
"""SubscriptionOperationOperations operations.
You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Version of the API to be used with the client request. Current version is 2019-10-01-preview. Constant value: "2019-10-01-preview".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2019-10-01-preview"
self.config = config
def get(
self, operation_id, custom_headers=None, raw=False, **operation_config):
"""Get the status of the pending Microsoft.Subscription API operations.
:param operation_id: The operation ID, which can be found from the
Location field in the generate recommendation response header.
:type operation_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: SubscriptionCreationResult or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.subscription.models.SubscriptionCreationResult or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'operationId': self._serialize.url("operation_id", operation_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
header_dict = {}
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SubscriptionCreationResult', response)
header_dict = {
'Location': 'str',
'Retry-After': 'int',
}
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
client_raw_response.add_headers(header_dict)
return client_raw_response
return deserialized
get.metadata = {'url': '/providers/Microsoft.Subscription/subscriptionOperations/{operationId}'}
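    # A minimal usage sketch (client construction elided; the operation-group
    # attribute name and operation ID are assumptions):
    #   client.subscription_operation.get("00000000-0000-0000-0000-000000000000")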
| [
"[email protected]"
]
| |
47d15cbb5b377278a0596f903530d487f4f3cc6c | 1b512092052c8fe7f6919ee870431ac7b3a65f66 | /pal/examples/MixedHalidesBE2/run_simple_misokg.py | 77cd6d71fbb4de358efaf37a34fd2962c948ae5b | []
| no_license | ClancyLab/PAL | d7b9dd1caeb62d363041b8e4c7f402d6edbf741e | cb0ef048de37014922b943ae6b5eaffd3d43da63 | refs/heads/master | 2022-02-25T05:31:20.590421 | 2019-10-14T19:47:10 | 2019-10-14T19:47:10 | 210,862,362 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,151 | py | from pal.opt import Optimizer
import pal.utils.strings as pal_strings
from pal.constants.solvents import solvents
from pal.kernels.matern import maternKernel52 as mk52
# from pal.objectives.binding_energy import get_binding_energy as BE
from pal.acquisition.misokg import getNextSample_misokg
from pal.stats.MLE import MLE
from pal.stats.MAP import MAP
import os
import copy
# import random
import numpy as np
import cPickle as pickle
def run_misokg(run_index):
# Store data for debugging
IS0 = pickle.load(open("enthalpy_N1_R3_Ukcal-mol", 'r'))
IS1 = pickle.load(open("enthalpy_N1_R2_Ukcal-mol", 'r'))
# Generate the main object
sim = Optimizer()
# Assign simulation properties
#sim.hyperparameter_objective = MAP
sim.hyperparameter_objective = MLE
###################################################################################################
# File names
sim.fname_out = "enthalpy_misokg.dat"
sim.fname_historical = None
# Information sources, in order from expensive to cheap
sim.IS = [
lambda h, c, s: -1.0 * IS0[' '.join([''.join(h), c, s])],
lambda h, c, s: -1.0 * IS1[' '.join([''.join(h), c, s])]
]
sim.costs = [
1.0,
0.1
]
sim.logger_fname = "data_dumps/%d_misokg.log" % run_index
if os.path.exists(sim.logger_fname):
os.system("rm %s" % sim.logger_fname)
os.system("touch %s" % sim.logger_fname)
sim.obj_vs_cost_fname = "data_dumps/%d_misokg.dat" % run_index
sim.mu_fname = "data_dumps/%d_mu_misokg.dat" % run_index
sim.sig_fname = "data_dumps/%d_sig_misokg.dat" % run_index
sim.combos_fname = "data_dumps/%d_combos_misokg.dat" % run_index
sim.hp_fname = "data_dumps/%d_hp_misokg.dat" % run_index
sim.acquisition_fname = "data_dumps/%d_acq_misokg.dat" % run_index
sim.save_extra_files = True
########################################
# Override the possible combinations with the reduced list of IS0
# Because we do this, we should also generate our own historical sample
combos_no_IS = [k[1] + "Pb" + k[0] + "_" + k[2] for k in [key.split() for key in IS0.keys()]]
sim.historical_nsample = 10
choices = np.random.choice(combos_no_IS, sim.historical_nsample, replace=False)
tmp_data = pal_strings.alphaToNum(
choices,
solvents,
mixed_halides=True,
name_has_IS=False)
data = []
for IS in range(len(sim.IS)):
for i, d in enumerate(tmp_data):
h, c, _, s, _ = pal_strings.parseName(pal_strings.parseNum(d, solvents, mixed_halides=True, num_has_IS=False), name_has_IS=False)
c = c[0]
data.append([IS] + d + [sim.IS[IS](h, c, s)])
sim.fname_historical = "data_dumps/%d.history" % run_index
pickle.dump(data, open(sim.fname_historical, 'w'))
simple_data = [d for d in data if d[0] == 0]
pickle.dump(simple_data, open("data_dumps/%d_reduced.history" % run_index, 'w'))
########################################
sim.n_start = 10 # The number of starting MLE samples
sim.reopt = 20
sim.ramp_opt = None
sim.parallel = False
# Possible compositions by default
sim.A = ["Cs", "MA", "FA"]
sim.B = ["Pb"]
sim.X = ["Cl", "Br", "I"]
sim.solvents = copy.deepcopy(solvents)
sim.S = list(set([v["name"] for k, v in sim.solvents.items()]))
sim.mixed_halides = True
sim.mixed_solvents = False
# Parameters for debugging and overwritting
sim.debug = False
sim.verbose = True
sim.overwrite = True # If True, warning, else Error
sim.acquisition = getNextSample_misokg
# Functional forms of our mean and covariance
# MEAN: 4 * mu_alpha + mu_zeta
# COV: sig_alpha * |X><X| + sig_beta * I_N + sig_zeta + MaternKernel(S, weights, sig_m)
SCALE = [2.0, 4.0][int(sim.mixed_halides)]
# _1, _2, _3 used as dummy entries
def mean(X, Y, theta):
mu = np.array([SCALE * theta.mu_alpha + theta.mu_zeta for _ in Y])
return mu
sim.mean = mean
def cov_old(X, Y, theta):
A = theta.sig_alpha * np.dot(np.array(X)[:, 1:-3], np.array(X)[:, 1:-3].T)
B = theta.sig_beta * np.diag(np.ones(len(X)))
C = theta.sig_zeta
D = mk52(np.array(X)[:, -3:-1], [theta.l1, theta.l2], theta.sig_m)
return theta.rho_matrix(X) * (A + B + C + D)
def cov(X0, Y, theta):
A = theta.sig_alpha * np.dot(np.array(X0)[:, :-3], np.array(X0)[:, :-3].T)
B = theta.sig_beta * np.diag(np.ones(len(X0)))
C = theta.sig_zeta
D = mk52(np.array(X0)[:, -3:-1], [theta.l1, theta.l2], theta.sig_m)
Kx = A + B + C + D
L = np.array([
np.array([theta.rho[str(sorted([i, j]))] if i >= j else 0.0 for j in range(theta.n_IS)])
for i in range(theta.n_IS)
])
# Normalize L to stop over-scaling values small
if theta.normalize_L:
L = L / np.linalg.norm(L)
# Force it to be positive semi-definite
Ks = L.dot(L.T)
if theta.normalize_Ks:
Ks = Ks / np.linalg.norm(Ks)
e = np.diag(np.array([theta.e1, theta.e2]))
Ks = e.dot(Ks.dot(e))
return np.kron(Ks, Kx)
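    # Shape note (illustrative): with n_IS information sources and N design
    # points, cov returns the (n_IS*N) x (n_IS*N) matrix kron(Ks, Kx), where
    # Ks = e.(L L^T).e couples the sources and Kx is the single-source kernel.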
sim.cov = cov
sim.theta.bounds = {}
sim.theta.mu_alpha, sim.theta.bounds['mu_alpha'] = None, (1E-3, lambda _, Y: max(Y))
sim.theta.sig_alpha, sim.theta.bounds['sig_alpha'] = None, (1E-2, lambda _, Y: 10.0 * np.var(Y))
sim.theta.sig_beta, sim.theta.bounds['sig_beta'] = None, (1E-2, lambda _, Y: 10.0 * np.var(Y))
sim.theta.mu_zeta, sim.theta.bounds['mu_zeta'] = None, (1E-3, lambda _, Y: max(Y))
sim.theta.sig_zeta, sim.theta.bounds['sig_zeta'] = None, (1E-2, lambda _, Y: 10.0 * np.var(Y))
sim.theta.sig_m, sim.theta.bounds['sig_m'] = None, (1E-2, lambda _, Y: np.var(Y))
sim.theta.l1, sim.theta.bounds['l1'] = None, (1E-1, 1)
sim.theta.l2, sim.theta.bounds['l2'] = None, (1E-1, 1)
sim.theta.e1, sim.theta.bounds['e1'] = None, (1E-1, 1.0)
sim.theta.e2, sim.theta.bounds['e2'] = None, (1E-1, 1.0)
# # NOTE! This is a reserved keyword in misoKG. We will generate a list of the same length
# # of the information sources, and use this for scaling our IS.
sim.theta.rho = {"[0, 0]": 1.0, "[0, 1]": 0.96, "[1, 1]": 1.0}
#sim.theta.rho = {"[0, 0]": 1.0}
#sim.theta.rho = {"[0, 0]": None, "[0, 1]": None, "[1, 1]": None}
sim.theta.bounds['rho [0, 0]'] = (1E-1, 1E1)
# sim.theta.bounds['rho [0, 1]'] = (1E-1, 1E1)
# sim.theta.bounds['rho [1, 1]'] = (1E-1, 1E1)
sim.theta.bounds['rho [0, 0]'] = (0.1, 1.0)
sim.theta.bounds['rho [0, 1]'] = (0.1, 1.0)
sim.theta.bounds['rho [1, 1]'] = (0.1, 1.0)
sim.theta.set_hp_names()
sim.primary_rho_opt = False
sim.update_hp_only_with_IS0 = False
sim.update_hp_only_with_overlapped = False
sim.theta.normalize_L = False
sim.theta.normalize_Ks = False
# This was a test feature that actually over-wrote rho to be PSD
# sim.force_rho_psd = True
###################################################################################################
# Start simulation
sim.run()
| [
"[email protected]"
]
| |
9d9c54c167c1d1608999cca9cd3f8deb88c08f87 | f7a718425de1447836b547f831a120937f1fcf40 | /plumbum/util/datefmt.py | 86b8b0bd0033ccb5a4c68ba1d8edc9352eb63e63 | [
"BSD-3-Clause"
]
| permissive | coyotevz/plumbum-old-1 | ad8ce697ffb4cbd0a6f238f66a1c546800e47024 | c0f769ca525298ab190592d0997575d917a4bed4 | refs/heads/master | 2021-01-20T10:50:32.516766 | 2016-11-18T04:20:32 | 2016-11-18T04:20:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | # -*- coding: utf-8 -*-
import os
import time
from datetime import datetime
# date/time utilities
if os.name == 'nt':
raise NotImplementedError("Not yet implemented for this platform")
else:
time_now, datetime_now = time.time, datetime.now
| [
"[email protected]"
]
| |
908203b5cd69481a591c3a236d23ab58bfe761cd | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_321/ch130_2020_04_01_16_55_20_162558.py | b1a491a59ae05eb3b4d84e3ac5803ce5f576c87e | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | def monta_mala(l):
i = 0
mala = []
while sum(mala) <= 23:
if sum(mala) + l[0] > 23:
break
else:
mala.append(l[i])
i +=1
return mala | [
"[email protected]"
]
| |
4302c2a92d3fd0e16720b5d0bb2c81e469aa422d | 71e8bdddd84338bbb2d77934351d76251c2fd77d | /unique-paths.py | 39eeaaf353b483ccb782eeba05930f71fcbd9851 | []
| no_license | onestarshang/leetcode | 3da20fbec1b42d3565eb95a64ea3f30c29f1e1eb | 0a7aa09a2b95e4caca5b5123fb735ceb5c01e992 | refs/heads/master | 2021-01-09T06:00:06.018037 | 2016-12-17T16:17:49 | 2016-12-17T16:17:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 898 | py | #coding: utf-8
'''
http://oj.leetcode.com/problems/unique-paths/
A robot is located at the top-left corner of a m x n grid (marked 'Start' in the diagram below).
The robot can only move either down or right at any point in time. The robot is trying to reach the bottom-right corner of the grid (marked 'Finish' in the diagram below).
How many possible unique paths are there?
Above is a 3 x 7 grid. How many possible unique paths are there?
Note: m and n will be at most 100.
'''
class Solution:
# @return an integer
def uniquePaths(self, m, n):
if m == 0 or n == 0:
return 0
d = [[0 for i in range(n)] for j in range(m)]
for i in range(m):
for j in range(n):
if i == 0 or j == 0:
d[i][j] = 1
else:
d[i][j] = d[i - 1][j] + d[i][j - 1]
return d[m - 1][n - 1]
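# Sanity check (closed form C(m+n-2, m-1)): a 3 x 7 grid has C(8, 2) = 28.
#   Solution().uniquePaths(3, 7)  ->  28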
| [
"[email protected]"
]
| |
1b5cfbe1f3042ab381911ffa943576eb5a6a5208 | 32904d4841d104143ba0f41cc3aeb749e470f546 | /backend/django/apps/memos/migrations/0008_auto_20191025_2003.py | 3fdde9fa6cac56f3d36a37dc33c06ac8382c74cb | []
| no_license | aurthurm/dispatrace-api-vuejs | 20ec5deee015e69bce7a64dc2d89ccae8941b800 | 56d122318af27ff64755fc515345974631d3026f | refs/heads/master | 2023-01-23T23:03:15.438339 | 2020-10-20T22:09:29 | 2020-10-20T22:09:29 | 219,028,985 | 0 | 1 | null | 2022-12-22T18:31:38 | 2019-11-01T17:08:35 | Vue | UTF-8 | Python | false | false | 540 | py | # Generated by Django 2.2.6 on 2019-10-25 18:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('memos', '0007_memoattachment_memocomment'),
]
operations = [
migrations.AlterField(
model_name='memoattachment',
name='memo',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='memoattachment_attachment', to='memos.Memo'),
),
]
| [
"[email protected]"
]
| |
33e6fb56b398cd6635d41be61972d9290f4fa7f1 | cdd79cef15bdf6a0b9098e27028bbe38607bc288 | /数論/Combination/mcomb.py | 0f7e575c0fe0a8647ae6beff5f8fa66747094a11 | []
| no_license | nord2sudjp/atcoder | ee35a3eb35717485dc62627172de24c9dac102fb | 6b1cc5102a615492cc7ff8a33813bbb954641782 | refs/heads/master | 2023-08-25T11:27:14.205593 | 2021-09-27T05:43:04 | 2021-09-27T05:43:04 | 302,855,505 | 0 | 0 | null | null | null | null | SHIFT_JIS | Python | false | false | 654 | py | # https://atcoder.jp/contests/abc145/submissions/10775904
def comb(n,r,mod):
# nからr通りを選択する
# modは素数であること
if n<r:return 0
k=min(r,n-r)
C=1
for i in range(1,k+1):
C=(C*(n+1-i)*pow(i,mod-2,mod))%mod
return C
# Example usage (n, m and mod must be defined by the caller):
# print(comb(n + m, n, mod))
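# Sanity check: comb(5, 2, 10**9 + 7) == 10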
#
def comb_cal(n):
s=1
m=0
for i in range(n):
s*=2
m+=s-1
#print(s-1)
return((s-1,m))
i,j=comb_cal(3)
print(i,j)
'''
1: 1 : 1
2: 2 1 : 3
3: 3 3 1 : 7
4: 4 6 4 1 : 15
5: 5 10 10 5 1 : 31
6: 6 15 20 15 6 1 : 63
7: 7 21 35 35 21 7 1 : 127
8: 8 28 56 70 56 28 8 1 : 255
9: 9 36 84 126 126 84 36 9 1 : 511
10: 10 45 120 210 252 210 120 45 10 1 : 1023
'''
| [
"[email protected]"
]
| |
4625b7562e6935395144e1da64a15c0b078f999e | 52b5773617a1b972a905de4d692540d26ff74926 | /triangle.py | aabaa8b43b28d0f6063839f8844acbb0a8568919 | []
| no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | # this function is meant to print a triangle
def triangle():
# outer loop is for the rows --> 4
    for i in range(0, 4):
        # inner loop is for the columns --> i+1 stars in row i
        for j in range(0, i + 1):
            print("*", end=" ")
        print("\r")
triangle()
| [
"[email protected]"
]
| |
234fe0703bcd32e0a8e3cea1e43969c845b3ac6e | ba0cbdae81c171bd4be7b12c0594de72bd6d625a | /MyToontown/Toontown2016/toontown/toonbase/ToontownStartDist.py | 6718c47b2d8ad07f5cb8e4b83442d6bf516c3334 | []
| no_license | sweep41/Toontown-2016 | 65985f198fa32a832e762fa9c59e59606d6a40a3 | 7732fb2c27001264e6dd652c057b3dc41f9c8a7d | refs/heads/master | 2021-01-23T16:04:45.264205 | 2017-06-04T02:47:34 | 2017-06-04T02:47:34 | 93,279,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,538 | py | # This is the "main" module that will start a distribution copy of
# Toontown 2016
# Replace some modules that do exec:
import collections
collections.namedtuple = lambda *x: tuple
# This is included in the package by the prepare_client script. It contains the
# PRC file data, (stripped) DC file, and time zone info:
import game_data
# Load all of the packaged PRC config page(s):
from pandac.PandaModules import *
for i, config in enumerate(game_data.CONFIG):
name = 'GameData config page #{0}'.format(i)
loadPrcFileData(name, config)
# The VirtualFileSystem, which has already initialized, doesn't see the mount
# directives in the config(s) yet. We have to force it to load them manually:
vfs = VirtualFileSystem.getGlobalPtr()
mounts = ConfigVariableList('vfs-mount')
for mount in mounts:
mountFile, mountPoint = (mount.split(' ', 2) + [None, None, None])[:2]
mountFile = Filename(mountFile)
mountFile.makeAbsolute()
mountPoint = Filename(mountPoint)
vfs.mount(mountFile, mountPoint, 0)
# To read the DC file as a StringStream, we must override the ClientRepository:
dcStream = StringStream(game_data.DC)
from direct.distributed import ConnectionRepository
import types
class ConnectionRepository_override(ConnectionRepository.ConnectionRepository):
def readDCFile(self, dcFileNames=None):
dcFile = self.getDcFile()
dcFile.clear()
self.dclassesByName = {}
self.dclassesByNumber = {}
self.hashVal = 0
dcImports = {}
readResult = dcFile.read(dcStream, 'DC stream')
if not readResult:
self.notify.error("Could not read dc file.")
self.hashVal = dcFile.getHash()
for n in xrange(dcFile.getNumImportModules()):
moduleName = dcFile.getImportModule(n)[:]
suffix = moduleName.split('/')
moduleName = suffix[0]
suffix=suffix[1:]
if self.dcSuffix in suffix:
moduleName += self.dcSuffix
elif self.dcSuffix == 'UD' and 'AI' in suffix: #HACK:
moduleName += 'AI'
importSymbols = []
for i in xrange(dcFile.getNumImportSymbols(n)):
symbolName = dcFile.getImportSymbol(n, i)
suffix = symbolName.split('/')
symbolName = suffix[0]
suffix=suffix[1:]
if self.dcSuffix in suffix:
symbolName += self.dcSuffix
elif self.dcSuffix == 'UD' and 'AI' in suffix: #HACK:
symbolName += 'AI'
importSymbols.append(symbolName)
self.importModule(dcImports, moduleName, importSymbols)
for i in xrange(dcFile.getNumClasses()):
dclass = dcFile.getClass(i)
number = dclass.getNumber()
className = dclass.getName() + self.dcSuffix
classDef = dcImports.get(className)
if classDef is None and self.dcSuffix == 'UD':
className = dclass.getName() + 'AI'
classDef = dcImports.get(className)
if classDef == None:
className = dclass.getName()
classDef = dcImports.get(className)
if classDef is None:
self.notify.debug("No class definition for %s." % (className))
else:
if type(classDef) == types.ModuleType:
if not hasattr(classDef, className):
self.notify.warning("Module %s does not define class %s." % (className, className))
continue
classDef = getattr(classDef, className)
if type(classDef) != types.ClassType and type(classDef) != types.TypeType:
self.notify.error("Symbol %s is not a class name." % (className))
else:
dclass.setClassDef(classDef)
self.dclassesByName[className] = dclass
if number >= 0:
self.dclassesByNumber[number] = dclass
if self.hasOwnerView():
ownerDcSuffix = self.dcSuffix + 'OV'
ownerImportSymbols = {}
for n in xrange(dcFile.getNumImportModules()):
moduleName = dcFile.getImportModule(n)
suffix = moduleName.split('/')
moduleName = suffix[0]
suffix=suffix[1:]
if ownerDcSuffix in suffix:
moduleName = moduleName + ownerDcSuffix
importSymbols = []
for i in xrange(dcFile.getNumImportSymbols(n)):
symbolName = dcFile.getImportSymbol(n, i)
suffix = symbolName.split('/')
symbolName = suffix[0]
suffix=suffix[1:]
if ownerDcSuffix in suffix:
symbolName += ownerDcSuffix
importSymbols.append(symbolName)
ownerImportSymbols[symbolName] = None
self.importModule(dcImports, moduleName, importSymbols)
for i in xrange(dcFile.getNumClasses()):
dclass = dcFile.getClass(i)
if ((dclass.getName()+ownerDcSuffix) in ownerImportSymbols):
number = dclass.getNumber()
className = dclass.getName() + ownerDcSuffix
classDef = dcImports.get(className)
if classDef is None:
self.notify.error("No class definition for %s." % className)
else:
if type(classDef) == types.ModuleType:
if not hasattr(classDef, className):
self.notify.error("Module %s does not define class %s." % (className, className))
classDef = getattr(classDef, className)
dclass.setOwnerClassDef(classDef)
self.dclassesByName[className] = dclass
ConnectionRepository.ConnectionRepository = ConnectionRepository_override
# We also need timezone stuff:
class dictloader(object):
def __init__(self, dict):
self.dict = dict
def get_data(self, key):
return self.dict.get(key.replace('\\','/'))
import pytz
pytz.__loader__ = dictloader(game_data.ZONEINFO)
# Finally, start the game:
import toontown.toonbase.ToontownStart
| [
"[email protected]"
]
| |
a822fbeeb592b742c4ddbe11b82b3ead6703f4e6 | 26e2c68f929ecc8bb5c20c6b8cd200b66d99def5 | /DjangoDopLEsson/products/migrations/0001_initial.py | 0ce5c7a66193f21ead1baaf75f96d6d86c10e249 | []
| no_license | kirigaikabuto/DjangoLessonsPart | ad19c1da0d1da27830c6fdf1b07353632bbc097d | 4442518ae1f0a8641e066c9a63ff4e55e04d5fe5 | refs/heads/master | 2022-11-28T10:29:54.428001 | 2020-08-03T09:26:42 | 2020-08-03T09:26:42 | 273,497,052 | 0 | 0 | null | 2020-08-03T09:26:43 | 2020-06-19T13:11:15 | Python | UTF-8 | Python | false | false | 590 | py | # Generated by Django 3.0.7 on 2020-06-24 05:41
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', models.TextField()),
('price', models.IntegerField()),
],
),
]
| [
"[email protected]"
]
| |
910aba7d092e6fe88237d6d7c73f25a5638d20a8 | c70dfc0d74b34e41f7d2dbdbd6bfaca2f79af55b | /cyp/models/convnet.py | 7e74e171c0c26477317c14467986b5411a787c77 | [
"MIT"
]
| permissive | jeangolay/pycrop-yield-prediction | a2c65fa3bd704d1d3251318a9afe39bfcd05cf10 | 1b36b673cc58b506ad4d3c8bd6b6917ac5a72d28 | refs/heads/master | 2021-02-18T06:58:31.844163 | 2019-11-25T13:45:55 | 2019-11-25T13:45:55 | 245,172,915 | 0 | 1 | MIT | 2020-03-05T13:39:21 | 2020-03-05T13:39:20 | null | UTF-8 | Python | false | false | 7,994 | py | import torch
from torch import nn
import torch.nn.functional as F
from pathlib import Path
from .base import ModelBase
class ConvModel(ModelBase):
"""
A PyTorch replica of the CNN structured model from the original paper. Note that
this class assumes feature_engineering was run with channels_first=True
Parameters
----------
in_channels: int, default=9
Number of channels in the input data. Default taken from the number of bands in the
MOD09A1 + the number of bands in the MYD11A2 datasets
dropout: float, default=0.5
Default taken from the original paper
dense_features: list, or None, default=None.
output feature size of the Linear layers. If None, default values will be taken from the paper.
The length of the list defines how many linear layers are used.
time: int, default=32
The number of timesteps being used. This is necessary to pass in the initializer since it will
affect the size of the first dense layer, which is the flattened output of the conv layers
savedir: pathlib Path, default=Path('data/models')
The directory into which the models should be saved.
device: torch.device
Device to run model on. By default, checks for a GPU. If none exists, uses
the CPU
"""
def __init__(self, in_channels=9, dropout=0.5, dense_features=None, time=32,
savedir=Path('data/models'), use_gp=True, sigma=1, r_loc=0.5, r_year=1.5,
sigma_e=0.01, sigma_b=0.01,
device=torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')):
# save values for reinitialization
self.in_channels = in_channels
self.dropout = dropout
self.dense_features = dense_features
self.time = time
model = ConvNet(in_channels=in_channels, dropout=dropout,
dense_features=dense_features, time=time)
if dense_features is None:
num_dense_layers = 2
else:
num_dense_layers = len(dense_features)
model_weight = f'dense_layers.{num_dense_layers - 1}.weight'
model_bias = f'dense_layers.{num_dense_layers - 1}.bias'
super().__init__(model, model_weight, model_bias, 'cnn', savedir, use_gp, sigma, r_loc,
r_year, sigma_e, sigma_b, device)
def reinitialize_model(self, time=None):
# the only thing which changes here is the time value, since this affects the
# size of the first dense layer.
if time is None:
time = self.time
model = ConvNet(in_channels=self.in_channels, dropout=self.dropout,
dense_features=self.dense_features, time=time)
if self.device.type != 'cpu':
model = model.cuda()
self.model = model
class ConvNet(nn.Module):
"""
A crop yield conv net.
For a description of the parameters, see the ConvModel class.
Only handles strides of 1 and 2
"""
def __init__(self, in_channels=9, dropout=0.5, dense_features=None, time=32):
super().__init__()
# values taken from the paper
in_out_channels_list = [in_channels, 128, 256, 256, 512, 512, 512]
stride_list = [None, 1, 2, 1, 2, 1, 2]
# Figure out the size of the final flattened conv layer, which
# is dependent on the input size
num_divisors = sum([1 if i == 2 else 0 for i in stride_list])
for i in range(num_divisors):
if time % 2 != 0:
time += 1
time /= 2
if dense_features is None:
dense_features = [2048, 1]
dense_features.insert(0, int(in_out_channels_list[-1] * time * 4))
assert len(stride_list) == len(in_out_channels_list), \
"Stride list and out channels list must be the same length!"
self.convblocks = nn.ModuleList([
ConvBlock(in_channels=in_out_channels_list[i-1],
out_channels=in_out_channels_list[i],
kernel_size=3, stride=stride_list[i],
dropout=dropout) for
i in range(1, len(stride_list))
])
self.dense_layers = nn.ModuleList([
nn.Linear(in_features=dense_features[i-1],
out_features=dense_features[i]) for
i in range(1, len(dense_features))
])
self.initialize_weights()
def initialize_weights(self):
for convblock in self.convblocks:
nn.init.kaiming_uniform_(convblock.conv.weight.data)
# http://cs231n.github.io/neural-networks-2/#init
# see: Initializing the biases
nn.init.constant_(convblock.conv.bias.data, 0)
for dense_layer in self.dense_layers:
nn.init.kaiming_uniform_(dense_layer.weight.data)
nn.init.constant_(dense_layer.bias.data, 0)
def forward(self, x, return_last_dense=False):
"""
If return_last_dense is true, the feature vector generated by the second to last
dense layer will also be returned. This is then used to train a Gaussian Process model.
"""
for block in self.convblocks:
x = block(x)
# flatten
x = x.view(x.shape[0], -1)
for layer_number, dense_layer in enumerate(self.dense_layers):
x = dense_layer(x)
if return_last_dense and (layer_number == len(self.dense_layers) - 2):
output = x
if return_last_dense:
return x, output
return x
class ConvBlock(nn.Module):
"""
A 2D convolution, followed by batchnorm, a ReLU activation, and dropout
"""
def __init__(self, in_channels, out_channels, kernel_size, stride, dropout):
super().__init__()
self.conv = Conv2dSamePadding(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride)
self.batchnorm = nn.BatchNorm2d(num_features=out_channels)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(dropout)
def forward(self, x):
x = self.relu(self.batchnorm(self.conv(x)))
return self.dropout(x)
class Conv2dSamePadding(nn.Conv2d):
"""Represents the "Same" padding functionality from Tensorflow.
See: https://github.com/pytorch/pytorch/issues/3867
This solution is mostly copied from
https://github.com/pytorch/pytorch/issues/3867#issuecomment-349279036
Note that the padding argument in the initializer doesn't do anything now
"""
def forward(self, input):
return conv2d_same_padding(input, self.weight, self.bias, self.stride,
self.dilation, self.groups)
def conv2d_same_padding(input, weight, bias=None, stride=1, dilation=1, groups=1):
# stride and dilation are expected to be tuples.
# first, we'll figure out how much padding is necessary for the rows
input_rows = input.size(2)
filter_rows = weight.size(2)
effective_filter_size_rows = (filter_rows - 1) * dilation[0] + 1
out_rows = (input_rows + stride[0] - 1) // stride[0]
padding_rows = max(0, (out_rows - 1) * stride[0] + effective_filter_size_rows - input_rows)
rows_odd = (padding_rows % 2 != 0)
# same for columns
input_cols = input.size(3)
filter_cols = weight.size(3)
effective_filter_size_cols = (filter_cols - 1) * dilation[1] + 1
out_cols = (input_cols + stride[1] - 1) // stride[1]
padding_cols = max(0, (out_cols - 1) * stride[1] + effective_filter_size_cols - input_cols)
cols_odd = (padding_cols % 2 != 0)
if rows_odd or cols_odd:
input = F.pad(input, [0, int(cols_odd), 0, int(rows_odd)])
return F.conv2d(input, weight, bias, stride,
padding=(padding_rows // 2, padding_cols // 2),
dilation=dilation, groups=groups)
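# A minimal smoke-test sketch, assuming PyTorch is installed and using only
# names defined above: "same" padding should preserve the spatial size at
# stride 1, and return_last_dense should expose the feature vector that the
# Gaussian Process is trained on.
if __name__ == '__main__':
    import torch
    conv = Conv2dSamePadding(in_channels=9, out_channels=8,
                             kernel_size=3, stride=1)
    x = torch.randn(2, 9, 32, 32)  # (batch, channels, bins, timesteps)
    assert conv(x).shape[2:] == x.shape[2:]  # height and width unchanged
    net = ConvNet(in_channels=9, time=32)
    pred, features = net(x, return_last_dense=True)
    print(pred.shape, features.shape)  # e.g. (2, 1) and (2, 2048)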
| [
"[email protected]"
]
| |
d857d210c85ab7e5b44aa427f2403019ebe176a1 | f08d137b7821d79672c91e5f06967ffa1f90e278 | /.history/Python/Main_py_20211021101357.py | 24180ebf2cf62ee0838fe71a2cd46e81d5e858e6 | []
| no_license | anhviet-key/hello-cac-ban | a39ffb1731a77dd171523ea145f5d8b62fccde7c | 18411b51add7e3277d42869f8a50c67111337983 | refs/heads/main | 2023-08-23T09:02:01.074958 | 2021-10-27T07:48:47 | 2021-10-27T07:48:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | from Sub_py import emailProcess, print_Mess
def main():
emails = ["[email protected]", "[email protected]", "[email protected]"]
for email in emails:
        emailProcess(email)
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
9dcddbcc8d5d3f81e9b43c1b674bb99bf74081e6 | 495943f075f6a641d456d66deebb208847cb6c50 | /bases/bases.py | 4b971bac31d64037ece100affafdb194b8cec092 | []
| no_license | LukazDane/CS-1.3 | 377a6ef77c3db4a497f492ed73a3ba2487531b93 | 9cee1f71b9374a54a1fe336cd1f8db1a51275ef8 | refs/heads/master | 2022-07-04T00:26:48.498036 | 2020-05-11T02:37:00 | 2020-05-11T02:37:00 | 255,189,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,209 | py | import string
import math
# ##### https://en.wikipedia.org/wiki/List_of_Unicode_characters
# Hint: Use these string constants to encode/decode hexadecimal digits and more
# string.digits is '0123456789'
# string.hexdigits is '0123456789abcdefABCDEF'
# string.ascii_lowercase is 'abcdefghijklmnopqrstuvwxyz'
# string.ascii_uppercase is 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# string.ascii_letters is ascii_lowercase + ascii_uppercase
# string.printable is digits + ascii_letters + punctuation + whitespace
def decode(digits, base):
    """Decode given digits in given base to number in base 10.
    digits: str -- string representation of number (in given base)
    base: int -- base of given number
    return: int -- integer representation of number (in base 10)"""
# Handle up to base 36 [0-9a-z]
assert 2 <= base <= 36, 'base is out of range: {}'.format(base)
    # Sum each digit's value times base ** position (works for bases 2-36)
ndec = 0
digits = digits[::-1]
# if base == 2:
for i in range(len(digits)):
digit = int(digits[i], base=base)
ndec += digit * base ** i
return ndec
# elif base == 16:
# x = int(str(digits), 16)
# print(x)
# else:
# reverse the digits
# digits = digits[::-1]
# # print(digits)
# # variable to hold our answer
# num = 0
# # loop through each index
# for x in range(len(digits)):
# # variable to hold each index while we work it out
# uni = digits[x]
# if uni.isdigit():
# # if already a number (0-9) keep it
# uni = int(uni)
# # print(uni)
# else: # assumes alphabet
# # convert to unicode uppercase val, subtract calue of A and add 10 to get base 10 number
# uni = ord(uni.upper())-ord('A')+10
# # unicode a -> A = 65 | A(65) - A(65) + 10 = 10(a)
# # unicode b -> B = 66 | B(66) - A(65) + 10 = 11(b)
# # print(uni)
# num += uni*(base**x)
# decoded.append(num)
# print(decoded)
print(decode('11101100', 2))
print(decode('fff', 16))
print(decode("1a2b", 32))
def encode(number, base):
"""Encode given number in base 10 to digits in given base.
number: int -- integer representation of number (in base 10)
base: int -- base to convert to
return: str -- string representation of number (in given base)"""
# Handle up to base 36 [0-9a-z]
assert 2 <= base <= 36, 'base is out of range: {}'.format(base)
# Handle unsigned numbers only for now
assert number >= 0, 'number is negative: {}'.format(number)
# https://stackoverflow.com/questions/1181919/python-base-36-encoding
base_36 = string.digits + string.ascii_uppercase
result = []
while number > 0:
q = number / base
remainder = number % base
sep_q = str(q).split(".")
number = int(sep_q[0])
if 9 < remainder < base:
remainder = base_36[remainder].lower()
result.insert(0, str(remainder))
return "".join(result)
def convert(digits, base1, base2):
"""Convert given digits in base1 to digits in base2.
digits: str -- string representation of number (in base1)
base1: int -- base of given number
base2: int -- base to convert to
return: str -- string representation of number (in base2)"""
# Handle up to base 36 [0-9a-z]
assert 2 <= base1 <= 36, 'base1 is out of range: {}'.format(base1)
assert 2 <= base2 <= 36, 'base2 is out of range: {}'.format(base2)
decoded = decode(digits, base1)
return encode(decoded, base2)
def main():
"""Read command-line arguments and convert given digits between bases."""
import sys
args = sys.argv[1:] # Ignore script file name
if len(args) == 3:
digits = args[0]
base1 = int(args[1])
base2 = int(args[2])
# Convert given digits between bases
result = convert(digits, base1, base2)
print('{} in base {} is {} in base {}'.format(
digits, base1, result, base2))
else:
print('Usage: {} digits base1 base2'.format(sys.argv[0]))
print('Converts digits from base1 to base2')
if __name__ == '__main__':
main()
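# Round-trip sanity sketch (values follow from the definitions above):
#   decode('ff', 16)      -> 255
#   encode(255, 16)       -> 'ff'
#   convert('ff', 16, 2)  -> '11111111'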
| [
"[email protected]"
]
| |
4faf1c90487d459da70158af665f0ebc2c9cf364 | d75fc0ae459066bfb15187d1c902e22000153dc4 | /TestScript/tenderverificationONSupplierStatus.py | 44e27edee4b8c97a036372329b6aa5c7f6dc4e37 | []
| no_license | sureshkumarkadi/Project | 875a05a752164ff9620286ab8261c7774acc4f27 | 4652edfa6ac47d6f44bd41e03314d96753e09d92 | refs/heads/master | 2020-03-25T19:52:23.124215 | 2018-08-09T05:28:08 | 2018-08-09T05:28:08 | 144,104,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,308 | py | #-------------------------------------------------------------------------------
# Name: module2
# Purpose:
#
# Author: mathew.jacob
#
# Created: 25/08/2016
# Copyright: (c) mathew.jacob 2016
# Licence: <your licence>
#-------------------------------------------------------------------------------
from selenium.webdriver.support.ui import WebDriverWait
import unittest
import sys
import os
import time
import traceback
dir_path = os.path.dirname(os.path.realpath(__file__))
folder_path=os.path.abspath(os.path.join(dir_path, os.pardir))
sys.path.insert(0,folder_path+"\Library")
sys.path.insert(0,folder_path+"\Syslibrary")
sys.path.insert(0,folder_path+"\Data")
sys.path.insert(0,folder_path+"\Object")
from launcheTender import LauncheTenderclass
from tenderDetails import Tenderdetails
from tenderDetails import SubmitTenderclass
from datadriven import DataDriver
from setupenviron import setupValue
from logouteTender import Userprofilemenu
##from RESTAPI import ReopentenderusingRESTAPIclass
from RESTAPIStaging import ReopentenderusingRESTAPIclass
from logdriver import logvalue
logs=logvalue.logger
logclose=logvalue()
ftime = time.mktime(time.localtime())
ptime=time.strftime("%d-%m-%Y_%H%M%S", time.localtime(ftime))
#filename = 'TestCase-100358-{0}.png'.format(ptime)
tf = 'test_TenderverificationONSupplierStatus'
filename = 'Testcase-%s.png' %(tf)
path= setupValue().screenpath
fullpath = os.path.join(path,filename)
#Test case Number = 100358
class TenderverificationONSupplierStatus(unittest.TestCase):
def test_TenderverificationONSupplierStatus(self):
try:
browserInstance = setupValue()
browser = browserInstance.setupfunction()
browser.implicitly_wait(5)
time.sleep(1)
LauncheTender1 = LauncheTenderclass()
browser = LauncheTender1.openURL(browser)
browser.implicitly_wait(5)
time.sleep(1)
tenderDetails = Tenderdetails()
browser = LauncheTender1.subcontractorValidlogin(browser)
#browser = LauncheTender1.list_Organisation(browser)
#browser = LauncheTender1.verifyorganisationdetails(browser)
browser = LauncheTender1.list_project(browser)
time.sleep(1)
browser = tenderDetails.Subcontratorproject(browser)
time.sleep(2)
tenderverifySupplierstatus1 = DataDriver()
tenderverifySupplierstatus_path = tenderverifySupplierstatus1.readfromXML(folder_path+'\Object\TenderPage.xml','eTender','tenderverifySupplierstatus')
time.sleep(1)
tenderverifySupplierstatus = browser.find_element_by_xpath(tenderverifySupplierstatus_path) #Webelement for values
time.sleep(1)
self.assertEqual(tenderverifySupplierstatus.text,'Review pending')
logs.info("Test Case No : 100358 Passed Successfully")
except Exception:
logs.error("Validation with Test Case No: 100358 failed")
browser.save_screenshot(fullpath)
traceback.print_exc(file=sys.stdout)
self.fail("Test Case No: 100358 failed")
browser.implicitly_wait(5)
finally:
LauncheTender1.closebrowser(browser)
if __name__ == '__main__':
unittest.main() | [
"[email protected]"
]
| |
d00a26baca490f31f439effc72c908cdb5f1a988 | f8bdc46409c9f5eaf3d85ef157260589462d941a | /demos/instance_occlsegm/instance_occlsegm_lib/contrib/instance_occlsegm/models/mask_rcnn/mask_rcnn_train_chain.py | dcac6d9a34651571c6619b60979f08467e428520 | [
"MIT",
"BSD-3-Clause"
]
| permissive | start-jsk/jsk_apc | 2e268f8b65e9d7f4f9cc4416dc8383fd0a7b9750 | c4e349f45ef38457dc774e33f6902acf1a1540a6 | refs/heads/master | 2023-09-05T09:06:24.855510 | 2023-09-01T17:10:12 | 2023-09-01T17:10:12 | 25,620,908 | 36 | 25 | NOASSERTION | 2023-09-01T17:10:14 | 2014-10-23T05:28:31 | Common Lisp | UTF-8 | Python | false | false | 9,218 | py | # Modified works:
# --------------------------------------------------------
# Copyright (c) 2017 - 2018 Kentaro Wada.
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
# This is modified work of FasterRCNNTrainChain:
# --------------------------------------------------------
# Copyright (c) 2017 Preferred Networks, Inc.
# Licensed under The MIT License [see LICENSE for details]
# https://github.com/chainer/chainercv
# --------------------------------------------------------
import numpy as np
import chainer
from chainer import cuda
import chainer.functions as F
from chainercv.links.model.faster_rcnn.utils.anchor_target_creator import\
AnchorTargetCreator
from .utils import ProposalTargetCreator
class MaskRCNNTrainChain(chainer.Chain):
"""Calculate losses for Faster R-CNN and report them.
This is used to train Faster R-CNN in the joint training scheme
[#FRCNN]_.
The losses include:
* :obj:`rpn_loc_loss`: The localization loss for \
Region Proposal Network (RPN).
* :obj:`rpn_cls_loss`: The classification loss for RPN.
* :obj:`roi_loc_loss`: The localization loss for the head module.
* :obj:`roi_cls_loss`: The classification loss for the head module.
.. [#FRCNN] Shaoqing Ren, Kaiming He, Ross Girshick, Jian Sun. \
Faster R-CNN: Towards Real-Time Object Detection with \
Region Proposal Networks. NIPS 2015.
Args:
faster_rcnn (~chainercv.links.model.faster_rcnn.FasterRCNN):
A Faster R-CNN model that is going to be trained.
rpn_sigma (float): Sigma parameter for the localization loss
of Region Proposal Network (RPN). The default value is 3,
which is the value used in [#FRCNN]_.
        roi_sigma (float): Sigma parameter for the localization loss of
the head. The default value is 1, which is the value used
in [#FRCNN]_.
anchor_target_creator: An instantiation of
:obj:`chainercv.links.model.faster_rcnn.AnchorTargetCreator`.
        proposal_target_creator: An instantiation of
:obj:`chainercv.links.model.faster_rcnn.ProposalTargetCreator`.
"""
def __init__(self, mask_rcnn, rpn_sigma=3., roi_sigma=1.,
anchor_target_creator=AnchorTargetCreator(),
proposal_target_creator=ProposalTargetCreator(),
):
super(MaskRCNNTrainChain, self).__init__()
with self.init_scope():
self.mask_rcnn = mask_rcnn
self.rpn_sigma = rpn_sigma
self.roi_sigma = roi_sigma
self.anchor_target_creator = anchor_target_creator
self.proposal_target_creator = proposal_target_creator
self.loc_normalize_mean = mask_rcnn.loc_normalize_mean
self.loc_normalize_std = mask_rcnn.loc_normalize_std
def __call__(self, imgs, bboxes, labels, masks, scales):
"""Forward Faster R-CNN and calculate losses.
Here are notations used.
* :math:`N` is the batch size.
* :math:`R` is the number of bounding boxes per image.
Currently, only :math:`N=1` is supported.
Args:
imgs (~chainer.Variable): A variable with a batch of images.
bboxes (~chainer.Variable): A batch of bounding boxes.
Its shape is :math:`(N, R, 4)`.
labels (~chainer.Variable): A batch of labels.
Its shape is :math:`(N, R)`. The background is excluded from
the definition, which means that the range of the value
is :math:`[0, L - 1]`. :math:`L` is the number of foreground
classes.
            masks (~chainer.Variable): A batch of instance masks for the
                bounding boxes.
            scales (float or ~chainer.Variable): Amount of scaling applied to
                each raw image during preprocessing.
Returns:
chainer.Variable:
Scalar loss variable.
This is the sum of losses for Region Proposal Network and
the head module.
"""
if isinstance(bboxes, chainer.Variable):
bboxes = bboxes.data
if isinstance(labels, chainer.Variable):
labels = labels.data
if isinstance(scales, chainer.Variable):
scales = scales.data
scales = cuda.to_cpu(scales)
batch_size, _, H, W = imgs.shape
img_size = (H, W)
features = self.mask_rcnn.extractor(imgs)
rpn_locs, rpn_scores, rois, roi_indices, anchor = self.mask_rcnn.rpn(
features, img_size, scales)
if any(len(b) == 0 for b in bboxes):
return chainer.Variable(self.xp.array(0, dtype=np.float32))
batch_indices = range(batch_size)
sample_rois = []
sample_roi_indices = []
gt_roi_locs = []
gt_roi_labels = []
gt_roi_masks = []
for batch_index, bbox, label, mask in \
zip(batch_indices, bboxes, labels, masks):
roi = rois[roi_indices == batch_index]
sample_roi, gt_roi_loc, gt_roi_label, gt_roi_mask = \
self.proposal_target_creator(roi, bbox, label, mask)
del roi
sample_roi_index = self.xp.full(
(len(sample_roi),), batch_index, dtype=np.int32)
sample_rois.append(sample_roi)
sample_roi_indices.append(sample_roi_index)
del sample_roi, sample_roi_index
gt_roi_locs.append(gt_roi_loc)
gt_roi_labels.append(gt_roi_label)
gt_roi_masks.append(gt_roi_mask)
del gt_roi_loc, gt_roi_label, gt_roi_mask
sample_rois = self.xp.concatenate(sample_rois, axis=0)
sample_roi_indices = self.xp.concatenate(sample_roi_indices, axis=0)
gt_roi_locs = self.xp.concatenate(gt_roi_locs, axis=0)
gt_roi_labels = self.xp.concatenate(gt_roi_labels, axis=0)
gt_roi_masks = self.xp.concatenate(gt_roi_masks, axis=0)
roi_cls_locs, roi_scores, roi_masks = self.mask_rcnn.head(
features, sample_rois, sample_roi_indices)
# RPN losses
gt_rpn_locs = []
gt_rpn_labels = []
for bbox, rpn_loc, rpn_score in zip(bboxes, rpn_locs, rpn_scores):
gt_rpn_loc, gt_rpn_label = self.anchor_target_creator(
bbox, anchor, img_size)
gt_rpn_locs.append(gt_rpn_loc)
gt_rpn_labels.append(gt_rpn_label)
del gt_rpn_loc, gt_rpn_label
gt_rpn_locs = self.xp.concatenate(gt_rpn_locs, axis=0)
gt_rpn_labels = self.xp.concatenate(gt_rpn_labels, axis=0)
rpn_locs = F.concat(rpn_locs, axis=0)
rpn_scores = F.concat(rpn_scores, axis=0)
rpn_loc_loss = _fast_rcnn_loc_loss(
rpn_locs, gt_rpn_locs, gt_rpn_labels, self.rpn_sigma)
rpn_cls_loss = F.sigmoid_cross_entropy(rpn_scores, gt_rpn_labels)
# Losses for outputs of the head.
n_sample = len(roi_cls_locs)
roi_cls_locs = roi_cls_locs.reshape((n_sample, -1, 4))
roi_locs = roi_cls_locs[self.xp.arange(n_sample), gt_roi_labels]
roi_loc_loss = _fast_rcnn_loc_loss(
roi_locs, gt_roi_locs, gt_roi_labels, self.roi_sigma)
roi_cls_loss = F.softmax_cross_entropy(roi_scores, gt_roi_labels)
        # Losses for outputs of the mask branch
n_instance, n_fg_class_x_n_mask_class, roi_H, roi_W = roi_masks.shape
assert n_sample == n_instance
n_fg_class = self.mask_rcnn.n_class - 1
n_mask_class = n_fg_class_x_n_mask_class // n_fg_class
roi_masks = roi_masks.reshape(
(n_instance, n_mask_class, n_fg_class, roi_H, roi_W)
)
roi_mask_loss = F.softmax_cross_entropy(
roi_masks[np.arange(n_instance), :, gt_roi_labels - 1, :, :],
gt_roi_masks,
)
# roi_mask_loss = F.sigmoid_cross_entropy(
# roi_masks[np.arange(n_sample), gt_roi_labels - 1, :, :],
# gt_roi_masks)
loss = rpn_loc_loss + rpn_cls_loss + roi_loc_loss + roi_cls_loss + \
roi_mask_loss
chainer.reporter.report({'rpn_loc_loss': rpn_loc_loss,
'rpn_cls_loss': rpn_cls_loss,
'roi_loc_loss': roi_loc_loss,
'roi_cls_loss': roi_cls_loss,
'roi_mask_loss': roi_mask_loss,
'loss': loss},
self)
return loss
def _smooth_l1_loss(x, t, in_weight, sigma):
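    # Piecewise (Huber-style) penalty with the Faster R-CNN parameterization,
    # where d = in_weight * (x - t):
    #   0.5 * (sigma * d) ** 2   if |d| <  1 / sigma ** 2
    #   |d| - 0.5 / sigma ** 2   otherwise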
sigma2 = sigma ** 2
diff = in_weight * (x - t)
abs_diff = F.absolute(diff)
flag = (abs_diff.data < (1. / sigma2)).astype(np.float32)
y = (flag * (sigma2 / 2.) * F.square(diff) +
(1 - flag) * (abs_diff - 0.5 / sigma2))
return F.sum(y)
def _fast_rcnn_loc_loss(pred_loc, gt_loc, gt_label, sigma):
xp = chainer.cuda.get_array_module(pred_loc)
in_weight = xp.zeros_like(gt_loc)
# Localization loss is calculated only for positive rois.
in_weight[gt_label > 0] = 1
loc_loss = _smooth_l1_loss(pred_loc, gt_loc, in_weight, sigma)
    # Normalize by total number of negative and positive rois.
loc_loss /= xp.sum(gt_label >= 0)
return loc_loss
| [
"[email protected]"
]
| |
fe1157f372e8999831140b5c8835adac1ce983b2 | bc572eca7a03aec83ee55300887a21cad3dbd160 | /tools/Polygraphy/tests/comparator/test_postprocess.py | 0cbba453125e5baa918d358612e41d35c9cb243d | [
"Apache-2.0",
"BSD-3-Clause",
"ISC",
"BSD-2-Clause",
"MIT"
]
| permissive | wuqiangch/TensorRT | fba0029dc5c0b3b9ffa091e45f26d8d10d702393 | d04182cd0086c70db4a8ad30e0d7675c4eb33782 | refs/heads/master | 2023-05-31T21:04:01.079351 | 2021-06-23T20:37:20 | 2021-06-25T19:39:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,520 | py | #
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from polygraphy.comparator import PostprocessFunc, IterationResult
class TestTopK(object):
def test_basic(self):
arr = np.array([1, 2, 3, 4, 5], dtype=np.float32)
func = PostprocessFunc.topk_func(k=3)
top_k = func(IterationResult({"x": arr}))
assert np.all(top_k["x"] == [4, 3, 2])
def test_k_can_exceed_array_len(self):
arr = np.array([1, 2, 3, 4, 5], dtype=np.float32)
func = PostprocessFunc.topk_func(k=10)
top_k = func(IterationResult({"x": arr}))
assert np.all(top_k["x"] == [4, 3, 2, 1, 0])
def test_per_output_top_k(self):
arr = np.array([1, 2, 3, 4, 5], dtype=np.float32)
func = PostprocessFunc.topk_func(k={"": 10, "y": 2})
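        # The "" entry acts as the default k for outputs that are not named
        # explicitly: here x falls back to k=10 while y is capped at k=2.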
top_k = func(IterationResult({"x": arr, "y": arr}))
assert np.all(top_k["x"] == [4, 3, 2, 1, 0])
assert np.all(top_k["y"] == [4, 3])
| [
"[email protected]"
]
| |
a6d0047071d0b232286f98b5287c49a605e6a21e | 320a98d428bf06eff6f3f209b1eadeb366a65482 | /common/version.py | c4dd9de7ffd4c1b577a51386ff7b1cc74c444cd3 | []
| no_license | Ryan--Yang/share | 6fe8b21918206fed903bd7a315216b47e58f697e | 4acc658f7c0a8f1b50f7b5c0b8884b96fe1e137d | refs/heads/master | 2020-12-31T02:42:22.125477 | 2013-12-04T07:27:53 | 2013-12-24T01:54:38 | 14,791,494 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,644 | py | from util import *
default_java_file = '/usr/lib/jvm/default-java'
gcc_file = '/usr/bin/gcc'
def handle_option():
global args
    parser = argparse.ArgumentParser(description = 'Set up the version of Java or GCC',
formatter_class = argparse.RawTextHelpFormatter,
epilog = '''
examples:
  python %(prog)s -s 1.5
  python %(prog)s -s 1.7.0_45
''')
parser.add_argument('-s', '--set-version', dest='set_version', help='set version')
parser.add_argument('-g', '--get', dest='get_version', help='get version', action='store_true')
parser.add_argument('-t', '--target', dest='target', help='target to set version with', choices=['java', 'gcc'], default='gcc')
args = parser.parse_args()
if len(sys.argv) <= 1:
parser.print_help()
def setup():
pass
def get_version():
if not args.get_version:
return
if args.target == 'java':
get_version_java()
elif args.target == 'gcc':
get_version_gcc()
def set_version():
if not args.set_version:
return
if args.target == 'java':
set_version_java()
elif args.target == 'gcc':
set_version_gcc()
def get_version_java():
java_version_result = execute('java -version', silent=True, catch=True)
match = re.match('java version "(.*)"', java_version_result)
java_version = match.group(1)
java_home_result = os.getenv('JAVA_HOME')
if java_home_result:
        match = re.match('.*jdk(.*)', java_home_result)
if match:
java_home = match.group(1)
else:
error('JAVA_HOME is not expected')
else:
java_home = 'NULL'
if os.path.exists(default_java_file):
default_java_result = execute('ls -l ' + default_java_file, silent=True, catch=True)
match = re.match('.*jdk(.*)', default_java_result)
if match:
default_java = match.group(1)
else:
error('default-java is not expected')
else:
default_java = 'NULL'
#info(java_version_result)
#if java_home_result:
# info(java_home_result)
#if default_java_result:
# info(default_java_result)
info('java -v: ' + java_version)
info('JAVA_HOME: ' + java_home)
info('default-java: ' + default_java)
def set_version_java():
if args.set_version == '1.5':
version = '1.5.0_22'
elif args.set_version == '1.6':
version = '1.6.0_45'
elif args.set_version == '1.7':
version = '1.7.0_45'
else:
version = args.set_version
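    # Register each JDK tool with update-alternatives at a high priority and
    # then let the user confirm the selection interactively via --config.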
execute('sudo update-alternatives --install /usr/bin/javac javac /usr/lib/jvm/jdk' + version + '/bin/javac 50000')
execute('sudo update-alternatives --install /usr/bin/java java /usr/lib/jvm/jdk' + version + '/bin/java 50000')
execute('sudo update-alternatives --install /usr/bin/javaws javaws /usr/lib/jvm/jdk' + version + '/bin/javaws 50000')
execute('sudo update-alternatives --install /usr/bin/javap javap /usr/lib/jvm/jdk' + version + '/bin/javap 50000')
execute('sudo update-alternatives --install /usr/bin/jar jar /usr/lib/jvm/jdk' + version + '/bin/jar 50000')
execute('sudo update-alternatives --install /usr/bin/jarsigner jarsigner /usr/lib/jvm/jdk' + version + '/bin/jarsigner 50000')
execute('sudo update-alternatives --config javac')
execute('sudo update-alternatives --config java')
execute('sudo update-alternatives --config javaws')
execute('sudo update-alternatives --config javap')
execute('sudo update-alternatives --config jar')
execute('sudo update-alternatives --config jarsigner')
execute('sudo rm -f ' + default_java_file)
execute('sudo ln -s /usr/lib/jvm/jdk' + version + ' /usr/lib/jvm/default-java')
get_version_java()
def get_version_gcc():
gcc_version_result = execute('ls -l ' + gcc_file, silent=True, catch=True)
match = re.match('.+gcc-(.+)', gcc_version_result)
if match:
gcc_version = match.group(1)
else:
error('gcc is not expected')
info('gcc version: ' + gcc_version)
def set_version_gcc():
version = args.set_version
execute('sudo rm -f /usr/bin/gcc', silent=True)
execute('sudo ln -s /usr/bin/gcc-' + version + ' /usr/bin/gcc', silent=True)
execute('sudo rm -f /usr/bin/g++', silent=True)
execute('sudo ln -s /usr/bin/g++-' + version + ' /usr/bin/g++', silent=True)
execute('sudo rm -f /usr/bin/cc', silent=True)
execute('sudo ln -s /usr/bin/gcc /usr/bin/cc', silent=True)
get_version_gcc()
if __name__ == "__main__":
handle_option()
setup()
get_version()
set_version() | [
"[email protected]"
]
| |
4de8e6f3f997c044235468a34eb39dc9ca07df91 | df458ae26f8e1b59e4fc4273701f77cc2e340a3c | /tests/test_viewgroups.py | a7c869008bd24a94bbc80a387a23758393244f2e | [
"BSD-3-Clause"
]
| permissive | radiac/django-fastview | 64bcf3f07ed62a1863b5a402d1fedc998ed433f3 | daf898f416c3f89efc3ef290f8158232d055af36 | refs/heads/develop | 2023-03-20T22:49:14.789026 | 2022-10-02T19:43:22 | 2022-10-02T19:43:22 | 230,815,383 | 13 | 1 | NOASSERTION | 2023-03-04T05:44:10 | 2019-12-29T23:26:56 | Python | UTF-8 | Python | false | false | 1,637 | py | """
Test viewgroup
"""
from fastview import permissions
from fastview.viewgroups import ModelViewGroup
from .app.models import Entry
def test_modelviewgroup_permissions__permissions_set_on_subclass():
class TestPermission(permissions.Permission):
pass
test_permission = TestPermission()
class Entries(ModelViewGroup):
permission = test_permission
model = Entry
# Permissions are set at instantiation
entries = Entries()
assert entries.index_view.get_permission() == test_permission
assert entries.detail_view.get_permission() == test_permission
assert entries.create_view.get_permission() == test_permission
assert entries.update_view.get_permission() == test_permission
assert entries.delete_view.get_permission() == test_permission
# Not at definition
assert isinstance(Entries.index_view.get_permission(), permissions.Denied)
assert isinstance(Entries.detail_view.get_permission(), permissions.Denied)
assert isinstance(Entries.create_view.get_permission(), permissions.Denied)
assert isinstance(Entries.update_view.get_permission(), permissions.Denied)
assert isinstance(Entries.delete_view.get_permission(), permissions.Denied)
def test_modelviewgroup_index__index_lists(add_url, client, user_owner):
class Entries(ModelViewGroup):
permission = permissions.Public()
model = Entry
Entry.objects.create(author=user_owner)
Entry.objects.create(author=user_owner)
add_url("", Entries().include(namespace="entries"))
response = client.get("/")
assert len(response.context_data["object_list"]) == 2
| [
"[email protected]"
]
| |
10e8fdc24e2631260da50fd20f4deaaab12510ab | c1e0874f55d05ee990ed2d637c2910701b32d246 | /soft_uni_fundamentals/Data Types and Variables/exercises/03_elevator.py | 6b18bc3621dfad99d97435503cea446731f70608 | []
| no_license | borislavstoychev/Soft_Uni | 5d047bef402c50215e0abc825476326889ffd0be | ccc0b2fb18f8ad6809b475eb20e82a9e4eb4b0b0 | refs/heads/master | 2023-05-11T12:27:08.672058 | 2021-05-28T18:00:10 | 2021-05-28T18:00:10 | 277,556,731 | 3 | 2 | null | 2021-02-11T19:57:37 | 2020-07-06T13:58:23 | Python | UTF-8 | Python | false | false | 119 | py | n = int(input())
p = int(input())
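# courses = ceil(n / p): one trip per full elevator load plus an extra trip
# for any remainder (equivalently, courses = -(-n // p) or math.ceil(n / p)).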
if n % p == 0:
courses = n//p
else:
courses = n // p + 1
print(courses) | [
"[email protected]"
]
| |
5415aabb59728ebc4a5c6162aa5db91bddd6490d | a9e3f3ad54ade49c19973707d2beb49f64490efd | /Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/cms/envs/devstack_docker.py | 2eece814deae2c31cf5456b34af9e0f386c38c4e | [
"AGPL-3.0-only",
"AGPL-3.0-or-later",
"MIT"
]
| permissive | luque/better-ways-of-thinking-about-software | 8c3dda94e119f0f96edbfe5ba60ca6ec3f5f625d | 5809eaca7079a15ee56b0b7fcfea425337046c97 | refs/heads/master | 2021-11-24T15:10:09.785252 | 2021-11-22T12:14:34 | 2021-11-22T12:14:34 | 163,850,454 | 3 | 1 | MIT | 2021-11-22T12:12:31 | 2019-01-02T14:21:30 | JavaScript | UTF-8 | Python | false | false | 129 | py | """ Overrides for Docker-based devstack. """
from .devstack import * # pylint: disable=wildcard-import, unused-wildcard-import
| [
"[email protected]"
]
| |
6653c822271f595c1ee6011406a88613852cd291 | 3325f16c04ca8e641cbd58e396f983542b793091 | /Seção 13 - Leitura e Escrita em Arquivos/Exercícios da Seção/Exercício_04.py | 3140f4c4229bb75d6849a37c399fbae14f608b1f | []
| no_license | romulovieira777/Programacao_em_Python_Essencial | ac929fbbd6a002bcc689b8d6e54d46177632c169 | e81d219db773d562841203ea370bf4f098c4bd21 | refs/heads/master | 2023-06-11T16:06:36.971113 | 2021-07-06T20:57:25 | 2021-07-06T20:57:25 | 269,442,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,434 | py | """
4) Write a program that receives a text file from the user and shows on
screen how many letters are vowels and how many are consonants
"""
from Exercício_03 import conta_vogais
def conta_consoantes(txt):
    """Return the number of consonants in the text received as a parameter.
    If the argument is not a string, a None value is returned."""
try:
consoantes = ['b', 'c', 'd', 'f', 'g', 'h', 'j', 'k', 'l', 'm', 'n',
'p', 'q', 'r', 's', 't', 'v', 'w', 'x', 'y', 'z']
txt = txt.lower()
qtd = 0
for consoante in consoantes:
qtd += txt.count(consoante)
return qtd
except AttributeError:
return None
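# For instance (sketch): conta_consoantes("Python") -> 5, since 'y' is in the
# consonant list above, while conta_consoantes(42) -> None.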
if __name__ == '__main__':
    nome_arquivo = str(input("Enter the file path or the file name "
                             "(if the file is in the same folder as the program): "))
nome_arquivo = nome_arquivo if ".txt" in nome_arquivo else nome_arquivo + ".txt"
try:
with open(nome_arquivo, 'r', encoding='utf-8') as arquivo:
texto = arquivo.read()
            print(f"\nThe text file has {conta_vogais(texto)} vowels and {conta_consoantes(texto)} consonants!")
except FileNotFoundError:
        print("\nThe given file was not found!")
except OSError:
        print("\nThe OS does not accept special characters in file names!")
| [
"[email protected]"
]
| |
85459e0cbd82140fa14886b0e5285bd8b8a76a28 | b1303152c3977a22ff9a0192c0c32310e65a6d77 | /python/567.permutation-in-string.py | c5487e09f925108dce4b4931b43c66fadd915fda | [
"Apache-2.0"
]
| permissive | stavanmehta/leetcode | 1b8da1c2bfacaa76ddfb96b8dbce03bf08c54c27 | 1224e43ce29430c840e65daae3b343182e24709c | refs/heads/master | 2021-07-15T16:02:16.107962 | 2021-06-24T05:39:14 | 2021-06-24T05:39:14 | 201,658,706 | 0 | 0 | Apache-2.0 | 2021-06-24T05:39:15 | 2019-08-10T16:59:32 | Java | UTF-8 | Python | false | false | 81 | py | class Solution:
    def checkInclusion(self, s1: str, s2: str) -> bool:
        # Minimal working body (sketch): slide a window of len(s1) across s2
        # and compare character counts against s1's counts.
        from collections import Counter
        need = Counter(s1)
        return any(Counter(s2[i:i + len(s1)]) == need
                   for i in range(len(s2) - len(s1) + 1))
| [
"[email protected]"
]
| |
6d7552c80362211c8655afa1523750f82b5f34b9 | cbdbb05b91a4463639deefd44169d564773cd1fb | /djangoproj/forms_lab/lab/models.py | a49bb957d8bc48b023dce230a3be6f848e11e28a | []
| no_license | blazprog/py3 | e26ef36a485809334b1d5a1688777b12730ebf39 | e15659e5d5a8ced617283f096e82135dc32a8df1 | refs/heads/master | 2020-03-19T20:55:22.304074 | 2018-06-11T12:25:18 | 2018-06-11T12:25:18 | 136,922,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | from django.db import models
class Nakup(models.Model):
datum_nakupa = models.DateField()
trgovina = models.CharField(max_length=30)
class NakupIzdelki(models.Model):
nakup = models.ForeignKey(Nakup)
izdelek = models.CharField(max_length=30)
kolicina = models.IntegerField(default=1)
cena = models.FloatField(default=0)
| [
"[email protected]"
]
| |
a3e8f85b15362854f00e8158fedd47775ff9a1fb | 9b5597492e57313712c0a842ef887940f92636cd | /judge/sessions/2018Individual/[email protected]/PB_02.py | f67c2bfbbc1ddd96d57cfd996db5fcf43c0930bf | []
| no_license | onionhoney/codesprint | ae02be9e3c2354bb921dc0721ad3819539a580fa | fcece4daf908aec41de7bba94c07b44c2aa98c67 | refs/heads/master | 2020-03-11T11:29:57.052236 | 2019-05-02T22:04:53 | 2019-05-02T22:04:53 | 129,971,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py | Cases=input()
Cases=int(Cases)
for i in range(Cases):
Num1= [int(x) for x in input().split()]
Num2= [int(x) for x in input().split()]
Num3= [int(x) for x in input().split()]
count=0
ans=[]
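    # ans collects, for each of the three rows, the column index of its 1
    # (the input is assumed to contain exactly one 1 per row).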
for i in Num1:
if i==1:
ans.append(count)
count=count+1
count=0
for i in Num2:
if i==1:
ans.append(count)
count=count+1
count=0
for i in Num3:
if i==1:
ans.append(count)
count=count+1
print(ans[0],ans[1],ans[2])
| [
"[email protected]"
]
| |
618d26a1de085c3b232b50f8a719c096a1a4c389 | b5ca0a2ce47fdb4306bbdffcb995eb7e6eac1b23 | /Python/Regex and Parsing/Validating phone numbers/attempt2.py | 2b2d832379b0eb3a6171a2ff4bfd378358e9b641 | []
| no_license | rsoemardja/HackerRank | ac257a66c3649534197b223b8ab55011d84fb9e1 | 97d28d648a85a16fbe6a5d6ae72ff6503a063ffc | refs/heads/master | 2022-04-14T22:46:03.412359 | 2020-04-03T07:44:04 | 2020-04-03T07:44:04 | 217,687,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | import re
n = int(input())
for _ in range(n):
    if re.fullmatch(r'[789]\d{9}', input()) is not None:
print('YES')
else:
print('NO') | [
"[email protected]"
]
| |
6698de943e743d11300f391dd839dad9369a9914 | c2f4afee3ec4faef7231da2e48c8fef3d309b3e3 | /AppendFile.py | 7fcd0d08932bb7aacd4bdcc2a8461d8776ca7cac | []
| no_license | tanu312000/pyChapter | a723f99754ff2b21e694a9da3cb2c6ca0cd10fce | 2fd28aefcbfaf0f6c34db90fdf0d77f9aea142ce | refs/heads/master | 2020-05-03T15:51:34.334806 | 2019-03-31T16:17:45 | 2019-03-31T16:17:45 | 178,712,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | '''#WAP to read line by line from keyboard & append to a list until n times. Break the loop under break condition.
#Step:1
# 1)ReadLine from keyboard
Line=input("Enter a Line"+"\n")
# 2)Append in the list
li=[]
li.append(Line)
# 3)Until n times
while True:
Status=input("Do you want to continue")
# 4)Break the condition
if(Status=="no"):
break
print("Success")
# 5)Write in a file'''
li=[]
with open("/home/tanu/programs/pythonFiles/AppendFile.txt",'a') as fp:
    while True:
        Line=(input("Enter a line")+"\n")
        li.append(Line)
        Status=input("Do you want to Continue")
        if(Status=="No"):
            print("Success")
            break
    fp.writelines(li)
| [
"[email protected]"
]
| |
011d3b37d7cb2a349a9f335003c370504e1fc868 | 26fb93b2df4b6226e708027beccb2f0d442a4522 | /MWTracker/GUI_Qt4/SWTrackerViewer/SWTrackerViewer_GUI.py | fbf28e5f0a9297acd767443f273c16285271614c | []
| no_license | KezhiLi/Multiworm_Tracking | bb4fd1d1beeab26f4402f5aa5a3f159700fa0009 | cd91e968a557957e920d61db8bc10957666b6bc2 | refs/heads/master | 2021-01-22T16:10:23.591064 | 2016-04-13T15:51:18 | 2016-04-13T15:51:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,201 | py | import sys
from PyQt4.QtGui import QApplication, QMainWindow, QFileDialog, QMessageBox, QFrame
from PyQt4.QtCore import QDir, QTimer, Qt, QPointF
from PyQt4.QtGui import QPixmap, QImage, QPainter, QColor, QFont, QPolygonF, QPen
from MWTracker.GUI_Qt4.SWTrackerViewer.SWTrackerViewer_ui import Ui_ImageViewer
from MWTracker.GUI_Qt4.MWTrackerViewerSingle.MWTrackerViewerSingle_GUI import MWTrackerViewerSingle_GUI
from MWTracker.trackWorms.getSkeletonsTables import getWormMask, binaryMask2Contour
from MWTracker.intensityAnalysis.correctHeadTailIntensity import createBlocks, _fuseOverlapingGroups
import tables, os
import numpy as np
import pandas as pd
import cv2
import json
class SWTrackerViewer_GUI(MWTrackerViewerSingle_GUI):
def __init__(self, ui = ''):
if not ui:
super().__init__(Ui_ImageViewer())
else:
super().__init__(ui)
self.skel_block = []
self.skel_block_n = 0
self.is_stage_move = []
self.ui.spinBox_skelBlock.valueChanged.connect(self.changeSkelBlock)
def updateSkelFile(self):
super().updateSkelFile()
with tables.File(self.skel_file, 'r') as fid:
if '/provenance_tracking/INT_SKE_ORIENT' in fid:
prov_str = fid.get_node('/provenance_tracking/INT_SKE_ORIENT').read()
func_arg_str = json.loads(prov_str.decode("utf-8"))['func_arguments']
gap_size = json.loads(func_arg_str)['gap_size']
good = (self.trajectories_data['int_map_id']>0).values
has_skel_group = createBlocks(good, min_block_size = 0)
self.skel_block = _fuseOverlapingGroups(has_skel_group, gap_size = gap_size)
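                # Frames that already have an intensity map form contiguous
                # blocks; blocks separated by fewer than gap_size frames are
                # fused into one (sketch of the intent behind these helpers).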
else:
self.skel_block = []
self.ui.spinBox_skelBlock.setMaximum(max(len(self.skel_block)-1,0))
self.ui.spinBox_skelBlock.setMinimum(0)
if self.skel_block_n != 0:
self.skel_block_n = 0
self.ui.spinBox_skelBlock.setValue(0)
else:
self.changeSkelBlock(0)
with tables.File(self.skel_file, 'r') as fid:
if '/stage_movement/stage_vec' in fid:
self.is_stage_move = np.isnan(fid.get_node('/stage_movement/stage_vec')[:,0])
else:
self.is_stage_move = []
def updateImage(self):
self.readImage()
self.drawSkelResult()
if len(self.is_stage_move) > 0 and self.is_stage_move[self.frame_number]:
painter = QPainter()
painter.begin(self.frame_qimg)
pen = QPen()
pen_width = 3
pen.setWidth(pen_width)
pen.setColor(Qt.red)
painter.setPen(pen)
painter.drawRect(1, 1, self.frame_qimg.width()-pen_width, self.frame_qimg.height()-pen_width);
painter.end()
self.pixmap = QPixmap.fromImage(self.frame_qimg)
self.ui.imageCanvas.setPixmap(self.pixmap);
def changeSkelBlock(self, val):
self.skel_block_n = val
if len(self.skel_block) > 0:
self.ui.label_skelBlock.setText('Block limits: %i-%i' % (self.skel_block[self.skel_block_n]))
#move to the frame where the block starts
self.ui.spinBox_frame.setValue(self.skel_block[self.skel_block_n][0])
else:
self.ui.label_skelBlock.setText('')
#change frame number using the keys
def keyPressEvent(self, event):
        # go to the previous block ('[' key)
        if event.key() == 91:
            self.ui.spinBox_skelBlock.setValue(self.skel_block_n-1)
        # go to the next block (']' key)
        elif event.key() == 93:
            self.ui.spinBox_skelBlock.setValue(self.skel_block_n+1)
        # toggle the label checkbox (';' key)
        elif event.key() == 59:
            if self.ui.checkBox_showLabel.isChecked():
                self.ui.checkBox_showLabel.setChecked(0)
            else:
                self.ui.checkBox_showLabel.setChecked(1)
super().keyPressEvent(event)
if __name__ == '__main__':
app = QApplication(sys.argv)
ui = SWTrackerViewer_GUI()
ui.show()
sys.exit(app.exec_())
| [
"[email protected]"
]
| |
25e72161c8d4276d21d755c960750c74d408ce34 | 8f75dae40363144b7ea0eccb1b2fab804ee60711 | /tests/integration/goldens/credentials/samples/generated_samples/iamcredentials_v1_generated_iam_credentials_sign_blob_async.py | fffa6de4bc73a43e8c4de2347fdbc936e2ed972e | [
"Apache-2.0"
]
| permissive | software-dov/gapic-generator-python | a2298c13b02bff87888c2949f4909880c3fa2408 | 304b30d3b4ec9ccb730251154b10896146a52900 | refs/heads/master | 2022-06-04T00:14:28.559534 | 2022-02-28T18:13:26 | 2022-02-28T18:13:26 | 191,990,527 | 0 | 1 | Apache-2.0 | 2022-01-27T19:35:04 | 2019-06-14T18:41:06 | Python | UTF-8 | Python | false | false | 1,491 | py | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for SignBlob
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-iam-credentials
# [START iamcredentials_v1_generated_IAMCredentials_SignBlob_async]
from google.iam import credentials_v1
async def sample_sign_blob():
# Create a client
client = credentials_v1.IAMCredentialsAsyncClient()
# Initialize request argument(s)
request = credentials_v1.SignBlobRequest(
name="name_value",
payload=b'payload_blob',
)
# Make the request
response = await client.sign_blob(request=request)
# Handle the response
print(response)
# [END iamcredentials_v1_generated_IAMCredentials_SignBlob_async]
| [
"[email protected]"
]
| |
4ce90ed6b7934f21c3463432f8284e1baa696b8f | cc1b87f9368e96e9b3ecfd5e0822d0037e60ac69 | /dashboard/dashboard/deprecate_tests_test.py | 63b49de8e073633803e4351d0465573bfede7986 | [
"BSD-3-Clause"
]
| permissive | CTJyeh/catapult | bd710fb413b9058a7eae6073fe97a502546bbefe | c98b1ee7e410b2fb2f7dc9e2eb01804cf7c94fcb | refs/heads/master | 2020-08-19T21:57:40.981513 | 2019-10-17T09:51:09 | 2019-10-17T18:30:16 | 215,957,813 | 1 | 0 | BSD-3-Clause | 2019-10-18T06:41:19 | 2019-10-18T06:41:17 | null | UTF-8 | Python | false | false | 9,522 | py | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import datetime
import mock
import unittest
import webapp2
import webtest
from google.appengine.ext import ndb
from dashboard import deprecate_tests
from dashboard.common import testing_common
from dashboard.common import utils
_DEPRECATE_DAYS = deprecate_tests._DEPRECATION_REVISION_DELTA.days + 1
_REMOVAL_DAYS = deprecate_tests._REMOVAL_REVISON_DELTA.days + 1
_TESTS_SIMPLE = [
['ChromiumPerf'],
['mac'],
{
'SunSpider': {
'Total': {
't': {},
't_ref': {},
},
}
}
]
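# Each fixture in this module mirrors testing_common.AddTests' positional
# arguments: a list of masters, a list of bots, and a nested dict of tests.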
_TESTS_MULTIPLE = [
['ChromiumPerf'],
['mac'],
{
'SunSpider': {
'Total': {
't': {},
't_ref': {},
},
},
'OtherTest': {
'OtherMetric': {
'foo1': {},
'foo2': {},
},
},
}
]
_TESTS_MULTIPLE_MASTERS_AND_BOTS = [
['ChromiumPerf', 'ChromiumPerfFYI'],
['mac', 'linux'],
{
'SunSpider': {
'Total': {
't': {},
},
}
}
]
class DeprecateTestsTest(testing_common.TestCase):
def setUp(self):
super(DeprecateTestsTest, self).setUp()
app = webapp2.WSGIApplication([(
'/deprecate_tests', deprecate_tests.DeprecateTestsHandler)])
self.testapp = webtest.TestApp(app)
deprecate_tests._DEPRECATE_TESTS_PARALLEL_SHARDS = 2
def _AddMockRows(self, test_path, age):
"""Adds sample TestMetadata and Row entities."""
# Add 50 Row entities to some of the tests.
ts = datetime.datetime.now() - datetime.timedelta(days=age)
data = {}
for i in range(15000, 15100, 2):
data[i] = {'value': 1, 'timestamp': ts}
testing_common.AddRows(test_path, data)
def AssertDeprecated(self, test_path, deprecated):
test_key = utils.TestKey(test_path)
test = test_key.get()
self.assertEqual(test.deprecated, deprecated)
@mock.patch.object(deprecate_tests, '_AddDeleteTestDataTask')
def testPost_DeprecateOldTest(self, mock_delete):
testing_common.AddTests(*_TESTS_MULTIPLE)
self._AddMockRows('ChromiumPerf/mac/SunSpider/Total/t', _DEPRECATE_DAYS)
self._AddMockRows('ChromiumPerf/mac/SunSpider/Total/t_ref', 0)
self._AddMockRows('ChromiumPerf/mac/OtherTest/OtherMetric/foo1', 0)
self._AddMockRows('ChromiumPerf/mac/OtherTest/OtherMetric/foo2', 0)
self.testapp.post('/deprecate_tests')
self.ExecuteTaskQueueTasks(
'/deprecate_tests', deprecate_tests._DEPRECATE_TESTS_TASK_QUEUE_NAME)
self.AssertDeprecated('ChromiumPerf/mac/SunSpider', False)
self.AssertDeprecated('ChromiumPerf/mac/SunSpider/Total/t', True)
self.AssertDeprecated('ChromiumPerf/mac/SunSpider/Total/t_ref', False)
self.assertFalse(mock_delete.called)
@mock.patch.object(deprecate_tests, '_AddDeleteTestDataTask')
def testPost_DeprecateOldTestDeprecatesSuite(self, mock_delete):
testing_common.AddTests(*_TESTS_MULTIPLE)
self._AddMockRows('ChromiumPerf/mac/SunSpider/Total/t', _DEPRECATE_DAYS)
self._AddMockRows('ChromiumPerf/mac/SunSpider/Total/t_ref', _DEPRECATE_DAYS)
self._AddMockRows('ChromiumPerf/mac/OtherTest/OtherMetric/foo1', 0)
self._AddMockRows('ChromiumPerf/mac/OtherTest/OtherMetric/foo2', 0)
self.testapp.post('/deprecate_tests')
self.ExecuteTaskQueueTasks(
'/deprecate_tests', deprecate_tests._DEPRECATE_TESTS_TASK_QUEUE_NAME)
# Do a second pass to catch the suite
self.testapp.post('/deprecate_tests')
self.ExecuteTaskQueueTasks(
'/deprecate_tests', deprecate_tests._DEPRECATE_TESTS_TASK_QUEUE_NAME)
self.AssertDeprecated('ChromiumPerf/mac/SunSpider', True)
self.AssertDeprecated('ChromiumPerf/mac/SunSpider/Total/t', True)
self.AssertDeprecated('ChromiumPerf/mac/SunSpider/Total/t_ref', True)
self.assertFalse(mock_delete.called)
@mock.patch.object(deprecate_tests, '_AddDeleteTestDataTask')
def testPost_DoesNotDeleteRowsWithChildren(self, mock_delete):
testing_common.AddTests(*_TESTS_SIMPLE)
self._AddMockRows('ChromiumPerf/mac/SunSpider/Total', _REMOVAL_DAYS)
self._AddMockRows('ChromiumPerf/mac/SunSpider/Total/t', 0)
self._AddMockRows('ChromiumPerf/mac/SunSpider/Total/t_ref', 0)
self.testapp.post('/deprecate_tests')
self.ExecuteTaskQueueTasks(
'/deprecate_tests', deprecate_tests._DEPRECATE_TESTS_TASK_QUEUE_NAME)
# Do a second pass to catch the suite
self.testapp.post('/deprecate_tests')
self.ExecuteTaskQueueTasks(
'/deprecate_tests', deprecate_tests._DEPRECATE_TESTS_TASK_QUEUE_NAME)
self.AssertDeprecated('ChromiumPerf/mac/SunSpider', False)
self.AssertDeprecated('ChromiumPerf/mac/SunSpider/Total', True)
self.AssertDeprecated('ChromiumPerf/mac/SunSpider/Total/t', False)
self.assertFalse(mock_delete.called)
@mock.patch.object(deprecate_tests, '_AddDeleteTestDataTask')
def testPost_DeprecateOldTestDeletesData(self, mock_delete):
testing_common.AddTests(*_TESTS_MULTIPLE)
self._AddMockRows('ChromiumPerf/mac/SunSpider/Total/t', _REMOVAL_DAYS)
self._AddMockRows('ChromiumPerf/mac/SunSpider/Total/t_ref', 0)
self._AddMockRows('ChromiumPerf/mac/OtherTest/OtherMetric/foo1', 0)
self._AddMockRows('ChromiumPerf/mac/OtherTest/OtherMetric/foo2', 0)
self.testapp.post('/deprecate_tests')
self.ExecuteTaskQueueTasks(
'/deprecate_tests', deprecate_tests._DEPRECATE_TESTS_TASK_QUEUE_NAME)
test = utils.TestKey('ChromiumPerf/mac/SunSpider/Total/t').get()
mock_delete.assert_called_once_with(test)
@mock.patch.object(deprecate_tests, '_AddDeleteTestDataTask')
def testPost_DeletesTestsWithNoRowsOrChildren(self, mock_delete):
testing_common.AddTests(*_TESTS_MULTIPLE)
self._AddMockRows('ChromiumPerf/mac/SunSpider/Total/t', 0)
self._AddMockRows('ChromiumPerf/mac/OtherTest/OtherMetric/foo1', 0)
self._AddMockRows('ChromiumPerf/mac/OtherTest/OtherMetric/foo2', 0)
self.testapp.post('/deprecate_tests')
self.ExecuteTaskQueueTasks(
'/deprecate_tests', deprecate_tests._DEPRECATE_TESTS_TASK_QUEUE_NAME)
test = utils.TestKey('ChromiumPerf/mac/SunSpider/Total/t_ref').get()
mock_delete.assert_called_once_with(test)
@mock.patch.object(
deprecate_tests, '_AddDeprecateTestDataTask', mock.MagicMock())
def testPost_DeletesBot_NotMaster(self):
testing_common.AddTests(*_TESTS_MULTIPLE_MASTERS_AND_BOTS)
utils.TestKey('ChromiumPerf/mac/SunSpider/Total/t').delete()
utils.TestKey('ChromiumPerf/mac/SunSpider/Total').delete()
utils.TestKey('ChromiumPerf/mac/SunSpider').delete()
for m in _TESTS_MULTIPLE_MASTERS_AND_BOTS[0]:
for b in _TESTS_MULTIPLE_MASTERS_AND_BOTS[1]:
master_key = ndb.Key('Master', m)
bot_key = ndb.Key('Bot', b, parent=master_key)
self.assertIsNotNone(bot_key.get())
self.assertIsNotNone(master_key.get())
self.testapp.get('/deprecate_tests')
self.ExecuteDeferredTasks(deprecate_tests._DEPRECATE_TESTS_TASK_QUEUE_NAME)
expected_deleted_bots = [ndb.Key('Master', 'ChromiumPerf', 'Bot', 'mac')]
for m in _TESTS_MULTIPLE_MASTERS_AND_BOTS[0]:
for b in _TESTS_MULTIPLE_MASTERS_AND_BOTS[1]:
master_key = ndb.Key('Master', m)
bot_key = ndb.Key('Bot', b, parent=master_key)
if bot_key in expected_deleted_bots:
self.assertIsNone(bot_key.get())
else:
self.assertIsNotNone(bot_key.get())
self.assertIsNotNone(master_key.get())
@mock.patch.object(
deprecate_tests, '_AddDeprecateTestDataTask', mock.MagicMock())
def testPost_DeletesMasterAndBot(self):
testing_common.AddTests(*_TESTS_MULTIPLE_MASTERS_AND_BOTS)
utils.TestKey('ChromiumPerf/mac/SunSpider/Total/t').delete()
utils.TestKey('ChromiumPerf/mac/SunSpider/Total').delete()
utils.TestKey('ChromiumPerf/mac/SunSpider').delete()
utils.TestKey('ChromiumPerf/linux/SunSpider/Total/t').delete()
utils.TestKey('ChromiumPerf/linux/SunSpider/Total').delete()
utils.TestKey('ChromiumPerf/linux/SunSpider').delete()
for m in _TESTS_MULTIPLE_MASTERS_AND_BOTS[0]:
for b in _TESTS_MULTIPLE_MASTERS_AND_BOTS[1]:
master_key = ndb.Key('Master', m)
bot_key = ndb.Key('Bot', b, parent=master_key)
self.assertIsNotNone(bot_key.get())
self.assertIsNotNone(master_key.get())
self.testapp.get('/deprecate_tests')
self.ExecuteDeferredTasks(deprecate_tests._DEPRECATE_TESTS_TASK_QUEUE_NAME)
expected_deleted_bots = [
ndb.Key('Master', 'ChromiumPerf', 'Bot', 'mac'),
ndb.Key('Master', 'ChromiumPerf', 'Bot', 'linux')]
expected_deleted_masters = [ndb.Key('Master', 'ChromiumPerf')]
for m in _TESTS_MULTIPLE_MASTERS_AND_BOTS[0]:
master_key = ndb.Key('Master', m)
if master_key in expected_deleted_masters:
self.assertIsNone(master_key.get())
else:
self.assertIsNotNone(master_key.get())
for b in _TESTS_MULTIPLE_MASTERS_AND_BOTS[1]:
bot_key = ndb.Key('Bot', b, parent=master_key)
if bot_key in expected_deleted_bots:
self.assertIsNone(bot_key.get())
else:
self.assertIsNotNone(bot_key.get())
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
33f06e48105dd16509b58527c0eed07ca7ed05a6 | bc441bb06b8948288f110af63feda4e798f30225 | /cmdb_sdk/model/notify/subscriber_pb2.pyi | f37ead231f841e671f9d1f218fbe6e05d86a7244 | [
"Apache-2.0"
]
| permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,884 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from cmdb_sdk.model.notify.subscribe_info_pb2 import (
SubscribeInfo as cmdb_sdk___model___notify___subscribe_info_pb2___SubscribeInfo,
)
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class Subscriber(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
name = ... # type: typing___Text
admin = ... # type: typing___Text
callback = ... # type: typing___Text
ensName = ... # type: typing___Text
procNum = ... # type: builtin___int
msgType = ... # type: builtin___int
retry = ... # type: builtin___int
@property
def subscribeInfo(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[cmdb_sdk___model___notify___subscribe_info_pb2___SubscribeInfo]: ...
def __init__(self,
*,
name : typing___Optional[typing___Text] = None,
admin : typing___Optional[typing___Text] = None,
callback : typing___Optional[typing___Text] = None,
ensName : typing___Optional[typing___Text] = None,
procNum : typing___Optional[builtin___int] = None,
msgType : typing___Optional[builtin___int] = None,
retry : typing___Optional[builtin___int] = None,
subscribeInfo : typing___Optional[typing___Iterable[cmdb_sdk___model___notify___subscribe_info_pb2___SubscribeInfo]] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> Subscriber: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> Subscriber: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"admin",b"admin",u"callback",b"callback",u"ensName",b"ensName",u"msgType",b"msgType",u"name",b"name",u"procNum",b"procNum",u"retry",b"retry",u"subscribeInfo",b"subscribeInfo"]) -> None: ...
| [
"[email protected]"
]
| |
9090363a9ae18d37db75ef5f9cfa91a9746969d5 | 054b2c78cf70a81823da522f1bb5889f42787365 | /mudao/ui/__init__.py | 78dc516a1d30d973d1d13ab0207c9f1ca480fe43 | []
| no_license | musum/mudao | 9537ca1f6262b1271f9c4f6a247e00549762d254 | fbf8c507aa6e1755ac4126d7e4d75ace99b97fd4 | refs/heads/master | 2020-03-26T16:27:59.967129 | 2018-11-11T10:40:46 | 2018-11-11T10:40:46 | 145,103,715 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | from mudao.ui.uiFile import FilePannel
from mudao.ui.uiCmd import CmdPannel
from mudao.ui.uiMain import MainWindow
| [
"[email protected]"
]
| |
dbf025d7bcfc7df0a48718eccc0b0cb14810a02c | c2f35e5d3cfbbb73188a0cd6c43d161738e63bd1 | /12-Django框架学习/bj18/test2/booktest/admin.py | e10072641c8350576c99bd572fcb82581b21d2f6 | []
| no_license | yangh-zzf-itcast/Python_heima_Study | 2a7cd0d801d9d6f49548905d373bb409efc4b559 | 7d753c1cdd5c46a0e78032e12b1d2f5d9be0bf68 | refs/heads/master | 2020-04-30T06:59:04.000451 | 2019-04-19T12:15:30 | 2019-04-19T12:15:30 | 176,670,172 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | from django.contrib import admin
from booktest.models import BookInfo, HeroInfo
# Register your models here.
# Register the model classes
admin.site.register(BookInfo)
admin.site.register(HeroInfo)
| [
"[email protected]"
]
| |
8eb3b583ba00c21e0e51f30d62670c1da9f518e3 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/87/usersdata/171/57992/submittedfiles/contido.py | 9b0e89a4443abb2d93f0a13c24567c49b26e2254 | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 593 | py | # -*- coding: utf-8 -*-
def quantidade(lista, lista2):
    """Count the positions at which the two lists hold the same value."""
    cont = 0
    for a, b in zip(lista, lista2):
        if a == b:
            cont = cont + 1
    return cont
n = int(input('enter the number of elements: '))
lista1 = []
for i in range(n):
    valor1 = int(input('enter the number to be placed in list 1: '))
    lista1.append(valor1)
n = int(input('enter the number of elements: '))
lista2 = []
for i in range(n):
    valor2 = int(input('enter the number to be placed in list 2: '))
    lista2.append(valor2)
print(quantidade(lista1, lista2))
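# Sketch of the expected behaviour for a hypothetical run:
#   quantidade([1, 2, 3], [1, 0, 3]) -> 2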
| [
"[email protected]"
]
| |
2f83c5cac7624e5304cf2aa0459939e100999280 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_085/ch2_2020_07_09_22_36_13_402167.py | c82e823dcb2277b1070d6ac039df23e7849b4131 | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | def calcula_velocidade_media(d,t):
return d/t | [
"[email protected]"
]
| |
744d50f28a2c94ad5282605b6d3bb4517f7916ea | 0466559817d3a1be9409da2c83db99c4db3bacfe | /hubcheck/conf/config.py | b68b5bd69b995419b02cbc759340b2a456a15ce1 | [
"MIT"
]
| permissive | ken2190/hubcheck | 955cf9b75a1ee77e28256dfd3a780cfbc17de961 | 2ff506eb56ba00f035300862f8848e4168452a17 | refs/heads/master | 2023-03-20T15:17:12.949715 | 2015-09-29T16:11:18 | 2015-09-29T16:11:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | import os
from pkg_resources import resource_filename
class Config(object):
data_dir = resource_filename('hubcheck','data')
profiles_dir = resource_filename('hubcheck','profiles')
# user configuration variables
screenshot_dir = None
video_dir = None
config_filename = None
tdfname = ''
tdpass = ''
highlight_web_elements = False
scroll_to_web_elements = False
log_locator_updates = False
log_widget_attachments = False
proxy = None
hub_hostname = None
hub_version = None
tool_container_version = None
default_workspace_toolname = None
apps_workspace_toolname = None
# full path of the hub config file, used by toolcheck
configpath = None
settings = Config()
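# Typical usage elsewhere in the package would be to import the shared
# instance and read or override attributes; a minimal sketch (the import
# path is an assumption based on this file's location):
#
#     from hubcheck.conf.config import settings
#     settings.hub_hostname = 'hub.example.org'  # hypothetical hostname
#     print(settings.data_dir)
#
# Because `settings` is a single module-level Config instance, every
# importer sees the same mutable configuration object.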
| [
"[email protected]"
]
| |
8bcc7d5dab776217c266b88d9884fc3a7e5a583d | 1bcb966740f47c0edc23e9b05afec55f2bcae36a | /app/game/appinterface/packageInfo.py | 92697a828967b4ecf9a95dbc7d38fdf70e8c3d66 | []
| no_license | East196/diabloworld | 0d2e9dbf650aa86fcc7b9fc1ef49912e79adb954 | d7a83a21287ed66aea690ecb6b73588569478be6 | refs/heads/master | 2021-05-09T12:15:31.640065 | 2018-02-04T15:16:54 | 2018-02-04T15:16:54 | 119,007,609 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,226 | py | #coding:utf8
'''
Created on 2011-4-13
@author: sean_lan
'''
from app.game.core.PlayersManager import PlayersManager
def getItemsInEquipSlotNew(dynamicId,characterId):
    '''Get the character's equipment-slot info.
    @param dynamicId: int, id of the client
    @param characterId: int, id of the character
    '''
player = PlayersManager().getPlayerByID(characterId)
if not player or not player.CheckClient(dynamicId):
return {'result':False,'message':""}
equipmentList = player.pack.getEquipmentSlotItemList()
keys_copy = dict(equipmentList)
equipmentList_copy = []
for position in range(1,7):
item = keys_copy.get(position,None)
if item:
_item = {}
_item['itemid'] = item.baseInfo.id
_item['icon'] = item.baseInfo.getItemTemplateInfo().get('icon',0)
_item['tempid'] = item.baseInfo.getItemTemplateId()
_item['exp'] = item.exp
iteminfo = {'pos':position,'item':_item}
equipmentList_copy.append(iteminfo)
playerInfo = player.formatInfoForWeiXin()
data = {}
data['equip'] = equipmentList_copy
data['attack'] = playerInfo['attack']
data['fangyu'] = playerInfo['fangyu']
data['minjie'] = playerInfo['minjie']
return {'result':True,'message':u'','data':data}
def UserItemNew(dynamicId,characterId,tempid):
    '''Use an item.
    '''
player = PlayersManager().getPlayerByID(characterId)
if not player or not player.CheckClient(dynamicId):
return {'result':False,'message':""}
data = player.pack.equipEquipmentByItemId(tempid)
return data
def GetPackageInfo(dynamicId,characterId):
    '''Get the package (inventory) info.
    '''
player = PlayersManager().getPlayerByID(characterId)
if not player or not player.CheckClient(dynamicId):
return {'result':False,'message':""}
data = player.pack.getPackageItemList()
return data
def unloadedEquipment_new(dynamicId, characterId, itemId):
    '''Unequip a piece of equipment.
    '''
player = PlayersManager().getPlayerByID(characterId)
if not player or not player.CheckClient(dynamicId):
return {'result':False,'message':""}
data = player.pack.unloaded(itemId)
return data
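# Every handler above follows the same lookup-validate-delegate pattern:
# resolve the player by id, verify the client connection, then delegate to
# player.pack. A sketch of that shared guard (a suggested refactor, not
# part of the original module):
#
# def _getCheckedPlayer(dynamicId, characterId):
#     player = PlayersManager().getPlayerByID(characterId)
#     if not player or not player.CheckClient(dynamicId):
#         return None
#     return player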
| [
"[email protected]"
]
| |
bb6a13c53d923939882c90a3722dcb0ee6f65008 | ecee6e84ba18100b621c7e06f493ae48e44a34fe | /build/navigation/nav_core/catkin_generated/pkg.installspace.context.pc.py | 7154b8261ec9e1dfe4b7533a24c5c418fed5f7a6 | []
| no_license | theleastinterestingcoder/Thesis | 6d59e06b16cbe1588a6454689248c88867de2094 | 3f6945f03a58f0eff105fe879401a7f1df6f0166 | refs/heads/master | 2016-09-05T15:30:26.501946 | 2015-05-11T14:34:15 | 2015-05-11T14:34:15 | 31,631,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/alfred/quan_ws/install/include".split(';') if "/home/alfred/quan_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "std_msgs;geometry_msgs;tf;costmap_2d".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "nav_core"
PROJECT_SPACE_DIR = "/home/alfred/quan_ws/install"
PROJECT_VERSION = "1.13.0"
| [
"[email protected]"
]
| |
aa893d19b93fa4e46eae5303e87793c9a2afed4f | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_118/1093.py | fa244fab36bc58d7529d2c7242771c1da73f5714 | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,133 | py | def isPalindrome(num):
if num - int(num) != 0:
return False
num = str(int(num))
l = len(num)
for i in xrange(l):
if num[i] != num[l-i-1]:
return False
return True
def allFairAndSquare(a, b):
rtn = []
for i in xrange(a, b+1):
if isPalindrome(i) and isPalindrome(i**(0.5)):
rtn.append(i)
return rtn
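# Note on the approach: every fair-and-square number up to 1000 is
# precomputed once, and each query below scans that list for its range
# boundaries (the name `all` shadows the Python built-in). The float test
# isPalindrome(i ** 0.5) is only safe for small inputs; an exact-integer
# variant could look like this (a sketch using math.isqrt from Python 3.8+,
# not the author's code):
#
# import math
# def is_fair_and_square(n):
#     r = math.isqrt(n)  # exact integer square root
#     return r * r == n and str(n) == str(n)[::-1] and str(r) == str(r)[::-1]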
f = open('C-small-attempt1.in', 'r')
g = open('output.txt', 'w')
n = int(f.readline())
count = 1
all = allFairAndSquare(1, 1000)
while count <= n:
rng = f.readline().split()
a, b = int(rng[0]), int(rng[1])
x, y = 1000, 1000
for i in xrange(len(all)):
if all[i] >= a:
x = i
break
for i in xrange(x, len(all)):
if all[i] > b:
y = i
break
total = 0
if x == 1000:
total = 0
elif y == 1000:
y = len(all)
total = y-x
else:
total = y-x
g.write("Case #" + str(count) + ": " + str(total) + '\n')
count += 1
f.close()
g.close() | [
"[email protected]"
]
| |
9a9e8b7d9284574442ab7b8a10207055a4e065fd | fa07f9ff0c833746a4195a9092f5831e1126b684 | /03逻辑回归/tool/Read_Minist_Tool.py | 20e00b911aaddbd62cbe3738177e435b941c794e | []
| no_license | shiqiuwang/ML_basic_model | 76c3b755dda772031bfba22860ee61bb2ea286fc | b6d7350332f3ef32ccc5dc69f81b629c5bcdd349 | refs/heads/master | 2023-03-23T10:23:08.130357 | 2021-03-20T16:43:30 | 2021-03-20T16:43:30 | 348,405,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,341 | py | # coding=utf-8
import numpy as np
import struct
import matplotlib.pyplot as plt
# training set image file
train_images_idx3_ubyte_file = 'data/minist/train-images.idx3-ubyte'
# training set label file
train_labels_idx1_ubyte_file = 'data/minist/train-labels.idx1-ubyte'
# test set image file
test_images_idx3_ubyte_file = 'data/minist/t10k-images.idx3-ubyte'
# test set label file
test_labels_idx1_ubyte_file = 'data/minist/t10k-labels.idx1-ubyte'
def decode_idx3_ubyte(idx3_ubyte_file):
"""
解析idx3文件的通用函数
:param idx3_ubyte_file: idx3文件路径
:return: 数据集
"""
# 读取二进制数据
bin_data = open(idx3_ubyte_file, 'rb').read()
# 解析文件头信息,依次为魔数、图片数量、每张图片高、每张图片宽
offset = 0
fmt_header = '>iiii' #因为数据结构中前4行的数据类型都是32位整型,所以采用i格式,但我们需要读取前4行数据,所以需要4个i。我们后面会看到标签集中,只使用2个ii。
magic_number, num_images, num_rows, num_cols = struct.unpack_from(fmt_header, bin_data, offset)
print('魔数:%d, 图片数量: %d张, 图片大小: %d*%d' % (magic_number, num_images, num_rows, num_cols))
# 解析数据集
image_size = num_rows * num_cols
offset += struct.calcsize(fmt_header) #获得数据在缓存中的指针位置,从前面介绍的数据结构可以看出,读取了前4行之后,指针位置(即偏移位置offset)指向0016。
print(offset)
fmt_image = '>' + str(image_size) + 'B' #图像数据像素值的类型为unsigned char型,对应的format格式为B。这里还有加上图像大小784,是为了读取784个B格式数据,如果没有则只会读取一个值(即一副图像中的一个像素值)
print(fmt_image,offset,struct.calcsize(fmt_image))
images = np.empty((num_images, num_rows, num_cols))
#plt.figure()
for i in range(num_images):
if (i + 1) % 10000 == 0:
print('已解析 %d' % (i + 1) + '张')
print(offset)
images[i] = np.array(struct.unpack_from(fmt_image, bin_data, offset)).reshape((num_rows, num_cols))
#print(images[i])
offset += struct.calcsize(fmt_image)
# plt.imshow(images[i],'gray')
# plt.pause(0.00001)
# plt.show()
#plt.show()
return images
def decode_idx1_ubyte(idx1_ubyte_file):
"""
解析idx1文件的通用函数
:param idx1_ubyte_file: idx1文件路径
:return: 数据集
"""
# 读取二进制数据
bin_data = open(idx1_ubyte_file, 'rb').read()
# 解析文件头信息,依次为魔数和标签数
offset = 0
fmt_header = '>ii'
magic_number, num_images = struct.unpack_from(fmt_header, bin_data, offset)
print('魔数:%d, 图片数量: %d张' % (magic_number, num_images))
# 解析数据集
offset += struct.calcsize(fmt_header)
fmt_image = '>B'
labels = np.empty(num_images)
for i in range(num_images):
if (i + 1) % 10000 == 0:
print ('已解析 %d' % (i + 1) + '张')
labels[i] = struct.unpack_from(fmt_image, bin_data, offset)[0]
offset += struct.calcsize(fmt_image)
return labels
def load_train_images(idx_ubyte_file=train_images_idx3_ubyte_file):
"""
TRAINING SET IMAGE FILE (train-images-idx3-ubyte):
[offset] [type] [value] [description]
0000 32 bit integer 0x00000803(2051) magic number
0004 32 bit integer 60000 number of images
0008 32 bit integer 28 number of rows
0012 32 bit integer 28 number of columns
0016 unsigned byte ?? pixel
0017 unsigned byte ?? pixel
........
xxxx unsigned byte ?? pixel
Pixels are organized row-wise. Pixel values are 0 to 255. 0 means background (white), 255 means foreground (black).
    :param idx_ubyte_file: path to the idx file
    :return: n*row*col np.array, where n is the number of images
"""
return decode_idx3_ubyte(idx_ubyte_file)
def load_train_labels(idx_ubyte_file=train_labels_idx1_ubyte_file):
"""
TRAINING SET LABEL FILE (train-labels-idx1-ubyte):
[offset] [type] [value] [description]
0000 32 bit integer 0x00000801(2049) magic number (MSB first)
0004 32 bit integer 60000 number of items
0008 unsigned byte ?? label
0009 unsigned byte ?? label
........
xxxx unsigned byte ?? label
    The label values are 0 to 9.
    :param idx_ubyte_file: path to the idx file
    :return: n*1 np.array, where n is the number of images
"""
return decode_idx1_ubyte(idx_ubyte_file)
def load_test_images(idx_ubyte_file=test_images_idx3_ubyte_file):
"""
TEST SET IMAGE FILE (t10k-images-idx3-ubyte):
[offset] [type] [value] [description]
0000 32 bit integer 0x00000803(2051) magic number
0004 32 bit integer 10000 number of images
0008 32 bit integer 28 number of rows
0012 32 bit integer 28 number of columns
0016 unsigned byte ?? pixel
0017 unsigned byte ?? pixel
........
xxxx unsigned byte ?? pixel
Pixels are organized row-wise. Pixel values are 0 to 255. 0 means background (white), 255 means foreground (black).
    :param idx_ubyte_file: path to the idx file
    :return: n*row*col np.array, where n is the number of images
"""
return decode_idx3_ubyte(idx_ubyte_file)
def load_test_labels(idx_ubyte_file=test_labels_idx1_ubyte_file):
"""
TEST SET LABEL FILE (t10k-labels-idx1-ubyte):
[offset] [type] [value] [description]
0000 32 bit integer 0x00000801(2049) magic number (MSB first)
0004 32 bit integer 10000 number of items
0008 unsigned byte ?? label
0009 unsigned byte ?? label
........
xxxx unsigned byte ?? label
    The label values are 0 to 9.
    :param idx_ubyte_file: path to the idx file
    :return: n*1 np.array, where n is the number of images
"""
return decode_idx1_ubyte(idx_ubyte_file)
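if __name__ == '__main__':
    # Minimal usage sketch; it assumes the four idx files exist under
    # data/minist, exactly as the module-level path constants above expect.
    train_images = load_train_images()
    train_labels = load_train_labels()
    print(train_images.shape, train_labels.shape)  # expected: (60000, 28, 28) (60000,)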
| [
"[email protected]"
]
| |
06b102050b963026c4e5184c89d73ea7e22da896 | 4cc16cdcee820f258fcdb7550b853949fc59de46 | /mobject/vectorized_mobject.py | 615e24502c0de9496acb4dcc415c9e2f3a81e407 | []
| no_license | eitanas/manim | c710802ef301b11d0ac6549bb58e04fcc59cc16d | 825ff127a517f35041b2def6efe29a8d6358cd4c | refs/heads/master | 2021-01-22T16:26:09.805304 | 2016-08-15T22:07:28 | 2016-08-15T22:07:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,000 | py | import re
from .mobject import Mobject
from helpers import *
class VMobject(Mobject):
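    """
    A vectorized mobject: ``points`` stores a cubic Bezier spline laid out
    as anchor, handle, handle, anchor, handle, handle, anchor, ..., so a
    path with n anchor points keeps 3*(n-1) + 1 rows (see
    set_anchors_and_handles and get_anchors_and_handles below).
    """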
CONFIG = {
"fill_color" : None,
"fill_opacity" : 0.0,
"stroke_color" : None,
#Indicates that it will not be displayed, but
#that it should count in parent mobject's path
"is_subpath" : False,
"close_new_points" : False,
"mark_paths_closed" : False,
"considered_smooth" : True,
"propogate_style_to_family" : False,
}
def __init__(self, *args, **kwargs):
Mobject.__init__(self, *args, **kwargs)
VMobject.init_colors(self)
## Colors
def init_colors(self):
self.set_style_data(
stroke_color = self.stroke_color or self.color,
stroke_width = self.stroke_width,
fill_color = self.fill_color or self.color,
fill_opacity = self.fill_opacity,
family = self.propogate_style_to_family
)
return self
def set_family_attr(self, attr, value):
for mob in self.submobject_family():
setattr(mob, attr, value)
def set_style_data(self,
stroke_color = None,
stroke_width = None,
fill_color = None,
fill_opacity = None,
family = True):
if stroke_color is not None:
self.stroke_rgb = color_to_rgb(stroke_color)
if fill_color is not None:
self.fill_rgb = color_to_rgb(fill_color)
if stroke_width is not None:
self.stroke_width = stroke_width
if fill_opacity is not None:
self.fill_opacity = fill_opacity
if family:
kwargs = locals()
kwargs.pop("self")
for mob in self.submobjects:
mob.set_style_data(**kwargs)
return self
def set_fill(self, color = None, opacity = None, family = True):
probably_meant_to_change_opacity = reduce(op.and_, [
color is not None,
opacity is None,
self.fill_opacity == 0
])
if probably_meant_to_change_opacity:
opacity = 1
return self.set_style_data(
fill_color = color,
fill_opacity = opacity,
family = family
)
def set_stroke(self, color = None, width = None, family = True):
return self.set_style_data(
stroke_color = color,
stroke_width = width,
family = family
)
def highlight(self, color, family = True):
self.set_style_data(
stroke_color = color,
fill_color = color,
family = family
)
return self
# def fade(self, darkness = 0.5):
# Mobject.fade(self, darkness)
# return self
def get_fill_color(self):
try:
self.fill_rgb = np.clip(self.fill_rgb, 0, 1)
return Color(rgb = self.fill_rgb)
except:
return Color(WHITE)
def get_fill_opacity(self):
return self.fill_opacity
def get_stroke_color(self):
try:
self.stroke_rgb = np.clip(self.stroke_rgb, 0, 1)
return Color(rgb = self.stroke_rgb)
except:
return Color(WHITE)
def get_color(self):
if self.fill_opacity == 0:
return self.get_stroke_color()
return self.get_fill_color()
## Drawing
def start_at(self, point):
if len(self.points) == 0:
self.points = np.zeros((1, 3))
self.points[0] = point
return self
def add_control_points(self, control_points):
assert(len(control_points) % 3 == 0)
self.points = np.append(
self.points,
control_points,
axis = 0
)
return self
def is_closed(self):
return is_closed(self.points)
def set_anchors_and_handles(self, anchors, handles1, handles2):
assert(len(anchors) == len(handles1)+1)
assert(len(anchors) == len(handles2)+1)
total_len = 3*(len(anchors)-1) + 1
self.points = np.zeros((total_len, self.dim))
self.points[0] = anchors[0]
arrays = [handles1, handles2, anchors[1:]]
for index, array in enumerate(arrays):
self.points[index+1::3] = array
return self.points
def set_points_as_corners(self, points):
if len(points) <= 1:
return self
points = np.array(points)
self.set_anchors_and_handles(points, *[
interpolate(points[:-1], points[1:], alpha)
for alpha in 1./3, 2./3
])
return self
def set_points_smoothly(self, points):
if len(points) <= 1:
return self
h1, h2 = get_smooth_handle_points(points)
self.set_anchors_and_handles(points, h1, h2)
return self
def set_points(self, points):
self.points = np.array(points)
return self
def set_anchor_points(self, points, mode = "smooth"):
if not isinstance(points, np.ndarray):
points = np.array(points)
if self.close_new_points and not is_closed(points):
points = np.append(points, [points[0]], axis = 0)
if mode == "smooth":
self.set_points_smoothly(points)
elif mode == "corners":
self.set_points_as_corners(points)
else:
raise Exception("Unknown mode")
return self
def change_anchor_mode(self, mode):
anchors, h1, h2 = self.get_anchors_and_handles()
self.set_anchor_points(anchors, mode = mode)
return self
def make_smooth(self):
self.considered_smooth = True
return self.change_anchor_mode("smooth")
def make_jagged(self):
return self.change_anchor_mode("corners")
def add_subpath(self, points):
"""
        A VMobject is meant to represent
        a single "path", in the svg sense of the word.
        However, one such path may really consist of separate
        continuous components if there is a move_to command.
These other portions of the path will be treated as submobjects,
but will be tracked in a separate special list for when
it comes time to display.
"""
subpath_mobject = self.copy()#TODO, better way?
subpath_mobject.is_subpath = True
subpath_mobject.set_points(points)
self.add(subpath_mobject)
return subpath_mobject
def get_subpath_mobjects(self):
return filter(
lambda m : m.is_subpath,
self.submobjects
)
def apply_function(self, function, maintain_smoothness = True):
Mobject.apply_function(self, function)
if maintain_smoothness and self.considered_smooth:
self.make_smooth()
return self
## Information about line
def component_curves(self):
for n in range(self.get_num_anchor_points()-1):
yield self.get_nth_curve(n)
def get_nth_curve(self, n):
return bezier(self.points[3*n:3*n+4])
def get_num_anchor_points(self):
return (len(self.points) - 1)/3 + 1
def point_from_proportion(self, alpha):
num_cubics = self.get_num_anchor_points()-1
interpoint_alpha = num_cubics*(alpha % (1./num_cubics))
index = 3*int(alpha*num_cubics)
cubic = bezier(self.points[index:index+4])
return cubic(interpoint_alpha)
def get_anchors_and_handles(self):
return [
self.points[i::3]
for i in range(3)
]
## Alignment
def align_points(self, mobject):
Mobject.align_points(self, mobject)
is_subpath = self.is_subpath or mobject.is_subpath
self.is_subpath = mobject.is_subpath = is_subpath
mark_closed = self.mark_paths_closed and mobject.mark_paths_closed
self.mark_paths_closed = mobject.mark_paths_closed = mark_closed
return self
def align_points_with_larger(self, larger_mobject):
assert(isinstance(larger_mobject, VMobject))
self.insert_n_anchor_points(
larger_mobject.get_num_anchor_points()-\
self.get_num_anchor_points()
)
return self
def insert_n_anchor_points(self, n):
curr = self.get_num_anchor_points()
if curr == 0:
self.points = np.zeros((1, 3))
n = n-1
if curr == 1:
self.points = np.repeat(self.points, 3*n+1, axis = 0)
return self
points = np.array([self.points[0]])
num_curves = curr-1
#Curves in self are buckets, and we need to know
#how many new anchor points to put into each one.
#Each element of index_allocation is like a bucket,
#and its value tells you the appropriate index of
#the smaller curve.
index_allocation = (np.arange(curr+n-1) * num_curves)/(curr+n-1)
for index in range(num_curves):
curr_bezier_points = self.points[3*index:3*index+4]
num_inter_curves = sum(index_allocation == index)
alphas = np.arange(0, num_inter_curves+1)/float(num_inter_curves)
for a, b in zip(alphas, alphas[1:]):
new_points = partial_bezier_points(
curr_bezier_points, a, b
)
points = np.append(
points, new_points[1:], axis = 0
)
self.set_points(points)
return self
def get_point_mobject(self, center = None):
if center is None:
center = self.get_center()
return VectorizedPoint(center)
def repeat_submobject(self, submobject):
if submobject.is_subpath:
return VectorizedPoint(submobject.points[0])
return submobject.copy()
def interpolate_color(self, mobject1, mobject2, alpha):
attrs = [
"stroke_rgb",
"stroke_width",
"fill_rgb",
"fill_opacity",
]
for attr in attrs:
setattr(self, attr, interpolate(
getattr(mobject1, attr),
getattr(mobject2, attr),
alpha
))
if alpha == 1.0:
# print getattr(mobject2, attr)
setattr(self, attr, getattr(mobject2, attr))
def pointwise_become_partial(self, mobject, a, b):
assert(isinstance(mobject, VMobject))
#Partial curve includes three portions:
#-A middle section, which matches the curve exactly
#-A start, which is some ending portion of an inner cubic
#-An end, which is the starting portion of a later inner cubic
if a <= 0 and b >= 1:
self.set_points(mobject.points)
self.mark_paths_closed = mobject.mark_paths_closed
return self
self.mark_paths_closed = False
num_cubics = mobject.get_num_anchor_points()-1
lower_index = int(a*num_cubics)
upper_index = int(b*num_cubics)
points = np.array(
mobject.points[3*lower_index:3*upper_index+4]
)
if len(points) > 1:
a_residue = (num_cubics*a)%1
b_residue = (num_cubics*b)%1
points[:4] = partial_bezier_points(
points[:4], a_residue, 1
)
points[-4:] = partial_bezier_points(
points[-4:], 0, b_residue
)
self.set_points(points)
return self
class VectorizedPoint(VMobject):
CONFIG = {
"color" : BLACK,
"artificial_width" : 0.01,
"artificial_height" : 0.01,
}
def __init__(self, location = ORIGIN, **kwargs):
VMobject.__init__(self, **kwargs)
self.set_points(np.array([location]))
def get_width(self):
return self.artificial_width
def get_height(self):
return self.artificial_height
| [
"[email protected]"
]
| |
c12227791c9532c511adc49b611291c60354dc51 | 948d84d2e3fc04e353a11384d8570308174242f5 | /5-Pythonda Koşul İfadeleri/if-else-demo-2.py | ee6cf34554eeaa7723e51f9eedd857630f2067ee | []
| no_license | omerfarukcelenk/PythonMaster | a0084a800b8a41cd2ad538a7ca3687c26dc679ec | 0db8f8b0ea2e1c2d810c542068cfcf1a3615f581 | refs/heads/main | 2023-04-16T17:42:05.501904 | 2021-04-26T21:19:27 | 2021-04-26T21:19:27 | 361,896,109 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,485 | py | '''
1- Check whether an entered number is between 0 and 100.
sayi = float(input('sayı: '))
if (sayi > 0) and (sayi<=100):
    print('sayı 0-100 arasında.')
else:
    print('sayı 0-100 arasında değildir.')
'''
'''
2- Check whether an entered number is a positive even number.
sayi = int(input('sayı: '))
if (sayi > 0):
    if (sayi % 2 ==0):
        print('girilen sayı pozitif çift sayıdır.')
    else:
        print('girilen sayı pozitif ancak sayı tek.')
else:
    print('girilen sayı negatif sayı.')
'''
'''
3- Perform a sign-in check using email and password credentials.
email = '[email protected]'
password = 'abc123'
girilenEmail = input('email: ')
girilenPassword = input('password: ')
if (girilenEmail == email):
    if (girilenPassword == password):
        print('uygulamaya giriş başarılı.')
    else:
        print('parolanız yanlış')
else:
    print('email bilginiz yanlış')
'''
'''
4- Compare 3 entered numbers by magnitude.
a = int(input('a: '))
b = int(input('b: '))
c = int(input('c: '))
if (a > b) and (a > c):
    print(f'a en büyük sayıdır.')
elif (b > a) and (b > c):
    print(f'b en büyük sayıdır.')
elif (c > a) and (c > b):
    print(f'c en büyük sayıdır.')
'''
'''
5- Read two midterm grades (weighted 60%) and a final grade (weighted 40%) from the user and compute the average.
If the average is 50 or above print "passed", otherwise print "failed".
a-) Even if the average is 50, the final grade must still be at least 50.
b-) If the final grade is 70, the average should not matter.
vize1 = float(input('vize 1: '))
vize2 = float(input('vize 2: '))
final = float(input('final : '))
ortalama = ((vize1+vize2)/2)*0.6 + (final * 0.4)
result = (ortalama>=50) and (final>=50)
result = (ortalama >=50) or (final>=70)
** case 1
if (ortalama>=50):
    if (final>=50):
        print(f'öğrencinin ortalaması: {ortalama} ve geçme durumu: başarılı')
    else:
        print(f'öğrencinin ortalaması: {ortalama} ve geçme durumu: başarısız. Finalden en az 50 almalısınız.')
else:
    print(f'öğrencinin ortalaması: {ortalama} ve geçme durumu: başarısız')
** case 2
if (ortalama >=50):
    print(f'öğrencinin ortalaması: {ortalama} ve geçme durumu: başarılı')
else:
    if (final>=70):
        print(f'öğrencinin ortalaması: {ortalama} ve geçme durumu: başarılı. Finalden en az 70 aldığınız için geçtiniz.')
    else:
        print(f'öğrencinin ortalaması: {ortalama} ve geçme durumu: başarısız')
'''
'''
6- Read a person's name, weight and height and compute their body-mass index.
Formula: (weight / height squared)
According to the table below, which group does the person fall into?
0-18.4 => Underweight
18.5-24.9 => Normal
25.0-29.9 => Overweight
30.0-34.9 => Obese
name = input('adınız: ')
kg = float(input('kilonuz: '))
hg = float(input('boyunuz: '))
index = (kg) / (hg ** 2)
if (index >= 0) and (index<=18.4):
    print(f'{name} kilo indeksin: {index} ve kilo değerlendirmen zayıf.')
elif (index>18.4) and (index<=24.9):
    print(f'{name} kilo indeksin: {index} ve kilo değerlendirmen normal.')
elif (index>24.9) and (index<=29.9):
    print(f'{name} kilo indeksin: {index} ve kilo değerlendirmen kilolu.')
elif (index>=29.9) and (index<=45.9):
    print(f'{name} kilo indeksin: {index} ve kilo değerlendirmen obez.')
else:
    print('bilgileriniz yanlış.')
'''
| [
"[email protected]"
]
| |
51d2268bc441dc7e0809af59b51d58def2641769 | 5f10ca2439551040b0af336fd7e07dcc935fc77d | /Binary tree/二叉树性质相关题目/求每层的宽度.py | dcef6bda78001250e1f9f1a433d3a54382bd13b5 | []
| no_license | catdog001/leetcode2.0 | 2715797a303907188943bf735320e976d574f11f | d7c96cd9a1baa543f9dab28750be96c3ac4dc731 | refs/heads/master | 2021-06-02T10:33:41.552786 | 2020-04-08T04:18:04 | 2020-04-08T04:18:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,494 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/2/13 13:06
# @Author : LI Dongdong
# @FileName: 求每层的宽度.py
''''''
'''
Problem analysis
1. Requirement: for each level, find the width from the leftmost node to the rightmost node.
2. Understanding:
3. Type:
4. Confirm the input, output and edge cases:
5. Method and analysis:
time complexity order:
space complexity order:
'''
'''
Approach: BFS + [node, index]
Method:
    the deque stores each node together with its index (children get 2*index and 2*index + 1)
    traverse all nodes; for every level, take the index difference via a for loop
time complexity:
space complexity:
Pitfalls:
'''
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
from collections import deque
class Solution:
    def widthOfBinaryTree(self, root: TreeNode) -> list:
if not root: # corner case
return 0
width = 0
res = []
queue = deque()
queue.append([root, 0])
while queue:
width = len(queue)
left = queue[0][1]
right = queue[-1][1]
res.append(right - left + 1)
            for _ in range(width):  # traverse every node on the current level
                node, index = queue.popleft()  # pitfall: pop the node and its index together
if node.left:
queue.append([node.left, index * 2])
if node.right:
queue.append([node.right, index * 2 + 1])
return res
'''
Approach: DFS + dic[level: index]
Method:
    main: initialize level, index and dic
    helper:
        DFS over every node, updating level and index = index * 2 (+ 1)
        store dic[level] = [index of the first node on the level, index of the latest node]
time complexity:
space complexity:
Pitfalls: updating the right boundary, i.e. dic[level] = max(index + 1, dic[level])
'''
# Count the nodes on each level of the tree: which level is widest and how wide.
# input: root
# output: dic; key: level number, value: [leftmost index, rightmost index] of that level
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
    def widthOfBinaryTree(self, root: TreeNode) -> dict:
if not root: # corner case
return 0
level = 0
index = 0
dic ={} # store level and index
self.res = 0
self.dfs(root, level, index, dic)
return dic
def dfs(self, root, level, index, dic):
if not root: # corner case
return
if level in dic:
dic[level][1] = index
else:
dic[level] = [index, index]
self.dfs(root.left, level + 1, index * 2, dic)
self.dfs(root.right, level + 1, index * 2 + 1, dic)
from collections import deque
def constructTree(nodeList): # input: list using bfs, output: root
new_node = []
for elem in nodeList: # transfer list val to tree node
if elem:
new_node.append(TreeNode(elem))
else:
new_node.append(None)
queue = deque()
queue.append(new_node[0])
resHead = queue[0]
i = 1
while i <= len(new_node) - 1: # bfs method building
head = queue.popleft()
head.left = new_node[i] # build left and push
queue.append(head.left)
if i + 1 == len(new_node): # if no i + 1 in new_node
break
head.right = new_node[i + 1] # build right and push
queue.append(head.right)
i = i + 2
return resHead
root = constructTree([1,2,3,None,5,6])
x = Solution()
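# The call below, for the tree built from [1, 2, 3, None, 5, 6], returns
# {0: [0, 0], 1: [0, 1], 2: [1, 2]}: per level, the leftmost and rightmost
# indices under the 2*i / 2*i + 1 scheme (a worked-through sketch, not
# captured program output). Note that constructTree only tolerates None
# entries whose children are absent from the level-order list.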
x.widthOfBinaryTree(root) | [
"[email protected]"
]
| |
f1df3479147b367dfc6cc0d007b4386d3a0e7fa8 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/express_route_circuit_authorization_py3.py | b4bb3df4dd52a8366e8cb7b9b1d60fa77e023bf0 | [
"MIT"
]
| permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 2,559 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource_py3 import SubResource
class ExpressRouteCircuitAuthorization(SubResource):
"""Authorization in an ExpressRouteCircuit resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:param authorization_key: The authorization key.
:type authorization_key: str
:param authorization_use_status: AuthorizationUseStatus. Possible values
are: 'Available' and 'InUse'. Possible values include: 'Available',
'InUse'
:type authorization_use_status: str or
~azure.mgmt.network.v2017_03_01.models.AuthorizationUseStatus
:param provisioning_state: Gets the provisioning state of the public IP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: Gets name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:ivar etag: A unique read-only string that changes whenever the resource
is updated.
:vartype etag: str
"""
_validation = {
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'authorization_key': {'key': 'properties.authorizationKey', 'type': 'str'},
'authorization_use_status': {'key': 'properties.authorizationUseStatus', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, id: str=None, authorization_key: str=None, authorization_use_status=None, provisioning_state: str=None, name: str=None, **kwargs) -> None:
super(ExpressRouteCircuitAuthorization, self).__init__(id=id, **kwargs)
self.authorization_key = authorization_key
self.authorization_use_status = authorization_use_status
self.provisioning_state = provisioning_state
self.name = name
self.etag = None
| [
"[email protected]"
]
| |
20c78b60b815c01583da61d8d071a7b4e1735589 | dbf8768bb3818b4003f2e34ff561afb235a3734a | /Python/Templates/Django/ProjectTemplates/Python/Web/PollsDjango/app-admin.py | 898eb59ef40b51c7c1a179375f53430f2d5f5b8c | [
"Apache-2.0"
]
| permissive | wjk/PTVS | bf3880198ba35ae34b12872a86fe2b03d2a82180 | 184b6711a8700a7f9d78f6d6ac3b225f81a8b8b8 | refs/heads/master | 2020-12-14T16:11:40.486645 | 2020-01-17T20:45:15 | 2020-01-17T20:45:15 | 234,801,602 | 1 | 0 | Apache-2.0 | 2020-01-18T21:41:27 | 2020-01-18T21:41:26 | null | UTF-8 | Python | false | false | 669 | py | """
Customizations for the Django administration interface.
"""
from django.contrib import admin
from app.models import Choice, Poll
class ChoiceInline(admin.TabularInline):
"""Choice objects can be edited inline in the Poll editor."""
model = Choice
extra = 3
class PollAdmin(admin.ModelAdmin):
"""Definition of the Poll editor."""
fieldsets = [
(None, {'fields': ['text']}),
('Date information', {'fields': ['pub_date']}),
]
inlines = [ChoiceInline]
list_display = ('text', 'pub_date')
list_filter = ['pub_date']
search_fields = ['text']
date_hierarchy = 'pub_date'
admin.site.register(Poll, PollAdmin)
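# The decorator form of registration is equivalent (available since
# Django 1.7; whether this project targets that version is an assumption):
#
# @admin.register(Poll)
# class PollAdmin(admin.ModelAdmin):
#     ...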
| [
"[email protected]"
]
| |
b7f93333d8f7f87274baefcfac762f864f7617c2 | f385e93fb799629318b6f5bbae1a3b29d62d8359 | /database/citations/asuncion2012a.py | ea381f3189a4abf25a7944dc6845c8cf0f359501 | []
| no_license | JoaoFelipe/ipaw-index | bf113649de497d2008922eb80f8ea3bf2cd6aba5 | f8fe329f0c35b11c84bd76e7b7da7a465d380a02 | refs/heads/master | 2020-03-17T19:51:13.892958 | 2018-05-18T00:54:08 | 2018-05-18T00:54:08 | 133,880,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | # coding: utf-8
from snowballing.models import *
from snowballing import dbindex
dbindex.last_citation_file = dbindex.this_file(__file__)
from ..work.y2012 import asuncion2012a
from ..work.y2016 import reddish2016a
DB(Citation(
reddish2016a, asuncion2012a, ref="",
contexts=[
],
))
| [
"[email protected]"
]
|