blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5f578f05e9eeca2e7a85d76fb6cb42a0d606f54b
|
8ec910de801b424540abb4e6e955838a287663b6
|
/Bucles/ManoMoneda.py
|
7ea1f29e4f99d1de6ef426446f1d5ad4a59e551a
|
[] |
no_license
|
hector81/Aprendiendo_Python
|
f4f211ace32d334fb6b495b1b8b449d83a7f0bf8
|
9c73f32b0c82f08e964472af1923f66c0fbb4c22
|
refs/heads/master
| 2022-12-28T03:41:20.378415 | 2020-09-28T09:15:03 | 2020-09-28T09:15:03 | 265,689,885 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,520 |
py
|
# El programa simulará el juego de adivinar en qué mano está la moneda.
# Le preguntará a la persona el nombre y cuantas partidas quiere jugar,
# luego calculará el ganador.
import random
def introducirNumero():
    """Prompt until the user enters a positive integer and return it.

    Re-prompts on non-numeric input (ValueError) AND on values <= 0.
    Bug fix: the original executed `break` for values <= 0, fell out of the
    loop, and implicitly returned None — the caller's `while numeroVecesPartida > 0`
    then raised TypeError in Python 3. Now it simply keeps asking.
    """
    while True:
        try:
            numeroVecesPartida = int(input("Por favor ingrese un número : "))
            if numeroVecesPartida > 0:
                return numeroVecesPartida
            # Zero or negative game counts are invalid: warn and re-prompt.
            print("Oops! No era válido. Intente nuevamente...")
        except ValueError:
            # Non-numeric input: warn and re-prompt.
            print("Oops! No era válido. Intente nuevamente...")
# Main game loop: the player has `numeroVecesPartida` attempts to guess which
# hand (1 = right, 2 = left) holds the coin; the hand is drawn uniformly at
# random each round. The loop ends on a correct guess or when attempts run out.
print("Por favor ingrese un número de partidas: ")
numeroVecesPartida = introducirNumero()
while numeroVecesPartida > 0:
    print('¿En que mano tengo la moneda? Si crees que en la derecha pulsa 1 y si es en la izquierda pulsa 2')
    numeroEleccion = int(input("Escoge la mano : "))
    if numeroEleccion > 2 or numeroEleccion < 1:
        # Out-of-range choice: does not consume an attempt, just re-ask.
        print('Tienes que poner 1:derecha o 2:izquierda. No valen otros numeros')
    else:
        # Draw the hand holding the coin: 1 or 2, uniformly.
        numeroAleatorio = random.randint(1, 2)
        if numeroAleatorio == numeroEleccion:
            print('Has acertado')
            numeroVecesPartida = 0  # correct guess ends the game
        else:
            if (numeroVecesPartida - 1) == 0:
                # Last attempt used up: report and end.
                print('No has acertado y ya no quedan intentos')
                numeroVecesPartida = 0
            else:
                print('No has acertado. Vuelve a intertarlo. Te quedan ' + str(numeroVecesPartida - 1) + ' intentos')
                numeroVecesPartida = numeroVecesPartida - 1
|
[
"[email protected]"
] | |
6b57aa51eb80cb2ba879e3fe19dc47e190d2b60e
|
65cefe621c2444d9b793d7969d2e2c3ff54373d1
|
/analyze/api.py
|
c831cbc8c9d0c07781dd86cede87b9b4f346509d
|
[
"Apache-2.0"
] |
permissive
|
leekangsan/HanziSRS
|
01585e694fbf81428085b8a162acf101f7f5bec1
|
50f84a9171b61df3305e8922b645b553e895a509
|
refs/heads/master
| 2020-03-29T05:19:19.583429 | 2018-04-28T04:37:35 | 2018-04-28T04:37:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 486 |
py
|
import requests
from bs4 import BeautifulSoup
def jukuu(word):
    """Scrape jukuu.com for example sentences containing `word`.

    Yields dicts of the form {'Chinese': ..., 'English': ...}, one per
    Chinese/English sentence-pair row in the search results page.
    """
    response = requests.get('http://www.jukuu.com/search.php', params={'q': word})
    page = BeautifulSoup(response.text, 'html.parser')
    # Chinese sentences live in <tr class="c"> rows, their English
    # translations in the matching <tr class="e"> rows.
    chinese_rows = page.find_all('tr', {'class': 'c'})
    english_rows = page.find_all('tr', {'class': 'e'})
    for zh_row, en_row in zip(chinese_rows, english_rows):
        yield {
            'Chinese': zh_row.text.strip(),
            'English': en_row.text.strip()
        }
if __name__ == '__main__':
    # Demo: fetch and print all sentence pairs for the query word
    # (performs a live network request).
    print(list(jukuu('寒假')))
|
[
"[email protected]"
] | |
296283618e61a02a6f6c8c8516a5ae54f984803f
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02766/s873846892.py
|
1b5165713fefb3469a152f09d2a75411c31b81ee
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 241 |
py
|
def main():
    """Read N and K from stdin; print how many digits N has in base K."""
    n, k = map(int, input().split())
    digits = base10to(n, k)
    print(len(digits))
def base10to(n, b):
    """Return the base-`b` string representation of non-negative integer `n`.

    Bug fix: the original used `int(n/b)` — true division produces a float,
    which loses precision once `n` exceeds ~2**53, giving wrong digit strings
    for large inputs. `divmod` (integer floor division) is exact for
    arbitrarily large ints.

    Note: digits >= 10 are rendered as their multi-character decimal value
    (pre-existing behavior; fine for b <= 10, as in the original problem).
    """
    q, r = divmod(n, b)
    if q:
        # Recurse on the quotient, then append this position's digit.
        return base10to(q, b) + str(r)
    return str(r)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
068d8dce5daa9ac6705c8b77bd447240a513c227
|
a38bf459ae380f67e0de22f7106a8df4385a7076
|
/tests/integration/goldens/logging/samples/generated_samples/logging_v2_generated_config_service_v2_list_views_sync.py
|
b273c465d3ec976b018c54a7b83e2a4218b81327
|
[
"Apache-2.0"
] |
permissive
|
googleapis/gapic-generator-python
|
73ce9d52f6f5bb2652d49b237b24263d6637b1da
|
4eee26181e8db9fb5144eef5a76f178c1594e48a
|
refs/heads/main
| 2023-09-04T11:12:14.728757 | 2023-09-02T10:34:44 | 2023-09-02T10:34:44 | 129,809,857 | 116 | 65 |
Apache-2.0
| 2023-09-12T18:57:01 | 2018-04-16T21:47:04 |
Python
|
UTF-8
|
Python
| false | false | 1,852 |
py
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListViews
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-logging
# [START logging_v2_generated_ConfigServiceV2_ListViews_sync]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import logging_v2
def sample_list_views():
    """List the views on a Cloud Logging bucket (synchronous sample).

    Issues a ListViews RPC via ConfigServiceV2Client and prints every view
    returned by the pager. The "parent_value" placeholder must be replaced
    with a real resource name before running.
    """
    # Create a client
    client = logging_v2.ConfigServiceV2Client()

    # Initialize request argument(s)
    request = logging_v2.ListViewsRequest(
        parent="parent_value",
    )

    # Make the request
    page_result = client.list_views(request=request)

    # Handle the response
    for response in page_result:
        print(response)
# [END logging_v2_generated_ConfigServiceV2_ListViews_sync]
|
[
"[email protected]"
] | |
73f8dacb0a6fd98a99721e8a113c12641950a990
|
175522feb262e7311fde714de45006609f7e5a07
|
/code/nprd/visualize_PTB.py
|
aab1cc504cd8b84b3cb1e24016f16bf6ca0b9dde
|
[] |
no_license
|
m-hahn/predictive-rate-distortion
|
a048927dbc692000211df09da09ad1ed702525df
|
1ff573500a2313e0a79d68399cbd83970bf05e4d
|
refs/heads/master
| 2020-04-17T13:49:36.961798 | 2019-06-20T12:37:28 | 2019-06-20T12:37:28 | 166,631,865 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,051 |
py
|
# Was called runMemoryManyConfigs_NeuralFlow_Words_English.py
from matplotlib.ticker import MaxNLocator
import math
import subprocess
import random
import os
from paths import LOG_PATH_WORDS
import numpy as np
# Python 2 analysis/plotting script (note the `print >>` statement near the
# end). Loads per-run NPRD estimate files and sample files, keeps only runs
# whose beta rounds to 1, 3 or 5, and plots per-token cross-entropy curves
# for each sample, one figure per sample index.
# Run ids are encoded in the codebook file names: <...>_<id>.ext
ids = [x.split("_")[-1][:-4] for x in os.listdir("/home/user/CS_SCR/CODEBOOKS/") if "nprd" in x]
print(ids)
language = "PTB"
model = "REAL"
ress = []         # (run id, rounded beta, list of 30-line sample blocks)
times = []        # wall-clock time per run, read from the estimates file
epochs_nums = []  # number of epochs per run
for idn in ids:
    # Estimates file layout (by line): args, epoch list, 3 skipped lines, time.
    with open("/home/user/CS_SCR/CODE/predictive-rate-distortion/results/outputs-nprd-words/test-estimates-"+language+"_"+"nprd_words_PTB_saveCodebook.py"+"_model_"+idn+"_"+model+".txt", "r") as inFile:
        args = next(inFile).strip().split(" ")
        epochs = len(next(inFile).strip().split(" "))
        epochs_nums.append(epochs)
        next(inFile)
        next(inFile)
        next(inFile)
        time = float(next(inFile).strip())
        times.append(time)
        print(args)
    # beta is stored as exp(-beta) in the args; keep only runs whose beta is
    # (numerically) an integer in {1, 3, 5}.
    beta = args[-3]
    beta = -math.log(float(beta))
    if abs(beta - round(beta)) > 0.001:
        continue
    if round(beta) not in [1.0, 3.0, 5.0]:
        continue
    dat = []
    # Sample files come in fixed blocks of 30 tab-separated lines; each block
    # must start at token position 0.
    with open("/home/user/CS_SCR/CODE/predictive-rate-distortion/results/nprd-samples/samples_"+idn+".txt", "r") as inFile:
        data = [x.split("\t") for x in inFile.read().strip().split("\n")]
        for i in range(0, len(data), 30):
            dat.append(data[i:i+30])
            assert data[i][0] == '0', data[i]
    # print(len(dat))
    ress.append((idn, round(beta), dat))
print(epochs_nums)
print(times)
#quit()
# Sort runs by beta so the colors/linestyles below correspond to beta 1, 3, 5.
ress = sorted(ress, key=lambda x:x[1])
#print(ress)
import matplotlib
import matplotlib.pyplot as plt
numsOfTexts = [len(x[2]) for x in ress]
print(numsOfTexts)
variations = []  # (sample index, spread of the three curves at position 16)
for j in range(min(numsOfTexts)): #len(ress[0][2])):
    data = ress[0][2][j]
    print(data)
    # Column layout per sample line: position, token, cross-entropy.
    pos = np.asarray([int(x[0]) for x in data])
    char = [x[1] for x in data]
    ys = []
    for i in range(len(ress)):
        ys.append([float(x[2]) for x in ress[i][2][j]])
        ys[-1] = np.asarray(ys[-1])
        print(ys[-1])
    print(ys[0])
    fig, ax = plt.subplots()
    # The first 16 positions appear to be context (cf. char[:16] written out
    # below) — only positions 16+ are plotted. TODO confirm.
    for y, color, style in zip(ys, ["red", "green", "blue"], ["dotted", "dashdot", "solid"]):
        ax.plot(pos[16:], y[16:], color=color, linestyle=style)
    # Spread between the three beta curves at the first plotted position.
    variation = [y[16] for y in ys]
    variation = max(variation) - min(variation)
    variations.append((j, variation))
    plt.subplots_adjust(left=0.03, right=0.99, top=0.99, bottom=0.17)
    fig.set_size_inches(9, 1.7)
    #ax.grid(False)
    plt.xticks(pos[16:], [x.decode("utf-8") for x in char][16:])
    # plt.axvline(x=15.5, color="green")
    ax.grid(False)
    ax.set_ylabel("Cross-Entropy", fontsize=12)
    ax.yaxis.set_major_locator(MaxNLocator(integer=True))
    # figure(figsize=(25,10))
    fileName = "sample_"+str(j)
    fig.savefig("figures/"+fileName+".png", bbox_inches='tight')
    # plt.show()
    plt.gcf().clear()
    print("figures/"+fileName+".png")
    # Save the 16-token context next to the figure (Python 2 print-to-file).
    with open("figures/"+fileName+".txt", "w") as outFile:
        print >> outFile, (" ".join(char[:16]))
print(sorted(variations, key=lambda x:x[1]))
|
[
"[email protected]"
] | |
8043bf4f0fcdecc59ee5421189b23a4884fc8599
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-sblp/sblp_ut=3.5_rd=0.65_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=30/params.py
|
d0a52d0abffd2812cb65883935448f5d49c0ec13
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 251 |
py
|
# Parameters for one scheduling experiment trial (scheduler=RUN, trial=30).
# The file's whole content is this literal dict; presumably read back with
# eval()/literal_eval by the experiment scripts — confirm against the reader.
# Note: several numeric-looking values are intentionally stored as strings.
{'cpus': 4,
 'duration': 30,
 'final_util': '3.528333',
 'max_util': '3.5',
 'periods': 'harmonic-2',
 'release_master': False,
 'res_distr': '0.65',
 'res_nmb': '4',
 'res_weight': '0.04',
 'scheduler': 'RUN',
 'trial': 30,
 'utils': 'uni-medium-3'}
|
[
"[email protected]"
] | |
7e3a97d42210041be00fe78eac7fdc797d8027a2
|
e74e89592d8a3b1a0b465a7b1595708b224362d2
|
/pset_pandas1_wine_reviews/data_cleaning/solutions/p8.py
|
3d5b21f733b7e5400eea8fc4cc02f3214b41120a
|
[
"MIT"
] |
permissive
|
mottaquikarim/pydev-psets
|
016f60f1e9d9a534bd9a66ecde8eb412beee37d1
|
9749e0d216ee0a5c586d0d3013ef481cc21dee27
|
refs/heads/master
| 2023-01-10T11:15:57.041287 | 2021-06-07T23:38:34 | 2021-06-07T23:38:34 | 178,547,933 | 5 | 2 |
MIT
| 2023-01-03T22:28:27 | 2019-03-30T11:09:08 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 564 |
py
|
"""
Cleaning Data VIII - Find Null Values
"""
import numpy as np
import pandas as pd
wine_reviews = pd.read_csv('../../winemag-data-130k.csv')
wine_reviews.rename(columns={'points': 'rating'}, inplace=True)
# Use the below df for these problems:
wine_ratings = wine_reviews[['title', 'country', 'rating', 'price']]
# Return a count of the null values in wine_ratings.
print(wine_ratings.isnull().sum())
"""
title 0
country 63
rating 0
price 8996
"""
# Print out the number of rows in wine_ratings.
print(len(wine_ratings)) # 129971
|
[
"[email protected]"
] | |
d27a3896f5fa3feb5f17cd7861eb6378cabfc5d6
|
867846ed1df7f560ccc473413a70020155f66ad4
|
/writeImageToBinary.py
|
8ebf38106007bbfc86bd7004a67293c4219d32a9
|
[] |
no_license
|
abhineet123/PTF
|
84297bf5aa95320dbc2d34f422f2dd563ff65a58
|
0c63f7f8251af0d70c329b2cef53694db76c1656
|
refs/heads/master
| 2023-08-18T18:34:40.513936 | 2023-08-09T17:28:51 | 2023-08-09T17:28:51 | 157,794,848 | 5 | 1 | null | 2021-05-16T18:48:32 | 2018-11-16T01:24:05 |
MATLAB
|
UTF-8
|
Python
| false | false | 4,088 |
py
|
# from DecompUtils import *
# from distanceGrid import applyFilter
# import time
import os
import cv2
import numpy as np
# from Misc import getParamDict
if __name__ == '__main__':
    # Dump every frame of an image sequence into a single raw .bin file:
    # header = [width, height] as uint32, then each frame's 8-bit grayscale
    # pixels in order. (Python 2 script: print statements, xrange, and the
    # legacy cv2.cv constant namespace.)
    db_root_dir = 'C:/Datasets'
    track_root_dir = '../Tracking Data'
    img_root_dir = '../Image Data'
    dist_root_dir = '../Distance Data'
    track_img_root_dir = '../Tracked Images'

    # NOTE(review): a large commented-out block originally loaded these
    # settings via getParamDict()/readDistGridParams() and sys.argv overrides;
    # it was removed here for readability — recover from version control if
    # command-line configuration is ever needed again.

    # Hard-coded run configuration.
    actor = 'GRAM'
    seq_name = 'idot_1_intersection_city_day_short'
    start_id = 0
    filter_type = 'none'
    kernel_size = 3
    show_img = 1
    print 'actor: ', actor
    # print 'seq_id: ', seq_id
    print 'seq_name: ', seq_name
    # print 'filter_type: ', filter_type
    # print 'kernel_size: ', kernel_size
    src_dir = db_root_dir + '/' + actor + '/Images/' + seq_name
    if not os.path.exists(img_root_dir):
        os.makedirs(img_root_dir)
    # Filtered dumps embed the filter name and kernel size in the file name.
    if filter_type != 'none':
        img_fname = img_root_dir + '/' + seq_name + '_' + filter_type + str(kernel_size) + '.bin'
    else:
        img_fname = img_root_dir + '/' + seq_name + '.bin'
    print 'Reading images from: {:s}'.format(src_dir)
    print 'Writing image binary data to: {:s}'.format(img_fname)
    img_fid = open(img_fname, 'wb')
    file_list = os.listdir(src_dir)
    # print 'file_list: ', file_list
    no_of_frames = len(file_list)
    print 'no_of_frames: ', no_of_frames
    end_id = no_of_frames
    # Frame files are named image000001.jpg, image000002.jpg, ... (1-based);
    # the first frame supplies the dimensions written to the header.
    init_img = cv2.imread(src_dir + '/image{:06d}.jpg'.format(1))
    img_height = init_img.shape[0]
    img_width = init_img.shape[1]
    # np.array([no_of_frames, ], dtype=np.uint32).tofile(img_fid)
    np.array([img_width, img_height], dtype=np.uint32).tofile(img_fid)
    win_name = 'Filtered Image'
    if show_img:
        cv2.namedWindow(win_name)
    for frame_id in xrange(start_id, end_id):
        # print 'frame_id: ', frame_id
        curr_img = cv2.imread(src_dir + '/image{:06d}.jpg'.format(frame_id + 1))
        # Convert colour frames to grayscale; single-channel input passes through.
        if len(curr_img.shape) == 3:
            curr_img_gs = cv2.cvtColor(curr_img, cv2.cv.CV_BGR2GRAY)
        else:
            curr_img_gs = curr_img
        # if filter_type != 'none':
        #     curr_img_gs = applyFilter(curr_img_gs, filter_type, kernel_size)
        curr_img_gs.astype(np.uint8).tofile(img_fid)
        if show_img:
            cv2.imshow(win_name, curr_img_gs)
            # ESC (27) aborts the dump early.
            if cv2.waitKey(1) == 27:
                break
    img_fid.close()
|
[
"[email protected]"
] | |
336b66817aeb69caf5a08e2b80c1beac92d48c6d
|
de01cb554c2292b0fbb79b4d5413a2f6414ea472
|
/algorithms/Easy/1275.find-winner-on-a-tic-tac-toe-game.py
|
b051c0e2d84dffb8192059fd3585ceb77ed909e8
|
[] |
no_license
|
h4hany/yeet-the-leet
|
98292017eadd3dde98a079aafcd7648aa98701b4
|
563d779467ef5a7cc85cbe954eeaf3c1f5463313
|
refs/heads/master
| 2022-12-10T08:35:39.830260 | 2020-09-02T23:12:15 | 2020-09-02T23:12:15 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,576 |
py
|
#
# @lc app=leetcode id=1275 lang=python3
#
# [1275] Find Winner on a Tic Tac Toe Game
#
# https://leetcode.com/problems/find-winner-on-a-tic-tac-toe-game/description/
#
# algorithms
# Easy (52.88%)
# Total Accepted: 17K
# Total Submissions: 32.2K
# Testcase Example: '[[0,0],[2,0],[1,1],[2,1],[2,2]]'
#
# Tic-tac-toe is played by two players A and B on a 3 x 3 grid.
#
# Here are the rules of Tic-Tac-Toe:
#
#
# Players take turns placing characters into empty squares (" ").
# The first player A always places "X" characters, while the second player B
# always places "O" characters.
# "X" and "O" characters are always placed into empty squares, never on filled
# ones.
# The game ends when there are 3 of the same (non-empty) character filling any
# row, column, or diagonal.
# The game also ends if all squares are non-empty.
# No more moves can be played if the game is over.
#
#
# Given an array moves where each element is another array of size 2
# corresponding to the row and column of the grid where they mark their
# respective character in the order in which A and B play.
#
# Return the winner of the game if it exists (A or B), in case the game ends in
# a draw return "Draw", if there are still movements to play return "Pending".
#
# You can assume that moves is valid (It follows the rules of Tic-Tac-Toe), the
# grid is initially empty and A will play first.
#
#
# Example 1:
#
#
# Input: moves = [[0,0],[2,0],[1,1],[2,1],[2,2]]
# Output: "A"
# Explanation: "A" wins, he always plays first.
# "X " "X " "X " "X " "X "
# " " -> " " -> " X " -> " X " -> " X "
# " " "O " "O " "OO " "OOX"
#
#
# Example 2:
#
#
# Input: moves = [[0,0],[1,1],[0,1],[0,2],[1,0],[2,0]]
# Output: "B"
# Explanation: "B" wins.
# "X " "X " "XX " "XXO" "XXO" "XXO"
# " " -> " O " -> " O " -> " O " -> "XO " -> "XO "
# " " " " " " " " " " "O "
#
#
# Example 3:
#
#
# Input: moves = [[0,0],[1,1],[2,0],[1,0],[1,2],[2,1],[0,1],[0,2],[2,2]]
# Output: "Draw"
# Explanation: The game ends in a draw since there are no moves to make.
# "XXO"
# "OOX"
# "XOX"
#
#
# Example 4:
#
#
# Input: moves = [[0,0],[1,1]]
# Output: "Pending"
# Explanation: The game has not finished yet.
# "X "
# " O "
# " "
#
#
#
# Constraints:
#
#
# 1 <= moves.length <= 9
# moves[i].length == 2
# 0 <= moves[i][j] <= 2
# There are no repeated elements on moves.
# moves follow the rules of tic tac toe.
#
#
class Solution:
    def tictactoe(self, moves: 'List[List[int]]') -> str:
        """Return the outcome of a Tic-Tac-Toe game given the move list.

        `moves[i]` is the (row, col) of the i-th move; player A ("X") moves
        on even turns, player B ("O") on odd turns. Returns "A" or "B" if
        that player completed a row/column/diagonal, "Draw" if all 9 squares
        are filled with no winner, otherwise "Pending".

        Note: the annotation is a string literal because `List` is not
        imported in this file (the original annotation would have raised
        NameError at definition time). The original method body was missing
        entirely; this implements the documented contract.
        """
        # Replay the moves onto a 3x3 grid.
        grid = [[""] * 3 for _ in range(3)]
        for turn, (row, col) in enumerate(moves):
            grid[row][col] = "X" if turn % 2 == 0 else "O"

        # Collect every winnable line as a set of its three cell marks:
        # a line is won exactly when its set collapses to {"X"} or {"O"}.
        lines = []
        for i in range(3):
            lines.append({grid[i][0], grid[i][1], grid[i][2]})  # row i
            lines.append({grid[0][i], grid[1][i], grid[2][i]})  # column i
        lines.append({grid[0][0], grid[1][1], grid[2][2]})      # main diagonal
        lines.append({grid[0][2], grid[1][1], grid[2][0]})      # anti-diagonal

        for line in lines:
            if line == {"X"}:
                return "A"
            if line == {"O"}:
                return "B"
        # No winner: full board is a draw, otherwise the game continues.
        return "Draw" if len(moves) == 9 else "Pending"
|
[
"[email protected]"
] | |
e9f6eacfaf01a7fff4da4c15768700dfd006c709
|
423cc7775d1ab9874729ba304d7682a12b4a4d43
|
/plugins/analyzer/previewcomparer.py
|
6cc2539edc04ea1fb6f7350dfbf6e684d05043bc
|
[] |
no_license
|
eyeyunianto/ghiro
|
7ec2dc5ae2b766883da6f26975fd41829336e8f8
|
24ce80244893fc94300e1c4f5e3305bd182d65a6
|
refs/heads/master
| 2020-04-06T04:33:07.155509 | 2015-06-21T21:30:27 | 2015-06-21T21:30:27 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,807 |
py
|
# Ghiro - Copyright (C) 2013-2015 Ghiro Developers.
# This file is part of Ghiro.
# See the file 'docs/LICENSE.txt' for license terms.
import logging
from itertools import izip
from lib.analyzer.base import BaseAnalyzerModule
from lib.utils import str2image
from lib.db import get_file
try:
from PIL import Image
IS_PIL = True
except ImportError:
IS_PIL = False
logger = logging.getLogger(__name__)
class ImageComparer():
    """Image comparator: pixel-wise difference between a stored preview and
    the original image it was extracted from. (Python 2 module: uses izip.)
    """

    @staticmethod
    def calculate_difference(preview, original_image_id):
        """Calculate difference between two images.
        @param preview: preview dict
        @param original_image_id: original image ID
        @return: (difference flag, difference percentage), or None if either
            image cannot be read/resized.
        """
        try:
            i1 = str2image(get_file(original_image_id).read())
        except IOError as e:
            # Best-effort: unreadable original -> no comparison (returns None).
            logger.warning("Comparer error reading image: {0}".format(e))
            return
        # Check if thumb was resized.
        if "original_file" in preview:
            i2 = str2image(get_file(preview["original_file"]).read())
        else:
            i2 = str2image(get_file(preview["file"]).read())
        # Resize the original down to the preview's size so pixels align.
        width, height = i2.size
        try:
            i1 = i1.resize([width, height], Image.ANTIALIAS)
        except IOError as e:
            logger.warning("Comparer error reading image: {0}".format(e))
            return
        # Checks.
        #assert i1.mode == i2.mode, "Different kinds of images."
        #assert i1.size == i2.size, "Different sizes."
        # Calculate difference: sum of absolute per-channel deltas.
        # (izip is lazy; `pairs` is consumed exactly once below.)
        pairs = izip(i1.getdata(), i2.getdata())
        if len(i1.getbands()) == 1:
            # for gray-scale jpegs
            dif = sum(abs(p1-p2) for p1,p2 in pairs)
        else:
            dif = sum(abs(c1-c2) for p1,p2 in pairs for c1,c2 in zip(p1,p2))
        # NOTE(review): ncomponents assumes 3 channels even in the gray-scale
        # branch above, which deflates diff_perc for single-band images —
        # confirm whether that is intended.
        ncomponents = i1.size[0] * i1.size[1] * 3
        # Get diff percentage (0-100, integer-truncated).
        diff_perc = int((dif / 255.0 * 100) / ncomponents)
        # Binary option: flag as "different" at >= 15% divergence.
        if diff_perc >= 15:
            diff = True
        else:
            diff = False
        return diff, diff_perc
class PreviewComparerAnalyzer(BaseAnalyzerModule):
    """Compares previews extracted with the original image."""

    # Analyzer pipeline ordering (lower runs earlier — presumably; confirm
    # against BaseAnalyzerModule's scheduler).
    order = 20

    def check_deps(self):
        # Module is only usable when PIL imported successfully at module load.
        return IS_PIL

    def run(self, task):
        # Compare previews to catch differences. calculate_difference returns
        # None on read errors, in which case the preview is left unannotated.
        if "metadata" in self.results:
            if "preview" in self.results["metadata"]:
                for preview in self.results["metadata"]["preview"]:
                    difference = ImageComparer.calculate_difference(preview, self.results["file_data"])
                    if difference:
                        preview["diff"], preview["diff_percent"] = difference
        return self.results
|
[
"[email protected]"
] | |
a9f0ba50e1273c6a25a49d2e0bba74d1942c67b8
|
b1c7a768f38e2e987a112da6170f49503b9db05f
|
/stockkeeping/migrations/0021_remove_purchase_stockitem.py
|
56aa900ba0363ea26816b298cca975bca2319252
|
[] |
no_license
|
Niladrykar/bracketerp
|
8b7491aa319f60ec3dcb5077258d75b0394db374
|
ca4ee60c2254c6c132a38ce52410059cc6b19cae
|
refs/heads/master
| 2022-12-11T04:23:07.504966 | 2019-03-18T06:58:13 | 2019-03-18T06:58:13 | 176,218,029 | 1 | 0 | null | 2022-12-08T03:01:46 | 2019-03-18T06:27:37 |
JavaScript
|
UTF-8
|
Python
| false | false | 338 |
py
|
# Generated by Django 2.0.6 on 2018-11-02 11:40
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration: drop the Purchase.stockitem field.

    Generated by Django 2.0.6; do not edit by hand.
    """

    dependencies = [
        ('stockkeeping', '0020_auto_20181102_1628'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='purchase',
            name='stockitem',
        ),
    ]
|
[
"[email protected]"
] | |
770326a7c554452415a3a4823c7975bc958ac5bb
|
45a2fef5a35090e2c3794824735dc137553c3d3b
|
/backup/fcards/utils.py
|
a968797ac2ea04b200ab1d427b0368cd1c17ba3c
|
[] |
no_license
|
kris-brown/personal_website
|
9248ec23e2ebab8d820a0c6be70f6fb06a80144f
|
4dadfeba80eaf3f25f87b6f7bed48aa9db6ec8fc
|
refs/heads/master
| 2021-08-28T00:03:07.483092 | 2021-08-09T06:19:56 | 2021-08-09T06:19:56 | 190,462,717 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,164 |
py
|
from typing import List as L, Tuple as T
'''Misc helpful things'''
def flatten(lol:L[list])->list:
    """Concatenate a list of lists into a single flat list (one level deep)."""
    flat = []  # accumulate items from every sublist, preserving order
    for sublist in lol:
        flat.extend(sublist)
    return flat
################################################################################
class Tree(object):
    '''
    Parse a nested bullet structure where nesting is determined by whitespace, e.g.
    - A
        - A1
        - A2
            - A2i
    - B
    '''
    def __init__(self, value : str, children : L['Tree']) -> None:
        self.value = value
        self.children = children

    def __str__(self)->str:
        return self.print(0)

    def __len__(self)->int:
        # A node counts itself plus all of its descendants.
        return 1 + sum(map(len, self.children))

    def showflat(self, _discard : bool = True) -> str:
        '''Discard root and show flattened information (for Anki cards)'''
        if _discard:
            curr = ''
        elif self.value and self.value[0] == '-':
            curr = '\n' + self.value[1:]  # drop the leading bullet dash
        else:
            curr = '\n' + self.value
        return curr + ''.join([c.showflat(_discard=False) for c in self.children])

    def print(self, indent : int) -> str:
        '''Visualize as tree (one line per node, tab-indented by depth).'''
        rest = ''.join([c.print(indent+1) for c in self.children])
        return '\n' + '\t'*indent + self.value + rest

    @classmethod
    def from_str(cls, lines:L[str]) -> 'Tree':
        '''Takes the "content" of an orgmode node (list of strings) and makes a Tree'''
        pairs = [(cls.level(x),x) for x in filter(lambda x: not x.isspace(),lines)]
        try:
            root = Tree(value = 'root', children = cls.parse_children(pairs))
        except ValueError as e:
            # Bug fix: this path previously dropped into pdb.set_trace() and
            # then `assert False`, which hangs any non-interactive run.
            # Report the offending lines and propagate the original error.
            print(e)
            for k, v in pairs:
                print(k, v)
            raise
        return root

    @classmethod
    def parse_children(cls, pairs : L[T[int,str]]) -> L['Tree']:
        '''Recursively parse a list of (indent-level, <content>) pairs'''
        if not pairs:
            return []  # Base case: no more children
        next_val = pairs[0][1].strip()  # our first element is definitely a child
        childlevel = pairs[0][0]        # all direct children share this indent level
        children = []                   # the list that we will return
        next_pairs = []  # type: L[T[int,str]] ## lines that are descendants of the current child
        for i, x in pairs[1:]:
            if i < childlevel:
                raise ValueError('Indentation level incorrectly parsed: ', x)
            elif i > childlevel:
                # Deeper indent: belongs to the current child's subtree.
                next_pairs.append((i, x))
            else:
                # Back at the child level: flush the accumulated child subtree.
                children.append(Tree(value = next_val, children = cls.parse_children(next_pairs)))
                next_val, next_pairs = x.strip(), []  # reset for the next sibling
        # Add the last tree
        children.append(Tree(value=next_val, children = cls.parse_children(next_pairs)))
        return children

    @staticmethod
    def level(astr : str) -> int:
        '''Get indentation level assuming tab spacing = 8'''
        ws = astr[:len(astr) - len(astr.lstrip())]
        return ws.count(' ')+8*ws.count('\t')
|
[
"[email protected]"
] | |
bd298e7985f0b09e4222e354e3f0afc394e96595
|
b47f2e3f3298388b1bcab3213bef42682985135e
|
/experiments/heat-3d/tmp_files/1539.py
|
27d7e85b4987f69ec0aa2f1e52f9247dec53052f
|
[
"BSD-2-Clause"
] |
permissive
|
LoopTilingBenchmark/benchmark
|
29cc9f845d323431e3d40e878cbfc6d1aad1f260
|
52a3d2e70216552a498fd91de02a2fa9cb62122c
|
refs/heads/master
| 2020-09-25T09:45:31.299046 | 2019-12-04T23:25:06 | 2019-12-04T23:25:06 | 225,975,074 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 369 |
py
|
from chill import *
# CHiLL loop-transformation recipe: tile the PolyBench heat-3d stencil kernel.
# Reads the reference kernel.c and writes the transformed source to 1539.c.
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/heat-3d/tmp_files/1539.c')
procedure('kernel_heat_3d')
loop(0)
# Tile statements 0 and 1 (the two Jacobi sweeps) with tile size 8 on each
# spatial loop level. Argument order is presumably
# tile(statement, loop-level, tile-size, control-loop-level) — confirm
# against the CHiLL manual before changing.
tile(0,2,8,2)
tile(0,4,8,4)
tile(0,6,8,6)
tile(1,2,8,2)
tile(1,4,8,4)
tile(1,6,8,6)
|
[
"[email protected]"
] | |
f2a797d1c550dbc9843f6fe14e7ad572536407a7
|
a24b8446639f2157e2ecbdb7c11eda8e4e4344cc
|
/Configurations/UserConfigs/2018_AntiIso/ST_t_topConfig.py
|
82fbbb685a0a6b4499f198945b017c0e1a347268
|
[] |
no_license
|
aloeliger/ReweightScheme
|
dcebc5651094d8d3da65885c59dae4070983624a
|
05c9783fcf8e024fd26a6dbb9b1fbab4aee3c7f4
|
refs/heads/master
| 2021-12-11T16:10:12.881863 | 2021-08-27T21:02:21 | 2021-08-27T21:02:21 | 215,565,834 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,797 |
py
|
import ROOT
from Configurations.Weights.CrossSectionWeightingModule.CrossSectionWeight import crossSectionWeight
from Configurations.Weights.MuIDIsoReweightingModule.MuIDIsoWeight import muIDIsoWeight_2018 as muIDIsoWeight
from Configurations.Weights.MuTrackingWeightModule.MuTrackingWeight import muTrackingWeight_2018 as muTrackingWeight
from Configurations.Weights.PileupWeightingModule.PileupWeight import pileupWeight_2018 as pileupWeight
from Configurations.Weights.TauFakeRateWeightModule.eTauFakeRateWeight import eTauFakeRateWeight_2018 as eTauFakeRateWeight
from Configurations.Weights.TauIDModule.TauIDWeight import tauIDWeight_2018 as tauIDWeight
from Configurations.Weights.TriggerSFModule.TriggerWeight import triggerWeight_2018 as triggerWeight
from Configurations.Weights.bTaggingWeightModule.bTaggingWeight import bTaggingWeight_2018
from Configurations.Weights.PrefiringWeightModule.PrefiringWeight import PrefiringWeighting
from Configurations.ConfigDefinition import ReweightConfiguration
# Reweighting configuration for the 2018 anti-isolated ST_t_top sample.
# NOTE(review): the variable is named EWKConfiguration but configures the
# single-top (t-channel, top) sample — presumably copied from a template;
# confirm the config loader keys off .name, not the variable name.
EWKConfiguration = ReweightConfiguration()
EWKConfiguration.name = "ST_t_top"
EWKConfiguration.inputFile = "/data/aloeliger/SMHTT_Selected_2018_AntiIso_Deep/ST_t_top.root"

# Cross-section weight needs the sample's total event count, read here from
# bin 2 of the eventCount histogram in the input ROOT file.
crossSectionWeight.sample = 'ST_t_top'
crossSectionWeight.year = '2018'
totalEventsFile = ROOT.TFile.Open("/data/aloeliger/SMHTT_Selected_2018_AntiIso_Deep/ST_t_top.root")
crossSectionWeight.totalEvents = totalEventsFile.eventCount.GetBinContent(2)
totalEventsFile.Close()

# Pileup weighting is initialized per sample/year.
pileupWeight.year = '2018'
pileupWeight.sample = 'ST_t_top'
pileupWeight.InitPileupWeightings(pileupWeight)

# Weights applied to this sample; tauIDWeight and PrefiringWeighting are
# deliberately disabled (left commented out).
EWKConfiguration.listOfWeights = [
    crossSectionWeight,
    muIDIsoWeight,
    muTrackingWeight,
    pileupWeight,
    eTauFakeRateWeight,
    #tauIDWeight,
    triggerWeight,
    bTaggingWeight_2018,
    #PrefiringWeighting,
]
|
[
"[email protected]"
] | |
0c75fb6bf1bbf0e8a76928ce29bf5b4f0a014996
|
6a4ebebbe0d7f81efc4f1749054a2ed7242c0e58
|
/setup.py
|
345a9c9073ffb87c82e6fbcc413a8d8703519644
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
skylarker/granary
|
6e192ecd2475febb3585728d5ba7afe34742107d
|
2fd8ef017588b955e78606242ce582849cfd57ac
|
refs/heads/master
| 2020-12-26T21:35:04.155528 | 2016-04-18T18:15:30 | 2016-04-18T18:15:30 | 56,891,160 | 1 | 0 | null | 2016-04-22T23:43:09 | 2016-04-22T23:43:09 | null |
UTF-8
|
Python
| false | false | 1,868 |
py
|
"""setuptools setup module for granary.
Docs:
https://packaging.python.org/en/latest/distributing.html
http://pythonhosted.org/setuptools/setuptools.html
Based on https://github.com/pypa/sampleproject/blob/master/setup.py
"""
import unittest
from setuptools import setup, find_packages
from setuptools.command.test import ScanningLoader
class TestLoader(ScanningLoader):
    """ScanningLoader that makes the App Engine SDK's bundled libraries
    importable (via oauth_dropins.webutil.test) before tests are scanned.
    """
    def __init__(self, *args, **kwargs):
        # Bug fix: super() must name *this* class, not its parent.
        # `super(ScanningLoader, self)` skipped ScanningLoader's own
        # __init__ and dispatched straight to ScanningLoader's base class.
        super(TestLoader, self).__init__(*args, **kwargs)
        # webutil/test/__init__.py makes App Engine SDK's bundled libraries importable.
        import oauth_dropins.webutil.test
# Package metadata and dependencies for the granary distribution.
setup(name='granary',
      version='1.3.1',
      description='Free yourself from silo API chaff and expose the sweet social data foodstuff inside in standard formats and protocols!',
      long_description=open('README.rst').read(),
      url='https://github.com/snarfed/granary',
      packages=find_packages(),
      include_package_data=True,
      author='Ryan Barrett',
      author_email='[email protected]',
      license='Public domain',
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Intended Audience :: Developers',
          'Environment :: Web Environment',
          # NOTE(review): both MIT and Public Domain license classifiers are
          # listed, while license= above says 'Public domain' — confirm which
          # is authoritative.
          'License :: OSI Approved :: MIT License',
          'License :: Public Domain',
          'Programming Language :: Python :: 2',
          'Topic :: Software Development :: Libraries :: Python Modules',
      ],
      keywords='facebook twitter google+ twitter activitystreams html microformats2 mf2 atom',
      install_requires=[
          # Keep in sync with requirements.txt!
          'beautifulsoup4',
          'html2text',
          'jinja2',
          'mf2py>=0.2.7',
          'mf2util>=0.3.3',
          'oauth-dropins>=1.3',
          'requests<2.6.0',  # upper bound pinned; presumably for App Engine compat — verify
      ],
      # Custom loader (defined above) runs setup before scanning granary.test.
      test_loader='setup:TestLoader',
      test_suite='granary.test',
      )
|
[
"[email protected]"
] | |
918f237882bc12ca5169f08d0b2a86dd2b388b12
|
ec00584ab288267a7cf46c5cd4f76bbec1c70a6b
|
/Python/__function/functions1/functions1/23 keyword non-keyword argument.py
|
9adc44c2843f16255ab0ee092696537a2eac3237
|
[] |
no_license
|
rahuldbhadange/Python
|
b4cc806ff23953389c9507f43d817b3815260e19
|
7e162117f1acc12537c7eeb36d6983d804122ff3
|
refs/heads/master
| 2021-06-23T05:04:20.053777 | 2020-01-28T10:34:28 | 2020-01-28T10:34:28 | 217,307,612 | 0 | 0 | null | 2021-06-10T22:44:11 | 2019-10-24T13:35:42 |
Python
|
UTF-8
|
Python
| false | false | 553 |
py
|
# 3. Keyword arguments: during the call, values are passed using the parameter name.
# 4. Non-keyword (positional) arguments: during the call, values are matched by position.
def display(branch,code):
    # Echo both arguments so it is visible which parameter received which value.
    print(branch,code)

display("CSE","05") #non-keyword (positional) arguments
display(branch="ECE",code="04") #keyword arguments (using parameter names)
display(code="02",branch="EEE") #keyword arguments: order does not matter
#display(code="12","IT")  # SyntaxError: positional argument follows keyword argument
#default and non-default parameters relate to the function definition
#keyword and non-keyword arguments relate to the function call
#Note: after a keyword argument, we cannot pass a non-keyword (positional) argument
|
[
"[email protected]"
] | |
d1cc019f002492e4ca2f30241964186934bb36af
|
930309163b930559929323647b8d82238724f392
|
/abc108_b.py
|
c72c059e12f6e5358657caa002cf6e7a6a309c3c
|
[] |
no_license
|
GINK03/atcoder-solvers
|
874251dffc9f23b187faa77c439b445e53f8dfe1
|
b1e7ac6e9d67938de9a85df4a2f9780fb1fbcee7
|
refs/heads/master
| 2021-11-07T14:16:52.138894 | 2021-09-12T13:32:29 | 2021-09-12T13:32:29 | 11,724,396 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 162 |
py
|
# Given two adjacent vertices of a square in counter-clockwise order,
# reconstruct the remaining two vertices (AtCoder ABC108 B).
x1, y1, x2, y2 = map(int, input().split())
# Edge vector from vertex 1 to vertex 2; rotating it 90 degrees CCW
# ((dx, dy) -> (-dy, dx)) walks along the next side of the square.
dx, dy = x2 - x1, y2 - y1
x3, y3 = x2 - dy, y2 + dx
x4, y4 = x3 - dx, y3 - dy
print("%d %d %d %d" % (x3, y3, x4, y4))
|
[
"[email protected]"
] | |
c7c1943a417de7573e5aebf77ae57a09db5008a5
|
3b89c0a97ac6b58b6923a213bc8471e11ad4fe69
|
/python/CodingExercises/LeetCode1.py
|
86ca7efb65730bbd49152c8028c24b15a168c256
|
[] |
no_license
|
ksayee/programming_assignments
|
b187adca502ecf7ff7b51dc849d5d79ceb90d4a6
|
13bc1c44e1eef17fc36724f20b060c3339c280ea
|
refs/heads/master
| 2021-06-30T07:19:34.192277 | 2021-06-23T05:11:32 | 2021-06-23T05:11:32 | 50,700,556 | 1 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 747 |
py
|
'''
1. Two Sum
Given an array of integers, return indices of the two numbers such that they add up to a specific target.
You may assume that each input would have exactly one solution, and you may not use the same element twice.
Example:
Given nums = [2, 7, 11, 15], target = 9,
Because nums[0] + nums[1] = 2 + 7 = 9,
return [0, 1].
'''
def LeetCode1(ary, k):
    """Return all index pairs (i, j) with i < j and ary[i] + ary[j] == k.

    Single pass with a value -> first-index map, so the scan is O(n) time
    and O(n) extra space.

    Bug fix: the original appended (current_index, earlier_index), i.e.
    (1, 0) for the docstring's example, whereas the problem statement
    expects the earlier index first ([0, 1]).

    :param ary: list of integers to search.
    :param k: target sum.
    :return: list of (i, j) tuples, earlier index first, in scan order.
    """
    seen = {}       # value -> index where that value was first seen
    pairs = []
    for j, value in enumerate(ary):
        complement = k - value
        if complement in seen:
            # Earlier index first, matching the expected [0, 1] ordering.
            pairs.append((seen[complement], j))
        else:
            seen[value] = j
    return pairs
def main():
    """Run the worked example from the problem statement and print the pairs."""
    sample, target = [2, 7, 11, 15], 9
    print(LeetCode1(sample, target))


if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
0c7109401894b8ab6fa958daf9320f6f6999c573
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03231/s342511104.py
|
c53bcfef1be5b00fe39ad9752b5ac05a7a1bf748
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 239 |
py
|
from math import gcd

# S is stretched to length L = lcm(N, M) with its characters at every
# (L/N)-th slot, and T at every (L/M)-th slot. The two strings collide
# exactly at the positions i of S where M*i is a multiple of N; any
# mismatch there makes the merge impossible (-1), otherwise L is the answer.
N, M = map(int, input().split())
S = input()
T = input()
lcm = N * M // gcd(N, M)
for i in range(N):
    j, remainder = divmod(M * i, N)
    if remainder == 0 and S[i] != T[j]:
        print(-1)
        exit()
print(lcm)
|
[
"[email protected]"
] | |
3acd2877be2d35889598ed2111ffaffb3f802be0
|
4b434c6af1d205e33941289211159dfde865e38e
|
/con.Bmaml.eq/train.py
|
fdc88d237727a3c3e47393deafa25044993743e3
|
[] |
no_license
|
a1600012888/BMAML
|
3b2a7f264ed13ef598cc3677d18714c4f8354176
|
4802a917d8061011be9a2b09174598216812cc58
|
refs/heads/master
| 2020-04-14T19:10:40.363219 | 2019-01-16T17:03:18 | 2019-01-16T17:03:18 | 164,047,888 | 9 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,915 |
py
|
import torch
from collections import OrderedDict
from tqdm import tqdm
from utils import AvgMeter
from torch.nn.utils import vector_to_parameters, parameters_to_vector
def TrainOneTask(Task, M, SVGD, optimizer, DEVICE, num_of_step = 3, step_size = 1e-3):
    """Meta-train on a single task with plain (non-chaser) BMAML.

    Runs ``num_of_step`` SVGD inner-loop updates of the particle set ``M`` on
    the task's training split, then backpropagates the negative test-split
    log-density through the adapted particles and takes one meta-optimizer step.

    Args:
        Task: tuple ``(X, Y, Xtest, Ytest, std)`` of task tensors.
        M: list of particles; each particle is an iterable of parameter tensors.
        SVGD: sampler object exposing ``NablaLogP``, ``InitMomentumUpdaters``
            and ``step`` (project type — see SVGD module).
        optimizer: torch optimizer over the meta-parameters.
        DEVICE: torch device the task tensors are moved to.
        num_of_step: number of SVGD inner-adaptation steps.
        step_size: SVGD inner-loop step size.

    Returns:
        OrderedDict with the average train log-density before/after adaptation
        and the final test-split log-density (Python floats).
    """
    X, Y, Xtest, Ytest, std = Task
    X = X.to(DEVICE)
    Y = Y.to(DEVICE)
    Xtest = Xtest.to(DEVICE)
    Ytest = Ytest.to(DEVICE)
    std = std.to(DEVICE) * 100 # * 100 to stablize
    SVGD.NablaLogP.update(X, Y, std)
    SVGD.InitMomentumUpdaters()
    # Average train log-density of the initial particles (monitoring only).
    with torch.no_grad():
        start_logp = 0
        for paramsvec in M:
            start_logp = start_logp + SVGD.NablaLogP(True, paramsvec, ret_grad=False)
        start_logp = start_logp / len(M)
    # Inner-loop adaptation; retain_graph=True so the meta-gradient can flow
    # back through the SVGD updates.
    for i in range(num_of_step):
        M = SVGD.step(M, retain_graph = True, step_size = step_size)
    # Average train log-density after adaptation (monitoring only).
    with torch.no_grad():
        end_logp = 0
        for paramsvec in M:
            end_logp = end_logp + SVGD.NablaLogP(True, paramsvec, ret_grad = False)
        end_logp = end_logp / len(M)
    # Meta-objective: maximize the adapted particles' log-density on the
    # held-out test split (so the loss is its negation).
    SVGD.NablaLogP.update(Xtest, Ytest, std)
    logp = 0
    for paramsvec in M:
        logp = logp + SVGD.NablaLogP(True, paramsvec, ret_grad=False)
    logp = logp / len(M)
    loss = logp * -1.0
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    ret_dic = OrderedDict()
    ret_dic['start_logp_train'] = start_logp.item()
    ret_dic['end_logp_train'] = end_logp.item()
    ret_dic['end_logp_joint'] = logp.item()
    return ret_dic
def TrainOneTaskWithChaserLoss(Task, M, SVGD, optimizer, DEVICE, num_of_step = 3, step_size = 1e-3):
    """Meta-train on one task using the BMAML chaser loss.

    The particle set ``M`` is adapted on the train split (the "chaser"), while
    a detached copy is adapted further on train+test (the "leader", a stand-in
    for the true posterior). The meta-loss is the mean squared distance between
    chaser and leader particle vectors.

    Args are as in :func:`TrainOneTask`. Returns the same OrderedDict plus
    'true_logp_joint' (leader log-density) and 'chaser_loss' entries.
    """
    optimizer.zero_grad()
    X, Y, Xtest, Ytest, std = Task
    X = X.to(DEVICE)
    Y = Y.to(DEVICE)
    Xtest = Xtest.to(DEVICE)
    Ytest = Ytest.to(DEVICE)
    std = std.to(DEVICE) * 100 # * 100 to stablize
    SVGD.NablaLogP.update(X, Y, std)
    SVGD.InitMomentumUpdaters()
    # Compute the LogP for initial particles (For hyper-param tuning)
    with torch.no_grad():
        start_logp = 0
        for paramsvec in M:
            start_logp = start_logp + SVGD.NablaLogP(True, paramsvec, ret_grad=False)
        start_logp = start_logp / len(M)
    # Inner fit (chaser): adapt on the train split with graph retained so the
    # meta-gradient can flow through these updates.
    for i in range(num_of_step):
        M = SVGD.step(M, retain_graph = True, step_size = step_size)
    # Compute the LogP of the training set after the fitting (For hyper-param tuning)
    with torch.no_grad():
        end_logp = 0
        for paramsvec in M:
            end_logp = end_logp + SVGD.NablaLogP(True, paramsvec, ret_grad = False)
        end_logp = end_logp / len(M)
    # Switch the target density to the combined train+test set.
    Xtrain_and_test = torch.cat((X, Xtest))
    Ytrain_and_test = torch.cat((Y, Ytest))
    SVGD.NablaLogP.update(Xtrain_and_test, Ytrain_and_test, std)
    SVGD.InitMomentumUpdaters()
    # Compute the LogP of the whole set after the fitting (For hyper-param tuning)
    with torch.no_grad():
        logp = 0
        for paramsvec in M:
            logp = logp + SVGD.NablaLogP(True, paramsvec, ret_grad=False)
        logp = logp / len(M)
    # Approximate the true prior: leader particles start from detached copies
    # of the chaser so no gradient flows back through the leader's updates.
    M_true = []
    for paramsvec in M:
        m = torch.nn.ParameterList([torch.nn.Parameter(p.detach()) for p in paramsvec])
        #m = [p.detach() for p in paramsvec]
        M_true.append(m)
    #M_true = SVGD.step(M, retain_graph=False, step_size=step_size)
    for i in range(num_of_step):
        M_true= SVGD.step(M_true, retain_graph=False, step_size=step_size)
    # Chaser loss: squared Euclidean distance between each chaser particle and
    # its (detached) leader counterpart, averaged over particles.
    chaser_loss = 0
    for paramsvec, paramsvec_true in zip(M, M_true):
        vec = parameters_to_vector(paramsvec)
        vec_true = parameters_to_vector(paramsvec_true).detach()
        chaser_loss = chaser_loss + torch.dot((vec - vec_true),(vec - vec_true) )
        #for param, param_true in zip(paramsvec, paramsvec_true):
        #    chaser_loss = chaser_loss + torch.mean((param - param_true.detach()) ** 2)
    chaser_loss = chaser_loss / len(M)
    # Compute the true LogP of the whole set (For hyper-param tuning)
    with torch.no_grad():
        true_logp = 0
        for paramsvec in M_true:
            true_logp = true_logp + SVGD.NablaLogP(True, paramsvec, ret_grad=False)
        true_logp = true_logp / len(M)
    chaser_loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    ret_dic = OrderedDict()
    ret_dic['start_logp_train'] = start_logp.item()
    ret_dic['end_logp_train'] = end_logp.item()
    ret_dic['end_logp_joint'] = logp.item()
    ret_dic['true_logp_joint'] = true_logp.item()
    ret_dic['chaser_loss'] = chaser_loss.item()
    return ret_dic
def test(TaskLoader, M, SVGD, DEVICE, num_of_step = 3, step_size = 1e-3):
    '''
    test for continious
    '''
    # Validation over 100 sampled task sequences: for each consecutive pair of
    # steps, adapt the particles on step j's train split and score them on
    # step j+1's test split (continual-learning style evaluation).
    raw_M = M
    LogP = AvgMeter()
    pbar = tqdm(range(100))
    for i in pbar:
        task = next(TaskLoader)
        for j in range(len(task)-1):
            X, Y, Xtest, Ytest, std = task[j]
            X_next, Y_next, Xtest_next, Ytest_next, std_next = task[j+1]
            X = X.to(DEVICE)
            Y = Y.to(DEVICE)
            #Xtest = Xtest.to(DEVICE)
            #Ytest = Ytest.to(DEVICE)
            #std = std.to(DEVICE) * 100 # * 100 to stablize
            # Evaluate on the *next* step's test split, not the current one.
            Xtest = Xtest_next.to(DEVICE)
            Ytest = Ytest_next.to(DEVICE)
            std = std_next.to(DEVICE) * 100 # * 100 to stablize
            SVGD.NablaLogP.update(X, Y, std)
            SVGD.InitMomentumUpdaters()
            #Mt = SVGD.step(M, retain_graph=False, step_size=step_size)
            for tt in range(num_of_step):
                M = SVGD.step(M, retain_graph = False, step_size = step_size )#/ (len(task) -1 ))
            SVGD.NablaLogP.update(Xtest, Ytest, std)
            with torch.no_grad():
                logp = 0
                for paramsvec in M:
                    logp = logp + SVGD.NablaLogP(True, paramsvec, ret_grad=False)
                logp = logp / len(M)
            LogP.update(logp.item())
        pbar.set_description("Running Validation")
        pbar.set_postfix({'Logp_test':LogP.mean})
    # NOTE(review): this only rebinds the local name back to the initial
    # particle list — confirm that SVGD.step returns new particle objects,
    # otherwise the caller's particles were mutated during validation.
    M = raw_M
    return LogP.mean
|
[
"[email protected]"
] | |
67b0a46a7d02e459b2ca4a9e9d9c5635591b21bf
|
b659e99f89cf17ae886857383cb5b708847fe3f1
|
/gettingStarted/problem7.py
|
8402c5ac64f20f3cd28685736d51b82d10eddaae
|
[] |
no_license
|
nitheeshmavila/practice-python
|
bea06cc4b2b9247b926e07fd5a3987552e531242
|
f54bf8934a4cf160cdfc9dc43176f1eea3bc7a41
|
refs/heads/master
| 2021-07-03T17:24:29.450939 | 2021-06-16T08:40:48 | 2021-06-16T08:40:48 | 100,113,256 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 443 |
py
|
'''
Problem 7: How many multiplications are performed when each of the following lines(line 1 and line 2) of code is executed?
'''
noofCalls = 0
def square(n):
print(n)
global noofCalls
noofCalls += 1
return n*n
def printCalls():
print('no of multiplications performed:',noofCalls)
print(square(5)) # line1
printCalls()
print(square(2*5)) #line2
printCalls()
'''
output
-----
no of multiplications performed:1
'''
|
[
"[email protected]"
] | |
dd70383bd799a8f104e751a763ba69d1a5ff85be
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03330/s307392217.py
|
de40af9fdc43a32599ce03bad31896ab49cb00ac
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 787 |
py
|
def main():
    """Minimal-cost repaint of an N x N grid (AtCoder problem p03330).

    Cells are grouped into three classes by (row + col) % 3; every cell in a
    class must end up the same color, and the three classes must use three
    distinct colors. Tries every ordered choice of 3 of the C colors and
    prints the cheapest total repaint cost.
    """
    from itertools import permutations
    N, C = map(int, input().split())
    # change_cost[a][b]: cost of repainting one cell from color a to color b.
    change_cost = [[int(x) for x in input().split()] for _ in range(C)]
    # Colors are shifted to 0-based here.
    init_color = [[int(x) - 1 for x in input().split()] for _ in range(N)]
    # ctr[p][c]: number of cells of class p currently painted color c.
    ctr = [[0] * C for _ in range(3)]
    for r in range(N):
        for c in range(N):
            p = (r + c) % 3
            color = init_color[r][c]
            ctr[p][color] += 1
    # Upper bound: max cost (1000) * max cell count (500 * 500), plus one.
    mi = 1000 * 500 * 500 + 1
    for perm in permutations(range(C), r=3):
        it = iter(perm)
        t = 0
        for p in range(3):
            color_to_be = next(it)
            # Cost of repainting every cell of class p to color_to_be.
            for color, count in enumerate(ctr[p]):
                t += change_cost[color][color_to_be] * count
        mi = min(mi, t)
    print(mi)

if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
390906999c1c0e7466b96f59d5a0c7e6cc9ab7d4
|
986d78fdcb40f4ee7db15bafc77070c087d16b63
|
/studies/MultiBoomSparMass_v2/point_design.py
|
f0268c72689269395046cb2711265a992c71d693
|
[
"MIT"
] |
permissive
|
hdolfen/AeroSandbox
|
8578b5e36b9a4be69801c1c9ad8819965f236edb
|
4c48690e31f5f2006937352a63d653fe268c42c3
|
refs/heads/master
| 2023-01-20T15:36:58.111907 | 2020-11-24T13:11:44 | 2020-11-24T13:11:44 | 313,655,155 | 0 | 0 |
MIT
| 2020-11-24T13:11:46 | 2020-11-17T15:05:02 | null |
UTF-8
|
Python
| false | false | 1,885 |
py
|
### Imports
from aerosandbox.structures.beams import *
import copy

# Number of structural booms supporting the wing; only the single-boom
# configuration is active here.
n_booms = 1
# n_booms = 2
# load_location_fraction = 0.50
# n_booms = 3
# load_location_fraction = 0.60

mass = 80 * 6  # total supported mass [kg] — presumably 6 x 80 kg; confirm
span = 7.3     # wing span [m] — TODO confirm units

### Set up problem
opti = cas.Opti()
# Half-span tube spar model; the other half is accounted for by symmetry
# (spar_mass doubles the result below).
beam = TubeBeam1(
    opti=opti,
    length=span / 2,
    points_per_point_load=100,
    diameter_guess=10,
    thickness=1e-3,
    bending=True,
    torsion=False,
    max_allowable_stress=570e6,
)
lift_force = 9.81 * mass  # total lift must balance weight [N]
# load_location = opti.variable()
# opti.set_initial(load_location, 12)
# opti.subject_to([
#     load_location > 1,
#     load_location < beam.length - 1,
# ])
assert (n_booms == np.array([1,2,3])).any()
# NOTE(review): load_location_fraction is only defined in the commented-out
# configuration lines above, so this branch raises NameError for n_booms 2/3.
if n_booms == 2 or n_booms == 3:
    load_location = beam.length * load_location_fraction
    beam.add_point_load(location = load_location, force = -lift_force / n_booms)
# Elliptical lift distribution over the half-span carries half the lift.
beam.add_elliptical_load(force=lift_force / 2)
beam.setup()
# Constraints (in addition to stress)
opti.subject_to([
    # beam.u[-1] < 2, # tip deflection. Source: http://web.mit.edu/drela/Public/web/hpa/hpa_structure.pdf
    # beam.u[-1] > -2 # tip deflection. Source: http://web.mit.edu/drela/Public/web/hpa/hpa_structure.pdf
    beam.du * 180 / cas.pi < 10, # local dihedral constraint
    beam.du * 180 / cas.pi > -10, # local anhedral constraint
    cas.diff(beam.nominal_diameter) < 0, # manufacturability
])
# # Zero-curvature constraint (restrict to conical tube spars only)
# opti.subject_to([
#     cas.diff(cas.diff(beam.nominal_diameter)) == 0
# ])
# Objective: minimize half-spar structural mass.
opti.minimize(beam.mass)
p_opts = {}
s_opts = {}
s_opts["max_iter"] = 1e6 # If you need to interrupt, just use ctrl+c
# s_opts["mu_strategy"] = "adaptive"
opti.solver('ipopt', p_opts, s_opts)
sol = opti.solve()
beam_sol = copy.deepcopy(beam).substitute_solution(sol)
spar_mass = beam_sol.mass * 2  # both half-spans
# Run a sanity check
beam_sol.draw_bending()
print("Spar mass:", spar_mass)
|
[
"[email protected]"
] | |
f53ed7447917dec09d5d66ad99297a866cab65af
|
78f3fe4a148c86ce9b80411a3433a49ccfdc02dd
|
/2018/11/graphics/elex18-all-suburb-map-20181119/graphic_config.py
|
006fc1be9c13a6867f9c6636d339a291b2f137a6
|
[] |
no_license
|
nprapps/graphics-archive
|
54cfc4d4d670aca4d71839d70f23a8bf645c692f
|
fe92cd061730496cb95c9df8fa624505c3b291f8
|
refs/heads/master
| 2023-03-04T11:35:36.413216 | 2023-02-26T23:26:48 | 2023-02-26T23:26:48 | 22,472,848 | 16 | 7 | null | null | null | null |
UTF-8
|
Python
| false | false | 305 |
py
|
#!/usr/bin/env python
import base_filters
COPY_GOOGLE_DOC_KEY = '105w9FOQjFxe2xS_gA8rB6fXNWs-Tlyr4Jgu3icfRJgI'
USE_ASSETS = False
# Use these variables to override the default cache timeouts for this graphic
# DEFAULT_MAX_AGE = 20
# ASSETS_MAX_AGE = 300
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
|
[
"[email protected]"
] | |
734bf560f6432a6a310f7a443c030f24bb698856
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/358/usersdata/288/102525/submittedfiles/estatistica.py
|
9fdd68752a0b6be1cd5aa630f88df85ada3bc87a
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 817 |
py
|
# -*- coding: utf-8 -*-
def media(lista):
    """Return the arithmetic mean of the numbers in `lista`."""
    return sum(lista) / len(lista)
#Baseado na função acima, escreva a função para calcular o desvio padrão de uma lista
def desvio_padrao(lista):
    """Return the sample standard deviation of `lista`.

    Uses Bessel's correction (divides by len(lista) - 1), so `lista` must
    contain at least two values.

    Bug fixes vs. the original: the divisor was the module-global `n`
    (the row length read from input) rather than len(lista), which broke
    for any list of a different length; and the mean was recomputed once
    per element (O(n**2)) — it is now computed a single time.
    """
    m = sum(lista) / len(lista)  # mean, computed once
    soma = sum((m - x) ** 2 for x in lista)
    return (soma / (len(lista) - 1)) ** 0.5
#Por último escreva o programa principal, que pede a entrada e chama as funções criadas.
# Main program: read the matrix dimensions, fill the matrix from user input,
# then print the mean and sample standard deviation of each row.
# NOTE(review): the prompts label m as "colunas" and n as "linhas", but the
# code builds m rows of n values each — the labels look swapped; confirm.
m = int(input("Digite a quantidade de colunas: "))
n = int(input("Digite a quantidade de linhas: "))
matriz = []
for i in range(0, m, 1):
    linha = []
    for j in range(0, n, 1):
        linha.append(int(input("Digite o %d numero da matriz: " % (j + 1))))
    matriz.append(linha)
# Bug fix: the original iterated range(n) here, which raises IndexError
# whenever n > m (matriz only has m rows) and skips rows when n < m.
for linha in matriz:
    print(media(linha))
    print("%.2f" % (desvio_padrao(linha)))
|
[
"[email protected]"
] | |
2d527612149fb4de87f1e28e4faa947f02b7d21c
|
407ca85cd6051a50884f38bb0514a6301f8e7101
|
/Consolidated/POM/process_igd.py
|
95e19baa709e2dfd1c4abcd641e5a4c6d49fe827
|
[] |
no_license
|
vivekaxl/MOLearner
|
5ae4f40027b814ae5b20aaaeb255d6041505c0b9
|
236bf61e8ee1663eabcd73f355070022f908acfa
|
refs/heads/master
| 2021-01-23T01:12:30.836318 | 2017-04-27T05:54:39 | 2017-04-27T05:54:39 | 85,847,238 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,027 |
py
|
from __future__ import division
import pickle
import os
from sk import rdivDemo
pickle_files = [f for f in os.listdir(".") if ".py" not in f]
content = pickle.load(open(pickle_files[0]))
problems = content.keys()
prob = {}
for problem in problems:
al2 = pickle.load(open('al2_POM.p'))
al = pickle.load(open('al_POM.p'))
mmre = pickle.load(open('mmre_POM.p'))
nsgaii = pickle.load(open('nsgaii_POM.p'))
rank = pickle.load(open('rank_POM.p'))
spea2 = pickle.load(open('spea2_POM.p'))
sway5 = pickle.load(open('SWAY5_POM.p'))
lists = list()
lists.append(['AL2'] + al2[problem]['igd'])
lists.append(['AL'] + al[problem]['igd'])
lists.append(['MMRE'] + mmre[problem]['igd'])
lists.append(['NSGAII'] + nsgaii[problem]['igd'])
lists.append(['Rank'] + rank[problem]['igd'])
lists.append(['SPEA2'] + spea2[problem]['igd'])
lists.append(['SWAY5'] + sway5[problem]['igd'])
rdivDemo( problem.replace('_', '\_'), "", lists, globalMinMax=False,
isLatex=True)
|
[
"[email protected]"
] | |
161618add3f39c9fe876a8d7b56a02309bb09785
|
2c68f9156087d6d338373f9737fee1a014e4546b
|
/src/vmware/azext_vmware/vendored_sdks/models/tracked_resource_py3.py
|
27620df2d70b14816bdf6d808642b7b81ad668c9
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
anpaz/azure-cli-extensions
|
8b0d4071c49840da9883f13cb0fd1f4515246ee0
|
847fd487fe61e83f2a4163a9393edc9555267bc2
|
refs/heads/master
| 2023-04-23T17:22:53.427404 | 2021-01-29T17:48:28 | 2021-01-29T18:01:33 | 257,394,204 | 2 | 0 |
MIT
| 2021-01-28T10:31:07 | 2020-04-20T20:19:43 |
Python
|
UTF-8
|
Python
| false | false | 1,447 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .resource_py3 import Resource


class TrackedResource(Resource):
    """The resource model definition for a ARM tracked top level resource.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource ID.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location
    :type location: str
    :param tags: Resource tags
    :type tags: dict[str, str]
    """

    # Server-populated fields: marked read-only so they are never serialized
    # into outgoing requests.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    # Maps Python attribute names to wire-format keys and msrest type strings.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(self, *, location: str=None, tags=None, **kwargs) -> None:
        super(TrackedResource, self).__init__(**kwargs)
        self.location = location
        self.tags = tags
|
[
"[email protected]"
] | |
b15f982248a07bd81468603028e04993dab62e2c
|
948f0a1ccee30084b5e6e9b1043bd1681d2ad38f
|
/app/1.2.py
|
11b5df0c3846432a5436737d1486af287a9799af
|
[
"MIT"
] |
permissive
|
filangelos/random-forest
|
41454e934cf72cf1480cf5c001d569e629f578ac
|
0fc7a4f74b1120f3e527e824abc1de1aa32f2b18
|
refs/heads/master
| 2021-09-09T13:53:30.982028 | 2018-03-16T18:38:36 | 2018-03-16T18:38:36 | 121,535,264 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,429 |
py
|
# EXECUTION TIME: 49s
# Python 3 ImportError
# Study 1.2: compare weak-learner split functions on the Toy_Spiral dataset —
# information gain under data sparsity, training time/memory, and the effect
# of the number of candidate splits.
import sys
sys.path.append('.')
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# prettify plots
plt.rcParams['font.family'] = 'Times New Roman'
sns.set_style({"xtick.direction": "in", "ytick.direction": "in"})
b_sns, g_sns, r_sns, p_sns, y_sns, l_sns = sns.color_palette("muted")
import src as ya
from src.struct import SplitNodeParams
from src.struct import ForestParams
np.random.seed(0)
# fetch data
data_train, data_query = ya.data.getData('Toy_Spiral')
N, D = data_train.shape
###########################################################################
# Split functions Comparison and Sparsity
###########################################################################
# number of splits
numSplit = 10
# weak learners
kernels = ['axis-aligned', 'linear', 'quadratic', 'cubic']
# Vary the bootstrap fraction to probe behavior under data sparsity.
for frac in [1.00, 0.50, 0.25, 0.10]:
    # random dataset (bootstrap sample with replacement)
    idx = np.random.choice(range(N), int(N*frac), True)
    # root node
    root = ya.tree.Node(idx=idx, t=np.nan, dim=-2, prob=[])
    for kernel in kernels:
        # reset seed so every kernel sees identical random split candidates
        np.random.seed(0)
        # get information gain (figure saved as a side effect)
        _ = ya.tree.splitNode(data_train,
                              root, SplitNodeParams(numSplit, kernel),
                              savefig_path='1.2/%s_%.2f' % (kernel, frac))
###########################################################################
# Kernel Complexity
###########################################################################
# number of experiments per kernel
M = 10
# execution time
runtime = pd.DataFrame(columns=kernels, index=range(M))
# memory
memory = pd.DataFrame(columns=kernels, index=range(M))
for kernel in kernels:
    # repetitions
    for j in range(M):
        # start time
        t0 = time.time()
        _forest = ya.tree.growForest(data_train, ForestParams(
            num_trees=10, max_depth=5, weak_learner=kernel
        ))
        # end time
        runtime.loc[j, kernel] = time.time() - t0
        # object memory size (shallow — sys.getsizeof does not follow refs)
        memory.loc[j, kernel] = sys.getsizeof(_forest)
# figure
fig, axes = plt.subplots(ncols=2, figsize=(12.0, 3.0))
# execution time
run = runtime.mean().values
# NOTE(review): the bars are scaled by ad-hoc factors (1 + 0.15*i) and
# (1 + 0.1*i) below, which distorts the measured values per kernel —
# confirm this offset is intentional and not a leftover tweak.
axes[0].bar(range(len(runtime.columns)),
            [run[i]*(1+0.15*i) for i in range(len(run))],
            color=sns.color_palette("muted"))
axes[0].set_xticks(range(len(runtime.columns)))
axes[0].set_xticklabels(runtime.columns)
axes[0].set_title("Time Complexity of Weak Learners")
axes[0].set_xlabel("Weak Learner")
axes[0].set_ylabel("Training Time (s)")
# memory complexity
mem = memory.mean().values
axes[1].bar(range(len(memory.columns)),
            [mem[i]*(1+0.1*i) for i in range(len(mem))],
            color=sns.color_palette("muted"))
axes[1].set_xticks(range(len(memory.columns)))
axes[1].set_xticklabels(memory.columns)
axes[1].set_title("Memory Complexity of Weak Learners")
axes[1].set_xlabel("Weak Learner")
axes[1].set_ylabel("Memory Size (byte)")
fig.tight_layout()
fig.savefig('assets/1.2/complexity_kernel.pdf',
            format='pdf',
            dpi=300,
            transparent=True,
            bbox_inches='tight',
            pad_inches=0.01)
###########################################################################
# `numSplit` vs weak-learners
###########################################################################
# random dataset
idx = np.random.choice(range(N), N, True)
# root node
root = ya.tree.Node(idx=idx, t=np.nan, dim=-2, prob=[])
# range of number of splits
numSplits = [1, 5, 10, 25, 50, 100, 1000]
# weak learners
kernels = ['axis-aligned', 'linear', 'quadratic', 'cubic']
IGS = pd.DataFrame(columns=kernels, index=numSplits)
for j, numSplit in enumerate(numSplits):
    # weak-learners
    for kernel in kernels:
        # reset seed
        np.random.seed(0)
        # get information gain
        _, _, _, ig = ya.tree.splitNode(data_train,
                                        root,
                                        SplitNodeParams(numSplit, kernel))
        IGS.loc[numSplit, kernel] = ig
# table to be used for report
print('\n', IGS.to_latex(), '\n')
IGS.to_csv('assets/1.2/information_gain_vs_weak_learners.csv')
# we could also generate a qualitative comparison with a matrix
# of decision boundaries and IGs
# reference: Figure 4 from https://github.com/sagarpatel9410/mlcv/blob/master/CW1/report/mlcv.pdf
|
[
"[email protected]"
] | |
45195cd9a511fdfd4e923e24cec6b203242b4440
|
5d5365a73e81ccf71c73b9d86eb070841f1e0001
|
/backend/wallet/admin.py
|
bee6e39eb2e35074d6224625dcf762cc55d21246
|
[] |
no_license
|
crowdbotics-apps/yjjhffhg-22011
|
3c908901c5fa930df11d6af17471a39a7e3b1dd9
|
c5649303aef6b69f515c4526df8b43ee82212c12
|
refs/heads/master
| 2023-01-12T02:01:16.200496 | 2020-10-27T15:36:39 | 2020-10-27T15:36:39 | 307,746,507 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 382 |
py
|
from django.contrib import admin

from .models import (
    PaymentTransaction,
    TaskerPaymentAccount,
    TaskerWallet,
    PaymentMethod,
    CustomerWallet,
)

# Expose every wallet/payment model in the Django admin with the default
# ModelAdmin options (same registration order as before).
for _model in (
    TaskerWallet,
    PaymentMethod,
    TaskerPaymentAccount,
    CustomerWallet,
    PaymentTransaction,
):
    admin.site.register(_model)
|
[
"[email protected]"
] | |
30e1a84c06ca00940832ccc37ecb9ec95c660bef
|
f1c0ce462b185f7b633acb04ee8a85fcda87c748
|
/tests/ui/help/test_application_help.py
|
937e44be50886b1b7c39936744091d0878f09a36
|
[
"MIT"
] |
permissive
|
cole/clikit
|
056c5f388043a43971a633470122b291fb51d23f
|
bdb286672f93e1ff7df1d864fb0751476e034d57
|
refs/heads/master
| 2020-09-08T12:11:05.287042 | 2019-11-12T05:02:42 | 2020-02-22T22:19:49 | 221,129,623 | 1 | 1 |
MIT
| 2019-11-12T04:27:10 | 2019-11-12T04:27:09 | null |
UTF-8
|
Python
| false | false | 5,056 |
py
|
from clikit import ConsoleApplication
from clikit.api.args import Args
from clikit.api.args.format import ArgsFormat
from clikit.api.args.format import Option
from clikit.api.config import ApplicationConfig
from clikit.args import ArgvArgs
from clikit.ui.help import ApplicationHelp
def test_render(io):
    """Full help page: usage, arguments, global options, and command list."""
    config = ApplicationConfig("test-bin")
    config.set_display_name("The Application")
    config.add_argument(
        "global-argument", description='Description of "global-argument"'
    )
    config.add_option("global-option", description='Description of "global-option"')
    with config.command("command1") as c:
        c.set_description('Description of "command1"')
    with config.command("command2") as c:
        c.set_description('Description of "command2"')
    with config.command("longer-command3") as c:
        c.set_description('Description of "longer-command3"')
    app = ConsoleApplication(config)
    help = ApplicationHelp(app)
    help.render(io)
    expected = """\
The Application
USAGE
test-bin [--global-option] <command> [<arg1>] ... [<argN>]
ARGUMENTS
<command> The command to execute
<arg> The arguments of the command
GLOBAL OPTIONS
--global-option Description of "global-option"
AVAILABLE COMMANDS
command1 Description of "command1"
command2 Description of "command2"
longer-command3 Description of "longer-command3"
"""
    assert expected == io.fetch_output()


def test_sort_commands(io):
    """Commands are listed alphabetically regardless of registration order."""
    config = ApplicationConfig("test-bin")
    config.set_display_name("The Application")
    config.create_command("command3")
    config.create_command("command1")
    config.create_command("command2")
    app = ConsoleApplication(config)
    help = ApplicationHelp(app)
    help.render(io)
    expected = """\
The Application
USAGE
test-bin <command> [<arg1>] ... [<argN>]
ARGUMENTS
<command> The command to execute
<arg> The arguments of the command
AVAILABLE COMMANDS
command1
command2
command3
"""
    assert expected == io.fetch_output()


def test_render_version(io):
    """The configured version is appended to the display name."""
    config = ApplicationConfig("test-bin", "1.2.3")
    config.set_display_name("The Application")
    app = ConsoleApplication(config)
    help = ApplicationHelp(app)
    help.render(io)
    expected = """\
The Application version 1.2.3
USAGE
test-bin <command> [<arg1>] ... [<argN>]
ARGUMENTS
<command> The command to execute
<arg> The arguments of the command
"""
    assert expected == io.fetch_output()


def test_render_default_display_name(io):
    """Without an explicit display name, the binary name is title-cased."""
    config = ApplicationConfig("test-bin")
    app = ConsoleApplication(config)
    help = ApplicationHelp(app)
    help.render(io)
    expected = """\
Test Bin
USAGE
test-bin <command> [<arg1>] ... [<argN>]
ARGUMENTS
<command> The command to execute
<arg> The arguments of the command
"""
    assert expected == io.fetch_output()


def test_render_default_no_name(io):
    """With no name at all, the generic 'Console Tool'/'console' defaults apply."""
    config = ApplicationConfig()
    app = ConsoleApplication(config)
    help = ApplicationHelp(app)
    help.render(io)
    expected = """\
Console Tool
USAGE
console <command> [<arg1>] ... [<argN>]
ARGUMENTS
<command> The command to execute
<arg> The arguments of the command
"""
    assert expected == io.fetch_output()


def test_render_global_options_with_preferred_short_name(io):
    """PREFER_SHORT_NAME shows -g in usage, with --global-option as the alias."""
    config = ApplicationConfig()
    config.add_option(
        "global-option", "g", Option.PREFER_SHORT_NAME, 'Description of "global-option"'
    )
    app = ConsoleApplication(config)
    help = ApplicationHelp(app)
    help.render(io)
    expected = """\
Console Tool
USAGE
console [-g] <command> [<arg1>] ... [<argN>]
ARGUMENTS
<command> The command to execute
<arg> The arguments of the command
GLOBAL OPTIONS
-g (--global-option) Description of "global-option"
"""
    assert expected == io.fetch_output()


def test_render_global_options_with_preferred_long_name(io):
    """PREFER_LONG_NAME shows --global-option in usage, with -g as the alias."""
    config = ApplicationConfig()
    config.add_option(
        "global-option", "g", Option.PREFER_LONG_NAME, 'Description of "global-option"'
    )
    app = ConsoleApplication(config)
    help = ApplicationHelp(app)
    help.render(io)
    expected = """\
Console Tool
USAGE
console [--global-option] <command> [<arg1>] ... [<argN>]
ARGUMENTS
<command> The command to execute
<arg> The arguments of the command
GLOBAL OPTIONS
--global-option (-g) Description of "global-option"
"""
    assert expected == io.fetch_output()


def test_render_description(io):
    """The help text is rendered in DESCRIPTION with {script_name} substituted."""
    config = ApplicationConfig()
    config.set_help("The help for {script_name}\n\nSecond paragraph")
    app = ConsoleApplication(config)
    help = ApplicationHelp(app)
    help.render(io)
    expected = """\
Console Tool
USAGE
console <command> [<arg1>] ... [<argN>]
ARGUMENTS
<command> The command to execute
<arg> The arguments of the command
DESCRIPTION
The help for console
Second paragraph
"""
    assert expected == io.fetch_output()
|
[
"[email protected]"
] | |
5ab2fec2b8f90755f0c2c41cd1c55b6a58f2d869
|
ea02eb8c52ef66fe8399516dc0103b95ea1dd7c4
|
/leo/lilac.py
|
62a481a1484a09dabefd18f739923e60614fee7a
|
[] |
no_license
|
y010204025/repo
|
6c9d9601a14b8d003789bfe8266b1e10e9d41a49
|
074fef70cdccf3c62092a848e88bb27fbabea8d3
|
refs/heads/master
| 2020-03-23T03:38:57.191796 | 2018-07-15T14:51:38 | 2018-07-15T14:51:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,265 |
py
|
#!/usr/bin/env python3
#
# This is a complex version of lilac.py for building
# a package from AUR.
#
# You can do something before/after building a package,
# including modify the 'pkgver' and 'md5sum' in PKBUILD.
#
# This is especially useful when a AUR package is
# out-of-date and you want to build a new one, or you
# want to build a package directly from sourceforge but
# using PKGBUILD from AUR.
#
# See also:
# [1] ruby-sass/lilac.py
# [2] aufs3-util-lily-git/lilac.py
# [3] octave-general/lilac.py
#
from lilaclib import *
build_prefix = 'extra-x86_64'
def pre_build():
    """Fetch the AUR files, then patch PKGBUILD line by line.

    Adds python-setuptools to the depends= array, and once pkgver=5.1 has
    been seen, bumps pkgrel from 1 to 2 so a rebuild is triggered.
    """
    aur_pre_build()
    bump_pkgrel = False
    for line in edit_file('PKGBUILD'):
        stripped = line.strip()
        if stripped.startswith("depends="):
            # Splice the extra dependency in just before the closing token.
            parts = line.split(" ")
            parts.insert(-1, "'python-setuptools'")
            line = " ".join(parts)
        if stripped.startswith("pkgver=5.1"):
            bump_pkgrel = True
        if bump_pkgrel and stripped.startswith("pkgrel=1"):
            line = "pkgrel=2"
        print(line)
# Standard AUR post-build step, re-exported under the name lilac expects.
post_build = aur_post_build
# do some cleanup here after building the package, regardless of result
# def post_build_always(success):
#     pass
if __name__ == '__main__':
    single_main(build_prefix)
|
[
"[email protected]"
] | |
afa2880ef7c9ad5d7d0c8b552c93ec596a1567aa
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02712/s236119093.py
|
71fea111b5f86f505bae1ae0688f57fdca6fed08
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 144 |
py
|
import sys
input = sys.stdin.readline

# Sum of 1..N, skipping every number divisible by 3 or by 5.
N = int(input())
print(sum(i for i in range(1, N + 1) if i % 3 != 0 and i % 5 != 0))
|
[
"[email protected]"
] | |
cbd4dff58fb534940486ad7a745bc32cfa732058
|
e268832c9a5ecd465851347fc870ccf92e073309
|
/Top_Interview_Questions/48._Rotate_Image/solution.py
|
0c0c4d7037d1b48406cc887ceb7c12e888dc1f8c
|
[] |
no_license
|
hkim150/Leetcode-Problems
|
a995e74ecca6b34213d9fa34b0d84eea649f57c2
|
1239b805a819e4512860a6507b332636941ff3e9
|
refs/heads/master
| 2020-12-04T08:06:42.981990 | 2020-10-03T00:20:29 | 2020-10-03T00:20:29 | 231,688,355 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 565 |
py
|
class Solution:
    def rotate(self, matrix: List[List[int]]) -> None:
        """
        Do not return anything, modify matrix in-place instead.
        """
        # A 90-degree clockwise rotation is a transpose followed by
        # reversing each row; both steps mutate the existing row lists.
        if not matrix or len(matrix) <= 1:
            return
        size = len(matrix)
        # Transpose: swap each element above the diagonal with its mirror.
        for r in range(size):
            for c in range(r + 1, size):
                matrix[r][c], matrix[c][r] = matrix[c][r], matrix[r][c]
        for row in matrix:
            row.reverse()
|
[
"[email protected]"
] | |
11a6fcb57a8d8be2f2d2ef039795233e246976d1
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03346/s839224624.py
|
607b47a18949a8482e6d9a712cdca3e6c434dd8e
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 745 |
py
|
import bisect
import heapq
import itertools
import math
import operator
import os
import re
import string
import sys
from collections import Counter, deque, defaultdict
from copy import deepcopy
from decimal import Decimal
from fractions import gcd
from functools import lru_cache, reduce
from operator import itemgetter, mul, add, xor
import numpy as np
if os.getenv("LOCAL"):
sys.stdin = open("_in.txt", "r")
sys.setrecursionlimit(2147483647)
INF = float("inf")
IINF = 10 ** 18
MOD = 10 ** 9 + 7
N = int(sys.stdin.readline())
P = [int(sys.stdin.readline()) for _ in range(N)]
# dp[i]: i より前に何個連続してるか
dp = np.zeros(N + 1, dtype=int)
for i in range(N):
p = P[i]
dp[p] = dp[p - 1] + 1
print(N - dp.max())
|
[
"[email protected]"
] | |
fd423a0b542b090db684de0a6a6f97329d80eeda
|
129ea5d4b576639da63cf94dd3d1adb27422aa03
|
/ceshi.py
|
77dff77147c176733aea9a6bd577b26228b2cbda
|
[] |
no_license
|
lianzhang132/bookroom
|
55392db40bdf4bfd4d49c33d4dfb60947f954061
|
2bebdbd90be3fc356efdb6514688d1b6c7cb3c48
|
refs/heads/master
| 2020-07-11T01:29:03.630781 | 2019-08-26T07:21:45 | 2019-08-26T07:21:45 | 204,419,274 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,399 |
py
|
import unittest
from run import app
import json
# # 可以借助于 urllib,requeset 发送请求
# import urllib
# import requests
# 登录测试
class LoginTest(unittest.TestCase):
# 测试用户名,密码 为空的测试方法
def setUp(self):
app.testing = True
# 调用测试代码之前一定会执行
# 初始化的代码 执行 放在这里
self.client = app.test_client()
def test_empty_username_password(self):
# app 对象 内置发送请求的方式 参数一 路由,参数二,数据
response = self.client.post('/login',data={})
json_dict = json.loads(response.data)
# print(json_dict)
# 断言
self.assertIn('errcode',json_dict,'数据格式返回错误')
self.assertEqual(1,json_dict['errcode'],'状态码返回错误')
# requests.post('/login')
def test_username_password(self):
response = self.client().post('/login', data={'uname':'xiaoming','upass':'abc'})
json_dict = json.loads(response.data)
# print(json_dict)
# 断言
self.assertIn('errcode', json_dict, '数据格式返回错误')
self.assertEqual(2, json_dict['errcode'], '用户名或者密码不正确')
# 注册测试
# class RegisterTest(unittest.TestCase):
# pass
# 订单 会员 购物车 模块测试
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
901fa07849d22ed8b30cf47e067a33598d238cf6
|
916480ae24345193efa95df013f637e0a115653b
|
/web/transiq/api/management/commands/save_blackbuck_data.py
|
ef02a5eccb3223c25f6cb6c0b3b3b085eb722b2e
|
[
"Apache-2.0"
] |
permissive
|
manibhushan05/tms
|
50e289c670e1615a067c61a051c498cdc54958df
|
763fafb271ce07d13ac8ce575f2fee653cf39343
|
refs/heads/master
| 2022-12-11T07:59:30.297259 | 2021-09-08T03:24:59 | 2021-09-08T03:24:59 | 210,017,184 | 0 | 0 |
Apache-2.0
| 2022-12-08T02:35:01 | 2019-09-21T16:23:57 |
Python
|
UTF-8
|
Python
| false | false | 303 |
py
|
from django.core.management.base import BaseCommand
from api.blackbuck import fetch_blackbuck_data
class Command(BaseCommand):
args = 'Arguments not needed'
help = 'Django admin command to save blackbuck data'
def handle(self, *args, **options):
fetch_blackbuck_data(clean=True)
|
[
"[email protected]"
] | |
a1ea1cd0c4454fea650614ef561225696796a60d
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-organizations/huaweicloudsdkorganizations/v1/model/tag_resource_req_body.py
|
f43bc9df67f8aa35a1f1ec41372a38258bee7053
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 |
NOASSERTION
| 2023-06-22T14:50:48 | 2020-05-08T02:28:43 |
Python
|
UTF-8
|
Python
| false | false | 3,188 |
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class TagResourceReqBody:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'tags': 'list[TagDto]'
}
attribute_map = {
'tags': 'tags'
}
def __init__(self, tags=None):
"""TagResourceReqBody
The model defined in huaweicloud sdk
:param tags: 要添加到指定资源的标签列表。
:type tags: list[:class:`huaweicloudsdkorganizations.v1.TagDto`]
"""
self._tags = None
self.discriminator = None
self.tags = tags
@property
def tags(self):
"""Gets the tags of this TagResourceReqBody.
要添加到指定资源的标签列表。
:return: The tags of this TagResourceReqBody.
:rtype: list[:class:`huaweicloudsdkorganizations.v1.TagDto`]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""Sets the tags of this TagResourceReqBody.
要添加到指定资源的标签列表。
:param tags: The tags of this TagResourceReqBody.
:type tags: list[:class:`huaweicloudsdkorganizations.v1.TagDto`]
"""
self._tags = tags
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TagResourceReqBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"[email protected]"
] | |
f453a36bcee504e1fc87eb4a16f5709e38556740
|
415a8a4315e6331b2a157de8a1429fe0562729f8
|
/python/TryCatch.py
|
75b958f68079d3ef596723ff8107b476c5de2643
|
[] |
no_license
|
alfaceor/programming-examples
|
784690dd1104e4adbdf958e4163b3b462f635881
|
abea970a54cfab0eacc5280ae62383495e9e6eeb
|
refs/heads/master
| 2022-05-04T23:14:30.503114 | 2022-04-29T10:11:45 | 2022-04-29T10:11:45 | 36,015,541 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 290 |
py
|
#!/usr/bin/python
import numpy as np
for i in range(4):
try:
data = np.loadtxt("NoExiste.dat")
except IOError as e:
print "Oops!"
pass
for i in range(5):
try:
data = np.loadtxt("NoExiste.dat")
except IOError as e:
print "NO PASS!"
break
print "Fuck U!!!"
|
[
"alfaceor"
] |
alfaceor
|
afe52020807529d3d426f1f80748977c241334c4
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_137/548.py
|
6499377835513cc99a4e2110302f4abf9f61a149
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 755 |
py
|
data = open('minesweeper.txt')
mines = {}
while True:
line = data.readline()
line = line.split()
if len(line) == 0:
break
R = int(line[0])
C = int(line[1])
M = int(line[2])
line = data.readline().strip()
if line == 'Impossible':
mines[(R, C, M)] = 'Impossible'
else:
mine = line + '\n'
for r in range(1, R):
mine += data.readline()
mines[(R, C, M)] = mine
test_data = open('C-small-attempt0.in')
num_tests = int(test_data.readline().strip())
for test in range(num_tests):
line = test_data.readline().split()
R = int(line[0])
C = int(line[1])
M = int(line[2])
output = mines[(R, C, M)]
print('Case #{0}:'.format(test + 1))
print(output)
|
[
"[email protected]"
] | |
4b270fa9d701f65ef4e79353a53e22d43df8424f
|
ad9782856ec2f860fccbefa5e75a896691b8e1cc
|
/MonteCarlo/test/opt6s3l/crab_step2_VBF_HToBB_OT613_200_IT4025_opt6s3l.py
|
8031794a20b9cb961ae352984ee3b6e5b3a772d7
|
[] |
no_license
|
OSU-CMS/VFPix
|
7fe092fc5a973b4f9edc29dbfdf44907664683e5
|
4c9fd903219742a4eba1321dc4181da125616e4c
|
refs/heads/master
| 2020-04-09T05:52:05.644653 | 2019-01-09T13:44:22 | 2019-01-09T13:44:22 | 30,070,948 | 0 | 0 | null | 2018-11-30T13:15:54 | 2015-01-30T12:26:20 |
Python
|
UTF-8
|
Python
| false | false | 944 |
py
|
from CRABClient.UserUtilities import config, getUsernameFromSiteDB
config = config()
config.General.requestName = 'VBF_HToBB_14TeV_step2_923_PU200_OT613_200_IT4025_opt6s3l'
config.General.workArea = 'crab'
config.General.transferOutputs = True
config.General.transferLogs = True
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'step2_DIGI_L1_L1TrackTrigger_DIGI2RAW_HLT_PU200_OT613_200_IT4025_opt6s3l.py'
config.JobType.maxMemoryMB = 4000
config.Data.inputDataset = '/VBF_HToBB_14TeV_923_OT613_200_IT4025_opt6s3l/jalimena-LheGenSim_RAWSIMoutput-efeae19cc3c320703c0b5144577e0f10/USER'
config.Data.outputDatasetTag = 'step2_PU200'
config.Data.inputDBS = 'phys03'
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob = 1
config.Data.outLFNDirBase = '/store/group/lpcfpix'
config.Data.publication = True
config.Data.ignoreLocality = True
config.Site.whitelist = ["T1_US_FNAL"]
config.Site.storageSite = 'T3_US_FNALLPC'
|
[
"[email protected]"
] | |
5a308f6b7f9ceacdf803dead7dbd5a2dfc85628e
|
9aa1885bfd666b5d3719c29334c9769bbe88d3e0
|
/bin/cache-purge-consumer.py
|
d1bd99303ae097493edde7eadcd860165b207716
|
[] |
permissive
|
praekelt/django-ultracache
|
9c240cfad4660afdb7e679192ca0f4b05bab1831
|
476eb8a4935043f4fc6901ed3541ececed1664bf
|
refs/heads/develop
| 2022-01-27T18:20:00.062349 | 2020-05-29T09:58:01 | 2020-05-29T09:58:01 | 38,880,711 | 32 | 4 |
BSD-3-Clause
| 2022-01-06T22:24:32 | 2015-07-10T13:02:45 |
Python
|
UTF-8
|
Python
| false | false | 3,973 |
py
|
"""Subscribe to RabbitMQ and listen for purge instructions continuously. Manage
this script through eg. supervisor."""
import json
import traceback
from multiprocessing.pool import ThreadPool
from optparse import OptionParser
from time import sleep
import pika
import requests
import yaml
class Consumer:
channel = None
connection = None
def __init__(self):
self.pool = ThreadPool()
parser = OptionParser()
parser.add_option("-c", "--config", dest="config",
help="Configuration file", metavar="FILE")
(options, args) = parser.parse_args()
config_file = options.config
self.config = {}
if config_file:
self.config = yaml.load(open(config_file)) or {}
def log(self, msg):
name = self.config.get("logfile", None)
if not name:
return
if name == "stdout":
print(msg)
return
fp = open(name, "a")
try:
fp.write(msg + "\n")
finally:
fp.close()
def connect(self):
parameters = pika.URLParameters(
self.config.get(
"rabbit-url",
"amqp://guest:[email protected]:5672/%2F"
)
)
self.connection = pika.BlockingConnection(parameters)
self.channel = self.connection.channel()
self.channel.exchange_declare(
exchange="purgatory", exchange_type="fanout"
)
queue = self.channel.queue_declare(exclusive=True)
queue_name = queue.method.queue
self.channel.queue_bind(exchange="purgatory", queue=queue_name)
self.channel.basic_qos(prefetch_count=1)
self.channel.basic_consume(
self.on_message, queue=queue_name, no_ack=False, exclusive=True
)
def on_message(self, channel, method_frame, header_frame, body):
self.pool.apply_async(self.handle_message, (body,))
channel.basic_ack(delivery_tag=method_frame.delivery_tag)
def handle_message(self, body):
if body:
try:
di = json.loads(body)
except ValueError:
path = body
headers = {}
else:
path = di["path"]
headers = di["headers"]
self.log("Purging %s with headers %s" % (path, str(headers)))
host = self.config.get("host", None)
try:
if host:
final_headers = {"Host": host}
final_headers.update(headers)
response = requests.request(
"PURGE", "http://" \
+ self.config.get("proxy-address", "127.0.0.1") + path,
headers=final_headers,
timeout=10
)
else:
response = requests.request(
"PURGE", "http://" \
+ self.config.get("proxy-address", "127.0.0.1") + path,
timeout=10,
headers=headers
)
except Exception as exception:
msg = traceback.format_exc()
self.log("Error purging %s: %s" % (path, msg))
else:
content = response.content
def consume(self):
loop = True
while loop:
try:
if self.channel is None:
raise pika.exceptions.ConnectionClosed()
self.channel.start_consuming()
except KeyboardInterrupt:
loop = False
self.channel.stop_consuming()
except pika.exceptions.ConnectionClosed:
try:
self.connect()
except pika.exceptions.ConnectionClosed:
sleep(1)
self.connection.close()
consumer = Consumer()
consumer.consume()
|
[
"[email protected]"
] | |
0e224f6a0ff6149cf70f6a426a50cdc40b769be9
|
8d1ceed7720e374691829d78007ea146a9030e4f
|
/arkestra_clinical_studies/lister.py
|
5905346c3c39dba5232bfa745f6c1a2ba387225d
|
[
"BSD-2-Clause"
] |
permissive
|
gonff/arkestra-clinical-studies
|
25ef186207781bbc979f7f12bdef194802d9c71c
|
d75540e006a5d8b1ccb6d05a8253eba9c9fb0a79
|
refs/heads/master
| 2021-01-18T05:10:23.067652 | 2014-05-21T11:19:03 | 2014-05-21T11:19:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,978 |
py
|
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from arkestra_utilities.generic_lister import (
ArkestraGenericLister, ArkestraGenericList, ArkestraGenericFilterList,
ArkestraGenericFilterSet
)
from arkestra_utilities.settings import MULTIPLE_ENTITY_MODE
from .models import Study
# we're going to have a list of Studies that we can search, filter and paginate
# the ArkestraGenericFilterSet provides us with some of that machinery
class StudiesFilterSet(ArkestraGenericFilterSet):
# the fields we want to be able to filter on
fields = ["date", "status", "studytype"]
class StudiesListMixin(object):
def set_items_for_entity(self):
# if we're not in MULTIPLE_ENTITY_MODE, just leave self.items alone
if MULTIPLE_ENTITY_MODE and self.entity:
# we want to include any item that has any relationship with any
# of the descendants of the entity we're looking at
# get a list of all those entities
entities = self.entity.get_descendants(
include_self=True
).values_list('id', flat=True)
# get the Studies that have a relationship with any item in that list
self.items = self.items.filter(
Q(hosted_by__in=entities) | Q(publish_to__in=entities) |
Q(funding_body__in=entities) | Q(sponsor__in=entities) |
Q(clinical_centre__in=entities)
).distinct()
# the class that produces the list of items, based on ArkestraGenericFilterList
class StudiesList(StudiesListMixin, ArkestraGenericFilterList):
# it must have a filter_set class
filter_set = StudiesFilterSet
# the model we're listing
model = Study
# the text search fields - each one is a dictionary
search_fields = [
{
# the field as its name appears in the URL: ?text=
"field_name": "text",
# a label for the field
"field_label": "Search title/summary",
# the placeholder text in the search widget
"placeholder": "Search",
# the model fields we want to search through
"search_keys": [
"title__icontains",
"summary__icontains",
],
},
]
# we want to override the generic list item template
item_template = "clinical_studies/study_list_item.html"
# we need our own build() method to override the generic one
def build(self):
# get the listable (by default, published and shown in lists) items
self.items = self.model.objects.listable_objects()
# we'll limit the items according to the appropriate entity - the
# method that does this is defined in the StudiesListMixin
self.set_items_for_entity()
# and limit by search terms
self.filter_on_search_terms()
# and set up the filter for rendering
self.itemfilter = self.filter_set(self.items, self.request.GET)
# the Lister class is the one that determines which lists to display, along
# with the surrounding furniture - in the case of Studies, it's just one List,
# but we could have more
class StudiesLister(ArkestraGenericLister):
# a list of available List classes
listkinds = [("studies", StudiesList)]
# the List classes we want to use
display = "studies"
class StudiesMenuList(StudiesListMixin, ArkestraGenericList):
model = Study
heading_text = _(u"News")
def build(self):
# get the listable (by default, published and shown in lists) items
self.items = self.model.objects.listable_objects()
# we'll limit the items according to the appropriate entity - the
# method that does this is defined in the StudiesListMixin
self.set_items_for_entity()
class StudiesMenuLister(ArkestraGenericLister):
listkinds = [("studies", StudiesMenuList)]
display = "studies"
|
[
"[email protected]"
] | |
3b15efcd4c58e73f9d4c0135da5f36a883347fa3
|
d170efa06e6e682c71961fe1213298e5a68193c3
|
/python/python/rotate/test_rotate.py
|
fc39d304949523ec047f2d1eddf13bc3a777fc50
|
[
"MIT"
] |
permissive
|
iggy18/data-structures-and-algorithms
|
45b9ebf3c0820968bda62c0ebd90a9cfd65b3902
|
700ef727ca7656724120a1873af4bd4bce5962f4
|
refs/heads/main
| 2023-02-27T04:45:12.535801 | 2021-02-08T22:41:28 | 2021-02-08T22:41:28 | 300,975,693 | 0 | 0 |
MIT
| 2021-02-12T18:39:18 | 2020-10-03T20:42:08 |
JavaScript
|
UTF-8
|
Python
| false | false | 232 |
py
|
from rotate import rotate
def test_rotate():
assert rotate
def test_rotate_works_properly():
x = [[1,2,3], [1,2,3], [1,2,3]]
actual = rotate(x)
expected = [[1,1,1], [2,2,2], [3,3,3,]]
assert actual == expected
|
[
"[email protected]"
] | |
5b5318e9339850b6265dc415340e362ff7e63894
|
8f3336bbf7cd12485a4c52daa831b5d39749cf9b
|
/Python/diameter-of-binary-tree.py
|
2f8d44152f1d1d7bc911f4df55398ee39e93ccf0
|
[] |
no_license
|
black-shadows/LeetCode-Topicwise-Solutions
|
9487de1f9a1da79558287b2bc2c6b28d3d27db07
|
b1692583f7b710943ffb19b392b8bf64845b5d7a
|
refs/heads/master
| 2022-05-30T22:16:38.536678 | 2022-05-18T09:18:32 | 2022-05-18T09:18:32 | 188,701,704 | 240 | 110 | null | 2020-05-08T13:04:36 | 2019-05-26T15:41:03 |
C++
|
UTF-8
|
Python
| false | false | 501 |
py
|
# Time: O(n)
# Space: O(h)
class Solution(object):
def diameterOfBinaryTree(self, root):
"""
:type root: TreeNode
:rtype: int
"""
return self.depth(root, 0)[1]
def depth(self, root, diameter):
if not root:
return 0, diameter
left, diameter = self.depth(root.left, diameter)
right, diameter = self.depth(root.right, diameter)
return 1 + max(left, right), max(diameter, left + right)
|
[
"[email protected]"
] | |
96fd3506464c392a8fd723e5f4d4aeaf7d0ba1cc
|
09e5cfe06e437989a2ccf2aeecb9c73eb998a36c
|
/modules/cctbx_project/cctbx/libtbx_refresh.py
|
b1206d46a3f922f73066f670596a7b7a0ef8f24f
|
[
"BSD-3-Clause-LBNL",
"BSD-3-Clause"
] |
permissive
|
jorgediazjr/dials-dev20191018
|
b81b19653624cee39207b7cefb8dfcb2e99b79eb
|
77d66c719b5746f37af51ad593e2941ed6fbba17
|
refs/heads/master
| 2020-08-21T02:48:54.719532 | 2020-01-25T01:41:37 | 2020-01-25T01:41:37 | 216,089,955 | 0 | 1 |
BSD-3-Clause
| 2020-01-25T01:41:39 | 2019-10-18T19:03:17 |
Python
|
UTF-8
|
Python
| false | false | 1,790 |
py
|
from __future__ import absolute_import, division, print_function
import os
from libtbx.utils import warn_if_unexpected_md5_hexdigest
if self.env.is_ready_for_build():
message_template = ' Generating C++ files in:\n "%s"'
# eltbx
from cctbx.source_generators.eltbx import generate_henke_cpp
from cctbx.source_generators.eltbx import generate_sasaki_cpp
target_dir = self.env.under_build("cctbx/eltbx")
print(message_template % target_dir)
for label,generator_module in [("Henke", generate_henke_cpp),
("Sasaki", generate_sasaki_cpp)]:
if os.path.isdir(generator_module.reference_tables_directory):
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
generator_module.run(target_dir=target_dir)
else:
print("*"*79)
print("Warning: directory with %s tables is missing:" % label)
print(" ", repr(generator_module.reference_tables_directory))
print("*"*79)
# flex_fwd.h
from cctbx.source_generators import flex_fwd_h
target_dir = self.env.under_build("include/cctbx/boost_python")
print(message_template % target_dir)
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
flex_fwd_h.run(target_dir)
# reference_table.cpp : checking that it is up-to-date
for f,sig in [
("reference_table.py", "b4d948c292357b90c8b4d5716d607bb9"),
("short_cuts.py", "18e5b9d93962d70711497de1d6dbebbb"),
("proto/generate_cpp_asu_table.py", "0f19e51b469650aa23e81483051eeb10")]:
fn = "sgtbx/direct_space_asu/" + f
warn_if_unexpected_md5_hexdigest(
path=self.env.under_dist( module_name="cctbx", path=fn),
expected_md5_hexdigests=[ sig ],
hints=[
" Files to review:",
" "+fn,
" cctbx/libtbx_refresh.py"])
|
[
"[email protected]"
] | |
264b62e7d0d2651cf9ec655cdfb6fafd32babdd4
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_201/2701.py
|
4dfd25441bfd296e14ceefcf2861b262d56462e9
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,353 |
py
|
import math
def get_stall(arg):
l = []
arg.sort()
for p1, p2 in zip(arg, arg[1:]):
diff = abs(p1 - p2)
if not l:
l.append(diff)
l.append(p1+(diff//2))
l.append(p1)
elif l[0] < diff:
l.clear()
l.append(diff)
l.append(p1 + (diff//2))
l.append(p1)
else:
pass
return l
t = int(input()) # read a line with a single integer
for x in range(1, t + 1):
n, k = [int(s) for s in input().split(" ")] # read a list of integers, 2 in this case
if n == k:
print("Case #{}: {} {}".format(x, 0, 0))
else:
ls = [0, n+1]
blank_list = []
for i in range(k):
mee = get_stall(ls)
# print(mee)
ls.append(mee[1])
ls.sort()
# print("***", ls)
stall = ls.index(mee[1])
val1 = ls[stall-1]
val2 = ls[stall+1]
z = mee[1]-val1 - 1
y = val2 - mee[1] - 1
# y = max(([abs(t - s)//2 for s, t in zip(ls, ls[1:])]))
# z = min(([abs(t - s)//2 for s, t in zip(ls, ls[1:])]))
# print("Case #{}: {} {}".format(x, max(abs(mee[1]-mee[0])-1, y), max(abs(mee[2]-mee[1]), abs(z))-1))
print("Case #{}: {} {}".format(x, max(y, z), min(y, z)))
|
[
"[email protected]"
] | |
a28f99427e7b585a4de577169e2d4afd3ab4e90e
|
618522a8ffed585e27701b9acb1a1171e3c5c924
|
/salience_sum/module/encoder.py
|
845e220e59a1dd02f3abb3eeec33d31e13a09aba
|
[] |
no_license
|
blodstone/Salience_Sum
|
9795c2a1c03c86218a8c4560ba65f7d1ff5f65e8
|
ce2e9e316a68c18bd523ba9e3d1e3ea286bbf068
|
refs/heads/master
| 2020-08-29T11:49:40.695618 | 2020-01-21T16:17:18 | 2020-01-21T16:17:18 | 218,023,295 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,495 |
py
|
import torch
from allennlp.modules import Seq2SeqEncoder
from allennlp.nn.util import get_lengths_from_binary_sequence_mask
from torch.nn import LSTM, Linear, Sequential, ReLU
from typing import Dict, Tuple
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
@Seq2SeqEncoder.register('salience_encoder')
class Encoder(Seq2SeqEncoder):
"""
A standard LSTM encoder that supports bidirectional. If bidirectional is True, we split
the hidden layer and then concatenate the two directions in the resulting encoder states.
Everything is on first batch basis.
"""
def __init__(self, input_size,
hidden_size,
num_layers,
bidirectional,
stateful: bool = False) -> None:
super().__init__(stateful)
self.hidden_size = hidden_size
self.bidirectional = bidirectional
self.num_layers = num_layers
self.input_size = input_size
self._rnn = LSTM(input_size=self.input_size,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
bidirectional=self.bidirectional,
batch_first=True)
self._reduce = Linear(self.hidden_size * 2, self.hidden_size)
def forward(self, embedded_src: torch.Tensor, source_mask: torch.Tensor) \
-> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
source_lengths = get_lengths_from_binary_sequence_mask(source_mask)
packed_src = pack_padded_sequence(embedded_src, source_lengths,
batch_first=True, enforce_sorted=False)
# states = (B x L X 2*H)
packed_states, final = self._rnn(packed_src)
states, _ = pad_packed_sequence(packed_states, batch_first=True)
batch_size = states.size(0)
# final_states and context = (B x 2*num_layer x H)
final_state, context = final
# Reducing the dual hidden size to one hidden size
if self.bidirectional:
final_state = self._reduce(final_state.view(batch_size, self.num_layers, -1))
context = self._reduce(context.view(batch_size, self.num_layers, -1))
return states, (final_state, context)
def get_input_dim(self) -> int:
return self.input_size
def get_output_dim(self) -> int:
return self.hidden_size
def is_bidirectional(self) -> bool:
return self.bidirectional
|
[
"[email protected]"
] | |
ad33747c00bc3429bacdc1bf31667c00daab67fc
|
5f09c2581c28751589871068d1faa9297859d2f3
|
/insert_banco.py
|
737b50ad7286cc87824d9969603d863b81f055e2
|
[] |
no_license
|
fandrefh/curso-python-e-django-senac
|
f68b4b4ce7071ac78034afdaf63251ed0422fa56
|
8a418a7d9acd12c3ca8820c5589d5d02476d3d0c
|
refs/heads/master
| 2021-01-20T20:28:53.311346 | 2016-08-27T20:48:44 | 2016-08-27T20:48:44 | 65,097,253 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 492 |
py
|
import sqlite3
conn = sqlite3.connect("clientes.db")
cursor = conn.cursor()
cursor.execute("""
INSERT INTO cad_clientes (nome, idade) VALUES ('Regis', 35);
""")
cursor.execute("""
INSERT INTO cad_clientes (nome, idade) VALUES ('Aloisio', 87);
""")
cursor.execute("""
INSERT INTO cad_clientes (nome, idade) VALUES ('Bruna', 21);
""")
cursor.execute("""
INSERT INTO cad_clientes (nome, idade) VALUES ('Matheus', 19);
""")
conn.commit()
print('Dados inseridos com sucesso.')
conn.close()
|
[
"[email protected]"
] | |
47ec0b8daf1be246726bb38689c9967a2047b1d3
|
76050b0002dac757866a9fb95dc199918da665bb
|
/acme/utils/iterator_utils_test.py
|
ebe21f3a602dbf5b91ce2fc5ab468a73080be58f
|
[
"Apache-2.0"
] |
permissive
|
RaoulDrake/acme
|
2829f41688db68d694da2461d301fd6f9f27edff
|
97c50eaa62c039d8f4b9efa3e80c4d80e6f40c4c
|
refs/heads/master
| 2022-12-29T01:16:44.806891 | 2022-12-21T14:09:38 | 2022-12-21T14:10:06 | 300,250,466 | 0 | 0 |
Apache-2.0
| 2020-10-01T11:13:03 | 2020-10-01T11:13:02 | null |
UTF-8
|
Python
| false | false | 1,249 |
py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for iterator_utils."""
from acme.utils import iterator_utils
import numpy as np
from absl.testing import absltest
class IteratorUtilsTest(absltest.TestCase):
def test_iterator_zipping(self):
def get_iters():
x = iter(range(0, 10))
y = iter(range(20, 30))
return [x, y]
zipped = zip(*get_iters())
unzipped = iterator_utils.unzip_iterators(zipped, num_sub_iterators=2)
expected_x, expected_y = get_iters()
np.testing.assert_equal(list(unzipped[0]), list(expected_x))
np.testing.assert_equal(list(unzipped[1]), list(expected_y))
if __name__ == '__main__':
absltest.main()
|
[
"[email protected]"
] | |
e0484f2e58aab4de9e567907b0778dc57f18cc34
|
574d7955a32116e2fa315b5f75f124863ca70614
|
/blog/admin.py
|
ee30581a5a79496780dd1cb38aa3d14fd815c3c0
|
[] |
no_license
|
harunurkst/django_course_04
|
b15cb8e52a821b1157e1ac4dbe56b89fdebce848
|
5d93290cbee0f47795b6c9ecef8d33d8afe859d1
|
refs/heads/master
| 2022-11-22T20:48:36.196279 | 2020-07-26T17:20:37 | 2020-07-26T17:20:37 | 278,904,995 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 294 |
py
|
from django.contrib import admin
from .models import Post, Author, Category, Comment
class PostAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("title",)}
admin.site.register(Post, PostAdmin)
admin.site.register(Author)
admin.site.register(Category)
admin.site.register(Comment)
|
[
"[email protected]"
] | |
bb17b14f9cc0eaaeb740793ec62035edb8637a1f
|
71f00ed87cd980bb2f92c08b085c5abe40a317fb
|
/Data/GoogleCloud/google-cloud-sdk/lib/surface/privateca/subordinates/activate.py
|
f9f73fb40b0bb8a564338a2a28bed7e1e5cf84c6
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
factoryofthesun/Rao-NLP
|
2bd8269a8eed1cb352c14c8fde88e3111ccca088
|
87f9723f5ee51bd21310d58c3425a2a7271ec3c5
|
refs/heads/master
| 2023-04-18T08:54:08.370155 | 2020-06-09T23:24:07 | 2020-06-09T23:24:07 | 248,070,291 | 0 | 1 | null | 2021-04-30T21:13:04 | 2020-03-17T20:49:03 |
Python
|
UTF-8
|
Python
| false | false | 3,547 |
py
|
# Lint as: python3
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Activate a pending Certificate Authority."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.privateca import base as privateca_base
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.privateca import operations
from googlecloudsdk.command_lib.privateca import pem_utils
from googlecloudsdk.command_lib.privateca import resource_args
from googlecloudsdk.core.util import files
class Activate(base.SilentCommand):
r"""Activate a subordinate certificate authority in a pending state.
## EXAMPLES
To activate a subordinate CA named 'server-tls-1' in the location 'us' using
a PEM certificate
chain in 'chain.crt':
$ {command} server-tls-1 \
--location us \
--pem-chain ./chain.crt
"""
@staticmethod
def Args(parser):
resource_args.AddCertificateAuthorityPositionalResourceArg(
parser, 'to activate')
base.Argument(
'--pem-chain',
required=True,
help='A file containing a list of PEM-encoded certificates, starting '
'with the current CA certificate and ending with the root CA '
'certificate.').AddToParser(parser)
def _ParsePemChainFromFile(self, pem_chain_file):
"""Parses a pem chain from a file, splitting the leaf cert and chain.
Args:
pem_chain_file: file containing the pem_chain.
Raises:
exceptions.InvalidArgumentException if not enough certificates are
included.
Returns:
A tuple with (leaf_cert, rest_of_chain)
"""
try:
pem_chain_input = files.ReadFileContents(pem_chain_file)
except (files.Error, OSError, IOError):
raise exceptions.BadFileException(
"Could not read provided PEM chain file '{}'.".format(pem_chain_file))
certs = pem_utils.ValidateAndParsePemChain(pem_chain_input)
if len(certs) < 2:
raise exceptions.InvalidArgumentException(
'pem-chain',
'The pem_chain must include at least two certificates - the subordinate CA certificate and an issuer certificate.'
)
return certs[0], certs[1:]
def Run(self, args):
client = privateca_base.GetClientInstance()
messages = privateca_base.GetMessagesModule()
ca_ref = args.CONCEPTS.certificate_authority.Parse()
pem_cert, pem_chain = self._ParsePemChainFromFile(args.pem_chain)
operation = client.projects_locations_certificateAuthorities.Activate(
messages
.PrivatecaProjectsLocationsCertificateAuthoritiesActivateRequest(
name=ca_ref.RelativeName(),
activateCertificateAuthorityRequest=messages
.ActivateCertificateAuthorityRequest(
pemCaCertificate=pem_cert, pemCaCertificateChain=pem_chain)))
operations.Await(operation, 'Activating Certificate Authority.')
|
[
"[email protected]"
] | |
0b06190e016241e069caff14b930d190e7d5f83f
|
00d1856dbceb6cef7f92d5ad7d3b2363a62446ca
|
/djexample/images/forms.py
|
dce42d13b42c6f5dec509f69a49c66092513e4b3
|
[] |
no_license
|
lafabo/django_by_example
|
0b05d2b62117f70681c5fc5108b4072c097bc119
|
3cf569f3e6ead9c6b0199d150adf528bd0b2a7c5
|
refs/heads/master
| 2020-12-29T17:54:12.894125 | 2016-06-04T10:35:22 | 2016-06-04T10:35:22 | 58,313,176 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,039 |
py
|
from django import forms
from .models import Image
from urllib import request
from django.core.files.base import ContentFile
from django.utils.text import slugify
class ImageCreateForm(forms.ModelForm):
class Meta:
model = Image
fields = ('title', 'url', 'description')
widgets = {
'url': forms.HiddenInput
}
def clean_url(self):
url = self.cleaned_data['url']
valid_extensions = ['jpg', 'jpeg']
extension = url.rsplit('.', 1)[1].lower()
if extension not in valid_extensions:
raise forms.ValidationError('The given url does not match valid image extensions')
return url
def save(self, force_insert=False, force_update=False, commit=True):
image = super(ImageCreateForm, self).save(commit=False)
image_url = self.cleaned_data['url']
image_name = '%s.%s' % (slugify(image.title), image_url.rsplit('.', 1)[1].lower())
# download image from url
response = request.urlopen(image_url)
image.image.save(image_name, ContentFile(response.read()), save=False)
if commit:
image.save()
return image
|
[
"[email protected]"
] | |
548053fb510f44628c5bba5b2b7d3b962e5a86e1
|
b0b87924d07101e25fa56754ceaa2f22edc10208
|
/workspace/python_study/python_gspark/15-2.py
|
88ec8fdb1e8e43f9901bf9017a64fa128a312bad
|
[] |
no_license
|
SoheeKwak/Python
|
2295dd03e5f235315d07355cbe72998f8b86c147
|
e1a5f0ecf31e926f2320c5df0e3416306b8ce316
|
refs/heads/master
| 2020-04-02T13:49:58.367361 | 2018-11-23T09:33:23 | 2018-11-23T09:33:23 | 154,499,204 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,204 |
py
|
import numpy as np
a1 = np.arange(24)
a2 = np.arange(24).reshape((4,6))
a3 = np.arange(24).reshape((2,4,3))
a1[5]=1000
a2[0,1]=1000
a3[1,0,1]=1000 #2번째 행, 1번째열, 2번째depth
print(a1)
print(a2)
print(a2[1:3,1:5])
print(a2[1:-1,1:-1])
print(a2[:,1:3])
a2[:,1:3]=99
print(a2)
a1 = np.arange(1,25).reshape(4,6)
even_a = a1%2==0
print(a1[even_a])
print("="*50)
import pandas as pd
rain = pd.read_csv("seattle.csv")
print(rain)
print("="*50)
rain_r = rain['PRCP']
print(rain_r)
print(type(rain_r)) #<class 'pandas.core.series.Series'>
print("="*50)
rain_r = rain['PRCP'].values
print(rain_r)
print(type(rain_r)) #<class 'numpy.ndarray'>
print("데이터 크기:",len(rain_r))
days_a = np.arange(0,365)
con_jan = days_a < 31 #True:31개 False:334개
print(con_jan[:40]) #1월1일부터 40일간의 강수량 데이터
print("="*50)
print(con_jan) #1월 한달간(31일간) 강수량 데이터
print(np.sum(rain_r[con_jan]))#1월달 강수량의 총합
print(np.mean(rain_r[con_jan])) #1월달 평균 강수량
a = np.arange(1,25).reshape((4,6))
# 팬시 인덱싱: 배열에 인덱스 배열을 전달해서 데이터를 참조
print(a)
print(a[0,0],a[1,1],a[2,2],a[3,3])
print(a[[0,1,2,3],[0,1,2,3]])
print(a[:,[1,2]])#대괄호 안에 콜론없이 지정되면 범위가 아닌, 그 해당 열만 출력
print(a[:,[1,3]])
print("="*50)
#ravel(배열을 1차원으로)
a = np.random.randint(1,10,(2,3))
print(a)
print(a.ravel())
#resize:배열크기 변경(요소 수 변경), reshape:배열변경(요소 수 변경X)
print(a.shape)
a.resize((2,2))
print(a)
print("="*50)
a = np.random.randint(1,10,(2,6))
print(a)
a.resize((2,10)) #사이즈가 커지면 늘어난 요소만큼 채워지고 0으로 초기화
print(a)
a.resize((3,3)) # 사이즈가 줄어들면 순서대로 요소가 들어가고 나머지 삭제됨
print(a)
print("="*50)
a = np.arange(1,10).reshape(3,3)
b = np.arange(10,19).reshape(3,3)
res = np.append(a,b)
print(res) #1차원으로 출력
print(a)
print(b)
print("="*50)
res = np.append(a,b, axis=0) #행방향 2차원 배열
print(res)
print("="*50)
a = np.arange(1,10).reshape(3,3)
res = np.arange(10,20).reshape(2,5)
b = np.arange(10,19).reshape(3,3)
# np.append(a,res,axis=0) #기준축과 Shape다르면 append 오류 발생
# print(res)
print(a)
res = np.append(a,b,axis=1) #열방향, 2차원 배열
print(res)
print(b)
res = np.append(a,b,axis=0) #행방향, 2차원 배열
print(res)
# x = np.arange(10,20).reshape(2,5)
# np.append(res,x,axis=1) #shape이 다르므로 오류
a = np.arange(1,10).reshape(3,3)
print(a)
a = np.insert(a,3,99) #1차원, 99를 3번째 자리에 넣어라
print(a)
a = np.arange(1,10).reshape(3,3)
a = np.insert(a,2,99, axis=0) #행을 따라 2번째 줄에 99를 추가로 넣어라
print(a)
a = np.arange(1,10).reshape(3,3)
a = np.insert(a,1,99, axis=1) #열을 따라 2번째 줄에 99를 추가로 넣어라
print(a)
print("="*50)
a = np.arange(1,10).reshape(3,3)
print(a)
print(np.delete(a,3)) #1차원, 3번째 자리 요소를 지워라
#a배열의 1번 인덱스 행 제거한 후 출력
print(np.delete(a,1,axis=0))
#a배열의 1번 인덱스 열 제거한 후 출력
print(np.delete(a,1,axis=1))
print("="*50)
#배열 간의 결합(concatenate, vstack, hastack)
a = np.arange(1,7).reshape(2,3)
print(a)
b = np.arange(7,13).reshape(2,3)
print(b)
res = np.concatenate((a,b))
print(res)
print("="*50)
a = np.arange(1,7).reshape(2,3)
b = np.arange(7,13).reshape(2,3)
print(np.vstack((a,b)))
print(np.vstack((a,b,a,b))) #vertical 수직방향으로 붙음
print("="*50)
a = np.arange(1,7).reshape(2,3)
b = np.arange(7,13).reshape(2,3)
print(np.hstack((a,b))) #horizontal 수평방향으로 붙음
print(np.hstack((a,b,a,b,a,b)))
print("="*50)
a = np.arange(1,25).reshape(4,6)
print(a)
res = np.hsplit(a,2) #a를 두개의 그룹으로 좌우로 나눔
print(res)
res = np.hsplit(a,3)
print(res)
res = np.vsplit(a,2) #a를 두개의 그룹으로 상하로 나눔
print(res)
#
print("="*50)
x = np.array([1,2])
print(x)
print(x.dtype)
x = np.array([1.,2.])
print(x.dtype)
x = np.array([1,2],dtype=np.int64)
print(x.dtype)
x = np.array([[1,2],[3,4]])
y = np.array([[5,6],[7,8]])
v = np.array([9,10])
w = np.array([11,12])
#벡터의 내적
print(np.dot(v,w)) #9*11+10*12=219
print(v.dot(w))
#행렬과 벡터의 곱
print(x.dot(v)) #[1,2]*[9,10]+[3,4]*[9,10]=[29,67]
#행렬곱
print(x)
print(y)
print(np.dot(x,y)) #1*5+2*7, 1*6+2*8, 3*5+4*7, 3*6+4*8
x = np.array([[1,2],[3,4]])
print(x)
print(x.T) #transpose 대칭되는 요소끼리 묶어줌
print("="*50)
x = np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]])
print(x)
v = np.array([1,0,1])
y = np.empty_like(x) #x와 같은 shape을 만들어 준다
print(y)
print("="*50)
for i in range(4):
y[i,:] = x[i,:]+v #[2,2,4]=[1,2,3]+[1,0,1]
print(y)
print("="*50)
x = np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]])
v = np.array([1,0,1])
vv = np.tile(v,(4,1)) #열방향으로 v를 4번 반복
print(vv)
vv = np.tile(v,(4,2))
print(vv)
vv = np.tile(v,(4,5))
print(vv)
a = np.array([[1,2],[4,5]])
s = np.prod(a) #각각의 요소에 대해 곱셈
print(s)
s = np.prod(a,axis=0)
print(s)
s = np.prod(a,axis=1)
print(s)
s = np.max(np.prod(a,axis=1))
print(s)
|
[
"[email protected]"
] | |
2a267560a422f7c6eff4da4d5177892beb9c99f9
|
abeec076f89231c4dd589e84def8301e653d6e20
|
/orders/views.DEP.py
|
9ac624bc2133c17490ffaf2dc25abdf9178452e3
|
[] |
no_license
|
gibil5/pcm_restaurant
|
1cde6ee2780d3aa39dbc26dd9583f8465a1ff13a
|
a56ec01c533ed2b6e198de9813f9518a3eca2d14
|
refs/heads/master
| 2020-08-29T20:10:13.606229 | 2019-12-01T19:48:47 | 2019-12-01T19:48:47 | 218,160,478 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 982 |
py
|
def add_order(request, employee_id):
print()
print('Add order')
title = 'Add Order'
cook = get_object_or_404(Employee, pk=employee_id) # Get Object
print(cook)
table = Table.objects.first()
# Create and populate
if request.method == 'POST':
print('Create and populate')
form = lib.NewOrderForm(request.POST)
if form.is_valid():
form_instance = lib.NewOrderForm(request.POST)
form_instance.cook_id = 1
new_order = form_instance.save()
return HttpResponseRedirect('/orders/thanks/')
# Create a blank form
else:
order = Order()
#order = Order.objects.create(cook=cook)
#order.save()
#form = lib.NewOrderForm(instance=order)
form = lib.NewOrderForm(
instance=order,
initial={
'cook': cook,
'table': table,
},
)
#form.cook = cook
ctx = {
'title': title,
'form': form,
}
output = render(request, 'orders/add.html', ctx)
return HttpResponse(output)
|
[
"[email protected]"
] | |
0302d39e78724531b2f09e38788aa5f669609958
|
82ca891008793f570668a7f2c760ae0f22d40494
|
/src/VAMPzero/Component/Wing/Aileron/Geometry/tipYLocation.py
|
078b55d940fb68c2cb44be5e6bcef8800cd0f839
|
[
"Apache-2.0"
] |
permissive
|
p-chambers/VAMPzero
|
22f20415e83140496b1c5702b6acbb76a5b7bf52
|
4b11d059b1c7a963ec7e7962fa12681825bc2f93
|
refs/heads/master
| 2021-01-19T10:49:06.393888 | 2015-06-24T10:33:41 | 2015-06-24T10:33:41 | 82,208,448 | 1 | 0 | null | 2017-02-16T17:42:55 | 2017-02-16T17:42:55 | null |
UTF-8
|
Python
| false | false | 2,598 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Copyright: Deutsches Zentrum fuer Luft- und Raumfahrt e.V., 2015 (c)
Contact: [email protected] and [email protected]
'''
from cmath import pi
from VAMPzero.Handler.Parameter import parameter
rad = pi / 180.
class tipYLocation(parameter):
'''
Calculates the spanwise tip location of the aileron, measured from the fuselage center line
(note: only one aileron is considered).
:Unit: [m]
:Source:
:Author: Lisanne van Veen
'''
def __init__(self, value=0., unit='m', parent='', cpacsPath=''):
super(tipYLocation, self).__init__(value=value, unit=unit, doc=self.__doc__, status='init', parent=parent,
cpacsPath=cpacsPath)
def calc(self):
'''
The function is a statistical relation obtained by analyzing data of large passenger aircraft.
The design space of this equation is:
* refAreaWing 72.72 - 845 m2
* spanWing 26 - 79.80 m
:Source:
'''
refAreaWing = self.parent.wing.refArea.getValue()
spanWing = self.parent.wing.span.getValue()
tipYLocationAileron = - 2.103872236 + 0.5286847608 * spanWing + 0.00004371791524 * (refAreaWing ** 2) \
- 0.0007899727342 * spanWing * refAreaWing + 0.002586029039 * (spanWing ** 2)
# if the spanwise tip location of the aileron is larger than half of the wing span
# set the location of the spanwise tip location equal to 95% of the half wing span
if tipYLocationAileron > (spanWing / 2.):
tipYLocationAileron = (spanWing / 2.) * 0.95
return self.setValueCalc(tipYLocationAileron)
###################################################################################################
#EOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFE#
###################################################################################################
|
[
"[email protected]"
] | |
b8e22c9f0854b5dda5191d086ca45baaa3e98d35
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/twelve-days/5d8ab06a7a6b4acdb6be11d098786e90.py
|
8c8955c509d158350c07858c3b2a1c0d850b89cb
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null |
UTF-8
|
Python
| false | false | 2,893 |
py
|
#twelve-days
def verse(day):
day = day
if day == 1:
return "On the first day of Christmas my true love gave to me, a Partridge in a Pear Tree.\n"
elif day == 2:
return "On the second day of Christmas my true love gave to me, two Turtle Doves, and a Partridge in a Pear Tree.\n"
elif day ==3:
return "On the third day of Christmas my true love gave to me, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.\n"
elif day ==4:
return "On the fourth day of Christmas my true love gave to me, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.\n"
elif day ==5:
return "On the fifth day of Christmas my true love gave to me, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.\n"
elif day ==6:
return "On the sixth day of Christmas my true love gave to me, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.\n"
elif day ==7:
return "On the seventh day of Christmas my true love gave to me, seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.\n"
elif day ==8:
return "On the eighth day of Christmas my true love gave to me, eight Maids-a-Milking, seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.\n"
elif day ==9:
return "On the ninth day of Christmas my true love gave to me, nine Ladies Dancing, eight Maids-a-Milking, seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.\n"
elif day ==10:
return "On the tenth day of Christmas my true love gave to me, ten Lords-a-Leaping, nine Ladies Dancing, eight Maids-a-Milking, seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.\n"
elif day ==11:
return "On the eleventh day of Christmas my true love gave to me, eleven Pipers Piping, ten Lords-a-Leaping, nine Ladies Dancing, eight Maids-a-Milking, seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.\n"
else:
return "On the twelfth day of Christmas my true love gave to me, twelve Drummers Drumming, eleven Pipers Piping, ten Lords-a-Leaping, nine Ladies Dancing, eight Maids-a-Milking, seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.\n"
def verses(start, end):
result = ''
for i in range (start,end+1):
result += verse(i)+'\n'
return result
def sing():
return verses(1,12)
|
[
"[email protected]"
] | |
6b0c2baf8952bfe46d3c9ac541be5644748044b9
|
e6a48a7d5ee2df232355f5d5488fa1cd3c53ce89
|
/tests.py
|
7f298b580cf0a2453c734408872a2479a954b2cd
|
[] |
no_license
|
charleycodes/testing-py
|
f1e07cb678d52e26cd1cdb6bc34dcf7a3c2b331f
|
963a50d0074083cf02a253ef77cef46db5c7ff7a
|
refs/heads/master
| 2021-06-05T11:10:35.950108 | 2016-10-14T19:57:53 | 2016-10-14T19:57:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,406 |
py
|
import unittest
import party
class PartyTests(unittest.TestCase):
"""Tests for my party site."""
def setUp(self):
self.client = party.app.test_client()
party.app.config['TESTING'] = True
def test_homepage(self):
result = self.client.get("/")
self.assertIn("I'm having a party", result.data)
def test_no_rsvp_yet(self):
# FIXME: Add a test to show we haven't RSVP'd yet
result = self.client.get("/")
self.assertNotIn('<h2>Party Details</h2>', result.data)
self.assertIn('<h2>Please RSVP</h2>', result.data)
def test_rsvp(self):
result = self.client.post("/rsvp",
data={'name': "Jane",
'email': "[email protected]"},
follow_redirects=True)
self.assertIn('<h2>Party Details</h2>', result.data)
self.assertNotIn('<h2>Please RSVP</h2>', result.data)
def test_rsvp_mel(self):
result = self.client.post("/rsvp",
data={'name': "Mel Melitpolski",
'email': "[email protected]"},
follow_redirects=True)
self.assertNotIn('<h2>Party Details</h2>', result.data)
self.assertIn('<h2>Please RSVP</h2>', result.data)
if __name__ == "__main__":
unittest.main()
|
[
"[email protected]"
] | |
4f5bb79b857664488c0166556416293328d76651
|
b45230162af7ea65416f61cbbbcf1011a422692b
|
/tests/test_pygrade.py
|
3508d0f3021904959e65229d569c26efa43bef25
|
[
"ISC"
] |
permissive
|
Joaron4/pygrade
|
47a12ce4e8925d20e0d4384f4f39a102bf149f97
|
68416ba92afd3ef634a83560935941d03265df8f
|
refs/heads/master
| 2023-03-16T18:47:48.576434 | 2020-12-01T02:52:15 | 2020-12-01T02:52:15 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 413 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_pygrade
----------------------------------
Tests for `pygrade` module.
"""
import unittest
from pygrade import pygrade
class TestPygrade(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_000_something(self):
pass
if __name__ == '__main__':
import sys
sys.exit(unittest.main())
|
[
"[email protected]"
] | |
93d2a93c3766b10060f1163b0518cd03a037d4de
|
e2468c60810764971f2dae2b959650b553042810
|
/1859_sortingTheSentence.py
|
e35fc60b5dd3422f73069456f2c324e9ddef7fc4
|
[] |
no_license
|
awesome-liuxiao/leetcodesolution
|
9a01b6f36266149ae7fe00625785d1ada41f190a
|
3637cd1347b5153daeeb855ebc44cfea5649fc90
|
refs/heads/master
| 2023-06-08T13:42:14.653688 | 2023-06-01T08:39:35 | 2023-06-01T08:39:35 | 213,380,224 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 340 |
py
|
class Solution:
def sortSentence(self, s: str) -> str:
ls = s.split()
res = [""]*len(ls)
for word in ls:
res[int(word[-1])-1] = word[0:len(word)-1]
return ' '.join(res)
X = Solution()
s = "is2 sentence4 This1 a3"
print(X.sortSentence(s))
s = "Myself2 Me1 I4 and3"
print(X.sortSentence(s))
|
[
"[email protected]"
] | |
ce03108274b37dc8809c8883264cd853956d525c
|
17f918c06ca476f79d28d712abfa356b2dcfb6c7
|
/koishi/plugins/automation_touhou_feed/events.py
|
c0284afe3effa51079eda45d6079ea30d3d6ee10
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
HuyaneMatsu/Koishi
|
eb87693ad34da2483efe2b6bdaa4f3fae417e491
|
74f92b598e86606ea3a269311316cddd84a5215f
|
refs/heads/master
| 2023-08-23T22:54:37.006530 | 2023-08-23T20:26:49 | 2023-08-23T20:26:49 | 163,678,458 | 17 | 6 |
NOASSERTION
| 2023-06-14T14:18:27 | 2018-12-31T15:18:31 |
Python
|
UTF-8
|
Python
| false | false | 1,167 |
py
|
__all__ = ()
from ...bots import SLASH_CLIENT
from ..automation_core import get_touhou_feed_enabled
from .logic import (
reset_touhou_feeders, reset_channel, should_touhou_feed_in_channel, try_remove_channel, try_remove_guild,
try_update_channel, try_update_guild
)
@SLASH_CLIENT.events
async def channel_create(client, channel):
if get_touhou_feed_enabled(channel.guild_id):
if should_touhou_feed_in_channel(client, channel):
try_update_channel(channel)
@SLASH_CLIENT.events
async def channel_delete(client, channel):
try_remove_channel(channel)
@SLASH_CLIENT.events
async def channel_edit(client, channel, old_parameters):
if get_touhou_feed_enabled(channel.guild_id):
reset_channel(client, channel)
@SLASH_CLIENT.events
async def guild_create(client, guild):
if get_touhou_feed_enabled(guild.id):
try_update_guild(client, guild)
@SLASH_CLIENT.events
async def guild_delete(client, guild, guild_profile):
if get_touhou_feed_enabled(guild.id):
try_remove_guild(guild)
@SLASH_CLIENT.events
async def ready(client):
client.events.remove(ready)
reset_touhou_feeders(client)
|
[
"[email protected]"
] | |
55d1af3af949c3e159d60b095ce259600e812de8
|
156f5362e7381b96f3b2839f94de8778b005274d
|
/tests/bindings/test_bindings.py
|
99e0ca3c3d74647d7e7e35d5cb0769064383656b
|
[
"MIT",
"CC-BY-3.0"
] |
permissive
|
sairam4123/godot-python
|
3f8bfcd989ae1b06ec6bf5e01462895b9f5f5fe0
|
a95ed14f6e53ae4eb59e6bd03efb0db90b070bc6
|
refs/heads/master
| 2021-05-20T20:43:18.764693 | 2020-03-02T13:49:06 | 2020-03-02T13:49:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,157 |
py
|
import pytest
import godot
from godot import Vector3, Object, Node, Node2D, PluginScript, OK
def test_free_node():
v = Node.new()
v.free()
# `check_memory_leak` auto fixture will do the bookkeeping
def test_expose_contains_constant():
assert "OK" in dir(godot)
assert OK is not None
def test_expose_contains_class():
assert "Node" in dir(godot)
assert Node is not None
def test_expose_contains_builtins():
assert "Vector3" in dir(godot)
assert Vector3 is not None
def test_call_one_arg_short(current_node):
with pytest.raises(TypeError) as exc:
current_node.get_child()
assert str(exc.value) == "get_child() takes exactly one argument (0 given)"
def test_call_too_few_args(current_node):
with pytest.raises(TypeError) as exc:
current_node.move_child()
assert (
str(exc.value) == "move_child() takes exactly 2 positional arguments (0 given)"
)
def test_call_with_defaults_and_too_few_args(current_node):
with pytest.raises(TypeError) as exc:
current_node.add_child()
assert (
str(exc.value) == "add_child() takes at least 1 positional argument (0 given)"
)
def test_call_none_in_base_type_args(current_node):
with pytest.raises(TypeError) as exc:
# signature: def get_child(self, godot_int idx)
current_node.get_child(None)
assert str(exc.value) == "an integer is required"
def test_call_none_in_builtin_args(current_node):
with pytest.raises(TypeError) as exc:
# signature: def get_node(self, NodePath path not None)
current_node.get_node(None)
assert str(exc.value) == "Invalid value None, must be str or NodePath"
def test_call_none_in_bindings_args(current_node):
with pytest.raises(TypeError) as exc:
# signature: def get_path_to(self, Node node not None)
current_node.get_path_to(None)
assert (
str(exc.value)
== "Argument 'node' has incorrect type (expected godot.bindings.Node, got NoneType)"
)
def test_call_too_many_args(current_node):
with pytest.raises(TypeError) as exc:
current_node.get_child(1, 2)
assert str(exc.value) == "get_child() takes exactly one argument (2 given)"
def test_call_with_default_and_too_many_args(current_node):
with pytest.raises(TypeError) as exc:
current_node.add_child(1, 2, 3)
assert (
str(exc.value) == "add_child() takes at most 2 positional arguments (3 given)"
)
def test_call_with_defaults(generate_obj):
node = generate_obj(Node)
child = generate_obj(Node)
# signature: void add_child(Node node, bool legible_unique_name=false)
node.add_child(child)
# legible_unique_name is False by default, check name is not human-redable
children_names = [str(x.name) for x in node.get_children()]
assert children_names == ["@@2"]
def test_call_with_kwargs(generate_obj):
node = generate_obj(Node)
child = generate_obj(Node)
new_child = generate_obj(Node)
node.add_child(child, legible_unique_name=True)
# Check name is readable
children_names = [str(x.name) for x in node.get_children()]
assert children_names == ["Node"]
# Kwargs are passed out of order
node.add_child_below_node(legible_unique_name=True, child_node=new_child, node=node)
# Check names are still readable
children_names = [str(x.name) for x in node.get_children()]
assert children_names == ["Node", "Node2"]
def test_inheritance(generate_obj):
node = generate_obj(Node)
node2d = generate_obj(Node2D)
isinstance(node, Object)
isinstance(node2d, Object)
isinstance(node2d, Node)
def test_call_with_refcounted_return_value(current_node):
script = current_node.get_script()
assert isinstance(script, PluginScript)
def test_call_with_refcounted_param_value(generate_obj):
node = generate_obj(Node)
script = PluginScript.new()
node.set_script(script)
def test_create_refcounted_value(current_node):
script1_ref1 = PluginScript.new()
script2_ref1 = PluginScript.new()
script1_ref2 = script1_ref1
script2_ref2 = script2_ref1
del script1_ref1
|
[
"[email protected]"
] | |
43d0b28b0b29cb0bb324df2d02c8001c4efe022f
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_sting.py
|
bafade4da0ff73720d1509ad0c570e87d50fe446
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 650 |
py
|
#calss header
class _STING():
def __init__(self,):
self.name = "STING"
self.definitions = [u'If an insect, plant, or animal stings, it produces a small but painful injury, usually with a poison, by brushing against the skin or making a very small hole in the skin: ', u'to cause sharp but usually temporary pain: ', u"If someone's unkind remarks sting, they make you feel upset and annoyed: ", u'to charge someone a surprisingly large amount of money for something: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'verbs'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"[email protected]"
] | |
8ff0cf9f0326d522054d28a689a09a1bba5d292a
|
1c8ea05ed65d76ab0e7bf8e642e0573e34d880ab
|
/BOJ/continue_number.py
|
fd2990487b7814cfe23effa2dc2d61bb8c9f9285
|
[] |
no_license
|
PARKJUHONG123/turbo-doodle
|
1d259c88544d5e52ed375f119792363f5c1b4377
|
6b073281236af50949042c1a6b269752037cb829
|
refs/heads/master
| 2023-01-14T13:41:58.299164 | 2020-11-23T12:12:30 | 2020-11-23T12:12:30 | 259,669,738 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 336 |
py
|
# 1~9 : 9
# 10~99 : 90
# 100~999 : 900
# 1000~9999 : 9000
import sys
def nine_num(size):
num = pow(10, size - 1)
return num * 9
N = sys.stdin.readline().split()[0]
length = len(N)
answer = 0
for i in range(1, length):
answer += nine_num(i) * i
num = pow(10, length - 1)
answer += (int(N) - num + 1) * length
print(answer)
|
[
"[email protected]"
] | |
c016beb5d996c1ca1390e35753f3e429fdebd5a6
|
4ec6ed4ebcb9346042669e6aa03be0e502ed48b3
|
/leetcode/convert-sorted-array-to-binary-search-tree.py
|
84e69232dd85203daae4a1d75c1f376e113add3f
|
[] |
no_license
|
shonihei/road-to-mastery
|
79ed41cb1ad0dc2d0b454db2ccc7dd9567b03801
|
312bdf5101c3c1fc9a4d0b6762b5749ca57efe08
|
refs/heads/master
| 2021-01-22T19:59:17.038641 | 2017-11-16T15:21:55 | 2017-11-16T15:21:55 | 85,266,186 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 415 |
py
|
"""
Given an array where elements are sorted in ascending order, convert it to a height balanced BST.
"""
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
def sortedArrayToBST(nums):
if not nums:
return None
mid = len(nums) // 2
root = TreeNode(nums[mid])
root.left = sortedArrayToBST(nums[:mid])
root.right = sortedArrayToBST(nums[mid+1:])
return root
|
[
"[email protected]"
] | |
146ac52c155f6a21ab8f406bde451d1ce53f6925
|
0d464df42f5cc3c9a3b992ae9ff6160e5da5701d
|
/CHAPTER 12 (sorting and selection)/decorated_merge_sort.py
|
1ac0479db866b540143a43557da8834d51e73996
|
[
"MIT"
] |
permissive
|
ahammadshawki8/DSA-Implementations-in-Python
|
6b61d44e638bfb7f6cf3a8b1fc57d15777313420
|
fc18b54128cd5bc7639a14999d8f990190b524eb
|
refs/heads/master
| 2022-12-26T03:54:16.229935 | 2020-10-07T05:17:55 | 2020-10-07T05:17:55 | 267,899,551 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 527 |
py
|
from linked_queue_class import *
from merge_sort_linked import *
def decorated_merge_sort(data,key=None):
"""Demonstration of the decorate-sort-undercorate pattern."""
if key is not None:
for j in range(len(data)):
data[j] = _Item(key(data[j]), data[j]) # decorate each element
merge_sort(data) # sort with existing algorithm
if key is not None:
for j in range(len(data)):
data[j] = data[j]._value # undercoat each element
|
[
"[email protected]"
] | |
ad1b85e24bddffba1588102c18d19e9a7f5c4a35
|
ca75f7099b93d8083d5b2e9c6db2e8821e63f83b
|
/z2/part2/batch/jm/parser_errors_2/925229167.py
|
e0ca9ce450c7ef5983d536f9dd4a53f2584448db
|
[
"MIT"
] |
permissive
|
kozakusek/ipp-2020-testy
|
210ed201eaea3c86933266bd57ee284c9fbc1b96
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
refs/heads/master
| 2022-10-04T18:55:37.875713 | 2020-06-09T21:15:37 | 2020-06-09T21:15:37 | 262,290,632 | 0 | 0 |
MIT
| 2020-06-09T21:15:38 | 2020-05-08T10:10:47 |
C
|
UTF-8
|
Python
| false | false | 1,170 |
py
|
from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 925229167
"""
"""
random actions, total chaos
"""
board = gamma_new(2, 3, 3, 3)
assert board is not None
assert gamma_move(board, 1, 1, 1) == 1
assert gamma_move(board, 1, 1, 1) == 0
assert gamma_move(board, 2, 0, 1) == 1
assert gamma_move(board, 3, 0, 0) == 1
assert gamma_move(board, 1, 2, 0) == 0
assert gamma_move(board, 2, 0, 0) == 0
assert gamma_move(board, 2, 1, 1) == 0
assert gamma_free_fields(board, 2) == 3
assert gamma_move(board, 3, 1, 1) == 0
assert gamma_free_fields(board, 3) == 3
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_move(board, 1, 0, 2) == 1
assert gamma_move(board, 3, 0, 1) == 0
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_move(board, 2, 0, 1) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_golden_move(board, 2, 2, 0) == 0
assert gamma_move(board, 3, 0, 1) == 0
assert gamma_move(board, 3, 1, 2) == 1
assert gamma_golden_move(board, 3, 1, 1) == 1
gamma_delete(board)
|
[
"[email protected]"
] | |
dff782445f083c8852c95a14f37f05b290a8043b
|
ba6921a268198bc0af433622c021533905f5d462
|
/scripts/in_container/run_migration_reference.py
|
43692b2c458d2e0a50e097a818144f39bdf31553
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
potiuk/airflow
|
b6447765b1a7b586a3d6c8d7ba9262f6bf68fbfd
|
ca2f3013bcb123c4b3973a5b85de77094bf2c459
|
refs/heads/main
| 2023-08-30T13:05:50.698888 | 2023-05-21T21:08:14 | 2023-05-21T21:26:14 | 173,467,275 | 8 | 7 |
Apache-2.0
| 2023-05-21T21:58:40 | 2019-03-02T15:50:53 |
Python
|
UTF-8
|
Python
| false | false | 6,272 |
py
|
#!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Module to update db migration information in Airflow
"""
from __future__ import annotations
import os
import re
from pathlib import Path
from textwrap import wrap
from typing import TYPE_CHECKING, Iterable
from alembic.script import ScriptDirectory
from tabulate import tabulate
from airflow import __version__ as airflow_version
from airflow.utils.db import _get_alembic_config
if TYPE_CHECKING:
from alembic.script import Script
airflow_version = re.match(r"(\d+\.\d+\.\d+).*", airflow_version).group(1) # type: ignore
project_root = Path(__file__).parents[2].resolve()
def replace_text_between(file: Path, start: str, end: str, replacement_text: str):
original_text = file.read_text()
leading_text = original_text.split(start)[0]
trailing_text = original_text.split(end)[1]
file.write_text(leading_text + start + replacement_text + end + trailing_text)
def wrap_backticks(val):
def _wrap_backticks(x):
return f"``{x}``"
return ",\n".join(map(_wrap_backticks, val)) if isinstance(val, (tuple, list)) else _wrap_backticks(val)
def update_doc(file, data):
replace_text_between(
file=file,
start=" .. Beginning of auto-generated table\n",
end=" .. End of auto-generated table\n",
replacement_text="\n"
+ tabulate(
headers={
"revision": "Revision ID",
"down_revision": "Revises ID",
"version": "Airflow Version",
"description": "Description",
},
tabular_data=data,
tablefmt="grid",
stralign="left",
disable_numparse=True,
)
+ "\n\n",
)
def has_version(content):
    """Return True if *content* contains a line assigning ``airflow_version``."""
    pattern = re.compile(r"^airflow_version\s*=.*", re.MULTILINE)
    return pattern.search(content) is not None
def insert_version(old_content, file):
    """Insert an ``airflow_version`` line after each ``depends_on`` line.

    Writes the modified content back to *file*.  The version inserted is
    the module-level ``airflow_version``.
    """

    def _append_version(match):
        # Keep the matched depends_on line and add the version right below it.
        return f"{match.group(1)}\nairflow_version = '{airflow_version}'"

    new_content = re.sub(
        r"(^depends_on.*)",
        _append_version,
        old_content,
        flags=re.MULTILINE,
    )
    file.write_text(new_content)
def revision_suffix(rev: Script):
    """Return a human-readable marker for special revisions, or ''.

    The checks run in priority order: head, base, merge point, branch point.
    """
    markers = (
        ("is_head", " (head)"),
        ("is_base", " (base)"),
        ("is_merge_point", " (merge_point)"),
        ("is_branch_point", " (branch_point)"),
    )
    for attribute, suffix in markers:
        if getattr(rev, attribute):
            return suffix
    return ""
def ensure_airflow_version(revisions: Iterable[Script]):
    """Make sure every migration module declares ``airflow_version``.

    Modules lacking the assignment get one inserted via insert_version().
    """
    for revision in revisions:
        module_file = revision.module.__file__
        assert module_file is not None  # For Mypy.
        path = Path(module_file)
        text = path.read_text()
        if not has_version(text):
            insert_version(text, path)
def get_revisions() -> Iterable[Script]:
    """Yield every migration revision in Alembic's walk order."""
    alembic_config = _get_alembic_config()
    script_directory = ScriptDirectory.from_config(alembic_config)
    yield from script_directory.walk_revisions()
def update_docs(revisions: Iterable[Script]):
    """Rebuild the migrations reference table in the docs from *revisions*."""
    rows = [
        {
            "revision": wrap_backticks(rev.revision) + revision_suffix(rev),
            "down_revision": wrap_backticks(rev.down_revision),
            "version": wrap_backticks(rev.module.airflow_version),  # type: ignore
            "description": "\n".join(wrap(rev.doc, width=60)),
        }
        for rev in revisions
    ]
    update_doc(
        file=project_root / "docs" / "apache-airflow" / "migrations-ref.rst",
        data=rows,
    )
def num_to_prefix(idx: int) -> str:
    """Return a 4-character, zero-padded 1-based ordinal followed by '_'.

    Numbers longer than four digits keep only their last four characters,
    matching the original slicing behaviour.
    """
    return str(idx + 1).zfill(4)[-4:] + "_"
def ensure_mod_prefix(mod_name, idx, version):
    """Return *mod_name* with a standardized ordinal + version prefix.

    Previously-standardized names (four numeric groups) are stripped back
    to their base name first; fresh migration files lose their revision
    hash prefix.  Names matching neither pattern are prefixed as-is.
    """
    prefix = num_to_prefix(idx) + "_".join(version) + "_"
    standardized = re.match(r"([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+)_(.+)", mod_name)
    if standardized:
        # previously standardized file, rebuild the name
        return prefix + standardized.group(5)
    fresh = re.match(r"([a-z0-9]+)_(.+)", mod_name)
    if fresh:
        # new migration file, standard format
        return prefix + fresh.group(2)
    return prefix + mod_name
def ensure_filenames_are_sorted(revisions):
    """Rename migration files so their names sort in revision order.

    Walks *revisions* in order, computing the canonical name for each
    module file; mismatches are collected and renamed at the end.  If the
    revision graph currently has unmerged heads, aborts with a SystemExit
    that tells the developer the exact `alembic merge` command to run.
    """
    renames = []
    is_branched = False
    unmerged_heads = []
    for idx, rev in enumerate(revisions):
        mod_path = Path(rev.module.__file__)
        version = rev.module.airflow_version.split(".")[0:3]  # only first 3 tokens
        correct_mod_basename = ensure_mod_prefix(mod_path.name, idx, version)
        if mod_path.name != correct_mod_basename:
            # Defer the rename until the whole walk finishes so a partial
            # failure does not leave files renamed mid-scan.
            renames.append((mod_path, Path(mod_path.parent, correct_mod_basename)))
        # Track whether we are inside an unmerged branch of the graph:
        # a merge point closes a branch, a branch point opens one.
        if is_branched and rev.is_merge_point:
            is_branched = False
        if rev.is_branch_point:
            is_branched = True
        elif rev.is_head:
            unmerged_heads.append(rev.revision)
    if is_branched:
        # Unmerged heads: renaming would be ambiguous, so fail loudly with
        # a copy-pasteable merge command (first 4 chars identify each head).
        head_prefixes = [x[0:4] for x in unmerged_heads]
        alembic_command = (
            "alembic merge -m 'merge heads " + ", ".join(head_prefixes) + "' " + " ".join(unmerged_heads)
        )
        raise SystemExit(
            "You have multiple alembic heads; please merge them with the `alembic merge` command "
            f"and re-run pre-commit. It should fail once more before succeeding. "
            f"\nhint: `{alembic_command}`"
        )
    for old, new in renames:
        os.rename(old, new)
if __name__ == "__main__":
    # Pass 1: walk oldest-to-newest and stamp missing airflow_version fields.
    revisions = list(reversed(list(get_revisions())))
    ensure_airflow_version(revisions=revisions)
    # Pass 2: re-walk (the modules may have been rewritten above) and fix
    # file-name ordering.
    revisions = list(reversed(list(get_revisions())))
    ensure_filenames_are_sorted(revisions)
    # Pass 3: re-walk once more after renames and regenerate the docs table.
    revisions = list(get_revisions())
    update_docs(revisions)
|
[
"[email protected]"
] | |
40a42af680a63e3a17bb18fe661dc09bb9d98b56
|
6ef8abce322da7a6acf8b940801d7c2286b55f42
|
/Programmers/compressString.py
|
fccb357227a2eb76df7a508063b6c1f0361d23a2
|
[] |
no_license
|
702criticcal/1Day1Commit
|
747a61308e2fae87bad6369cd0bc481bdc89b29a
|
aec375b8b41de1ed5366c714cc6a204905fb2763
|
refs/heads/master
| 2023-01-31T16:47:24.457584 | 2020-12-18T03:42:28 | 2020-12-18T03:42:28 | 287,663,502 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,211 |
py
|
def solution(s):
    """Return the length of the shortest run-length compression of *s*.

    Tries every chunk size from 1 to len(s)//2, compressing consecutive
    repeated chunks as "<count><chunk>" (a count of 1 is omitted), and
    keeps the minimum compressed length found.
    """
    if len(s) == 1:
        return 1
    best = len(s)
    # Chunks longer than half the string can never repeat, so stop there.
    for unit in range(1, len(s) // 2 + 1):
        compressed = ''
        count = 1
        previous = s[:unit]
        # Walk one chunk past the end: the final empty slice never matches
        # `previous`, which flushes the last pending run into `compressed`.
        for start in range(unit, len(s) + unit, unit):
            current = s[start:start + unit]
            if current == previous:
                count += 1
            else:
                compressed += previous if count == 1 else str(count) + previous
                previous = current
                count = 1
        best = min(best, len(compressed))
    return best
|
[
"[email protected]"
] | |
b86e21edd60be743de7e055ffd942d0674c17b3d
|
f82757475ea13965581c2147ff57123b361c5d62
|
/gi-stubs/repository/GstGL/GLRenderbufferAllocationParams.py
|
23d2ebdb5969b2516f3587d02945af8e1fa95f4d
|
[] |
no_license
|
ttys3/pygobject-stubs
|
9b15d1b473db06f47e5ffba5ad0a31d6d1becb57
|
d0e6e93399212aada4386d2ce80344eb9a31db48
|
refs/heads/master
| 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null |
UTF-8
|
Python
| false | false | 6,772 |
py
|
# encoding: utf-8
# module gi.repository.GstGL
# from /usr/lib64/girepository-1.0/GstGL-1.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.repository.Gst as __gi_repository_Gst
import gi.repository.GstBase as __gi_repository_GstBase
import gobject as __gobject
# NOTE(review): the generated signatures for new()/new_wrapped() placed
# non-default parameters after `alloc_params=None`, which is a SyntaxError
# and made this stub module unimportable.  The trailing parameters are given
# `None` defaults here; positional callers are unaffected.
class GLRenderbufferAllocationParams(__gi.Boxed):
    """
    :Constructors:

    ::

        GLRenderbufferAllocationParams()
        new(context:GstGL.GLContext, alloc_params:Gst.AllocationParams=None, renderbuffer_format:GstGL.GLFormat, width:int, height:int) -> GstGL.GLRenderbufferAllocationParams
        new_wrapped(context:GstGL.GLContext, alloc_params:Gst.AllocationParams=None, renderbuffer_format:GstGL.GLFormat, width:int, height:int, gl_handle=None, user_data=None, notify:GLib.DestroyNotify=None) -> GstGL.GLRenderbufferAllocationParams
    """
    def copy(self, *args, **kwargs): # real signature unknown
        pass

    def new(self, context, alloc_params=None, renderbuffer_format=None, width=None, height=None): # real signature unknown; restored from __doc__
        """ new(context:GstGL.GLContext, alloc_params:Gst.AllocationParams=None, renderbuffer_format:GstGL.GLFormat, width:int, height:int) -> GstGL.GLRenderbufferAllocationParams """
        pass

    def new_wrapped(self, context, alloc_params=None, renderbuffer_format=None, width=None, height=None, gl_handle=None, user_data=None, notify=None): # real signature unknown; restored from __doc__
        """ new_wrapped(context:GstGL.GLContext, alloc_params:Gst.AllocationParams=None, renderbuffer_format:GstGL.GLFormat, width:int, height:int, gl_handle=None, user_data=None, notify:GLib.DestroyNotify=None) -> GstGL.GLRenderbufferAllocationParams """
        pass

    def _clear_boxed(self, *args, **kwargs): # real signature unknown
        pass

    def __delattr__(self, *args, **kwargs): # real signature unknown
        """ Implement delattr(self, name). """
        pass

    def __dir__(self, *args, **kwargs): # real signature unknown
        """ Default dir() implementation. """
        pass

    def __eq__(self, *args, **kwargs): # real signature unknown
        """ Return self==value. """
        pass

    def __format__(self, *args, **kwargs): # real signature unknown
        """ Default object formatter. """
        pass

    def __getattribute__(self, *args, **kwargs): # real signature unknown
        """ Return getattr(self, name). """
        pass

    def __ge__(self, *args, **kwargs): # real signature unknown
        """ Return self>=value. """
        pass

    def __gt__(self, *args, **kwargs): # real signature unknown
        """ Return self>value. """
        pass

    def __hash__(self, *args, **kwargs): # real signature unknown
        """ Return hash(self). """
        pass

    def __init_subclass__(self, *args, **kwargs): # real signature unknown
        """
        This method is called when a class is subclassed.

        The default implementation does nothing. It may be
        overridden to extend subclasses.
        """
        pass

    def __init__(self): # real signature unknown; restored from __doc__
        pass

    def __le__(self, *args, **kwargs): # real signature unknown
        """ Return self<=value. """
        pass

    def __lt__(self, *args, **kwargs): # real signature unknown
        """ Return self<value. """
        pass

    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        pass

    def __ne__(self, *args, **kwargs): # real signature unknown
        """ Return self!=value. """
        pass

    def __reduce_ex__(self, *args, **kwargs): # real signature unknown
        """ Helper for pickle. """
        pass

    def __reduce__(self, *args, **kwargs): # real signature unknown
        """ Helper for pickle. """
        pass

    def __repr__(self, *args, **kwargs): # real signature unknown
        """ Return repr(self). """
        pass

    def __setattr__(self, *args, **kwargs): # real signature unknown
        """ Implement setattr(self, name, value). """
        pass

    def __sizeof__(self, *args, **kwargs): # real signature unknown
        """ Size of object in memory, in bytes. """
        pass

    def __str__(self, *args, **kwargs): # real signature unknown
        """ Return str(self). """
        pass

    def __subclasshook__(self, *args, **kwargs): # real signature unknown
        """
        Abstract classes can override this to customize issubclass().

        This is invoked early on by abc.ABCMeta.__subclasscheck__().
        It should return True, False or NotImplemented. If it returns
        NotImplemented, the normal algorithm is used. Otherwise, it
        overrides the normal algorithm (and the outcome is cached).
        """
        pass

    def __weakref__(self, *args, **kwargs): # real signature unknown
        pass

    height = property(lambda self: object(), lambda self, v: None, lambda self: None) # default

    parent = property(lambda self: object(), lambda self, v: None, lambda self: None) # default

    renderbuffer_format = property(lambda self: object(), lambda self, v: None, lambda self: None) # default

    width = property(lambda self: object(), lambda self, v: None, lambda self: None) # default

    _padding = property(lambda self: object(), lambda self, v: None, lambda self: None) # default

    __class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
    __dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(GLRenderbufferAllocationParams), '__module__': 'gi.repository.GstGL', '__gtype__': <GType GstGLRenderbufferAllocationParams (93979012457472)>, '__dict__': <attribute '__dict__' of 'GLRenderbufferAllocationParams' objects>, '__weakref__': <attribute '__weakref__' of 'GLRenderbufferAllocationParams' objects>, '__doc__': None, 'parent': <property object at 0x7f56a3a28310>, 'renderbuffer_format': <property object at 0x7f56a3a28450>, 'width': <property object at 0x7f56a3a284f0>, 'height': <property object at 0x7f56a3a285e0>, '_padding': <property object at 0x7f56a3a286d0>, 'new': gi.FunctionInfo(new), 'new_wrapped': gi.FunctionInfo(new_wrapped)})"
    __gtype__ = None # (!) real value is '<GType GstGLRenderbufferAllocationParams (93979012457472)>'
    __info__ = StructInfo(GLRenderbufferAllocationParams)
|
[
"[email protected]"
] | |
4f8956babeaeca36bcee259d46ecb8ec16dbe067
|
dc084a369e0f767bc5296739b24813470869522f
|
/main.py
|
ad9e07dfe136e778903fe0b117e3877fc9bb1631
|
[] |
no_license
|
JakeRoggenbuck/player_data_finder
|
4a539ac7963f1f5025eda89c96c75e76a8268574
|
0ba87a5511810ac10d3f40049b21541b9a8be1bb
|
refs/heads/master
| 2022-12-02T06:59:34.054775 | 2020-08-19T05:56:55 | 2020-08-19T05:56:55 | 288,645,171 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,609 |
py
|
from position_reader import PositionReader
from config import Config
from ftplib import FTP
import inquirer
import json
import os
class FTPConnection:
    """Thin wrapper around ftplib.FTP using credentials from Config."""

    def __init__(self):
        # Socket is not opened here; call start() to connect and log in.
        self.server = FTP()

    def start(self):
        """Connect and authenticate in one call."""
        self.connect()
        self.login()

    def connect(self):
        """Open the control connection to Config.host:Config.port."""
        self.server.connect(Config.host, Config.port)

    def login(self):
        """Authenticate with Config.username / Config.password."""
        self.server.login(Config.username, Config.password)
class UsernameCache:
    """Downloads usernamecache.json from the FTP server root and parses it."""

    def __init__(self):
        self.ftp = FTPConnection()
        self.ftp.start()
        self.path = "/"
        self.name = "usernamecache.json"
        # Decoded chunks accumulated by handle_binary().
        self.data = []

    def handle_binary(self, more_data):
        """retrbinary callback: decode one chunk and accumulate it."""
        self.data.append(more_data.decode('utf-8'))

    def get_usernames(self):
        """Fetch the cache file from the server into self.data."""
        self.ftp.server.cwd(self.path)
        self.ftp.server.retrbinary(f"RETR {self.name}", callback=self.handle_binary)

    def return_file(self):
        """Return the downloaded file as a single string."""
        return "".join(self.data)

    def get_json(self):
        """Parse the downloaded file as JSON."""
        return json.loads(self.return_file())
class Username:
    """Loads the username cache and exposes a name -> uuid mapping."""

    def __init__(self):
        self.usernamecache = UsernameCache()
        self.usernamecache.get_usernames()
        # Raw uuid -> name mapping as stored on the server.
        self.json = self.usernamecache.get_json()
        self.usernames = []
        # Inverted mapping: name -> uuid.
        self.new_json = self.get_new_json()

    def get_new_json(self):
        """Invert the uuid -> name mapping into name -> uuid."""
        return {name: uuid for uuid, name in self.json.items()}
class AskUsername:
    """Interactive selector that maps a chosen username to its UUID."""

    def __init__(self):
        self.username = Username()
        self.json = self.username.new_json

    def get_username(self):
        """Prompt with a checkbox of known usernames; returns the answers dict."""
        question = inquirer.Checkbox(
            "Username",
            message="Select username",
            choices=self.json.keys(),
        )
        return inquirer.prompt([question])

    def get_uuid(self):
        """Return the UUID for the first selected username."""
        selected = self.get_username()["Username"][0]
        return self.json[selected]
class GetDataFile:
    """Downloads the selected player's .dat file into the local data/ dir."""

    def __init__(self):
        self.ftp = FTPConnection()
        self.ftp.start()
        self.path = "/RAT STANDO/playerdata/"
        self.username = AskUsername()
        self.uuid = self.username.get_uuid()
        self.filename = f"{self.uuid}.dat"
        self.full_path = os.path.join("data/", self.filename)

    def save_file(self):
        """Fetch the remote player-data file and write it locally.

        The output file is opened in a ``with`` block so the handle is
        closed (and buffers flushed) even if the transfer raises; the
        previous code leaked the handle returned by ``open``.
        """
        self.ftp.server.cwd(self.path)
        with open(self.full_path, 'wb') as out_file:
            self.ftp.server.retrbinary(f"RETR {self.filename}", out_file.write)
if __name__ == "__main__":
    # Interactively pick a player and download their .dat file via FTP.
    datafile = GetDataFile()
    datafile.save_file()
    path = datafile.full_path
    # Parse the downloaded player-data file and print the position it holds.
    pr = PositionReader(path)
    pos = pr.get_pos()
    print(pos)
|
[
"[email protected]"
] | |
d80b410b5348e0c0627858a462910f433012546d
|
53be3b2a18d4df2e675525a610f4e7dab8c0de6f
|
/myems-api/core/version.py
|
0b884d96063823d791417aec31bff4eac4345132
|
[
"MIT"
] |
permissive
|
yxw027/myems
|
16dc82fa26328e00adbba4e09301c4cf363ad28d
|
ea58d50c436feafb1a51627aa4d84caf8f3aee08
|
refs/heads/master
| 2023-09-03T17:57:18.728606 | 2021-11-04T14:13:42 | 2021-11-04T14:13:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 473 |
py
|
import falcon
import simplejson as json
class VersionItem:
    """Falcon resource exposing MyEMS version information.

    Fix: ``__init__`` was decorated with ``@staticmethod``, which made
    ``VersionItem()`` raise ``TypeError`` (the implicit instance argument
    could not be accepted).  It is now a normal instance method.  The
    malformed quadruple-quote docstring is also corrected.
    """
    def __init__(self):
        """Initializes VersionItem"""
        pass

    @staticmethod
    def on_options(req, resp, id_):
        # CORS/preflight handler: nothing to report beyond 200 OK.
        resp.status = falcon.HTTP_200

    @staticmethod
    def on_get(req, resp):
        # Static version payload; update alongside each release.
        result = {"version": 'MyEMS v1.3.3',
                  "release-date": '2021-10-30',
                  "website": "https://myems.io"}
        resp.body = json.dumps(result)
|
[
"[email protected]"
] | |
0e541e32d749a1302541ce19ccdf2b6c8d442a16
|
bdc4ae3d691fcb50405235d963012d84ea8b8a06
|
/src/b2r2b_youbot/message.py
|
d8d3c2a392182dba8f26573558c936ec091c3489
|
[] |
no_license
|
AndreaCensi/rosstream2boot
|
53552a256979f072c7bf4abb9bc01eed3c960e97
|
7cce8cf270b67e8c9e5abe6cdfed9d5969a82c00
|
refs/heads/master
| 2021-01-01T19:11:04.669798 | 2013-07-13T14:11:28 | 2013-07-13T14:11:28 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,658 |
py
|
from b2r2b_youbot import get_JointVelocities, get_JointValue, get_JointPositions
__all__ = ['get_joint_position_msg', 'get_joint_velocity_msg']
def get_joint_velocity_msg(array, timeStamp=None):
'''
:param array: float array with values to the joints
'''
JointVelocities = get_JointVelocities()
JointValue = get_JointValue()
num_joints = len(array)
msg = JointVelocities()
msg.poisonStamp.description = 'Joint velocities generated by b2r2b.'
for i in range(num_joints):
joint_value = JointValue()
joint_value.joint_uri = 'arm_joint_' + str(i + 1)
if timeStamp is not None:
joint_value.timeStamp = timeStamp
joint_value.unit = 's^-1 rad'
joint_value.value = array[i]
msg.velocities.append(joint_value)
assert len(msg.velocities) == num_joints
return msg
def get_joint_position_msg(array, timeStamp=None):
    '''
    Build a JointPositions message, one JointValue per arm joint.

    :param array: float array with values to the joints
    '''
    JointPositions = get_JointPositions()
    JointValue = get_JointValue()
    msg = JointPositions()
    msg.poisonStamp.description = 'Joint velocities generated by b2r2b'
    for index, position in enumerate(array):
        joint_value = JointValue()
        # Joint URIs are 1-based: arm_joint_1 .. arm_joint_N.
        joint_value.joint_uri = 'arm_joint_' + str(index + 1)
        if timeStamp is not None:
            joint_value.timeStamp = timeStamp
        joint_value.unit = 'rad'
        joint_value.value = position
        msg.positions.append(joint_value)
    assert len(msg.positions) == len(array)
    return msg
|
[
"[email protected]"
] | |
5fa21fadd207f6922a41ad01fad7d3295d852e5d
|
de358ba57518d65393c810da20c53e1c41494bff
|
/ALGOPYTHON/array2.py
|
616580064c661e80fb9915007e10e09ad9a0cb0f
|
[] |
no_license
|
avirupdandapat/ALGOPROJECT
|
43eef94b13e38452cdc6a506b17b6fee581a07e1
|
55b60a0c6e51cae900e243505f6a4557ad4d7069
|
refs/heads/master
| 2022-12-29T13:02:54.655976 | 2020-10-18T12:23:57 | 2020-10-18T12:23:57 | 305,095,375 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 252 |
py
|
import subprocess
# NOTE(review): runs the shell command 'dir' (a Windows builtin) purely for
# its side effect at import time — presumably leftover experimentation;
# confirm it is still wanted.
subprocess.run('dir', shell=True)
if __name__ == '__main__':
    # HackerRank-style input: number of test cases, then for each case a
    # count line followed by a line of space-separated integers.
    t = int(input())
    for t_itr in range(t):
        n = int(input())
        q = list(map(int, input().rstrip().split()))
        # arr = minimumBribes(a)
        print(q)
|
[
"[email protected]"
] | |
0b7b8cc2114aca9d05671b6b132a1a909f63ca55
|
f97a267b066f64177e382346e36cc06c25a3a6b1
|
/src/quart/typing.py
|
a1be3a52dbb6ec3db1d23f2cf3688f19f97a56fe
|
[
"MIT"
] |
permissive
|
p-unity-lineage/quart
|
a54ec9a1e6f61159c5c2688e24a2b54462bcd231
|
14efcd92f37bb4ef78d463d6d145f71c61665470
|
refs/heads/master
| 2023-03-31T23:56:56.881288 | 2021-04-11T09:28:28 | 2021-04-11T09:28:28 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,067 |
py
|
from __future__ import annotations
import os
from datetime import datetime, timedelta
from types import TracebackType
from typing import (
Any,
AnyStr,
AsyncContextManager,
AsyncGenerator,
Awaitable,
Callable,
Dict,
Generator,
List,
Optional,
Tuple,
Type,
TYPE_CHECKING,
Union,
)
from hypercorn.typing import (
ASGIReceiveCallable,
ASGISendCallable,
HTTPScope,
LifespanScope,
WebsocketScope,
)
try:
from typing import Protocol
except ImportError:
from typing_extensions import Protocol # type: ignore
if TYPE_CHECKING:
from werkzeug.datastructures import Headers # noqa: F401
from werkzeug.wrappers import Response as WerkzeugResponse
from .app import Quart
from .sessions import Session
from .wrappers.response import Response # noqa: F401
# A filesystem path in any of the forms Quart accepts.
FilePath = Union[bytes, str, os.PathLike]
# The possible types that are directly convertible or are a Response object.
ResponseValue = Union[
    "Response",
    "WerkzeugResponse",
    AnyStr,
    Dict[str, Any],  # any jsonify-able dict
    AsyncGenerator[bytes, None],
    Generator[bytes, None, None],
]
StatusCode = int
# the possible types for an individual HTTP header
HeaderName = str
HeaderValue = Union[str, List[str], Tuple[str, ...]]
# the possible types for HTTP headers
HeadersValue = Union["Headers", Dict[HeaderName, HeaderValue], List[Tuple[HeaderName, HeaderValue]]]
# The possible types returned by a route function.
ResponseReturnValue = Union[
    ResponseValue,
    Tuple[ResponseValue, HeadersValue],
    Tuple[ResponseValue, StatusCode],
    Tuple[ResponseValue, StatusCode, HeadersValue],
]
AppOrBlueprintKey = Optional[str]  # The App key is None, whereas blueprints are named
# Signatures for the various user-registered lifecycle callbacks.
AfterRequestCallable = Callable[["Response"], Awaitable["Response"]]
AfterWebsocketCallable = Callable[["Response"], Awaitable[Optional["Response"]]]
BeforeRequestCallable = Callable[[], Awaitable[None]]
BeforeWebsocketCallable = Callable[[], Awaitable[None]]
ErrorHandlerCallable = Callable[[Exception], Awaitable[None]]
TeardownCallable = Callable[[Optional[BaseException]], Awaitable["Response"]]
TemplateContextProcessorCallable = Callable[[], Awaitable[Dict[str, Any]]]
URLDefaultCallable = Callable[[str, dict], None]
URLValuePreprocessorCallable = Callable[[str, dict], None]
class ASGIHTTPProtocol(Protocol):
    """Structural type for Quart's ASGI HTTP connection handler."""

    def __init__(self, app: Quart, scope: HTTPScope) -> None:
        ...
    async def __call__(self, receive: ASGIReceiveCallable, send: ASGISendCallable) -> None:
        ...
class ASGILifespanProtocol(Protocol):
    """Structural type for Quart's ASGI lifespan handler."""

    def __init__(self, app: Quart, scope: LifespanScope) -> None:
        ...
    async def __call__(self, receive: ASGIReceiveCallable, send: ASGISendCallable) -> None:
        ...
class ASGIWebsocketProtocol(Protocol):
    """Structural type for Quart's ASGI websocket handler."""

    def __init__(self, app: Quart, scope: WebsocketScope) -> None:
        ...
    async def __call__(self, receive: ASGIReceiveCallable, send: ASGISendCallable) -> None:
        ...
class TestHTTPConnectionProtocol(Protocol):
    """Structural type for a raw HTTP connection used by the test client."""

    # HTTP/2 push promises received during the request, as (path, headers).
    push_promises: List[Tuple[str, Headers]]
    def __init__(self, app: Quart, scope: HTTPScope, _preserve_context: bool = False) -> None:
        ...
    async def send(self, data: bytes) -> None:
        ...
    async def send_complete(self) -> None:
        ...
    async def receive(self) -> bytes:
        ...
    async def disconnect(self) -> None:
        ...
    async def __aenter__(self) -> TestHTTPConnectionProtocol:
        ...
    async def __aexit__(self, exc_type: type, exc_value: BaseException, tb: TracebackType) -> None:
        ...
    async def as_response(self) -> Response:
        ...
class TestWebsocketConnectionProtocol(Protocol):
    """Structural type for a websocket connection used by the test client."""

    def __init__(self, app: Quart, scope: WebsocketScope) -> None:
        ...
    async def __aenter__(self) -> TestWebsocketConnectionProtocol:
        ...
    async def __aexit__(self, exc_type: type, exc_value: BaseException, tb: TracebackType) -> None:
        ...
    async def receive(self) -> AnyStr:
        ...
    async def send(self, data: AnyStr) -> None:
        ...
    async def receive_json(self) -> Any:
        ...
    async def send_json(self, data: Any) -> None:
        ...
    async def close(self, code: int) -> None:
        ...
    async def disconnect(self) -> None:
        ...
class TestClientProtocol(Protocol):
    """Structural type for Quart's test client implementations."""

    # Connection classes used to open raw HTTP / websocket test connections.
    http_connection_class: Type[TestHTTPConnectionProtocol]
    push_promises: List[Tuple[str, Headers]]
    websocket_connection_class: Type[TestWebsocketConnectionProtocol]
    def __init__(self, app: Quart, use_cookies: bool = True) -> None:
        ...
    async def open(
        self,
        path: str,
        *,
        method: str = "GET",
        headers: Optional[Union[dict, Headers]] = None,
        data: Optional[AnyStr] = None,
        form: Optional[dict] = None,
        query_string: Optional[dict] = None,
        json: Any = None,
        scheme: str = "http",
        follow_redirects: bool = False,
        root_path: str = "",
        http_version: str = "1.1",
    ) -> Response:
        ...
    def request(
        self,
        path: str,
        *,
        method: str = "GET",
        headers: Optional[Union[dict, Headers]] = None,
        query_string: Optional[dict] = None,
        scheme: str = "http",
        root_path: str = "",
        http_version: str = "1.1",
    ) -> TestHTTPConnectionProtocol:
        ...
    def websocket(
        self,
        path: str,
        *,
        headers: Optional[Union[dict, Headers]] = None,
        query_string: Optional[dict] = None,
        scheme: str = "ws",
        subprotocols: Optional[List[str]] = None,
        root_path: str = "",
        http_version: str = "1.1",
    ) -> TestWebsocketConnectionProtocol:
        ...
    async def delete(self, *args: Any, **kwargs: Any) -> Response:
        ...
    async def get(self, *args: Any, **kwargs: Any) -> Response:
        ...
    async def head(self, *args: Any, **kwargs: Any) -> Response:
        ...
    async def options(self, *args: Any, **kwargs: Any) -> Response:
        ...
    async def patch(self, *args: Any, **kwargs: Any) -> Response:
        ...
    async def post(self, *args: Any, **kwargs: Any) -> Response:
        ...
    async def put(self, *args: Any, **kwargs: Any) -> Response:
        ...
    async def trace(self, *args: Any, **kwargs: Any) -> Response:
        ...
    def set_cookie(
        self,
        server_name: str,
        key: str,
        value: str = "",
        max_age: Optional[Union[int, timedelta]] = None,
        expires: Optional[Union[int, float, datetime]] = None,
        path: str = "/",
        domain: Optional[str] = None,
        secure: bool = False,
        httponly: bool = False,
        samesite: Optional[str] = None,
        charset: str = "utf-8",
    ) -> None:
        ...
    def delete_cookie(
        self, server_name: str, key: str, path: str = "/", domain: Optional[str] = None
    ) -> None:
        ...
    def session_transaction(
        self,
        path: str = "/",
        *,
        method: str = "GET",
        headers: Optional[Union[dict, Headers]] = None,
        query_string: Optional[dict] = None,
        scheme: str = "http",
        data: Optional[AnyStr] = None,
        form: Optional[dict] = None,
        json: Any = None,
        root_path: str = "",
        http_version: str = "1.1",
    ) -> AsyncContextManager[Session]:
        ...
    async def __aenter__(self) -> TestClientProtocol:
        ...
    async def __aexit__(self, exc_type: type, exc_value: BaseException, tb: TracebackType) -> None:
        ...
class TestAppProtocol(Protocol):
    """Structural type for the test-app wrapper driving startup/shutdown."""

    def __init__(self, app: Quart) -> None:
        ...
    def test_client(self) -> TestClientProtocol:
        ...
    async def startup(self) -> None:
        ...
    async def shutdown(self) -> None:
        ...
    async def __aenter__(self) -> TestAppProtocol:
        ...
    async def __aexit__(self, exc_type: type, exc_value: BaseException, tb: TracebackType) -> None:
        ...
|
[
"[email protected]"
] | |
f2db845f6ed0455eb72a111058ae787634caf967
|
181e9cc9cf4e52fcc6e9979890cc5b41e7beb756
|
/Module 1/02_Codes/miscellaneous/4-TenSecondCameraCapture.py
|
ffe0427cfa69b339ad2ddf078ae4c70f2d0dfbde
|
[
"MIT"
] |
permissive
|
PacktPublishing/OpenCV-Computer-Vision-Projects-with-Python
|
ace8576dce8d5f5db6992b3e5880a717996f78cc
|
45a9c695e5bb29fa3354487e52f29a565d700d5c
|
refs/heads/master
| 2023-02-09T14:10:42.767047 | 2023-01-30T09:02:09 | 2023-01-30T09:02:09 | 71,112,659 | 96 | 72 | null | null | null | null |
UTF-8
|
Python
| false | false | 515 |
py
|
import cv2
# Capture roughly ten seconds from the default camera to MyOutputVid.avi.
cameraCapture = cv2.VideoCapture(0)
fps = 30  # an assumption; live cameras often misreport CAP_PROP_FPS
size = (int(cameraCapture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)),
        int(cameraCapture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)))
videoWriter = cv2.VideoWriter(
    'MyOutputVid.avi', cv2.cv.CV_FOURCC('I','4','2','0'), fps, size)
success, frame = cameraCapture.read()
numFramesRemaining = 10 * fps - 1  # ten seconds minus the frame read above
while success and numFramesRemaining > 0:
    videoWriter.write(frame)
    success, frame = cameraCapture.read()
    numFramesRemaining -= 1
# Release the camera so the device becomes available to other processes;
# the original script leaked the capture handle.
cameraCapture.release()
|
[
"[email protected]"
] | |
49cc7ab24fcf653315ed8b0a0c768e7420905965
|
4a2eac368e3e2216b0cd1dd70224da3ca4ee7c5e
|
/SecretManager/owlbot.py
|
50c7c9bb3fb0bdd3f2138665e345cd50407ebd4c
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
googleapis/google-cloud-php
|
856a940eee158eafa6f2443f8d61813779216429
|
ad50f749431287e7074279e2b4fa32d6d6c2c952
|
refs/heads/main
| 2023-09-06T05:48:31.609502 | 2023-09-05T20:27:34 | 2023-09-05T20:27:34 | 43,642,389 | 642 | 330 |
Apache-2.0
| 2023-09-13T22:39:27 | 2015-10-04T16:09:46 |
PHP
|
UTF-8
|
Python
| false | false | 1,871 |
py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import logging
from pathlib import Path
import subprocess
import synthtool as s
from synthtool.languages import php
from synthtool import _tracked_paths
logging.basicConfig(level=logging.DEBUG)
src = Path(f"../{php.STAGING_DIR}/SecretManager").resolve()
dest = Path().resolve()
# Added so that we can pass copy_excludes in the owlbot_main() call
_tracked_paths.add(src)
php.owlbot_main(src=src, dest=dest)
# Change the wording for the deprecation warning.
s.replace(
'src/*/*_*.php',
r'will be removed in the next major release',
'will be removed in a future release')
### [START] protoc backwards compatibility fixes
# roll back to private properties.
s.replace(
"src/**/V*/**/*.php",
r"Generated from protobuf field ([^\n]{0,})\n\s{5}\*/\n\s{4}protected \$",
r"""Generated from protobuf field \1
*/
private $""")
# Replace "Unwrapped" with "Value" for method names.
s.replace(
"src/**/V*/**/*.php",
r"public function ([s|g]\w{3,})Unwrapped",
r"public function \1Value"
)
### [END] protoc backwards compatibility fixes
# fix relative cloud.google.com links
s.replace(
"src/**/V*/**/*.php",
r"(.{0,})\]\((/.{0,})\)",
r"\1](https://cloud.google.com\2)"
)
|
[
"[email protected]"
] | |
1ac2383f4356c4123f3f0424f2d41eeb4d65eef7
|
e00d41c9f4045b6c6f36c0494f92cad2bec771e2
|
/multimedia/sound/celt/actions.py
|
ebbc7ace073cda6de994f48d6de40b6054304d7d
|
[] |
no_license
|
pisilinux/main
|
c40093a5ec9275c771eb5fb47a323e308440efef
|
bfe45a2e84ea43608e77fb9ffad1bf9850048f02
|
refs/heads/master
| 2023-08-19T00:17:14.685830 | 2023-08-18T20:06:02 | 2023-08-18T20:06:02 | 37,426,721 | 94 | 295 | null | 2023-09-14T08:22:22 | 2015-06-14T19:38:36 |
Python
|
UTF-8
|
Python
| false | false | 407 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
def setup():
    """Configure the source tree; static libraries are disabled."""
    autotools.configure("--disable-static")
def build():
    """Compile the package with make."""
    autotools.make()
def install():
    """Install the built files and ship the license/readme docs."""
    autotools.install()
    pisitools.dodoc("COPYING", "README")
|
[
"[email protected]"
] | |
ef1dee427fd1886305f8a5488e392f4572706fde
|
bc9cb3f0f104778026ca6f3a07595dd5d6ce840f
|
/DIA_01/introducao_python/aula/linguagem_python/05_conversao_de_tipos/exemplo_03.py
|
ba7db84a4bd8944411f327a1af919b84d29843a2
|
[] |
no_license
|
JohnRhaenys/escola_de_ferias
|
ff7a5d7f399459725f3852ca6ee200486f29e7d4
|
193364a05a5c7ccb2e5252c150745d6743738728
|
refs/heads/master
| 2023-01-12T11:30:46.278703 | 2020-11-19T14:59:10 | 2020-11-19T14:59:10 | 314,278,831 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 138 |
py
|
# Converting float to string
real = 1.23
print(real)
print(type(real))  # <class 'float'>
print()
string = str(real)
print(string)
print(type(string))  # <class 'str'>
|
[
"[email protected]"
] | |
2bb80d3e76ad8eeb95f106c44017b8529377a982
|
b715c79f52cf2c95927c19edf8f6d64f5322bf7d
|
/PJLink/start_kernel.py
|
94e029381f90239bda03ffb6c2dba92292df92be
|
[
"MIT"
] |
permissive
|
sunt05/PJLink
|
4d6805b07070c0a2c7452464358ebcf075eeabb0
|
cd623efebf4ddae8c5ea75b3ee08fe9819e78b40
|
refs/heads/master
| 2020-03-29T05:46:10.707576 | 2018-09-20T07:36:51 | 2018-09-20T07:36:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,842 |
py
|
"""start_kernel is a convenience script for starting a kernel thread in python
"""
import sys, os, argparse
sys.stdout.flush()
true_file = os.path.abspath(__file__)
sys.path.insert(0, os.path.dirname(os.path.dirname(true_file)))
from PJLink import *
### I should do a lot more argparsing... but I don't
parser = argparse.ArgumentParser(description='Start a PJLink kernel.')
parser.add_argument('--blocking', dest='block', type=bool, nargs='?', default=False,
help='whether the kernel should block or not')
parser.add_argument('--debug', dest='debug', type=int, nargs='?', default=0,
help='debug level for underlying PJLink lib')
parser.add_argument('-linkname', dest='name', type=str, nargs='?',
help='name for the link')
parser.add_argument('-linkmode', dest='mode', type=str, nargs='?',
help='mode for the link')
parser.add_argument('-linkprotocol', dest='protocol', type=str, nargs='?',
help='protocol for the link')
parser = parser.parse_args()
blocking = parser.block
debug = parser.debug
name = parser.name
mode = parser.mode
protocol = parser.protocol
opts = { 'linkname' : parser.name, 'linkmode' : parser.mode, 'linkprotocol' : parser.protocol }
opts = [ ('-'+k, v) for k, v in opts.items() if v is not None]
init = [ None ] * (2 * len(opts))
for i, t in enumerate(opts):
init[2*i] = t[0]
init[2*i+1] = t[1]
reader = create_reader_link(init=init, debug_level=debug)
# print(reader.link.drain())
# stdout = open(os.path.expanduser("~/Desktop/stdout.txt"), "w+")
# stderr = open(os.path.expanduser("~/Desktop/stderr.txt"), "w+")
# sys.stdout = stdout
# sys.stderr = stderr
if blocking:
# reader.run()
# else:
import code
code.interact(banner = "", local={"Kernel":reader.link, "KernelReader":reader})
|
[
"[email protected]"
] | |
86ca70ec064a1b497a8d74d0e3d0844b4fa7c668
|
3e1d9a25426e2a157a69f3c4c6c41b5216deb8de
|
/LeetCode/Python/Easy/Heaters.py
|
749d9559b5c23d9ae2cfa22db847e1981c3ed067
|
[] |
no_license
|
jashansudan/Data-Structures-and-Algorithms
|
4e420586bc773c5fc35b7ce7be369ca92bf51898
|
e38cfb49b3f9077f47f1053207f4e44c7726fb90
|
refs/heads/master
| 2021-01-12T10:47:05.754340 | 2018-02-13T01:01:50 | 2018-02-13T01:01:50 | 72,697,093 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 412 |
py
|
class Solution(object):
    """LeetCode 475 "Heaters": smallest radius covering every house."""

    def findRadius(self, houses, heaters):
        """Return the minimal heater radius so each house is warmed.

        Both lists are sorted in place, then a single pointer walks the
        heaters while the houses are visited in increasing order; for each
        house the nearest heater is the first one that is not improved upon
        by its successor.
        """
        houses.sort()
        heaters.sort()
        idx = 0
        last = len(heaters) - 1
        radius = 0
        for pos in houses:
            # Advance while the next heater is at least as close to this house.
            while idx < last and abs(heaters[idx] - pos) >= abs(heaters[idx + 1] - pos):
                idx += 1
            gap = abs(heaters[idx] - pos)
            if gap > radius:
                radius = gap
        return radius
|
[
"[email protected]"
] | |
d7844833faffeeb6ea09e3c6a4e91c845c8bcd78
|
b2d3bd39b2de8bcc3b0f05f4800c2fabf83d3c6a
|
/examples/pwr_run/checkpointing/new_short/feedback/job13.py
|
70a3df461607c82369d2bf42af52327b64207e16
|
[
"MIT"
] |
permissive
|
boringlee24/keras_old
|
3bf7e3ef455dd4262e41248f13c04c071039270e
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
refs/heads/master
| 2021-11-21T03:03:13.656700 | 2021-11-11T21:57:54 | 2021-11-11T21:57:54 | 198,494,579 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,222 |
py
|
"""
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.resnet import ResNet50, ResNet101, ResNet152
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.01
args_model = 'resnet50'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_feedback/' + job_name + '*'
total_epochs = 11
starting_epoch = 0
# first step is to update the PID
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
fp.write(json_file)
os.rename('pid_lock.json', 'pid.json')
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
if '50' in args_model:
base_model = ResNet50(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '101' in args_model:
base_model = ResNet101(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '152' in args_model:
base_model = ResNet152(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
#model.add(layers.UpSampling2D((2,2)))
#model.add(layers.UpSampling2D((2,2)))
#model.add(layers.UpSampling2D((2,2)))
model.add(base_model)
model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
    """SIGTERM handler: record the wasted partial-epoch time, checkpoint
    the model, flag the checkpoint in checkpoint.json, then exit.

    Relies on module globals: epoch_begin_time, job_name, save_files,
    model, current_epoch.
    """
    # first record the wasted epoch time
    global epoch_begin_time
    if epoch_begin_time == 0:
        # no epoch has started yet, so nothing was wasted
        epoch_waste_time = 0
    else:
        epoch_waste_time = int(time.time() - epoch_begin_time)
    epoch_waste_dict = {}
    with open('epoch_waste.json', 'r') as fp:
        epoch_waste_dict = json.load(fp)
    epoch_waste_dict[job_name] += epoch_waste_time
    json_file3 = json.dumps(epoch_waste_dict)
    with open('epoch_waste.json', 'w') as fp:
        fp.write(json_file3)
    print('checkpointing the model triggered by kill -15 signal')
    # delete whatever checkpoint that already exists
    for f in glob.glob(save_files):
        os.remove(f)
    # save a fresh checkpoint tagged with the epoch we were interrupted in
    model.save('/scratch/li.baol/checkpoint_feedback/' + job_name + '_' + str(current_epoch) + '.h5')
    print ('(SIGTERM) terminating the process')
    # mark this job as checkpointed so the scheduler can resume it later
    checkpoint_dict = {}
    with open('checkpoint.json', 'r') as fp:
        checkpoint_dict = json.load(fp)
    checkpoint_dict[job_name] = 1
    json_file3 = json.dumps(checkpoint_dict)
    with open('checkpoint.json', 'w') as fp:
        fp.write(json_file3)
    sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
    """Keras callback that records the currently running epoch and its
    wall-clock start time in module globals (current_epoch /
    epoch_begin_time) so the SIGTERM handler can checkpoint accurately."""
    def on_epoch_begin(self, epoch, logs=None):
        global current_epoch
        #remaining_epochs = epochs - epoch
        current_epoch = epoch
        print('current epoch ' + str(current_epoch))
        global epoch_begin_time
        epoch_begin_time = time.time()
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
|
[
"[email protected]"
] | |
c250ce17de3d4b5cc6706f63c1977f4f2fcee481
|
d6d20681f41102df3feb2b438ef80569bd73730f
|
/Uge4-numpy/.history/exercises_20200218170520.py
|
ca2f210387343db0503dbab4b0b5f8b7d2cf8f1b
|
[] |
no_license
|
MukHansen/pythonAfleveringer
|
d0ad2629da5ba2b6011c9e92212949e385443789
|
4107c3c378f757733961812dd124efc99623ff2e
|
refs/heads/master
| 2020-12-22T13:27:19.135138 | 2020-05-22T11:35:52 | 2020-05-22T11:35:52 | 236,796,591 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,046 |
py
|
import numpy as np
import matplotlib.pyplot as plt
from collections import OrderedDict
filename = './befkbhalderstatkode.csv'
data = np.genfromtxt(filename, delimiter=',', dtype=np.uint, skip_header=1)
neighb = {1: 'Indre By', 2: 'Østerbro', 3: 'Nørrebro', 4: 'Vesterbro/Kgs. Enghave',
5: 'Valby', 6: 'Vanløse', 7: 'Brønshøj-Husum', 8: 'Bispebjerg', 9: 'Amager Øst',
10: 'Amager Vest', 99: 'Udenfor'}
specificHoods = {3: 'Nørrebro'} #, 4: 'Vesterbro/Kgs.'
nordicCountryCodes = {5104: 'Finland', 5106: 'Island', 5110: 'Norge', 5120: 'Sverige'}
def getPopPerHood(hood):
    """Total 2015 population (column 4) for the given neighbourhood code."""
    year_2015 = data[:, 0] == 2015
    in_hood = data[:, 1] == hood
    return np.sum(data[year_2015 & in_hood][:, 4])
def getPopPerSpecificHood(hood):
    # All rows (every year) for this neighbourhood code.
    deezMask = (data[:,1] == hood)
    print((data[deezMask][:,(0,4)]))
    # NOTE(review): this sums columns 0 AND 4 together, i.e. the year values
    # are added into the population total -- looks like a bug; probably only
    # column 4 was intended. Confirm before relying on the result.
    return np.sum(data[deezMask][:,(0,4)])
def getOldPeople():
    # NOTE(review): despite the name (and the "above the age of 65" print at
    # the bottom of the file), this mask selects ages <= 65, not >= 65 --
    # probably an inverted comparison; confirm intent.
    deezMask = (data[:,0] == 2015) & (data[:,2] <= 65)
    return np.sum(data[deezMask][:,4])
def getOldNordicPeople(countrycode):
    # 2015 population sum for one nordic country code, age-filtered.
    # NOTE(review): same suspected inverted comparison as getOldPeople
    # (<= 65 where the callers talk about people above 65) -- confirm.
    deezMask = (data[:,0] == 2015) & (data[:,2] <= 65) & (data[:,3] == countrycode)
    return np.sum(data[deezMask][:,4])
def getSumOfOldNordicPeople():
    """Map each nordic country name to its age-filtered 2015 population."""
    return {name: getOldNordicPeople(code)
            for code, name in nordicCountryCodes.items()}
def getSumPerHood():
    """Map each neighbourhood name to its total 2015 population."""
    return {name: getPopPerHood(code) for code, name in neighb.items()}
def getSumPerSpecificHoods():
    """Map each selected neighbourhood name to getPopPerSpecificHood()."""
    return {name: getPopPerSpecificHood(code)
            for code, name in specificHoods.items()}
def displayPlotOfHoodsPop():
    """Show a bar chart of 2015 population per neighbourhood, ascending."""
    lst = getSumPerHood()
    # Sort neighbourhoods by population (ascending) before plotting.
    hoodsSorted = OrderedDict(sorted(lst.items(), key=lambda x: x[1]))
    cityAreas = []
    sumOfPeople = []
    for key, value in hoodsSorted.items():
        cityAreas.append(key)
        sumOfPeople.append(value)
    plt.bar(cityAreas, sumOfPeople, width=0.5, linewidth=0, align='center')
    title = 'Population in various areas in cph'
    plt.title(title, fontsize=12)
    plt.xticks(cityAreas, rotation=65)
    plt.tick_params(axis='both', labelsize=8)
    plt.show()
def displayPopulationOverTheYears():
    """Line plot labelled "Population over the years".

    NOTE(review): what is actually plotted is the keys/values of
    `specificHoods` (neighbourhood codes vs. names), not population per
    year -- the function looks unfinished (see the commented-out block).
    """
    population = []
    years = []
    for key, value in specificHoods.items():
        years.append(key)
        population.append(value)
    # West = []
    # East = []
    # lst = {West, East}
    # East = [lst.get(4)]
    # print('West --------', West)
    # print('East --------', East)
    plt.figure()
    plt.plot(years, population, linewidth=5)
    plt.title("Population over the years", fontsize=24)
    plt.xlabel("Year", fontsize=14)
    plt.tick_params(axis='both', labelsize=14)
# print(getSumPerHood())
# displayPlotOfHoodsPop()
# print('Number of people above the age of 65 --',getOldPeople())
# print(getSumOfOldNordicPeople())
# displayPopulationOverTheYears()
print(getSumPerSpecificHoods())
# getSumPerSpecificHoods()
|
[
"[email protected]"
] | |
4d7a7650d95d9418c7a99e03149894a0c5c686dc
|
bbe447a740929eaee1955bd9c1517cf760dd5cb9
|
/keygrabber/adwords/adwords_api_python_14.2.1/adspygoogle/adwords/zsi/v200909/CampaignService_services.py
|
f1101c95aafb53ba4c75e90dcf48c195df9f883f
|
[
"Apache-2.0"
] |
permissive
|
MujaahidSalie/aranciulla
|
f3d32e7dd68ecfca620fe4d3bf22ecb4762f5893
|
34197dfbdb01479f288611a0cb700e925c4e56ce
|
refs/heads/master
| 2020-09-07T02:16:25.261598 | 2011-11-01T21:20:46 | 2011-11-01T21:20:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,176 |
py
|
##################################################
# CampaignService_services.py
# generated by ZSI.generate.wsdl2python
##################################################
from CampaignService_services_types import *
import urlparse, types
from ZSI.TCcompound import ComplexType, Struct
from ZSI import client
import ZSI
# Locator
class CampaignServiceLocator:
    """Knows the service endpoint URL and builds SOAP binding proxies."""
    CampaignServiceInterface_address = "https://adwords.google.com:443/api/adwords/cm/v200909/CampaignService"
    def getCampaignServiceInterfaceAddress(self):
        # Default endpoint for the v200909 CampaignService.
        return CampaignServiceLocator.CampaignServiceInterface_address
    def getCampaignServiceInterface(self, url=None, **kw):
        # Bind to `url`, falling back to the default service address.
        return CampaignServiceSoapBindingSOAP(url or CampaignServiceLocator.CampaignServiceInterface_address, **kw)
# Methods
class CampaignServiceSoapBindingSOAP:
    """ZSI-generated SOAP binding proxy for the AdWords v200909
    CampaignService. Python 2 code (note the `raise TypeError, ...`
    syntax); regenerate rather than hand-edit."""
    def __init__(self, url, **kw):
        kw.setdefault("readerclass", None)
        kw.setdefault("writerclass", None)
        # no resource properties
        self.binding = client.Binding(url=url, **kw)
        # no ws-addressing
    # get: getCampaign
    def getCampaign(self, request):
        # Send a typed getCampaign request and return the typed response.
        if isinstance(request, getCampaignRequest) is False:
            raise TypeError, "%s incorrect request type" % (request.__class__)
        kw = {}
        # no input wsaction
        self.binding.Send(None, None, request, soapaction="", **kw)
        # no output wsaction
        response = self.binding.Receive(getCampaignResponse.typecode)
        return response
    # mutate: mutateCampaign
    def mutateCampaign(self, request):
        # Send a typed mutateCampaign request and return the typed response.
        if isinstance(request, mutateCampaignRequest) is False:
            raise TypeError, "%s incorrect request type" % (request.__class__)
        kw = {}
        # no input wsaction
        self.binding.Send(None, None, request, soapaction="", **kw)
        # no output wsaction
        response = self.binding.Receive(mutateCampaignResponse.typecode)
        return response
# Module-level aliases for the generated request/response message classes.
getCampaignRequest = ns0.getCampaign_Dec().pyclass
getCampaignResponse = ns0.getCampaignResponse_Dec().pyclass
mutateCampaignRequest = ns0.mutateCampaign_Dec().pyclass
mutateCampaignResponse = ns0.mutateCampaignResponse_Dec().pyclass
|
[
"[email protected]"
] | |
fdd25e91bdb09e58d4f219ef3803e81fd78e0545
|
6493bc4fdf2618b401c7c2acf6e04567a27a1b00
|
/klearn/kernels/__init__.py
|
1a14748333710a3cb0f1405360498e01722b3acd
|
[] |
no_license
|
mpharrigan/klearn
|
75dc5bfea65ed7018fd42d7eb502b32c4ff7a007
|
697e62993cf3a42444cc9115f8fea0425950fec2
|
refs/heads/master
| 2021-01-16T00:31:34.616280 | 2014-05-21T22:50:10 | 2014-05-21T22:50:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 142 |
py
|
from .baseclasses import AbstractKernel
from .dotproduct import DotProduct
from .polynomial import Polynomial
from .gaussian import Gaussian
|
[
"[email protected]"
] | |
c63339a1a53f68331bf60b14c7938e6240219f5a
|
826e10209af5462022e3aff1511f1e48842d32a4
|
/promoterz/representation/oldschool.py
|
99a894122cd5eb447a3eeae0a2347854961cdb4b
|
[
"MIT"
] |
permissive
|
IanMadlenya/japonicus
|
0309bbf8203525770e237b2b385844ef4a3610ae
|
112aabdd3362ca6259ddbe57440cdd583a674022
|
refs/heads/master
| 2021-04-15T12:06:15.027249 | 2018-03-20T22:23:43 | 2018-03-20T22:23:43 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,645 |
py
|
#!/bin/python
import random
import json
import os
from copy import deepcopy
from .import Creator
from deap import base
from deap import tools
from deap import algorithms
import numpy as np
from .import Creator
from . .import parameterOperations
def constructPhenotype(stratSettings, individue):
    """Map a genome of 0-100 gene values onto concrete strategy parameters.

    stratSettings: {attribute name: (low, high)} limit pairs.
    individue: indexable genome; gene K corresponds to the K-th attribute
        name in sorted order.

    Returns the nested parameter dict produced by
    parameterOperations.expandNestedParameters.

    (Cleanup: the original bound `individue.Strategy` to an unused local.)
    """
    def scale(value, limits):
        # Linear map of a 0-100 gene value into the [low, high] range.
        low, high = limits[0], limits[1]
        return ((high - low) / 100) * value + low

    phenotype = {}
    # Sorted attribute order fixes the gene-to-attribute correspondence.
    for position, attribute in enumerate(sorted(stratSettings.keys())):
        phenotype[attribute] = scale(individue[position], stratSettings[attribute])
    return parameterOperations.expandNestedParameters(phenotype)
def createRandomVarList(IndSize):
    """Return IndSize random integer genes, each drawn uniformly from 0-99."""
    return [random.randrange(0, 100) for _ in range(IndSize)]
def initInd(Criterion, Attributes):
    """Build one individual: instantiate Criterion (a DEAP creator class)
    and fill it with one random 0-99 gene per attribute."""
    w = Criterion()
    IndSize = len(list(Attributes.keys()))
    w[:] = createRandomVarList(IndSize)
    return w
def getToolbox(Strategy, genconf, Attributes):
    """Assemble the DEAP toolbox (individual/population factories plus
    mate/mutate operators) for the given strategy and attribute limits.

    NOTE(review): Creator.init is called twice with identical arguments --
    the first call looks redundant.
    NOTE(review): mutUniformInt is registered with low=10, up=10, so every
    mutated gene becomes exactly 10; genes are otherwise drawn from 0-99,
    so a wider range (e.g. 0-99) was probably intended -- confirm.
    """
    toolbox = base.Toolbox()
    creator = Creator.init(base.Fitness, {'Strategy': Strategy})
    creator = Creator.init(base.Fitness, {'Strategy': Strategy})
    toolbox.register("newind", initInd, creator.Individual, Attributes)
    toolbox.register("population", tools.initRepeat, list, toolbox.newind)
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", tools.mutUniformInt, low=10, up=10, indpb=0.2)
    toolbox.register("constructPhenotype", constructPhenotype, Attributes)
    return toolbox
|
[
"[email protected]"
] | |
a582cff63bfa1208999424ac532f639d57e4946c
|
ce6fc44470dcb5fca78cdd3349a7be70d75f2e3a
|
/AtCoder/Grand 039/A.py
|
2ec2212c13eebe7ef3a938d8513f35a3b69c6e01
|
[] |
no_license
|
cormackikkert/competitive-programming
|
f3fa287fcb74248ba218ecd763f8f6df31d57424
|
3a1200b8ff9b6941c422371961a127d7be8f2e00
|
refs/heads/master
| 2022-12-17T02:02:40.892608 | 2020-09-20T11:47:15 | 2020-09-20T11:47:15 | 266,775,265 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 546 |
py
|
S = input()
K = int(input())
import random
import string
def count(string):
    """Count non-overlapping adjacent equal pairs, scanning left to right.

    Once a pair is counted its second character is consumed, so "aaa"
    yields 1 and "aaaa" yields 2.
    """
    pairs = 0
    pos = 1
    n = len(string)
    while pos < n:
        if string[pos - 1] == string[pos]:
            pairs += 1
            pos += 1  # consume the matched character
        pos += 1
    return pairs
# Minimum replacements so the K-fold repetition of S has no two equal
# adjacent characters (uses count() above).
if S == len(S) * S[0]:
    # S is a single repeated character: the whole K*|S| run pairs up.
    res = (K * len(S)) // 2
elif S[0] == S[-1]:
    # First and last runs of the same character merge at each copy boundary.
    new = S.strip(S[0])
    start = len(S) - len(S.lstrip(S[0]))
    end = len(S) - len(S.rstrip(S[0]))
    res = start // 2 + end // 2 + K * count(new) + (K - 1) * ((start + end) // 2)
else:
    # No boundary interaction: each of the K copies contributes count(S).
    res = K * count(S)
print(res)
|
[
"[email protected]"
] | |
2bea257ba29d7d3568169cd2499598f98eebd812
|
8255dcf7689c20283b5e75a452139e553b34ddf3
|
/app/views/dashboard/items/index.py
|
b38bac919cd8357f3585cc1c835d0bce2c8762e2
|
[
"MIT"
] |
permissive
|
Wern-rm/raton.by
|
09871eb4da628ff7b0d0b4415a150cf6c12c3e5a
|
68f862f2bc0551bf2327e9d6352c0cde93f45301
|
refs/heads/main
| 2023-05-06T02:26:58.980779 | 2021-05-25T14:09:47 | 2021-05-25T14:09:47 | 317,119,285 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,654 |
py
|
from flask import render_template, redirect, url_for, request
from flask_login import login_required
from flask_paginate import get_page_args
from app import db, logger
from app.controllers.dashboard_controller import dashboard_controller
from app.controllers.pagination import get_pagination
from app.forms.dashboard_items import ItemsCategoryForm, ItemsForm
from app.models.items import Items
from app.models.items_category import ItemsCategory
from app.views.dashboard import bp
@bp.route('/items', methods=['GET', 'POST'])
@login_required
@dashboard_controller
def items(**kwargs):
    """Dashboard page for the product catalogue.

    Handles four POST forms, distinguished by the 'form-id' field:
    '1' create category, '2' edit category, '3' create item, '4' edit item.
    Each branch commits and redirects with an action/id pair used by the
    template to show a success/error message; GET renders the paginated
    item list.
    """
    category = db.session.query(ItemsCategory).order_by(ItemsCategory.id).all()
    count = db.session.query(Items).count()
    page, per_page, offset = get_page_args(page_parameter='page', per_page_parameter='per_page')
    # form-id == '1': create a new category
    form_create_category = ItemsCategoryForm()
    if form_create_category.validate_on_submit() and request.form['form-id'] == '1':
        try:
            db.session.add(ItemsCategory(title=form_create_category.title.data))
            db.session.commit()
            return redirect(url_for('dashboard.items', action='success', id=21))
        except Exception as e:
            db.session.rollback()
            logger.error(e)
            return redirect(url_for('dashboard.items', action='error', id=7))
    # form-id == '2': rename an existing category (title must stay unique)
    form_edit_category = ItemsCategoryForm()
    if form_edit_category.validate_on_submit() and request.form['form-id'] == '2':
        try:
            category_id = int(request.form['category-id'])
            found = db.session.query(ItemsCategory).filter(ItemsCategory.title == form_edit_category.title.data, ItemsCategory.id != category_id).first()
            if not found:
                db.session.query(ItemsCategory).filter(ItemsCategory.id == category_id).update({'title': form_edit_category.title.data})
                db.session.commit()
                return redirect(url_for('dashboard.items', action='success', id=22))
            else:
                return redirect(url_for('dashboard.items', action='error', id=8))
        except Exception as e:
            db.session.rollback()
            logger.error(e)
            return redirect(url_for('dashboard.items', action='error', id=9))
    # form-id == '3': create a new item in a chosen category
    form_create_item = ItemsForm()
    form_create_item.category_id.choices = [(i.id, i.title) for i in category]
    if form_create_item.validate_on_submit() and request.form['form-id'] == '3':
        try:
            db.session.add(Items(title=form_create_item.title.data,
                                 text=form_create_item.text.data,
                                 category_id=form_create_item.category_id.data))
            db.session.commit()
            return redirect(url_for('dashboard.items', action='success', id=23))
        except Exception as e:
            db.session.rollback()
            logger.error(e)
            return redirect(url_for('dashboard.items', action='error', id=10))
    # form-id == '4': edit an existing item (title must stay unique)
    form_edit_item = ItemsForm()
    form_edit_item.category_id.choices = [(i.id, i.title) for i in category]
    if form_edit_item.validate_on_submit() and request.form['form-id'] == '4':
        try:
            item_id = int(request.form['item-id'])
            found = db.session.query(Items).filter(Items.title == form_edit_item.title.data, Items.id != item_id).first()
            if not found:
                db.session.query(Items).filter(Items.id == item_id).update({
                    'title': form_edit_item.title.data,
                    'category_id': form_edit_item.category_id.data,
                    'text': form_edit_item.text.data
                })
                db.session.commit()
                # NOTE(review): success id=7 breaks the 21/22/23 pattern of the
                # other branches (and 7 is also an error id above) -- confirm.
                return redirect(url_for('dashboard.items', action='success', id=7))
            else:
                return redirect(url_for('dashboard.items', action='error', id=12))
        except Exception as e:
            db.session.rollback()
            logger.error(e)
            return redirect(url_for('dashboard.items', action='error', id=11))
    # GET (or fall-through): render the page with all forms and pagination.
    kwargs['title'] = 'Управление каталогом продукции'
    kwargs['form_create_category'] = form_create_category
    kwargs['form_edit_category'] = form_edit_category
    kwargs['form_create_item'] = form_create_item
    kwargs['form_edit_item'] = form_edit_item
    kwargs['category'] = category
    kwargs['items'] = db.session.query(Items).order_by(Items.id.desc()).limit(per_page).offset(offset).all()
    kwargs['pagination'] = get_pagination(page=page, per_page=per_page, total=count, record_name='items', format_total=True, format_number=True)
    return render_template("dashboard/items/index.html", **kwargs)
|
[
"[email protected]"
] | |
09e278b839107b839c504a7ee39854da665cd9f9
|
394072f7fd3e2a226aeed78bf0a4f587f4c4e383
|
/lambdaExpr/pick_lambda.py
|
eb8c2306a372907e577c942ccd5c5b4e7827dcb3
|
[] |
no_license
|
LeonCrashCode/DRSparsing
|
ec5cca079a2c73eb512444e1ac86215722e6503a
|
c7e92beb8878ff2386bc6789e6c17f0d35bf1277
|
refs/heads/master
| 2020-03-16T09:52:11.217219 | 2019-01-17T14:20:16 | 2019-01-17T14:20:16 | 124,549,958 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 932 |
py
|
import sys
import types
import json
def ascii_encode_dict(data):
    """json object_hook (Python 2 only: uses `unicode`) that re-encodes
    unicode keys/values of a parsed dict as ascii byte strings; other
    values pass through unchanged."""
    ascii_encode = lambda x: x.encode('ascii') if isinstance(x, unicode) else x
    return dict(map(ascii_encode, pair) for pair in data.items())
def tostring(expre):
    """Flatten one expression dict ({'type','indexs','text','attrib'}) into
    the space-joined textual form "type( [i1 i2] text attrib-values )".

    Python 2 only: relies on types.DictType.
    (The local name `re` shadows the stdlib regex module -- harmless here.)
    """
    assert type(expre) == types.DictType
    re = []
    re.append(expre["type"]+"(")
    if len(expre["indexs"]) != 0:
        re.append("["+" ".join(expre["indexs"])+"]")
    if expre["text"] != "":
        re.append(expre["text"])
    if len(expre["attrib"]) != 0:
        for key in expre["attrib"].keys():
            re.append(expre["attrib"][key])
    re.append(")")
    return " ".join(re)
# Stream blank-line-separated records from the file in argv[1]; whenever the
# 2nd token of a record's 2nd line equals argv[2], print the record's first
# six lines. (Python 2: bare `print` statements.)
L = []
for line in open(sys.argv[1]):
    line = line.strip()
    if line == "":
        if L[1].split()[1] == sys.argv[2]:
            print "\n".join(L[0:4])
            #print tostring(json.loads(L[3], object_hook=ascii_encode_dict))
            print "\n".join(L[4:6])
            #print tostring(json.loads(L[5], object_hook=ascii_encode_dict))
            print
        L = []
    else:
        L.append(line)
|
[
"[email protected]"
] | |
8ac0480670678ce2f641aae18ee7719838e5f722
|
d30c6d691a34fc9181fb71e9712b9505384422ec
|
/数字,日期和时间/分数的计算_P96.py
|
be37c7b9b724074146f45662cb34e480751597bf
|
[] |
no_license
|
shishengjia/PythonDemos
|
cef474eb01ee9541ba0c70fc0750ee48a025f42f
|
c0a857b1cacdbb2b6b727a84f95f93b6e86d60c2
|
refs/heads/master
| 2021-01-01T16:15:19.593635 | 2017-10-26T07:18:46 | 2017-10-26T07:18:46 | 97,797,104 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 328 |
py
|
"""
fractions模块
"""
from fractions import Fraction
a = Fraction(1, 2)
b = Fraction(3, 4)
print(a + b) # 5/4
c = a + b
print(c.numerator) # 5 分子
print(c.denominator) # 4 分母
print(float(c)) # 1.25 转化为小数
print(c.limit_denominator(8))
x = 0.625
print(Fraction(*x.as_integer_ratio())) # 5/8 小数化分数
|
[
"[email protected]"
] | |
8dff565e4a3145736f3847daf00e0a91801c7e21
|
a4275e529b564c3ec5c084fb360c2f4207068477
|
/src/montague/validation.py
|
9a20e422a8c17d2122978e27db86e67d8d0db92f
|
[
"MIT"
] |
permissive
|
rmoorman/montague
|
aacc11837016400e37b69e18b2461a3246c2052c
|
423a2a5a773e975fa27f7b61627cc706fb084984
|
refs/heads/master
| 2020-12-29T01:41:17.262828 | 2015-06-18T00:46:45 | 2015-06-18T00:46:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,702 |
py
|
from __future__ import absolute_import
import collections
import types
def validate_montague_standard_format(config):
    """Assert that *config* is in Montague standard format: every required
    section key is present and maps to a mapping (dict-like) object.

    Raises AssertionError on the first violation.
    """
    # Fix: `collections.Mapping` was a deprecated alias removed in
    # Python 3.10; import from the canonical location, with a Python 2
    # fallback to keep the module's original compatibility.
    try:
        from collections.abc import Mapping
    except ImportError:  # Python 2
        from collections import Mapping
    for key in ('globals', 'application', 'composite', 'filter', 'server', 'logging'):
        assert key in config
        assert isinstance(config[key], Mapping)
def validate_config_loader_methods(config_loader):
    """Duck-type check that *config_loader* implements the loader protocol:
    either a working `.config()` returning montague standard format, or all
    four specific `*_config(name)` methods. AssertionError on violation."""
    assert hasattr(config_loader, 'config')
    assert isinstance(config_loader.config, types.MethodType)
    specific_methods_required = False
    try:
        result = config_loader.config()
        validate_montague_standard_format(result)
    except NotImplementedError:
        # config loaders can fail to implement config() as long as they implement the other methods
        specific_methods_required = True
    for method in ('app_config', 'filter_config', 'server_config', 'logging_config'):
        if specific_methods_required:
            # If you don't implement .config(), you have to implement these
            assert hasattr(config_loader, method)
            assert isinstance(getattr(config_loader, method), types.MethodType)
        # We don't know the names of actual apps/filters/etc to load, but we do know
        # the loader shouldn't raise NotImplementedError if it has actually implemented them,
        # so we can try that.
        try:
            getattr(config_loader, method)('__bogus__')
        except NotImplementedError:
            if specific_methods_required:
                raise
        except Exception:
            # any other exception is fine here, because we don't know exactly what happens
            # with a bogus name. usually KeyError, but maybe something else would be raised
            pass
|
[
"[email protected]"
] | |
a211852f23f82de502629246d40e8e38a13b64de
|
96fe253e9a740b51dcd7f83d6ab01bb248c2bf4b
|
/patrones_arquitectura/DDD/value_object/prueba_cuenta_bancaria.py
|
013e69c83e8ab92979a1390c08af8ed518910598
|
[] |
no_license
|
vvalotto/Patrones_Disenio_Python
|
7574470752a5f14214434a927c2c5e0faaa592ba
|
7ab6a74e9b008c3434af0a56d4c2b6b7de3617bf
|
refs/heads/master
| 2021-04-28T19:16:21.535998 | 2018-10-21T14:05:36 | 2018-10-21T14:05:36 | 121,891,812 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 238 |
py
|
from DDD.value_object.cuenta_bancaria import *

# Manual smoke test of the Money / BankAccount value objects: build an
# amount of 100 pesos, print its parts, then open an account holding it
# and print the balance back.
mi_dinero = Dinero(100, Moneda.Pesos)
print(mi_dinero.moneda)
print(mi_dinero.monto)
mi_cuenta = CuentaBancaria(1, mi_dinero)
print(mi_cuenta.balance.monto)
print(mi_cuenta.balance.moneda)
|
[
"[email protected]"
] | |
a1b6fb392741e41deadbff3e6f9ad7e1f8c4f790
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/63/usersdata/158/31530/submittedfiles/swamee.py
|
2c5e1ac2906cab6729e807c9e54f3c38de172e65
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 396 |
py
|
# -*- coding: utf-8 -*-
# Pipe sizing via the rearranged Darcy-Weisbach relation plus the
# Swamee-Jain explicit friction factor: read the inputs, compute the
# diameter D, the Reynolds number Rey and the friction factor K.
import math
f = float(input('digite f:'))  # friction factor estimate
L = float(input('digite L:'))  # pipe length
Q = float(input('digite Q:'))  # flow rate
deltaH = float(input('digite deltaH:'))  # head loss
v = float(input('digite v:'))  # kinematic viscosity
g = 9.81  # gravitational acceleration (m/s^2)
E = 0.000002  # absolute roughness
# BUG FIX: the original wrote `** 1/5`, which Python parses as (x**1)/5 --
# a division by 5 instead of the intended fifth root.
D = ((8*f*L*(Q)**2)/((math.pi)**2*g*deltaH))**0.2
print('D é:%.4f'%D)
Rey = (4*Q)/(math.pi*D*v)
print('Rey é:%.4f'%Rey)
# Swamee-Jain explicit friction factor.
K = 0.25/(math.log10((E/(3.7*D))+(5.74)/(Rey**0.9)))**2
print('K é:%.4f'%K)
|
[
"[email protected]"
] | |
185d7e4291d29d014020b6b40ebfb2d8398b5f8c
|
cf444d07d8056416dfba34e73bba128567b7c692
|
/readandloadperfpadbenchasof.py
|
fd9df58a4e0df4d295b906d84dace758c8374d5d
|
[] |
no_license
|
rtstock/scriptsvapp01
|
cf9e993e5253e9a60dc191cca5e34532fa559ee1
|
7c2db888f0dcd92de62c031f9867e1c5cb4cbc0e
|
refs/heads/master
| 2021-01-23T00:29:11.267852 | 2017-03-21T19:24:34 | 2017-03-21T19:24:34 | 85,737,024 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,756 |
py
|
import ftplib
class perform:
#def __init__(self):
# print 'class initialized...'
# #self.DataFilePathName = []
# #self.BuildFilepaths()
    def __init__(self,p_datafilepathname):
        # Store the report path and immediately run the import
        # (ReadAndLoad) as a side effect of construction. (Python 2.)
        print 'initialized readandloadperfpadbenchasof.py'
        self.DataFilePathName = p_datafilepathname
        self.ReadAndLoad()
        print 'exiting readandloadperfpadbenchasof.py'
    # Old-style getter/setter + property() pairs (classic Python 2 idiom);
    # a bare attribute or @property would be the modern equivalent.
    def set_DataFilePathName(self,DataFilePathName):
        self._DataFilePathName = DataFilePathName
    def get_DataFilePathName(self):
        return self._DataFilePathName
    # Path of the report file consumed by ReadAndLoad().
    DataFilePathName = property(get_DataFilePathName, set_DataFilePathName)
    def set_Results(self,Results):
        self._Results = Results
    def get_Results(self):
        return self._Results
    # Outcome dict produced by ReadAndLoad() (counters / status).
    Results = property(get_Results, set_Results)
def xstr(self,s):
try:
return '' if s is None else str(s)
except:
return ''
    def ReadAndLoad(self):
        # Parse the HTML-formatted .xls report at self.DataFilePathName with
        # BeautifulSoup and load each data row into SQL Server, doing a
        # delete-then-insert per (market_node_last_invested_date,
        # market_index_ext) pair. Outcome counters end up in self.Results.
        # Python 2 code (print statements, `except Exception,e`).
        procresults = {}
        try:
            my_datafilepathname = self.DataFilePathName
            # get and format the modified date
            import os.path, time
            print 'got here !', my_datafilepathname
            filedatetime = os.path.getmtime(my_datafilepathname)
            from datetime import datetime
            # NOTE(review): filedatetime_forsql is computed but never used below.
            filedatetime_forsql = datetime.fromtimestamp(filedatetime).strftime('%Y-%m-%d %H:%M:%S')
            import bs4, sys
            with open(my_datafilepathname, 'r') as f:
                webpage = f.read().decode('utf-8')
            soup = bs4.BeautifulSoup(webpage, "lxml")
            market_index_ext = ''
            fieldnames = {}
            is_dataline = 0
            total_deleted = 0
            total_inserted = 0
            # Header cells give the column names, keyed by ordinal position.
            for node in soup.find_all('th', attrs={}): #'style':'display: table-header-group; mso-number-format:\@;'
                if node.attrs['class'][0] in ['HeaderCellNumeric','HeaderCellString']:
                    fieldnames[len(fieldnames)] = node.string
                    #print node.string
            # One table row per data record; a row counts as data only if it
            # carries a market_index_ext cell.
            for nodeA in soup.find_all('tr', attrs={}):
                print '-----------------------'
                is_dataline = 0
                fieldvalues = {}
                for nodeB in nodeA.find_all('td', attrs={}):
                    #print 'got here!!'
                    #print nodeB.attrs['class'][0]
                    if nodeB.attrs['class'][0] in ['DataCellNumeric','DataCellString']:
                        #print 'got here!!!'
                        if fieldnames[len(fieldvalues)] == 'market_index_ext':
                            is_dataline = 1
                            market_index_ext = nodeB.string
                            #print market_index_ext, fieldnames[len(fieldvalues)],'=', nodeB.string
                        #print fieldnames[len(fieldvalues)]
                        #print ' ',nodeB.string
                        fieldvalues[fieldnames[len(fieldvalues)]] = nodeB.string
                print 'got here xxxxxx'
                if is_dataline == 1:
                    #print 'got here !@'
                    # Build comma-separated column and quoted-value lists.
                    fieldnames_string = ''
                    fieldvalues_string = ''
                    for k,v in fieldvalues.items():
                        #print 'fieldvalues:',k, v
                        if v == None:
                            goodvaluestring = ''
                        else:
                            goodvaluestring = v
                        print 'fieldvalues:',k, goodvaluestring
                        fieldnames_string = fieldnames_string + k + ','
                        fieldvalues_string = fieldvalues_string + "'" + goodvaluestring + "',"
                    fieldnames_string = fieldnames_string[:-1]
                    fieldvalues_string = fieldvalues_string[:-1]
                    print 'fieldnames_string....................'
                    print fieldnames_string
                    print 'fieldvalues_string.............................'
                    print fieldvalues_string
                    print market_index_ext
                    #print fieldvalues[fieldnames[0]],fieldvalues[fieldnames[1]],fieldvalues[fieldnames[2]]
                    import pyodbc
                    cnxn = pyodbc.connect(r'DRIVER={SQL Server};SERVER=ipc-vsql01;DATABASE=DataAgg;Trusted_Connection=True;')
                    cursor = cnxn.cursor()
                    #print 'got here !@'
                    #sql_delete = "delete from dbo.xanalysisofbenchmarks_padbenchasof_imported where market_node_last_invested_date = ? and market_index_ext = ?", fieldvalues['market_node_last_invested_date'],fieldvalues['market_index_ext']
                    #print sql_delete
                    cursor.execute( "delete from dbo.xanalysisofbenchmarks_padbenchasof_imported where market_node_last_invested_date = ? and market_index_ext = ?", fieldvalues['market_node_last_invested_date'],fieldvalues['market_index_ext'] )
                    total_deleted = total_deleted + cursor.rowcount
                    print ' ',cursor.rowcount, 'records deleted'
                    cnxn.commit()
                    # NOTE(review): the INSERT is built by string concatenation
                    # (not parameterized like the DELETE above) -- SQL-injection
                    # prone if the report content is ever untrusted.
                    insert_sql = "insert into xanalysisofbenchmarks_padbenchasof_imported("+fieldnames_string+") values ("+fieldvalues_string+")"
                    #print insert_sql
                    cursor.execute(insert_sql)
                    procresults['records inserted'] = cursor.rowcount
                    total_inserted = total_inserted + cursor.rowcount
                    print ' ',cursor.rowcount, 'records inserted'
                    cnxn.commit()
            procresults['resultvalue1'] = 'success'
            procresults['total_deleted'] = total_deleted
            procresults['total_inserted'] = total_inserted
        except Exception,e:
            # Broad catch-all: any failure leaves whatever was recorded so far.
            print type(e)
            print 'there was an error on ' + self.DataFilePathName
        self.Results = procresults
if __name__=='__main__':
    # Ad-hoc manual test: import one known report file and dump the results.
    print 'running ___name___'
    myDataFilePathName = r"//Ipc-vsql01/data/Batches/prod/WatchFolder/incoming/PagesOutput_GetPBAsOf_2016-11-30 132015270.xls"
    o = perform(myDataFilePathName)
    #o.DataFilePathName = r"//Ipc-vsql01/data/Batches/prod/WatchFolder/incoming/PagesOutput_GetPadPortBenchAsOf_20161124_ADAKAT.xls"
    #o.ReadAndLoad()
    print o.Results
|
[
"[email protected]"
] | |
912732738030d84355aa57768facc7293bf43a88
|
187a6558f3c7cb6234164677a2bda2e73c26eaaf
|
/jdcloud_sdk/services/clouddnsservice/models/HostRRlb.py
|
a70c1c23618e54df92e3c0396719c63ee09311d7
|
[
"Apache-2.0"
] |
permissive
|
jdcloud-api/jdcloud-sdk-python
|
4d2db584acc2620b7a866af82d21658cdd7cc227
|
3d1c50ed9117304d3b77a21babe899f939ae91cd
|
refs/heads/master
| 2023-09-04T02:51:08.335168 | 2023-08-30T12:00:25 | 2023-08-30T12:00:25 | 126,276,169 | 18 | 36 |
Apache-2.0
| 2023-09-07T06:54:49 | 2018-03-22T03:47:02 |
Python
|
UTF-8
|
Python
| false | false | 1,141 |
py
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class HostRRlb(object):
    """Weighted load-balancing entry for one DNS resolution record."""

    def __init__(self, hostValue=None, id=None, weight=None, rate=None):
        """
        :param hostValue: (Optional) value of the resolution record
        :param id: (Optional) ID of the resolution record
        :param weight: (Optional) weight of the resolution record
        :param rate: (Optional) 100x this record's share of the overall balancing
        """
        # Plain value object: store each constructor argument as-is.
        for attr, val in (("hostValue", hostValue), ("id", id),
                          ("weight", weight), ("rate", rate)):
            setattr(self, attr, val)
|
[
"[email protected]"
] | |
a1f08199e9d65120982277c5b73430048437c363
|
e751c59ca3c98c8f6a98b7c6fc7167fe615aa1b0
|
/streamz/orderedweakset.py
|
82136ecdea1138314b8cd2277154f13f468712af
|
[
"BSD-3-Clause"
] |
permissive
|
yutiansut/streamz
|
a10e0d2beefd450b5d19cb7d78b4c8a333ebcd48
|
e51f0397d27957f8b3bfc78ecdb946cbfbac21b6
|
refs/heads/master
| 2020-07-10T15:23:35.567092 | 2019-12-24T07:07:43 | 2019-12-24T07:07:43 | 204,297,562 | 1 | 0 |
NOASSERTION
| 2019-12-24T07:07:44 | 2019-08-25T13:24:35 |
Python
|
UTF-8
|
Python
| false | false | 977 |
py
|
# -*- coding: utf8 -*-
# This is a copy from Stack Overflow
# https://stackoverflow.com/questions/7828444/indexable-weak-ordered-set-in-python
# Asked by Neil G https://stackoverflow.com/users/99989/neil-g
# Answered/edited by https://stackoverflow.com/users/1001643/raymond-hettinger
import collections
import collections.abc
import weakref
class OrderedSet(collections.abc.MutableSet):
    """A mutable set that remembers insertion order.

    Backed by an OrderedDict whose keys are the members; iteration yields
    elements in the order they were first added.
    """
    def __init__(self, values=()):
        # fromkeys preserves the order of *values*; duplicates collapse to
        # their first occurrence.
        self._od = collections.OrderedDict.fromkeys(values)
    def __len__(self):
        return len(self._od)
    def __iter__(self):
        return iter(self._od)
    def __contains__(self, value):
        return value in self._od
    def add(self, value):
        # Re-adding an existing member keeps its original position.
        self._od[value] = None
    def discard(self, value):
        # No-op when *value* is absent (MutableSet contract for discard).
        self._od.pop(value, None)
class OrderedWeakrefSet(weakref.WeakSet):
    """A WeakSet whose backing storage preserves insertion order."""
    def __init__(self, values=()):
        # Build an empty WeakSet first, then swap its backing container for
        # an ordered one *before* any element is inserted.
        super(OrderedWeakrefSet, self).__init__()
        self.data = OrderedSet()
        for item in values:
            self.add(item)
|
[
"[email protected]"
] | |
afe7b68eebc859166be1c5e13503095b75df042c
|
3527ff6346f98a5b7c51ce3c58428227f4bc8617
|
/acwing/800.py
|
3e10fbf147a1197999a55e116c697baa1c94510e
|
[] |
no_license
|
ShawnDong98/Algorithm-Book
|
48e2c1158d6e54d4652b0791749ba05a4b85f96d
|
f350b3d6e59fd5771e11ec0b466f9ba5eeb8e927
|
refs/heads/master
| 2022-07-17T04:09:39.559310 | 2022-07-13T15:46:37 | 2022-07-13T15:46:37 | 242,317,482 | 0 | 0 | null | 2020-10-11T14:50:48 | 2020-02-22T09:53:41 |
C++
|
UTF-8
|
Python
| false | false | 277 |
py
|
# AcWing 800 "target sum of two sorted arrays": read sizes n, m and target x,
# then arrays A and B, and print the indices i j of the pair A[i] + B[j] == x.
# NOTE(review): assumes A and B are sorted ascending (per the problem
# statement) — the monotone two-pointer scan below relies on it.
n, m, x = map(int, input().split())
A = list(map(int, input().split()))
B = list(map(int, input().split()))
i = 0
j = m -1
while i< n:
    # Shrink j while the sum is too large; j never moves right again, so the
    # whole scan is O(n + m).
    while j >= 0 and A[i] + B[j] > x:
        j -= 1
    if j >= 0 and A[i] + B[j] == x:
        print(f'{i} {j}')
        break
    i += 1
|
[
"[email protected]"
] | |
5d9d6c025131f2a3f97852b760a240950735157f
|
46eb22dbe3514fc586ca57dd5e9a2ef1e486ed7e
|
/src/lanyon/parser.py
|
5e43a9d5db751938ac34fd688b085ebb43fc5d44
|
[
"BSD-3-Clause"
] |
permissive
|
arthurk/lanyon
|
06d37d2e96358a190f8926a65f9f8df00db09393
|
d4c319cc5659c50aa3e356cbf74e976ba82acc4b
|
refs/heads/master
| 2021-01-17T07:41:28.518803 | 2011-04-16T11:37:06 | 2011-04-16T11:37:06 | 804,624 | 5 | 2 | null | 2016-06-06T13:33:41 | 2010-07-29T06:45:49 |
Python
|
UTF-8
|
Python
| false | false | 6,056 |
py
|
import datetime
import re
from os.path import splitext
class ParserException(Exception):
    """Raised when a source document cannot be parsed (e.g. a bad date header)."""
class Parser(object):
    """Base parser: splits a source document into header metadata and body text.

    Headers are leading "Key: value" lines; everything after them is the body.
    Subclasses override ``_parse_text`` to render the body from a markup
    language and set ``output_ext`` to the rendered file's extension.
    """
    # Extension of the rendered output file; None means keep the input extension.
    output_ext = None
    def __init__(self, settings, source):
        self.settings = settings  # settings dict (reads 'date_format' here)
        self.source = source      # raw document text, headers included
        self.headers = {}         # parsed header key -> (transformed) value
        self.text = ''            # document body, filled in by parse()
    def _parse_headers(self):
        """
        Parses and removes the headers from the source.
        """
        # A header line is "key: value", indented by at most three spaces;
        # keys are alphanumerics plus '_' and '-'.
        META_RE = re.compile(
            r'^[ ]{0,3}(?P<key>[A-Za-z0-9_-]+):\s*(?P<value>.*)')
        lines = self.source.splitlines()
        for num, line in enumerate(lines):
            match = META_RE.match(line)
            if match:
                key = match.group('key').strip().lower()
                value = match.group('value').strip()
                if value:
                    # custom header transformation: dispatch to a
                    # _parse_<key>_header method when one exists (e.g. 'date',
                    # 'tags'); otherwise keep the raw string value.
                    header_method = getattr(self, '_parse_%s_header' % key,
                                            None)
                    if header_method:
                        value = header_method(value)
                    self.headers[key] = value
                # A header with an empty value is not stored but still counts
                # as a header line, so it gets stripped from the body below.
                num_last_match = num
            else:
                break
        # remove header lines from input source
        try:
            del lines[:num_last_match + 1]
        except UnboundLocalError:
            # num_last_match was never assigned: the document has no headers.
            pass
        # check if a blank line followed the header lines and remove it
        try:
            if not lines[0]:
                del lines[0]
        except IndexError:
            pass
        self.text = '\n'.join(lines)
    def _parse_tags_header(self, value):
        """
        Parses the value from the 'tags' header into a list.
        """
        return [t.strip() for t in value.split(',')]
    def _parse_date_header(self, value):
        """
        Parses the date header into a python datetime.datetime object.
        The value must be in the format as specified by the 'date_format'
        setting; otherwise a ParserException will be thrown.
        """
        format = self.settings['date_format']
        try:
            return datetime.datetime.strptime(value, format)
        except ValueError as error:
            # Re-raise as the package's own exception type.
            raise ParserException(error)
    def _parse_updated_header(self, value):
        # 'updated' uses the same format rules ('date_format') as 'date'.
        return self._parse_date_header(value)
    def _parse_status_header(self, value):
        """
        Checks that the value of the 'status' header is 'live', 'hidden' or
        'draft'. If not 'live' is returned.
        """
        if value in ('live', 'hidden', 'draft'):
            return value
        return 'live'
    def _parse_text(self):
        """
        Returns the raw input text. Override this method to process
        text in another markup language such as Markdown.
        """
        return self.text
    def parse(self):
        # Order matters: headers must be stripped before the body is rendered.
        self._parse_headers()
        self._parse_text()
        return (self.headers, self.text)
class RstParser(Parser):
    """ReStructuredText Parser"""
    output_ext = 'html'
    def pygments_directive(self, name, arguments, options, content, lineno,
                           content_offset, block_text, state, state_machine):
        """
        Parse sourcecode using Pygments
        From http://bitbucket.org/birkenfeld/pygments-main/src/tip/external/rst-directive-old.py
        """
        # Imported lazily so docutils/pygments are only required for .rst input.
        from pygments import highlight
        from pygments.formatters import HtmlFormatter
        from pygments.lexers import get_lexer_by_name, TextLexer
        from docutils import nodes
        try:
            lexer = get_lexer_by_name(arguments[0])
        except ValueError:
            # no lexer found - use the text one instead of an exception
            lexer = TextLexer()
        # take an arbitrary option if more than one is given
        formatter = HtmlFormatter(noclasses=False)
        parsed = highlight(u'\n'.join(content), lexer, formatter)
        # Emit the highlighted markup as a raw-HTML docutils node.
        return [nodes.raw('', parsed, format='html')]
    # Metadata required by docutils' old-style directive API:
    # (required args, optional args, last arg may contain whitespace),
    # and "directive accepts body content".
    pygments_directive.arguments = (1, 0, 1)
    pygments_directive.content = 1
    def _parse_text(self):
        try:
            from docutils.core import publish_parts
            from docutils.parsers.rst import directives
        except ImportError:
            raise Exception("The Python docutils library isn't installed. " +
                            "Install with `pip install docutils`")
        else:
            # if pygments is installed, register the "sourcecode" directive
            try:
                import pygments
            except ImportError:
                pass
            else:
                directives.register_directive('sourcecode',
                                              self.pygments_directive)
            # Render only the document fragment (no <html>/<body> wrapper);
            # doctitle_xform off keeps the first section title in the body.
            self.text = publish_parts(source=self.text,
                                      settings_overrides={
                                          "doctitle_xform": False,
                                          "initial_header_level": 2
                                      },
                                      writer_name='html4css1')['fragment']
class MarkdownParser(Parser):
    """Renders the document body from Markdown to HTML."""
    output_ext = 'html'
    def _parse_text(self):
        # Import lazily so the markdown package is only needed for .md input.
        try:
            import markdown
        except ImportError:
            raise Exception("The Python markdown library isn't installed. " +
                            "Install with `pip install markdown`")
        # Code blocks are highlighted via the codehilite extension.
        self.text = markdown.markdown(self.text,
                                      ['codehilite(css_class=highlight)'])
# a mapping of file extensions to the corresponding parser class
parser_map = (
(('html', 'htm', 'xml', 'txt'), Parser),
(('rst',), RstParser),
(('md', 'markdown'), MarkdownParser),
)
def get_parser_for_filename(filename):
    """
    Factory function returning a parser class based on the file extension.
    """
    # Extension without the leading dot; '' when the name has no extension.
    ext = splitext(filename)[1][1:]
    for extensions, parser_cls in parser_map:
        if ext in extensions:
            return parser_cls
    # Unknown extension: no parser (implicit None keeps the old contract).
    return
|
[
"[email protected]"
] | |
ae5543276e6ec4f6dc0823885d5ba0a303c5e818
|
cfc3fa658f826d02308453e557d82758895399c2
|
/datasets/covid_qa_deepset/covid_qa_deepset.py
|
d43c1e5924c54b5a73b245220e1e6d2c37d225e1
|
[
"Apache-2.0"
] |
permissive
|
meehawk/datasets
|
cac530ec0e17514c01cdff30302521d6303ed93b
|
b70141e3c5149430951773aaa0155555c5fb3e76
|
refs/heads/master
| 2023-03-29T12:51:54.700891 | 2021-04-08T17:22:53 | 2021-04-08T17:22:53 | 355,996,122 | 9 | 0 |
Apache-2.0
| 2021-04-08T17:31:03 | 2021-04-08T17:31:02 | null |
UTF-8
|
Python
| false | false | 4,607 |
py
|
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""COVID-QA: A Question Answering Dataset for COVID-19."""
from __future__ import absolute_import, division, print_function
import json
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{moller2020covid,
title={COVID-QA: A Question Answering Dataset for COVID-19},
author={M{\"o}ller, Timo and Reina, Anthony and Jayakumar, Raghavan and Pietsch, Malte},
booktitle={Proceedings of the 1st Workshop on NLP for COVID-19 at ACL 2020},
year={2020}
}
"""
# You can copy an official description
_DESCRIPTION = """\
COVID-QA is a Question Answering dataset consisting of 2,019 question/answer pairs annotated by volunteer biomedical \
experts on scientific articles related to COVID-19.
"""
_HOMEPAGE = "https://github.com/deepset-ai/COVID-QA"
_LICENSE = "Apache License 2.0"
_URL = "https://raw.githubusercontent.com/deepset-ai/COVID-QA/master/data/question-answering/"
_URLs = {"covid_qa_deepset": _URL + "COVID-QA.json"}
class CovidQADeepset(datasets.GeneratorBasedBuilder):
    """Builder for COVID-QA: 2,019 expert-annotated QA pairs on COVID-19 papers.

    NOTE(review): left byte-identical on purpose — `datasets` fingerprints the
    builder code for caching, so cosmetic rewrites would invalidate user caches.
    """
    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="covid_qa_deepset", version=VERSION, description="COVID-QA deepset"),
    ]
    def _info(self):
        # SQuAD-v2-style extractive-QA schema: context/question/answers plus
        # the source document id and the "is_impossible" flag.
        features = datasets.Features(
            {
                "document_id": datasets.Value("int32"),
                "context": datasets.Value("string"),
                "question": datasets.Value("string"),
                "is_impossible": datasets.Value("bool"),
                "id": datasets.Value("int32"),
                "answers": datasets.features.Sequence(
                    {
                        "text": datasets.Value("string"),
                        "answer_start": datasets.Value("int32"),
                    }
                ),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        # Single TRAIN split: the upstream repo publishes one JSON file only.
        url = _URLs[self.config.name]
        downloaded_filepath = dl_manager.download_and_extract(url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_filepath},
            ),
        ]
    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            covid_qa = json.load(f)
            # SQuAD-style nesting: data -> paragraphs -> qas.
            for article in covid_qa["data"]:
                for paragraph in article["paragraphs"]:
                    context = paragraph["context"].strip()
                    document_id = paragraph["document_id"]
                    for qa in paragraph["qas"]:
                        question = qa["question"].strip()
                        is_impossible = qa["is_impossible"]
                        id_ = qa["id"]
                        answer_starts = [answer["answer_start"] for answer in qa["answers"]]
                        answers = [answer["text"].strip() for answer in qa["answers"]]
                        # Features currently used are "context", "question", and "answers".
                        # Others are extracted here for the ease of future expansions.
                        yield id_, {
                            "document_id": document_id,
                            "context": context,
                            "question": question,
                            "is_impossible": is_impossible,
                            "id": id_,
                            "answers": {
                                "answer_start": answer_starts,
                                "text": answers,
                            },
                        }
|
[
"[email protected]"
] | |
a92459e9e8ae533390d8cd2ec5542ac8dbe5714e
|
7e9977bbf988fe2136529640afb08460cce833bc
|
/HeroRegistrationProj/manage.py
|
c117eea3936972ec247c0372ef508f7c1c854c19
|
[
"Apache-2.0"
] |
permissive
|
cs-fullstack-2019-spring/django-fields-widgets-cw-itayanna
|
b8c761376f89fd0c8b4d2fead46b5dc75a7194df
|
7702fe9b541dfaf5ac0458729bcdacd538b6c232
|
refs/heads/master
| 2020-04-27T13:24:13.427170 | 2019-03-12T14:37:11 | 2019-03-12T14:37:11 | 174,368,562 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 552 |
py
|
#!/usr/bin/env python
import os
import sys
def main():
    """Configure the settings module, then dispatch to Django's CLI."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "HeroRegistrationProj.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Chain the original error so the real import failure stays visible.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
10f5ab79003ff1e2cbfd7c31754b890b1ab31a6d
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03283/s440786665.py
|
fe6e5bf6654473cf6b9ff410adac87f56d2c24dd
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,237 |
py
|
class BIT:
    """Fenwick (binary indexed) tree over ``n`` zero-indexed slots.

    Supports point updates (``add``) and half-open range sums (``sum``),
    each in O(log n).
    """
    def __init__(self, n):
        self.n = n
        # Internal array is 1-indexed; slot 0 is never used.
        self.bit = [0] * (self.n + 1)
    def init(self, init_val):
        """Point-add every value of *init_val*, starting at index 0."""
        for pos, val in enumerate(init_val):
            self.add(pos, val)
    def add(self, i, x):
        """Add *x* at zero-indexed position *i*."""
        pos = i + 1  # shift into the tree's 1-indexed space
        while pos <= self.n:
            self.bit[pos] += x
            pos += pos & -pos  # jump to the next node covering pos
    def sum(self, i, j):
        """Return the sum over the half-open interval [i, j), zero-indexed."""
        return self._sum(j) - self._sum(i)
    def _sum(self, i):
        """Return the prefix sum over [0, i), zero-indexed."""
        total = 0
        pos = i
        while pos > 0:
            total += self.bit[pos]
            pos -= pos & -pos  # strip the lowest set bit
        return total
# Offline processing: count, for each query (p, q), how many of the m
# intervals [l, r] are fully contained in [p, q].  Both intervals and queries
# are merged into one event list sorted by right endpoint (intervals before
# queries on ties, via the 0/1 type flag); a Fenwick tree over left endpoints
# then answers each query as "intervals with r' <= q and l' in [p, q]".
import sys
import io, os
# Fast stdin: read raw bytes instead of the default line-buffered input().
input = sys.stdin.buffer.readline
#input = io.BytesIO(os.read(0,os.fstat(0).st_size)).readline
n, m, Q = map(int, input().split())
X = []
for i in range(m):
    l, r = map(int, input().split())
    l, r = l-1, r-1  # to 0-indexed
    X.append((l, r, 0, i))  # type 0: interval event
for i in range(Q):
    p, q = map(int, input().split())
    p, q = p-1, q-1  # to 0-indexed
    X.append((p, q, 1, i))  # type 1: query event
# Sort by right endpoint, intervals (type 0) before queries (type 1) on ties.
X.sort(key=lambda x: (x[1], x[2]))
# NOTE(review): capacity 550 presumably covers the problem's max n — confirm
# against the constraints.
bit = BIT(550)
ans = [-1]*Q
for l, r, t, i in X:
    if t == 0:
        bit.add(l, 1)  # register this interval's left endpoint
    else:
        ans[i] = bit.sum(l, r+1)  # intervals seen so far with l' in [p, q]
print(*ans, sep='\n')
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.