blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
eb4921718ea76bd76fd0d09bef6d3040445b07fe
|
bfd6ac084fcc08040b94d310e6a91d5d804141de
|
/PulseSequences2/multi2d_test2.py
|
1609e844e7a2e84b959142d2d35d97635fe46e69
|
[] |
no_license
|
jqwang17/HaeffnerLabLattice
|
3b1cba747b8b62cada4467a4ea041119a7a68bfa
|
03d5bedf64cf63efac457f90b189daada47ff535
|
refs/heads/master
| 2020-12-07T20:23:32.251900 | 2019-11-11T19:26:41 | 2019-11-11T19:26:41 | 232,792,450 | 1 | 0 | null | 2020-01-09T11:23:28 | 2020-01-09T11:23:27 | null |
UTF-8
|
Python
| false | false | 671 |
py
|
import numpy as np
from common.devel.bum.sequences.pulse_sequence import pulse_sequence
from labrad.units import WithUnit as U
from treedict import TreeDict
from common.client_config import client_info as cl
from multi_test import multi_test
class multi2d_test2(pulse_sequence):
    """Composite 2-D scan test sequence that wraps `multi_test`.

    NOTE(review): the semantics of the class attributes below are defined by
    the pulse_sequence framework; only the attribute names are visible here —
    confirm against the framework before relying on these descriptions.
    """
    # Flags read by the pulse_sequence framework.
    is_2dimensional = True
    is_composite = True
    # Parameters surfaced in the GUI.
    show_params = ['NSY.pi_time']
    # Scan axis; the tuple is presumably (start, stop, step, unit) — confirm.
    scannable_params = {
        'Heating.background_heating_time': [(0., 5000., 500., 'us'), 'current']
    }
    # Parameters pinned for the whole scan.
    fixed_params = {'StateReadout.ReadoutMode':'pmt'}
    # The underlying (1-D) sequence being repeated.
    sequence = multi_test
    @classmethod
    def run_finally(cls, cxn, parameter_dct, all_data, data_x):
        # Post-scan hook; returns a constant placeholder value.
        return 0.1
|
[
"[email protected]"
] | |
5fda096a90541b4f8f01c8692ee9f34c6977c70a
|
b40a140a911279f3c61737367ab8f3b7c15fe98b
|
/avakas/get_parameters_file.py
|
6f6976a02b4d1dc3baa10e6796e10d3f55ed8aa2
|
[] |
no_license
|
AurelienNioche/HotellingBathtub
|
80fef9b4106454ec339a6c106c52738f1e95e77b
|
5b370a20b1d2417022fd2a6de8a7a4baeeda321e
|
refs/heads/master
| 2021-05-06T13:02:04.130850 | 2018-02-16T22:47:01 | 2018-02-16T22:47:01 | 113,213,538 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 222 |
py
|
import os
def get_parameters_file(i):
    """Return the i-th parameter file (sorted by path) inside the "tasks" dir.

    Directories and other non-file entries under "tasks" are skipped.
    """
    task_dir = "tasks"
    candidates = (os.path.join(task_dir, entry) for entry in os.listdir(task_dir))
    parameter_files = sorted(path for path in candidates if os.path.isfile(path))
    return parameter_files[i]
|
[
"[email protected]"
] | |
7c238c319c6f6d8ba62cadcb28faf56b3f32ab3b
|
b3c47795e8b6d95ae5521dcbbb920ab71851a92f
|
/AtCoder/AtCoder Beginner Contest 247/B.py
|
973864707113b363529868eab237a721c0f7de7b
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
Wizmann/ACM-ICPC
|
6afecd0fd09918c53a2a84c4d22c244de0065710
|
7c30454c49485a794dcc4d1c09daf2f755f9ecc1
|
refs/heads/master
| 2023-07-15T02:46:21.372860 | 2023-07-09T15:30:27 | 2023-07-09T15:30:27 | 3,009,276 | 51 | 23 | null | null | null | null |
UTF-8
|
Python
| false | false | 954 |
py
|
# Python 2 solution for AtCoder Beginner Contest 247, problem B.
# Each person has a family name (name1) and a given name (name2) and must be
# able to pick one of them as a nickname that no OTHER person has as either
# of their names.
from collections import defaultdict
n = int(raw_input())
d1 = defaultdict(int)  # occurrence count of each family name
d2 = defaultdict(int)  # occurrence count of each given name
names = []
for i in xrange(n):
    name1, name2 = raw_input().split()
    d1[name1] += 1
    d2[name2] += 1
    names.append((name1, name2))
flag = True  # stays True while every person can pick a valid nickname
for (name1, name2) in names:
    if name1 == name2:
        # Both candidate nicknames are the same string, so it must appear
        # exactly once as a family name and exactly once as a given name
        # (both occurrences being this very person).
        if d1[name1] > 1 or d2[name1] > 1:
            flag = False
            break
    else:
        # Usable if the family name is unique among family names and never
        # used as a given name, or symmetrically for the given name.
        if ((d1[name1] <= 1 and d2[name1] == 0) or
            (d1[name2] == 0 and d2[name2] <= 1)):
            pass
        else:
            flag = False
            break
if flag:
    print 'Yes'
else:
    print 'No'
'''
^^^^TEST^^^^
3
tanaka taro
tanaka jiro
suzuki hanako
-----
Yes
$$$TEST$$$
^^^^TEST^^^^
3
aaa bbb
xxx aaa
bbb yyy
-----
No
$$$TEST$$$
^^^^TEST^^^^
2
tanaka taro
tanaka taro
-----
No
$$$TEST$$$
^^^^TEST^^^^
3
takahashi chokudai
aoki kensho
snu ke
-----
Yes
$$$TEST$$$
^^^^TEST^^^^
3
a a
b b
c a
-----
No
$$$TEST$$$
'''
|
[
"[email protected]"
] | |
8987a79b8238e079d6527786951d545fffd1ab1c
|
f1614f3531701a29a33d90c31ab9dd6211c60c6b
|
/test/menu_sun_integration/infrastructure/aws/sqs/mocks/customer_mock.py
|
a7b78f7010ca6a18c5de255b002fa7e7ea1d8312
|
[] |
no_license
|
pfpacheco/menu-sun-api
|
8a1e11543b65db91d606b2f3098847e3cc5f2092
|
9bf2885f219b8f75d39e26fd61bebcaddcd2528b
|
refs/heads/master
| 2022-12-29T13:59:11.644409 | 2020-10-16T03:41:54 | 2020-10-16T03:41:54 | 304,511,679 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,321 |
py
|
def mock_queue_make_api_call(self, operation_name, kwarg):
    """Stubbed SQS client call returning canned boto3-style responses.

    Handles the 'SendMessage', 'ReceiveMessage' and 'DeleteMessage'
    operations; any other operation name falls through and implicitly
    returns None.

    NOTE(review): the (self, operation_name, kwarg) signature suggests this
    is meant to be patched over botocore's BaseClient._make_api_call in
    tests — confirm at the patch site.
    """
    if operation_name == 'SendMessage':
        return {'MD5OfMessageBody': 'a836c42e687e8a08e66a794a5dacd8c1',
                'MessageId': '85e8a505-2ba4-4fa3-a93c-cc30bf5e65e7',
                'ResponseMetadata': {'RequestId': '7313c686-bca3-5d79-9295-90a51d270c9c',
                                     'HTTPStatusCode': 200,
                                     'HTTPHeaders': {
                                         'x-amzn-requestid': '7313c686-bca3-5d79-9295-90a51d270c9c',
                                         'date': 'Fri, 18 Oct 2019 11:17:24 GMT',
                                         'content-type': 'text/xml', 'content-length': '378'},
                                     'RetryAttempts': 0}}
    if operation_name == 'ReceiveMessage':
        # One message whose Body is the JSON payload the integration under
        # test is expected to parse (seller/customer data + metafields).
        return {'Messages': [{'MessageId': '92de7972-f8e5-4998-a182-3977455f8cb0',
                              'ReceiptHandle': 'AQEBWvhuG9mMCVO0LE7k'
                                               '+flexfAzfGFn4yGRI5Xm60pwu1RwlGot4GqWveL1tOYmUTM63bwR+OFj5CL'
                                               '/e1ZchKlZ0DTF6rc9Q+pyNdbIKckaVrfgbYySsZDkr68AtoWzFoIf0U68SUO83ys0ydK'
                                               '+TSHgpw38zKICpupwccqe67HDu2Vve6ATFtjHa10+w3fU6l63NRFnmNeDjuDw'
                                               '/uq86s0puouRFHQmoeNlLg'
                                               '/5wjlT1excIDKxlIvJFBoc420ZgxulvIOcblqUxcGIG6Ah6x3aJw27q14vT'
                                               '+0wRi9aoQ8dG0ys57OeWjlRRG3UII1J5uiShet9F15CKF3GZatNEZOOXkIqdQO'
                                               '+lMHIhwMt7wls2EMtVO4KFIdWokzIFhidzfAHMTANCoAD26gUsp2Z9UyZaA==',
                              'MD5OfBody': 'a836c42e687e8a08e66a794a5dacd8c1',
                              'Body': '{"integration_type": "BRF","seller_id": 1,"seller_code": "ABC",'
                                      '"document": "00005234000121",'
                                      '"cep": "09185030",'
                                      '"credit_limit": "103240.72",'
                                      '"customer_id": "1",'
                                      '"payment_terms":['
                                      '{"deadline": 5,"description": "Payment 5","payment_type": "BOLETO"},'
                                      '{"deadline": 10,"description": "Payment 10","payment_type": "CHEQUE"}],'
                                      '"seller_metafields": [{"namespace": "CODIGO_PAGAMENTO","key": "BOLETO_7",'
                                      '"value": "007"},{"namespace": "CODIGO_PAGAMENTO","key": "BOLETO_14",'
                                      '"value": "014"}],'
                                      '"customer_metafields": [{"namespace": "Customer Namespace 1",'
                                      '"key": "Customer Key 1",'
                                      '"value": "Customer VALUE 1"},{"namespace": "Customer Namespace 2",'
                                      '"key": "Customer Key 2","value": "Customer VALUE 2"}]}'},
                             ],
                'ResponseMetadata': {'RequestId': '0ffbdfb3-809f-539e-84dd-899024785f25',
                                     'HTTPStatusCode': 200,
                                     'HTTPHeaders': {
                                         'x-amzn-requestid': '0ffbdfb3-809f-539e-84dd-899024785f25',
                                         'date': 'Fri, 18 Oct 2019 11:31:51 GMT',
                                         'content-type': 'text/xml',
                                         'content-length': '892'}, 'RetryAttempts': 0}}
    if operation_name == 'DeleteMessage':
        return {'MD5OfMessageBody': 'a836c42e687e8a08e66a794a5dacd8c1',
                'ResponseMetadata': {'RequestId': '7313c686-bca3-5d79-9295-90a51d270c9c',
                                     'HTTPStatusCode': 200,
                                     'HTTPHeaders': {
                                         'x-amzn-requestid': '7313c686-bca3-5d79-9295-90a51d270c9c',
                                         'date': 'Fri, 18 Oct 2019 11:17:24 GMT',
                                         'content-type': 'text/xml', 'content-length': '378'},
                                     'RetryAttempts': 0}}
|
[
"[email protected]"
] | |
84bd69b3aecc431f55e1f816dbfe988f0e2443fc
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_2/mlxnas004/question1.py
|
615d9525446c91fd8b2b6c646c028b7d0a290c6e
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 279 |
py
|
#nasha meoli
#mlxnas004
#leap year
# A year is a leap year when it is divisible by 400, or divisible by 4 but
# not by 100 (Gregorian rule).
# Fix: parse the input with int() instead of eval() — eval() executes any
# expression the user types, which is both unsafe and unnecessary here.
x = int(input("Enter a year:\n"))
condition_1 = x % 400   # 0 when divisible by 400
condition_2 = x % 4     # 0 when divisible by 4
condition_3 = x % 100   # non-zero when NOT divisible by 100
if (condition_1 == 0) or ((condition_2 == 0) and (condition_3 >= 1)):
    print(x, "is a leap year.")
else:
    print(x, "is not a leap year.")
|
[
"[email protected]"
] | |
e4275df4e69cf6565d2afddbef18539b2d4d99f3
|
4f875744ccae8fa9225318ce16fc483b7bf2735e
|
/google/findDuplicate.py
|
44e01dd1b67af92eaf0af5a61e728e840331fdcb
|
[] |
no_license
|
nguyenngochuy91/companyQuestions
|
62c0821174bb3cb33c7af2c5a1e83a60e4a29977
|
c937fe19be665ba7ac345e1729ff531f370f30e8
|
refs/heads/master
| 2020-07-27T05:58:36.794033 | 2020-04-10T20:57:15 | 2020-04-10T20:57:15 | 208,893,527 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 663 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 2 20:34:41 2020
@author: huyn
"""
#609. Find Duplicate File in System
from typing import List
class Solution:
    def findDuplicate(self, paths: List[str]) -> List[List[str]]:
        """Group file paths whose content strings are identical (LC 609).

        Each entry of *paths* looks like
        "<dir> <name1>(<content1>) <name2>(<content2>) ...".
        Returns one list of full paths per content value that occurs in at
        least two files, in first-seen order of the content.
        """
        by_content = {}
        for entry in paths:
            directory, *tokens = entry.split()
            for token in tokens:
                name, _, tail = token.partition("(")
                content = tail.partition(")")[0]
                by_content.setdefault(content, []).append(directory + "/" + name)
        return [group for group in by_content.values() if len(group) >= 2]
|
[
"[email protected]"
] | |
0fdb7a7c501f03fb7f776e4965cd4da3243f4ed9
|
741ee09b8b73187fab06ecc1f07f46a6ba77e85c
|
/AutonomousSourceCode/data/raw/squareroot/7ab7bec6-576b-4910-98d1-ec30c84244ab__calculate_square.py
|
0bf1d0137076df117eaec3d77052d26dce255f54
|
[] |
no_license
|
erickmiller/AutomatousSourceCode
|
fbe8c8fbf215430a87a8e80d0479eb9c8807accb
|
44ee2fb9ac970acf7389e5da35b930d076f2c530
|
refs/heads/master
| 2021-05-24T01:12:53.154621 | 2020-11-20T23:50:11 | 2020-11-20T23:50:11 | 60,889,742 | 6 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,150 |
py
|
# calculate_square.py
# Python 2 Tkinter/ttk demo: a tiny GUI that squares the entered number.
from Tkinter import *
import ttk
def calculate_square(*args):
    # Callback for the Go! button and the <Return> key: read the entry,
    # square it, and publish the result through the output StringVar.
    # NOTE(review): a non-numeric entry makes float() raise — uncaught here.
    value_in = float(number_in.get())
    number_out.set(value_in * value_in)
root = Tk()
root.title('Calculate square')
# Single frame holding the whole layout; row/column weights let it stretch.
mainframe = ttk.Frame(root)
mainframe.grid(column=1, row=1, sticky=(N, E, S, W))
mainframe.columnconfigure(0, weight=1)
mainframe.rowconfigure(0, weight=1)
# Tk variables linking the entry widget (input) and label (output).
number_in = StringVar()
number_out = StringVar()
square_of_string_label = ttk.Label(mainframe, text='The square of')
square_of_string_label.grid(column=1, row=1, sticky=E)
number_in_entry = ttk.Entry(mainframe, width=5, textvariable=number_in)
number_in_entry.grid(column=2, row=1, sticky=(E, W))
is_string_label = ttk.Label(mainframe, text='is')
is_string_label.grid(column=1, row=2, sticky=E)
number_out_label = ttk.Label(mainframe, textvariable=number_out)
number_out_label.grid(column=2, row=2, sticky=W)
go_button = ttk.Button(mainframe, text='Go!', command=calculate_square)
go_button.grid(column=2, row=3, sticky=W)
# Uniform padding around every child widget.
for child in mainframe.winfo_children():
    child.grid_configure(padx=2, pady=2)
number_in_entry.focus()
root.bind('<Return>', calculate_square)
root.mainloop()
|
[
"[email protected]"
] | |
201ec0e778d39c619ca7d2db0f6caee17ddd1f95
|
d7363da78e6f1e8ae2c6abca3f845853756165d4
|
/src/adafruit_blinka/board/dragonboard_410c.py
|
a627309d6c32ff8ab6a13dc5b5cc9a989804b538
|
[
"MIT"
] |
permissive
|
adafruit/Adafruit_Blinka
|
7a9ed88f39ff12082d1b46647fa8869b541fba49
|
009b352a3234339000c32d2e61e830455cf389fa
|
refs/heads/main
| 2023-08-09T06:25:02.178935 | 2023-07-28T16:45:40 | 2023-07-28T16:45:40 | 120,540,744 | 398 | 331 |
MIT
| 2023-09-14T20:32:23 | 2018-02-07T00:25:03 |
Python
|
UTF-8
|
Python
| false | false | 972 |
py
|
# SPDX-FileCopyrightText: 2021 Melissa LeBlanc-Williams for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""Pin definitions for the Dragonboard 410c."""
from adafruit_blinka.microcontroller.snapdragon.apq8016 import pin
# Letter-labelled aliases (board silkscreen style) for the expansion GPIOs.
GPIO_A = pin.GPIO_36
GPIO_B = pin.GPIO_12
GPIO_C = pin.GPIO_13
GPIO_D = pin.GPIO_69
GPIO_E = pin.GPIO_115
GPIO_F = pin.PM_MPP_4
GPIO_G = pin.GPIO_24
GPIO_H = pin.GPIO_25
GPIO_I = pin.GPIO_35
GPIO_J = pin.GPIO_34
GPIO_K = pin.GPIO_28
GPIO_L = pin.GPIO_33
# Number-labelled aliases for the same pins (matching the SoC GPIO numbers).
GPIO_36 = pin.GPIO_36
GPIO_12 = pin.GPIO_12
GPIO_13 = pin.GPIO_13
GPIO_69 = pin.GPIO_69
GPIO_115 = pin.GPIO_115
GPIO_4 = pin.PM_MPP_4
GPIO_24 = pin.GPIO_24
GPIO_25 = pin.GPIO_25
GPIO_35 = pin.GPIO_35
GPIO_34 = pin.GPIO_34
GPIO_28 = pin.GPIO_28
GPIO_33 = pin.GPIO_33
# I2C buses: generic SDA/SCL point at bus 0.
SDA = pin.I2C0_SDA
SCL = pin.I2C0_SCL
I2C0_SDA = pin.I2C0_SDA
I2C0_SCL = pin.I2C0_SCL
I2C1_SDA = pin.I2C1_SDA
I2C1_SCL = pin.I2C1_SCL
# SPI bus 0.
SCLK = pin.SPI0_SCLK
MOSI = pin.SPI0_MOSI
MISO = pin.SPI0_MISO
SPI_CS = pin.SPI0_CS
|
[
"[email protected]"
] | |
3dcca22538909e4ca7c9e1f85a4a19c897d9ccc0
|
bf4178e73f0f83781be6784d7587cb34a38d6edd
|
/platform/radio/efr32_multiphy_configurator/pro2_chip_configurator/src/si4010_cfg_calc/si4010cfgcalcsecurity.py
|
3da55602e5855910430be093d1a8e3ae2b503b84
|
[] |
no_license
|
kolbertv/ZigbeeSiliconV3
|
80d70515e93be1413c24cdcb3485f50c65a1564b
|
ab0bd8d4bb6c1048adef81d0e66d96006c2fabd9
|
refs/heads/master
| 2023-01-02T07:18:01.393003 | 2020-10-25T15:33:08 | 2020-10-25T15:33:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,459 |
py
|
'''
Created on August 25, 2013
@author: shyang
'''
__all__ = ["Si4010CfgCalcSecurity"]
class Si4010CfgCalcSecurity(object):
    """Security configuration calculator for the Si4010.

    Holds the factory default OEM key / OEM ID-key tables and resolves the
    values actually used from the supplied ``inputs`` structure.
    """

    # Factory default 16-byte OEM keys, one row per OTP configuration number.
    OEM_Key16_Table = [
        [0x63, 0xB4, 0x92, 0xCD, 0x42, 0x20, 0x03, 0xBC, 0x73, 0x29, 0x09, 0xBB, 0xFF, 0x6A, 0xDC, 0x6D],
        [0x63, 0x4B, 0x92, 0xCD, 0x42, 0x20, 0x03, 0xBC, 0x73, 0x29, 0x09, 0xBB, 0xFF, 0x6A, 0xDC, 0x6D],
        [0x63, 0xB4, 0x29, 0xCD, 0x42, 0x20, 0x03, 0xBC, 0x73, 0x29, 0x09, 0xBB, 0xFF, 0x6A, 0xDC, 0x6D],
        [0x63, 0xB4, 0x92, 0xDC, 0x42, 0x20, 0x03, 0xBC, 0x73, 0x29, 0x09, 0xBB, 0xFF, 0x6A, 0xDC, 0x6D],
        [0x63, 0xB4, 0x92, 0xCD, 0x24, 0x20, 0x03, 0xBC, 0x73, 0x29, 0x09, 0xBB, 0xFF, 0x6A, 0xDC, 0x6D],
        [0x63, 0xB4, 0x92, 0xCD, 0x42, 0x02, 0x03, 0xBC, 0x73, 0x29, 0x09, 0xBB, 0xFF, 0x6A, 0xDC, 0x6D],
        [0x63, 0xB4, 0x92, 0xCD, 0x42, 0x20, 0x30, 0xBC, 0x73, 0x29, 0x09, 0xBB, 0xFF, 0x6A, 0xDC, 0x6D],
        [0x63, 0xB4, 0x92, 0xCD, 0x42, 0x20, 0x03, 0xCB, 0x73, 0x29, 0x09, 0xBB, 0xFF, 0x6A, 0xDC, 0x6D],
    ]

    # Factory default 32-bit OEM ID keys (all zero), indexed like the table above.
    OEM_ID_KEY_Table = [0x00000000] * 9

    def __init__(self, inputs):
        """Resolve the OEM key material from *inputs* and build the cfg dicts."""
        self.cfg = {}
        self.cfg_PQ_file = {}
        # TODO check  (kept from original: fallback-selection logic unverified)
        supplied_key = inputs.security.OEM_Key
        if supplied_key == []:
            # No key supplied: fall back to the factory default for this OTP cfg.
            self.Key16 = self.OEM_Key16_Table[inputs.topLevelSetup.OTPcfgNum]
        else:
            self.Key16 = supplied_key
        self.cfg['bOEM_Key[16]'] = self.Key16
        self.cfg_PQ_file['bOEM_Key[16]'] = self.Key16
        supplied_id = inputs.security.OEM_ID_Key
        self.ID_Key = (self.OEM_ID_KEY_Table[inputs.topLevelSetup.OTPcfgNum]
                       if supplied_id == 0 else supplied_id)
        self.cfg['lOEM_ID_Key'] = self.ID_Key

    def get_ID_Key(self, index):
        """Return the factory default OEM ID key for *index*."""
        return self.OEM_ID_KEY_Table[index]

    def get_Key16(self, index):
        """Return the factory default 16-byte OEM key for *index*."""
        return self.OEM_Key16_Table[index]

    def get_cfg_data(self):
        """Return the resolved configuration dictionary."""
        return self.cfg

    def dump(self):
        """Print the resolved configuration values for debugging."""
        print(' ------------- configuration data -------------')
        for m in self.cfg:
            print(' {} = {}'.format(m, self.cfg[m]))
|
[
"[email protected]"
] | |
8f9c7c45bf173c6b1593881386614ed222c6c593
|
2bf43e862b432d44ba545beea4e67e3e086c1a1c
|
/tests/nemo_text_processing/zh/test_char.py
|
1ca553eca3d027fe254df28f4d9b682ca08f9b57
|
[
"Apache-2.0"
] |
permissive
|
ericharper/NeMo
|
719e933f6ffce1b27358bc21efe87cdf144db875
|
f1825bc4b724b78c2d6ca392b616e8dc9a8cde04
|
refs/heads/master
| 2022-10-06T01:45:21.887856 | 2022-09-14T19:09:42 | 2022-09-14T19:09:42 | 259,380,135 | 1 | 0 |
Apache-2.0
| 2022-09-20T18:01:57 | 2020-04-27T15:54:20 |
Python
|
UTF-8
|
Python
| false | false | 1,257 |
py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.text_normalization.normalize import Normalizer
from parameterized import parameterized
from ..utils import CACHE_DIR, parse_test_case_file
class TestChar:
    """Character-level text-normalization tests for the Chinese ('zh') normalizer."""

    # Shared normalizer; built once at class-definition time so every
    # parameterized case reuses it.
    normalizer_zh = Normalizer(lang='zh', cache_dir=CACHE_DIR, overwrite_cache=False, input_case='cased')

    @parameterized.expand(parse_test_case_file('zh/data_text_normalization/test_cases_char.txt'))
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_norm_char(self, test_input, expected):
        # Each (input, expected) pair comes from the test-case file above.
        preds = self.normalizer_zh.normalize(test_input)
        assert expected == preds
|
[
"[email protected]"
] | |
2bb192e13d0b897544b36848f736cf1666918f37
|
e8160ba62759fc390daf60d88146e95c0c0de1b4
|
/TestDjangoORM/settings.py
|
97366c9073674155c60edddae7971a54bbb699fe
|
[] |
no_license
|
imranq2/TestDjangoORM
|
2a3a72aff36f03b6e2bb1a0f394a3499d2607bba
|
8d51d772f42635c0dbbd1d462057defaa9cdfbff
|
refs/heads/master
| 2023-01-05T23:07:07.662717 | 2020-11-03T04:36:44 | 2020-11-03T04:36:44 | 309,496,480 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,510 |
py
|
"""
Django settings for TestDjangoORM project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@f5a-qggnb9d=y^%tcto40rnxzb=6kq5)=077s*9in+$wx&y37'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'polls.apps.PollsConfig',
# Django stuff
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'TestDjangoORM.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates']
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'TestDjangoORM.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
LOGGING = {
'version': 1,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django.db.backends': {
'level': 'DEBUG',
},
},
'root': {
'handlers': ['console'],
}
}
|
[
"[email protected]"
] | |
afdfc45217af92feca35e8df5f3b06c51cf1a18f
|
32cb84dd41e4be24c065bb205f226f9b121a6db2
|
/feedback/urls.py
|
523511566940bbd365ca5900079a62fd10f87512
|
[] |
no_license
|
InformatykaNaStart/staszic-sio2
|
b38fda84bd8908472edb2097774838ceed08fcfa
|
60a127e687ef8216d2ba53f9f03cfaa201c59e26
|
refs/heads/master
| 2022-06-29T11:09:28.765166 | 2022-06-13T21:56:19 | 2022-06-13T21:56:19 | 115,637,960 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 145 |
py
|
from django.conf.urls import patterns, include, url
import views
# Non-contest route: /staszic/judging/<jid>/ -> views.judging, with the
# numeric `jid` captured as a named group.
noncontest_patterns = [
    url(r'^staszic/judging/(?P<jid>\d+)/$', views.judging),
]
|
[
"[email protected]"
] | |
4c84bb0dd04ef0a5558dab96f89e9a850724abde
|
0386591b51fdbf5759faef6afb8729b64a3f1589
|
/layerserver/widgets/modificationdate.py
|
3b9aab935d42e5aa5a0047c815f565c8306afad5
|
[
"BSD-3-Clause"
] |
permissive
|
giscube/giscube-admin
|
1e155402e094eb4db1f7ca260a8d1402e27a31df
|
4ce285a6301f59a8e48ecf78d58ef83c3827b5e0
|
refs/heads/main
| 2023-07-11T17:23:56.531443 | 2023-02-06T15:12:31 | 2023-02-06T15:12:31 | 94,087,469 | 7 | 1 |
BSD-3-Clause
| 2023-07-07T13:22:09 | 2017-06-12T11:12:56 |
Python
|
UTF-8
|
Python
| false | false | 556 |
py
|
from datetime import datetime
from django.utils.timezone import get_current_timezone
from .date import DateWidget
class ModificationDateWidget(DateWidget):
    """Read-only date widget that stamps today's date on every update."""

    base_type = 'date'

    @staticmethod
    def update(request, instance, validated_data, widget):
        """Overwrite the widget's field with the current date in the active timezone."""
        today = datetime.now(tz=get_current_timezone()).date()
        validated_data[widget['name']] = today

    @staticmethod
    def is_valid(cleaned_data):
        """Require the widget to be configured read-only, then defer to DateWidget."""
        if cleaned_data['readonly']:
            return DateWidget.is_valid(cleaned_data)
        return ModificationDateWidget.ERROR_READONLY_REQUIRED
|
[
"[email protected]"
] | |
6f1547fab3b6b91f274d8e7a04e2ac3e28693ae2
|
3b593b412c663a34784b1f60ad07cd2ee6ef87d1
|
/month01/python base/day12/code03.py
|
19ca59f6f051da2f348473bcdba1941fb51fd14e
|
[] |
no_license
|
ShijieLiu-PR/Python_Learning
|
88694bd44aeed4f8b022202c1065342bd17c26d2
|
ed01cc0956120ea287c51667604db97ff563c829
|
refs/heads/master
| 2023-05-22T16:35:24.252313 | 2021-06-16T10:56:21 | 2021-06-16T10:56:21 | 337,445,284 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 404 |
py
|
"""
运算符重载
"""
print("a" + "b")
class Vector:
    """One-dimensional vector supporting ``vector + scalar``."""

    def __init__(self, x):
        # Magnitude along the single axis.
        self.x = x

    def __add__(self, other):
        # Return a fresh Vector instead of mutating self, so that
        # ``v + n`` behaves like ordinary value addition.
        return Vector(self.x + other)

    def __str__(self):
        return "Vector(%d)" % self.x
v01 = Vector(10)
v02 = v01 + 5
# __add__ returns a new object, so the two ids differ.
print(id(v01))
print(id(v02))
# print() calls Vector.__str__.
print(v01)
print(v02)
|
[
"[email protected]"
] | |
b62b9d12528fab30ba13d52d4ab9d783c4f58689
|
e7c84801d7755806e58795d5fe51f7a924815ffc
|
/python-image-watermark/python-watermark-image.py
|
86d4a5401ae25cc33b68205ae57687d2b72853e3
|
[] |
no_license
|
c0c1/python-image
|
3454b37b3e0339fd3e204a38d7aa14c885e10e38
|
b785801589722571ac7ed8ad4428b4d04f518a2b
|
refs/heads/master
| 2023-06-04T23:36:17.974408 | 2021-06-21T12:38:23 | 2021-06-21T12:38:23 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 998 |
py
|
import os, sys
from PIL import Image, ImageDraw, ImageFont
img_dir = "images/non-watermark/"
dirs = os.listdir( img_dir )
for img in dirs:
if os.path.isfile(img_dir + img):
#Create an Image Object from an Image
im = Image.open(img_dir + img)
#Image width and height
width, height = im.size
#Image name
img_name = os.path.basename(img_dir + img)
#print(img_name)
text = "{roytuts.com}"
font = ImageFont.truetype('arial.ttf', 30)
draw = ImageDraw.Draw(im)
textwidth, textheight = draw.textsize(text, font)
#Right bottom corner with margin 5 from right
margin = 5
#x = width - textwidth - margin
#y = height - textheight - margin
#Center of the image
x = (width - textwidth)/2 #center
y = (height - textheight)/2 #center
#draw.text((x, y), text, font=font)
draw.text((x, y), text, font=font, fill=(254, 130, 75, 15))
#im.show() //Will display in the image window
#Save watermarked image
im.save('images/watermark/' + img_name)
|
[
"[email protected]"
] | |
2d24087778240384516917c28596440c2aed5e2b
|
8520c991dc543f5f4e1efe59ab401824173bb985
|
/332-reconstruct-itinerary/solution.py
|
9deb98ca04053efa355f326607f4c90351f51542
|
[] |
no_license
|
katryo/leetcode
|
d44f70f2853c4f5ea9a462d022feb0f5436c2236
|
0da45559271d3dba687858b8945b3e361ecc813c
|
refs/heads/master
| 2020-03-24T12:04:53.859047 | 2020-02-18T04:27:55 | 2020-02-18T04:27:55 | 142,703,107 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 924 |
py
|
from collections import defaultdict
import heapq
class Solution:
    def findItinerary(self, tickets):
        """Reconstruct the lexicographically-smallest itinerary from 'JFK'
        using every ticket exactly once (LeetCode 332).

        Hierholzer's algorithm: greedily follow the smallest available
        destination (min-heap per airport); an airport is emitted once its
        outgoing tickets are exhausted, and the emission order reversed is
        the itinerary.

        Fix: the original accumulated the answer with ans.insert(0, dep),
        which is O(n) per insertion (O(n^2) total); append + reverse is O(n).
        """
        dests = defaultdict(list)
        for src, dest in tickets:
            heapq.heappush(dests[src], dest)
        route = []

        def dfs(dep):
            # Consume all remaining tickets out of `dep`, cheapest first.
            arrivals = dests[dep]
            while arrivals:
                dfs(heapq.heappop(arrivals))
            route.append(dep)

        dfs('JFK')
        route.reverse()
        return route
# Smoke test: should print ['JFK', 'MUC', 'LHR', 'SFO', 'SJC'].
s = Solution()
print(s.findItinerary([["MUC", "LHR"], ["JFK", "MUC"], ["SFO", "SJC"], ["LHR", "SFO"]]))
|
[
"[email protected]"
] | |
cc81969fe3c3463a9a336a1e77f56a7592cde567
|
b91bd5b0954776fd186bf064a87fb8f7ffa4a58a
|
/python2/flask/flask_fun/flask_table/server.py
|
5c146559aa55798c1023ee96a350f5061e5a2f4d
|
[] |
no_license
|
ronaldaguerrero/practice
|
ddf1f41b693110cebe4d52e29910909f3ba21115
|
38627fddd8f79e6fb50c05a0e4e8d27a92146e1b
|
refs/heads/master
| 2023-01-23T17:06:18.642983 | 2019-09-13T05:01:48 | 2019-09-13T05:01:48 | 186,157,588 | 0 | 0 | null | 2023-01-07T09:40:40 | 2019-05-11T16:40:12 |
Python
|
UTF-8
|
Python
| false | false | 564 |
py
|
# import things
from flask_table import Table, Col
# Declare your table
class ItemTable(Table):
    # Column declarations; the string is the header shown in the HTML table.
    name = Col('Name')
    description = Col('Description')
# Get some objects
class Item(object):
    """Plain value object: one table row with a name and a description."""

    def __init__(self, name, description):
        self.name, self.description = name, description
# Sample rows for the demo.
items = [Item('Name1', 'Description1'),
Item('Name2', 'Description2'),
Item('Name3', 'Description3')]
# Populate the table
table = ItemTable(items)
# Print the html
print(table.__html__())
# or just {{ table }} from within a Jinja template
|
[
"[email protected]"
] | |
51e6d0b64816e845f3804107099f83eb52511405
|
030cea4006a4ff559f23cb3b3c31cd038ed2e332
|
/week11/hh_back/api/migrations/0001_initial.py
|
ff433e7b38b000547c461e4b1354c718d2bfa422
|
[] |
no_license
|
ayananygmetova/Web-Dev-2020
|
f8834e0ee26f0f0f06d0e3a282c73b373954a430
|
957bca91554f015e9a3d13b4ec12e64de7ac633e
|
refs/heads/master
| 2023-01-22T16:49:39.857983 | 2020-03-31T10:09:54 | 2020-03-31T10:09:54 | 236,937,810 | 1 | 0 | null | 2023-01-07T16:34:35 | 2020-01-29T08:41:10 |
Python
|
UTF-8
|
Python
| false | false | 669 |
py
|
# Generated by Django 3.0.4 on 2020-03-31 07:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the `Company` table."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Company',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=300)),
                ('description', models.TextField(default='')),
                ('city', models.CharField(max_length=200)),
                ('address', models.TextField(default='')),
            ],
        ),
    ]
|
[
"[email protected]"
] | |
f20231cfc5c8195e5135526087d532d334a0c5fa
|
9907b3dd74d1aedbed5243105649f0acd8e965d8
|
/demo/pytorch_laguerre.py
|
0aded5c456579f8f7de77004c4e2c77956273df5
|
[
"MIT"
] |
permissive
|
shubhampachori12110095/OrthNet
|
68c7442c448acdca2b0f2fbef0709efec280be4c
|
74824c1858e14f023d3f0251910f223d6b8672ce
|
refs/heads/master
| 2021-01-25T13:12:07.142646 | 2018-02-28T15:18:38 | 2018-02-28T15:18:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 911 |
py
|
# Demo: evaluate Laguerre polynomial tensors with orthnet's PyTorch backend
# and plot the 1-D curves plus one component of the 2-D tensor as a surface.
import sys
sys.path.append('../')  # make the in-repo `orthnet` package importable
from orthnet.pytorch import laguerre_tensor, multi_dim_laguerre_tensor
import torch
# NOTE(review): torch.autograd.Variable is a deprecated no-op wrapper on
# modern PyTorch — plain tensors would do; kept for the pinned version.
from torch.autograd import Variable
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
order1 = 5  # highest 1-D polynomial order plotted
order2 = 3  # order used for the two-variable tensor
# 100 sample points in [-1, 1], shaped (100, 1) for the tensor builders.
x1_data = np.linspace(-1, 1, 100).reshape((-1, 1))
x2_data = np.linspace(-1, 1, 100).reshape((-1, 1))
x1 = Variable(torch.Tensor(x1_data))
x2 = Variable(torch.Tensor(x2_data))
y1 = laguerre_tensor(n = order1, x = x1)
y2 = multi_dim_laguerre_tensor(n = order2, var = [x1, x2])
z1 = y1.data.numpy()
z2 = y2.data.numpy()
# Figure 1: one labelled curve per polynomial order 0..order1.
fig1 = plt.figure()
ax1 = fig1.gca()
for i in range(order1+1):
    ax1.plot(x1_data, z1[:, i], label = 'n = '+str(i))
ax1.legend()
ax1.grid(True)
# Figure 2: 3-D surface of one 2-D tensor component over the (x1, x2) grid.
fig2 = plt.figure()
ax2 = fig2.gca(projection='3d')
x1_data, x2_data = np.meshgrid(x1_data, x2_data)
ax2.plot_surface(X = x1_data, Y = x2_data, Z = z2[:, -2])
plt.show()
|
[
"[email protected]"
] | |
d0eb44f47aea9e440d8ce9e2190b0d49f9f3822d
|
94b101b38acb682422b8e26ff09527e1102e6524
|
/project/users/views.py
|
4ae6702c4b12933ac5fa836b8207dbb98b6bbb8b
|
[] |
no_license
|
mjoze/Web-App
|
f0ff12118510cb5bfa6d4ff5541194b184848c41
|
8f5c237231d35d87a77cf9dffa7261c19f81dec7
|
refs/heads/master
| 2020-12-23T02:47:06.241269 | 2020-03-07T14:34:54 | 2020-03-07T14:34:54 | 237,010,747 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,493 |
py
|
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm
def register(request):
    """Handle user sign-up.

    GET renders an empty registration form. POST validates the submission;
    on success the user is saved, a success message is flashed and the
    client is redirected to the login page. An invalid POST re-renders the
    bound form with its errors.
    """
    if request.method == 'POST':
        form = UserRegisterForm(request.POST)
        if form.is_valid():
            form.save()
            # Fix: the message string carried an f-prefix with no
            # placeholders (lint F541), and a `username` local was fetched
            # but never used — both removed; the displayed text is unchanged.
            messages.success(
                request, 'Your account has been created! You are now able to log in.')
            return redirect('login')
    else:
        form = UserRegisterForm()
    return render(request, 'users/register.html', {'form': form})
@login_required
def profile(request):
    """Display and update the logged-in user's account and profile.

    POST validates the user form and the profile form together — both must
    be valid before either is saved — then redirects back to the profile
    page (Post/Redirect/Get, so a refresh cannot resubmit). GET shows the
    forms pre-filled from the current user.
    """
    if request.method == 'POST':
        u_form = UserUpdateForm(request.POST, instance=request.user)
        p_form = ProfileUpdateForm(request.POST,
                                   request.FILES,
                                   instance=request.user.profile)
        if u_form.is_valid() and p_form.is_valid():
            u_form.save()
            p_form.save()
            # Fix: dropped the pointless f-prefix (no placeholders, F541);
            # the displayed text is unchanged.
            messages.success(
                request, 'Your account has been updated')
            return redirect('profile')
    else:
        u_form = UserUpdateForm(instance=request.user)
        p_form = ProfileUpdateForm(instance=request.user.profile)
    context = {
        'u_form': u_form,
        'p_form': p_form
    }
    return render(request, 'users/profile.html', context)
|
[
"[email protected]"
] | |
f58dc5c06357b947dad8b998e8070480de396f5d
|
d47b841f7e64d83cebbe63a25bac47adc495a760
|
/test/test_box_score_teams_overall.py
|
74d000b20e37922744080d08c234957c32e396ab
|
[] |
no_license
|
CiscoNeville/cfbd-python
|
810029240de30a2b7a205cbc3bb009599481206c
|
5775ff7ce7464e881f1940a7c0a534b0c26c1ce8
|
refs/heads/master
| 2023-09-04T18:27:23.773119 | 2021-11-19T01:49:07 | 2021-11-19T01:49:07 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,071 |
py
|
# coding: utf-8
"""
College Football Data API
This is an API for accessing all sorts of college football data. It currently has a wide array of data ranging from play by play to player statistics to game scores and more. # noqa: E501
OpenAPI spec version: 2.4.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import cfbd
from cfbd.models.box_score_teams_overall import BoxScoreTeamsOverall # noqa: E501
from cfbd.rest import ApiException
class TestBoxScoreTeamsOverall(unittest.TestCase):
    """BoxScoreTeamsOverall unit test stubs"""
    # Swagger-codegen generated skeleton: setUp/tearDown are intentionally
    # empty and the test body is a placeholder awaiting real fixtures.
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testBoxScoreTeamsOverall(self):
        """Test BoxScoreTeamsOverall"""
        # FIXME: construct object with mandatory attributes with example values
        # model = cfbd.models.box_score_teams_overall.BoxScoreTeamsOverall() # noqa: E501
        pass
if __name__ == '__main__':
    unittest.main()
|
[
"[email protected]"
] | |
1d8762c60b7af569450421e970799689990cf863
|
69a8a88c99f5c401b188ce7637174c19a3ed48d8
|
/0x0A-python-inheritance/10-square.py
|
9f90ed3be2ee071cbcc079312aa9f6543eda60d0
|
[] |
no_license
|
JDorangetree/holbertonschool-higher_level_programming
|
0546b25726052a8ce6468781f933eb28d1aee30d
|
f984f5047f690d352c7f203ef16aa7f0cc49afcd
|
refs/heads/master
| 2020-09-29T01:22:22.387395 | 2020-05-16T23:35:12 | 2020-05-16T23:35:12 | 226,912,872 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 495 |
py
|
#!/usr/bin/python3
""" Class that inherits from Rectangle """
Rectangle = __import__('9-rectangle').Rectangle
class Square(Rectangle):
    """Square defined through inheritance from the Rectangle class."""

    def __init__(self, size):
        """Validate *size*, remember it, and initialise the base Rectangle."""
        self.integer_validator("size", size)
        self.__size = size
        super().__init__(size, size)

    def area(self):
        """Return the surface area of the square."""
        return self.__size * self.__size
|
[
"[email protected]"
] | |
78b373ee16f0efc70102408817bb21f313d8525e
|
fdcbf5b203f07cceefbb38a746f4a43b322e263e
|
/Python/findNeighbors_of_Nulls.py
|
52f8694848396c9527b570609bc2724e421599bd
|
[] |
no_license
|
Masoumeh/0390.IbnAhmadMuqaddasi.AhsanTaqasim
|
e7a3eddc895edb79f8d93c1bd0f09f130a761858
|
592720e5a154fcfe9cdab84b16eaf5574f30b806
|
refs/heads/master
| 2021-01-18T00:36:09.962622 | 2017-11-07T13:13:46 | 2017-11-07T13:13:46 | 45,922,253 | 0 | 0 | null | 2015-11-10T15:49:02 | 2015-11-10T15:49:02 | null |
UTF-8
|
Python
| false | false | 1,682 |
py
|
"""
To get some information from the route network graph, like how far are the first two neighbours (with coords) of a node (without coords)
"""
from networkx.readwrite import json_graph
import io, json, csv
import re
import networkx as nx
import sys
import operator
import compose_graphs as cg
def findNeighbors_of_Nulls(G, writer):
    """For every node whose lat/lng attributes are the string "null", find up
    to two distance-1 neighbours that do have coordinates and, when exactly
    two are found, write ``[node, [[neighbour, 1], [neighbour, 1]]]`` to
    ``writer`` (a csv.writer-style object).

    NOTE(review): uses the pre-networkx-2.4 ``G.node`` accessor -- confirm the
    installed networkx version still provides it.
    """
    #G = nx.Graph()
    #G = cg.composeGraphs(textRoutes, cornuRoutes, cornuPlaces)
    '''with open(fileName, 'r') as meterFile:
        distReader = csv.reader(meterFile, delimiter=',')
        next(distReader, None)
        for row in distReader:
            G.add_node(row[0], lat=row[1], lng=row[2])
            G.add_node(row[3], lat=row[4], lng=row[5])
            G.add_edge(row[0],row[3], length= row[-1])'''
    coord_neighbors = {}  # NOTE(review): never used below -- candidate for removal
    # Nodes that have no coordinates at all ("null" in both fields).
    nulls = [n for n in G.nodes() if G.node[n]['lat'] == "null" and G.node[n]['lng'] == "null"]
    print(len(nulls))
    for node in nulls:
        # Hop-count distance from `node` to every reachable node.
        length = nx.single_source_shortest_path_length(G, node)
        sorted_length = sorted(length.items(), key=operator.itemgetter(1))
        neighCoords = []
        # exclude the first item of the list from the loop, which is the node
        # itself with a distance of zero from the node, i.e. ('node', 0)
        for l in sorted_length[1:]:
            # check the distance of the node from the neighbor and whether the
            # neighbor has coordinates
            if l[1] == 1 and G.node[l[0]]['lat'] != "null" and G.node[l[0]]['lng'] != "null":
                # add the neighbor to the array
                neighCoords.append([l[0], l[1]])
                # stop once two coordinate-bearing neighbours are collected
                if len(neighCoords) >= 2:
                    break
        if len(neighCoords) == 2:
            writer.writerow([node, neighCoords])
|
[
"[email protected]"
] | |
bbef2beee7c94d588e9831ccbb760157f2f2e422
|
6915d6a20d82ecf2a2a3d3cd84ca22dab2491004
|
/advtempproject/advtempproject/wsgi.py
|
507d246211545d55217dfb1767569eb090224823
|
[] |
no_license
|
iitian-gopu/django
|
bb4302d101f4434fb61ab374807e29699a432e42
|
31db982212bbb453cc4c56c7f5cfad9a00cd231d
|
refs/heads/master
| 2023-05-14T07:22:35.176477 | 2021-06-04T04:43:26 | 2021-06-04T04:43:26 | 366,114,402 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 406 |
py
|
"""
WSGI config for advtempproject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module (only if not already set).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "advtempproject.settings")

# Module-level WSGI callable that application servers look up.
application = get_wsgi_application()
|
[
"[email protected]"
] | |
102056145a28eec5b448b8975f4633f44a628b6a
|
3c000380cbb7e8deb6abf9c6f3e29e8e89784830
|
/venv/Lib/site-packages/cobra/modelimpl/synthetic/rttoaobj.py
|
f85a0bd999b0746da1b151ecd36cc2f7a907ac50
|
[] |
no_license
|
bkhoward/aciDOM
|
91b0406f00da7aac413a81c8db2129b4bfc5497b
|
f2674456ecb19cf7299ef0c5a0887560b8b315d0
|
refs/heads/master
| 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,526 |
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RtToAObj(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = TargetRelationMeta("cobra.model.synthetic.RtToAObj", "cobra.model.synthetic.SwCTestObj")
meta.moClassName = "syntheticRtToAObj"
meta.rnFormat = "rttoAObj"
meta.category = MoCategory.RELATIONSHIP_FROM_LOCAL
meta.label = "Synthetic Sw C Test Object"
meta.writeAccessMask = 0x0
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.synthetic.IfcCTestObj")
meta.parentClasses.add("cobra.model.synthetic.IfcTLTestObj")
meta.parentClasses.add("cobra.model.synthetic.SwCTestObj")
meta.parentClasses.add("cobra.model.synthetic.SwTLTestObj")
meta.superClasses.add("cobra.model.reln.From")
meta.superClasses.add("cobra.model.reln.Inst")
meta.rnPrefixes = [
('rttoAObj', False),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 20610, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 4272
prop.defaultValueStr = "syntheticSwCTestObj"
prop._addConstant("syntheticSwCTestObj", None, 4272)
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tDn", "tDn", 100, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("tDn", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"[email protected]"
] | |
dda9f5d4466062d8ad277427e9721c6efad04a50
|
e9d52dcf101aea0327c6b0d7e5244c91dfd62cf6
|
/spexy/bases/regular.py
|
ee2e4fd35ec1af3c62bc446c89556cd8cd5295c7
|
[] |
no_license
|
drufat/spexy
|
6eba9f44a5539245486cd4ef8fefd24bdb7ade6a
|
53255009c1830501986afbf6688142ddefe17b9a
|
refs/heads/master
| 2021-09-18T19:51:47.313946 | 2018-07-19T05:09:02 | 2018-07-19T05:09:02 | 100,453,374 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,976 |
py
|
# Copyright (C) 2010-2016 Dzhelil S. Rufat. All Rights Reserved.
from spexy.bases import basesimp
class BasesImp(basesimp.BasesImp):
def module(self):
return 'spexy.bases.circular'
def numbers(self):
N = self.N
N0 = N + 1
N1 = N
N0d = N
N1d = N + 1
return (N0, N1), (N0d, N1d)
def cells_index(self):
half = self.imp.half
i0 = lambda n: (n,)
i1 = lambda n: (n, n + 1)
id0 = lambda n: (n + half,)
id1 = lambda n: (n - half, n + half)
return (i0, i1), (id0, id1)
def points(self, n):
N = self.N
return self.imp.points_regular_clamped(N, n)
def bases(self, correct=True):
imp = self.imp
N, half = imp.S(self.N), imp.half
def corr0(kappa):
# primal boundary vertex
if correct:
return lambda N, n, x: kappa(N, n, x) * imp.correction0(N, n)
return kappa
# Bases Functions
kappa0 = lambda n: lambda x: corr0(imp.kappa)(N, n, x)
kappa1 = lambda n: lambda x: imp.kappa_star(N, n + half, x)
kappad0 = lambda n: lambda x: imp.kappa(N, n + half, x)
kappad1 = lambda n: lambda x: imp.kappa_star(N, n, x)
# Gradients
kappa0.grad = lambda n: lambda x: corr0(imp.kappa_grad)(N, n, x)
kappad0.grad = lambda n: lambda x: imp.kappa_grad(N, n + half, x)
return (kappa0, kappa1), (kappad0, kappad1)
def boundary(self):
pi = self.imp.pi
return None, (0, pi)
def run_kappa():
"""
>>> from sympy.abc import x
>>> (kappa0, kappa1), (kappad0, kappad1) = BasesImp(2, 'sym').bases()
>>> kappa0(0)(x)
cos(x)/2 + cos(2*x)/4 + 1/4
>>> kappa0(1)(x)
-cos(2*x)/2 + 1/2
>>> kappa0(2)(x)
-cos(x)/2 + cos(2*x)/4 + 1/4
>>> kappa1(0)(x)
cos(x)/2 + 1/pi
>>> kappa1(1)(x)
-cos(x)/2 + 1/pi
>>> kappad0(0)(x)
sqrt(2)*cos(x)/2 + 1/2
>>> kappad0(1)(x)
-sqrt(2)*cos(x)/2 + 1/2
>>> kappad1(0)(x)
sqrt(2)*cos(x)/2 + cos(2*x)/2 + 1/pi
>>> kappad1(1)(x)
-cos(2*x)/2 + 1/pi
>>> kappad1(2)(x)
-sqrt(2)*cos(x)/2 + cos(2*x)/2 + 1/pi
"""
pass
def run(N):
"""
>>> run(1)
zero-form
[1, 0]
[0, 1]
one-form
[1]
dual zero-form
[1]
dual one-form
[1, 0]
[0, 1]
>>> run(2)
zero-form
[1, 0, 0]
[0, 1, 0]
[0, 0, 1]
one-form
[1, 0]
[0, 1]
dual zero-form
[1, 0]
[0, 1]
dual one-form
[1, 0, 0]
[0, 1, 0]
[0, 0, 1]
>>> run(3)
zero-form
[1, 0, 0, 0]
[0, 1, 0, 0]
[0, 0, 1, 0]
[0, 0, 0, 1]
one-form
[1, 0, 0]
[0, 1, 0]
[0, 0, 1]
dual zero-form
[1, 0, 0]
[0, 1, 0]
[0, 0, 1]
dual one-form
[1, 0, 0, 0]
[0, 1, 0, 0]
[0, 0, 1, 0]
[0, 0, 0, 1]
"""
from spexy.bases.symintegrals import run_integrals
run_integrals(BasesImp)(N)
|
[
"[email protected]"
] | |
e3be99e1c6547843b797fea330aa576499260d31
|
99a4e7a4db3a3e062c0b08a5462749a28f3f7a39
|
/core/utils/make_joint_dataset.py
|
592af25331103bb288cfcb090d2dcd893614f3bb
|
[] |
no_license
|
B-Step62/pytorch-motiongan-open
|
f85c1481363230826e9094e1c323ad90f0922744
|
4aefe2c427b88f357e8894d309ff46602e109001
|
refs/heads/master
| 2021-03-20T23:22:49.591472 | 2020-03-15T10:34:54 | 2020-03-15T10:34:54 | 247,241,734 | 4 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,651 |
py
|
import os
import sys
import math
import subprocess
import cv2
from collections import OrderedDict
import numpy as np
import core.utils.bvh_to_joint as btoj
BVH_ROOT = './data/bvh/Edi_Mocap_Data/Iwan_style_data'
OUT = './data/bvh/Edi_Mocap_Data/Iwan_style_data'
def main():
    """Mirror every .bvh file under BVH_ROOT into OUT and dump each motion
    as a .npy array next to its .bvh file.

    Side effects: creates directories/copies files under OUT and writes one
    ``.npy`` per ``.bvh`` found.
    """
    import shutil  # BUGFIX: shutil.copy is used below but was never imported

    # Collect all original bvh files (copying them when OUT differs from BVH_ROOT).
    root_depth = BVH_ROOT.count(os.path.sep)
    bvh_paths = []
    out_dir = OUT
    for (root, dirs, files) in os.walk(BVH_ROOT):
        for origin_file in files:
            if not origin_file.endswith('.bvh'):
                continue
            # Output path is 'out' + ('origin_path' - 'root')
            if BVH_ROOT != OUT:
                post = root.split(os.path.sep)[root_depth:]
                out_dir = OUT + ''.join([os.path.sep + p for p in post])
                if not os.path.exists(out_dir):
                    os.makedirs(out_dir)
                # If saving to a different directory, copy the original bvh.
                shutil.copy(os.path.join(root, origin_file),
                            os.path.join(out_dir, origin_file))
                bvh_paths.append(os.path.join(out_dir, origin_file))
            else:
                bvh_paths.append(os.path.join(root, origin_file))

    if not bvh_paths:
        # Robustness: avoid an IndexError on bvh_paths[0] when nothing was found.
        print('No .bvh files found under {}'.format(BVH_ROOT))
        return

    # The skeleton layout is shared by all files; derive it from the first one.
    skelton, non_end_bones, joints_to_index, permute_xyz_order = btoj.get_standard_format(bvh_paths[0])
    # Loop-invariant: depends only on the skeleton, so compute it once.
    # NOTE(review): non_zero_joint_to_index is never used -- confirm whether it
    # was meant to be passed to create_data instead of joints_to_index.
    _, non_zero_joint_to_index = btoj.cut_zero_length_bone(skelton, joints_to_index)
    for bvh_path in bvh_paths:
        format_data = btoj.create_data(bvh_path, skelton, joints_to_index)
        npy_path = os.path.splitext(bvh_path)[0] + '.npy'
        np.save(npy_path, format_data)
        print(npy_path, format_data.shape)
|
[
"[email protected]"
] | |
19e5e99b4598f9270e0cc992301e841753fd2870
|
c2b386e1d28c58efbb9d847098a87032e2cbacca
|
/products_app/init.py
|
930a8691b7c54fa99f1d8508a131fb4977bb6b31
|
[] |
no_license
|
jmlm74/P11-Ameliorez-un-projet-existant-en-Python
|
e6468342554f5c4aa03bc0bb954aa7995e98e293
|
28cd84698bf272e279bbf6e1d15211ef2a3c6403
|
refs/heads/master
| 2022-12-11T02:57:59.563283 | 2020-09-10T16:02:27 | 2020-09-10T16:02:27 | 290,844,109 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 845 |
py
|
# URL and parameters for the Open Food Facts API.
url = 'https://fr-en.openfoodfacts.org/cgi/search.pl'
# Base query: products sold in France, 300 results per page,
# sorted by number of unique scans.
params_off = {'search_simple': 1,
              'action': 'process',
              'json': 1,
              'page_size': 300,
              'page': 1,
              'tagtype_0': 'categories',
              'tag_contains_0': 'contains',
              'tag_0': 'cat',
              'tagtype_1': 'countries',
              'tag_contains_1': 'contains',
              'tag_1': 'france',
              'sort_by': 'unique_scans_n'
              }

# Categories to fetch.
categories = ['biscuits',
              'Crepes',
              'desserts',
              'sweetened-beverages', ]

# Brands to fetch, so that well-known products are included.
brands = {'coca cola',
          'ferrero',
          'pepsi'}

# Items per page for the paginator.
NB_ITEMS_PAGE = 12
|
[
"[email protected]"
] | |
cf3dc94deb898b3147c1d529a1fbf335561c2e0b
|
639d6a00e3a8fab07ce07fec408eef6bc050d21d
|
/indice_pollution/db.py
|
8c93d5b392a7f26b2474e3b88b9e22891432315d
|
[
"MIT"
] |
permissive
|
betagouv/indice_pollution
|
e04634e0b9c6d4ce24ffdc4c19868599995c1bd5
|
b85e53ca22d420e3d685fc84843d2011c6a696e4
|
refs/heads/master
| 2023-02-10T20:25:13.321999 | 2023-02-06T10:57:09 | 2023-02-06T10:57:09 | 250,297,957 | 4 | 1 |
MIT
| 2023-01-25T09:25:45 | 2020-03-26T15:33:02 |
Python
|
UTF-8
|
Python
| false | false | 218 |
py
|
from sqlalchemy import MetaData
from sqlalchemy.orm import declarative_base
# NOTE(review): a module-level ``global`` statement is a no-op, and ``Session``
# is never assigned in this module (only lowercase ``session`` is) -- confirm
# which spelling the rest of the package expects to import.
global engine, Session

# All ORM tables share the "indice_schema" database schema.
metadata = MetaData(schema="indice_schema")
Base = declarative_base(metadata=metadata)

# Placeholders: expected to be assigned elsewhere at application start-up.
engine = None
session = None
|
[
"[email protected]"
] | |
72b698651d6f869623903874a9cb46cd307ac5e2
|
05218d01394294bb7ede726bf3dc6f0691e4299b
|
/machineLearning/mini_Project/mini_project_0401.py
|
21e36071cf323a4a3e1726e08d32fe4925ed6a43
|
[] |
no_license
|
bitacademy-howl/anaconda_workspace
|
156556c52342b461ffb8304dfb55a845fff5ae90
|
d9dc5007b6e95fa0bf7b95a457cafe68a0167992
|
refs/heads/master
| 2020-03-23T12:10:14.872385 | 2018-08-10T10:04:55 | 2018-08-10T10:04:55 | 141,539,377 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,058 |
py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston
import os
# sklearn 에서 제공하는 학습용 데이터셋
data = load_boston()
print('=============================================================================')
print('================================ 데이터 타입 =================================')
print(type(data))
print('=============================================================================')
print('=============================================================================')
print(type(data.keys()), data.keys())
print('=============================================================================')
print('=============================== =설명서= ==================================')
print(data['DESCR'])
print('=============================================================================')
# 실제 값들만 존재하는 데이터셋
print('================================데이터 셋=====================================')
X = data['data']
print(X)
print('=============================================================================')
# 실제 데이터 필드에 컬럼명이 들어있지 않다.
print('=============================================================================')
header = data['feature_names']
print(header)
# 제공되는 데이터셋에 가격은 별도로 target 으로 제공되므로 dataframe을 만들때는 합쳐서 만든다....
print('=============================================================================')
Y = data['target']
Y = Y.reshape(-1, 1)
print(type(Y), Y)
print('=============================================================================')
# 실제 사용될 데이터 프레임 : 아직 헤더 포함되지 않음
df = pd.DataFrame(np.append(X, Y, axis=1))
print(df)
print('=============================================================================')
# 헤더에 header와 PRICE 컬럼명 추가
df.columns = np.append(header,'PRICE')
# 데이터 프레임에 헤더 추가
# 데이터프레임의 확인
print(df.head(5))
print(df.tail(5))
# 여러 통계치의 종합 선물세트
result_desc = df.describe()
print(result_desc)
#######################################################################################################
# 여기서 잠깐 번외로 통계치를 가지고
# 1. 박스플롯 그려보고
# 2. 분포도 그려보고
# # 1. 가격 분포도
# plt.hist(df['PRICE'],bins=100,color='green', density=True)
# plt.show()
# # 2.
# plt.boxplot([df['PRICE']],0)
# plt.show()
# 일단 이건 계속 해보고 생각해보쟈....
#######################################################################################################
# 각각의 컬럼간 상관관계
corr_df = np.round(df.corr(),3)
print(corr_df)
# ,marker='o',s=10
pd.plotting.scatter_matrix(df,alpha=0.8, diagonal='kde')
# os.chdir(r'D:\1. stark\temp')
#
# df.to_csv('data.csv',index=True)
|
[
"[email protected]"
] | |
dac61de3894ea89b441f9876d43b4e8b8e7aabcc
|
a7587f813492163433202e244df2237c9993a1a1
|
/Store/migrations/0003_variation.py
|
192756496452ac5feb5ca11e93277167f0ed89b4
|
[] |
no_license
|
kamran1231/E-COM-WEBSITE-2021
|
3a10bc0059f4d29fc52ee029e4919d4f965174c6
|
32214468cf716cc312a63f6346b8c844f720abda
|
refs/heads/master
| 2023-06-01T03:18:03.137405 | 2021-07-04T14:20:16 | 2021-07-04T14:20:16 | 381,634,544 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 935 |
py
|
# Generated by Django 3.2.4 on 2021-07-02 18:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds the ``Variation`` model to the Store app
    (per-product colour/size options)."""

    # Must run after the Product price alteration migration.
    dependencies = [
        ('Store', '0002_alter_product_price'),
    ]

    operations = [
        migrations.CreateModel(
            name='Variation',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Whether this row describes a 'color' or a 'size' option.
                ('variation_category', models.CharField(choices=[('color', 'color'), ('size', 'size')], max_length=100)),
                ('variation_value', models.CharField(max_length=100)),
                ('is_active', models.BooleanField(default=True)),
                ('created_date', models.DateTimeField(auto_now=True)),
                # Each variation belongs to exactly one product.
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Store.product')),
            ],
        ),
    ]
|
[
"[email protected]"
] | |
826ff29b8209c97f97229d3a9b5855b40d325524
|
1a166165ab8287d01cbb377a13efdb5eff5dfef0
|
/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/_azure_data_lake_storage_restapi.py
|
efb21f39026ffdd1e919cf6d1b8d713df2b94c91
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
manoj0806/azure-sdk-for-python
|
7a14b202ff80f528abd068bf50334e91001a9686
|
aab999792db1132232b2f297c76800590a901142
|
refs/heads/master
| 2023-04-19T16:11:31.984930 | 2021-04-29T23:19:49 | 2021-04-29T23:19:49 | 363,025,016 | 1 | 0 |
MIT
| 2021-04-30T04:23:35 | 2021-04-30T04:23:35 | null |
UTF-8
|
Python
| false | false | 2,804 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core import PipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from ._configuration import AzureDataLakeStorageRESTAPIConfiguration
from .operations import ServiceOperations
from .operations import FileSystemOperations
from .operations import PathOperations
from . import models
class AzureDataLakeStorageRESTAPI(object):
"""Azure Data Lake Storage provides storage for Hadoop and other big data workloads.
:ivar service: ServiceOperations operations
:vartype service: azure.storage.filedatalake.operations.ServiceOperations
:ivar file_system: FileSystemOperations operations
:vartype file_system: azure.storage.filedatalake.operations.FileSystemOperations
:ivar path: PathOperations operations
:vartype path: azure.storage.filedatalake.operations.PathOperations
:param url: The URL of the service account, container, or blob that is the targe of the desired operation.
:type url: str
"""
def __init__(
self,
url, # type: str
**kwargs # type: Any
):
# type: (...) -> None
base_url = '{url}'
self._config = AzureDataLakeStorageRESTAPIConfiguration(url, **kwargs)
self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.service = ServiceOperations(
self._client, self._config, self._serialize, self._deserialize)
self.file_system = FileSystemOperations(
self._client, self._config, self._serialize, self._deserialize)
self.path = PathOperations(
self._client, self._config, self._serialize, self._deserialize)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> AzureDataLakeStorageRESTAPI
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
|
[
"[email protected]"
] | |
06eb118e8879ca755ff7c592ecfb8c07b1333b91
|
553b34a101c54090e68f540d96369ac7d5774d95
|
/python/algo/src/minimum_cut.py
|
bf33b42a8714492e38de25c04a941877eafc0264
|
[
"MIT"
] |
permissive
|
topliceanu/learn
|
fd124e1885b5c0bfea8587510b5eab79da629099
|
1c5b1433c3d6bfd834df35dee08607fcbdd9f4e3
|
refs/heads/master
| 2022-07-16T19:50:40.939933 | 2022-06-12T15:40:20 | 2022-06-12T15:40:20 | 21,684,180 | 26 | 12 |
MIT
| 2020-03-26T20:51:35 | 2014-07-10T07:22:17 |
JavaScript
|
UTF-8
|
Python
| false | false | 2,668 |
py
|
# -*- coding: utf-8 -*-
import random
from src.maximum_flow import ford_fulkerson_maximum_flow
def pick_random_edge(graph):
    """Pick one edge uniformly at random from ``graph``."""
    return random.choice(graph.get_edges())
def contract(graph, edge):
    """ Composes a new vertex from the ends of the given edge.
    All the resulting self-loop edges are removed.

    Args:
        graph: a data structure containing all data and operations.
        edge: a tuple of format (tail, head, value)

    Returns:
        The graph after contracting value.
    """
    # split_edge yields the edge's endpoints; the value is not needed for the
    # contraction itself.
    (tail, head, value) = graph.split_edge(edge)
    # The merged super-vertex name joins both endpoint names with '_';
    # callers later split on '_' to recover the original vertex names.
    super_vertex = '{start}_{end}'.format(start=tail, end=head)

    # Remove individual vertices and add super-vertex.
    graph.rename_vertex(tail, super_vertex)
    graph.rename_vertex(head, super_vertex)
    return graph
def randomized_cut(graph):
    """ Finds a cut in a given graph using the random contraction algorithm
    defined by David Karger in '93.

    NOTE! This algorithm modifies the graph in place, so make sure you clone
    it before compacting if you don't want your original graph modified.

    Args:
        graph: a data structure containing all data and operations.

    Returns:
        The compacted graph.
    """
    # Repeatedly contract random edges until only two super-vertices remain;
    # the edges between them form the cut.
    while len(graph.get_vertices()) != 2:
        edge = pick_random_edge(graph)
        contract(graph, edge)
    return graph
def minimum_cut(graph, tries):
    """ Finds the minimum cut in the given graph after running the
    randomized cut algorithm a given number of tries.

    Args:
        graph: a data structure containing all vertices, edges and supported
            operations.
        tries: int, number of times to try the randomized cut algorithm.

    Returns:
        cuts, list of cut edges which produce the minimum cut.
    """
    min_cuts = []
    # BUGFIX: use range() instead of the Python-2-only xrange(); range()
    # behaves identically here on both Python 2 and Python 3.
    for __ in range(tries):
        # randomized_cut mutates its argument, so work on a clone.
        g = graph.clone()
        randomized_cut(g)
        [left_super_vertex, right_super_vertex] = g.get_vertices()
        left_vertices = set(left_super_vertex.split('_'))
        right_vertices = set(right_super_vertex.split('_'))

        # Recover the crossing edges from the *original* (unmutated) graph.
        cuts = []
        for left_vertex in left_vertices:
            right_neighbours = set(graph.neighbours(left_vertex))\
                .intersection(right_vertices)
            for right_vertex in right_neighbours:
                cuts.append((left_vertex, right_vertex))

        # Keep the smallest cut seen so far.
        if (len(min_cuts) == 0 or len(min_cuts) > len(cuts)):
            min_cuts = cuts
    return min_cuts
def minimum_cut_using_maximum_flow(graph, start, end):
    """ Solve the minimum cut problem by reducing it to maximum flow. """
    # TODO: not implemented. By max-flow/min-cut duality, the value of the
    # maximum start->end flow equals the capacity of the minimum s-t cut;
    # ford_fulkerson_maximum_flow (imported above) is the intended building block.
|
[
"[email protected]"
] | |
3bc48ad57dbf84c0d65a2c59a2f654b60f5b1089
|
a98bc512be9b9691200c6a0cc33a5fb7b4053c13
|
/com.ppc.Bot/devices/thermostat/thermostat_honeywell_lyric.py
|
4fc7ab8f7dd9ec9b7fd8517681898b5f9c38d9cf
|
[
"Apache-2.0"
] |
permissive
|
30s/botlab
|
c21682ed2c9aefc9cba688c6a8c136e9f969adc9
|
f7617147b65521a66ad88cdbc175176021a7a486
|
refs/heads/master
| 2020-04-04T18:39:00.776636 | 2018-10-04T04:56:32 | 2018-10-04T04:56:32 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,179 |
py
|
'''
Created on March 27, 2017
This file is subject to the terms and conditions defined in the
file 'LICENSE.txt', which is part of this source code package.
@author: David Moss
'''
# Device Model
# https://presence.atlassian.net/wiki/display/devices/Thermostat
from devices.thermostat.thermostat import ThermostatDevice
# Set the default rounding to 3 numbers.
from decimal import *
getcontext().prec = 1
class ThermostatHoneywellLyricDevice(ThermostatDevice):
    """Honeywell Lyric Thermostat Device"""

    # List of Device Types this class is compatible with
    DEVICE_TYPES = [4230]

    # Minimum setpoint in Celsius
    MIN_SETPOINT_C = 7.0

    # Maximum setpoint in Celsius
    MAX_SETPOINT_C = 29.0

    def get_device_type_name(self, language):
        """
        :return: the name of this device type in the given language, for example, "Entry Sensor"
        """
        # NOTE: Device type name
        return _("Honeywell Lyric Thermostat")

    def set_system_mode(self, botengine, system_mode, reliably=False):
        """
        Set the system mode
        :param botengine:
        :param system_mode:
        :param reliably: True to keep retrying to get the command through
        :return:
        """
        # NOTE(review): the caller's `reliably` flag is discarded and False is
        # always forwarded to the base class -- presumably deliberate for this
        # device model, but confirm; otherwise forward `reliably=reliably`.
        ThermostatDevice.set_system_mode(self, botengine, system_mode, reliably=False)

    def set_cooling_setpoint(self, botengine, setpoint_celsius, reliably=False):
        """
        Set the cooling setpoint
        :param botengine: BotEngine environment
        :param setpoint_celsius: Absolute setpoint in Celsius
        :param reliably: True to keep retrying to get the command through
        """
        # NOTE(review): same as above -- `reliably` is always forced to False.
        ThermostatDevice.set_cooling_setpoint(self, botengine, setpoint_celsius, reliably=False)

    def set_heating_setpoint(self, botengine, setpoint_celsius, reliably=False):
        """
        Set the heating set-point
        :param botengine: BotEngine environmnet
        :param setpoint_celsius: Temperature in Celsius
        :param reliably: True to keep retrying to get the command through
        """
        # NOTE(review): same as above -- `reliably` is always forced to False.
        ThermostatDevice.set_heating_setpoint(self, botengine, setpoint_celsius, reliably=False)
|
[
"[email protected]"
] | |
1d0b6383c33973c35589ec4404e85d7a6c72e8e8
|
8130c34d546c323d6d5d2ca6b4a67330af08828f
|
/.history/menu_app/models_20210105152309.py
|
ede59bfeb80ad64987ea4b2b9d2f75c6e48ba8a7
|
[] |
no_license
|
lienusrob/final
|
ba2dad086fc97b21b537ef12df834dfadd222943
|
f2726e31f1d51450e4aed8c74021c33679957b28
|
refs/heads/master
| 2023-02-15T01:36:54.463034 | 2021-01-07T12:47:05 | 2021-01-07T12:47:05 | 327,279,792 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,265 |
py
|
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.urls import reverse
import random
import string
from datetime import date, datetime
class ToppingsCategory(models.Model):
name = models.CharField(max_length=100)
type = models.CharField(max_length=100)
description = models.TextField(max_length=100, blank=True, null=True, default='')
def __str__(self):
return self.name
class Topping(models.Model):
name = models.CharField(max_length=100)
price = models.DecimalField(max_digits = 4, decimal_places=2, default=0)
category = models.ForeignKey(ToppingsCategory, on_delete = models.PROTECT, default=None)
def __str__(self):
return self.name
class ItemsCategory(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class MenuItem(models.Model):
name = models.CharField(max_length=22)
price = models.DecimalField(max_digits = 4, decimal_places=2)
category = models.ForeignKey(ItemsCategory, on_delete = models.PROTECT)
detail = models.TextField(max_length=1000, default = ' ')
# toppings = models.ManyToManyField(Topping, blank=True)
#image = models.ImageField(default=None, upload_to='', null=True, blank=True)
def __str__(self):
return self.name
class Extras(models.Model):
    """Free-text special requests attached to an order."""
    requests = models.TextField(max_length=400,)

    def __str__(self):
        # BUGFIX: this model has no ``name`` field, so ``self.name`` raised
        # AttributeError whenever an Extras instance was rendered; show the
        # request text instead.
        return self.requests
class Cart (models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
current = models.BooleanField(default=True)
date_ordered = models.DateTimeField(auto_now_add= True )
class CartItem (models.Model):
add_item = models.ForeignKey(MenuItem, on_delete= models.CASCADE)
quantity = models.IntegerField(default=0)
cart = models.ForeignKey(Cart, on_delete= models.CASCADE)
def __str__(self):
return self.add_item.name
#remove dont need
class OrderItem(models.Model):
item = models.ForeignKey(MenuItem, on_delete=models.SET_NULL, null=True)
price = models.DecimalField(max_digits = 4, decimal_places=2, default=0)
order_item_order = models.ForeignKey('menu_app.Order', on_delete=models.CASCADE, null=True)
#toppings = models.ManyToManyField(Topping, blank=True)
def __str__(self):
return self.item.name
def get_item_price(self):
self.price = sum(topping.price for topping in self.toppings.all()) + self.item.price
def get_all_topping_categories(self):
categories = []
for topping in self.toppings.all():
if not topping.category in categories:
categories.append(topping.category)
return categories
class Orders (models.Model):
cart = models.ForeignKey(Cart, on_delete=models.CASCADE)
placed = models.BooleanField(default=False)
def __str__ (self):
return self.cart.user.username
#old need to remove
class Order(models.Model):
    """Legacy order model (marked "old need to remove" above; kept for
    backward compatibility with existing data)."""
    customer = models.ForeignKey(User, on_delete=models.CASCADE)
    date_ordered = models.DateTimeField(default=timezone.now)
    items = models.ManyToManyField(MenuItem)
    order_items = models.ManyToManyField(OrderItem)
    total = models.DecimalField(max_digits=6, decimal_places=2, null=True)
    is_ordered = models.BooleanField(default=False)
    pickup_time = models.DateTimeField(default=timezone.now)
    special_instructions = models.TextField(max_length=256, blank=True)

    def __str__(self):
        return f'Order #{self.id} - {self.customer.username}'

    # # url to redirect to when submitting order form
    # def get_absolute_url(self):
    #     return reverse('orders:order_detail', kwargs={'pk':self.pk})

    def get_order_total(self):
        """Sum each order item's price and store the result on ``self.total``."""
        self.total = sum(order_item.price for order_item in self.order_items.all())

    def get_cart_items(self):
        """Return all menu items attached to this order."""
        return self.items.all()

    @staticmethod
    def generate_order_id():
        """Build a quasi-unique id: YYMMDD + current second + 3 random digits.

        BUGFIX: this helper took no ``self`` argument, so calling it on an
        instance raised TypeError; as a @staticmethod both
        ``Order.generate_order_id()`` (the previously working form) and
        ``order.generate_order_id()`` now work.
        """
        date_str = date.today().strftime('%Y%m%d')[2:] + str(datetime.now().second)
        rand_str = "".join([random.choice(string.digits) for count in range(3)])
        return date_str + rand_str

    # class Meta():
    #     ordering = ['-date_ordered']
|
[
"[email protected]"
] | |
31291fea928eb8e023f65781c71fa4432037efea
|
ba1eff6535027c16b9e1d399b96e7853bc1514dc
|
/tests/test_16_userinfo_endpoint.py
|
03ec0337b9ec1fd4207b1850726eb13b7fc2b0da
|
[
"Apache-2.0"
] |
permissive
|
sklemer1/oidcendpoint
|
09d06e4cf21113f74a78734cdd06c964aaed3c7d
|
bc2cd9222bd05aec7b7ba5c7c7f593c2143357f3
|
refs/heads/master
| 2020-03-30T12:24:20.500373 | 2018-10-04T13:42:31 | 2018-10-04T13:42:31 | 151,222,819 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,738 |
py
|
import json
import os
import pytest
import time
from oidcmsg.key_jar import build_keyjar
from oidcmsg.oidc import AccessTokenRequest
from oidcmsg.oidc import AuthorizationRequest
from oidcendpoint.client_authn import verify_client
from oidcendpoint.oidc import userinfo
from oidcendpoint.oidc.authorization import Authorization
from oidcendpoint.oidc.provider_config import ProviderConfiguration
from oidcendpoint.oidc.registration import Registration
from oidcendpoint.oidc.token import AccessToken
from oidcendpoint.authn_event import AuthnEvent
from oidcendpoint.endpoint_context import EndpointContext
from oidcendpoint.user_authn.authn_context import INTERNETPROTOCOLPASSWORD
from oidcendpoint.user_info import UserInfo
# Key definitions for the test key jar: one RSA signing key, one P-256 EC key.
KEYDEFS = [
    {"type": "RSA", "key": '', "use": ["sig"]},
    {"type": "EC", "crv": "P-256", "use": ["sig"]}
]
KEYJAR = build_keyjar(KEYDEFS)[1]
RESPONSE_TYPES_SUPPORTED = [
    ["code"], ["token"], ["id_token"], ["code", "token"], ["code", "id_token"],
    ["id_token", "token"], ["code", "token", "id_token"], ['none']]
# Advertised OP capabilities used when building the endpoint context.
CAPABILITIES = {
    "response_types_supported": [" ".join(x) for x in RESPONSE_TYPES_SUPPORTED],
    "token_endpoint_auth_methods_supported": [
        "client_secret_post", "client_secret_basic",
        "client_secret_jwt", "private_key_jwt"],
    "response_modes_supported": ['query', 'fragment', 'form_post'],
    "subject_types_supported": ["public", "pairwise"],
    "grant_types_supported": [
        "authorization_code", "implicit",
        "urn:ietf:params:oauth:grant-type:jwt-bearer", "refresh_token"],
    "claim_types_supported": ["normal", "aggregated", "distributed"],
    "claims_parameter_supported": True,
    "request_parameter_supported": True,
    "request_uri_parameter_supported": True,
}
# Canned authorization / token requests reused by the tests below.
AUTH_REQ = AuthorizationRequest(client_id='client_1',
                                redirect_uri='https://example.com/cb',
                                scope=['openid'],
                                state='STATE',
                                response_type='code')
TOKEN_REQ = AccessTokenRequest(client_id='client_1',
                               redirect_uri='https://example.com/cb',
                               state='STATE',
                               grant_type='authorization_code',
                               client_secret='hemligt')
TOKEN_REQ_DICT = TOKEN_REQ.to_dict()
BASEDIR = os.path.abspath(os.path.dirname(__file__))
def full_path(local_file):
    """Return *local_file* resolved relative to this test module's directory."""
    resolved = os.path.join(BASEDIR, local_file)
    return resolved
USERINFO = UserInfo(json.loads(open(full_path('users.json')).read()))
def setup_session(endpoint_context, areq):
    """Create an authorized session for request *areq* and return its session id."""
    # Fake a completed password authentication happening "now".
    authn_event = AuthnEvent(uid="uid", salt='salt',
                             authn_info=INTERNETPROTOCOLPASSWORD,
                             time_stamp=time.time())
    sid = endpoint_context.sdb.create_authz_session(authn_event, areq,
                                                    client_id='client_id')
    # Derive and store the subject identifier ('' = default sector identifier).
    endpoint_context.sdb.do_sub(sid, '')
    return sid
class TestEndpoint(object):
    """Tests for the userinfo endpoint, built on a full EndpointContext."""
    @pytest.fixture(autouse=True)
    def create_endpoint(self):
        # NOTE(review): this first assignment is immediately overwritten at the
        # end of the fixture; it looks redundant — confirm before removing.
        self.endpoint = userinfo.UserInfo(KEYJAR)
        # Minimal provider configuration wiring up all standard endpoints.
        conf = {
            "issuer": "https://example.com/",
            "password": "mycket hemligt",
            "token_expires_in": 600,
            "grant_expires_in": 300,
            "refresh_token_expires_in": 86400,
            "verify_ssl": False,
            "capabilities": CAPABILITIES,
            "jwks": {
                'url_path': '{}/jwks.json',
                'local_path': 'static/jwks.json',
                'private_path': 'own/jwks.json'
            },
            'endpoint': {
                'provider_config': {
                    'path': '{}/.well-known/openid-configuration',
                    'class': ProviderConfiguration,
                    'kwargs': {}
                },
                'registration': {
                    'path': '{}/registration',
                    'class': Registration,
                    'kwargs': {}
                },
                'authorization': {
                    'path': '{}/authorization',
                    'class': Authorization,
                    'kwargs': {}
                },
                'token': {
                    'path': '{}/token',
                    'class': AccessToken,
                    'kwargs': {}
                },
                'userinfo': {
                    'path': '{}/userinfo',
                    'class': userinfo.UserInfo,
                    'kwargs': {'db_file': 'users.json'}
                }
            },
            'client_authn': verify_client,
            # Authentication stub: every request is treated as user 'diana'.
            "authentication": [{
                'acr': INTERNETPROTOCOLPASSWORD,
                'name': 'NoAuthn',
                'kwargs': {'user': 'diana'}
            }],
            'template_dir': 'template'
        }
        endpoint_context = EndpointContext(conf, keyjar=KEYJAR)
        # Register the single test client.
        endpoint_context.cdb['client_1'] = {
            "client_secret": 'hemligt',
            "redirect_uris": [("https://example.com/cb", None)],
            "client_salt": "salted",
            'token_endpoint_auth_method': 'client_secret_post',
            'response_types': ['code', 'token', 'code id_token', 'id_token']
        }
        self.endpoint = userinfo.UserInfo(endpoint_context)
    def test_init(self):
        # The fixture ran and produced an endpoint.
        assert self.endpoint
    def test_parse(self):
        # Full round trip: session -> access token -> bearer-authenticated parse.
        session_id = setup_session(self.endpoint.endpoint_context, AUTH_REQ)
        _dic = self.endpoint.endpoint_context.sdb.upgrade_to_token(
            key=session_id)
        _req = self.endpoint.parse_request(
            {}, auth="Bearer {}".format(_dic['access_token']))
        assert set(_req.keys()) == {'client_id', 'access_token'}
|
[
"[email protected]"
] | |
48f6bf7eed3e7ed029e76a1561da9c2b9fd6b645
|
4488e3c26de4291da447d8251c491b43cb810f7c
|
/account_banking_payment_export/model/payment_mode.py
|
798c8ed20daab08128d6d0b68c1d1b223e11f9d5
|
[] |
no_license
|
smart-solution/odoo-crm-80
|
b19592ce6e374c9c7b0a3198498930ffb1283018
|
85dfd0cc37f81bcba24d2a0091094708a262fe2c
|
refs/heads/master
| 2016-09-06T06:04:35.191924 | 2015-07-14T12:48:28 | 2015-07-14T12:48:28 | 33,174,511 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,285 |
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2009 EduSense BV (<http://www.edusense.nl>).
# (C) 2011 - 2013 Therp BV (<http://therp.nl>).
#
# All other contributions are (C) by their respective contributors
#
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class payment_mode(orm.Model):
    ''' Restoring the payment type from version 5,
    used to select the export wizard (if any) '''
    _inherit = "payment.mode"
    def suitable_bank_types(self, cr, uid, payment_mode_id=None, context=None):
        """ Reinstates functional code for suitable bank type filtering.
        Current code in account_payment is disfunctional.
        """
        # Codes of the bank account types accepted by this payment mode's type.
        res = []
        payment_mode = self.browse(
            cr, uid, payment_mode_id, context)
        if (payment_mode and payment_mode.type and
                payment_mode.type.suitable_bank_types):
            res = [t.code for t in payment_mode.type.suitable_bank_types]
        return res
    _columns = {
        'type': fields.many2one(
            'payment.mode.type', 'Payment type',
            required=True,
            help='Select the Payment Type for the Payment Mode.'
        ),
        # Mirrors the selected type's order kind (payment vs direct debit).
        'payment_order_type': fields.related(
            'type', 'payment_order_type', readonly=True, type='selection',
            selection=[('payment', 'Payment'), ('debit', 'Direct debit')],
            string="Payment Order Type"),
    }
|
[
"[email protected]"
] | |
7c7405d5b792cd6f20e89b0b56489b366c8baecf
|
ba730380c8406b234202a6a19a9e5f01f6b66d25
|
/django/crud2/articles/views.py
|
4a9f35242edeef84e1211c795529a801b810b62b
|
[] |
no_license
|
ssabum/note
|
3b0fd891ab7053997c7978298635e599b42a7659
|
47354aa55a87813dab66f2ff7a930f5313bffe7a
|
refs/heads/master
| 2023-06-19T03:03:02.398976 | 2021-07-09T15:09:42 | 2021-07-09T15:09:42 | 331,743,760 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,151 |
py
|
from django.shortcuts import render, redirect
from .models import Article
# Create your views here.
# READ
def index(request):
    """List all articles, most recently updated first."""
    # Fetch every article.
    # articles = Article.objects.all()[::-1] # solved in Python (reverse the list)
    articles = Article.objects.order_by('-updated_at') # solved at the DB level: sort by update time
    context = {
        'articles': articles,
    }
    return render(request, 'articles/index.html', context)
# CREATE
def new(request):
    """Render the empty article-creation form."""
    return render(request, 'articles/new.html')
# CREATE
def create(request):
    """Create an article from POST data, then redirect to its detail page."""
    # Extract the user data that arrived with the POST request.
    title = request.POST.get('title')
    content = request.POST.get('content')
    # Build an instance of the Article model class.
    article = Article(title=title, content=content)
    # Save it to the DB.
    article.save()
    # return render(request, 'articles/index.html')
    # return redirect('articles:index')
    return redirect('articles:detail', article.pk)
# READ
def detail(request, pk):
    """Show a single article."""
    article = Article.objects.get(pk=pk)
    context = {
        'article': article,
    }
    return render(request, 'articles/detail.html', context)
# DELETE
# Triggered by: /articles/index/<article pk>/delete
# Therefore only POST requests may actually delete.
def delete(request, pk):
    """Delete an article on POST; otherwise bounce back to its detail page."""
    # Load the record to delete.
    article = Article.objects.get(pk=pk)
    if request.method == 'POST':
        # Delete it.
        article.delete()
        # Go back to the main page.
        return redirect('articles:index')
    else:
        return redirect('articles:detail', article.pk)
# UPDATE
def edit(request, pk):
    """Render the edit form for an article."""
    article = Article.objects.get(pk=pk)
    context = {
        'article':article,
    }
    return render(request, 'articles/edit.html', context)
def update(request, pk):
    """Apply POSTed changes to an article and redirect to its detail page."""
    # Load the article to modify.
    article = Article.objects.get(pk=pk)
    # Pull the user-submitted fields.
    article.title = request.POST.get('title')
    article.content = request.POST.get('content')
    # Save to the DB.
    article.save()
    return redirect('articles:detail', article.pk)
|
[
"[email protected]"
] | |
1d626c9dbdb41c344f8870b691bab05f897edafa
|
5864e86954a221d52d4fa83a607c71bacf201c5a
|
/dogma/items/fittableDogmaItem.py
|
8e70cbc858bb571171c14d42eeafc1040058e7eb
|
[] |
no_license
|
connoryang/1v1dec
|
e9a2303a01e5a26bf14159112b112be81a6560fd
|
404f2cebf13b311e754d45206008918881496370
|
refs/heads/master
| 2021-05-04T02:34:59.627529 | 2016-10-19T08:56:26 | 2016-10-19T08:56:26 | 71,334,417 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,769 |
py
|
#Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\packages\dogma\items\fittableDogmaItem.py
from dogma.dogmaLogging import *
from baseDogmaItem import BaseDogmaItem
from ccpProfile import TimedFunction
import weakref
class FittableDogmaItem(BaseDogmaItem):
    """A dogma item that can be fitted onto another item (its 'location').

    The location is held via a weakref to avoid reference cycles between the
    fitted item and its container.
    """
    def __init__(self, *args, **kwargs):
        self._location = None
        self.lastStopTime = None
        BaseDogmaItem.__init__(self, *args, **kwargs)
    @property
    def location(self):
        # Dereference the weakref; returns None if unset or already collected.
        if self._location:
            return self._location()
    @location.setter
    def location(self, location):
        if location is None:
            self._location = None
        else:
            self._location = weakref.ref(location)
    @property
    def ownerID(self):
        # Owner is always derived from the containing location.
        if self.location:
            return self.location.ownerID
    @ownerID.setter
    def ownerID(self, ownerID):
        # Owner cannot be set independently of the location; log disagreements.
        if self.location and self.location.ownerID != ownerID:
            self.dogmaLocation.LogError('Setting ownerID on a FittableDogmaItem to something that disagrees with its location!', self.location.ownerID, ownerID)
    @TimedFunction('FittableDogmaItem::Unload')
    def Unload(self):
        # Detach from the location's fitted-item registry (or the
        # missing-location registry if we never had a location).
        BaseDogmaItem.Unload(self)
        if self.location:
            try:
                locationFittedItems = self.location.fittedItems
            except AttributeError:
                return
            if self.itemID in locationFittedItems:
                del locationFittedItems[self.itemID]
        elif self.itemID in self.dogmaLocation.itemsMissingLocation:
            del self.dogmaLocation.itemsMissingLocation[self.itemID]
    def SetLastStopTime(self, lastStopTime):
        self.lastStopTime = lastStopTime
    def IsActive(self):
        # Active means some non-online effect of activation/target category is
        # running.  NOTE(review): ``const`` is not imported here — presumably an
        # engine-injected builtin; confirm in the runtime environment.
        for effectID in self.activeEffects:
            if effectID == const.effectOnline:
                continue
            effect = self.dogmaLocation.GetEffect(effectID)
            if effect.effectCategory in (const.dgmEffActivation, const.dgmEffTarget):
                return True
        return False
    @TimedFunction('FittableDogmaItem::SetLocation')
    def SetLocation(self, locationID, location, flagID):
        # Fit this item onto *location* at slot *flagID*; returns the previous
        # location info so callers can undo/compare.
        if location is None:
            self.dogmaLocation.LogError('FittableDogmaItem.SetLocation :: Location dogma item is None')
            return
        if not self.IsValidFittingLocation(location):
            self.dogmaLocation.LogError('FittableDogmaItem.SetLocation :: Invalid fitting location')
            return
        oldData = self.GetLocationInfo()
        self.location = location
        self.flagID = flagID
        location.RegisterFittedItem(self, flagID)
        return oldData
    def IsValidFittingLocation(self, location):
        # Base class accepts nothing; subclasses override with real checks.
        return False
    def UnsetLocation(self, locationDogmaItem):
        locationDogmaItem.UnregisterFittedItem(self)
    def GetShipID(self):
        if self.location:
            return self.location.itemID
    def GetPilot(self):
        if self.location:
            return self.location.GetPilot()
    def GetOtherID(self):
        # The "other" item paired with this one (e.g. charge in the same slot).
        otherID = None
        if self.location:
            otherID = self.location.subLocations.get(self.flagID, None)
            if otherID is None:
                other = self.dogmaLocation.GetChargeNonDB(self.location.itemID, self.flagID)
                if other is not None:
                    otherID = other.itemID
        return otherID
    def SerializeForPropagation(self):
        # Include lastStopTime on top of the base serialization.
        retVal = BaseDogmaItem.SerializeForPropagation(self)
        retVal.lastStopTime = self.lastStopTime
        return retVal
    def UnpackPropagationData(self, propData, charID, shipID):
        BaseDogmaItem.UnpackPropagationData(self, propData, charID, shipID)
        self.SetLastStopTime(propData.lastStopTime)
|
[
"[email protected]"
] | |
8d8ddb865c6a12401cc24112051255881181248e
|
f4f5d98101db7baf9703be077615383b831c35d8
|
/setup.py
|
f00a4b6116f81b93954694c531ecc2ff819e8e74
|
[
"MIT"
] |
permissive
|
TrendingTechnology/PyYouTube-1
|
23099fd1b825f226cabf2e0f50112e1b3f53346b
|
774213412210ab03adf11eb8b38906b0f3de5ee6
|
refs/heads/main
| 2023-08-17T13:50:03.035784 | 2021-09-15T09:11:31 | 2021-09-15T09:11:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 858 |
py
|
import pathlib
import setuptools
def read(file: str) -> list:
    """Return the stripped, non-empty, non-comment lines of *file*.

    Used to load ``requirements.txt``; blank lines and ``#`` comment lines are
    skipped so they are not passed on to ``install_requires``.
    """
    with open(file, encoding="utf-8") as r:
        return [
            line.strip()
            for line in r
            if line.strip() and not line.lstrip().startswith("#")
        ]
# Package directory and long description sourced from the adjacent README.
file = pathlib.Path(__file__).parent  # NOTE(review): shadows a builtin-style name; rename if touched
README = (file / "README.md").read_text()
setuptools.setup(
    name='PyYouTube',
    version="1.0.7",
    author="mrlokaman",
    author_email="[email protected]",
    long_description = README,
    long_description_content_type = "text/markdown",
    description="Python library Get YouTube Video Data",
    license="MIT",
    url="https://github.com/lntechnical2/PyYouTube",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    packages=setuptools.find_packages(),
    # Runtime dependencies come from requirements.txt (see read() above).
    install_requires = read("requirements.txt"),
    python_requires=">=3.6"
)
|
[
"[email protected]"
] | |
faf4719b940c4e5811346205c59cd9ad7daa89ec
|
2813f969fc9833023f543fa14c1c22a87325ca8f
|
/logging_tree/tests/test_format.py
|
787b959fd92bbf2bdc6650b8ba7e639e870cd017
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
ralphbean/logging_tree
|
5761afc380719f4069fd00b1f21f5765927ce593
|
1db0ad6f485b5333fee637813faf827990924421
|
refs/heads/master
| 2021-01-16T22:05:05.459626 | 2012-12-04T02:16:14 | 2012-12-04T02:16:14 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,425 |
py
|
"""Tests for the `logging_tree.format` module."""
import logging
import logging.handlers
import unittest
import sys
from logging_tree.format import build_description, printout
from logging_tree.tests.case import LoggingTestCase
if sys.version_info >= (3,):
from io import StringIO
else:
from StringIO import StringIO
class FakeFile(StringIO):
    """In-memory stand-in for a real file object.

    Remembers the requested *filename* (the *mode* argument is accepted but
    ignored) so the tests can check how logging handlers would open files.
    """

    def __init__(self, filename, mode):
        StringIO.__init__(self)
        self.filename = filename

    def __repr__(self):
        return '<file %r>' % self.filename
class FormatTests(LoggingTestCase):
def setUp(self):
# Prevent logging file handlers from trying to open real files.
# (The keyword delay=1, which defers any actual attempt to open
# a file, did not appear until Python 2.6.)
logging.open = FakeFile
super(FormatTests, self).setUp()
def tearDown(self):
del logging.open
super(FormatTests, self).tearDown()
def test_printout(self):
stdout, sys.stdout = sys.stdout, StringIO()
printout()
self.assertEqual(sys.stdout.getvalue(), '<--""\n Level WARNING\n')
sys.stdout = stdout
def test_simple_tree(self):
logging.getLogger('a')
logging.getLogger('a.b').setLevel(logging.DEBUG)
logging.getLogger('x.c')
self.assertEqual(build_description(), '''\
<--""
Level WARNING
|
o<--"a"
| |
| o<--"a.b"
| Level DEBUG
|
o<--[x]
|
o<--"x.c"
''')
def test_fancy_tree(self):
logging.getLogger('').setLevel(logging.DEBUG)
log = logging.getLogger('db')
log.setLevel(logging.INFO)
log.propagate = False
log.addFilter(MyFilter())
handler = logging.StreamHandler()
log.addHandler(handler)
handler.addFilter(logging.Filter('db.errors'))
logging.getLogger('db.errors')
logging.getLogger('db.stats')
log = logging.getLogger('www.status')
log.setLevel(logging.DEBUG)
log.addHandler(logging.FileHandler('/foo/log.txt'))
log.addHandler(MyHandler())
self.assertEqual(build_description(), '''\
<--""
Level DEBUG
|
o "db"
| Level INFO
| Propagate OFF
| Filter <MyFilter>
| Handler Stream %r
| Filter name='db.errors'
| |
| o<--"db.errors"
| |
| o<--"db.stats"
|
o<--[www]
|
o<--"www.status"
Level DEBUG
Handler File '/foo/log.txt'
Handler <MyHandler>
''' % (sys.stderr,))
def test_most_handlers(self):
ah = logging.getLogger('').addHandler
ah(logging.handlers.RotatingFileHandler(
'/bar/one.txt', maxBytes=10000, backupCount=3))
ah(logging.handlers.SocketHandler('server.example.com', 514))
ah(logging.handlers.DatagramHandler('server.example.com', 1958))
ah(logging.handlers.SysLogHandler())
ah(logging.handlers.SMTPHandler(
'mail.example.com', 'Server', 'Sysadmin', 'Logs!'))
# ah(logging.handlers.NTEventLogHandler())
ah(logging.handlers.HTTPHandler('api.example.com', '/logs', 'POST'))
ah(logging.handlers.BufferingHandler(20000))
sh = logging.StreamHandler()
ah(logging.handlers.MemoryHandler(30000, target=sh))
self.assertEqual(build_description(), '''\
<--""
Level WARNING
Handler RotatingFile '/bar/one.txt' maxBytes=10000 backupCount=3
Handler Socket server.example.com 514
Handler Datagram server.example.com 1958
Handler SysLog ('localhost', 514) facility=1
Handler SMTP via mail.example.com to ['Sysadmin']
Handler HTTP POST to http://api.example.com//logs
Handler Buffering capacity=20000
Handler Memory capacity=30000 dumping to:
Handler Stream %r
''' % (sh.stream,))
logging.getLogger('').handlers[3].socket.close() # or Python 3 warning
def test_2_dot_5_handlers(self):
if sys.version_info < (2, 5):
return
ah = logging.getLogger('').addHandler
ah(logging.handlers.TimedRotatingFileHandler('/bar/two.txt'))
self.assertEqual(build_description(), '''\
<--""
Level WARNING
Handler TimedRotatingFile '/bar/two.txt' when='H' interval=3600 backupCount=0
''')
def test_2_dot_6_handlers(self):
if sys.version_info < (2, 6):
return
ah = logging.getLogger('').addHandler
ah(logging.handlers.WatchedFileHandler('/bar/three.txt'))
self.assertEqual(build_description(), '''\
<--""
Level WARNING
Handler WatchedFile '/bar/three.txt'
''')
def test_nested_handlers(self):
h1 = logging.StreamHandler()
h2 = logging.handlers.MemoryHandler(30000, target=h1)
h2.addFilter(logging.Filter('worse'))
h3 = logging.handlers.MemoryHandler(30000, target=h2)
h3.addFilter(logging.Filter('bad'))
logging.getLogger('').addHandler(h3)
self.assertEqual(build_description(), '''\
<--""
Level WARNING
Handler Memory capacity=30000 dumping to:
Filter name='bad'
Handler Memory capacity=30000 dumping to:
Filter name='worse'
Handler Stream %r
''' % (h1.stream,))
class MyFilter(object):
    """Minimal filter-like object with a fixed repr for the expected output."""
    def __repr__(self):
        return '<MyFilter>'
class MyHandler(object):
    """Minimal handler-like object with a fixed repr for the expected output."""
    def __repr__(self):
        return '<MyHandler>'
if __name__ == '__main__': # for Python <= 2.4
unittest.main()
|
[
"[email protected]"
] | |
83bee1c913ad98cd00f75327075dbef6727ae53a
|
3784495ba55d26e22302a803861c4ba197fd82c7
|
/venv/lib/python3.6/site-packages/torchx/legacy/nn/VolumetricReplicationPadding.py
|
16cc7a1c097d7c351bcc12cb145425dff9ac1bf3
|
[
"MIT"
] |
permissive
|
databill86/HyperFoods
|
cf7c31f5a6eb5c0d0ddb250fd045ca68eb5e0789
|
9267937c8c70fd84017c0f153c241d2686a356dd
|
refs/heads/master
| 2021-01-06T17:08:48.736498 | 2020-02-11T05:02:18 | 2020-02-11T05:02:18 | 241,407,659 | 3 | 0 |
MIT
| 2020-02-18T16:15:48 | 2020-02-18T16:15:47 | null |
UTF-8
|
Python
| false | false | 1,969 |
py
|
import torch
from .Module import Module
class VolumetricReplicationPadding(Module):
    """Legacy nn module padding a 5D (N x C x D x H x W) tensor by replicating borders.

    Padding may be given per side; any side left as ``None`` defaults to ``pleft``.
    """

    def __init__(self, pleft, pright=None, ptop=None, pbottom=None, pfront=None, pback=None):
        super(VolumetricReplicationPadding, self).__init__()
        self.pleft = pleft
        # BUG FIX: the original used ``pright or pleft`` etc., which treated an
        # explicit padding of 0 as "not provided" and silently replaced it with
        # ``pleft``.  ``is not None`` preserves an explicit 0.
        self.pright = pright if pright is not None else pleft
        self.ptop = ptop if ptop is not None else pleft
        self.pbottom = pbottom if pbottom is not None else pleft
        self.pfront = pfront if pfront is not None else pleft
        self.pback = pback if pback is not None else pleft

    def updateOutput(self, input):
        """Forward pass: pad *input* (must be 5D) into ``self.output``."""
        assert input.dim() == 5
        self._backend.VolumetricReplicationPadding_updateOutput(
            self._backend.library_state,
            input,
            self.output,
            self.pleft, self.pright,
            self.ptop, self.pbottom,
            self.pfront, self.pback
        )
        return self.output

    def updateGradInput(self, input, gradOutput):
        """Backward pass; *gradOutput* must have *input*'s padded dimensions."""
        assert input.dim() == 5 and gradOutput.dim() == 5
        assert input.size(0) == gradOutput.size(0)
        assert input.size(1) == gradOutput.size(1)
        assert input.size(2) + self.pfront + self.pback == gradOutput.size(2)
        assert input.size(3) + self.ptop + self.pbottom == gradOutput.size(3)
        assert input.size(4) + self.pleft + self.pright == gradOutput.size(4)
        self._backend.VolumetricReplicationPadding_updateGradInput(
            self._backend.library_state,
            input,
            gradOutput,
            self.gradInput,
            self.pleft, self.pright,
            self.ptop, self.pbottom,
            self.pfront, self.pback
        )
        return self.gradInput

    def __repr__(self):
        s = super(VolumetricReplicationPadding, self).__repr__()
        s += '({}, {}, {}, {}, {}, {})'.format(self.pleft, self.pright,
                                               self.ptop, self.pbottom,
                                               self.pfront, self.pback
                                               )
        return s
|
[
"[email protected]"
] | |
cb0b2c679a02d35a32e443a9412c0292555d4f6b
|
cff588a68be44913be884ba5c4ebf36a0a96cb75
|
/python/007study_namespace.py
|
3377539352b3e241261c717dfa8c5240c876539d
|
[] |
no_license
|
KOOKDONGHUN/trading
|
e6a8d023f4bdbb0f1cf32e3e5b6b26b6265fc3a6
|
2d4337978a5849098ed890e9e2c3f059e4706536
|
refs/heads/master
| 2022-11-15T00:38:32.705125 | 2020-07-12T10:25:46 | 2020-07-12T10:25:46 | 275,761,616 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,215 |
py
|
class Stock:
    # Class attribute stored in the class's own namespace.
    market = 'kospi'
print(dir())# // ['Stock', '__annotations__', '__builtins__', '__cached__', '__doc__',
# // '__file__', '__loader__', '__name__', '__package__', '__spec__']
# 'Stock' has been added to the module namespace.
print(Stock) # <class '__main__.Stock'>
# When a class is defined it gets its own independent namespace; the variables
# and methods defined inside it are stored there as a Python dictionary.
print(Stock.market)
# How to inspect a namespace:
print(Stock.__dict__) # // {'__module__': '__main__', 'market': 'kospi', '__dict__': <attribute '__dict__' of 'Stock' objects>,
#// '__weakref__': <attribute '__weakref__' of 'Stock' objects>, '__doc__': None}
s1 = Stock()
s2 = Stock()
print(id(s1)) # 2120139199496
print(id(s2)) # 2120139199560
print(s1.__dict__) # empty
print(s2.__dict__) # empty
s1.market = 'kosdaq'
print(s1.__dict__) # {'market': 'kosdaq'}
print(s2.__dict__) # still empty
print(s1.market) # kosdaq
# If the name is missing from the instance namespace, lookup falls back to the class namespace.
print(s2.market) # kospi
|
[
"[email protected]"
] | |
e7b07e9da69275211369027ccc4b4e3df2428c9a
|
98d328e4e00ac7cf8930d2ff9bd68af1d9d9cc3b
|
/utils/lib_clustering.py
|
3e1b9079f84417c6585bb40e6d8bcf926bf03a2b
|
[] |
no_license
|
jtpils/Lane-Detection-from-Point-Cloud
|
4d7e98cafada569097e16e7bcb5fdabc048e0644
|
238cb8cedc823a84c32b60ce13e7de8c81f19232
|
refs/heads/master
| 2020-06-05T08:42:46.397450 | 2019-06-17T15:41:58 | 2019-06-17T15:41:58 | 192,380,398 | 14 | 4 | null | 2019-06-17T16:16:58 | 2019-06-17T16:16:58 | null |
UTF-8
|
Python
| false | false | 3,810 |
py
|
'''
Clustering by DBSCAN using sklearn library
This code is copied and modified from:
https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html
'''
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
class Clusterer(object):
    """Thin wrapper around sklearn's DBSCAN with plotting/reporting helpers."""

    def __init__(self):
        # Becomes True once fit() has completed.
        self.fit_success = False

    def fit(self, X, eps=0.3, min_samples=10):
        """Cluster the (n_samples, n_features) array *X* with DBSCAN."""
        db = DBSCAN(eps=eps, min_samples=min_samples).fit(X)
        # Mask of "core" samples (samples close to a cluster centre).
        core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
        core_samples_mask[db.core_sample_indices_] = True
        self.X = X
        self.db = db
        self.core_samples_mask = core_samples_mask
        self.fit_success = True
        self.labels = db.labels_  # cluster label of each sample; -1 = noise
        self.unique_labels = set(self.labels)
        # Number of clusters, not counting the noise label when present.
        self.n_clusters = len(set(self.labels)) - \
            (1 if -1 in self.labels else 0)

    def plot_clusters(self):
        """Scatter-plot the 2D samples, one colour per cluster (black = noise)."""
        if not self.fit_success:
            return
        assert self.X.shape[1] == 2, "To visualize result, X must be 2 dimenstions."
        labels, n_clusters, unique_labels = self.labels, self.n_clusters, self.unique_labels
        core_samples_mask = self.core_samples_mask
        X = self.X
        colors = [plt.cm.Spectral(each)
                  for each in np.linspace(0, 1, len(unique_labels))]
        for k, col in zip(unique_labels, colors):
            if k == -1:
                # Black used for noise.
                col = [0, 0, 0, 1]
            class_member_mask = (labels == k)
            # Core samples drawn large, border samples drawn small.
            xy = X[class_member_mask & core_samples_mask]
            plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
                     markeredgecolor='k', markersize=14)
            xy = X[class_member_mask & ~core_samples_mask]
            plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
                     markeredgecolor='k', markersize=6)
        plt.title('Clustering result: {} clusters'.format(n_clusters))

    def print_clustering_result(self, labels_true=None):
        """Print clustering statistics; supervised metrics only if *labels_true* given.

        BUG FIX: this method previously read the undefined globals
        ``labels_true`` and ``X``, raising NameError unless it happened to run
        in the demo script's namespace.  Ground-truth labels are now an
        optional parameter and the data matrix stored by fit() is used for the
        silhouette score.
        """
        if not self.fit_success:
            return
        labels, n_clusters = self.labels, self.n_clusters
        # Number of clusters in labels, ignoring noise if present.
        n_noise_ = list(labels).count(-1)
        print('Estimated number of clusters: %d' % n_clusters)
        print('Estimated number of noise points: %d' % n_noise_)
        if labels_true is not None:
            print("Homogeneity: %0.3f" %
                  metrics.homogeneity_score(labels_true, labels))
            print("Completeness: %0.3f" %
                  metrics.completeness_score(labels_true, labels))
            print("V-measure: %0.3f" %
                  metrics.v_measure_score(labels_true, labels))
            print("Adjusted Rand Index: %0.3f"
                  % metrics.adjusted_rand_score(labels_true, labels))
            print("Adjusted Mutual Information: %0.3f"
                  % metrics.adjusted_mutual_info_score(labels_true, labels,
                                                       average_method='arithmetic'))
        print("Silhouette Coefficient: %0.3f"
              % metrics.silhouette_score(self.X, labels))
if __name__ == "__main__":
    # Demo: generate three 2D blobs, cluster them, and show the result.
    centers = [[1, 1], [-1, -1], [1, -1]]
    X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
                                random_state=0)
    # Standardize features before density-based clustering.
    X = StandardScaler().fit_transform(X)
    # Fit
    cluster = Clusterer()
    cluster.fit(X)
    # Plot
    cluster.plot_clusters()
    plt.show()
|
[
"[email protected]"
] | |
5e57e42cf81e3523dfaa874a315995fbc33cfcb9
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/edifact/D11B/PAYDUCD11BUN.py
|
3dccdf3361385387dedef9f876212a5ce94c56a8
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null |
UTF-8
|
Python
| false | false | 1,580 |
py
|
#Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD11BUN import recorddefs
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGM', MIN: 1, MAX: 1},
{ID: 'PAI', MIN: 1, MAX: 1},
{ID: 'FII', MIN: 1, MAX: 2},
{ID: 'DTM', MIN: 1, MAX: 4},
{ID: 'CUX', MIN: 0, MAX: 1},
{ID: 'PYT', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 5},
{ID: 'RFF', MIN: 0, MAX: 99, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
]},
{ID: 'NAD', MIN: 0, MAX: 6, LEVEL: [
{ID: 'CTA', MIN: 0, MAX: 5, LEVEL: [
{ID: 'COM', MIN: 0, MAX: 1},
]},
]},
{ID: 'GEI', MIN: 1, MAX: 9, LEVEL: [
{ID: 'RFF', MIN: 1, MAX: 1},
{ID: 'MOA', MIN: 1, MAX: 9},
{ID: 'BUS', MIN: 0, MAX: 1},
{ID: 'CUX', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 1, MAX: 99, LEVEL: [
{ID: 'UGH', MIN: 0, MAX: 1, LEVEL: [
{ID: 'NAD', MIN: 0, MAX: 999999, LEVEL: [
{ID: 'RFF', MIN: 0, MAX: 9},
{ID: 'MOA', MIN: 1, MAX: 9},
{ID: 'AJT', MIN: 0, MAX: 9},
{ID: 'PYT', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 3},
{ID: 'DTM', MIN: 0, MAX: 9},
]},
{ID: 'UGT', MIN: 1, MAX: 1},
]},
]},
]},
{ID: 'UNS', MIN: 1, MAX: 1},
{ID: 'MOA', MIN: 1, MAX: 1},
{ID: 'CNT', MIN: 0, MAX: 9},
{ID: 'AUT', MIN: 0, MAX: 1},
{ID: 'UNT', MIN: 1, MAX: 1},
]},
]
|
[
"[email protected]"
] | |
33707edb80b081ec1ed745507088f9c26ebd20fd
|
b182ff74d1107c00d77d3bb241dfca589ccc9404
|
/config.py
|
2bba1aadff966f60605fa7fdf900d990f46442d1
|
[] |
no_license
|
aexleader/Tornado-OA-System
|
7846a13a90c6da512a7f7620b003bd77b331a63d
|
6ffc51d2f42fcbd5b0abe7082dae4505bf687894
|
refs/heads/master
| 2020-08-01T14:00:28.966198 | 2019-09-10T10:57:23 | 2019-09-10T10:57:23 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,153 |
py
|
#coding=utf-8
from libs.flash.flash_lib import get_flashed_messages # import a helper exposed to templates
from libs.permission.permission_auth.permission_interface_libs import menu_permission
settings = dict(
    template_path = 'templates',
    static_path = 'static',
    debug = True,
    cookie_secret = 'aaaa',
    login_url = '/auth/user_login',
    xsrf_cookies = True,
    # ui_methods entries become global template helpers, usable in every html file
    ui_methods= {
        "menu_permission": menu_permission,
        "get_flashed_messages": get_flashed_messages
    },
    # pycket (redis-backed session/notification store) configuration
    pycket = {
        'engine': 'redis', # storage backend type
        'storage': {
            'host': 'localhost',
            'port': 6379,
            'db_sessions': 5,
            'db_notifications': 11,
            'max_connections': 2 ** 31,
        },
        'cookies': {
            'expires_days': 30, # session cookie lifetime
            #'max_age': 5000,
        },
    },
)
|
[
"[email protected]"
] | |
5f96b2f9df61b2997848aed9767153a92a516338
|
762de1c66746267e05d53184d7854934616416ee
|
/tools/MolSurfGenService/MolSurfaceGen32/chimera/share/VolumeProcessing/apply.py
|
e3698c7a49fcc4c0b7f6619db155e7b141e47eb8
|
[] |
no_license
|
project-renard-survey/semanticscience
|
6e74f5d475cf0ebcd9bb7be6bb9522cf15ed8677
|
024890dba56c3e82ea2cf8c773965117f8cda339
|
refs/heads/master
| 2021-07-07T21:47:17.767414 | 2017-10-04T12:13:50 | 2017-10-04T12:13:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,364 |
py
|
#!/usr/bin/env python
# -----------------------------------------------------------------------------
# Apply a function to a grid pointwise.
# The resulting volume is written in netcdf format.
#
# Syntax: apply.py sqrt|square|abs|exp|log <infile> <outfile>
#
# The file type must be one of the types handled by VolumeData.
#
import sys
from VolumeData import Grid_Data
# -----------------------------------------------------------------------------
#
def apply_function(array_func, inpath, outpath):
    """Apply *array_func* pointwise to every grid read from *inpath*.

    The transformed grids are written to *outpath* in netcdf format.
    Exits the process with status 1 if the input file type is unknown.
    """
    from VolumeData import fileformats
    try:
        grids = fileformats.open_file(inpath)
    except fileformats.Unknown_File_Type, e:  # Python 2 except syntax (legacy file)
        sys.stderr.write(str(e))
        sys.exit(1)
    fvalues = [Mapped_Grid(g, array_func) for g in grids]
    from VolumeData.netcdf import write_grid_as_netcdf
    write_grid_as_netcdf(fvalues, outpath)
# -----------------------------------------------------------------------------
#
class Mapped_Grid(Grid_Data):
    """Grid whose values are another grid's values passed through *array_func*."""
    def __init__(self, grid_data, array_func):
        self.array_func = array_func
        Grid_Data.__init__(self, grid_data.size, grid_data.value_type,
                           grid_data.origin, grid_data.step,
                           name = grid_data.name, default_color = grid_data.rgba)
    # ---------------------------------------------------------------------------
    #
    def read_matrix(self, ijk_origin, ijk_size, ijk_step, progress):
        # NOTE(review): ``self.component`` is never assigned in this class;
        # it looks like it should be the wrapped ``grid_data`` — confirm upstream.
        data = self.component.matrix(ijk_origin, ijk_size, ijk_step, progress)
        fvalues = self.array_func(data)
        return fvalues
# -----------------------------------------------------------------------------
#
def syntax():
    """Write the usage message to stderr and exit with status 1."""
    sys.stderr.write(
        'Apply a function to a grid pointwise.\n' +
        'The resulting volume is written in netcdf format.\n'
        'Syntax: apply.py sqrt|square|abs|exp|log <infile> <outfile>\n')
    sys.exit(1)
# -----------------------------------------------------------------------------
#
# Command-line driver: apply.py <function> <infile> <outfile>
if len(sys.argv) != 4:
    syntax()
fname = sys.argv[1]
from numpy import sqrt, power, absolute, exp, log
# Map the function name given on the command line to a numpy ufunc.
if fname == 'sqrt':
    array_func = sqrt
elif fname == 'square':
    array_func = lambda a: power(a, 2)
elif fname == 'abs':
    array_func = absolute
elif fname == 'exp':
    array_func = exp
elif fname == 'log':
    array_func = log
else:
    # Unknown function name: print usage and exit.
    syntax()
inpath = sys.argv[2]
outpath = sys.argv[3]
apply_function(array_func, inpath, outpath)
|
[
"alex.gawronski@d60594c4-dda9-11dd-87d8-31aa04531ed5"
] |
alex.gawronski@d60594c4-dda9-11dd-87d8-31aa04531ed5
|
aa8aabf65ecb49d7092f518affba7b4f4200745b
|
609582ee37a01ac6a67fb9c957825dcd3c9a5b3a
|
/LeetCode_Math/67_Add_Binaray.py
|
77bf2de64eddd1dca19c9a8f56aeabd0235107f3
|
[] |
no_license
|
captainjack331089/captainjack33.LeetCode
|
a9ad7b3591675c76814eda22e683745068e0abed
|
4c03f28371e003e8e6a7c30b7b0c46beb5e2a8e7
|
refs/heads/master
| 2022-03-07T19:53:40.454945 | 2019-11-06T19:32:00 | 2019-11-06T19:32:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 498 |
py
|
"""
67. Add Binary
Category: Math
Difficulty: Easy
"""
"""
Given two binary strings, return their sum (also a binary string).
The input strings are both non-empty and contains only characters 1 or 0.
Example 1:
Input: a = "11", b = "1"
Output: "100"
Example 2:
Input: a = "1010", b = "1011"
Output: "10101"
"""
class Solution():
    def addBinary(self, a, b):
        """Return the sum of binary strings *a* and *b* as a binary string."""
        total = int(a, 2) + int(b, 2)
        return format(total, 'b')
# Quick manual check: 0b100 + 0b100 == 0b1000.
a = "100"
b = "100"
if __name__ == "__main__":
    print(Solution().addBinary(a, b))
|
[
"[email protected]"
] | |
db5478f9a0cb0cf030d084d4aa9c480907c197a7
|
0dc3e9b70da8ccd056e0a0fab2b1d8f850c3d470
|
/lantern/django/django_celery/src/apps/cars/serializers.py
|
3b2841adafff0d4d82de945686eeba93f6718cd8
|
[] |
no_license
|
ArturYefriemov/green_lantern
|
28e7150af7b9d2281a107ad80026828ad77af62a
|
2841b647e1bfae4a7505e91e8a8695d03f35a3a2
|
refs/heads/master
| 2021-03-01T16:54:58.881835 | 2020-11-17T19:42:23 | 2020-11-17T19:42:23 | 245,799,969 | 0 | 0 | null | 2020-07-14T18:51:13 | 2020-03-08T11:13:32 |
Python
|
UTF-8
|
Python
| false | false | 190 |
py
|
from rest_framework import serializers
from apps.cars.models import Car
class CarSerializer(serializers.ModelSerializer):
    """DRF model serializer exposing every field of the Car model."""
    class Meta:
        model = Car
        fields = '__all__'
|
[
"[email protected]"
] | |
76b5e2452098e49235282783ad7eb1263db83e08
|
ae7ba9c83692cfcb39e95483d84610715930fe9e
|
/yubinbai/pcuva-problems/UVa 10539 - Almost Prime Numbers/main.py
|
30bb7c3cab4b9a2a5ac9a024702a2f2bdb6ddbf0
|
[] |
no_license
|
xenron/sandbox-github-clone
|
364721769ea0784fb82827b07196eaa32190126b
|
5eccdd8631f8bad78eb88bb89144972dbabc109c
|
refs/heads/master
| 2022-05-01T21:18:43.101664 | 2016-09-12T12:38:32 | 2016-09-12T12:38:32 | 65,951,766 | 5 | 7 | null | null | null | null |
UTF-8
|
Python
| false | false | 820 |
py
|
from bisect import *
from bitstring import BitArray
import sys
# Upper bound for the sieve; almost-primes are enumerated below this value.
MAXN = 1000005

def prime_sieve(top=MAXN):
    """Yield every prime below *top* (sieve of Eratosthenes).

    Uses a plain bytearray of composite flags instead of the original
    third-party bitstring.BitArray -- identical output, stdlib only,
    and cheaper per-element access.
    """
    composite = bytearray(top)  # 0 = possibly prime, 1 = composite
    for i in range(2, top):
        if not composite[i]:
            yield i
            # i is prime, so flag all of its multiples from i*i up.
            for j in range(i * i, top, i):
                composite[j] = 1
if __name__ == '__main__':
    primes = list(prime_sieve())
    # An "almost prime" is p**k for a prime p and k >= 2; enumerate
    # all of them below MAXN by repeatedly multiplying each prime.
    almostPrimes = []
    for p in primes:
        p1 = p ** 2
        while p1 < MAXN:
            almostPrimes.append(p1)
            p1 *= p
    almostPrimes.sort()
    # Test cases are read from input.txt via redirected stdin.
    sys.stdin = open('input.txt')
    numTest = int(input())
    for x in range(numTest):
        # NOTE(review): raw_input is Python 2 only -- this script mixes
        # it with print(...), so it appears to target Python 2.
        left, right = map(int, raw_input().split())
        # Count almost-primes in (left, right] via two binary searches.
        i1 = bisect_right(almostPrimes, left)
        i2 = bisect_right(almostPrimes, right)
        print(i2 - i1)
|
[
"[email protected]"
] | |
8a52bc396fcafcd7f2ed6b20d0b110a3e5a59648
|
1d60c5a7b8ce6277bff514e376f79848f706344c
|
/Data Scientist with Python - Career Track /22. Machine Learning with the Experts: School Budgets/02. Creating a simple first model/01. Setting up a train-test split in scikit-learn.py
|
09e603e05172de82530517858d1031747721ca01
|
[] |
no_license
|
DidiMilikina/DataCamp
|
338c6e6d3b4f5b6c541c1aba155a36e9ee24949d
|
3bf2cf3c1430190a7f8e54efda7d50a5fd66f244
|
refs/heads/master
| 2020-12-15T13:16:54.178967 | 2020-05-06T17:30:54 | 2020-05-06T17:30:54 | 235,113,616 | 4 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,016 |
py
|
'''
Setting up a train-test split in scikit-learn
Alright, you've been patient and awesome. It's finally time to start training models!
The first step is to split the data into a training set and a test set. Some labels don't occur very often, but we want to make sure that they appear in both the training and the test sets. We provide a function that will make sure at least min_count examples of each label appear in each split: multilabel_train_test_split.
Feel free to check out the full code for multilabel_train_test_split here.
You'll start with a simple model that uses just the numeric columns of your DataFrame when calling multilabel_train_test_split. The data has been read into a DataFrame df and a list consisting of just the numeric columns is available as NUMERIC_COLUMNS.
Instructions
100 XP
Create a new DataFrame named numeric_data_only by applying the .fillna(-1000) method to the numeric columns (available in the list NUMERIC_COLUMNS) of df.
Convert the labels (available in the list LABELS) to dummy variables. Save the result as label_dummies.
In the call to multilabel_train_test_split(), set the size of your test set to be 0.2. Use a seed of 123.
Fill in the .info() method calls for X_train, X_test, y_train, and y_test.
'''
# SOLUTION
# BUG FIX: this marker was originally the bare name `SOLUTION`, which
# raises NameError the moment the module is executed; it is kept as a
# comment instead.  (df, pd, LABELS, NUMERIC_COLUMNS and
# multilabel_train_test_split are provided by the DataCamp exercise
# environment.)

# Create the new DataFrame: numeric_data_only
numeric_data_only = df[NUMERIC_COLUMNS].fillna(-1000)

# Get labels and convert to dummy variables: label_dummies
label_dummies = pd.get_dummies(df[LABELS])

# Create training and test sets
X_train, X_test, y_train, y_test = multilabel_train_test_split(numeric_data_only,
                                                               label_dummies,
                                                               size=0.2,
                                                               seed=123)

# Print the info
print("X_train info:")
print(X_train.info())
print("\nX_test info:")
print(X_test.info())
print("\ny_train info:")
print(y_train.info())
print("\ny_test info:")
print(y_test.info())
|
[
"[email protected]"
] | |
9b930250c80b39f856585160a5b1f150a3d9355a
|
6053cef7fc0b063a6105cd38659ba082ee706335
|
/tweettools/blockmute.py
|
945725ca153e6f977a12db922ae170e6fb90aabe
|
[
"MIT"
] |
permissive
|
jdidion/blockmute
|
18dd24535d75d6c8998a432a1a5b657a3e91b93f
|
05984da637206d2bc5c69d2f68b10a1df4f9985f
|
refs/heads/main
| 2021-01-19T19:52:16.657531 | 2018-04-29T01:20:39 | 2018-04-29T01:20:39 | 101,212,612 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,470 |
py
|
#!/usr/bin/env python
# Block everyone you've muted, and vice-versa.
from argparse import ArgumentParser
import time
from tqdm import tqdm
from tweettools import get_client
def blockmute(api, sleep_secs=300):
    """Block every account the user has muted, and mute every account
    the user has blocked.

    api        -- twitter client exposing GetMutesIDs/GetBlocksIDs/
                  CreateBlock/CreateMute
    sleep_secs -- seconds to sleep when a call fails (rate limiting)
    """
    def _retry(action, user_id):
        # Retry one API call until it succeeds, sleeping out rate-limit
        # windows.  BUG FIX: the original used a bare `except:`, which
        # also swallowed KeyboardInterrupt/SystemExit and made the loop
        # impossible to abort; catch Exception instead.
        while True:
            try:
                action(user_id)
                return
            except Exception:
                print("Exceeded rate limit; sleeping for {} seconds".format(sleep_secs))
                time.sleep(sleep_secs)

    mutes = set(api.GetMutesIDs())
    blocks = set(api.GetBlocksIDs())
    # Muted but not yet blocked -> block; blocked but not muted -> mute.
    for user_id in tqdm(mutes - blocks):
        _retry(api.CreateBlock, user_id)
    for user_id in tqdm(blocks - mutes):
        _retry(api.CreateMute, user_id)
def main():
    """Parse CLI credentials and run blockmute against the Twitter API."""
    parser = ArgumentParser()
    parser.add_argument('-ck', '--consumer-key')
    parser.add_argument('-cs', '--consumer-secret')
    parser.add_argument('-tk', '--token-key', default=None)
    parser.add_argument('-ts', '--token-secret', default=None)
    # Default sleep matches Twitter's 15-minute rate-limit window.
    parser.add_argument('-s', '--sleep-secs', type=int, default=15*60)
    args = parser.parse_args()
    api = get_client(args.token_key, args.token_secret, args.consumer_key, args.consumer_secret)
    blockmute(api, sleep_secs=args.sleep_secs)

if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
b5722af8ed32f8e2da48f5c2d6fcd13c8de9701f
|
52d324c6c0d0eb43ca4f3edc425a86cdc1e27d78
|
/scripts/asos/archive_quantity.py
|
9c22be17d7528b94acd44e3f1e30933859ee8315
|
[
"MIT"
] |
permissive
|
deenacse/iem
|
992befd6d95accfdadc34fb7928d6b69d661d399
|
150512e857ca6dca1d47363a29cc67775b731760
|
refs/heads/master
| 2021-02-04T04:20:14.330527 | 2020-02-26T21:11:32 | 2020-02-26T21:11:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,342 |
py
|
""" Create a simple prinout of observation quanity in the database """
from __future__ import print_function
import sys
import datetime
import numpy as np
from pyiem.util import get_dbconn
class bcolors:
    """ANSI terminal escape codes for colored console output."""

    HEADER = "\033[95m"
    OKBLUE = "\033[94m"
    OKGREEN = "\033[92m"
    WARNING = "\033[93m"
    FAIL = "\033[91m"  # red -- used to flag high missing-data ratios
    ENDC = "\033[0m"   # reset to default color
def d(hits, total):
    """Format hits/total as a two-decimal ratio, colored red above 0.5.

    Returns " N/A" when total is zero (no observations to compare).
    """
    if total == 0:
        return " N/A"
    ratio = hits / float(total)
    color = bcolors.FAIL if ratio > 0.5 else bcolors.ENDC
    return "%s%.2f%s" % (color, ratio, bcolors.ENDC)
def main(argv):
    """Print a per-month observation table for one ASOS station.

    argv[1] is the station identifier.  With no further argument the raw
    monthly observation counts are printed; with a second argument the
    fraction of observations missing 'metar' (argv[2] == "metar") or
    'mslp' (anything else) is shown instead via d().
    """
    now = datetime.datetime.utcnow()
    # Rows are years 1900..2019 (index = year - 1900), columns are months.
    counts = np.zeros((120, 12))
    mslp = np.zeros((120, 12))
    metar = np.zeros((120, 12))
    pgconn = get_dbconn("asos", user="nobody")
    acursor = pgconn.cursor()
    stid = argv[1]
    # One row per (year, month): total obs plus missing-field tallies.
    acursor.execute(
        """
        SELECT extract(year from valid) as yr,
        extract(month from valid) as mo, count(*),
        sum(case when mslp is null or mslp < 1 then 1 else 0 end),
        sum(case when metar is null or metar = '' then 1 else 0 end)
        from alldata WHERE
        station = %s GROUP by yr, mo ORDER by yr ASC, mo ASC
        """,
        (stid,),
    )
    for row in acursor:
        counts[int(row[0] - 1900), int(row[1] - 1)] = row[2]
        mslp[int(row[0] - 1900), int(row[1] - 1)] = row[3]
        metar[int(row[0] - 1900), int(row[1] - 1)] = row[4]
    print("Observation Count For %s" % (stid,))
    print("YEAR JAN FEB MAR APR MAY JUN JUL AUG SEP OCT NOV DEC")
    output = False
    for i in range(120):
        year = 1900 + i
        if year > now.year:
            continue
        # Suppress leading years with no data at all.
        if not output and np.max(counts[i, :]) == 0:
            continue
        output = True
        if len(argv) < 3:
            # Raw monthly observation counts.
            print(
                ("%s %4i %4i %4i %4i %4i %4i %4i %4i %4i %4i %4i %4i")
                % (
                    year,
                    counts[i, 0],
                    counts[i, 1],
                    counts[i, 2],
                    counts[i, 3],
                    counts[i, 4],
                    counts[i, 5],
                    counts[i, 6],
                    counts[i, 7],
                    counts[i, 8],
                    counts[i, 9],
                    counts[i, 10],
                    counts[i, 11],
                )
            )
        else:
            # Ratio of observations missing the requested field.
            if argv[2] == "metar":
                data = metar
            else:
                data = mslp
            print(
                ("%s %4s %4s %4s %4s %4s %4s %4s %4s %4s %4s %4s %4s")
                % (
                    year,
                    d(data[i, 0], counts[i, 0]),
                    d(data[i, 1], counts[i, 1]),
                    d(data[i, 2], counts[i, 2]),
                    d(data[i, 3], counts[i, 3]),
                    d(data[i, 4], counts[i, 4]),
                    d(data[i, 5], counts[i, 5]),
                    d(data[i, 6], counts[i, 6]),
                    d(data[i, 7], counts[i, 7]),
                    d(data[i, 8], counts[i, 8]),
                    d(data[i, 9], counts[i, 9]),
                    d(data[i, 10], counts[i, 10]),
                    d(data[i, 11], counts[i, 11]),
                )
            )
|
[
"[email protected]"
] | |
2d83f6345f4629fb349ea3e2aa1ecd09b77cec8b
|
6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386
|
/google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/services/services/account_budget_proposal_service/transports/base.py
|
86d3e463eb723e6cf5e1dcff665b4d0e784c1fce
|
[
"Apache-2.0"
] |
permissive
|
oltoco/googleapis-gen
|
bf40cfad61b4217aca07068bd4922a86e3bbd2d5
|
00ca50bdde80906d6f62314ef4f7630b8cdb6e15
|
refs/heads/master
| 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,334 |
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
import google.auth # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.ads.googleads.v8.resources.types import account_budget_proposal
from google.ads.googleads.v8.services.types import account_budget_proposal_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-ads',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class AccountBudgetProposalServiceTransport(metaclass=abc.ABCMeta):
    """Abstract transport class for AccountBudgetProposalService.

    Concrete subclasses (e.g. gRPC) provide the actual RPC callables;
    this base handles host/credential resolution and method wrapping.
    """

    # OAuth scope required by all Google Ads API requests.
    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/adwords',
    )

    def __init__(
            self, *,
            host: str = 'googleads.googleapis.com',
            credentials: ga_credentials.Credentials = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host

        # If no credentials are provided, then determine the appropriate
        # defaults from the environment (ADC).
        if credentials is None:
            credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)

        # Save the credentials.
        self._credentials = credentials

        # Lifted into its own function so it can be stubbed out during tests.
        self._prep_wrapped_messages(client_info)

    def _prep_wrapped_messages(self, client_info):
        # Precompute gapic-wrapped methods (adds retry/timeout/user-agent).
        self._wrapped_methods = {
            self.get_account_budget_proposal: gapic_v1.method.wrap_method(
                self.get_account_budget_proposal,
                default_timeout=None,
                client_info=client_info,
            ),
            self.mutate_account_budget_proposal: gapic_v1.method.wrap_method(
                self.mutate_account_budget_proposal,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    @property
    def get_account_budget_proposal(self) -> typing.Callable[
            [account_budget_proposal_service.GetAccountBudgetProposalRequest],
            account_budget_proposal.AccountBudgetProposal]:
        # Must be supplied by a concrete transport implementation.
        raise NotImplementedError

    @property
    def mutate_account_budget_proposal(self) -> typing.Callable[
            [account_budget_proposal_service.MutateAccountBudgetProposalRequest],
            account_budget_proposal_service.MutateAccountBudgetProposalResponse]:
        # Must be supplied by a concrete transport implementation.
        raise NotImplementedError
__all__ = (
'AccountBudgetProposalServiceTransport',
)
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
cc472b1754e73618c88e880b49f00b891157f7e0
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/dev/cv/image_classification/DPN-68_ID1889_for_PyTorch/timm/data/dataset.py
|
4b32a3a0617ad45b963c62d5fc03f7d56de6b2f8
|
[
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"CC-BY-NC-4.0"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 |
Apache-2.0
| 2022-10-15T09:29:12 | 2022-04-20T04:11:18 |
Python
|
UTF-8
|
Python
| false | false | 6,548 |
py
|
#
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#
""" Quick n Simple Image Folder, Tarfile based DataSet
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch.utils.data as data
import os
import torch
import logging
from PIL import Image
from .parsers import create_parser
import torch.npu
import os
# Select the Ascend NPU device index from the NPU_CALCULATE_DEVICE
# environment variable (defaults to device 0) and make it current.
NPU_CALCULATE_DEVICE = 0
if os.getenv('NPU_CALCULATE_DEVICE') and str.isdigit(os.getenv('NPU_CALCULATE_DEVICE')):
    NPU_CALCULATE_DEVICE = int(os.getenv('NPU_CALCULATE_DEVICE'))
if torch.npu.current_device() != NPU_CALCULATE_DEVICE:
    torch.npu.set_device(f'npu:{NPU_CALCULATE_DEVICE}')

_logger = logging.getLogger(__name__)

# Re-raise after this many consecutive unreadable samples in ImageDataset.
_ERROR_RETRY = 50
class ImageDataset(data.Dataset):
    """Map-style dataset reading images (or raw bytes) through a parser.

    Unreadable samples are skipped by retrying the next index, wrapping
    around, until _ERROR_RETRY consecutive failures occur.
    """

    def __init__(
            self,
            root,
            parser=None,
            class_map='',
            load_bytes=False,
            transform=None,
    ):
        # A parser name (or None) is resolved to a parser instance.
        if parser is None or isinstance(parser, str):
            parser = create_parser(parser or '', root=root, class_map=class_map)
        self.parser = parser
        self.load_bytes = load_bytes   # return raw bytes instead of decoded RGB
        self.transform = transform
        self._consecutive_errors = 0

    def __getitem__(self, index):
        img, target = self.parser[index]
        try:
            # Either read the raw bytes or decode to an RGB PIL image.
            img = img.read() if self.load_bytes else Image.open(img).convert('RGB')
        except Exception as e:
            _logger.warning(f'Skipped sample (index {index}, file {self.parser.filename(index)}). {str(e)}')
            self._consecutive_errors += 1
            # Try the next sample (wrapping) unless too many failed in a row.
            if self._consecutive_errors < _ERROR_RETRY:
                return self.__getitem__((index + 1) % len(self.parser))
            else:
                raise e
        self._consecutive_errors = 0
        if self.transform is not None:
            img = self.transform(img)
        if target is None:
            # No label available: -1 is the conventional placeholder.
            target = torch.tensor(-1, dtype=torch.long)
        return img, target

    def __len__(self):
        return len(self.parser)

    def filename(self, index, basename=False, absolute=False):
        return self.parser.filename(index, basename, absolute)

    def filenames(self, basename=False, absolute=False):
        return self.parser.filenames(basename, absolute)
class IterableImageDataset(data.IterableDataset):
    """Iterable-style dataset streaming (image, target) pairs from a parser."""

    def __init__(
            self,
            root,
            parser=None,
            split='train',
            is_training=False,
            batch_size=None,
            class_map='',
            load_bytes=False,   # NOTE(review): accepted but unused here
            repeats=0,
            transform=None,
    ):
        assert parser is not None
        if isinstance(parser, str):
            self.parser = create_parser(
                parser, root=root, split=split, is_training=is_training, batch_size=batch_size, repeats=repeats)
        else:
            self.parser = parser
        self.transform = transform
        self._consecutive_errors = 0

    def __iter__(self):
        for img, target in self.parser:
            if self.transform is not None:
                img = self.transform(img)
            if target is None:
                # No label available: -1 is the conventional placeholder.
                target = torch.tensor(-1, dtype=torch.long)
            yield img, target

    def __len__(self):
        # Length is only known when the underlying parser reports one.
        if hasattr(self.parser, '__len__'):
            return len(self.parser)
        else:
            return 0

    def filename(self, index, basename=False, absolute=False):
        # Random access is impossible for a streaming parser.
        assert False, 'Filename lookup by index not supported, use filenames().'

    def filenames(self, basename=False, absolute=False):
        return self.parser.filenames(basename, absolute)
class AugMixDataset(torch.utils.data.Dataset):
    """Dataset wrapper to perform AugMix or other clean/augmentation mixes"""

    def __init__(self, dataset, num_splits=2):
        # Transforms arrive as a 3-tuple: (base, augmentation, normalize).
        self.augmentation = None
        self.normalize = None
        self.dataset = dataset
        if self.dataset.transform is not None:
            self._set_transforms(self.dataset.transform)
        self.num_splits = num_splits

    def _set_transforms(self, x):
        assert isinstance(x, (list, tuple)) and len(x) == 3, 'Expecting a tuple/list of 3 transforms'
        # Base transform stays on the wrapped dataset; the rest are ours.
        self.dataset.transform, self.augmentation, self.normalize = x

    @property
    def transform(self):
        return self.dataset.transform

    @transform.setter
    def transform(self, x):
        self._set_transforms(x)

    def _normalize(self, sample):
        if self.normalize is None:
            return sample
        return self.normalize(sample)

    def __getitem__(self, i):
        base, target = self.dataset[i]  # all splits share the same dataset base transform
        # First split is the 'clean' one: normalization only.
        splits = [self._normalize(base)]
        # Every remaining split gets the full augmentation first.
        splits.extend(
            self._normalize(self.augmentation(base))
            for _ in range(self.num_splits - 1)
        )
        return tuple(splits), target

    def __len__(self):
        return len(self.dataset)
|
[
"[email protected]"
] | |
50c5dd1046b86e17916c7169ac1be8c2aa36dc0b
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/49/usersdata/107/19461/submittedfiles/pico.py
|
d085c047956c05bb79cd9376fc75eadbc27af13d
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 612 |
py
|
# -*- coding: utf-8 -*-
from __future__ import division
def pico(a):
    """Return True when the list rises and then strictly falls (a peak).

    Locates the first index where the sequence starts decreasing; the
    list is a "pico" when everything from that point on is strictly
    decreasing and the drop does not begin at index 0.
    """
    drop_start = 0
    for idx in range(len(a) - 1):
        if a[idx] > a[idx + 1]:
            drop_start = idx
            break
    strictly_falling = all(
        a[idx] > a[idx + 1] for idx in range(drop_start, len(a) - 1)
    )
    return strictly_falling and drop_start != 0
# Read the list size and its elements, then print 'S' if the list has a
# peak, 'N' otherwise.  NOTE(review): written for Python 2, where
# input() evaluates the typed text (here: integers).
n = input('digite a quantidade de elemento')
a=[]
for i in range (0,n,1):
    a.append(input('a:'))
if pico (a):
    print ('S')
else:
    # BUG FIX: was `primt ('N')` -- a NameError whenever the list is
    # not a peak.
    print ('N')
n = input('Digite a quantidade de elementos da lista: ')
#CONTINUE...
|
[
"[email protected]"
] | |
4101fd7aac1737d98b2dfafe6118696400bd4e4a
|
844e0cd4ffbe1ead05b844508276f66cc20953d5
|
/test/testconfigurationmanager.py
|
e9fae9d325da652711c99ddbfa3770ec19e87574
|
[] |
no_license
|
Archanciel/cryptopricer
|
a256fa793bb1f2d65b5c032dd81a266ee5be79cc
|
00c0911fe1c25c1da635dbc9b26d45be608f0cc5
|
refs/heads/master
| 2022-06-29T13:13:22.435670 | 2022-05-11T20:37:43 | 2022-05-11T20:37:43 | 100,196,449 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,083 |
py
|
import unittest
import os, sys, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from configurationmanager import ConfigurationManager
class TestConfigurationManager(unittest.TestCase):
    """Tests for ConfigurationManager default-value handling.

    Each test verifies that defaults are produced whether the ini file
    exists, is missing, is empty, or has one key removed.  Expected
    dataPath/appSize/histoListItemHeight differ between Android
    (os.name == 'posix') and Windows.
    """

    def setUp(self):
        # Use a platform-appropriate throwaway ini file.
        if os.name == 'posix':
            self.filePath = '/sdcard/cryptopricer_test.ini'
        else:
            self.filePath = 'c:\\temp\\cryptopricer_test.ini'

    def testConfigurationManagerInstanciation(self):
        """Existing config file: all expected values are exposed."""
        self.configMgr = ConfigurationManager(self.filePath)
        self.assertEqual(self.configMgr.localTimeZone, 'Europe/Zurich')
        self.assertEqual(self.configMgr.dateTimeFormat, 'DD/MM/YY HH:mm')
        self.assertEqual(self.configMgr.dateOnlyFormat, 'DD/MM/YY')
        if os.name == 'posix':
            self.assertEqual(self.configMgr.dataPath, '/sdcard/CryptoPricerData')
            self.assertEqual(self.configMgr.appSize, 'Half')
            self.assertEqual(self.configMgr.histoListItemHeight, '90')
        else:
            self.assertEqual(self.configMgr.dataPath, 'c:\\temp')
            self.assertEqual(self.configMgr.appSize, 'Full')
            self.assertEqual(self.configMgr.histoListItemHeight, '35')
        self.assertEqual(self.configMgr.loadAtStartPathFilename, '')
        self.assertEqual(self.configMgr.histoListVisibleSize, '3')
        self.assertEqual(self.configMgr.appSizeHalfProportion, '0.62')
        self.assertEqual(self.configMgr.referenceCurrency, 'USD')

    def testConfigurationManagerInstanciationNoConfigFile(self):
        """Missing config file: defaults are recreated."""
        os.remove(self.filePath)
        self.configMgr = ConfigurationManager(self.filePath)
        self.assertEqual(self.configMgr.localTimeZone, 'Europe/Zurich')
        self.assertEqual(self.configMgr.dateTimeFormat, 'DD/MM/YY HH:mm')
        self.assertEqual(self.configMgr.dateOnlyFormat, 'DD/MM/YY')
        if os.name == 'posix':
            self.assertEqual(self.configMgr.dataPath, '/sdcard/CryptoPricerData')
            self.assertEqual(self.configMgr.appSize, 'Half')
            self.assertEqual(self.configMgr.histoListItemHeight, '90')
        else:
            self.assertEqual(self.configMgr.dataPath, 'c:\\temp')
            self.assertEqual(self.configMgr.appSize, 'Full')
            self.assertEqual(self.configMgr.histoListItemHeight, '35')
        self.assertEqual(self.configMgr.loadAtStartPathFilename, '')
        self.assertEqual(self.configMgr.histoListVisibleSize, '3')
        self.assertEqual(self.configMgr.appSizeHalfProportion, '0.62')
        self.assertEqual(self.configMgr.referenceCurrency, 'USD')

    def testConfigurationManagerInstanciationEmptyConfigFile(self):
        """Zero-byte config file: defaults are recreated."""
        open(self.filePath, 'w').close()
        self.configMgr = ConfigurationManager(self.filePath)
        self.assertEqual(self.configMgr.localTimeZone, 'Europe/Zurich')
        self.assertEqual(self.configMgr.dateTimeFormat, 'DD/MM/YY HH:mm')
        self.assertEqual(self.configMgr.dateOnlyFormat, 'DD/MM/YY')
        if os.name == 'posix':
            self.assertEqual(self.configMgr.dataPath, '/sdcard/CryptoPricerData')
            self.assertEqual(self.configMgr.appSize, 'Half')
            self.assertEqual(self.configMgr.histoListItemHeight, '90')
        else:
            self.assertEqual(self.configMgr.dataPath, 'c:\\temp')
            self.assertEqual(self.configMgr.appSize, 'Full')
            self.assertEqual(self.configMgr.histoListItemHeight, '35')
        self.assertEqual(self.configMgr.loadAtStartPathFilename, '')
        self.assertEqual(self.configMgr.histoListVisibleSize, '3')
        self.assertEqual(self.configMgr.appSizeHalfProportion, '0.62')
        self.assertEqual(self.configMgr.referenceCurrency, 'USD')

    def testConfigurationManagerInstanciationOneMissingKey(self):
        """Config file with one key deleted: that key's default is restored."""
        #removing second line in config file
        with open(self.filePath, 'r') as configFile:
            lines = configFile.readlines()
        with open(self.filePath, 'w') as configFile:
            # first line contains [General] section name !
            configFile.write(''.join(lines[0:1] + lines[2:]))
        self.configMgr = ConfigurationManager(self.filePath)
        self.assertEqual(self.configMgr.localTimeZone, 'Europe/Zurich')
        self.assertEqual(self.configMgr.dateTimeFormat, 'DD/MM/YY HH:mm')
        self.assertEqual(self.configMgr.dateOnlyFormat, 'DD/MM/YY')
        if os.name == 'posix':
            self.assertEqual(self.configMgr.dataPath, '/sdcard/CryptoPricerData')
        else:
            self.assertEqual(self.configMgr.dataPath, 'c:\\temp')
        self.assertEqual(self.configMgr.loadAtStartPathFilename, '')
        self.assertEqual(self.configMgr.histoListVisibleSize, '3')
        self.assertEqual(self.configMgr.appSizeHalfProportion, '0.62')
        self.assertEqual(self.configMgr.referenceCurrency, 'USD')
if __name__ == '__main__':
    # unittest.main() is deliberately bypassed: run one test directly.
    #unittest.main()
    tst = TestConfigurationManager()
    tst.setUp()
    tst.testConfigurationManagerInstanciationEmptyConfigFile()
|
[
"[email protected]"
] | |
066554d6b1f8b0a91a6ca227d27ae0ea8cfbd211
|
9a1b033774e371bd6442048f43e862dfb71abed7
|
/Comprehensions/Lab/Flattening_Matrix.py
|
57887545e4a87d7ca53a75baebc41865c380cf13
|
[] |
no_license
|
mialskywalker/PythonAdvanced
|
ea4fde32ba201f6999cd0d59d1a95f00fb5f674b
|
c74ad063154c94b247aaf73b7104df9c6033b1a5
|
refs/heads/master
| 2023-03-09T00:13:28.471328 | 2021-02-24T15:21:11 | 2021-02-24T15:21:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 139 |
py
|
# Read an n-row matrix of comma-space separated ints from stdin and
# print it flattened row-major as a single list.
n = int(input())
matrix = [list(map(int, input().split(', '))) for _ in range(n)]
flat = []
for row in matrix:
    flat.extend(row)
print(flat)
|
[
"[email protected]"
] | |
a25b6496f12166e06a56177364a4c1ecfbc4a31f
|
ffd5e689f88c49ab7af3554c22dc0c36301084fa
|
/thinking_and_testing_uniq_or_not_uniq.py
|
d7b067767d0bea11d3b61a30da4b020ac1ca2f17
|
[] |
no_license
|
ellismckenzielee/codewars-python
|
1710e6f0499047139479de386927c7dbd5f1cdf6
|
af3f4b4534798a58115d0565730aae28ce87437e
|
refs/heads/master
| 2023-08-09T13:38:40.964141 | 2023-08-01T14:45:22 | 2023-08-01T14:45:22 | 168,981,376 | 45 | 18 | null | null | null | null |
UTF-8
|
Python
| false | false | 199 |
py
|
#thinking and testing: uniq or not uniq kata
#https://www.codewars.com/kata/56d949281b5fdc7666000004
def testit(a, b):
a = list(set(a))
b = list(set(b))
a.extend(b)
return sorted(a)
|
[
"[email protected]"
] | |
211c727e8d52656e27ff87503013df32b74cd429
|
bc54edd6c2aec23ccfe36011bae16eacc1598467
|
/simscale_sdk/models/flow_rate_mean_outlet_vbc.py
|
e896a0e17e908cfccdaca58f5a681e31f2fb9e87
|
[
"MIT"
] |
permissive
|
SimScaleGmbH/simscale-python-sdk
|
4d9538d5efcadae718f12504fb2c7051bbe4b712
|
6fe410d676bf53df13c461cb0b3504278490a9bb
|
refs/heads/master
| 2023-08-17T03:30:50.891887 | 2023-08-14T08:09:36 | 2023-08-14T08:09:36 | 331,949,105 | 17 | 5 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,305 |
py
|
# coding: utf-8
"""
SimScale API
The version of the OpenAPI document: 0.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from simscale_sdk.configuration import Configuration
class FlowRateMeanOutletVBC(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str',
'flow_rate': 'OneOfFlowRateMeanOutletVBCFlowRate'
}
attribute_map = {
'type': 'type',
'flow_rate': 'flowRate'
}
def __init__(self, type='FLOW_RATE_MEAN_OUTLET_VELOCITY', flow_rate=None, local_vars_configuration=None): # noqa: E501
"""FlowRateMeanOutletVBC - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._type = None
self._flow_rate = None
self.discriminator = None
self.type = type
if flow_rate is not None:
self.flow_rate = flow_rate
@property
def type(self):
"""Gets the type of this FlowRateMeanOutletVBC. # noqa: E501
Schema name: FlowRateMeanOutletVBC # noqa: E501
:return: The type of this FlowRateMeanOutletVBC. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this FlowRateMeanOutletVBC.
Schema name: FlowRateMeanOutletVBC # noqa: E501
:param type: The type of this FlowRateMeanOutletVBC. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
@property
def flow_rate(self):
"""Gets the flow_rate of this FlowRateMeanOutletVBC. # noqa: E501
:return: The flow_rate of this FlowRateMeanOutletVBC. # noqa: E501
:rtype: OneOfFlowRateMeanOutletVBCFlowRate
"""
return self._flow_rate
@flow_rate.setter
def flow_rate(self, flow_rate):
"""Sets the flow_rate of this FlowRateMeanOutletVBC.
:param flow_rate: The flow_rate of this FlowRateMeanOutletVBC. # noqa: E501
:type: OneOfFlowRateMeanOutletVBCFlowRate
"""
self._flow_rate = flow_rate
def to_dict(self):
    """Returns the model properties as a dict"""
    result = {}

    # Serialize every declared attribute recursively: nested models expose
    # to_dict(), lists and dicts are converted element-by-element, and
    # plain values pass through unchanged.
    for attr, _ in six.iteritems(self.openapi_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = list(map(
                lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                value
            ))
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = dict(map(
                lambda item: (item[0], item[1].to_dict())
                if hasattr(item[1], "to_dict") else item,
                value.items()
            ))
        else:
            result[attr] = value

    return result
def to_str(self):
    """Returns the string representation of the model"""
    # pprint gives a stable, readable rendering of the nested dict.
    return pprint.pformat(self.to_dict())
def __repr__(self):
    """For `print` and `pprint`"""
    return self.to_str()
def __eq__(self, other):
    """Returns true if both objects are equal"""
    if not isinstance(other, FlowRateMeanOutletVBC):
        return False

    # Structural equality: compare the fully serialized representations.
    return self.to_dict() == other.to_dict()
def __ne__(self, other):
    """Returns true if both objects are not equal"""
    # Mirror of __eq__ (generated code predates relying on the default
    # __ne__ derivation).
    if not isinstance(other, FlowRateMeanOutletVBC):
        return True

    return self.to_dict() != other.to_dict()
|
[
"simscale"
] |
simscale
|
e5bc633da5a7a8bc72a5896a2acd1b80d49ca5f1
|
91fb65972d69ca25ddd892b9d5373919ee518ee7
|
/python-training-courses/pfc-sample-programs/func_example_002_a_with_its_use.py
|
fa56a44bfd53a1e492b260df8427a8512dba5dd3
|
[] |
no_license
|
zeppertrek/my-python-sandpit
|
c36b78e7b3118133c215468e0a387a987d2e62a9
|
c04177b276e6f784f94d4db0481fcd2ee0048265
|
refs/heads/master
| 2022-12-12T00:27:37.338001 | 2020-11-08T08:56:33 | 2020-11-08T08:56:33 | 141,911,099 | 0 | 0 | null | 2022-12-08T04:09:28 | 2018-07-22T16:12:55 |
Python
|
UTF-8
|
Python
| false | false | 867 |
py
|
# func_example_002_a_with_its_use.py
# refer to func_example_002_without_its_use.py
#
# Passing a variable number of arguments to the function.
def add_numbers(*my_numbers):
    """Return the sum of all positional arguments (0 when called with none)."""
    # Use the built-in sum() directly: the original version accumulated
    # into a local named `sum`, shadowing the built-in.
    return sum(my_numbers)

num01, num02, num03, num04, num05, num06, num07, num08, num09, num10 = 1, 2, 3, 4, 5, 6, 7, 8, 9, 10

# Calculate and print the sum of the first 5 numbers.
sum1 = add_numbers(num01, num02, num03, num04, num05)
print("Sum of the first 5 numbers is - ", sum1)

# Calculate and print the sum of the numbers from 6 to 10.
sum2 = add_numbers(num06, num07, num08, num09, num10)
print("Sum of the numbers from 6 to 10 - ", sum2)

# Calculate and print the sum of the numbers in odd positions.
sum3 = add_numbers(num01, num03, num05, num07, num09)
print("Sum of the numbers in odd positions - ", sum3)
|
[
"[email protected]"
] | |
bf4ab1b554798c38423c6b25ffc2e3404c7b9980
|
eea1be5dbac7fa10167eae167eb6712e3937f53a
|
/siteuser/utils/models.py
|
607ac2c9399c5f052d881715a70bed9367b4b671
|
[] |
no_license
|
chidimo/Voidcoin
|
40962e46661b2a7106bd8e60d0830c3b9629b8fa
|
227c160dfa671818522781aab013f2d1fcb098a9
|
refs/heads/develop
| 2022-12-09T17:40:26.294425 | 2019-07-04T08:32:20 | 2019-07-04T08:32:20 | 135,197,447 | 5 | 2 | null | 2022-12-08T02:08:45 | 2018-05-28T18:45:19 |
Python
|
UTF-8
|
Python
| false | false | 441 |
py
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from .fields import AutoCreatedField, AutoLastModifiedField
class TimeStampedModel(models.Model):
    """
    An abstract base class model that provides self-updating
    ``created`` and ``modified`` fields.
    """
    # Set once, when the row is first saved.
    created = AutoCreatedField(_('created'))
    # Refreshed on every save.
    modified = AutoLastModifiedField(_('modified'))

    class Meta:
        # No table for this model; subclasses inherit both fields.
        abstract = True
|
[
"[email protected]"
] | |
c96baa39b9776108de52e68614ff8a956ef413f8
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/recommend/JEPOO/model/mel.py
|
3581eddba520c6e2403c416cad136096a7b09a35
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616 | 2023-07-17T11:43:18 | 2023-07-17T11:43:18 | 417,393,380 | 301 | 92 |
Apache-2.0
| 2023-05-17T11:22:28 | 2021-10-15T06:38:37 |
Python
|
UTF-8
|
Python
| false | false | 3,514 |
py
|
# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.nn as nn
import mindspore as ms
from mindspore import ops
from librosa.filters import mel
from librosa.util import pad_center
from scipy.signal import get_window
class STFT(nn.Cell):
    """adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft

    Short-time Fourier transform implemented as a 1-D convolution whose
    kernels are the (windowed) real/imaginary Fourier basis vectors.
    """
    def __init__(self, filter_length, hop_length, win_length=None, window='hann'):
        super(STFT, self).__init__()
        if win_length is None:
            win_length = filter_length
        self.filter_length = filter_length
        self.hop_length = hop_length
        self.win_length = win_length
        self.window = window
        self.forward_transform = None
        # FFT of the identity matrix gives the full Fourier basis, one row
        # per frequency bin.
        fourier_basis = np.fft.fft(np.eye(self.filter_length))

        # Only the non-redundant half-spectrum is needed for real input.
        self.cutoff = int((self.filter_length / 2 + 1))
        # Stack real and imaginary parts so a single conv yields both.
        fourier_basis = np.vstack([np.real(fourier_basis[:self.cutoff, :]),
                                   np.imag(fourier_basis[:self.cutoff, :])])
        self.forward_basis = ms.Tensor(fourier_basis[:, None, :], ms.float32)

        if window is not None:
            assert filter_length >= win_length
            # Center-pad the analysis window up to filter_length, then apply
            # it to every basis row.
            fft_window = get_window(window, win_length, fftbins=True)
            fft_window = pad_center(fft_window, filter_length)
            fft_window = ms.Tensor(fft_window, ms.float32)
            self.forward_basis *= fft_window

    def construct(self, input_data):
        # input_data: presumably (batch, samples) -- TODO confirm; add a
        # channel dim for Conv1d.
        input_data = ops.expand_dims(input_data, 1)
        # Zero-pad filter_length/2 on both sides so frames are centered on
        # their hop positions.
        input_data = ops.Pad(((0, 0), (0, 0), (int(self.filter_length / 2), int(self.filter_length / 2))))(input_data)
        # NOTE(review): the Conv1d cell is rebuilt on every call; hoisting
        # it into __init__ would avoid repeated construction -- confirm.
        forward_transform = nn.Conv1d(1, self.cutoff * 2, self.win_length, stride=self.hop_length, pad_mode='valid',
                                      weight_init=self.forward_basis)(input_data)
        # First cutoff channels hold real parts, the rest imaginary parts.
        real_part = forward_transform[:, :self.cutoff, :]
        imag_part = forward_transform[:, self.cutoff:, :]

        magnitude = ops.sqrt(real_part**2 + imag_part**2)
        phase = ops.atan2(imag_part, real_part)

        return magnitude, phase
class MelSpectrogram(nn.Cell):
    """Log-mel spectrogram: STFT magnitudes projected onto a mel filterbank."""
    def __init__(self, n_mels, sample_rate, filter_length, hop_length,
                 win_length=None, mel_fmin=0.0, mel_fmax=None):
        super(MelSpectrogram, self).__init__()
        self.stft = STFT(filter_length, hop_length, win_length)
        # librosa mel filterbank; htk=True selects the HTK mel scale.
        mel_basis = mel(sample_rate, filter_length, n_mels, mel_fmin, mel_fmax, htk=True)
        self.mel_basis = ms.Tensor(mel_basis, ms.float32)
        # Floor applied before log() to avoid log(0).
        self.min_bound = ms.Tensor(1e-5, ms.float32)

    def construct(self, y):
        magnitudes, _ = self.stft(y)  # phase is discarded
        mel_output = ops.matmul(self.mel_basis, magnitudes)
        mel_output = ops.clip_by_value(mel_output, clip_value_min=self.min_bound)
        mel_output = ops.log(mel_output)  # natural log
        return mel_output
|
[
"[email protected]"
] | |
408eefcd98a92dd07cb9fa4f21371235a339bf84
|
d032bc0c01a7cd598481644e22043de8df4c71c4
|
/consultant_app/versatilimagefield.py
|
90f5c5be364e762bcd094b0cd36c0169a6108c18
|
[] |
no_license
|
amrit-kumar/project-for-engineering
|
eb5f410cd2f0a271633fb6c24132a36e6215f0e0
|
7e975866e540ab4625e735009fdba971df74e393
|
refs/heads/master
| 2020-12-03T01:49:02.429186 | 2017-06-30T09:09:46 | 2017-06-30T09:09:46 | 95,863,800 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,467 |
py
|
import io
from io import StringIO
from PIL import Image
# from StringIO import StringIO
from .views import *
from versatileimagefield.datastructures import SizedImage
from django.utils.datastructures import *
from versatileimagefield.fields import VersatileImageField
from versatileimagefield.registry import versatileimagefield_registry
# Unregistering the 'crop' Sizer
# versatileimagefield_registry.unregister_sizer('crop')
# Registering a custom 'crop' Sizer
# versatileimagefield_registry.register_sizer('crop', SomeCustomSizedImageCls)
class ThumbnailImage(SizedImage):
    """
    Sizes an image down to fit within a bounding box

    See the `process_image()` method for more information
    """

    filename_key = 'thumbnail'

    def process_image(self, image, image_format, save_kwargs,
                      width=400, height=400):
        """
        Returns a BytesIO instance of `image` that will fit
        within a bounding box as specified by `width`x`height`
        """
        # (docstring fixed: the method returns io.BytesIO, not StringIO)
        imagefile = io.BytesIO()
        # Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
        # same resampling filter under its current name.
        image.thumbnail(
            (width, height),
            Image.LANCZOS
        )
        image.save(
            imagefile,
            **save_kwargs
        )
        return imagefile
# Registering the ThumbnailSizer to be available on VersatileImageField
# via the `thumbnail` attribute.
# The stock 'thumbnail' sizer is unregistered first so our class can be
# registered under the same key.
versatileimagefield_registry.unregister_sizer('thumbnail')
versatileimagefield_registry.register_sizer('thumbnail', ThumbnailImage)
|
[
"[email protected]"
] | |
0c8153f27fb67a668ee75237e7cd43c5388cfa62
|
92773cbdd70812f45e1b9b97bbc024aee4b4b18d
|
/Chapter7. Iteration/loop.py
|
8765dd5a07dd4c28348fbcf1c1cc68b803ce3fd9
|
[] |
no_license
|
Abu-Kaisar/Python3Programming--Coursera
|
e46edc86294ac76109a89b2cb02e8b6af178dcce
|
e1b93899c4f507b9b32091283951e761e182b97a
|
refs/heads/master
| 2022-11-21T07:40:28.985698 | 2020-07-19T04:07:51 | 2020-07-19T04:07:51 | 280,787,750 | 0 | 0 | null | 2020-07-19T03:58:52 | 2020-07-19T03:58:52 | null |
UTF-8
|
Python
| false | false | 476 |
py
|
# mylist = ["yo","mujju","salman","thuss"]
# for i in mylist:
#     print("Hi", i ,"Dawat hai kheenchny aao")

# mylist = "dgsadafdua"
# for char in mylist:
#     print("Hi", char )

s = "python rocks"
for ch in s:
    print("HELLO")  # printed once per character of s

import turtle              # set up the screen and a turtle
wn = turtle.Screen()
mujju = turtle.Turtle()

# Draw a square, changing color for each side.
for aColor in ["yellow", "red", "purple", "blue"]:
    # BUG FIX: was `alex.color(aColor)` -- `alex` is never defined here
    # (copied from the textbook example) and raised a NameError.
    mujju.color(aColor)
    mujju.forward(50)
    mujju.left(90)

wn.exitonclick()
|
[
"[email protected]"
] | |
d533929137010a828e0c1fe70530eb874680c0e9
|
ca5fc43049f94a794d90a561fd8126f02b603599
|
/i3py/core/features/options.py
|
cdcfff349c6f5457d6e1affa7b82a7ef3f760806
|
[
"BSD-3-Clause"
] |
permissive
|
Exopy/i3py
|
32d9ee343d21d275680a2d030b660a80960e99ac
|
6f004d3e2ee2b788fb4693606cc4092147655ce1
|
refs/heads/master
| 2022-02-18T21:51:16.423188 | 2019-08-28T23:51:02 | 2019-08-28T23:51:02 | 63,874,745 | 1 | 0 |
BSD-3-Clause
| 2018-05-23T09:45:26 | 2016-07-21T14:07:58 |
Python
|
UTF-8
|
Python
| false | false | 1,960 |
py
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2016-2018 by I3py Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Feature for instrument options.
"""
from typing import Any, Union, Optional, Dict, Tuple
from .feature import Feature
from ..abstracts import AbstractOptions
class Options(Feature):
    """Feature used to access the options of an instrument.

    Options in I3py are considered static (ie related to the hardware or
    firmware) and are hence read only. Because there is no generic pattern
    in the formatting of the options, the user is expected to implement
    manually the getter function.

    Parameters
    ----------
    names : dict
        Names of the different options, as returned by this feature. Hint
        about the possible values can be provided as a type or a tuple of
        values.

    Raises
    ------
    ValueError
        If a setter is provided (options are read only) or if `names` is
        empty.

    """
    def __init__(self, getter: Any=True,
                 setter: Any=None,
                 names: Optional[Dict[str, Optional[Union[type, tuple]]]]=None,
                 extract: str='',
                 retries: int=0,
                 checks: Optional[str]=None,
                 discard: Optional[Union[Tuple[str, ...],
                                         Dict[str, Tuple[str, ...]]]]=None,
                 options: Optional[str]=None) -> None:
        if setter is not None:
            # (message fixed: it previously read "read-only can have a
            # setter", dropping the negation)
            raise ValueError('Options is read-only and cannot have a setter.')
        # `names` now defaults to None instead of a shared mutable `{}`;
        # behavior is unchanged since both empty cases raise here.
        if not names:
            raise ValueError('No names were provided for Options')

        Feature.__init__(self, getter, None, extract, retries,
                         checks, discard, options)

        self.creation_kwargs['names'] = names
        self.names = names
self.names = names
AbstractOptions.register(Options)
|
[
"[email protected]"
] | |
220d417e2a532c64b69fe77a6dbb261b6f5561fc
|
a360a22af5e0b385db438b1324564ef317ff2f38
|
/bancor_module/migrations/0007_bancor_tsymbol.py
|
3b1d42b27ed85223c4faa9f0e0b7bf186e2d5cc0
|
[] |
no_license
|
ogglin/exchange_comparison
|
3eb2d849e731f94e67509e4ce9130e33bb37bbaf
|
f3feae64aff26b574f7ecd24e6f7aff7bb95ec65
|
refs/heads/master
| 2023-04-26T07:45:06.229584 | 2021-05-31T18:52:29 | 2021-05-31T18:52:29 | 287,036,194 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 408 |
py
|
# Generated by Django 3.1 on 2021-02-20 09:59
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the nullable `tsymbol` char column
    # to the `bancor` model.

    dependencies = [
        ('bancor_module', '0006_bancor_volume'),
    ]

    operations = [
        migrations.AddField(
            model_name='bancor',
            name='tsymbol',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
    ]
|
[
"[email protected]"
] | |
eb2e7d1b25fa6419ac1847667a5fe019af42f82f
|
19bc1dfbf8a8b4b1bfc9d6ead51479c72602b12e
|
/tests/test_resource_analysis.py
|
fea2cef5999b8d3a27c8c10067bde6f075b62ce6
|
[
"MIT"
] |
permissive
|
procha2/caper
|
a5297d6cfe7cf649ac5ac3544558f513b427713d
|
e9ea0baa3517178ce7b850df8a59eba6479fbcb6
|
refs/heads/master
| 2023-08-10T17:37:40.840958 | 2021-07-01T22:57:45 | 2021-07-01T22:57:45 | 300,260,107 | 0 | 0 |
MIT
| 2020-10-01T11:48:13 | 2020-10-01T11:48:12 | null |
UTF-8
|
Python
| false | false | 3,119 |
py
|
"""Test is based on a metadata JSON file generated from
running atac-seq-pipeline v1.8.0 with the following input JSON.
gs://encode-pipeline-test-samples/encode-atac-seq-pipeline/ENCSR356KRQ_subsampled_caper.json
"""
import pytest
from caper.resource_analysis import LinearResourceAnalysis, ResourceAnalysis
def test_resource_analysis_abstract_class(gcp_res_analysis_metadata):
    """ResourceAnalysis is abstract and must not be instantiable."""
    with pytest.raises(TypeError):
        # abstract base-class
        ResourceAnalysis()
def test_resource_analysis_analyze_task(gcp_res_analysis_metadata):
    """Per-task analysis of `atac.align`: input-file sizes vs. resource
    metrics, with and without reduction, and with a wildcard task name."""
    analysis = LinearResourceAnalysis()
    analysis.collect_resource_data([gcp_res_analysis_metadata])

    # Explicit task, single input var, no reduction, restricted targets.
    result_align1 = analysis.analyze_task(
        'atac.align',
        in_file_vars=['fastqs_R1'],
        reduce_in_file_vars=None,
        target_resources=['stats.max.mem', 'stats.mean.cpu_pct'],
    )
    assert result_align1['x'] == {'fastqs_R1': [15643136, 18963919]}
    assert 'stats.mean.cpu_pct' in result_align1['y']
    assert 'stats.max.mem' in result_align1['y']
    # Not in target_resources, so it must be excluded.
    assert 'stats.max.disk' not in result_align1['y']
    assert list(result_align1['y'].keys()) == list(result_align1['coeffs'].keys())
    # Linear-fit coefficients are (slope, intercept); pinned with approx
    # because the fit is floating point.
    assert result_align1['coeffs']['stats.mean.cpu_pct'][0][0] == pytest.approx(
        1.6844513715565233e-06
    )
    assert result_align1['coeffs']['stats.mean.cpu_pct'][1] == pytest.approx(
        42.28561239506905
    )
    assert result_align1['coeffs']['stats.max.mem'][0][0] == pytest.approx(
        48.91222341236991
    )
    assert result_align1['coeffs']['stats.max.mem'][1] == pytest.approx(
        124314029.09791338
    )

    # Reduction via sum(): the x key reflects the reducer name.
    result_align2 = analysis.analyze_task(
        'atac.align', in_file_vars=['fastqs_R2'], reduce_in_file_vars=sum
    )
    assert result_align2['x'] == {'sum(fastqs_R2)': [16495088, 20184668]}
    assert 'stats.mean.cpu_pct' not in result_align2['y']
    assert 'stats.max.mem' in result_align2['y']
    assert 'stats.max.disk' in result_align2['y']
    assert list(result_align2['y'].keys()) == list(result_align2['coeffs'].keys())

    # Wildcard task name matches all align shards; default in_file_vars.
    result_align_star = analysis.analyze_task('atac.align*', reduce_in_file_vars=max)
    assert result_align_star['x'] == {
        'max(chrsz,fastqs_R1,fastqs_R2,idx_tar,tmp_fastqs)': [
            32138224,
            39148587,
            3749246230,
            3749246230,
        ]
    }
def test_resource_analysis_analyze(gcp_res_analysis_metadata):
    """Test method analyze() which analyze all tasks defined in in_file_vars.
    """
    analysis = LinearResourceAnalysis()
    analysis.collect_resource_data([gcp_res_analysis_metadata])

    # Restricting in_file_vars limits the analysis to the two listed tasks.
    result = analysis.analyze(
        in_file_vars={
            'atac.align*': ['fastqs_R1', 'fastqs_R2'],
            'atac.filter*': ['bam'],
        }
    )
    assert len(result) == 2
    # Default reducer sums the listed input-file sizes per shard.
    assert result['atac.align*']['x'] == {
        'sum(fastqs_R1,fastqs_R2)': [32138224, 39148587, 32138224, 39148587]
    }
    assert result['atac.filter*']['x'] == {
        'sum(bam)': [61315022, 76789196, 61315022, 76789196]
    }

    # Without in_file_vars every collected task is analyzed.
    result_all = analysis.analyze()
    # 38 tasks in total
    assert len(result_all) == 38
|
[
"[email protected]"
] | |
b0e9d034f38256d73cecf9f4255f71cbf66f2f94
|
f3bd271bf00325881fb5b2533b9ef7f7448a75ec
|
/classes/_eigensolver1.py
|
b6ce82292c594e6f6578a8c6eb7978f16397aebd
|
[] |
no_license
|
obaica/xcp2k
|
7f99fc9d494859e16b9b0ea8e217b0493f4b2f59
|
6e15c2c95658f545102595dc1783f5e03a9e6916
|
refs/heads/master
| 2020-07-15T17:27:43.378835 | 2019-02-11T16:32:24 | 2019-02-11T16:32:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 468 |
py
|
from xcp2k.inputsection import InputSection
class _eigensolver1(InputSection):
    # Auto-generated wrapper for the CP2K "EIGENSOLVER" input section.
    def __init__(self):
        InputSection.__init__(self)
        # One attribute per CP2K keyword (see _keywords); None means
        # "not set, omit from the generated input file".
        self.N = None
        self.N_loop = None
        self.Diag_method = None
        self.Eigenvalues = None
        self.Init_method = None
        self._name = "EIGENSOLVER"
        self._keywords = {'Diag_method': 'DIAG_METHOD', 'N_loop': 'N_LOOP', 'Init_method': 'INIT_METHOD', 'Eigenvalues': 'EIGENVALUES', 'N': 'N'}
|
[
"[email protected]"
] | |
97ce640d8f9e55d51546c4a93f3597a7132318cf
|
33a747246dab38960c25520d5232d5a37dfe2a01
|
/starbucks/address_to_gecoords.py
|
d842315ca462c234888776d81feaa308e92f2f34
|
[] |
no_license
|
Yxiaokuan/spider
|
6a79a950d170ea20dae13001697b9c214872f345
|
e51a398c7fdee1b1814c50c5a3121ce9a193e302
|
refs/heads/master
| 2022-04-02T16:01:18.104056 | 2020-02-11T03:49:44 | 2020-02-11T03:49:44 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,951 |
py
|
'''
@author:KongWeiKun
@file: address_to_gecoords.py
@time: 18-1-2 下午5:55
@contact: [email protected]
'''
import csv
import json
import random
import re
import requests
import time
'''
地址转经纬度
'''
from urllib.request import quote #URL编码
def getLngLat(url, timeOutRetry=5):
    """GET `url` and return the response body as text, retrying up to
    `timeOutRetry` times on any request error.

    Returns None when every attempt has failed.
    """
    try:
        response = requests.get(url)
        return response.text
    except Exception:
        if timeOutRetry > 0:
            # BUG FIX: the original called itself but discarded the result
            # and fell through, so a successful retry still returned None
            # (and the failure message printed on every exception).
            return getLngLat(url, timeOutRetry=timeOutRetry - 1)
        print("真的失败了")
def write_to_file(content):
    """Append `content` to the result file as one JSON-encoded line.

    ensure_ascii=False keeps Chinese characters readable in the output.
    """
    with open('./resources/starbucks_result.txt', 'a', encoding='utf-8') as f:
        f.write(json.dumps(content, ensure_ascii=False) + '\n')
    # (redundant explicit f.close() removed: `with` already closes the file)
def pack_url(address):
    """Build the Baidu geocoder v2 request URL for `address`.

    The address is URL-encoded; output format, API key and JSONP callback
    are appended as query parameters.
    """
    ak = 'LVsGVvCzooeqcHGM1lnNzvTTSba7gtvU'
    base_url = 'http://api.map.baidu.com/geocoder/v2/?address='
    output = 'json'
    callback = 'showLocation'
    # BUG FIX: the original concatenated "&callback" + callback without the
    # '=', producing "&callbackshowLocation". (Unused spare key `aks`
    # removed.)
    url = base_url + quote(address) + "&output=" + output + "&ak=" + ak + "&callback=" + callback
    return url
def readCsv(filename):
    """Return a csv.reader over `filename`."""
    # NOTE(review): the file handle is never closed explicitly -- it stays
    # open as long as the reader is alive. Acceptable for this one-shot
    # script, but a context manager in the caller would be cleaner.
    reader = csv.reader(open(filename))
    return reader
def main():
    """Geocode every address in the Starbucks CSV and append the
    lng,lat pairs to the result file."""
    starbucks = './resources/starbucks.csv'
    reader = readCsv(starbucks)
    for row in reader:
        address = row[0]
        url = pack_url(address)
        gecoord = getLngLat(url)
        print(gecoord)
        # Extract "lng"/"lat" from the JSONP response with a regex rather
        # than parsing the JSON body.
        pattern = re.compile('"lng":(.*?),"lat":(.*?)}')
        lngLat = re.findall(pattern, gecoord)
        if lngLat:
            for ll in lngLat:
                print(ll[0])
                print('写入文件%s%s' % ll)
                write_to_file(','.join(ll))
        # Random 0-5 s pause to stay under the API rate limit.
        time.sleep(random.random() * 5)
if __name__ == '__main__':
    # main()
    # (translated notes) Use time.localtime() to convert a timestamp to
    # local time and time.strftime() to re-format it if needed.
    start = time.time()
    main()
    end = time.time()
    # Report total elapsed wall-clock time (seconds).
    print("转换完成,共消耗%s" % (end - start))
|
[
"[email protected]"
] | |
6e1b0e5aa34daaa437c9eee45fc76dbcb0dc1c5a
|
2a5d93182aecc251462c9d3844e7c3e28013013e
|
/mysite/chat/tests.py
|
114198533bdb4a28861f61e98807da39f4a8fde4
|
[] |
no_license
|
burbaljaka/websocket_chat
|
20acc9908cd7b0e122a3b96252208defdc7460d9
|
ca6883987befb6bfad5973156b01bfe876b1414f
|
refs/heads/master
| 2021-05-26T22:58:31.151913 | 2020-04-08T19:37:16 | 2020-04-08T19:37:16 | 254,182,479 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,906 |
py
|
from channels.testing import ChannelsLiveServerTestCase
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.wait import WebDriverWait
import dill
class ChatTests(ChannelsLiveServerTestCase):
    """End-to-end chat tests: drive two browser windows with Selenium and
    verify messages are delivered only to peers in the same room."""

    serve_static = True  # emulate StaticLiveServerTestCase

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        try:
            # NOTE: Requires "chromedriver" binary to be installed in $PATH
            cls.driver = webdriver.Chrome('C:\chromedriver.exe')
        except:
            super().tearDownClass()
            raise

    @classmethod
    def tearDownClass(cls):
        cls.driver.quit()
        super().tearDownClass()

    def test_chat_message_posted_then_seen_by_everyone_in_same_room(self):
        try:
            self._enter_chat_room('room_1')

            self._open_new_window()
            self._enter_chat_room('room_1')

            self._switch_to_window(0)
            self._post_message('hello')
            WebDriverWait(self.driver, 2).until(
                lambda _: 'hello' in self._chat_log_value,
                'Message was not received by window 1 from window 1')
            self._switch_to_window(1)
            WebDriverWait(self.driver, 2).until(
                lambda _: 'hello' in self._chat_log_value,
                'Message was not received by window 2 from window 1')
        finally:
            self._close_all_new_windows()

    def test_when_chat_message_posted_then_not_seen_by_anyone_in_different_room(self):
        try:
            self._enter_chat_room('room_1')

            self._open_new_window()
            self._enter_chat_room('room_2')

            self._switch_to_window(0)
            self._post_message('hello')
            WebDriverWait(self.driver, 2).until(
                lambda _: 'hello' in self._chat_log_value,
                'Message was not received by window 1 from window 1')

            self._switch_to_window(1)
            self._post_message('world')
            WebDriverWait(self.driver, 2).until(
                lambda _: 'world' in self._chat_log_value,
                'Message was not received by window 2 from window 2')
            # A message from room_1 must never reach a room_2 window.
            self.assertTrue('hello' not in self._chat_log_value,
                            'Message was improperly received by window 2 from window 1')
        finally:
            self._close_all_new_windows()

    # === Utility ===

    def _enter_chat_room(self, room_name):
        self.driver.get(self.live_server_url + '/chat/')
        ActionChains(self.driver).send_keys(room_name + '\n').perform()
        WebDriverWait(self.driver, 2).until(
            lambda _: room_name in self.driver.current_url)

    def _open_new_window(self):
        self.driver.execute_script('window.open("about:blank", "_blank");')
        # BUG FIX: was `self.driver.switch_to_.window(...)` -- WebDriver has
        # no `switch_to_` attribute, so opening a second window raised
        # AttributeError. The correct accessor is `switch_to`.
        self.driver.switch_to.window(self.driver.window_handles[-1])

    def _close_all_new_windows(self):
        while len(self.driver.window_handles) > 1:
            self.driver.switch_to.window(self.driver.window_handles[-1])
            self.driver.execute_script('window.close();')
        if len(self.driver.window_handles) == 1:
            self.driver.switch_to.window(self.driver.window_handles[0])

    def _switch_to_window(self, window_index):
        self.driver.switch_to.window(self.driver.window_handles[window_index])

    def _post_message(self, message):
        ActionChains(self.driver).send_keys(message + '\n').perform()

    @property
    def _chat_log_value(self):
        return self.driver.find_element_by_css_selector('#chat-log').get_property('value')
|
[
"[email protected]"
] | |
56d6d53b07810d51b36c4842c6af1666223e5ee3
|
d82ac08e029a340da546e6cfaf795519aca37177
|
/chapter_05_dimensionality_reduction/05_kernel_principal_component.py
|
609ded2a21b83c00d4f66aca64610875be219164
|
[] |
no_license
|
CSwithJC/PythonMachineLearning
|
4409303c3f4d4177dc509c83e240d7a589b144a0
|
0c4508861e182a8eeacd4645fb93b51b698ece0f
|
refs/heads/master
| 2021-09-04T04:28:14.608662 | 2018-01-15T20:25:36 | 2018-01-15T20:25:36 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,810 |
py
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial.distance import pdist, squareform
from scipy import exp
from scipy.linalg import eigh
from sklearn.datasets import make_moons
from sklearn.datasets import make_circles
from sklearn.decomposition import PCA
from matplotlib.ticker import FormatStrFormatter
""" Kernel PCA
Using Kernel PCA, we perform a nonlinear mapping that transforms
the data onto a higher-dimensional space and use standard PCA
in this higher-dimensional space to project the data back onto a
lower-dimensional space where the samples can be separated by a
linear classifier.
"""
def rbf_kernel_pca(X, gamma, n_components):
    """
    RBF kernel PCA implementation

    Parameters
    ----------
    X: {NumPy ndarray}, shape = [n_samples, n_features]

    gamma: float
        Tuning parameter of the RBF kernel

    n_components: int
        Number of principal components to return

    Returns
    -------
    X_pc: {NumPy ndarray}, shape = [n_samples, n_components]
        Projected dataset
    """
    # Pairwise squared Euclidean distances (condensed), expanded to a
    # square matrix.
    sq_dists = pdist(X, 'sqeuclidean')
    mat_sq_dists = squareform(sq_dists)

    # Symmetric RBF kernel matrix. FIX: use np.exp -- `scipy.exp` was a
    # deprecated alias and has been removed from SciPy.
    K = np.exp(-gamma * mat_sq_dists)

    # Double-center the kernel matrix (centering in feature space).
    N = K.shape[0]
    one_n = np.ones((N, N)) / N
    K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)

    # eigh returns eigenpairs in ascending order, so the leading
    # components sit at the end.
    eigvals, eigvecs = eigh(K)

    # Collect the top k eigenvectors (projected samples).
    # FIX: np.column_stack was fed a bare generator expression, which is
    # deprecated since NumPy 1.16 and rejected by current NumPy; pass a
    # list instead.
    X_pc = np.column_stack([eigvecs[:, -i] for i in range(1, n_components + 1)])

    return X_pc
# Examples to apply kernel pca to some datasets:
#
# 1. Half-moon shapes:
#
X, y = make_moons(n_samples=100, random_state=123)
plt.scatter(X[y == 0, 0], X[y == 0, 1],
            color='red', marker='^', alpha=0.5)
plt.scatter(X[y == 1, 0], X[y == 1, 1],
            color='blue', marker='o', alpha=0.5)
plt.show()

# Now, project the dataset via standard PCA:
scikit_pca = PCA(n_components=2)
X_spca = scikit_pca.fit_transform(X)
# Left panel: 2-D projection. Right panel: first component only, with the
# two classes offset vertically by +/-0.02 purely for visibility
# (50 samples per class).
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_spca[y == 0, 0], X_spca[y == 0, 1],
              color='red', marker='^', alpha=0.5)
ax[0].scatter(X_spca[y == 1, 0], X_spca[y == 1, 1],
              color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_spca[y == 0, 0], np.zeros((50, 1)) + 0.02,
              color='red', marker='^', alpha=0.5)
ax[1].scatter(X_spca[y == 1, 0], np.zeros((50, 1)) - 0.02,
              color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.show()

# Now, try again using our rbf_kernel_pca function
X_kpca = rbf_kernel_pca(X, gamma=15, n_components=2)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_kpca[y == 0, 0], X_kpca[y == 0, 1],
              color='red', marker='^', alpha=0.5)
ax[0].scatter(X_kpca[y == 1, 0], X_kpca[y == 1, 1],
              color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_kpca[y == 0, 0], np.zeros((50, 1)) + 0.02,
              color='red', marker='^', alpha=0.5)
ax[1].scatter(X_kpca[y == 1, 0], np.zeros((50, 1)) - 0.02,
              color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
ax[0].xaxis.set_major_formatter(FormatStrFormatter('%0.1f'))
ax[1].xaxis.set_major_formatter(FormatStrFormatter('%0.1f'))
plt.show()

# In this new plot, we see that the two classes (circles and triangles)
# are linearly well separated so that it becomes a suitable training
# dataset for linear classifiers.

#
# 2. Concentric circles:
#
X, y = make_circles(n_samples=1000, random_state=123,
                    noise=0.1, factor=0.2)
plt.scatter(X[y == 0, 0], X[y == 0, 1],
            color='red', marker='^', alpha=0.5)
plt.scatter(X[y == 1, 0], X[y == 1, 1],
            color='blue', marker='o', alpha=0.5)
plt.show()

# PCA Approach (500 samples per class, hence the zeros((500, 1)) offsets):
scikit_pca = PCA(n_components=2)
X_spca = scikit_pca.fit_transform(X)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_spca[y == 0, 0], X_spca[y == 0, 1],
              color='red', marker='^', alpha=0.5)
ax[0].scatter(X_spca[y == 1, 0], X_spca[y == 1, 1],
              color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_spca[y == 0, 0], np.zeros((500, 1)) + 0.02,
              color='red', marker='^', alpha=0.5)
ax[1].scatter(X_spca[y == 1, 0], np.zeros((500, 1)) - 0.02,
              color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.show()

# Again, standard PCA does not produce a good result.
# Now, again using our RBF Kernel PCA Implementation:
X_kpca = rbf_kernel_pca(X, gamma=15, n_components=2)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_kpca[y == 0, 0], X_kpca[y == 0, 1],
              color='red', marker='^', alpha=0.5)
ax[0].scatter(X_kpca[y == 1, 0], X_kpca[y == 1, 1],
              color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_kpca[y == 0, 0], np.zeros((500, 1)) + 0.02,
              color='red', marker='^', alpha=0.5)
ax[1].scatter(X_kpca[y == 1, 0], np.zeros((500, 1)) - 0.02,
              color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.show()

# Again, RBF Kernel PCA projected the data onto a new
# subspace where the two classes become linearly separable.
# This is seen in the new plot.
|
[
"[email protected]"
] | |
20b0ea9579d0886baeaed4bfa6287cb7360d5595
|
d7bc476f610d8b7d4abbeaf1545af4d2d827a7ef
|
/projeto/estoque/admin.py
|
62fe30997551edf3ee208581329a1eb027dab2eb
|
[] |
no_license
|
CleitonSilvaT/ControleEstoque
|
0bcaa7168f93de124b10117aefeb636c492ac776
|
dd2c573fb53edb0904393c0897917b55f3afac13
|
refs/heads/master
| 2023-01-19T09:34:49.213727 | 2020-11-26T09:27:07 | 2020-11-26T09:27:07 | 260,329,018 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 836 |
py
|
from django.contrib import admin
from .models import EstoqueEntrada
from .models import EstoqueSaida
from .models import EstoqueItens
# Register your models here.
# admin.site.register(Estoque)
# admin.site.register(EstoqueItens)
class EstoqueItensInLine(admin.TabularInline):
    # Inline editor so stock items can be edited on the parent
    # entrada/saida admin pages; extra=0 shows no blank rows by default.
    model = EstoqueItens
    extra = 0
@admin.register(EstoqueEntrada)
class EstoqueEntradaAdmin(admin.ModelAdmin):
    # Admin for stock *entries*; items are edited inline.
    inlines = (EstoqueItensInLine,)
    list_display = ('__str__', 'nf', 'funcionario',)
    search_fields = ('nf',)          # search by invoice number
    list_filter = ('funcionario',)
    date_hierarchy = 'created'       # drill-down by creation date
@admin.register(EstoqueSaida)
class EstoqueSaidaAdmin(admin.ModelAdmin):
    # Admin for stock *withdrawals*; intentionally mirrors
    # EstoqueEntradaAdmin's configuration.
    inlines = (EstoqueItensInLine,)
    list_display = ('__str__', 'nf', 'funcionario',)
    search_fields = ('nf',)
    list_filter = ('funcionario',)
    date_hierarchy = 'created'
|
[
"[email protected]"
] | |
b3391ed1ddf7e3ff1d1e526f45e1f80873ff81b5
|
8e24e8bba2dd476f9fe612226d24891ef81429b7
|
/geeksforgeeks/python/python_all/39_11.py
|
2ab73f925b4bb8f6b56c6ea625257db987241936
|
[] |
no_license
|
qmnguyenw/python_py4e
|
fb56c6dc91c49149031a11ca52c9037dc80d5dcf
|
84f37412bd43a3b357a17df9ff8811eba16bba6e
|
refs/heads/master
| 2023-06-01T07:58:13.996965 | 2021-06-15T08:39:26 | 2021-06-15T08:39:26 | 349,059,725 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,960 |
py
|
Python – Replace words from Dictionary

Given a string, replace its words using a lookup dictionary.
Given String, replace it’s words from lookup dictionary.
> **Input** : test_str = ‘geekforgeeks best for geeks’, repl_dict = {“geeks”
> : “all CS aspirants”}
> **Output** : geekforgeeks best for all CS aspirants
> **Explanation** : “geeks” word is replaced by lookup value.
>
> **Input** : test_str = ‘geekforgeeks best for geeks’, repl_dict = {“good” :
> “all CS aspirants”}
> **Output** : geekforgeeks best for geeks
> **Explanation** : No lookup value, unchanged result.
**Method #1 : Using split() + get() + join()**
Here we first split the string using split(), then look up each word with
get(); matched words are replaced, and the result is joined back into a string using join().
## Python3
__
__
__
__
__
__
__
# Python3 code to demonstrate working of
# Replace words from Dictionary
# Using split() + join() + get()
# initializing string
test_str = 'geekforgeeks best for geeks'
# printing original string
print("The original string is : " + str(test_str))
# lookup Dictionary
lookp_dict = {"best" : "good and better", "geeks" : "all CS
aspirants"}
# performing split()
temp = test_str.split()
res = []
for wrd in temp:
# searching from lookp_dict
res.append(lookp_dict.get(wrd, wrd))
res = ' '.join(res)
# printing result
print("Replaced Strings : " + str(res))
---
__
__
**Output**
The original string is : geekforgeeks best for geeks
Replaced Strings : geekforgeeks good and better for all CS aspirants
**Method #2 : Using list comprehension + join()**
Similar to the above method, the only difference being that this is a one-liner
rather than 3-4 steps on separate lines.
## Python3
__
__
__
__
__
__
__
# Python3 code to demonstrate working of
# Replace words from Dictionary
# Using list comprehension + join()
# initializing string
test_str = 'geekforgeeks best for geeks'
# printing original string
print("The original string is : " + str(test_str))
# lookup Dictionary
lookp_dict = {"best" : "good and better", "geeks" : "all CS
aspirants"}
# one-liner to solve problem
res = " ".join(lookp_dict.get(ele, ele) for ele in
test_str.split())
# printing result
print("Replaced Strings : " + str(res))
---
__
__
**Output**
The original string is : geekforgeeks best for geeks
Replaced Strings : geekforgeeks good and better for all CS aspirants
Attention geek! Strengthen your foundations with the **Python Programming
Foundation** Course and learn the basics.
To begin with, your interview preparations Enhance your Data Structures
concepts with the **Python DS** Course.
My Personal Notes _arrow_drop_up_
Save
|
[
"[email protected]"
] | |
6c6e12fa925c57be0fddd4074aa52393dca4eb69
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02787/s069996134.py
|
5242a0c98d9fb0fe819f9bb08a60a75b923ddb4e
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 634 |
py
|
import sys
read = sys.stdin.read
readline = sys.stdin.readline
readlines = sys.stdin.readlines
sys.setrecursionlimit(10 ** 9)
INF = 1 << 60
MOD = 1000000007


def main():
    """Read H, N and N (damage, cost) pairs from stdin; print the minimum
    total cost needed to deal at least H damage (unbounded-knapsack DP)."""
    nums = list(map(int, read().split()))
    H, N = nums[0], nums[1]
    spells = list(zip(nums[2::2], nums[3::2]))
    # dp[i][j]: minimum cost to deal j damage using only the first i spells.
    # Dealing 0 damage is free; every other cell starts unreachable.
    dp = [[INF] * (H + 1) for _ in range(N + 1)]
    for row in dp:
        row[0] = 0
    for i, (damage, cost) in enumerate(spells):
        prev, cur = dp[i], dp[i + 1]
        for j in range(H + 1):
            # Either never cast spell i, or cast it once more on top of the
            # best way to cover the remaining (clamped-at-zero) damage.
            cur[j] = min(prev[j], cur[max(j - damage, 0)] + cost)
    print(dp[N][H])
    return


if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
64bacc9506ee8073571d4e6a9868841577b71d60
|
1f528631683f9d96c09dd4a4af243dd7213a4bd7
|
/thumbnail.py
|
6dffb24265b8649c5dce6c5d5183c7387998cc12
|
[] |
no_license
|
wd5/artcontactmeru
|
4f8ede2a3f072c11ac0c8c8a74e0960382824537
|
2057f1b00db8eb808e0b4fb91a95fbe0a3e4f223
|
refs/heads/master
| 2020-12-30T14:55:54.067776 | 2009-11-12T07:30:00 | 2009-11-12T07:30:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,255 |
py
|
# -*- coding: utf-8 -*-
import os, Image
# Subdirectory (next to the original image) where resized copies are cached.
THUMBNAILS = 'thumbnails'
# Size-suffix codes accepted in the filter argument, e.g. "100w", "80h", "64x".
SCALE_WIDTH = 'w'
SCALE_HEIGHT = 'h'
SCALE_BOTH = 'x'
def scale(max_x, pair):
    """Scale pair = (x, y) so that x becomes max_x, preserving aspect ratio.

    Returns (int(max_x), scaled y truncated to int).
    """
    width, height = pair
    factor = float(max_x) / width
    return (int(max_x), int(height * factor))
# Thumbnail filter based on code from
# http://batiste.dosimple.ch/blog/2007-05-13-1/
def thumbnail(media_url, original_image_path, arg):
    """Return the URL of a cached, resized copy of *original_image_path*.

    arg is "<size><mode>[, <upload_path>]", where mode is 'w' (scale to
    width), 'h' (scale to height) or anything else (scale the longer side),
    e.g. "100w" or "64x, images".  The resized file is cached in the
    THUMBNAILS subdirectory next to the original and regenerated whenever
    the source image is newer than the cached copy.
    """
    if not original_image_path:
        return ''
    # BUG FIX: the original tested `if arg.find(','):`, but str.find returns
    # -1 (truthy!) when no comma is present, so the no-comma case fell into
    # the split branch and raised ValueError on unpacking a 1-element list.
    # A membership test expresses the intent directly.
    if ',' in arg:
        size, upload_path = [a.strip() for a in arg.split(',')]
    else:
        size = arg
        upload_path = ''
    # NOTE(review): the comma-supplied upload_path is overwritten below
    # from the image path; it appears to be dead — confirm with callers.
    if (size.lower().endswith('h')):
        mode = SCALE_HEIGHT
    elif (size.lower().endswith('w')):
        mode = SCALE_WIDTH
    else:
        mode = SCALE_BOTH
    # defining the size: strip the trailing mode letter, parse the number
    size = size[:-1]
    max_size = int(size.strip())
    # defining the filename and the miniature filename
    basename, format = original_image_path.rsplit('.', 1)
    basename, name = basename.rsplit(os.path.sep, 1)
    upload_path = '/'.join(basename.rsplit(os.path.sep, 2)[1:])
    miniature = name + '_' + str(max_size) + mode + '.' + format
    thumbnail_path = os.path.join(basename, THUMBNAILS)
    if not os.path.exists(thumbnail_path):
        os.mkdir(thumbnail_path)
    miniature_filename = os.path.join(thumbnail_path, miniature)
    miniature_url = '/'.join((media_url, upload_path, THUMBNAILS, miniature))
    # if the image wasn't already resized (or is stale), resize it
    if not os.path.exists(miniature_filename) \
        or os.path.getmtime(original_image_path) > os.path.getmtime(miniature_filename):
        image = Image.open(original_image_path)
        image_x, image_y = image.size
        # 'x' mode: shrink whichever dimension is longer
        if mode == SCALE_BOTH:
            if image_x > image_y:
                mode = SCALE_WIDTH
            else:
                mode = SCALE_HEIGHT
        if mode == SCALE_HEIGHT:
            image_y, image_x = scale(max_size, (image_y, image_x))
        else:
            image_x, image_y = scale(max_size, (image_x, image_y))
        image = image.resize((image_x, image_y), Image.ANTIALIAS)
        image.save(miniature_filename, image.format)
    return miniature_url
|
[
"[email protected]"
] | |
7e57694591ccea12ade2aaeb5ac1524ce38461db
|
03dea3c0db7c8fafda71d23c3c2595f563ffb335
|
/SignalMC/python/AMSB_chargino900GeV_ctau1000cm_NoFilter_13TeV.py
|
d9b642184847712cc96ebbd952b587fe419eaacd
|
[] |
no_license
|
Andersen98/DisappTrks
|
3952e9bf8ba270e2d88aa2e8d9ef805cf25dfc46
|
140a5efdc4c51a30e5fced6d34b7813876c2f2ee
|
refs/heads/master
| 2020-06-27T03:41:59.136790 | 2017-07-12T15:19:18 | 2017-07-12T15:19:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,706 |
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.PythiaUEZ2starSettings_cfi import *
generator = cms.EDFilter("Pythia6GeneratorFilter",
pythiaHepMCVerbosity = cms.untracked.bool(False),
maxEventsToPrint = cms.untracked.int32(1),
pythiaPylistVerbosity = cms.untracked.int32(3),
filterEfficiency = cms.untracked.double(1.0),
# comEnergy = cms.double(8000.0),
comEnergy = cms.double(13000.0),
PythiaParameters = cms.PSet(
pythiaUESettingsBlock,
processParameters = cms.vstring(
'IMSS(1) = 11 ! Spectrum from external SLHA file',
'IMSS(21) = 33 ! LUN number for SLHA File (must be 33) ',
'IMSS(22) = 33 ! Read-in SLHA decay table ',
'MSEL = 0 ! General SUSY',
'MSUB(226) = 1 ! to double chargino',
'MSUB(229) = 1 ! to neutralino + chargino',
'MDCY(312,1) = 0 ! set the chargino stable.',
),
parameterSets = cms.vstring('pythiaUESettings', 'processParameters', 'SLHAParameters'),
SLHAParameters = cms.vstring('SLHAFILE = DisappTrks/SignalMC/data/AMSB_chargino_900GeV_Isajet780.slha'),
),
slhaFile = cms.untracked.string('DisappTrks/SignalMC/data/AMSB_chargino_900GeV_Isajet780.slha'),
# The following parameters are required by Exotica_HSCP_SIM_cfi:
processFile = cms.untracked.string('SimG4Core/CustomPhysics/data/RhadronProcessList.txt'),
useregge = cms.bool(False),
hscpFlavor = cms.untracked.string('stau'),
massPoint = cms.untracked.int32(900),
particleFile = cms.untracked.string('DisappTrks/SignalMC/data/geant4_AMSB_chargino_900GeV_ctau1000cm.slha')
)
|
[
"[email protected]"
] | |
b818191531994619e2f64e216edd315786e81044
|
24927eac464cdb1bec665f1cb4bfee85728ec5e1
|
/entry_parser/balenciaga.py
|
ca5ea1f4cb43c5aaeece995f78a8da7d00683e75
|
[] |
no_license
|
yingl/fashion-spider
|
d72ea8dfd4a49270fd3e64e7a507d6fcbaaf492c
|
0698768cd21d509ec335d7202a753be4f6ad378b
|
refs/heads/master
| 2021-01-01T18:14:17.848732 | 2017-09-27T08:44:47 | 2017-09-27T08:44:47 | 98,282,505 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 541 |
py
|
""" Balenciaga """
# coding:utf-8
import sys
sys.path.append('../')
import util
PREFIXES = ['www.balenciaga.cn']


def parse(driver, url):
    """Open the listing page at *url* and return its product links,
    ';'-joined, taken from the item-display anchor elements."""
    driver.get(url)
    anchors = util.find_elements_by_css_selector(
        driver, 'a.item-display-image-container')
    links = [anchor.get_attribute('href').strip() for anchor in anchors]
    return ';'.join(links)


def main():
    # URL to scrape is taken from the command line.
    browser = util.create_chrome_driver()
    print(parse(browser, sys.argv[1]))
    browser.quit()


if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
60b22d53fbf1e5893ab6925972c16edebae8fa71
|
c6ae5d4c9b2a3acef09ee8254e1c1b67bc255b56
|
/okex/v5/billtype.py
|
1432ed9ee5152a20f37741012015561760391395
|
[
"MIT"
] |
permissive
|
PattonChen/okex-py
|
ce36a4ded7cb051a7167e3ba9aeb88bc4e0417f3
|
cced7f1419da0940a2200ce66d62b4f9539949f2
|
refs/heads/main
| 2023-05-04T06:00:38.540463 | 2021-05-16T08:30:42 | 2021-05-16T08:30:42 | 367,833,565 | 1 | 0 |
MIT
| 2021-05-16T09:14:59 | 2021-05-16T09:14:59 | null |
UTF-8
|
Python
| false | false | 302 |
py
|
from enum import Enum
class BillType(Enum):
    """Bill (ledger) record types, as numeric codes."""
    # Transfer (funds moved between accounts)
    TRANSFER = 1
    # Trade
    TRADE = 2
    # Delivery
    DELIVERY = 3
    # Forced token conversion
    FORCE_SWAP = 4
    # Forced liquidation
    FORCED_LIQUIDATION = 5
    # ...

class BillSubType(Enum):
    """Bill record sub-types; values are the API's string codes."""
    LINEAR = "linear"
    INVERSE = "inverse"
    # ...
|
[
"[email protected]"
] | |
1d80fe96db1c270109eea36af33536d1f681f4e0
|
0fa96aa3b1ee5cf752e20bad98ef02785c369225
|
/quaducom/quaducom/assess/assess_shell/mn_resistance/ecb_law_mn_diagram.py
|
971d0cc7a521f908aee65bce5fdb5716551192e2
|
[] |
no_license
|
simvisage/simvisage
|
7a56ce0d13211513a86e3631db1b55dc62e85986
|
acb2a8eb8b6caa57c1e9e15f724a2b43843c7b10
|
refs/heads/master
| 2020-04-04T07:32:47.598636 | 2018-12-10T15:10:43 | 2018-12-10T15:10:43 | 3,583,342 | 0 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,966 |
py
|
'''
Created on Sep 4, 2012
@author: rch
'''
from etsproxy.traits.api import \
HasTraits, Int, Instance, Property, cached_property, DelegatesTo, \
Event, Button
from util.traits.editors.mpl_figure_editor import \
MPLFigureEditor
from matplotlib.figure import \
Figure
from etsproxy.traits.ui.api import \
View, Item, Group, HSplit, VGroup, HGroup, RangeEditor, InstanceEditor
from ecb_law_calib import \
ECBLCalib
import numpy as np
class ECBLMNDiagram(HasTraits):
    """Interactive viewer (Traits UI, Python 2) that sweeps strain profiles
    (eps_lo / eps_up pairs) over a cross section and plots the resulting
    M-N interaction diagram alongside the current strain/stress state.
    """

    # calibrator supplying the effective material law
    calib = Instance(ECBLCalib)
    def _calib_default(self):
        # Wire the calibrator back to this diagram so its edits invalidate caches.
        return ECBLCalib(notify_change=self.set_modified)

    def _calib_changed(self):
        self.calib.notify_change = self.set_modified

    # Fired whenever calibration input changes; cached Properties depend on it.
    modified = Event
    def set_modified(self):
        # NOTE(review): message typo 'set_modifeid' kept as-is (runtime string).
        print 'MN:set_modifeid'
        self.modified = True

    # cross section
    cs = DelegatesTo('calib')

    calibrated_ecb_law = Property(depends_on='modified')
    @cached_property
    def _get_calibrated_ecb_law(self):
        print 'NEW CALIBRATION'
        return self.calib.calibrated_ecb_law

    # Ultimate compressive strain (negative sign convention).
    eps_cu = Property()
    def _get_eps_cu(self):
        return -self.cs.cc_law.eps_c_u

    # Ultimate tensile strain of the calibrated law.
    eps_tu = Property()
    def _get_eps_tu(self):
        return self.calibrated_ecb_law.eps_tex_u

    # Number of sample points per strain-profile branch.
    n_eps = Int(5, auto_set=False, enter_set=True)

    eps_range = Property(depends_on='n_eps')
    @cached_property
    def _get_eps_range(self):
        # Build a 2 x (4*n_eps) array of (eps_lo, eps_up) strain-profile
        # pairs covering four branches of the interaction sweep.
        eps_c_space = np.linspace(self.eps_cu, 0, self.n_eps)
        eps_t_space = np.linspace(0, self.eps_tu, self.n_eps)

        eps_ccu = 0.8 * self.eps_cu

        #eps_cc = self.eps_cu * np.ones_like(eps_c_space)
        eps_cc = np.linspace(eps_ccu, self.eps_cu, self.n_eps)
        eps_ct = self.eps_cu * np.ones_like(eps_t_space)
        eps_tc = self.eps_tu * np.ones_like(eps_c_space)
        eps_tt = self.eps_tu * np.ones_like(eps_t_space)

        eps1 = np.vstack([eps_c_space, eps_cc])
        eps2 = np.vstack([eps_t_space, eps_ct])
        eps3 = np.vstack([eps_tc, eps_c_space])
        eps4 = np.vstack([eps_tt, eps_t_space])

        return np.hstack([eps1, eps2, eps3, eps4])

    n_eps_range = Property(depends_on='n_eps')
    @cached_property
    def _get_n_eps_range(self):
        # Total number of sampled strain profiles (columns of eps_range).
        return self.eps_range.shape[1]

    #===========================================================================
    # MN Diagram
    #===========================================================================

    def _get_MN_fn(self, eps_lo, eps_up):
        # Apply one strain profile to the cross section and read back (M, N).
        self.cs.set(eps_lo=eps_lo,
                    eps_up=eps_up)
        return (self.cs.M, self.cs.N)

    MN_vct = Property(depends_on='modified')
    def _get_MN_vct(self):
        return np.vectorize(self._get_MN_fn)

    MN_arr = Property(depends_on='modified')
    @cached_property
    def _get_MN_arr(self):
        # Evaluate (M, N) for every sampled strain profile.
        return self.MN_vct(self.eps_range[0, :], self.eps_range[1, :])

    #===========================================================================
    # f_eps Diagram
    #===========================================================================

    # Index of the currently selected strain profile (slider-driven).
    current_eps_idx = Int(0) # , auto_set = False, enter_set = True)
    def _current_eps_idx_changed(self):
        self._clear_fired()
        self._replot_fired()

    current_eps = Property(depends_on='current_eps_idx')
    @cached_property
    def _get_current_eps(self):
        return self.eps_range[(0, 1), self.current_eps_idx]

    current_MN = Property(depends_on='current_eps_idx')
    @cached_property
    def _get_current_MN(self):
        return self._get_MN_fn(*self.current_eps)

    #===========================================================================
    # Plotting
    #===========================================================================
    figure = Instance(Figure)
    def _figure_default(self):
        figure = Figure(facecolor='white')
        figure.add_axes([0.08, 0.13, 0.85, 0.74])
        return figure

    # Fired after redraws so the MPL editor refreshes the canvas.
    data_changed = Event

    clear = Button
    def _clear_fired(self):
        self.figure.clear()
        self.data_changed = True

    replot = Button
    def _replot_fired(self):
        # Subplot 1: swept strain range with the current profile highlighted.
        ax = self.figure.add_subplot(2, 2, 1)

        ax.plot(-self.eps_range, [0, 0.06], color='black')
        ax.plot(-self.current_eps, [0, 0.06], lw=3, color='red')

        ax.spines['left'].set_position('zero')
        ax.spines['right'].set_color('none')
        ax.spines['top'].set_color('none')
        ax.spines['left'].set_smart_bounds(True)
        ax.spines['bottom'].set_smart_bounds(True)
        ax.xaxis.set_ticks_position('bottom')
        ax.yaxis.set_ticks_position('left')

        # Subplot 2: M-N interaction curve with the current (M, N) marked.
        ax = self.figure.add_subplot(2, 2, 2)

        ax.plot(self.MN_arr[0], -self.MN_arr[1], lw=2, color='blue')
        ax.plot(self.current_MN[0], -self.current_MN[1], 'g.', markersize=20.0, color='red')

        ax.spines['left'].set_position('zero')
        ax.spines['bottom'].set_position('zero')
        ax.spines['right'].set_color('none')
        ax.spines['top'].set_color('none')
        ax.spines['left'].set_smart_bounds(True)
        ax.spines['bottom'].set_smart_bounds(True)
        ax.xaxis.set_ticks_position('bottom')
        ax.yaxis.set_ticks_position('left')
        ax.grid(b=None, which='major')

        # Put the cross section into the currently selected strain state
        # before delegating the strain/stress plots to it.
        self.cs.set(eps_lo=self.current_eps[0],
                    eps_up=self.current_eps[1])

        ax = self.figure.add_subplot(2, 2, 3)
        self.cs.plot_eps(ax)

        ax = self.figure.add_subplot(2, 2, 4)
        self.cs.plot_sig(ax)

        self.data_changed = True

    # Traits UI layout: configuration panes on the left, slider + figure right.
    view = View(HSplit(Group(
                HGroup(
                Group(Item('n_eps', springy=True),
                      label='Discretization',
                      springy=True
                      ),
                springy=True,
                ),
                HGroup(
                Group(VGroup(
                      Item('cs', label='Cross section', show_label=False, springy=True,
                           editor=InstanceEditor(kind='live'),
                           ),
                      Item('calib', label='Calibration', show_label=False, springy=True,
                           editor=InstanceEditor(kind='live'),
                           ),
                      springy=True,
                      ),
                label='Cross sectoin',
                springy=True
                ),
                springy=True,
                ),
                scrollable=True,
                ),
                Group(HGroup(
                      Item('replot', show_label=False),
                      Item('clear', show_label=False),
                      ),
                      Item('current_eps_idx', editor=RangeEditor(low=0,
                                       high_name='n_eps_range',
                                       format='(%s)',
                                       mode='slider',
                                       auto_set=False,
                                       enter_set=False,
                                       ),
                       show_label=False,
                       ),
                      Item('figure', editor=MPLFigureEditor(),
                           resizable=True, show_label=False),
                      id='simexdb.plot_sheet',
                      label='plot sheet',
                      dock='tab',
                      ),
                ),
                width=1.0,
                height=0.8,
                resizable=True,
                buttons=['OK', 'Cancel'])
if __name__ == '__main__':
    # Demo driver: build a calibrator with fixed study parameters and open
    # the interactive M-N diagram (blocks in the Traits UI event loop).
    c = ECBLCalib(
                  # ultimate moment and section parameters used for calibration;
                  # units per the ECBLCalib API — TODO confirm against its docs
                  Mu=3.49,
                  width=0.20,
                  n_rovings=23,
                  ecb_law_type='fbm',
                  cc_law_type='quadratic' #eps_tu 0.0137279096658
                  )

    mn = ECBLMNDiagram(calib=c,
                       n_eps=30,
                       )

    mn.configure_traits()
|
[
"[email protected]"
] | |
34d8513c172a58e7d635c1cc20add3c5cfc710df
|
ffb4db36bf3959ed4a994f693c62d68092a91e63
|
/image_space/utils.py
|
405488ce25781aa69623a15abb722ef060c3b29d
|
[] |
no_license
|
quasiben/image_solr
|
8bc25db1e47f19d83d5b51b89e250f8da2cd285b
|
cc2baafa170bdbfecc1a0450ffd041de485f19fa
|
refs/heads/master
| 2021-01-10T02:10:19.064770 | 2015-03-05T23:37:24 | 2015-03-05T23:37:24 | 31,621,600 | 2 | 1 | null | 2015-03-05T23:25:21 | 2015-03-03T21:12:28 |
JavaScript
|
UTF-8
|
Python
| false | false | 1,319 |
py
|
from image_space import app
from image_space.models import Image, IMAGE_TABLE_NAME
from image_space import db
# Upload Handling
def allowed_file(filename):
    """Return True when *filename* has an extension listed in the
    application's ALLOWED_EXTENSIONS config."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in app.config['ALLOWED_EXTENSIONS']
# EXIF Processing
def process_exif(exif_data, img_path):
    """Extract serial-number EXIF tags and persist an Image row for img_path.

    exif_data: mapping of EXIF tag name -> tag object; each tag is read via
    its `.values` attribute (presumably exifread tag objects — TODO confirm
    against the caller).  Missing tags become None.
    img_path: stored verbatim in the row's img_file column.

    Side effect: adds one Image row and commits the DB session.
    """
    # Get the EXIF data from the image; getattr(..., 'values', None) yields
    # None when dict.get returned None (tag absent).
    LSVN = getattr(exif_data.get('EXIF LensSerialNumber'), 'values', None)
    MSNF = getattr(exif_data.get('MakerNote SerialNumberFormat'), 'values', None)
    BSN = getattr(exif_data.get('EXIF BodySerialNumber'), 'values', None)
    MISN = getattr(exif_data.get('MakerNote InternalSerialNumber'), 'values', None)
    MSN = getattr(exif_data.get('MakerNote SerialNumber'), 'values', None)
    IBSN = getattr(exif_data.get('Image BodySerialNumber'), 'values', None)

    image = Image(img_file = img_path,
                  EXIF_LensSerialNumber = LSVN,
                  MakerNote_SerialNumberFormat = MSNF,
                  EXIF_BodySerialNumber = BSN,
                  MakerNote_InternalSerialNumber = MISN,
                  MakerNote_SerialNumber = MSN,
                  Image_BodySerialNumber = IBSN,
                  Uploaded = 1,
                  )

    # Add uploaded image to the database
    db.session.add(image)
    db.session.commit()
|
[
"[email protected]"
] | |
0c3eb34ca123217876148bd6cbe34e978632e747
|
6657a43ee360177e578f67cf966e6aef5debda3c
|
/test/test_warning_api.py
|
7b7edb07cebe3c9a13d48a3b983ac64425eaa37f
|
[
"MIT"
] |
permissive
|
NVE/python-varsom-avalanche-client
|
3cc8b9c366f566a99c6f309ccdfb477f73256659
|
c7787bf070d8ea91efd3a2a9e7782eedd4961528
|
refs/heads/master
| 2022-04-20T09:32:24.499284 | 2020-04-16T20:12:01 | 2020-04-16T20:12:01 | 256,318,660 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,017 |
py
|
# coding: utf-8
"""
Snøskredvarsel API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v5.0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import varsom_avalanche_client
from api.warning_api import WarningApi # noqa: E501
from varsom_avalanche_client.rest import ApiException
class TestWarningApi(unittest.TestCase):
    """WarningApi unit test stubs"""

    def setUp(self):
        # BUG FIX: the generated code called `api.warning_api.WarningApi()`,
        # but no name `api` is bound in this module — the class itself is
        # imported above via `from api.warning_api import WarningApi`, so
        # setUp raised NameError before any test could run.
        self.api = WarningApi()  # noqa: E501

    def tearDown(self):
        pass

    def test_warning_all(self):
        """Test case for warning_all

        """
        pass

    def test_warning_id(self):
        """Test case for warning_id

        """
        pass

    def test_warning_region(self):
        """Test case for warning_region

        """
        pass


if __name__ == '__main__':
    unittest.main()
|
[
"[email protected]"
] | |
65ee34428a001611cc81d153a87842166b3b521d
|
9fbbfb3dd1990be27acfada1a91af980f474c8f1
|
/Chapter 07/rangesumBST.py
|
603891b6170f47a25ae103aebce7585ed63a2d86
|
[
"MIT"
] |
permissive
|
bpbpublications/Python-Quick-Interview-Guide
|
61a48446f910144a050a5bb1515ad48567dc9481
|
ab4ff3e670b116a4db6b9e1f0ccba8424640704d
|
refs/heads/main
| 2023-04-08T12:18:14.605193 | 2021-04-13T09:18:30 | 2021-04-13T09:18:30 | 350,315,060 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,665 |
py
|
from typing import List
class TreeNode:
    """Plain binary-tree node holding a value and two child links."""

    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class BinTree:
    """Level-by-level printer for a binary tree (missing children show as None)."""

    def printTree(self, root: TreeNode) -> None:
        self.printLevel([root])

    def printLevel(self, LevelList: List[TreeNode]) -> List[TreeNode]:
        labels = []
        next_level = []
        has_node = False
        for node in LevelList:
            if node is None:
                labels.append("None")
                next_level.extend((None, None))
            else:
                labels.append(str(node.val))
                next_level.extend((node.left, node.right))
                has_node = True
        # Stop once a level contains no real nodes; otherwise print it
        # (one trailing space per label, as before) and recurse.
        if has_node:
            print(" ".join(labels) + " ")
            self.printLevel(next_level)


class Solution:
    def rangeSumBST(self, root: TreeNode, L: int, R: int) -> int:
        """Sum of node values v with L <= v <= R, via inorder traversal."""
        self.sum = 0

        def visit(node):
            if node:
                visit(node.left)  # recursively visit left child
                # NOTE: truthiness check deliberately preserved — it skips
                # None-valued placeholder nodes (and, incidentally, val == 0).
                if node.val:
                    if L <= node.val <= R:
                        self.sum += node.val
                visit(node.right)  # recursively visit right child

        visit(root)
        return self.sum


# Driver code
root = TreeNode(5)
root.left = TreeNode(3)
root.right = TreeNode(6)
root.left.left = TreeNode(2)
root.left.right = TreeNode(4)
root.right.left = TreeNode(None)
root.right.right = TreeNode(7)

bst = BinTree()
bst.printTree(root)
sol = Solution()
print("Range sum =", sol.rangeSumBST(root, 2, 6))
|
[
"[email protected]"
] | |
795f936423965063c44b347705c53fd1c306692f
|
bf2aa4eab14a6a5347fe4af65cc4a37f512a465d
|
/people/migrations/0111_auto_20200530_0632.py
|
7320c7e9f7c6db4746b6c85c51fb4fef42dfea53
|
[] |
no_license
|
drdavidknott/betterstart
|
0cda889f5cd6bb779f6d1fa75cb4f2ef08eb626c
|
59e2f8282b34b7c75e1e19e1cfa276b787118adf
|
refs/heads/master
| 2023-05-04T07:32:24.796488 | 2023-04-16T15:26:30 | 2023-04-16T15:26:30 | 173,626,906 | 0 | 0 | null | 2023-02-18T07:27:55 | 2019-03-03T20:37:01 |
Python
|
UTF-8
|
Python
| false | false | 623 |
py
|
# Generated by Django 3.0.3 on 2020-05-30 05:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make the Site password-reset email 'from' and 'title' fields optional
    (blank=True) with empty-string defaults, max length 100."""

    dependencies = [
        ('people', '0110_auto_20200530_0631'),
    ]

    operations = [
        migrations.AlterField(
            model_name='site',
            name='password_reset_email_from',
            field=models.CharField(blank=True, default='', max_length=100),
        ),
        migrations.AlterField(
            model_name='site',
            name='password_reset_email_title',
            field=models.CharField(blank=True, default='', max_length=100),
        ),
    ]
|
[
"[email protected]"
] | |
c07f5f20db0ddcca9d3b07ecdb404f2a7e817bcb
|
1caf4418f3549567637f5e9893a445f52a38c6a0
|
/CmsAdmin/user_content/app/dtos/change_account_password_dto.py
|
29b05af87dbeee572f150ac6b43bf6236ec0b7b5
|
[] |
no_license
|
Final-Game/social_network_backend
|
c601563e08c0fd7de72a614944f354ef8d2d31d8
|
8111787d1d20eb87733ae360d8baa745a65e2743
|
refs/heads/master
| 2023-03-04T21:12:43.147084 | 2021-02-23T03:45:22 | 2021-02-23T03:45:22 | 290,542,389 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 560 |
py
|
from core.common import BaseApiException
class ChangeAccountPasswordDto(object):
    """Payload for a change-password request; validated on construction."""

    old_password: str
    new_password: str

    def __init__(self, old_password: str, new_password: str) -> None:
        self.old_password = old_password
        self.new_password = new_password
        self.validate()

    def validate(self):
        """Raise BaseApiException if either password is missing or the new
        password is shorter than 6 characters."""
        # BUG FIX: the original used `or`, so a request supplying only one of
        # the two passwords slipped past this guard and then crashed with
        # TypeError on len(None) below when new_password was missing.
        # Both fields are required.
        if not (self.old_password and self.new_password):
            raise BaseApiException("Password not found")
        if len(self.new_password) < 6:
            raise BaseApiException("Password must be large 6 characters.")
|
[
"[email protected]"
] | |
ee71398911054d72c2440fe57f222ff41fe9d50c
|
ebf997ac5814bd20a44646b6690de6913669f2e1
|
/plugins/btsync/resources/btsyncUI/freenas/urls.py
|
2b3a85c40b8dfa13a5dc5e20d84814d6cc7c5f00
|
[] |
no_license
|
MadMarty/freenas-plugins-1
|
4add49728e07fb75191352902969a1ecea67b248
|
4940cd7cc39a26882ea7f4a61799bcea1cea6b34
|
refs/heads/master
| 2021-01-22T04:23:36.608602 | 2016-05-06T18:02:47 | 2016-05-06T18:02:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 423 |
py
|
from django.conf.urls import patterns, include, url
# URL routes for the btsync plugin UI; view callables are resolved relative
# to the 'btsyncUI.freenas.views' prefix given to patterns().
urlpatterns = patterns('btsyncUI.freenas.views',
    url(r'^edit$', 'edit', name="btsync_edit"),
    url(r'^treemenu-icon$', 'treemenu_icon', name="treemenu_icon"),
    # '_s/' endpoints: service-control/status calls used by the frontend.
    url(r'^_s/treemenu$', 'treemenu', name="treemenu"),
    url(r'^_s/start$', 'start', name="start"),
    url(r'^_s/stop$', 'stop', name="stop"),
    url(r'^_s/status$', 'status', name="status"),
)
|
[
"[email protected]"
] | |
94525c4e1278e1b638d45df4e32589b8ea6e5133
|
3a891a79be468621aae43defd9a5516f9763f36e
|
/desktop/core/ext-py/pyasn1-0.1.8/test/codec/der/test_decoder.py
|
5f0bc0b8be193a1564d66e29a71c3e468855d6a1
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"Apache-2.0"
] |
permissive
|
oyorooms/hue
|
b53eb87f805063a90f957fd2e1733f21406269aa
|
4082346ef8d5e6a8365b05752be41186840dc868
|
refs/heads/master
| 2020-04-15T20:31:56.931218 | 2019-01-09T19:02:21 | 2019-01-09T19:05:36 | 164,998,117 | 4 | 2 |
Apache-2.0
| 2019-01-10T05:47:36 | 2019-01-10T05:47:36 | null |
UTF-8
|
Python
| false | false | 963 |
py
|
from pyasn1.type import univ
from pyasn1.codec.der import decoder
from pyasn1.compat.octets import ints2octs
from pyasn1.error import PyAsn1Error
from sys import version_info
# Pick a unittest implementation: on Python < 2.7 and on 3.0/3.1 prefer the
# backported 'unittest2' package, falling back to the stdlib module when it
# is not installed.
if version_info[0:2] < (2, 7) or \
   version_info[0:2] in ( (3, 0), (3, 1) ):
    try:
        import unittest2 as unittest
    except ImportError:
        import unittest
else:
    import unittest

class OctetStringDecoderTestCase(unittest.TestCase):
    """DER decoding of OCTET STRING values."""

    def testShortMode(self):
        # \004 = OCTET STRING tag, \017 = definite length 15, then the
        # payload; decode() returns (value, remaining-bytes).
        assert decoder.decode(
            '\004\017Quick brown fox'.encode()
        ) == ('Quick brown fox'.encode(), ''.encode())

    def testIndefMode(self):
        # 36, 128 open a constructed, indefinite-length encoding; the DER
        # decoder is expected to reject it (see the failure message below).
        try:
            decoder.decode(
                ints2octs((36, 128, 4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120, 0, 0))
            )
        except PyAsn1Error:
            pass
        else:
            assert 0, 'indefinite length encoding tolerated'

if __name__ == '__main__': unittest.main()
|
[
"[email protected]"
] | |
312b1e2bce53e43eed3cdd5faca54bc8a98d4c90
|
ce9d90ac5dfd61cc1d3ec57378186a4895323abb
|
/tests/filters/helpers.py
|
0bd99d08eb31da8ac72380689341d36b801acad4
|
[
"Apache-2.0"
] |
permissive
|
min-a-youn/plaso
|
61ee7f280c471a2e9fcc3407445ddd4f3c5eb125
|
082ff564872f7dd1d0d5d68cca23b3f7b8cdedcb
|
refs/heads/master
| 2020-09-20T22:07:34.631095 | 2019-08-28T06:06:48 | 2019-08-28T06:06:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,455 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the event filter helper functions and classes."""
from __future__ import unicode_literals
import unittest
from plaso.filters import helpers
from tests import test_lib as shared_test_lib
class CopyValueToDateTimeTest(shared_test_lib.BaseTestCase):
  """Tests the CopyValueToDateTime helper function."""

  def testCopyValueToDateTime(self):
    """Tests the CopyValueToDateTime function."""
    # Expected timestamps are microseconds since the POSIX epoch (the
    # .849131 fraction maps onto the last 6 digits).
    date_time = helpers.CopyValueToDateTime('2009-07-13T23:29:02.849131')
    self.assertIsNotNone(date_time)
    self.assertEqual(date_time.timestamp, 1247527742849131)

    # Date-only input resolves to midnight.
    date_time = helpers.CopyValueToDateTime('2009-07-13')
    self.assertIsNotNone(date_time)
    self.assertEqual(date_time.timestamp, 1247443200000000)

    date_time = helpers.CopyValueToDateTime('2009-07-13 23:29:02')
    self.assertIsNotNone(date_time)
    self.assertEqual(date_time.timestamp, 1247527742000000)

    date_time = helpers.CopyValueToDateTime('2009-07-13 23:29:02.849131')
    self.assertIsNotNone(date_time)
    self.assertEqual(date_time.timestamp, 1247527742849131)

    # Numeric timestamps are accepted both as string and as int.
    date_time = helpers.CopyValueToDateTime('1247527742849131')
    self.assertIsNotNone(date_time)
    self.assertEqual(date_time.timestamp, 1247527742849131)

    date_time = helpers.CopyValueToDateTime(1247527742849131)
    self.assertIsNotNone(date_time)
    self.assertEqual(date_time.timestamp, 1247527742849131)

    with self.assertRaises(ValueError):
      helpers.CopyValueToDateTime(None)


class GetUnicodeStringTest(shared_test_lib.BaseTestCase):
  """Tests the GetUnicodeString helper function."""

  def testGetUnicodeString(self):
    """Tests the GetUnicodeString function."""
    # Lists are flattened by concatenating the string form of each element.
    string = helpers.GetUnicodeString(['1', '2', '3'])
    self.assertEqual(string, '123')

    string = helpers.GetUnicodeString([1, 2, 3])
    self.assertEqual(string, '123')

    string = helpers.GetUnicodeString(123)
    self.assertEqual(string, '123')

    string = helpers.GetUnicodeString(b'123')
    self.assertEqual(string, '123')

    string = helpers.GetUnicodeString('123')
    self.assertEqual(string, '123')


class TimeRangeCacheTest(shared_test_lib.BaseTestCase):
  """Tests the TimeRangeCache helper."""

  # pylint: disable=protected-access

  def testGetTimeRange(self):
    """Tests the GetTimeRange function."""
    # The cache stores its bounds as class attributes; remove any leftovers
    # from other tests so the defaults are exercised.
    if hasattr(helpers.TimeRangeCache, '_lower'):
      del helpers.TimeRangeCache._lower
    if hasattr(helpers.TimeRangeCache, '_upper'):
      del helpers.TimeRangeCache._upper

    first, last = helpers.TimeRangeCache.GetTimeRange()
    self.assertEqual(first, helpers.TimeRangeCache._INT64_MIN)
    self.assertEqual(last, helpers.TimeRangeCache._INT64_MAX)

  def testSetLowerTimestamp(self):
    """Tests the SetLowerTimestamp function."""
    helpers.TimeRangeCache.SetLowerTimestamp(1247527742849131)
    first, last = helpers.TimeRangeCache.GetTimeRange()
    self.assertEqual(first, 1247527742849131)
    self.assertEqual(last, helpers.TimeRangeCache._INT64_MAX)
    # Clean up the class-level attribute so later tests see a fresh cache.
    del helpers.TimeRangeCache._lower

  def testSetUpperTimestamp(self):
    """Tests the SetUpperTimestamp function."""
    helpers.TimeRangeCache.SetUpperTimestamp(1247527742849131)
    first, last = helpers.TimeRangeCache.GetTimeRange()
    self.assertEqual(first, helpers.TimeRangeCache._INT64_MIN)
    self.assertEqual(last, 1247527742849131)
    del helpers.TimeRangeCache._upper


if __name__ == "__main__":
  unittest.main()
|
[
"[email protected]"
] | |
7bb4179aa4bbbb1f06631d13ab17c3564a767a29
|
3e7b2ebb64e9e324ce47d19def21ae62cc1e56a6
|
/My Thought/reverse a string.py
|
296417613cb9272f294f6413f7eee55df1ccb0ce
|
[] |
no_license
|
shuvo14051/python-data-algo
|
9b6622d9260e95ca9ffabd39b02996f13bdf20d1
|
8f66ff6f2bd88a0ae48dac72e4ea6c5382a836ec
|
refs/heads/master
| 2023-02-03T03:04:01.183093 | 2020-12-13T10:13:15 | 2020-12-13T10:13:15 | 274,106,480 | 0 | 0 | null | 2020-07-05T06:33:28 | 2020-06-22T10:24:05 |
Python
|
UTF-8
|
Python
| false | false | 188 |
py
|
# this is a complete pythonnic way
# print(a[::-1])

a = "Shuvo"

# collect the characters of the string into a list
li = [ch for ch in a]

# walk the list back-to-front, accumulating the reversed string
reverse = ''
for ch in reversed(li):
    reverse += ch

print(reverse)
|
[
"[email protected]"
] | |
4097dc4928226ce67c6d923fff671e5ca2b9b7d3
|
e20ed90b9be7a0bcdc1603929d65b2375a224bf6
|
/generated-libraries/python/netapp/job/job_private_delete_iter_key_td.py
|
7dde8422f159e87e74cc57255dc0b70f0ea0cce6
|
[
"MIT"
] |
permissive
|
radekg/netapp-ontap-lib-gen
|
530ec3248cff5ead37dc2aa47ced300b7585361b
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
refs/heads/master
| 2016-09-06T17:41:23.263133 | 2015-01-14T17:40:46 | 2015-01-14T17:40:46 | 29,256,898 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,520 |
py
|
from netapp.netapp_object import NetAppObject
class JobPrivateDeleteIterKeyTd(NetAppObject):
    """
    Key typedef for table jm_local_jobs_table_remove
    """

    # Backing field for key_2 (vserver); None means "not set".
    _key_2 = None
    @property
    def key_2(self):
        """
        Field vserver
        """
        return self._key_2
    @key_2.setter
    def key_2(self, val):
        # Validate only non-None values; assignment always happens.
        if val != None:
            self.validate('key_2', val)
        self._key_2 = val

    # Backing field for key_1 (id).
    _key_1 = None
    @property
    def key_1(self):
        """
        Field id
        """
        return self._key_1
    @key_1.setter
    def key_1(self, val):
        if val != None:
            self.validate('key_1', val)
        self._key_1 = val

    # Backing field for key_0 (node).
    _key_0 = None
    @property
    def key_0(self):
        """
        Field node
        """
        return self._key_0
    @key_0.setter
    def key_0(self, val):
        if val != None:
            self.validate('key_0', val)
        self._key_0 = val

    @staticmethod
    def get_api_name():
        # Wire name of this typedef in the ONTAP API.
        return "job-private-delete-iter-key-td"

    @staticmethod
    def get_desired_attrs():
        # Attribute names requested when fetching records of this type.
        return [
            'key-2',
            'key-1',
            'key-0',
        ]

    def describe_properties(self):
        # Property metadata consumed by NetAppObject (note: `basestring`
        # implies this generated code targets Python 2).
        return {
            'key_2': { 'class': basestring, 'is_list': False, 'required': 'optional' },
            'key_1': { 'class': basestring, 'is_list': False, 'required': 'optional' },
            'key_0': { 'class': basestring, 'is_list': False, 'required': 'optional' },
        }
|
[
"[email protected]"
] | |
b374191a7cf732d53d219ab1e5838ac5a74b3ab2
|
8fcc27160f8700be46296568260fa0017a0b3004
|
/client/cherrypy/test/test_virtualhost.py
|
e9b88bd297cb6047933124c32d619fd6c0d22cc0
|
[] |
no_license
|
connoryang/dec-eve-serenity
|
5d867f4eedfa896a4ef60f92556356cafd632c96
|
b670aec7c8b4514fc47cd52e186d7ccf3aabb69e
|
refs/heads/master
| 2021-01-22T06:33:16.303760 | 2016-03-16T15:15:32 | 2016-03-16T15:15:32 | 56,389,750 | 1 | 0 | null | 2016-04-16T15:05:24 | 2016-04-16T15:05:24 | null |
UTF-8
|
Python
| false | false | 3,718 |
py
|
#Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\carbon\common\lib\cherrypy\test\test_virtualhost.py
import os
curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
import cherrypy
from cherrypy.test import helper
class VirtualHostTest(helper.CPWebCase):
    """Functional tests for cherrypy.dispatch.VirtualHost routing."""

    def setup_server():
        # Invoked by the test harness before any instance exists, hence no
        # `self`; it is rebound as a staticmethod below.
        class Root:
            def index(self):
                return 'Hello, world'
            index.exposed = True

            def dom4(self):
                return 'Under construction'
            dom4.exposed = True

            def method(self, value):
                return 'You sent %s' % repr(value)
            method.exposed = True

        class VHost:
            def __init__(self, sitename):
                self.sitename = sitename

            def index(self):
                return 'Welcome to %s' % self.sitename
            index.exposed = True

            def vmethod(self, value):
                return 'You sent %s' % repr(value)
            vmethod.exposed = True

            def url(self):
                # Echo back an absolute URL so the test can check that
                # cherrypy.url() honours the virtual Host header.
                return cherrypy.url('nextpage')
            url.exposed = True

            # Serve this test directory statically under /static.
            static = cherrypy.tools.staticdir.handler(section='/static', dir=curdir)

        root = Root()
        root.mydom2 = VHost('Domain 2')
        root.mydom3 = VHost('Domain 3')
        # Host header -> URL prefix used by the VirtualHost dispatcher.
        hostmap = {'www.mydom2.com': '/mydom2',
                   'www.mydom3.com': '/mydom3',
                   'www.mydom4.com': '/dom4'}
        cherrypy.tree.mount(root, config={'/': {'request.dispatch': cherrypy.dispatch.VirtualHost(**hostmap)},
                                          '/mydom2/static2': {'tools.staticdir.on': True,
                                                              'tools.staticdir.root': curdir,
                                                              'tools.staticdir.dir': 'static',
                                                              'tools.staticdir.index': 'index.html'}})
    setup_server = staticmethod(setup_server)

    def testVirtualHost(self):
        # Unknown host falls through to the root handlers.
        self.getPage('/', [('Host', 'www.mydom1.com')])
        self.assertBody('Hello, world')
        # An explicit path still works regardless of host.
        self.getPage('/mydom2/', [('Host', 'www.mydom1.com')])
        self.assertBody('Welcome to Domain 2')
        # Host-based dispatch per the hostmap above.
        self.getPage('/', [('Host', 'www.mydom2.com')])
        self.assertBody('Welcome to Domain 2')
        self.getPage('/', [('Host', 'www.mydom3.com')])
        self.assertBody('Welcome to Domain 3')
        self.getPage('/', [('Host', 'www.mydom4.com')])
        self.assertBody('Under construction')

        # Method dispatch via GET query, POST body, and positional path arg
        # (the u'...' reprs indicate this file targets Python 2).
        self.getPage('/method?value=root')
        self.assertBody("You sent u'root'")
        self.getPage('/vmethod?value=dom2+GET', [('Host', 'www.mydom2.com')])
        self.assertBody("You sent u'dom2 GET'")
        self.getPage('/vmethod', [('Host', 'www.mydom3.com')], method='POST', body='value=dom3+POST')
        self.assertBody("You sent u'dom3 POST'")
        self.getPage('/vmethod/pos', [('Host', 'www.mydom3.com')])
        self.assertBody("You sent 'pos'")

        # cherrypy.url() must reflect the virtual host, not the bind address.
        self.getPage('/url', [('Host', 'www.mydom2.com')])
        self.assertBody('%s://www.mydom2.com/nextpage' % self.scheme)

    def test_VHost_plus_Static(self):
        # Static content via the class-level staticdir handler (/static).
        self.getPage('/static/style.css', [('Host', 'www.mydom2.com')])
        self.assertStatus('200 OK')
        self.assertHeader('Content-Type', 'text/css;charset=utf-8')
        # Static content via the per-path config ('/mydom2/static2').
        self.getPage('/static2/dirback.jpg', [('Host', 'www.mydom2.com')])
        self.assertStatus('200 OK')
        self.assertHeader('Content-Type', 'image/jpeg')
        # staticdir.index serves index.html for the directory URL...
        self.getPage('/static2/', [('Host', 'www.mydom2.com')])
        self.assertStatus('200 OK')
        self.assertBody('Hello, world\r\n')
        # ...and the slashless form redirects to it.
        self.getPage('/static2', [('Host', 'www.mydom2.com')])
        self.assertStatus(301)
[
"[email protected]"
] | |
152c01b7254082a6295aa8c64ce3f0600ca33d97
|
be134c181703b95aca1e48b6a31bcfdb7bcfcc76
|
/site/mezzanine_old/galleries/migrations/0001_initial.py
|
11f1937e16fbf9cff1135c9e2c992c658bbfd803
|
[] |
permissive
|
aldenjenkins/ThiccGaming
|
0245955a797394bcfeedb2cfb385f633653ba55d
|
4790d2568b019438d1569d0fe4e9f9aba008b737
|
refs/heads/master
| 2022-12-16T02:43:36.532981 | 2021-11-17T04:15:21 | 2021-11-17T04:15:21 | 154,858,818 | 0 | 0 |
BSD-3-Clause
| 2022-12-08T02:58:44 | 2018-10-26T15:52:39 |
Python
|
UTF-8
|
Python
| false | false | 1,837 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import mezzanine.core.fields
class Migration(migrations.Migration):
dependencies = [
('pages', '__first__'),
]
operations = [
migrations.CreateModel(
name='Gallery',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='pages.Page')),
('content', mezzanine.core.fields.RichTextField(verbose_name='Content')),
('zip_import', models.FileField(help_text="Upload a zip file containing images, and they'll be imported into this gallery.", upload_to='galleries', verbose_name='Zip import', blank=True)),
],
options={
'ordering': ('_order',),
'verbose_name': 'Gallery',
'verbose_name_plural': 'Galleries',
},
bases=('pages.page', models.Model),
),
migrations.CreateModel(
name='GalleryImage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('_order', models.IntegerField(null=True, verbose_name='Order')),
('file', mezzanine.core.fields.FileField(max_length=200, verbose_name='File')),
('description', models.CharField(max_length=1000, verbose_name='Description', blank=True)),
('gallery', models.ForeignKey(related_name='images', to='galleries.Gallery')),
],
options={
'ordering': ('_order',),
'verbose_name': 'Image',
'verbose_name_plural': 'Images',
},
bases=(models.Model,),
),
]
|
[
"[email protected]"
] | |
b08be16b6f55bbb29dd93651676a710322f99cdd
|
2fcb5da42f0aff62c88189bd36fc5f61a40eb604
|
/vardautomation/timeconv.py
|
3b84b24deda8187b48a85d3ae7948559d45a7404
|
[
"MIT"
] |
permissive
|
tomato39/vardautomation
|
d45ec446a1cd06c2e7b7ec5378772953fa7b4caa
|
efa24d9420d6a6f732e8b0a846874a289a7cb095
|
refs/heads/master
| 2023-08-23T01:44:00.014196 | 2021-10-21T23:05:52 | 2021-10-21T23:09:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,647 |
py
|
"""Conversion time module"""
from fractions import Fraction
from .status import Status
class Convert:
"""Collection of methods to perform time conversion"""
@classmethod
def ts2f(cls, ts: str, fps: Fraction, /) -> int:
"""
Convert a timestamp hh:mm:ss.xxxx in number of frames
:param ts: Timestamp
:param fps: Framerate Per Second
:return: Frames
"""
s = cls.ts2seconds(ts)
f = cls.seconds2f(s, fps)
return f
@classmethod
def f2ts(cls, f: int, fps: Fraction, /, *, precision: int = 3) -> str:
"""
Convert frames in timestamp hh:mm:ss.xxxx
:param f: Frames
:param fps: Framerate Per Second
:param precision: Precision number, defaults to 3
:return: Timestamp
"""
s = cls.f2seconds(f, fps)
ts = cls.seconds2ts(s, precision=precision)
return ts
@classmethod
def seconds2ts(cls, s: float, /, *, precision: int = 3) -> str:
"""
Convert seconds in timestamp hh:mm:ss.xxx
:param s: Seconds
:param precision: Precision number, defaults to 3
:return: Timestamp
"""
m = s // 60
s %= 60
h = m // 60
m %= 60
return cls.composets(h, m, s, precision=precision)
@classmethod
def f2assts(cls, f: int, fps: Fraction, /) -> str:
"""
Convert frames to .ass timestamp hh:mm:ss.xx properly
by removing half of one frame per second of the specified framerate
:param f: Frames
:param fps: Framerate Per Second
:return: ASS timestamp
"""
s = cls.f2seconds(f, fps)
s -= fps ** -1 * 0.5
ts = cls.seconds2ts(max(0, s), precision=3)
return ts[:-1]
@classmethod
def assts2f(cls, assts: str, fps: Fraction, /) -> int:
"""
Convert .ass timestamp hh:mm:ss.xx to frames properly
by adding half of one frame per second of the specified framerate
:param assts: ASS timestamp
:param fps: Framerate Per Second
:return: Frames
"""
s = cls.ts2seconds(assts)
if s > 0:
s += fps ** -1 * 0.5
return cls.seconds2f(s, fps)
@staticmethod
def f2seconds(f: int, fps: Fraction, /) -> float:
"""
Convert frames to seconds
:param f: Frames
:param fps: Framerate Per Second
:return: Seconds
"""
if f == 0:
return 0.0
t = round(float(10 ** 9 * f * fps ** -1))
s = t / 10 ** 9
return s
@staticmethod
def ts2seconds(ts: str, /) -> float:
"""
Convert timestamp hh:mm:ss.xxxx to seconds
:param ts: Timestamp
:return: Seconds
"""
h, m, s = map(float, ts.split(':'))
return h * 3600 + m * 60 + s
@staticmethod
def seconds2f(s: float, fps: Fraction, /) -> int:
"""
Convert seconds to frames
:param s: Seconds
:param fps: Framerate Per Second
:return: Frames
"""
return round(s * fps)
@staticmethod
def samples2seconds(num_samples: int, sample_rate: int, /) -> float:
"""
Convert samples to seconds
:param num_samples: Samples
:param sample_rate: Playback sample rate
:return: Seconds
"""
return num_samples / sample_rate
@staticmethod
def seconds2samples(s: float, sample_rate: int, /) -> int:
"""
Convert seconds to samples
:param s: Seconds
:param sample_rate: Playback sample rate
:return: Samples
"""
return round(s * sample_rate)
@classmethod
def f2samples(cls, f: int, fps: Fraction, sample_rate: int) -> int:
"""
Convert frames to samples
:param f: Frames
:param fps: Framerate Per Second
:param sample_rate: Playback sample rate
:return: Samples
"""
s = cls.f2seconds(f, fps)
return cls.seconds2samples(s, sample_rate)
@classmethod
def samples2f(cls, num_samples: int, sample_rate: int, fps: Fraction) -> int:
"""
Convert sample to frames
:param num_samples: Samples
:param sample_rate: Playback sample rate
:param fps: Framerate Per Second
:return: Frame
"""
s = cls.samples2seconds(num_samples, sample_rate)
return cls.seconds2f(s, fps)
@staticmethod
def composets(h: float, m: float, s: float, /, *, precision: int = 3) -> str:
"""
Make a timestamp based on given hours, minutes and seconds
:param h: Hours
:param m: Minutes
:param s: Seconds
:param precision: Precision number, defaults to 3
:return: Timestamp
"""
if precision == 0:
out = f"{h:02.0f}:{m:02.0f}:{round(s):02}"
elif precision == 3:
out = f"{h:02.0f}:{m:02.0f}:{s:06.3f}"
elif precision == 6:
out = f"{h:02.0f}:{m:02.0f}:{s:09.6f}"
elif precision == 9:
out = f"{h:02.0f}:{m:02.0f}:{s:012.9f}"
else:
Status.fail(f'composets: the precision {precision} must be a multiple of 3 (including 0)')
return out
|
[
"[email protected]"
] | |
c4cc3eae8ce8dc40427cfc6263c0d8d9207e33ce
|
e2590e0a78046a22131b69c76ebde21bf042cdd1
|
/ABC201_300/ABC275/A.py
|
6bc5a95d16891d1502a3adf5fbd2ff8aa0b3a6a3
|
[] |
no_license
|
masato-sso/AtCoderProblems
|
b8e23941d11881860dcf2942a5002a2b19b1f0c8
|
fbc02e6b7f8c6583e5a4e5187463e0001fc5f4d8
|
refs/heads/main
| 2023-01-22T23:57:58.509585 | 2023-01-21T14:07:47 | 2023-01-21T14:07:47 | 170,867,816 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 179 |
py
|
N = int(input())
H = list(map(int, input().split()))
maxValue = max(H)
ans = 0
for idx,h in enumerate(H):
if(h == maxValue):
ans = idx + 1
break
print(ans)
|
[
"[email protected]"
] | |
a0d3caee1fbf6c2afadd6139c75f0fb247dbe328
|
b24e45267a8d01b7d3584d062ac9441b01fd7b35
|
/Usuario/.history/views_20191102195546.py
|
879e6589a3c510e2404c8ff9b59bed87520c898f
|
[] |
no_license
|
slalbertojesus/merixo-rest
|
1707b198f31293ced38930a31ab524c0f9a6696c
|
5c12790fd5bc7ec457baad07260ca26a8641785d
|
refs/heads/master
| 2022-12-10T18:56:36.346159 | 2020-05-02T00:42:39 | 2020-05-02T00:42:39 | 212,175,889 | 0 | 0 | null | 2022-12-08T07:00:07 | 2019-10-01T18:56:45 |
Python
|
UTF-8
|
Python
| false | false | 3,630 |
py
|
from django.shortcuts import render
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import AllowAny
from rest_framework_simplejwt.tokens import RefreshToken
from .models import Usuario
from .serializers import UsuarioSerializer
SUCCESS = 'exito'
ERROR = 'error'
DELETE_SUCCESS = 'eliminado'
UPDATE_SUCCESS = 'actualizado'
CREATE_SUCCESS = 'creado'
@api_view(['GET', ])
def api_detail_usuario_view(request, identificador):
try:
usuario = Usuario.objects.get(identificador = identificador)
except usuario.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = UsuarioSerializer(usuario)
return Response(serializer.data)
@api_view(['PUT',])
def api_update_usuario_view(request, identificador):
try:
usuario = Usuario.objects.get(identificador = identificador)
except usuario.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'PUT':
serializer = UsuarioSerializer(usuario, data=request.data)
data = {}
if serializer.is_valid():
serializer.save()
data[SUCCESS] = UPDATE_SUCCESS
return Response(data=data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['DELETE',])
def api_delete_usuario_view(request, identificador):
try:
usuario = Usuario.objects.get(identificador=identificador)
except usuario.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'DELETE':
operation = usuario.delete()
data = {}
if operation:
data[SUCCESS] = DELETE_SUCCESS
return Response(data=data)
@api_view(['POST',])
@permission_classes([AllowAny,])
def api_create_usuario_view(request):
if request.method == 'POST':
serializer = UsuarioSerializer(data=request.data)
data = {}
if serializer.is_valid():
usuario = serializer.save()
data['response'] = "se registró de forma exitosa"
data['nombre'] = usuario.nombre
data['usuario'] = usuario.usuario
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(["POST"])
@permission_classes([AllowAny,])
def api_login(request):
usuario = request.data.get("usuario")
contraseña = request.data.get("contraseña")
if usuario is None or contraseña is None:
return Response({'error': 'No existen contraseña ni usuario'},
status=HTTP_400_BAD_REQUEST)
usuario = authenticate(usuario=usuario, contraseña=contraseña)
get_tokens_for_user(usuario)
return {
'refresh': str(token),
'access': str(token.access_token),
}
def for_user(cls, user):
"""
Returns an authorization token for the given user that will be provided
after authenticating the user's credentials.
"""
user_id = getattr(user, api_settings.USER_ID_FIELD)
if not isinstance(user_id, int):
user_id = str(user_id)
token = cls()
token[api_settings.USER_ID_CLAIM] = user_id
return token
refresh = RefreshToken.for_user(user)
def authenticate(usuario, contraseña):
usuario = Usuario.objects.get(usuario= usuario, contraseña=contraseña)
if not usuario:
raise serializers.ValidationError({'error': 'Usuario no existe'},
status=HTTP_404_NOT_FOUND)
return usuario
|
[
"[email protected]"
] | |
ae8caa3e5755b5b934074980647e9b8a044a2e9a
|
2d930aadf19b2ad6ea49725099d2f37475cd57f8
|
/test/functional/wallet-dump.py
|
c3f723a19bbd46584fb33bce6dba37487abcdcbe
|
[
"MIT"
] |
permissive
|
stratton-oakcoin/oakcoin
|
ea83774c9f6ea64adb8832770e6219ffb31edef6
|
fe53193a50bd3674211448f1dcc39c6f9f042bb2
|
refs/heads/master
| 2021-01-20T13:22:05.877005 | 2017-05-07T10:09:57 | 2017-05-07T10:09:57 | 90,477,972 | 1 | 2 | null | 2017-05-07T10:09:57 | 2017-05-06T16:58:05 |
C++
|
UTF-8
|
Python
| false | false | 4,770 |
py
|
#!/usr/bin/env python3
# Copyright (c) 2016 The Oakcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the dumpwallet RPC."""
from test_framework.test_framework import OakcoinTestFramework
from test_framework.util import (start_nodes, start_node, assert_equal, oakcoind_processes)
def read_dump(file_name, addrs, hd_master_addr_old):
"""
Read the given dump, count the addrs that match, count change and reserve.
Also check that the old hd_master is inactive
"""
with open(file_name, encoding='utf8') as inputfile:
found_addr = 0
found_addr_chg = 0
found_addr_rsv = 0
hd_master_addr_ret = None
for line in inputfile:
# only read non comment lines
if line[0] != "#" and len(line) > 10:
# split out some data
key_label, comment = line.split("#")
# key = key_label.split(" ")[0]
keytype = key_label.split(" ")[2]
if len(comment) > 1:
addr_keypath = comment.split(" addr=")[1]
addr = addr_keypath.split(" ")[0]
keypath = None
if keytype == "inactivehdmaster=1":
# ensure the old master is still available
assert(hd_master_addr_old == addr)
elif keytype == "hdmaster=1":
# ensure we have generated a new hd master key
assert(hd_master_addr_old != addr)
hd_master_addr_ret = addr
else:
keypath = addr_keypath.rstrip().split("hdkeypath=")[1]
# count key types
for addrObj in addrs:
if addrObj['address'] == addr and addrObj['hdkeypath'] == keypath and keytype == "label=":
found_addr += 1
break
elif keytype == "change=1":
found_addr_chg += 1
break
elif keytype == "reserve=1":
found_addr_rsv += 1
break
return found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_ret
class WalletDumpTest(OakcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = False
self.num_nodes = 1
self.extra_args = [["-keypool=90"]]
def setup_network(self, split=False):
# Use 1 minute timeout because the initial getnewaddress RPC can take
# longer than the default 30 seconds due to an expensive
# CWallet::TopUpKeyPool call, and the encryptwallet RPC made later in
# the test often takes even longer.
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args, timewait=60)
def run_test (self):
tmpdir = self.options.tmpdir
# generate 20 addresses to compare against the dump
test_addr_count = 20
addrs = []
for i in range(0,test_addr_count):
addr = self.nodes[0].getnewaddress()
vaddr= self.nodes[0].validateaddress(addr) #required to get hd keypath
addrs.append(vaddr)
# Should be a no-op:
self.nodes[0].keypoolrefill()
# dump unencrypted wallet
self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.unencrypted.dump")
found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \
read_dump(tmpdir + "/node0/wallet.unencrypted.dump", addrs, None)
assert_equal(found_addr, test_addr_count) # all keys must be in the dump
assert_equal(found_addr_chg, 50) # 50 blocks where mined
assert_equal(found_addr_rsv, 90*2) # 90 keys plus 100% internal keys
#encrypt wallet, restart, unlock and dump
self.nodes[0].encryptwallet('test')
oakcoind_processes[0].wait()
self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0])
self.nodes[0].walletpassphrase('test', 10)
# Should be a no-op:
self.nodes[0].keypoolrefill()
self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.encrypted.dump")
found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_enc = \
read_dump(tmpdir + "/node0/wallet.encrypted.dump", addrs, hd_master_addr_unenc)
assert_equal(found_addr, test_addr_count)
assert_equal(found_addr_chg, 90*2 + 50) # old reserve keys are marked as change now
assert_equal(found_addr_rsv, 90*2)
if __name__ == '__main__':
WalletDumpTest().main ()
|
[
"[email protected]"
] | |
4cf799ae31dfe4802a0d9299a2f9c9087c10afe6
|
0add969034a82912bc6e19abc427abe883ee65bb
|
/theta_en_time_polar.py
|
a9683111bde6bafb250a54492723f599975e5624
|
[] |
no_license
|
Michael-Gong/New_LPI_python_script
|
eefd162fdbbc3c614c66e2b157ea5296e3bc8492
|
9de109c6f19aa60bdeaf102e9a1ec0baff5669ad
|
refs/heads/master
| 2020-03-28T16:06:09.631550 | 2020-02-01T08:21:17 | 2020-02-01T08:21:17 | 148,659,608 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,511 |
py
|
#%matplotlib inline
#import sdf
import matplotlib
import matplotlib as mpl
#mpl.style.use('https://raw.githubusercontent.com/Michael-Gong/DLA_project/master/style')
matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
from numpy import ma
from matplotlib import colors, ticker, cm
from matplotlib.mlab import bivariate_normal
from optparse import OptionParser
import os
from mpl_toolkits.mplot3d import Axes3D
import random
from mpl_toolkits import mplot3d
from matplotlib import rc
import matplotlib.transforms as mtransforms
import sys
#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
#rc('text', usetex=True)
font = {'family' : 'monospace',
'color' : 'black',
'weight' : 'normal',
'size' : 28,
}
font2 = {'family' : 'monospace',
'color' : 'black',
'weight' : 'normal',
'size' : 15,
}
font_size = 28
font_size_2 = 15
#plt.rc('text', usetex=True)
#plt.rc('font', family='serif')
upper = matplotlib.cm.jet(np.arange(256))
lower = np.ones((int(256/4),4))
for i in range(3):
lower[:,i] = np.linspace(1, upper[0,i], lower.shape[0])
cmap = np.vstack(( lower, upper ))
mycolor_jet = matplotlib.colors.ListedColormap(cmap, name='myColorMap', N=cmap.shape[0])
upper = matplotlib.cm.viridis(np.arange(256))
lower = np.ones((int(256/4),4))
for i in range(3):
lower[:,i] = np.linspace(1, upper[0,i], lower.shape[0])
cmap = np.vstack(( lower, upper ))
mycolor_viridis = matplotlib.colors.ListedColormap(cmap, name='myColorMap', N=cmap.shape[0])
upper = matplotlib.cm.rainbow(np.arange(256))
lower = np.ones((int(256/4),4))
for i in range(3):
lower[:,i] = np.linspace(1, upper[0,i], lower.shape[0])
cmap = np.vstack(( lower, upper ))
mycolor_rainbow = matplotlib.colors.ListedColormap(cmap, name='myColorMap', N=cmap.shape[0])
def pxpy_to_energy(gamma, weight):
binsize = 200
en_grid = np.linspace(50,19950,200)
en_bin = np.linspace(0,20000.0,201)
en_value = np.zeros_like(en_grid)
for i in range(binsize):
# if i == binsize-1:
# en_value[i] = sum(weight[en_bin[i]<=gamma])
# else:
en_value[i] = sum(weight[ (en_bin[i]<=gamma) & (gamma<en_bin[i+1]) ])
return (en_grid, en_value)
def theta_to_grid(theta, weight):
binsize = 240
theta_grid = np.linspace(-119.5,119.5,240)
theta_bin = np.linspace(-120,120,241)
theta_value = np.zeros_like(theta_grid)
for i in range(binsize):
# if i == binsize-1:
# en_value[i] = sum(weight[en_bin[i]<=gamma])
# else:
theta_value[i] = sum(weight[ (theta_bin[i]<=theta) & (theta<theta_bin[i+1]) ])
return (theta_grid, theta_value)
if __name__ == "__main__":
part_number = 50000
from_path = './p50000_no_T150/'
nsteps = int(sum(1 for line in open(from_path+'t_tot_s.txt'))/part_number)
ntheta = 270
ngg = 120
from_path_list = ['./p50000_no_T150/','./p50000_rr_T150/','./p50000_qe_T150/']
#from_path_list = ['./Data_qe_T500_p50000_try/']
for i in range(np.size(from_path_list)):
from_path = from_path_list[i] #'./Data_qe_T050_p50000/'
to_path = from_path
t0 = np.loadtxt(from_path+'t_tot_s.txt')/2/np.pi
px0 = np.loadtxt(from_path+'px_tot_s.txt')
py0 = np.loadtxt(from_path+'py_tot_s.txt')
t0 = np.reshape(t0,(part_number,nsteps))
px0 = np.reshape(px0,(part_number,nsteps))
py0 = np.reshape(py0,(part_number,nsteps))
gg0 = (px0**2+py0**2+1)**0.5*0.51e-3
ww0 = np.zeros_like(gg0)+1
ww0 = np.zeros_like(gg0)+gg0
theta0 = np.arctan2(py0,px0)
theta_edges = np.linspace(-np.pi,np.pi, ntheta +1)
gg_edges = np.linspace(0.1, 6, ngg +1)
theta_edges_1 = np.linspace(-np.pi,np.pi,ntheta)
gg_edges_1 = np.linspace(0.1, 6, ngg)
for n in range(np.size(t0[0,:])):
H, _, _ = np.histogram2d(gg0[:,n], theta0[:,n], [gg_edges, theta_edges], weights=gg0[:,n])
print('Max H:',np.max(H))
Theta, R = np.meshgrid(theta_edges_1,gg_edges_1)
H_temp = np.sum(H[:,:]*R,0)
print('averaged |theta|=',np.sum(H_temp*abs(theta_edges_1))/np.sum(H_temp)/np.pi*180)
fig, ax = plt.subplots(1, 1, subplot_kw=dict(projection='polar'))
ax.set_facecolor('whitesmoke')
levels = np.logspace(1,5, 101)
H[H<0.01] = np.nan
img=ax.pcolormesh(Theta, R, H, norm=colors.LogNorm(vmin=0.01, vmax=1e3), cmap='viridis')
# cax = fig.add_axes([0.68,0.97,0.25,0.02])
# cbar=fig.colorbar(img,cax=cax, ticks=[1e3,1e5],orientation='horizontal')
# cbar.ax.set_xticklabels(cbar.ax.get_xticklabels(), fontsize=font_size_2)
# cbar.set_label(r'dI/d$\theta$dE [A.U.]',fontdict=font2)
# ax.tick_params(axis="y", pad=25)
ax.tick_params(axis="x", pad=10)
# ax.set_xticks([])
if (i%3 != 2):
ax.set_xticklabels([])
#ax.set_xlim(10,50)
#ax.set_ylim(0.,1.)
ax.set_xlabel(r'$\theta\ [^o]$',fontdict=font)
# ax.set_rlim(1e-1,1e3)
# ax.set_rmax(1e3)
l_r = np.array([0,1,2,3])
ax.set_rticks(l_r+1)
ax.set_yticklabels([])
# ax.set_yticklabels(['$10^%d$' % x for x in (l_r+1)])
ax.set_rlim(0, 6)
ax.set_rlabel_position(90)
# ax.set_rscale('log')
# ax.set_rscale('log')
# ax.set_thetamin(-90)
# ax.set_thetamax(90)
# ax.set_yticklabels([0.1,1,10,100,1000])
ax.set_xticklabels([0,90,180,270])
#ax.set_theta_zero_location('N')
# ax.set_ylabel(r'$\theta\ [^o]$',fontdict=font)
ax.tick_params(axis='x',labelsize=font_size)
ax.tick_params(axis='y',labelsize=font_size_2)
#ax.set_title('proton_angular_time='+str(time1), va='bottom', y=1., fontsize=20)
# plt.text(-100,650,' t = '++' fs',fontdict=font)
ax.grid(True,linestyle='--',linewidth=1.5,color='grey')
#plt.pcolormesh(x, y, ex.T, norm=mpl.colors.Normalize(vmin=0,vmax=100,clip=True), cmap=cm.cubehelix_r)
# plt.axis([x.min(), x.max(), y.min(), y.max()])
#### manifesting colorbar, changing label and axis properties ####
# cbar=plt.colorbar(pad=0.01)#ticks=[np.min(ex), -eee/2, 0, eee/2, np.min()])
# cbar.ax.set_yticklabels(cbar.ax.get_yticklabels(), fontsize=font_size)
# cbar.set_label('dN/dE [A.U.]',fontdict=font)
# a0=200.0
# alpha=np.linspace(-3.5,0.5,501)
# plt.xlabel(r'$\theta$'+' [degree]',fontdict=font)
# plt.ylabel('time [fs]',fontdict=font)
# plt.xticks([-135,-90,-45,0,45,90,135],fontsize=font_size);
#plt.yticks([0,500,1000,1500],fontsize=font_size);
# plt.title(r'$dN/d\theta$'+' for no RR', fontsize=font_size)
# plt.xlim(-120,120)
# plt.ylim(0,1650)
#plt.title('electron at y='+str(round(y[n,0]/2/np.pi,4)),fontdict=font)
plt.subplots_adjust(top=0.90, bottom=0.11, left=0.1, right=0.93, hspace=0.10, wspace=0.05)
fig = plt.gcf()
fig.set_size_inches(6., 6.)
#fig.set_size_inches(5, 4.5)
fig.savefig(to_path+'theta_en_dist_'+to_path[7:-1]+'_'+str(n).zfill(4)+'.png',format='png',dpi=160)
plt.close("all")
|
[
"[email protected]"
] | |
373f9f9cd537df8df9fb85fee9220607f78f2be6
|
de5adea6b67660bfc45150ee56b6cf4957c8c4e7
|
/main_app/migrations/0001_initial.py
|
f522eb7c2263895a61cc3153af186e867e0d5fdf
|
[] |
no_license
|
arthuroe/treasure_gram
|
70049a25009318d947488dea28505f65816d9d84
|
5ce93ed21284fee17640b15546011848de3115ac
|
refs/heads/develop
| 2020-03-18T02:16:19.413381 | 2018-05-23T17:18:58 | 2018-05-23T17:24:16 | 134,182,468 | 0 | 0 | null | 2018-05-28T18:52:48 | 2018-05-20T20:02:49 |
Python
|
UTF-8
|
Python
| false | false | 824 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-20 21:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Treasure',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('value', models.DecimalField(decimal_places=2, max_digits=10)),
('materials', models.CharField(max_length=100)),
('location', models.CharField(max_length=100)),
('img_url', models.CharField(max_length=100)),
],
),
]
|
[
"[email protected]"
] | |
3b1a469d9c82b2869b62462652c2a0c924e3bb31
|
470e0a9dc07edfe13ca68f2a1b6d60d0e395e095
|
/3-2.py
|
b67172d7abbc097ec46a4caa894c73eba80c02c4
|
[] |
no_license
|
mj08021/ThisIsCodingTestforGetaJob
|
77ce8edab2bd855db9b96597982f58251d0bd31e
|
ad98b368956937065c6c396b2806351a4eaf12a2
|
refs/heads/main
| 2023-04-28T10:51:02.012344 | 2021-05-16T05:51:58 | 2021-05-16T05:51:58 | 316,853,768 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 614 |
py
|
# N, M, K를 공백으로 구분하여 입력받기
n, m, k = map(int, input().split())
# N개의 수를 공백으로 구분하여 입력받기
data = list(map(int, input().split()))
data.sort() # 입력받은 수 정렬
first = data[n - 1] # 가장 큰 수
second = data[n - 2] # 두 번째로 큰 수
# 가장 큰 수가 더해지는 횟수 계산
count = int(m / (k + 1)) * k
count += m % (k + 1)
result = 0
result += (count) * first # 가장 큰 수 더하기
result += (m - count) * second # 두 번째로 큰 수 더하기
print(result) # 최종 답안 출력
# ex) input
# 5 8 3
# 2 4 5 4 6
|
[
"[email protected]"
] | |
f2a7ee60c707d01abd0cb97f85cf647ce9ebf4e3
|
a6df74bc7c139734bd9ce9f48d51e08fdc7d7efb
|
/article/migrations/0006_auto_20210311_1721.py
|
116d4f2900f9f0f393ad9eb58894d557a6c11b5c
|
[] |
no_license
|
Erlan1998/python_group_7_homework_68_Erlan_Kurbanaliev
|
5a7f210e51f1998e5d52cdeb42538f2786af3f9f
|
fdc92be2c5187c78fecdc713f58e0e3e9fc62cb1
|
refs/heads/master
| 2023-05-03T17:01:59.066596 | 2021-05-26T13:28:41 | 2021-05-26T13:28:41 | 368,165,221 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 364 |
py
|
# Generated by Django 3.1.6 on 2021-03-11 17:21
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('article', '0005_auto_20210311_1319'),
]
operations = [
migrations.RenameField(
model_name='article',
old_name='tags',
new_name='tags_old',
),
]
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.