Dataset schema (one row per source file):

blob_id: string (length 40)
directory_id: string (length 40)
path: string (length 3 to 616)
content_id: string (length 40)
detected_licenses: list (length 0 to 112)
license_type: string (2 classes)
repo_name: string (length 5 to 115)
snapshot_id: string (length 40)
revision_id: string (length 40)
branch_name: string (777 classes)
visit_date: timestamp[us] (2015-08-06 10:31:46 to 2023-09-06 10:44:38)
revision_date: timestamp[us] (1970-01-01 02:38:32 to 2037-05-03 13:00:00)
committer_date: timestamp[us] (1970-01-01 02:38:32 to 2023-09-06 01:08:06)
github_id: int64 (4.92k to 681M; contains nulls)
star_events_count: int64 (0 to 209k)
fork_events_count: int64 (0 to 110k)
gha_license_id: string (22 classes)
gha_event_created_at: timestamp[us] (2012-06-04 01:52:49 to 2023-09-14 21:59:50; contains nulls)
gha_created_at: timestamp[us] (2008-05-22 07:58:19 to 2023-08-21 12:35:19; contains nulls)
gha_language: string (149 classes)
src_encoding: string (26 classes)
language: string (1 class)
is_vendor: bool (2 classes)
is_generated: bool (2 classes)
length_bytes: int64 (3 to 10.2M)
extension: string (188 classes)
content: string (length 3 to 10.2M)
authors: list (length 1)
author_id: string (length 1 to 132)

The rows below list these columns in this order, with values separated by "|".
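As a minimal illustrative sketch (not part of the original dump; the Parquet file name and the use of pandas are assumptions), rows with this schema could be loaded and filtered like this:

import pandas as pd

# hypothetical export of the rows below; the file name is an assumption
df = pd.read_parquet("code_files_sample.parquet")

# keep permissively licensed, non-vendored, non-generated files
subset = df[(df["license_type"] == "permissive") & ~df["is_vendor"] & ~df["is_generated"]]

# inspect one matching file's origin, size, and the first 200 characters of its source
row = subset.iloc[0]
print(row["repo_name"], row["path"], row["length_bytes"])
print(row["content"][:200])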
9a5502d7f1c3da7c41263037451b3f748e01a83c
|
5cf5750bb8fb1cba865d25628aef91df3f28da87
|
/pyhnko/twilio/20-valid_parentheses.py
|
2ee473f6d84df0cd06dcc0b9668b0414ec9f15de
|
[] |
no_license
|
nextsde/daily-coding-prep
|
a3d8d4f8920ed06dc7077fa4968d22ad88af62fa
|
09d6a768ccadbff6ebc8bb36816ca3795e6d0f66
|
refs/heads/master
| 2021-05-20T10:36:18.290954 | 2020-07-12T16:54:31 | 2020-07-12T16:54:31 | 252,252,638 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 350 |
py
|
class Solution:
def isValid(self, s: str) -> bool:
parenthesis = {'(': ')', '{':'}', '[':']'}
stack = []
for char in s:
if char in '([{':
stack.append(char)
elif not stack or parenthesis[stack.pop()] != char: return False
return not stack
|
[
"[email protected]"
] | |
e5672144935aa5bf4c908a1be53ac0fbfde893a1
|
eb1148f75215739a7ca5ba35027fe46b9dcc5e30
|
/practice/0416_practice/SWEA_4366_정식이의 은행업무_이정민.py
|
d09dcc26aafb4b5bf91d31679301927909e68846
|
[] |
no_license
|
ljm9748/Algorithm_ProblemSolving
|
8ee594c0d1226ebe3670e772a7fc1c08ddf62e43
|
d1ebc34019ae2d795417ef47f74f1407a7f3cb9e
|
refs/heads/master
| 2023-05-02T20:00:01.784017 | 2021-04-26T13:57:29 | 2021-04-26T13:57:29 | 339,901,096 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,055 |
py
|
def bintonum():
two=1
tmpsum=0
for i in range(len(bininput)-1,-1,-1):
tmpsum+=bininput[i]*two
two*=2
return tmpsum
def tritonum():
tri = 1
tmpsum = 0
for i in range(len(triinput)-1, -1, -1):
tmpsum += triinput[i] * tri
tri *= 3
return tmpsum
for tc in range(int(input())):
bin=[]
bininput=list(map(int, list(input())))
triinput=list(map(int,list(input())))
for i in range(len(bininput)):
if bininput[i]==0:
bininput[i]=1
bin.append(bintonum())
bininput[i]=0
else:
bininput[i]=0
bin.append(bintonum())
bininput[i]=1
flag=False
for i in range(len(triinput)):
if flag:
break
for j in range(2):
triinput[i]=(triinput[i]+1)%3
tmpanswer=tritonum()
if tmpanswer in bin:
print('#{} {}'.format(tc+1,tmpanswer))
flag=True
break
triinput[i] = (triinput[i] + 1) % 3
|
[
"[email protected]"
] | |
b718e0a18a4216579a39362df3c537ddbf64dd79
|
423bbc654e6ebe426a27ae9daa1b48232b0e9047
|
/rpn_092.py
|
49a01607ecb344766f03fe5e68ee211426034da1
|
[] |
no_license
|
mcclosr5/Python-code
|
fb0824b0e6c250b44c85705db740e7ddb3efae41
|
794e43351327abf3a83ace5dd7f51e2ef011fb19
|
refs/heads/main
| 2023-07-11T06:08:45.292511 | 2021-08-18T13:13:02 | 2021-08-18T13:13:02 | 397,591,357 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 766 |
py
|
from stack_092 import Stack
def cal(sym, p1, p2):
if sym == "+":
return p1 + p2
elif sym == "-":
return p2 - p1
elif sym == "*":
return p1 * p2
elif sym == "/":
return p2 / p1
def cal2(sym, p):
if sym == "n":
return -p
elif sym == "r":
return p ** (1 / 2)
def calculator(line):
s = Stack()
d = ["+", "-", "*", "/"]
d2 = ["r", "n"]
l = line.split()
for chars in l:
if chars not in d and chars not in d2:
s.push(chars)
elif chars in d:
p1 = float(s.pop())
p2 = float(s.pop())
s.push(cal(chars, p1, p2))
elif chars in d2:
p = float(s.pop())
s.push(cal2(chars, p))
return float(s.top())
|
[
"[email protected]"
] | |
641b32b9ba936495238128b5b25732b687e2479d
|
39fa403d46a4456a07c761e1aaa8af2d418c5f87
|
/kid_readout/analysis/resources/experiments.py
|
728292c1d96bb19e30bd33d9770f53f432eda98c
|
[
"BSD-2-Clause"
] |
permissive
|
vapor36/kid_readout
|
72d94d96e964d6a2eef3aa57ed6fc814946cfe46
|
07202090d468669200cab78297122880c1c03e87
|
refs/heads/master
| 2020-12-12T13:32:47.267337 | 2018-11-11T15:36:40 | 2018-11-11T15:36:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 969 |
py
|
import bisect
import socket
import time
if socket.gethostname() == 'detectors':
default_cryostat = 'HPD'
else:
default_cryostat = 'StarCryo'
import starcryo_experiments
import hpd_experiments
def get_experiment_info_at(unix_time, cryostat=None):
if cryostat is None:
cryostat = default_cryostat
if cryostat.lower() == 'hpd':
_unix_time_index = hpd_experiments._unix_time_index
by_unix_time_table = hpd_experiments.by_unix_time_table
else:
_unix_time_index = starcryo_experiments._unix_time_index
by_unix_time_table = starcryo_experiments.by_unix_time_table
index = bisect.bisect(_unix_time_index, unix_time)
index = index - 1
if index < 0:
raise Exception("No experiment found for timestamp %s" % time.ctime(unix_time))
info = by_unix_time_table[index]
if info['optical_state'] == 'dark':
info['is_dark'] = True
else:
info['is_dark'] = False
return info
|
[
"[email protected]"
] | |
50fdea7cb51a62a4e0e88fd8f44cd9fa5d56b2ea
|
ba2b40a762feb52f6a0a1dc43da96b41112a3bbb
|
/code/experiment/dcase_simpleNetCNN/ex2/0.0001/testNetSimple.py
|
93ded45503a2c3bb4a12707a4d4f0c3baf51239c
|
[] |
no_license
|
YuanGongND/DeepVis2
|
e03a3c8b4231e61c6442314935490d9131b41046
|
6514f403c0df9dab4fa1c66b3a0a95ea0aea2ec3
|
refs/heads/master
| 2021-08-22T23:35:13.949128 | 2017-12-01T17:26:29 | 2017-12-01T17:26:29 | 112,768,671 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,398 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 28 11:25:16 2017
Keras model of
@author: Kyle
"""
import tensorflow as tf
import numpy as np
import keras
from keras.models import Model
from keras import regularizers
import math
import matplotlib.pyplot as plt
#%%
def genSineFilter( frequency, points = 64, sampleRate = 16000 ):
Ts = 1 /sampleRate
t = list( np.linspace( -points/2*Ts, points/2*Ts, num= points ) )
#t = list( xrange( -points/2*Ts, points/2*Ts-Ts, Ts ) )
sinFilter = [ math.sin( 2 * math.pi * frequency *elem) for elem in t ]
plt.plot( sinFilter )
return sinFilter
#%%
def sineInit( shape, dtype=None ):
print( shape )
InitKernal = np.zeros( shape )
# the rest filter
for filterIndex in range( 1, shape[ 3 ] ):
InitKernal[ 0, :, 0, filterIndex ] = genSineFilter( 150 *( filterIndex ), points = shape[ 1 ] )
InitKernal = InitKernal / shape[ 1 ]
InitKernal[ 0, :, 0, 0 ] = np.zeros( shape[ 1 ] )
InitKernal[ 0, 0, 0, 0 ] = 1
return InitKernal
#%% loadInit
def loadInit( shape, dtype = None, upbound = 95 ):
print( shape )
InitKernal = np.zeros( shape )
if __name__ == '__main__':
exterFilterFile = np.loadtxt( '../initializer/bandPassFilters_256_64' + '_' + str(upbound) + '.csv', delimiter = ',' )
else:
exterFilterFile = np.loadtxt( '../../initializer/bandPassFilters_256_64' + '_' + str(upbound) + '.csv', delimiter = ',' )
for filterIndex in range( 0, shape[ 3 ] ):
InitKernal[ 0, :, 0, filterIndex ] = exterFilterFile[ filterIndex, : ]
return InitKernal
#%% loadInit
def loadInitScene( shape, dtype = None, upbound = 50 ):
print( shape )
InitKernal = np.zeros( shape )
if __name__ == '__main__':
exterFilterFile = np.loadtxt( '../initializer/bandPassFilters_256_64' + '_' + str(upbound) + '.csv', delimiter = ',' )
else:
exterFilterFile = np.loadtxt( '../../initializer/bandPassFilters_256_64' + '_' + str(upbound) + '.csv', delimiter = ',' )
for filterIndex in range( 0, shape[ 3 ] ):
InitKernal[ 0, :, 0, filterIndex ] = exterFilterFile[ filterIndex, : ]
print( 'Initializer' + str( upbound ) + 'is used' )
return InitKernal
#%%
def testNet( input, timeStep_num = 150, convLayer_num_front = 1, filter_num = 64, numClass = 4, init = 'glorot_uniform',\
activationUnit = 'relu', conv_filter_size_front = 256, pooling_size = 2, convLayer_num_back = 4, conv_filter_size_back = 40, l2_reg = 0.01,\
denseUnitNum = 64, task = 'emotion' ):
# the input shape is [ example_num, whole_audio_length ], e.g., [ 200 samples, 96000 points ]
# convert it to tensor
input = tf.convert_to_tensor( input )
# parameters of the network
example_num = input.get_shape().as_list()[ 0 ]
# length of each sub-sequence, e.g., 96000/timeStep(150)
subSequence_length = int( input.get_shape().as_list()[ 1 ] / timeStep_num )
# reshape into [ example_num * sequence, subsequence_length ]
input = tf.reshape( input, [ example_num *timeStep_num, 1, subSequence_length, 1 ] )
print( input.shape )
# first conduct average pooling
#input = tf.layers.batch_normalization( input )
input = keras.layers.pooling.AveragePooling2D( pool_size=( 1, 1 ), strides=None, padding='same' )( input )
print( input.shape )
# convLayer_num *( conv + maxpooling )
for i in range( convLayer_num_front ):
input = tf.layers.batch_normalization( input )
with tf.name_scope( 'conv' + str( i + 1 ) ):
if i == 0:
if task == 'scene':
input = keras.layers.convolutional.Conv2D( filter_num, ( 1, conv_filter_size_front ), padding='same', activation= 'relu', kernel_initializer = loadInitScene )( input )
else:
input = keras.layers.convolutional.Conv2D( filter_num, ( 1, conv_filter_size_front ), padding='same', activation= 'relu', kernel_initializer = loadInit )( input )
else:
pass
#input = keras.layers.convolutional.Conv2D( filter_num, ( 1, conv_filter_size_front ), padding='same', activation= activationUnit, kernel_regularizer=regularizers.l2( l2_reg ), kernel_initializer = init )( input )
print( input.shape )
print( input.shape )
print( i )
input = tf.abs( input )
#input = keras.layers.pooling.AveragePooling2D( ( 1, pooling_size **(convLayer_num_front + 8 ) ), padding='valid' )( input )
input = keras.layers.pooling.AveragePooling2D( ( 1, subSequence_length ), padding='valid' )( input )
#input = tf.scalar_mul( pooling_size **(convLayer_num_front - 2), input )
    # reshape in preparation for the LSTM layers
print( input.shape )
input = tf.transpose( input, [ 3, 0, 1, 2 ] ) # change the column order
print( input.shape )
restPoint = input.get_shape().as_list()[ -1 ]
print( input.shape )
input = tf.reshape( input, [ filter_num, 1, example_num, timeStep_num*restPoint ] )
print( input.shape )
input = tf.transpose( input, [ 2, 3, 0, 1 ] )
print( input.shape )
for i in range( convLayer_num_back ):
input = tf.layers.batch_normalization( input )
input = keras.layers.convolutional.Conv2D( 32, ( 3, 3 ), padding='same', activation= activationUnit, kernel_regularizer=regularizers.l2( l2_reg ), kernel_initializer = init )( input )
print( input.shape )
input = keras.layers.pooling.MaxPooling2D( ( 2, 2 ), padding='same' )( input )
print( input.shape )
print( i )
newSubSequence_length = input.get_shape().as_list()[ -3 ] *input.get_shape().as_list()[ -2 ] *input.get_shape().as_list()[ -1 ]
input = tf.reshape( input, [ example_num, newSubSequence_length ] )
print( input.shape )
# start the LSTM layers
input = keras.layers.core.Dense( denseUnitNum, activation = activationUnit, kernel_initializer = init )( input )
input = tf.nn.dropout( input, keep_prob = 0.5 )
print( input.shape )
output = keras.layers.core.Dense( numClass, activation = 'softmax' )( input )
print( output.shape )
return output
#%%
if __name__ == '__main__':
time_seq = list( range( 1, 16 ) )
testInput = np.zeros( [ 1, 441000 ] )
testNet( input = testInput )
|
[
"[email protected]"
] | |
d2aedbae6a5abb925a4d445183833edf30996453
|
ebc111217351cda19e1a64a8fe67c956db1ddef2
|
/urls_and_templates/settings.py
|
7d99534bbd58af5af255a625ffa81ee03389fee3
|
[] |
no_license
|
PeterM358/Python-web-2021
|
cf08beaa3330495afc53e640f4a2aaf0429049e9
|
96dc40fa433329ea3deaa39532934b2fab83489f
|
refs/heads/main
| 2023-07-09T15:09:08.868548 | 2021-07-02T12:55:58 | 2021-07-02T12:55:58 | 382,328,747 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,403 |
py
|
"""
Django settings for urls_and_templates project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os.path
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-(sozs*28h@re8^*mne-_a-b-(zmng!v8lr*x@wt54liv)t=+8p'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main_app',
'secondary_app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'urls_and_templates.urls'
TEMPLATES_DIR = os.path.join(BASE_DIR, 'templates')
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [TEMPLATES_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'urls_and_templates.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"estestveno"
] |
estestveno
|
4658eff33e7988405f1034b7b661e1b423697488
|
df308577ecb922b627a547cb2ff39c3e2030f96b
|
/pymr/pymr.py
|
2a1b22a528c887a379e327a6c79837b3f5c35f11
|
[] |
no_license
|
d2207197/pymr
|
49c13a5904b8b6ff5a807a2064d6d9dfba6f9d86
|
d802a175ccbef0bc7ee7ee7ba645cef3482b11ca
|
refs/heads/master
| 2021-03-12T20:15:48.134245 | 2015-08-09T16:54:11 | 2015-08-09T16:54:11 | 37,312,114 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,709 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import locale
import warnings
import abc
import fileinput
import sys
import argparse
import subprocess
import shlex
import os
from six.moves import map
from itertools import groupby
import six
if six.PY2:
import codecs
UTF8Writer = codecs.getwriter('utf8')
sys.stdout = UTF8Writer(sys.stdout)
MAPPER, REDUCER = (sys.executable, sys.argv[0], 'map'), (sys.executable,
sys.argv[0], 'reduce')
class PyMR(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def mapper(self, line):
"""Method documentation"""
return
@abc.abstractmethod
def reducer(self, key, values):
count = sum(int(v) for v in values)
yield key, count
def _do_mapper(self, files):
# print(os.listdir('.'), file=sys.stderr)
for line in fileinput.input(files):
if type(line) == six.binary_type:
line = line.decode('utf8')
for key, value in self.mapper(line):
if type(key) == six.binary_type:
warnings.warn(
'mapper(self, line) output key should not be <type \'str\'>. Automatically converting to <type \'unicode\'> by utf8')
key = key.decode('utf8')
if type(value) == six.binary_type:
warnings.warn(
'mapper(self, line) output value should not be <type \'str\'>. Automatically converting to <type \'unicode\'> by utf8')
value = value.decode('utf8')
yield six.text_type(key), six.text_type(value)
def _do_reducer(self, files):
def line_to_keyvalue(line):
if type(line) == six.binary_type:
line = line.decode('utf8')
# print(type(line), file=sys.stderr)
# print(line, file = sys.stderr)
key, value = line.split('\t', 1)
return key, value
keyvalues = map(line_to_keyvalue, fileinput.input(files))
for key, grouped_keyvalues in groupby(keyvalues, key=lambda x: x[0]):
values = (v for k, v in grouped_keyvalues)
for key, value in self.reducer(key, values):
if type(key) == six.binary_type:
warnings.warn(
'mapper(self, line) output key should not be <type \'str\'>. Automatically converting to <type \'unicode\'> by utf8')
key = key.decode('utf8')
if type(value) == six.binary_type:
warnings.warn(
'mapper(self, line) output value should not be <type \'str\'>. Automatically converting to <type \'unicode\'> by utf8')
value = value.decode('utf8')
yield six.text_type(key), six.text_type(value)
@staticmethod
def _argparser(description):
parser = argparse.ArgumentParser(description=description)
subparsers = parser.add_subparsers(help='sub-commands', dest='cmd')
parser_pipe = subparsers.add_parser('pipe', help='pipeline mode')
parser_localmr = subparsers.add_parser('lmr',
help='local mapreduce mode')
parser_hstreaming = subparsers.add_parser('hadoop',
help='Hadoop Streaming mode')
parser_map = subparsers.add_parser('map', help='mapper mode')
parser_reducer = subparsers.add_parser('reduce', help='reducer mode')
parser_hstreaming.add_argument('INPUT',
help='input folder/file in HDFS')
parser_hstreaming.add_argument('OUTPUT', help='output path in HDFS')
parser_hstreaming.add_argument(
'-j', '--hadoop-streaming-jar',
metavar='PATH_TO_JAR',
help=
'[HSTREAMING] hadoop streaming jar path. (default: %(default)s)',
default='/usr/lib/hadoop-mapreduce/hadoop-streaming.jar')
parser_hstreaming.add_argument(
'-n', '--num-reducer',
metavar='N',
type=int,
help='[LMR/HSTREAMING]number of reducer. (default: %(default)s)',
default=4)
parser_localmr.add_argument('INPUT',
help='input folder/file. `-\' for stdin')
parser_localmr.add_argument('OUTPUT', help='output path')
parser_localmr.add_argument(
'-c', '--lmr-cmd',
metavar='LMR_CMD',
help='[LMR] lmr command. (default: %(default)s)',
default='lmr')
parser_localmr.add_argument(
'-s', '--split-size',
metavar='SIZE',
help='[LMR] size of splits. (default: %(default)s)',
default='1m')
parser_localmr.add_argument(
'-n', '--num-reducer',
metavar='N',
type=int,
help='[LMR/HSTREAMING]number of reducer. (default: %(default)s)',
default=4)
parser_pipe.add_argument('FILE', nargs='*', help='input files')
parser_map.add_argument('FILE', nargs='*', help='input files')
parser_reducer.add_argument('FILE', nargs='*', help='input files')
# parser.add_argument('--mapper-argument', help='argument for mapper') # TODO
# parser.add_argument('--reducer-argument', help='argument for reducer') # TODO
return parser.parse_args()
def __init__(self, description='', files=[]):
self._files = files + [sys.argv[0], __file__]
self.description = description
def run(self):
args = PyMR._argparser(self.description)
# print(args, file= sys.stderr)
if args.cmd == 'map':
mapper_out = self._do_mapper(args.FILE)
# mapper_out = do_combiner(mapper_out)
for key, value in mapper_out:
# print(type('rchcrh'), type(key), type(value), sys.executable,
# file=sys.stderr)
# print(locale.getpreferredencoding(), file=sys.stderr)
print('{}\t{}'.format(key, value))
elif args.cmd == 'reduce':
reducer_out = self._do_reducer(args.FILE)
for key, value in reducer_out:
print('{}\t{}'.format(key, value))
elif args.cmd == 'pipe':
_localpipe(args.FILE)
elif args.cmd == 'lmr':
_localmr(args.INPUT, args.OUTPUT, args.lmr_cmd, args.split_size,
args.num_reducer)
elif args.cmd == 'hadoop':
_hstreaming(args.INPUT, args.OUTPUT, args.num_reducer,
args.hadoop_streaming_jar, self._files)
def _get_input_process(input):
if input == '-':
return sys.stdin
input = os.path.normpath(input)
assert os.access(input, os.R_OK)
if os.path.isdir(input):
input_files = os.listdir(input)
# return '{}/*'.format(input)
else:
input_files = [input]
return subprocess.Popen(['cat'] + input_files,
stdout=subprocess.PIPE).stdout
def _localmr(input, output, lmr_cmd, split_size, num_reducer):
input_data = _get_input_process(input)
lmr_process = subprocess.Popen(
[lmr_cmd, split_size, str(num_reducer), ' '.join(MAPPER),
' '.join(REDUCER), output],
stdin=input_data)
input_data.close()
lmr_process.wait()
def _localpipe(input_files):
input_process = subprocess.Popen(['cat'] + input_files,
stdout=subprocess.PIPE)
mapper_process = subprocess.Popen(
MAPPER,
stdin=input_process.stdout,
stdout=subprocess.PIPE)
input_process.stdout.close()
sort_process = subprocess.Popen(
shlex.split('sort -k1,1 -t"\t" -s'),
stdin=mapper_process.stdout,
stdout=subprocess.PIPE)
mapper_process.stdout.close()
reducer_process = subprocess.Popen(REDUCER, stdin=sort_process.stdout)
sort_process.stdout.close()
reducer_process.wait()
def _hstreaming(input, output, num_reducer, jar_path, files):
devnull = open('/dev/null', 'w')
yarn_command = 'yarn'
try:
subprocess.call(yarn_command, stdout=devnull, stderr=devnull)
except OSError:
yarn_command = 'hadoop'
# files = ','.join(__file__, sys.argv[0])
subprocess.Popen([yarn_command, 'jar', jar_path, '-files', ','.join(files),
'-mapper', ' '.join(MAPPER), '-reducer',
' '.join(REDUCER), '-input', input, '-output', output])
# if __name__ == '__main__':
# PyMR().run()
|
[
"[email protected]"
] | |
fdd3824c859362cf556b8c8c4267f2efe3a55e1a
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/EWgdCtSDmRqJPrzoz_5.py
|
3e904f7b4d09347b6ea7552ef8f049cb30658149
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 69 |
py
|
def peel_layer_off(lst):
return [row[1:-1] for row in lst[1:-1]]
|
[
"[email protected]"
] | |
c67c21b30aa610dc55042a6e27204e50e29b4146
|
084e35c598426b1137f9cd502e1b5e7f09cdf034
|
/并查集/problem1202_交换字符串中的元素_DFS.py
|
5671b255c350dad26c0c50b2b8ea6bfec3f777be
|
[] |
no_license
|
sakurasakura1996/Leetcode
|
3a941dadd198ee2f54b69057ae3bbed99941974c
|
78f239959af98dd3bd987fb17a3544010e54ae34
|
refs/heads/master
| 2021-09-11T05:07:44.987616 | 2021-09-07T05:39:34 | 2021-09-07T05:39:34 | 240,848,992 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,179 |
py
|
"""
1202. 交换字符串中的元素
给你一个字符串 s,以及该字符串中的一些「索引对」数组 pairs,其中 pairs[i] = [a, b] 表示字符串中的两个索引(编号从 0 开始)。
你可以 任意多次交换 在 pairs 中任意一对索引处的字符。
返回在经过若干次交换后,s 可以变成的按字典序最小的字符串。
示例 1:
输入:s = "dcab", pairs = [[0,3],[1,2]]
输出:"bacd"
解释:
交换 s[0] 和 s[3], s = "bcad"
交换 s[1] 和 s[2], s = "bacd"
示例 2:
输入:s = "dcab", pairs = [[0,3],[1,2],[0,2]]
输出:"abcd"
解释:
交换 s[0] 和 s[3], s = "bcad"
交换 s[0] 和 s[2], s = "acbd"
交换 s[1] 和 s[2], s = "abcd"
示例 3:
输入:s = "cba", pairs = [[0,1],[1,2]]
输出:"abc"
解释:
交换 s[0] 和 s[1], s = "bca"
交换 s[1] 和 s[2], s = "bac"
交换 s[0] 和 s[1], s = "abc"
提示:
1 <= s.length <= 10^5
0 <= pairs.length <= 10^5
0 <= pairs[i][0], pairs[i][1] < s.length
s 中只含有小写英文字母
"""
# date: 2021/1/11 好像昨天周赛也是这样一道题,思路很相近,应该用并查集来做,好好做一做这道题
# 这种题目,发现用并查集的思路倒不是很难,因为题目很容易发现是一个关系转换的过程,比如位置0的字符可以和位置1的字符交换
# 顺序,位置1的字符可以和位置2的字符交换顺序,那么位置0和位置2的字符也是可以交换顺序的,那么他们三个都是在同一个集合中。
# 然后只要在同一个集合中的字符,我们是可以实现任意顺序排列的,那么只要按照字典排序就行了。
# 然后还需要注意的是,可能最后归纳之后,还有好几个集合,这个时候我们要字典排序之后然后再放回到该集合所占的位置上去。
from typing import List
from collections import defaultdict


class Solution:
    def dfs(self, res, graph, visited, x):
        # depth-first search: collect every index reachable from x into res
        visited[x] = True
        res.append(x)
        for neighbor in graph[x]:
            if not visited[neighbor]:
                self.dfs(res, graph, visited, neighbor)

    def smallestStringWithSwaps(self, s: str, pairs: List[List[int]]) -> str:
        # build an undirected graph over the swappable index pairs
        graph = defaultdict(list)
        for a, b in pairs:
            graph[a].append(b)
            graph[b].append(a)
        visited = [False] * len(s)
        res = list(s)
        for i in range(len(s)):
            if visited[i]:
                continue
            component = []
            self.dfs(component, graph, visited, i)
            component.sort()
            # within a component the characters can be permuted freely, so place them in sorted order
            for idx, ch in zip(component, sorted(res[j] for j in component)):
                res[idx] = ch
        return ''.join(res)


if __name__ == '__main__':
    solu = Solution()
    s = "dcab"
    pairs = [[0,3],[1,2],[0,2]]
    ans = solu.smallestStringWithSwaps(s, pairs)
    print(ans)
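# Added illustration (not part of the original file): the notes above argue that union-find
# is the natural approach; a minimal, hypothetical union-find variant, reusing the
# List/defaultdict imports at the top of this file, could look like this.
class SolutionUnionFind:
    def smallestStringWithSwaps(self, s: str, pairs: List[List[int]]) -> str:
        parent = list(range(len(s)))

        def find(x):
            # locate the set representative, halving the path as we go
            while parent[x] != x:
                parent[x] = parent[parent[x]]
                x = parent[x]
            return x

        # union every pair of swappable indices
        for a, b in pairs:
            parent[find(a)] = find(b)

        # group indices by representative, then write each group's characters back in sorted order
        groups = defaultdict(list)
        for i in range(len(s)):
            groups[find(i)].append(i)
        res = list(s)
        for idxs in groups.values():
            for idx, ch in zip(sorted(idxs), sorted(res[j] for j in idxs)):
                res[idx] = ch
        return ''.join(res)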
|
[
"[email protected]"
] | |
d7bcc01c5138406388ef33179a20381a87be7be3
|
76a7dccc6aaa5ece69edf586a0493471e9a32103
|
/tests/ethpm/test_package_validation_utils.py
|
5c0d98c93d715e5ec4d9b2097fd359f342c0dc70
|
[
"MIT"
] |
permissive
|
LieutenantRoger/py-ethpm
|
5f95d77d47fd7111cd7f51d2fb3fc9fbc814572a
|
9e7a9f4d28ad922c9349c1ac9216a328bdb73527
|
refs/heads/master
| 2020-03-20T01:23:52.062447 | 2018-06-01T21:39:20 | 2018-06-01T21:39:20 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,458 |
py
|
import pytest
from ethpm.exceptions import ValidationError
from ethpm.utils.package_validation import (
validate_package_exists,
validate_package_against_schema,
validate_package_deployments,
)
def test_validate_package_exists_validates():
assert validate_package_exists("./v2-packages/safe-math-lib/1.0.0.json") is None
def test_validate_package_exists_invalidates():
with pytest.raises(ValidationError):
validate_package_exists("DNE")
def test_validate_package_validates(valid_package):
assert validate_package_against_schema(valid_package) is None
def test_validate_package_against_all_packages(all_packages):
for pkg in all_packages:
assert validate_package_against_schema(pkg) is None
def test_validate_package_invalidates(invalid_package):
with pytest.raises(ValidationError):
validate_package_against_schema(invalid_package)
def test_validate_deployed_contracts_present_validates(package_with_conflicting_deployments):
with pytest.raises(ValidationError):
validate_package_deployments(package_with_conflicting_deployments)
def test_validate_deployments(package_with_matching_deployment):
validate = validate_package_deployments(package_with_matching_deployment)
assert validate is None
def test_validate_deployed_contracts_pr(package_with_no_deployments):
validate = validate_package_deployments(package_with_no_deployments)
assert validate is None
|
[
"[email protected]"
] | |
b86b4367f67c264349af9081e3ad94946ca5a4ff
|
1b3178461535d5a1b318fa6d978bbbc427a0d1c4
|
/app/models/__init__.py
|
e5c3ff3afccdcce30f804f58b5231901385032be
|
[] |
no_license
|
cyr1z/api-movie-library-
|
29d3cf4a3b56b544fdddf69511651848c8cc1be6
|
b3a021bff8112a3eb81f553b3eb0df751a488adb
|
refs/heads/main
| 2023-06-19T13:09:09.320541 | 2021-07-23T11:46:12 | 2021-07-23T11:46:12 | 381,181,110 | 1 | 0 | null | 2021-07-23T11:46:13 | 2021-06-28T23:01:54 |
Python
|
UTF-8
|
Python
| false | false | 78 |
py
|
""" Models db """
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
|
[
"[email protected]"
] | |
232507fa6d1a15e2d78a9cacab939e0e2489a4eb
|
d2e2fb0ed2405ad5e43c73c1339cfb24d6863bb6
|
/src/improveo/wsgi.py
|
99d0cde2a9570b15d080c1e735b4c814e363be09
|
[] |
no_license
|
AsemAntar/Django-Made-Easy.-Build-an-application-for-companies
|
c23ce30af93537eb691003e600f9f31dc8e1abce
|
3957495c83e2abd8a011dcc33c70f3c83a1730b5
|
refs/heads/master
| 2022-12-11T20:55:03.800665 | 2020-09-01T17:09:34 | 2020-09-01T17:09:34 | 267,568,153 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 393 |
py
|
"""
WSGI config for improveo project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'improveo.settings')
application = get_wsgi_application()
|
[
"[email protected]"
] | |
9098f8ca2a82afa92a94c82966d8fd2360ba1df0
|
17a2234b237d79a257b8b8b493e6fe77e77547bd
|
/djreact/backend/env/lib/python2.7/encodings/hz.py
|
3b9055b1d25440b2d8dae4859f56c26733c742e1
|
[] |
no_license
|
Marcandy/python-dances
|
bec51264f92ca222afbd75e0204573fbf027229a
|
d268c5feccc2d040304d9b25ac47e055bdcdedfa
|
refs/heads/master
| 2020-04-25T02:15:36.752281 | 2019-03-19T05:42:53 | 2019-03-19T05:42:53 | 172,433,227 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 106 |
py
|
/usr/local/Cellar/python@2/2.7.14_3/Frameworks/Python.framework/Versions/2.7/lib/python2.7/encodings/hz.py
|
[
"[email protected]"
] | |
d52040f5592facb0fcc3a45fc1defe8bedbed45d
|
4443b7ee1cdfd4dd21663230a7f5995aa0e3e079
|
/Word_Break.py
|
5c33fab731876e5683e67f9500300c9c24fcd6bf
|
[] |
no_license
|
pranabsarkar/Algorithm-Pratice-Questions-LeetCode
|
c56c754781e6afb38352f10e6b4993d8a6876e8d
|
c135bb322fbda8505f85deaa9cfe3b9ed279a443
|
refs/heads/master
| 2022-12-12T00:03:04.549840 | 2020-08-21T20:30:12 | 2020-08-21T20:30:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,684 |
py
|
# Given a non-empty string s and a dictionary wordDict containing a list of non-empty words,
# determine if s can be segmented into a space-separated sequence of one or more dictionary words.
#
# Note:
#
# The same word in the dictionary may be reused multiple times in the segmentation.
# You may assume the dictionary does not contain duplicate words.
# Example 1:
#
# Input: s = "leetcode", wordDict = ["leet", "code"]
# Output: true
# Explanation: Return true because "leetcode" can be segmented as "leet code".
# Example 2:
#
# Input: s = "applepenapple", wordDict = ["apple", "pen"]
# Output: true
# Explanation: Return true because "applepenapple" can be segmented as "apple pen apple".
# Note that you are allowed to reuse a dictionary word.
# Example 3:
#
# Input: s = "catsandog", wordDict = ["cats", "dog", "sand", "and", "cat"]
# Output: false
class Solution:
# Brute Force 2^n Time and O(n) Space
def wordBreak(self, s, wordDict):
# if not s:
# return True
#
# for word in wordDict:
#
# if s[0:len(word)] == word and self.wordBreak(s[len(word):], wordDict):
# return True
#
# return False
memo = {}
return self.helper(s, wordDict, memo)
def helper(self, s, wordDict, memo):
if not s:
return True
elif s in memo:
return memo[s]
for word in wordDict:
if s[0:len(word)] == word and self.helper(s[len(word):], wordDict, memo):
memo[s] = True
return True
memo[s] = False
return False
# Using Memo O(n^2) Time and O(n) Space
|
[
"[email protected]"
] | |
cd18161f73be5325aacfb94fa29cdc0da7c8de5e
|
04e5b6df2ee3bcfb7005d8ec91aab8e380333ac4
|
/Lib/objc/_QuickLookSupport.py
|
fe4f4ad4cc1e4346f428551f9e388c66c59e649c
|
[
"MIT"
] |
permissive
|
ColdGrub1384/Pyto
|
64e2a593957fd640907f0e4698d430ea7754a73e
|
7557485a733dd7e17ba0366b92794931bdb39975
|
refs/heads/main
| 2023-08-01T03:48:35.694832 | 2022-07-20T14:38:45 | 2022-07-20T14:38:45 | 148,944,721 | 884 | 157 |
MIT
| 2023-02-26T21:34:04 | 2018-09-15T22:29:07 |
C
|
UTF-8
|
Python
| false | false | 939 |
py
|
"""
Classes from the 'QuickLookSupport' framework.
"""
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
QLExtensionPreview = _Class("QLExtensionPreview")
QLURLExtensionPreview = _Class("QLURLExtensionPreview")
QLCoreSpotlightExtensionPreview = _Class("QLCoreSpotlightExtensionPreview")
QLZipArchive = _Class("QLZipArchive")
QLZipArchiveEntry = _Class("QLZipArchiveEntry")
QLExtension = _Class("QLExtension")
QLGracePeriodTimer = _Class("QLGracePeriodTimer")
QLPlatformImage = _Class("QLPlatformImage")
QLExtensionManager = _Class("QLExtensionManager")
QLUTIManager = _Class("QLUTIManager")
QLThumbnailUTICache = _Class("QLThumbnailUTICache")
QLExtensionThumbnailGenerator = _Class("QLExtensionThumbnailGenerator")
QLExtensionManagerCache = _Class("QLExtensionManagerCache")
|
[
"[email protected]"
] | |
8e63afaf2bfa0ac779abb55ef2e140ed86259012
|
356740062993a5967717098a7a3ee78ac6c6cf3f
|
/chapter01/examples/hello.py
|
ac2bb10551cf24337d234a8dcf96e4904245bf41
|
[] |
no_license
|
xerifeazeitona/autbor
|
79588302f14c0c09b1f9f57fcb973e656ee1da5c
|
c37ccbfa87c1ac260e728a3a91a8f2be97978f04
|
refs/heads/main
| 2023-04-03T18:01:34.588984 | 2021-04-07T17:59:26 | 2021-04-07T17:59:26 | 348,749,618 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 375 |
py
|
"""
This program says hello and asks for my name.
"""
print('Hello, world!')
print('What is your name?') # prompts for name
myName = input() # stores input text into myName
print('It is good to meet you, ' + myName)
print('The length of your name is: ')
print(len(myName))
print('What is your age?')
myAge = input()
print('You will be ' + str(int(myAge) + 1) + ' in a year.')
|
[
"[email protected]"
] | |
6bb20e9b387afb48bc5562cd4cf07a53bf8cb999
|
30d5943710f6b2468b7a844166deb7c2f8ec4d52
|
/pal/writer/writer_factory.py
|
038bc5e63be3a1a13e43e734eb5e23f0ed888303
|
[
"MIT"
] |
permissive
|
qazxsw1597532018/pal
|
7c3d1f35fdcfcf3ca3b361ada85c390b3d78c4a1
|
f47fa19bdad8898b42479d41ac18f5d88f028d07
|
refs/heads/master
| 2023-08-10T21:50:51.396604 | 2021-09-28T03:43:36 | 2021-09-29T23:19:28 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,963 |
py
|
from pal.writer.abstract_writer import AbstractWriter
from pal.writer.register.c.register_writer import CRegisterWriter
from pal.writer.register.cxx11.register_writer import Cxx11RegisterWriter
from pal.writer.register.rust.register_writer import RustRegisterWriter
from pal.writer.register.yaml import YamlRegisterWriter
from pal.writer.register.none import NoneRegisterWriter
from pal.writer.access_mechanism.gnu_inline_x64 import \
GnuInlineX64AccessMechanismWriter
from pal.writer.access_mechanism.gnu_inline_aarch64 import \
GnuInlineAarch64AccessMechanismWriter
from pal.writer.access_mechanism.gnu_inline_aarch32 import \
GnuInlineAarch32AccessMechanismWriter
from pal.writer.access_mechanism.libpal import \
LibpalAccessMechanismWriter
from pal.writer.access_mechanism.rust_libpal import \
RustLibpalAccessMechanismWriter
from pal.writer.access_mechanism.cxx_test import \
CxxTestAccessMechanismWriter
from pal.writer.access_mechanism.c_test import \
CTestAccessMechanismWriter
from pal.writer.access_mechanism.yaml import \
YamlAccessMechanismWriter
from pal.writer.access_mechanism.none import \
NoneAccessMechanismWriter
from pal.writer.print_mechanism.printf_utf8 import PrintfUtf8PrintMechanismWriter
from pal.writer.print_mechanism.rust_println import RustPrintlnPrintMechanismWriter
from pal.writer.print_mechanism.none import NonePrintMechanismWriter
from pal.writer.file_format.unix import UnixFileFormatWriter
from pal.writer.file_format.windows import WindowsFileFormatWriter
from pal.writer.file_format.yaml import YamlFileFormatWriter
from pal.writer.file_format.none import NoneFileFormatWriter
from pal.writer.comment.c_multiline import CMultilineCommentWriter
from pal.writer.comment.rust import RustCommentWriter
from pal.writer.comment.yaml import YamlCommentWriter
from pal.writer.comment.none import NoneCommentWriter
from pal.writer.instruction.gnu_inline import GnuInlineInstructionWriter
from pal.writer.instruction.libpal_c import LibpalCInstructionWriter
from pal.writer.instruction.libpal_cxx11 import LibpalCxx11InstructionWriter
from pal.writer.instruction.libpal_rust import LibpalRustInstructionWriter
from pal.writer.instruction.none import NoneInstructionWriter
from pal.writer.peripheral.none import NonePeripheralWriter
from pal.writer.peripheral.c import CPeripheralWriter
from pal.writer.peripheral.cxx11 import Cxx11PeripheralWriter
language_options = [
"c",
"c++11",
"rust",
"yaml",
"none",
]
access_mechanism_options = [
"gnu_inline",
"libpal",
"test",
"yaml",
"none",
]
print_mechanism_options = {
"printf_utf8": PrintfUtf8PrintMechanismWriter,
"rust_println": RustPrintlnPrintMechanismWriter,
"none": NonePrintMechanismWriter,
}
file_format_options = {
"unix": UnixFileFormatWriter,
"windows": WindowsFileFormatWriter,
"yaml": YamlFileFormatWriter,
"none": NoneFileFormatWriter,
}
def get_access_mechanism_writer(config):
if config.execution_state == "intel_64bit" and config.access_mechanism == "gnu_inline":
return GnuInlineX64AccessMechanismWriter
elif config.execution_state == "armv8a_aarch64" and config.access_mechanism == "gnu_inline":
return GnuInlineAarch64AccessMechanismWriter
elif config.execution_state == "armv8a_aarch32" and config.access_mechanism == "gnu_inline":
return GnuInlineAarch32AccessMechanismWriter
elif config.access_mechanism == "test" and config.language == "c++11":
return CxxTestAccessMechanismWriter
elif config.access_mechanism == "test" and config.language == "c":
return CTestAccessMechanismWriter
elif config.access_mechanism == "yaml":
return YamlAccessMechanismWriter
elif config.access_mechanism == "libpal" and config.language == "c++11":
return LibpalAccessMechanismWriter
elif config.access_mechanism == "libpal" and config.language == "c":
return LibpalAccessMechanismWriter
elif config.access_mechanism == "libpal" and config.language == "rust":
return RustLibpalAccessMechanismWriter
else:
return NoneAccessMechanismWriter
def get_register_writer(config):
if config.language == "c":
return CRegisterWriter
elif config.language == "c++11":
return Cxx11RegisterWriter
elif config.language == "rust":
return RustRegisterWriter
elif config.language == "yaml":
return YamlRegisterWriter
else:
return NoneRegisterWriter
def get_instruction_writer(config):
if config.language == "c" and config.access_mechanism == "libpal":
return LibpalCInstructionWriter
elif config.language == "c" and config.access_mechanism == "gnu_inline":
return GnuInlineInstructionWriter
elif config.language == "c++11" and config.access_mechanism == "libpal":
return LibpalCxx11InstructionWriter
elif config.language == "c++11" and config.access_mechanism == "gnu_inline":
return GnuInlineInstructionWriter
elif config.language == "rust":
return LibpalRustInstructionWriter
else:
return NoneInstructionWriter
def get_peripheral_writer(config):
if config.language == "c":
return CPeripheralWriter
elif config.language == "c++11":
return Cxx11PeripheralWriter
else:
return NonePeripheralWriter
def get_comment_writer(config):
if config.language == "c":
return CMultilineCommentWriter
elif config.language == "c++11":
return CMultilineCommentWriter
elif config.language == "rust":
return RustCommentWriter
elif config.language == "yaml":
return YamlCommentWriter
else:
return NoneCommentWriter
def make_writer(config):
    if config.language not in language_options:
        raise Exception("invalid language option: " + str(config.language))
    if config.access_mechanism not in access_mechanism_options:
        raise Exception("invalid access mechanism option: " + str(config.access_mechanism))
    if config.print_mechanism not in print_mechanism_options:
        raise Exception("invalid print_mechanism option: " +
                        str(config.print_mechanism))
    if config.file_format not in file_format_options:
        raise Exception("invalid file_format option: " + str(config.file_format))
access_mechanism_writer = get_access_mechanism_writer(config)
register_writer = get_register_writer(config)
instruction_writer = get_instruction_writer(config)
peripheral_writer = get_peripheral_writer(config)
comment_writer = get_comment_writer(config)
class Writer(
AbstractWriter,
register_writer,
instruction_writer,
peripheral_writer,
access_mechanism_writer,
print_mechanism_options[config.print_mechanism],
file_format_options[config.file_format],
comment_writer
):
pass
return Writer()
|
[
"[email protected]"
] | |
ebdefe8dd598697cd1d89a4f1635fbd2df1723a0
|
82b946da326148a3c1c1f687f96c0da165bb2c15
|
/sdk/python/pulumi_azure_native/resources/v20200601/deployment_at_management_group_scope.py
|
b9676c933cc7c442e3ae0b12ae49c1b0fa0e359b
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
morrell/pulumi-azure-native
|
3916e978382366607f3df0a669f24cb16293ff5e
|
cd3ba4b9cb08c5e1df7674c1c71695b80e443f08
|
refs/heads/master
| 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 |
Apache-2.0
| 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null |
UTF-8
|
Python
| false | false | 11,773 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['DeploymentAtManagementGroupScopeArgs', 'DeploymentAtManagementGroupScope']
@pulumi.input_type
class DeploymentAtManagementGroupScopeArgs:
def __init__(__self__, *,
group_id: pulumi.Input[str],
properties: pulumi.Input['DeploymentPropertiesArgs'],
deployment_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a DeploymentAtManagementGroupScope resource.
:param pulumi.Input[str] group_id: The management group ID.
:param pulumi.Input['DeploymentPropertiesArgs'] properties: The deployment properties.
:param pulumi.Input[str] deployment_name: The name of the deployment.
:param pulumi.Input[str] location: The location to store the deployment data.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Deployment tags
"""
pulumi.set(__self__, "group_id", group_id)
pulumi.set(__self__, "properties", properties)
if deployment_name is not None:
pulumi.set(__self__, "deployment_name", deployment_name)
if location is not None:
pulumi.set(__self__, "location", location)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> pulumi.Input[str]:
"""
The management group ID.
"""
return pulumi.get(self, "group_id")
@group_id.setter
def group_id(self, value: pulumi.Input[str]):
pulumi.set(self, "group_id", value)
@property
@pulumi.getter
def properties(self) -> pulumi.Input['DeploymentPropertiesArgs']:
"""
The deployment properties.
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: pulumi.Input['DeploymentPropertiesArgs']):
pulumi.set(self, "properties", value)
@property
@pulumi.getter(name="deploymentName")
def deployment_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the deployment.
"""
return pulumi.get(self, "deployment_name")
@deployment_name.setter
def deployment_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "deployment_name", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The location to store the deployment data.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Deployment tags
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class DeploymentAtManagementGroupScope(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
deployment_name: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[pulumi.InputType['DeploymentPropertiesArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Deployment information.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] deployment_name: The name of the deployment.
:param pulumi.Input[str] group_id: The management group ID.
:param pulumi.Input[str] location: The location to store the deployment data.
:param pulumi.Input[pulumi.InputType['DeploymentPropertiesArgs']] properties: The deployment properties.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Deployment tags
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DeploymentAtManagementGroupScopeArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Deployment information.
:param str resource_name: The name of the resource.
:param DeploymentAtManagementGroupScopeArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DeploymentAtManagementGroupScopeArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
deployment_name: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[pulumi.InputType['DeploymentPropertiesArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DeploymentAtManagementGroupScopeArgs.__new__(DeploymentAtManagementGroupScopeArgs)
__props__.__dict__["deployment_name"] = deployment_name
if group_id is None and not opts.urn:
raise TypeError("Missing required property 'group_id'")
__props__.__dict__["group_id"] = group_id
__props__.__dict__["location"] = location
if properties is None and not opts.urn:
raise TypeError("Missing required property 'properties'")
__props__.__dict__["properties"] = properties
__props__.__dict__["tags"] = tags
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:resources/v20200601:DeploymentAtManagementGroupScope"), pulumi.Alias(type_="azure-native:resources:DeploymentAtManagementGroupScope"), pulumi.Alias(type_="azure-nextgen:resources:DeploymentAtManagementGroupScope"), pulumi.Alias(type_="azure-native:resources/v20190501:DeploymentAtManagementGroupScope"), pulumi.Alias(type_="azure-nextgen:resources/v20190501:DeploymentAtManagementGroupScope"), pulumi.Alias(type_="azure-native:resources/v20190510:DeploymentAtManagementGroupScope"), pulumi.Alias(type_="azure-nextgen:resources/v20190510:DeploymentAtManagementGroupScope"), pulumi.Alias(type_="azure-native:resources/v20190701:DeploymentAtManagementGroupScope"), pulumi.Alias(type_="azure-nextgen:resources/v20190701:DeploymentAtManagementGroupScope"), pulumi.Alias(type_="azure-native:resources/v20190801:DeploymentAtManagementGroupScope"), pulumi.Alias(type_="azure-nextgen:resources/v20190801:DeploymentAtManagementGroupScope"), pulumi.Alias(type_="azure-native:resources/v20191001:DeploymentAtManagementGroupScope"), pulumi.Alias(type_="azure-nextgen:resources/v20191001:DeploymentAtManagementGroupScope"), pulumi.Alias(type_="azure-native:resources/v20200801:DeploymentAtManagementGroupScope"), pulumi.Alias(type_="azure-nextgen:resources/v20200801:DeploymentAtManagementGroupScope"), pulumi.Alias(type_="azure-native:resources/v20201001:DeploymentAtManagementGroupScope"), pulumi.Alias(type_="azure-nextgen:resources/v20201001:DeploymentAtManagementGroupScope"), pulumi.Alias(type_="azure-native:resources/v20210101:DeploymentAtManagementGroupScope"), pulumi.Alias(type_="azure-nextgen:resources/v20210101:DeploymentAtManagementGroupScope"), pulumi.Alias(type_="azure-native:resources/v20210401:DeploymentAtManagementGroupScope"), pulumi.Alias(type_="azure-nextgen:resources/v20210401:DeploymentAtManagementGroupScope")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(DeploymentAtManagementGroupScope, __self__).__init__(
'azure-native:resources/v20200601:DeploymentAtManagementGroupScope',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'DeploymentAtManagementGroupScope':
"""
Get an existing DeploymentAtManagementGroupScope resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = DeploymentAtManagementGroupScopeArgs.__new__(DeploymentAtManagementGroupScopeArgs)
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["properties"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return DeploymentAtManagementGroupScope(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
the location of the deployment.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the deployment.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output['outputs.DeploymentPropertiesExtendedResponse']:
"""
Deployment properties.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Deployment tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the deployment.
"""
return pulumi.get(self, "type")
|
[
"[email protected]"
] | |
df850616b919ac895c1f9ec1575ccec50881de0e
|
c85b91bfdd7eb2fa5a7d6c6a9b722c8548c83105
|
/vscode/extensions/ms-python.python-2020.3.69010/languageServer.0.5.31/Typeshed/third_party/2and3/Crypto/Util/__init__.pyi
|
4848491e304fdf0fd61d7bbb92e89cc418a3299b
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
ryangniadek/.dotfiles
|
ddf52cece49c33664b56f01b17d476cf0f1fafb1
|
be272baf6fb7d7cd4f4db1f6812b710196511ffe
|
refs/heads/master
| 2021-01-14T07:43:12.516127 | 2020-03-22T20:27:22 | 2020-03-22T20:27:22 | 242,632,623 | 0 | 0 |
MIT
| 2020-09-12T17:28:01 | 2020-02-24T02:50:06 |
Python
|
UTF-8
|
Python
| false | false | 101 |
pyi
|
# Names in __all__ with no definition:
# RFC1751
# asn1
# number
# randpool
# strxor
|
[
"[email protected]"
] | |
80623741abfd8ad0974b4f7aaf1ffa0c5a93a268
|
5ab43d03001ae459fae26e9bd17b659f2e9decb1
|
/web/urls/sql.py
|
fad91f6f121765c49eaa457040e8477436106364
|
[] |
no_license
|
bradbann/dbops
|
c022f0bf15e1af3438c4726a57ede139bfcbfc18
|
1097e142ba03406c99ac7d007001f35a2db39fcd
|
refs/heads/master
| 2023-02-05T18:50:55.351188 | 2020-12-28T00:50:22 | 2020-12-28T00:50:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,026 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/12/5 12:33
# @Author : 马飞
# @File : logon.py.py
# @Software: PyCharm
from web.services.sql import sqlquery,sql_query,sqlrelease,sql_check,sql_format,sql_check_result,sql_release,sqlaudit,sql_audit,sqlrun,sql_run,sql_audit_query,sql_audit_detail,sql_run_query
from web.services.sql import get_tree_by_sql
# Purpose: database operations API
sql = [
(r"/sql/query", sqlquery),
(r"/sql/_query", sql_query),
(r"/sql/release", sqlrelease),
(r"/sql/_release", sql_release),
(r"/sql/_check", sql_check),
(r"/sql/_check/result", sql_check_result),
(r"/sql/audit", sqlaudit),
(r"/sql/_audit", sql_audit),
(r"/sql/audit/query", sql_audit_query),
(r"/sql/audit/detail", sql_audit_detail),
(r"/sql/_format", sql_format),
(r"/sql/run", sqlrun),
(r"/sql/_run", sql_run),
(r"/sql/run/query", sql_run_query),
(r"/get_tree", get_tree_by_sql),
]
|
[
"[email protected]"
] | |
b9cbeabe33fbd53d97f99921215dbbdf94b0ebb5
|
8c39fa8241e1ecefab6c693862bee127fd3e1461
|
/proyectoferreteria/apps/gestionadmin/migrations/0073_auto_20200325_1923.py
|
10e23128d48a25bd9985d70d6b4e39300a33539f
|
[] |
no_license
|
ujcv4273/Ferreteriav-0.0.5
|
b5433e727b68e318204386f84416208f99470446
|
9dd16363ce9f4a012a177aa3d5414051b79cd3a2
|
refs/heads/master
| 2022-11-29T16:50:19.066725 | 2020-08-01T18:16:35 | 2020-08-01T18:16:35 | 284,314,116 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 720 |
py
|
# Generated by Django 3.0.4 on 2020-03-26 01:23
from django.db import migrations, models
import proyectoferreteria.apps.gestionadmin.models
class Migration(migrations.Migration):
dependencies = [
('gestionadmin', '0072_auto_20200325_1913'),
]
operations = [
migrations.AlterField(
model_name='planilla',
name='IHSS',
field=models.IntegerField(validators=[proyectoferreteria.apps.gestionadmin.models.validarnegativos]),
),
migrations.AlterField(
model_name='planilla',
name='RAP',
field=models.IntegerField(validators=[proyectoferreteria.apps.gestionadmin.models.validarnegativos]),
),
]
|
[
"[email protected]"
] | |
3883e7c11bd8857f07067e92b0a5ca46c2de6bbf
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/built-in/nlp/Bert-CRF_for_PyTorch/examples/training_trick/task_sentiment_exponential_moving_average_warmup.py
|
125a35edeb7a0b9cf9fb56a694fcacd20820b621
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 |
Apache-2.0
| 2022-10-15T09:29:12 | 2022-04-20T04:11:18 |
Python
|
UTF-8
|
Python
| false | false | 6,678 |
py
|
# -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2017
# All rights reserved.
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ==========================================================================
#! -*- coding:utf-8 -*-
# Sentiment classification task, using two training tricks: exponential moving average (EMA) and warmup
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, Callback, text_segmentate, ListDataset, seed_everything, get_pool_emb
from bert4torch.optimizers import extend_with_exponential_moving_average, get_linear_schedule_with_warmup
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
maxlen = 256
batch_size = 16
config_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/bert_config.json'
checkpoint_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/pytorch_model.bin'
dict_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
seed_everything(42)
# Build the tokenizer
tokenizer = Tokenizer(dict_path, do_lower_case=True)
# Load the dataset
class MyDataset(ListDataset):
@staticmethod
def load_data(filenames):
"""加载数据,并尽量划分为不超过maxlen的句子
"""
D = []
seps, strips = u'\n。!?!?;;,, ', u';;,, '
for filename in filenames:
with open(filename, encoding='utf-8') as f:
for l in f:
text, label = l.strip().split('\t')
for t in text_segmentate(text, maxlen - 2, seps, strips):
D.append((t, int(label)))
return D
def collate_fn(batch):
batch_token_ids, batch_labels = [], []
for text, label in batch:
token_ids = tokenizer.encode(text, maxlen=maxlen)[0]
batch_token_ids.append(token_ids)
batch_labels.append([label])
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
return batch_token_ids, batch_labels.flatten()
# Load the datasets
train_dataloader = DataLoader(MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
# Define the model structure on top of BERT
class Model(BaseModel):
def __init__(self, pool_method='cls') -> None:
super().__init__()
self.pool_method = pool_method
self.bert = build_transformer_model(config_path=config_path, checkpoint_path=checkpoint_path, with_pool=True, segment_vocab_size=0)
self.dropout = nn.Dropout(0.1)
self.dense = nn.Linear(self.bert.configs['hidden_size'], 2)
def forward(self, token_ids):
hidden_states, pooling = self.bert([token_ids])
pooled_output = get_pool_emb(hidden_states, pooling, token_ids.gt(0).long(), self.pool_method)
output = self.dropout(pooled_output)
output = self.dense(output)
return output
model = Model().to(device)
optimizer = optim.Adam(model.parameters(), lr=2e-5)
ema_schedule = extend_with_exponential_moving_average(model, decay=0.99)
warmup_scheduler = get_linear_schedule_with_warmup(optimizer, len(train_dataloader), num_training_steps=len(train_dataloader)*10, last_epoch=-1)
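# Editorial sketch of what the two schedules above do (illustrative update rules,
# not the bert4torch internals; `decay`, `step`, `num_warmup_steps` and
# `num_training_steps` are generic names):
#   EMA keeps a shadow copy of each parameter, updated every optimizer step:
#       shadow = decay * shadow + (1 - decay) * param
#   apply_ema_weights() swaps the shadow weights in for evaluation, and
#   restore_raw_weights() puts the raw training weights back afterwards.
#   The linear warmup schedule scales the learning rate as:
#       lr_scale = step / num_warmup_steps                                     (while warming up)
#       lr_scale = (num_training_steps - step) / (num_training_steps - num_warmup_steps)   (afterwards)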
# Define the loss and optimizer to use; custom ones are supported here
model.compile(
loss=nn.CrossEntropyLoss(),
optimizer=optimizer,
scheduler=[ema_schedule, warmup_scheduler],
metrics=['accuracy']
)
class Evaluator(Callback):
"""评估与保存
"""
def __init__(self):
self.best_val_acc = 0.
def on_epoch_end(self, global_step, epoch, logs=None):
val_acc = self.evaluate(valid_dataloader)
test_acc = self.evaluate(test_dataloader)
if val_acc > self.best_val_acc:
self.best_val_acc = val_acc
# model.save_weights('best_model.pt')
print(f'val_acc: {val_acc:.5f}, test_acc: {test_acc:.5f}, best_val_acc: {self.best_val_acc:.5f}\n')
    # Evaluation function
def evaluate(self, data):
        ema_schedule.apply_ema_weights() # switch to the EMA (moving-average) weights
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
        ema_schedule.restore_raw_weights() # restore the original model parameters
return right / total
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(train_dataloader, epochs=10, steps_per_epoch=None, callbacks=[evaluator])
else:
model.load_weights('best_model.pt')
|
[
"[email protected]"
] | |
dc7b2e654c8f78b1e9ffc0af46f43d30baa54e55
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03329/s613563948.py
|
e52a2eae61fa6172d407b873df69a031908af97c
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 469 |
py
|
n = int(input())
dp = [i for i in range(n+1)]
for i in range(n):
dp[i+1] = min(dp[i+1],dp[i] + 1)
cou = 1
while True:
if i + 6**cou <= n:
dp[i+6**cou] = min(dp[i+6**cou],dp[i]+1)
cou += 1
elif i + 6**cou > n:
break
cou = 1
while True:
if i + 9**cou <= n:
dp[i+9**cou] = min(dp[i+9**cou],dp[i]+1)
cou += 1
elif i + 9**cou > n:
break
print(dp[n])
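# Editorial note on the DP above (not part of the original submission):
# dp[i] is the minimum number of withdrawals needed to reach exactly i yen,
# where each withdrawal is 1 yen, 6**k yen, or 9**k yen.  From every i the loop
# relaxes i+1 (a single 1-yen withdrawal) and every i + 6**cou / i + 9**cou
# that still fits within n.  Worked example: 127 = 81 + 36 + 9 + 1 needs 4
# withdrawals, and no 3 of these denominations sum to 127, so dp[127] == 4.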
|
[
"[email protected]"
] | |
15f70fff1dbdd73ae7becb2f3e22c2671385319c
|
b6c09a1b87074d6e58884211ce24df8ec354da5c
|
/714. 买卖股票的最佳时机含手续费.py
|
9c9467240aa30698f8796de96dccb3b8fbea196d
|
[] |
no_license
|
fengxiaolong886/leetcode
|
a0ee12d67c4a10fb12d6ca4369762ab5b090cab1
|
4c0897bc06a297fa9225a0c46d8ec9217d876db8
|
refs/heads/master
| 2023-03-18T22:16:29.212016 | 2021-03-07T03:48:16 | 2021-03-07T03:48:16 | 339,604,263 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 896 |
py
|
'''
Given an integer array prices, where the i-th element is the price of the stock on day i, and a non-negative integer fee representing the transaction fee for trading the stock.
You may complete as many transactions as you like, but you must pay the transaction fee for each one. You may not buy another share while you still hold one (you must sell before buying again).
Return the maximum profit you can obtain.
Note: a transaction here means the whole process of buying, holding and selling the stock; you only pay the fee once per transaction.
'''
def maxProfit(prices, fee):
dp = [[0 for i in prices] for _ in range(2)]
dp[0][0] = -prices[0]
dp[1][0] = 0
for i in range(1, len(prices)):
# buy
dp[0][i] = max(dp[1][i-1] - prices[i], dp[0][i-1])
# sell
dp[1][i] = max(dp[0][i-1] + prices[i] - fee, dp[1][i-1])
return dp[1][-1]
print(maxProfit(prices = [1, 3, 2, 8, 4, 9], fee = 2))
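# Editorial sketch of an equivalent constant-space formulation of the same DP
# (not part of the original solution): track only `hold` (best profit while
# holding a share) and `cash` (best profit while not holding one).
#
#   def maxProfit_O1(prices, fee):
#       hold, cash = -prices[0], 0
#       for p in prices[1:]:
#           hold = max(hold, cash - p)         # buy today (or keep holding)
#           cash = max(cash, hold + p - fee)   # sell today and pay the fee (or stay out)
#       return cash
#
#   maxProfit_O1([1, 3, 2, 8, 4, 9], 2)  # -> 8, same as the table-based version above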
|
[
"[email protected]"
] | |
b63e4c9ebaa4ca58ea3ebbd502a5151c669d1102
|
9238c5adf211d66cbe9bea5a89e97ca02c31da9a
|
/bin/.venv-ansible-venv/lib/python2.6/site-packages/ansible/modules/core/cloud/amazon/ec2_tag.py
|
409041f906b23179804c4dd7ffd517fea198c8e4
|
[
"MIT"
] |
permissive
|
marcusramberg/dotfiles
|
803d27fb88da8e46abb283b2e2987e51a83b08aa
|
413727089a15e775f532d2da363c03d9fb3fb90a
|
refs/heads/main
| 2023-03-04T17:08:40.123249 | 2023-03-01T07:46:51 | 2023-03-01T07:46:51 | 7,285,450 | 4 | 2 |
MIT
| 2022-12-22T14:39:35 | 2012-12-22T11:57:42 |
Python
|
UTF-8
|
Python
| false | false | 4,962 |
py
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_tag
short_description: create and remove tag(s) to ec2 resources.
description:
- Creates, removes and lists tags from any EC2 resource. The resource is referenced by its resource id (e.g. an instance being i-XXXXXXX). It is designed to be used with complex args (tags), see the examples. This module has a dependency on python-boto.
version_added: "1.3"
options:
resource:
description:
- The EC2 resource id.
required: true
default: null
aliases: []
state:
description:
- Whether the tags should be present or absent on the resource. Use list to interrogate the tags of an instance.
required: false
default: present
choices: ['present', 'absent', 'list']
aliases: []
region:
description:
- region in which the resource exists.
required: false
default: null
aliases: ['aws_region', 'ec2_region']
author: Lester Wade
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Basic example of adding tag(s)
tasks:
- name: tag a resource
ec2_tag: resource=vol-XXXXXX region=eu-west-1 state=present
args:
tags:
Name: ubervol
env: prod
# Playbook example of adding tag(s) to spawned instances
tasks:
- name: launch some instances
ec2: keypair={{ keypair }} group={{ security_group }} instance_type={{ instance_type }} image={{ image_id }} wait=true region=eu-west-1
register: ec2
- name: tag my launched instances
ec2_tag: resource={{ item.id }} region=eu-west-1 state=present
with_items: ec2.instances
args:
tags:
Name: webserver
env: prod
'''
import sys
import time
try:
import boto.ec2
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
resource = dict(required=True),
tags = dict(),
state = dict(default='present', choices=['present', 'absent', 'list']),
)
)
module = AnsibleModule(argument_spec=argument_spec)
resource = module.params.get('resource')
tags = module.params.get('tags')
state = module.params.get('state')
ec2 = ec2_connect(module)
# We need a comparison here so that we can accurately report back changed status.
# Need to expand the gettags return format and compare with "tags" and then tag or detag as appropriate.
filters = {'resource-id' : resource}
gettags = ec2.get_all_tags(filters=filters)
dictadd = {}
dictremove = {}
baddict = {}
tagdict = {}
for tag in gettags:
tagdict[tag.name] = tag.value
if state == 'present':
if not tags:
module.fail_json(msg="tags argument is required when state is present")
if set(tags.items()).issubset(set(tagdict.items())):
module.exit_json(msg="Tags already exists in %s." %resource, changed=False)
else:
for (key, value) in set(tags.items()):
if (key, value) not in set(tagdict.items()):
dictadd[key] = value
tagger = ec2.create_tags(resource, dictadd)
gettags = ec2.get_all_tags(filters=filters)
module.exit_json(msg="Tags %s created for resource %s." % (dictadd,resource), changed=True)
if state == 'absent':
if not tags:
module.fail_json(msg="tags argument is required when state is absent")
for (key, value) in set(tags.items()):
if (key, value) not in set(tagdict.items()):
baddict[key] = value
if set(baddict) == set(tags):
module.exit_json(msg="Nothing to remove here. Move along.", changed=False)
for (key, value) in set(tags.items()):
if (key, value) in set(tagdict.items()):
dictremove[key] = value
tagger = ec2.delete_tags(resource, dictremove)
gettags = ec2.get_all_tags(filters=filters)
module.exit_json(msg="Tags %s removed for resource %s." % (dictremove,resource), changed=True)
if state == 'list':
module.exit_json(changed=False, tags=tagdict)
sys.exit(0)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
|
[
"[email protected]"
] | |
16b34bb9d976490f20e07b25bc22a0a7588fbd8e
|
04e080a00f37a3501c5060380d65c5a6cd669d90
|
/thonnycontrib/m5stack/esp32_api_stubs/inisetup.py
|
b809474e6edb80c845e1089c6fbfc4b79b8a9fa2
|
[
"MIT"
] |
permissive
|
thonny/thonny-m5stack
|
473a2876e72b88d283d8b9d64189028ef7fea111
|
a502579ad5e264342ae0bc2c554c78527053693b
|
refs/heads/master
| 2020-04-20T14:57:15.605699 | 2019-11-18T22:28:36 | 2019-11-18T22:28:36 | 168,914,658 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 107 |
py
|
bdev = None
def check_bootsec():
pass
def fs_corrupted():
pass
def setup():
pass
uos = None
|
[
"[email protected]"
] | |
7a807bb9bd34fd9fcc6a9ff4ba2affba6b6b435c
|
6392354e74cce4a303a544c53e13d0a7b87978ee
|
/m6/MyBlog/venv/Lib/site-packages/django/contrib/gis/admin/widgets.py
|
c02d31c5101d1b4d47789fc374fb418343dc8180
|
[] |
no_license
|
music51555/wxPythonCode
|
dc35e42e55d11850d7714a413da3dde51ccdd37e
|
f77b71ed67d926fbafd1cfec89de8987d9832016
|
refs/heads/master
| 2020-04-11T20:20:38.136446 | 2019-04-01T09:17:34 | 2019-04-01T09:17:34 | 162,067,449 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,723 |
py
|
import logging
from django.contrib.gis.gdal import GDALException
from django.contrib.gis.geos import GEOSException, GEOSGeometry
from django.forms.widgets import Textarea
from django.utils import translation
# Creating a template context that contains Django settings
# values needed by admin map templates.
geo_context = {'LANGUAGE_BIDI': translation.get_language_bidi()}
logger = logging.getLogger('django.contrib.gis')
class OpenLayersWidget(Textarea):
"""
Render an OpenLayers map using the WKT of the geometry.
"""
def get_context(self, name, value, attrs):
        # Update the template parameters with any attributes passed in.
if attrs:
self.params.update(attrs)
self.params['editable'] = self.params['modifiable']
else:
self.params['editable'] = True
# Defaulting the WKT value to a blank string -- this
# will be tested in the JavaScript and the appropriate
# interface will be constructed.
self.params['wkt'] = ''
# If a string reaches here (via a validation error on another
# field) then just reconstruct the Geometry.
if value and isinstance(value, str):
try:
value = GEOSGeometry(value)
except (GEOSException, ValueError) as err:
logger.error("Error creating geometry from value '%s' (%s)", value, err)
value = None
if (value and value.geom_type.upper() != self.geom_type and
self.geom_type != 'GEOMETRY'):
value = None
# Constructing the dictionary of the map options.
self.params['map_options'] = self.map_options()
# Constructing the JavaScript module name using the name of
# the GeometryField (passed in via the `attrs` keyword).
# Use the 'name' attr for the field name (rather than 'field')
self.params['name'] = name
# note: we must switch out dashes for underscores since js
# functions are created using the module variable
js_safe_name = self.params['name'].replace('-', '_')
self.params['module'] = 'geodjango_%s' % js_safe_name
if value:
# Transforming the geometry to the projection used on the
# OpenLayers map.
srid = self.params['srid']
if value.srid != srid:
try:
ogr = value.ogr
ogr.transform(srid)
wkt = ogr.wkt
except GDALException as err:
logger.error(
"Error transforming geometry from srid '%s' to srid '%s' (%s)",
value.srid, srid, err
)
wkt = ''
else:
wkt = value.wkt
# Setting the parameter WKT with that of the transformed
# geometry.
self.params['wkt'] = wkt
self.params.update(geo_context)
return self.params
def map_options(self):
"""Build the map options hash for the OpenLayers templates."""
# JavaScript construction utilities for the Bounds and Projection.
def ol_bounds(extent):
return 'new OpenLayers.Bounds(%s)' % extent
def ol_projection(srid):
return 'new OpenLayers.Projection("EPSG:%s")' % srid
# An array of the parameter name, the name of their OpenLayers
# counterpart, and the type of variable they are.
map_types = [('srid', 'projection', 'srid'),
('display_srid', 'displayProjection', 'srid'),
('units', 'units', str),
('max_resolution', 'maxResolution', float),
('max_extent', 'maxExtent', 'bounds'),
('num_zoom', 'numZoomLevels', int),
('max_zoom', 'maxZoomLevels', int),
('min_zoom', 'minZoomLevel', int),
]
# Building the map options hash.
map_options = {}
for param_name, js_name, option_type in map_types:
if self.params.get(param_name, False):
if option_type == 'srid':
value = ol_projection(self.params[param_name])
elif option_type == 'bounds':
value = ol_bounds(self.params[param_name])
elif option_type in (float, int):
value = self.params[param_name]
elif option_type in (str,):
value = '"%s"' % self.params[param_name]
else:
raise TypeError
map_options[js_name] = value
return map_options
|
[
"[email protected]"
] | |
c9db57b26f26ac2232980fef4c2269f1b22bb554
|
9a486a87e028303a551fbd0d1e1b6b650387ea14
|
/deal_xzj_log/guild_battle.py
|
512946b2d6e02df97e80609b5411c54e41c4496a
|
[] |
no_license
|
shanlihou/pythonFunc
|
7b8e7064fddd4522e492c915c086cc6c5abc6eec
|
646920256551ccd8335446dd4fe11aa4b9916f64
|
refs/heads/master
| 2022-08-24T20:33:12.287464 | 2022-07-21T12:00:10 | 2022-07-21T12:00:10 | 24,311,639 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,459 |
py
|
import re
import utils
class GuildBattle(object):
def __init__(self, filename):
self.gbid_set = set()
self.account_set = set()
self.day = 0
self.init_data(filename)
def parse_day(self, line):
pat = re.compile(r'\[\d+\-\d+\-(\d+)')
find = pat.search(line)
if find:
day = int(find.group(1))
self.day = day
def init_data(self, filename):
pat = re.compile(r'gbId\:([^,]+), kill\:')
with open(filename, encoding='utf-8') as fr:
for line in fr:
find = pat.search(line)
if find:
gbid = int(find.group(1))
if utils.is_gbid_inter(gbid):
continue
self.gbid_set.add(gbid)
account = utils.get_account(gbid)
self.account_set.add(account)
else:
self.parse_day(line)
def generate_col(self, day_dict):
login_account_num = day_dict.get(self.day)
col = []
col.append(self.day)
col.append(len(self.gbid_set))
account_num = len(self.account_set)
col.append(account_num)
col.append(account_num / login_account_num)
return col
def deal_guild_battle(filename, day_dict):
gb = GuildBattle(filename)
return gb.generate_col(day_dict)
if __name__ == '__main__':
    # deal_guild_battle() needs a log file path and a {day: login_account_num} dict;
    # the values below are hypothetical placeholders (the original bare call raised TypeError).
    deal_guild_battle('guild_battle.log', {25: 1000})
|
[
"[email protected]"
] | |
f37125991539e1ace1a0ac7ce58f30bd0805aa5c
|
15608a179d97e399ca08be0f017296c4f4ded881
|
/docs/migrations/0001_squashed_0005_remove_version.py
|
b870e51c1e00658d8b78e452390e4e2d9260e024
|
[
"BSD-3-Clause"
] |
permissive
|
shivangi1801/djangoproject.com
|
1f33bef39b8c3cce136f47008eee5d4aae8d6aa4
|
e51eba97f7e226d2e9deb31f8c23b1e00df04b9c
|
refs/heads/master
| 2021-01-16T20:37:51.813671 | 2016-02-08T22:51:01 | 2016-02-08T22:51:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,768 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-21 07:14
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
replaces = [('docs', '0001_initial'), ('docs', '0002_simplify_documentrelease'), ('docs', '0003_add_fk_to_release'), ('docs', '0004_populate_fk_to_release'), ('docs', '0005_remove_version')]
initial = True
dependencies = [
('releases', '0001_squashed_0004_make_release_date_nullable'),
]
operations = [
migrations.CreateModel(
name='Document',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('path', models.CharField(max_length=500)),
('title', models.CharField(max_length=500)),
],
),
migrations.CreateModel(
name='DocumentRelease',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lang', models.CharField(choices=[('af', 'Afrikaans'), ('ar', 'Arabic'), ('ast', 'Asturian'), ('az', 'Azerbaijani'), ('bg', 'Bulgarian'), ('be', 'Belarusian'), ('bn', 'Bengali'), ('br', 'Breton'), ('bs', 'Bosnian'), ('ca', 'Catalan'), ('cs', 'Czech'), ('cy', 'Welsh'), ('da', 'Danish'), ('de', 'German'), ('el', 'Greek'), ('en', 'English'), ('en-au', 'Australian English'), ('en-gb', 'British English'), ('eo', 'Esperanto'), ('es', 'Spanish'), ('es-ar', 'Argentinian Spanish'), ('es-co', 'Colombian Spanish'), ('es-mx', 'Mexican Spanish'), ('es-ni', 'Nicaraguan Spanish'), ('es-ve', 'Venezuelan Spanish'), ('et', 'Estonian'), ('eu', 'Basque'), ('fa', 'Persian'), ('fi', 'Finnish'), ('fr', 'French'), ('fy', 'Frisian'), ('ga', 'Irish'), ('gd', 'Scottish Gaelic'), ('gl', 'Galician'), ('he', 'Hebrew'), ('hi', 'Hindi'), ('hr', 'Croatian'), ('hu', 'Hungarian'), ('ia', 'Interlingua'), ('id', 'Indonesian'), ('io', 'Ido'), ('is', 'Icelandic'), ('it', 'Italian'), ('ja', 'Japanese'), ('ka', 'Georgian'), ('kk', 'Kazakh'), ('km', 'Khmer'), ('kn', 'Kannada'), ('ko', 'Korean'), ('lb', 'Luxembourgish'), ('lt', 'Lithuanian'), ('lv', 'Latvian'), ('mk', 'Macedonian'), ('ml', 'Malayalam'), ('mn', 'Mongolian'), ('mr', 'Marathi'), ('my', 'Burmese'), ('nb', 'Norwegian Bokmal'), ('ne', 'Nepali'), ('nl', 'Dutch'), ('nn', 'Norwegian Nynorsk'), ('os', 'Ossetic'), ('pa', 'Punjabi'), ('pl', 'Polish'), ('pt', 'Portuguese'), ('pt-br', 'Brazilian Portuguese'), ('ro', 'Romanian'), ('ru', 'Russian'), ('sk', 'Slovak'), ('sl', 'Slovenian'), ('sq', 'Albanian'), ('sr', 'Serbian'), ('sr-latn', 'Serbian Latin'), ('sv', 'Swedish'), ('sw', 'Swahili'), ('ta', 'Tamil'), ('te', 'Telugu'), ('th', 'Thai'), ('tr', 'Turkish'), ('tt', 'Tatar'), ('udm', 'Udmurt'), ('uk', 'Ukrainian'), ('ur', 'Urdu'), ('vi', 'Vietnamese'), ('zh-hans', 'Simplified Chinese'), ('zh-hant', 'Traditional Chinese')], default='en', max_length=2)),
('is_default', models.BooleanField(default=False)),
('release', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='releases.Release')),
],
),
migrations.AlterUniqueTogether(
name='documentrelease',
unique_together=set([('lang', 'release')]),
),
migrations.AddField(
model_name='document',
name='release',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='documents', to='docs.DocumentRelease'),
),
migrations.AlterUniqueTogether(
name='document',
unique_together=set([('release', 'path')]),
),
]
|
[
"[email protected]"
] | |
f11c0be7aa67af6235a14f8e6198576a7e95013e
|
d780df6e068ab8a0f8007acb68bc88554a9d5b50
|
/python/g1/scripts/g1/scripts/bases.py
|
e710b0e363fa3d1cf71663dc650af30be31f0d9e
|
[
"MIT"
] |
permissive
|
clchiou/garage
|
ed3d314ceea487b46568c14b51e96b990a50ed6f
|
1d72863d3a5f5d620b170f4dd36f605e6b72054f
|
refs/heads/master
| 2023-08-27T13:57:14.498182 | 2023-08-15T07:09:57 | 2023-08-15T19:53:52 | 32,647,497 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,980 |
py
|
"""Helpers for constructing ``subprocess.run`` calls contextually.
NOTE: For the ease of use, this module implements context with global
variables, and thus is not concurrent safe (not thread safe and not
asynchronous coroutine safe). Although we could redesign the interface
to make it concurrent safe (like requiring passing around context
explicitly or using stdlib's contextvars), for now we think ease of use
is more important then concurrent safe (we might change our mind later).
"""
__all__ = [
'popen',
'run',
# Context manipulations.
'doing_capture_output',
'doing_capture_stderr',
'doing_capture_stdout',
'doing_check',
'doing_dry_run',
'get_cwd',
'get_dry_run',
'merging_env',
'preserving_sudo_env',
'using_cwd',
'using_env',
'using_input',
'using_relative_cwd',
'using_prefix',
'using_stderr',
'using_stdin',
'using_stdout',
'using_sudo',
]
import contextlib
import logging
import os
import subprocess
from pathlib import Path
from g1.bases.assertions import ASSERT
LOG = logging.getLogger(__name__)
# We don't use threading.local here since we don't pretend this module
# is thread safe.
_CONTEXT = {}
# Context entry names and default values.
_CAPTURE_OUTPUT = 'capture_output'
_CHECK = 'check'
_CWD = 'cwd'
_DRY_RUN = 'dry_run'
_ENV = 'env'
_INPUT = 'input'
_PREFIX = 'prefix'
_STDIN = 'stdin'
_STDOUT = 'stdout'
_STDERR = 'stderr'
_SUDO = 'sudo'
_SUDO_ENV = 'sudo_env'
_DEFAULTS = {
_CAPTURE_OUTPUT: False,
_CHECK: True,
_CWD: None,
_DRY_RUN: False,
_ENV: None,
_INPUT: None,
_PREFIX: (),
_STDIN: None,
_STDOUT: None,
_STDERR: None,
_SUDO: False,
_SUDO_ENV: (),
}
def _get(name):
return _get2(name)[0]
def _get2(name):
"""Return (value, is_default) pair."""
try:
return _CONTEXT[name], False
except KeyError:
return ASSERT.getitem(_DEFAULTS, name), True
@contextlib.contextmanager
def _using(name, new_value):
"""Context of using an entry value."""
old_value, is_default = _get2(name)
_CONTEXT[name] = new_value
try:
yield old_value
finally:
if is_default:
_CONTEXT.pop(name)
else:
_CONTEXT[name] = old_value
def doing_capture_output(capture_output=True):
return _using(_CAPTURE_OUTPUT, capture_output)
def doing_capture_stdout(capture_stdout=True):
return using_stdout(subprocess.PIPE if capture_stdout else None)
def doing_capture_stderr(capture_stderr=True):
return using_stderr(subprocess.PIPE if capture_stderr else None)
def doing_check(check=True):
return _using(_CHECK, check)
def get_dry_run():
return _get(_DRY_RUN)
def doing_dry_run(dry_run=True):
return _using(_DRY_RUN, dry_run)
def get_cwd():
cwd = _get(_CWD)
if cwd is None:
cwd = Path.cwd()
if not isinstance(cwd, Path):
cwd = Path(cwd)
return cwd
def using_cwd(cwd):
"""Context of using an absolute cwd value."""
return _using(_CWD, cwd)
def using_relative_cwd(relative_cwd):
"""Context of using a relative cwd value."""
if relative_cwd is None:
return _using(_CWD, None)
else:
return _using(_CWD, get_cwd() / relative_cwd)
def using_env(env):
"""Context of using an environment dict.
NOTE: This replaces, not merges, the environment dict.
"""
return _using(_ENV, env)
def merging_env(env):
"""Context of merging an environment dict.
If the current `env` is None (which is the default), the given
environment dict will be merged with os.environ.
"""
old_env, is_default = _get2(_ENV)
if env:
new_env = dict(os.environ if is_default else old_env)
new_env.update(env)
return using_env(new_env)
else:
return contextlib.nullcontext(old_env)
def using_input(input): # pylint: disable=redefined-builtin
return _using(_INPUT, input)
def using_stdin(stdin):
return _using(_STDIN, stdin)
def using_stdout(stdout):
return _using(_STDOUT, stdout)
def using_stderr(stderr):
return _using(_STDERR, stderr)
def using_prefix(prefix):
return _using(_PREFIX, prefix)
def using_sudo(sudo=True):
return _using(_SUDO, sudo)
def preserving_sudo_env(sudo_env):
# Typically sudo is configured to reset PATH to a known good value
# via secure_path option. So we forbid preserving PATH here.
return _using(_SUDO_ENV, ASSERT.not_contains(sudo_env, 'PATH'))
def popen(args):
LOG.debug('popen: args=%s, context=%s', args, _CONTEXT)
# It does not seem like we can return a fake Popen object.
ASSERT.false(_get(_DRY_RUN))
return subprocess.Popen(_prepare_args(args), **_prepare_kwargs())
def run(args):
LOG.debug('run: args=%s, context=%s', args, _CONTEXT)
if _get(_DRY_RUN):
# It seems better to return a fake value than None.
return subprocess.CompletedProcess(args, 0, b'', b'')
return subprocess.run(
_prepare_args(args),
capture_output=_get(_CAPTURE_OUTPUT),
check=_get(_CHECK),
input=_get(_INPUT),
**_prepare_kwargs(),
)
def _prepare_args(args):
args = list(map(str, args))
if _get(_SUDO):
sudo_env = _get(_SUDO_ENV)
if sudo_env:
preserve_env_arg = ('--preserve-env=%s' % ','.join(sudo_env), )
else:
preserve_env_arg = ()
args[:0] = ['sudo', '--non-interactive', *preserve_env_arg]
prefix = _get(_PREFIX)
if prefix:
args[:0] = prefix
return args
def _prepare_kwargs():
kwargs = {
'cwd': _get(_CWD),
'env': _get(_ENV),
}
# Work around subprocess.run limitation that it checks presence of
# stdin, stdout, and stderr in kwargs, not whether their value is
# not None.
for key in (_STDIN, _STDOUT, _STDERR):
value = _get(key)
if value is not None:
kwargs[key] = value
return kwargs
|
[
"[email protected]"
] | |
e2ef3dc9a479b8b4e011179eff0820b73109b14a
|
a9e3f3ad54ade49c19973707d2beb49f64490efd
|
/Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/course_wiki/tests/test_middleware.py
|
b4ce9c0f3de1609b2abfcc018e4c85d9fd6326bf
|
[
"AGPL-3.0-only",
"AGPL-3.0-or-later",
"MIT"
] |
permissive
|
luque/better-ways-of-thinking-about-software
|
8c3dda94e119f0f96edbfe5ba60ca6ec3f5f625d
|
5809eaca7079a15ee56b0b7fcfea425337046c97
|
refs/heads/master
| 2021-11-24T15:10:09.785252 | 2021-11-22T12:14:34 | 2021-11-22T12:14:34 | 163,850,454 | 3 | 1 |
MIT
| 2021-11-22T12:12:31 | 2019-01-02T14:21:30 |
JavaScript
|
UTF-8
|
Python
| false | false | 1,544 |
py
|
"""
Tests for wiki middleware.
"""
from django.test.client import Client
from wiki.models import URLPath
from common.djangoapps.student.tests.factories import InstructorFactory
from lms.djangoapps.course_wiki.views import get_or_create_root
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class TestWikiAccessMiddleware(ModuleStoreTestCase):
"""Tests for WikiAccessMiddleware."""
def setUp(self):
"""Test setup."""
super().setUp()
self.wiki = get_or_create_root()
self.course_math101 = CourseFactory.create(org='edx', number='math101', display_name='2014', metadata={'use_unique_wiki_id': 'false'}) # lint-amnesty, pylint: disable=line-too-long
self.course_math101_instructor = InstructorFactory(course_key=self.course_math101.id, username='instructor', password='secret') # lint-amnesty, pylint: disable=line-too-long
self.wiki_math101 = URLPath.create_article(self.wiki, 'math101', title='math101')
self.client = Client()
self.client.login(username='instructor', password='secret')
def test_url_tranform(self):
"""Test that the correct prefix ('/courses/<course_id>') is added to the urls in the wiki."""
response = self.client.get('/courses/edx/math101/2014/wiki/math101/')
self.assertContains(response, '/courses/edx/math101/2014/wiki/math101/_edit/')
self.assertContains(response, '/courses/edx/math101/2014/wiki/math101/_settings/')
|
[
"[email protected]"
] | |
9b020c89f6757fa760ac71bcb71c214d50005d12
|
999879f8d18e041d7fa313132408b252aded47f8
|
/01-codes/scikit-learn-master/sklearn/ensemble/weight_boosting.py
|
d07a00cefb6cb0b829d42e5d7fd27bd4ba4839e7
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
QPanProjects/Surrogate-Model
|
ebcaf05728e82dcbcd924c2edca1b490ab085173
|
848c7128201218b0819c9665e2cec72e3b1d29ac
|
refs/heads/master
| 2022-10-11T19:03:55.224257 | 2020-06-09T14:37:35 | 2020-06-09T14:37:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 40,759 |
py
|
"""Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseWeightBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <[email protected]>
# Gilles Louppe <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# Arnaud Joly <[email protected]>
#
# Licence: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.core.umath_tests import inner1d
from sklearn.utils.validation import has_fit_parameter, check_is_fitted
from ..externals.six.moves import xrange as range
from ..externals.six.moves import zip
from ..tree._tree import DTYPE
from .base import BaseEnsemble
from .forest import BaseForest
from ..base import ClassifierMixin, RegressorMixin, is_regressor
from ..externals import six
from ..metrics import accuracy_score, r2_score
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..tree.tree import BaseDecisionTree
from ..utils import check_array, check_X_y, check_random_state
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for AdaBoost estimators.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator=None,
n_estimators=50,
estimator_params=tuple(),
learning_rate=1.,
random_state=None):
super(BaseWeightBoosting, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.learning_rate = learning_rate
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is
forced to DTYPE from tree._tree if the base classifier of this
ensemble weighted boosting classifier is a tree or forest.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check parameters
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
if (self.base_estimator is None or
isinstance(self.base_estimator, (BaseDecisionTree,
BaseForest))):
dtype = DTYPE
accept_sparse = 'csc'
else:
dtype = None
accept_sparse = ['csr', 'csc']
X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype,
y_numeric=is_regressor(self))
if sample_weight is None:
# Initialize weights to 1 / n_samples
sample_weight = np.empty(X.shape[0], dtype=np.float64)
sample_weight[:] = 1. / X.shape[0]
else:
# Normalize existing weights
sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)
# Check that the sample weights sum is positive
if sample_weight.sum() <= 0:
raise ValueError(
"Attempting to fit with a non-positive "
"weighted number of samples.")
# Check parameters
self._validate_estimator()
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float64)
self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float64)
for iboost in range(self.n_estimators):
# Boosting step
sample_weight, estimator_weight, estimator_error = self._boost(
iboost,
X, y,
sample_weight)
# Early termination
if sample_weight is None:
break
self.estimator_weights_[iboost] = estimator_weight
self.estimator_errors_[iboost] = estimator_error
# Stop if error is zero
if estimator_error == 0:
break
sample_weight_sum = np.sum(sample_weight)
# Stop if the sum of sample weights has become non-positive
if sample_weight_sum <= 0:
break
if iboost < self.n_estimators - 1:
# Normalize
sample_weight /= sample_weight_sum
return self
@abstractmethod
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Warning: This method needs to be overridden by subclasses.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
pass
def staged_score(self, X, y, sample_weight=None):
"""Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like, shape = [n_samples]
Labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
z : float
"""
for y_pred in self.staged_predict(X):
if isinstance(self, ClassifierMixin):
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
else:
yield r2_score(y, y_pred, sample_weight=sample_weight)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
try:
norm = self.estimator_weights_.sum()
return (sum(weight * clf.feature_importances_ for weight, clf
in zip(self.estimator_weights_, self.estimators_))
/ norm)
except AttributeError:
raise AttributeError(
"Unable to compute feature importances "
"since base_estimator does not have a "
"feature_importances_ attribute")
def _validate_X_predict(self, X):
"""Ensure that X is in the proper format"""
if (self.base_estimator is None or
isinstance(self.base_estimator,
(BaseDecisionTree, BaseForest))):
X = check_array(X, accept_sparse='csr', dtype=DTYPE)
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return X
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
log_proba = np.log(proba)
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
"""An AdaBoost classifier.
An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm known as AdaBoost-SAMME [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeClassifier)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper `classes_`
and `n_classes_` attributes.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes]
The classes labels.
n_classes_ : int
The number of classes.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
algorithm='SAMME.R',
random_state=None):
super(AdaBoostClassifier, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.algorithm = algorithm
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
Returns self.
"""
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported" % self.algorithm)
# Fit
return super(AdaBoostClassifier, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostClassifier, self)._validate_estimator(
default=DecisionTreeClassifier(max_depth=1))
# SAMME-R requires predict_proba-enabled base estimators
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator_, 'predict_proba'):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead.")
if not has_fit_parameter(self.base_estimator_, "sample_weight"):
raise ValueError("%s doesn't support sample_weight."
% self.base_estimator_.__class__.__name__)
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == 'SAMME.R':
return self._boost_real(iboost, X, y, sample_weight)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight)
def _boost_real(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME.R real algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict_proba = estimator.predict_proba(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
axis=0)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
# Construct y coding as described in Zhu et al [2]:
#
# y_k = 1 if c == k else -1 / (K - 1)
#
# where K == n_classes_ and c, k in [0, K) are indices along the second
# axis of the y coding with c being the index corresponding to the true
# class label.
n_classes = self.n_classes_
classes = self.classes_
y_codes = np.array([-1. / (n_classes - 1), 1.])
y_coding = y_codes.take(classes == y[:, np.newaxis])
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba = y_predict_proba # alias for readability
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
# Boost weight using multi-class AdaBoost SAMME.R alg
estimator_weight = (-1. * self.learning_rate
* (((n_classes - 1.) / n_classes) *
inner1d(y_coding, np.log(y_predict_proba))))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, 1., estimator_error
def _boost_discrete(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict = estimator.predict(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
n_classes = self.n_classes_
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1. - (1. / n_classes):
self.estimators_.pop(-1)
if len(self.estimators_) == 0:
raise ValueError('BaseClassifier in AdaBoostClassifier '
'ensemble is worse than random, ensemble '
'can not be fit.')
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
estimator_weight = self.learning_rate * (
np.log((1. - estimator_error) / estimator_error) +
np.log(n_classes - 1.))
        # Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight * incorrect *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, estimator_weight, estimator_error
def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted classes.
"""
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(
np.argmax(pred, axis=1), axis=0))
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : array, shape = [n_samples, k]
The decision function of the input samples. The order of
outputs is the same of that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
pred = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
pred = sum((estimator.predict(X) == classes).T * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
pred /= self.estimator_weights_.sum()
if n_classes == 2:
pred[:, 0] *= -1
return pred.sum(axis=1)
return pred
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each boosting iteration.
This method allows monitoring (i.e. determine error on testing set)
after each boosting iteration.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of
outputs is the same of that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_pred = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_pred = estimator.predict(X)
current_pred = (current_pred == classes).T * weight
if pred is None:
pred = current_pred
else:
pred += current_pred
if n_classes == 2:
tmp_pred = np.copy(pred)
tmp_pred[:, 0] *= -1
yield (tmp_pred / norm).sum(axis=1)
else:
yield pred / norm
def predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of
outputs is the same of that of the `classes_` attribute.
"""
check_is_fitted(self, "n_classes_")
n_classes = self.n_classes_
X = self._validate_X_predict(X)
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
proba = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
proba = sum(estimator.predict_proba(X) * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
proba /= self.estimator_weights_.sum()
proba = np.exp((1. / (n_classes - 1)) * proba)
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : generator of array, shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of
outputs is the same of that of the `classes_` attribute.
"""
X = self._validate_X_predict(X)
n_classes = self.n_classes_
proba = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_proba = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_proba = estimator.predict_proba(X) * weight
if proba is None:
proba = current_proba
else:
proba += current_proba
real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
normalizer = real_proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
real_proba /= normalizer
yield real_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample are computed as
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of
outputs is the same of that of the `classes_` attribute.
"""
return np.log(self.predict_proba(X))
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeRegressor)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each regressor by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
loss : {'linear', 'square', 'exponential'}, optional (default='linear')
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
loss='linear',
random_state=None):
super(AdaBoostRegressor, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.loss = loss
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (real numbers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check loss
if self.loss not in ('linear', 'square', 'exponential'):
raise ValueError(
"loss must be 'linear', 'square', or 'exponential'")
# Fit
return super(AdaBoostRegressor, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostRegressor, self)._validate_estimator(
default=DecisionTreeRegressor(max_depth=3))
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
generator = check_random_state(self.random_state)
# Weighted sampling of the training set with replacement
# For NumPy >= 1.7.0 use np.random.choice
cdf = sample_weight.cumsum()
cdf /= cdf[-1]
uniform_samples = generator.random_sample(X.shape[0])
bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
bootstrap_idx = np.array(bootstrap_idx, copy=False)
# Fit on the bootstrapped sample and obtain a prediction
# for all samples in the training set
estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
y_predict = estimator.predict(X)
error_vect = np.abs(y_predict - y)
error_max = error_vect.max()
if error_max != 0.:
error_vect /= error_max
if self.loss == 'square':
error_vect **= 2
elif self.loss == 'exponential':
error_vect = 1. - np.exp(- error_vect)
# Calculate the average loss
estimator_error = (sample_weight * error_vect).sum()
if estimator_error <= 0:
# Stop if fit is perfect
return sample_weight, 1., 0.
elif estimator_error >= 0.5:
# Discard current estimator only if it isn't the only one
if len(self.estimators_) > 1:
self.estimators_.pop(-1)
return None, None, None
beta = estimator_error / (1. - estimator_error)
# Boost weight using AdaBoost.R2 alg
estimator_weight = self.learning_rate * np.log(1. / beta)
if not iboost == self.n_estimators - 1:
sample_weight *= np.power(
beta,
(1. - error_vect) * self.learning_rate)
return sample_weight, estimator_weight, estimator_error
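# Worked example of the AdaBoost.R2 update above (illustrative numbers, not
# values produced by the library):
#   >>> import numpy as np
#   >>> estimator_error, learning_rate = 0.2, 1.0
#   >>> beta = estimator_error / (1. - estimator_error)        # 0.25
#   >>> estimator_weight = learning_rate * np.log(1. / beta)   # ~1.386
#   >>> np.power(beta, (1. - np.array([0., 1.])) * learning_rate)
#   # -> [0.25, 1.0]: a perfectly predicted sample has its weight shrunk to
#   # a quarter, while the hardest sample keeps its weight, so subsequent
#   # estimators concentrate on it.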
def _get_median_predict(self, X, limit):
# Evaluate predictions of all estimators
predictions = np.array([
est.predict(X) for est in self.estimators_[:limit]]).T
# Sort the predictions
sorted_idx = np.argsort(predictions, axis=1)
# Find index of median prediction for each sample
weight_cdf = self.estimator_weights_[sorted_idx].cumsum(axis=1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
median_idx = median_or_above.argmax(axis=1)
median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx]
# Return median predictions
return predictions[np.arange(X.shape[0]), median_estimators]
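# Illustrative walk-through of the weighted median above (the weights and
# predictions are assumptions, not defaults): for one sample with
# estimator_weights_ = [0.4, 1.0, 0.6] and per-estimator predictions
# [2.0, 5.0, 3.0], sorting by prediction gives [2.0, 3.0, 5.0] with weights
# [0.4, 0.6, 1.0]; the cumulative weights are [0.4, 1.0, 2.0], half the total
# is 1.0, and the first prediction whose cumulative weight reaches it is 3.0,
# which is what this method returns for that sample.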
def predict(self, X):
"""Predict regression value for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the regressors in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
return self._get_median_predict(X, len(self.estimators_))
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the regressors in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
for i, _ in enumerate(self.estimators_, 1):
yield self._get_median_predict(X, limit=i)
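# A minimal usage sketch of the regressor defined above. The synthetic data
# and hyper-parameter values are illustrative assumptions, not part of this
# module:
#
#   import numpy as np
#   rng = np.random.RandomState(0)
#   X = np.linspace(0, 6, 100)[:, np.newaxis]
#   y = np.sin(X).ravel() + 0.1 * rng.randn(100)
#   regr = AdaBoostRegressor(n_estimators=50, learning_rate=1.0,
#                            loss='linear', random_state=0)
#   regr.fit(X, y)
#   y_pred = regr.predict(X)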
|
[
"[email protected]"
] | |
4160757e1eb7885ffb9361bec4a8b1841ce5c5eb
|
f2502813aa34cb6262bb2780debb51bb080aecd4
|
/toeplitz_decomp_gpu/run_real_new.py
|
16ae80f0f1ba99c14b3424a16cb3240a716315f7
|
[] |
no_license
|
sufkes/scintillometry
|
6fafc7601f12ea32b3dfa142ae5ef6beec7e3585
|
a9624eecd307bc0ea5ce2e412feec6909bd762aa
|
refs/heads/master
| 2021-01-22T03:49:27.370870 | 2018-02-23T21:37:06 | 2018-02-23T21:37:06 | 93,168,533 | 2 | 1 | null | 2017-06-02T13:32:59 | 2017-06-02T13:32:59 | null |
UTF-8
|
Python
| false | false | 1,257 |
py
|
import os,sys
from mpi4py import MPI
import numpy as np
from new_factorize_parallel import ToeplitzFactorizor
from time import time
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
if len(sys.argv) != 8 and len(sys.argv) != 9:
if rank==0:
print "Please pass in the following arguments: method offsetn offsetm n m p pad"
else:
method = sys.argv[1]
offsetn = int(sys.argv[2])
offsetm = int(sys.argv[3])
n = int(sys.argv[4])
m = int(sys.argv[5])
p = int(sys.argv[6])
pad = sys.argv[7] == "1" or sys.argv[7] == "True"
detailedSave = False
if len(sys.argv) == 9:
detailedSave = sys.argv[8] == "1" or sys.argv[8] == "True"
if not os.path.exists("processedData/"):
os.makedirs("processedData/")
if pad == 0:
folder = "gate0_numblock_{}_meff_{}_offsetn_{}_offsetm_{}".format(n, m, offsetn, offsetm)
c = ToeplitzFactorizor(folder, n, m, pad, detailedSave)
if pad == 1:
folder = "gate0_numblock_{}_meff_{}_offsetn_{}_offsetm_{}".format(n, m*2, offsetn, offsetm)
c = ToeplitzFactorizor(folder, n, m*2, pad, detailedSave)
for i in range(0, n*(1 + pad)//size):
c.addBlock(rank + i*size)
c.fact(method, p)
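# Example launch (illustrative; the method name and numeric values are
# placeholders for a real configuration, not defaults of this script):
#   mpirun -np 4 python run_real_new.py <method> 0 0 8 32 2 1
# i.e. offsetn=0, offsetm=0, n=8 blocks, m=32, p=2, pad enabled; append a
# trailing 1 to also enable detailedSave.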
|
[
"[email protected]"
] | |
31a0238162c4142eafff1a78205a6c5ea1531adb
|
2aeb619d07ba15ca95607238d41ad33b88cf51c7
|
/src/courcelles/urban/dataimport/architects/mappers.py
|
36823093a58a185857dbc706e1bf11d0f2409771
|
[] |
no_license
|
IMIO/courcelles.urban.dataimport
|
65edd044b60cbc148e1345b701e609b4a6446828
|
0d36fbe8c61a5c8da15b05df5452d7970e645509
|
refs/heads/master
| 2020-05-18T16:43:57.841783 | 2016-01-04T13:44:38 | 2016-01-04T13:44:38 | 39,009,822 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,131 |
py
|
# -*- coding: utf-8 -*-
from imio.urban.dataimport.mapper import Mapper
from imio.urban.dataimport.factory import BaseFactory
from Products.CMFPlone.utils import normalizeString
# Factory
class ArchitectFactory(BaseFactory):
def getCreationPlace(self, factory_args):
return self.site.urban.architects
def getPortalType(self, container, **kwargs):
return 'Architect'
class IdMapper(Mapper):
def mapId(self, line):
name = '%s' % self.getData('nom')
name = name.replace(' ', '').replace('-', '')
contact_id = normalizeString(self.site.portal_urban.generateUniqueId(name))
return contact_id
class PhoneMapper(Mapper):
def mapPhone(self, line):
phone = self.getData('téléphone')
gsm = self.getData('gsm')
if (phone and phone != '-') and (gsm and gsm != '-'):
phones = '{phone}, {gsm}'.format(
phone=phone,
gsm=gsm,
)
return phones
elif phone and phone != '-':
return phone
elif gsm and gsm != '-':
return gsm
return ''
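# Illustrative behaviour of PhoneMapper.mapPhone (made-up CSV values): with
# 'téléphone' = '071/123456' and 'gsm' = '0478/123456' it returns
# '071/123456, 0478/123456'; if only one of the two is filled in (the other
# empty or '-') that single value is returned, and '' otherwise.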
|
[
"[email protected]"
] | |
f290779cf5cd4101e1f193d1a539899e561f95af
|
8567438779e6af0754620a25d379c348e4cd5a5d
|
/third_party/WebKit/Tools/Scripts/webkitpy/style/checkers/cpp.py
|
e4b3ead63ddcc6cc663255c8c88926b1c5b284dd
|
[
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"BSD-3-Clause"
] |
permissive
|
thngkaiyuan/chromium
|
c389ac4b50ccba28ee077cbf6115c41b547955ae
|
dab56a4a71f87f64ecc0044e97b4a8f247787a68
|
refs/heads/master
| 2022-11-10T02:50:29.326119 | 2017-04-08T12:28:57 | 2017-04-08T12:28:57 | 84,073,924 | 0 | 1 |
BSD-3-Clause
| 2022-10-25T19:47:15 | 2017-03-06T13:04:15 | null |
UTF-8
|
Python
| false | false | 152,855 |
py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009, 2010, 2012 Google Inc. All rights reserved.
# Copyright (C) 2009 Torch Mobile Inc.
# Copyright (C) 2009 Apple Inc. All rights reserved.
# Copyright (C) 2010 Chris Jerdonek ([email protected])
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This is the modified version of Google's cpplint. The original code is
# http://google-styleguide.googlecode.com/svn/trunk/cpplint/cpplint.py
"""Support for check-webkit-style."""
import math # for log
import os
import os.path
import re
import sre_compile
import unicodedata
from webkitpy.common.memoized import memoized
from webkitpy.common.system.filesystem import FileSystem
# Headers that we consider STL headers.
_STL_HEADERS = frozenset([
'algobase.h', 'algorithm', 'alloc.h', 'bitset', 'deque', 'exception',
'function.h', 'functional', 'hash_map', 'hash_map.h', 'hash_set',
'hash_set.h', 'iterator', 'list', 'list.h', 'map', 'memory', 'pair.h',
'pthread_alloc', 'queue', 'set', 'set.h', 'sstream', 'stack',
'stl_alloc.h', 'stl_relops.h', 'type_traits.h',
'utility', 'vector', 'vector.h',
])
# Non-STL C++ system headers.
_CPP_HEADERS = frozenset([
'algo.h', 'builtinbuf.h', 'bvector.h', 'cassert', 'cctype',
'cerrno', 'cfloat', 'ciso646', 'climits', 'clocale', 'cmath',
'complex', 'complex.h', 'csetjmp', 'csignal', 'cstdarg', 'cstddef',
'cstdio', 'cstdlib', 'cstring', 'ctime', 'cwchar', 'cwctype',
'defalloc.h', 'deque.h', 'editbuf.h', 'exception', 'fstream',
'fstream.h', 'hashtable.h', 'heap.h', 'indstream.h', 'iomanip',
'iomanip.h', 'ios', 'iosfwd', 'iostream', 'iostream.h', 'istream.h',
'iterator.h', 'limits', 'map.h', 'multimap.h', 'multiset.h',
'numeric', 'ostream.h', 'parsestream.h', 'pfstream.h', 'PlotFile.h',
'procbuf.h', 'pthread_alloc.h', 'rope', 'rope.h', 'ropeimpl.h',
'SFile.h', 'slist', 'slist.h', 'stack.h', 'stdexcept',
'stdiostream.h', 'streambuf.h', 'stream.h', 'strfile.h', 'string',
'strstream', 'strstream.h', 'tempbuf.h', 'tree.h', 'typeinfo', 'valarray',
])
# Assertion macros. These are defined in base/logging.h and
# testing/base/gunit.h. Note that the _M versions need to come first
# for substring matching to work.
_CHECK_MACROS = [
'DCHECK', 'CHECK',
'EXPECT_TRUE_M', 'EXPECT_TRUE',
'ASSERT_TRUE_M', 'ASSERT_TRUE',
'EXPECT_FALSE_M', 'EXPECT_FALSE',
'ASSERT_FALSE_M', 'ASSERT_FALSE',
]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
('>=', 'GE'), ('>', 'GT'),
('<=', 'LE'), ('<', 'LT')]:
_CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
_CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
('>=', 'LT'), ('>', 'LE'),
('<=', 'GT'), ('<', 'GE')]:
_CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
_CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
_DEPRECATED_MACROS = [
['ASSERT', 'DCHECK or its variants'],
['ASSERT_UNUSED', 'DCHECK or its variants'],
['ASSERT_NOT_REACHED', 'NOTREACHED'],
['WTF_LOG', 'DVLOG']
]
# These constants define types of headers for use with
# _IncludeState.check_next_include_order().
_PRIMARY_HEADER = 0
_OTHER_HEADER = 1
# The regexp compilation caching is inlined in all regexp functions for
# performance reasons; factoring it out into a separate function turns out
# to be noticeably expensive.
_regexp_compile_cache = {}
def match(pattern, s):
"""Matches the string with the pattern, caching the compiled regexp."""
if not pattern in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].match(s)
def search(pattern, s):
"""Searches the string for the pattern, caching the compiled regexp."""
if not pattern in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].search(s)
def sub(pattern, replacement, s):
"""Substitutes occurrences of a pattern, caching the compiled regexp."""
if not pattern in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].sub(replacement, s)
def subn(pattern, replacement, s):
"""Substitutes occurrences of a pattern, caching the compiled regexp."""
if not pattern in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].subn(replacement, s)
def iteratively_replace_matches_with_char(pattern, char_replacement, s):
"""Returns the string with replacement done.
Every character in the match is replaced with char.
Due to the iterative nature, pattern should not match char or
there will be an infinite loop.
Example:
pattern = r'<[^<>]*>' # template parameters
char_replacement = '_'
s = 'A<B<C, D>>'
Returns 'A_________'
Args:
pattern: The regex to match.
char_replacement: The character to put in place of every
character of the match.
s: The string on which to do the replacements.
Returns:
The string with every character of each match replaced by char_replacement.
"""
while True:
matched = search(pattern, s)
if not matched:
return s
start_match_index = matched.start(0)
end_match_index = matched.end(0)
match_length = end_match_index - start_match_index
s = s[:start_match_index] + char_replacement * match_length + s[end_match_index:]
def _find_in_lines(regex, lines, start_position, not_found_position):
"""Does a find starting at start position and going forward until
a match is found.
Returns the position where the regex started.
"""
current_row = start_position.row
# Start with the given row and trim off everything before what should be matched.
current_line = lines[start_position.row][start_position.column:]
starting_offset = start_position.column
while True:
found_match = search(regex, current_line)
if found_match:
return Position(current_row, starting_offset + found_match.start())
# A match was not found so continue forward.
current_row += 1
starting_offset = 0
if current_row >= len(lines):
return not_found_position
current_line = lines[current_row]
def _rfind_in_lines(regex, lines, start_position, not_found_position):
"""Does a reverse find starting at start position and going backwards until
a match is found.
Returns the position where the regex ended.
"""
# Put the regex in a group and precede it with a greedy expression that
# matches anything to ensure that we get the last possible match in a line.
last_in_line_regex = r'.*(' + regex + ')'
current_row = start_position.row
# Start with the given row and trim off everything past what may be matched.
current_line = lines[start_position.row][:start_position.column]
while True:
found_match = match(last_in_line_regex, current_line)
if found_match:
return Position(current_row, found_match.end(1))
# A match was not found so continue backward.
current_row -= 1
if current_row < 0:
return not_found_position
current_line = lines[current_row]
def _convert_to_lower_with_underscores(text):
"""Converts all text strings in camelCase or PascalCase to lowers with underscores."""
# First add underscores before any capital letter followed by a lower case letter
# as long as it is in a word.
# (This puts an underscore before Password but not P and A in WPAPassword).
text = sub(r'(?<=[A-Za-z0-9])([A-Z])(?=[a-z])', r'_\1', text)
# Next add underscores before capitals at the end of words if it was
# preceded by lower case letter or number.
# (This puts an underscore before A in isA but not A in CBA).
text = sub(r'(?<=[a-z0-9])([A-Z])(?=\b)', r'_\1', text)
# Next add underscores when you have a capital letter which is followed by a capital letter
# but is not preceded by one. (This puts an underscore before A in 'WordADay').
text = sub(r'(?<=[a-z0-9])([A-Z][A-Z_])', r'_\1', text)
return text.lower()
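# A few illustrative conversions performed by the three substitutions above
# (inputs chosen to match the comments; not an exhaustive specification):
#   _convert_to_lower_with_underscores('WPAPassword') -> 'wpa_password'
#   _convert_to_lower_with_underscores('isA') -> 'is_a'
#   _convert_to_lower_with_underscores('WordADay') -> 'word_a_day'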
def _create_acronym(text):
"""Creates an acronym for the given text."""
# Removes all lower case letters except those starting words.
text = sub(r'(?<!\b)[a-z]', '', text)
return text.upper()
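# Illustrative acronym construction (hypothetical input): every lower-case
# letter that does not start a word is dropped and the remainder is
# upper-cased, so _create_acronym('WPAPassword') -> 'WPAP'.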
def up_to_unmatched_closing_paren(s):
"""Splits a string into two parts up to first unmatched ')'.
Args:
s: a string which is a substring of line after '('
(e.g., "a == (b + c))").
Returns:
A pair of strings (prefix before first unmatched ')',
remainder of s after first unmatched ')'), e.g.,
up_to_unmatched_closing_paren("a == (b + c)) { ")
returns "a == (b + c)", " {".
Returns None, None if there is no unmatched ')'
"""
i = 1
for pos, c in enumerate(s):
if c == '(':
i += 1
elif c == ')':
i -= 1
if i == 0:
return s[:pos], s[pos + 1:]
return None, None
class _IncludeState(dict):
"""Tracks line numbers for includes, and the order in which includes appear.
As a dict, an _IncludeState object serves as a mapping between include
filename and line number on which that file was included.
Call check_next_include_order() once for each header in the file, passing
in the type constants defined above. Calls in an illegal order will
raise an _IncludeError with an appropriate error message.
"""
# self._section will move monotonically through this set. If it ever
# needs to move backwards, check_next_include_order will raise an error.
_INITIAL_SECTION = 0
_PRIMARY_SECTION = 1
_OTHER_SECTION = 2
_TYPE_NAMES = {
_PRIMARY_HEADER: 'header this file implements',
_OTHER_HEADER: 'other header',
}
_SECTION_NAMES = {
_INITIAL_SECTION: "... nothing.",
_PRIMARY_SECTION: 'a header this file implements.',
_OTHER_SECTION: 'other header.',
}
def __init__(self):
dict.__init__(self)
self._section = self._INITIAL_SECTION
self._visited_primary_section = False
self.header_types = dict()
def visited_primary_section(self):
return self._visited_primary_section
def check_next_include_order(self, header_type, file_is_header, primary_header_exists):
"""Returns a non-empty error message if the next header is out of order.
This function also updates the internal state to be ready to check
the next include.
Args:
header_type: One of the _XXX_HEADER constants defined above.
file_is_header: Whether the file that owns this _IncludeState is itself a header
Returns:
The empty string if the header is in the right order, or an
error message describing what's wrong.
"""
if header_type == _PRIMARY_HEADER and file_is_header:
return 'Header file should not contain itself.'
error_message = ''
if self._section != self._OTHER_SECTION:
before_error_message = ('Found %s before %s' %
(self._TYPE_NAMES[header_type],
self._SECTION_NAMES[self._section + 1]))
after_error_message = ('Found %s after %s' %
(self._TYPE_NAMES[header_type],
self._SECTION_NAMES[self._section]))
if header_type == _PRIMARY_HEADER:
if self._section >= self._PRIMARY_SECTION:
error_message = after_error_message
self._section = self._PRIMARY_SECTION
self._visited_primary_section = True
else:
assert header_type == _OTHER_HEADER
if not file_is_header and self._section < self._PRIMARY_SECTION:
if primary_header_exists:
error_message = before_error_message
self._section = self._OTHER_SECTION
return error_message
class Position(object):
"""Holds the position of something."""
def __init__(self, row, column):
self.row = row
self.column = column
def __str__(self):
return '(%s, %s)' % (self.row, self.column)
def __cmp__(self, other):
return self.row.__cmp__(other.row) or self.column.__cmp__(other.column)
class Parameter(object):
"""Information about one function parameter."""
def __init__(self, parameter, parameter_name_index, row):
self.type = parameter[:parameter_name_index].strip()
# Remove any initializers from the parameter name (e.g. int i = 5).
self.name = sub(r'=.*', '', parameter[parameter_name_index:]).strip()
self.row = row
@memoized
def lower_with_underscores_name(self):
"""Returns the parameter name in the lower with underscores format."""
return _convert_to_lower_with_underscores(self.name)
class SingleLineView(object):
"""Converts multiple lines into a single line (with line breaks replaced by a
space) to allow for easier searching.
"""
def __init__(self, lines, start_position, end_position):
"""Create a SingleLineView instance.
Args:
lines: a list of multiple lines to combine into a single line.
start_position: offset within lines of where to start the single line.
end_position: just after where to end (like a slice operation).
"""
# Get the rows of interest.
trimmed_lines = lines[start_position.row:end_position.row + 1]
# Remove the columns on the last line that aren't included.
trimmed_lines[-1] = trimmed_lines[-1][:end_position.column]
# Remove the columns on the first line that aren't included.
trimmed_lines[0] = trimmed_lines[0][start_position.column:]
# Create a single line with all of the parameters.
self.single_line = ' '.join(trimmed_lines)
self.single_line = _RE_PATTERN_CLEANSE_MULTIPLE_STRINGS.sub('""', self.single_line)
# Keep the row lengths, so we can calculate the original row number
# given a column in the single line (adding 1 due to the space added
# during the join).
self._row_lengths = [len(line) + 1 for line in trimmed_lines]
self._starting_row = start_position.row
def convert_column_to_row(self, single_line_column_number):
"""Convert the column number from the single line into the original
line number.
Special cases:
* Columns in the added spaces are considered part of the previous line.
* Columns beyond the end of the line are considered part of the last line
in the view.
"""
total_columns = 0
row_offset = 0
while (row_offset < len(self._row_lengths) - 1 and
single_line_column_number >= total_columns + self._row_lengths[row_offset]):
total_columns += self._row_lengths[row_offset]
row_offset += 1
return self._starting_row + row_offset
def create_skeleton_parameters(all_parameters):
"""Converts a parameter list to a skeleton version.
The skeleton only has one word for the parameter name, one word for the type,
and commas after each parameter and only there. Everything in the skeleton
remains in the same columns as the original.
"""
all_simplifications = (
# Remove template parameters, function declaration parameters, etc.
r'(<[^<>]*?>)|(\([^\(\)]*?\))|(\{[^\{\}]*?\})',
# Remove all initializers.
r'=[^,]*',
# Remove :: and everything before it.
r'[^,]*::',
# Remove modifiers like &, *.
r'[&*]',
# Remove const modifiers.
r'\bconst\s+(?=[A-Za-z])',
# Remove numerical modifiers like long.
r'\b(unsigned|long|short)\s+(?=unsigned|long|short|int|char|double|float)')
skeleton_parameters = all_parameters
for simplification in all_simplifications:
skeleton_parameters = iteratively_replace_matches_with_char(simplification, ' ', skeleton_parameters)
# If there are any parameters, then add a , after the last one to
# make a regular pattern of a , following every parameter.
if skeleton_parameters.strip():
skeleton_parameters += ','
return skeleton_parameters
def find_parameter_name_index(skeleton_parameter):
"""Determines where the parameter name starts given the skeleton parameter."""
# The first space from the right in the simplified parameter is where the parameter
# name starts unless the first space is before any content in the simplified parameter.
before_name_index = skeleton_parameter.rstrip().rfind(' ')
if before_name_index != -1 and skeleton_parameter[:before_name_index].strip():
return before_name_index + 1
return len(skeleton_parameter)
def parameter_list(elided_lines, start_position, end_position):
"""Generator for a function's parameters."""
# Create new positions that omit the outer parenthesis of the parameters.
start_position = Position(row=start_position.row, column=start_position.column + 1)
end_position = Position(row=end_position.row, column=end_position.column - 1)
single_line_view = SingleLineView(elided_lines, start_position, end_position)
skeleton_parameters = create_skeleton_parameters(single_line_view.single_line)
end_index = -1
while True:
# Find the end of the next parameter.
start_index = end_index + 1
end_index = skeleton_parameters.find(',', start_index)
# No comma means that all parameters have been parsed.
if end_index == -1:
return
row = single_line_view.convert_column_to_row(end_index)
# Parse the parameter into a type and parameter name.
skeleton_parameter = skeleton_parameters[start_index:end_index]
name_offset = find_parameter_name_index(skeleton_parameter)
parameter = single_line_view.single_line[start_index:end_index]
yield Parameter(parameter, name_offset, row)
class _FunctionState(object):
"""Tracks current function name and the number of lines in its body.
Attributes:
min_confidence: The minimum confidence level to use while checking style.
"""
_NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
_TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER.
def __init__(self, min_confidence):
self.min_confidence = min_confidence
self.current_function = ''
self.in_a_function = False
self.lines_in_function = 0
# Make sure these will not be mistaken for real positions (even when a
# small amount is added to them).
self.body_start_position = Position(-1000, 0)
self.end_position = Position(-1000, 0)
def begin(self, function_name, function_name_start_position, body_start_position, end_position,
parameter_start_position, parameter_end_position, clean_lines):
"""Start analyzing function body.
Args:
function_name: The name of the function being tracked.
function_name_start_position: Position in elided where the function name starts.
body_start_position: Position in elided of the { or the ; for a prototype.
end_position: Position in elided just after the final } (or the ; for a prototype).
parameter_start_position: Position in elided of the '(' for the parameters.
parameter_end_position: Position in elided just after the ')' for the parameters.
clean_lines: A CleansedLines instance containing the file.
"""
self.in_a_function = True
self.lines_in_function = -1 # Don't count the open brace line.
self.current_function = function_name
self.function_name_start_position = function_name_start_position
self.body_start_position = body_start_position
self.end_position = end_position
self.is_declaration = clean_lines.elided[body_start_position.row][body_start_position.column] == ';'
self.parameter_start_position = parameter_start_position
self.parameter_end_position = parameter_end_position
self.is_pure = False
if self.is_declaration:
characters_after_parameters = SingleLineView(
clean_lines.elided, parameter_end_position, body_start_position).single_line
self.is_pure = bool(match(r'\s*=\s*0\s*', characters_after_parameters))
self._clean_lines = clean_lines
self._parameter_list = None
def modifiers_and_return_type(self):
"""Returns the modifiers and the return type."""
# Go backwards from where the function name is until we encounter one of several things:
# ';' or '{' or '}' or 'private:', etc. or '#' or return Position(0, 0)
elided = self._clean_lines.elided
start_modifiers = _rfind_in_lines(r';|\{|\}|((private|public|protected):)|(#.*)',
elided, self.parameter_start_position, Position(0, 0))
return SingleLineView(elided, start_modifiers, self.function_name_start_position).single_line.strip()
def parameter_list(self):
if not self._parameter_list:
# Store the final result as a tuple since that is immutable.
self._parameter_list = tuple(parameter_list(self._clean_lines.elided,
self.parameter_start_position, self.parameter_end_position))
return self._parameter_list
def count(self, line_number):
"""Count line in current function body."""
if self.in_a_function and line_number >= self.body_start_position.row:
self.lines_in_function += 1
def check(self, error, line_number):
"""Report if too many lines in function body.
Args:
error: The function to call with any errors found.
line_number: The number of the line to check.
"""
if match(r'T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
base_trigger = self._NORMAL_TRIGGER
trigger = base_trigger * 2 ** self.min_confidence
if self.lines_in_function > trigger:
error_level = int(math.log(self.lines_in_function / base_trigger, 2))
# 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
if error_level > 5:
error_level = 5
error(line_number, 'readability/fn_size', error_level,
'Small and focused functions are preferred:'
' %s has %d non-comment lines'
' (error triggered by exceeding %d lines).' % (
self.current_function, self.lines_in_function, trigger))
def end(self):
"""Stop analyzing function body."""
self.in_a_function = False
class _IncludeError(Exception):
"""Indicates a problem with the include order in a file."""
class FileInfo:
"""Provides utility functions for filenames.
FileInfo provides easy access to the components of a file's path
relative to the project root.
"""
def __init__(self, filename):
self._filename = filename
def full_name(self):
"""Make Windows paths like Unix."""
return os.path.abspath(self._filename).replace('\\', '/')
def repository_name(self):
"""Full name after removing the local path to the repository.
If we have a real absolute path name here we can try to do something smart:
detecting the root of the checkout and truncating /path/to/checkout from
the name so that we get header guards that don't include things like
"C:\Documents and Settings\..." or "/home/username/..." in them and thus
people on different computers who have checked the source out to different
locations won't see bogus errors.
"""
fullname = self.full_name()
if os.path.exists(fullname):
project_dir = os.path.dirname(fullname)
if os.path.exists(os.path.join(project_dir, ".svn")):
# If there's a .svn file in the current directory, we
# recursively look up the directory tree for the top
# of the SVN checkout
root_dir = project_dir
one_up_dir = os.path.dirname(root_dir)
while os.path.exists(os.path.join(one_up_dir, ".svn")):
root_dir = os.path.dirname(root_dir)
one_up_dir = os.path.dirname(one_up_dir)
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Not SVN? Try to find a git top level directory by
# searching up from the current path.
root_dir = os.path.dirname(fullname)
while (root_dir != os.path.dirname(root_dir)
and not os.path.exists(os.path.join(root_dir, ".git"))):
root_dir = os.path.dirname(root_dir)
if os.path.exists(os.path.join(root_dir, ".git")):
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Don't know what to do; header guard warnings may be wrong...
return fullname
def split(self):
"""Splits the file into the directory, basename, and extension.
For 'chrome/browser/browser.cpp', Split() would
return ('chrome/browser', 'browser', '.cpp')
Returns:
A tuple of (directory, basename, extension).
"""
googlename = self.repository_name()
project, rest = os.path.split(googlename)
return (project,) + os.path.splitext(rest)
def base_name(self):
"""File base name - text after the final slash, before the final period."""
return self.split()[1]
def extension(self):
"""File extension - text following the final period."""
return self.split()[2]
def no_extension(self):
"""File has no source file extension."""
return '/'.join(self.split()[0:2])
def is_source(self):
"""File has a source file extension."""
return self.extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Matches strings. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"')
# Matches characters. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'")
# Matches multiple strings (after the above cleanses) which can be concatenated.
_RE_PATTERN_CLEANSE_MULTIPLE_STRINGS = re.compile(r'"("\s*")+"')
# Matches multi-line C++ comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care with space removal so we can handle comments inside
# statements better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side,
# if that doesn't work we try the left side, but only if there's a
# non-word character on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
r"""(\s*/\*.*\*/\s*$|
/\*.*\*/\s+|
\s+/\*.*\*/(?=\W)|
/\*.*\*/)""", re.VERBOSE)
def is_cpp_string(line):
"""Does line terminate so, that the next symbol is in string constant.
This function does not consider single-line nor multi-line comments.
Args:
line: a partial line of code, starting from character 0 up to n.
Returns:
True, if next character appended to 'line' is inside a
string constant.
"""
line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
def find_next_multi_line_comment_start(lines, line_index):
"""Find the beginning marker for a multiline comment."""
while line_index < len(lines):
if lines[line_index].strip().startswith('/*'):
# Only return this marker if the comment goes beyond this line
if lines[line_index].strip().find('*/', 2) < 0:
return line_index
line_index += 1
return len(lines)
def find_next_multi_line_comment_end(lines, line_index):
"""We are inside a comment, find the end marker."""
while line_index < len(lines):
if lines[line_index].strip().endswith('*/'):
return line_index
line_index += 1
return len(lines)
def remove_multi_line_comments_from_range(lines, begin, end):
"""Clears a range of lines for multi-line comments."""
# Having // dummy comments makes the lines non-empty, so we will not get
# unnecessary blank line warnings later in the code.
for i in range(begin, end):
lines[i] = '// dummy'
def remove_multi_line_comments(lines, error):
"""Removes multiline (c-style) comments from lines."""
line_index = 0
while line_index < len(lines):
line_index_begin = find_next_multi_line_comment_start(lines, line_index)
if line_index_begin >= len(lines):
return
line_index_end = find_next_multi_line_comment_end(lines, line_index_begin)
if line_index_end >= len(lines):
error(line_index_begin + 1, 'readability/multiline_comment', 5,
'Could not find end of multi-line comment')
return
remove_multi_line_comments_from_range(lines, line_index_begin, line_index_end + 1)
line_index = line_index_end + 1
def cleanse_comments(line):
"""Removes //-comments and single-line C-style /* */ comments.
Args:
line: A line of C++ source.
Returns:
The line with single-line comments removed.
"""
comment_position = line.find('//')
if comment_position != -1 and not is_cpp_string(line[:comment_position]):
line = line[:comment_position]
# get rid of /* ... */
return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
class CleansedLines(object):
"""Holds 3 copies of all lines with different preprocessing applied to them.
1) elided member contains lines without strings and comments,
2) lines member contains lines without comments, and
3) raw member contains all the lines without processing.
All these three members are of <type 'list'>, and of the same length.
"""
def __init__(self, lines):
self.elided = []
self.lines = []
self.raw_lines = lines
self._num_lines = len(lines)
for line_number in range(len(lines)):
self.lines.append(cleanse_comments(lines[line_number]))
elided = self.collapse_strings(lines[line_number])
self.elided.append(cleanse_comments(elided))
def num_lines(self):
"""Returns the number of lines represented."""
return self._num_lines
@staticmethod
def collapse_strings(elided):
"""Collapses strings and chars on a line to simple "" or '' blocks.
We nix strings first so we're not fooled by text like '"http://"'
Args:
elided: The line being processed.
Returns:
The line with collapsed strings.
"""
if not _RE_PATTERN_INCLUDE.match(elided):
# Remove escaped characters first to make quote/single quote collapsing
# basic. Things that look like escaped characters shouldn't occur
# outside of strings and chars.
elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided)
elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided)
elided = _RE_PATTERN_CLEANSE_MULTIPLE_STRINGS.sub('""', elided)
return elided
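# Illustrative effect of the cleansing above (hypothetical input line):
#   raw:    'x = strcmp(s, "foo"); // check'
#   lines:  'x = strcmp(s, "foo"); '   (comment stripped)
#   elided: 'x = strcmp(s, ""); '      (string literal collapsed as well)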
def close_expression(elided, position):
"""If input points to ( or { or [, finds the position that closes it.
If elided[position.row][position.column] points to a '(' or '{' or '[',
finds the line_number/pos that correspond to the closing of the expression.
Args:
elided: A CleansedLines.elided instance containing the file.
position: The position of the opening item.
Returns:
The Position *past* the closing brace, or Position(len(elided), -1)
if we never find a close. Note we ignore strings and comments when matching.
"""
line = elided[position.row]
start_character = line[position.column]
if start_character == '(':
enclosing_character_regex = r'[\(\)]'
elif start_character == '[':
enclosing_character_regex = r'[\[\]]'
elif start_character == '{':
enclosing_character_regex = r'[\{\}]'
else:
return Position(len(elided), -1)
current_column = position.column + 1
line_number = position.row
net_open = 1
for line in elided[position.row:]:
line = line[current_column:]
# Search the current line for opening and closing characters.
while True:
next_enclosing_character = search(enclosing_character_regex, line)
# No more on this line.
if not next_enclosing_character:
break
current_column += next_enclosing_character.end(0)
line = line[next_enclosing_character.end(0):]
if next_enclosing_character.group(0) == start_character:
net_open += 1
else:
net_open -= 1
if not net_open:
return Position(line_number, current_column)
# Proceed to the next line.
line_number += 1
current_column = 0
# The given item was not closed.
return Position(len(elided), -1)
def check_for_copyright(lines, error):
"""Logs an error if no Copyright message appears at the top of the file."""
# We'll say it should occur by line 10. Don't forget there's a
# dummy line at the front.
for line in xrange(1, min(len(lines), 11)):
if re.search(r'Copyright', lines[line], re.I):
break
else: # means no copyright line was found
error(0, 'legal/copyright', 5,
'No copyright message found. '
'You should have a line: "Copyright [year] <Copyright Owner>"')
# TODO(jww) After the transition of Blink into the Chromium repo, this function
# should be removed. This will strictly enforce Chromium-style header guards,
# rather than allowing traditional WebKit header guards and Chromium-style
# simultaneously.
def get_legacy_header_guard_cpp_variable(filename):
"""Returns the CPP variable that should be used as a header guard.
Args:
filename: The name of a C++ header file.
Returns:
The CPP variable that should be used as a header guard in the
named file.
"""
# Restores original filename in case that style checker is invoked from Emacs's
# flymake.
filename = re.sub(r'_flymake\.h$', '.h', filename)
standard_name = sub(r'[-.\s]', '_', os.path.basename(filename))
# Files under WTF typically have header guards that start with WTF_.
if '/wtf/' in filename:
special_name = "WTF_" + standard_name
else:
special_name = standard_name
return (special_name, standard_name)
def get_header_guard_cpp_variable(filename):
"""Returns the CPP variable that should be used as a header guard in Chromium-style.
Args:
filename: The name of a C++ header file.
Returns:
The CPP variable that should be used as a header guard in the
named file in Chromium-style.
"""
# Restores original filename in case that style checker is invoked from Emacs's
# flymake.
filename = re.sub(r'_flymake\.h$', '.h', filename)
# If it's a full path and starts with Source/, replace Source with blink
# since that will be the new style directory.
filename = sub(r'^Source\/', 'blink/', filename)
standard_name = sub(r'[-.\s\/]', '_', filename).upper() + '_'
return standard_name
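# Example of the Chromium-style guard produced above (hypothetical path):
#   get_header_guard_cpp_variable('Source/core/dom/Document.h')
# returns 'BLINK_CORE_DOM_DOCUMENT_H_' (Source/ is rewritten to blink/,
# separators become underscores, and the result is upper-cased with a
# trailing underscore).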
def check_for_header_guard(filename, lines, error):
"""Checks that the file contains a header guard.
Logs an error if no #ifndef header guard is present. For other
headers, checks that the full pathname is used.
Args:
filename: The name of the C++ header file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
legacy_cpp_var = get_legacy_header_guard_cpp_variable(filename)
cpp_var = get_header_guard_cpp_variable(filename)
ifndef = None
ifndef_line_number = 0
define = None
for line_number, line in enumerate(lines):
line_split = line.split()
if len(line_split) >= 2:
# find the first occurrence of #ifndef and #define, save arg
if not ifndef and line_split[0] == '#ifndef':
# set ifndef to the header guard presented on the #ifndef line.
ifndef = line_split[1]
ifndef_line_number = line_number
if not define and line_split[0] == '#define':
define = line_split[1]
if define and ifndef:
break
if not ifndef or not define or ifndef != define:
error(0, 'build/header_guard', 5,
'No #ifndef header guard found, suggested CPP variable is: %s' %
legacy_cpp_var[0])
return
# The guard should be File_h or, for Chromium style, BLINK_PATH_TO_FILE_H_.
if ifndef not in legacy_cpp_var and ifndef != cpp_var:
error(ifndef_line_number, 'build/header_guard', 5,
'#ifndef header guard has wrong style, please use: %s' % legacy_cpp_var[0])
def check_for_unicode_replacement_characters(lines, error):
"""Logs an error for each line containing Unicode replacement characters.
These indicate that either the file contained invalid UTF-8 (likely)
or Unicode replacement characters (which it shouldn't). Note that
it's possible for this to throw off line numbering if the invalid
UTF-8 occurred adjacent to a newline.
Args:
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
for line_number, line in enumerate(lines):
if u'\ufffd' in line:
error(line_number, 'readability/utf8', 5,
'Line contains invalid UTF-8 (or Unicode replacement character).')
def check_for_new_line_at_eof(lines, error):
"""Logs an error if there is no newline char at the end of the file.
Args:
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
# The array lines() was created by adding two newlines to the
# original file (go figure), then splitting on \n.
# To verify that the file ends in \n, we just have to make sure the
# last-but-two element of lines() exists and is empty.
if len(lines) < 3 or lines[-2]:
error(len(lines) - 2, 'whitespace/ending_newline', 5,
'Could not find a newline character at the end of the file.')
def check_for_multiline_comments_and_strings(clean_lines, line_number, error):
"""Logs an error if we see /* ... */ or "..." that extend past one line.
/* ... */ comments are legit inside macros, for one line.
Otherwise, we prefer // comments, so it's ok to warn about the
other. Likewise, it's ok for strings to extend across multiple
lines, as long as a line continuation character (backslash)
terminates each line. Although not currently prohibited by the C++
style guide, it's ugly and unnecessary. We don't do well with either
in this lint program, so we warn about both.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[line_number]
# Remove all \\ (escaped backslashes) from the line. They are OK, and the
# second (escaped) slash may trigger later \" detection erroneously.
line = line.replace('\\\\', '')
if line.count('/*') > line.count('*/'):
error(line_number, 'readability/multiline_comment', 5,
'Complex multi-line /*...*/-style comment found. '
'Lint may give bogus warnings. '
'Consider replacing these with //-style comments, '
'with #if 0...#endif, '
'or with more clearly structured multi-line comments.')
if (line.count('"') - line.count('\\"')) % 2:
error(line_number, 'readability/multiline_string', 5,
'Multi-line string ("...") found. This lint script doesn\'t '
'do well with such strings, and may give bogus warnings. They\'re '
'ugly and unnecessary, and you should use concatenation instead.')
_THREADING_LIST = (
('asctime(', 'asctime_r('),
('ctime(', 'ctime_r('),
('getgrgid(', 'getgrgid_r('),
('getgrnam(', 'getgrnam_r('),
('getlogin(', 'getlogin_r('),
('getpwnam(', 'getpwnam_r('),
('getpwuid(', 'getpwuid_r('),
('gmtime(', 'gmtime_r('),
('localtime(', 'localtime_r('),
('rand(', 'rand_r('),
('readdir(', 'readdir_r('),
('strtok(', 'strtok_r('),
('ttyname(', 'ttyname_r('),
)
def check_posix_threading(clean_lines, line_number, error):
"""Checks for calls to thread-unsafe functions.
Much code was originally written without multi-threading in mind, and
many engineers still rely on experience gained before the POSIX
threading extensions were added. These tests guide engineers toward the
thread-safe variants of such functions (when using POSIX directly).
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[line_number]
for single_thread_function, multithread_safe_function in _THREADING_LIST:
index = line.find(single_thread_function)
# Comparisons made explicit for clarity
if index >= 0 and (index == 0 or (not line[index - 1].isalnum()
and line[index - 1] not in ('_', '.', '>'))):
error(line_number, 'runtime/threadsafe_fn', 2,
'Consider using ' + multithread_safe_function +
'...) instead of ' + single_thread_function +
'...) for improved thread safety.')
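# For instance (hypothetical source line), 'time_t t; ctime(&t);' would be
# flagged here with the suggestion to use ctime_r(...), because 'ctime(' is
# in _THREADING_LIST and the character before it is not alphanumeric, '_',
# '.', or '>'.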
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
r'^\s*\*\w+(\+\+|--);')
def check_invalid_increment(clean_lines, line_number, error):
"""Checks for invalid increment *count++.
For example following function:
void increment_counter(int* count) {
*count++;
}
is invalid, because it effectively does count++, moving the pointer, and should
be replaced with ++*count, (*count)++ or *count += 1.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[line_number]
if _RE_PATTERN_INVALID_INCREMENT.match(line):
error(line_number, 'runtime/invalid_increment', 5,
'Changing pointer instead of value (or unused value of operator*).')
class _ClassInfo(object):
"""Stores information about a class."""
def __init__(self, name, line_number):
self.name = name
self.line_number = line_number
self.seen_open_brace = False
self.is_derived = False
self.virtual_method_line_number = None
self.has_virtual_destructor = False
self.brace_depth = 0
self.unsigned_bitfields = []
self.bool_bitfields = []
class _ClassState(object):
"""Holds the current state of the parse relating to class declarations.
It maintains a stack of _ClassInfos representing the parser's guess
as to the current nesting of class declarations. The innermost class
is at the top (back) of the stack. Typically, the stack will either
be empty or have exactly one entry.
"""
def __init__(self):
self.classinfo_stack = []
def check_finished(self, error):
"""Checks that all classes have been completely parsed.
Call this when all lines in a file have been processed.
Args:
error: The function to call with any errors found.
"""
if self.classinfo_stack:
# Note: This test can result in false positives if #ifdef constructs
# get in the way of brace matching. See the testBuildClass test in
# cpp_style_unittest.py for an example of this.
error(self.classinfo_stack[0].line_number, 'build/class', 5,
'Failed to find complete declaration of class %s' %
self.classinfo_stack[0].name)
class _FileState(object):
def __init__(self, clean_lines, file_extension):
self._did_inside_namespace_indent_warning = False
self._clean_lines = clean_lines
if file_extension in ['m', 'mm']:
self._is_objective_c = True
self._is_c = False
elif file_extension == 'h':
# In the case of header files, it is unknown if the file
# is c / objective c or not, so set this value to None and then
# if it is requested, use heuristics to guess the value.
self._is_objective_c = None
self._is_c = None
elif file_extension == 'c':
self._is_c = True
self._is_objective_c = False
else:
self._is_objective_c = False
self._is_c = False
def set_did_inside_namespace_indent_warning(self):
self._did_inside_namespace_indent_warning = True
def did_inside_namespace_indent_warning(self):
return self._did_inside_namespace_indent_warning
def is_objective_c(self):
if self._is_objective_c is None:
for line in self._clean_lines.elided:
# Starting with @ or #import seem like the best indications
# that we have an Objective C file.
if line.startswith("@") or line.startswith("#import"):
self._is_objective_c = True
break
else:
self._is_objective_c = False
return self._is_objective_c
def is_c(self):
if self._is_c is None:
for line in self._clean_lines.lines:
# if extern "C" is found, then it is a good indication
# that we have a C header file.
if line.startswith('extern "C"'):
self._is_c = True
break
else:
self._is_c = False
return self._is_c
def is_c_or_objective_c(self):
"""Return whether the file extension corresponds to C or Objective-C."""
return self.is_c() or self.is_objective_c()
class _EnumState(object):
"""Maintains whether currently in an enum declaration, and checks whether
enum declarations follow the style guide.
"""
def __init__(self):
self.in_enum_decl = False
def process_clean_line(self, line):
# FIXME: The regular expressions for expr_all_uppercase and expr_enum_end only accept integers
# and identifiers for the value of the enumerator, but do not accept any other constant
# expressions. However, this is sufficient for now (11/27/2012).
expr_all_uppercase = r'\s*[A-Z][0-9_]*[A-Z][A-Z0-9_]*\s*(?:=\s*[a-zA-Z0-9]+\s*)?,?\s*$'
expr_starts_lowercase = r'\s*[a-jl-z]|k[a-z]'
expr_enum_end = r'}\s*(?:[a-zA-Z0-9]+\s*(?:=\s*[a-zA-Z0-9]+)?)?\s*;\s*'
expr_enum_start = r'\s*enum(?:\s+[a-zA-Z0-9]+)?\s*\{?\s*'
if self.in_enum_decl:
if match(r'\s*' + expr_enum_end + r'$', line):
self.in_enum_decl = False
elif match(expr_all_uppercase, line):
return False
elif match(expr_starts_lowercase, line):
return False
else:
matched = match(expr_enum_start + r'$', line)
if matched:
self.in_enum_decl = True
else:
matched = match(expr_enum_start + r'(?P<members>.*)' + expr_enum_end + r'$', line)
if matched:
members = matched.group('members').split(',')
for member in members:
if match(expr_all_uppercase, member):
return False
if match(expr_starts_lowercase, member):
return False
return True
return True
def check_for_non_standard_constructs(clean_lines, line_number,
class_state, error):
"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
- classes with virtual methods need virtual destructors (compiler warning
available, but not turned on yet.)
Additionally, check for constructor/destructor style violations as it
is very convenient to do so while checking for gcc-2 compliance.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
class_state: A _ClassState instance which maintains information about
the current stack of nested class declarations being parsed.
error: A callable to which errors are reported, which takes parameters:
line number, error level, and message
"""
# Remove comments from the line, but leave in strings for now.
line = clean_lines.lines[line_number]
if search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
error(line_number, 'runtime/printf_format', 3,
'%q in format strings is deprecated. Use %ll instead.')
if search(r'printf\s*\(.*".*%\d+\$', line):
error(line_number, 'runtime/printf_format', 2,
'%N$ formats are unconventional. Try rewriting to avoid them.')
# Remove escaped backslashes before looking for undefined escapes.
line = line.replace('\\\\', '')
if search(r'("|\').*\\(%|\[|\(|{)', line):
error(line_number, 'build/printf_format', 3,
'%, [, (, and { are undefined character escapes. Unescape them.')
# For the rest, work with both comments and strings removed.
line = clean_lines.elided[line_number]
if search(r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
r'\s+(auto|register|static|extern|typedef)\b',
line):
error(line_number, 'build/storage_class', 5,
'Storage class (static, extern, typedef, etc) should be first.')
if match(r'\s*#\s*endif\s*[^/\s]+', line):
error(line_number, 'build/endif_comment', 5,
'Uncommented text after #endif is non-standard. Use a comment.')
if match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
error(line_number, 'build/forward_decl', 5,
'Inner-style forward declarations are invalid. Remove this line.')
if search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?', line):
error(line_number, 'build/deprecated', 3,
'>? and <? (max and min) operators are non-standard and deprecated.')
if search(r'\w+<.*<.*>\s+>', line):
error(line_number, 'readability/templatebrackets', 3,
'Use >> for ending template instead of > >.')
if search(r'\w+<\s+::\w+>', line):
error(line_number, 'readability/templatebrackets', 3,
'Use <:: for template start instead of < ::.')
# Track class entry and exit, and attempt to find cases within the
# class declaration that don't meet the C++ style
# guidelines. Tracking is very dependent on the code matching Google
# style guidelines, but it seems to perform well enough in testing
# to be a worthwhile addition to the checks.
classinfo_stack = class_state.classinfo_stack
# Look for a class declaration
class_decl_match = match(
r'\s*(template\s*<[\w\s<>,:]*>\s*)?(class|struct)\s+(\w+(::\w+)*)', line)
if class_decl_match:
classinfo_stack.append(_ClassInfo(class_decl_match.group(3), line_number))
# Everything else in this function uses the top of the stack if it's
# not empty.
if not classinfo_stack:
return
classinfo = classinfo_stack[-1]
# If the opening brace hasn't been seen look for it and also
# parent class declarations.
if not classinfo.seen_open_brace:
# If the line has a ';' in it, assume it's a forward declaration or
# a single-line class declaration, which we won't process.
if ';' in line:
classinfo_stack.pop()
return
classinfo.seen_open_brace = ('{' in line)
# Look for a bare ':'
if search('(^|[^:]):($|[^:])', line):
classinfo.is_derived = True
if not classinfo.seen_open_brace:
return # Everything else in this function is for after open brace
# The class may have been declared with namespace or classname qualifiers.
# The constructor and destructor will not have those qualifiers.
base_classname = classinfo.name.split('::')[-1]
# Look for single-argument constructors that aren't marked explicit.
# Technically a valid construct, but against style.
args = match(r'(?<!explicit)\s+%s\s*\(([^,()]+)\)'
% re.escape(base_classname),
line)
if (args
and args.group(1) != 'void'
and not match(r'(const\s+)?%s\s*&' % re.escape(base_classname),
args.group(1).strip())):
error(line_number, 'runtime/explicit', 5,
'Single-argument constructors should be marked explicit.')
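# Illustrative example (hypothetical class Foo) for the explicit-constructor check above:
#   Foo(int id);              -> flagged; should be "explicit Foo(int id);"
#   Foo(const Foo& other);    -> not flagged (copy constructors are exempt)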
# Look for methods declared virtual.
if search(r'\bvirtual\b', line):
classinfo.virtual_method_line_number = line_number
# Only look for a destructor declaration on the same line. It would
# be extremely unlikely for the destructor declaration to occupy
# more than one line.
if search(r'~%s\s*\(' % base_classname, line):
classinfo.has_virtual_destructor = True
# Look for class end.
brace_depth = classinfo.brace_depth
brace_depth = brace_depth + line.count('{') - line.count('}')
if brace_depth <= 0:
classinfo = classinfo_stack.pop()
# Try to detect missing virtual destructor declarations.
# For now, only warn if a non-derived class with virtual methods lacks
# a virtual destructor. This is to make it less likely that people will
# declare derived virtual destructors without declaring the base
# destructor virtual.
if ((classinfo.virtual_method_line_number is not None)
and (not classinfo.has_virtual_destructor)
and (not classinfo.is_derived)): # Only warn for base classes
error(classinfo.line_number, 'runtime/virtual', 4,
'The class %s probably needs a virtual destructor due to '
'having virtual method(s), one declared at line %d.'
% (classinfo.name, classinfo.virtual_method_line_number))
# Look for mixed bool and unsigned bitfields.
if classinfo.bool_bitfields and classinfo.unsigned_bitfields:
bool_list = ', '.join(classinfo.bool_bitfields)
unsigned_list = ', '.join(classinfo.unsigned_bitfields)
error(classinfo.line_number, 'runtime/bitfields', 5,
'The class %s contains mixed unsigned and bool bitfields, '
'which will pack into separate words on the MSVC compiler.\n'
'Bool bitfields are [%s].\nUnsigned bitfields are [%s].\n'
'Consider converting bool bitfields to unsigned.'
% (classinfo.name, bool_list, unsigned_list))
else:
classinfo.brace_depth = brace_depth
well_typed_bitfield = False
# Look for bool <name> : 1 declarations.
args = search(r'\bbool\s+(\S*)\s*:\s*\d+\s*;', line)
if args:
classinfo.bool_bitfields.append('%d: %s' % (line_number, args.group(1)))
well_typed_bitfield = True
# Look for unsigned <name> : n declarations.
args = search(r'\bunsigned\s+(?:int\s+)?(\S+)\s*:\s*\d+\s*;', line)
if args:
classinfo.unsigned_bitfields.append('%d: %s' % (line_number, args.group(1)))
well_typed_bitfield = True
# Look for other bitfield declarations. We don't care about those in
# size-matching structs.
if not (well_typed_bitfield or classinfo.name.startswith('SameSizeAs') or
classinfo.name.startswith('Expected')):
args = match(r'\s*(\S+)\s+(\S+)\s*:\s*\d+\s*;', line)
if args:
error(line_number, 'runtime/bitfields', 4,
'Member %s of class %s defined as a bitfield of type %s. '
'Please declare all bitfields as unsigned.'
% (args.group(2), classinfo.name, args.group(1)))
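# Illustrative (hypothetical) bitfield declarations and how the checks above treat them:
#   bool m_isValid : 1;       -> recorded as a bool bitfield
#   unsigned m_count : 3;     -> recorded as an unsigned bitfield
#   short m_size : 4;         -> flagged; bitfields should be declared unsigned
# A class containing both bool and unsigned bitfields is reported once, at its
# declaration line, because MSVC packs the two kinds into separate words.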
def is_blank_line(line):
"""Returns true if the given line is blank.
We consider a line to be blank if the line is empty or consists of
only white spaces.
Args:
line: A line of a string.
Returns:
True, if the given line is blank.
"""
return not line or line.isspace()
def detect_functions(clean_lines, line_number, function_state, error):
"""Finds where functions start and end.
Uses a simplistic algorithm assuming other style guidelines
(especially spacing) are followed.
Trivial bodies are unchecked, so constructors with huge initializer lists
may be missed.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
# Are we now past the end of a function?
if function_state.end_position.row + 1 == line_number:
function_state.end()
# If we're in a function, don't try to detect a new one.
if function_state.in_a_function:
return
lines = clean_lines.lines
line = lines[line_number]
raw = clean_lines.raw_lines
raw_line = raw[line_number]
# Lines ending with a \ indicate a macro. Don't try to check them.
if raw_line.endswith('\\'):
return
regexp = r'\s*(\w(\w|::|\*|\&|\s|<|>|,|~|(operator\s*(/|-|=|!|\+)+))*)\(' # decls * & space::name( ...
match_result = match(regexp, line)
if not match_result:
return
# If the name is all caps and underscores, figure it's a macro and
# ignore it, unless it's TEST or TEST_F.
function_name = match_result.group(1).split()[-1]
if function_name != 'TEST' and function_name != 'TEST_F' and match(r'[A-Z_]+$', function_name):
return
joined_line = ''
for start_line_number in xrange(line_number, clean_lines.num_lines()):
start_line = clean_lines.elided[start_line_number]
joined_line += ' ' + start_line.lstrip()
body_match = search(r'{|;', start_line)
if body_match:
body_start_position = Position(start_line_number, body_match.start(0))
# Replace template constructs with _ so that no spaces remain in the function name,
# while keeping the column numbers of other characters the same as "line".
line_with_no_templates = iteratively_replace_matches_with_char(r'<[^<>]*>', '_', line)
match_function = search(r'((\w|:|<|>|,|~|(operator\s*(/|-|=|!|\+)+))*)\(', line_with_no_templates)
if not match_function:
return # The '(' must have been inside of a template.
# Use the column numbers from the modified line to find the
# function name in the original line.
function = line[match_function.start(1):match_function.end(1)]
function_name_start_position = Position(line_number, match_function.start(1))
if match(r'TEST', function): # Handle TEST... macros
parameter_regexp = search(r'(\(.*\))', joined_line)
if parameter_regexp: # Ignore bad syntax
function += parameter_regexp.group(1)
else:
function += '()'
parameter_start_position = Position(line_number, match_function.end(1))
parameter_end_position = close_expression(clean_lines.elided, parameter_start_position)
if parameter_end_position.row == len(clean_lines.elided):
# No end was found.
return
if start_line[body_start_position.column] == ';':
end_position = Position(body_start_position.row, body_start_position.column + 1)
else:
end_position = close_expression(clean_lines.elided, body_start_position)
# Check for nonsensical positions. (This happens in test cases which check code snippets.)
if parameter_end_position > body_start_position:
return
function_state.begin(function, function_name_start_position, body_start_position, end_position,
parameter_start_position, parameter_end_position, clean_lines)
return
# No body for the function (or evidence of a non-function) was found.
error(line_number, 'readability/fn_size', 5,
'Lint failed to find start of function body.')
def check_for_function_lengths(clean_lines, line_number, function_state, error):
"""Reports for long function bodies.
For an overview why this is done, see:
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
Blank/comment lines are not counted so as to avoid encouraging the removal
of vertical space and comments just to get through a lint check.
NOLINT *on the last line of a function* disables this check.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
lines = clean_lines.lines
line = lines[line_number]
raw = clean_lines.raw_lines
raw_line = raw[line_number]
if function_state.end_position.row == line_number: # last line
if not search(r'\bNOLINT\b', raw_line):
function_state.check(error, line_number)
elif not match(r'^\s*$', line):
function_state.count(line_number) # Count non-blank/non-comment lines.
def _check_parameter_name_against_text(parameter, text, error):
"""Checks to see if the parameter name is contained within the text.
Return false if the check failed (i.e. an error was produced).
"""
# Treat 'lower with underscores' as a canonical form because it is
# case insensitive while still retaining word breaks. (This ensures that
# 'elate' doesn't look like it is a duplicate of 'NateLate'.)
canonical_parameter_name = parameter.lower_with_underscores_name()
# Appends "object" to all text to catch variables that did the same (but only
# do this when the parameter name is more than a single character to avoid
# flagging 'b' which may be an ok variable when used in an rgba function).
if len(canonical_parameter_name) > 1:
text = sub(r'(\w)\b', r'\1Object', text)
canonical_text = _convert_to_lower_with_underscores(text)
# Used to detect cases like ec for ExceptionCode.
acronym = _create_acronym(text).lower()
if canonical_text.find(canonical_parameter_name) != -1 or acronym.find(canonical_parameter_name) != -1:
error(parameter.row, 'readability/parameter_name', 5,
'The parameter name "%s" adds no information, so it should be removed.' % parameter.name)
return False
return True
def check_function_definition(filename, file_extension, clean_lines, line_number, function_state, error):
"""Check that function definitions for style issues.
Specifically, check that parameter names in declarations add information.
Args:
filename: Filename of the file that is being processed.
file_extension: The current file extension, without the leading dot.
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
if line_number != function_state.body_start_position.row:
return
modifiers_and_return_type = function_state.modifiers_and_return_type()
if filename.find('/chromium/') != -1 and search(r'\bWEBKIT_EXPORT\b', modifiers_and_return_type):
if (filename.find('/chromium/public/') == -1 and filename.find('/chromium/tests/') == -1
and filename.find('chromium/platform') == -1):
error(function_state.function_name_start_position.row, 'readability/webkit_export', 5,
'WEBKIT_EXPORT should only appear in the chromium public (or tests) directory.')
elif not file_extension == "h":
error(function_state.function_name_start_position.row, 'readability/webkit_export', 5,
'WEBKIT_EXPORT should only be used in header files.')
elif not function_state.is_declaration or search(r'\binline\b', modifiers_and_return_type):
error(function_state.function_name_start_position.row, 'readability/webkit_export', 5,
'WEBKIT_EXPORT should not be used on a function with a body.')
elif function_state.is_pure:
error(function_state.function_name_start_position.row, 'readability/webkit_export', 5,
'WEBKIT_EXPORT should not be used with a pure virtual function.')
parameter_list = function_state.parameter_list()
for parameter in parameter_list:
# Do checks specific to function declarations and parameter names.
if not function_state.is_declaration or not parameter.name:
continue
# Check the parameter name against the function name for single parameter set functions.
if len(parameter_list) == 1 and match('set[A-Z]', function_state.current_function):
trimmed_function_name = function_state.current_function[len('set'):]
if not _check_parameter_name_against_text(parameter, trimmed_function_name, error):
continue # Since an error was noted for this name, move to the next parameter.
# Check the parameter name against the type.
if not _check_parameter_name_against_text(parameter, parameter.type, error):
continue # Since an error was noted for this name, move to the next parameter.
def check_pass_ptr_usage(clean_lines, line_number, function_state, error):
"""Check for proper usage of Pass*Ptr.
Currently this is limited to detecting declarations of Pass*Ptr
variables inside of functions.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
if not function_state.in_a_function:
return
lines = clean_lines.lines
line = lines[line_number]
if line_number > function_state.body_start_position.row:
matched_pass_ptr = match(r'^\s*Pass([A-Z][A-Za-z]*)Ptr<', line)
if matched_pass_ptr:
type_name = 'Pass%sPtr' % matched_pass_ptr.group(1)
error(line_number, 'readability/pass_ptr', 5,
'Local variables should never be %s (see '
'http://webkit.org/coding/RefPtr.html).' % type_name)
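# Illustrative example (hypothetical function body): a local declared as
#   PassRefPtr<Node> protector = node;
# is flagged; locals should use RefPtr<Node> instead, per the RefPtr guidelines linked above.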
def check_for_leaky_patterns(clean_lines, line_number, function_state, error):
"""Check for constructs known to be leak prone.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
lines = clean_lines.lines
line = lines[line_number]
matched_get_dc = search(r'\b(?P<function_name>GetDC(Ex)?)\s*\(', line)
if matched_get_dc:
error(line_number, 'runtime/leaky_pattern', 5,
'Use the class HWndDC instead of calling %s to avoid potential '
'memory leaks.' % matched_get_dc.group('function_name'))
matched_create_dc = search(r'\b(?P<function_name>Create(Compatible)?DC)\s*\(', line)
matched_own_dc = search(r'\badoptPtr\b', line)
if matched_create_dc and not matched_own_dc:
error(line_number, 'runtime/leaky_pattern', 5,
'Use adoptPtr and OwnPtr<HDC> when calling %s to avoid potential '
'memory leaks.' % matched_create_dc.group('function_name'))
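# Illustrative examples (hypothetical snippets) for the leak-prone patterns above:
#   HDC dc = GetDC(hwnd);                                 -> flagged; use the HWndDC class
#   HDC dc = CreateCompatibleDC(hdc);                     -> flagged; no adoptPtr on the line
#   OwnPtr<HDC> dc = adoptPtr(CreateCompatibleDC(hdc));   -> accepted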
def check_spacing(file_extension, clean_lines, line_number, error):
"""Checks for the correctness of various spacing issues in the code.
Things we check for: spaces around operators, spaces after
if/for/while/switch, no spaces around parens in function calls, two
spaces between code and comment, don't start a block with a blank
line, don't end a function with a blank line, don't have too many
blank lines in a row.
Args:
file_extension: The current file extension, without the leading dot.
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[line_number] # get rid of comments and strings
# You shouldn't have a space before a semicolon at the end of the line.
# There's a special case for "for" since the style guide allows space before
# the semicolon there.
if search(r':\s*;\s*$', line):
error(line_number, 'whitespace/semicolon', 5,
'Semicolon defining empty statement. Use { } instead.')
elif search(r'^\s*;\s*$', line):
error(line_number, 'whitespace/semicolon', 5,
'Line contains only semicolon. If this should be an empty statement, '
'use { } instead.')
elif (search(r'\b(for|while)\s*\(.*\)\s*;\s*$', line)
and line.count('(') == line.count(')')
# Allow do {} while();
and not search(r'}\s*while', line)):
error(line_number, 'whitespace/semicolon', 5,
'Semicolon defining empty statement for this loop. Use { } instead.')
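# Illustrative examples (hypothetical snippets) for the semicolon checks above:
#   ;                           -> flagged; use { } for an empty statement
#   while (advance());          -> flagged; use "while (advance()) { }"
#   do { } while (!done());     -> accepted ("do { } while" is allowed)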
def get_previous_non_blank_line(clean_lines, line_number):
"""Return the most recent non-blank line and its line number.
Args:
clean_lines: A CleansedLines instance containing the file contents.
line_number: The number of the line to check.
Returns:
A tuple with two elements. The first element is the contents of the last
non-blank line before the current line, or the empty string if this is the
first non-blank line. The second is the line number of that line, or -1
if this is the first non-blank line.
"""
previous_line_number = line_number - 1
while previous_line_number >= 0:
previous_line = clean_lines.elided[previous_line_number]
if not is_blank_line(previous_line): # if not a blank line...
return (previous_line, previous_line_number)
previous_line_number -= 1
return ('', -1)
def check_enum_casing(clean_lines, line_number, enum_state, error):
"""Looks for incorrectly named enum values.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
enum_state: A _EnumState instance which maintains enum declaration state.
error: The function to call with any errors found.
"""
line = clean_lines.elided[line_number] # Get rid of comments and strings.
if not enum_state.process_clean_line(line):
error(line_number, 'readability/enum_casing', 4,
'enum members should use InterCaps with an initial capital letter.')
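# Illustrative example (the actual parsing lives in _EnumState, defined elsewhere):
#   enum Color { RED, GREEN };   -> flagged; members should use InterCaps
#   enum Color { Red, Green };   -> accepted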
def get_initial_spaces_for_line(clean_line):
initial_spaces = 0
while initial_spaces < len(clean_line) and clean_line[initial_spaces] == ' ':
initial_spaces += 1
return initial_spaces
def check_using_std(clean_lines, line_number, file_state, error):
"""Looks for 'using std::foo;' statements which should be replaced with 'using namespace std;'.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
file_state: A _FileState instance which maintains information about
the state of things in the file.
error: The function to call with any errors found.
"""
# This check doesn't apply to C or Objective-C implementation files.
if file_state.is_c_or_objective_c():
return
line = clean_lines.elided[line_number] # Get rid of comments and strings.
using_std_match = match(r'\s*using\s+std::(?P<method_name>\S+)\s*;\s*$', line)
if not using_std_match:
return
method_name = using_std_match.group('method_name')
# Exception for the established idiom for swapping objects in generic code.
if method_name == 'swap':
return
error(line_number, 'build/using_std', 4,
"Use 'using namespace std;' instead of 'using std::%s;'." % method_name)
def check_max_min_macros(clean_lines, line_number, file_state, error):
"""Looks use of MAX() and MIN() macros that should be replaced with std::max() and std::min().
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
file_state: A _FileState instance which maintains information about
the state of things in the file.
error: The function to call with any errors found.
"""
# This check doesn't apply to C or Objective-C implementation files.
if file_state.is_c_or_objective_c():
return
line = clean_lines.elided[line_number] # Get rid of comments and strings.
max_min_macros_search = search(r'\b(?P<max_min_macro>(MAX|MIN))\s*\(', line)
if not max_min_macros_search:
return
max_min_macro = max_min_macros_search.group('max_min_macro')
max_min_macro_lower = max_min_macro.lower()
error(line_number, 'runtime/max_min_macros', 4,
'Use std::%s() or std::%s<type>() instead of the %s() macro.'
% (max_min_macro_lower, max_min_macro_lower, max_min_macro))
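# Illustrative example (hypothetical snippet):
#   size_t n = MAX(a, b);   -> flagged; use std::max(a, b) or std::max<size_t>(a, b)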
def check_ctype_functions(clean_lines, line_number, file_state, error):
"""Looks for use of the standard functions in ctype.h and suggest they be replaced
by use of equivalent ones in <wtf/ASCIICType.h>?.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
file_state: A _FileState instance which maintains information about
the state of things in the file.
error: The function to call with any errors found.
"""
line = clean_lines.elided[line_number] # Get rid of comments and strings.
ctype_function_search = search(
(r'\b(?P<ctype_function>(isalnum|isalpha|isascii|isblank|iscntrl|isdigit|isgraph|'
r'islower|isprint|ispunct|isspace|isupper|isxdigit|toascii|tolower|toupper))\s*\('), line)
if not ctype_function_search:
return
ctype_function = ctype_function_search.group('ctype_function')
error(line_number, 'runtime/ctype_function', 4,
'Use equivalent function in <wtf/ASCIICType.h> instead of the %s() function.'
% (ctype_function))
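# Illustrative example (hypothetical snippet; the suggested replacement name is an
# assumption about what <wtf/ASCIICType.h> provides):
#   if (isdigit(c)) ...   -> flagged; use the ASCIICType.h equivalent, e.g. isASCIIDigit(c)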
def check_braces(clean_lines, line_number, error):
"""Looks for misplaced braces (e.g. at the end of line).
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[line_number] # Get rid of comments and strings.
# Braces shouldn't be followed by a ; unless they're defining a struct
# or initializing an array.
# We can't tell in general, but we can for some common cases.
previous_line_number = line_number
while True:
(previous_line, previous_line_number) = get_previous_non_blank_line(clean_lines, previous_line_number)
if match(r'\s+{.*}\s*;', line) and not previous_line.count(';'):
line = previous_line + line
else:
break
if (search(r'{.*}\s*;', line)
and line.count('{') == line.count('}')
and not search(r'struct|class|enum|\s*=\s*{', line)):
error(line_number, 'readability/braces', 4,
"You don't need a ; after a }")
def check_exit_statement_simplifications(clean_lines, line_number, error):
"""Looks for else or else-if statements that should be written as an
if statement when the prior if concludes with a return, break, continue or
goto statement.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[line_number] # Get rid of comments and strings.
else_match = match(r'(?P<else_indentation>\s*)(\}\s*)?else(\s+if\s*\(|(?P<else>\s*(\{\s*)?\Z))', line)
if not else_match:
return
else_indentation = else_match.group('else_indentation')
inner_indentation = else_indentation + ' ' * 2
previous_lines = clean_lines.elided[:line_number]
previous_lines.reverse()
line_offset = 0
encountered_exit_statement = False
for current_line in previous_lines:
line_offset -= 1
# Skip not only empty lines but also those with preprocessor directives
# and goto labels.
if current_line.strip() == '' or current_line.startswith('#') or match(r'\w+\s*:\s*$', current_line):
continue
# Skip lines with closing braces on the original indentation level.
# Even though the styleguide says they should be on the same line as
# the "else if" statement, we also want to check for instances where
# the current code does not comply with the coding style. Thus, ignore
# these lines and proceed to the line before that.
if current_line == else_indentation + '}':
continue
current_indentation_match = match(r'(?P<indentation>\s*)(?P<remaining_line>.*)$', current_line)
current_indentation = current_indentation_match.group('indentation')
remaining_line = current_indentation_match.group('remaining_line')
# As we're going up the lines, the first real statement to encounter
# has to be an exit statement (return, break, continue or goto) -
# otherwise, this check doesn't apply.
if not encountered_exit_statement:
# We only want to find exit statements if they are on exactly
# the same level of indentation as expected from the code inside
# the block. If the indentation doesn't strictly match then we
# might have a nested if or something, which must be ignored.
if current_indentation != inner_indentation:
break
if match(r'(return(\W+.*)|(break|continue)\s*;|goto\s*\w+;)$', remaining_line):
encountered_exit_statement = True
continue
break
# When code execution reaches this point, we've found an exit statement
# as last statement of the previous block. Now we only need to make
# sure that the block belongs to an "if", then we can throw an error.
# Skip lines with opening braces on the original indentation level,
# similar to the closing braces check above. ("if (condition)\n{")
if current_line == else_indentation + '{':
continue
# Skip everything that's further indented than our "else" or "else if".
if current_indentation.startswith(else_indentation) and current_indentation != else_indentation:
continue
# So we've got a line with same (or less) indentation. Is it an "if"?
# If yes: throw an error. If no: don't throw an error.
# Whatever the outcome, this is the end of our loop.
if match(r'if\s*\(', remaining_line):
if else_match.start('else') != -1:
error(line_number + line_offset, 'readability/control_flow', 4,
'An else statement can be removed when the prior "if" '
'concludes with a return, break, continue or goto statement.')
else:
error(line_number + line_offset, 'readability/control_flow', 4,
'An else if statement should be written as an if statement '
'when the prior "if" concludes with a return, break, '
'continue or goto statement.')
break
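# Illustrative example (hypothetical snippet): in
#   if (!file)
#       return false;
#   else
#       close(file);
# the "else" is flagged because the prior "if" branch concludes with a return statement.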
def replaceable_check(operator, macro, line):
"""Determine whether a basic CHECK can be replaced with a more specific one.
For example suggest using CHECK_EQ instead of CHECK(a == b) and
similarly for CHECK_GE, CHECK_GT, CHECK_LE, CHECK_LT, CHECK_NE.
Args:
operator: The C++ operator used in the CHECK.
macro: The CHECK or EXPECT macro being called.
line: The current source line.
Returns:
True if the CHECK can be replaced with a more specific one.
"""
# This matches decimal and hex integers, strings, and chars (in that order).
match_constant = r'([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')'
# Expression to match two sides of the operator with something that
# looks like a literal, since CHECK(x == iterator) won't compile.
# This means we can't catch all the cases where a more specific
# CHECK is possible, but it's less annoying than dealing with
# extraneous warnings.
match_this = (r'\s*' + macro + r'\((\s*' +
match_constant + r'\s*' + operator + r'[^<>].*|'
r'.*[^<>]' + operator + r'\s*' + match_constant +
r'\s*\))')
# Don't complain about CHECK(x == NULL) or similar because
# CHECK_EQ(x, NULL) won't compile (requires a cast).
# Also, don't complain about more complex boolean expressions
# involving && or || such as CHECK(a == b || c == d).
return match(match_this, line) and not search(r'NULL|&&|\|\|', line)
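# Illustrative examples (hypothetical snippets):
#   CHECK(count == 5)    -> replaceable; suggest CHECK_EQ(count, 5)
#   CHECK(ptr == NULL)   -> not replaceable; CHECK_EQ(ptr, NULL) would require a cast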
def check_check(clean_lines, line_number, error):
"""Checks the use of CHECK and EXPECT macros.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
error: The function to call with any errors found.
"""
# Decide the set of replacement macros that should be suggested
raw_lines = clean_lines.raw_lines
current_macro = ''
for macro in _CHECK_MACROS:
if raw_lines[line_number].find(macro) >= 0:
current_macro = macro
break
if not current_macro:
# Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT'
return
line = clean_lines.elided[line_number] # get rid of comments and strings
# Encourage replacing plain CHECKs with CHECK_EQ/CHECK_NE/etc.
for operator in ['==', '!=', '>=', '>', '<=', '<']:
if replaceable_check(operator, current_macro, line):
error(line_number, 'readability/check', 2,
'Consider using %s(a, b) instead of %s(a %s b)' % (
_CHECK_REPLACEMENT[current_macro][operator],
current_macro, operator))
break
def check_deprecated_macros(clean_lines, line_number, error):
"""Checks the use of obsolete macros.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[line_number]
for pair in _DEPRECATED_MACROS:
if search(r'\b' + pair[0] + r'\(', line):
error(line_number, 'build/deprecated', 5,
'%s is deprecated. Use %s instead.' % (pair[0], pair[1]))
def check_for_comparisons_to_boolean(clean_lines, line_number, error):
# Get the line without comments and strings.
line = clean_lines.elided[line_number]
# Must include NULL here, as otherwise users will convert NULL to 0 and
# then we can't catch it, since it looks like a valid integer comparison.
if search(r'[=!]=\s*(NULL|nullptr|true|false)[^\w.]', line) or search(r'[^\w.](NULL|nullptr|true|false)\s*[=!]=', line):
if not search('LIKELY', line) and not search('UNLIKELY', line):
error(line_number, 'readability/comparison_to_boolean', 5,
'Tests for true/false and null/non-null should be done without equality comparisons.')
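# Illustrative examples (hypothetical snippets):
#   if (ptr == NULL)       -> flagged; write "if (!ptr)"
#   if (enabled == true)   -> flagged; write "if (enabled)"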
def check_for_null(clean_lines, line_number, file_state, error):
# This check doesn't apply to C or Objective-C implementation files.
if file_state.is_c_or_objective_c():
return
line = clean_lines.elided[line_number]
# Don't warn about NULL usage in g_*(). See Bug 32858 and 39372.
if search(r'\bg(_[a-z]+)+\b', line):
return
# Don't warn about NULL usage in gst_*(). See Bug 70498.
if search(r'\bgst(_[a-z]+)+\b', line):
return
# Don't warn about NULL usage in gdk_pixbuf_save_to_*{join,concat}(). See Bug 43090.
if search(r'\bgdk_pixbuf_save_to\w+\b', line):
return
# Don't warn about NULL usage in gtk_widget_style_get(),
# gtk_style_context_get_style(), or gtk_style_context_get(). See Bug 51758
if search(r'\bgtk_widget_style_get\(\w+\b', line) or search(r'\bgtk_style_context_get_style\(\w+\b',
line) or search(r'\bgtk_style_context_get\(\w+\b', line):
return
# Don't warn about NULL usage in soup_server_new(). See Bug 77890.
if search(r'\bsoup_server_new\(\w+\b', line):
return
if search(r'\bNULL\b', line):
error(line_number, 'readability/null', 5, 'Use 0 instead of NULL.')
return
line = clean_lines.raw_lines[line_number]
# See if NULL occurs in any comments in the line. If the search for NULL using the raw line
# matches, then do the check with strings collapsed to avoid giving errors for
# NULLs occurring in strings.
if search(r'\bNULL\b', line) and search(r'\bNULL\b', CleansedLines.collapse_strings(line)):
error(line_number, 'readability/null', 4, 'Use 0 or null instead of NULL (even in *comments*).')
def get_line_width(line):
"""Determines the width of the line in column positions.
Args:
line: A string, which may be a Unicode string.
Returns:
The width of the line in column positions, accounting for Unicode
combining characters and wide characters.
"""
if isinstance(line, unicode):
width = 0
for c in unicodedata.normalize('NFC', line):
if unicodedata.east_asian_width(c) in ('W', 'F'):
width += 2
elif not unicodedata.combining(c):
width += 1
return width
return len(line)
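# Illustrative examples (not doctests): get_line_width(u'foo') is 3, while
# get_line_width(u'\u4e2d\u6587') is 4, because each CJK character occupies
# two column positions.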
def check_conditional_and_loop_bodies_for_brace_violations(clean_lines, line_number, error):
"""Scans the bodies of conditionals and loops, and in particular
all the arms of conditionals, for violations in the use of braces.
Specifically:
(1) If an arm omits braces, then the following statement must be on one
physical line.
(2) If any arm uses braces, all arms must use them.
These checks are only done here if we find the start of an
'if/for/foreach/while' statement, because this function fails fast
if it encounters constructs it doesn't understand. Checks
elsewhere validate other constraints, such as requiring '}' and
'else' to be on the same line.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
error: The function to call with any errors found.
"""
# We work with the elided lines. Comments have been removed, but line
# numbers are preserved, so we can still find situations where
# single-expression control clauses span multiple lines, or when a
# comment preceded the expression.
lines = clean_lines.elided
line = lines[line_number]
# Match control structures.
control_match = match(r'\s*(if|foreach|for|while)\s*\(', line)
if not control_match:
return
# Found the start of a conditional or loop.
# The following loop handles all potential arms of the control clause.
# The initial conditions are the following:
# - We start on the opening paren '(' of the condition, *unless* we are
# handling an 'else' block, in which case there is no condition.
# - In the latter case, we start at the position just beyond the 'else'
# token.
expect_conditional_expression = True
know_whether_using_braces = False
using_braces = False
search_for_else_clause = control_match.group(1) == "if"
current_pos = Position(line_number, control_match.end() - 1)
while True:
if expect_conditional_expression:
# Try to find the end of the conditional expression,
# potentially spanning multiple lines.
open_paren_pos = current_pos
close_paren_pos = close_expression(lines, open_paren_pos)
if close_paren_pos.column < 0:
return
current_pos = close_paren_pos
end_line_of_conditional = current_pos.row
# Find the start of the body.
current_pos = _find_in_lines(r'\S', lines, current_pos, None)
if not current_pos:
return
current_arm_uses_brace = False
if lines[current_pos.row][current_pos.column] == '{':
current_arm_uses_brace = True
if know_whether_using_braces:
if using_braces != current_arm_uses_brace:
error(current_pos.row, 'whitespace/braces', 4,
'If one part of an if-else statement uses curly braces, the other part must too.')
return
know_whether_using_braces = True
using_braces = current_arm_uses_brace
if using_braces:
# Skip over the entire arm.
current_pos = close_expression(lines, current_pos)
if current_pos.column < 0:
return
else:
# Skip over the current expression.
current_pos = _find_in_lines(r';', lines, current_pos, None)
if not current_pos:
return
# If the end of the expression is beyond the line just after
# the close parenthesis or control clause, we've found a
# single-expression arm that spans multiple lines. (We don't
# fire this error for expressions ending on the same line; that
# is a different error, handled elsewhere.)
if current_pos.row > 1 + end_line_of_conditional:
error(current_pos.row, 'whitespace/braces', 4,
'A conditional or loop body must use braces if the statement is more than one line long.')
return
current_pos = Position(current_pos.row, 1 + current_pos.column)
# At this point current_pos points just past the end of the last
# arm. If we just handled the last control clause, we're done.
if not search_for_else_clause:
return
# Scan forward for the next non-whitespace character, and see
# whether we are continuing a conditional (with an 'else' or
# 'else if'), or are done.
current_pos = _find_in_lines(r'\S', lines, current_pos, None)
if not current_pos:
return
next_nonspace_string = lines[current_pos.row][current_pos.column:]
next_conditional = match(r'(else\s*if|else)', next_nonspace_string)
if not next_conditional:
# Done processing this 'if' and all arms.
return
if next_conditional.group(1) == "else if":
current_pos = _find_in_lines(r'\(', lines, current_pos, None)
else:
current_pos.column += 4 # skip 'else'
expect_conditional_expression = False
search_for_else_clause = False
# End while loop
def check_redundant_virtual(clean_lines, linenum, error):
"""Checks if line contains a redundant "virtual" function-specifier.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Look for "virtual" on current line.
line = clean_lines.elided[linenum]
virtual = match(r'^(.*)(\bvirtual\b)(.*)$', line)
if not virtual:
return
# Ignore "virtual" keywords that are near access-specifiers. These
# are only used in class base-specifier and do not apply to member
# functions.
if (search(r'\b(public|protected|private)\s+$', virtual.group(1)) or
match(r'^\s+(public|protected|private)\b', virtual.group(3))):
return
# Ignore the "virtual" keyword from virtual base classes. Usually
# there is a colon on the same line in these cases (virtual base
# classes are rare in google3 because multiple inheritance is rare).
if match(r'^.*[^:]:[^:].*$', line):
return
# Look for the next opening parenthesis. This is the start of the
# parameter list (possibly on the next line shortly after virtual).
# TODO(unknown): doesn't work if there are virtual functions with
# decltype() or other things that use parentheses, but csearch suggests
# that this is rare.
end_position = Position(-1, -1)
start_col = len(virtual.group(2))
for start_line in xrange(linenum, min(linenum + 3, clean_lines.num_lines())):
line = clean_lines.elided[start_line][start_col:]
parameter_list = match(r'^([^(]*)\(', line)
if parameter_list:
# Match parentheses to find the end of the parameter list
end_position = close_expression(
clean_lines.elided, Position(start_line, start_col + len(parameter_list.group(1))))
break
start_col = 0
if end_position.column < 0:
return # Couldn't find end of parameter list, give up
# Look for "override" or "final" after the parameter list
# (possibly on the next few lines).
for i in xrange(end_position.row, min(end_position.row + 3, clean_lines.num_lines())):
line = clean_lines.elided[i][end_position.column:]
override_or_final = search(r'\b(override|final)\b', line)
if override_or_final:
error(linenum, 'readability/inheritance', 4,
('"virtual" is redundant since function is '
'already declared as "%s"' % override_or_final.group(1)))
if search(r'[^\w]\s*$', line):
break
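# Illustrative example (hypothetical declaration):
#   virtual void paint() override;   -> flagged; "virtual" is redundant with "override"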
def check_redundant_override(clean_lines, linenum, error):
"""Checks if line contains a redundant "override" virt-specifier.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Look for closing parenthesis nearby. We need one to confirm where
# the declarator ends and where the virt-specifier starts to avoid
# false positives.
line = clean_lines.elided[linenum]
declarator_end = line.rfind(')')
if declarator_end >= 0:
fragment = line[declarator_end:]
else:
if linenum > 1 and clean_lines.elided[linenum - 1].rfind(')') >= 0:
fragment = line
else:
return
# Check that at most one of "override" or "final" is present, not both
if search(r'\boverride\b', fragment) and search(r'\bfinal\b', fragment):
error(linenum, 'readability/inheritance', 4,
('"override" is redundant since function is '
'already declared as "final"'))
def check_style(clean_lines, line_number, file_extension, class_state, file_state, enum_state, error):
"""Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 4-space indents, line lengths,
tab usage, spaces inside code, etc.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
class_state: A _ClassState instance which maintains information about
the current stack of nested class declarations being parsed.
file_state: A _FileState instance which maintains information about
the state of things in the file.
enum_state: A _EnumState instance which maintains the current enum state.
error: The function to call with any errors found.
"""
raw_lines = clean_lines.raw_lines
line = raw_lines[line_number]
# Some more style checks
check_using_std(clean_lines, line_number, file_state, error)
check_max_min_macros(clean_lines, line_number, file_state, error)
check_ctype_functions(clean_lines, line_number, file_state, error)
check_braces(clean_lines, line_number, error)
check_exit_statement_simplifications(clean_lines, line_number, error)
check_spacing(file_extension, clean_lines, line_number, error)
check_check(clean_lines, line_number, error)
check_deprecated_macros(clean_lines, line_number, error)
check_for_comparisons_to_boolean(clean_lines, line_number, error)
check_for_null(clean_lines, line_number, file_state, error)
check_enum_casing(clean_lines, line_number, enum_state, error)
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
# Matches the first component of a filename delimited by -s and _s. That is:
# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo.cpp').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo-bar_baz.cpp').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo_bar-baz.cpp').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _drop_common_suffixes(filename):
"""Drops common suffixes like _test.cpp or -inl.h from filename.
For example:
>>> _drop_common_suffixes('foo/foo-inl.h')
'foo/foo'
>>> _drop_common_suffixes('foo/bar/foo.cpp')
'foo/bar/foo'
>>> _drop_common_suffixes('foo/foo_internal.h')
'foo/foo'
>>> _drop_common_suffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in ('test.cpp', 'regtest.cpp', 'unittest.cpp',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix)
and filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
def _classify_include(filename, include, is_system, include_state):
"""Figures out what kind of header 'include' is.
Args:
filename: The current file cpp_style is running over.
include: The path to a #included file.
is_system: True if the #include used <> rather than "".
include_state: An _IncludeState instance in which the headers are inserted.
Returns:
One of the _XXX_HEADER constants.
For example:
>>> _classify_include('foo.cpp', 'foo.h', False)
_PRIMARY_HEADER
>>> _classify_include('foo.cpp', 'bar.h', False)
_OTHER_HEADER
"""
# If it is a system header we know it is classified as _OTHER_HEADER.
if is_system and not include.startswith('public/'):
return _OTHER_HEADER
# There cannot be primary includes in header files themselves. Only an
# include that exactly matches the header filename is flagged as
# primary, so that it triggers the "don't include yourself" check.
if filename.endswith('.h') and filename != include:
return _OTHER_HEADER
# If the target file basename starts with the include we're checking
# then we consider it the primary header.
target_base = FileInfo(filename).base_name()
include_base = FileInfo(include).base_name()
# If we haven't encountered a primary header, then be lenient in checking.
if not include_state.visited_primary_section():
if target_base.find(include_base) != -1:
return _PRIMARY_HEADER
# If we already encountered a primary header, perform a strict comparison.
# In case the two filename bases are the same then the above lenient check
# probably was a false positive.
elif include_state.visited_primary_section() and target_base == include_base:
return _PRIMARY_HEADER
return _OTHER_HEADER
def _does_primary_header_exist(filename):
"""Return a primary header file name for a file, or empty string
if the file is not source file or primary header does not exist.
"""
fileinfo = FileInfo(filename)
if not fileinfo.is_source():
return False
primary_header = fileinfo.no_extension() + ".h"
return os.path.isfile(primary_header)
def check_include_line(filename, file_extension, clean_lines, line_number, include_state, error):
"""Check rules that are applicable to #include lines.
Strings on #include lines are NOT removed from elided line, to make
certain tasks easier. However, to prevent false positives, checks
applicable to #include lines in CheckLanguage must be put here.
Args:
filename: The name of the current file.
file_extension: The current file extension, without the leading dot.
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
include_state: An _IncludeState instance in which the headers are inserted.
error: The function to call with any errors found.
"""
# FIXME: For readability or as a possible optimization, consider
# exiting early here by checking whether the "build/include"
# category should be checked for the given filename. This
# may involve having the error handler classes expose a
# should_check() method, in addition to the usual __call__
# method.
line = clean_lines.lines[line_number]
matched = _RE_PATTERN_INCLUDE.search(line)
if not matched:
return
include = matched.group(2)
is_system = (matched.group(1) == '<')
# Look for any of the stream classes that are part of standard C++.
if match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
error(line_number, 'readability/streams', 3,
'Streams are highly discouraged.')
# Look for specific includes to fix.
if include.startswith('wtf/') and is_system:
error(line_number, 'build/include', 4,
'wtf includes should be "wtf/file.h" instead of <wtf/file.h>.')
if filename.find('/chromium/') != -1 and include.startswith('cc/CC'):
error(line_number, 'build/include', 4,
'cc includes should be "CCFoo.h" instead of "cc/CCFoo.h".')
duplicate_header = include in include_state
if duplicate_header:
error(line_number, 'build/include', 4,
'"%s" already included at %s:%s' %
(include, filename, include_state[include]))
else:
include_state[include] = line_number
header_type = _classify_include(filename, include, is_system, include_state)
primary_header_exists = _does_primary_header_exist(filename)
include_state.header_types[line_number] = header_type
# Only proceed if this isn't a duplicate header.
if duplicate_header:
return
# We want to ensure that headers appear in the right order:
# 1) for implementation files: primary header, blank line, alphabetically sorted
# 2) for header files: alphabetically sorted
# The include_state object keeps track of the last type seen
# and complains if the header types are out of order or missing.
error_message = include_state.check_next_include_order(header_type,
file_extension == "h",
primary_header_exists)
# Check to make sure we have a blank line after primary header.
if not error_message and header_type == _PRIMARY_HEADER:
next_line = clean_lines.raw_lines[line_number + 1]
if not is_blank_line(next_line):
error(line_number, 'build/include_order', 4,
'You should add a blank line after implementation file\'s own header.')
if error_message:
if file_extension == 'h':
error(line_number, 'build/include_order', 4,
'%s Should be: alphabetically sorted.' %
error_message)
else:
error(line_number, 'build/include_order', 4,
'%s Should be: primary header, blank line, and then alphabetically sorted.' %
error_message)
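# Illustrative include order for an implementation file Foo.cpp (hypothetical names):
#   #include "Foo.h"      (primary header first)
#
#   #include "Bar.h"      (then the remaining headers, alphabetically sorted)
#   #include "Baz.h"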
def check_language(filename, clean_lines, line_number, file_extension, include_state,
file_state, error):
"""Checks rules from the 'C++ language rules' section of cppguide.html.
Some of these rules are hard to test (function overloading, using
uint32 inappropriately), but we do the best we can.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
include_state: An _IncludeState instance in which the headers are inserted.
file_state: A _FileState instance which maintains information about
the state of things in the file.
error: The function to call with any errors found.
"""
# If the line is empty or consists of entirely a comment, no need to
# check it.
line = clean_lines.elided[line_number]
if not line:
return
matched = _RE_PATTERN_INCLUDE.search(line)
if matched:
check_include_line(filename, file_extension, clean_lines, line_number, include_state, error)
return
# FIXME: figure out if they're using default arguments in fn proto.
# Check to see if they're using a conversion function cast.
# I just try to capture the most common basic types, though there are more.
# Parameterless conversion functions, such as bool(), are allowed as they are
# probably a member operator declaration or default constructor.
matched = search(
r'\b(int|float|double|bool|char|int32|uint32|int64|uint64)\([^)]', line)
if matched:
# gMock methods are defined using some variant of MOCK_METHODx(name, type)
# where type may be float(), int(string), etc. Without context they are
# virtually indistinguishable from int(x) casts.
if not match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line):
error(line_number, 'readability/casting', 4,
'Using deprecated casting style. '
'Use static_cast<%s>(...) instead' %
matched.group(1))
check_c_style_cast(line_number, line, clean_lines.raw_lines[line_number],
'static_cast',
r'\((int|float|double|bool|char|u?int(16|32|64))\)',
error)
# This doesn't catch all cases. Consider (const char * const)"hello".
check_c_style_cast(line_number, line, clean_lines.raw_lines[line_number],
'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error)
# In addition, we look for people taking the address of a cast. This
# is dangerous -- casts can assign to temporaries, so the pointer doesn't
# point where you think.
if search(
r'(&\([^)]+\)[\w(])|(&(static|dynamic|reinterpret)_cast\b)', line):
error(line_number, 'runtime/casting', 4,
('Are you taking an address of a cast? '
'This is dangerous: could be a temp var. '
'Take the address before doing the cast, rather than after'))
# Check for people declaring static/global STL strings at the top level.
# This is dangerous because the C++ language does not guarantee that
# globals with constructors are initialized before the first access.
matched = match(
r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
line)
# Make sure it's not a function.
# Function template specialization looks like: "string foo<Type>(...".
# Class template definitions look like: "string Foo<Type>::Method(...".
if matched and not match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)',
matched.group(3)):
error(line_number, 'runtime/string', 4,
'For a static/global string constant, use a C style string instead: '
'"%schar %s[]".' %
(matched.group(1), matched.group(2)))
# Check that we're not using RTTI outside of testing code.
if search(r'\bdynamic_cast<', line):
error(line_number, 'runtime/rtti', 5,
'Do not use dynamic_cast<>. If you need to cast within a class '
"hierarchy, use static_cast<> to upcast. Google doesn't support "
'RTTI.')
if search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
error(line_number, 'runtime/init', 4,
'You seem to be initializing a member variable with itself.')
if file_extension == 'h':
# FIXME: check that 1-arg constructors are explicit.
# How to tell it's a constructor?
# (handled in check_for_non_standard_constructs for now)
pass
# Check if people are using the verboten C basic types. The only exception
# we regularly allow is "unsigned short port" for port.
if search(r'\bshort port\b', line):
if not search(r'\bunsigned short port\b', line):
error(line_number, 'runtime/int', 4,
'Use "unsigned short" for ports, not "short"')
# When snprintf is used, the second argument shouldn't be a literal.
matched = search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
if matched:
error(line_number, 'runtime/printf', 3,
'If you can, use sizeof(%s) instead of %s as the 2nd arg '
'to snprintf.' % (matched.group(1), matched.group(2)))
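# Illustrative example (hypothetical snippet):
#   snprintf(buffer, 10, "%d", n);               -> flagged
#   snprintf(buffer, sizeof(buffer), "%d", n);   -> preferred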
# Check if some verboten C functions are being used.
if search(r'\bsprintf\b', line):
error(line_number, 'runtime/printf', 5,
'Never use sprintf. Use snprintf instead.')
matched = search(r'\b(strcpy|strcat)\b', line)
if matched:
error(line_number, 'runtime/printf', 4,
'Almost always, snprintf is better than %s' % matched.group(1))
if search(r'\bsscanf\b', line):
error(line_number, 'runtime/printf', 1,
'sscanf can be ok, but is slow and can overflow buffers.')
# Check for suspicious usage of "if" like
# } if (a == b) {
if search(r'\}\s*if\s*\(', line):
error(line_number, 'readability/braces', 4,
'Did you mean "else if"? If not, start a new line for "if".')
# Check for potential format string bugs like printf(foo).
# We constrain the pattern not to pick things like DocidForPrintf(foo).
# Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
matched = re.search(r'\b((?:string)?printf)\s*\(([\w.\->()]+)\)', line, re.I)
if matched:
error(line_number, 'runtime/printf', 4,
'Potential format string bug. Do %s("%%s", %s) instead.'
% (matched.group(1), matched.group(2)))
# Check for potential memset bugs like memset(buf, sizeof(buf), 0).
matched = search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
if matched and not match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", matched.group(2)):
error(line_number, 'runtime/memset', 4,
'Did you mean "memset(%s, 0, %s)"?'
% (matched.group(1), matched.group(2)))
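# Illustrative example (hypothetical snippet):
#   memset(buffer, sizeof(buffer), 0);   -> flagged; arguments 2 and 3 are swapped
#   memset(buffer, 0, sizeof(buffer));   -> intended form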
# Detect variable-length arrays.
matched = match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
if (matched and matched.group(2) != 'return' and matched.group(2) != 'delete' and
matched.group(3).find(']') == -1):
# Split the size using space and arithmetic operators as delimiters.
# If any of the resulting tokens are not compile time constants then
# report the error.
tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', matched.group(3))
is_const = True
skip_next = False
for tok in tokens:
if skip_next:
skip_next = False
continue
if search(r'sizeof\(.+\)', tok):
continue
if search(r'arraysize\(\w+\)', tok):
continue
tok = tok.lstrip('(')
tok = tok.rstrip(')')
if not tok:
continue
if match(r'\d+', tok):
continue
if match(r'0[xX][0-9a-fA-F]+', tok):
continue
if match(r'k[A-Z0-9]\w*', tok):
continue
if match(r'(.+::)?k[A-Z0-9]\w*', tok):
continue
if match(r'(.+::)?[A-Z][A-Z0-9_]*', tok):
continue
# A catch all for tricky sizeof cases, including 'sizeof expression',
# 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
# requires skipping the next token because we split on ' ' and '*'.
if tok.startswith('sizeof'):
skip_next = True
continue
is_const = False
break
if not is_const:
error(line_number, 'runtime/arrays', 1,
'Do not use variable-length arrays. Use an appropriately named '
"('k' followed by CamelCase) compile-time constant for the size.")
# Check for use of unnamed namespaces in header files. Registration
# macros are typically OK, so we allow use of "namespace {" on lines
# that end with backslashes.
if (file_extension == 'h'
and search(r'\bnamespace\s*{', line)
and line[-1] != '\\'):
error(line_number, 'build/namespaces', 4,
'Do not use unnamed namespaces in header files. See '
'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
' for more information.')
# Check for plain bitfields declared without either "signed" or "unsigned".
# Most compilers treat such bitfields as signed, but there are still compilers like
# RVCT 4.0 that use unsigned by default.
matched = re.match(
r'\s*((const|mutable)\s+)?(char|(short(\s+int)?)|int|long(\s+(long|int))?)\s+[a-zA-Z_][a-zA-Z0-9_]*\s*:\s*\d+\s*;', line)
if matched:
error(line_number, 'runtime/bitfields', 5,
'Please declare integral type bitfields with either signed or unsigned.')
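# Illustrative example (hypothetical member declarations):
#   int m_isValid : 1;        -> flagged; the sign of a plain "int" bitfield is compiler-dependent
#   unsigned m_isValid : 1;   -> accepted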
check_identifier_name_in_declaration(filename, line_number, line, file_state, error)
# Check for unsigned int (should be just 'unsigned')
if search(r'\bunsigned int\b', line):
error(line_number, 'runtime/unsigned', 1,
'Omit int when using unsigned')
# Check for usage of static_cast<Classname*>.
check_for_object_static_cast(filename, line_number, line, error)
def check_identifier_name_in_declaration(filename, line_number, line, file_state, error):
"""Checks if identifier names contain any underscores.
Because identifiers in the libraries we use contain many underscores,
we only warn about declarations of identifiers and do not check
uses of identifiers.
Args:
filename: The name of the current file.
line_number: The number of the line to check.
line: The line of code to check.
file_state: A _FileState instance which maintains information about
the state of things in the file.
error: The function to call with any errors found.
"""
# We don't check return and delete statements and conversion operator declarations.
if match(r'\s*(return|delete|operator)\b', line):
return
# Basically, a declaration is a type name followed by whitespaces
# followed by an identifier. The type name can be complicated
# due to type adjectives and templates. We remove them first to
# simplify the process to find declarations of identifiers.
# Convert "long long", "long double", and "long long int" to
# simple types, but don't remove simple "long".
line = sub(r'long (long )?(?=long|double|int)', '', line)
# Convert unsigned/signed types to simple types, too.
line = sub(r'(unsigned|signed) (?=char|short|int|long)', '', line)
line = sub(r'\b(inline|using|static|const|volatile|auto|register|extern|typedef|restrict|struct|class|virtual)(?=\W)', '', line)
# Remove "new" and "new (expr)" to simplify, too.
line = sub(r'new\s*(\([^)]*\))?', '', line)
# Remove all template parameters by removing matching < and >.
# Loop until no templates are removed to remove nested templates.
while True:
line, number_of_replacements = subn(r'<([\w\s:]|::)+\s*[*&]*\s*>', '', line)
if not number_of_replacements:
break
# Declarations of local variables can be in condition expressions
# of control flow statements (e.g., "if (LayoutObject* p = o->parent())").
# We remove the keywords and the first parenthesis.
#
# Declarations in "while", "if", and "switch" are different from
# other declarations in two aspects:
#
# - There can be only one declaration between the parentheses.
# (i.e., you cannot write "if (int i = 0, j = 1) {}")
# - The variable must be initialized.
# (i.e., you cannot write "if (int i) {}")
#
# and we will need different treatments for them.
line = sub(r'^\s*for\s*\(', '', line)
line, control_statement = subn(r'^\s*(while|else if|if|switch)\s*\(', '', line)
# Detect variable and functions.
type_regexp = r'\w([\w]|\s*[*&]\s*|::)+'
identifier_regexp = r'(?P<identifier>[\w:]+)'
maybe_bitfield_regexp = r'(:\s*\d+\s*)?'
character_after_identifier_regexp = r'(?P<character_after_identifier>[[;()=,])(?!=)'
declaration_without_type_regexp = r'\s*' + identifier_regexp + \
r'\s*' + maybe_bitfield_regexp + character_after_identifier_regexp
declaration_with_type_regexp = r'\s*' + type_regexp + r'\s' + declaration_without_type_regexp
is_function_arguments = False
number_of_identifiers = 0
while True:
# If we are seeing the first identifier or arguments of a
# function, there should be a type name before an identifier.
if not number_of_identifiers or is_function_arguments:
declaration_regexp = declaration_with_type_regexp
else:
declaration_regexp = declaration_without_type_regexp
matched = match(declaration_regexp, line)
if not matched:
return
identifier = matched.group('identifier')
character_after_identifier = matched.group('character_after_identifier')
# If we removed a non-for-control statement, the character after
# the identifier should be '='. With this rule, we can avoid
# warning for cases like "if (val & INT_MAX) {".
if control_statement and character_after_identifier != '=':
return
is_function_arguments = is_function_arguments or character_after_identifier == '('
# Remove "m_" and "s_" to allow them.
modified_identifier = sub(r'(^|(?<=::))[ms]_', '', identifier)
if not file_state.is_objective_c() and modified_identifier.find('_') >= 0:
# Various exceptions to the rule: JavaScript op codes functions, const_iterator.
if (not (filename.find('JavaScriptCore') >= 0 and modified_identifier.find('op_') >= 0)
and not (filename.find('gtk') >= 0 and modified_identifier.startswith('webkit_'))
and not (filename.find('StructTraits.h') >= 0)
and not modified_identifier.startswith('tst_')
and not modified_identifier.startswith('webkit_dom_object_')
and not modified_identifier.startswith('webkit_soup')
and not modified_identifier.startswith('NPN_')
and not modified_identifier.startswith('NPP_')
and not modified_identifier.startswith('NP_')
and not modified_identifier.startswith('qt_')
and not modified_identifier.startswith('_q_')
and not modified_identifier.startswith('cairo_')
and not modified_identifier.startswith('Ecore_')
and not modified_identifier.startswith('Eina_')
and not modified_identifier.startswith('Evas_')
and not modified_identifier.startswith('Ewk_')
and not modified_identifier.startswith('cti_')
and not modified_identifier.find('::qt_') >= 0
and not modified_identifier.find('::_q_') >= 0
and not modified_identifier == "const_iterator"
and not modified_identifier == "vm_throw"
and not modified_identifier == "DFG_OPERATION"):
error(line_number, 'readability/naming/underscores', 4, identifier +
" is incorrectly named. Don't use underscores in your identifier names.")
# Check for variables named 'l', these are too easy to confuse with '1' in some fonts
if modified_identifier == 'l':
error(line_number, 'readability/naming', 4, identifier +
" is incorrectly named. Don't use the single letter 'l' as an identifier name.")
# There can be only one declaration in non-for-control statements.
if control_statement:
return
# We should continue checking if this is a function
# declaration because we need to check its arguments.
# Also, we need to check multiple declarations.
if character_after_identifier != '(' and character_after_identifier != ',':
return
number_of_identifiers += 1
line = line[matched.end():]
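# Illustrative sketch (hypothetical declarations) of what the identifier check above
# reports:
#   "int foo_bar;"   -> warned: underscores are not allowed in identifier names
#   "int m_fooBar;"  -> allowed: the "m_"/"s_" prefixes are stripped before checking
#   "size_t l;"      -> warned: a lone 'l' is too easy to confuse with '1'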
def check_for_toFoo_definition(filename, pattern, error):
"""Reports for using static_cast instead of toFoo convenience function.
This function will output warnings to make sure you are actually using
the added toFoo conversion functions rather than directly hard coding
the static_cast<Classname*> call. For example, you should use toHTMLElement(Node*)
to convert Node* to HTMLElement*, instead of static_cast<HTMLElement*>(Node*)
Args:
filename: The name of the header file in which to check for toFoo definition.
pattern: The conversion function pattern to grep for.
error: The function to call with any errors found.
"""
def get_abs_filepath(filename):
fileSystem = FileSystem()
base_dir = fileSystem.path_to_module(FileSystem.__module__).split('WebKit', 1)[0]
base_dir = ''.join((base_dir, 'WebKit/Source'))
for root, _, names in os.walk(base_dir):
if filename in names:
return os.path.join(root, filename)
return None
def grep(lines, pattern, error):
matches = []
function_state = None
for line_number in xrange(lines.num_lines()):
line = (lines.elided[line_number]).rstrip()
try:
if pattern in line:
if not function_state:
function_state = _FunctionState(1)
detect_functions(lines, line_number, function_state, error)
# Exclude the match of dummy conversion function. Dummy function is just to
# catch invalid conversions and shouldn't be part of possible alternatives.
result = re.search(r'%s(\s+)%s' % ("void", pattern), line)
if not result:
matches.append([line, function_state.body_start_position.row, function_state.end_position.row + 1])
function_state = None
except UnicodeDecodeError:
# There should be no non-ASCII characters in the codebase; the only exception
# is comments/copyright text, which might contain them. Hence it is safe to
# catch the UnicodeDecodeError and simply skip the line.
pass
return matches
def check_in_mock_header(filename, matches=None):
if not filename == 'Foo.h':
return False
header_file = None
try:
header_file = CppChecker.fs.read_text_file(filename)
except IOError:
return False
line_number = 0
for line in header_file:
line_number += 1
matched = re.search(r'\btoFoo\b', line)
if matched:
matches.append(['toFoo', line_number, line_number + 3])
return True
# For unit testing only, avoid header search and lookup locally.
matches = []
mock_def_found = check_in_mock_header(filename, matches)
if mock_def_found:
return matches
# Regular style check flow. Search for actual header file & defs.
file_path = get_abs_filepath(filename)
if not file_path:
return None
try:
f = open(file_path)
clean_lines = CleansedLines(f.readlines())
finally:
f.close()
# Make a list of all genuine alternatives to static_cast.
matches = grep(clean_lines, pattern, error)
return matches
def check_for_object_static_cast(processing_file, line_number, line, error):
"""Checks for a Cpp-style static cast on objects by looking for the pattern.
Args:
processing_file: The name of the processing file.
line_number: The number of the line to check.
line: The line of code to check.
error: The function to call with any errors found.
"""
matched = search(r'\bstatic_cast<(\s*\w*:?:?\w+\s*\*+\s*)>', line)
if not matched:
return
class_name = re.sub(r'[\*]', '', matched.group(1))
class_name = class_name.strip()
# Ignore (for now) when the cast is to void*.
if class_name == 'void':
return
namespace_pos = class_name.find(':')
if not namespace_pos == -1:
class_name = class_name[namespace_pos + 2:]
header_file = ''.join((class_name, '.h'))
matches = check_for_toFoo_definition(header_file, ''.join(('to', class_name)), error)
# Ignore (for now) if not able to find the header where toFoo might be defined.
# TODO: Handle cases where Classname might be defined in some other header or cpp file.
if matches is None:
return
report_error = True
# Ensure found static_cast instance is not from within toFoo definition itself.
if os.path.basename(processing_file) == header_file:
for item in matches:
if line_number in range(item[1], item[2]):
report_error = False
break
if report_error:
if len(matches):
# toFoo is defined - enforce using it.
# TODO: Suggest an appropriate toFoo from the alternatives present in matches.
error(line_number, 'runtime/casting', 4,
'static_cast of class objects is not allowed. Use to%s defined in %s.' %
(class_name, header_file))
else:
# No toFoo defined - enforce definition & usage.
# TODO: Automate the generation of toFoo() to avoid any slippages ever.
error(line_number, 'runtime/casting', 4,
'static_cast of class objects is not allowed. Add to%s in %s and use it instead.' %
(class_name, header_file))
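# Illustrative sketch (class and variable names are hypothetical): for a line such as
#   static_cast<HTMLElement*>(node)
# the check above looks for a toHTMLElement() helper declared in HTMLElement.h and,
# depending on whether one exists, either suggests using it or asks for it to be added.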
def check_c_style_cast(line_number, line, raw_line, cast_type, pattern,
error):
"""Checks for a C-style cast by looking for the pattern.
This also handles sizeof(type) warnings, due to similarity of content.
Args:
line_number: The number of the line to check.
line: The line of code to check.
raw_line: The raw line of code to check, with comments.
cast_type: The string for the C++ cast to recommend. This is either
reinterpret_cast or static_cast, depending.
pattern: The regular expression used to find C-style casts.
error: The function to call with any errors found.
"""
matched = search(pattern, line)
if not matched:
return
# e.g., sizeof(int)
sizeof_match = match(r'.*sizeof\s*$', line[0:matched.start(1) - 1])
if sizeof_match:
error(line_number, 'runtime/sizeof', 1,
'Using sizeof(type). Use sizeof(varname) instead if possible')
return
remainder = line[matched.end(0):]
# The close paren is for function pointers as arguments to a function.
# eg, void foo(void (*bar)(int));
# The semicolon check is a more basic function check; also possibly a
# function pointer typedef.
# eg, void foo(int); or void foo(int) const;
# The equals check is for function pointer assignment.
# eg, void *(*foo)(int) = ...
#
# Right now, this will only catch cases where there's a single argument, and
# it's unnamed. It should probably be expanded to check for multiple
# arguments with some unnamed.
function_match = match(r'\s*(\)|=|(const)?\s*(;|\{|throw\(\)))', remainder)
if function_match:
if (not function_match.group(3)
or function_match.group(3) == ';'
or raw_line.find('/*') < 0):
error(line_number, 'readability/function', 3,
'All parameters should be named in a function')
return
# At this point, all that should be left is actual casts.
error(line_number, 'readability/casting', 4,
'Using C-style cast. Use %s<%s>(...) instead' %
(cast_type, matched.group(1)))
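# Illustrative sketch (hypothetical lines) of what check_c_style_cast reports when
# called with a C-style cast pattern and cast_type 'static_cast':
#   "int x = (int)y;"         -> "Using C-style cast. Use static_cast<int>(...) instead"
#   "size_t n = sizeof(int);" -> "Using sizeof(type). Use sizeof(varname) instead if possible"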
_HEADERS_CONTAINING_TEMPLATES = (
('<deque>', ('deque',)),
('<functional>', ('unary_function', 'binary_function',
'plus', 'minus', 'multiplies', 'divides', 'modulus',
'negate',
'equal_to', 'not_equal_to', 'greater', 'less',
'greater_equal', 'less_equal',
'logical_and', 'logical_or', 'logical_not',
'unary_negate', 'not1', 'binary_negate', 'not2',
'bind1st', 'bind2nd',
'pointer_to_unary_function',
'pointer_to_binary_function',
'ptr_fun',
'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
'mem_fun_ref_t',
'const_mem_fun_t', 'const_mem_fun1_t',
'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
'mem_fun_ref',
)),
('<limits>', ('numeric_limits',)),
('<list>', ('list',)),
('<map>', ('map', 'multimap',)),
('<memory>', ('allocator',)),
('<queue>', ('queue', 'priority_queue',)),
('<set>', ('set', 'multiset',)),
('<stack>', ('stack',)),
('<string>', ('char_traits', 'basic_string',)),
('<utility>', ('pair',)),
('<vector>', ('vector',)),
# gcc extensions.
# Note: std::hash is their hash, ::hash is our hash
('<hash_map>', ('hash_map', 'hash_multimap',)),
('<hash_set>', ('hash_set', 'hash_multiset',)),
('<slist>', ('slist',)),
)
_HEADERS_ACCEPTED_BUT_NOT_PROMOTED = {
# We can trust with reasonable confidence that map gives us pair<>, too.
'pair<>': ('map', 'multimap', 'hash_map', 'hash_multimap')
}
_RE_PATTERN_STRING = re.compile(r'\bstring\b')
_re_pattern_algorithm_header = []
for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
'transform'):
# Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
# type::max().
_re_pattern_algorithm_header.append(
(re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
_template,
'<algorithm>'))
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
for _template in _templates:
_re_pattern_templates.append(
(re.compile(r'(\<|\b)' + _template + r'\s*\<'),
_template + '<>',
_header))
def files_belong_to_same_module(filename_cpp, filename_h):
"""Check if these two filenames belong to the same module.
The concept of a 'module' here is as follows:
foo.h, foo-inl.h, foo.cpp, foo_test.cpp and foo_unittest.cpp belong to the
same 'module' if they are in the same directory.
some/path/public/xyzzy and some/path/internal/xyzzy are also considered
to belong to the same module here.
If the filename_cpp contains a longer path than the filename_h, for example,
'/absolute/path/to/base/sysinfo.cpp', and this file would include
'base/sysinfo.h', this function also produces the prefix needed to open the
header. This is used by the caller of this function to more robustly open the
header file. We don't have access to the real include paths in this context,
so we need this guesswork here.
Known bugs: tools/base/bar.cpp and base/bar.h belong to the same module
according to this implementation. Because of this, this function gives
some false positives. This should be sufficiently rare in practice.
Args:
filename_cpp: is the path for the .cpp file
filename_h: is the path for the header path
Returns:
Tuple with a bool and a string:
bool: True if filename_cpp and filename_h belong to the same module.
string: the additional prefix needed to open the header file.
"""
if not filename_cpp.endswith('.cpp'):
return (False, '')
filename_cpp = filename_cpp[:-len('.cpp')]
if filename_cpp.endswith('_unittest'):
filename_cpp = filename_cpp[:-len('_unittest')]
elif filename_cpp.endswith('_test'):
filename_cpp = filename_cpp[:-len('_test')]
filename_cpp = filename_cpp.replace('/public/', '/')
filename_cpp = filename_cpp.replace('/internal/', '/')
if not filename_h.endswith('.h'):
return (False, '')
filename_h = filename_h[:-len('.h')]
if filename_h.endswith('-inl'):
filename_h = filename_h[:-len('-inl')]
filename_h = filename_h.replace('/public/', '/')
filename_h = filename_h.replace('/internal/', '/')
files_belong_to_same_module = filename_cpp.endswith(filename_h)
common_path = ''
if files_belong_to_same_module:
common_path = filename_cpp[:-len(filename_h)]
return files_belong_to_same_module, common_path
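# Illustrative sketch (hypothetical paths) of the expected return values:
#   files_belong_to_same_module('/abs/path/to/base/sysinfo.cpp', 'base/sysinfo.h')
#       -> (True, '/abs/path/to/')
#   files_belong_to_same_module('foo.cpp', 'bar.h')
#       -> (False, '')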
def update_include_state(filename, include_state):
"""Fill up the include_state with new includes found from the file.
Args:
filename: the name of the header to read.
include_state: an _IncludeState instance in which the headers are inserted.
Returns:
True if a header was successfully added. False otherwise.
"""
header_file = None
try:
header_file = CppChecker.fs.read_text_file(filename)
except IOError:
return False
line_number = 0
for line in header_file:
line_number += 1
clean_line = cleanse_comments(line)
matched = _RE_PATTERN_INCLUDE.search(clean_line)
if matched:
include = matched.group(2)
# The value formatting is cute, but not really used right now.
# What matters here is that the key is in include_state.
include_state.setdefault(include, '%s:%d' % (filename, line_number))
return True
def check_for_include_what_you_use(filename, clean_lines, include_state, error):
"""Reports for missing stl includes.
This function will output warnings to make sure you are including the headers
necessary for the stl containers and functions that you use. We only give one
reason to include a header. For example, if you use both equal_to<> and
less<> in a .h file, only one (the latter in the file) of these will be
reported as a reason to include the <functional>.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
include_state: An _IncludeState instance.
error: The function to call with any errors found.
"""
required = {} # A map of header name to line_number and the template entity.
# Example of required: { '<functional>': (1219, 'less<>') }
for line_number in xrange(clean_lines.num_lines()):
line = clean_lines.elided[line_number]
if not line or line[0] == '#':
continue
# String is special -- it is a non-templatized type in STL.
if _RE_PATTERN_STRING.search(line):
required['<string>'] = (line_number, 'string')
for pattern, template, header in _re_pattern_algorithm_header:
if pattern.search(line):
required[header] = (line_number, template)
# The following function is just a speed up, no semantics are changed.
if not '<' in line: # Reduces the cpu time usage by skipping lines.
continue
for pattern, template, header in _re_pattern_templates:
if pattern.search(line):
required[header] = (line_number, template)
# The policy is that if you #include something in foo.h you don't need to
# include it again in foo.cpp. Here, we will look at possible includes.
# Let's copy the include_state so it is only messed up within this function.
include_state = include_state.copy()
# Did we find the header for this file (if any) and successfully load it?
header_found = False
# Use the absolute path so that matching works properly.
abs_filename = os.path.abspath(filename)
# For Emacs's flymake.
# If cpp_style is invoked from Emacs's flymake, a temporary file is generated
# by flymake and that file name might end with '_flymake.cpp'. In that case,
# restore original file name here so that the corresponding header file can be
# found.
# e.g. If the file name is 'foo_flymake.cpp', we should search for 'foo.h'
# instead of 'foo_flymake.h'
abs_filename = re.sub(r'_flymake\.cpp$', '.cpp', abs_filename)
# include_state is modified during iteration, so we iterate over a copy of
# the keys.
for header in include_state.keys(): # NOLINT
(same_module, common_path) = files_belong_to_same_module(abs_filename, header)
fullpath = common_path + header
if same_module and update_include_state(fullpath, include_state):
header_found = True
# If we can't find the header file for a .cpp, assume it's because we don't
# know where to look. In that case we'll give up as we're not sure they
# didn't include it in the .h file.
# FIXME: Do a better job of finding .h files so we are confident that
# not having the .h file means there isn't one.
if filename.endswith('.cpp') and not header_found:
return
# All the lines have been processed, report the errors found.
for required_header_unstripped in required:
template = required[required_header_unstripped][1]
if template in _HEADERS_ACCEPTED_BUT_NOT_PROMOTED:
headers = _HEADERS_ACCEPTED_BUT_NOT_PROMOTED[template]
if [True for header in headers if header in include_state]:
continue
if required_header_unstripped.strip('<>"') not in include_state:
error(required[required_header_unstripped][0],
'build/include_what_you_use', 4,
'Add #include ' + required_header_unstripped + ' for ' + template)
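# Illustrative sketch (hypothetical file) of the include-what-you-use check above: a
# .cpp file that uses std::string or less<> without including <string> or <functional>,
# either directly or via its own header, gets an error such as
#   "Add #include <string> for string"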
def process_line(filename, file_extension,
clean_lines, line, include_state, function_state,
class_state, file_state, enum_state, error):
"""Processes a single line in the file.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
clean_lines: An array of strings, each representing a line of the file,
with comments stripped.
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
class_state: A _ClassState instance which maintains information about
the current stack of nested class declarations being parsed.
file_state: A _FileState instance which maintains information about
the state of things in the file.
enum_state: A _EnumState instance which maintains an enum declaration
state.
error: A callable to which errors are reported, which takes arguments:
line number, error level, and message
"""
raw_lines = clean_lines.raw_lines
detect_functions(clean_lines, line, function_state, error)
check_for_function_lengths(clean_lines, line, function_state, error)
if search(r'\bNOLINT\b', raw_lines[line]): # ignore nolint lines
return
if match(r'\s*\b__asm\b', raw_lines[line]): # Ignore asm lines as they format differently.
return
check_function_definition(filename, file_extension, clean_lines, line, function_state, error)
check_pass_ptr_usage(clean_lines, line, function_state, error)
check_for_leaky_patterns(clean_lines, line, function_state, error)
check_for_multiline_comments_and_strings(clean_lines, line, error)
check_style(clean_lines, line, file_extension, class_state, file_state, enum_state, error)
check_language(filename, clean_lines, line, file_extension, include_state,
file_state, error)
check_for_non_standard_constructs(clean_lines, line, class_state, error)
check_posix_threading(clean_lines, line, error)
check_invalid_increment(clean_lines, line, error)
check_conditional_and_loop_bodies_for_brace_violations(clean_lines, line, error)
check_redundant_virtual(clean_lines, line, error)
check_redundant_override(clean_lines, line, error)
def _process_lines(filename, file_extension, lines, error, min_confidence):
"""Performs lint checks and reports any errors to the given error function.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
error: A callable to which errors are reported, which takes 4 arguments:
line number, error category, confidence level, and message.
"""
lines = (['// marker so line numbers and indices both start at 1'] + lines +
['// marker so line numbers end in a known way'])
include_state = _IncludeState()
function_state = _FunctionState(min_confidence)
class_state = _ClassState()
check_for_copyright(lines, error)
if file_extension == 'h':
check_for_header_guard(filename, lines, error)
remove_multi_line_comments(lines, error)
clean_lines = CleansedLines(lines)
file_state = _FileState(clean_lines, file_extension)
enum_state = _EnumState()
for line in xrange(clean_lines.num_lines()):
process_line(filename, file_extension, clean_lines, line,
include_state, function_state, class_state, file_state,
enum_state, error)
class_state.check_finished(error)
check_for_include_what_you_use(filename, clean_lines, include_state, error)
# We check here rather than inside process_line so that we see raw
# lines rather than "cleaned" lines.
check_for_unicode_replacement_characters(lines, error)
check_for_new_line_at_eof(lines, error)
class CppChecker(object):
"""Processes C++ lines for checking style."""
# This list is used to--
#
# (1) generate an explicit list of all possible categories,
# (2) unit test that all checked categories have valid names, and
# (3) unit test that all categories are getting unit tested.
#
categories = set([
'build/class',
'build/deprecated',
'build/endif_comment',
'build/forward_decl',
'build/header_guard',
'build/include',
'build/include_order',
'build/include_what_you_use',
'build/namespaces',
'build/printf_format',
'build/storage_class',
'build/using_std',
'legal/copyright',
'readability/braces',
'readability/casting',
'readability/check',
'readability/comparison_to_boolean',
'readability/constructors',
'readability/control_flow',
'readability/enum_casing',
'readability/fn_size',
'readability/function',
# TODO(dcheng): Turn on the clang plugin checks and remove this.
'readability/inheritance',
'readability/multiline_comment',
'readability/multiline_string',
'readability/parameter_name',
'readability/naming',
'readability/naming/underscores',
'readability/null',
'readability/pass_ptr',
'readability/streams',
'readability/templatebrackets',
'readability/todo',
'readability/utf8',
'readability/webkit_export',
'runtime/arrays',
'runtime/bitfields',
'runtime/casting',
'runtime/ctype_function',
'runtime/explicit',
'runtime/init',
'runtime/int',
'runtime/invalid_increment',
'runtime/leaky_pattern',
'runtime/max_min_macros',
'runtime/memset',
'runtime/printf',
'runtime/printf_format',
'runtime/references',
'runtime/rtti',
'runtime/sizeof',
'runtime/string',
'runtime/threadsafe_fn',
'runtime/unsigned',
'runtime/virtual',
'whitespace/braces',
'whitespace/ending_newline',
'whitespace/semicolon',
])
fs = None
def __init__(self, file_path, file_extension, handle_style_error,
min_confidence, fs=None):
"""Create a CppChecker instance.
Args:
file_extension: A string that is the file extension, without
the leading dot.
"""
self.file_extension = file_extension
self.file_path = file_path
self.handle_style_error = handle_style_error
self.min_confidence = min_confidence
CppChecker.fs = fs or FileSystem()
# Useful for unit testing.
def __eq__(self, other):
"""Return whether this CppChecker instance is equal to another."""
if self.file_extension != other.file_extension:
return False
if self.file_path != other.file_path:
return False
if self.handle_style_error != other.handle_style_error:
return False
if self.min_confidence != other.min_confidence:
return False
return True
# Useful for unit testing.
def __ne__(self, other):
# Python does not automatically deduce __ne__() from __eq__().
return not self.__eq__(other)
def check(self, lines):
_process_lines(self.file_path, self.file_extension, lines,
self.handle_style_error, self.min_confidence)
# FIXME: Remove this function (requires refactoring unit tests).
def process_file_data(filename, file_extension, lines, error, min_confidence, fs=None):
checker = CppChecker(filename, file_extension, error, min_confidence, fs)
checker.check(lines)
|
[
"[email protected]"
] | |
d83bf71c167c90ccf97b5d889705125b50d33234
|
48e124e97cc776feb0ad6d17b9ef1dfa24e2e474
|
/sdk/python/pulumi_azure_native/connectedvmwarevsphere/v20201001preview/cluster.py
|
3f7309627667f0124ded506d0be8c84e7c474fd4
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
bpkgoud/pulumi-azure-native
|
0817502630062efbc35134410c4a784b61a4736d
|
a3215fe1b87fba69294f248017b1591767c2b96c
|
refs/heads/master
| 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 18,192 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ClusterArgs', 'Cluster']
@pulumi.input_type
class ClusterArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
cluster_name: Optional[pulumi.Input[str]] = None,
extended_location: Optional[pulumi.Input['ExtendedLocationArgs']] = None,
inventory_item_id: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
mo_ref_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
v_center_id: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Cluster resource.
:param pulumi.Input[str] resource_group_name: The Resource Group Name.
:param pulumi.Input[str] cluster_name: Name of the cluster.
:param pulumi.Input['ExtendedLocationArgs'] extended_location: Gets or sets the extended location.
:param pulumi.Input[str] inventory_item_id: Gets or sets the inventory Item ID for the cluster.
:param pulumi.Input[str] kind: Metadata used by portal/tooling/etc to render different UX experiences for resources of the same type; e.g. ApiApps are a kind of Microsoft.Web/sites type. If supported, the resource provider must validate and persist this value.
:param pulumi.Input[str] location: Gets or sets the location.
:param pulumi.Input[str] mo_ref_id: Gets or sets the vCenter MoRef (Managed Object Reference) ID for the cluster.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Gets or sets the Resource tags.
:param pulumi.Input[str] v_center_id: Gets or sets the ARM Id of the vCenter resource in which this cluster resides.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if cluster_name is not None:
pulumi.set(__self__, "cluster_name", cluster_name)
if extended_location is not None:
pulumi.set(__self__, "extended_location", extended_location)
if inventory_item_id is not None:
pulumi.set(__self__, "inventory_item_id", inventory_item_id)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if location is not None:
pulumi.set(__self__, "location", location)
if mo_ref_id is not None:
pulumi.set(__self__, "mo_ref_id", mo_ref_id)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if v_center_id is not None:
pulumi.set(__self__, "v_center_id", v_center_id)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The Resource Group Name.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="clusterName")
def cluster_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the cluster.
"""
return pulumi.get(self, "cluster_name")
@cluster_name.setter
def cluster_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cluster_name", value)
@property
@pulumi.getter(name="extendedLocation")
def extended_location(self) -> Optional[pulumi.Input['ExtendedLocationArgs']]:
"""
Gets or sets the extended location.
"""
return pulumi.get(self, "extended_location")
@extended_location.setter
def extended_location(self, value: Optional[pulumi.Input['ExtendedLocationArgs']]):
pulumi.set(self, "extended_location", value)
@property
@pulumi.getter(name="inventoryItemId")
def inventory_item_id(self) -> Optional[pulumi.Input[str]]:
"""
Gets or sets the inventory Item ID for the cluster.
"""
return pulumi.get(self, "inventory_item_id")
@inventory_item_id.setter
def inventory_item_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "inventory_item_id", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Metadata used by portal/tooling/etc to render different UX experiences for resources of the same type; e.g. ApiApps are a kind of Microsoft.Web/sites type. If supported, the resource provider must validate and persist this value.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Gets or sets the location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="moRefId")
def mo_ref_id(self) -> Optional[pulumi.Input[str]]:
"""
Gets or sets the vCenter MoRef (Managed Object Reference) ID for the cluster.
"""
return pulumi.get(self, "mo_ref_id")
@mo_ref_id.setter
def mo_ref_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mo_ref_id", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Gets or sets the Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="vCenterId")
def v_center_id(self) -> Optional[pulumi.Input[str]]:
"""
Gets or sets the ARM Id of the vCenter resource in which this cluster resides.
"""
return pulumi.get(self, "v_center_id")
@v_center_id.setter
def v_center_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "v_center_id", value)
class Cluster(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
extended_location: Optional[pulumi.Input[pulumi.InputType['ExtendedLocationArgs']]] = None,
inventory_item_id: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
mo_ref_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
v_center_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Define the cluster.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] cluster_name: Name of the cluster.
:param pulumi.Input[pulumi.InputType['ExtendedLocationArgs']] extended_location: Gets or sets the extended location.
:param pulumi.Input[str] inventory_item_id: Gets or sets the inventory Item ID for the cluster.
:param pulumi.Input[str] kind: Metadata used by portal/tooling/etc to render different UX experiences for resources of the same type; e.g. ApiApps are a kind of Microsoft.Web/sites type. If supported, the resource provider must validate and persist this value.
:param pulumi.Input[str] location: Gets or sets the location.
:param pulumi.Input[str] mo_ref_id: Gets or sets the vCenter MoRef (Managed Object Reference) ID for the cluster.
:param pulumi.Input[str] resource_group_name: The Resource Group Name.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Gets or sets the Resource tags.
:param pulumi.Input[str] v_center_id: Gets or sets the ARM Id of the vCenter resource in which this cluster resides.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ClusterArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Define the cluster.
:param str resource_name: The name of the resource.
:param ClusterArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ClusterArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
extended_location: Optional[pulumi.Input[pulumi.InputType['ExtendedLocationArgs']]] = None,
inventory_item_id: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
mo_ref_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
v_center_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ClusterArgs.__new__(ClusterArgs)
__props__.__dict__["cluster_name"] = cluster_name
__props__.__dict__["extended_location"] = extended_location
__props__.__dict__["inventory_item_id"] = inventory_item_id
__props__.__dict__["kind"] = kind
__props__.__dict__["location"] = location
__props__.__dict__["mo_ref_id"] = mo_ref_id
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
__props__.__dict__["v_center_id"] = v_center_id
__props__.__dict__["custom_resource_name"] = None
__props__.__dict__["datastore_ids"] = None
__props__.__dict__["mo_name"] = None
__props__.__dict__["name"] = None
__props__.__dict__["network_ids"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["statuses"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
__props__.__dict__["uuid"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:connectedvmwarevsphere:Cluster")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Cluster, __self__).__init__(
'azure-native:connectedvmwarevsphere/v20201001preview:Cluster',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Cluster':
"""
Get an existing Cluster resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ClusterArgs.__new__(ClusterArgs)
__props__.__dict__["custom_resource_name"] = None
__props__.__dict__["datastore_ids"] = None
__props__.__dict__["extended_location"] = None
__props__.__dict__["inventory_item_id"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["location"] = None
__props__.__dict__["mo_name"] = None
__props__.__dict__["mo_ref_id"] = None
__props__.__dict__["name"] = None
__props__.__dict__["network_ids"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["statuses"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
__props__.__dict__["uuid"] = None
__props__.__dict__["v_center_id"] = None
return Cluster(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="customResourceName")
def custom_resource_name(self) -> pulumi.Output[str]:
"""
Gets the name of the corresponding resource in Kubernetes.
"""
return pulumi.get(self, "custom_resource_name")
@property
@pulumi.getter(name="datastoreIds")
def datastore_ids(self) -> pulumi.Output[Sequence[str]]:
"""
Gets or sets the datastore ARM ids.
"""
return pulumi.get(self, "datastore_ids")
@property
@pulumi.getter(name="extendedLocation")
def extended_location(self) -> pulumi.Output[Optional['outputs.ExtendedLocationResponse']]:
"""
Gets or sets the extended location.
"""
return pulumi.get(self, "extended_location")
@property
@pulumi.getter(name="inventoryItemId")
def inventory_item_id(self) -> pulumi.Output[Optional[str]]:
"""
Gets or sets the inventory Item ID for the cluster.
"""
return pulumi.get(self, "inventory_item_id")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Metadata used by portal/tooling/etc to render different UX experiences for resources of the same type; e.g. ApiApps are a kind of Microsoft.Web/sites type. If supported, the resource provider must validate and persist this value.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Gets or sets the location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="moName")
def mo_name(self) -> pulumi.Output[str]:
"""
Gets or sets the vCenter Managed Object name for the cluster.
"""
return pulumi.get(self, "mo_name")
@property
@pulumi.getter(name="moRefId")
def mo_ref_id(self) -> pulumi.Output[Optional[str]]:
"""
Gets or sets the vCenter MoRef (Managed Object Reference) ID for the cluster.
"""
return pulumi.get(self, "mo_ref_id")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Gets or sets the name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkIds")
def network_ids(self) -> pulumi.Output[Sequence[str]]:
"""
Gets or sets the network ARM ids.
"""
return pulumi.get(self, "network_ids")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
Gets or sets the provisioning state.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def statuses(self) -> pulumi.Output[Sequence['outputs.ResourceStatusResponse']]:
"""
The resource status information.
"""
return pulumi.get(self, "statuses")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
The system data.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Gets or sets the Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Gets or sets the type of the resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def uuid(self) -> pulumi.Output[str]:
"""
Gets or sets a unique identifier for this resource.
"""
return pulumi.get(self, "uuid")
@property
@pulumi.getter(name="vCenterId")
def v_center_id(self) -> pulumi.Output[Optional[str]]:
"""
Gets or sets the ARM Id of the vCenter resource in which this cluster resides.
"""
return pulumi.get(self, "v_center_id")
|
[
"[email protected]"
] | |
a3885d1f776f0b334e529ebd57270412a8f1d539
|
6e57bdc0a6cd18f9f546559875256c4570256c45
|
/cts/apps/CameraITS/tests/scene1/test_linearity.py
|
1f4aa142984e847efe3d0edd3bdeaca2436c1d99
|
[] |
no_license
|
dongdong331/test
|
969d6e945f7f21a5819cd1d5f536d12c552e825c
|
2ba7bcea4f9d9715cbb1c4e69271f7b185a0786e
|
refs/heads/master
| 2023-03-07T06:56:55.210503 | 2020-12-07T04:15:33 | 2020-12-07T04:15:33 | 134,398,935 | 2 | 1 | null | 2022-11-21T07:53:41 | 2018-05-22T10:26:42 | null |
UTF-8
|
Python
| false | false | 4,201 |
py
|
# Copyright 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import its.image
import its.caps
import its.device
import its.objects
import its.target
import numpy
import math
from matplotlib import pylab
import os.path
import matplotlib
import matplotlib.pyplot
NAME = os.path.basename(__file__).split('.')[0]
RESIDUAL_THRESHOLD = 0.0003 # approximately each sample is off by 2/255
# The HAL3.2 spec requires that curves up to 64 control points in length
# must be supported.
L = 64
LM1 = float(L-1)
def main():
"""Test that device processing can be inverted to linear pixels.
Captures a sequence of shots with the device pointed at a uniform
target. Attempts to invert all the ISP processing to get back to
linear R,G,B pixel data.
"""
gamma_lut = numpy.array(
sum([[i/LM1, math.pow(i/LM1, 1/2.2)] for i in xrange(L)], []))
inv_gamma_lut = numpy.array(
sum([[i/LM1, math.pow(i/LM1, 2.2)] for i in xrange(L)], []))
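# Note (my reading of the code above): each LUT is a flattened list of interleaved
# control points [x0, y0, x1, y1, ...] over [0, 1]; gamma_lut applies a 1/2.2 gamma
# while inv_gamma_lut applies the inverse 2.2 power, which is later used to map the
# captured images back to (approximately) linear pixel values.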
with its.device.ItsSession() as cam:
props = cam.get_camera_properties()
its.caps.skip_unless(its.caps.compute_target_exposure(props) and
its.caps.per_frame_control(props))
debug = its.caps.debug_mode()
largest_yuv = its.objects.get_largest_yuv_format(props)
if debug:
fmt = largest_yuv
else:
match_ar = (largest_yuv['width'], largest_yuv['height'])
fmt = its.objects.get_smallest_yuv_format(props, match_ar=match_ar)
e,s = its.target.get_target_exposure_combos(cam)["midSensitivity"]
s /= 2
sens_range = props['android.sensor.info.sensitivityRange']
sensitivities = [s*1.0/3.0, s*2.0/3.0, s, s*4.0/3.0, s*5.0/3.0]
sensitivities = [s for s in sensitivities
if s > sens_range[0] and s < sens_range[1]]
req = its.objects.manual_capture_request(0, e)
req['android.blackLevel.lock'] = True
req['android.tonemap.mode'] = 0
req['android.tonemap.curve'] = {
'red': gamma_lut.tolist(),
'green': gamma_lut.tolist(),
'blue': gamma_lut.tolist()}
r_means = []
g_means = []
b_means = []
for sens in sensitivities:
req["android.sensor.sensitivity"] = sens
cap = cam.do_capture(req, fmt)
img = its.image.convert_capture_to_rgb_image(cap)
its.image.write_image(
img, '%s_sens=%04d.jpg' % (NAME, sens))
img = its.image.apply_lut_to_image(img, inv_gamma_lut[1::2] * LM1)
tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
rgb_means = its.image.compute_image_means(tile)
r_means.append(rgb_means[0])
g_means.append(rgb_means[1])
b_means.append(rgb_means[2])
pylab.title(NAME)
pylab.plot(sensitivities, r_means, '-ro')
pylab.plot(sensitivities, g_means, '-go')
pylab.plot(sensitivities, b_means, '-bo')
pylab.xlim([sens_range[0], sens_range[1]/2])
pylab.ylim([0, 1])
pylab.xlabel('sensitivity(ISO)')
pylab.ylabel('RGB avg [0, 1]')
matplotlib.pyplot.savefig('%s_plot_means.png' % (NAME))
# Check that each plot is actually linear.
for means in [r_means, g_means, b_means]:
line, residuals, _, _, _ = numpy.polyfit(range(len(sensitivities)),
means, 1, full=True)
print 'Line: m=%f, b=%f, resid=%f'%(line[0], line[1], residuals[0])
assert residuals[0] < RESIDUAL_THRESHOLD
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
b84a33a09205fab4a4ef2fd167efdb65ffba572a
|
96c4e333510091368070e2a655b11d09e41bef5a
|
/favo/admin.py
|
ca09258fbbc3aa3185380cb52be5fb6876d2755a
|
[] |
no_license
|
Tuss4/trickfeed.django
|
dced804b72ed0b6ac1917d17061afc0f1503c4b1
|
948f3dd9f2fb9929c61fd47b42ada400772327aa
|
refs/heads/master
| 2016-08-05T06:00:35.729887 | 2013-07-01T14:25:46 | 2013-07-01T14:25:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 89 |
py
|
from django.contrib import admin
from favo.models import *
admin.site.register(Favorite)
|
[
"[email protected]"
] | |
f2c806b3579aa78fdc0528554e68e4e95bfd6ba4
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03745/s708529071.py
|
fda6321c5f6a0b6b2235ba3390bb482ce3d143d3
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 356 |
py
|
from collections import deque
n = int(input())
a = list(map(int,input().split()))
d = deque(a)
tmp = []
cnt = 0
while d:
v = d.popleft()
if len(tmp)<=1:
pass
else:
if not (v >= tmp[-1] >= tmp[-2] >= tmp[0] or v <= tmp[-1] <= tmp[-2] <= tmp[0]):
tmp = []
cnt += 1
tmp.append(v)
# print(d,tmp,cnt)
if tmp:
cnt+=1
print(cnt)
|
[
"[email protected]"
] | |
cf0b16781cb06a5c1d1a297c66310a9e41261b13
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part004569.py
|
3176b40d2fc0c98546282a3dac165244dfbd02f6
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,300 |
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher82715(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.2.1.4.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.2.1.4.1.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher82715._instance is None:
CommutativeMatcher82715._instance = CommutativeMatcher82715()
return CommutativeMatcher82715._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 82714
return
yield
from collections import deque
|
[
"[email protected]"
] | |
af83d06559cbd367b6208cb44f53dde62ca2c08b
|
212ccad4e9f19fdcdf7d6b15b16eb3724d18c363
|
/bioprocs/scripts/cnvkit/pCNVkitSeg.py
|
a442568d6a62de0baec24d3fa06b2e9a0d88e1e1
|
[
"MIT"
] |
permissive
|
LeaveYeah/bioprocs
|
997792add2150467f668f42bea57d195ec7db9ff
|
c5d2ddcc837f5baee00faf100e7e9bd84222cfbf
|
refs/heads/master
| 2020-04-16T16:48:15.924979 | 2019-02-15T23:02:52 | 2019-02-15T23:02:52 | 165,750,151 | 0 | 0 |
MIT
| 2019-02-15T23:02:53 | 2019-01-14T23:14:24 |
HTML
|
UTF-8
|
Python
| false | false | 521 |
py
|
from pyppl import Box
from bioprocs.utils import shell
cnvkit = {{args.cnvkit | quote}}
infile = {{i.infile | quote}}
outfile = {{o.outfile | quote}}
nthread = {{args.nthread | repr}}
params = {{args.params}}
shell.TOOLS['cnvkit'] = cnvkit
envs = dict(
OPENBLAS_NUM_THREADS = 1,
OMP_NUM_THREADS = 1,
NUMEXPR_NUM_THREADS = 1,
MKL_NUM_THREADS = 1
)
ckshell = shell.Shell(subcmd = True, equal = ' ', envs = envs).cnvkit
params.o = outfile
params.p = nthread
ckshell.segment(infile, **params).run()
|
[
"[email protected]"
] | |
f20293425cb4e9ee0276d3820ee193b9e800b864
|
01fdd206c8c825b30870bdd3f6e75f0aa113b849
|
/test/record/parser/test_response_whois_isoc_org_il_status_available.py
|
77cdabf8416280afc5c848dd46cffbb481885417
|
[
"MIT"
] |
permissive
|
huyphan/pyyawhois
|
0fbc5a7d64a53ae6e3393fdc1c7ff0d0ac5f22b5
|
77fb2f73a9c67989f1d41d98f37037406a69d136
|
refs/heads/master
| 2021-01-23T22:42:55.989651 | 2015-09-19T16:40:06 | 2015-09-19T16:40:06 | 23,335,785 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,368 |
py
|
# This file is autogenerated. Do not edit it manually.
# If you want to change the content of this file, edit
#
# spec/fixtures/responses/whois.isoc.org.il/status_available
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisIsocOrgIlStatusAvailable(object):
def setUp(self):
fixture_path = "spec/fixtures/responses/whois.isoc.org.il/status_available.txt"
host = "whois.isoc.org.il"
part = yawhois.record.Part(open(fixture_path, "r").read(), host)
self.record = yawhois.record.Record(None, [part])
def test_status(self):
eq_(self.record.status, 'available')
def test_available(self):
eq_(self.record.available, True)
def test_nameservers(self):
eq_(self.record.nameservers.__class__.__name__, 'list')
eq_(self.record.nameservers, [])
def test_registered(self):
eq_(self.record.registered, False)
def test_created_on(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.created_on)
def test_updated_on(self):
eq_(self.record.updated_on, None)
def test_expires_on(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.expires_on)
|
[
"[email protected]"
] | |
be6a5b9cac41caba2672d3a0aa55352a03c162ab
|
02560440f9f91e583fe98d80ab11e18aa6c7a525
|
/apps/proyectos/migrations/0003_proyecto_estado.py
|
8cdc85e82f4402f242bcf094fc85b50367c9d673
|
[] |
no_license
|
eduardogpg/wamadeusV1
|
a36c89176543e638486009620c5131f46743edbc
|
82d93293dc6afc95a6661f727162f4055ab83a43
|
refs/heads/master
| 2020-12-28T01:57:47.831689 | 2015-01-08T05:14:25 | 2015-01-08T05:14:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 467 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('proyectos', '0002_remove_proyecto_estado'),
]
operations = [
migrations.AddField(
model_name='proyecto',
name='estado',
field=models.OneToOneField(default=0, to='proyectos.Estado'),
preserve_default=False,
),
]
|
[
"[email protected]"
] | |
a958e8c86174eda061fc353adafef21d96d696d5
|
b98e9e5ec77b65bf307b87ea1129d3d778a915af
|
/python/15684_ladder.py
|
6fc8f06fdc00ef951c1ae26bfe1f2500d03d3802
|
[] |
no_license
|
KimYeong-su/Baekjoon
|
8ea5e5fab711d05c0f273e68a849750fdcdbae4b
|
0e56b2cfdf67c0e6ffbbe3119e2ab944d418f919
|
refs/heads/master
| 2021-07-08T11:25:00.019106 | 2021-04-21T12:47:49 | 2021-04-21T12:47:49 | 239,902,643 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 996 |
py
|
import sys
input = sys.stdin.readline
N, M, H = map(int, input().rstrip('\n').split())
if M == 0:
print(0)
exit()
maps = [[False]*N for _ in range(H)]
for _ in range(M):
a, b = map(lambda x: int(x)-1, input().rstrip('\n').split())
maps[a][b] = True
answer = 4
def check():
for s in range(N):
tmp = s
for x in range(H):
if maps[x][tmp]:
tmp += 1
elif tmp > 0 and maps[x][tmp-1]:
tmp -= 1
if s != tmp:
return False
return True
def dfs(cnt, x, y):
global answer
if cnt >= answer:
return
if check():
if answer > cnt:
answer = cnt
return
for i in range(x,H):
k = y if i==x else 0
for j in range(k,N-1):
if not maps[i][j] and not maps[i][j+1]:
maps[i][j] = True
dfs(cnt+1, i, j+2)
maps[i][j] = False
dfs(0,0,0)
print(answer) if answer < 4 else print(-1)
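# Sketch of the approach (my reading of the solution above): check() simulates every
# starting column down through all H rows of rungs and succeeds only if each column
# returns to itself; dfs() tries adding up to 3 extra rungs, pruning branches that
# cannot beat the best answer found so far, and -1 is printed when 4 or more would be needed.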
|
[
"[email protected]"
] | |
b5658157564b48b4a45e26c602cb8f7359e1d74e
|
94120f2c22fb2ff44e47a6a545daa9ecbb95c3eb
|
/Analysis/HiggsTauTau/scripts/compareSystShapes.py
|
63240e650ec30733eb71fa6567095179270af6ee
|
[] |
no_license
|
DebabrataBhowmik/ICHiggsTauTau
|
6d4ad3807209232f58d7310858c83d0ce316b495
|
c9568974a523c41326df069c0efe1ce86ba4166a
|
refs/heads/master
| 2020-03-31T10:20:44.757167 | 2018-10-08T15:46:16 | 2018-10-08T15:46:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,464 |
py
|
import ROOT
import UserCode.ICHiggsTauTau.plotting as plotting
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--datacard', '-d', help= 'Data card')
parser.add_argument('--systematic', '-s', help= 'Name of systematic to make the comparrison plots for')
parser.add_argument('--output', '-o', help= 'Output directory')
args = parser.parse_args()
infile = ROOT.TFile(args.datacard)
chan='mt'
if 'htt_tt.' in args.datacard: chan='tt'
if 'htt_et.' in args.datacard: chan='et'
if 'htt_em.' in args.datacard: chan='em'
to_print=[]
for key in infile.GetListOfKeys():
if isinstance(infile.Get(key.GetName()),ROOT.TDirectory):
dirname=key.GetName()
directory = infile.Get(dirname)
for dirkey in directory.GetListOfKeys():
name = dirkey.GetName()
if 'norm' in name and 'jetFakes' in name: continue
if args.systematic in name and 'Up' in name:
histo_up = directory.Get(name)
if isinstance(histo_up,ROOT.TH1D) or isinstance(histo_up,ROOT.TH1F):
histo_nom = directory.Get(name.replace('_'+args.systematic+'Up',''))
histo_down = directory.Get(name.replace('Up','Down'))
if isinstance(histo_up,ROOT.TDirectory): continue
plot_name = '%s/systs_%s_%s' % (args.output, dirname, name.replace('Up',''))
plotting.CompareSysts([histo_nom,histo_up,histo_down],
plot_name,
dirname+"_"+name.replace('Up',''))
proc=name.replace('_'+args.systematic+'Up','')
noPrint=False
if '0jet' in dirname: binnum=1
elif 'boosted' in dirname and 'dijet' not in dirname: binnum=2
elif 'dijet_loosemjj_lowboost' in dirname: binnum=3
elif 'dijet_loosemjj_boosted' in dirname: binnum=4
elif 'dijet_tightmjj_lowboost' in dirname: binnum=5
elif 'dijet_tightmjj_boosted' in dirname: binnum=6
else: noPrint = True
if '_jhu_' in proc or '_ph_' in proc or '_total_bkg_' in proc or '_ZTT_' in proc or 'plus' in proc or 'minus' in proc: noPrint=True
if histo_nom.Integral() > 0 and not noPrint:
up = histo_up.Integral()/histo_nom.Integral()
down = histo_down.Integral()/histo_nom.Integral()
to_print.append('({\"%s\"}, {%i}, {\"%s\"}, %.3f, %.3f)' % (chan, binnum, proc, down, up))
directory.Close()
infile.Close()
for i in to_print: print i
|
[
"[email protected]"
] | |
378ea714e209d6c5672a433a408cfb3c7ae34d93
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02536/s373980977.py
|
6ff28363fe4056683fdd331524dbb4a7e04042e3
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,120 |
py
|
def main():
def find(target):
if parent[target] < 0:
return target
else:
parent[target] = find(parent[target])
return parent[target]
def is_same(x, y):
return find(x) == find(y)
def union(x, y):
root_x = find(x)
root_y = find(y)
if root_x == root_y:
return
if parent[root_x] > parent[root_y]:
root_x, root_y = root_y, root_x
parent[root_x] += parent[root_y]
parent[root_y] = root_x
# Not used in this solution, but this lets you see which members belong to which group.
def members(n, x):
root = find(x)
return [i for i in range(n) if find(i) == root]
def get_size(x):
return -parent[find(x)]
def get_root():
return [i for i, root in enumerate(parent) if root < 0]
n, m = map(int, input().split())
parent = [-1 for _ in range(n)]
for _ in range(m):
a, b = map(lambda x: int(x) - 1, input().split())
union(a, b)
ans = len(get_root()) - 1
print(ans)
if __name__ == '__main__':
main()
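# Sketch of the idea (my reading of the solution above): after union-ing every given
# pair, each remaining disjoint-set root is one connected component, so joining all
# components into a single one needs (number of roots - 1) additional edges.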
|
[
"[email protected]"
] | |
0bff5d925eabd40deb4c7de2c92931e939a44306
|
89acc97d24548e0393201151975bf7e262949fcb
|
/examples/embed.py
|
32d7d2052e0058bbedbf9feaf377e1606293d8b3
|
[] |
no_license
|
FrankWork/tf-tutorials
|
40b15cc35379ecc03a9f2de1015585c43e5ecc3e
|
bb698f37fc9a3baa5f8e3cddc523d11872794e0f
|
refs/heads/master
| 2022-10-09T21:30:53.851685 | 2018-09-13T06:34:16 | 2018-09-13T06:34:16 | 82,744,146 | 1 | 1 | null | 2022-10-01T07:13:11 | 2017-02-22T01:00:40 |
Python
|
UTF-8
|
Python
| false | false | 619 |
py
|
import tensorflow as tf
import numpy as np
vocab_size = 5
embed_size = 3
with tf.Graph().as_default(), tf.Session() as sess:
# unk = tf.get_variable("unk", shape=[1, embed_size],
# dtype=tf.float32, initializer=tf.ones_initializer())
# embed = [unk]
# embed.append(tf.convert_to_tensor(np.zeros((vocab_size, embed_size)), dtype=tf.float32))
# embed = tf.concat(embed, axis=0, name='concat_embed')
embed = tf.get_variable('embed', initializer=np.ones((vocab_size, embed_size)))
val = tf.trainable_variables()
sess.run(tf.global_variables_initializer())
val_np = sess.run(val)
print(val_np)
|
[
"[email protected]"
] | |
2cd05f45755b2cbdc09fb9ab6925cbcc9782dfc8
|
6c7355ae404490d0ff26c4ec925384242b7e9067
|
/django introduction/demo/migrations/0004_auto_20200820_1622.py
|
711cb98804700c2496efc2395cdbf97aa81b3e14
|
[] |
no_license
|
mahidulmoon/udemy-fullstack-practice
|
7de8946a97224e554b97c490d18e71b0dc969e08
|
e0f9ddd2b4dd3fa5ad486d92b7c9bdac8c77f05f
|
refs/heads/master
| 2022-12-12T01:12:56.188124 | 2020-08-30T17:04:51 | 2020-08-30T17:04:51 | 288,985,619 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 864 |
py
|
# Generated by Django 3.0.6 on 2020-08-20 19:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('demo', '0003_auto_20200820_1147'),
]
operations = [
migrations.CreateModel(
name='BookNumber',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('isbn_10', models.CharField(blank=True, max_length=10)),
('isbn_13', models.CharField(blank=True, max_length=10)),
],
),
migrations.AddField(
model_name='book',
name='number',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='demo.BookNumber'),
),
]
|
[
"[email protected]"
] | |
7bb0b5eb5e3c901c314723c3cb2459ec0ae664d1
|
66b3d81d66491bf6c488f19896661eb7d99a0535
|
/src/sample.py
|
86ab8a34897a0d19f96b760eba326834ca5fe11e
|
[
"LicenseRef-scancode-other-permissive",
"MIT-Modern-Variant",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
quminzi/harfbuzz
|
fa5597ce6619d1ca86c0c86b9099e558872e8b98
|
2cd5323531dcd800549b2cb1cb51d708e72ab2d8
|
refs/heads/master
| 2020-04-06T03:44:34.844047 | 2015-01-07T03:16:38 | 2015-01-07T03:16:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,071 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
from gi.repository import HarfBuzz as hb
from gi.repository import GLib
# Python 2/3 compatibility
try:
unicode
except NameError:
unicode = str
def tounicode(s, encoding='utf-8'):
if not isinstance(s, unicode):
return s.decode(encoding)
else:
return s
fontdata = open (sys.argv[1], 'rb').read ()
blob = hb.glib_blob_create (GLib.Bytes.new (fontdata))
face = hb.face_create (blob, 0)
del blob
font = hb.font_create (face)
upem = hb.face_get_upem (face)
del face
hb.font_set_scale (font, upem, upem)
#hb.ft_font_set_funcs (font)
hb.ot_font_set_funcs (font)
buf = hb.buffer_create ()
hb.buffer_add_utf8 (buf, tounicode("Hello بهداد").encode('utf-8'), 0, -1)
hb.buffer_guess_segment_properties (buf)
hb.shape (font, buf, [])
del font
infos = hb.buffer_get_glyph_infos (buf)
positions = hb.buffer_get_glyph_positions (buf)
for info,pos in zip(infos, positions):
gid = info.codepoint
cluster = info.cluster
advance = pos.x_advance
print(gid, cluster, advance)
|
[
"[email protected]"
] | |
f7e85d419b45b467ec0f9ecb8344fa2b19b9e103
|
62922a76e40003f3d3a7d02282853f9a2b76c6fc
|
/NLP/nltk1.py
|
75d047d7cba31598635ee4accf293af060a56548
|
[] |
no_license
|
cchangcs/ai_learning_record
|
a7d0d9c7fcdc1e97d8869aa7e63b535f8cf62df2
|
235a90ff5fe0205334376a927d462b8ae64e4e70
|
refs/heads/master
| 2020-04-01T16:59:31.203223 | 2018-11-21T11:12:34 | 2018-11-21T11:12:34 | 153,408,023 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 629 |
py
|
import nltk
import jieba
raw = open('liangjian.TXT', 'rb').read()
# The difference between jieba.lcut() and jieba.cut():
# jieba.cut() returns an iterable generator, so you can use a for loop to get each word produced by the segmentation
# jieba.lcut() returns a list directly
text = nltk.text.Text(jieba.lcut(raw))
# Show the contexts in which the word appears (concordance)
print(text.concordance(u'驻岛'))
# Compare usage of near-synonyms: show the shared contexts in which the words appear
print(text.common_contexts(['小日本', '鬼子']))
# Show the most common bigram collocations
print(text.collocations())
# Plot where the words of interest appear in the text (dispersion plot)
text.dispersion_plot(['李云龙', '秀芹'])
|
[
"[email protected]"
] | |
a83c05009a51ae8f8b57f8b7d8da648bc21db2e3
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02731/s681505508.py
|
9a2bdda7173e3c9fcb29134e837f0d34ddc1f593
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 32 |
py
|
N = int(input())
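# (N/3)**3 is the maximum product of three non-negative reals summing to N (by AM-GM)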
print((N/3)**3)
|
[
"[email protected]"
] | |
d16c1374c115f76767b9316071a5126edd9b63bb
|
bad85cd8d547a071baf4b6590f7e81d13ef1ec0d
|
/assistant/weblink_channel/migrations/0019_auto_20200819_0907.py
|
c43e45d76cfb514f560bbdcbba6e6be45084f979
|
[
"MIT"
] |
permissive
|
kapiak/ware_prod
|
92e11671059642e14219d5aa8334e0564403db77
|
ae61256890834c434d2e38cc2ccacf00b638665a
|
refs/heads/master
| 2023-01-06T04:36:43.173093 | 2020-09-21T04:06:51 | 2020-09-21T04:06:51 | 310,320,165 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,794 |
py
|
# Generated by Django 3.1 on 2020-08-19 09:07
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('orders', '0003_auto_20200813_0326'),
('products', '0004_auto_20200813_0203'),
('weblink_channel', '0018_auto_20200819_0648'),
]
operations = [
migrations.AlterField(
model_name='purchaseorder',
name='sales_order',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='weblink_purchase_orders', to='weblink_channel.weblinkorder'),
),
migrations.AlterField(
model_name='purchaseorder',
name='supplier',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='weblink_purchase_orders', to='products.supplier'),
),
migrations.AlterField(
model_name='purchaseorderitem',
name='customer_order_item',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='weblink_purchase_order_item', to='orders.lineitem'),
),
migrations.AlterField(
model_name='purchaseorderitem',
name='purchase_order',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='weblink_items', to='weblink_channel.purchaseorder'),
),
migrations.AlterField(
model_name='purchaseorderitem',
name='sales_order_item',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='web_purchase_order_item', to='weblink_channel.weblinkorderitem'),
),
]
|
[
"[email protected]"
] | |
da5a5a8909a894ea58797a8fdf4371f2e78b6d80
|
a097ecf40fee329cfa9e3f77e4b6e9e29a8f148a
|
/6_section/6_1.py
|
56d5eb74962f87b2a93396b6c14889c23568ce73
|
[] |
no_license
|
FumihisaKobayashi/The_self_taught_python
|
1e7008b17050db3e615c2f3aa68df2edc7f93192
|
329d376689029b75da73a6f98715cc7e83e8cc2c
|
refs/heads/master
| 2021-01-06T16:04:13.382955 | 2020-07-28T14:39:24 | 2020-07-28T14:39:24 | 241,389,313 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 131 |
py
|
# Use triple quotes """ when the string spans multiple lines
""" line1
line2
line3
"""
# Access characters by index with a[int].
a = "AbcMart"
print(a[0])
print(a[1])
|
[
"[email protected]"
] | |
6686bdad5ef3c5ba55c384bccfe5c43b3a94c08c
|
cc08f8eb47ef92839ba1cc0d04a7f6be6c06bd45
|
/Personal/Jaipur2/Jaipur2/urls.py
|
9a5afd8833e795b3f97b7337d736eae3b22cdbdc
|
[] |
no_license
|
ProsenjitKumar/PycharmProjects
|
d90d0e7c2f4adc84e861c12a3fcb9174f15cde17
|
285692394581441ce7b706afa3b7af9e995f1c55
|
refs/heads/master
| 2022-12-13T01:09:55.408985 | 2019-05-08T02:21:47 | 2019-05-08T02:21:47 | 181,052,978 | 1 | 1 | null | 2022-12-08T02:31:17 | 2019-04-12T17:21:59 | null |
UTF-8
|
Python
| false | false | 793 |
py
|
"""Jaipur2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('app.urls')),
]
|
[
"[email protected]"
] | |
a7802d009966a5c9860b3f8685852ec97a59fc37
|
3efa9b57670d318b006d5eec837595683a7cb751
|
/run_tests.py
|
3601df35ca4471c85f461ad30e540d264eb03275
|
[] |
no_license
|
Erotemic/supersetup
|
c76d56eeb1e0a7322604510f8bb27c8faff6d593
|
ab2b75be470b6db524acd74fc6e8235f1ab6f522
|
refs/heads/main
| 2023-08-28T00:27:25.517794 | 2021-11-05T20:24:16 | 2021-11-05T20:24:16 | 376,892,092 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 406 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
if __name__ == '__main__':
import pytest
import sys
package_name = 'supersetup'
pytest_args = [
'--cov-config', '.coveragerc',
'--cov-report', 'html',
'--cov-report', 'term',
'--xdoctest',
'--cov=' + package_name,
]
pytest_args = pytest_args + sys.argv[1:]
sys.exit(pytest.main(pytest_args))
|
[
"[email protected]"
] | |
1950318d50fddf94d54003132392554f9c5b0dac
|
75e24fc71cf0833bb6040fa5037a0523c67d4581
|
/nlplingo/tasks/sequence/bpjson.py
|
24262871b860357be3a865693a02f463871edd05
|
[
"Apache-2.0"
] |
permissive
|
BBN-E/nlplingo
|
53d5ff2aa17d03a1c6db8afc8ed2b0cf683b1c55
|
32ff17b1320937faa3d3ebe727032f4b3e7a353d
|
refs/heads/main
| 2022-12-19T19:28:11.666850 | 2020-10-09T01:16:32 | 2020-10-09T01:16:32 | 302,090,268 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,582 |
py
|
def document_prediction_to_bp_json(documents, corpus_id):
"""
    :type documents: list[DocumentPrediction]
"""
bp = dict()
bp['corpus-id'] = corpus_id
bp['format-type'] = 'bp-corpus'
bp['format-version'] = 'v8f'
bp['entries'] = dict()
for document in documents:
for sentence in document.sentences.values():
entry_id = sentence.id
bp['entries'][entry_id] = dict()
bp['entries'][entry_id]['doc-id'] = entry_id.split('_')[0]
bp['entries'][entry_id]['sent-id'] = entry_id.split('_')[1]
bp['entries'][entry_id]['entry-id'] = entry_id
bp['entries'][entry_id]['segment-type'] = 'sentence'
bp['entries'][entry_id]['segment-text'] = sentence.text
bp['entries'][entry_id]['annotation-sets'] = dict()
bp['entries'][entry_id]['annotation-sets']['abstract-events'] = dict()
bp['entries'][entry_id]['annotation-sets']['abstract-events']['events'] = dict()
bp['entries'][entry_id]['annotation-sets']['abstract-events']['span-sets'] = dict()
spans = set()
# first collect all the spans from triggers and arguments
for event in sentence.events.values():
spans.add(event.trigger.text)
spans.update(argument.text for argument in event.arguments.values())
span_to_id = dict()
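            # Assign deterministic span-set ids (ss-1, ss-2, ...) by sorting the span strings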
for i, span in enumerate(sorted(spans)):
span_to_id[span] = 'ss-{}'.format(str(i+1))
for span in span_to_id:
span_id = span_to_id[span]
bp['entries'][entry_id]['annotation-sets']['abstract-events']['span-sets'][span_id] = dict()
bp['entries'][entry_id]['annotation-sets']['abstract-events']['span-sets'][span_id]['ssid'] = span_id
bp['entries'][entry_id]['annotation-sets']['abstract-events']['span-sets'][span_id]['spans'] = []
span_d = dict()
span_d['hstring'] = span
span_d['string'] = span
bp['entries'][entry_id]['annotation-sets']['abstract-events']['span-sets'][span_id]['spans'].append(span_d)
for i, event in enumerate(sentence.events.values()):
event_d = dict()
event_id = 'event{}'.format(str(i+1))
assert event.trigger.text in span_to_id
trigger_id = span_to_id[event.trigger.text]
event_d['anchors'] = trigger_id
event_d['eventid'] = event_id
assert len(event.trigger.labels) == 1
event_types = list(event.trigger.labels.keys())[0].split('.')
assert len(event_types) == 2
event_d['helpful-harmful'] = event_types[0]
event_d['material-verbal'] = event_types[1]
event_d['agents'] = []
event_d['patients'] = []
for argument in event.arguments.values():
assert argument.text in span_to_id
argument_id = span_to_id[argument.text]
assert len(argument.labels) == 1
argument_role = list(argument.labels.keys())[0].lower()
if argument_role == 'agent':
event_d['agents'].append(argument_id)
elif argument_role == 'patient':
event_d['patients'].append(argument_id)
bp['entries'][entry_id]['annotation-sets']['abstract-events']['events'][event_id] = event_d
return bp
|
[
"[email protected]"
] | |
69c168177c43574cbd853f4f8cc6a2858db2c7c0
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/wordy/2cb74d70ed0f4ca582f53ed59ded5843.py
|
780680aef39b375639045d1d0ccfbfb865385441
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null |
UTF-8
|
Python
| false | false | 1,867 |
py
|
def calculate(s):
# Assume we have the 'What is..' opening
s = s.strip('What is ')
# Rather than tokenizing the 'by' following multiplied
# or divided, let's just remove it. The phrase won't
    # lose any detail for computation.
for each in [' by',' to the',' power','th','nd','st','rd']:
s = s.replace(each,"")
# Divide our current phrase into four parts
items = s.split(' ',3)
if len(items) == 3 and items[2][-1] == '?':
items[2] = items[2].strip('?')
items.append('?')
# Check for errors in input
if not items[0].strip('-').isdigit() or not items[2].strip('-').isdigit():
raise ValueError("Incorrect sequence of items for wordy calculate.")
elif items[1] not in definitions():
raise ValueError("Operation not found in definitions.")
# Perform the first operation
ans = definitions()[items[1]](int(items[0]),int(items[2]))
# Subsequent operations will operate on the above answer
s = items[3]
items = s.split(" ",2)
# Continue operating until the end
while '?' not in items[0]:
if '?' in items[1]:
items[1] = items[1].strip('?')
items.append('?')
if not items[1].strip('-').isdigit():
raise ValueError("Incorrect sequence of items for wordy calculate.")
elif items[0] not in definitions():
raise ValueError("Operation not found in definitions.")
ans = definitions()[items[0]](ans,int(items[1]))
s = items[2]
items = s.split(" ",2)
return ans
def definitions():
return {'plus': lambda x,y:x+y,
'minus': lambda x,y:x-y,
'multiplied': lambda x,y:x*y,
'divided': lambda x,y:x/y,
'raised': lambda x,y:x**y
}
|
[
"[email protected]"
] | |
88fac84782b5350baeb552fe4770e2150129d53d
|
caaf1b0754db1e676c37a6f1e58f19183754e654
|
/sdk/network/azure-mgmt-network/generated_samples/virtual_network_tap_delete.py
|
a04f9b29d2d098aa8941dee37c28d31718ac4001
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
rdomenzain/azure-sdk-for-python
|
45dfb39121a0abda048c22e7309733a56259f525
|
58984255aeb904346b6958c5ba742749a2cc7d1b
|
refs/heads/master
| 2023-07-07T06:53:12.967120 | 2023-07-04T16:27:37 | 2023-07-04T16:27:37 | 258,050,134 | 0 | 0 |
MIT
| 2020-04-23T00:12:14 | 2020-04-23T00:12:13 | null |
UTF-8
|
Python
| false | false | 1,513 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-network
# USAGE
python virtual_network_tap_delete.py
    Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = NetworkManagementClient(
credential=DefaultAzureCredential(),
subscription_id="subid",
)
client.virtual_network_taps.begin_delete(
resource_group_name="rg1",
tap_name="test-vtap",
).result()
# x-ms-original-file: specification/network/resource-manager/Microsoft.Network/stable/2022-11-01/examples/VirtualNetworkTapDelete.json
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
b017ebc4063d176c60cd9d5d47c2fd34455ecfe4
|
2e996d6870424205bc6af7dabe8685be9b7f1e56
|
/code/processing/20181219_r3_O1O3_IND_titration_flow/processing.py
|
4c382b4d6b125f48bd4b3786e5fec98e64c5c186
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
minghao2016/mwc_mutants
|
fd705d44e57e3b2370d15467f31af0ee3945dcc2
|
0f89b3920c6f7a8956f48874615fd1977891e33c
|
refs/heads/master
| 2023-03-25T03:56:33.199379 | 2020-06-26T20:09:00 | 2020-06-26T20:09:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,121 |
py
|
import numpy as np
import pandas as pd
import glob
import imp
import sys
sys.path.insert(0, '../../../')
import mut.flow
# Define the experiment parameters
DATE = 20181219
RUN_NO = 3
USERNAME = 'gchure'
CLASS = 'IND'
gating_fraction = 0.4
# Load all files.
files = glob.glob('../../../data/flow/csv/{0}*_r{1}*.csv'.format(DATE, RUN_NO))
# Set up the DataFrame
colnames = ['date', 'username', 'mutant', 'operator', 'strain', 'IPTGuM',
'mean_FITC_H']
df = pd.DataFrame([], columns=colnames)
for f in files:
    # Get the identifying information.
date, _, operator, strain, mutant, conc = f.split('/')[-1].split('_')
conc = float(conc.split('uM')[0])
rep = int(strain.split('R')[-1])
# Load in the data
data = pd.read_csv(f)
gated = mut.flow.gaussian_gate(data, gating_fraction)
# Compute the mean
mean_FITC = gated['FITC-H'].mean()
# Assemble the dictionary
samp_dict = dict(date=date, username=USERNAME, mutant=mutant,
operator=operator, strain=strain, IPTGuM=conc,
mean_FITC_H=mean_FITC, repressors=rep)
df = df.append(samp_dict, ignore_index=True)
fc_dfs = []
grouped = df[df['mutant']!='auto'].groupby(['IPTGuM', 'operator'])
mean_auto_df = df[df['mutant'] == 'auto']
for g, d in grouped:
mean_auto = mean_auto_df[mean_auto_df['IPTGuM']==g[0]]['mean_FITC_H'].values[0]
mean_delta = d.loc[d['mutant'] == 'delta']['mean_FITC_H'].values[0]
d['fold_change'] = (d['mean_FITC_H'] - mean_auto) / (mean_delta - mean_auto)
fc_dfs.append(d)
fold_change_df = pd.concat(fc_dfs, axis=0)
# Save to a CSV.
fold_change_df.to_csv(
'output/{0}_r{1}_{2}_fold_change.csv'.format(DATE, RUN_NO, CLASS))
# Add the comments and save to the data/csv file.
target = '../../../data/csv/{0}_r{1}_{2}_fold_change.csv'.format(DATE, RUN_NO,
mutant)
with open('comments.txt', 'r') as f:
comments = f.read().splitlines()
with open(target, 'a') as f:
for line in comments:
f.write(line)
fold_change_df.to_csv(f, mode='a', index=False)
|
[
"[email protected]"
] | |
2f719719bb503926708f3f1e7b6dc163f7417df6
|
8ea2acd4b2b15f5edd4608dfc20cb6fed49995cd
|
/docs/sphinx/rest_substitutions/snippets/python/converted/wx.DataObjectComposite.1.py
|
288e6c8a0d5e5757a5ff188f805efc25fcd8ecea
|
[] |
no_license
|
timechild/Phoenix
|
cbace6e93f69eaa5f998ff7861dc8b763fe7eef7
|
2c2d44f3750d01692a99f96f65d8d70f39174528
|
refs/heads/master
| 2021-06-27T04:26:52.037016 | 2017-09-15T04:48:42 | 2017-09-15T04:48:42 | 103,706,741 | 1 | 0 | null | 2017-09-15T22:31:21 | 2017-09-15T22:31:21 | null |
UTF-8
|
Python
| false | false | 1,068 |
py
|
def MyDropTarget(self):
dataobj = wx.DataObjectComposite()
dataobj.Add(wx.BitmapDataObject(), True)
dataobj.Add(wx.FileDataObject())
self.SetDataObject(dataobj)
def OnData(self, x, y, defaultDragResult):
dragResult = wx.DropTarget.OnData(x, y, defaultDragResult)
if dragResult == defaultDragResult:
dataobjComp = self.GetDataObject()
        format = dataobjComp.GetReceivedFormat()
dataobj = dataobjComp.GetObject(format)
if format.GetType() == wx.DF_BITMAP:
dataobjBitmap = dataobj
# ... use dataobj.GetBitmap() ...
elif format.GetType() == wx.DF_FILENAME:
dataobjFile = dataobj
# ... use dataobj.GetFilenames() ...
else:
raise Exception("unexpected data object format")
return dragResult
|
[
"[email protected]"
] | |
63a26d8b55aaeaa632f4571a62a47a7c52bc529d
|
61b87099017a2456c5c7b733a1c6559b988e4ebe
|
/user_portrait/cron/flow5/check_process.py
|
d17e151fb5e3e86b84d0aff6ae9cc34a1b0b0a2e
|
[] |
no_license
|
zhuty16/user_portrait
|
4fc4cc08e550864ebcfa3eef200127a94f07043b
|
a5e8ea3d28316e0d8822f92951462aad5c3d2355
|
refs/heads/master
| 2021-05-29T07:05:58.520759 | 2015-08-08T13:07:50 | 2015-08-08T13:07:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,645 |
py
|
# -*- coding:utf-8 -*-
import subprocess
import sys
import os
import time
def check(p_name):
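    # Check whether a process with the given name is running; start it if it is not.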
cmd = 'ps -ef|grep %s|grep -v "grep"' % p_name
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    val = ''
    if p.wait() == 0:
        val = p.stdout.read()
        print val
if p_name in val:
print "ok - %s python process is running" % p_name
else:
print "no process is running!"
os.system("python ./%s &" % p_name)
def check_redis(p_name):
cmd = 'ps -ef|grep %s|grep -v "grep"' % p_name
restart_cmd = 'cd /home/ubuntu3/huxiaoqian/redis-2.8.13 && src/redis-server redis.conf'
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
(stdoutput, erroutput) = p.communicate()
val = stdoutput
if p_name in val:
print "ok - %s process is running" % p_name
else:
os.system(restart_cmd)
def check_elasticsearch(p_name):
cmd = 'ps -ef|grep %s|grep -v "grep"' % p_name
restart_cmd = 'cd /home/ubuntu3/yuankun/elasticsearch-1.6.0 && bin/elasticsearch -Xmx15g -Xms15g -Des.max-open-files=true -d'
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
(stdoutput, erroutput) = p.communicate()
if p_name in stdoutput:
print "%s ok - %s process is running" % (time.ctime(), p_name)
else:
os.system(restart_cmd)
if __name__ == '__main__':
# test procedure running
d_name = ['zmq_work_weibo_flow5.py']
for item in d_name:
check(item)
'''
# test redis running
check_redis("redis")
# test elasticsearch running
check_elasticsearch("elasticsearch")
sys.exit(0)
'''
|
[
"[email protected]"
] | |
0f3fecb82dc87e5057514645409a59e8b1fd5246
|
a4ea525e226d6c401fdb87a6e9adfdc5d07e6020
|
/src/azure-cli/azure/cli/command_modules/serviceconnector/_params.py
|
656868842eb2bd0f35811474f70b3d09f6eeec01
|
[
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] |
permissive
|
Azure/azure-cli
|
13340eeca2e288e66e84d393fa1c8a93d46c8686
|
a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca
|
refs/heads/dev
| 2023-08-17T06:25:37.431463 | 2023-08-17T06:00:10 | 2023-08-17T06:00:10 | 51,040,886 | 4,018 | 3,310 |
MIT
| 2023-09-14T11:11:05 | 2016-02-04T00:21:51 |
Python
|
UTF-8
|
Python
| false | false | 16,545 |
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.commands.parameters import (
get_enum_type,
get_location_type,
get_three_state_flag
)
from azure.cli.core.commands.validators import get_default_location_from_resource_group
from ._validators import (
validate_params,
validate_kafka_params,
validate_local_params,
get_default_object_id_of_current_user
)
from ._resource_config import (
AUTH_TYPE,
RESOURCE,
SOURCE_RESOURCES_PARAMS,
SOURCE_RESOURCES_CREATE_PARAMS,
TARGET_RESOURCES_PARAMS,
TARGET_RESOURCES_CONNECTION_STRING,
AUTH_TYPE_PARAMS,
SUPPORTED_AUTH_TYPE,
SUPPORTED_CLIENT_TYPE,
TARGET_SUPPORT_SERVICE_ENDPOINT,
TARGET_SUPPORT_PRIVATE_ENDPOINT,
LOCAL_CONNECTION_PARAMS
)
from ._addon_factory import AddonFactory
from knack.arguments import CLIArgumentType
from .action import AddCustomizedKeys
def add_source_resource_block(context, source, enable_id=True, validate_source_id=False):
source_args = SOURCE_RESOURCES_PARAMS.get(source)
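    # Hide arguments that belong to other source resource types so only the relevant ones are registered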
for resource, args in SOURCE_RESOURCES_PARAMS.items():
if resource != source:
for arg in args:
if arg not in source_args:
context.ignore(arg)
required_args = []
for arg, content in SOURCE_RESOURCES_PARAMS.get(source).items():
id_arg = '\'--id\'' if enable_id else '\'--source-id\''
context.argument(arg, configured_default=content.get('configured_default'),
options_list=content.get('options'), type=str,
help='{}. Required if {} is not specified.'.format(content.get('help'), id_arg))
required_args.append(content.get('options')[0])
validator_kwargs = {
'validator': validate_params} if validate_source_id else {}
if not enable_id:
context.argument('source_id', options_list=['--source-id'], type=str,
help="The resource id of a {source}. Required if {required_args} "
"are not specified.".format(
source=source.value, required_args=str(required_args)),
**validator_kwargs)
else:
required_args.append('--connection')
context.argument('indentifier', options_list=['--id'], type=str,
help="The resource id of the connection. {required_args} are required "
"if '--id' is not specified.".format(required_args=str(required_args)))
context.ignore('source_id')
# scope parameter
if source == RESOURCE.KubernetesCluster:
context.argument('scope', options_list=['--kube-namespace'], type=str, default='default',
help="The kubernetes namespace where the connection information "
"will be saved into (as kubernetes secret).")
context.argument('enable_csi', options_list=['--enable-csi'], arg_type=get_three_state_flag(),
help="Use keyvault as a secrets store via a CSI volume. "
"If specified, AuthType Arguments are not needed.")
elif source == RESOURCE.ContainerApp:
for arg, content in SOURCE_RESOURCES_CREATE_PARAMS.get(source).items():
context.argument(arg, options_list=content.get(
'options'), type=str, help=content.get('help'))
context.ignore('enable_csi')
else:
context.ignore('scope')
context.ignore('enable_csi')
def add_auth_block(context, source, target):
support_auth_types = SUPPORTED_AUTH_TYPE.get(
source, {}).get(target, [])
for auth_type in AUTH_TYPE_PARAMS:
if auth_type in support_auth_types:
validator = None
if auth_type == AUTH_TYPE.UserAccount:
validator = get_default_object_id_of_current_user
for arg, params in AUTH_TYPE_PARAMS.get(auth_type).items():
context.argument(arg, options_list=params.get('options'), action=params.get('action'), nargs='*',
help=params.get('help'), arg_group='AuthType', validator=validator)
else:
for arg in AUTH_TYPE_PARAMS.get(auth_type):
context.ignore(arg)
def add_local_connection_block(context, show_id=True):
context.argument('location', arg_type=CLIArgumentType(
arg_type=get_location_type(context),
required=False,
validator=get_default_location_from_resource_group))
if show_id:
context.argument('id', options_list=[
'--id'], type=str, help='The id of connection.', validator=validate_local_params)
params = LOCAL_CONNECTION_PARAMS.get('connection_name')
context.argument('connection_name', options_list=params.get('options'), type=params.get('type'),
help=params.get('help'), validator=validate_local_params)
def add_target_resource_block(context, target):
target_args = TARGET_RESOURCES_PARAMS.get(target)
for resource, args in TARGET_RESOURCES_PARAMS.items():
if resource != target:
for arg in args:
if arg not in target_args:
context.ignore(arg)
required_args = []
for arg, content in TARGET_RESOURCES_PARAMS.get(target).items():
context.argument(arg, options_list=content.get('options'), type=str,
help='{}. Required if \'--target-id\' is not specified.'.format(content.get('help')))
required_args.append(content.get('options')[0])
context.argument('target_id', type=str,
help='The resource id of target service. Required if {required_args} '
'are not specified.'.format(required_args=str(required_args)))
if target != RESOURCE.KeyVault:
context.ignore('enable_csi')
def add_connection_name_argument(context, source):
context.argument('connection_name', options_list=['--connection'], type=str,
help='Name of the {} connection.'.format(source.value), validator=validate_params)
def add_client_type_argument(context, source, target):
client_types = SUPPORTED_CLIENT_TYPE.get(source).get(target, [])
client_types = [item.value for item in client_types]
context.argument('client_type', options_list=['--client-type'], arg_type=get_enum_type(client_types),
help='The client type used on the {}'.format(source.value))
def add_customized_keys_argument(context):
context.argument('customized_keys', options_list=['--customized-keys'], action=AddCustomizedKeys, nargs='*',
help='The customized keys used to change default configuration names. Key is the original '
'name, value is the customized name.')
def add_target_type_argument(context, source):
TARGET_TYPES = [
elem.value for elem in SUPPORTED_AUTH_TYPE.get(source).keys()]
context.argument('target_resource_type', options_list=['--target-type', '-t'],
arg_type=get_enum_type(TARGET_TYPES), help='The target resource type')
def add_new_addon_argument(context, source, target):
if AddonFactory.get(target, None):
context.argument('new_addon', options_list=['--new'], arg_type=get_three_state_flag(), default=False,
help='Indicates whether to create a new {} when '
'creating the {} connection'.format(target.value, source.value))
else:
context.ignore('new_addon')
def add_secret_store_argument(context):
context.argument('key_vault_id', options_list=[
'--vault-id'], help='The id of key vault to store secret value')
def add_vnet_block(context, target):
if target not in TARGET_SUPPORT_SERVICE_ENDPOINT:
context.ignore('service_endpoint')
else:
context.argument('service_endpoint', options_list=['--service-endpoint'], arg_type=get_three_state_flag(),
default=None, arg_group='NetworkSolution',
help='Connect target service by service endpoint. Source resource must be in the VNet'
' and target SKU must support service endpoint feature.')
if target not in TARGET_SUPPORT_PRIVATE_ENDPOINT:
context.ignore('private_endpoint')
else:
context.argument('private_endpoint', options_list=['--private-endpoint'], arg_type=get_three_state_flag(),
default=None, arg_group='NetworkSolution',
help='Connect target service by private endpoint. '
'The private endpoint in source virtual network must be created ahead.')
def add_connection_string_argument(context, source, target):
if source == RESOURCE.WebApp and target in TARGET_RESOURCES_CONNECTION_STRING:
context.argument('store_in_connection_string', options_list=['--config-connstr'],
arg_type=get_three_state_flag(), default=False, is_preview=True,
help='Store configuration into connection strings, '
                              'can only be used together with the dotnet client_type')
else:
context.ignore('store_in_connection_string')
def add_confluent_kafka_argument(context):
context.argument('bootstrap_server', options_list=[
'--bootstrap-server'], help='Kafka bootstrap server url')
context.argument('kafka_key', options_list=[
'--kafka-key'], help='Kafka API-Key (key)')
context.argument('kafka_secret', options_list=[
'--kafka-secret'], help='Kafka API-Key (secret)')
context.argument('schema_registry', options_list=[
'--schema-registry'], help='Schema registry url')
context.argument('schema_key', options_list=[
'--schema-key'], help='Schema registry API-Key (key)')
context.argument('schema_secret', options_list=[
'--schema-secret'], help='Schema registry API-Key (secret)')
context.argument('connection_name', options_list=['--connection'],
help='Name of the connection', validator=validate_kafka_params)
def load_arguments(self, _): # pylint: disable=too-many-statements
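    # Register CLI argument contexts for every supported (source, target) resource combination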
for source in SOURCE_RESOURCES_PARAMS:
with self.argument_context('{} connection list'.format(source.value)) as c:
add_source_resource_block(
c, source, enable_id=False, validate_source_id=True)
with self.argument_context('{} connection show'.format(source.value)) as c:
add_source_resource_block(c, source)
add_connection_name_argument(c, source)
with self.argument_context('{} connection delete'.format(source.value)) as c:
add_connection_name_argument(c, source)
add_source_resource_block(c, source)
with self.argument_context('{} connection list-configuration'.format(source.value)) as c:
add_connection_name_argument(c, source)
add_source_resource_block(c, source)
with self.argument_context('{} connection validate'.format(source.value)) as c:
add_connection_name_argument(c, source)
add_source_resource_block(c, source)
with self.argument_context('{} connection list-support-types'.format(source.value)) as c:
add_target_type_argument(c, source)
with self.argument_context('{} connection wait'.format(source.value)) as c:
add_connection_name_argument(c, source)
add_source_resource_block(c, source)
for target in TARGET_RESOURCES_PARAMS:
with self.argument_context('{} connection create {}'.format(source.value, target.value)) as c:
add_client_type_argument(c, source, target)
add_connection_name_argument(c, source)
add_source_resource_block(c, source, enable_id=False)
add_target_resource_block(c, target)
add_auth_block(c, source, target)
add_new_addon_argument(c, source, target)
add_secret_store_argument(c)
add_vnet_block(c, target)
add_connection_string_argument(c, source, target)
add_customized_keys_argument(c)
with self.argument_context('{} connection update {}'.format(source.value, target.value)) as c:
add_client_type_argument(c, source, target)
add_connection_name_argument(c, source)
add_source_resource_block(c, source)
add_auth_block(c, source, target)
add_secret_store_argument(c)
add_vnet_block(c, target)
add_connection_string_argument(c, source, target)
add_customized_keys_argument(c)
# special target resource: independent implementation
target = RESOURCE.ConfluentKafka
with self.argument_context('{} connection create {}'.format(source.value, target.value)) as c:
add_client_type_argument(c, source, target)
add_source_resource_block(c, source, enable_id=False)
add_confluent_kafka_argument(c)
add_secret_store_argument(c)
add_customized_keys_argument(c)
with self.argument_context('{} connection update {}'.format(source.value, target.value)) as c:
add_client_type_argument(c, source, target)
add_source_resource_block(c, source, enable_id=False)
add_confluent_kafka_argument(c)
add_secret_store_argument(c)
add_customized_keys_argument(c)
# local connection
with self.argument_context('connection list') as c:
add_local_connection_block(c, show_id=False)
with self.argument_context('connection show') as c:
add_local_connection_block(c)
with self.argument_context('connection delete') as c:
add_local_connection_block(c)
with self.argument_context('connection generate-configuration') as c:
add_local_connection_block(c)
with self.argument_context('connection validate') as c:
add_local_connection_block(c)
with self.argument_context('connection list-support-types') as c:
add_target_type_argument(c, source)
with self.argument_context('connection wait') as c:
add_local_connection_block(c)
source = RESOURCE.Local
for target in TARGET_RESOURCES_PARAMS:
with self.argument_context('connection preview-configuration {}'.format(target.value)) as c:
add_auth_block(c, source, target)
add_client_type_argument(c, source, target)
with self.argument_context('connection create {}'.format(target.value)) as c:
add_client_type_argument(c, source, target)
add_target_resource_block(c, target)
add_auth_block(c, source, target)
add_new_addon_argument(c, source, target)
add_secret_store_argument(c)
add_vnet_block(c, target)
add_local_connection_block(c)
add_customized_keys_argument(c)
with self.argument_context('connection update {}'.format(target.value)) as c:
add_client_type_argument(c, source, target)
add_auth_block(c, source, target)
add_secret_store_argument(c)
add_vnet_block(c, target)
add_local_connection_block(c)
add_customized_keys_argument(c)
# special target resource: independent implementation
target = RESOURCE.ConfluentKafka
with self.argument_context('connection create {}'.format(target.value)) as c:
add_client_type_argument(c, source, target)
add_confluent_kafka_argument(c)
add_secret_store_argument(c)
add_local_connection_block(c, show_id=False)
add_customized_keys_argument(c)
with self.argument_context('connection update {}'.format(target.value)) as c:
add_client_type_argument(c, source, target)
add_confluent_kafka_argument(c)
add_secret_store_argument(c)
add_local_connection_block(c, show_id=False)
add_customized_keys_argument(c)
with self.argument_context('connection preview-configuration {}'.format(target.value)) as c:
add_auth_block(c, source, target)
add_client_type_argument(c, source, target)
|
[
"[email protected]"
] | |
fe3840957fde644b7fc61ad6d81141e5c485844f
|
0c9e35012baf61ee678bc719588b8cb2ccbe449e
|
/product/migrations/0078_locationstudio.py
|
c35d80b4b58d4926bd6221b180331d428b08a63c
|
[] |
no_license
|
rickyakilimali/approeco
|
6f0f62d57b6e5361b5c5dd473038f2999bac1413
|
fd96ca6d70dabf20668d2a582c67e5d409a4a097
|
refs/heads/master
| 2018-09-21T12:44:27.414394 | 2018-06-06T16:35:40 | 2018-06-06T16:35:40 | 113,836,591 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,185 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-01-31 12:51
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('category', '0002_auto_20180118_1155'),
('product', '0077_communique'),
]
operations = [
migrations.CreateModel(
name='LocationStudio',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nom', models.CharField(max_length=250)),
('is_active', models.BooleanField()),
('duree_location', models.CharField(choices=[('30 MIN', '30 MIN'), ('1H', '1H')], max_length=100, verbose_name='DUREE LOCATION')),
('prix', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='PRIX')),
('units', models.CharField(choices=[('USD', 'US$'), ('EUROS', 'EUROS'), ('USD/JOUR', 'USD/JOUR'), ('USD/PAGE', 'USD/PAGE'), ('USD/M2', 'USD/M2'), ('USD/MOIS', 'USD/MOIS'), ('USD/PIECE', 'USD/PIECE'), ('USD', 'USD'), ('USD/KG', 'USD/KG'), ('USD/PERSONNE', 'USD/PERSONNE'), ('USD/GARDIEN/MOIS', 'USD/GARDIEN/MOIS'), ('USD/LITRE', 'USD/LITRE'), ('USD/SPLIT', 'USD/SPLIT'), ('%', '%'), ('% DU SALAIRE', '% DU SALAIRE'), ('% DU 1ER SALAIRE', '% DU 1ER SALAIRE'), ('% DES FONDS TRANSPORTES', '% DES FONDS TRANSPORTES'), ('USD/PERSONNE/JOUR', 'USD/PERSONNE/JOUR'), ('USD/THEME', 'USD/THEME'), ('USD/KG', 'USD/KG'), ('USD/AN', 'USD/AN'), ('USD/HEURE', 'USD/HEURE'), ('USD/MODULE', 'USD/MODULE'), ('USD/KG OU L', 'USD/KG OU L'), ('% DU DEVIS', '% DU DEVIS')], max_length=50, verbose_name='UNITÉS')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='category.Category')),
('vendeur', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['prix'],
},
),
]
|
[
"[email protected]"
] | |
0c952a070f362f63b5390ebbb68b549f1f653ea8
|
29e08aa28f26e73358c6b8f2f309c216dcf4400b
|
/4/openstack-dashboard/openstack_dashboard/dashboards/admin/metering/tabs.py
|
40bca2fb36973361822388acf18fa774bb0720fd
|
[] |
no_license
|
TsinghuaCloud/TsinghuaCloud2.0-gui
|
fefe76318fc21ebf56f90f7fac81f4c273f1f6b6
|
4c91ccc048b846037ab281f8f62221f45e8edf43
|
refs/heads/master
| 2021-01-17T07:01:25.125829 | 2016-04-18T15:41:58 | 2016-04-18T15:41:58 | 39,073,412 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,154 |
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from horizon import tabs
from openstack_dashboard import api
from openstack_dashboard.api import ceilometer
class GlobalStatsTab(tabs.Tab):
name = _("Stats")
slug = "stats"
template_name = ("admin/metering/stats.html")
preload = False
@staticmethod
def _get_flavor_names(request):
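        # Fall back to a default list of flavor names if the Nova flavor list cannot be retrieved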
try:
flavors = api.nova.flavor_list(request, None)
return [f.name for f in flavors]
except Exception:
return ['m1.tiny', 'm1.small', 'm1.medium',
'm1.large', 'm1.xlarge']
def get_context_data(self, request):
query = [{"field": "metadata.OS-EXT-AZ:availability_zone",
"op": "eq",
"value": "nova"}]
try:
instances = ceilometer.resource_list(request, query,
ceilometer_usage_object=None)
meters = ceilometer.meter_list(request)
except Exception:
instances = []
meters = []
exceptions.handle(request,
_('Unable to retrieve Nova Ceilometer '
'metering information.'))
instance_ids = set([i.resource_id for i in instances])
instance_meters = set([m.name for m in meters
if m.resource_id in instance_ids])
meter_titles = {"instance": _("Duration of instance"),
"memory": _("Volume of RAM in MB"),
"cpu": _("CPU time used"),
"cpu_util": _("Average CPU utilisation"),
"vcpus": _("Number of VCPUs"),
"disk.read.requests": _("Number of read requests"),
"disk.write.requests": _("Number of write requests"),
"disk.read.bytes": _("Volume of reads in B"),
"disk.write.bytes": _("Volume of writes in B"),
"disk.root.size": _("Size of root disk in GB"),
"disk.ephemeral.size": _("Size of ephemeral disk "
"in GB"),
"network.incoming.bytes": _("Number of incoming bytes "
"on the network for a VM interface"),
"network.outgoing.bytes": _("Number of outgoing bytes "
"on the network for a VM interface"),
"network.incoming.packets": _("Number of incoming "
"packets for a VM interface"),
"network.outgoing.packets": _("Number of outgoing "
"packets for a VM interface")}
for flavor in self._get_flavor_names(request):
name = 'instance:%s' % flavor
hint = (_('Duration of instance type %s (openstack flavor)') %
flavor)
meter_titles[name] = hint
class MetersWrap(object):
""" A quick wrapper for meter and associated titles. """
def __init__(self, meter, meter_titles):
self.name = meter
self.title = meter_titles.get(meter, "")
meters_objs = [MetersWrap(meter, meter_titles)
for meter in sorted(instance_meters)]
context = {'meters': meters_objs}
return context
class CeilometerOverviewTabs(tabs.TabGroup):
slug = "ceilometer_overview"
tabs = (GlobalStatsTab,)
sticky = True
|
[
"root@controller.(none)"
] |
root@controller.(none)
|
075416780ca0fd70d872b7a9401baae8344ab08a
|
156d6c4638773e5af6027b10336f60cca9a23252
|
/src/ros_main.py
|
57eabd65f11ea48dbd012d4adbe9913f69c1d2f8
|
[
"MIT"
] |
permissive
|
lijh1024/ros-RandLA-Net
|
6f5d9a5095b97466acccc1bffc1dfdb9371ce4b8
|
5f15a6687bdced16615f7215fc1aa9ffacbc0ad2
|
refs/heads/master
| 2023-03-17T07:41:08.353403 | 2020-08-24T02:47:56 | 2020-08-24T02:47:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 409 |
py
|
#!/usr/bin/env python
from __future__ import print_function
import roslib
roslib.load_manifest('ros_randla_net')
import sys
import rospy
from ros_node import InferenceNode
def main(args):
rospy.init_node('~', anonymous=True)
node = InferenceNode()
try:
rospy.spin()
except KeyboardInterrupt:
rospy.loginfo("Shutting down")
if __name__ == '__main__':
main(sys.argv)
|
[
"[email protected]"
] | |
feba3d89953b0b88ad4f1147481b481a8e68b566
|
dc9865587582f65e5be010a8a831972845c8dbb4
|
/django_mri/utils/compression.py
|
50991175ab3b0cce9db71e8c9035155fcb07b45f
|
[
"Apache-2.0"
] |
permissive
|
TheLabbingProject/django_mri
|
d31bd15613c82a55e2a42eba1fa3617e952e8b72
|
5b5ca1b119144d01e526825d2b2a2b87541b4d4a
|
refs/heads/master
| 2023-04-07T03:22:09.366540 | 2023-03-29T08:18:01 | 2023-03-29T08:18:01 | 205,411,473 | 7 | 2 |
Apache-2.0
| 2023-02-17T08:22:53 | 2019-08-30T15:42:45 |
Python
|
UTF-8
|
Python
| false | false | 2,131 |
py
|
"""
Definition of the :func:`~django_mri.utils.compression.compress` and
:func:`~django_mri.utils.compression.uncompress` utility functions.
"""
import gzip
import shutil
from pathlib import Path
def uncompress(
source: Path, destination: Path = None, keep_source: bool = True
) -> Path:
"""
Uncompresses the provided (compressed) *source* file.
Parameters
----------
source : Path
File to uncompress
destination : Path, optional
Uncompressed output file path, by default None
keep_source : bool, optional
Whether to keep the source file or not, by default True
Returns
-------
Path
Output file path
"""
destination = destination or source.with_suffix("")
try:
with gzip.open(source, "rb") as compressed_data:
with open(destination, "wb") as uncompressed_data:
shutil.copyfileobj(compressed_data, uncompressed_data)
except FileNotFoundError:
if destination.exists():
return destination
else:
raise
else:
if not keep_source:
source.unlink()
return destination
def compress(
source: Path, destination: Path = None, keep_source: bool = True
) -> Path:
"""
Compresses the provided *source* file.
Parameters
----------
source : Path
File to compress
destination : Path, optional
Compressed output file path, by default None
keep_source : bool, optional
Whether to keep the source file or not, by default True
Returns
-------
Path
Output file path
"""
destination = destination or source.with_suffix(source.suffix + ".gz")
try:
with open(source, "rb") as uncompressed_data:
with gzip.open(destination, "wb") as compressed_file:
shutil.copyfileobj(uncompressed_data, compressed_file)
except FileNotFoundError:
if destination.exists():
return destination
else:
raise
else:
if not keep_source:
source.unlink()
return destination
|
[
"[email protected]"
] | |
c6b6ef9cc4eb38a80c21d9622919755f9d0305b4
|
5e382a50c521e4cd874ed4e94799e5ef062994a1
|
/services/web/server/src/simcore_service_webserver/rest.py
|
a9b29a0b75d27657ed565de0e4f3a730bc284f99
|
[
"MIT"
] |
permissive
|
KZzizzle/osparc-simcore
|
71103bcfb81d6ea90e0ac9529e8f08568685166c
|
981bc8d193f3f5d507e3225f857e0308c339e163
|
refs/heads/master
| 2021-05-25T08:46:52.704734 | 2020-10-07T14:07:34 | 2020-10-07T14:07:34 | 253,747,491 | 0 | 0 |
MIT
| 2020-04-07T09:29:23 | 2020-04-07T09:29:22 | null |
UTF-8
|
Python
| false | false | 3,270 |
py
|
""" Restful API
- Loads and validates openapi specifications (oas)
- Adds check and diagnostic routes
- Activates middlewares
"""
import logging
from pathlib import Path
from typing import Optional
import openapi_core
import yaml
from aiohttp import web
from aiohttp_swagger import setup_swagger
from openapi_core.schema.specs.models import Spec as OpenApiSpecs
from servicelib import openapi
from servicelib.application_setup import ModuleCategory, app_module_setup
from servicelib.rest_middlewares import (
envelope_middleware_factory,
error_middleware_factory,
)
from simcore_service_webserver.resources import resources
from . import rest_routes
from .__version__ import api_version_prefix
from .rest_config import APP_CONFIG_KEY, APP_OPENAPI_SPECS_KEY, get_rest_config
log = logging.getLogger(__name__)
def get_openapi_specs_path(api_version_dir: Optional[str] = None) -> Path:
if api_version_dir is None:
api_version_dir = api_version_prefix
return resources.get_path(f"api/{api_version_dir}/openapi.yaml")
def load_openapi_specs(spec_path: Optional[Path] = None) -> OpenApiSpecs:
if spec_path is None:
spec_path = get_openapi_specs_path()
with spec_path.open() as fh:
spec_dict = yaml.safe_load(fh)
specs: OpenApiSpecs = openapi_core.create_spec(spec_dict, spec_path.as_uri())
return specs
@app_module_setup(
__name__,
ModuleCategory.ADDON,
depends=["simcore_service_webserver.security"],
logger=log,
)
def setup(app: web.Application, *, swagger_doc_enabled: bool = True):
cfg = get_rest_config(app)
api_version_dir = cfg["version"]
spec_path = get_openapi_specs_path(api_version_dir)
# validated openapi specs
app[APP_OPENAPI_SPECS_KEY] = specs = load_openapi_specs(spec_path)
# version check
base_path = openapi.get_base_path(specs)
major, *_ = specs.info.version
if f"/v{major}" != base_path:
raise ValueError(
f"REST API basepath {base_path} does not fit openapi.yml version {specs.info.version}"
)
if api_version_prefix != f"v{major}":
raise ValueError(
f"__version__.api_version_prefix {api_version_prefix} does not fit openapi.yml version {specs.info.version}"
)
# diagnostics routes
routes = rest_routes.create(specs)
app.router.add_routes(routes)
# middlewares
# NOTE: using safe get here since some tests use incomplete configs
is_diagnostics_enabled = (
app[APP_CONFIG_KEY].get("diagnostics", {}).get("enabled", {})
)
app.middlewares.extend(
[
error_middleware_factory(
api_version_prefix, log_exceptions=not is_diagnostics_enabled,
),
envelope_middleware_factory(api_version_prefix),
]
)
#
# rest API doc at /dev/doc (optional, e.g. for testing since it can be heavy)
#
    # NOTE: avoid /api/* since traefik uses it for its own API
#
log.debug("OAS loaded from %s ", spec_path)
if swagger_doc_enabled:
setup_swagger(
app,
swagger_url="/dev/doc",
swagger_from_file=str(spec_path),
ui_version=3,
)
# alias
setup_rest = setup
__all__ = "setup_rest"
|
[
"[email protected]"
] | |
36edc52492d3275c17b86992cdd04a1f593c7095
|
f9308d5a8efe2dbb48e9cc87cd06405b60a9dc7b
|
/samples/python/apidocs/ee_featurecollection_aggregate_total_sd.py
|
9051df9261f5d9a4c7d1decb6d917fa14bbeb55c
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
google/earthengine-community
|
4e054b421f66f03507d58668084aee981062fc24
|
ce931040c518860f8788b4888c0acfdebd2952fc
|
refs/heads/master
| 2023-09-01T14:47:54.812703 | 2023-08-31T23:01:00 | 2023-08-31T23:01:39 | 200,732,820 | 428 | 552 |
Apache-2.0
| 2023-09-13T21:46:51 | 2019-08-05T21:42:11 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,023 |
py
|
# Copyright 2023 The Google Earth Engine Community Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START earthengine__apidocs__ee_featurecollection_aggregate_total_sd]
# FeatureCollection of power plants in Belgium.
fc = ee.FeatureCollection('WRI/GPPD/power_plants').filter(
'country_lg == "Belgium"')
print('Total std. deviation of power plant capacities (MW):',
fc.aggregate_total_sd('capacitymw').getInfo()) # 462.9334545609107
# [END earthengine__apidocs__ee_featurecollection_aggregate_total_sd]
|
[
"[email protected]"
] | |
4823214d088fd443d5467e603ac8599f491e5548
|
ef9ab6d3ebb22fea68901c0e681abc25e5379fa6
|
/FanFilmE2/fanfilm/resources/lib/libraries/jsunfuck.py
|
bc1f5f8a45c9aa018b2ab4042fe44cdb5fd26d1e
|
[] |
no_license
|
OpenPE/eePlugins
|
b2098a082ee5a5d929a29683e2334dc3895cb4b5
|
8f4a2963d5489e760eb778a10f00c3b49356d517
|
refs/heads/master
| 2020-07-30T11:27:28.198034 | 2019-09-16T15:13:55 | 2019-09-16T15:13:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,439 |
py
|
#!/usr/bin/python
"""
Covenant Add-on
Copyright (C) 2016 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import string
import sys
import urllib
class JSUnfuck(object):
numbers = None
words = {
"(![]+[])": "false",
"([]+{})": "[object Object]",
"(!![]+[])": "true",
"([][[]]+[])": "undefined",
"(+{}+[])": "NaN",
"([![]]+[][[]])": "falseundefined",
"([][f+i+l+t+e+r]+[])": "function filter() { [native code] }",
"(!![]+[][f+i+l+t+e+r])": "truefunction filter() { [native code] }",
"(+![]+([]+[])[c+o+n+s+t+r+u+c+t+o+r])": "0function String() { [native code] }",
"(+![]+[![]]+([]+[])[c+o+n+s+t+r+u+c+t+o+r])": "0falsefunction String() { [native code] }",
"([]+[][s+o+r+t][c+o+n+s+t+r+u+c+t+o+r](r+e+t+u+r+n+ +l+o+c+a+t+i+o+n)())": "https://123movies.to",
"([]+[])[f+o+n+t+c+o+l+o+r]()": '<font color="undefined"></font>',
"(+(+!![]+e+1+0+0+0)+[])": "Infinity",
"(+[![]]+[][f+i+l+t+e+r])": 'NaNfunction filter() { [native code] }',
'(+[![]]+[+(+!+[]+(!+[]+[])[3]+[1]+[0]+[0]+[0])])': 'NaNInfinity',
'([]+[])[i+t+a+l+i+c+s]()': '<i></i>',
'[[]][c+o+n+c+a+t]([[]])+[]': ',',
'([][f+i+l+l]+[])': 'function fill() { [native code]}',
'(!![]+[][f+i+l+l])': 'truefunction fill() { [native code]}',
'((+[])[c+o+n+s+t+r+u+c+t+o+r]+[])': 'function Number() {[native code]} _display:45:1',
'(+(+!+[]+[1]+e+[2]+[0])+[])': '1.1e+21',
'([]+[])[c+o+n+s+t+r+u+c+t+o+r][n+a+m+e]': 'S+t+r+i+n+g',
'([][e+n+t+r+i+e+s]()+[])': '[object Array Iterator]',
'([]+[])[l+i+n+k](")': '<a href="""></a>',
'(![]+[0])[i+t+a+l+i+c+s]()': '<i>false0</i>',
# dummy to force array dereference
'DUMMY1': '6p',
'DUMMY2': '2x',
'DUMMY3': '%3C',
'DUMMY4': '%5B',
'DUMMY5': '6q',
'DUMMY6': '4h',
}
uniqs = {
'[t+o+S+t+r+i+n+g]': 1,
'[][f+i+l+t+e+r][c+o+n+s+t+r+u+c+t+o+r](r+e+t+u+r+n+ +e+s+c+a+p+e)()': 2,
'[][f+i+l+t+e+r][c+o+n+s+t+r+u+c+t+o+r](r+e+t+u+r+n+ +u+n+e+s+c+a+p+e)()': 3,
'[][s+o+r+t][c+o+n+s+t+r+u+c+t+o+r](r+e+t+u+r+n+ +e+s+c+a+p+e)()': 2,
'[][s+o+r+t][c+o+n+s+t+r+u+c+t+o+r](r+e+t+u+r+n+ +u+n+e+s+c+a+p+e)()': 3,
}
def __init__(self, js):
self.js = js
def decode(self, replace_plus=True):
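        # Repeatedly substitute known JSFuck building blocks until the string stops changing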
while True:
start_js = self.js
self.repl_words(self.words)
self.repl_numbers()
self.repl_arrays(self.words)
self.repl_uniqs(self.uniqs)
if start_js == self.js:
break
if replace_plus:
self.js = self.js.replace('+', '')
self.js = re.sub('\[[A-Za-z]*\]', '', self.js)
self.js = re.sub('\[(\d+)\]', '\\1', self.js)
return self.js
def repl_words(self, words):
while True:
start_js = self.js
for key, value in sorted(words.items(), key=lambda x: len(x[0]), reverse=True):
self.js = self.js.replace(key, value)
if self.js == start_js:
break
def repl_arrays(self, words):
for word in sorted(words.values(), key=lambda x: len(x), reverse=True):
for index in xrange(0, 100):
try:
repl = word[index]
self.js = self.js.replace('%s[%d]' % (word, index), repl)
except:
pass
def repl_numbers(self):
if self.numbers is None:
self.numbers = self.__gen_numbers()
while True:
start_js = self.js
for key, value in sorted(self.numbers.items(), key=lambda x: len(x[0]), reverse=True):
self.js = self.js.replace(key, value)
if self.js == start_js:
break
def repl_uniqs(self, uniqs):
for key, value in uniqs.iteritems():
if key in self.js:
if value == 1:
self.__handle_tostring()
elif value == 2:
self.__handle_escape(key)
elif value == 3:
self.__handle_unescape(key)
def __handle_tostring(self):
for match in re.finditer('(\d+)\[t\+o\+S\+t\+r\+i\+n\+g\](\d+)', self.js):
repl = to_base(match.group(1), match.group(2))
self.js = self.js.replace(match.group(0), repl)
def __handle_escape(self, key):
while True:
start_js = self.js
offset = self.js.find(key) + len(key)
if self.js[offset] == '(' and self.js[offset + 2] == ')':
c = self.js[offset + 1]
self.js = self.js.replace('%s(%s)' % (key, c), urllib.quote(c))
if start_js == self.js:
break
def __handle_unescape(self, key):
start = 0
while True:
start_js = self.js
offset = self.js.find(key, start)
if offset == -1: break
offset += len(key)
expr = ''
extra = ''
last_c = self.js[offset - 1]
abort = False
for i, c in enumerate(self.js[offset:]):
extra += c
if c == ')':
break
elif (i > 0 and c == '(') or (c == '[' and last_c != '+'):
abort = True
break
elif c == '%' or c in string.hexdigits:
expr += c
last_c = c
if not abort:
self.js = self.js.replace(key + extra, urllib.unquote(expr))
if start_js == self.js:
break
else:
start = offset
def __gen_numbers(self):
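# Builds a lookup of JSFuck numeric expressions: in JS, +[] is 0 and both +!![] and !+[]
# coerce to 1 in arithmetic, so chains like !+[]+!![]+!![] evaluate to 3; each expression
# string is mapped to its decimal value (and bracketed variants to the indexed form).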
n = {'!+[]+!![]+!![]+!![]+!![]+!![]+!![]+!![]+!![]': '9',
'!+[]+!![]+!![]+!![]+!![]': '5', '!+[]+!![]+!![]+!![]': '4',
'!+[]+!![]+!![]+!![]+!![]+!![]': '6', '!+[]+!![]': '2',
'!+[]+!![]+!![]': '3', '(+![]+([]+[]))': '0', '(+[]+[])': '0', '+[]': '0',
'(+!![]+[])': '1', '!+[]+!![]+!![]+!![]+!![]+!![]+!![]': '7',
'!+[]+!![]+!![]+!![]+!![]+!![]+!![]+!![]': '8', '+!![]': '1',
'[+[]]': '[0]', '!+[]+!+[]': '2', '[+!+[]]': '[1]', '(+20)': '20',
'[+!![]]': '[1]', '[+!+[]+[+[]]]': '[10]', '+(1+1)': '11'}
for i in xrange(2, 20):
key = '+!![]' * (i - 1)
key = '!+[]' + key
n['(' + key + ')'] = str(i)
key += '+[]'
n['(' + key + ')'] = str(i)
n['[' + key + ']'] = '[' + str(i) + ']'
for i in xrange(2, 10):
key = '!+[]+' * (i - 1) + '!+[]'
n['(' + key + ')'] = str(i)
n['[' + key + ']'] = '[' + str(i) + ']'
key = '!+[]' + '+!![]' * (i - 1)
n['[' + key + ']'] = '[' + str(i) + ']'
for i in xrange(0, 10):
key = '(+(+!+[]+[%d]))' % (i)
n[key] = str(i + 10)
key = '[+!+[]+[%s]]' % (i)
n[key] = '[' + str(i + 10) + ']'
for tens in xrange(2, 10):
for ones in xrange(0, 10):
key = '!+[]+' * (tens) + '[%d]' % (ones)
n['(' + key + ')'] = str(tens * 10 + ones)
n['[' + key + ']'] = '[' + str(tens * 10 + ones) + ']'
for hundreds in xrange(1, 10):
for tens in xrange(0, 10):
for ones in xrange(0, 10):
key = '+!+[]' * hundreds + '+[%d]+[%d]))' % (tens, ones)
if hundreds > 1: key = key[1:]
key = '(+(' + key
n[key] = str(hundreds * 100 + tens * 10 + ones)
return n
def to_base(n, base, digits="0123456789abcdefghijklmnopqrstuvwxyz"):
n, base = int(n), int(base)
if n < base:
return digits[n]
else:
return to_base(n // base, base, digits).lstrip(digits[0]) + digits[n % base]
def cfunfuck(fuckedup):
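# Appears to target Cloudflare-style IUAM arithmetic challenges: it pulls out the seed
# assignment and the +=/-=/*= update terms, decodes each JSFuck expression with JSUnfuck,
# then exec's them in order and returns the final numeric answer as a string.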
fuck = re.findall(r's,t,o,p,b,r,e,a,k,i,n,g,f,\s*(\w+=).*?:\+?\(?(.*?)\)?\}', fuckedup)
fucks = re.findall(r'(\w+)\.\w+([\+\-\*\/]=)\+?\(?(.*?)\)?;', fuckedup)
endunfuck = fuck[0][0].split('=')[0]
unfuck = JSUnfuck(fuck[0][1]).decode()
unfuck = re.sub(r'[\(\)]', '', unfuck)
unfuck = fuck[0][0] + unfuck
exec (unfuck)
for fucker in fucks:
unfucker = JSUnfuck(fucker[2]).decode()
unfucker = re.sub(r'[\(\)]', '', unfucker)
unfucker = fucker[0] + fucker[1] + unfucker
exec (unfucker)
return str(eval(endunfuck))
def main():
with open(sys.argv[1]) as f:
start_js = f.read()
print JSUnfuck(start_js).decode()
if __name__ == '__main__':
sys.exit(main())
|
[
"[email protected]"
] | |
c45b64bfb7dc8d638eb42afda514c9af33168a82
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/4549962f8535c6892637c74c8c7dd3f8953a1678-<compare_rules>-bug.py
|
064c8abad58ff66bf4f274985b7d40b35b3c1717
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,130 |
py
|
def compare_rules(self):
'\n\n :return:\n '
rules_to_modify = []
rules_to_delete = []
rules_to_add = deepcopy(self.rules)
for current_rule in self.current_rules:
current_rule_passed_to_module = False
for new_rule in self.rules[:]:
if (current_rule['Priority'] == new_rule['Priority']):
current_rule_passed_to_module = True
rules_to_add.remove(new_rule)
modified_rule = self._compare_rule(current_rule, new_rule)
if modified_rule:
modified_rule['Priority'] = int(current_rule['Priority'])
modified_rule['RuleArn'] = current_rule['RuleArn']
modified_rule['Actions'] = new_rule['Actions']
modified_rule['Conditions'] = new_rule['Conditions']
rules_to_modify.append(modified_rule)
break
if ((not current_rule_passed_to_module) and (not current_rule['IsDefault'])):
rules_to_delete.append(current_rule['RuleArn'])
return (rules_to_add, rules_to_modify, rules_to_delete)
|
[
"[email protected]"
] | |
968b4a3eb22447b3acbffbd207b2a8cff46648d8
|
c7b4baa2779a0fc02e363f07c88b4d1d8cc33ffe
|
/gahtc/website/migrations/0034_merge.py
|
a748bb606b00e44de2215f0d021cbbdb0548dd72
|
[] |
no_license
|
NiJeLorg/GAHTC
|
6d5c8b2d4b9244c8874ad60c16cd7d55a3535075
|
8ba3360f6e2a8ad0b937a60c3c022eaac4a7cd46
|
refs/heads/master
| 2022-12-08T19:26:05.800635 | 2018-06-07T02:31:43 | 2018-06-07T02:31:43 | 41,111,268 | 2 | 0 | null | 2022-11-22T01:43:36 | 2015-08-20T18:07:02 |
HTML
|
UTF-8
|
Python
| false | false | 295 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('website', '0032_auto_20161108_0210'),
('website', '0033_profile_verified'),
]
operations = [
]
|
[
"[email protected]"
] | |
5a4893383d5c402e74ad89801720feac3f460235
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-sblp-obt/sblp_ut=3.5_rd=1_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=60/params.py
|
cb30c863dd706689a5cbcad57cb8e6da369c00fe
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 248 |
py
|
{'cpus': 4,
'duration': 30,
'final_util': '3.636000',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '1',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'RUN',
'trial': 60,
'utils': 'uni-medium-3'}
|
[
"[email protected]"
] | |
f53273e4fb7d5bfa568073ebd17ceb3d4f151c7b
|
9b4e80a2dc6a660a33e3599515100a172038f922
|
/python/HI/dijet_analysis/pp/Pythia6_Dijet250_pp_TuneZ2_5020GeV_cff.py
|
e1c952c1ed897b630e420d7910bef5bc8c0f8c8f
|
[] |
no_license
|
Jelov/genproductions
|
9013901ebcc58e6cfd13c69a52692dfc1994e280
|
c65eab700fd6026ebec068d4b90366cc1387e51b
|
refs/heads/master
| 2021-01-21T09:29:16.399045 | 2015-12-07T16:19:22 | 2015-12-07T16:19:22 | 48,558,617 | 0 | 0 | null | 2015-12-24T22:45:32 | 2015-12-24T22:45:32 | null |
UTF-8
|
Python
| false | false | 1,701 |
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pyquen2015Settings_cff import *
generator = cms.EDFilter("PyquenGeneratorFilter",
collisionParameters5020GeV,
qgpParameters,
pyquenParameters,
doQuench = cms.bool(False),
bFixed = cms.double(0.0), ## fixed impact param (fm); valid only if cflag_=0
PythiaParameters = cms.PSet(pyquenPythiaDefaultBlock,
parameterSets = cms.vstring('pythiaUESettings',
'ppJets',
'kinematics'),
kinematics = cms.vstring ("CKIN(3)=250", #min pthat
"CKIN(4)=9999" #max pthat
)
),
cFlag = cms.int32(0), ## centrality flag
bMin = cms.double(0.0), ## min impact param (fm); valid only if cflag_!=0
bMax = cms.double(0.0) ## max impact param (fm); valid only if cflag_!=0
)
generator.doIsospin = cms.bool(False)
configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('PYTHIA (unquenched) dijets in NN (pt-hat > 250 GeV) at sqrt(s) = 2.76TeV')
)
ProductionFilterSequence = cms.Sequence(generator)
|
[
"[email protected]"
] | |
39f57f94034ec65afb9a31d785b493155269c325
|
70fa6468c768d4ec9b4b14fc94fa785da557f1b5
|
/lib/googlecloudsdk/core/resource/yaml_printer.py
|
541fac881b57f5f76872b5fb01b72c1a324f928a
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
kylewuolle/google-cloud-sdk
|
d43286ef646aec053ecd7eb58566ab2075e04e76
|
75f09ebe779e99fdc3fd13b48621fe12bfaa11aa
|
refs/heads/master
| 2020-04-20T22:10:41.774132 | 2019-01-26T09:29:26 | 2019-01-26T09:29:26 | 169,131,028 | 0 | 0 |
NOASSERTION
| 2019-02-04T19:04:40 | 2019-02-04T18:58:36 |
Python
|
UTF-8
|
Python
| false | false | 5,210 |
py
|
# -*- coding: utf-8 -*- #
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YAML format printer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import collections
from googlecloudsdk.core.resource import resource_printer_base
from googlecloudsdk.core.resource import resource_transform
from googlecloudsdk.core.yaml import dict_like
from googlecloudsdk.core.yaml import list_like
import six
from six.moves import range # pylint: disable=redefined-builtin
class YamlPrinter(resource_printer_base.ResourcePrinter):
"""Prints the YAML representations of JSON-serializable objects.
[YAML](http://www.yaml.org), YAML ain't markup language.
Printer attributes:
null=string: Display string instead of `null` for null/None values.
no-undefined: Does not display resource data items with null values.
For example:
printer = YamlPrinter(log.out)
printer.AddRecord({'a': ['hello', 'world'], 'b': {'x': 'bye'}})
produces:
---
a:
- hello
- world
b:
- x: bye
Attributes:
_yaml: Reference to the `yaml` module. Imported locally to improve startup
performance.
"""
def __init__(self, *args, **kwargs):
super(YamlPrinter, self).__init__(*args, retain_none_values=True, **kwargs)
# pylint:disable=g-import-not-at-top, Delay import for performance.
from ruamel import yaml
self._yaml = yaml
null = self.attributes.get('null')
def _FloatPresenter(unused_dumper, data):
return yaml.nodes.ScalarNode(
'tag:yaml.org,2002:float', resource_transform.TransformFloat(data))
def _LiteralLinesPresenter(dumper, data):
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
def _NullPresenter(dumper, unused_data):
if null in ('null', None):
return dumper.represent_scalar('tag:yaml.org,2002:null', 'null')
return dumper.represent_scalar('tag:yaml.org,2002:str', null)
def _OrderedDictPresenter(dumper, data):
return dumper.represent_mapping('tag:yaml.org,2002:map', data.items())
def _UndefinedPresenter(dumper, data):
r = repr(data)
if r == '[]':
return dumper.represent_list([])
if r == '{}':
return dumper.represent_dict({})
dumper.represent_undefined(data)
self._yaml.add_representer(float,
_FloatPresenter,
Dumper=yaml.dumper.SafeDumper)
self._yaml.add_representer(YamlPrinter._LiteralLines,
_LiteralLinesPresenter,
Dumper=yaml.dumper.SafeDumper)
self._yaml.add_representer(None,
_UndefinedPresenter,
Dumper=yaml.dumper.SafeDumper)
self._yaml.add_representer(type(None),
_NullPresenter,
Dumper=yaml.dumper.SafeDumper)
self._yaml.add_representer(collections.OrderedDict,
_OrderedDictPresenter,
Dumper=yaml.dumper.SafeDumper)
class _LiteralLines(six.text_type):
"""A yaml representer hook for literal strings containing newlines."""
def _UpdateTypesForOutput(self, val):
"""Dig through a dict of list of primitives to help yaml output.
Args:
val: A dict, list, or primitive object.
Returns:
An updated version of val.
"""
if isinstance(val, six.string_types) and '\n' in val:
return YamlPrinter._LiteralLines(val)
if list_like(val):
for i in range(len(val)):
val[i] = self._UpdateTypesForOutput(val[i])
return val
if dict_like(val):
for key in val:
val[key] = self._UpdateTypesForOutput(val[key])
return val
return val
def _AddRecord(self, record, delimit=True):
"""Immediately prints the given record as YAML.
Args:
record: A YAML-serializable Python object.
delimit: Prints resource delimiters if True.
"""
record = self._UpdateTypesForOutput(record)
self._yaml.safe_dump(
record,
stream=self._out,
default_flow_style=False,
indent=resource_printer_base.STRUCTURED_INDENTATION,
explicit_start=delimit,
# By default, the yaml module uses encoding=None on Py3 and
# encoding=utf8 on Py2. This is probably so you can write it directly to
# stdout and have it work, but since we put everything through the log
# module that handles the encoding there, we want to maintain everything
# as unicode strings here.
encoding=None)
|
[
"[email protected]"
] | |
366f823f67768d162e93c901c9222d0be85ff8d5
|
f0da7ad429b9a8820359bd51eee5d31e51cc1d77
|
/env/lib/python3.6/encodings/cp866.py
|
495d313c07b8d9299eb18024e9595f02c502b47c
|
[] |
no_license
|
DavidNganga/bag
|
e190ae1a2d33439394859c03456a19676c85be36
|
eb67ba9b62a99dc41bcff8aae267541f46023800
|
refs/heads/master
| 2020-05-03T15:12:17.909935 | 2019-05-04T05:36:25 | 2019-05-04T05:36:25 | 178,699,714 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 66 |
py
|
/home/david/.pyenv/versions/3.6.5/lib/python3.6/encodings/cp866.py
|
[
"[email protected]"
] | |
e8d1869e7a5f6cef61fa69fbf0439981ffd4f27f
|
402ed5374ab189c8599b56864c5ce066f34b26c6
|
/zfit/minimize.py
|
9fff38606b3bd8f93797462dbabbb11864a9a636
|
[
"BSD-3-Clause"
] |
permissive
|
kailiu77/zfit
|
db354e9c3eb4a41274af5363834fe231823c6d66
|
8bddb0ed3a0d76fde0aa2cdbf74434b0ee0ae8bb
|
refs/heads/master
| 2020-10-01T23:49:55.751825 | 2019-12-06T15:48:47 | 2019-12-06T15:48:47 | 227,650,723 | 1 | 0 |
BSD-3-Clause
| 2019-12-12T16:33:54 | 2019-12-12T16:33:53 | null |
UTF-8
|
Python
| false | false | 533 |
py
|
# Copyright (c) 2019 zfit
# from .minimizers.optimizers_tf import RMSPropMinimizer, GradientDescentMinimizer, AdagradMinimizer, AdadeltaMinimizer,
from .minimizers.optimizers_tf import Adam, WrapOptimizer
from .minimizers.minimizer_minuit import Minuit
from .minimizers.minimizers_scipy import Scipy
AdamMinimizer = Adam # legacy
MinuitMinimizer = Minuit # legacy
ScipyMinimizer = Scipy # legacy
__all__ = ['MinuitMinimizer', 'ScipyMinimizer', 'AdamMinimizer',
"WrapOptimizer",
"Adam", "Minuit", "Scipy"]
|
[
"[email protected]"
] | |
eca89db388397ccdddb20b4aa4430caec6456bc8
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_reran.py
|
0bd435238cf900398ead11d36e4a0cb93b838549
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 229 |
py
|
from xai.brain.wordbase.nouns._rerun import _RERUN
#calss header
class _RERAN(_RERUN, ):
def __init__(self,):
_RERUN.__init__(self)
self.name = "RERAN"
self.specie = 'nouns'
self.basic = "rerun"
self.jsondata = {}
|
[
"[email protected]"
] | |
6ebd1a7c1b5cc55a594b078e34b3f6bc74f6b175
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03283/s356634724.py
|
66f46b19caea622c6741e3c3d780abc573ed2012
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 735 |
py
|
import sys
sys.setrecursionlimit(10010010)
def abc106_d():
n, m, q = map(int, input().split())
table = [[0]*(n+1) for _ in range(n+1)]
for _ in range(m):
l, r = map(int, input().split())
table[l][r] += 1
query = [tuple(map(int, input().split())) for _ in range(q)]
dp = [[-1]*(n+1) for _ in range(n+1)]
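# calc(l, r) memoizes a 2D prefix count: the number of stored pairs with left endpoint <= l
# and right endpoint <= r, built by inclusion-exclusion over the `table` counts.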
def calc(l, r):
nonlocal dp
if l == 0 or r == 0: return 0
if dp[l][r] != -1: return dp[l][r]
res = calc(l-1, r) + calc(l, r-1) - calc(l-1, r-1) + table[l][r]
dp[l][r] = res
return res
for p, q in query:
ans = calc(q, q) - calc(q, p-1) - calc(p-1, q) + calc(p-1, p-1)
print(ans)
if __name__ == '__main__':
abc106_d()
|
[
"[email protected]"
] | |
cc317c0a962ac26af9736f63157cf024a9de03ae
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/sdssj_091615.49+132833.1/sdB_sdssj_091615.49+132833.1_lc.py
|
9d48a955080db7419fd8f8baacbfa6e2382d130a
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 371 |
py
|
from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[139.064542,13.475861], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_sdssj_091615.49+132833.1/sdB_sdssj_091615.49+132833.1_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
c167ec7756b4bc4ef5d1f3840ebfad46f6b0f822
|
356b5455a4fb86c49c800a6181323b7fabef2b08
|
/ppci/lang/ocaml/io.py
|
5170f0ba3e9070d2451879fd2abe2a52d781cc15
|
[
"BSD-2-Clause"
] |
permissive
|
obround/ppci
|
be7d1ce7832513629ee1301e7b67c0ceda38d668
|
ba0840bc5f4ffd889f882a814fb26f88cd854379
|
refs/heads/master
| 2023-02-11T13:47:35.439871 | 2021-01-05T22:33:08 | 2021-01-05T22:33:08 | 327,131,704 | 0 | 0 |
BSD-2-Clause
| 2021-01-05T22:08:23 | 2021-01-05T22:08:23 | null |
UTF-8
|
Python
| false | false | 598 |
py
|
""" OCaml i/o helpers.
"""
from ...format.io import BaseIoReader
class FileReader(BaseIoReader):
""" OCaml file reader helper with low level primitive read functions. """
def read_byte(self):
return self.read_bytes(1)[0]
def read_u8(self):
return self.read_fmt("B")
def read_s8(self):
return self.read_fmt("b")
def read_u16(self):
return self.read_fmt(">H")
def read_s16(self):
return self.read_fmt(">h")
def read_u32(self):
return self.read_fmt(">I")
def read_s32(self):
return self.read_fmt(">i")
|
[
"[email protected]"
] | |
c01ac9f16fe7691cc7f818e01188598024e8e91e
|
1c5f4a13a5d67201b3a21c6e61392be2d9071f86
|
/.VirtualEnv/Lib/site-packages/influxdb_client/domain/log_event.py
|
0de871390b38b09d09f1940547d519a8f48899d8
|
[] |
no_license
|
ArmenFirman/FastAPI-InfluxDB
|
19e3867c2ec5657a9428a05ca98818ca7fde5fd0
|
b815509c89b5420f72abf514562e7f46dcd65436
|
refs/heads/main
| 2023-06-24T20:55:08.361089 | 2021-07-29T00:11:18 | 2021-07-29T00:11:18 | 390,462,832 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,328 |
py
|
# coding: utf-8
"""
Influx OSS API Service.
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class LogEvent(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'time': 'datetime',
'message': 'str',
'run_id': 'str'
}
attribute_map = {
'time': 'time',
'message': 'message',
'run_id': 'runID'
}
def __init__(self, time=None, message=None, run_id=None): # noqa: E501,D401,D403
"""LogEvent - a model defined in OpenAPI.""" # noqa: E501
self._time = None
self._message = None
self._run_id = None
self.discriminator = None
if time is not None:
self.time = time
if message is not None:
self.message = message
if run_id is not None:
self.run_id = run_id
@property
def time(self):
"""Get the time of this LogEvent.
Time event occurred, RFC3339Nano.
:return: The time of this LogEvent.
:rtype: datetime
""" # noqa: E501
return self._time
@time.setter
def time(self, time):
"""Set the time of this LogEvent.
Time event occurred, RFC3339Nano.
:param time: The time of this LogEvent.
:type: datetime
""" # noqa: E501
self._time = time
@property
def message(self):
"""Get the message of this LogEvent.
A description of the event that occurred.
:return: The message of this LogEvent.
:rtype: str
""" # noqa: E501
return self._message
@message.setter
def message(self, message):
"""Set the message of this LogEvent.
A description of the event that occurred.
:param message: The message of this LogEvent.
:type: str
""" # noqa: E501
self._message = message
@property
def run_id(self):
"""Get the run_id of this LogEvent.
the ID of the task that logged
:return: The run_id of this LogEvent.
:rtype: str
""" # noqa: E501
return self._run_id
@run_id.setter
def run_id(self, run_id):
"""Set the run_id of this LogEvent.
the ID of the task that logged
:param run_id: The run_id of this LogEvent.
:type: str
""" # noqa: E501
self._run_id = run_id
def to_dict(self):
"""Return the model properties as a dict."""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Return the string representation of the model."""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`."""
return self.to_str()
def __eq__(self, other):
"""Return true if both objects are equal."""
if not isinstance(other, LogEvent):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Return true if both objects are not equal."""
return not self == other
|
[
"[email protected]"
] | |
8170718f2b68613f34342a77d012941363b0f6fb
|
840c19fdeb97216ad66b3e7fe236cfc17a061606
|
/python/python08_집합형1_문자열.py
|
4c76c35c9c641f15d0c2509a96c5dd9896bec337
|
[] |
no_license
|
choicoding1026/data
|
07b431abdf36bcf7aefdf249fd1251acfd1e0334
|
684ca791108bc6ba0c315a70e3fa712c0ab2cca6
|
refs/heads/master
| 2022-12-24T17:55:25.092085 | 2020-10-08T04:54:39 | 2020-10-08T04:54:39 | 302,201,739 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 870 |
py
|
'''
Collection types
1) Strings
a. How to create strings
b. Functions (methods) provided by strings
c. Indexing and slicing
d. Characteristics of strings
'''
# 1. Four ways to create a string
m = "hello"
m2 = 'hello'
m3 = '''hello'''
m4 = """hello"""
print(m, type(m))
print(m2, type(m2))
print(m3, type(m3))
print(m4, type(m4))
# Purpose of triple-quoted strings ==> for very long strings, or when output must keep a specific format (indentation, tabs)
# 1. Using "" or ''
s = "asdfasdfasfasfasfasfasfewbdgaserfaserfwqesfafsdfasfdas" \
"fasfdasdfasfasfdasdfasfdasdfasfdasfdasfdasfasfdasfasfasfa" \
"sfasfasfasfasfasfasfasfasfas"
print(s)
# 2. Using triple quotes ==> indentation, tabs and other formatting are preserved, so readability is much higher.
s2 = '''
<html>
<body>
</body>
</html>
'''
print(s2)
|
[
"[email protected]"
] | |
2bef8052418e92e9d29c3d72a0a6fa8684c78926
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02607/s387198280.py
|
23e0aeefbb0e2b34051edcabc0f6fe62bf503efd
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 153 |
py
|
N = int(input())
A = [int(i) for i in input().split()]
ans = 0
for i,a in enumerate(A):
idx = i+1
if a%2 == 1 and idx%2 == 1:
ans += 1
print(ans)
|
[
"[email protected]"
] | |
f623b790a9c765f28d970b0da70fdaa7a05f6d52
|
8e5cc540c7a734073b3e761ecb058dc6cd27a8ef
|
/Windprof2.py
|
de7a66618b3659e2b6190af665b1e05a55280b96
|
[] |
no_license
|
rvalenzuelar/windprof_vis
|
05502bb4d0c6020144aa7c0320c03dbe80569e8a
|
b4fe48356401b9713731508f29d98e6234dd2317
|
refs/heads/master
| 2020-04-03T22:02:47.807828 | 2017-06-27T20:26:27 | 2017-06-27T20:26:27 | 41,705,667 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 26,896 |
py
|
"""
Plot NOAA wind profiler.
Files have extension HHw, where HH is UTC hour
Raul Valenzuela
August, 2015
"""
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.axes as maxes
import numpy as np
import numpy.ma as ma
import os
import Meteoframes as mf
import sounding as ps
from datetime import datetime, timedelta
from matplotlib import colors
from scipy.ndimage.filters import gaussian_filter
from scipy.interpolate import interp1d
from rv_utilities import add_colorbar, format_xaxis2, \
fill2D_with_nans, discrete_cmap
''' set directory and input files '''
# local_directory='/home/rvalenzuela/'
# local_directory='/Users/raulv/Documents/'
local_directory = os.path.expanduser('~')
base_directory = local_directory + '/WINDPROF'
def plot_vertical_shear(ax=None, wind=None, time=None, height=None):
diff = np.diff(wind, axis=0)
nrows, ncols = diff.shape
# cmap = custom_cmap(17)
# norm = colors.BoundaryNorm(np.arange(-20, 20), cmap.N)
img = ax.imshow(diff, interpolation='nearest', origin='lower',
# cmap=cmap,
cmap='RdBu',
vmin=-20, vmax=20,
# norm=norm,
extent=[0, ncols, 0, nrows],
aspect='auto')
add_colorbar(img, ax)
format_xaxis(ax, time)
format_yaxis(ax, height)
ax.invert_xaxis()
plt.draw()
def plot_single():
print base_directory
usr_case = raw_input('\nIndicate case number (i.e. 1): ')
wprof_resmod = raw_input(
'\nIndicate resolution mode (f = fine; c = coarse): ')
''' get wind profiler file names '''
wpfiles = get_filenames(usr_case)
# print wpfiles
''' make profile arrays '''
if wprof_resmod == 'f':
res = 'fine' # 60 [m]
elif wprof_resmod == 'c':
res = 'coarse' # 100 [m]
else:
print 'Error: indicate correct resolution (f or c)'
wspd, wdir, time, hgt = make_arrays(
files=wpfiles, resolution=res, surface=True, case=usr_case)
''' make time-height section of total wind speed '''
ax = plot_time_height(wspd, time, hgt, vrange=[
0, 20], cname='YlGnBu_r', title='Total wind speed')
l1 = 'BBY wind profiler - Total wind speed (color coded)'
''' add wind staffs '''
# palette = sns.color_palette()
# color = palette[2]
# add_windstaff returns nothing and `color` was never defined (the seaborn palette
# lines above are commented out), so call it directly with a plain black staff
add_windstaff(wspd, wdir, time, hgt, ax=ax, color='k')
''' add balloon sounding time-height section '''
# add_soundingTH('bvf_dry',usr_case,ax=ax,wptime=time,sigma=3)
# # l2 = '\nBBY balloon soundings - Relative humidity (%, contours)'
# # l2 = '\nBBY balloon soundings - Air pressure (hPa, contours)'
# # l2 = '\nBBY balloon soundings - Mixing ratio '+r'($g kg^{-1}$, contours)'
# # l2 = '\nBBY balloon soundings - Air temperature '+r'($^\circ$C, contours)'
# # l2 = '\nBBY balloon soundings - Brunt-Vaisala freq moist '+r'(x$10^{-4} [s^{-2}]$, contours)'
# l2 = '\nBBY balloon soundings - Brunt-Vaisala freq dry '+r'(x$10^{-4} [s^{-2}]$, contours)'
# l3 = '\nDate: '+ time[0].strftime('%Y-%m')
# plt.suptitle(l1+l2+l3)
''' make time-height section of meridional wind speed '''
# ax=plot_time_height(v, time, hgt, vrange=range(-20,22,2),cname='BrBG',title='Meridional component')
# add_windstaff(wspd,wdir,time,hgt,ax=ax, color=color)
''' make time-height section of zonal wind speed '''
# ax=plot_time_height(u, time, hgt, vrange=range(-20,22,2),cname='BrBG',title='Zonal component')
# add_windstaff(wspd,wdir,time,hgt,ax=ax, color=color)
def plot_time_height(ax=None, wspd=None, time=None, height=None,
spd_range=None,spd_delta=None, cmap=None,
title=None,cbar=None,cbarinvi=False,
timelabstep=None,kind='pcolormesh',
cbar_label=None):
''' NOAA wind profiler files after year 2000 indicate
the start time of averaging period; so a timestamp of
13 UTC indicates average between 13 and 14 UTC '''
''' make a color map of fixed colors '''
if len(spd_range) == 2:
bounds = range(spd_range[0], spd_range[1]+spd_delta, spd_delta)
else:
bounds = spd_range
cmap = discrete_cmap(len(bounds)+1,
base_cmap=cmap,
norm_range=[0.2,1.0])
norm = colors.BoundaryNorm(bounds, cmap.N)
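# pcolormesh expects cell edges, so the coordinate vectors have one more point than data columns/rows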
x = np.linspace(0,len(time),len(time)+1)
y = np.linspace(0,height.size, height.size+1)
wspdm = ma.masked_where(np.isnan(wspd),wspd)
# wspdm = ma.masked_where(wspdm<0, wspdm)
if kind == 'contourf':
x,y = np.meshgrid(np.linspace(0,len(time),len(time)),
np.linspace(0,height.size, height.size))
img = ax.contourf(x,y,wspdm,cmap=cmap, norm=norm)
else:
img = ax.pcolormesh(x,y,wspdm,cmap=cmap, norm=norm)
hcbar = add_colorbar(cbar, img,
loc='right',
label=cbar_label,
labelpad=20,
fontsize=12,
invisible=cbarinvi)
ax.set_xlim([-1.0, len(time) + 1.0])
if timelabstep is None:
timelabstep='1H'
format_xaxis2(ax, time, timelabstep=timelabstep)
ax.invert_xaxis()
ax.set_xlabel(r'$\leftarrow UTC \left[\stackrel{day}{time}\right]$',
fontsize=20)
ax.set_ylabel('Altitude MSL [km]')
if title is not None:
ax.text(0., 1.01, title, transform=ax.transAxes)
plt.subplots_adjust(left=0.08, right=0.95)
return [ax,hcbar,img]
def add_windstaff(wspd, wdir, time, hgt, ax=None, color='k',
vdensity=0,hdensity=0,head_size=0.05,
tail_length=4):
''' derive U and V components '''
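# meteorological convention: wdir is the direction the wind blows FROM,
# hence the minus signs when projecting speed onto u (east) and v (north)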
U = -wspd * np.sin(wdir * np.pi / 180.)
V = -wspd * np.cos(wdir * np.pi / 180.)
x = np.linspace(0.5,len(time)-0.5,len(time))
y = np.linspace(0.5,hgt.size-0.5, hgt.size)
X,Y=np.meshgrid(x, y)
''' change arrays density '''
U = fill2D_with_nans(inarray=U,
start=[1,1],
size=[vdensity,hdensity])
V = fill2D_with_nans(inarray=V,
start=[1,1],
size=[vdensity,hdensity])
Uzero = U - U
Vzero = V - V
Um = ma.masked_where(np.isnan(U),U)
Vm = ma.masked_where(np.isnan(V),V)
''' barb tail '''
ax.barbs(X, Y, Um, Vm,
color=color,
sizes={'height': 0},
length=tail_length,
linewidth=0.8,
barb_increments={'half': 1})
''' barb head '''
ax.barbs(X, Y, Uzero, Vzero, color=color,
sizes={'emptybarb': head_size},
fill_empty=True)
# ax.set_xlim([-3.0, len(time) + 3.0])
# format_xaxis(ax, time, delta_hours=1)
# ax.invert_xaxis()
format_yaxis2(ax, hgt)
# return U, V
def plot_colored_staff(ax=None, wspd=None, wdir=None, time=None,
height=None, cmap=None, spd_range=None,
spd_delta=None,
vdensity=None, hdensity=None, title=None,
cbar=True):
'''
NOAA wind profiler files after year 2000 indicate
the start time of averaging period; so a timestamp of
13 UTC indicates average between 13 and 14 UTC
There is a bug that plots wrong staff when using
meridional winds only
'''
from matplotlib import rcParams
rcParams['mathtext.default'] = 'sf'
spd_array = wspd
time_array = time
height_array = height
''' make a color map of fixed colors '''
cmap = discrete_cmap(24,base_cmap=cmap,norm_range=[0.1,0.9])
if len(spd_range) == 2:
bounds = range(spd_range[0], spd_range[1] + spd_delta, spd_delta)
# vmin = spd_range[0]
# vmax = spd_range[1]
else:
bounds = spd_range
# vmin = spd_range[0]
# vmax = spd_range[-1]
norm = colors.BoundaryNorm(bounds, cmap.N)
nrows, ncols = spd_array.shape
''' derive U and V components '''
U = -wspd * np.sin(wdir * np.pi / 180.)
V = -wspd * np.cos(wdir * np.pi / 180.)
x = np.array(range(len(time))) + 0.5 # wind staff in the middle of pixel
# wind staff in the middle of pixel
y = np.array(range(height_array.size)) + 0.5
X = np.tile(x, (y.size, 1)) # repeats x y.size times to make 2D array
Y = np.tile(y, (x.size, 1)).T # repeats y x.size times to make 2D array
Uzero = U - U
Vzero = V - V
''' change arrays density '''
if vdensity == 0 or hdensity == 0:
pass
else:
U = fill2D_with_nans(inarray=U,start=[3,0],
size=[vdensity,hdensity])
V = fill2D_with_nans(inarray=V,start=[3,0],
size=[vdensity,hdensity])
ax.barbs(X, Y, U, V, np.sqrt(U * U + V * V), sizes={'height': 0},
length=5, linewidth=0.5, barb_increments={'half': 1},
cmap=cmap, norm=norm)
barb = ax.barbs(X, Y, Uzero, Vzero, np.sqrt(U * U + V * V),
sizes={'emptybarb': 0.05}, fill_empty=True,
cmap=cmap, norm=norm)
if isinstance(cbar, maxes._subplots.Axes):
hcbar = add_colorbar(cbar, barb,loc='right',
label='[m s-1]',labelpad=20,
fontsize=12)
elif isinstance(cbar,bool) and cbar:
hcbar = add_colorbar(ax, barb,loc='right')
else:
hcbar = None
ax.set_xlim([-3.0, len(time_array) + 3.0])
format_xaxis(ax, time_array, delta_hours=1)
ax.invert_xaxis()
ax.set_xlabel(r'$\leftarrow UTC \left[\stackrel{day}{time}\right]$',
fontsize=12)
format_yaxis2(ax, height_array)
ax.set_ylabel('Altitude MSL [km]')
if title is not None:
ax.text(0., 1.01, title, transform=ax.transAxes)
plt.subplots_adjust(left=0.08, right=0.95)
plt.draw()
return [ax,hcbar]
def plot_scatter(ax=None, wspd=None, wdir=None, hgt=None, title=None):
if ax is None:
fig, ax = plt.subplots(
2, 2, sharex=True, sharey=True, figsize=(11, 8.5))
axes = [ax[0, 0], ax[0, 1], ax[1, 0], ax[1, 1]]
f = interp1d(hgt, range(len(hgt)))
HIDX = f([0.12, 0.5, 1.0, 2.0])
HIDX = np.round(HIDX, 0).astype(int)
wd_array = wdir
x = wd_array[0, :]
TIDX = ~np.isnan(x)
x = x[TIDX]
y1 = wd_array[HIDX[0], TIDX] # 120 m AGL
y2 = wd_array[HIDX[1], TIDX] # 500 m AGL
y3 = wd_array[HIDX[2], TIDX] # 1000 m AGL
y4 = wd_array[HIDX[3], TIDX] # 2000 m AGL
ys = [y1, y2, y3, y4]
s = 100
hue = 1.0
alpha = 0.5
colors = ['navy', 'green', 'red', 'purple']
labels = ['120m AGL', '500m AGL', '1000m AGL', '2000m AGL']
for ax, co, y, lab, n in zip(axes, colors, ys, labels, range(4)):
ax.scatter(x, y, s=s, color=co, edgecolors='none', alpha=alpha)
ax.text(0, 1.0, lab, transform=ax.transAxes)
ax.set_xticks(range(0, 360, 30))
ax.set_yticks(range(0, 360, 30))
ax.set_xlim([0, 360])
ax.set_ylim([0, 360])
if n in [0, 2]:
ax.set_ylabel('wind aloft')
if n in [2, 3]:
ax.set_xlabel('surface wind')
ax.axvline(180, linewidth=2, color='k')
ax.axhline(180, linewidth=2, color='k')
ax.invert_xaxis()
plt.suptitle(title)
plt.subplots_adjust(hspace=0.05, wspace=0.05)
plt.draw()
def plot_scatter2(ax=None, wspd=None, wdir=None, hgt=None, time=None,
mAGL=None, lim_surf=None, lim_aloft=None, color=None):
x = wdir[0, :]
TIDX = ~np.isnan(x) # time index where surf obs is not nan
x = x[TIDX] # surf obs
f = interp1d(hgt, range(len(hgt)))
HIDX = f(mAGL / 1000.)
HIDX = np.round(HIDX, 0).astype(int)
y = wdir[HIDX, TIDX] # obs aloft
s = 100
hue = 1.0
alpha = 0.5
colors = ['navy', 'green', 'red', 'purple']
ax.scatter(x, y, s=s, color=color, edgecolors='none', alpha=alpha)
ax.set_xticks(range(0, 360, 180))
ax.set_yticks(range(0, 360, 180))
ax.set_xticklabels('')
ax.set_yticklabels('')
ax.set_xlim([0, 360])
ax.set_ylim([0, 360])
ax.axvline(lim_surf, linewidth=2, color=(0.5, 0.5, 0.5))
ax.axhline(lim_aloft, linewidth=2, color=(0.5, 0.5, 0.5))
ax.invert_xaxis()
plt.draw()
# time=np.asarray(time)
# timex=time[TIDX]
# for t,x, y in zip(timex, x, y):
# print [t,np.round(x,0), np.round(y,0)]
if time is not None:
TTA_IDX = np.where((x <= lim_surf) & (y <= lim_aloft))[0]
time = np.asarray(time)
time = time[TIDX]
# xtta=x[TTA_IDX]
# ytta=y[TTA_IDX]
timetta = time[TTA_IDX]
# for x,y,t in zip(xtta,ytta,timetta):
# print [t, np.round(x,1), np.round(y,1)]
return timetta
def get_tta_times(resolution='coarse', surface=True, case=None,
lim_surf=125, lim_aloft=170, mAGL=120,
continuous=True, homedir=None):
'''
Note:
I calibrated default values by comparing retrieved times with
windprof time-height section plots for all ground radar cases (RV)
'''
_, wdir, time, hgt = make_arrays(
resolution=resolution, surface=surface, case=case,
homedir=homedir)
x = wdir[0, :]
TIDX = ~np.isnan(x) # time index where surf obs is not nan
x = x[TIDX] # surf obs
f = interp1d(hgt, range(len(hgt)))
HIDX = f(mAGL / 1000.)
HIDX = np.round(HIDX, 0).astype(int)
y = wdir[HIDX, TIDX] # obs aloft
TTA_IDX = np.where((x <= lim_surf) & (y <= lim_aloft))[0]
time = np.asarray(time)
time = time[TIDX]
timetta = time[TTA_IDX]
if continuous:
''' fills with datetime when there is 1hr gap and remove
portions that are either post frontal (case 9)
or shorter than 5hr (case13)'''
diff = np.diff(timetta)
onehrgaps = np.where(diff == timedelta(seconds=7200))
onehr = timedelta(hours=1)
timetta_cont = np.append(timetta, timetta[onehrgaps] + onehr)
timetta_cont = np.sort(timetta_cont)
diff = np.diff(timetta_cont)
jump_idx = np.where(diff > timedelta(seconds=3600))[0]
if jump_idx.size:  # plain `if jump_idx:` misses a jump at index 0 and fails for more than one jump
if len(timetta_cont) - jump_idx > jump_idx:
return timetta_cont[jump_idx + 1:]
else:
return timetta_cont[:jump_idx + 1]
else:
return timetta_cont
else:
return timetta
def get_surface_data(usr_case, homedir=None):
''' set directory and input files '''
case = 'case' + usr_case.zfill(2)
casedir = homedir + '/' + case
out = os.listdir(casedir)
out.sort()
files = []
for f in out:
if f[-3:] in ['met','urf']:
files.append(f)
file_met = []
for f in files:
if f[:3] == 'bby':
file_met.append(casedir + '/' + f)
df = []
for f in file_met:
meteo = mf.parse_surface(f)
df.append(meteo)
if len(df) > 1:
surface = pd.concat(df)
else:
surface = df[0]
return surface
def make_arrays(resolution='coarse', surface=False,
case=None, period=False,
homedir=None):
wpfiles = get_filenames(case, homedir=homedir+'/WINDPROF')
wp = []
ncols = 0 # number of timestamps
for f in wpfiles:
if resolution == 'fine':
wp.append(mf.parse_windprof(f, 'fine'))
elif resolution == 'coarse':
wp.append(mf.parse_windprof(f, 'coarse'))
else:
print 'Error: resolution has to be "fine" or "coarse"'
ncols += 1
''' creates 2D arrays with spd and dir '''
nrows = len(
wp[0].HT.values) # number of altitude gates (fine same as coarse)
hgt = wp[0].HT.values
# print len(hgt)
wspd = np.empty([nrows, ncols])
wdir = np.empty([nrows, ncols])
timestamp = []
for i, p in enumerate(wp):
timestamp.append(p.timestamp)
''' fine resolution '''
spd = p.SPD.values
wspd[:, i] = spd
dirr = p.DIR.values
wdir[:, i] = dirr
if surface:
''' add 2 bottom rows for adding surface obs '''
bottom_rows = 2
na = np.zeros((bottom_rows, ncols))
na[:] = np.nan
wspd = np.flipud(np.vstack((np.flipud(wspd), na)))
wdir = np.flipud(np.vstack((np.flipud(wdir), na)))
''' make surface arrays '''
surface = get_surface_data(case, homedir=homedir+'/SURFACE')
hour = pd.TimeGrouper('H')
surf_wspd = surface.wspd.groupby(hour).mean()
surf_wdir = surface.wdir.groupby(hour).mean()
surf_st = np.where(np.asarray(timestamp) == surf_wspd.index[0])[0][0]
surf_en = np.where(np.asarray(timestamp) == surf_wspd.index[-1])[0][0]
wspd[0, surf_st:surf_en + 1] = surf_wspd
wdir[0, surf_st:surf_en + 1] = surf_wdir
hgt = np.hstack(([0., 0.05], hgt))
''' add last column for 00 UTC of last date '''
add_left = 1
nrows, _ = wspd.shape
na = np.zeros((nrows, add_left))
na[:] = np.nan
wspd = np.hstack((wspd, na))
wdir = np.hstack((wdir, na))
timestamp.append(timestamp[-1] + timedelta(hours=1))
if period:
time = np.asarray(timestamp)
ini = datetime(*(period['ini'] + [0, 0]))
end = datetime(*(period['end'] + [0, 0]))
idx = np.where((time >= ini) & (time <= end))[0]
return wspd[:, idx], wdir[:, idx], time[idx], hgt
return wspd, wdir, timestamp, hgt
def make_arrays2(resolution='coarse', add_surface=False, case=None,
period=False, interp_hgts=None):
'''
interpolates to grid with 40 gates,
first and last gate at 160 and 3750 m
(92 m resolution)
'''
import os
try:
wprofdir = os.environ['WPROF_PATH']
except KeyError:
print('*** Need to provide datadir or export WPROF_PATH ***')
try:
surfdir = os.environ['SURFACE_PATH']
except KeyError:
print('*** Need to provide datadir or export SURFACE_PATH ***')
wpfiles = get_filenames(case, homedir=wprofdir)
wpfiles.sort()
wp = []
ncols = 0 # number of timestamps
for f in wpfiles:
# print f
if resolution == 'fine':
wp.append(mf.parse_windprof(f, 'fine'))
elif resolution == 'coarse':
wp.append(mf.parse_windprof(f, 'coarse'))
else:
print 'Error: resolution has to be "fine" or "coarse"'
ncols += 1
''' creates 2D arrays with spd and dir '''
hgt = wp[0].HT.values
if interp_hgts is None:
# upper limit exceeds value in cases [3,7]
newh = np.linspace(0.160, 3.750, 40)
else:
# so we use custom limit
newh = interp_hgts
wspd = np.zeros(len(newh))
wdir = np.zeros(len(newh))
timestamp = []
first = True
for p in wp:
''' for each hourly profile '''
timestamp.append(p.timestamp)
fs = interp1d(hgt, p.SPD.values)
fd = interp1d(hgt, p.DIR.values)
news = fs(newh)
newd = fd(newh)
if first:
wspd = news
wdir = newd
first = False
else:
wspd = np.vstack((wspd, news))
wdir = np.vstack((wdir, newd))
wspd = wspd.T
wdir = wdir.T
timestamp = np.asarray(timestamp)
''' add 2 bottom rows for adding surface obs '''
bottom_rows = 2
na = np.zeros((bottom_rows, ncols)) + np.nan
wspd = np.flipud(np.vstack((np.flipud(wspd), na)))
wdir = np.flipud(np.vstack((np.flipud(wdir), na)))
if add_surface:
hour = pd.TimeGrouper('H')
''' make surface arrays '''
try:
surface = get_surface_data(case, homedir=surfdir)
surf_wspd = surface.wspd
surf_wdir = surface.wdir
u = -surf_wspd*np.sin(np.radians(surf_wdir))
v = -surf_wspd*np.cos(np.radians(surf_wdir))
u_mean = u.groupby(hour).mean()
v_mean = v.groupby(hour).mean()
surf_wspd = np.sqrt(u_mean**2 + v_mean**2)
surf_wdir = 270 - (np.arctan2(v_mean,u_mean)*180/np.pi)
surf_wdir[surf_wdir>360] = surf_wdir[surf_wdir>360]-360
surf_st = np.where(surf_wspd.index == timestamp[0])[0][0]
surf_en = np.where(surf_wspd.index == timestamp[-1])[0][0]
wspd[0, surf_st:surf_en + 1] = surf_wspd.values
wdir[0, surf_st:surf_en + 1] = surf_wdir.values
except ValueError:
wspd[0, surf_st:surf_en + 1] = surf_wspd.iloc[:surf_en+1].values
wdir[0, surf_st:surf_en + 1] = surf_wdir.iloc[:surf_en+1].values
hgt = np.hstack(([0., 0.05], newh))
''' add last column for 00 UTC of last date
(caution with some storms ending at 22UTC)
'''
# add_left = 1
# nrows, _ = wspd.shape
# na = np.zeros((nrows, add_left))
# na[:] = np.nan
# wspd = np.hstack((wspd, na))
# wdir = np.hstack((wdir, na))
# timestamp.append(timestamp[-1] + timedelta(hours=1))
if period:
time = np.asarray(timestamp)
ini = datetime(*(period['ini'] + [0, 0]))
end = datetime(*(period['end'] + [0, 0]))
idx = np.where((time >= ini) & (time <= end))[0]
return wspd[:, idx], wdir[:, idx], time[idx], hgt
return wspd, wdir, timestamp, hgt
def add_soundingTH(soundvar, usr_case, homedir=None, ax=None,
sigma=None, wptime=None, wphgt=None):
''' call 2D array made from soundings '''
out = ps.get_interp_array(soundvar, case=usr_case,
homedir=homedir)
sarray, shgt, stimestamp, _ = out
if sigma is not None:
sarray = gaussian_filter(sarray, sigma, mode='nearest')
' find sounding index corresponding to top of wp '
f = interp1d(shgt / 1000., range(len(shgt)))
soundtop_idx = int(f(wphgt[-1]))
if soundvar in ['TE', 'TD']:
sarray = sarray - 273.15
elif soundvar in ['RH']:
sarray[sarray > 100.] = 100.
elif soundvar in ['bvf_moist', 'bvf_dry']:
sarray = sarray * 10000.
ini = wptime[0].strftime('%Y-%m-%d %H:%M')
foo = wptime[-1] + timedelta(hours=1)
end = foo.strftime('%Y-%m-%d %H:%M')
wp_timestamp = np.arange(ini, end, dtype='datetime64[20m]')
''' allocate the array in the corresponding time '''
booleans = np.in1d(wp_timestamp, stimestamp)
idx = np.nonzero(booleans)
''' scale idx so has same dimensions as ncols of
windprof data (usend in imshow-extent); since
we have sounding data every 20m there are
3 observations per hour in sounding'''
idx = idx[0] / 3.
''' create TH sounding meshgrid using axes values of
imshow-extent (cols,rows of windprof image); '''
x = idx
vertical_gates = shgt[:soundtop_idx].shape[0]
''' y values are correct for wp coarse resolution; check
modifications when plotting wp fine resolution '''
y = np.linspace(0, 40, vertical_gates)
X, Y = np.meshgrid(x, y)
if soundvar == 'theta':
levels = range(282, 298)
elif soundvar == 'thetaeq':
levels = range(280, 311)
elif soundvar in ['bvf_moist', 'bvf_dry']:
levels = np.arange(-2.5, 3.5, 1.0)
# print levels
try:
cs = ax.contour(X, Y, sarray[:soundtop_idx, :],
levels=levels, colors='k', linewidths=0.8)
# ax.clabel(cs, levels, fmt='%1.1f', fontsize=12)
ax.contourf(X, Y, sarray[:soundtop_idx, :],
levels=levels, colors='none',
hatches=['*', '.', None, '/', '//'], zorder=10000)
except UnboundLocalError:
cs = ax.contour(X, Y, sarray, colors='k', linewidths=0.8)
ax.clabel(cs, fmt='%1.0f', fontsize=12)
def get_filenames(usr_case, homedir=None):
case = 'case' + usr_case.zfill(2)
casedir = homedir + '/' + case
out = os.listdir(casedir)
out.sort()
file_list = []
period = get_period(case=int(usr_case),outfmt='%y%j.%Hw')
for f in out:
if f[-9:] in period:
file_list.append(casedir + '/' + f)
return file_list
def get_period(case=None, outfmt=None):
reqdates = {'1': {'ini': [1998, 1, 18, 15], 'end': [1998, 1, 18, 20]},
# '2': {'ini': [1998, 1, 26, 4], 'end': [1998, 1, 26, 9]},
# '3': {'ini': [2001, 1, 23, 21], 'end': [2001, 1, 24, 2]},
'3': pd.date_range(start='2001-01-23 00:00',periods=31,freq='60T'),
# '4': {'ini': [2001, 1, 25, 15], 'end': [2001, 1, 25, 20]},
# '5': {'ini': [2001, 2, 9, 10], 'end': [2001, 2, 9, 15]},
# '6': {'ini': [2001, 2, 11, 3], 'end': [2001, 2, 11, 8]},
# '7': {'ini': [2001, 2, 17, 17], 'end': [2001, 2, 17, 22]},
'7': pd.date_range(start='2001-02-17 00:00',periods=25,freq='60T'),
'8': pd.date_range(start='2003-01-12 00:00',periods=72,freq='60T'),
'9': pd.date_range(start='2003-01-21 00:00',periods=72,freq='60T'),
'10': pd.date_range(start='2003-02-15 00:00',periods=48,freq='60T'),
'11': pd.date_range(start='2004-01-09 00:00',periods=24,freq='60T'),
'12': pd.date_range(start='2004-02-02 00:00',periods=24,freq='60T'),
'13': pd.date_range(start='2004-02-16 00:00',periods=96,freq='60T'),
'14': pd.date_range(start='2004-02-25 00:00',periods=24,freq='60T')
}
if outfmt is None:
return reqdates[str(case)]
else:
outs = []
dr = reqdates[str(case)]
for d in dr:
outs.append(d.strftime(outfmt))
return outs
def format_yaxis(ax, hgt, **kwargs):
hgt_res = np.unique(np.diff(hgt))[0]
if 'toplimit' in kwargs:
toplimit = kwargs['toplimit']
''' extentd hgt to toplimit km so all
time-height sections have a common yaxis'''
hgt = np.arange(hgt[0], toplimit, hgt_res)
f = interp1d(hgt, range(len(hgt)))
ys = np.arange(np.ceil(hgt[0]), hgt[-1], 0.2)
new_yticks = f(ys)
ytlabel = ['{:2.1f}'.format(y) for y in ys]
ax.set_yticks(new_yticks + 0.5)
ax.set_yticklabels(ytlabel)
def format_yaxis2(ax, hgt):
lasthgt = hgt[-1]+(hgt[-1]-hgt[-2])
hgt = np.append(hgt,lasthgt)
lenhgt = len(hgt)
ax.set_ylim(-5.0, lenhgt)
f = interp1d(hgt, range(len(hgt)))
ys = np.arange(0, 4.0, 0.2)
new_yticks = f(ys)
ytlabel = ['{:2.1f}'.format(y) for y in ys]
ax.set_yticks(new_yticks+0.5)
ax.set_yticklabels(ytlabel)
|
[
"[email protected]"
] | |
7af3612998917d3a3fe539480b61fad8f29ac57e
|
b852bcf96bd21f8aad61df473e29249315043af5
|
/tests/parsers/plist_plugins/ipod.py
|
aa0d497cb41ed32f0578e24465c098532344aa88
|
[
"Apache-2.0"
] |
permissive
|
tjemg/plaso
|
cad131da318bd6b23835b0f351f464e7edcdbc4a
|
58dd7d03463624c628187edea97eb2665069c29f
|
refs/heads/master
| 2020-04-08T21:53:54.863677 | 2016-09-12T14:17:59 | 2016-09-12T14:17:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,945 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the iPod plist plugin."""
import unittest
from plaso.formatters import ipod as _ # pylint: disable=unused-import
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers.plist_plugins import ipod
from tests.parsers.plist_plugins import test_lib
class TestIPodPlugin(test_lib.PlistPluginTestCase):
"""Tests for the iPod plist plugin."""
def testProcess(self):
"""Tests the Process function."""
plist_name = u'com.apple.iPod.plist'
plugin_object = ipod.IPodPlugin()
storage_writer = self._ParsePlistFileWithPlugin(
plugin_object, [plist_name], plist_name)
self.assertEqual(len(storage_writer.events), 4)
event_object = storage_writer.events[1]
timestamp = timelib.Timestamp.CopyFromString(u'2013-10-09 19:27:54')
self.assertEqual(event_object.timestamp, timestamp)
expected_message = (
u'Device ID: 4C6F6F6E65000000 '
u'Type: iPhone [10016] '
u'Connected 1 times '
u'Serial nr: 526F676572 '
u'IMEI [012345678901234]')
expected_message_short = u'{0:s}...'.format(expected_message[0:77])
self._TestGetMessageStrings(
event_object, expected_message, expected_message_short)
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.LAST_CONNECTED)
self.assertEqual(event_object.device_class, u'iPhone')
self.assertEqual(event_object.device_id, u'4C6F6F6E65000000')
self.assertEqual(event_object.firmware_version, 256)
self.assertEqual(event_object.imei, u'012345678901234')
self.assertEqual(event_object.use_count, 1)
event_object = storage_writer.events[3]
timestamp = timelib.Timestamp.CopyFromString(u'1995-11-22 18:25:07')
self.assertEqual(event_object.timestamp, timestamp)
self.assertEqual(event_object.device_id, u'0000A11300000000')
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
1bdf3cc297d6507dd8194bb94ae2200a0fed1fbe
|
8f5c1ad76f3f9aa67d6720154b4884c9fab2ecbc
|
/toontown/election/DistributedToonfestCogAI.py
|
fb45a06666860c1eaf845c616553eefdc9c5a469
|
[] |
no_license
|
RegDogg/ttr-2014-dev
|
eb0d9da3e91b9504b83804c27e1a00d87a0b7220
|
8a392ea4697cf15bd83accd01dcf26d0f87557eb
|
refs/heads/master
| 2023-07-13T02:40:56.171517 | 2021-07-12T00:31:28 | 2021-07-12T00:31:28 | 372,103,145 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,233 |
py
|
# Embedded file name: toontown.election.DistributedToonfestCogAI
from direct.interval.IntervalGlobal import *
from direct.fsm.FSM import FSM
from otp.ai.MagicWordGlobal import *
from toontown.election.DistributedHotAirBalloonAI import DistributedHotAirBalloonAI
from DistributedElectionCameraManagerAI import DistributedElectionCameraManagerAI
from DistributedSafezoneInvasionAI import DistributedSafezoneInvasionAI
from DistributedInvasionSuitAI import DistributedInvasionSuitAI
from InvasionMasterAI import InvasionMasterAI
from toontown.toonbase import ToontownGlobals
import SafezoneInvasionGlobals
import ElectionGlobals
import random
from otp.distributed.OtpDoGlobals import *
from direct.task import Task
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from direct.directnotify import DirectNotifyGlobal
from toontown.election import *
class DistributedToonfestCogAI(DistributedObjectAI, FSM):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedToonfestCogAI')
def __init__(self, air, operation = 'SpeedUp'):
DistributedObjectAI.__init__(self, air)
FSM.__init__(self, 'ToonfestCogFSM')
self.air = air
self.validOperations = ['SpeedUp', 'SlowDown', 'Reverse']
if operation in self.validOperations:
self.operation = operation
else:
print 'DistributedToonfestCogAI: Operation %s is not a valid operation.' % operation
self.operation = 'SpeedUp'
def enterOff(self):
self.requestDelete()
def setPos(self, x, y, z):
self.sendUpdate('setPosThroughAI', [x, y, z])
def setId(self, id):
self.sendUpdate('setIdThroughAI', [id])
def enterDown(self):
pass
def enterUp(self):
pass
def updateTower(self):
if not isinstance(self.air.toonfestTower, DistributedToonfestTowerAI) or not self.air.toonfestTower:
print 'DistributedToonfestCogAI: ERROR! Could not find the ToonFest Tower.'
else:
base = random.randrange(0, 3)
self.air.toonfestTower.updateTower(self.operation, base)
print 'DistributedToonfestCogAI: Told Tower to ' + self.operation + ' base number ' + str(base + 1)
|
[
"[email protected]"
] | |
a5d31969ad26e8a685b7b7d70d52b06ac9f25a93
|
fa0bd730981a4a7333e7858c03e2a16c75e9cf5c
|
/Chapter 1/mnist_V7.py
|
d9038c4c2272d53acc72f1b12840acee6575d0cc
|
[
"MIT"
] |
permissive
|
PacktPublishing/Deep-Learning-with-TensorFlow-2-and-Keras
|
4cb5f7249dcd1efe6ea5a5263fb862240ce303bb
|
e23d2b4a4292386b70977473805acb2f93ef16ca
|
refs/heads/master
| 2023-02-13T04:04:57.531730 | 2023-02-07T19:23:47 | 2023-02-07T19:23:47 | 228,759,428 | 311 | 214 |
MIT
| 2021-06-01T14:06:06 | 2019-12-18T04:42:07 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,946 |
py
|
import tensorflow as tf
import numpy as np
from tensorflow import keras
# network and training
EPOCHS = 20
BATCH_SIZE = 256
VERBOSE = 1
NB_CLASSES = 10 # number of outputs = number of digits
N_HIDDEN = 128
VALIDATION_SPLIT=0.2 # how much TRAIN is reserved for VALIDATION
DROPOUT = 0.3
# loading MNIST dataset
# verify
# the split between train and test is 60,000 and 10,000, respectively
# one-hot is automatically applied
mnist = keras.datasets.mnist
(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
#X_train is 60000 rows of 28x28 values --> reshaped in 60000 x 784
RESHAPED = 784
#
X_train = X_train.reshape(60000, RESHAPED)
X_test = X_test.reshape(10000, RESHAPED)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
#normalize in [0,1]
X_train, X_test = X_train / 255.0, X_test / 255.0
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
#one-hot
Y_train = tf.keras.utils.to_categorical(Y_train, NB_CLASSES)
Y_test = tf.keras.utils.to_categorical(Y_test, NB_CLASSES)
#build the model
model = tf.keras.models.Sequential()
model.add(keras.layers.Dense(N_HIDDEN,
input_shape=(RESHAPED,),
name='dense_layer', activation='relu'))
model.add(keras.layers.Dropout(DROPOUT))
model.add(keras.layers.Dense(N_HIDDEN,
name='dense_layer_2', activation='relu'))
model.add(keras.layers.Dropout(DROPOUT))
model.add(keras.layers.Dense(NB_CLASSES,
name='dense_layer_3', activation='softmax'))
# summary of the model
model.summary()
# compiling the model
model.compile(optimizer='Adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
# training the model
model.fit(X_train, Y_train,
batch_size=BATCH_SIZE, epochs=EPOCHS,
verbose=VERBOSE, validation_split=VALIDATION_SPLIT)
# evaluate the model
test_loss, test_acc = model.evaluate(X_test, Y_test)
print('Test accuracy:', test_acc)
# making predictions
predictions = model.predict(X_test)
|
[
"[email protected]"
] | |
117898cb3c43f04c6bf25322181fd0aa10d335c2
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02757/s809101133.py
|
792d3c9136512c8391a1450d9437aaf983b40525
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 421 |
py
|
n, p = map(int, input().split())
s = input()[::-1]
if p == 2:
ans = 0
for num, i in enumerate(s):
if int(i)%2 == 0:
ans += n-num
elif p == 5:
ans = 0
for num, i in enumerate(s):
if int(i)%5 == 0:
ans += n-num
else:
C = [0]*p
now = 0
for num, i in enumerate(s):
a = int(i)
now = (now+pow(10, num, p)*a)%p
C[now] += 1
ans = C[0]
for c in C:
ans += c*(c-1)//2
print(ans)
|
[
"[email protected]"
] | |
df4c5f3748bd5945cdcc4ac29d61933da4417e69
|
eb38517d24bb32cd8a33206d4588c3e80f51132d
|
/proyecto_nn.py
|
112dc9751e2184c3eda81cf9370647918e561ce6
|
[] |
no_license
|
Fernando23296/l_proy
|
2c6e209892112ceafa00c3584883880c856b6983
|
b7fdf99b9bd833ca1c957d106b2429cbd378abd3
|
refs/heads/master
| 2020-04-01T18:01:41.333302 | 2018-12-04T23:45:53 | 2018-12-04T23:45:53 | 153,466,681 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,892 |
py
|
from matplotlib import pyplot as plt
import numpy as np
'''
____
1
283,70
1
308,63
1
253,60
1
281,61
1
303,54
1
279,54
1
343,55
1
335,49
1
290,34
1
327,34
1
299,32
1
287,22
1
169,15
1
0,0
1
287,5
____
2
0,0
2
263,66
2
325,58
2
266,57
2
290,45
2
0,0
2
275,34
2
0,0
2
0,0
2
338,19
2
292,14
2
335,10
2
276,9
2
357,9
2
333,5
2
290,6
2
0,0
____
3
278,74
3
237,67
3
291,63
3
242,43
3
0,0
3
270,34
3
268,29
3
247,22
3
284,9
3
314,7
3
259,7
3
271,6
____
4
0,0
4
229,63
4
0,0
4
0,0
4
226,53
4
0,0
4
280,42
4
226,37
4
282,4
4
237,0
____
5
0,0
5
239,72
5
294,64
5
268,61
5
247,57
5
290,56
5
0,0
5
285,36
5
267,36
5
276,30
5
269,23
5
231,25
5
0,0
5
266,13
5
286,12
5
225,12
5
240,10
5
0,0
5
229,1
____
6
254,70
6
303,67
6
327,61
6
352,51
6
249,52
6
0,0
6
245,20
6
0,0
6
297,16
6
278,11
6
294,9
6
289,2
____
7
0,0
7
241,74
7
0,0
7
259,48
7
260,32
7
244,24
7
258,20
7
255,12
7
254,2
____
8
0,0
8
0,0
8
209,71
8
214,64
8
263,60
8
0,0
8
187,55
8
243,49
8
198,42
8
209,41
8
186,39
8
191,39
8
245,34
8
189,30
8
0,0
8
187,24
8
177,20
8
264,10
8
0,0
8
174,8
8
0,0
8
247,1
8
229,1
____
9
0,0
9
302,72
9
0,0
9
278,69
9
356,68
9
205,70
9
341,50
9
366,49
9
342,44
9
216,43
9
363,43
9
251,41
9
289,35
9
353,36
9
314,33
9
236,34
9
303,30
9
280,30
9
255,30
9
0,0
9
352,25
9
211,22
9
351,19
9
388,16
9
223,14
9
301,11
9
0,0
9
210,12
9
288,7
9
0,0
9
0,0
9
220,5
9
0,0
9
0,0
____
10
209,71
10
311,69
10
258,71
10
215,56
10
0,0
10
0,0
10
224,53
10
383,54
10
339,51
10
325,50
10
201,49
10
363,46
10
202,44
10
293,42
10
277,43
10
243,36
10
313,30
10
374,27
10
353,20
10
365,19
10
314,11
10
0,0
10
310,4
10
304,4
10
221,4
____
11
200,74
11
349,68
11
378,69
11
254,59
11
248,57
11
236,56
11
241,53
11
205,52
11
197,49
11
232,46
11
314,46
11
322,42
11
265,31
11
320,25
11
246,15
11
268,13
11
0,0
11
212,3
11
263,1
____
12
386,0
12
201,0
ex2.jpg
data 1
1[282,0]
2[383,54]
3[356,144]
4[263,212]
5[278,315]
6[276,410]
7[290,718]
8[282,915]
data 2
1[282,0]
2[383,54]
3[278,145]
4[241,302]
5[297,320]
6[226,493]
7[268,561]
8[290,653]
9[282,9915]
data 3
1[[282,0],
2[311,69],
3[243,201],
4[254,230],
5[245,324],
6[247,554],
7[325,666],
8[290,718],
9[282,915]]
data 4
1[[282,0],
2[302,148],
3[243,201],
4[259,276],
5[226,493],
6[268,561],
7[325,666],
8[279,738],
9[282,915]]
data 5
1[[282,0],
2[258,248],
3[297,320],
4[285,416],
5[226,509],
6[325,666],
7[343,739],
8[277,808],
9[282,915]]
data 6
1[[282,0],
2[278,145],
3[241,302],
4[245,324],
5[294,444],
6[226,493],
7[284,541],
8[303,738],
9[282,915]]
data 7
1[[282,0],
2[224,53],
3[205,146],
4[327,365],
5[226,493],
6[268,561],
7[303,738],
8[338,807],
9[282,915]]
especial
[[282 215 278 209 259 297 247 280 291 275 283 282]
[ 0 56 145 193 276 320 437 498 595 642 754 915]]
[[209,71],
[311,69],
[258,71],
[215,56],
[224,53],
[383,54],
[339,51],
[325,50],
[201,49],
[363,46],
[202,44],
[293,42],
[277,43],
[243,36],
[313,30],
[374,27],
[353,20],
[365,19],
[314,11],
[310,4],
[304,4],
[221,4]]
SORTED
[[201, 49], [202, 44], [209, 71], [215, 56], [221, 4], [224, 53], [243, 36], [258, 71], [277, 43], [293, 42], [304, 4], [310, 4], [311, 69], [313, 30], [314, 11], [325, 50], [339, 51], [353, 20], [363, 46], [365, 19], [374, 27], [383, 54]]
____
'''
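# each training row is [x, y, class_label]; a single sigmoid unit is fit below to separate the two classes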
data = [[304, 4, 1],
[310, 4, 1],
[311, 69, 1],
[313, 30, 1],
[314, 11, 1],
[325, 50, 1],
[339, 51, 1],
[353, 20, 1],
[363, 46, 1],
[365, 19, 1],
[374, 27, 1],
[383, 54, 1],
[282, 20, 0],
[280, 77, 0],
[278, 33, 0],
[270, 88, 0],
[268, 53, 0],
[290, 45, 0],
[320, 34,0],
[323, 44,0],
[330, 45,0]]
valor_random = [298,32]
def sigmoid(x):
return 1/(1+np.exp(-x))
def sigmoid_p(x):
return sigmoid(x) * (1-sigmoid(x))
def train():
w1 = np.random.randn()
w2 = np.random.randn()
b = np.random.randn()
iterations = 10000
learning_rate = 0.1
costs = []
for i in range(iterations):
ri = np.random.randint(len(data))
point = data[ri]
z = point[0] * w1 + point[1] * w2 + b
pred = sigmoid(z)
target = point[2]
# cost for current random point
        cost = np.square(pred - target)
        costs.append(cost)  # record the per-sample cost so the returned history is not empty
dcost_dpred = 2 * (pred - target)
dpred_dz = sigmoid_p(z)
dz_dw1 = point[0]
dz_dw2 = point[1]
dz_db = 1
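        # chain rule: dC/dw = dC/dpred * dpred/dz * dz/dw, with z = w1*x + w2*y + b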
dcost_dz = dcost_dpred * dpred_dz
dcost_dw1 = dcost_dz * dz_dw1
dcost_dw2 = dcost_dz * dz_dw2
dcost_db = dcost_dz * dz_db
w1 = w1 - learning_rate * dcost_dw1
w2 = w2 - learning_rate * dcost_dw2
b = b - learning_rate * dcost_db
return costs, w1, w2, b
costs, w1, w2, b = train()
print("valor w1:", w1)
print("valor w2:", w2)
print("valor b:", b)
cero=valor_random[0]
uno=valor_random[1]
z = w1 * cero + w2 * uno + b
pred = sigmoid(z)
print(pred)
|
[
"[email protected]"
] | |
c6efa3651b14b09fdfb53dbc5d496a53f514a83b
|
5dc77586e3e0f9de1f032fd2ca68494d8e58928f
|
/tests/expectations/core/test_expect_column_value_z_scores_to_be_less_than.py
|
286de2251857f9da62734fd75119a0cf4b4e6d2d
|
[
"Apache-2.0"
] |
permissive
|
great-expectations/great_expectations
|
dd7c22e6277d6b08bee3ff38a015e6e8cd434df6
|
b0290e2fd2aa05aec6d7d8871b91cb4478e9501d
|
refs/heads/develop
| 2023-09-04T09:30:26.395518 | 2023-09-02T00:00:13 | 2023-09-02T00:00:13 | 103,071,520 | 8,931 | 1,535 |
Apache-2.0
| 2023-09-14T19:57:16 | 2017-09-11T00:18:46 |
Python
|
UTF-8
|
Python
| false | false | 3,442 |
py
|
from typing import Optional, cast
import pandas as pd
import pytest
from great_expectations import DataContext
from great_expectations.core.expectation_validation_result import (
ExpectationValidationResult,
)
from great_expectations.self_check.util import get_test_validator_with_data
from great_expectations.util import build_in_memory_runtime_context
@pytest.fixture
def z_score_validation_result():
return ExpectationValidationResult(
success=True,
expectation_config={
"expectation_type": "expect_column_value_z_scores_to_be_less_than",
"kwargs": {
"column": "a",
"mostly": 0.9,
"threshold": 4,
"double_sided": True,
},
"meta": {},
},
result={
"element_count": 6,
"unexpected_count": 0,
"unexpected_percent": 0.0,
"partial_unexpected_list": [],
"missing_count": 0,
"missing_percent": 0.0,
"unexpected_percent_total": 0.0,
"unexpected_percent_nonmissing": 0.0,
},
exception_info={
"raised_exception": False,
"exception_traceback": None,
"exception_message": None,
},
meta={},
)
@pytest.mark.unit
def test_pandas_expect_column_value_z_scores_to_be_less_than_impl(
z_score_validation_result,
):
df = pd.DataFrame({"a": [1, 5, 22, 3, 5, 10]})
context: Optional[DataContext] = cast(
DataContext, build_in_memory_runtime_context(include_spark=False)
)
validator = get_test_validator_with_data(
execution_engine="pandas",
data=df,
context=context,
)
result = validator.expect_column_value_z_scores_to_be_less_than(
column="a", mostly=0.9, threshold=4, double_sided=True
)
assert result == z_score_validation_result
@pytest.mark.postgresql
def test_sa_expect_column_value_z_scores_to_be_less_than_impl(
z_score_validation_result, test_backends
):
if "postgresql" not in test_backends:
pytest.skip("test_database_store_backend_get_url_for_key requires postgresql")
df = pd.DataFrame({"a": [1, 5, 22, 3, 5, 10]})
context: Optional[DataContext] = cast(
DataContext, build_in_memory_runtime_context(include_spark=False)
)
validator = get_test_validator_with_data(
execution_engine="postgresql",
table_name="expect_column_value_z_scores_to_be_less_than_impl_1",
data=df,
context=context,
)
result = validator.expect_column_value_z_scores_to_be_less_than(
column="a", mostly=0.9, threshold=4, double_sided=True
)
assert result == z_score_validation_result
# noinspection PyUnusedLocal
@pytest.mark.spark
def test_spark_expect_column_value_z_scores_to_be_less_than_impl(
spark_session, basic_spark_df_execution_engine, z_score_validation_result
):
df = pd.DataFrame({"a": [1, 5, 22, 3, 5, 10]})
context: Optional[DataContext] = cast(
DataContext, build_in_memory_runtime_context(include_pandas=False)
)
validator = get_test_validator_with_data(
execution_engine="spark",
data=df,
context=context,
)
result = validator.expect_column_value_z_scores_to_be_less_than(
column="a", mostly=0.9, threshold=4, double_sided=True
)
assert result == z_score_validation_result
|
[
"[email protected]"
] | |
ec00e7616ff2b1478df290425ccd36f44d326853
|
7bededcada9271d92f34da6dae7088f3faf61c02
|
/pypureclient/flasharray/FA_2_24/models/policy_member_response.py
|
004d52f22194a5598a5e1b219c11dcc074aa1f85
|
[
"BSD-2-Clause"
] |
permissive
|
PureStorage-OpenConnect/py-pure-client
|
a5348c6a153f8c809d6e3cf734d95d6946c5f659
|
7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e
|
refs/heads/master
| 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 |
BSD-2-Clause
| 2023-09-08T09:08:30 | 2018-12-04T17:02:51 |
Python
|
UTF-8
|
Python
| false | false | 3,933 |
py
|
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.24
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_24 import models
class PolicyMemberResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'items': 'list[PolicyMember]'
}
attribute_map = {
'items': 'items'
}
required_args = {
}
def __init__(
self,
items=None, # type: List[models.PolicyMember]
):
"""
Keyword args:
items (list[PolicyMember]): Displays a list of all items after filtering. The values are displayed for each name, if meaningful.
"""
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `PolicyMemberResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `PolicyMemberResponse`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `PolicyMemberResponse`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `PolicyMemberResponse`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PolicyMemberResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PolicyMemberResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"[email protected]"
] | |
77d3496406cf164d4e8e89fe0a5dca548d70ffbf
|
bae04a67b13b5848ba2bd160792aa563738e9ec9
|
/botauth.py
|
613e76dbdbd951694cfce1b4de02f761eb2ef361
|
[] |
no_license
|
igrekus/stan_bot
|
0fb28bf4efed219b117b2d640590565691c24b45
|
43eda93c0799e6f5b2b3676e8cb1a7db32eeae4f
|
refs/heads/master
| 2023-03-18T12:53:45.736623 | 2021-03-09T12:55:25 | 2021-03-09T12:55:25 | 254,895,923 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,778 |
py
|
import dataset
class BotAuth:
def __init__(self, path='quotes.db'):
# TODO store active permits in memory
self.db = dataset.connect(f'sqlite:///{path}')
self.authorized: dataset.table.Table = self.db['tg_user']
self.permits: dataset.table.Table = self.db['tg_permits']
self.user_permit_map: dataset.table.Table = self.db['tg_user_permits']
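        # the two permits below form the base "voice" set: granted on registration and toggled by voice()/devoice()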
self.base_permits = list(self.permits.find(title=['post links', 'post media']))
def bot_user_exists(self, tg_user):
return bool(list(self.authorized.find(tg_id=tg_user.id)))
def register_tg_user(self, tg_user):
if self.bot_user_exists(tg_user):
return False
# TODO make bot user class
new_bot_user = self._upsert_bot_user(tg_user)
self._add_base_permits(new_bot_user)
return True
def has_base_permits(self, bot_user):
if not list(self.authorized.find(tg_id=bot_user.id)):
return False
return bool(list(
self.user_permit_map.find(tg_user=bot_user.id, tg_permit=[perm['id'] for perm in self.base_permits])
))
def voice(self, tg_user):
if not self.bot_user_exists(tg_user):
return False
return self._voice_bot_user({
'tg_id': tg_user.id,
'username': tg_user.username,
'first_name': tg_user.first_name,
'last_name': tg_user.last_name,
})
def devoice(self, tg_user):
if not self.bot_user_exists(tg_user):
return False
return self._devoice_bot_user({
'tg_id': tg_user.id,
'username': tg_user.username,
'first_name': tg_user.first_name,
'last_name': tg_user.last_name,
})
def _voice_bot_user(self, bot_user):
# TODO error handling
self._add_base_permits(bot_user)
return True
def _devoice_bot_user(self, bot_user):
# TODO error handling
self._revoke_base_permits(bot_user)
return True
def _upsert_bot_user(self, user):
new_bot_user = {
'tg_id': user.id,
'username': user.username,
'first_name': user.first_name,
'last_name': user.last_name,
}
self.authorized.upsert(new_bot_user, ['tg_id'])
return new_bot_user
def _add_base_permits(self, new_bot_user):
for perm in self.base_permits:
self.user_permit_map.upsert({
'tg_user': new_bot_user['tg_id'],
'tg_permit': perm['id']
}, ['tg_user', 'tg_permit'])
def _revoke_base_permits(self, bot_user):
for perm in self.base_permits:
self.user_permit_map.delete(tg_user=bot_user['tg_id'], tg_permit=perm['id'])
|
[
"[email protected]"
] | |
4b5037c080276fe1ebc9d520708a9920f70310e5
|
2061caff7999645ff8c590acf77ad5bf2b6da305
|
/source/toolkit.py
|
3ef5171ebbb3ef34f7536f0930c917808250278e
|
[
"CC0-1.0"
] |
permissive
|
wezu/pyweek21
|
63e36639fe52f3c6fad2616dbd5c27eb7e4f4bbd
|
aff8f2b6f6250e45763e77c12595c3dca177e864
|
refs/heads/master
| 2021-01-09T21:58:33.818859 | 2016-03-19T18:25:08 | 2016-03-19T18:25:08 | 52,712,685 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,683 |
py
|
from panda3d.core import *
from panda3d.bullet import *
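# NOTE: `render` and `loader` are the Panda3D ShowBase builtins; `path` (the asset-directory
# prefix used when loading the shaders below) is assumed to be supplied by the importing module.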
def loadObject(model, H, pos, world, worldNP, root=render, collision_solid=None):
new_model=loader.loadModel(model)
new_model.clearModelNodes()
new_model.reparentTo(root)
new_model.setPos(render, pos)
new_model.setH(render, H)
new_model.setShader(Shader.load(Shader.SLGLSL, path+'shaders/default_v.glsl', path+'shaders/default_f.glsl'))
if collision_solid:
collision_mesh=loader.loadModel(collision_solid)
else:
collision_mesh=loader.loadModel(model)
collision_mesh.setPos(render, pos)
collision_mesh.setH(render, H)
collision_mesh.flattenStrong()
bullet_mesh = BulletTriangleMesh()
geomNodes = collision_mesh.findAllMatches('**/+GeomNode')
geomNode = geomNodes.getPath(0).node()
geom = geomNode.getGeom(0)
bullet_mesh.addGeom(geom)
shape = BulletTriangleMeshShape(bullet_mesh, dynamic=False, bvh=True )
collision = worldNP.attachNewNode(BulletRigidBodyNode('object'))
collision.node().addShape(shape)
collision.setCollideMask(BitMask32.allOn())
world.attachRigidBody(collision.node())
return (new_model, collision)
def tex(file_name, srgb=False):
texture=loader.loadTexture(file_name)
tex_format=texture.getFormat()
if srgb:
if tex_format==Texture.F_rgb:
tex_format=Texture.F_srgb
elif tex_format==Texture.F_rgba:
tex_format=Texture.F_srgb_alpha
texture.setFormat(tex_format)
return texture
def pos2d(x,y):
return Point3(x,0,-y)
def rec2d(width, height):
return (-width, 0, 0, height)
def resetPivot2d(frame):
size=frame['frameSize']
frame.setPos(-size[0], 0, -size[3])
frame.flattenLight()
frame.setTransparency(TransparencyAttrib.MAlpha)
def fixSrgbTextures(model):
for tex_stage in model.findAllTextureStages():
tex=model.findTexture(tex_stage)
if tex:
file_name=tex.getFilename()
tex_format=tex.getFormat()
#print tex_stage, file_name, tex_format
newTex=loader.loadTexture(file_name)
if tex_stage.getMode()==TextureStage.M_normal:
tex_stage.setMode(TextureStage.M_normal_gloss)
if tex_stage.getMode()!=TextureStage.M_normal_gloss:
if tex_format==Texture.F_rgb:
tex_format=Texture.F_srgb
elif tex_format==Texture.F_rgba:
tex_format=Texture.F_srgb_alpha
newTex.setFormat(tex_format)
model.setTexture(tex_stage, newTex, 1)
|
[
"[email protected]"
] | |
341ecbd011b6ea477a13fd38a1dc991763043d9b
|
eada891517a2bad7f8a7a06d8a0304fa96647f62
|
/tests/test_model_definition/test_models.py
|
71d12aa1118e71b2b35e8d462fb2f124b03a9e6a
|
[
"MIT"
] |
permissive
|
rs-store-personal/ormar
|
75a16ca71fcdc1b6d7bfc6b912bbdb1e1dd54794
|
4dfbd79fc89d6e4dddd2e4a10c4624425057e37b
|
refs/heads/master
| 2023-07-11T00:28:59.122663 | 2021-08-10T02:33:49 | 2021-08-10T02:33:49 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 20,424 |
py
|
import asyncio
import base64
import datetime
import os
import uuid
from typing import List
import databases
import pydantic
import pytest
import sqlalchemy
import ormar
from ormar.exceptions import ModelError, NoMatch, QueryDefinitionError
from tests.settings import DATABASE_URL
database = databases.Database(DATABASE_URL, force_rollback=True)
metadata = sqlalchemy.MetaData()
class JsonSample(ormar.Model):
class Meta:
tablename = "jsons"
metadata = metadata
database = database
id: int = ormar.Integer(primary_key=True)
test_json = ormar.JSON(nullable=True)
blob = b"test"
blob2 = b"test2icac89uc98"
class LargeBinarySample(ormar.Model):
class Meta:
tablename = "my_bolbs"
metadata = metadata
database = database
id: int = ormar.Integer(primary_key=True)
test_binary: bytes = ormar.LargeBinary(max_length=100000, choices=[blob, blob2])
blob3 = os.urandom(64)
blob4 = os.urandom(100)
class LargeBinaryStr(ormar.Model):
class Meta:
tablename = "my_str_blobs"
metadata = metadata
database = database
id: int = ormar.Integer(primary_key=True)
test_binary: str = ormar.LargeBinary(
max_length=100000, choices=[blob3, blob4], represent_as_base64_str=True
)
class UUIDSample(ormar.Model):
class Meta:
tablename = "uuids"
metadata = metadata
database = database
id: uuid.UUID = ormar.UUID(primary_key=True, default=uuid.uuid4)
test_text: str = ormar.Text()
class UUIDSample2(ormar.Model):
class Meta:
tablename = "uuids2"
metadata = metadata
database = database
id: uuid.UUID = ormar.UUID(
primary_key=True, default=uuid.uuid4, uuid_format="string"
)
test_text: str = ormar.Text()
class User(ormar.Model):
class Meta:
tablename = "users"
metadata = metadata
database = database
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=100, default="")
class User2(ormar.Model):
class Meta:
tablename = "users2"
metadata = metadata
database = database
id: str = ormar.String(primary_key=True, max_length=100)
name: str = ormar.String(max_length=100, default="")
class Product(ormar.Model):
class Meta:
tablename = "product"
metadata = metadata
database = database
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=100)
rating: int = ormar.Integer(minimum=1, maximum=5)
in_stock: bool = ormar.Boolean(default=False)
last_delivery: datetime.date = ormar.Date(default=datetime.datetime.now)
country_name_choices = ("Canada", "Algeria", "United States", "Belize")
country_taxed_choices = (True,)
country_country_code_choices = (-10, 1, 213, 1200)
class Country(ormar.Model):
class Meta:
tablename = "country"
metadata = metadata
database = database
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(
max_length=9, choices=country_name_choices, default="Canada",
)
taxed: bool = ormar.Boolean(choices=country_taxed_choices, default=True)
country_code: int = ormar.Integer(
minimum=0, maximum=1000, choices=country_country_code_choices, default=1
)
@pytest.fixture(autouse=True, scope="module")
def create_test_database():
engine = sqlalchemy.create_engine(DATABASE_URL)
metadata.drop_all(engine)
metadata.create_all(engine)
yield
metadata.drop_all(engine)
def test_model_class():
assert list(User.Meta.model_fields.keys()) == ["id", "name"]
assert issubclass(User.Meta.model_fields["id"].__class__, pydantic.fields.FieldInfo)
assert User.Meta.model_fields["id"].primary_key is True
assert isinstance(User.Meta.model_fields["name"], pydantic.fields.FieldInfo)
assert User.Meta.model_fields["name"].max_length == 100
assert isinstance(User.Meta.table, sqlalchemy.Table)
def test_wrong_field_name():
with pytest.raises(ModelError):
User(non_existing_pk=1)
def test_model_pk():
user = User(pk=1)
assert user.pk == 1
assert user.id == 1
@pytest.mark.asyncio
async def test_json_column():
async with database:
async with database.transaction(force_rollback=True):
await JsonSample.objects.create(test_json=dict(aa=12))
await JsonSample.objects.create(test_json='{"aa": 12}')
items = await JsonSample.objects.all()
assert len(items) == 2
assert items[0].test_json == dict(aa=12)
assert items[1].test_json == dict(aa=12)
items[0].test_json = "[1, 2, 3]"
assert items[0].test_json == [1, 2, 3]
@pytest.mark.asyncio
async def test_binary_column():
async with database:
async with database.transaction(force_rollback=True):
await LargeBinarySample.objects.create(test_binary=blob)
await LargeBinarySample.objects.create(test_binary=blob2)
items = await LargeBinarySample.objects.all()
assert len(items) == 2
assert items[0].test_binary == blob
assert items[1].test_binary == blob2
items[0].test_binary = "test2icac89uc98"
assert items[0].test_binary == b"test2icac89uc98"
@pytest.mark.asyncio
async def test_binary_str_column():
async with database:
async with database.transaction(force_rollback=True):
await LargeBinaryStr(test_binary=blob3).save()
await LargeBinaryStr.objects.create(test_binary=blob4)
items = await LargeBinaryStr.objects.all()
assert len(items) == 2
assert items[0].test_binary == base64.b64encode(blob3).decode()
items[0].test_binary = base64.b64encode(blob4).decode()
assert items[0].test_binary == base64.b64encode(blob4).decode()
assert items[1].test_binary == base64.b64encode(blob4).decode()
assert items[1].__dict__["test_binary"] == blob4
@pytest.mark.asyncio
async def test_uuid_column():
async with database:
async with database.transaction(force_rollback=True):
u1 = await UUIDSample.objects.create(test_text="aa")
u2 = await UUIDSample.objects.create(test_text="bb")
items = await UUIDSample.objects.all()
assert len(items) == 2
assert isinstance(items[0].id, uuid.UUID)
assert isinstance(items[1].id, uuid.UUID)
assert items[0].id in (u1.id, u2.id)
assert items[1].id in (u1.id, u2.id)
assert items[0].id != items[1].id
item = await UUIDSample.objects.filter(id=u1.id).get()
assert item.id == u1.id
item2 = await UUIDSample.objects.first()
item3 = await UUIDSample.objects.get(pk=item2.id)
assert item2.id == item3.id
assert isinstance(item3.id, uuid.UUID)
u3 = await UUIDSample2(**u1.dict()).save()
u1_2 = await UUIDSample.objects.get(pk=u3.id)
assert u1_2 == u1
u4 = await UUIDSample2.objects.get(pk=u3.id)
assert u3 == u4
@pytest.mark.asyncio
async def test_model_crud():
async with database:
async with database.transaction(force_rollback=True):
users = await User.objects.all()
assert users == []
user = await User.objects.create(name="Tom")
users = await User.objects.all()
assert user.name == "Tom"
assert user.pk is not None
assert users == [user]
lookup = await User.objects.get()
assert lookup == user
await user.update(name="Jane")
users = await User.objects.all()
assert user.name == "Jane"
assert user.pk is not None
assert users == [user]
await user.delete()
users = await User.objects.all()
assert users == []
@pytest.mark.asyncio
async def test_model_get():
async with database:
async with database.transaction(force_rollback=True):
with pytest.raises(ormar.NoMatch):
await User.objects.get()
assert await User.objects.get_or_none() is None
user = await User.objects.create(name="Tom")
lookup = await User.objects.get()
assert lookup == user
user = await User.objects.create(name="Jane")
await User.objects.create(name="Jane")
with pytest.raises(ormar.MultipleMatches):
await User.objects.get(name="Jane")
same_user = await User.objects.get(pk=user.id)
assert same_user.id == user.id
assert same_user.pk == user.pk
@pytest.mark.asyncio
async def test_model_filter():
async with database:
async with database.transaction(force_rollback=True):
await User.objects.create(name="Tom")
await User.objects.create(name="Jane")
await User.objects.create(name="Lucy")
user = await User.objects.get(name="Lucy")
assert user.name == "Lucy"
with pytest.raises(ormar.NoMatch):
await User.objects.get(name="Jim")
await Product.objects.create(name="T-Shirt", rating=5, in_stock=True)
await Product.objects.create(name="Dress", rating=4)
await Product.objects.create(name="Coat", rating=3, in_stock=True)
product = await Product.objects.get(name__iexact="t-shirt", rating=5)
assert product.pk is not None
assert product.name == "T-Shirt"
assert product.rating == 5
assert product.last_delivery == datetime.datetime.now().date()
products = await Product.objects.all(rating__gte=2, in_stock=True)
assert len(products) == 2
products = await Product.objects.all(name__icontains="T")
assert len(products) == 2
products = await Product.objects.exclude(rating__gte=4).all()
assert len(products) == 1
products = await Product.objects.exclude(rating__gte=4, in_stock=True).all()
assert len(products) == 2
products = await Product.objects.exclude(in_stock=True).all()
assert len(products) == 1
products = await Product.objects.exclude(name__icontains="T").all()
assert len(products) == 1
# Test escaping % character from icontains, contains, and iexact
await Product.objects.create(name="100%-Cotton", rating=3)
await Product.objects.create(name="Cotton-100%-Egyptian", rating=3)
await Product.objects.create(name="Cotton-100%", rating=3)
products = Product.objects.filter(name__iexact="100%-cotton")
assert await products.count() == 1
products = Product.objects.filter(name__contains="%")
assert await products.count() == 3
products = Product.objects.filter(name__icontains="%")
assert await products.count() == 3
@pytest.mark.asyncio
async def test_wrong_query_contains_model():
async with database:
with pytest.raises(QueryDefinitionError):
product = Product(name="90%-Cotton", rating=2)
await Product.objects.filter(name__contains=product).count()
@pytest.mark.asyncio
async def test_model_exists():
async with database:
async with database.transaction(force_rollback=True):
await User.objects.create(name="Tom")
assert await User.objects.filter(name="Tom").exists() is True
assert await User.objects.filter(name="Jane").exists() is False
@pytest.mark.asyncio
async def test_model_count():
async with database:
async with database.transaction(force_rollback=True):
await User.objects.create(name="Tom")
await User.objects.create(name="Jane")
await User.objects.create(name="Lucy")
assert await User.objects.count() == 3
assert await User.objects.filter(name__icontains="T").count() == 1
@pytest.mark.asyncio
async def test_model_limit():
async with database:
async with database.transaction(force_rollback=True):
await User.objects.create(name="Tom")
await User.objects.create(name="Jane")
await User.objects.create(name="Lucy")
assert len(await User.objects.limit(2).all()) == 2
@pytest.mark.asyncio
async def test_model_limit_with_filter():
async with database:
async with database.transaction(force_rollback=True):
await User.objects.create(name="Tom")
await User.objects.create(name="Tom")
await User.objects.create(name="Tom")
assert (
len(await User.objects.limit(2).filter(name__iexact="Tom").all()) == 2
)
@pytest.mark.asyncio
async def test_offset():
async with database:
async with database.transaction(force_rollback=True):
await User.objects.create(name="Tom")
await User.objects.create(name="Jane")
users = await User.objects.offset(1).limit(1).all()
assert users[0].name == "Jane"
@pytest.mark.asyncio
async def test_model_first():
async with database:
async with database.transaction(force_rollback=True):
tom = await User.objects.create(name="Tom")
jane = await User.objects.create(name="Jane")
assert await User.objects.first() == tom
assert await User.objects.first(name="Jane") == jane
assert await User.objects.filter(name="Jane").first() == jane
with pytest.raises(NoMatch):
await User.objects.filter(name="Lucy").first()
def not_contains(a, b):
return a not in b
def contains(a, b):
return a in b
def check_choices(values: tuple, ops: List):
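    # assert that each value is ("in") or is not ("out") a member of the corresponding choices tuple above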
ops_dict = {"in": contains, "out": not_contains}
checks = (country_name_choices, country_taxed_choices, country_country_code_choices)
assert all(
[ops_dict[op](value, check) for value, op, check in zip(values, ops, checks)]
)
@pytest.mark.asyncio
async def test_model_choices():
"""Test that choices work properly for various types of fields."""
async with database:
# Test valid choices.
await asyncio.gather(
Country.objects.create(name="Canada", taxed=True, country_code=1),
Country.objects.create(name="Algeria", taxed=True, country_code=213),
Country.objects.create(name="Algeria"),
)
with pytest.raises(ValueError):
name, taxed, country_code = "Saudi Arabia", True, 1
check_choices((name, taxed, country_code), ["out", "in", "in"])
await Country.objects.create(
name=name, taxed=taxed, country_code=country_code
)
with pytest.raises(ValueError):
name, taxed, country_code = "Algeria", False, 1
check_choices((name, taxed, country_code), ["in", "out", "in"])
await Country.objects.create(
name=name, taxed=taxed, country_code=country_code
)
with pytest.raises(ValueError):
name, taxed, country_code = "Algeria", True, 967
check_choices((name, taxed, country_code), ["in", "in", "out"])
await Country.objects.create(
name=name, taxed=taxed, country_code=country_code
)
with pytest.raises(ValueError):
name, taxed, country_code = (
"United States",
True,
1,
) # name is too long but is a valid choice
check_choices((name, taxed, country_code), ["in", "in", "in"])
await Country.objects.create(
name=name, taxed=taxed, country_code=country_code
)
with pytest.raises(ValueError):
name, taxed, country_code = (
"Algeria",
True,
-10,
) # country code is too small but is a valid choice
check_choices((name, taxed, country_code), ["in", "in", "in"])
await Country.objects.create(
name=name, taxed=taxed, country_code=country_code
)
with pytest.raises(ValueError):
name, taxed, country_code = (
"Algeria",
True,
1200,
) # country code is too large but is a valid choice
check_choices((name, taxed, country_code), ["in", "in", "in"])
await Country.objects.create(
name=name, taxed=taxed, country_code=country_code
)
# test setting after init also triggers validation
with pytest.raises(ValueError):
name, taxed, country_code = "Algeria", True, 967
check_choices((name, taxed, country_code), ["in", "in", "out"])
country = Country()
country.country_code = country_code
with pytest.raises(ValueError):
name, taxed, country_code = "Saudi Arabia", True, 1
check_choices((name, taxed, country_code), ["out", "in", "in"])
country = Country()
country.name = name
with pytest.raises(ValueError):
name, taxed, country_code = "Algeria", False, 1
check_choices((name, taxed, country_code), ["in", "out", "in"])
country = Country()
country.taxed = taxed
# check also update from queryset
with pytest.raises(ValueError):
name, taxed, country_code = "Algeria", False, 1
check_choices((name, taxed, country_code), ["in", "out", "in"])
await Country(name="Belize").save()
await Country.objects.filter(name="Belize").update(name="Vietnam")
@pytest.mark.asyncio
async def test_start_and_end_filters():
async with database:
async with database.transaction(force_rollback=True):
await User.objects.create(name="Markos Uj")
await User.objects.create(name="Maqua Bigo")
await User.objects.create(name="maqo quidid")
await User.objects.create(name="Louis Figo")
await User.objects.create(name="Loordi Kami")
await User.objects.create(name="Yuuki Sami")
users = await User.objects.filter(name__startswith="Mar").all()
assert len(users) == 1
users = await User.objects.filter(name__istartswith="ma").all()
assert len(users) == 3
users = await User.objects.filter(name__istartswith="Maq").all()
assert len(users) == 2
users = await User.objects.filter(name__iendswith="AMI").all()
assert len(users) == 2
users = await User.objects.filter(name__endswith="Uj").all()
assert len(users) == 1
users = await User.objects.filter(name__endswith="igo").all()
assert len(users) == 2
@pytest.mark.asyncio
async def test_get_and_first():
async with database:
async with database.transaction(force_rollback=True):
await User.objects.create(name="Tom")
await User.objects.create(name="Jane")
await User.objects.create(name="Lucy")
await User.objects.create(name="Zack")
await User.objects.create(name="Ula")
user = await User.objects.get()
assert user.name == "Ula"
user = await User.objects.first()
assert user.name == "Tom"
await User2.objects.create(id="Tom", name="Tom")
await User2.objects.create(id="Jane", name="Jane")
await User2.objects.create(id="Lucy", name="Lucy")
await User2.objects.create(id="Zack", name="Zack")
await User2.objects.create(id="Ula", name="Ula")
user = await User2.objects.get()
assert user.name == "Zack"
user = await User2.objects.first()
assert user.name == "Jane"
def test_constraints():
with pytest.raises(pydantic.ValidationError) as e:
Product(name="T-Shirt", rating=50, in_stock=True)
assert "ensure this value is less than or equal to 5" in str(e.value)
|
[
"[email protected]"
] | |
5e18cf19781ab567ab705e5609abb37f764adcdd
|
bf473d57dff028f3167811a1cb76d7ce8e95b42d
|
/ocrmypdf/qpdf.py
|
dcccc2ecf04069eb579e416cb1498c56452b9c55
|
[
"MIT"
] |
permissive
|
thecocce/OCRmyPDF
|
ef6cb0e5e0c55433e2b7db244d5ecc6b0e6b183c
|
514efa36fcc2f79ae173f429cb208a63ae968f5b
|
refs/heads/master
| 2020-07-12T01:14:08.685260 | 2016-07-24T18:21:46 | 2016-07-24T18:21:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,819 |
py
|
#!/usr/bin/env python3
# © 2015 James R. Barlow: github.com/jbarlow83
from subprocess import CalledProcessError, check_output, STDOUT, check_call
import sys
import os
from . import ExitCode, get_program
def check(input_file, log):
args_qpdf = [
get_program('qpdf'),
'--check',
input_file
]
try:
check_output(args_qpdf, stderr=STDOUT, universal_newlines=True)
except CalledProcessError as e:
if e.returncode == 2:
log.error("{0}: not a valid PDF, and could not repair it.".format(
input_file))
log.error("Details:")
log.error(e.output)
elif e.returncode == 3:
log.info("qpdf --check returned warnings:")
log.info(e.output)
else:
log.warning(e.output)
return False
return True
def repair(input_file, output_file, log):
args_qpdf = [
get_program('qpdf'), input_file, output_file
]
try:
check_output(args_qpdf, stderr=STDOUT, universal_newlines=True)
except CalledProcessError as e:
        if e.returncode == 3 and "operation succeeded" in e.output:
log.debug('qpdf found and fixed errors: ' + e.output)
log.debug(e.output)
return
        if e.returncode == 2 and "invalid password" in e.output:
log.error("{0}: this PDF is password-protected - password must "
"be removed for OCR".format(input_file))
sys.exit(ExitCode.input_file)
elif e.returncode == 2:
log.error("{0}: not a valid PDF, and could not repair it.".format(
input_file))
log.error("Details: " + e.output)
sys.exit(ExitCode.input_file)
else:
log.error("{0}: unknown error".format(
input_file))
log.error(e.output)
sys.exit(ExitCode.unknown)
def get_npages(input_file, log):
try:
pages = check_output(
[get_program('qpdf'), '--show-npages', input_file],
universal_newlines=True, close_fds=True)
except CalledProcessError as e:
        if e.returncode == 2 and 'No such file' in e.output:
            log.error(e.output)
            sys.exit(ExitCode.input_file)
        raise
return int(pages)
def split_pages(input_file, work_folder, npages):
"""Split multipage PDF into individual pages.
Incredibly enough, this multiple process approach is about 70 times
faster than using Ghostscript.
"""
for n in range(int(npages)):
args_qpdf = [
get_program('qpdf'), input_file,
'--pages', input_file, '{0}'.format(n + 1), '--',
os.path.join(work_folder, '{0:06d}.page.pdf'.format(n + 1))
]
check_call(args_qpdf)
|
[
"[email protected]"
] | |
e0cf215b999026a8636472b15a796e1222e3847e
|
30cffb7452220c2ac2961dd2e0f42e3b359a59c0
|
/simscale_sdk/models/zero_gradient_nbc.py
|
f75838ccaefa9dba193d8b59a8daffd25ce686e9
|
[
"MIT"
] |
permissive
|
vpurcarea/simscale-python-sdk
|
0bf892d8824f8d4599caa0f345d5ba28e038f5eb
|
6f2d12b2d21142bd854042c0fb402c2c797629e4
|
refs/heads/master
| 2023-03-14T04:31:06.226337 | 2021-03-03T16:20:01 | 2021-03-03T16:20:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,321 |
py
|
# coding: utf-8
"""
SimScale API
The version of the OpenAPI document: 0.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from simscale_sdk.configuration import Configuration
class ZeroGradientNBC(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str'
}
attribute_map = {
'type': 'type'
}
def __init__(self, type='ZERO_GRADIENT', local_vars_configuration=None): # noqa: E501
"""ZeroGradientNBC - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._type = None
self.discriminator = None
self.type = type
@property
def type(self):
"""Gets the type of this ZeroGradientNBC. # noqa: E501
:return: The type of this ZeroGradientNBC. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ZeroGradientNBC.
:param type: The type of this ZeroGradientNBC. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ZeroGradientNBC):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ZeroGradientNBC):
return True
return self.to_dict() != other.to_dict()
|
[
"simscale"
] |
simscale
|
bafdc62773bc9fd08ba308ca2db163705297ec18
|
c7a4e634ea260da4c6c94ca716f2910509579e91
|
/functional_tests/pages/projects.py
|
5a76026fc313007cba773855df88559c19282fb7
|
[
"MIT"
] |
permissive
|
XeryusTC/projman
|
858a72496ea6eaa23e8e0b511f8c17e037fa37b6
|
3db118d51a9fc362153593f5a862187bdaf0a73c
|
refs/heads/master
| 2016-08-12T09:36:48.371178 | 2016-05-07T21:12:02 | 2016-05-07T21:12:02 | 45,639,983 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,986 |
py
|
# -*- coding: utf-8 -*-
from page_objects import PageObject, PageElement, MultiPageElement
from selenium.webdriver.support.ui import Select
class BaseProjectPage(PageObject):
body = PageElement(tag_name='body')
content = PageElement(id_='content')
logout = PageElement(name='logout')
overlay = PageElement(id_='mui-overlay')
sidebar = PageElement(id_="sidebar")
sidebar_hide = PageElement(class_name='js-hide-sidebar')
sidebar_show = PageElement(class_name='js-show-sidebar')
inlist_link = PageElement(name='inlist_link', context=True)
action_link = PageElement(link_text='Actions', context=True)
create_project_link = PageElement(link_text='Create project', context=True)
settings_link = PageElement(name='settings')
menu = PageElement(name='menu')
_project_links = MultiPageElement(css="a.project", context=True)
def project_link(self, text):
for link in self._project_links(self.sidebar):
if text == link.text:
return link
class InlistPage(PageObject):
add_box = PageElement(name='text')
add_button = PageElement(xpath="//form//input[@id='submit-id-submit']")
thelist = MultiPageElement(css='#list .full-height')
listrows = MultiPageElement(css='#list .mui-row')
error_lists = MultiPageElement(css='.errorlist')
delete_item = PageElement(class_name='action-delete', context=True)
convert_action = PageElement(class_name='action-convert', context=True)
convert_project = PageElement(class_name='action-project', context=True)
class InlistDeletePage(PageObject):
content = PageElement(id_='content')
confirm = PageElement(xpath="//input[@type='submit']")
class ActionDeletePage(PageObject):
content = PageElement(id_='content')
confirm = PageElement(xpath="//input[@type='submit']")
class ConvertToActionPage(PageObject):
text_box = PageElement(name='text')
convert_button = PageElement(xpath="//input[@type='submit']")
class CreateProjectPage(PageObject):
name_box = PageElement(name='name')
description_box = PageElement(name='description')
create_button = PageElement(name='create')
error_lists = MultiPageElement(css='.errorlist')
class ProjectPage(PageObject):
info = PageElement(id_='info')
title = PageElement(xpath="//div[@id='info']//h1/parent::*")
add_box = PageElement(name='text')
add_button = PageElement(xpath="//form//input[@name='submit']")
edit = PageElement(css='.action-edit')
delete = PageElement(class_name='delete-project')
thelist = MultiPageElement(css='#list .mui-row')
checked_list = MultiPageElement(css='#list .mui-row.checked')
error_lists = MultiPageElement(css='.errorlist')
_item = PageElement(css='.action-item', context=True)
_list_text = PageElement(css='.action-item .action-text', context=True)
_delete_item = PageElement(class_name='action-delete', context=True)
_move_item = PageElement(class_name='action-edit-action', context=True)
_item_deadline = PageElement(css='.action-deadline', context=True)
apply_sort = PageElement(name='sort')
_sort_method = PageElement(name='sort_method')
_sort_order = PageElement(name='sort_order')
@property
def sort_method(self):
return Select(self._sort_method)
@property
def sort_order(self):
return Select(self._sort_order)
def list_text(self, context):
return [self._list_text(row).text for row in context]
def get_list_rows(self, context):
res = {}
for i in range(len(context)):
res[i] = {
'item': self._item(context[i]),
'text': self._list_text(context[i]),
'delete': self._delete_item(context[i]),
'edit': self._move_item(context[i]),
'deadline': self._item_deadline(context[i]),
}
# The following is for compatibility with older FTs
res[i]['move'] = res[i]['edit']
return res
ActionlistPage = ProjectPage
class EditPage(PageObject):
name = PageElement(name='name')
description = PageElement(name='description')
confirm = PageElement(name='update')
class ProjectDeletePage(PageObject):
content = PageElement(id_='content')
confirm = PageElement(xpath="//input[@value='Confirm']")
class EditActionPage(PageObject):
content = PageElement(id_='content')
confirm = PageElement(name='move')
errors = MultiPageElement(css='.errorlist')
form = PageElement(tag_name='form')
deadline_date = PageElement(name='deadline_0')
deadline_time = PageElement(name='deadline_1')
text_box = PageElement(name='text')
_select = PageElement(tag_name='select')
@property
def select(self):
return Select(self._select)
# Compatibility with FTs that test for the move button
MoveActionPage = EditActionPage
|
[
"[email protected]"
] | |
f20ca50144e3fcf25058d0b73bc4c50ebeeb42c5
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/coverage-big-888.py
|
bff62c8061c021e3e593de8fa9d76ef7293ada3e
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 13,350 |
py
|
count:int = 0
count2:int = 0
count3:int = 0
count4:int = 0
count5:int = 0
def foo(s: str) -> int:
return len(s)
def foo2(s: str, s2: str) -> int:
return len(s)
def foo3(s: str, s2: str, s3: str) -> int:
return len(s)
def foo4(s: str, s2: str, s3: str, s4: str) -> int:
return len(s)
def foo5(s: str, s2: str, s3: str, s4: str, s5: str) -> int:
return len(s)
class bar(object):
p: bool = True
def baz(self:"bar", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar2(object):
p: bool = True
p2: bool = True
def baz(self:"bar2", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar2", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
                if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar3(object):
p: bool = True
p2: bool = True
p3: bool = True
def baz(self:"bar3", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar3", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar3", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar4(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
def baz(self:"bar4", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar4", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar4", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar4", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar5(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
p5: bool = True
def baz(self:"bar5", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar5", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar5", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz5(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int], xx5: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
x5:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
y5:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
def qux5(y: int, y2: int, y3: int, y4: int, y5: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
nonlocal x5
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
print(bar().baz([1,2]))
|
[
"[email protected]"
] | |
2a6bfb83510ec56d87a9d444cc346d56a3fdbd9f
|
0abae2b0586605f6b99cb498ac8161297a7d72c0
|
/synthtorch/models/nconvnet.py
|
dc51157d7875fd02f732b81b30df5053159876dd
|
[
"Apache-2.0"
] |
permissive
|
jcreinhold/synthtorch
|
fc227d5597bb77e2018cb6a6cfee9bc086ff5001
|
bb6eb20641b2cae3cbb96421b12e03865b5c5095
|
refs/heads/master
| 2021-09-25T15:31:39.398836 | 2021-09-24T19:51:46 | 2021-09-24T19:51:46 | 155,944,524 | 23 | 5 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,273 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
synthtorch.models.nconvnet
define the class for a N layer CNN with
no max pool, increase in channels, or any of that
fancy stuff. This is generally used for testing
purposes
Author: Jacob Reinhold ([email protected])
Created on: Nov 2, 2018
"""
__all__ = ['SimpleConvNet']
from typing import Tuple
import logging
import torch
from torch import nn
logger = logging.getLogger(__name__)
class SimpleConvNet(torch.nn.Module):
def __init__(self, n_layers: int, n_input: int = 1, n_output: int = 1, kernel_size: Tuple[int] = (3, 3, 3),
dropout_prob: float = 0, dim: int = 3, **kwargs):
super(SimpleConvNet, self).__init__()
self.n_layers = n_layers
self.n_input = n_input
self.n_output = n_output
self.kernel_sz = kernel_size
self.dropout_prob = dropout_prob
self.dim = dim
self.criterion = nn.MSELoss()
if isinstance(kernel_size[0], int):
self.kernel_sz = [kernel_size for _ in range(n_layers)]
else:
self.kernel_sz = kernel_size
pad = nn.ReplicationPad3d if dim == 3 else \
nn.ReplicationPad2d if dim == 2 else \
nn.ReplicationPad1d
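        # each layer: replication-pad by kernel//2 per spatial dim (preserves size), then conv,
        # ReLU, instance norm and dropout, each picked to match the requested dimensionality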
self.layers = nn.ModuleList([nn.Sequential(
pad([ks // 2 for p in zip(ksz, ksz) for ks in p]),
nn.Conv3d(n_input, n_output, ksz) if dim == 3 else \
nn.Conv2d(n_input, n_output, ksz) if dim == 2 else \
nn.Conv1d(n_input, n_output, ksz),
nn.ReLU(),
nn.InstanceNorm3d(n_output, affine=True) if dim == 3 else \
nn.InstanceNorm2d(n_output, affine=True) if dim == 2 else \
nn.InstanceNorm1d(n_output, affine=True),
nn.Dropout3d(dropout_prob) if dim == 3 else \
nn.Dropout2d(dropout_prob) if dim == 2 else \
nn.Dropout(dropout_prob)) for ksz in self.kernel_sz])
def forward(self, x: torch.Tensor) -> torch.Tensor:
for l in self.layers:
x = l(x)
return x
def predict(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
return self.forward(x)
def freeze(self):
raise NotImplementedError
|
[
"[email protected]"
] | |
ffedf66584fb5b7a988b85720f9c11517f525e05
|
a5e71a333a86476b9cb1bdf6989bb5f47dd5e409
|
/ScrapePlugins/M/FoolSlide/FoolSlideDownloadBase.py
|
395abb1796f52b7fd3c704e900c0f38787d304a6
|
[] |
no_license
|
GDXN/MangaCMS
|
0e797299f12c48986fda5f2e7de448c2934a62bd
|
56be0e2e9a439151ae5302b3e6ceddc7868d8942
|
refs/heads/master
| 2021-01-18T11:40:51.993195 | 2017-07-22T12:55:32 | 2017-07-22T12:55:32 | 21,105,690 | 6 | 1 | null | 2017-07-22T12:55:33 | 2014-06-22T21:13:19 |
Python
|
UTF-8
|
Python
| false | false | 5,584 |
py
|
import os
import os.path
import nameTools as nt
import urllib.parse
import zipfile
import runStatus
import traceback
import bs4
import re
import json
import ScrapePlugins.RetreivalBase
import processDownload
import abc
class FoolContentLoader(ScrapePlugins.RetreivalBase.RetreivalBase):
@abc.abstractmethod
def groupName(self):
return None
@abc.abstractmethod
def contentSelector(self):
return None
retreivalThreads = 1
def getImage(self, imageUrl, referrer):
content, handle = self.wg.getpage(imageUrl, returnMultiple=True, addlHeaders={'Referer': referrer})
if not content or not handle:
raise ValueError("Failed to retreive image from page '%s'!" % referrer)
fileN = urllib.parse.unquote(urllib.parse.urlparse(handle.geturl())[2].split("/")[-1])
fileN = bs4.UnicodeDammit(fileN).unicode_markup
self.log.info("retreived image '%s' with a size of %0.3f K", fileN, len(content)/1000.0)
return fileN, content
def getImageUrls(self, baseUrl):
pageCtnt = self.wg.getpage(baseUrl)
# print("GetImageUrls")
# print("This series contains mature contents and is meant to be viewed by an adult audience." in pageCtnt)
if "This series contains mature contents and is meant to be viewed by an adult audience." in pageCtnt:
self.log.info("Adult check page. Confirming...")
pageCtnt = self.wg.getpage(baseUrl, postData={"adult": "true"})
if "This series contains mature contents and is meant to be viewed by an adult audience." in pageCtnt:
raise ValueError("Wat?")
soup = bs4.BeautifulSoup(pageCtnt, "lxml")
container = soup.find(self.contentSelector[0], id=self.contentSelector[1])
if not container:
raise ValueError("Unable to find javascript container div '%s'" % baseUrl)
# If there is a ad div in the content container, it'll mess up the javascript match, so
# find it, and remove it from the tree.
container.find('div', id='bottombar').decompose()
if container.find('div', class_='ads'):
container.find('div', class_='ads').decompose()
scriptText = container.script.get_text()
if not scriptText:
raise ValueError("No contents in script tag? '%s'" % baseUrl)
jsonRe = re.compile(r'var [a-zA-Z]+ ?= ?(\[.*?\]);', re.DOTALL)
jsons = jsonRe.findall(scriptText)
jsons = [tmp for tmp in jsons if len(tmp)>2]
if not jsons:
# print("Script = ", container.script)
raise ValueError("No JSON variable in script! '%s'" % baseUrl)
valid = False
for item in jsons:
loaded = json.loads(item)
bad = False
for image in loaded:
urlfname = os.path.split(urllib.parse.urlsplit(image['url']).path)[-1]
if image['filename'] != urlfname:
bad = True
if not bad:
arr = loaded
break
imageUrls = []
for item in arr:
scheme, netloc, path, query, fragment = urllib.parse.urlsplit(item['url'])
path = urllib.parse.quote(path)
itemUrl = urllib.parse.urlunsplit((scheme, netloc, path, query, fragment))
imageUrls.append((item['filename'], itemUrl, baseUrl))
if not imageUrls:
raise ValueError("Unable to find contained images on page '%s'" % baseUrl)
return imageUrls
def getLink(self, link):
sourceUrl = link["sourceUrl"]
seriesName = link["seriesName"]
chapterVol = link["originName"]
try:
self.log.info( "Should retreive url - %s", sourceUrl)
self.updateDbEntry(sourceUrl, dlState=1)
imageUrls = self.getImageUrls(sourceUrl)
if not imageUrls:
self.log.critical("Failure on retreiving content at %s", sourceUrl)
self.log.critical("Page not found - 404")
self.updateDbEntry(sourceUrl, dlState=-1)
return
self.log.info("Downloading = '%s', '%s' ('%s images)", seriesName, chapterVol, len(imageUrls))
dlPath, newDir = self.locateOrCreateDirectoryForSeries(seriesName)
if link["flags"] == None:
link["flags"] = ""
if newDir:
self.updateDbEntry(sourceUrl, flags=" ".join([link["flags"], "haddir"]))
chapterName = nt.makeFilenameSafe(chapterVol)
fqFName = os.path.join(dlPath, chapterName+"["+self.groupName+"].zip")
loop = 1
while os.path.exists(fqFName):
fqFName, ext = os.path.splitext(fqFName)
fqFName = "%s (%d)%s" % (fqFName, loop, ext)
loop += 1
self.log.info("Saving to archive = %s", fqFName)
images = []
for imageName, imgUrl, referrerUrl in imageUrls:
dummy_imageName, imageContent = self.getImage(imgUrl, referrerUrl)
images.append([imageName, imageContent])
if not runStatus.run:
self.log.info( "Breaking due to exit flag being set")
self.updateDbEntry(sourceUrl, dlState=0)
return
self.log.info("Creating archive with %s images", len(images))
if not images:
self.updateDbEntry(sourceUrl, dlState=-1, seriesName=seriesName, originName=chapterVol, tags="error-404")
return
#Write all downloaded files to the archive.
arch = zipfile.ZipFile(fqFName, "w")
for imageName, imageContent in images:
arch.writestr(imageName, imageContent)
arch.close()
filePath, fileName = os.path.split(fqFName)
self.updateDbEntry(sourceUrl, downloadPath=filePath, fileName=fileName)
dedupState = processDownload.processDownload(seriesName, fqFName, deleteDups=True, rowId=link['dbId'])
self.log.info( "Done")
self.updateDbEntry(sourceUrl, dlState=2, downloadPath=filePath, fileName=fileName, seriesName=seriesName, originName=chapterVol, tags=dedupState)
return
except Exception:
self.log.critical("Failure on retreiving content at %s", sourceUrl)
self.log.critical("Traceback = %s", traceback.format_exc())
self.updateDbEntry(sourceUrl, dlState=-1)
|
[
"[email protected]"
] |