hexsha (string, 40 chars) | size (int64, 3 to 1.03M) | ext (string, 10 classes) | lang (string, 1 class) | max_stars_repo_path (string, 3 to 972 chars) | max_stars_repo_name (string, 6 to 130 chars) | max_stars_repo_head_hexsha (string, 40 to 78 chars) | max_stars_repo_licenses (list, 1 to 10 items) | max_stars_count (int64, 1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime (string, 24 chars, nullable) | max_stars_repo_stars_event_max_datetime (string, 24 chars, nullable) | max_issues_repo_path (string, 3 to 972 chars) | max_issues_repo_name (string, 6 to 130 chars) | max_issues_repo_head_hexsha (string, 40 to 78 chars) | max_issues_repo_licenses (list, 1 to 10 items) | max_issues_count (int64, 1 to 116k, nullable) | max_issues_repo_issues_event_min_datetime (string, 24 chars, nullable) | max_issues_repo_issues_event_max_datetime (string, 24 chars, nullable) | max_forks_repo_path (string, 3 to 972 chars) | max_forks_repo_name (string, 6 to 130 chars) | max_forks_repo_head_hexsha (string, 40 to 78 chars) | max_forks_repo_licenses (list, 1 to 10 items) | max_forks_count (int64, 1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime (string, 24 chars, nullable) | max_forks_repo_forks_event_max_datetime (string, 24 chars, nullable) | content (string, 3 to 1.03M chars) | avg_line_length (float64, 1.13 to 941k) | max_line_length (int64, 2 to 941k) | alphanum_fraction (float64, 0 to 1)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
df71fa9bfc17373d41a19fa97cbd77687725d81b | 60,395 | py | Python | abstract/tools/address_parser.py | dchaplinsky/ragoogle | dccb3d29334c3220ea12c46c725c443c8bd725c0 | ["MIT"] | 3 | 2018-06-10T21:20:56.000Z | 2021-04-04T11:21:06.000Z | abstract/tools/address_parser.py | dchaplinsky/ragoogle | dccb3d29334c3220ea12c46c725c443c8bd725c0 | ["MIT"] | 7 | 2018-08-14T20:54:49.000Z | 2020-06-05T18:17:30.000Z | abstract/tools/address_parser.py | dchaplinsky/ragoogle | dccb3d29334c3220ea12c46c725c443c8bd725c0 | ["MIT"] | 3 | 2018-06-27T12:53:13.000Z | 2020-09-25T19:41:46.000Z |
__all__ = ['addressProcessor']
# (c) Kyrylo Zakharov (@Amice13), 2019
# Don't look below, you will not understand this Python code :) I don't.
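# What follows is js2py output: a JavaScript address-normalization library translated
# mechanically into Python. Every JS variable lives in the emulated `var` Scope created
# below, JS functions become @Js-decorated Python callables registered with var.put(),
# and JS regular expressions are wrapped in JsRegExp.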
from js2py.pyjs import *
# setting scope
var = Scope( JS_BUILTINS )
set_global_object(var)
# Code follows:
var.registers(['mixedCyrillicLatinPattern', 'spaceAfterPunctuationPattern', 'replaceAllCyrillicToLatin', 'latinToCyrillic', 'otherPhoneNumbersPattern', 'removeSebastopolKyivPattern', 'quoteSpacePatternEnd', 'firstLettersPattern', 'replaceApostrophes', 'quoteSpacePatternStart', 'districtPattern2', 'beforePunctuationPattern', 'removeStreetPattern2', 'removeLocalityPattern', 'zgLettersPattern', 'startSpace', 'linebreakPattern', 'placeholder', 'noSpacesAfterPunctuationPattern', 'firstLetters', 'replaceCarriages', 'postalCodePattern', 'fixStupidTitles', 'getAddress', 'noSpacesBeforePunctuationPattern', 'preprocessingStringWithPunctuation', 'replaceSpaces', 'reverseCommonRegionPattern', 'removeRegionPattern', 'replaceHyphens', 'localityTypes', 'fixApostrophes', 'localityPattern3', 'regions', 'replaceQuotes', 'removeBuilingPattern', 'districtPattern', 'removeApartmentPattern', 'regionPattern', 'addSpacesAfterPunctuation', 'replaceSpacesBeforePunctuation', 'capitalizeFirst', 'removeDistrictPattern3', 'streetPattern', 'removeExtraSpaces', 'wrongQuotePattern', 'removeSlash', 'doublePunctuationPattern', 'apostrophePattern', 'stringPreprocessing', 'numberPattern', '_geonymTypes', 'removeSpaceAfterPunctuationPattern', 'streetPattern2', 'textPreprocessing', 'russianPattern', 'preserveLinebreakPattern', 'zgLetters', 'crimeaPattern', 'districtPattern3', 'listPattern1', 'addStreetSpaces', 'translit', 'latinInCyrillicPattern', 'removeUkrainePattern', 'otherLetters', 'removeDistrictPattern', 'endSpace', 'localityPattern2', 'allLetters', 'numbersToCyrillic', 'replaceWrongQuote', 'addSpacesBeforePunctuation', 'fullPhoneNumbersPattern', 'removeExtraHyphens', 'buildingPattern', 'removeDistrictPattern2', 'reverseStreetPattern', 'spaceAfterPunctuationPattern2', 'latinPattern', 'removeStreetPattern', 'fixLists', 'spacePattern', 'commonRegionPattern', 'removeReverseDistrictPattern', 'replaceEllipsis', 'apartmentPattern', 'geonymTypes', 'replaceDoublePunctuation', 'preprocessingTextWithPunctuation', 'titleCasePattern', 'restoreLinebreakPattern', 'firstLetter', 'stupidTitlePattern', 'sixPhoneNumbersPattern', 'phonePunctuationPattern', 'replaceLinebreaks', 'commonMistakes', 'removeReverseStreetPattern', 'replaceLatinInCyrillic', 'listPattern2', 'sebastopolKyivPattern', 'removeLocalityPattern2', 'reverseDistrictPattern', 'fixApostrophePattern', 'fixCommonMistakes', 'ellipsisPattern', 'carriagePattern', 'cyrillicToLatin', 'russianToUkraine', 'toTitleCase', 'replaceAllNumbers', 'removeStartEndSpaces', 'replaceAllRussian', 'removeInBracesPattern', 'replaceApostrophePattern', 'cyrillicPattern', 'fivePhoneNumbersPattern', 'latinInCyrillicPattern2', 'buildingPattern2', 'removeCommonRegionPattern', 'replaceSmartLatin', 'localityPattern', 'multipleSpacePattern', 'hyphenPattern', 'replaceSpacesAfterPunctuation', 'removeDoubleSpaces', 'quoteSpacePatternMiddle', 'formatPhone', 'spaceBeforePunctuationPattern', 'shortPhoneNumbersPattern', 'removePostalCodePattern', '_defineProperty', 'removeGarbagePattern', 'removeLocalityPattern3', 'fixStupidTitlePattern', 'doubleSpace', 'removeBuilingPattern2', 'replaceAllLatin', 'fixApostrophePattern2'])
@Js
def PyJsHoisted__defineProperty_(obj, key, value, this, arguments, var=var):
var = Scope({'obj':obj, 'key':key, 'value':value, 'this':this, 'arguments':arguments}, var)
var.registers(['obj', 'value', 'key'])
if var.get('obj').contains(var.get('key')):
PyJs_Object_0_ = Js({'value':var.get('value'),'enumerable':Js(True),'configurable':Js(True),'writable':Js(True)})
var.get('Object').callprop('defineProperty', var.get('obj'), var.get('key'), PyJs_Object_0_)
else:
var.get('obj').put(var.get('key'), var.get('value'))
return var.get('obj')
PyJsHoisted__defineProperty_.func_name = '_defineProperty'
var.put('_defineProperty', PyJsHoisted__defineProperty_)
@Js
def PyJsHoisted_capitalizeFirst_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
@Js
def PyJs_anonymous_1_(letter, this, arguments, var=var):
var = Scope({'letter':letter, 'this':this, 'arguments':arguments}, var)
var.registers(['letter'])
return var.get('letter').callprop('toUpperCase')
PyJs_anonymous_1_._set_name('anonymous')
var.put('s', var.get('s').callprop('replace', var.get('firstLetter'), PyJs_anonymous_1_))
return var.get('s')
PyJsHoisted_capitalizeFirst_.func_name = 'capitalizeFirst'
var.put('capitalizeFirst', PyJsHoisted_capitalizeFirst_)
@Js
def PyJsHoisted_replaceQuotes_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
var.put('s', var.get('s').callprop('replace', var.get('quoteSpacePatternStart'), Js('$1')))
var.put('s', var.get('s').callprop('replace', var.get('quoteSpacePatternEnd'), Js('$1')))
var.put('s', var.get('s').callprop('replace', var.get('quoteSpacePatternMiddle'), Js(' "')))
return var.get('s')
PyJsHoisted_replaceQuotes_.func_name = 'replaceQuotes'
var.put('replaceQuotes', PyJsHoisted_replaceQuotes_)
@Js
def PyJsHoisted_replaceApostrophes_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
var.put('s', var.get('s').callprop('replace', var.get('apostrophePattern'), Js("'")))
return var.get('s')
PyJsHoisted_replaceApostrophes_.func_name = 'replaceApostrophes'
var.put('replaceApostrophes', PyJsHoisted_replaceApostrophes_)
@Js
def PyJsHoisted_replaceHyphens_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
var.put('s', var.get('s').callprop('replace', var.get('hyphenPattern'), Js('-')))
return var.get('s')
PyJsHoisted_replaceHyphens_.func_name = 'replaceHyphens'
var.put('replaceHyphens', PyJsHoisted_replaceHyphens_)
@Js
def PyJsHoisted_replaceSpaces_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
var.put('s', var.get('s').callprop('replace', var.get('spacePattern'), Js(' ')))
var.put('s', var.get('s').callprop('replace', var.get('multipleSpacePattern'), Js(' ')))
return var.get('s')
PyJsHoisted_replaceSpaces_.func_name = 'replaceSpaces'
var.put('replaceSpaces', PyJsHoisted_replaceSpaces_)
@Js
def PyJsHoisted_removeStartEndSpaces_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
var.put('s', var.get('s').callprop('replace', var.get('startSpace'), Js('')))
var.put('s', var.get('s').callprop('replace', var.get('endSpace'), Js('')))
return var.get('s')
PyJsHoisted_removeStartEndSpaces_.func_name = 'removeStartEndSpaces'
var.put('removeStartEndSpaces', PyJsHoisted_removeStartEndSpaces_)
@Js
def PyJsHoisted_removeDoubleSpaces_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
var.put('s', var.get('s').callprop('replace', var.get('doubleSpace'), Js(' ')))
return var.get('s')
PyJsHoisted_removeDoubleSpaces_.func_name = 'removeDoubleSpaces'
var.put('removeDoubleSpaces', PyJsHoisted_removeDoubleSpaces_)
@Js
def PyJsHoisted_replaceAllLatin_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
@Js
def PyJs_anonymous_2_(match, this, arguments, var=var):
var = Scope({'match':match, 'this':this, 'arguments':arguments}, var)
var.registers(['match'])
return var.get('latinToCyrillic').get(var.get('match'))
PyJs_anonymous_2_._set_name('anonymous')
var.put('s', var.get('s').callprop('replace', var.get('latinPattern'), PyJs_anonymous_2_))
return var.get('s')
PyJsHoisted_replaceAllLatin_.func_name = 'replaceAllLatin'
var.put('replaceAllLatin', PyJsHoisted_replaceAllLatin_)
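# replaceAllLatin / replaceAllCyrillicToLatin swap visually identical homoglyphs between
# alphabets (e.g. Latin "A", "O", "P" vs Cyrillic "А", "О", "Р"), using the
# latinToCyrillic / cyrillicToLatin lookup tables defined further below.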
@Js
def PyJsHoisted_replaceAllCyrillicToLatin_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
@Js
def PyJs_anonymous_3_(match, this, arguments, var=var):
var = Scope({'match':match, 'this':this, 'arguments':arguments}, var)
var.registers(['match'])
return var.get('cyrillicToLatin').get(var.get('match'))
PyJs_anonymous_3_._set_name('anonymous')
var.put('s', var.get('s').callprop('replace', var.get('cyrillicPattern'), PyJs_anonymous_3_))
return var.get('s')
PyJsHoisted_replaceAllCyrillicToLatin_.func_name = 'replaceAllCyrillicToLatin'
var.put('replaceAllCyrillicToLatin', PyJsHoisted_replaceAllCyrillicToLatin_)
@Js
def PyJsHoisted_replaceAllRussian_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
@Js
def PyJs_anonymous_4_(match, this, arguments, var=var):
var = Scope({'match':match, 'this':this, 'arguments':arguments}, var)
var.registers(['match'])
return var.get('russianToUkraine').get(var.get('match'))
PyJs_anonymous_4_._set_name('anonymous')
var.put('s', var.get('s').callprop('replace', var.get('russianPattern'), PyJs_anonymous_4_))
return var.get('s')
PyJsHoisted_replaceAllRussian_.func_name = 'replaceAllRussian'
var.put('replaceAllRussian', PyJsHoisted_replaceAllRussian_)
@Js
def PyJsHoisted_fixApostrophes_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
var.put('s', var.get('s').callprop('replace', var.get('fixApostrophePattern'), Js("'")))
var.put('s', var.get('s').callprop('replace', var.get('fixApostrophePattern2'), Js("'")))
return var.get('s')
PyJsHoisted_fixApostrophes_.func_name = 'fixApostrophes'
var.put('fixApostrophes', PyJsHoisted_fixApostrophes_)
@Js
def PyJsHoisted_replaceLatinInCyrillic_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
@Js
def PyJs_anonymous_5_(match, this, arguments, var=var):
var = Scope({'match':match, 'this':this, 'arguments':arguments}, var)
var.registers(['match'])
@Js
def PyJs_anonymous_6_(letter, this, arguments, var=var):
var = Scope({'letter':letter, 'this':this, 'arguments':arguments}, var)
var.registers(['letter'])
return var.get('latinToCyrillic').get(var.get('letter'))
PyJs_anonymous_6_._set_name('anonymous')
var.put('match', var.get('match').callprop('split', Js('')).callprop('map', PyJs_anonymous_6_).callprop('join', Js('')))
return var.get('match')
PyJs_anonymous_5_._set_name('anonymous')
var.put('s', var.get('s').callprop('replace', var.get('latinInCyrillicPattern'), PyJs_anonymous_5_))
@Js
def PyJs_anonymous_7_(match, this, arguments, var=var):
var = Scope({'match':match, 'this':this, 'arguments':arguments}, var)
var.registers(['match'])
var.put('match', var.get('replaceAllLatin')(var.get('match')))
return var.get('match')
PyJs_anonymous_7_._set_name('anonymous')
var.put('s', var.get('s').callprop('replace', var.get('latinInCyrillicPattern2'), PyJs_anonymous_7_))
return var.get('s')
PyJsHoisted_replaceLatinInCyrillic_.func_name = 'replaceLatinInCyrillic'
var.put('replaceLatinInCyrillic', PyJsHoisted_replaceLatinInCyrillic_)
@Js
def PyJsHoisted_replaceSmartLatin_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
@Js
def PyJs_anonymous_8_(match, this, arguments, var=var):
var = Scope({'match':match, 'this':this, 'arguments':arguments}, var)
var.registers(['countCyrillic', 'countLatin', 'match'])
var.put('countLatin', var.get('match').callprop('match', JsRegExp('/[a-z]/gi')))
var.put('countCyrillic', var.get('match').callprop('match', JsRegExp("/[а-яєіїґ\\']/gi")))
if var.get('countLatin').neg():
return var.get('match')
if var.get('countCyrillic').neg():
return var.get('match')
if (var.get('countCyrillic')>=var.get('countLatin')):
return var.get('replaceAllLatin')(var.get('match'))
else:
return var.get('replaceAllCyrillicToLatin')(var.get('match'))
PyJs_anonymous_8_._set_name('anonymous')
var.put('s', var.get('s').callprop('replace', var.get('mixedCyrillicLatinPattern'), PyJs_anonymous_8_))
return var.get('s')
PyJsHoisted_replaceSmartLatin_.func_name = 'replaceSmartLatin'
var.put('replaceSmartLatin', PyJsHoisted_replaceSmartLatin_)
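# replaceSmartLatin resolves tokens that mix both alphabets: it counts Latin vs Cyrillic
# characters in each matched token and converts whichever script is in the minority into
# the majority one. A rough pure-Python sketch of that heuristic follows; it is
# illustrative only (hypothetical helper name, plain-dict versions of the homoglyph
# tables) and is not used by the generated code below.
import re as _re_sketch

def _fix_mixed_token_sketch(token, latin_to_cyr, cyr_to_latin):
    # Count characters from each script inside the token.
    latin = _re_sketch.findall(r"[a-z]", token, _re_sketch.I)
    cyr = _re_sketch.findall(r"[а-яєіїґ']", token, _re_sketch.I)
    if not latin or not cyr:
        return token  # not a mixed-script token, leave it alone
    # Convert the minority script using the corresponding homoglyph table.
    table = latin_to_cyr if len(cyr) >= len(latin) else cyr_to_latin
    return "".join(table.get(ch, ch) for ch in token)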
@Js
def PyJsHoisted_replaceAllNumbers_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
@Js
def PyJs_anonymous_9_(match, this, arguments, var=var):
var = Scope({'match':match, 'this':this, 'arguments':arguments}, var)
var.registers(['match'])
return var.get('numbersToCyrillic').get(var.get('match'))
PyJs_anonymous_9_._set_name('anonymous')
var.put('s', var.get('s').callprop('replace', var.get('numberPattern'), PyJs_anonymous_9_))
return var.get('s')
PyJsHoisted_replaceAllNumbers_.func_name = 'replaceAllNumbers'
var.put('replaceAllNumbers', PyJsHoisted_replaceAllNumbers_)
@Js
def PyJsHoisted_replaceCarriages_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
var.put('s', var.get('s').callprop('replace', var.get('carriagePattern'), Js('\n')))
return var.get('s')
PyJsHoisted_replaceCarriages_.func_name = 'replaceCarriages'
var.put('replaceCarriages', PyJsHoisted_replaceCarriages_)
@Js
def PyJsHoisted_replaceLinebreaks_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
var.put('s', var.get('s').callprop('replace', var.get('preserveLinebreakPattern'), Js('~linebreak~')))
var.put('s', var.get('s').callprop('replace', var.get('linebreakPattern'), Js(' ')))
var.put('s', var.get('s').callprop('replace', var.get('restoreLinebreakPattern'), Js('\n\n')))
return var.get('s')
PyJsHoisted_replaceLinebreaks_.func_name = 'replaceLinebreaks'
var.put('replaceLinebreaks', PyJsHoisted_replaceLinebreaks_)
@Js
def PyJsHoisted_replaceSpacesBeforePunctuation_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
var.put('s', var.get('s').callprop('replace', var.get('spaceBeforePunctuationPattern'), Js('$1')))
return var.get('s')
PyJsHoisted_replaceSpacesBeforePunctuation_.func_name = 'replaceSpacesBeforePunctuation'
var.put('replaceSpacesBeforePunctuation', PyJsHoisted_replaceSpacesBeforePunctuation_)
@Js
def PyJsHoisted_addSpacesAfterPunctuation_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
var.put('s', var.get('s').callprop('replace', var.get('spaceAfterPunctuationPattern'), Js('$1 ')))
var.put('s', var.get('s').callprop('replace', var.get('spaceAfterPunctuationPattern2'), Js('$1 ')))
var.put('s', var.get('s').callprop('replace', var.get('removeSpaceAfterPunctuationPattern'), Js('$1$2')))
return var.get('s')
PyJsHoisted_addSpacesAfterPunctuation_.func_name = 'addSpacesAfterPunctuation'
var.put('addSpacesAfterPunctuation', PyJsHoisted_addSpacesAfterPunctuation_)
@Js
def PyJsHoisted_replaceSpacesAfterPunctuation_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
var.put('s', var.get('s').callprop('replace', var.get('noSpacesAfterPunctuationPattern'), Js('$1')))
return var.get('s')
PyJsHoisted_replaceSpacesAfterPunctuation_.func_name = 'replaceSpacesAfterPunctuation'
var.put('replaceSpacesAfterPunctuation', PyJsHoisted_replaceSpacesAfterPunctuation_)
@Js
def PyJsHoisted_addSpacesBeforePunctuation_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
var.put('s', var.get('s').callprop('replace', var.get('noSpacesBeforePunctuationPattern'), Js('$1 $2')))
return var.get('s')
PyJsHoisted_addSpacesBeforePunctuation_.func_name = 'addSpacesBeforePunctuation'
var.put('addSpacesBeforePunctuation', PyJsHoisted_addSpacesBeforePunctuation_)
@Js
def PyJsHoisted_replaceEllipsis_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
var.put('s', var.get('s').callprop('replace', var.get('ellipsisPattern'), Js('...')))
return var.get('s')
PyJsHoisted_replaceEllipsis_.func_name = 'replaceEllipsis'
var.put('replaceEllipsis', PyJsHoisted_replaceEllipsis_)
@Js
def PyJsHoisted_replaceWrongQuote_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
var.put('s', var.get('s').callprop('replace', var.get('wrongQuotePattern'), Js("$1'$2")))
return var.get('s')
PyJsHoisted_replaceWrongQuote_.func_name = 'replaceWrongQuote'
var.put('replaceWrongQuote', PyJsHoisted_replaceWrongQuote_)
@Js
def PyJsHoisted_replaceDoublePunctuation_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
@Js
def PyJs_anonymous_10_(match, this, arguments, var=var):
var = Scope({'match':match, 'this':this, 'arguments':arguments}, var)
var.registers(['match'])
return var.get('match').get('0')
PyJs_anonymous_10_._set_name('anonymous')
var.put('s', var.get('s').callprop('replace', var.get('doublePunctuationPattern'), PyJs_anonymous_10_))
return var.get('s')
PyJsHoisted_replaceDoublePunctuation_.func_name = 'replaceDoublePunctuation'
var.put('replaceDoublePunctuation', PyJsHoisted_replaceDoublePunctuation_)
@Js
def PyJsHoisted_toTitleCase_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
var.put('s', var.get('s').callprop('toLowerCase'))
@Js
def PyJs_anonymous_11_(match, this, arguments, var=var):
var = Scope({'match':match, 'this':this, 'arguments':arguments}, var)
var.registers(['match'])
return var.get('match').callprop('toUpperCase')
PyJs_anonymous_11_._set_name('anonymous')
var.put('s', var.get('s').callprop('replace', var.get('titleCasePattern'), PyJs_anonymous_11_))
return var.get('s')
PyJsHoisted_toTitleCase_.func_name = 'toTitleCase'
var.put('toTitleCase', PyJsHoisted_toTitleCase_)
@Js
def PyJsHoisted_formatPhone_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
var.put('s', var.get('s').callprop('replace', var.get('phonePunctuationPattern'), Js('')))
while 1:
SWITCHED = False
CONDITION = (Js(True))
if SWITCHED or PyJsStrictEq(CONDITION, (var.get('s').callprop('match', JsRegExp('/^0/')) and PyJsStrictEq(var.get('s').get('length'),Js(10.0)))):
SWITCHED = True
var.put('s', var.get('s').callprop('replace', var.get('fullPhoneNumbersPattern'), Js('($1) $2 $3 $4')))
var.put('s', (Js('+38 ')+var.get('s')))
break
if SWITCHED or PyJsStrictEq(CONDITION, PyJsStrictEq(var.get('s').get('length'),Js(7.0))):
SWITCHED = True
var.put('s', var.get('s').callprop('replace', var.get('shortPhoneNumbersPattern'), Js('$1 $2 $3')))
break
if SWITCHED or PyJsStrictEq(CONDITION, (var.get('s').callprop('match', JsRegExp('/^8/')) and PyJsStrictEq(var.get('s').get('length'),Js(11.0)))):
SWITCHED = True
var.put('s', var.get('s').callprop('replace', JsRegExp('/^8/'), Js('')))
var.put('s', var.get('s').callprop('replace', var.get('fullPhoneNumbersPattern'), Js('($1) $2 $3 $4')))
var.put('s', (Js('+38 ')+var.get('s')))
break
if SWITCHED or PyJsStrictEq(CONDITION, (var.get('s').callprop('match', JsRegExp('/^3/')) and PyJsStrictEq(var.get('s').get('length'),Js(12.0)))):
SWITCHED = True
var.put('s', var.get('s').callprop('replace', JsRegExp('/^38/'), Js('')))
var.put('s', var.get('s').callprop('replace', var.get('fullPhoneNumbersPattern'), Js('($1) $2 $3 $4')))
var.put('s', (Js('+38 ')+var.get('s')))
break
if SWITCHED or PyJsStrictEq(CONDITION, (var.get('s').get('length')>=Js(11.0))):
SWITCHED = True
var.put('s', var.get('s').callprop('replace', var.get('otherPhoneNumbersPattern'), Js('($1) $2 $3 $4')))
break
if SWITCHED or PyJsStrictEq(CONDITION, PyJsStrictEq(var.get('s').get('length'),Js(8.0))):
SWITCHED = True
var.put('s', var.get('s').callprop('replace', var.get('fivePhoneNumbersPattern'), Js('($1) $2 $3 $4')))
break
if SWITCHED or PyJsStrictEq(CONDITION, PyJsStrictEq(var.get('s').get('length'),Js(9.0))):
SWITCHED = True
var.put('s', var.get('s').callprop('replace', var.get('sixPhoneNumbersPattern'), Js('($1) $2 $3 $4')))
break
if SWITCHED or PyJsStrictEq(CONDITION, PyJsStrictEq(var.get('s').get('length'),Js(6.0))):
SWITCHED = True
var.put('s', var.get('s').callprop('replace', var.get('sixPhoneNumbersPattern'), Js('$2 $3 $4')))
break
if SWITCHED or PyJsStrictEq(CONDITION, PyJsStrictEq(var.get('s').get('length'),Js(5.0))):
SWITCHED = True
var.put('s', var.get('s').callprop('replace', var.get('fivePhoneNumbersPattern'), Js('$2 $3 $4')))
break
if True:
SWITCHED = True
var.put('s', var.get(u"null"))
SWITCHED = True
break
return var.get('s')
PyJsHoisted_formatPhone_.func_name = 'formatPhone'
var.put('formatPhone', PyJsHoisted_formatPhone_)
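# formatPhone strips punctuation and then formats by digit count and prefix. From the
# patterns above, a 10-digit number starting with 0 such as "0441234567" is expected to
# come out as "+38 (044) 123 45 67", a bare 7-digit number like "1234567" as "123 45 67",
# and anything matching no branch is set to null (the example numbers are made up).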
@Js
def PyJsHoisted_stringPreprocessing_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
var.put('s', var.get('replaceSpaces')(var.get('s')))
var.put('s', var.get('fixApostrophes')(var.get('s')))
var.put('s', var.get('replaceHyphens')(var.get('s')))
var.put('s', var.get('replaceQuotes')(var.get('s')))
var.put('s', var.get('replaceApostrophes')(var.get('s')))
var.put('s', var.get('removeStartEndSpaces')(var.get('s')))
var.put('s', var.get('removeDoubleSpaces')(var.get('s')))
var.put('s', var.get('replaceEllipsis')(var.get('s')))
return var.get('s')
PyJsHoisted_stringPreprocessing_.func_name = 'stringPreprocessing'
var.put('stringPreprocessing', PyJsHoisted_stringPreprocessing_)
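# stringPreprocessing chains the basic normalizers (unify spaces, apostrophes, hyphens
# and quotes, trim, collapse doubled spaces, normalize ellipsis); textPreprocessing below
# additionally normalizes carriage returns and line breaks.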
@Js
def PyJsHoisted_textPreprocessing_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
var.put('s', var.get('stringPreprocessing')(var.get('s')))
var.put('s', var.get('replaceCarriages')(var.get('s')))
var.put('s', var.get('replaceLinebreaks')(var.get('s')))
return var.get('s')
PyJsHoisted_textPreprocessing_.func_name = 'textPreprocessing'
var.put('textPreprocessing', PyJsHoisted_textPreprocessing_)
@Js
def PyJsHoisted_fixStupidTitles_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
@Js
def PyJs_anonymous_12_(text, this, arguments, var=var):
var = Scope({'text':text, 'this':this, 'arguments':arguments}, var)
var.registers(['text'])
return var.get('replaceLatinInCyrillic')(var.get('text').callprop('replace', var.get('spacePattern'), Js('')))
PyJs_anonymous_12_._set_name('anonymous')
var.put('s', var.get('s').callprop('replace', var.get('stupidTitlePattern'), PyJs_anonymous_12_))
var.put('s', var.get('s').callprop('replace', var.get('fixStupidTitlePattern'), Js('$1 ')))
return var.get('s')
PyJsHoisted_fixStupidTitles_.func_name = 'fixStupidTitles'
var.put('fixStupidTitles', PyJsHoisted_fixStupidTitles_)
@Js
def PyJsHoisted_preprocessingStringWithPunctuation_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
var.put('s', var.get('stringPreprocessing')(var.get('s')))
var.put('s', var.get('replaceSpacesBeforePunctuation')(var.get('s')))
var.put('s', var.get('addSpacesAfterPunctuation')(var.get('s')))
var.put('s', var.get('replaceSpacesAfterPunctuation')(var.get('s')))
var.put('s', var.get('addSpacesBeforePunctuation')(var.get('s')))
var.put('s', var.get('replaceDoublePunctuation')(var.get('s')))
return var.get('s')
PyJsHoisted_preprocessingStringWithPunctuation_.func_name = 'preprocessingStringWithPunctuation'
var.put('preprocessingStringWithPunctuation', PyJsHoisted_preprocessingStringWithPunctuation_)
@Js
def PyJsHoisted_preprocessingTextWithPunctuation_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
var.put('s', var.get('textPreprocessing')(var.get('s')))
var.put('s', var.get('replaceDoublePunctuation')(var.get('s')))
var.put('s', var.get('replaceSpacesBeforePunctuation')(var.get('s')))
var.put('s', var.get('addSpacesAfterPunctuation')(var.get('s')))
var.put('s', var.get('replaceSpacesAfterPunctuation')(var.get('s')))
var.put('s', var.get('addSpacesBeforePunctuation')(var.get('s')))
return var.get('s')
PyJsHoisted_preprocessingTextWithPunctuation_.func_name = 'preprocessingTextWithPunctuation'
var.put('preprocessingTextWithPunctuation', PyJsHoisted_preprocessingTextWithPunctuation_)
@Js
def PyJsHoisted_fixCommonMistakes_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
for PyJsTemp in var.get('commonMistakes'):
var.put('i', PyJsTemp)
var.put('s', var.get('s').callprop('replace', var.get('commonMistakes').get(var.get('i')), var.get('i')))
return var.get('s')
PyJsHoisted_fixCommonMistakes_.func_name = 'fixCommonMistakes'
var.put('fixCommonMistakes', PyJsHoisted_fixCommonMistakes_)
@Js
def PyJsHoisted_fixLists_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['s'])
var.put('s', var.get('s').callprop('replace', var.get('listPattern1'), Js('')))
var.put('s', var.get('s').callprop('replace', var.get('listPattern2'), Js('$1$2 ')))
return var.get('s')
PyJsHoisted_fixLists_.func_name = 'fixLists'
var.put('fixLists', PyJsHoisted_fixLists_)
@Js
def PyJsHoisted_translit_(s, letterCase, this, arguments, var=var):
var = Scope({'s':s, 'letterCase':letterCase, 'this':this, 'arguments':arguments}, var)
var.registers(['s', 'letterCase'])
@Js
def PyJs_anonymous_18_(match, this, arguments, var=var):
var = Scope({'match':match, 'this':this, 'arguments':arguments}, var)
var.registers(['match'])
return var.get('match').callprop('replace', Js("'"), Js(''))
PyJs_anonymous_18_._set_name('anonymous')
var.put('s', var.get('s').callprop('replace', var.get('replaceApostrophePattern'), PyJs_anonymous_18_))
@Js
def PyJs_anonymous_19_(match, this, arguments, var=var):
var = Scope({'match':match, 'this':this, 'arguments':arguments}, var)
var.registers(['match'])
return var.get('zgLetters').get(var.get('match'))
PyJs_anonymous_19_._set_name('anonymous')
var.put('s', var.get('s').callprop('replace', var.get('zgLettersPattern'), PyJs_anonymous_19_))
@Js
def PyJs_anonymous_20_(match, this, arguments, var=var):
var = Scope({'match':match, 'this':this, 'arguments':arguments}, var)
var.registers(['match'])
for PyJsTemp in var.get('firstLetters'):
var.put('l', PyJsTemp)
var.put('match', var.get('match').callprop('replace', var.get('l'), var.get('firstLetters').get(var.get('l'))))
return var.get('match')
PyJs_anonymous_20_._set_name('anonymous')
var.put('s', var.get('s').callprop('replace', var.get('firstLettersPattern'), PyJs_anonymous_20_))
@Js
def PyJs_anonymous_21_(match, this, arguments, var=var):
var = Scope({'match':match, 'this':this, 'arguments':arguments}, var)
var.registers(['match'])
return var.get('otherLetters').get(var.get('match'))
PyJs_anonymous_21_._set_name('anonymous')
var.put('s', var.get('s').callprop('replace', var.get('allLetters'), PyJs_anonymous_21_))
if (var.get('letterCase') and PyJsStrictEq(var.get('letterCase'),Js('lower'))):
var.put('s', var.get('s').callprop('toLowerCase'))
else:
if (var.get('letterCase') and PyJsStrictEq(var.get('letterCase'),Js('upper'))):
var.put('s', var.get('s').callprop('toUpperCase'))
else:
if (var.get('letterCase') and PyJsStrictEq(var.get('letterCase'),Js('title'))):
var.put('s', var.get('s').callprop('toTitleCase'))
return var.get('s')
PyJsHoisted_translit_.func_name = 'translit'
var.put('translit', PyJsHoisted_translit_)
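# translit appears to follow the official Ukrainian-to-Latin romanization rules
# (word-initial Є/Ї/Й/Ю/Я become Ye/Yi/Y/Yu/Ya, "зг" becomes "zgh", apostrophes between
# letters are dropped), with an optional letterCase argument of 'lower', 'upper' or 'title'.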
@Js
def PyJsHoisted_getAddress_(s, this, arguments, var=var):
var = Scope({'s':s, 'this':this, 'arguments':arguments}, var)
var.registers(['locality', 'postalCodeMatch', 'streetType', 'district', 'localityName', 'address', 'streetMatch', 'districtMatch', 'postalCode', 'streetName', 'source', 'building', 'localityType', 'apartment', 'buildingMatch', 's', 'region', 'street', 'apartmentMatch', 'localityMatch', 'regionMatch'])
var.put('source', var.get('s'))
var.put('s', var.get('preprocessingStringWithPunctuation')(var.get('s')))
var.put('s', var.get('replaceAllLatin')(var.get('s')))
var.put('s', var.get('replaceAllRussian')(var.get('s')))
var.put('s', var.get('replaceWrongQuote')(var.get('s')))
var.put('s', var.get('s').callprop('replace', var.get('removeExtraSpaces'), Js('-')))
var.put('s', var.get('s').callprop('replace', var.get('removeExtraHyphens'), Js('-')))
var.put('s', var.get('s').callprop('replace', var.get('addStreetSpaces'), Js('$1 ')))
var.put('s', var.get('s').callprop('replace', Js('КИІВ'), Js('КИЇВ')))
var.put('s', var.get('s').callprop('replace', var.get('removeInBracesPattern'), Js('')))
var.put('s', var.get('s').callprop('replace', var.get('removeSlash'), Js('м.')))
var.put('postalCodeMatch', var.get('s').callprop('match', var.get('postalCodePattern')))
if var.get('postalCodeMatch'):
var.put('postalCode', var.get('postalCodeMatch').get('1'))
var.put('s', var.get('s').callprop('replace', var.get('removePostalCodePattern'), Js('')))
var.put('regionMatch', var.get('s').callprop('match', var.get('commonRegionPattern')))
if var.get('regionMatch'):
var.put('region', (var.get('toTitleCase')(var.get('regionMatch').get('1'))+Js(' область')))
if var.get('region').callprop('match', var.get('crimeaPattern')):
var.put('region', Js('Автономна Республіка Крим'))
var.put('s', var.get('s').callprop('replace', var.get('removeCommonRegionPattern'), Js('')))
else:
var.put('regionMatch', var.get('s').callprop('match', var.get('regionPattern')))
if var.get('regionMatch'):
if var.get('regionMatch').get('1').callprop('match', var.get('crimeaPattern')):
var.put('region', Js('Автономна Республіка Крим'))
else:
if var.get('regionMatch').get('1').callprop('match', var.get('sebastopolKyivPattern')):
var.put('region', (Js('місто ')+var.get('toTitleCase')(var.get('regionMatch').get('1'))))
var.put('s', var.get('s').callprop('replace', var.get('removeSebastopolKyivPattern'), Js('')))
else:
var.put('region', (var.get('toTitleCase')(var.get('regionMatch').get('1'))+Js(' область')))
var.put('s', var.get('s').callprop('replace', var.get('removeRegionPattern'), Js('')))
var.put('s', var.get('s').callprop('replace', var.get('reverseCommonRegionPattern'), Js('')))
var.put('districtMatch', var.get('s').callprop('match', var.get('districtPattern')))
if var.get('districtMatch'):
var.put('district', (var.get('toTitleCase')(var.get('districtMatch').get('1'))+Js(' район')))
else:
var.put('districtMatch', var.get('s').callprop('match', var.get('reverseDistrictPattern')))
if var.get('districtMatch'):
var.put('district', (var.get('toTitleCase')(var.get('districtMatch').get('1'))+Js(' район')))
var.put('s', var.get('s').callprop('replace', var.get('removeDistrictPattern'), Js('')))
var.put('s', var.get('s').callprop('replace', var.get('removeReverseDistrictPattern'), Js('')))
var.put('s', var.get('s').callprop('replace', var.get('removeGarbagePattern'), Js('')))
if (var.get('region')==Js('місто Київ')):
var.put('locality', Js('місто Київ'))
if (var.get('region')==Js('місто Севастополь')):
var.put('locality', Js('місто Севастополь'))
if var.get('locality').neg():
var.put('localityMatch', var.get('s').callprop('match', var.get('localityPattern')))
if var.get('localityMatch'):
var.put('localityType', var.get('localityTypes').get(var.get('localityMatch').get('1').callprop('toLowerCase')))
var.put('localityName', var.get('toTitleCase')(var.get('localityMatch').get('2')))
var.put('locality', ((var.get('localityType')+Js(' '))+var.get('localityName')))
var.put('s', var.get('s').callprop('replace', var.get('removeLocalityPattern'), Js('')))
var.put('s', var.get('s').callprop('replace', var.get('removeGarbagePattern'), Js('')))
if (((var.get('s').get('length')>Js(1.0)) and var.get('s').callprop('match', var.get('streetPattern')).neg()) and var.get('s').callprop('match', var.get('reverseStreetPattern')).neg()):
var.put('s', (Js('вул ')+var.get('s')))
var.put('streetMatch', var.get('s').callprop('match', var.get('streetPattern')))
if var.get('streetMatch'):
var.put('streetType', var.get('geonymTypes').get(var.get('streetMatch').get('1').callprop('toLowerCase')))
var.put('streetName', var.get('toTitleCase')(var.get('streetMatch').get('2')))
var.put('street', ((var.get('streetType')+Js(' '))+var.get('streetName')))
else:
var.put('streetMatch', var.get('s').callprop('match', var.get('reverseStreetPattern')))
if var.get('streetMatch'):
var.put('streetType', var.get('geonymTypes').get(var.get('streetMatch').get('2').callprop('toLowerCase')))
var.put('streetName', var.get('toTitleCase')(var.get('streetMatch').get('1')))
var.put('street', ((var.get('streetType')+Js(' '))+var.get('streetName')))
var.put('s', var.get('s').callprop('replace', var.get('removeStreetPattern'), Js('')))
var.put('s', var.get('s').callprop('replace', var.get('removeReverseStreetPattern'), Js('')))
var.put('s', var.get('s').callprop('replace', var.get('removeGarbagePattern'), Js('')))
var.put('buildingMatch', var.get('s').callprop('match', var.get('buildingPattern')))
if var.get('buildingMatch'):
var.put('building', var.get('buildingMatch').get('1'))
var.put('s', var.get('s').callprop('replace', var.get('removeBuilingPattern'), Js('')))
var.put('s', var.get('s').callprop('replace', var.get('removeGarbagePattern'), Js('')))
var.put('apartmentMatch', var.get('s').callprop('match', var.get('apartmentPattern')))
if var.get('apartmentMatch'):
var.put('apartment', var.get('apartmentMatch').get('1'))
var.put('s', var.get('s').callprop('replace', var.get('removeApartmentPattern'), Js('')))
var.put('s', var.get('s').callprop('replace', var.get('removeGarbagePattern'), Js('')))
if var.get('region').neg():
var.put('regionMatch', var.get('s').callprop('match', var.get('regionPattern')))
if var.get('regionMatch'):
var.put('region', (var.get('toTitleCase')(var.get('regionMatch').get('1'))+Js(' область')))
var.put('s', var.get('s').callprop('replace', var.get('removeRegionPattern'), Js('')))
if var.get('locality').neg():
var.put('localityMatch', var.get('s').callprop('match', var.get('localityPattern2')))
if var.get('localityMatch'):
var.put('locality', (Js('місто ')+var.get('toTitleCase')(var.get('localityMatch').get('1'))))
if var.get('localityMatch').get('2'):
if var.get('district').neg():
var.put('district', (var.get('toTitleCase')(var.get('localityMatch').get('2'))+Js(' район')))
var.put('s', var.get('s').callprop('replace', var.get('removeLocalityPattern2'), Js('')))
else:
var.put('localityMatch', var.get('s').callprop('match', var.get('localityPattern3')))
if var.get('localityMatch'):
var.put('locality', (Js('місто ')+var.get('toTitleCase')(var.get('localityMatch').get('1'))))
var.put('s', var.get('s').callprop('replace', var.get('removeLocalityPattern3'), Js('')))
if var.get('district').neg():
var.put('districtMatch', var.get('s').callprop('match', var.get('districtPattern2')))
if var.get('districtMatch'):
var.put('district', (var.get('toTitleCase')(var.get('districtMatch').get('1'))+Js(' район')))
var.put('s', var.get('s').callprop('replace', var.get('removeDistrictPattern2'), Js('')))
else:
var.put('districtMatch', var.get('s').callprop('match', var.get('districtPattern3')))
if var.get('districtMatch'):
var.put('district', (var.get('toTitleCase')(var.get('districtMatch').get('1'))+Js(' район')))
var.put('s', var.get('s').callprop('replace', var.get('removeDistrictPattern3'), Js('')))
var.put('s', var.get('s').callprop('replace', var.get('removeUkrainePattern'), Js('')))
if var.get('street').neg():
var.put('streetMatch', var.get('s').callprop('match', var.get('streetPattern2')))
if var.get('streetMatch'):
var.put('street', var.get('streetMatch').get('1'))
if var.get('streetMatch').get('2').neg():
var.put('apartment', var.get('streetMatch').get('3'))
else:
var.put('building', var.get('streetMatch').get('2'))
var.put('apartment', var.get('streetMatch').get('3'))
var.put('s', var.get('s').callprop('replace', var.get('streetPattern2'), Js('')))
if var.get('building').neg():
var.put('buildingMatch', var.get('s').callprop('match', var.get('buildingPattern2')))
if var.get('buildingMatch'):
var.put('building', var.get('buildingMatch').get('1'))
if var.get('buildingMatch').get('2'):
var.put('apartment', var.get('buildingMatch').get('2'))
PyJs_Object_26_ = Js({})
var.put('address', PyJs_Object_26_)
var.get('address').put('source', var.get('source'))
var.get('address').put('countryName', Js('Україна'))
if var.get('postalCode'):
var.get('address').put('postalCode', var.get('postalCode'))
if var.get('region'):
var.get('address').put('region', var.get('region').callprop('replace', JsRegExp('/\\s+/'), Js(' ')))
if var.get('district'):
var.get('address').put('district', var.get('district'))
if var.get('locality'):
var.get('address').put('locality', var.get('locality'))
if var.get('building'):
var.get('address').put('streetNumber', var.get('building'))
if (var.get('street') and var.get('building')):
var.get('address').put('streetAddress', var.get('street'))
else:
if var.get('street'):
var.get('address').put('streetAddress', var.get('street'))
if var.get('apartment'):
var.get('address').put('apartment', var.get('apartment'))
@Js
def PyJs_anonymous_27_(n, this, arguments, var=var):
var = Scope({'n':n, 'this':this, 'arguments':arguments}, var)
var.registers(['n'])
return (var.get('n')!=var.get('undefined'))
PyJs_anonymous_27_._set_name('anonymous')
var.get('address').put('fullAddress', Js([var.get('address').get('postalCode'), var.get('address').get('region'), var.get('address').get('district'), var.get('address').get('locality'), var.get('address').get('streetAddress'), var.get('address').get('streetNumber'), var.get('address').get('apartment')]).callprop('filter', PyJs_anonymous_27_).callprop('join', Js(', ')))
return var.get('address')
PyJsHoisted_getAddress_.func_name = 'getAddress'
var.put('getAddress', PyJsHoisted_getAddress_)
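# getAddress normalizes a raw Ukrainian address string and returns an object whose
# possible fields are source, countryName (always "Україна"), postalCode, region,
# district, locality, streetAddress, streetNumber, apartment, and a comma-joined
# fullAddress built from the non-empty parts.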
Js('use strict')
pass
pass
Js('use strict')
pass
pass
pass
pass
pass
pass
pass
pass
pass
pass
pass
pass
pass
pass
pass
pass
pass
pass
pass
pass
pass
pass
pass
pass
pass
pass
pass
pass
pass
pass
pass
PyJs_Object_13_ = Js({'A':Js('А'),'B':Js('В'),'C':Js('С'),'E':Js('Е'),'H':Js('Н'),'I':Js('І'),'K':Js('К'),'M':Js('М'),'O':Js('О'),'P':Js('Р'),'T':Js('Т'),'X':Js('Х'),'a':Js('а'),'c':Js('с'),'e':Js('е'),'i':Js('і'),'o':Js('о'),'p':Js('р'),'y':Js('у'),'x':Js('х')})
var.put('latinToCyrillic', PyJs_Object_13_)
PyJs_Object_14_ = Js({'А':Js('A'),'В':Js('B'),'С':Js('C'),'Е':Js('E'),'Н':Js('H'),'І':Js('I'),'К':Js('K'),'М':Js('M'),'О':Js('O'),'Р':Js('P'),'Т':Js('T'),'Х':Js('X'),'а':Js('a'),'с':Js('c'),'е':Js('e'),'і':Js('i'),'о':Js('o'),'р':Js('p'),'у':Js('y'),'х':Js('x')})
var.put('cyrillicToLatin', PyJs_Object_14_)
PyJs_Object_15_ = Js({'Ы':Js('І'),'Ъ':Js('Ї'),'ы':Js('і'),'ъ':Js('Ї')})
var.put('russianToUkraine', PyJs_Object_15_)
PyJs_Object_16_ = Js({'0':Js('О'),'1':Js('І'),'3':Js('З'),'6':Js('б')})
var.put('numbersToCyrillic', PyJs_Object_16_)
var.put('firstLetter', JsRegExp('/^./'))
var.put('apostrophePattern', JsRegExp('/(’|‘|′|`|´)/g'))
var.put('hyphenPattern', JsRegExp('/(‒|–|—|―)/g'))
var.put('quoteSpacePatternStart', JsRegExp('/(“|„|«)\\s*/g'))
var.put('quoteSpacePatternEnd', JsRegExp('/\\s*(”|»|″)/g'))
var.put('quoteSpacePatternMiddle', JsRegExp('/(?<= )" +(?=[А-ЯЄІЇҐ])/g'))
var.put('startSpace', JsRegExp('/^[\\t\\v\\f \\u00a0\\u2000-\\u200b\\u2028-\\u2029\\u3000]+/gm'))
var.put('endSpace', JsRegExp('/[\\t\\v\\f \\u00a0\\u2000-\\u200b\\u2028-\\u2029\\u3000]+$/gm'))
var.put('doubleSpace', JsRegExp('/[\\t\\v\\f \\u00a0\\u2000-\\u200b\\u2028-\\u2029\\u3000]{2,}/g'))
var.put('latinPattern', var.get('RegExp').create(((Js('(')+var.get('Object').callprop('keys', var.get('latinToCyrillic')).callprop('join', Js('|')))+Js(')')), Js('g')))
var.put('cyrillicPattern', var.get('RegExp').create(((Js('(')+var.get('Object').callprop('keys', var.get('cyrillicToLatin')).callprop('join', Js('|')))+Js(')')), Js('g')))
var.put('latinInCyrillicPattern', var.get('RegExp').create(((Js('(')+var.get('Object').callprop('keys', var.get('latinToCyrillic')).callprop('join', Js('|')))+Js(')+(?![a-z])')), Js('g')))
var.put('latinInCyrillicPattern2', var.get('RegExp').create(((Js("[а-яєіїґ'](")+var.get('Object').callprop('keys', var.get('latinToCyrillic')).callprop('join', Js('|')))+Js(')+(?!^|[^a-z])')), Js('g')))
var.put('mixedCyrillicLatinPattern', JsRegExp('/(?:^|[^а-яєіїґ\\\'\\-a-z])(?=[^\\s"]*[a-z])(?=[^\\s]*[а-яєіїґ])[а-яєіїґ\\\'\\-a-z]+/gi'))
var.put('russianPattern', var.get('RegExp').create(((Js('(')+var.get('Object').callprop('keys', var.get('russianToUkraine')).callprop('join', Js('|')))+Js(')')), Js('g')))
var.put('numberPattern', var.get('RegExp').create(((Js('(')+var.get('Object').callprop('keys', var.get('numbersToCyrillic')).callprop('join', Js('|')))+Js(')')), Js('g')))
var.put('carriagePattern', JsRegExp('/\\r/g'))
var.put('preserveLinebreakPattern', JsRegExp('/\\n{2,}/g'))
var.put('linebreakPattern', JsRegExp('/\\n/g'))
var.put('restoreLinebreakPattern', JsRegExp('/~linebreak~/g'))
var.put('spaceBeforePunctuationPattern', JsRegExp('/\\s+([.,:;?!№#\\)\\]])/g'))
var.put('spaceAfterPunctuationPattern', JsRegExp('/([;?!])(?!\\s|\\n)/g'))
var.put('spaceAfterPunctuationPattern2', JsRegExp('/(?<=[а-я])([.,])(?!\\s|\\n|,|\\d)/g'))
var.put('removeSpaceAfterPunctuationPattern', JsRegExp('/(\\d)([.,])\\s(?=\\d)/g'))
var.put('noSpacesBeforePunctuationPattern', JsRegExp('/([^\\n ])(\\(|#|№)/g'))
var.put('noSpacesAfterPunctuationPattern', JsRegExp('/([\\[\\(])\\s+/g'))
var.put('ellipsisPattern', JsRegExp('/…/g'))
var.put('spacePattern', JsRegExp('/[\\u0020\\u00A0\\u1680\\u180E\\u2000\\u2001\\u2002\\u2003\\u2004\\u2005\\u2006\\u2007\\u2008\\u2009\\u200A\\u200B\\u202F\\u205F\\u3000\\uFEFF]/g'))
var.put('multipleSpacePattern', JsRegExp('/[\\t\\v\\f \\u00a0\\u2000-\\u200b\\u2028-\\u2029\\u3000]{2,}/g'))
var.put('titleCasePattern', JsRegExp("/([^a-zа-яєіїґ']|^)([a-zа-яєіїґ])/gi"))
var.put('wrongQuotePattern', JsRegExp('/([а-яєіїґ])"([а-яєіїґ])/gi'))
var.put('doublePunctuationPattern', JsRegExp('/(,{2,}|:{2,}|;{2,}|\\?{2,}|!{2,}|№{2,}|#{2,})/gi'))
var.put('beforePunctuationPattern', JsRegExp('/(^.+?)(?:[,;.\\/]|$)/i'))
var.put('phonePunctuationPattern', JsRegExp('/([\\(\\)\\s\\-\\+,\\.]|факс|[мбф])/g'))
var.put('fullPhoneNumbersPattern', JsRegExp('/(\\d{3})(\\d{3})(\\d{2})(\\d{2})/'))
var.put('shortPhoneNumbersPattern', JsRegExp('/(\\d{3})(\\d{2})(\\d{2})/'))
var.put('fivePhoneNumbersPattern', JsRegExp('/(\\d*)(\\d{1})(\\d{2})(\\d{2})$/'))
var.put('sixPhoneNumbersPattern', JsRegExp('/(\\d*)(\\d{2})(\\d{2})(\\d{2})$/'))
var.put('otherPhoneNumbersPattern', JsRegExp('/(\\d+)(\\d{3})(\\d{2})(\\d{2})/'))
var.put('listPattern1', JsRegExp('/^\\s+(?=\\d)/gm'))
var.put('listPattern2', JsRegExp('/^(\\d+)([\\.\\)])(?!\\s)(?=[а-яєіїґ])/gmi'))
var.put('fixApostrophePattern', JsRegExp('/(?<=[бпвмфгґкхжчшр])[\\*’‘′`´“«»″"](?=[яюєї])/g'))
var.put('fixApostrophePattern2', JsRegExp('/(?<= [БПВДМФГҐКХЖЧШРО])[\\*’‘′`´“«»″"](?=[яюєї])/g'))
pass
var.put('stupidTitlePattern', JsRegExp('/placholder/'))
var.put('fixStupidTitlePattern', JsRegExp('/(?<=\\n)(заочно?е|[иі]менем|судовий|судове|судебный|судебное|суть)/gi'))
PyJs_Object_17_ = Js({'відкласти':var.get('RegExp').create(Js('в і д к л а с т и'), Js('g')),'відмовити':var.get('RegExp').create(Js('в і д м о в и т и'), Js('g')),'відхилити':var.get('RegExp').create(Js('в і д х и л и т и'), Js('g')),'задовільнити':var.get('RegExp').create(Js('з а д о в і л ь н и т и'), Js('g')),'задоволити':var.get('RegExp').create(Js('з а д о в о л и т и'), Js('g')),'ухвалити':var.get('RegExp').create(Js('у х в а л и т и'), Js('g')),'частково':var.get('RegExp').create(Js('ч а с т к о в о'), Js('g'))})
var.put('commonMistakes', PyJs_Object_17_)
pass
PyJs_Object_22_ = Js({'Є':Js('Ye'),'Ї':Js('Yi'),'Й':Js('Y'),'Ю':Js('Yu'),'Я':Js('Ya'),'є':Js('ye'),'ї':Js('yi'),'й':Js('y'),'ю':Js('yu'),'я':Js('ya')})
var.put('firstLetters', PyJs_Object_22_)
PyJs_Object_23_ = Js({'А':Js('A'),'Б':Js('B'),'В':Js('V'),'Г':Js('H'),'Ґ':Js('G'),'Д':Js('D'),'Е':Js('E'),'Є':Js('Ie'),'Ж':Js('Zh'),'З':Js('Z'),'И':Js('Y'),'І':Js('I'),'Ї':Js('I'),'Й':Js('I'),'К':Js('K'),'Л':Js('L'),'М':Js('M'),'Н':Js('N'),'О':Js('O'),'П':Js('P'),'Р':Js('R'),'С':Js('S'),'Т':Js('T'),'У':Js('U'),'Ф':Js('F'),'Х':Js('Kh'),'Ц':Js('Ts'),'Ч':Js('Ch'),'Ш':Js('Sh'),'Щ':Js('Shch'),'Ь':Js(''),'Ъ':Js(''),'Ы':Js('Y'),'Э':Js('E'),'Ю':Js('Iu'),'Я':Js('Ia'),'а':Js('a'),'б':Js('b'),'в':Js('v'),'г':Js('h'),'ґ':Js('g'),'д':Js('d'),'е':Js('e'),'є':Js('ie'),'ж':Js('zh'),'з':Js('z'),'и':Js('y'),'і':Js('i'),'ї':Js('i'),'й':Js('i'),'к':Js('k'),'л':Js('l'),'м':Js('m'),'н':Js('n'),'о':Js('o'),'п':Js('p'),'р':Js('r'),'с':Js('s'),'т':Js('t'),'у':Js('u'),'ф':Js('f'),'х':Js('kh'),'ц':Js('ts'),'ч':Js('ch'),'ш':Js('sh'),'щ':Js('shch'),'ь':Js(''),'ъ':Js(''),'ы':Js('Y'),'э':Js('E'),'ю':Js('iu'),'я':Js('ia')})
var.put('otherLetters', PyJs_Object_23_)
PyJs_Object_24_ = Js({'Зг':Js('Zgh'),'зг':Js('zgh'),'ЗГ':Js('ZgH')})
var.put('zgLetters', PyJs_Object_24_)
var.put('firstLettersPattern', JsRegExp("/(^|[^а-яєіїґ\\']|[^а-яєіїґ\\']\\')([єїйюя])/gi"))
var.put('allLetters', JsRegExp('/[а-яєіїґ]/gi'))
var.put('zgLettersPattern', JsRegExp('/зг/gi'))
var.put('replaceApostrophePattern', JsRegExp("/[а-яєіїґ]'[а-яєіїґ]/gi"))
PyJs_Object_25_ = Js({'capitalizeFirst':var.get('capitalizeFirst'),'replaceQuotes':var.get('replaceQuotes'),'replaceApostrophes':var.get('replaceApostrophes'),'fixApostrophes':var.get('fixApostrophes'),'replaceHyphens':var.get('replaceHyphens'),'replaceSpaces':var.get('replaceSpaces'),'removeStartEndSpaces':var.get('removeStartEndSpaces'),'removeDoubleSpaces':var.get('removeDoubleSpaces'),'replaceAllLatin':var.get('replaceAllLatin'),'replaceLatinInCyrillic':var.get('replaceLatinInCyrillic'),'replaceAllCyrillicToLatin':var.get('replaceAllCyrillicToLatin'),'replaceSmartLatin':var.get('replaceSmartLatin'),'replaceCarriages':var.get('replaceCarriages'),'replaceLinebreaks':var.get('replaceLinebreaks'),'replaceSpacesBeforePunctuation':var.get('replaceSpacesBeforePunctuation'),'addSpacesAfterPunctuation':var.get('addSpacesAfterPunctuation'),'replaceSpacesAfterPunctuation':var.get('replaceSpacesAfterPunctuation'),'addSpacesBeforePunctuation':var.get('addSpacesBeforePunctuation'),'replaceEllipsis':var.get('replaceEllipsis'),'replaceAllNumbers':var.get('replaceAllNumbers'),'replaceDoublePunctuation':var.get('replaceDoublePunctuation'),'stringPreprocessing':var.get('stringPreprocessing'),'textPreprocessing':var.get('textPreprocessing'),'preprocessingStringWithPunctuation':var.get('preprocessingStringWithPunctuation'),'preprocessingTextWithPunctuation':var.get('preprocessingTextWithPunctuation'),'toTitleCase':var.get('toTitleCase'),'replaceWrongQuote':var.get('replaceWrongQuote'),'replaceAllRussian':var.get('replaceAllRussian'),'beforePunctuationPattern':var.get('beforePunctuationPattern'),'formatPhone':var.get('formatPhone'),'fixLists':var.get('fixLists'),'fixStupidTitles':var.get('fixStupidTitles'),'fixCommonMistakes':var.get('fixCommonMistakes'),'translit':var.get('translit')})
var.put('placeholder', PyJs_Object_25_)
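# `placeholder` gathers the helper functions (plus beforePunctuationPattern) into a
# single object, presumably mirroring the original JS module's exports.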
pass
var.put('regions', Js([Js('Автономна Республіка Крим'), Js('Вінницька'), Js('Волинська'), Js('Дніпропетровська'), Js('Донецька'), Js('Житомирська'), Js('Закарпатська'), Js('Запорізька'), Js('Івано-?Франківська'), Js('Кіровоградська'), Js('Київська'), Js('Луганська'), Js('Львівська'), Js('Миколаївська'), Js('Одеська'), Js('Тернопільська'), Js('Полтавська'), Js('Рівненська'), Js('Сумська'), Js('Харківська'), Js('Чернігівська'), Js('Херсонська'), Js('Хмельницька'), Js('Черкаська'), Js('Чернівецька'), Js('Севастополь'), Js('Київ')]))
PyJs_Object_28_ = Js({'м.':Js('місто'),'місто':Js('місто'),'м':Js('місто'),'с.':Js('село'),'с':Js('село'),'с-ще':Js('селище'),'сільрада':Js('село'),'сільська рада':Js('село'),'село':Js('село'),'сел.':Js('селище'),'селище':Js('селище'),'пос.':Js('селище'),'селище міського типу':Js('селище міського типу'),'смт':Js('селище міського типу'),'смт.':Js('селище міського типу')})
var.put('localityTypes', PyJs_Object_28_)
def PyJs_LONG_30_(var=var):
PyJs_Object_29_ = Js({'балка':Js('балка'),'бул':Js('бульвар'),'бульв':Js('бульвар'),'бульва':Js('бульвар'),'бульвар':Js('бульвар'),'б-р':Js('бульвар'),'булвар':Js('бульвар'),'бул-р':Js('бульвар'),"в'їзд":Js("в'їзд"),'вул':Js('вулиця'),'ву':Js('вулиця'),'вл.':Js('вулиця'),'вулиця':Js('вулиця'),'вулиц':Js('вулиця'),'в.':Js('вулиця'),'ж.м.':Js('житловий масив'),'ж. м. ':Js('житловий масив'),'ж/м':Js('житловий масив'),'житловий масив':Js('житловий масив'),'ж-м':Js('житловий масив'),'житломасив':Js('житловий масив'),'дорога':Js('дорога'),'квартал':Js('квартал'),'кварт':Js('квартал'),'квар':Js('квартал'),'кв':Js('квартал'),'майдан':Js('майдан'),'мкр-н':Js('мікрорайон'),'мкр':Js('мікрорайон'),'мікр.':Js('мікрорайон')})
return PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(PyJsComma(var.put('_geonymTypes', PyJs_Object_29_),var.get('_defineProperty')(var.get('_geonymTypes'), Js('мкр'), Js('мікрорайон'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('м-н'), Js('мікрорайон'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('мис'), Js('мис'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('мікрорайон'), Js('мікрорайон'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('наб.'), Js('набережна'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('набережна'), Js('набережна'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('острів'), Js('острів'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('о.'), Js('острів'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('пл.'), Js('площа'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('площа'), Js('площа'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('провулок'), Js('провулок'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('провул'), Js('провулок'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('пров'), Js('провулок'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('пер.'), Js('провулок'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('переулок'), Js('провулок'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('пров.'), Js('провулок'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('п-к.'), Js('провулок'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('пр.'), Js('проспект'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('прc.'), Js('проспект'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('прcп.'), Js('проспект'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('просп.'), Js('проспект'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('проспект'), Js('проспект'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('пр-т'), Js('проспект'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('пр-кт'), Js('проспект'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('прк'), Js('проспект'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('п-ст'), Js('проспект'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('п-т'), Js('проспект'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('проїзд'), Js('проїзд'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('станція'), Js('станція'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('ст.'), Js('станція'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('станц.'), Js('станція'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('тупік'), Js('тупик'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('тупик'), Js('тупик'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('туп.'), Js('тупик'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('спуск'), Js('узвіз'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('узвіз'), Js('узвіз'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('узв'), Js('узвіз'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('шосе'), 
Js('шосе'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('урочише'), Js('урочише'))),var.get('_defineProperty')(var.get('_geonymTypes'), Js('ш.'), Js('шосе'))),var.get('_geonymTypes'))
var.put('geonymTypes', PyJs_LONG_30_())
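# geonymTypes maps abbreviated street-type prefixes (бул, вул, пров, пр-т, ...) to their
# canonical Ukrainian forms; the long _defineProperty chain above just extends the base
# _geonymTypes object with the remaining abbreviations.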
var.put('removeInBracesPattern', JsRegExp('/\\(.*?\\)/g'))
var.put('removeExtraSpaces', JsRegExp('/\\s*-\\s*/g'))
var.put('removeExtraHyphens', JsRegExp('/-{2,}/g'))
var.put('removeSlash', JsRegExp('/\\/м\\./gi'))
var.put('addStreetSpaces', var.get('RegExp').create(Js('(вулиця|бульвар|площа|проспект|тупик|узвіз|квартал|провулок)(?=[а-яєіїґ0-9])'), Js('gi')))
var.put('postalCodePattern', JsRegExp('/(\\d{5})(?:,\\s?|\\s)/'))
var.put('removePostalCodePattern', var.get('RegExp').create(var.get('postalCodePattern').get('source'), Js('gi')))
var.put('commonRegionPattern', JsRegExp("/(?:,\\s|^)([а-яєіїґ'\\-\\s]+)\\s?обл(?:\\.|асть)?(?:,\\s?|$)?/i"))
var.put('removeCommonRegionPattern', JsRegExp("/([а-яєіїґ'\\-]+)\\s?обл(?:\\.|[а-яєіїґ]+)?(?:,\\s?|$)?/gi"))
var.put('reverseCommonRegionPattern', JsRegExp("/обл(?:\\.|[а-яєіїґ]+)?\\s([а-яєіїґ'\\-]+)(?:,\\s|$)?/i"))
var.put('regionPattern', var.get('RegExp').create(((Js('(?:^|,s)(')+var.get('regions').callprop('join', Js('|')))+Js(')(?![а-яєіїґ])(?:,\\s|\\s|$)')), Js('i')))
var.put('removeRegionPattern', var.get('RegExp').create(var.get('regionPattern').get('source'), Js('gi')))
var.put('crimeaPattern', JsRegExp('/(Автономна\\s*Республіка\\s*Крим|АР\\s*Крим|(?<![А-ЯІЇ])АРК(?![А-ЯІЇ]))/i'))
var.put('sebastopolKyivPattern', JsRegExp('/(Севастополь|Київ)/i'))
var.put('removeSebastopolKyivPattern', JsRegExp('/м(\\.|істо)\\s?(Севастополь|Київ)(,\\s?|$)/gi'))
var.put('localityPattern', JsRegExp('/(?:(?:^|,\\s)(м\\.|місто|м|с\\.|с-ще|с|сільрада|сільська рада|село|сел\\.|селище(?:\\sміського\\sтипу)?|смт)\\.?\\s)(.+?)(?=,\\s|\\sвулиця|$)/i'))
var.put('removeLocalityPattern', var.get('RegExp').create(var.get('localityPattern').get('source'), Js('gi')))
var.put('localityPattern2', JsRegExp("/(?:,\\s|^)(?!вул)([^0-9\\.,]{4,}),\\s(?:([а-яєіїґ\\s\\-']{4,}?)|-),\\sУкраїна$/i"))
var.put('removeLocalityPattern2', var.get('RegExp').create(var.get('localityPattern2').get('source'), Js('gi')))
var.put('localityPattern3', JsRegExp('/(?:,\\s|^)(?!вул)([^0-9\\.,]{4,}),\\sУкраїна$/i'))
var.put('removeLocalityPattern3', var.get('RegExp').create(var.get('localityPattern3').get('source'), Js('gi')))
var.put('districtPattern', JsRegExp('/(?:^|,\\s)([^,]+?)\\s(?:р-н|район)\\.?(?=,|\\s|$)/i'))
var.put('removeDistrictPattern', var.get('RegExp')(var.get('districtPattern').get('source'), Js('gi')))
var.put('reverseDistrictPattern', JsRegExp('/(?:^|,\\s)(?:р-н|район)\\s([^,]+?)(?=,|$)/i'))
var.put('removeReverseDistrictPattern', var.get('RegExp').create(var.get('reverseDistrictPattern').get('source'), Js('gi')))
var.put('districtPattern2', JsRegExp('/(?:,\\s|^)(?!вул)([^0-9,]*?ий),\\sУкраїна$/i'))
var.put('removeDistrictPattern2', var.get('RegExp')(var.get('districtPattern2').get('source'), Js('gi')))
var.put('districtPattern3', JsRegExp('/(?:,\\s|^)р\\s([^0-9,]*?ий)(?=,\\s|$)/i'))
var.put('removeDistrictPattern3', var.get('RegExp')(var.get('districtPattern3').get('source'), Js('gi')))
var.put('streetPattern', var.get('RegExp').create(((Js('(?:^|,\\s)(?:.{1,4})?(')+var.get('Object').callprop('keys', var.get('geonymTypes')).callprop('join', Js('|')).callprop('replace', JsRegExp('/\\./gi'), Js('\\.')))+Js(')\\.?\\s(.{4,}?)(?=,\\s|$)')), Js('i')))
var.put('removeStreetPattern', var.get('RegExp').create(var.get('streetPattern').get('source'), Js('gi')))
var.put('reverseStreetPattern', var.get('RegExp').create(((Js('(?:^|,\\s)(.{4,}?)\\s(')+var.get('Object').callprop('keys', var.get('geonymTypes')).callprop('join', Js('|')).callprop('replace', JsRegExp('/\\./gi'), Js('\\.')))+Js(')(?=,\\s|$)')), Js('i')))
var.put('removeReverseStreetPattern', var.get('RegExp').create(var.get('reverseStreetPattern').get('source'), Js('gi')))
var.put('streetPattern2', JsRegExp('/(?:^|,\\s)([^0-9,]+)(?:(?:,\\s([^,]+))?(?:,\\s?([^,]+))|$)?/i'))
var.put('removeStreetPattern2', var.get('RegExp').create(var.get('streetPattern2').get('source'), Js('gi')))
var.put('buildingPattern', JsRegExp('/(?:^|,\\s)(?:б\\.|буд\\.|будинок|будівля)\\s(.+?)(?=,\\s|$)/i'))
var.put('removeBuilingPattern', var.get('RegExp').create(var.get('buildingPattern').get('source'), Js('gi')))
var.put('buildingPattern2', JsRegExp('/(?:^|,\\s)([0-9][^,]+)(?:,\\s?([^,]+)|$)?/i'))
var.put('removeBuilingPattern2', var.get('RegExp').create(var.get('buildingPattern2').get('source'), Js('gi')))
var.put('apartmentPattern', JsRegExp('/(?:^|,\\s)(?:кв?\\.|квартира|кімн(?:ата|\\.)|офіс|оф\\.)\\s+(.+?)(?=,|$)/i'))
var.put('removeApartmentPattern', var.get('RegExp').create(var.get('apartmentPattern').get('source'), Js('gi')))
var.put('removeGarbagePattern', JsRegExp('/^(,|\\s)+/g'))
var.put('removeUkrainePattern', JsRegExp('/(?:^|,\\s)УкраЇна/gi'))
pass
# Add lib to the module scope
addressProcessor = var.to_python()
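# Note: js2py's to_python() exposes the translated JS scope as a Python object, so the
# parser's variables and helpers registered above become attributes of `addressProcessor`.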
| 74.195332 | 3,721 | 0.651875 |
bfd9222c32e39746b0b3b1ff268b4dad09f4d637
| 6,851 |
py
|
Python
|
cifar/l1-norm-pruning/vggprune.py
|
dukebw/rethinking-network-pruning
|
5486af65530f61e6688e542f1f2f13dbebf88e69
|
[
"MIT"
] | 1,410 |
2018-10-15T01:47:00.000Z
|
2022-03-31T07:21:13.000Z
|
cifar/l1-norm-pruning/vggprune.py
|
dukebw/rethinking-network-pruning
|
5486af65530f61e6688e542f1f2f13dbebf88e69
|
[
"MIT"
] | 50 |
2018-10-15T17:15:52.000Z
|
2021-12-23T21:40:00.000Z
|
cifar/l1-norm-pruning/vggprune.py
|
dukebw/rethinking-network-pruning
|
5486af65530f61e6688e542f1f2f13dbebf88e69
|
[
"MIT"
] | 336 |
2018-10-15T11:54:35.000Z
|
2022-03-28T09:22:57.000Z
|
import argparse
import numpy as np
import os
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision import datasets, transforms
from models import *
# Prune settings
parser = argparse.ArgumentParser(description='PyTorch Slimming CIFAR prune')
parser.add_argument('--dataset', type=str, default='cifar10',
help='training dataset (default: cifar10)')
parser.add_argument('--test-batch-size', type=int, default=256, metavar='N',
help='input batch size for testing (default: 256)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--depth', type=int, default=16,
help='depth of the vgg')
parser.add_argument('--model', default='', type=str, metavar='PATH',
help='path to the model (default: none)')
parser.add_argument('--save', default='.', type=str, metavar='PATH',
help='path to save pruned model (default: none)')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if not os.path.exists(args.save):
os.makedirs(args.save)
model = vgg(dataset=args.dataset, depth=args.depth)
if args.cuda:
model.cuda()
if args.model:
if os.path.isfile(args.model):
print("=> loading checkpoint '{}'".format(args.model))
checkpoint = torch.load(args.model)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {}) Prec1: {:f}"
.format(args.model, checkpoint['epoch'], best_prec1))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
print('Pre-processing Successful!')
# Simple test of the model after pre-processing (sanity-check the loaded checkpoint before pruning)
def test(model):
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
if args.dataset == 'cifar10':
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('./data.cifar10', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
elif args.dataset == 'cifar100':
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR100('./data.cifar100', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
else:
raise ValueError("No valid dataset is given.")
model.eval()
correct = 0
for data, target in test_loader:
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
print('\nTest set: Accuracy: {}/{} ({:.1f}%)\n'.format(
correct, len(test_loader.dataset), 100. * correct / len(test_loader.dataset)))
return correct / float(len(test_loader.dataset))
acc = test(model)
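# Target configuration for the pruned VGG: each number is how many conv output channels to keep,
# 'M' marks a max-pooling layer.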
cfg = [32, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 256, 256, 256, 'M', 256, 256, 256]
cfg_mask = []
layer_id = 0
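# For each conv layer, rank output channels by the L1 norm of their filters and keep the
# cfg[layer_id] channels with the largest norms; cfg_mask stores a 0/1 mask per layer.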
for m in model.modules():
if isinstance(m, nn.Conv2d):
out_channels = m.weight.data.shape[0]
if out_channels == cfg[layer_id]:
cfg_mask.append(torch.ones(out_channels))
layer_id += 1
continue
weight_copy = m.weight.data.abs().clone()
weight_copy = weight_copy.cpu().numpy()
L1_norm = np.sum(weight_copy, axis=(1, 2, 3))
arg_max = np.argsort(L1_norm)
arg_max_rev = arg_max[::-1][:cfg[layer_id]]
assert arg_max_rev.size == cfg[layer_id], "size of arg_max_rev not correct"
mask = torch.zeros(out_channels)
mask[arg_max_rev.tolist()] = 1
cfg_mask.append(mask)
layer_id += 1
elif isinstance(m, nn.MaxPool2d):
layer_id += 1
newmodel = vgg(dataset=args.dataset, cfg=cfg)
if args.cuda:
newmodel.cuda()
start_mask = torch.ones(3)
layer_id_in_cfg = 0
end_mask = cfg_mask[layer_id_in_cfg]
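# Walk the original and pruned models in lockstep and copy only the selected weights:
# start_mask picks the surviving input channels, end_mask the surviving output channels.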
for [m0, m1] in zip(model.modules(), newmodel.modules()):
if isinstance(m0, nn.BatchNorm2d):
idx1 = np.squeeze(np.argwhere(np.asarray(end_mask.cpu().numpy())))
if idx1.size == 1:
idx1 = np.resize(idx1,(1,))
m1.weight.data = m0.weight.data[idx1.tolist()].clone()
m1.bias.data = m0.bias.data[idx1.tolist()].clone()
m1.running_mean = m0.running_mean[idx1.tolist()].clone()
m1.running_var = m0.running_var[idx1.tolist()].clone()
layer_id_in_cfg += 1
start_mask = end_mask
if layer_id_in_cfg < len(cfg_mask): # do not change in Final FC
end_mask = cfg_mask[layer_id_in_cfg]
elif isinstance(m0, nn.Conv2d):
idx0 = np.squeeze(np.argwhere(np.asarray(start_mask.cpu().numpy())))
idx1 = np.squeeze(np.argwhere(np.asarray(end_mask.cpu().numpy())))
print('In shape: {:d}, Out shape {:d}.'.format(idx0.size, idx1.size))
if idx0.size == 1:
idx0 = np.resize(idx0, (1,))
if idx1.size == 1:
idx1 = np.resize(idx1, (1,))
w1 = m0.weight.data[:, idx0.tolist(), :, :].clone()
w1 = w1[idx1.tolist(), :, :, :].clone()
m1.weight.data = w1.clone()
elif isinstance(m0, nn.Linear):
if layer_id_in_cfg == len(cfg_mask):
idx0 = np.squeeze(np.argwhere(np.asarray(cfg_mask[-1].cpu().numpy())))
if idx0.size == 1:
idx0 = np.resize(idx0, (1,))
m1.weight.data = m0.weight.data[:, idx0].clone()
m1.bias.data = m0.bias.data.clone()
layer_id_in_cfg += 1
continue
m1.weight.data = m0.weight.data.clone()
m1.bias.data = m0.bias.data.clone()
elif isinstance(m0, nn.BatchNorm1d):
m1.weight.data = m0.weight.data.clone()
m1.bias.data = m0.bias.data.clone()
m1.running_mean = m0.running_mean.clone()
m1.running_var = m0.running_var.clone()
torch.save({'cfg': cfg, 'state_dict': newmodel.state_dict()}, os.path.join(args.save, 'pruned.pth.tar'))
print(newmodel)
model = newmodel
acc = test(model)
num_parameters = sum([param.nelement() for param in newmodel.parameters()])
with open(os.path.join(args.save, "prune.txt"), "w") as fp:
fp.write("Number of parameters: \n"+str(num_parameters)+"\n")
fp.write("Test accuracy: \n"+str(acc)+"\n")
| 41.521212 | 104 | 0.620493 |
9eb6ce364d336e78abb6d20947f94847a7e7647b
| 902 |
py
|
Python
|
filemonitor/migrations/0002_auto_20170523_1140.py
|
imsilence/shadow-hostmonitor
|
faa28d7f5bb85212d5a64a60f742b807cf8644f7
|
[
"Apache-2.0"
] | 1 |
2019-11-02T14:25:29.000Z
|
2019-11-02T14:25:29.000Z
|
filemonitor/migrations/0002_auto_20170523_1140.py
|
imsilence/shadow-hostmonitor
|
faa28d7f5bb85212d5a64a60f742b807cf8644f7
|
[
"Apache-2.0"
] | null | null | null |
filemonitor/migrations/0002_auto_20170523_1140.py
|
imsilence/shadow-hostmonitor
|
faa28d7f5bb85212d5a64a60f742b807cf8644f7
|
[
"Apache-2.0"
] | 1 |
2019-11-02T14:25:19.000Z
|
2019-11-02T14:25:19.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-23 03:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('filemonitor', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Config',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cid', models.CharField(default='', max_length=128)),
('content', models.TextField(default='{}')),
('adtime', models.DateTimeField(auto_now_add=True)),
],
),
migrations.AddField(
model_name='event',
name='cid',
field=models.CharField(default='', max_length=128),
),
]
| 30.066667 | 115 | 0.547672 |
4157b9343eddb28feb975c51a8d64c8ee3aa8619
| 343 |
py
|
Python
|
shards/manager.py
|
ReigenAraka/milvus
|
b2f19ace0e1dcd431a512141f42b748581d4b92d
|
[
"Apache-2.0"
] | 4 |
2020-07-29T02:59:53.000Z
|
2021-11-16T11:07:51.000Z
|
shards/manager.py
|
liangwlw/milvus
|
7e7f626b9c7288c1c82f5dafed87d33897f4b64e
|
[
"Apache-2.0"
] | 2 |
2020-08-20T07:17:50.000Z
|
2020-08-21T04:21:34.000Z
|
shards/manager.py
|
liangwlw/milvus
|
7e7f626b9c7288c1c82f5dafed87d33897f4b64e
|
[
"Apache-2.0"
] | 2 |
2021-06-09T23:50:48.000Z
|
2021-06-17T06:24:29.000Z
|
import fire
from mishards import db, settings
class DBHandler:
@classmethod
def create_all(cls):
db.create_all()
@classmethod
def drop_all(cls):
db.drop_all()
if __name__ == '__main__':
db.init_db(settings.DefaultConfig.SQLALCHEMY_DATABASE_URI)
from mishards import models
fire.Fire(DBHandler)
| 18.052632 | 62 | 0.693878 |
c0c62074a3f934ca8226452c3a0871b149e68c78
| 2,729 |
py
|
Python
|
domain_hole.py
|
Blue-Giant/COVID_DNN
|
39c62a25dc78b9feb418ee0474cb06503003d2b4
|
[
"MIT"
] | 1 |
2020-11-13T11:42:36.000Z
|
2020-11-13T11:42:36.000Z
|
domain_hole.py
|
Blue-Giant/COVID_DNN
|
39c62a25dc78b9feb418ee0474cb06503003d2b4
|
[
"MIT"
] | null | null | null |
domain_hole.py
|
Blue-Giant/COVID_DNN
|
39c62a25dc78b9feb418ee0474cb06503003d2b4
|
[
"MIT"
] | null | null | null |
###################################
# coding=utf-8
# !/usr/bin/env python
# __author__ = 'LXA'
# ctime 2020.10.15
# Draw ellipses and circles inside a rectangular region
###################################
from matplotlib.patches import Ellipse, Circle, Rectangle
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure()
ax = fig.add_subplot(111)
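# Yellow square covering the domain [-1, 1] x [-1, 1]; the white circles added below act as holes.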
rctan = Rectangle((-1, -1), 2, 2, color='y', alpha=1.0)
ax.add_patch(rctan)
# # The second argument is the number of sides, the third is the distance from the center
# polygon1 = patches.RegularPolygon((0, 0), 5, 1.25, color= "r")
# ax.add_patch(polygon1)
# # x1 = np.random.uniform(-1, 1, 3000) # randomly generate 300 floats with mean 2 and variance 1.2, i.e. the x coordinates of the first cluster of points
# # y1 = np.random.uniform(-1, 1, 3000) # random
# # plt.scatter(x1, y1, cmap='rainbow', alpha=0.25)
cir1 = Circle(xy=(0.1, 0.1), radius=0.125, color='w', alpha=1.0)
cir2 = Circle(xy=(0.3, 0.5), radius=0.075, color='w', alpha=1.0)
cir3 = Circle(xy=(0.6, 0.2), radius=0.15, color='w', alpha=1.0)
cir4 = Circle(xy=(0.825, 0.5), radius=0.075, color='w', alpha=1.0)
cir5 = Circle(xy=(0.1, 0.75), radius=0.1, color='w', alpha=1.0)
cir6 = Circle(xy=(-0.1, 0.8), radius=0.075, color='w', alpha=1.0)
cir7 = Circle(xy=(-0.4, 0.5), radius=0.075, color='w', alpha=1.0)
cir8 = Circle(xy=(-0.6, 0.2), radius=0.075, color='w', alpha=1.0)
cir9 = Circle(xy=(-0.8, 0.7), radius=0.075, color='w', alpha=1.0)
cir10 = Circle(xy=(-0.9, 0.1), radius=0.1, color='w', alpha=1.0)
cir11 = Circle(xy=(-0.1, -0.75), radius=0.1, color='w', alpha=1.0)
cir12 = Circle(xy=(-0.4, -0.8), radius=0.075, color='w', alpha=1.0)
cir13 = Circle(xy=(-0.3, -0.5), radius=0.075, color='w', alpha=1.0)
cir14 = Circle(xy=(-0.6, -0.2), radius=0.125, color='w', alpha=1.0)
cir15 = Circle(xy=(-0.825, -0.5), radius=0.075, color='w', alpha=1.0)
cir16 = Circle(xy=(0.1, -0.5), radius=0.075, color='w', alpha=1.0)
cir17 = Circle(xy=(0.3, -0.2), radius=0.105, color='w', alpha=1.0)
cir18 = Circle(xy=(0.5, -0.75), radius=0.125, color='w', alpha=1.0)
cir19 = Circle(xy=(0.725, -0.3), radius=0.1, color='w', alpha=1.0)
cir20 = Circle(xy=(0.9, -0.9), radius=0.075, color='w', alpha=1.0)
ax.add_patch(cir1)
ax.add_patch(cir2)
ax.add_patch(cir3)
ax.add_patch(cir4)
ax.add_patch(cir5)
ax.add_patch(cir6)
ax.add_patch(cir7)
ax.add_patch(cir8)
ax.add_patch(cir9)
ax.add_patch(cir10)
ax.add_patch(cir11)
ax.add_patch(cir12)
ax.add_patch(cir13)
ax.add_patch(cir14)
ax.add_patch(cir15)
ax.add_patch(cir16)
ax.add_patch(cir17)
ax.add_patch(cir18)
ax.add_patch(cir19)
ax.add_patch(cir20)
# ax.add_patch(ell1)
plt.axis('scaled')
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
plt.axis('equal') #changes limits of x or y axis so that equal increments of x and y have the same length
plt.show()
| 36.878378 | 108 | 0.621473 |
2e210339337dea827c94c513ca3893a77d7cbd15
| 10,843 |
py
|
Python
|
venv/lib/python2.7/site-packages/tinydb/queries.py
|
betisb/WebProgrammin_I
|
6e2e4525c8d7766d65785e30bb43234cd7d829ef
|
[
"MIT"
] | 1 |
2019-04-15T10:28:42.000Z
|
2019-04-15T10:28:42.000Z
|
venv/lib/python2.7/site-packages/tinydb/queries.py
|
betisb/WebProgrammin_I
|
6e2e4525c8d7766d65785e30bb43234cd7d829ef
|
[
"MIT"
] | null | null | null |
venv/lib/python2.7/site-packages/tinydb/queries.py
|
betisb/WebProgrammin_I
|
6e2e4525c8d7766d65785e30bb43234cd7d829ef
|
[
"MIT"
] | null | null | null |
"""
Contains the querying interface.
Starting with :class:`~tinydb.queries.Query` you can construct complex
queries:
>>> ((where('f1') == 5) & (where('f2') != 2)) | where('s').matches(r'^\w+$')
(('f1' == 5) and ('f2' != 2)) or ('s' ~= ^\w+$ )
Queries are executed by using the ``__call__``:
>>> q = where('val') == 5
>>> q({'val': 5})
True
>>> q({'val': 1})
False
"""
import re
import sys
from .utils import catch_warning, freeze
__all__ = ('Query', 'where')
def is_sequence(obj):
return hasattr(obj, '__iter__')
class QueryImpl(object):
"""
A query implementation.
This query implementation wraps a test function which is run when the
query is evaluated by calling the object.
Queries can be combined with logical and/or and modified with logical not.
"""
def __init__(self, test, hashval):
self._test = test
self.hashval = hashval
def __call__(self, value):
return self._test(value)
def __hash__(self):
return hash(self.hashval)
def __repr__(self):
return 'QueryImpl{}'.format(self.hashval)
def __eq__(self, other):
return self.hashval == other.hashval
# --- Query modifiers -----------------------------------------------------
def __and__(self, other):
# We use a frozenset for the hash as the AND operation is commutative
# (a & b == b & a)
return QueryImpl(lambda value: self(value) and other(value),
('and', frozenset([self.hashval, other.hashval])))
def __or__(self, other):
# We use a frozenset for the hash as the OR operation is commutative
# (a | b == b | a)
return QueryImpl(lambda value: self(value) or other(value),
('or', frozenset([self.hashval, other.hashval])))
def __invert__(self):
return QueryImpl(lambda value: not self(value),
('not', self.hashval))
class Query(QueryImpl):
"""
TinyDB Queries.
Allows building queries for TinyDB databases. There are two main ways of
using queries:
1) ORM-like usage:
>>> User = Query()
>>> db.search(User.name == 'John Doe')
>>> db.search(User['logged-in'] == True)
2) Classical usage:
>>> db.search(where('value') == True)
Note that ``where(...)`` is a shorthand for ``Query(...)`` allowing for
a more fluent syntax.
Besides the methods documented here you can combine queries using the
binary AND and OR operators:
>>> db.search(where('field1').exists() & where('field2') == 5) # Binary AND
>>> db.search(where('field1').exists() | where('field2') == 5) # Binary OR
Queries are executed by calling the resulting object. They expect to get
the document to test as the first argument and return ``True`` or
``False`` depending on whether the documents matches the query or not.
"""
def __init__(self):
self._path = []
super(Query, self).__init__(
self._prepare_test(lambda _: True),
('path', tuple(self._path))
)
def __repr__(self):
return '{}()'.format(type(self).__name__)
def __hash__(self):
return super(Query, self).__hash__()
def __getattr__(self, item):
query = Query()
query._path = self._path + [item]
query.hashval = ('path', tuple(query._path))
return query
__getitem__ = __getattr__
def _prepare_test(self, test):
def runner(value):
try:
# Resolve the path
for part in self._path:
value = value[part]
except (KeyError, TypeError):
return False
else:
return test(value)
return runner
def _generate_test(self, test, hashval):
"""
Generate a query based on a test function.
:param test: The test the query executes.
:param hashval: The hash of the query.
:return: A :class:`~tinydb.queries.QueryImpl` object
"""
if not self._path:
raise ValueError('Query has no path')
return QueryImpl(self._prepare_test(test), hashval)
def __eq__(self, rhs):
"""
Test a dict value for equality.
>>> Query().f1 == 42
:param rhs: The value to compare against
"""
if sys.version_info <= (3, 0): # pragma: no cover
# Special UTF-8 handling on Python 2
def test(value):
with catch_warning(UnicodeWarning):
try:
return value == rhs
except UnicodeWarning:
# Dealing with a case, where 'value' or 'rhs'
# is unicode and the other is a byte string.
if isinstance(value, str):
return value.decode('utf-8') == rhs
elif isinstance(rhs, str):
return value == rhs.decode('utf-8')
else: # pragma: no cover
def test(value):
return value == rhs
return self._generate_test(
lambda value: test(value),
('==', tuple(self._path), freeze(rhs))
)
def __ne__(self, rhs):
"""
Test a dict value for inequality.
>>> Query().f1 != 42
:param rhs: The value to compare against
"""
return self._generate_test(
lambda value: value != rhs,
('!=', tuple(self._path), freeze(rhs))
)
def __lt__(self, rhs):
"""
Test a dict value for being lower than another value.
>>> Query().f1 < 42
:param rhs: The value to compare against
"""
return self._generate_test(
lambda value: value < rhs,
('<', tuple(self._path), rhs)
)
def __le__(self, rhs):
"""
Test a dict value for being lower than or equal to another value.
>>> where('f1') <= 42
:param rhs: The value to compare against
"""
return self._generate_test(
lambda value: value <= rhs,
('<=', tuple(self._path), rhs)
)
def __gt__(self, rhs):
"""
Test a dict value for being greater than another value.
>>> Query().f1 > 42
:param rhs: The value to compare against
"""
return self._generate_test(
lambda value: value > rhs,
('>', tuple(self._path), rhs)
)
def __ge__(self, rhs):
"""
Test a dict value for being greater than or equal to another value.
>>> Query().f1 >= 42
:param rhs: The value to compare against
"""
return self._generate_test(
lambda value: value >= rhs,
('>=', tuple(self._path), rhs)
)
def exists(self):
"""
Test for a dict where a provided key exists.
>>> Query().f1.exists()
"""
return self._generate_test(
lambda _: True,
('exists', tuple(self._path))
)
def matches(self, regex, flags=0):
"""
Run a regex test against a dict value (whole string has to match).
>>> Query().f1.matches(r'^\w+$')
:param regex: The regular expression to use for matching
"""
return self._generate_test(
lambda value: re.match(regex, value, flags),
('matches', tuple(self._path), regex)
)
def search(self, regex, flags=0):
"""
Run a regex test against a dict value (only a substring has to
match).
>>> Query().f1.search(r'^\w+$')
:param regex: The regular expression to use for matching
"""
return self._generate_test(
lambda value: re.search(regex, value, flags),
('search', tuple(self._path), regex)
)
def test(self, func, *args):
"""
Run a user-defined test function against a dict value.
>>> def test_func(val):
... return val == 42
...
>>> Query().f1.test(test_func)
:param func: The function to call, passing the dict as the first
argument
:param args: Additional arguments to pass to the test function
"""
return self._generate_test(
lambda value: func(value, *args),
('test', tuple(self._path), func, args)
)
def any(self, cond):
"""
Check if a condition is met by any document in a list,
where a condition can also be a sequence (e.g. list).
>>> Query().f1.any(Query().f2 == 1)
Matches::
{'f1': [{'f2': 1}, {'f2': 0}]}
>>> Query().f1.any([1, 2, 3])
Matches::
{'f1': [1, 2]}
{'f1': [3, 4, 5]}
:param cond: Either a query that at least one document has to match or
a list of which at least one document has to be contained
in the tested document.
"""
if callable(cond):
def _cmp(value):
return is_sequence(value) and any(cond(e) for e in value)
else:
def _cmp(value):
return is_sequence(value) and any(e in cond for e in value)
return self._generate_test(
lambda value: _cmp(value),
('any', tuple(self._path), freeze(cond))
)
def all(self, cond):
"""
Check if a condition is met by all documents in a list,
where a condition can also be a sequence (e.g. list).
>>> Query().f1.all(Query().f2 == 1)
Matches::
{'f1': [{'f2': 1}, {'f2': 1}]}
>>> Query().f1.all([1, 2, 3])
Matches::
{'f1': [1, 2, 3, 4, 5]}
:param cond: Either a query that all documents have to match or a list
which has to be contained in the tested document.
"""
if callable(cond):
def _cmp(value):
return is_sequence(value) and all(cond(e) for e in value)
else:
def _cmp(value):
return is_sequence(value) and all(e in value for e in cond)
return self._generate_test(
lambda value: _cmp(value),
('all', tuple(self._path), freeze(cond))
)
def one_of(self, items):
"""
Check if the value is contained in a list or generator.
>>> Query().f1.one_of(['value 1', 'value 2'])
:param items: The list of items to check with
"""
return self._generate_test(
lambda value: value in items,
('one_of', tuple(self._path), freeze(items))
)
def where(key):
return Query()[key]
| 27.731458 | 79 | 0.53048 |
f8c191809f76425783dab1bc5ede182bdd46ba5e
| 9,345 |
py
|
Python
|
yt/data_objects/tests/test_sph_data_objects.py
|
cgyurgyik/yt
|
9251e7bff9112e0f54c9b24f6a8bbba66869bb9d
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
yt/data_objects/tests/test_sph_data_objects.py
|
cgyurgyik/yt
|
9251e7bff9112e0f54c9b24f6a8bbba66869bb9d
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
yt/data_objects/tests/test_sph_data_objects.py
|
cgyurgyik/yt
|
9251e7bff9112e0f54c9b24f6a8bbba66869bb9d
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
import numpy as np
from yt import SlicePlot
from yt.testing import assert_equal, fake_sph_grid_ds, fake_sph_orientation_ds
def test_point():
ds = fake_sph_orientation_ds()
field_data = ds.stream_handler.fields["stream_file"]
ppos = [field_data["io", "particle_position_%s" % d] for d in "xyz"]
ppos = np.array(ppos).T
for pos in ppos:
for i in range(-1, 2):
offset = 0.1 * np.array([i, 0, 0])
pt = ds.point(pos + offset)
assert_equal(pt["gas", "density"].shape[0], 1)
for j in range(-1, 2):
offset = 0.1 * np.array([0, j, 0])
pt = ds.point(pos + offset)
assert_equal(pt["gas", "density"].shape[0], 1)
for k in range(-1, 2):
offset = 0.1 * np.array([0, 0, k])
pt = ds.point(pos + offset)
assert_equal(pt["gas", "density"].shape[0], 1)
# The number of particles along each slice axis at that coordinate
SLICE_ANSWERS = {
("x", 0): 6,
("x", 0.5): 0,
("x", 1): 1,
("y", 0): 5,
("y", 1): 1,
("y", 2): 1,
("z", 0): 4,
("z", 1): 1,
("z", 2): 1,
("z", 3): 1,
}
def test_slice():
ds = fake_sph_orientation_ds()
for (ax, coord), answer in SLICE_ANSWERS.items():
# test that we can still select particles even if we offset the slice
# within each particle's smoothing volume
for i in range(-1, 2):
sl = ds.slice(ax, coord + i * 0.1)
assert_equal(sl["gas", "density"].shape[0], answer)
REGION_ANSWERS = {
((-4, -4, -4), (4, 4, 4)): 7,
((0, 0, 0), (4, 4, 4)): 7,
((1, 0, 0), (4, 4, 4)): 1,
((0, 1, 0), (4, 4, 4)): 2,
((0, 0, 1), (4, 4, 4)): 3,
((0, 0, 0), (4, 4, 2)): 6,
((0, 0, 0), (4, 4, 1)): 5,
((0, 0, 0), (4, 1, 4)): 6,
((0, 0, 0), (1, 1, 4)): 6,
}
def test_region():
ds = fake_sph_orientation_ds()
for (left_edge, right_edge), answer in REGION_ANSWERS.items():
# test that regions enclosing a particle's smoothing region
# correctly select SPH particles
for i in range(-1, 2):
for j in range(-1, 2):
le = np.array([le + i * 0.1 for le in left_edge])
re = np.array([re + j * 0.1 for re in right_edge])
# check if we went off the edge of the domain
whl = le < ds.domain_left_edge
le[whl] = ds.domain_left_edge[whl]
whr = re > ds.domain_right_edge
re[whr] = ds.domain_right_edge[whr]
reg = ds.box(le, re)
assert_equal(reg["gas", "density"].shape[0], answer)
SPHERE_ANSWERS = {
((0, 0, 0), 4): 7,
((0, 0, 0), 3): 7,
((0, 0, 0), 2): 6,
((0, 0, 0), 1): 4,
((0, 0, 0), 0.5): 1,
((1, 0, 0), 0.5): 1,
((1, 0, 0), 1.0): 2,
((0, 1, 0), 1.0): 3,
((0, 0, 1), 1.0): 3,
}
def test_sphere():
ds = fake_sph_orientation_ds()
for (center, radius), answer in SPHERE_ANSWERS.items():
# test that spheres enclosing a particle's smoothing region
# correctly select SPH particles
for i in range(-1, 2):
for j in range(-1, 2):
cent = np.array([c + i * 0.1 for c in center])
rad = radius + 0.1 * j
sph = ds.sphere(cent, rad)
assert_equal(sph["gas", "density"].shape[0], answer)
DISK_ANSWERS = {
((0, 0, 0), (0, 0, 1), 4, 3): 7,
((0, 0, 0), (0, 0, 1), 4, 2): 6,
((0, 0, 0), (0, 0, 1), 4, 1): 5,
((0, 0, 0), (0, 0, 1), 4, 0.5): 4,
((0, 0, 0), (0, 1, 0), 4, 3): 7,
((0, 0, 0), (0, 1, 0), 4, 2): 7,
((0, 0, 0), (0, 1, 0), 4, 1): 6,
((0, 0, 0), (0, 1, 0), 4, 0.5): 5,
((0, 0, 0), (1, 0, 0), 4, 3): 7,
((0, 0, 0), (1, 0, 0), 4, 2): 7,
((0, 0, 0), (1, 0, 0), 4, 1): 7,
((0, 0, 0), (1, 0, 0), 4, 0.5): 6,
((0, 0, 0), (1, 1, 1), 1, 1): 4,
((-0.5, -0.5, -0.5), (1, 1, 1), 4, 4): 7,
}
def test_disk():
ds = fake_sph_orientation_ds()
for (center, normal, radius, height), answer in DISK_ANSWERS.items():
# test that disks enclosing a particle's smoothing region
# correctly select SPH particles
for i in range(-1, 2):
cent = np.array([c + i * 0.1 for c in center])
disk = ds.disk(cent, normal, radius, height)
assert_equal(disk["gas", "density"].shape[0], answer)
RAY_ANSWERS = {
((0, 0, 0), (3, 0, 0)): 2,
((0, 0, 0), (0, 3, 0)): 3,
((0, 0, 0), (0, 0, 3)): 4,
((0, 1, 0), (0, 2, 0)): 2,
((1, 0, 0), (0, 2, 0)): 2,
((0.5, 0.5, 0.5), (0.5, 0.5, 3.5)): 0,
}
def test_ray():
ds = fake_sph_orientation_ds()
for (start_point, end_point), answer in RAY_ANSWERS.items():
for i in range(-1, 2):
start = np.array([s + i * 0.1 for s in start_point])
end = np.array([e + i * 0.1 for e in end_point])
ray = ds.ray(start, end)
assert_equal(ray["gas", "density"].shape[0], answer)
CUTTING_ANSWERS = {
((1, 0, 0), (0, 0, 0)): 6,
((0, 1, 0), (0, 0, 0)): 5,
((0, 0, 1), (0, 0, 0)): 4,
((1, 1, 1), (1.0 / 3, 1.0 / 3, 1.0 / 3)): 3,
((1, 1, 1), (2.0 / 3, 2.0 / 3, 2.0 / 3)): 2,
((1, 1, 1), (1, 1, 1)): 1,
}
def test_cutting():
ds = fake_sph_orientation_ds()
for (normal, center), answer in CUTTING_ANSWERS.items():
for i in range(-1, 2):
cen = [c + 0.1 * i for c in center]
cut = ds.cutting(normal, cen)
assert_equal(cut["gas", "density"].shape[0], answer)
def test_chained_selection():
ds = fake_sph_orientation_ds()
for (center, radius), answer in SPHERE_ANSWERS.items():
sph = ds.sphere(center, radius)
region = ds.box(ds.domain_left_edge, ds.domain_right_edge, data_source=sph)
assert_equal(region["gas", "density"].shape[0], answer)
def test_boolean_selection():
ds = fake_sph_orientation_ds()
sph = ds.sphere([0, 0, 0], 0.5)
sph2 = ds.sphere([1, 0, 0], 0.5)
reg = ds.all_data()
neg = reg - sph
assert_equal(neg["gas", "density"].shape[0], 6)
plus = sph + sph2
assert_equal(plus["gas", "density"].shape[0], 2)
intersect = sph & sph2
assert_equal(intersect["gas", "density"].shape[0], 0)
intersect = reg & sph2
assert_equal(intersect["gas", "density"].shape[0], 1)
exclusive = sph ^ sph2
assert_equal(exclusive["gas", "density"].shape[0], 2)
exclusive = sph ^ reg
assert_equal(exclusive["gas", "density"].shape[0], 6)
intersect = ds.intersection([sph, sph2])
assert_equal(intersect["gas", "density"].shape[0], 0)
intersect = ds.intersection([reg, sph2])
assert_equal(intersect["gas", "density"].shape[0], 1)
union = ds.union([sph, sph2])
assert_equal(union["gas", "density"].shape[0], 2)
union = ds.union([sph, reg])
assert_equal(union["gas", "density"].shape[0], 7)
def test_arbitrary_grid():
ds = fake_sph_grid_ds()
# this loads up some sph data in a test grid
agrid = ds.arbitrary_grid([0, 0, 0], [3, 3, 3], dims=[3, 3, 3])
# the field should be equal to the density of a particle in every voxel
# which is 1.
dens = agrid["gas", "density"]
answers = np.ones(shape=(3, 3, 3))
assert_equal(dens, answers)
def test_compare_arbitrary_grid_slice():
ds = fake_sph_orientation_ds()
c = np.array([0.0, 0.0, 0.0])
width = 1.5
buff_size = 51
field = ("gas", "density")
# buffer from arbitrary grid
ag = ds.arbitrary_grid(c - width / 2, c + width / 2, [buff_size] * 3)
buff_ag = ag[field][:, :, int(np.floor(buff_size / 2))].d.T
# buffer from slice
p = SlicePlot(ds, "z", field, center=c, width=width)
p.set_buff_size(51)
buff_slc = p.frb.data[field].d
assert_equal(buff_slc, buff_ag)
def test_gather_slice():
ds = fake_sph_grid_ds()
ds.num_neighbors = 5
field = ("gas", "density")
c = np.array([1.5, 1.5, 0.5])
width = 3.0
p = SlicePlot(ds, "z", field, center=c, width=width)
p.set_buff_size(3)
buff_scatter = p.frb.data[field].d
ds.sph_smoothing_style = "gather"
p = SlicePlot(ds, "z", field, center=c, width=width)
p.set_buff_size(3)
buff_gather = p.frb.data[field].d
assert_equal(buff_scatter, buff_gather)
def test_gather_grid():
ds = fake_sph_grid_ds()
ds.num_neighbors = 5
field = ("gas", "density")
ag = ds.arbitrary_grid([0, 0, 0], [3, 3, 3], dims=[3, 3, 3])
scatter = ag[field]
ds.sph_smoothing_style = "gather"
ag = ds.arbitrary_grid([0, 0, 0], [3, 3, 3], dims=[3, 3, 3])
gather = ag[field]
assert_equal(gather, scatter)
def test_covering_grid_scatter():
ds = fake_sph_grid_ds()
field = ("gas", "density")
buff_size = 8
ag = ds.arbitrary_grid(0, 3, [buff_size] * 3)
ag_dens = ag[field].to("g*cm**-3").d
cg = ds.covering_grid(3, 0, 8)
cg_dens = cg[field].to("g*cm**-3").d
assert_equal(ag_dens, cg_dens)
def test_covering_grid_gather():
ds = fake_sph_grid_ds()
ds.sph_smoothing_style = "gather"
ds.num_neighbors = 5
field = ("gas", "density")
buff_size = 8
ag = ds.arbitrary_grid(0, 3, [buff_size] * 3)
ag_dens = ag[field].to("g*cm**-3").d
cg = ds.covering_grid(3, 0, 8)
cg_dens = cg[field].to("g*cm**-3").d
assert_equal(ag_dens, cg_dens)
| 27.895522 | 83 | 0.530337 |
7b1b223af3866568b86ac96175cd05323bd92273
| 8,486 |
py
|
Python
|
bm/base/tf_model.py
|
carolinscholl/boltzmann-machines
|
c6d3f9051b1cb12eca7a9c6ad540e58ee36f7501
|
[
"MIT"
] | 1 |
2018-05-28T12:32:16.000Z
|
2018-05-28T12:32:16.000Z
|
bm/base/tf_model.py
|
carolinscholl/boltzmann-machines
|
c6d3f9051b1cb12eca7a9c6ad540e58ee36f7501
|
[
"MIT"
] | null | null | null |
bm/base/tf_model.py
|
carolinscholl/boltzmann-machines
|
c6d3f9051b1cb12eca7a9c6ad540e58ee36f7501
|
[
"MIT"
] | null | null | null |
import os
import json
import tensorflow as tf
from functools import wraps
from bm.base import (BaseModel, DtypeMixin,
is_param_name)
def run_in_tf_session(check_initialized=True, update_seed=False):
"""Decorator function that takes care to load appropriate graph/session,
depending on whether model can be loaded from disk or is just created,
and to execute `f` inside this session.
"""
def wrap(f):
@wraps(f) # preserve bound method properties
def wrapped_f(model, *args, **kwargs):
tf.compat.v1.reset_default_graph()
model._tf_graph = tf.compat.v1.get_default_graph()
if update_seed:
tf.compat.v1.set_random_seed(model.make_random_seed())
if model.initialized_: # model should be loaded from disk
model._tf_saver = tf.train.import_meta_graph(model._tf_meta_graph_filepath)
with model._tf_graph.as_default():
with tf.compat.v1.Session(config=model._tf_session_config) as model._tf_session:
model._tf_saver.restore(model._tf_session, model._model_filepath)
model._init_tf_writers()
res = f(model, *args, **kwargs)
elif check_initialized:
raise RuntimeError('`fit` or `init` must be called before calling `{0}`'.format(f.__name__))
else:
with model._tf_graph.as_default():
with tf.compat.v1.Session(config=model._tf_session_config) as model._tf_session:
model._make_tf_model()
model._init_tf_ops()
model._init_tf_writers()
res = f(model, *args, **kwargs)
return res
return wrapped_f
return wrap
class TensorFlowModel(BaseModel, DtypeMixin):
def __init__(self, model_path='tf_model/', paths=None,
tf_session_config=None, tf_saver_params=None, json_params=None,
*args, **kwargs):
super(TensorFlowModel, self).__init__(*args, **kwargs)
self._model_dirpath = None
self._model_filepath = None
self._params_filepath = None
self._random_state_filepath = None
self._train_summary_dirpath = None
self._val_summary_dirpath = None
self._tf_meta_graph_filepath = None
self.update_working_paths(model_path=model_path, paths=paths)
self._tf_session_config = tf_session_config or tf.compat.v1.ConfigProto()
self.tf_saver_params = tf_saver_params or {}
self.json_params = json_params or {}
self.json_params.setdefault('sort_keys', True)
self.json_params.setdefault('indent', 4)
self.initialized_ = False
self._tf_graph = tf.Graph()
self._tf_session = None
self._tf_saver = None
self._tf_merged_summaries = None
self._tf_train_writer = None
self._tf_val_writer = None
@staticmethod
def compute_working_paths(model_path):
"""
Parameters
----------
model_path : str
Model dirpath (should contain slash at the end) or filepath
"""
head, tail = os.path.split(model_path)
if not head: head = '.'
if not head.endswith('/'): head += '/'
if not tail: tail = 'model'
paths = {}
paths['model_dirpath'] = head
paths['model_filepath'] = os.path.join(paths['model_dirpath'], tail)
paths['params_filepath'] = os.path.join(paths['model_dirpath'], 'params.json')
paths['random_state_filepath'] = os.path.join(paths['model_dirpath'], 'random_state.json')
paths['train_summary_dirpath'] = os.path.join(paths['model_dirpath'], 'logs/train')
paths['val_summary_dirpath'] = os.path.join(paths['model_dirpath'], 'logs/val')
paths['tf_meta_graph_filepath'] = paths['model_filepath'] + '.meta'
return paths
def update_working_paths(self, model_path=None, paths=None):
paths = paths or {}
if not paths:
paths = TensorFlowModel.compute_working_paths(model_path=model_path)
for k, v in list(paths.items()):
setattr(self, '_{0}'.format(k), v)
def _make_tf_model(self):
raise NotImplementedError('`_make_tf_model` is not implemented')
def _init_tf_ops(self):
"""Initialize all TF variables and Saver"""
init_op = tf.compat.v1.global_variables_initializer()
self._tf_session.run(init_op)
self._tf_saver = tf.compat.v1.train.Saver(**self.tf_saver_params)
def _init_tf_writers(self):
self._tf_merged_summaries = tf.compat.v1.summary.merge_all()
#self._tf_train_writer = tf.compat.v1.summary.FileWriter(self._train_summary_dirpath,self._tf_graph)
#self._tf_val_writer = tf.compat.v1.summary.FileWriter(self._val_summary_dirpath,self._tf_graph)
def _save_model(self, global_step=None):
# (recursively) create all folders needed
for dirpath in (self._train_summary_dirpath, self._val_summary_dirpath):
if not os.path.exists(dirpath):
os.makedirs(dirpath)
# save params
params = self.get_params(deep=False)
params = self._serialize(params)
params['__class_name__'] = self.__class__.__name__
with open(self._params_filepath, 'w') as params_file:
json.dump(params, params_file, **self.json_params)
# dump random state if needed
if self.random_seed is not None:
random_state = self._rng.get_state()
with open(self._random_state_filepath, 'w') as random_state_file:
json.dump(random_state, random_state_file)
# save tf model
self._tf_saver.save(self._tf_session,
self._model_filepath,
global_step=global_step)
@classmethod
def load_model(cls, model_path):
paths = TensorFlowModel.compute_working_paths(model_path)
# load params
with open(paths['params_filepath'], 'r') as params_file:
params = json.load(params_file)
#print(params)
class_name = params.pop('__class_name__')
if class_name != cls.__name__:
raise RuntimeError("attempt to load {0} with class {1}".format(class_name, cls.__name__))
model = cls(paths=paths, **{k: params[k] for k in params if is_param_name(k)})
params = model._deserialize(params)
model.set_params(**params) # set attributes and deserialized params
# restore random state if needed
if os.path.isfile(model._random_state_filepath):
with open(model._random_state_filepath, 'r') as random_state_file:
random_state = json.load(random_state_file)
model._rng.set_state(random_state)
# (tf model will be loaded once any computation will be needed)
return model
def _fit(self, X, X_val=None, *args, **kwargs):
"""Class-specific `fit` routine."""
raise NotImplementedError('`fit` is not implemented')
@run_in_tf_session(check_initialized=False)
def init(self):
if not self.initialized_:
self.initialized_ = True
self._save_model()
return self
@run_in_tf_session(check_initialized=False, update_seed=True)
def fit(self, X, X_val=None, *args, **kwargs):
"""Fit the model according to the given training data."""
self.initialized_ = True
self._fit(X, X_val=X_val, *args, **kwargs)
self._save_model()
return self
@run_in_tf_session()
def get_tf_params(self, scope=None):
"""Get tf params of the model.
Returns
-------
params : dict[str] = np.ndarray
Evaluated parameters of the model.
"""
weights = {}
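# Normalize variable names: drop the enclosing scope prefix and the ':0' tensor suffix
# before using them as dictionary keys.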
for var in tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope=scope):
key = var.name
if scope and scope in key:
key = key.replace(scope, '')
if key.startswith('/'):
key = key[1:]
if key.endswith(':0'):
key = key[:-2]
weights[key] = var.eval()
return weights
if __name__ == '__main__':
# run corresponding tests
from bm.utils.testing import run_tests
from tests import test_tf_model as t
run_tests(__file__, t)
| 40.602871 | 108 | 0.624087 |
684cfc87bf64fb552ca1caa74a46163ac556af7d
| 2,093 |
py
|
Python
|
tfx/tools/cli/labels.py
|
johnPertoft/tfx
|
c6335684a54651adbcbe50aa52918b9b9948326e
|
[
"Apache-2.0"
] | null | null | null |
tfx/tools/cli/labels.py
|
johnPertoft/tfx
|
c6335684a54651adbcbe50aa52918b9b9948326e
|
[
"Apache-2.0"
] | null | null | null |
tfx/tools/cli/labels.py
|
johnPertoft/tfx
|
c6335684a54651adbcbe50aa52918b9b9948326e
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common Flags."""
ENGINE_FLAG = 'engine'
PIPELINE_DSL_PATH = 'pipeline_dsl_path'
PIPELINE_NAME = 'pipeline_name'
AIRFLOW_PACKAGE_NAME = 'apache-airflow'
KUBEFLOW_PACKAGE_NAME = 'kfp'
RUN_ID = 'run_id'
AIRFLOW_ENGINE = 'airflow'
BEAM_ENGINE = 'beam'
KUBEFLOW_ENGINE = 'kubeflow'
LOCAL_ENGINE = 'local'
VERTEX_ENGINE = 'vertex'
# Path to root directory of the pipeline.
PIPELINE_ROOT = 'pipeline_root'
# List of components in the pipeline.
PIPELINE_COMPONENTS = 'pipeline_components'
# Kubeflow specific labels.
# Base container image path.
BASE_IMAGE = 'build_base_image'
# Client ID for IAP protected endpoint.
IAP_CLIENT_ID = 'iap_client_id'
# Endpoint of the KFP API service to connect.
ENDPOINT = 'endpoint'
# Kubernetes namespace to connect to the KFP API.
NAMESPACE = 'namespace'
# Pipeline id generated when pipeline is uploaded to KFP server.
PIPELINE_ID = 'pipeline_id'
# Pipeline version id generated when pipeline is created or updated.
PIPELINE_VERSION_ID = 'pipeline_version_id'
# Experiment id generated when a new experiment is created on KFP server.
EXPERIMENT_ID = 'experiment_id'
# Flag to decide whether an image build is needed
BUILD_IMAGE = 'build_image'
# GCP Project ID for GCP API call.
GCP_PROJECT_ID = 'gcp_project_id'
# GCP Region for GCP API call.
GCP_REGION = 'gcp_region'
# Template specific labels.
# Destination directory path to copy files
DESTINATION_PATH = 'destination_path'
# Model kind of the copying template
MODEL = 'model'
| 34.311475 | 74 | 0.77592 |
11d28f7be1ad33687f8a7b72261ffd946a0fc5f2
| 429 |
py
|
Python
|
drf_admin/apps/cmdb/admin.py
|
guohaihan/myproject
|
0ec105d0bd48477faddf93bd62a8ede800419ae6
|
[
"MIT"
] | 228 |
2020-06-20T10:07:03.000Z
|
2022-03-29T07:11:01.000Z
|
drf_admin/apps/cmdb/admin.py
|
guohaihan/myproject
|
0ec105d0bd48477faddf93bd62a8ede800419ae6
|
[
"MIT"
] | 25 |
2020-07-16T12:29:04.000Z
|
2022-02-16T06:31:06.000Z
|
drf_admin/apps/cmdb/admin.py
|
guohaihan/myproject
|
0ec105d0bd48477faddf93bd62a8ede800419ae6
|
[
"MIT"
] | 82 |
2020-10-26T07:14:15.000Z
|
2022-03-29T07:53:23.000Z
|
from django.contrib import admin
from cmdb.models import Assets, Servers, SecurityDevices, StorageDevices, NetworkDevices, IDC, Cabinets, Accounts
# Register your models here.
admin.site.register(Assets)
admin.site.register(Servers)
admin.site.register(SecurityDevices)
admin.site.register(StorageDevices)
admin.site.register(NetworkDevices)
admin.site.register(IDC)
admin.site.register(Cabinets)
admin.site.register(Accounts)
| 30.642857 | 113 | 0.829837 |
16faee1d432c27fee91223052bc1fe526a35bc9f
| 617 |
py
|
Python
|
tutorial-01/pySimpleGui-0101.py
|
lungen/pySimpleGui
|
31c10e481d90a60a82b4036b187d56fe008a96f1
|
[
"MIT"
] | null | null | null |
tutorial-01/pySimpleGui-0101.py
|
lungen/pySimpleGui
|
31c10e481d90a60a82b4036b187d56fe008a96f1
|
[
"MIT"
] | null | null | null |
tutorial-01/pySimpleGui-0101.py
|
lungen/pySimpleGui
|
31c10e481d90a60a82b4036b187d56fe008a96f1
|
[
"MIT"
] | null | null | null |
import PySimpleGUI as sg
sg.theme("DarkAmber") # Add a touch of color
# All the stuff inside your window.
layout = [
[sg.Text("Some text on Row 1")],
[sg.Text("Enter something on Row 2"), sg.InputText()],
[sg.Button("Ok"), sg.Button("Cancel")],
]
# Create the Window
window = sg.Window("Window Title", layout)
# Event Loop to process "events" and get the "values" of the inputs
while True:
event, values = window.read()
if (
event == sg.WIN_CLOSED or event == "Cancel"
): # if user closes window or clicks cancel
break
print("You entered ", values[0])
window.close()
| 25.708333 | 67 | 0.640194 |
46d56896e86b1d6e826d61c8876888e02c85994a
| 2,113 |
py
|
Python
|
experiments/exp_20170719_01/main.py
|
charliezon/deep_stock
|
bbc7e6ea8498c50459b8e42eeff42d8578b6491d
|
[
"MIT"
] | 7 |
2017-08-01T04:13:32.000Z
|
2021-08-17T02:19:51.000Z
|
experiments/exp_20170719_01/main.py
|
urantialife/deep_stock
|
bbc7e6ea8498c50459b8e42eeff42d8578b6491d
|
[
"MIT"
] | 1 |
2018-01-24T12:02:30.000Z
|
2018-01-24T12:02:30.000Z
|
experiments/exp_20170719_01/main.py
|
urantialife/deep_stock
|
bbc7e6ea8498c50459b8e42eeff42d8578b6491d
|
[
"MIT"
] | 3 |
2018-10-03T18:05:16.000Z
|
2020-07-23T15:55:58.000Z
|
import sys
sys.path.append('../../')
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.models import load_model
from keras.layers import Dense, Dropout
from keras.optimizers import SGD
from keras.layers.advanced_activations import LeakyReLU
import matplotlib.pyplot as plt
import h5py
from utils.metrics import precision
# Metrics: accuracy and precision (precision is imported from utils.metrics above)
data = pd.read_csv("../../data/data_20170719_01/data.csv", header=None)
dataset = data.values
feature_len = 100
train_len = int(len(dataset)*0.96)
epochs = 1000
num_unit = 128
batch_size = 128
num_layer = 5
dropout = 0.5
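# Column layout of the CSV: the first feature_len columns are the input features,
# the last column is the target label.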
x_train = dataset[0:train_len, 0:feature_len].astype(float)
y_train = dataset[0:train_len, feature_len]
x_test = dataset[train_len:, 0:feature_len].astype(float)
y_test = dataset[train_len:, feature_len]
#leakyReLU = LeakyReLU(alpha=0.3)
model = Sequential()
model.add(Dense(num_unit, input_dim=feature_len))
model.add(LeakyReLU(alpha=0.3))
model.add(Dropout(dropout))
for i in range(num_layer):
model.add(Dense(num_unit))
model.add(LeakyReLU(alpha=0.3))
model.add(Dropout(dropout))
model.add(Dense(1, activation='sigmoid'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='binary_crossentropy',
optimizer=sgd,
metrics=['accuracy', precision])
# model.load_weights('./model_weights.h5')
history = model.fit(x_train, y_train,
epochs=epochs,
batch_size=batch_size,
verbose=1,
validation_split=0.1)
model.save_weights('./model_weights.h5')
score = model.evaluate(x_test, y_test, batch_size=batch_size, verbose=1)
print(score)
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.plot(history.history['precision'])
plt.plot(history.history['val_precision'])
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model accuracy, precision and loss')
plt.ylabel('accuracy, precision or loss')
plt.xlabel('epoch')
plt.legend(['train_acc', 'val_acc', 'train_precision', 'val_precision', 'train_loss', 'val_loss'], loc='upper left')
plt.show()
| 28.554054 | 116 | 0.733554 |
bf8d5df5ea0d13739a01f2fba7f1d9f7a34deefb
| 4,588 |
py
|
Python
|
mistos-backend/src/app/api/classes/experiment_result.py
|
Maddonix/mistos_2
|
4bd9f45ad9e49f4178c0b8bb1a177d7db5349c34
|
[
"MIT"
] | 1 |
2021-03-22T10:57:01.000Z
|
2021-03-22T10:57:01.000Z
|
mistos-backend/src/app/api/classes/experiment_result.py
|
Maddonix/mistos_2
|
4bd9f45ad9e49f4178c0b8bb1a177d7db5349c34
|
[
"MIT"
] | 44 |
2021-02-17T15:07:17.000Z
|
2021-04-05T07:07:09.000Z
|
mistos-backend/src/app/api/classes/experiment_result.py
|
Maddonix/mistos_2
|
4bd9f45ad9e49f4178c0b8bb1a177d7db5349c34
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from typing import Optional, Any
from app import crud
from app import fileserver_requests as fsr
from app.api.classes_com import ComExperimentResult
from app.api.dependencies import check_sess
from pandas import DataFrame
from pydantic import BaseModel, constr
from app.api import cfg_classes
class DbExperimentResult(BaseModel):
'''
A class to handle database and file storage of ExperimentResults
Attributes
----------
uid : int
the object's unique identifier
name : str
the object's name
hint : str
brief description of the object
description : str
a detailed description of the experiment result
experiment_group_id : int
unique identifier of the associated experiment group
result_type: str
describes type of result layer. Must be one of strings defined in app.api.cfg_classes.result_types
path : pathlib.Path, optional
path to file storage, will be automatically generated when object is saved to database.
Methods
-------
to_int_class()->app.api.classes_internal.IntExperimentResult:
returns object as int_class. Loads layer array from file path in the process.
to_com_class()->app.api.classes_com.ComExperimentResult:
returns object as com_class.
create_in_db(sess = None):
creates the object in the database, updates the object's path and uid attributes accordingly. Uses default session if none is passed.
delete(sess = None):
deletes the object in database and file storage. Uses default session if none is passed.
update_hint(new_hint: str, sess = None):
updates the object's hint in database. Uses default session if none is passed.
update_name(new_name: str, sess = None):
updates the object's name in database. Uses default session if none is passed.
'''
uid: int
name: str = ""
hint: str = ""
description: str = ""
experiment_group_id: int
result_type: constr(regex=cfg_classes.result_type_regex)
path: Optional[Path]
def to_int_class(self):
'''Returns c_int.IntExperimentResult'''
kwargs = self.dict()
kwargs["data"] = fsr.load_result_df(self.path)
return IntExperimentResult(**kwargs)
def create_in_db(self, sess=None):
'''
Creates object in db. Path and id are generated and updated in object.
Parameters:
- sess(sqlalchemy.orm.Session): The database session to be used, if no session is passed default session will be used (app.api.dependencies.get_db)
'''
sess = check_sess(sess)
sql_result = crud.create_experiment_result(self)
self.uid = sql_result.id
self.path = Path(sql_result.path)
def to_com_class(self):
'''Returns c_com.ComExperimentResult'''
kwargs = self.dict()
kwargs["experimentGroups"] = self.experiment_group_id
kwargs["resultType"] = self.result_type
return ComExperimentResult(**kwargs)
class IntExperimentResult(BaseModel):
'''
A class to handle calculations and other internal operations with ExperimentResults.
Attributes
----------
uid : int
the object's unique identifier
name : str
the object's name
hint : str, optional
empty string by default. brief description of the object.
description : str, optional
empty string by default. detailed description of the object.
experiment_group_id : int
unique identifier of the associated experiment_group
result_type: str
describes type of result. Must be one of strings defined in app.api.cfg_classes.result_types
data : pd.DataFrame, optional
DataFrame summarizing the result
Methods
-------
on_init():
Initializes object. Object is saved in database and file storage
to_db_class():
Transforms object to db_class
'''
uid: int
name: str = ""
hint: Optional[str] = ""
description: Optional[str] = ""
experiment_group_id: int
result_type: constr(regex=cfg_classes.result_type_regex)
data: Any
def on_init(self):
# should be called on every creation
if self.uid == -1:
db_result = self.to_db_class()
db_result.create_in_db()
self.uid = db_result.uid
fsr.save_result_df(self.data, db_result.path)
print(f"New Result created with id {self.uid}")
def to_db_class(self):
kwargs = self.dict()
del kwargs["data"]
return DbExperimentResult(**kwargs)
| 34.496241 | 159 | 0.67524 |
0be9c3a6bfe058f259246d3b6626b22816a515c7
| 8,070 |
py
|
Python
|
src/oci/data_labeling_service/models/work_request_resource.py
|
ezequielramos/oci-python-sdk
|
cc4235cf217beaf9feed75760e9ce82610222762
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 249 |
2017-09-11T22:06:05.000Z
|
2022-03-04T17:09:29.000Z
|
src/oci/data_labeling_service/models/work_request_resource.py
|
ezequielramos/oci-python-sdk
|
cc4235cf217beaf9feed75760e9ce82610222762
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 228 |
2017-09-11T23:07:26.000Z
|
2022-03-23T10:58:50.000Z
|
src/oci/data_labeling_service/models/work_request_resource.py
|
ezequielramos/oci-python-sdk
|
cc4235cf217beaf9feed75760e9ce82610222762
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 224 |
2017-09-27T07:32:43.000Z
|
2022-03-25T16:55:42.000Z
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class WorkRequestResource(object):
"""
A resource created or operated on by a work request.
"""
#: A constant which can be used with the action_type property of a WorkRequestResource.
#: This constant has a value of "CREATED"
ACTION_TYPE_CREATED = "CREATED"
#: A constant which can be used with the action_type property of a WorkRequestResource.
#: This constant has a value of "UPDATED"
ACTION_TYPE_UPDATED = "UPDATED"
#: A constant which can be used with the action_type property of a WorkRequestResource.
#: This constant has a value of "DELETED"
ACTION_TYPE_DELETED = "DELETED"
#: A constant which can be used with the action_type property of a WorkRequestResource.
#: This constant has a value of "IN_PROGRESS"
ACTION_TYPE_IN_PROGRESS = "IN_PROGRESS"
#: A constant which can be used with the action_type property of a WorkRequestResource.
#: This constant has a value of "WRITTEN"
ACTION_TYPE_WRITTEN = "WRITTEN"
#: A constant which can be used with the action_type property of a WorkRequestResource.
#: This constant has a value of "RELATED"
ACTION_TYPE_RELATED = "RELATED"
def __init__(self, **kwargs):
"""
Initializes a new WorkRequestResource object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param entity_type:
The value to assign to the entity_type property of this WorkRequestResource.
:type entity_type: str
:param action_type:
The value to assign to the action_type property of this WorkRequestResource.
Allowed values for this property are: "CREATED", "UPDATED", "DELETED", "IN_PROGRESS", "WRITTEN", "RELATED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type action_type: str
:param identifier:
The value to assign to the identifier property of this WorkRequestResource.
:type identifier: str
:param entity_uri:
The value to assign to the entity_uri property of this WorkRequestResource.
:type entity_uri: str
:param metadata:
The value to assign to the metadata property of this WorkRequestResource.
:type metadata: dict(str, str)
"""
self.swagger_types = {
'entity_type': 'str',
'action_type': 'str',
'identifier': 'str',
'entity_uri': 'str',
'metadata': 'dict(str, str)'
}
self.attribute_map = {
'entity_type': 'entityType',
'action_type': 'actionType',
'identifier': 'identifier',
'entity_uri': 'entityUri',
'metadata': 'metadata'
}
self._entity_type = None
self._action_type = None
self._identifier = None
self._entity_uri = None
self._metadata = None
@property
def entity_type(self):
"""
**[Required]** Gets the entity_type of this WorkRequestResource.
The resource type the work request affects.
:return: The entity_type of this WorkRequestResource.
:rtype: str
"""
return self._entity_type
@entity_type.setter
def entity_type(self, entity_type):
"""
Sets the entity_type of this WorkRequestResource.
The resource type the work request affects.
:param entity_type: The entity_type of this WorkRequestResource.
:type: str
"""
self._entity_type = entity_type
@property
def action_type(self):
"""
**[Required]** Gets the action_type of this WorkRequestResource.
The way in which this resource is affected by the work tracked in the work request.
A resource being created, updated, or deleted will remain in the IN_PROGRESS state until
work is complete for that resource at which point it will transition to CREATED, UPDATED,
or DELETED, respectively.
Allowed values for this property are: "CREATED", "UPDATED", "DELETED", "IN_PROGRESS", "WRITTEN", "RELATED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The action_type of this WorkRequestResource.
:rtype: str
"""
return self._action_type
@action_type.setter
def action_type(self, action_type):
"""
Sets the action_type of this WorkRequestResource.
The way in which this resource is affected by the work tracked in the work request.
A resource being created, updated, or deleted will remain in the IN_PROGRESS state until
work is complete for that resource at which point it will transition to CREATED, UPDATED,
or DELETED, respectively.
:param action_type: The action_type of this WorkRequestResource.
:type: str
"""
allowed_values = ["CREATED", "UPDATED", "DELETED", "IN_PROGRESS", "WRITTEN", "RELATED"]
if not value_allowed_none_or_none_sentinel(action_type, allowed_values):
action_type = 'UNKNOWN_ENUM_VALUE'
self._action_type = action_type
@property
def identifier(self):
"""
**[Required]** Gets the identifier of this WorkRequestResource.
The identifier of the resource the work request affects.
:return: The identifier of this WorkRequestResource.
:rtype: str
"""
return self._identifier
@identifier.setter
def identifier(self, identifier):
"""
Sets the identifier of this WorkRequestResource.
The identifier of the resource the work request affects.
:param identifier: The identifier of this WorkRequestResource.
:type: str
"""
self._identifier = identifier
@property
def entity_uri(self):
"""
Gets the entity_uri of this WorkRequestResource.
The URI path that the user can do a GET on to access the resource metadata
:return: The entity_uri of this WorkRequestResource.
:rtype: str
"""
return self._entity_uri
@entity_uri.setter
def entity_uri(self, entity_uri):
"""
Sets the entity_uri of this WorkRequestResource.
The URI path that the user can do a GET on to access the resource metadata
:param entity_uri: The entity_uri of this WorkRequestResource.
:type: str
"""
self._entity_uri = entity_uri
@property
def metadata(self):
"""
Gets the metadata of this WorkRequestResource.
Additional information that helps to explain the resource.
:return: The metadata of this WorkRequestResource.
:rtype: dict(str, str)
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this WorkRequestResource.
Additional information that helps to explain the resource.
:param metadata: The metadata of this WorkRequestResource.
:type: dict(str, str)
"""
self._metadata = metadata
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
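# A minimal usage sketch (not part of the generated model): values are set
# through the properties above, and the action_type setter coerces anything
# outside the allowed list to 'UNKNOWN_ENUM_VALUE'. The identifier below is a
# placeholder, not a real OCID.
def _example_work_request_resource():
    resource = WorkRequestResource()
    resource.entity_type = "instance"
    resource.identifier = "ocid1.instance.oc1..exampleuniqueID"
    resource.action_type = "CREATED"   # recognised value, stored as-is
    resource.action_type = "ARCHIVED"  # unrecognised value, coerced by the setter
    assert resource.action_type == "UNKNOWN_ENUM_VALUE"
    return resource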
| 34.635193 | 245 | 0.658612 |
e8bb54d83b3a3a5750f9c927125f84c91e66e0f0
| 524 |
py
|
Python
|
hello-world/python/send.py
|
jeffryang24/rabbitmq-learning
|
f014695f02da46557ae82b237ad4b3c600315b2e
|
[
"MIT"
] | 1 |
2020-09-02T08:32:02.000Z
|
2020-09-02T08:32:02.000Z
|
hello-world/python/send.py
|
jeffryang24/rabbitmq-learning
|
f014695f02da46557ae82b237ad4b3c600315b2e
|
[
"MIT"
] | null | null | null |
hello-world/python/send.py
|
jeffryang24/rabbitmq-learning
|
f014695f02da46557ae82b237ad4b3c600315b2e
|
[
"MIT"
] | 1 |
2020-04-30T07:03:15.000Z
|
2020-04-30T07:03:15.000Z
|
#!/usr/bin/env python
import pika
# Create connection to rabbitmq server
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
# Declare the queue first; otherwise RabbitMQ will silently drop messages sent to a missing queue
channel.queue_declare(queue='hello')
# Use the default exchange (empty exchange name) for this first hello world.
# With the default exchange, routing_key must match the queue name.
channel.basic_publish(
exchange='',
routing_key='hello',
body='Hello World!'
)
print(" [x] Sent 'Hello World!'")
# Close connection
connection.close()
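# A minimal companion consumer sketch, assuming pika 1.x; it is defined but not
# called here, and simply drains the same 'hello' queue declared above.
def _example_receive():
    conn = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
    ch = conn.channel()
    ch.queue_declare(queue='hello')  # idempotent re-declaration
    def callback(ch, method, properties, body):
        print(" [x] Received %r" % body)
    # auto_ack=True acknowledges messages as soon as they are delivered
    ch.basic_consume(queue='hello', on_message_callback=callback, auto_ack=True)
    ch.start_consuming()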
| 23.818182 | 81 | 0.748092 |
e4818892b3ae982b36464634ff3f30a3328c96b2
| 6,886 |
py
|
Python
|
vkbottle/types/methods/likes.py
|
LouisPython217/vkbottle
|
3541bbdb66f32c2d3567b0047c36b706ac72bb3b
|
[
"MIT"
] | null | null | null |
vkbottle/types/methods/likes.py
|
LouisPython217/vkbottle
|
3541bbdb66f32c2d3567b0047c36b706ac72bb3b
|
[
"MIT"
] | null | null | null |
vkbottle/types/methods/likes.py
|
LouisPython217/vkbottle
|
3541bbdb66f32c2d3567b0047c36b706ac72bb3b
|
[
"MIT"
] | null | null | null |
# Generated with love
from vkbottle.types import responses
from .access import APIAccessibility
from .method import BaseMethod
class LikesAdd(BaseMethod):
access_token_type: APIAccessibility = [APIAccessibility.USER]
async def __call__(
self, type: str, item_id: int, owner_id: int = None, access_key: str = None
) -> responses.likes.Add:
""" likes.add
From Vk Docs: Adds the specified object to the 'Likes' list of the current user.
Access from user token(s)
:param type: Object type: 'post' — post on user or community wall, 'comment' — comment on a wall post, 'photo' — photo, 'audio' — audio, 'video' — video, 'note' — note, 'photo_comment' — comment on the photo, 'video_comment' — comment on the video, 'topic_comment' — comment in the discussion, 'sitepage' — page of the site where the [vk.com/dev/Like|Like widget] is installed
:param owner_id: ID of the user or community that owns the object.
:param item_id: Object ID.
:param access_key: Access key required for an object owned by a private entity.
"""
params = {
k if not k.endswith("_") else k[:-1]: v
for k, v in locals().items()
if k not in ["self"] and v is not None
}
return await self.request(
"likes.add", params, response_model=responses.likes.AddModel
)
class LikesDelete(BaseMethod):
access_token_type: APIAccessibility = [APIAccessibility.USER]
async def __call__(
self, type: str, item_id: int, owner_id: int = None
) -> responses.likes.Delete:
""" likes.delete
From Vk Docs: Deletes the specified object from the 'Likes' list of the current user.
Access from user token(s)
:param type: Object type: 'post' — post on user or community wall, 'comment' — comment on a wall post, 'photo' — photo, 'audio' — audio, 'video' — video, 'note' — note, 'photo_comment' — comment on the photo, 'video_comment' — comment on the video, 'topic_comment' — comment in the discussion, 'sitepage' — page of the site where the [vk.com/dev/Like|Like widget] is installed
:param owner_id: ID of the user or community that owns the object.
:param item_id: Object ID.
"""
params = {
k if not k.endswith("_") else k[:-1]: v
for k, v in locals().items()
if k not in ["self"] and v is not None
}
return await self.request(
"likes.delete", params, response_model=responses.likes.DeleteModel
)
class LikesGetList(BaseMethod):
access_token_type: APIAccessibility = [
APIAccessibility.USER,
APIAccessibility.SERVICE,
]
async def __call__(
self,
type: str,
owner_id: int = None,
item_id: int = None,
page_url: str = None,
filter: str = None,
friends_only: int = None,
extended: bool = None,
offset: int = None,
count: int = None,
skip_own: bool = None,
) -> responses.likes.GetList:
""" likes.getList
From Vk Docs: Returns a list of IDs of users who added the specified object to their 'Likes' list.
Access from user, service token(s)
:param type: , Object type: 'post' — post on user or community wall, 'comment' — comment on a wall post, 'photo' — photo, 'audio' — audio, 'video' — video, 'note' — note, 'photo_comment' — comment on the photo, 'video_comment' — comment on the video, 'topic_comment' — comment in the discussion, 'sitepage' — page of the site where the [vk.com/dev/Like|Like widget] is installed
:param owner_id: ID of the user, community, or application that owns the object. If the 'type' parameter is set as 'sitepage', the application ID is passed as 'owner_id'. Use negative value for a community id. If the 'type' parameter is not set, the 'owner_id' is assumed to be either the current user or the same application ID as if the 'type' parameter was set to 'sitepage'.
:param item_id: Object ID. If 'type' is set as 'sitepage', 'item_id' can include the 'page_id' parameter value used during initialization of the [vk.com/dev/Like|Like widget].
:param page_url: URL of the page where the [vk.com/dev/Like|Like widget] is installed. Used instead of the 'item_id' parameter.
:param filter: Filters to apply: 'likes' — returns information about all users who liked the object (default), 'copies' — returns information only about users who told their friends about the object
:param friends_only: Specifies which users are returned: '1' — to return only the current user's friends, '0' — to return all users (default)
:param extended: Specifies whether extended information will be returned. '1' — to return extended information about users and communities from the 'Likes' list, '0' — to return no additional information (default)
:param offset: Offset needed to select a specific subset of users.
:param count: Number of user IDs to return (maximum '1000'). Default is '100' if 'friends_only' is set to '0', otherwise, the default is '10' if 'friends_only' is set to '1'.
:param skip_own:
"""
params = {
k if not k.endswith("_") else k[:-1]: v
for k, v in locals().items()
if k not in ["self"] and v is not None
}
return await self.request(
"likes.getList", params, response_model=responses.likes.GetListModel
)
class LikesIsLiked(BaseMethod):
access_token_type: APIAccessibility = [APIAccessibility.USER]
async def __call__(
self, type: str, item_id: int, user_id: int = None, owner_id: int = None
) -> responses.likes.IsLiked:
""" likes.isLiked
From Vk Docs: Checks for the object in the 'Likes' list of the specified user.
Access from user token(s)
:param user_id: User ID.
:param type: Object type: 'post' — post on user or community wall, 'comment' — comment on a wall post, 'photo' — photo, 'audio' — audio, 'video' — video, 'note' — note, 'photo_comment' — comment on the photo, 'video_comment' — comment on the video, 'topic_comment' — comment in the discussion
:param owner_id: ID of the user or community that owns the object.
:param item_id: Object ID.
"""
params = {
k if not k.endswith("_") else k[:-1]: v
for k, v in locals().items()
if k not in ["self"] and v is not None
}
return await self.request(
"likes.isLiked", params, response_model=responses.likes.IsLikedModel
)
class Likes:
def __init__(self, request):
self.add = LikesAdd(request)
self.delete = LikesDelete(request)
self.get_list = LikesGetList(request)
self.is_liked = LikesIsLiked(request)
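# A minimal usage sketch, assuming an already-initialised API object that
# exposes this category as `api.likes`; owner_id and post_id are placeholders.
async def _example_like_post(api, owner_id: int, post_id: int):
    # Like a wall post, then query whether the like is registered; the second
    # call returns the generated IsLiked response model.
    await api.likes.add(type="post", owner_id=owner_id, item_id=post_id)
    return await api.likes.is_liked(type="post", owner_id=owner_id, item_id=post_id)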
| 52.564885 | 386 | 0.64711 |
69f968483f8b2c42344bf00434e73216f9906040
| 8,461 |
py
|
Python
|
maskprop/MiVOS/model/fusion_model.py
|
qinliuliuqin/iSegFormer
|
67b634588cc0a1e09fb3e092966eae997eb209fa
|
[
"MIT"
] | 14 |
2021-12-09T08:33:23.000Z
|
2022-03-26T13:11:01.000Z
|
maskprop/MiVOS/model/fusion_model.py
|
qinliuliuqin/iSegFormer
|
67b634588cc0a1e09fb3e092966eae997eb209fa
|
[
"MIT"
] | null | null | null |
maskprop/MiVOS/model/fusion_model.py
|
qinliuliuqin/iSegFormer
|
67b634588cc0a1e09fb3e092966eae997eb209fa
|
[
"MIT"
] | null | null | null |
import os
from os import path
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import time
from model.fusion_net import FusionNet
from model.attn_network import AttentionReadNetwork
from model.aggregate import aggregate_wbg_channel
from model.losses import LossComputer, iou_hooks
from util.log_integrator import Integrator
from util.image_saver import pool_fusion
class FusionModel:
def __init__(self, para, logger=None, save_path=None, local_rank=0, world_size=1, distributed=True):
self.para = para
self.local_rank = local_rank
if distributed:
self.net = nn.parallel.DistributedDataParallel(FusionNet().cuda(),
device_ids=[local_rank], output_device=local_rank, broadcast_buffers=False)
else:
self.net = nn.DataParallel(
FusionNet().cuda(),
device_ids=[local_rank], output_device=local_rank)
self.prop_net = AttentionReadNetwork().eval().cuda()
# Setup logger when local_rank=0
self.logger = logger
self.save_path = save_path
if logger is not None:
self.last_time = time.time()
self.train_integrator = Integrator(self.logger, distributed=distributed, local_rank=local_rank, world_size=world_size)
self.train_integrator.add_hook(iou_hooks)
self.val_integrator = Integrator(self.logger, distributed=distributed, local_rank=local_rank, world_size=world_size)
self.loss_computer = LossComputer(para)
self.train()
self.optimizer = optim.Adam(filter(
lambda p: p.requires_grad, self.net.parameters()), lr=para['lr'], weight_decay=1e-7)
self.scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer, para['steps'], para['gamma'])
if para['amp']:
self.scaler = torch.cuda.amp.GradScaler()
# Logging info
self.report_interval = 100
self.save_im_interval = 500
self.save_model_interval = 5000
if para['debug']:
self.report_interval = self.save_im_interval = 1
def do_pass(self, data, it=0):
# No need to store the gradient outside training
torch.set_grad_enabled(self._is_train)
for k, v in data.items():
if type(v) != list and type(v) != dict and type(v) != int:
data[k] = v.cuda(non_blocking=True)
# See fusion_dataset.py for variable definitions
im = data['rgb']
seg1 = data['seg1']
seg2 = data['seg2']
src2_ref = data['src2_ref']
src2_ref_gt = data['src2_ref_gt']
seg12 = data['seg12']
seg22 = data['seg22']
src2_ref2 = data['src2_ref2']
src2_ref_gt2 = data['src2_ref_gt2']
src2_ref_im = data['src2_ref_im']
selector = data['selector']
dist = data['dist']
out = {}
with torch.cuda.amp.autocast(enabled=self.para['amp']):
# Get kernelized memory
with torch.no_grad():
attn1, attn2 = self.prop_net(src2_ref_im, src2_ref, src2_ref_gt, src2_ref2, src2_ref_gt2, im)
prob1 = torch.sigmoid(self.net(im, seg1, seg2, attn1, dist))
prob2 = torch.sigmoid(self.net(im, seg12, seg22, attn2, dist))
prob = torch.cat([prob1, prob2], 1) * selector.unsqueeze(2).unsqueeze(2)
logits, prob = aggregate_wbg_channel(prob, True)
out['logits'] = logits
out['mask'] = prob
out['attn1'] = attn1
out['attn2'] = attn2
if self._do_log or self._is_train:
losses = self.loss_computer.compute({**data, **out}, it)
# Logging
if self._do_log:
self.integrator.add_dict(losses)
if self._is_train:
if it % self.save_im_interval == 0 and it != 0:
if self.logger is not None:
images = {**data, **out}
size = (320, 320)
self.logger.log_cv2('train/pairs', pool_fusion(images, size=size), it)
else:
# Validation save
if data['val_iter'] % 10 == 0:
if self.logger is not None:
images = {**data, **out}
size = (320, 320)
self.logger.log_cv2('val/pairs', pool_fusion(images, size=size), it)
if self._is_train:
if (it) % self.report_interval == 0 and it != 0:
if self.logger is not None:
self.logger.log_scalar('train/lr', self.scheduler.get_last_lr()[0], it)
self.logger.log_metrics('train', 'time', (time.time()-self.last_time)/self.report_interval, it)
self.last_time = time.time()
self.train_integrator.finalize('train', it)
self.train_integrator.reset_except_hooks()
if it % self.save_model_interval == 0 and it != 0:
if self.logger is not None:
self.save(it)
# Backward pass
self.optimizer.zero_grad(set_to_none=True)
if self.para['amp']:
self.scaler.scale(losses['total_loss']).backward()
self.scaler.step(self.optimizer)
self.scaler.update()
else:
losses['total_loss'].backward()
self.optimizer.step()
self.scheduler.step()
def save(self, it):
if self.save_path is None:
print('Saving has been disabled.')
return
os.makedirs(os.path.dirname(self.save_path), exist_ok=True)
model_path = self.save_path + ('_%s.pth' % it)
torch.save(self.net.module.state_dict(), model_path)
print('Model saved to %s.' % model_path)
self.save_checkpoint(it)
def save_checkpoint(self, it):
if self.save_path is None:
print('Saving has been disabled.')
return
os.makedirs(os.path.dirname(self.save_path), exist_ok=True)
checkpoint_path = self.save_path + '_checkpoint.pth'
checkpoint = {
'it': it,
'network': self.net.module.state_dict(),
'optimizer': self.optimizer.state_dict(),
'scheduler': self.scheduler.state_dict()}
torch.save(checkpoint, checkpoint_path)
print('Checkpoint saved to %s.' % checkpoint_path)
def load_model(self, path):
map_location = 'cuda:%d' % self.local_rank
checkpoint = torch.load(path, map_location={'cuda:0': map_location})
it = checkpoint['it']
network = checkpoint['network']
optimizer = checkpoint['optimizer']
scheduler = checkpoint['scheduler']
self.net.module.load_state_dict(network)
self.optimizer.load_state_dict(optimizer)
self.scheduler.load_state_dict(scheduler)
print('Model loaded.')
return it
def load_network(self, path):
map_location = 'cuda:%d' % self.local_rank
self.net.module.load_state_dict(torch.load(path, map_location={'cuda:0': map_location}))
# self.net.load_state_dict(torch.load(path))
print('Network weight loaded:', path)
def load_prop(self, path):
map_location = 'cuda:%d' % self.local_rank
self.prop_net.load_state_dict(torch.load(path, map_location={'cuda:0': map_location}), strict=False)
print('Propagation network weight loaded:', path)
def finalize_val(self, it):
self.val_integrator.finalize('val', it)
self.val_integrator.reset_except_hooks()
def train(self):
self._is_train = True
self._do_log = True
self.integrator = self.train_integrator
# Also skip BN
self.net.eval()
self.prop_net.eval()
return self
def val(self):
self._is_train = False
self.integrator = self.val_integrator
self._do_log = True
self.net.eval()
self.prop_net.eval()
return self
def test(self):
self._is_train = False
self._do_log = False
self.net.eval()
self.prop_net.eval()
return self
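# A stripped-down sketch of the mixed-precision update performed in
# FusionModel.do_pass above, for a generic network/criterion pair; all
# arguments are assumed to be supplied by the caller.
def _example_amp_step(net, optimizer, scaler, images, targets, criterion):
    optimizer.zero_grad(set_to_none=True)
    with torch.cuda.amp.autocast():
        loss = criterion(net(images), targets)
    scaler.scale(loss).backward()  # scale the loss so fp16 grads do not underflow
    scaler.step(optimizer)         # unscales grads and skips the step on inf/NaN
    scaler.update()                # adapt the loss scale for the next iteration
    return loss.detach()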
| 37.109649 | 126 | 0.58161 |
a8d8b8f28400d3f10ace1d2e32d86aa10c18f93c
| 7,571 |
py
|
Python
|
openmdao.util/src/openmdao/util/test/test_parsephoenix.py
|
MrShoks/OpenMDAO-Framework
|
412f34ffe31a95631fbe55ca7d75b84669ae8f8c
|
[
"Apache-2.0"
] | 1 |
2020-06-28T20:38:56.000Z
|
2020-06-28T20:38:56.000Z
|
openmdao.util/src/openmdao/util/test/test_parsephoenix.py
|
MrShoks/OpenMDAO-Framework
|
412f34ffe31a95631fbe55ca7d75b84669ae8f8c
|
[
"Apache-2.0"
] | null | null | null |
openmdao.util/src/openmdao/util/test/test_parsephoenix.py
|
MrShoks/OpenMDAO-Framework
|
412f34ffe31a95631fbe55ca7d75b84669ae8f8c
|
[
"Apache-2.0"
] | null | null | null |
"""
Testing the ParsePhoenixWrapper utility.
"""
import unittest, os, sys
from openmdao.util.parse_phoenixwrapper import parse_phoenixwrapper
class TestCase(unittest.TestCase):
""" Test namelist writer functions. """
def setUp(self):
self.infile = 'phx_text_input.txt'
self.outfile = 'phx_text_output.txt'
def tearDown(self):
if os.path.exists(self.infile):
os.remove(self.infile)
if os.path.exists(self.outfile):
os.remove(self.outfile)
pass
def test_phx(self):
phx = []
phx.append("#Comment\n")
phx.append("\n")
phx.append('variable: int1 integer input description="Description1"\n')
phx.append('variable: float double output description="Description2" default=3.14159\n')
phx.append('variable: ERROR string input description="Description4" default="ZZZ"\n')
phx.append('variable: ERROR2 string output description="Description4"\n')
phx.append("variable: units double input description='Description5' units='ft'\n")
phx.append("variable: array1 double[] input description='Description5' \n")
phx.append('variable: iopt integer input description="Execution Type" enumValues=1,2,3,4 enumAliases="Analysis","Parametric Variation","Optimization","Contour or Thumbprint plot"\n')
phx.append("variable: unitignore double input description='Description5' units='dB'\n")
phx.append("variable: unitreplace double input description='Description5' units='mph'\n")
phx.append("variable: bool boolean input default=false\n")
phx.append('variable: stringbadquote string input description="Description4" default="xx\n')
phx.append('variable: zfile file output\n')
phx.append("variable: unitfullreplace double output description='Description5' units='R'\n")
phx.append('variable: floatbadquote double output description="Aa, Bb, Cc\n')
phx.append("\n")
phx.append('setGroup "input.deep"\n')
phx.append('variable: int2 integer input description="Description3" default=5\n')
phx.append("####\n")
outfile = open(self.infile, 'w')
outfile.writelines(phx)
outfile.close()
parse_phoenixwrapper(self.infile, self.outfile, "TestComp")
infile = open(self.outfile, 'r')
result = infile.readlines()
infile.close()
self.assertEqual(result[0], '"""\n')
self.assertEqual(result[1], 'OpenMDAO Wrapper for TestComp\n')
self.assertEqual(result[2], 'Automatically generated from phx_text_input.txt with parse_phoenixwrapper.\n')
self.assertEqual(result[3], '"""\n')
self.assertEqual(result[4], '\n')
self.assertEqual(result[5], 'from numpy import float32 as numpy_float32\n')
self.assertEqual(result[6], '\n')
self.assertEqual(result[7], 'from openmdao.main.api import Component, Container\n')
self.assertEqual(result[8], 'from openmdao.main.datatypes.api import Int, Float, Str, Array, Enum, Bool, File\n')
self.assertEqual(result[9], '\n')
self.assertEqual(result[10], 'class TestComp_input_deep(Container):\n')
self.assertEqual(result[11], ' """Container for input.deep"""\n')
self.assertEqual(result[12], '\n')
self.assertEqual(result[13], ' # OpenMDAO Variables\n')
self.assertEqual(result[14], " int2 = Int(5, iotype='in', desc='Description3')\n")
self.assertEqual(result[15], '\n')
self.assertEqual(result[16], 'class TestComp_input(Container):\n')
self.assertEqual(result[17], ' """Container for input"""\n')
self.assertEqual(result[18], '\n')
self.assertEqual(result[19], ' # OpenMDAO Variables\n')
self.assertEqual(result[20], '\n')
self.assertEqual(result[21], " def __init__(self):\n")
self.assertEqual(result[22], ' """Constructor for the TestComp_input component"""\n')
self.assertEqual(result[23], '\n')
self.assertEqual(result[24], " super(TestComp_input, self).__init__()\n")
self.assertEqual(result[25], '\n')
self.assertEqual(result[26], " # Variable Containers\n")
self.assertEqual(result[27], " self.add('deep', TestComp_input_deep())\n")
self.assertEqual(result[28], '\n')
self.assertEqual(result[29], '\n')
self.assertEqual(result[30], 'class TestComp(Component):\n')
self.assertEqual(result[31], ' """Wrapper for TestComp"""\n')
self.assertEqual(result[32], '\n')
self.assertEqual(result[33], ' # OpenMDAO Variables\n')
self.assertEqual(result[34], " int1 = Int(0, iotype='in', desc='Description1')\n")
self.assertEqual(result[35], " float = Float(3.14159, iotype='out', desc='Description2')\n")
self.assertEqual(result[36], " ERROR = Str('ZZZ', iotype='in', desc='Description4')\n")
self.assertEqual(result[37], " ERROR2 = Str('', iotype='out', desc='Description4')\n")
self.assertEqual(result[38], " units = Float(0.0, iotype='in', units='ft', desc='Description5')\n")
self.assertEqual(result[39], " array1 = Array(iotype='in', dtype=numpy_float32, desc='Description5')\n")
self.assertEqual(result[40], " iopt = Enum((1,2,3,4), iotype='in', desc='Execution Type', aliases=('Analysis', 'Parametric Variation', 'Optimization', 'Contour or Thumbprint plot'))\n")
self.assertEqual(result[41], " unitignore = Float(0.0, iotype='in', desc='Description5')\n")
self.assertEqual(result[42], " unitreplace = Float(0.0, iotype='in', units='mi/h', desc='Description5')\n")
self.assertEqual(result[43], " bool = Bool(False, iotype='in')\n")
self.assertEqual(result[44], " stringbadquote = Str('xx', iotype='in', desc='Description4')\n")
self.assertEqual(result[45], " zfile = File(iotype='out', path='Insert_Filename_Here')\n")
self.assertEqual(result[46], " unitfullreplace = Float(0.0, iotype='out', units='degR', desc='Description5')\n")
self.assertEqual(result[47], " floatbadquote = Float(0.0, iotype='out', desc='Aa, Bb, Cc')\n")
def test_small_phx(self):
phx = []
phx.append('variable: int1 integer input description="Description1"\n')
outfile = open(self.infile, 'w')
outfile.writelines(phx)
outfile.close()
parse_phoenixwrapper(self.infile, self.outfile, "TestComp")
infile = open(self.outfile, 'r')
result = infile.readlines()
infile.close()
self.assertEqual(result[6], 'from openmdao.main.api import Component\n')
def test_bad_datatype(self):
phx = []
phx.append('variable: int1 badtype input description="Description1"\n')
outfile = open(self.infile, 'w')
outfile.writelines(phx)
outfile.close()
try:
parse_phoenixwrapper(self.infile, self.outfile, "TestComp")
        except KeyError as err:
msg = "'Unhandled Modelcenter input type - badtype'"
self.assertEqual(str(err), msg)
else:
self.fail('KeyError expected')
if __name__ == '__main__':
import nose
sys.argv.append('--cover-package=openmdao')
sys.argv.append('--cover-erase')
nose.runmodule()
| 50.812081 | 199 | 0.616167 |
90ffd93b49b3af4b0b93691554b504a3ed0aff8d
| 2,407 |
py
|
Python
|
pygpu/basic.py
|
nondejus/libgpuarray
|
01909c2f7b99f574e73ab8d0ab4211ea8deb61d2
|
[
"0BSD"
] | 204 |
2015-02-01T13:59:25.000Z
|
2021-12-17T14:27:09.000Z
|
pygpu/basic.py
|
nondejus/libgpuarray
|
01909c2f7b99f574e73ab8d0ab4211ea8deb61d2
|
[
"0BSD"
] | 464 |
2015-01-16T22:02:20.000Z
|
2022-01-11T16:47:40.000Z
|
pygpu/basic.py
|
nondejus/libgpuarray
|
01909c2f7b99f574e73ab8d0ab4211ea8deb61d2
|
[
"0BSD"
] | 110 |
2015-01-14T02:26:26.000Z
|
2022-03-21T19:13:33.000Z
|
from string import Template
from .gpuarray import GpuArray, GpuKernel, SIZE, dtype_to_ctype
import numpy
def _generate_kernel(ctx, cols, dtype, upper=True):
tmpl = Template("""
#include "cluda.h"
KERNEL void extract_tri(GLOBAL_MEM ${ctype} *a, ga_size a_off, ga_uint N) {
a = (GLOBAL_MEM ${ctype} *)(((GLOBAL_MEM char *)a) + a_off);
unsigned int idx = GID_1 * LDIM_0 * GDIM_0 +
GID_0 * LDIM_0 + LID_0;
unsigned int ix = idx/${cols};
unsigned int iy = idx%${cols};
if (idx < N) {
if (ix ${le} iy)
a[idx] = 0.0;
}
}
""")
if upper:
le = '>'
else:
le = '<'
ctype = dtype_to_ctype(dtype)
src = tmpl.substitute(cols=cols, ctype=ctype, le=le)
spec = [GpuArray, SIZE, 'uint32']
have_small = False
have_double = False
have_complex = False
if dtype.itemsize < 4:
have_small = True
if dtype in [numpy.float64, numpy.complex128]:
have_double = True
if dtype in [numpy.complex64, numpy.complex128]:
have_complex = True
k = GpuKernel(src, "extract_tri", spec, context=ctx,
have_double=have_double, have_small=have_small,
have_complex=have_complex)
return k
def triu(A, inplace=True):
if A.ndim != 2:
raise ValueError("triu only works for 2d arrays")
if A.flags.c_contiguous is A.flags.f_contiguous is False:
raise ValueError("triu only works for contiguous arrays")
if not inplace:
A = A.copy()
if A.flags['F_CONTIGUOUS']:
upper = False
cols = A.shape[0]
else:
upper = True
cols = A.shape[1]
k = _generate_kernel(A.context, cols, A.dtype, upper)
k(A, A.offset, A.shape[0] * A.shape[1], n=A.shape[0] * A.shape[1])
return A
def tril(A, inplace=True):
if A.ndim != 2:
raise ValueError("tril only works for 2d arrays")
if A.flags.c_contiguous is A.flags.f_contiguous is False:
raise ValueError("tril only works for contiguous arrays")
if not inplace:
A = A.copy()
if A.flags['F_CONTIGUOUS']:
upper = True
cols = A.shape[0]
else:
upper = False
cols = A.shape[1]
k = _generate_kernel(A.context, cols, A.dtype, upper)
k(A, A.offset, A.shape[0] * A.shape[1], n=A.shape[0] * A.shape[1])
return A
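# A host-side reference for what the generated kernel does for triu: each flat
# index maps to (row, col) = (idx // cols, idx % cols) and, with le='>', every
# element strictly below the diagonal is zeroed. Plain numpy, no GPU context.
def _triu_reference(a):
    out = numpy.array(a, copy=True)
    cols = out.shape[1]
    flat = out.reshape(-1)
    for idx in range(flat.size):
        ix, iy = idx // cols, idx % cols
        if ix > iy:  # same test the kernel performs when upper=True
            flat[idx] = 0
    # matches numpy.triu(a) for a 2d contiguous array
    return out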
| 30.858974 | 79 | 0.582883 |
a5b734f8d2a38d176b9a08f07e5f44c01b6a8de8
| 263 |
py
|
Python
|
script/model/sklearn_like_model/NetModule/InceptionSructure/Inception_ResnetV2Structure.py
|
demetoir/MLtools
|
8c42fcd4cc71728333d9c116ade639fe57d50d37
|
[
"MIT"
] | null | null | null |
script/model/sklearn_like_model/NetModule/InceptionSructure/Inception_ResnetV2Structure.py
|
demetoir/MLtools
|
8c42fcd4cc71728333d9c116ade639fe57d50d37
|
[
"MIT"
] | null | null | null |
script/model/sklearn_like_model/NetModule/InceptionSructure/Inception_ResnetV2Structure.py
|
demetoir/MLtools
|
8c42fcd4cc71728333d9c116ade639fe57d50d37
|
[
"MIT"
] | null | null | null |
from script.model.sklearn_like_model.NetModule.InceptionSructure.BaseInceptionNetModule import \
BaseInceptionNetModule
class Inception_ResnetV2NetModule(BaseInceptionNetModule):
def build(self):
# TODO
raise NotImplementedError
| 29.222222 | 97 | 0.771863 |
17862c9185d624b4272e7d8063c1a8fe28c2dabd
| 4,168 |
py
|
Python
|
neutron/tests/unit/sriovnicagent/test_pci_lib.py
|
gampel/neutron
|
51a6260266dc59c066072ca890ad9c40b1aad6cf
|
[
"Apache-2.0"
] | 10 |
2015-09-22T10:22:53.000Z
|
2016-02-25T06:12:05.000Z
|
neutron/tests/unit/sriovnicagent/test_pci_lib.py
|
gampel/neutron
|
51a6260266dc59c066072ca890ad9c40b1aad6cf
|
[
"Apache-2.0"
] | 12 |
2015-01-08T18:30:45.000Z
|
2015-03-13T21:04:15.000Z
|
neutron/tests/unit/sriovnicagent/test_pci_lib.py
|
gampel/neutron
|
51a6260266dc59c066072ca890ad9c40b1aad6cf
|
[
"Apache-2.0"
] | 7 |
2015-02-05T10:23:52.000Z
|
2019-05-18T17:11:19.000Z
|
# Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from neutron.plugins.sriovnicagent.common import exceptions as exc
from neutron.plugins.sriovnicagent import pci_lib
from neutron.tests import base
class TestPciLib(base.BaseTestCase):
DEV_NAME = "p7p1"
VF_INDEX = 1
VF_INDEX_DISABLE = 0
PF_LINK_SHOW = ('122: p7p1: <BROADCAST,MULTICAST> mtu 1500 qdisc noop'
' state DOWN mode DEFAULT group default qlen 1000')
PF_MAC = ' link/ether f4:52:14:2a:3e:c0 brd ff:ff:ff:ff:ff:ff'
VF_0_LINK_SHOW = (' vf 0 MAC fa:16:3e:b4:81:ac, vlan 4095, spoof'
' checking off, link-state disable')
VF_1_LINK_SHOW = (' vf 1 MAC 00:00:00:00:00:11, vlan 4095, spoof'
' checking off, link-state enable')
VF_2_LINK_SHOW = (' vf 2 MAC fa:16:3e:68:4e:79, vlan 4095, spoof'
' checking off, link-state enable')
VF_LINK_SHOW = '\n'.join((PF_LINK_SHOW, PF_MAC, VF_0_LINK_SHOW,
VF_1_LINK_SHOW, VF_2_LINK_SHOW))
MAC_MAPPING = {
0: "fa:16:3e:b4:81:ac",
1: "00:00:00:00:00:11",
2: "fa:16:3e:68:4e:79",
}
def setUp(self):
super(TestPciLib, self).setUp()
self.pci_wrapper = pci_lib.PciDeviceIPWrapper(self.DEV_NAME)
def test_get_assigned_macs(self):
with mock.patch.object(self.pci_wrapper,
"_execute") as mock_exec:
mock_exec.return_value = self.VF_LINK_SHOW
result = self.pci_wrapper.get_assigned_macs([self.VF_INDEX])
self.assertEqual([self.MAC_MAPPING[self.VF_INDEX]], result)
def test_get_assigned_macs_fail(self):
with mock.patch.object(self.pci_wrapper,
"_execute") as mock_exec:
mock_exec.side_effect = Exception()
self.assertRaises(exc.IpCommandError,
self.pci_wrapper.get_assigned_macs,
[self.VF_INDEX])
def test_get_vf_state_enable(self):
with mock.patch.object(self.pci_wrapper,
"_execute") as mock_exec:
mock_exec.return_value = self.VF_LINK_SHOW
result = self.pci_wrapper.get_vf_state(self.VF_INDEX)
self.assertTrue(result)
def test_get_vf_state_disable(self):
with mock.patch.object(self.pci_wrapper,
"_execute") as mock_exec:
mock_exec.return_value = self.VF_LINK_SHOW
result = self.pci_wrapper.get_vf_state(self.VF_INDEX_DISABLE)
self.assertFalse(result)
def test_get_vf_state_fail(self):
with mock.patch.object(self.pci_wrapper,
"_execute") as mock_exec:
mock_exec.side_effect = Exception()
self.assertRaises(exc.IpCommandError,
self.pci_wrapper.get_vf_state,
self.VF_INDEX)
def test_set_vf_state(self):
with mock.patch.object(self.pci_wrapper, "_execute"):
result = self.pci_wrapper.set_vf_state(self.VF_INDEX,
True)
self.assertIsNone(result)
def test_set_vf_state_fail(self):
with mock.patch.object(self.pci_wrapper,
"_execute") as mock_exec:
mock_exec.side_effect = Exception()
self.assertRaises(exc.IpCommandError,
self.pci_wrapper.set_vf_state,
self.VF_INDEX,
True)
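# Illustrative only: a sketch of the VF index -> MAC mapping that the fixtures
# above encode; this is not the pci_lib implementation, just the shape of the
# parsing, assuming 'ip link show' lines of the form "vf <n> MAC <mac>, ...".
import re
_VF_LINE = re.compile(r"vf\s+(\d+)\s+MAC\s+([0-9a-fA-F:]+),")
def _parse_vf_macs(link_show_output):
    return {int(m.group(1)): m.group(2)
            for m in _VF_LINE.finditer(link_show_output)}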
| 41.267327 | 74 | 0.605086 |
b5627c7766d52807108a811a712e84b1e9050ec1
| 66,249 |
py
|
Python
|
notsobot/notsobot.py
|
fixator10/Trusty-cogs
|
3d47a63f562cb64eb44da6bb53cfe9f8324026e7
|
[
"MIT"
] | null | null | null |
notsobot/notsobot.py
|
fixator10/Trusty-cogs
|
3d47a63f562cb64eb44da6bb53cfe9f8324026e7
|
[
"MIT"
] | null | null | null |
notsobot/notsobot.py
|
fixator10/Trusty-cogs
|
3d47a63f562cb64eb44da6bb53cfe9f8324026e7
|
[
"MIT"
] | null | null | null |
# https://github.com/NotSoSuper/NotSoBot
import asyncio
import logging
import random
import re
import sys
import textwrap
import uuid
from io import BytesIO
from typing import List, Optional, Tuple, Union
from urllib.parse import quote
import aiohttp
import discord
import jpglitch
import numpy as np
import PIL
import wand
import wand.color
import wand.drawing
from PIL import Image, ImageDraw, ImageFont, ImageOps, ImageSequence
from pyfiglet import figlet_format
from redbot.core import commands
from redbot.core.data_manager import bundled_data_path, cog_data_path
from .converter import ImageFinder
from .vw import macintoshplus
log = logging.getLogger("red.trusty-cogs.NotSoBot")
try:
import aalib
AALIB_INSTALLED = True
except Exception:
AALIB_INSTALLED = False
code = "```py\n{0}\n```"
def posnum(num):
if num < 0:
return -(num)
else:
return num
def find_coeffs(pa, pb):
    """Solve for the eight perspective-transform coefficients that map the
    quadrilateral ``pa`` in the output image onto ``pb`` in the source image."""
    matrix = []
    for p1, p2 in zip(pa, pb):
        matrix.append([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]])
        matrix.append([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]])
    A = np.matrix(matrix, dtype=float)
    B = np.array(pb).reshape(8)
    res = np.dot(np.linalg.inv(A.T * A) * A.T, B)
    return np.array(res).reshape(8)
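# A hedged sketch of how find_coeffs is typically consumed: the eight returned
# coefficients are the `data` argument of PIL's perspective transform. The
# corner lists below are illustrative, not values used anywhere in this cog.
def _example_perspective_warp(img):
    w, h = img.size
    target = [(0, 0), (w, 0), (w, h), (0, h)]                        # output rectangle
    source = [(0, 0), (w, 0), (int(w * 0.9), h), (int(w * 0.1), h)]  # input quadrilateral
    coeffs = find_coeffs(target, source)
    # Each output pixel samples the source quad, stretching it to fill the frame.
    return img.transform((w, h), Image.PERSPECTIVE, coeffs, Image.BICUBIC)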
class DataProtocol(asyncio.SubprocessProtocol):
def __init__(self, exit_future):
self.exit_future = exit_future
self.output = bytearray()
def pipe_data_received(self, fd, data):
self.output.extend(data)
def process_exited(self):
try:
self.exit_future.set_result(True)
except Exception:
pass
def pipe_connection_lost(self, fd, exc):
try:
self.exit_future.set_result(True)
except Exception:
pass
def connection_made(self, transport):
self.transport = transport
def connection_lost(self, exc):
try:
self.exit_future.set_result(True)
except Exception:
pass
class NotSoBot(commands.Cog):
"""
Rewrite of many NotSoBot commands to work on RedBot V3
"""
__author__ = ["NotSoSuper", "TrustyJAID"]
__version__ = "2.5.1"
def __init__(self, bot):
self.bot = bot
self.image_cache = {}
self.search_cache = {}
self.youtube_cache = {}
self.twitch_cache = []
self.api_count = 0
self.emoji_map = {
"a": "",
"b": "",
"c": "©",
"d": "↩",
"e": "",
"f": "",
"g": "⛽",
"h": "♓",
"i": "ℹ",
"j": "" or "",
"k": "",
"l": "",
"m": "Ⓜ",
"n": "♑",
"o": "⭕" or "",
"p": "",
"q": "",
"r": "®",
"s": "" or "⚡",
"t": "",
"u": "⛎",
"v": "" or "♈",
"w": "〰" or "",
"x": "❌" or "⚔",
"y": "✌",
"z": "Ⓩ",
"1": "1⃣",
"2": "2⃣",
"3": "3⃣",
"4": "4⃣",
"5": "5⃣",
"6": "6⃣",
"7": "7⃣",
"8": "8⃣",
"9": "9⃣",
"0": "0⃣",
"$": "",
"!": "❗",
"?": "❓",
" ": " ",
}
self.retro_regex = re.compile(
r"((https)(\:\/\/|)?u2?\.photofunia\.com\/.\/results\/.\/.\/.*(\.jpg\?download))"
)
self.image_mimes = ["image/png", "image/pjpeg", "image/jpeg", "image/x-icon"]
self.gif_mimes = ["image/gif"]
def format_help_for_context(self, ctx: commands.Context) -> str:
"""
Thanks Sinbad!
"""
pre_processed = super().format_help_for_context(ctx)
return f"{pre_processed}\n\nCog Version: {self.__version__}"
async def red_delete_data_for_user(self, **kwargs):
"""
Nothing to delete
"""
return
def random(self, image=False, ext: str = "png"):
h = str(uuid.uuid4().hex)
if image:
return "{0}.{1}".format(h, ext)
return h
async def get_text(self, url: str):
try:
async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
try:
text = await resp.text()
return text
except Exception:
return False
except asyncio.TimeoutError:
return False
async def truncate(self, channel, msg):
if len(msg) == 0:
return
split = [msg[i : i + 1999] for i in range(0, len(msg), 1999)]
try:
for s in split:
await channel.send(s)
await asyncio.sleep(0.21)
except Exception as e:
await channel.send(e)
async def safe_send(self, ctx, text, file, file_size):
if not ctx.channel.permissions_for(ctx.me).send_messages:
file.close()
return
if not ctx.channel.permissions_for(ctx.me).attach_files:
await ctx.send("I don't have permission to attach files.")
file.close()
return
BASE_FILESIZE_LIMIT = 8388608
if ctx.guild and file_size < ctx.guild.filesize_limit:
await ctx.send(content=text, file=file)
elif not ctx.guild and file_size < BASE_FILESIZE_LIMIT:
await ctx.send(content=text, file=file)
else:
await ctx.send("The contents of this command is too large to upload!")
file.close()
async def bytes_download(
self, url: Union[discord.Asset, discord.Attachment, str]
) -> Tuple[Union[BytesIO, bool], Union[str, bool]]:
if isinstance(url, discord.Asset) or isinstance(url, discord.Attachment):
log.debug("Pulling data from discord")
try:
b = BytesIO()
await url.save(b)
mime = getattr(url, "content_type", "None")
return b, mime
except discord.HTTPException:
return False, False
try:
async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
if resp.status == 200:
data = await resp.read()
mime = resp.headers.get("Content-type", "").lower()
b = BytesIO(data)
b.seek(0)
return b, mime
else:
return False, False
except asyncio.TimeoutError:
return False, False
except Exception:
log.error("Error downloading to bytes", exc_info=True)
return False, False
def do_magik(self, scale, img):
try:
list_imgs = []
exif = {}
exif_msg = ""
count = 0
i = wand.image.Image(file=img)
i.format = "png"
i.alpha_channel = True
if i.size >= (3000, 3000):
return ":warning: `Image exceeds maximum resolution >= (3000, 3000).`", None, 0
exif.update(
{count: (k[5:], v) for k, v in i.metadata.items() if k.startswith("exif:")}
)
count += 1
i.transform(resize="800x800>")
i.liquid_rescale(
width=int(i.width * 0.5),
height=int(i.height * 0.5),
delta_x=int(0.5 * scale) if scale else 1,
rigidity=0,
)
i.liquid_rescale(
width=int(i.width * 1.5),
height=int(i.height * 1.5),
delta_x=scale if scale else 2,
rigidity=0,
)
magikd = BytesIO()
i.save(file=magikd)
file_size = magikd.tell()
magikd.seek(0)
list_imgs.append(magikd)
for x in exif:
if len(exif[x]) >= 2000:
continue
exif_msg += "**Exif data for image #{0}**\n".format(str(x + 1)) + code.format(
exif[x]
)
else:
if len(exif_msg) == 0:
exif_msg = None
file = discord.File(list_imgs[0], filename="magik.png")
i.close()
for img in list_imgs:
img.close()
return file, exif_msg, file_size
except Exception:
log.error("Error processing magik", exc_info=True)
@commands.command(aliases=["imagemagic", "imagemagick", "magic", "magick", "cas", "liquid"])
@commands.cooldown(2, 5, commands.BucketType.user)
async def magik(self, ctx, urls: ImageFinder = None, scale: int = 2, scale_msg: str = ""):
"""
Apply magik to Image(s)
`[p]magik image_url` or `[p]magik image_url image_url_2`
"""
if urls is None:
urls = await ImageFinder().search_for_images(ctx)
msg = await ctx.message.channel.send("ok, processing")
async with ctx.typing():
b, mime = await self.bytes_download(urls[0])
if b is False:
await ctx.send(":warning: **Command download function failed...**")
return
await msg.delete()
task = self.bot.loop.run_in_executor(None, self.do_magik, scale, b)
try:
file, content_msg, file_size = await asyncio.wait_for(task, timeout=60)
except (asyncio.TimeoutError, TypeError):
return await ctx.send(
"That image is either too large or given image format is unsupported."
)
if type(file) == str:
await ctx.send(file)
return
if content_msg is None:
content_msg = scale_msg
else:
content_msg = scale_msg + content_msg
await self.safe_send(ctx, content_msg, file, file_size)
def do_gmagik(self, image, frame_delay):
final = BytesIO()
# if is_gif:
is_gif = False
with wand.image.Image() as new_image:
with wand.image.Image(file=image) as img:
if len(getattr(img, "sequence", [])) > 1:
is_gif = True
if is_gif:
log.debug("Is gif")
for change in img.sequence:
change.transform(resize="512x512>")
change.liquid_rescale(
width=int(change.width * 0.5),
height=int(change.height * 0.5),
delta_x=1,
rigidity=0,
)
change.liquid_rescale(
width=int(change.width * 1.5),
height=int(change.height * 1.5),
delta_x=2,
rigidity=0,
)
# change.sample(200, 200)
# i.save(filename=image)
new_image.sequence.append(change)
# for i in range(len(img.sequence)):
# with img.sequence[i] as change:
else:
log.debug("Is not gif")
for x in range(0, 30):
if x == 0:
log.debug("Cloning initial image")
i = img.clone().convert("gif")
else:
i = new_image.sequence[-1].clone()
i.transform(resize="512x512>")
i.liquid_rescale(
width=int(i.width * 0.75),
height=int(i.height * 0.75),
delta_x=1,
rigidity=0,
)
i.liquid_rescale(
width=int(i.width * 1.25),
height=int(i.height * 1.25),
delta_x=2,
rigidity=0,
)
i.resize(img.width, img.height)
new_image.sequence.append(i)
new_image.format = "gif"
new_image.dispose = "background"
new_image.type = "optimize"
new_image.save(file=final)
file_size = final.tell()
final.seek(0)
file = discord.File(final, filename="gmagik.gif")
final.close()
return file, file_size
@commands.command()
@commands.cooldown(1, 20, commands.BucketType.guild)
@commands.bot_has_permissions(attach_files=True)
async def gmagik(self, ctx, urls: ImageFinder = None, frame_delay: int = 1):
"""Attempt to do magik on a gif"""
if urls is None:
urls = await ImageFinder().search_for_images(ctx)
url = urls[0]
x = await ctx.message.channel.send("ok, processing (this might take a while for big gifs)")
async with ctx.typing():
if frame_delay > 60:
frame_delay = 60
elif frame_delay < 0:
frame_delay = 1
b, mime = await self.bytes_download(url)
if b is False:
await ctx.send(":warning: **Command download function failed...**")
return
try:
task = self.bot.loop.run_in_executor(None, self.do_gmagik, b, frame_delay)
file, file_size = await asyncio.wait_for(task, timeout=120)
except asyncio.TimeoutError:
return await ctx.send("That image is too large.")
except Exception:
log.exception("Error running gmagik")
await ctx.send(":warning: Gmagik failed...")
return
await self.safe_send(ctx, None, file, file_size)
await x.delete()
@commands.command()
@commands.bot_has_permissions(attach_files=True)
async def caption(
self,
ctx,
urls: Optional[ImageFinder] = None,
text: str = "Caption",
color: str = "white",
size: int = 40,
x: int = 0,
y: int = 0,
):
"""
Add caption to an image
`[urls]` are the image urls or users or previous images in chat to add a caption to.
`[text=Caption]` is the text to caption on the image.
`[color=white]` is the color of the text.
`[size=40]` is the size of the text
`[x=0]` is the height the text starts at between 0 and 100% where 0 is the top and 100 is the bottom of the image.
`[y=0]` is the width the text starts at between 0 and 100% where 0 is the left and 100 is the right of the image.
"""
if urls is None:
urls = await ImageFinder().search_for_images(ctx)
url = urls[0]
if url is None:
await ctx.send(
"Error: Invalid Syntax\n`.caption <image_url> <text>**"
" <color>* <size>* <x>* <y>*`\n`* = Optional`\n`** = Wrap text in quotes`"
)
return
async with ctx.typing():
xx = await ctx.message.channel.send("ok, processing")
b, mime = await self.bytes_download(url)
if mime not in self.image_mimes and not isinstance(url, discord.Asset):
return await ctx.send("That is not a valid image!")
if b is False:
await ctx.send(":warning: **Command download function failed...**")
return
is_gif = mime in self.gif_mimes
font_path = str(bundled_data_path(self)) + "/arial.ttf"
try:
color = wand.color.Color(color)
except ValueError:
await ctx.send(":warning: **That is not a valid color!**")
await xx.delete()
return
font = wand.font.Font(path=font_path, size=size, color=color)
if x > 100:
x = 100
if x < 0:
x = 0
if y > 100:
y = 100
if y < 0:
y = 0
def make_caption_image(b, text, color, font, x, y, is_gif):
final = BytesIO()
with wand.image.Image(file=b) as img:
i = img.clone()
x = int(i.height * (x * 0.01))
y = int(i.width * (y * 0.01))
if not is_gif:
i.caption(str(text), left=x, top=y, font=font)
else:
with wand.image.Image() as new_image:
for frame in img.sequence:
frame.caption(str(text), left=x, top=y, font=font)
new_image.sequence.append(frame)
new_image.save(file=final)
i.save(file=final)
file_size = final.tell()
final.seek(0)
filename = f"caption.{'png' if not is_gif else 'gif'}"
file = discord.File(final, filename=filename)
final.close()
return file, file_size
await xx.delete()
task = ctx.bot.loop.run_in_executor(
None, make_caption_image, b, text, color, font, x, y, is_gif
)
try:
file, file_size = await asyncio.wait_for(task, timeout=60)
except asyncio.TimeoutError:
return await ctx.send("That image is too large.")
await ctx.send(file=file)
def trigger_image(self, path: BytesIO, t_path: BytesIO) -> Tuple[discord.File, int]:
final = BytesIO()
with wand.image.Image(width=512, height=680) as img:
img.format = "gif"
img.dispose = "background"
img.type = "optimize"
with wand.image.Image(file=path) as top_img:
top_img.transform(resize="640x640!")
with wand.image.Image(file=t_path) as trigger:
with wand.image.Image(width=512, height=660) as temp_img:
i = top_img.clone()
t = trigger.clone()
temp_img.composite(i, -60, -60)
temp_img.composite(t, 0, 572)
img.composite(temp_img)
with wand.image.Image(width=512, height=660) as temp_img:
i = top_img.clone()
t = trigger.clone()
temp_img.composite(i, -45, -50)
temp_img.composite(t, 0, 572)
img.sequence.append(temp_img)
with wand.image.Image(width=512, height=660) as temp_img:
i = top_img.clone()
t = trigger.clone()
temp_img.composite(i, -50, -45)
temp_img.composite(t, 0, 572)
img.sequence.append(temp_img)
with wand.image.Image(width=512, height=660) as temp_img:
i = top_img.clone()
t = trigger.clone()
temp_img.composite(i, -45, -65)
temp_img.composite(t, 0, 572)
img.sequence.append(temp_img)
# img.optimize_layers()
# img.optimize_transparency()
for frame in img.sequence:
frame.delay = 2
img.save(file=final)
file_size = final.tell()
final.seek(0)
file = discord.File(final, filename="triggered.gif")
final.close()
return file, file_size
@commands.command()
@commands.cooldown(1, 5)
@commands.bot_has_permissions(attach_files=True)
async def triggered(self, ctx, urls: ImageFinder = None):
"""Generate a Triggered Gif for a User or Image"""
if urls is None:
urls = [ctx.author.avatar_url_as(format="png")]
avatar = urls[0]
async with ctx.typing():
img, mime = await self.bytes_download(str(avatar))
trig, mime = await self.bytes_download("https://i.imgur.com/zDAY2yo.jpg")
if img is False or trig is False:
await ctx.send(":warning: **Command download function failed...**")
return
try:
task = ctx.bot.loop.run_in_executor(None, self.trigger_image, img, trig)
file, file_size = await asyncio.wait_for(task, timeout=60)
except asyncio.TimeoutError:
return await ctx.send("Error creating trigger image")
await self.safe_send(ctx, None, file, file_size)
@commands.command(aliases=["aes"])
@commands.bot_has_permissions(attach_files=True)
async def aesthetics(self, ctx, *, text: str):
"""Returns inputed text in aesthetics"""
final = ""
pre = " ".join(text)
for char in pre:
if not ord(char) in range(33, 127):
final += char
continue
final += chr(ord(char) + 65248)
await self.truncate(ctx.message.channel, final)
def do_ascii(self, text):
try:
i = Image.new("RGB", (2000, 1000))
img = ImageDraw.Draw(i)
txt = figlet_format(text, font="starwars")
img.text((20, 20), figlet_format(text, font="starwars"), fill=(0, 255, 0))
text_width, text_height = img.textsize(figlet_format(text, font="starwars"))
imgs = Image.new("RGB", (text_width + 30, text_height))
ii = ImageDraw.Draw(imgs)
ii.text((20, 20), figlet_format(text, font="starwars"), fill=(0, 255, 0))
text_width, text_height = ii.textsize(figlet_format(text, font="starwars"))
final = BytesIO()
imgs.save(final, "png")
file_size = final.tell()
final.seek(0)
file = discord.File(final, filename="ascii.png")
final.close()
imgs.close()
return file, txt, file_size
except Exception:
return False, False
@commands.command(aliases=["expand"])
@commands.cooldown(1, 5)
@commands.bot_has_permissions(attach_files=True)
async def ascii(self, ctx, *, text: str):
"""Convert text into ASCII"""
if len(text) > 1000:
await ctx.send("Text is too long!")
return
if text == "donger" or text == "dong":
text = "8====D"
async with ctx.typing():
task = self.bot.loop.run_in_executor(None, self.do_ascii, text)
try:
file, txt, file_size = await asyncio.wait_for(task, timeout=60)
except asyncio.TimeoutError:
return await ctx.send("That image is too large.")
if file is False:
await ctx.send(":no_entry: go away with your invalid characters.")
return
if len(txt) >= 1999:
# await self.gist(ctx, text, txt)
msg = None
elif len(txt) <= 600:
msg = "```fix\n{0}```".format(txt)
else:
msg = None
await self.safe_send(ctx, msg, file, file_size)
def generate_ascii(self, image):
font = ImageFont.truetype(str(cog_data_path(self)) + "/FreeMonoBold.ttf", 15)
image_width, image_height = image.size
aalib_screen_width = int(image_width / 24.9) * 10
aalib_screen_height = int(image_height / 41.39) * 10
screen = aalib.AsciiScreen(width=aalib_screen_width, height=aalib_screen_height)
im = image.convert("L").resize(screen.virtual_size)
screen.put_image((0, 0), im)
y = 0
how_many_rows = len(screen.render().splitlines())
new_img_width, font_size = font.getsize(screen.render().splitlines()[0])
img = Image.new("RGBA", (new_img_width, how_many_rows * 15), (255, 255, 255))
draw = ImageDraw.Draw(img)
for lines in screen.render().splitlines():
draw.text((0, y), lines, (0, 0, 0), font=font)
y += 15
imagefit = ImageOps.fit(img, (image_width, image_height), Image.ANTIALIAS)
final = BytesIO()
img.save(final, "png")
file_size = final.tell()
final.seek(0)
# file = discord.File(final, filename="iascii.png")
# final.close()
img.close()
return final, file_size
async def check_font_file(self):
try:
ImageFont.truetype(cog_data_path(self) / "FreeMonoBold.ttf", 15)
except Exception:
async with aiohttp.ClientSession() as session:
async with session.get(
"https://github.com/opensourcedesign/fonts"
"/raw/master/gnu-freefont_freemono/FreeMonoBold.ttf"
) as resp:
data = await resp.read()
with open(cog_data_path(self) / "FreeMonoBold.ttf", "wb") as save_file:
save_file.write(data)
@commands.command()
@commands.cooldown(1, 5, commands.BucketType.user)
@commands.check(lambda ctx: AALIB_INSTALLED)
@commands.bot_has_permissions(attach_files=True)
async def iascii(self, ctx, urls: ImageFinder = None):
"""Generate an ascii art image of last image in chat or from URL"""
if not AALIB_INSTALLED:
await ctx.send("aalib couldn't be found on this machine!")
return
await self.check_font_file()
if urls is None:
urls = await ImageFinder().search_for_images(ctx)
url = urls[0]
x = await ctx.send("ok, processing")
async with ctx.typing():
b, mime = await self.bytes_download(url)
if mime not in self.image_mimes and not isinstance(url, discord.Asset):
return await ctx.send("That is not a valid image!")
if b is False:
await ctx.send(":warning: **Command download function failed...**")
return
im = Image.open(b)
task = self.bot.loop.run_in_executor(None, self.generate_ascii, im)
try:
temp, file_size = await asyncio.wait_for(task, timeout=60)
except (asyncio.TimeoutError, PIL.UnidentifiedImageError):
return await ctx.send(
"That image is either too large or image filetype is unsupported."
)
file = discord.File(temp, "iascii.png")
temp.close()
await x.delete()
await self.safe_send(ctx, None, file, file_size)
def do_gascii(self, b):
img_list = []
temp = BytesIO()
try:
image = Image.open(b)
gif_list = [frame.copy() for frame in ImageSequence.Iterator(image)]
count = 0
for frame in gif_list[:20]:
im = frame.copy()
new_im, size = self.generate_ascii(im)
img = Image.open(new_im)
img_list.append(img)
count += 1
temp = BytesIO()
img.save(temp, format="GIF", save_all=True, append_images=img_list, duration=0, loop=0)
file_size = temp.tell()
temp.seek(0)
file = discord.File(temp, filename="gascii.gif")
temp.close()
image.close()
return file, file_size
except Exception:
raise
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.guild)
@commands.check(lambda ctx: AALIB_INSTALLED)
@commands.bot_has_permissions(attach_files=True)
async def gascii(self, ctx, urls: ImageFinder = None):
"""Gif to ASCII"""
if not AALIB_INSTALLED:
await ctx.send("aalib couldn't be found on this machine!")
return
await self.check_font_file()
if urls is None:
urls = await ImageFinder().search_for_images(ctx)
url = urls[0]
x = await ctx.message.channel.send("ok, processing")
async with ctx.typing():
b, mime = await self.bytes_download(url)
if b is False:
await ctx.send(":warning: **Command download function failed...**")
return
result = self.bot.loop.run_in_executor(None, self.do_gascii, b)
try:
file, file_size = await asyncio.wait_for(result, timeout=60)
except asyncio.TimeoutError:
return
except Exception:
log.exception("Error Running gascii")
return await ctx.send("There was an error performing gascii.")
await x.delete()
await self.safe_send(ctx, None, file, file_size)
@commands.command()
@commands.bot_has_permissions(attach_files=True)
async def rip(self, ctx, name: str = None, *, text: str = None):
"""Generate tombstone image with name and optional text"""
if name is None:
name = ctx.message.author.name
if len(ctx.message.mentions) >= 1:
name = ctx.message.mentions[0].name
b, mime = await self.bytes_download("https://i.imgur.com/xNWxZHn.jpg")
if b is False:
await ctx.send(":warning: **Command download function failed...**")
return
if not text:
text = f"{name}'s\n Hopes and dreams"
else:
text = f"{name}\n{text}"
if not b:
return
def make_rip(image, text):
img = Image.open(image).convert("RGB")
draw = ImageDraw.Draw(img)
font_path = str(bundled_data_path(self)) + "/arial.ttf"
font1 = ImageFont.truetype(font_path, 35)
text = "\n".join(line for line in textwrap.wrap(text, width=15))
w, h = draw.multiline_textsize(text, font=font1)
draw.multiline_text(
(((400 - w) / 2) - 1, 50), text, fill=(50, 50, 50), font=font1, align="center"
)
draw.multiline_text(
(((400 - w) / 2) + 1, 50), text, fill=(50, 50, 50), font=font1, align="center"
)
draw.multiline_text(
(((400 - w) / 2), 49), text, fill=(50, 50, 50), font=font1, align="center"
)
draw.multiline_text(
(((400 - w) / 2), 51), text, fill=(50, 50, 50), font=font1, align="center"
)
draw.multiline_text(
((400 - w) / 2, 50), text, fill=(105, 105, 105), font=font1, align="center"
)
final = BytesIO()
img.save(final, "JPEG")
file_size = final.tell()
final.seek(0)
file = discord.File(final, filename="rip.jpg")
final.close()
img.close()
return file, file_size
task = ctx.bot.loop.run_in_executor(None, make_rip, b, text)
try:
file, file_size = await asyncio.wait_for(task, timeout=60)
except asyncio.TimeoutError:
return await ctx.send("That image is too large.")
await self.safe_send(ctx, None, file, file_size)
@commands.command()
@commands.cooldown(1, 5)
@commands.bot_has_permissions(attach_files=True) # ImageFinder consumes rest this is so goat'd
async def merge(self, ctx, vertical: Optional[bool] = True, *, urls: Optional[ImageFinder]):
"""
Merge/Combine Two Photos
`[vertical=True]` `true` or `false` to merge vertically.
`[urls]` The Image URL's you want to merge together. If not supplied
images are searched from message history.
"""
if urls is None:
urls = await ImageFinder().search_for_images(ctx)
if not urls:
return await ctx.send("No images found.")
async with ctx.typing():
if len(urls) == 1:
await ctx.send("You need to supply more than 1 image.")
return
xx = await ctx.message.channel.send("ok, processing")
count = 0
list_im = []
for url in urls:
log.debug(url)
count += 1
b, mime = await self.bytes_download(str(url))
if sys.getsizeof(b) == 215:
await ctx.send(":no_entry: Image `{0}` is invalid!".format(str(count)))
continue
if not b:
continue
list_im.append(b)
def make_merge(list_im):
imgs = [Image.open(i).convert("RGBA") for i in list_im]
if vertical:
# Vertical
max_shape = sorted([(np.sum(i.size), i.size) for i in imgs])[1][1]
imgs_comb = np.vstack([np.asarray(i.resize(max_shape)) for i in imgs])
else:
# Horizontal
min_shape = sorted([(np.sum(i.size), i.size) for i in imgs])[0][1]
imgs_comb = np.hstack([np.asarray(i.resize(min_shape)) for i in imgs])
imgs_comb = Image.fromarray(imgs_comb)
final = BytesIO()
imgs_comb.save(final, "png")
file_size = final.tell()
final.seek(0)
file = discord.File(final, filename="merge.png")
final.close()
for i in imgs:
i.close()
return file, file_size
if len(list_im) < 2:
return await ctx.send("You need to supply more than 1 image.")
await xx.delete()
task = ctx.bot.loop.run_in_executor(None, make_merge, list_im)
try:
file, file_size = await asyncio.wait_for(task, timeout=60)
except (asyncio.TimeoutError, PIL.UnidentifiedImageError):
return await ctx.send(
"That image is either too large or image filetype is unsupported."
)
await self.safe_send(ctx, None, file, file_size)
@commands.command()
async def emojify(self, ctx, *, txt: str):
"""Replace characters in text with emojis"""
txt = txt.lower()
msg = ""
for s in txt:
if s in self.emoji_map:
msg += "{0}".format(self.emoji_map[s])
else:
msg += s
await ctx.send(msg)
async def get_colour(self, channel):
try:
if await self.bot.db.guild(channel.guild).use_bot_color():
return channel.guild.me.colour
else:
return await self.bot.db.color()
except AttributeError:
return await self.bot.get_embed_colour(channel)
@commands.command(aliases=["needsmorejpeg", "jpegify", "magik2"])
@commands.cooldown(2, 5, commands.BucketType.user)
@commands.bot_has_permissions(attach_files=True)
async def jpeg(self, ctx, urls: Optional[ImageFinder] = None, quality: int = 1):
"""
Add more JPEG to an Image
Needs More JPEG!
`[urls]` is optional, if not provided will search chat for a valid image.
`[quality]` is the quality of the new jpeg image to make
"""
if urls is None:
urls = await ImageFinder().search_for_images(ctx)
url = urls[0]
if quality > 10:
quality = 10
elif quality < 1:
quality = 1
async with ctx.typing():
b, mime = await self.bytes_download(url)
if b is False:
await ctx.send(":warning: **Command download function failed...**")
return
def make_jpeg(b):
img = Image.open(b).convert("RGB")
final = BytesIO()
img.save(final, "JPEG", quality=quality)
file_size = final.tell()
final.seek(0)
file = discord.File(final, filename="needsmorejpeg.jpg")
final.close()
return file, file_size
task = ctx.bot.loop.run_in_executor(None, make_jpeg, b)
try:
file, file_size = await asyncio.wait_for(task, timeout=60)
except (asyncio.TimeoutError, PIL.UnidentifiedImageError):
return await ctx.send(
"That image is either too large or image filetype is unsupported."
)
await self.safe_send(ctx, None, file, file_size)
def do_vw(self, b, txt):
im = Image.open(b)
k = random.randint(0, 100)
im = macintoshplus.draw_method1(k, txt, im)
final = BytesIO()
im.save(final, "png")
file_size = final.tell()
final.seek(0)
file = discord.File(final, filename="vapewave.png")
final.close()
return file, file_size
@commands.command(aliases=["vaporwave", "vape", "vapewave"])
@commands.cooldown(2, 5)
@commands.bot_has_permissions(attach_files=True)
async def vw(self, ctx, urls: ImageFinder = None, *, txt: str = None):
"""Add vaporwave flavours to an image"""
if urls is None:
urls = await ImageFinder().search_for_images(ctx)
url = urls[0]
if txt is None:
txt = "vapor wave"
b, mime = await self.bytes_download(url)
if b is False:
await ctx.send(":warning: **Command download function failed...**")
return
try:
task = self.bot.loop.run_in_executor(None, self.do_vw, b, txt)
file, file_size = await asyncio.wait_for(task, timeout=60)
except asyncio.TimeoutError:
return await ctx.send("That image is too large.")
except Exception:
return await ctx.send("That image cannot be vaporwaved.")
await self.safe_send(ctx, None, file, file_size)
@commands.command(aliases=["achievement"])
@commands.bot_has_permissions(attach_files=True)
async def minecraftachievement(self, ctx, *, txt: str):
"""Generate a Minecraft Achievement"""
b, mime = await self.bytes_download("https://i.imgur.com/JtNJFZy.png")
if b is False:
await ctx.send(":warning: **Command download function failed...**")
return
if len(txt) > 20:
txt = txt[:20] + " ..."
def make_mc(b, txt):
image = Image.open(b).convert("RGBA")
draw = ImageDraw.Draw(image)
font_path = str(bundled_data_path(self)) + "/Minecraftia.ttf"
font = ImageFont.truetype(font_path, 17)
draw.text((60, 30), txt, (255, 255, 255), font=font)
final = BytesIO()
image.save(final, "png")
file_size = final.tell()
final.seek(0)
file = discord.File(final, filename="achievement.png")
final.close()
return file, file_size
try:
task = self.bot.loop.run_in_executor(None, make_mc, b, txt)
file, file_size = await asyncio.wait_for(task, timeout=60)
except asyncio.TimeoutError:
return await ctx.send("That image is too large.")
except Exception:
return await ctx.send("I cannot make that minecraft achievement.")
await self.safe_send(ctx, None, file, file_size)
@commands.command(aliases=["wm"])
@commands.bot_has_permissions(attach_files=True)
async def watermark(
self,
ctx,
urls: ImageFinder = None,
mark: str = None,
x: int = 0,
y: int = 0,
transparency: Union[int, float] = 0,
):
"""
Add a watermark to an image
`[urls]` are the image urls or users or previous images in chat to add a watermark to.
`[mark]` is the image to use as the watermark. By default the brazzers icon is used.
`[x=0]` is the height the watermark will be at between 0 and 100% where 0 is the top and 100 is the bottom of the image.
`[y=0]` is the width the watermark will be at between 0 and 100% where 0 is the left and 100 is the right of the image.
`[transparency=0]` is a value from 0 to 100 which determines the percentage the watermark will be transparent.
"""
if urls is None:
urls = await ImageFinder().search_for_images(ctx)
url = urls[0]
async with ctx.typing():
if x > 100:
x = 100
if x < 0:
x = 0
if y > 100:
y = 100
if y < 0:
y = 0
            if transparency > 1 and transparency <= 100:
                transparency = transparency * 0.01
if transparency < 0:
transparency = 0
if transparency > 100:
transparency = 1
b, mime = await self.bytes_download(url)
if mime not in self.image_mimes + self.gif_mimes and not isinstance(
url, discord.Asset
):
return await ctx.send("That is not a valid image.")
if mark == "brazzers" or mark is None:
wmm, mime = await self.bytes_download("https://i.imgur.com/YAb1RMZ.png")
if wmm is False or b is False:
await ctx.send(":warning: **Command download function failed...**")
return
wmm.name = "watermark.png"
wm_gif = False
else:
wmm, mime = await self.bytes_download(mark)
wm_gif = mime in self.gif_mimes
if wmm is False or b is False:
await ctx.send(":warning: **Command download function failed...**")
return
wmm.name = "watermark.png"
if wm_gif:
wmm.name = "watermark.gif"
def add_watermark(b, wmm, x, y, transparency, wm_gif=False):
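                # Three cases: still image + still mark, animated base + still mark (stamp
                # every frame), and animated mark (overlay each mark frame on the base).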
final = BytesIO()
with wand.image.Image(file=b) as img:
is_gif = len(getattr(img, "sequence")) > 1
if not is_gif and not wm_gif:
log.debug("There are no gifs")
with img.clone() as new_img:
new_img.transform(resize="65536@")
final_x = int(new_img.height * (x * 0.01))
final_y = int(new_img.width * (y * 0.01))
with wand.image.Image(file=wmm) as wm:
new_img.watermark(
image=wm, left=final_x, top=final_y, transparency=transparency
)
new_img.save(file=final)
elif is_gif and not wm_gif:
log.debug("The base image is a gif")
wm = wand.image.Image(file=wmm)
with wand.image.Image() as new_image:
with img.clone() as new_img:
for frame in new_img.sequence:
frame.transform(resize="65536@")
final_x = int(frame.height * (x * 0.01))
final_y = int(frame.width * (y * 0.01))
frame.watermark(
image=wm,
left=final_x,
top=final_y,
transparency=transparency,
)
new_image.sequence.append(frame)
new_image.save(file=final)
else:
log.debug("The mark is a gif")
with wand.image.Image() as new_image:
with wand.image.Image(file=wmm) as new_img:
for frame in new_img.sequence:
with img.clone() as clone:
if is_gif:
clone = clone.sequence[0]
# we only care about the first frame of the gif in this case
else:
clone = clone.convert("gif")
clone.transform(resize="65536@")
final_x = int(clone.height * (x * 0.01))
final_y = int(clone.width * (y * 0.01))
clone.watermark(
image=frame,
left=final_x,
top=final_y,
transparency=transparency,
)
new_image.sequence.append(clone)
new_image.dispose = "background"
with new_image.sequence[-1] as new_frame:
new_frame.delay = frame.delay
new_image.save(file=final)
size = final.tell()
final.seek(0)
filename = f"watermark.{'gif' if is_gif or wm_gif else 'png'}"
file = discord.File(final, filename=filename)
final.close()
return file, size
try:
task = ctx.bot.loop.run_in_executor(
None, add_watermark, b, wmm, x, y, transparency, wm_gif
)
file, file_size = await asyncio.wait_for(task, timeout=120)
except asyncio.TimeoutError:
return await ctx.send("That image is too large.")
await self.safe_send(ctx, None, file, file_size)
def do_glitch(self, b, amount, seed, iterations):
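        # Still images are corrupted through jpglitch; animated gifs instead get the
        # bytes between the header and the first extension block randomized directly.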
img = Image.open(b)
        is_gif = getattr(img, "is_animated", False)  # stills (e.g. JPEG) have no is_animated attribute
if not is_gif:
img = img.convert("RGB")
b = BytesIO()
img.save(b, format="JPEG")
b.seek(0)
img = jpglitch.Jpeg(bytearray(b.getvalue()), amount, seed, iterations)
final = BytesIO()
final.name = "glitch.jpg"
img.save_image(final)
file_size = final.tell()
final.seek(0)
file = discord.File(final, filename="glitch.jpeg")
final.close()
# img.close()
else:
# img.close()
b = bytearray(b.getvalue())
            for x in range(0, len(b)):  # sys.getsizeof() overshoots the buffer length
if b[x] == 33:
if b[x + 1] == 255:
end = x
break
elif b[x + 1] == 249:
end = x
break
for x in range(13, end):
b[x] = random.randint(0, 255)
            final = BytesIO(b)
            file_size = len(b)
            file = discord.File(final, filename="glitch.gif")
final.close()
return file, file_size
@commands.command(aliases=["jpglitch"])
@commands.cooldown(2, 5)
@commands.bot_has_permissions(attach_files=True)
async def glitch(
self,
ctx,
urls: ImageFinder = None,
iterations: int = None,
amount: int = None,
seed: int = None,
):
"""Glitch a gif or png"""
if urls is None:
urls = await ImageFinder().search_for_images(ctx)
url = urls[0]
async with ctx.typing():
if iterations is None:
iterations = random.randint(1, 30)
if amount is None:
amount = random.randint(1, 20)
elif amount > 99:
amount = 99
if seed is None:
seed = random.randint(1, 20)
b, mime = await self.bytes_download(url)
gif = mime in self.gif_mimes
if b is False:
await ctx.send(":warning: **Command download function failed...**")
return
task = self.bot.loop.run_in_executor(None, self.do_glitch, b, amount, seed, iterations)
try:
file, file_size = await asyncio.wait_for(task, timeout=60)
except (asyncio.TimeoutError, PIL.UnidentifiedImageError):
return await ctx.send(
"The image is either too large or image filetype is unsupported."
)
msg = f"Iterations: `{iterations}` | Amount: `{amount}` | Seed: `{seed}`"
await self.safe_send(ctx, msg, file, file_size)
@commands.command(aliases=["pixel"])
@commands.bot_has_permissions(attach_files=True)
async def pixelate(self, ctx, urls: ImageFinder = None, pixels: int = 9):
"""Pixelate an image"""
if urls is None:
urls = await ImageFinder().search_for_images(ctx)
url = urls[0]
async with ctx.typing():
            b, mime = await self.bytes_download(url)
            if b is False:
                await ctx.send(":warning: **Command download function failed...**")
return
if mime in self.gif_mimes:
task = ctx.bot.loop.run_in_executor(None, self.make_pixel_gif, b, pixels)
else:
task = ctx.bot.loop.run_in_executor(None, self.make_pixel, b, pixels)
try:
file, file_size = await asyncio.wait_for(task, timeout=60)
except asyncio.TimeoutError:
return await ctx.send("The image is too large.")
await self.safe_send(ctx, None, file, file_size)
def make_pixel(self, b: BytesIO, pixels: int) -> Tuple[discord.File, int]:
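        # Downscale then upscale with NEAREST to pixelate, and draw a black grid
        # between the resulting blocks.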
bg = (0, 0, 0)
img = Image.open(b)
img = img.resize((int(img.size[0] / pixels), int(img.size[1] / pixels)), Image.NEAREST)
img = img.resize((int(img.size[0] * pixels), int(img.size[1] * pixels)), Image.NEAREST)
load = img.load()
for i in range(0, img.size[0], pixels):
for j in range(0, img.size[1], pixels):
for r in range(pixels):
load[i + r, j] = bg
load[i, j + r] = bg
final = BytesIO()
img.save(final, "png")
file_size = final.tell()
final.seek(0)
file = discord.File(final, filename="pixelated.png")
final.close()
img.close()
return file, file_size
    def make_pixel_gif(self, b, pixels):
try:
image = Image.open(b)
gif_list = [frame.copy() for frame in ImageSequence.Iterator(image)]
except IOError:
return ":warning: Cannot load gif."
bg = (0, 0, 0)
img_list = []
for frame in gif_list:
img = Image.new("RGBA", frame.size)
img.paste(frame, (0, 0))
img = img.resize((int(img.size[0] / pixels), int(img.size[1] / pixels)), Image.NEAREST)
img = img.resize((int(img.size[0] * pixels), int(img.size[1] * pixels)), Image.NEAREST)
load = img.load()
for i in range(0, img.size[0], pixels):
for j in range(0, img.size[1], pixels):
for r in range(pixels):
load[i + r, j] = bg
load[i, j + r] = bg
img_list.append(img)
        final = BytesIO()
        img_list[0].save(
            final, format="GIF", save_all=True, append_images=img_list[1:], duration=0, loop=0
        )
file_size = final.tell()
final.seek(0)
file = discord.File(final, filename="pixelated.gif")
final.close()
img.close()
return file, file_size
def do_waaw(self, b):
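        # Rebuild the image from its right half plus a mirrored copy of it
        # (wand handles the crop/rotate, numpy hstack joins the halves).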
f = BytesIO()
f2 = BytesIO()
with wand.image.Image(file=b) as img:
h1 = img.clone()
width = int(img.width / 2) if int(img.width / 2) > 0 else 1
h1.crop(width=width, height=int(img.height), gravity="east")
h2 = h1.clone()
h1.rotate(degree=180)
h1.flip()
h1.save(file=f)
h2.save(file=f2)
f.seek(0)
f2.seek(0)
list_im = [f2, f]
imgs = [ImageOps.mirror(Image.open(i).convert("RGBA")) for i in list_im]
min_shape = sorted([(np.sum(i.size), i.size) for i in imgs])[0][1]
imgs_comb = np.hstack([np.asarray(i.resize(min_shape)) for i in imgs])
imgs_comb = Image.fromarray(imgs_comb)
final = BytesIO()
imgs_comb.save(final, "png")
file_size = final.tell()
final.seek(0)
file = discord.File(final, filename="waaw.png")
f.close()
f2.close()
final.close()
return file, file_size
# Thanks to Iguniisu#9746 for the idea
@commands.command(aliases=["magik3", "mirror"])
@commands.cooldown(2, 5, commands.BucketType.user)
@commands.bot_has_permissions(attach_files=True)
async def waaw(self, ctx, urls: ImageFinder = None):
"""Mirror an image vertically right to left"""
if urls is None:
urls = await ImageFinder().search_for_images(ctx)
url = urls[0]
async with ctx.typing():
b, mime = await self.bytes_download(url)
if b is False:
await ctx.send(":warning: **Command download function failed...**")
return
task = self.bot.loop.run_in_executor(None, self.do_waaw, b)
try:
file, file_size = await asyncio.wait_for(task, timeout=60)
except (asyncio.TimeoutError, wand.exceptions.MissingDelegateError):
return await ctx.send(
"The image is either too large or you're missing delegates for this image format."
)
await self.safe_send(ctx, None, file, file_size)
def do_haah(self, b):
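        # Same idea using the left half: duplicate it, rotate/flip the copy,
        # then join the two halves side by side.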
f = BytesIO()
f2 = BytesIO()
with wand.image.Image(file=b) as img:
h1 = img.clone()
h1.transform("50%x100%")
h2 = h1.clone()
h2.rotate(degree=180)
h2.flip()
h1.save(file=f)
h2.save(file=f2)
f.seek(0)
f2.seek(0)
list_im = [f2, f]
imgs = [ImageOps.mirror(Image.open(i).convert("RGBA")) for i in list_im]
min_shape = sorted([(np.sum(i.size), i.size) for i in imgs])[0][1]
imgs_comb = np.hstack([np.asarray(i.resize(min_shape)) for i in imgs])
imgs_comb = Image.fromarray(imgs_comb)
final = BytesIO()
imgs_comb.save(final, "png")
file_size = final.tell()
final.seek(0)
file = discord.File(final, filename="haah.png")
f.close()
f2.close()
final.close()
return file, file_size
@commands.command(aliases=["magik4", "mirror2"])
@commands.cooldown(2, 5, commands.BucketType.user)
@commands.bot_has_permissions(attach_files=True)
async def haah(self, ctx, urls: ImageFinder = None):
"""Mirror an image vertically left to right"""
if urls is None:
urls = await ImageFinder().search_for_images(ctx)
url = urls[0]
async with ctx.typing():
b, mime = await self.bytes_download(url)
if b is False:
await ctx.send(":warning: **Command download function failed...**")
return
task = self.bot.loop.run_in_executor(None, self.do_haah, b)
try:
file, file_size = await asyncio.wait_for(task, timeout=60)
except (asyncio.TimeoutError, wand.exceptions.MissingDelegateError):
return await ctx.send(
"The image is either too large or you're missing delegates for this image format."
)
await self.safe_send(ctx, None, file, file_size)
def do_woow(self, b):
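        # Keep the top half of the image and stack a flipped copy of it underneath.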
f = BytesIO()
f2 = BytesIO()
with wand.image.Image(file=b) as img:
h1 = img.clone()
width = int(img.width) if int(img.width) > 0 else 1
h1.crop(width=width, height=int(img.height / 2), gravity="north")
h2 = h1.clone()
h2.rotate(degree=180)
h2.flop()
h1.save(file=f)
h2.save(file=f2)
f.seek(0)
f2.seek(0)
list_im = [f, f2]
imgs = [Image.open(i).convert("RGBA") for i in list_im]
min_shape = sorted([(np.sum(i.size), i.size) for i in imgs])[0][1]
imgs_comb = np.vstack([np.asarray(i.resize(min_shape)) for i in imgs])
imgs_comb = Image.fromarray(imgs_comb)
final = BytesIO()
imgs_comb.save(final, "png")
file_size = final.tell()
final.seek(0)
file = discord.File(final, filename="woow.png")
f.close()
f2.close()
final.close()
return file, file_size
@commands.command(aliases=["magik5", "mirror3"])
@commands.cooldown(2, 5, commands.BucketType.user)
@commands.bot_has_permissions(attach_files=True)
async def woow(self, ctx, urls: ImageFinder = None):
"""Mirror an image horizontally top to bottom"""
if urls is None:
urls = await ImageFinder().search_for_images(ctx)
url = urls[0]
async with ctx.typing():
b, mime = await self.bytes_download(url)
if b is False:
await ctx.send(":warning: **Command download function failed...**")
return
task = self.bot.loop.run_in_executor(None, self.do_woow, b)
try:
file, file_size = await asyncio.wait_for(task, timeout=60)
except (asyncio.TimeoutError, wand.exceptions.MissingDelegateError):
return await ctx.send(
"The image is either too large or you're missing delegates for this image format."
)
await self.safe_send(ctx, None, file, file_size)
def do_hooh(self, b):
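        # Keep the bottom half of the image and mirror it into the top half.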
f = BytesIO()
f2 = BytesIO()
with wand.image.Image(file=b) as img:
h1 = img.clone()
width = int(img.width) if int(img.width) > 0 else 1
h1.crop(width=width, height=int(img.height / 2), gravity="south")
h2 = h1.clone()
h1.rotate(degree=180)
h2.flop()
h1.save(file=f)
h2.save(file=f2)
f.seek(0)
f2.seek(0)
list_im = [f, f2]
imgs = [Image.open(i).convert("RGBA") for i in list_im]
min_shape = sorted([(np.sum(i.size), i.size) for i in imgs])[0][1]
imgs_comb = np.vstack([np.asarray(i.resize(min_shape)) for i in imgs])
imgs_comb = Image.fromarray(imgs_comb)
final = BytesIO()
imgs_comb.save(final, "png")
file_size = final.tell()
final.seek(0)
file = discord.File(final, filename="hooh.png")
f.close()
f2.close()
final.close()
return file, file_size
@commands.command(aliases=["magik6", "mirror4"])
@commands.cooldown(2, 5, commands.BucketType.user)
@commands.bot_has_permissions(attach_files=True)
async def hooh(self, ctx, urls: ImageFinder = None):
"""Mirror an image horizontally bottom to top"""
if urls is None:
urls = await ImageFinder().search_for_images(ctx)
url = urls[0]
async with ctx.typing():
b, mime = await self.bytes_download(url)
if b is False:
await ctx.send(":warning: **Command download function failed...**")
return
task = self.bot.loop.run_in_executor(None, self.do_hooh, b)
try:
file, file_size = await asyncio.wait_for(task, timeout=60)
except (asyncio.TimeoutError, wand.exceptions.MissingDelegateError):
return await ctx.send(
"The image is either too large or you're missing delegates for this image format."
)
await self.safe_send(ctx, None, file, file_size)
@commands.command()
@commands.bot_has_permissions(attach_files=True)
async def flipimg(self, ctx, urls: ImageFinder = None):
"""Rotate an image 180 degrees"""
if urls is None:
urls = await ImageFinder().search_for_images(ctx)
url = urls[0]
async with ctx.typing():
b, mime = await self.bytes_download(url)
if b is False:
await ctx.send(":warning: **Command download function failed...**")
return
def flip_img(b):
with Image.open(b) as img:
img = ImageOps.flip(img)
with BytesIO() as final:
img.save(final, "png")
file_size = final.tell()
final.seek(0)
file = discord.File(final, filename="flip.png")
return file, file_size
task = ctx.bot.loop.run_in_executor(None, flip_img, b)
try:
file, file_size = await asyncio.wait_for(task, timeout=60)
except (asyncio.TimeoutError, PIL.UnidentifiedImageError):
return await ctx.send(
"The image is either too large or image filetype is unsupported."
)
await self.safe_send(ctx, None, file, file_size)
@commands.command()
@commands.bot_has_permissions(attach_files=True)
async def flop(self, ctx, urls: ImageFinder = None):
"""Flip an image"""
if urls is None:
urls = await ImageFinder().search_for_images(ctx)
url = urls[0]
async with ctx.typing():
b, mime = await self.bytes_download(url)
if mime not in self.image_mimes and not isinstance(url, discord.Asset):
return await ctx.send("That is not a valid image!")
if b is False:
await ctx.send(":warning: **Command download function failed...**")
return
def flop_img(b):
with Image.open(b) as img:
img = ImageOps.mirror(img)
with BytesIO() as final:
img.save(final, "png")
file_size = final.tell()
final.seek(0)
file = discord.File(final, filename="flop.png")
return file, file_size
task = ctx.bot.loop.run_in_executor(None, flop_img, b)
try:
file, file_size = await asyncio.wait_for(task, timeout=60)
except asyncio.TimeoutError:
return await ctx.send("That image is too large.")
await self.safe_send(ctx, None, file, file_size)
@commands.command(aliases=["inverse", "negate"])
@commands.bot_has_permissions(attach_files=True)
async def invert(self, ctx, urls: ImageFinder = None):
"""Invert the colours of an image"""
if urls is None:
urls = await ImageFinder().search_for_images(ctx)
url = urls[0]
async with ctx.typing():
b, mime = await self.bytes_download(url)
if b is False:
await ctx.send(":warning: **Command download function failed...**")
return
def invert_img(b):
with Image.open(b).convert("RGB") as img:
img = ImageOps.invert(img)
with BytesIO() as final:
img.save(final, "png")
file_size = final.tell()
final.seek(0)
                        file = discord.File(final, filename="invert.png")
return file, file_size
task = ctx.bot.loop.run_in_executor(None, invert_img, b)
try:
file, file_size = await asyncio.wait_for(task, timeout=60)
except (asyncio.TimeoutError, PIL.UnidentifiedImageError):
return await ctx.send(
"That image is either too large or image filetype is unsupported."
)
await self.safe_send(ctx, None, file, file_size)
@commands.command()
@commands.bot_has_permissions(attach_files=True)
async def rotate(self, ctx, degrees: int = 90, urls: ImageFinder = None):
"""Rotate image X degrees"""
if urls is None:
urls = await ImageFinder().search_for_images(ctx)
url = urls[0]
async with ctx.typing():
b, mime = await self.bytes_download(url)
if not b:
return await ctx.send("That's not a valid image to rotate.")
def rotate_img(b, degrees):
with Image.open(b).convert("RGBA") as img:
img = img.rotate(int(degrees))
with BytesIO() as final:
img.save(final, "png")
file_size = final.tell()
final.seek(0)
file = discord.File(final, filename="rotate.png")
return file, file_size
task = ctx.bot.loop.run_in_executor(None, rotate_img, b, degrees)
try:
file, file_size = await asyncio.wait_for(task, timeout=60)
except (asyncio.TimeoutError, PIL.UnidentifiedImageError):
return await ctx.send(
"That image is either too large or image filetype is unsupported."
)
await self.safe_send(ctx, f"Rotated: `{degrees}°`", file, file_size)
| 40.321972 | 128 | 0.514529 |
0447f4da666c2bbe4f03d070932dc204637c1bfb | 1,024 | py | Python | pilgram/maven.py | akiomik/pilgram | c585ca4f7f08549842befdcd05dd7d9972f7b0a2 | ["Apache-2.0"] | 46 | 2019-03-21T21:41:36.000Z | 2022-01-19T16:01:03.000Z | pilgram/maven.py | akiomik/pilgram | c585ca4f7f08549842befdcd05dd7d9972f7b0a2 | ["Apache-2.0"] | 102 | 2019-03-21T21:41:05.000Z | 2022-03-21T19:04:22.000Z | pilgram/maven.py | akiomik/pilgram | c585ca4f7f08549842befdcd05dd7d9972f7b0a2 | ["Apache-2.0"] | 5 | 2019-05-28T02:40:38.000Z | 2020-10-30T22:22:01.000Z |
# Copyright 2019 Akiomi Kamakura
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pilgram import css
from pilgram import util
def maven(im):
"""Applies Maven filter.
Arguments:
im: An input image.
Returns:
The output image.
"""
cb = util.or_convert(im, 'RGB')
cs = util.fill(cb.size, [3, 230, 26, .2])
cr = css.blending.hue(cb, cs)
cr = css.sepia(cr, .25)
cr = css.brightness(cr, .95)
cr = css.contrast(cr, .95)
cr = css.saturate(cr, 1.5)
return cr
| 25.6 | 74 | 0.678711 |
26ee601a4c29ebd60f92b84e793e26b54ec15ab8 | 6,080 | py | Python | align-dlib.py | 4Dager/4DFace | aa9dd0d8d028b7ddb77212bc8488984377bb1fdc | ["Apache-2.0"] | null | null | null | align-dlib.py | 4Dager/4DFace | aa9dd0d8d028b7ddb77212bc8488984377bb1fdc | ["Apache-2.0"] | null | null | null | align-dlib.py | 4Dager/4DFace | aa9dd0d8d028b7ddb77212bc8488984377bb1fdc | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python2
import argparse
import cv2
import numpy as np
import os
import random
import shutil
import errno
import facelib
from facelib.data import iterImgs
fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '..', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
def write(vals, fName):
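    # Back up any existing output file, then write one comma-separated row per value.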
if os.path.isfile(fName):
print("{} exists. Backing up.".format(fName))
os.rename(fName, "{}.bak".format(fName))
with open(fName, 'w') as f:
for p in vals:
f.write(",".join(str(x) for x in p))
f.write("\n")
def computeMeanMain(args):
align = facelib.AlignDlib(args.dlibFacePredictor)
imgs = list(iterImgs(args.inputDir))
if args.numImages > 0:
imgs = random.sample(imgs, args.numImages)
facePoints = []
for img in imgs:
rgb = img.getRGB()
bb = align.getLargestFaceBoundingBox(rgb)
alignedPoints = align.align(rgb, bb)
if alignedPoints:
facePoints.append(alignedPoints)
facePointsNp = np.array(facePoints)
mean = np.mean(facePointsNp, axis=0)
std = np.std(facePointsNp, axis=0)
write(mean, "{}/mean.csv".format(args.modelDir))
write(std, "{}/std.csv".format(args.modelDir))
# Only import in this mode.
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.scatter(mean[:, 0], -mean[:, 1], color='k')
ax.axis('equal')
for i, p in enumerate(mean):
ax.annotate(str(i), (p[0] + 0.005, -p[1] + 0.005), fontsize=8)
plt.savefig("{}/mean.png".format(args.modelDir))
def mkdirP(path):
assert path is not None
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def alignMain(args):
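    # Align every input image to the chosen landmark set and write PNGs to outputDir,
    # optionally falling back to the deep-funneled LFW copy when alignment fails.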
mkdirP(args.outputDir)
imgs = list(iterImgs(args.inputDir))
random.shuffle(imgs)
landmarkMap = {
'outerEyesAndNose': facelib.AlignDlib.OUTER_EYES_AND_NOSE,
'innerEyesAndBottomLip': facelib.AlignDlib.INNER_EYES_AND_BOTTOM_LIP
}
if args.landmarks not in landmarkMap:
raise Exception("Landmarks unrecognized: {}".format(args.landmarks))
landmarkIndices = landmarkMap[args.landmarks]
align = facelib.AlignDlib(args.dlibFacePredictor)
nFallbacks = 0
for imgObject in imgs:
print("=== {} ===".format(imgObject.path))
outDir = os.path.join(args.outputDir, imgObject.cls)
mkdirP(outDir)
outputPrefix = os.path.join(outDir, imgObject.name)
imgName = outputPrefix + ".png"
if os.path.isfile(imgName):
if args.verbose:
print(" + Already found, skipping.")
else:
rgb = imgObject.getRGB()
if rgb is None:
if args.verbose:
print(" + Unable to load.")
outRgb = None
else:
outRgb = align.align(args.size, rgb,
landmarkIndices=landmarkIndices,
skipMulti=args.skipMulti)
if outRgb is None and args.verbose:
print(" + Unable to align.")
if args.fallbackLfw and outRgb is None:
nFallbacks += 1
deepFunneled = "{}/{}.jpg".format(os.path.join(args.fallbackLfw,
imgObject.cls),
imgObject.name)
shutil.copy(deepFunneled, "{}/{}.jpg".format(os.path.join(args.outputDir,
imgObject.cls),
imgObject.name))
if outRgb is not None:
if args.verbose:
print(" + Writing aligned file to disk.")
outBgr = cv2.cvtColor(outRgb, cv2.COLOR_RGB2BGR)
cv2.imwrite(imgName, outBgr)
if args.fallbackLfw:
print('nFallbacks:', nFallbacks)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('inputDir', type=str, help="Input image directory.")
parser.add_argument('--dlibFacePredictor', type=str, help="Path to dlib's face predictor.",
default="./models/shape_predictor_68_face_landmarks.dat")
subparsers = parser.add_subparsers(dest='mode', help="Mode")
computeMeanParser = subparsers.add_parser(
'computeMean', help='Compute the image mean of a directory of images.')
computeMeanParser.add_argument('--numImages', type=int, help="The number of images. '0' for all images.",
default=0) # <= 0 ===> all imgs
alignmentParser = subparsers.add_parser(
'align', help='Align a directory of images.')
alignmentParser.add_argument('landmarks', type=str,
choices=['outerEyesAndNose',
'innerEyesAndBottomLip',
'eyes_1'],
help='The landmarks to align to.')
alignmentParser.add_argument(
'outputDir', type=str, help="Output directory of aligned images.")
alignmentParser.add_argument('--size', type=int, help="Default image size.",
default=96)
alignmentParser.add_argument('--fallbackLfw', type=str,
help="If alignment doesn't work, fallback to copying the deep funneled version from this directory..")
alignmentParser.add_argument(
'--skipMulti', action='store_true', help="Skip images with more than one face.")
alignmentParser.add_argument('--verbose', action='store_true')
args = parser.parse_args()
if args.mode == 'computeMean':
computeMeanMain(args)
else:
alignMain(args)
| 35.976331 | 135 | 0.573684 |
51bab872e320c798ae7f372c1ac1ef8e419f882e | 970 | py | Python | metriq/config.py | unitaryfund/metriq-client | 7d8831d5015baa490ec77a04ea704d2e9aa9d8d0 | ["Apache-2.0"] | null | null | null | metriq/config.py | unitaryfund/metriq-client | 7d8831d5015baa490ec77a04ea704d2e9aa9d8d0 | ["Apache-2.0"] | null | null | null | metriq/config.py | unitaryfund/metriq-client | 7d8831d5015baa490ec77a04ea704d2e9aa9d8d0 | ["Apache-2.0"] | null | null | null |
from pathlib import Path
from tea_client.config import TeaClientConfig
from tea_console.config import TeaConsoleConfig, ConfigField
class Config(TeaConsoleConfig, TeaClientConfig):
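    # Combines the tea_console/tea_client configs: ENTRIES maps settings to sections of
    # ~/.metriq/metriq.ini, on top of the metriq server URL and auth token defaults.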
ENTRIES = {
**TeaConsoleConfig.ENTRIES,
"server_url": ConfigField(section="server", option="url"),
"api_version": ConfigField(
section="server", option="api_version", type=int
),
"token_access": ConfigField(section="auth", option="token_access"),
"token_refresh": ConfigField(section="auth", option="token_refresh"),
}
def __init__(self):
# Path to the configuration file
self.config_dir = (
Path("~").expanduser() / ".metriq"
).absolute()
TeaClientConfig.__init__(
self, server_url="http://18.190.41.255", api_version=1
)
TeaConsoleConfig.__init__(
self, config_file=self.config_dir / "metriq.ini"
)
config = Config()
| 30.3125 | 77 | 0.635052 |
315e7f422e1c1f2b118225794bf59cea653deb4f | 510 | py | Python | test/test_utils.py | vishoo7/TwitterAutoReply | 268c7da6176381714a0676602513af1dbaa22078 | ["MIT"] | 2 | 2016-10-29T14:44:31.000Z | 2018-09-10T05:31:04.000Z | test/test_utils.py | vishoo7/TwitterAutoReply | 268c7da6176381714a0676602513af1dbaa22078 | ["MIT"] | 1 | 2016-10-17T19:16:38.000Z | 2016-10-17T19:16:38.000Z | test/test_utils.py | vishoo7/TwitterAutoReply | 268c7da6176381714a0676602513af1dbaa22078 | ["MIT"] | null | null | null |
from utils import gen_hashtags
class TestUtils:
def test_gen_hashtags(self):
single_hashtag = gen_hashtags(['test'])
single_hashtag2 = gen_hashtags(['123'])
multiple_hashtags = gen_hashtags(['foo', 'bar', 'baz'])
multiple_hashtags2 = gen_hashtags(['quux', '123', 'war&peace'])
assert single_hashtag == '#test'
assert single_hashtag2 == '#123'
assert multiple_hashtags == '#foo #bar #baz'
assert multiple_hashtags2 == '#quux #123 #war&peace'
| 39.230769 | 71 | 0.641176 |
0db82ed617c8d7e0af4e09499898c04187c71cfa | 445 | py | Python | gui/app.py | ManojG13/OS-Simulator | 0a7dc42ffbdf4db6182e6988059b2e9207fac3f4 | ["MIT"] | null | null | null | gui/app.py | ManojG13/OS-Simulator | 0a7dc42ffbdf4db6182e6988059b2e9207fac3f4 | ["MIT"] | null | null | null | gui/app.py | ManojG13/OS-Simulator | 0a7dc42ffbdf4db6182e6988059b2e9207fac3f4 | ["MIT"] | null | null | null |
from flask import Flask , jsonify, render_template, request
import json
app = Flask(__name__)
@app.route('/')
@app.route('/index')
def index():
print ("Here")
return render_template('data.html')
@app.route('/api/gateway',methods=['POST'])
def gateway():
print("Ha")
data = json.loads(request.form['data'])
print(data)
print(data[0]['arrivaltime'])
return render_template('results.html')
if __name__ == "__main__":
app.run(debug=True)
| 21.190476 | 59 | 0.698876 |
ce3642c5cfe21f0713409f704477e83f3ddccad2 | 41,768 | py | Python | scikit-learn-weighted_kde/sklearn/ensemble/tests/test_forest.py | RTHMaK/git-squash-master | 76c4c8437dd18114968e69a698f4581927fcdabf | ["BSD-2-Clause"] | 1 | 2016-10-24T13:36:23.000Z | 2016-10-24T13:36:23.000Z | scikit-learn-weighted_kde/sklearn/ensemble/tests/test_forest.py | RTHMaK/git-squash-master | 76c4c8437dd18114968e69a698f4581927fcdabf | ["BSD-2-Clause"] | null | null | null | scikit-learn-weighted_kde/sklearn/ensemble/tests/test_forest.py | RTHMaK/git-squash-master | 76c4c8437dd18114968e69a698f4581927fcdabf | ["BSD-2-Clause"] | null | null | null |
"""
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import combinations
from itertools import product
import numpy as np
from scipy.misc import comb
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.fixes import bincount
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also make a hastie_10_2 dataset
hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
hastie_X = hastie_X.astype(np.float32)
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion,
random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(name, criterion, X, y):
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=20, criterion=criterion,
random_state=0)
est.fit(X, y)
importances = est.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
# XXX: Remove this test in 0.19 after transform support to estimators
# is removed.
X_new = assert_warns(
DeprecationWarning, est.transform, X, threshold="mean")
assert_less(0 < X_new.shape[1], X.shape[1])
# Check with parallel
importances = est.feature_importances_
est.set_params(n_jobs=2)
    importances_parallel = est.feature_importances_
    assert_array_almost_equal(importances, importances_parallel)
# Check with sample weights
sample_weight = check_random_state(0).randint(1, 10, len(X))
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=sample_weight)
importances = est.feature_importances_
assert_true(np.all(importances >= 0.0))
for scale in [0.5, 10, 100]:
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=scale * sample_weight)
importances_bis = est.feature_importances_
assert_less(np.abs(importances - importances_bis).mean(), 0.001)
@skip_if_32bit
def test_importances():
X, y = datasets.make_classification(n_samples=500, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name, criterion in product(FOREST_CLASSIFIERS, ["gini", "entropy"]):
yield check_importances, name, criterion, X, y
for name, criterion in product(FOREST_REGRESSORS, ["mse", "friedman_mse"]):
yield check_importances, name, criterion, X, y
def test_importances_asymptotic():
# Check whether variable importances of totally randomized trees
# converge towards their theoretical values (See Louppe et al,
# Understanding variable importances in forests of randomized trees, 2013).
def binomial(k, n):
return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
def entropy(samples):
n_samples = len(samples)
entropy = 0.
for count in bincount(samples):
p = 1. * count / n_samples
if p > 0:
entropy -= p * np.log2(p)
return entropy
def mdi_importance(X_m, X, y):
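        # Theoretical MDI importance of feature X_m: average the entropy reduction of y
        # over every conditioning subset B and value assignment b (Louppe et al., 2013).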
n_samples, n_features = X.shape
features = list(range(n_features))
features.pop(X_m)
values = [np.unique(X[:, i]) for i in range(n_features)]
imp = 0.
for k in range(n_features):
# Weight of each B of size k
coef = 1. / (binomial(k, n_features) * (n_features - k))
# For all B of size k
for B in combinations(features, k):
# For all values B=b
for b in product(*[values[B[j]] for j in range(k)]):
mask_b = np.ones(n_samples, dtype=np.bool)
for j in range(k):
mask_b &= X[:, B[j]] == b[j]
X_, y_ = X[mask_b, :], y[mask_b]
n_samples_b = len(X_)
if n_samples_b > 0:
children = []
for xi in values[X_m]:
mask_xi = X_[:, X_m] == xi
children.append(y_[mask_xi])
imp += (coef
* (1. * n_samples_b / n_samples) # P(B=b)
* (entropy(y_) -
sum([entropy(c) * len(c) / n_samples_b
for c in children])))
return imp
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 2],
[1, 0, 1, 1, 0, 1, 1, 3],
[0, 1, 1, 1, 0, 1, 0, 4],
[1, 1, 0, 1, 0, 1, 1, 5],
[1, 1, 0, 1, 1, 1, 1, 6],
[1, 0, 1, 0, 0, 1, 0, 7],
[1, 1, 1, 1, 1, 1, 1, 8],
[1, 1, 1, 1, 0, 1, 1, 9],
[1, 1, 1, 0, 1, 1, 1, 0]])
X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7]
n_features = X.shape[1]
# Compute true importances
true_importances = np.zeros(n_features)
for i in range(n_features):
true_importances[i] = mdi_importance(i, X, y)
# Estimate importances with totally randomized trees
clf = ExtraTreesClassifier(n_estimators=500,
max_features=1,
criterion="entropy",
random_state=0).fit(X, y)
importances = sum(tree.tree_.compute_feature_importances(normalize=False)
for tree in clf.estimators_) / clf.n_estimators
# Check correctness
assert_almost_equal(entropy(y), sum(importances))
assert_less(np.abs(true_importances - importances).mean(), 0.01)
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
# Check pickability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = np.random.randint(0, 2, 1000)
X[:, 1] = np.random.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name):
X, y = hastie_X, hastie_y
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1, random_state=0).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1,
random_state=0).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name
def check_min_samples_split(name):
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_split=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=0).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=1.1).fit, X, y)
est = ForestEstimator(min_samples_split=10, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_split=0.5, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
def test_min_samples_split():
for name in FOREST_ESTIMATORS:
yield check_min_samples_split, name
def check_min_samples_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain more than leaf_count training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=0).fit, X, y)
est = ForestEstimator(min_samples_leaf=5, n_estimators=1, random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_leaf=0.25, n_estimators=1,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), len(X) * 0.25 - 1,
"Failed with {0}".format(name))
def test_min_samples_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name
def check_min_weight_fraction_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac, n_estimators=1,
random_state=0)
if "RandomForest" in name:
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(random_state=0,
n_samples=50)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(n_estimators=1,
random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
@ignore_warnings
def test_1d_input():
X = iris.data[:, 0]
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test that class_weight works for multi-output
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for subsample and balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight='subsample', random_state=0)
ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='auto', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
# Test if warm start with equal n_estimators does nothing and returns the
# same forest and raises a warning.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
assert_true(not(hasattr(clf_3, 'oob_score_')))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert(n_classes=15):
classifier = RandomForestClassifier(random_state=0, bootstrap=False)
X = np.eye(n_classes)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:n_classes]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(classifier.classes_, y)
assert_array_equal(result, y)
def check_decision_path(name):
X, y = hastie_X, hastie_y
n_samples = X.shape[0]
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
est.fit(X, y)
indicator, n_nodes_ptr = est.decision_path(X)
assert_equal(indicator.shape[1], n_nodes_ptr[-1])
assert_equal(indicator.shape[0], n_samples)
assert_array_equal(np.diff(n_nodes_ptr),
[e.tree_.node_count for e in est.estimators_])
# Assert that leaves index are correct
leaves = est.apply(X)
for est_id in range(leaves.shape[1]):
leave_indicator = [indicator[i, n_nodes_ptr[est_id] + j]
for i, j in enumerate(leaves[:, est_id])]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
def test_decision_path():
for name in FOREST_CLASSIFIERS:
yield check_decision_path, name
for name in FOREST_REGRESSORS:
yield check_decision_path, name
| 35.247257 | 83 | 0.655909 |
efd4fa057c5592bdd043422d383a7d0dc566a2bf | 818 | py | Python | ifttt-youtubedl.py | pathakamit88/scripts | 21f19203eff85c2ee244e48219f761617306280a | ["MIT"] | null | null | null | ifttt-youtubedl.py | pathakamit88/scripts | 21f19203eff85c2ee244e48219f761617306280a | ["MIT"] | null | null | null | ifttt-youtubedl.py | pathakamit88/scripts | 21f19203eff85c2ee244e48219f761617306280a | ["MIT"] | null | null | null |
"""
Script to download all the liked youtube videos.
This script works in combination with IFTTT applet
'if-new-liked-video-then-append-to-a-text-file-in-dropbox'
"""
import os
import subprocess
def main():
filepath = os.path.expanduser('~/Dropbox/IFTTT/YouTube/youtubelikes.txt')
output_path = os.path.expanduser('~/Downloads/YouTube/')
try:
os.makedirs(output_path)
except OSError:
pass
    for line in open(filepath, 'r'):
line = line.strip()
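        # A brief note on the flags (based on standard youtube-dl behaviour):
        # '-f 22' requests the pre-merged 720p MP4 stream when YouTube offers it,
        # '--no-playlist' downloads only the single video, and the second command
        # extracts an MP3 audio track for the same URL.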
cmd = 'cd %s && youtube-dl -f 22 --no-playlist %s' % (output_path, line)
mp3cmd = 'cd %s && youtube-dl --extract-audio --audio-format mp3 %s' % (
output_path, line)
subprocess.Popen(mp3cmd, shell=True)
p = subprocess.Popen(cmd, shell=True)
p.communicate()
os.remove(filepath)
if __name__ == '__main__':
main()
| 23.371429 | 76 | 0.671149 |
2128e9e1e9fb1715c5198a6a70dcd5facdfd46b9 | 2,560 | py | Python | process.py | PAULUAPAUL/MasterThesis_AssociationRulesBiodiversity | 0855abc5ec4835a28be4aa305e5e45e73297b389 | ["MIT"] | null | null | null | process.py | PAULUAPAUL/MasterThesis_AssociationRulesBiodiversity | 0855abc5ec4835a28be4aa305e5e45e73297b389 | ["MIT"] | null | null | null | process.py | PAULUAPAUL/MasterThesis_AssociationRulesBiodiversity | 0855abc5ec4835a28be4aa305e5e45e73297b389 | ["MIT"] | null | null | null |
import pandas as pd
import numpy as np
from preprocess import *
import time
import sys
def ProcessData (minSup = 0.3,minConf = 0.5,minInt = 0.1,write_csv= True):
print('Minimum Support: ', minSup)
print('Minimum Confidence: ', minConf)
print('Minimum Interest: ', minInt)
print("FP-Growth")
start = time.time()
freqItemSet,rules = fpgrowth(data_grouped['SPECIES_ID2'].tolist(), minSupRatio=minSup, minConf=minConf)
end = time.time()
print('FP-growth needed: ',end - start, ' sec')
if write_csv:
write_csvfile_freq2('data/FPGrowth_minSup_'+ str(minSup) +'_minConf_'+ str(minConf)+'_freq.csv',freqItemSet)
write_csvfile_rules('data/FPGrowth_minSup_'+ str(minSup) +'_minConf_'+ str(minConf)+'_rules.csv',rules)
print("apriori")
start = time.time()
freqItemSet,rules = apriori(data_grouped['SPECIES_ID2'].tolist(), minSup=minSup, minConf=minConf)
end = time.time()
print('apriori needed: ',end - start, ' sec')
if write_csv:
write_csvfile_freq('data/Apriori_minSup_'+ str(minSup) +'_minConf_'+ str(minConf)+'_freq.csv',freqItemSet)
write_csvfile_rules('data/Apriori_minSup_'+ str(minSup) +'_minConf_'+ str(minConf)+'_rules.csv',rules)
print("apriori interest")
start = time.time()
freqItemSet,rules = apriori_interest(data_grouped['SPECIES_ID2'].tolist(), minSup=minSup, minConf=minConf, minInt=minInt)
end = time.time()
print('apriori int needed: ',end - start, ' sec')
if write_csv:
write_csvfile_freq('data/AprioriInt_minSup_'+ str(minSup) +'_minConf_'+ str(minConf)+'_freq.csv',freqItemSet)
write_csvfile_rules('data/AprioriInt_minSup_'+ str(minSup) +'_minConf_'+ str(minConf)+'_rules.csv',rules)
print("apriori tid")
start = time.time()
freqItemSet,rules = apriori_tid(data_grouped['SPECIES_ID2'].tolist(), minSup=minSup, minConf=minConf)
end = time.time()
print('apriori tid needed: ',end - start, ' sec')
if write_csv:
write_csvfile_freq('data/AprioriTid_minSup_'+ str(minSup) +'_minConf_'+ str(minConf)+'_freq.csv',freqItemSet)
write_csvfile_rules('data/AprioriTid_minSup_'+ str(minSup) +'_minConf_'+ str(minConf)+'_rules.csv',rules)
# main
PreprocessData('GBIF',r'C:\Users\Paul\Documents\Studium_PP\Master\Masterarbeit\Gitlab\master-thesis-data-mining\Datasets\GBIF\full records\0079101-200613084148143\occurrence.txt',True)
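# Note: range(1, 1, -1) below yields no values, so ProcessData() is never called
# as written; widen the bounds (for example range(10, 0, -1)) to sweep minSup
# from 1.0 down to 0.1.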
for i in range(1, 1, -1):
    ProcessData(minSup=float(i / 10), minConf=0.5, minInt=0.1, write_csv=True)
| 44.137931 | 185 | 0.687109 |
f08393ef0817bef46002709c8ee770010800d787 | 66 | py | Python | python/11_built-ins/2_input().py | jaimiles23/hacker_rank | 0580eac82e5d0989afabb5c2e66faf09713f891b | ["Apache-2.0"] | null | null | null | python/11_built-ins/2_input().py | jaimiles23/hacker_rank | 0580eac82e5d0989afabb5c2e66faf09713f891b | ["Apache-2.0"] | null | null | null | python/11_built-ins/2_input().py | jaimiles23/hacker_rank | 0580eac82e5d0989afabb5c2e66faf09713f891b | ["Apache-2.0"] | 3 | 2021-09-22T11:06:58.000Z | 2022-01-25T09:29:24.000Z |
# Solution to [Input()](https://www.hackerrank.com/challenges/input)
| 66 | 66 | 0.772727 |
3b857406627357515a3ff877809f987017228228 | 18 | py | Python | hhh.py | zhuoya123/Python | 64d3ffd39b197dbc35ba025b1b5709fbf6939ef2 | ["Apache-2.0"] | null | null | null | hhh.py | zhuoya123/Python | 64d3ffd39b197dbc35ba025b1b5709fbf6939ef2 | ["Apache-2.0"] | null | null | null | hhh.py | zhuoya123/Python | 64d3ffd39b197dbc35ba025b1b5709fbf6939ef2 | ["Apache-2.0"] | null | null | null |
print ('hello Gi')
| 18 | 18 | 0.666667 |
f387e7690b349e8e1b07ec21afd965b3c1178260 | 1,749 | py | Python | task_interface.py | kwoodham/sublime | ed3dcd7b0a22938d1d7de2d55db3cc2960ed117f | ["CC-BY-4.0"] | 1 | 2015-08-04T11:43:34.000Z | 2015-08-04T11:43:34.000Z | task_interface.py | kwoodham/sublime | ed3dcd7b0a22938d1d7de2d55db3cc2960ed117f | ["CC-BY-4.0"] | null | null | null | task_interface.py | kwoodham/sublime | ed3dcd7b0a22938d1d7de2d55db3cc2960ed117f | ["CC-BY-4.0"] | null | null | null |
import sublime_plugin
import sublime
# 20171211 - read in states via settings file - also use settings to define which states
# are considered to be active - so that only those states are displayed for "All Active"
# 20171106 - added in capability to choose all (including done) and all-active (not done).
# See corresponding way to search for inclusion in the list of states in "show_instances.py"
# 20171108 - corrected last line to pass a single element list instead of a text string if
# one item selected: self.a[index] --> [self.a[index]]
class TaskInterfaceCommand(sublime_plugin.TextCommand):
def run(self, edit):
settings = sublime.load_settings("Task.sublime-settings")
# List of task state keywords
keywords = settings.get('keywords')
# List of keywords that are considered to be active (including those waiting)
self.active = settings.get('active')
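        # Presumed settings layout (inferred from the usage below): 'keywords' is a
        # list of task-state strings and 'active' is a parallel list of booleans
        # flagging which of those states count as active.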
self.a = []
self.a.append("All-Active")
self.a.extend(keywords)
# timeout fix at https://github.com/tosher/Mediawiker/blob/master/mediawiker.py
sublime.set_timeout(lambda: self.view.window().show_quick_panel(self.a, self.on_done), 1)
def on_done(self, index):
if index == -1:
return
if self.a[index] == "All-Active":
self.a.remove("All-Active") # If selecting all active, parse out inactive tasks
b = []
for x in range(0, (len(self.a) - 1)):
if self.active[x]:
b.append(self.a[x])
self.a = b
self.view.run_command("show_instances", {"args": {'text': self.a}})
else:
self.view.run_command("show_instances", {"args": {'text': [self.a[index]]}})
| 38.866667 | 97 | 0.638651 |
2d64d44c841b9011b233eb180eb8fc3dc202bf6e | 25,180 | py | Python | ocs_ci/ocs/resources/storage_cluster.py | deepshikhaaa/ocs-ci | 5477ee85c395e808589ba45e3db34d4efe54fc97 | ["MIT"] | null | null | null | ocs_ci/ocs/resources/storage_cluster.py | deepshikhaaa/ocs-ci | 5477ee85c395e808589ba45e3db34d4efe54fc97 | ["MIT"] | null | null | null | ocs_ci/ocs/resources/storage_cluster.py | deepshikhaaa/ocs-ci | 5477ee85c395e808589ba45e3db34d4efe54fc97 | ["MIT"] | null | null | null |
"""
StorageCluster related functionalities
"""
import re
import logging
import tempfile
from jsonschema import validate
from ocs_ci.framework import config
from ocs_ci.ocs import constants, defaults, ocp
from ocs_ci.ocs.exceptions import ResourceNotFoundError, UnsupportedFeatureError
from ocs_ci.ocs.ocp import get_images, OCP
from ocs_ci.ocs.resources.ocs import get_ocs_csv
from ocs_ci.ocs.resources.pod import get_pods_having_label, get_osd_pods
from ocs_ci.ocs.resources.pvc import get_deviceset_pvcs
from ocs_ci.ocs.node import get_osds_per_node
from ocs_ci.utility import localstorage, utils, templating, kms as KMS
from ocs_ci.utility.rgwutils import get_rgw_count
from ocs_ci.utility.utils import run_cmd, get_ocp_version
from ocs_ci.ocs.ui.add_replace_device_ui import AddReplaceDeviceUI
from ocs_ci.ocs.ui.base_ui import login_ui, close_browser
log = logging.getLogger(__name__)
class StorageCluster(OCP):
"""
This class represent StorageCluster and contains all related
methods we need to do with StorageCluster.
"""
_has_phase = True
def __init__(self, resource_name="", *args, **kwargs):
"""
Constructor method for StorageCluster class
Args:
resource_name (str): Name of StorageCluster
"""
super(StorageCluster, self).__init__(
resource_name=resource_name, kind="StorageCluster", *args, **kwargs
)
def ocs_install_verification(
timeout=600,
skip_osd_distribution_check=False,
ocs_registry_image=None,
post_upgrade_verification=False,
version_before_upgrade=None,
):
"""
Perform steps necessary to verify a successful OCS installation
Args:
timeout (int): Number of seconds for timeout which will be used in the
checks used in this function.
skip_osd_distribution_check (bool): If true skip the check for osd
distribution.
ocs_registry_image (str): Specific image to check if it was installed
properly.
post_upgrade_verification (bool): Set to True if this function is
called after upgrade.
version_before_upgrade (float): Set to OCS version before upgrade
"""
from ocs_ci.ocs.node import get_nodes
from ocs_ci.ocs.resources.pvc import get_deviceset_pvcs
from ocs_ci.ocs.resources.pod import get_ceph_tools_pod, get_all_pods
from ocs_ci.ocs.cluster import validate_cluster_on_pvc
from ocs_ci.ocs.resources.fips import check_fips_enabled
number_of_worker_nodes = len(get_nodes())
namespace = config.ENV_DATA["cluster_namespace"]
log.info("Verifying OCS installation")
# Verify OCS CSV is in Succeeded phase
log.info("verifying ocs csv")
ocs_csv = get_ocs_csv()
# Verify if OCS CSV has proper version.
csv_version = ocs_csv.data["spec"]["version"]
ocs_version = config.ENV_DATA["ocs_version"]
log.info(f"Check if OCS version: {ocs_version} matches with CSV: {csv_version}")
assert (
ocs_version in csv_version
), f"OCS version: {ocs_version} mismatch with CSV version {csv_version}"
# Verify if OCS CSV has the same version in provided CI build.
ocs_registry_image = ocs_registry_image or config.DEPLOYMENT.get(
"ocs_registry_image"
)
if ocs_registry_image and ocs_registry_image.endswith(".ci"):
ocs_registry_image = ocs_registry_image.rsplit(":", 1)[1]
log.info(
f"Check if OCS registry image: {ocs_registry_image} matches with "
f"CSV: {csv_version}"
)
ignore_csv_mismatch = config.DEPLOYMENT.get("ignore_csv_mismatch")
if ignore_csv_mismatch:
log.info(
"The possible mismatch will be ignored as you deployed "
"the different version than the default version from the CSV"
)
else:
assert ocs_registry_image in csv_version, (
f"OCS registry image version: {ocs_registry_image} mismatch "
f"with CSV version {csv_version}"
)
# Verify OCS Cluster Service (ocs-storagecluster) is Ready
storage_cluster_name = config.ENV_DATA["storage_cluster_name"]
log.info("Verifying status of storage cluster: %s", storage_cluster_name)
storage_cluster = StorageCluster(
resource_name=storage_cluster_name,
namespace=namespace,
)
log.info(
f"Check if StorageCluster: {storage_cluster_name} is in" f"Succeeded phase"
)
storage_cluster.wait_for_phase(phase="Ready", timeout=timeout)
# Verify pods in running state and proper counts
log.info("Verifying pod states and counts")
pod = OCP(kind=constants.POD, namespace=namespace)
if not config.DEPLOYMENT["external_mode"]:
osd_count = int(
storage_cluster.data["spec"]["storageDeviceSets"][0]["count"]
) * int(storage_cluster.data["spec"]["storageDeviceSets"][0]["replica"])
rgw_count = None
if config.ENV_DATA.get("platform") in constants.ON_PREM_PLATFORMS:
rgw_count = get_rgw_count(
ocs_version, post_upgrade_verification, version_before_upgrade
)
min_eps = constants.MIN_NB_ENDPOINT_COUNT_POST_DEPLOYMENT
max_eps = (
constants.MAX_NB_ENDPOINT_COUNT
if float(config.ENV_DATA["ocs_version"]) >= 4.6
else 1
)
if config.ENV_DATA.get("platform") == constants.IBM_POWER_PLATFORM:
min_eps = 1
max_eps = 1
nb_db_label = (
constants.NOOBAA_DB_LABEL_46_AND_UNDER
if float(config.ENV_DATA["ocs_version"]) < 4.7
else constants.NOOBAA_DB_LABEL_47_AND_ABOVE
)
resources_dict = {
nb_db_label: 1,
constants.OCS_OPERATOR_LABEL: 1,
constants.OPERATOR_LABEL: 1,
constants.NOOBAA_OPERATOR_POD_LABEL: 1,
constants.NOOBAA_CORE_POD_LABEL: 1,
constants.NOOBAA_ENDPOINT_POD_LABEL: min_eps,
}
if not config.DEPLOYMENT["external_mode"]:
resources_dict.update(
{
constants.MON_APP_LABEL: 3,
constants.CSI_CEPHFSPLUGIN_LABEL: number_of_worker_nodes,
constants.CSI_CEPHFSPLUGIN_PROVISIONER_LABEL: 2,
constants.CSI_RBDPLUGIN_LABEL: number_of_worker_nodes,
constants.CSI_RBDPLUGIN_PROVISIONER_LABEL: 2,
constants.OSD_APP_LABEL: osd_count,
constants.MGR_APP_LABEL: 1,
constants.MDS_APP_LABEL: 2,
constants.RGW_APP_LABEL: rgw_count,
}
)
for label, count in resources_dict.items():
if label == constants.RGW_APP_LABEL:
            if config.ENV_DATA.get("platform") not in constants.ON_PREM_PLATFORMS:
continue
assert pod.wait_for_resource(
condition=constants.STATUS_RUNNING,
selector=label,
resource_count=count,
timeout=timeout,
)
nb_ep_pods = get_pods_having_label(
label=constants.NOOBAA_ENDPOINT_POD_LABEL,
namespace=defaults.ROOK_CLUSTER_NAMESPACE,
)
assert len(nb_ep_pods) <= max_eps, (
f"The number of running NooBaa endpoint pods ({len(nb_ep_pods)}) "
f"is greater than the maximum defined in the NooBaa CR ({max_eps})"
)
# Verify StorageClasses (1 ceph-fs, 1 ceph-rbd)
log.info("Verifying storage classes")
storage_class = OCP(kind=constants.STORAGECLASS, namespace=namespace)
storage_cluster_name = config.ENV_DATA["storage_cluster_name"]
required_storage_classes = {
f"{storage_cluster_name}-cephfs",
f"{storage_cluster_name}-ceph-rbd",
}
if config.DEPLOYMENT["external_mode"]:
required_storage_classes.update(
{
f"{storage_cluster_name}-ceph-rgw",
f'{config.ENV_DATA["cluster_namespace"]}.noobaa.io',
}
)
storage_classes = storage_class.get()
storage_class_names = {
item["metadata"]["name"] for item in storage_classes["items"]
}
assert required_storage_classes.issubset(storage_class_names)
# Verify OSDs are distributed
if not config.DEPLOYMENT["external_mode"]:
if not skip_osd_distribution_check:
log.info("Verifying OSDs are distributed evenly across worker nodes")
ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
osds = ocp_pod_obj.get(selector=constants.OSD_APP_LABEL)["items"]
deviceset_count = get_deviceset_count()
node_names = [osd["spec"]["nodeName"] for osd in osds]
for node in node_names:
assert (
not node_names.count(node) > deviceset_count
), "OSD's are not distributed evenly across worker nodes"
# Verify that CSI driver object contains provisioner names
log.info("Verifying CSI driver object contains provisioner names.")
csi_driver = OCP(kind="CSIDriver")
csi_drivers = {item["metadata"]["name"] for item in csi_driver.get()["items"]}
assert defaults.CSI_PROVISIONERS.issubset(csi_drivers)
# Verify node and provisioner secret names in storage class
log.info("Verifying node and provisioner secret names in storage class.")
if config.DEPLOYMENT["external_mode"]:
sc_rbd = storage_class.get(
resource_name=constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RBD
)
sc_cephfs = storage_class.get(
resource_name=(constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_CEPHFS)
)
else:
sc_rbd = storage_class.get(resource_name=constants.DEFAULT_STORAGECLASS_RBD)
sc_cephfs = storage_class.get(
resource_name=constants.DEFAULT_STORAGECLASS_CEPHFS
)
assert (
sc_rbd["parameters"]["csi.storage.k8s.io/node-stage-secret-name"]
== constants.RBD_NODE_SECRET
)
assert (
sc_rbd["parameters"]["csi.storage.k8s.io/provisioner-secret-name"]
== constants.RBD_PROVISIONER_SECRET
)
assert (
sc_cephfs["parameters"]["csi.storage.k8s.io/node-stage-secret-name"]
== constants.CEPHFS_NODE_SECRET
)
assert (
sc_cephfs["parameters"]["csi.storage.k8s.io/provisioner-secret-name"]
== constants.CEPHFS_PROVISIONER_SECRET
)
log.info("Verified node and provisioner secret names in storage class.")
ct_pod = get_ceph_tools_pod()
# https://github.com/red-hat-storage/ocs-ci/issues/3820
# Verify ceph osd tree output
if not (
config.DEPLOYMENT.get("ui_deployment") or config.DEPLOYMENT["external_mode"]
):
log.info(
"Verifying ceph osd tree output and checking for device set PVC names "
"in the output."
)
if config.DEPLOYMENT.get("local_storage"):
deviceset_pvcs = [osd.get_node() for osd in get_osd_pods()]
# removes duplicate hostname
deviceset_pvcs = list(set(deviceset_pvcs))
if config.ENV_DATA.get("platform") == constants.BAREMETAL_PLATFORM:
deviceset_pvcs = [
deviceset.replace(".", "-") for deviceset in deviceset_pvcs
]
else:
deviceset_pvcs = [pvc.name for pvc in get_deviceset_pvcs()]
osd_tree = ct_pod.exec_ceph_cmd(ceph_cmd="ceph osd tree", format="json")
schemas = {
"root": constants.OSD_TREE_ROOT,
"rack": constants.OSD_TREE_RACK,
"host": constants.OSD_TREE_HOST,
"osd": constants.OSD_TREE_OSD,
"region": constants.OSD_TREE_REGION,
"zone": constants.OSD_TREE_ZONE,
}
schemas["host"]["properties"]["name"] = {"enum": deviceset_pvcs}
for item in osd_tree["nodes"]:
validate(instance=item, schema=schemas[item["type"]])
if item["type"] == "host":
deviceset_pvcs.remove(item["name"])
assert not deviceset_pvcs, (
f"These device set PVCs are not given in ceph osd tree output "
f"- {deviceset_pvcs}"
)
log.info(
"Verified ceph osd tree output. Device set PVC names are given in the "
"output."
)
# TODO: Verify ceph osd tree output have osd listed as ssd
# TODO: Verify ceph osd tree output have zone or rack based on AZ
# Verify CSI snapshotter sidecar container is not present
# if the OCS version is < 4.6
if float(config.ENV_DATA["ocs_version"]) < 4.6:
log.info("Verifying CSI snapshotter is not present.")
provisioner_pods = get_all_pods(
namespace=defaults.ROOK_CLUSTER_NAMESPACE,
selector=[
constants.CSI_CEPHFSPLUGIN_PROVISIONER_LABEL,
constants.CSI_RBDPLUGIN_PROVISIONER_LABEL,
],
)
for pod_obj in provisioner_pods:
pod_info = pod_obj.get()
for container, image in get_images(data=pod_info).items():
assert ("snapshot" not in container) and ("snapshot" not in image), (
f"Snapshot container is present in {pod_obj.name} pod. "
f"Container {container}. Image {image}"
)
deployments = ocs_csv.get()["spec"]["install"]["spec"]["deployments"]
rook_ceph_operator_deployment = [
deployment_val
for deployment_val in deployments
if deployment_val["name"] == "rook-ceph-operator"
]
assert {"name": "CSI_ENABLE_SNAPSHOTTER", "value": "false"} in (
rook_ceph_operator_deployment[0]["spec"]["template"]["spec"]["containers"][
0
]["env"]
), "CSI_ENABLE_SNAPSHOTTER value is not set to 'false'."
log.info("Verified: CSI snapshotter is not present.")
# Verify pool crush rule is with "type": "zone"
if utils.get_az_count() == 3:
log.info("Verifying pool crush rule is with type: zone")
crush_dump = ct_pod.exec_ceph_cmd(ceph_cmd="ceph osd crush dump", format="")
pool_names = [
constants.METADATA_POOL,
constants.DEFAULT_BLOCKPOOL,
constants.DATA_POOL,
]
crush_rules = [
rule for rule in crush_dump["rules"] if rule["rule_name"] in pool_names
]
for crush_rule in crush_rules:
assert [
item for item in crush_rule["steps"] if item.get("type") == "zone"
], f"{crush_rule['rule_name']} is not with type as zone"
log.info("Verified - pool crush rule is with type: zone")
log.info("Validate cluster on PVC")
validate_cluster_on_pvc()
# Verify ceph health
log.info("Verifying ceph health")
health_check_tries = 20
health_check_delay = 30
if post_upgrade_verification:
# In case of upgrade with FIO we have to wait longer time to see
# health OK. See discussion in BZ:
# https://bugzilla.redhat.com/show_bug.cgi?id=1817727
health_check_tries = 180
assert utils.ceph_health_check(namespace, health_check_tries, health_check_delay)
if config.ENV_DATA.get("fips"):
# In case that fips is enabled when deploying,
# a verification of the installation of it will run
# on all running state pods
check_fips_enabled()
if config.ENV_DATA.get("encryption_at_rest"):
osd_encryption_verification()
if config.DEPLOYMENT.get("kms_deployment"):
kms = KMS.get_kms_deployment()
kms.post_deploy_verification()
storage_cluster_obj = get_storage_cluster()
is_flexible_scaling = (
storage_cluster_obj.get()["items"][0].get("spec").get("flexibleScaling", False)
)
if is_flexible_scaling is True:
failure_domain = storage_cluster_obj.data["items"][0]["status"]["failureDomain"]
assert failure_domain == "host", (
f"The expected failure domain on cluster with flexible scaling is 'host',"
f" the actaul failure domain is {failure_domain}"
)
def osd_encryption_verification():
"""
Verify if OSD encryption at rest if successfully deployed on OCS
Raises:
UnsupportedFeatureError: OCS version is smaller than 4.6
EnvironmentError: The OSD is not encrypted
"""
ocs_version = float(config.ENV_DATA["ocs_version"])
if ocs_version < 4.6:
error_message = "Encryption at REST can be enabled only on OCS >= 4.6!"
raise UnsupportedFeatureError(error_message)
osd_size = get_osd_size()
log.info("Get 'lsblk' command output on nodes where osd running")
osd_node_names = get_osds_per_node()
lsblk_output_list = []
for worker_node in osd_node_names:
lsblk_cmd = "oc debug node/" + worker_node + " -- chroot /host lsblk"
out = run_cmd(lsblk_cmd)
log.info(f"the output from lsblk command is {out}")
lsblk_output_list.append((out, len(osd_node_names[worker_node])))
log.info("Verify 'lsblk' command results are as expected")
for node_output_lsblk in lsblk_output_list:
node_lsb = node_output_lsblk[0].split()
log.info("Search 'crypt' in node_lsb list")
all_occurrences_crypt = [
index for index, element in enumerate(node_lsb) if element == "crypt"
]
log.info("Verify all OSDs encrypted on node")
if len(all_occurrences_crypt) != node_output_lsblk[1]:
raise EnvironmentError("OSD is not encrypted")
log.info("Verify that OSD is encrypted, and not another component like sda")
for index_crypt in all_occurrences_crypt:
encrypted_component_size = int(
(re.findall(r"\d+", node_lsb[index_crypt - 2]))[0]
)
if encrypted_component_size != osd_size:
raise EnvironmentError(
"The OSD is not encrypted, another mount encrypted."
)
def add_capacity(osd_size_capacity_requested):
"""
Add storage capacity to the cluster
Args:
osd_size_capacity_requested(int): Requested osd size capacity
Returns:
new storage device set count (int) : Returns True if all OSDs are in Running state
Note:
"StoragedeviceSets->count" represents the set of 3 OSDs.
That is, if there are 3 OSDs in the system then count will be 1.
If there are 6 OSDs then count is 2 and so on.
By changing this value,we can add extra devices to the cluster.
For example, if we want to expand the cluster by 3 more osds in a cluster that already has 3 osds,
we can set count as 2. So, with each increase of count by 1,
we get 3 OSDs extra added to the cluster.
This is how we are going to 'add capacity' via automation.
As we know that OCS has 3 way replica. That is, same data is placed in 3 OSDs.
Because of this, the total usable capacity for apps from 3 OSDs
will be the size of one OSD (all osds are of same size).
If we want to add more capacity to the cluster then we need to add 3 OSDs of same size
as that of the original OSD. add_capacity needs to accept the 'capacity_to_add' as an argument.
From this we need to arrive at storagedeviceSets -> count and then
"Patch" this count to get the required capacity to add.
To do so, we use following formula:
    storageDeviceSets->count = (capacity requested / osd capacity) + existing storageDeviceSets count
"""
osd_size_existing = get_osd_size()
device_sets_required = int(osd_size_capacity_requested / osd_size_existing)
old_storage_devices_sets_count = get_deviceset_count()
new_storage_devices_sets_count = int(
device_sets_required + old_storage_devices_sets_count
)
lvpresent = localstorage.check_local_volume()
ocp_version = get_ocp_version()
platform = config.ENV_DATA.get("platform", "").lower()
is_lso = config.DEPLOYMENT.get("local_storage")
if (
ocp_version == "4.7"
and (
platform == constants.AWS_PLATFORM or platform == constants.VSPHERE_PLATFORM
)
and (not is_lso)
):
logging.info("Add capacity via UI")
setup_ui = login_ui()
add_ui_obj = AddReplaceDeviceUI(setup_ui)
add_ui_obj.add_capacity_ui()
close_browser(setup_ui)
else:
if lvpresent:
ocp_obj = OCP(
kind="localvolume", namespace=config.ENV_DATA["local_storage_namespace"]
)
localvolume_data = ocp_obj.get(resource_name="local-block")
device_list = localvolume_data["spec"]["storageClassDevices"][0][
"devicePaths"
]
final_device_list = localstorage.get_new_device_paths(
device_sets_required, osd_size_capacity_requested
)
device_list.sort()
final_device_list.sort()
if device_list == final_device_list:
raise ResourceNotFoundError("No Extra device found")
param = f"""[{{ "op": "replace", "path": "/spec/storageClassDevices/0/devicePaths",
"value": {final_device_list}}}]"""
log.info(f"Final device list : {final_device_list}")
lvcr = localstorage.get_local_volume_cr()
log.info("Patching Local Volume CR...")
lvcr.patch(
resource_name=lvcr.get()["items"][0]["metadata"]["name"],
params=param.strip("\n"),
format_type="json",
)
localstorage.check_pvs_created(
int(len(final_device_list) / new_storage_devices_sets_count)
)
sc = get_storage_cluster()
# adding the storage capacity to the cluster
params = f"""[{{ "op": "replace", "path": "/spec/storageDeviceSets/0/count",
"value": {new_storage_devices_sets_count}}}]"""
sc.patch(
resource_name=sc.get()["items"][0]["metadata"]["name"],
params=params.strip("\n"),
format_type="json",
)
return new_storage_devices_sets_count
def get_storage_cluster(namespace=defaults.ROOK_CLUSTER_NAMESPACE):
"""
Get storage cluster name
Args:
namespace (str): Namespace of the resource
Returns:
storage cluster (obj) : Storage cluster object handler
"""
sc_obj = OCP(kind=constants.STORAGECLUSTER, namespace=namespace)
return sc_obj
def get_osd_count():
"""
Get osd count from Storage cluster
Returns:
int: osd count
"""
sc = get_storage_cluster()
return int(sc.get().get("items")[0]["spec"]["storageDeviceSets"][0]["count"]) * int(
sc.get().get("items")[0]["spec"]["storageDeviceSets"][0]["replica"]
)
def get_osd_size():
"""
Get osd size from Storage cluster
Returns:
int: osd size
"""
sc = get_storage_cluster()
size = (
sc.get()
.get("items")[0]
.get("spec")
.get("storageDeviceSets")[0]
.get("dataPVCTemplate")
.get("spec")
.get("resources")
.get("requests")
.get("storage")
)
    if size.isdigit() or config.DEPLOYMENT.get("local_storage"):
# In the case of UI deployment of LSO cluster, the value in StorageCluster CR
# is set to 1, so we can not take OSD size from there. For LSO we will return
# the size from PVC.
pvc = get_deviceset_pvcs()[0]
return int(pvc.get()["status"]["capacity"]["storage"][:-2])
else:
return int(size[:-2])
def get_deviceset_count():
"""
Get storageDeviceSets count from storagecluster
Returns:
int: storageDeviceSets count
"""
sc = get_storage_cluster()
return int(
sc.get().get("items")[0].get("spec").get("storageDeviceSets")[0].get("count")
)
def get_all_storageclass():
"""
Function for getting all storageclass excluding 'gp2' and 'flex'
Returns:
list: list of storageclass
"""
sc_obj = ocp.OCP(
kind=constants.STORAGECLASS, namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
result = sc_obj.get()
sample = result["items"]
storageclass = [
item
for item in sample
if (
item.get("metadata").get("name")
not in (constants.IGNORE_SC_GP2, constants.IGNORE_SC_FLEX)
)
]
return storageclass
def setup_ceph_debug():
"""
Set Ceph to run in debug log level using a ConfigMap.
This functionality is available starting OCS 4.7.
"""
ceph_debug_log_configmap_data = templating.load_yaml(
constants.CEPH_CONFIG_DEBUG_LOG_LEVEL_CONFIGMAP
)
ceph_debug_log_configmap_data["data"]["config"] = (
constants.ROOK_CEPH_CONFIG_VALUES + constants.CEPH_DEBUG_CONFIG_VALUES
)
ceph_configmap_yaml = tempfile.NamedTemporaryFile(
mode="w+", prefix="config_map", delete=False
)
templating.dump_data_to_temp_yaml(
ceph_debug_log_configmap_data, ceph_configmap_yaml.name
)
log.info("Setting Ceph to work in debug log level using a new ConfigMap resource")
run_cmd(f"oc create -f {ceph_configmap_yaml.name}")
| 38.151515 | 102 | 0.647855 |
316409bcca978335e1f22705d2342e5f34efb493 | 15,200 | py | Python | pylsp_rope/plugin.py | python-rope/pylsp-rope | 431415560779881b57048dc563802705f7556bca | ["MIT"] | 16 | 2021-10-03T07:18:20.000Z | 2022-03-28T00:11:53.000Z | pylsp_rope/plugin.py | python-rope/pylsp-rope | 431415560779881b57048dc563802705f7556bca | ["MIT"] | 7 | 2021-10-03T06:37:42.000Z | 2021-11-02T17:13:27.000Z | pylsp_rope/plugin.py | python-rope/pylsp-rope | 431415560779881b57048dc563802705f7556bca | ["MIT"] | null | null | null |
import ast
import logging
from typing import List
from pylsp import hookimpl
from pylsp.lsp import MessageType
from rope.refactor import (
extract,
inline,
method_object,
usefunction,
localtofield,
importutils,
introduce_parameter,
)
from pylsp_rope import typing, commands
from pylsp_rope.project import (
get_project,
get_resource,
get_resources,
apply_rope_changeset,
)
from pylsp_rope.typing import DocumentUri, CodeActionKind
logger = logging.getLogger(__name__)
@hookimpl
def pylsp_settings():
logger.info("Initializing pylsp_rope")
# Disable default plugins that conflicts with our plugin
return {
"plugins": {
# "autopep8_format": {"enabled": False},
# "definition": {"enabled": False},
# "flake8_lint": {"enabled": False},
# "folding": {"enabled": False},
# "highlight": {"enabled": False},
# "hover": {"enabled": False},
# "jedi_completion": {"enabled": False},
# "jedi_rename": {"enabled": False},
# "mccabe_lint": {"enabled": False},
# "preload_imports": {"enabled": False},
# "pycodestyle_lint": {"enabled": False},
# "pydocstyle_lint": {"enabled": False},
# "pyflakes_lint": {"enabled": False},
# "pylint_lint": {"enabled": False},
# "references": {"enabled": False},
# "rope_completion": {"enabled": False},
# "rope_rename": {"enabled": False},
# "signature": {"enabled": False},
# "symbols": {"enabled": False},
# "yapf_format": {"enabled": False},
},
}
@hookimpl
def pylsp_commands(config, workspace) -> List[str]:
return [getattr(commands, cmd) for cmd in dir(commands) if not cmd.startswith("_")]
@hookimpl
def pylsp_code_actions(
config, workspace, document, range, context
) -> List[typing.CodeAction]:
logger.info("textDocument/codeAction: %s %s %s", document, range, context)
class info:
current_document, resource = get_resource(workspace, document.uri)
position = range["start"]
start_offset = current_document.offset_at_position(range["start"])
end_offset = current_document.offset_at_position(range["end"])
selected_text = document.source[start_offset:end_offset]
project = get_project(workspace)
for resource in get_resources(workspace, workspace.documents.keys()):
project.pycore._invalidate_resource_cache(resource)
commands = {}
commands.update(
CommandRefactorExtractMethod.get_code_actions(
workspace,
document=document,
range=range,
),
)
commands.update(
CommandRefactorExtractVariable.get_code_actions(
workspace,
document=document,
range=range,
),
)
commands.update(
{
"Inline method/variable/parameter": CommandRefactorInline(
workspace,
document_uri=document.uri,
position=info.position,
),
"Use function": CommandRefactorUseFunction(
workspace,
document_uri=document.uri,
position=info.position,
),
"Use function for current file only": CommandRefactorUseFunction(
workspace,
document_uri=document.uri,
position=info.position,
documents=[document.uri],
),
"To method object": CommandRefactorMethodToMethodObject(
workspace,
document_uri=document.uri,
position=info.position,
),
"Convert local variable to field": CommandRefactorLocalToField(
workspace,
document_uri=document.uri,
position=info.position,
),
"Organize import": CommandSourceOrganizeImport(
workspace,
document_uri=document.uri,
),
"Introduce parameter": CommandIntroduceParameter(
workspace,
document_uri=document.uri,
position=info.position,
),
}
)
return [
cmd.get_code_action(title=title)
for title, cmd in commands.items()
if cmd.is_valid(info)
]
@hookimpl
def pylsp_execute_command(config, workspace, command, arguments):
logger.info("workspace/executeCommand: %s %s", command, arguments)
commands = {cmd.name: cmd for cmd in Command.__subclasses__()}
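    # 'arguments' comes straight from the code action's embedded command; by the
    # convention used in get_code_action() it is a one-element list holding a dict
    # of keyword arguments, which is unpacked into the matching Command subclass.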
try:
return commands[command](workspace, **arguments[0])()
except Exception as exc:
logger.exception(
"Exception when doing workspace/executeCommand: %s",
str(exc),
exc_info=exc,
)
workspace.show_message(
f"pylsp-rope: {exc}",
msg_type=MessageType.Error,
)
class Command:
name: str
title: str
kind: CodeActionKind
def __init__(self, workspace, **arguments):
self.workspace = workspace
self.arguments = arguments
self.__dict__.update(**arguments)
def __call__(self):
rope_changeset = self.get_changes()
if rope_changeset is not None:
apply_rope_changeset(self.workspace, rope_changeset)
def get_changes(self):
"""
Calculate the rope changeset to perform this refactoring.
"""
def validate(self, info) -> None:
"""
Override this method to raise an exception if this refactoring command
cannot be performed
"""
def is_valid(self, info):
try:
self.validate(info)
except Exception:
return False
else:
return True
def get_code_action(self, title: str) -> typing.CodeAction:
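        # The mapping below follows the LSP CodeAction shape: a title, a
        # CodeActionKind, and an embedded Command whose arguments are echoed back
        # to workspace/executeCommand when the client triggers the action.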
return {
"title": title,
"kind": self.kind,
"command": {
"title": title,
"command": self.name,
"arguments": [self.arguments],
},
}
@property # FIXME: backport cached_property
def project(self):
if not hasattr(self, "_project"):
self._project = get_project(self.workspace)
return self._project
class CommandRefactorExtractMethod(Command):
name = commands.COMMAND_REFACTOR_EXTRACT_METHOD
kind: CodeActionKind = "refactor.extract"
document_uri: DocumentUri
range: typing.Range
similar: bool
global_: bool
# FIXME: requires rope.refactor.extract._ExceptionalConditionChecker for proper checking
# def _is_valid(self, info):
# ...
def get_changes(self):
current_document, resource = get_resource(self.workspace, self.document_uri)
refactoring = extract.ExtractMethod(
project=self.project,
resource=resource,
start_offset=current_document.offset_at_position(self.range["start"]),
end_offset=current_document.offset_at_position(self.range["end"]),
)
rope_changeset = refactoring.get_changes(
extracted_name="extracted_method",
similar=self.similar,
global_=self.global_,
)
return rope_changeset
@classmethod
def get_code_actions(cls, workspace, document, range):
return {
"Extract method including similar statements": cls(
workspace,
document_uri=document.uri,
range=range,
global_=False,
similar=True,
),
"Extract method": cls(
workspace,
document_uri=document.uri,
range=range,
global_=False,
similar=False,
),
"Extract global method including similar statements": cls(
workspace,
document_uri=document.uri,
range=range,
global_=True,
similar=True,
),
"Extract global method": cls(
workspace,
document_uri=document.uri,
range=range,
global_=True,
similar=False,
),
}
class CommandRefactorExtractVariable(Command):
name = commands.COMMAND_REFACTOR_EXTRACT_VARIABLE
kind: CodeActionKind = "refactor.extract"
document_uri: DocumentUri
range: typing.Range
similar: bool
global_: bool
def validate(self, info):
# FIXME: requires rope.refactor.extract._ExceptionalConditionChecker for proper checking
ast.parse(info.selected_text, mode="eval")
def get_changes(self):
current_document, resource = get_resource(self.workspace, self.document_uri)
refactoring = extract.ExtractVariable(
project=self.project,
resource=resource,
start_offset=current_document.offset_at_position(self.range["start"]),
end_offset=current_document.offset_at_position(self.range["end"]),
)
rope_changeset = refactoring.get_changes(
extracted_name="extracted_variable",
similar=self.similar,
global_=self.global_,
)
return rope_changeset
@classmethod
def get_code_actions(cls, workspace, document, range):
return {
"Extract variable including similar statements": cls(
workspace,
document_uri=document.uri,
range=range,
global_=False,
similar=True,
),
"Extract variable": cls(
workspace,
document_uri=document.uri,
range=range,
global_=False,
similar=False,
),
"Extract global variable including similar statements": cls(
workspace,
document_uri=document.uri,
range=range,
global_=True,
similar=True,
),
"Extract global variable": cls(
workspace,
document_uri=document.uri,
range=range,
global_=True,
similar=False,
),
}
class CommandRefactorInline(Command):
name = commands.COMMAND_REFACTOR_INLINE
kind: CodeActionKind = "refactor.inline"
document_uri: DocumentUri
position: typing.Range
def validate(self, info):
inline.create_inline(
project=self.project,
resource=info.resource,
offset=info.current_document.offset_at_position(info.position),
)
def get_changes(self):
current_document, resource = get_resource(self.workspace, self.document_uri)
refactoring = inline.create_inline(
project=self.project,
resource=resource,
offset=current_document.offset_at_position(self.position),
)
rope_changeset = refactoring.get_changes()
return rope_changeset
class CommandRefactorUseFunction(Command):
name = commands.COMMAND_REFACTOR_USE_FUNCTION
kind: CodeActionKind = "refactor"
document_uri: DocumentUri
position: typing.Range
def validate(self, info):
usefunction.UseFunction(
project=self.project,
resource=info.resource,
offset=info.current_document.offset_at_position(info.position),
)
def get_changes(self):
current_document, resource = get_resource(self.workspace, self.document_uri)
refactoring = usefunction.UseFunction(
project=self.project,
resource=resource,
offset=current_document.offset_at_position(self.position),
)
rope_changeset = refactoring.get_changes(
resources=get_resources(self.workspace, getattr(self, "documents", None)),
)
return rope_changeset
class CommandRefactorMethodToMethodObject(Command):
name = commands.COMMAND_REFACTOR_METHOD_TO_METHOD_OBJECT
kind: CodeActionKind = "refactor.rewrite"
document_uri: DocumentUri
position: typing.Range
def validate(self, info):
method_object.MethodObject(
project=self.project,
resource=info.resource,
offset=info.current_document.offset_at_position(self.position),
)
def get_changes(self):
current_document, resource = get_resource(self.workspace, self.document_uri)
refactoring = method_object.MethodObject(
project=self.project,
resource=resource,
offset=current_document.offset_at_position(self.position),
)
rope_changeset = refactoring.get_changes(classname="NewMethodObject")
return rope_changeset
class CommandRefactorLocalToField(Command):
name = commands.COMMAND_REFACTOR_LOCAL_TO_FIELD
kind: CodeActionKind = "refactor.rewrite"
document_uri: DocumentUri
position: typing.Range
def validate(self, info):
localtofield.LocalToField(
project=self.project,
resource=info.resource,
offset=info.current_document.offset_at_position(self.position),
)
def get_changes(self):
current_document, resource = get_resource(self.workspace, self.document_uri)
refactoring = localtofield.LocalToField(
project=self.project,
resource=resource,
offset=current_document.offset_at_position(self.position),
)
rope_changeset = refactoring.get_changes()
return rope_changeset
class CommandSourceOrganizeImport(Command):
name = commands.COMMAND_SOURCE_ORGANIZE_IMPORT
kind: CodeActionKind = "source.organizeImports"
document_uri: DocumentUri
def get_changes(self):
current_document, resource = get_resource(self.workspace, self.document_uri)
organizer = importutils.ImportOrganizer(
project=self.project,
)
rope_changeset = organizer.organize_imports(
resource=resource,
)
return rope_changeset
class CommandIntroduceParameter(Command):
name = commands.COMMAND_INTRODUCE_PARAMETER
kind: CodeActionKind = "refactor"
document_uri: DocumentUri
position: typing.Range
def validate(self, info):
introduce_parameter.IntroduceParameter(
project=self.project,
resource=info.resource,
offset=info.current_document.offset_at_position(self.position),
)
def get_changes(self):
current_document, resource = get_resource(self.workspace, self.document_uri)
refactoring = introduce_parameter.IntroduceParameter(
project=self.project,
resource=resource,
offset=current_document.offset_at_position(self.position),
)
rope_changeset = refactoring.get_changes(
new_parameter="new_parameter",
)
return rope_changeset
| 30.831643 | 96 | 0.605329 |
c648a2a434cbf85b73bf9caa38aa515a7eb985c0 | 6,509 | py | Python | test/test_template_rbac.py | 1995chen/python-common-libs | dcf36376ae244426fb8da5b76b96fe8e0c3911af | ["MIT"] | null | null | null | test/test_template_rbac.py | 1995chen/python-common-libs | dcf36376ae244426fb8da5b76b96fe8e0c3911af | ["MIT"] | null | null | null | test/test_template_rbac.py | 1995chen/python-common-libs | dcf36376ae244426fb8da5b76b96fe8e0c3911af | ["MIT"] | null | null | null |
# -*- coding: UTF-8 -*-
import os
import unittest
import inject
import template_logging
from template_rbac import OAuth2SSO
from flask import Flask, make_response
# Create the log directory
os.makedirs('./logs/', exist_ok=True)
template_logging.init_logger()
logger = template_logging.getLogger(__name__)
class TestTemplateRbacMethods(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""
        Perform the overall initialization for this test case
"""
        # Keycloak is used as the example identity provider here
        # The Keycloak OpenID configuration can be viewed at
# https://sso.local.domain/auth/realms/{realm}/.well-known/openid-configuration
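        # The client credentials and endpoint URLs below are local placeholder
        # values for a Keycloak realm named "production"; substitute your own
        # realm's .well-known endpoints when running against a real SSO server.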
def my_config(binder):
oauth2_instance: OAuth2SSO = OAuth2SSO(
client_id="flask_template",
client_secret="34ddefb0-4063-40db-9b93-4b12b81cc147",
authorization_endpoint="https://sso.local.domain/auth/realms/production/protocol/openid-connect/auth",
token_endpoint="https://sso.local.domain/auth/realms/production/protocol/openid-connect/token",
userinfo_endpoint="https://sso.local.domain/auth/realms/production/protocol/openid-connect/userinfo",
app_root_url="http://127.0.0.1:8080/",
api_auth_path="/api/login",
api_logout_path="/api/logout",
jwt_secret="abcd1234"
)
            # Set up the injection binding
binder.bind(OAuth2SSO, oauth2_instance)
        # Bind the instance to inject
inject.configure(my_config)
app = Flask(__name__)
@app.route('/', methods=['GET'])
def info():
return make_response({
'hello': "hello",
})
sso_instance = inject.instance(OAuth2SSO)
        # Register the SSO blueprint
app.register_blueprint(sso_instance.get_resources())
logger.info(f"auth url is {sso_instance.sso_auth_url}")
# sso_instance.refresh_token(
# "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9."
# "eyJpYXQiOjE2NDYwMzQ3MzksImlzcyI6Imh0dHBzOi8vZmxhc2stdGVtcGxhdGUubG9jYWwu"
# "ZG9tYWluIiwianRpIjoiOWE2ZWI0NGQtNTBjZi00YmZmLWFkYzUtYjJiYWM1ZDY5ZDdjIiwi"
# "ZGF0YSI6eyJhY2Nlc3NfdG9rZW4iOiJleUpoYkdjaU9pSlNVekkxTmlJc0luUjVjQ0lnT2lB"
# "aVNsZFVJaXdpYTJsa0lpQTZJQ0pJTUdwdWVXUkRhRzVsVERoSE5HRnJUakZITlZGcE1VMDFV"
# "VnB1Ym5Sck5ERktjMWhDU3pSeFRVRnpJbjAuZXlKbGVIQWlPakUyTkRZd016VXdNemtzSW1s"
# "aGRDSTZNVFkwTmpBek5EY3pPU3dpWVhWMGFGOTBhVzFsSWpveE5qUTJNRE15T1RRMExDSnFk"
# "R2tpT2lJeU4yVTVZVGxqT0Mxa1lqQXpMVFJsWXpRdE9URTRNUzA0WlRCa1lqWmpZakZpTjJV"
# "aUxDSnBjM01pT2lKb2RIUndjem92TDNOemJ5NXNiMk5oYkM1a2IyMWhhVzR2WVhWMGFDOXla"
# "V0ZzYlhNdmNISnZaSFZqZEdsdmJpSXNJbUYxWkNJNld5SnlaV0ZzYlMxdFlXNWhaMlZ0Wlc1"
# "MElpd2ljbUZ1WTJobGNpSXNJbWhoY21KdmNpSXNJbUp5YjJ0bGNpSXNJbUZqWTI5MWJuUWlY"
# "U3dpYzNWaUlqb2lOekUyWm1FMk9ESXRaVE5tTUMwMFltTTBMVGc1WmpndFpUSTROV0k1TnpN"
# "ME1HUTVJaXdpZEhsd0lqb2lRbVZoY21WeUlpd2lZWHB3SWpvaVpteGhjMnRmZEdWdGNHeGhk"
# "R1VpTENKelpYTnphVzl1WDNOMFlYUmxJam9pTVdGak1qUmhaR010TldRM1pDMDBNMkZpTFdJ"
# "NU5qZ3RZVEptTmpVNU1HVmlORGd6SWl3aVlXTnlJam9pTUNJc0ltRnNiRzkzWldRdGIzSnBa"
# "Mmx1Y3lJNld5SXZLaUpkTENKeVpXRnNiVjloWTJObGMzTWlPbnNpY205c1pYTWlPbHNpYjJa"
# "bWJHbHVaVjloWTJObGMzTWlMQ0oxYldGZllYVjBhRzl5YVhwaGRHbHZiaUpkZlN3aWNtVnpi"
# "M1Z5WTJWZllXTmpaWE56SWpwN0luSmxZV3h0TFcxaGJtRm5aVzFsYm5RaU9uc2ljbTlzWlhN"
# "aU9sc2lkbWxsZHkxcFpHVnVkR2wwZVMxd2NtOTJhV1JsY25NaUxDSjJhV1YzTFhKbFlXeHRJ"
# "aXdpYldGdVlXZGxMV2xrWlc1MGFYUjVMWEJ5YjNacFpHVnljeUlzSW1sdGNHVnljMjl1WVhS"
# "cGIyNGlMQ0p5WldGc2JTMWhaRzFwYmlJc0ltTnlaV0YwWlMxamJHbGxiblFpTENKdFlXNWha"
# "MlV0ZFhObGNuTWlMQ0p4ZFdWeWVTMXlaV0ZzYlhNaUxDSjJhV1YzTFdGMWRHaHZjbWw2WVhS"
# "cGIyNGlMQ0p4ZFdWeWVTMWpiR2xsYm5Seklpd2ljWFZsY25rdGRYTmxjbk1pTENKdFlXNWha"
# "MlV0WlhabGJuUnpJaXdpYldGdVlXZGxMWEpsWVd4dElpd2lkbWxsZHkxbGRtVnVkSE1pTENK"
# "MmFXVjNMWFZ6WlhKeklpd2lkbWxsZHkxamJHbGxiblJ6SWl3aWJXRnVZV2RsTFdGMWRHaHZj"
# "bWw2WVhScGIyNGlMQ0p0WVc1aFoyVXRZMnhwWlc1MGN5SXNJbkYxWlhKNUxXZHliM1Z3Y3lK"
# "ZGZTd2ljbUZ1WTJobGNpSTZleUp5YjJ4bGN5STZXeUprWVhSaFltRnpaU0lzSW1SbFptRjFi"
# "SFFpTENKemVYTjBaVzBpTENKd2NtOWtkV04wYVc5dUlpd2laR1YyWld4dmNDSXNJbVJoYzJo"
# "aWIyRnlaQ0pkZlN3aWFHRnlZbTl5SWpwN0luSnZiR1Z6SWpwYkltRmtiV2x1SWwxOUxDSmlj"
# "bTlyWlhJaU9uc2ljbTlzWlhNaU9sc2ljbVZoWkMxMGIydGxiaUpkZlN3aVlXTmpiM1Z1ZENJ"
# "NmV5SnliMnhsY3lJNld5SnRZVzVoWjJVdFlXTmpiM1Z1ZENJc0luWnBaWGN0WVhCd2JHbGpZ"
# "WFJwYjI1eklpd2lkbWxsZHkxamIyNXpaVzUwSWl3aWJXRnVZV2RsTFdGalkyOTFiblF0Ykds"
# "dWEzTWlMQ0p0WVc1aFoyVXRZMjl1YzJWdWRDSXNJblpwWlhjdGNISnZabWxzWlNKZGZYMHNJ"
# "bk5qYjNCbElqb2laVzFoYVd3Z2NISnZabWxzWlNJc0ltVnRZV2xzWDNabGNtbG1hV1ZrSWpw"
# "bVlXeHpaU3dpYm1GdFpTSTZJa3hwWVc1bklFTm9aVzRpTENKd2NtVm1aWEp5WldSZmRYTmxj"
# "bTVoYldVaU9pSmphR1Z1YkdsaGJtY2lMQ0puYVhabGJsOXVZVzFsSWpvaVRHbGhibWNpTENK"
# "bVlXMXBiSGxmYm1GdFpTSTZJa05vWlc0aUxDSmxiV0ZwYkNJNkltTm9aVzVzTWpRME9ETTJO"
# "VEE0T0VCbmJXRnBiQzVqYjIwaWZRLmVtNGpRM2NLMlctaXBzSWpDUkdtZTA3ak5raElmQXdq"
# "RmViZ3Q5djY5dHFsaGtKS1VRazNTRnVIcHoycHlUQThuZ2YxZlFaNXRYdHB6S2JnLVBvTlVm"
# "WDZJVHg4SXZEN3ZxY3pRQzdqVGVaLTBibHFLM3FjbVhHRnVVTVBMUmN3dVBkZjZuZl9vYU5Z"
# "Ym8wQUo0a0ctMmRGMkVqZk9CSzZuX2VvNG5hRFBMRW52ejA2UnZ0eFRuTm94U3FrTk9JSDZS"
# "R3dDS2QzSTU5dTVTQVNZT2lhRUE5ZVJsdWM3NWJOeVBrUlpoaTl5RlFhaDhIeXR4YlM0MXVy"
# "UVZNREZVdTFGcnVoYVEzbFN5RkpZc3lCcUlQOFRjT2QybVlPY1NXV0JTWEl1TkI0eHBMTHRT"
# "RklsUEZ3N0tsMGZUemliSmRYaDNUSkM3X0djdDE5bEt0WTVxS0lYdyIsImV4cGlyZXNfYXQi"
# "OjE2NDYwMzQ3MzksInVzZXJuYW1lIjoiY2hlbmxpYW5nIiwiZW1haWwiOiJjaGVubDI0NDgz"
# "NjUwODhAZ21haWwuY29tIn0sImV4cCI6MTY0NjEyMTEzOX0.hd0lL4Px61yL9elovMD3A_Ne"
# "bgHydLTBxDvK-RBGd5U"
# )
        # # This test case does not support automated runs yet
# app.run("0.0.0.0", 8080)
@classmethod
def tearDownClass(cls):
        # Clean up
inject.clear()
def setUp(self):
self.passed: bool = False
def tearDown(self):
        # Log the result
logger.info(
f"func {self.__class__.__name__}.{self._testMethodName}.........{'passed' if self.passed else 'failed'}"
)
def test_transaction_decorator(self):
"""
        Test the transaction decorator
"""
self.passed = True
if __name__ == '__main__':
unittest.main()
| 47.860294 | 118 | 0.727762 |
175e04698f1d310ba1e2e71266c45c0f85f270cc | 1,637 | py | Python | SWE573/pulse/helpers/sentiment_helper.py | umutseven92/SWE573 | 04aaf665fb2bbc54de632bbea9aa0fc93685fe67 | ["MIT"] | null | null | null | SWE573/pulse/helpers/sentiment_helper.py | umutseven92/SWE573 | 04aaf665fb2bbc54de632bbea9aa0fc93685fe67 | ["MIT"] | 21 | 2018-02-12T18:56:14.000Z | 2018-04-22T14:59:39.000Z | SWE573/pulse/helpers/sentiment_helper.py | umutseven92/Sentweet | 04aaf665fb2bbc54de632bbea9aa0fc93685fe67 | ["MIT"] | 2 | 2018-03-25T15:20:52.000Z | 2018-06-01T14:17:51.000Z |
from nltk.sentiment.vader import SentimentIntensityAnalyzer
def get_sentiment_scores(body):
sid = SentimentIntensityAnalyzer()
ss = sid.polarity_scores(body)
return ss
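# get_sentiment_scores() returns VADER's polarity dict, e.g. (illustrative values)
# {'neg': 0.0, 'neu': 0.323, 'pos': 0.677, 'compound': 0.6369}; 'compound' is a
# normalized score in [-1, 1] and is what get_sentiment_info() uses to bucket
# tweets as positive, negative or neutral.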
def get_sentiment_info(tweets):
daily_positive, daily_negative, daily_neutral, daily_compound = 0, 0, 0, 0
neg_tweet_count, pos_tweet_count, neu_tweet_count, all_tweet_count, total_comp = 0, 0, 0, 0, 0
for tweet in tweets:
ss = get_sentiment_scores(tweet)
compound = ss['compound']
daily_compound = daily_compound + compound
total_comp += compound
if compound == 0.0:
neu_tweet_count += 1
elif compound > 0.0:
pos_tweet_count += 1
else:
neg_tweet_count += 1
daily_positive += ss['pos']
daily_negative += ss['neg']
daily_neutral += ss['neu']
tweet_count = len(tweets)
all_tweet_count += tweet_count
if tweet_count == 0:
average_comp = 0
average_pos = 0
average_neg = 0
average_neu = 0
else:
average_comp = daily_compound / tweet_count
average_pos = daily_positive / tweet_count
average_neg = daily_negative / tweet_count
average_neu = daily_neutral / tweet_count
result = {'pos': average_pos, 'neg': -1 * average_neg,
'neu': average_neu, 'compound': average_comp,
'count': tweet_count}
misc = {'neg_tweet_count': neg_tweet_count, 'pos_tweet_count': pos_tweet_count, 'neu_tweet_count': neu_tweet_count,
'all_tweet_count': all_tweet_count, 'total_comp': total_comp}
return result, misc
| 29.763636 | 119 | 0.643861 |
8a740cfbc64f320a41cc67d9beb28eb5bcde7a39
| 6,062 |
py
|
Python
|
dashboard/dashboard/auto_bisect.py
|
PLSV/catapult
|
88e5b1f40c89c4b80d3dd56a722936d07f222a55
|
[
"BSD-3-Clause"
] | null | null | null |
dashboard/dashboard/auto_bisect.py
|
PLSV/catapult
|
88e5b1f40c89c4b80d3dd56a722936d07f222a55
|
[
"BSD-3-Clause"
] | null | null | null |
dashboard/dashboard/auto_bisect.py
|
PLSV/catapult
|
88e5b1f40c89c4b80d3dd56a722936d07f222a55
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""URL endpoint for a cron job to automatically run bisects."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import json
import logging
from functools import cmp_to_key
from dashboard import can_bisect
from dashboard import pinpoint_request
from dashboard.common import namespaced_stored_object
from dashboard.common import utils
from dashboard.models import anomaly
from dashboard.models import graph_data
from dashboard.services import pinpoint_service
class NotBisectableError(Exception):
"""An error indicating that a bisect couldn't be automatically started."""
pass
def StartNewBisectForBug(bug_id):
"""Tries to trigger a bisect job for the alerts associated with a bug.
Args:
bug_id: A bug ID number.
Returns:
If successful, a dict containing "issue_id" and "issue_url" for the
bisect job. Otherwise, a dict containing "error", with some description
of the reason why a job wasn't started.
"""
try:
return _StartBisectForBug(bug_id)
except NotBisectableError as e:
    logging.info('New bisect errored out with message: ' + str(e))
    return {'error': str(e)}
def _StartBisectForBug(bug_id):
anomalies, _, _ = anomaly.Anomaly.QueryAsync(
bug_id=bug_id, limit=500).get_result()
if not anomalies:
raise NotBisectableError('No Anomaly alerts found for this bug.')
test_anomaly = _ChooseTest(anomalies)
test = None
if test_anomaly:
test = test_anomaly.GetTestMetadataKey().get()
if not test or not can_bisect.IsValidTestForBisect(test.test_path):
raise NotBisectableError('Could not select a test.')
bot_configurations = namespaced_stored_object.Get('bot_configurations')
if test.bot_name not in list(bot_configurations.keys()):
raise NotBisectableError(
'Bot: %s has no corresponding Pinpoint bot.' % test.bot_name)
return _StartPinpointBisect(bug_id, test_anomaly, test)
def _StartPinpointBisect(bug_id, test_anomaly, test):
# Convert params to Pinpoint compatible
params = {
'test_path': test.test_path,
'start_commit': test_anomaly.start_revision - 1,
'end_commit': test_anomaly.end_revision,
'bug_id': bug_id,
'bisect_mode': 'performance',
'story_filter': test.unescaped_story_name,
'alerts': json.dumps([test_anomaly.key.urlsafe()])
}
try:
results = pinpoint_service.NewJob(
pinpoint_request.PinpointParamsFromBisectParams(params))
except pinpoint_request.InvalidParamsError as e:
    raise NotBisectableError(str(e))
# For compatibility with existing bisect, switch these to issueId/url
if 'jobId' in results:
results['issue_id'] = results['jobId']
test_anomaly.pinpoint_bisects.append(str(results['jobId']))
test_anomaly.put()
del results['jobId']
if 'jobUrl' in results:
results['issue_url'] = results['jobUrl']
del results['jobUrl']
return results
def _ChooseTest(anomalies):
"""Chooses a test to use for a bisect job.
The particular TestMetadata chosen determines the command and metric name that
is chosen. The test to choose could depend on which of the anomalies has the
largest regression size.
Ideally, the choice of bisect bot to use should be based on bisect bot queue
length, and the choice of metric should be based on regression size and noise
level.
However, we don't choose bisect bot and metric independently, since some
regressions only happen for some tests on some platforms; we should generally
only bisect with a given bisect bot on a given metric if we know that the
regression showed up on that platform for that metric.
Args:
anomalies: A non-empty list of Anomaly entities.
Returns:
An Anomaly entity, or None if no valid entity could be chosen.
Raises:
NotBisectableError: The only matching tests are on domains that have been
excluded for automatic bisects on alert triage.
"""
if not anomalies:
return None
  anomalies.sort(key=cmp_to_key(_CompareAnomalyBisectability))
found_excluded_domain = False
for anomaly_entity in anomalies:
if can_bisect.IsValidTestForBisect(
utils.TestPath(anomaly_entity.GetTestMetadataKey())):
if can_bisect.DomainIsExcludedFromTriageBisects(
anomaly_entity.master_name):
found_excluded_domain = True
continue
return anomaly_entity
if found_excluded_domain:
raise NotBisectableError(
'Did not kick off bisect because only available domains are '
'excluded from automatic bisects on triage.')
return None
def _CompareAnomalyBisectability(a1, a2):
"""Compares two Anomalies to decide which Anomaly's TestMetadata is better to
use.
Note: Potentially, this could be made more sophisticated by using
more signals:
- Bisect bot queue length
- Platform
- Test run time
- Stddev of test
Args:
a1: The first Anomaly entity.
a2: The second Anomaly entity.
Returns:
Negative integer if a1 is better than a2, positive integer if a2 is better
than a1, or zero if they're equally good.
"""
if a1.percent_changed > a2.percent_changed:
return -1
elif a1.percent_changed < a2.percent_changed:
return 1
return 0
def GetRevisionForBisect(revision, test_key):
"""Gets a start or end revision value which can be used when bisecting.
Note: This logic is parallel to that in elements/chart-container.html
in the method getRevisionForBisect.
Args:
revision: The ID of a Row, not necessarily an actual revision number.
test_key: The ndb.Key for a TestMetadata.
Returns:
An int or string value which can be used when bisecting.
"""
row_parent_key = utils.GetTestContainerKey(test_key)
row = graph_data.Row.get_by_id(revision, parent=row_parent_key)
if row and hasattr(row, 'a_default_rev') and hasattr(row, row.a_default_rev):
return getattr(row, row.a_default_rev)
return revision
| 32.417112 | 80 | 0.743979 |
2270d1e7ac5e5f60c4218227b768c90026398ceb
| 6,904 |
py
|
Python
|
submodular_optimization/algorithms/scaled_single_threshold_greedy.py
|
smnikolakaki/submodular-linear-cost-maximization
|
98be3e79c11e4a36c253ed9a4800e6976b4aa3bf
|
[
"MIT"
] | null | null | null |
submodular_optimization/algorithms/scaled_single_threshold_greedy.py
|
smnikolakaki/submodular-linear-cost-maximization
|
98be3e79c11e4a36c253ed9a4800e6976b4aa3bf
|
[
"MIT"
] | null | null | null |
submodular_optimization/algorithms/scaled_single_threshold_greedy.py
|
smnikolakaki/submodular-linear-cost-maximization
|
98be3e79c11e4a36c253ed9a4800e6976b4aa3bf
|
[
"MIT"
] | null | null | null |
"""
This class implements the scaled single-threshold Greedy algorithm
1/2(3 - sqrt(5)) approximation
"""
import logging
import numpy as np
import collections
import operator
import sys
class ScaledSingleThresholdGreedy(object):
"""
Scaled single-threshold Greedy algorithm implementation
"""
def __init__(self, config, init_submodular_func_coverage, submodular_func, cost_func, E, k):
"""
Constructor
:param config:
:param submodular_func:
:param cost_func:
:param E -- a python set:
:param k:
:return:
"""
self.config = config
self.logger = logging.getLogger("so_logger")
self.submodular_func = submodular_func
self.cost_func = cost_func
self.init_submodular_func_coverage = init_submodular_func_coverage
self.E = E
self.k = k
self.epsilon = self.config['algorithms']['scaled_single_threshold_greedy_config']['epsilon']
def calc_marginal_gain(self, skills_covered, e):
"""
Calculates the marginal gain for adding element e to the current solution sol
:param sol:
:param e:
:return marginal_gain:
"""
prev_val, skills_covered = self.submodular_func(skills_covered, [])
# print('Previous value:',prev_val)
new_val, skills_covered = self.submodular_func(skills_covered, [e])
# print('New value:',new_val)
marginal_gain = new_val - prev_val
# print('Marginal gain:',marginal_gain)
return marginal_gain
def calc_scaled_objective(self, skills_covered, user_id, sol, val):
"""
Calculates the scaled objective
:param sol:
:return obj_val:
"""
# Weight scaling is constant c
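        # Here c = (3 + sqrt(5)) / 2 ~= 2.618; scaling the linear cost by c is what yields the
        # 1/2*(3 - sqrt(5)) approximation factor mentioned in the module docstring.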
c = (1/2)*(3 + np.sqrt(5))
submodular_gain, skills_covered = self.submodular_func(skills_covered, user_id)
val = val + submodular_gain
weighted_cost = c * self.cost_func(sol)
obj_val = val - weighted_cost
return obj_val
def get_set_of_thresholds(self, m):
"""
Returns the set of thresholds
:param m:
:return O:
"""
O = []
lb = m
ub = 2 * self.k * m
if m == 0 or self.k == 0:
li = 0
ui = 1
else:
li = np.log(m) / np.log(1 + self.epsilon)
ui = np.log(2 * self.k * m) / np.log(1 + self.epsilon) + 1
li = int(np.ceil(li)) # smallest integer greater than li
ui = int(np.floor(ui)) # largest integer not greater than ui
for i in range(li,ui):
v = np.power((1+self.epsilon), i)
if lb <= v and v <= ub:
O.append(v)
if v > ub:
break
return O
def update_set_keys(self, S,O):
"""
Updates the sets of the thresholds
:param S:
:param O:
:return S:
"""
# Create empty Sv for v in Oi that are new
for v in O:
if v not in S:
S[v] = {}
S[v]['solution'] = list()
S[v]['skills_covered'] = self.init_submodular_func_coverage()
S[v]['value'] = 0
# Delete sets Sv for v that do not exist in Oi
S_vs = set(S.keys())
O_set = set(O)
remove_vs = S_vs - O_set
for v in remove_vs:
del S[v]
return S
def scaled_greedy_criterion(self, skills_covered, e):
"""
Calculates the contribution of element e to greedy solution
:param sol:
:param e:
:return greedy_contrib:
"""
# Weight scaling is constant c
c = (1/2)*(3 + np.sqrt(5))
marginal_gain = self.calc_marginal_gain(skills_covered, e)
weighted_cost = c * self.cost_func([e])
greedy_contrib = marginal_gain - weighted_cost
return greedy_contrib
def update_sets_new_element(self, v, e, S):
"""
Updates the sets with the new element
:param v:
:param e:
:param S:
:return :
"""
Sv_solution = S[v]['solution']
Sv_skills_covered = S[v]['skills_covered']
Sv_value = S[v]['value']
if self.k == 0:
return S
# Threshold tau wrt the value of the scaled objective - from original paper
# denominator = self.k - len(Sv_solution)
# if denominator == 0:
# return S
# nominator = (v/2) - self.calc_scaled_objective(Sv_skills_covered, [], Sv_solution, Sv_value)
# tau = nominator / denominator
tau = (1/self.k)*((1/2)*(3 - np.sqrt(5))*Sv_value - self.cost_func(Sv_solution))
# Marginal gain wrt scaled objective
marg_gain = self.scaled_greedy_criterion(Sv_skills_covered, e)
if tau < 0 :
tau = 0
if marg_gain >= tau and len(Sv_solution) < self.k:
S[v]['solution'].append(e)
submodular_gain, skills_covered = self.submodular_func(Sv_skills_covered, [e])
S[v]['skills_covered'] = skills_covered
S[v]['value'] = Sv_value + submodular_gain
return S
def find_max(self, S):
max_solution = []
max_value = -float("inf")
for v, nested_dict in S.items():
# print('Nested dictionary:',nested_dict['solution'])
submodular_value = nested_dict['value']; solution = nested_dict['solution']
value = submodular_value - self.cost_func(solution)
if max_value < value:
max_value = value
max_solution = solution
return max_solution, max_value
def run(self):
"""
Execute algorithm
:param:
:return:
"""
print(self.epsilon)
curr_sol = []
curr_val = 0
S = collections.defaultdict(list)
m = 0
# Initialize the submodular function coverage skills
self.skills_covered = self.init_submodular_func_coverage()
for e_i in self.E:
# Thresholds defined over the scaled objective value
m = max(m, self.calc_scaled_objective(self.skills_covered,[e_i],[],0))
# Creating set of thresholds
Oi = self.get_set_of_thresholds(m)
# Update the set Sv keys
S = self.update_set_keys(S,Oi)
# Update the sets Sv with new element in parallel
for v in Oi:
S = self.update_sets_new_element(v, e_i, S)
if S:
# Return the solution that maximizes original objective value
curr_sol, curr_val = self.find_max(S)
# print(max(S, key=lambda sol: S[sol]['value'] - self.cost_func(S[sol]['solution'])))
self.logger.info("Best solution: {}\nBest value: {}".format(curr_sol, curr_val))
return curr_sol
| 32.413146 | 102 | 0.566338 |
4089b6d5816eaebb48b531389c15005db4d9c707
| 405 |
py
|
Python
|
__init__.py
|
YorkSu/deepgo
|
2f22ad50d2958a4f1c7dfc0af6fcd448f5e7e18d
|
[
"Apache-2.0"
] | null | null | null |
__init__.py
|
YorkSu/deepgo
|
2f22ad50d2958a4f1c7dfc0af6fcd448f5e7e18d
|
[
"Apache-2.0"
] | null | null | null |
__init__.py
|
YorkSu/deepgo
|
2f22ad50d2958a4f1c7dfc0af6fcd448f5e7e18d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Deep Go
======
An framework for Deep Learning
Version:
Aiur 0.1.0
Author:
York Su
"""
__author__ = "York Su"
__version__ = "0.1.0"
__codename__ = "Aiur"
__release_date__ = "2020-08-16"
import os as _os
import sys as _sys
this_dir = _os.path.dirname(_os.path.abspath(__file__))
if this_dir not in _sys.path:
_sys.path.append(this_dir)
del _os, _sys
| 14.464286 | 55 | 0.654321 |
300360d08510daa4752fa2ca27ace2295469c7c2
| 5,843 |
py
|
Python
|
pytorch_lightning/strategies/dp.py
|
HabanaAI/pytorch-lightning
|
07b4452b71dc7397fefb35477f922eff096752ad
|
[
"Apache-2.0"
] | 15,666 |
2020-01-14T07:16:15.000Z
|
2022-03-31T23:22:26.000Z
|
pytorch_lightning/strategies/dp.py
|
HabanaAI/pytorch-lightning
|
07b4452b71dc7397fefb35477f922eff096752ad
|
[
"Apache-2.0"
] | 9,140 |
2020-01-14T03:10:42.000Z
|
2022-03-31T19:57:09.000Z
|
pytorch_lightning/strategies/dp.py
|
HabanaAI/pytorch-lightning
|
07b4452b71dc7397fefb35477f922eff096752ad
|
[
"Apache-2.0"
] | 2,340 |
2020-01-14T06:45:32.000Z
|
2022-03-31T22:57:07.000Z
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional
import torch
from torch.nn import DataParallel, Module
import pytorch_lightning as pl
from pytorch_lightning.overrides.data_parallel import LightningParallelModule
from pytorch_lightning.plugins.io.checkpoint_plugin import CheckpointIO
from pytorch_lightning.plugins.precision import PrecisionPlugin
from pytorch_lightning.strategies.parallel import ParallelStrategy
from pytorch_lightning.utilities.apply_func import apply_to_collection, move_data_to_device
from pytorch_lightning.utilities.enums import _StrategyType
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.types import _METRIC_COLLECTION, STEP_OUTPUT
class DataParallelStrategy(ParallelStrategy):
"""Implements data-parallel training in a single process, i.e., the model gets replicated to each device and
each gets a split of the data."""
distributed_backend = _StrategyType.DP
def __init__(
self,
accelerator: Optional["pl.accelerators.accelerator.Accelerator"] = None,
parallel_devices: Optional[List[torch.device]] = None,
checkpoint_io: Optional[CheckpointIO] = None,
precision_plugin: Optional[PrecisionPlugin] = None,
):
super().__init__(
accelerator=accelerator,
parallel_devices=parallel_devices,
cluster_environment=None,
checkpoint_io=checkpoint_io,
precision_plugin=precision_plugin,
)
@property
def global_rank(self) -> int:
return 0
@property
def local_rank(self) -> int:
return 0
@property
def node_rank(self) -> int:
return 0
@property
def world_size(self) -> int:
return 1
def setup(self, trainer: "pl.Trainer") -> None:
# model needs to be moved to the device before it is wrapped
self.model_to_device()
self.model = self._setup_model(LightningParallelModule(self.model))
super().setup(trainer)
def batch_to_device(self, batch: Any, device: Optional[torch.device] = None, dataloader_idx: int = 0) -> Any:
"""Moves the batch to the correct device.
The input and the output is the same type.
Args:
batch: The batch of samples to move to the correct device
device: The target device
dataloader_idx: The index of the dataloader to which the batch belongs.
"""
return move_data_to_device(batch, device=device or self.root_device)
def _setup_model(self, model: Module) -> DataParallel:
"""Wraps the given model into a :class:`~torch.nn.parallel.DataParallel` module."""
return DataParallel(module=model, device_ids=self.parallel_devices)
def reduce(self, collection: _METRIC_COLLECTION, *args, **kwargs) -> _METRIC_COLLECTION:
"""Reduces a collection of tensors from all processes. It can be applied to just a single tensor.
Args:
collection: The collection of tensors to sync and reduce.
*args: ignored for DP
**kwargs: ignored for DP
Return:
Reduced tensor values or the same value if it was not or did not contain a tensor.
"""
def mean(t: torch.Tensor) -> torch.Tensor:
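            # Average in float32 for numerical stability (e.g. for float16 inputs),
            # then cast back to the tensor's original dtype.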
original_dtype = t.dtype
return t.float().mean().to(original_dtype)
return apply_to_collection(collection, torch.Tensor, mean)
@property
def root_device(self):
return self.parallel_devices[0]
def model_to_device(self) -> None:
self.model.to(self.root_device)
def barrier(self, *args, **kwargs):
pass
def broadcast(self, obj: object, src: int = 0) -> object:
return obj
def reduce_boolean_decision(self, decision: bool) -> bool:
return decision
def training_step(self, *args, **kwargs) -> STEP_OUTPUT:
with self.precision_plugin.train_step_context():
return self.model(*args, **kwargs)
def validation_step(self, *args, **kwargs) -> Optional[STEP_OUTPUT]:
with self.precision_plugin.val_step_context():
return self.model(*args, **kwargs)
def test_step(self, *args, **kwargs) -> Optional[STEP_OUTPUT]:
with self.precision_plugin.test_step_context():
return self.model(*args, **kwargs)
def predict_step(self, *args, **kwargs) -> STEP_OUTPUT:
with self.precision_plugin.predict_step_context():
return self.model(*args, **kwargs)
def training_step_end(self, output):
if not is_overridden("training_step_end", self.lightning_module):
return self.reduce(output)
return output
def validation_step_end(self, output):
if not is_overridden("validation_step_end", self.lightning_module):
return self.reduce(output)
return output
def test_step_end(self, output):
if not is_overridden("test_step_end", self.lightning_module):
return self.reduce(output)
return output
def teardown(self) -> None:
super().teardown()
if self.on_gpu:
# GPU teardown
self.lightning_module.cpu()
# clean up memory
torch.cuda.empty_cache()
| 36.291925 | 113 | 0.683553 |
eb8f924fbfc6afd66b0d7f71527fd6bd71626165
| 8,780 |
py
|
Python
|
ext/raith21/functionsim.py
|
edgerun/faas-sim
|
fe35e70bc0500bc8b7d94e65b9faf478011edebf
|
[
"MIT"
] | 19 |
2021-03-07T06:24:40.000Z
|
2022-03-25T11:43:08.000Z
|
ext/raith21/functionsim.py
|
edgerun/faas-sim
|
fe35e70bc0500bc8b7d94e65b9faf478011edebf
|
[
"MIT"
] | 8 |
2020-11-13T13:21:05.000Z
|
2021-05-28T19:45:35.000Z
|
ext/raith21/functionsim.py
|
edgerun/faas-sim
|
fe35e70bc0500bc8b7d94e65b9faf478011edebf
|
[
"MIT"
] | 6 |
2020-09-03T08:58:11.000Z
|
2022-03-13T00:36:29.000Z
|
import logging
from typing import Callable, Optional, Dict
from simpy import Resource
from sim.core import Environment
from sim.docker import pull as docker_pull
from sim.faas import FunctionSimulator, FunctionRequest, FunctionReplica, SimulatorFactory, simulate_data_download, \
simulate_data_upload, FunctionCharacterization, FunctionContainer
def linear_queue_fet_increase(current_requests: int, max_requests: int) -> float:
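    # Utilisation-based scaling factor, e.g. 3 busy of 4 workers -> 0.75; callers clamp it
    # with max(1, ...) so the sampled FET is only ever stretched, never shortened.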
return current_requests / max_requests
class PythonHTTPSimulator(FunctionSimulator):
def __init__(self, queue: Resource, scale: Callable[[int, int], float], fn: FunctionContainer,
characterization: FunctionCharacterization):
self.worker_threads = queue.capacity
self.queue = queue
self.scale = scale
self.delay = 0
self.fn = fn
self.characterization = characterization
def invoke(self, env: Environment, replica: FunctionReplica, request: FunctionRequest):
token = self.queue.request()
yield token # wait for access
# because of GIL and Threads, we can easily estimate the additional time caused by concurrent requests to the
# same Function
factor = max(1, self.scale(self.queue.count, self.queue.capacity))
try:
fet = self.characterization.sample_fet(replica.node.name)
if fet is None:
logging.error(f"FET for node {replica.node.name} for function {self.fn.image} was not found")
raise ValueError(f'{replica.node.name}')
fet = float(fet) * factor
yield env.timeout(fet)
except KeyError:
pass
self.queue.release(token)
class PythonHttpSimulatorFactory(SimulatorFactory):
def __init__(self, fn_characterizations: Dict[str, FunctionCharacterization]):
self.fn_characterizations = fn_characterizations
def create(self, env: Environment, fn: FunctionContainer) -> FunctionSimulator:
workers = int(fn.labels['workers'])
queue = Resource(env=env, capacity=workers)
return PythonHTTPSimulator(queue, linear_queue_fet_increase, fn, self.fn_characterizations[fn.image])
class FunctionCall:
replica: FunctionReplica
request: FunctionRequest
start: int
end: Optional[int] = None
def __init__(self, request, replica, start, end=None):
self.request = request
self.replica = replica
self.start = start
self.end = end
@property
def request_id(self):
return self.request.request_id
class InterferenceAwarePythonHttpSimulatorFactory(SimulatorFactory):
def __init__(self, fn_characterizations: Dict[str, FunctionCharacterization]):
self.fn_characterizations = fn_characterizations
def create(self, env: Environment, fn: FunctionContainer) -> FunctionSimulator:
workers = int(fn.labels['workers'])
queue = Resource(env=env, capacity=workers)
return InterferenceAwarePythonHttpSimulator(queue, linear_queue_fet_increase, fn,
self.fn_characterizations[fn.image])
class AIPythonHTTPSimulatorFactory(SimulatorFactory):
def __init__(self, fn_characterizations: Dict[str, FunctionCharacterization]):
self.fn_characterizations = fn_characterizations
def create(self, env: Environment, fn: FunctionContainer) -> FunctionSimulator:
workers = int(fn.labels['workers'])
queue = Resource(env=env, capacity=workers)
return AIPythonHTTPSimulator(queue, linear_queue_fet_increase, fn, self.fn_characterizations[fn.image])
class AIPythonHTTPSimulator(FunctionSimulator):
def __init__(self, queue: Resource, scale: Callable[[int, int], float], fn: FunctionContainer,
characterization: FunctionCharacterization):
self.worker_threads = queue.capacity
self.queue = queue
self.scale = scale
self.deployment = fn
self.delay = 0
self.characterization = characterization
def deploy(self, env: Environment, replica: FunctionReplica):
yield from docker_pull(env, replica.image, replica.node.ether_node)
def setup(self, env: Environment, replica: FunctionReplica):
image = replica.pod.spec.containers[0].image
if 'inference' in image:
yield from simulate_data_download(env, replica)
def invoke(self, env: Environment, replica: FunctionReplica, request: FunctionRequest):
token = self.queue.request()
t_wait_start = env.now
yield token # wait for access
t_wait_end = env.now
t_fet_start = env.now
# because of GIL and Threads, we can easily estimate the additional time caused by concurrent requests to the
# same Function
factor = max(1, self.scale(self.queue.count, self.queue.capacity))
try:
fet = self.characterization.sample_fet(replica.node.name)
if fet is None:
logging.error(f"FET for node {replica.node.name} for function {self.deployment.image} was not found")
raise ValueError(f'{replica.node.name}')
fet = float(fet) * factor
image = replica.pod.spec.containers[0].image
if 'preprocessing' in image or 'training' in image:
yield from simulate_data_download(env, replica)
start = env.now
call = FunctionCall(request, replica, start)
replica.node.all_requests.append(call)
yield env.timeout(fet)
if 'preprocessing' in image or 'training' in image:
yield from simulate_data_upload(env, replica)
t_fet_end = env.now
env.metrics.log_fet(request.name, replica.image, replica.node.name, t_fet_start, t_fet_end,
id(replica), request.request_id, t_wait_start=t_wait_start, t_wait_end=t_wait_end)
replica.node.set_end(request.request_id, t_fet_end)
except KeyError:
pass
self.queue.release(token)
class InterferenceAwarePythonHttpSimulator(FunctionSimulator):
def __init__(self, queue: Resource, scale: Callable[[int, int], float], fn: FunctionContainer,
characterization: FunctionCharacterization):
self.worker_threads = queue.capacity
self.queue = queue
self.scale = scale
self.deployment = fn
self.delay = 0
self.characterization = characterization
def deploy(self, env: Environment, replica: FunctionReplica):
yield from docker_pull(env, replica.image, replica.node.ether_node)
def setup(self, env: Environment, replica: FunctionReplica):
image = replica.pod.spec.containers[0].image
if 'inference' in image:
yield from simulate_data_download(env, replica)
def invoke(self, env: Environment, replica: FunctionReplica, request: FunctionRequest):
token = self.queue.request()
t_wait_start = env.now
yield token # wait for access
t_wait_end = env.now
t_fet_start = env.now
# because of GIL and Threads, we can easily estimate the additional time caused by concurrent requests to the
# same Function
factor = max(1, self.scale(self.queue.count, self.queue.capacity))
try:
fet = self.characterization.sample_fet(replica.node.name)
if fet is None:
logging.error(f"FET for node {replica.node.name} for function {self.deployment.image} was not found")
raise ValueError(f'{replica.node.name}')
fet = float(fet) * factor
image = replica.pod.spec.containers[0].image
if 'preprocessing' in image or 'training' in image:
yield from simulate_data_download(env, replica)
start = env.now
call = FunctionCall(request, replica, start)
replica.node.all_requests.append(call)
yield env.timeout(fet)
# add degradation
end = env.now
degradation = replica.node.estimate_degradation(self.characterization.resource_oracle, start, end)
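            # The sampled FET is stretched by the node's estimated interference; only the
            # extra time beyond the original FET is added as an additional delay.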
delay = max(0, (fet * degradation) - fet)
yield env.timeout(delay)
if 'preprocessing' in image or 'training' in image:
yield from simulate_data_upload(env, replica)
t_fet_end = env.now
env.metrics.log_fet(request.name, replica.image, replica.node.name, t_fet_start, t_fet_end,
t_wait_start, t_wait_end, degradation,
id(replica))
replica.node.set_end(request.request_id, t_fet_end)
except KeyError:
pass
self.queue.release(token)
| 41.611374 | 117 | 0.661845 |
e2b1d5196d38654ddbd2a0aee053068505b359f7
| 2,694 |
py
|
Python
|
video_behavior_tracking/track_behavior.py
|
droumis/video_behavior_tracking
|
98b031a4a8dda1c0112f823b661ebad2feeaa43e
|
[
"MIT"
] | 1 |
2018-10-01T23:33:44.000Z
|
2018-10-01T23:33:44.000Z
|
video_behavior_tracking/track_behavior.py
|
droumis/video_behavior_tracking
|
98b031a4a8dda1c0112f823b661ebad2feeaa43e
|
[
"MIT"
] | 1 |
2020-01-07T01:55:30.000Z
|
2020-01-07T01:55:30.000Z
|
video_behavior_tracking/track_behavior.py
|
droumis/video_behavior_tracking
|
98b031a4a8dda1c0112f823b661ebad2feeaa43e
|
[
"MIT"
] | 2 |
2018-11-15T00:14:35.000Z
|
2020-11-16T07:50:09.000Z
|
import glob
import json
from argparse import ArgumentParser
from logging import getLogger
from video_behavior_tracking import (convert_to_loren_frank_data_format,
detect_LEDs, extract_position_data,
make_video, position_dataframe,
save_loren_frank_data,
video_filename_to_epoch_key)
logger = getLogger(__name__)
def main(args=None):
parser = ArgumentParser()
parser.add_argument('video_filename', type=str, help='Path to file')
parser.add_argument('config_file', type=str, help='Path to file')
parser.add_argument('--save_path', type=str,
help='Path to save file directory')
parser.add_argument('--save_video', action='store_true',
help='Save video containing extracted position')
parser.add_argument('--disable_progressbar', action='store_true',
help='Disables the progress bar')
args = parser.parse_args(args)
with open(args.config_file) as data_file:
config = json.load(data_file)
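    # The JSON config is expected to provide at least 'cm_to_pixels' and 'date_to_day',
    # which are used below for unit conversion and for mapping videos to epoch keys.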
for video_filename in glob.glob(args.video_filename):
logger.info(f'\nProcessing {video_filename}')
logger.info('\t.Detecting LED positions...')
centroids, frame_rate, frame_size, n_frames = detect_LEDs(
video_filename, disable_progressbar=args.disable_progressbar)
logger.info('\tFiltering and smoothing data...')
position = extract_position_data(
centroids, frame_rate, frame_size, n_frames,
config['cm_to_pixels'],
disable_progressbar=args.disable_progressbar)
logger.info('\tSaving data...')
position_info = position_dataframe(position, start_time=0.0)
save_data = convert_to_loren_frank_data_format(
position_info, config['cm_to_pixels'])
epoch_key = video_filename_to_epoch_key(
video_filename, config['date_to_day'])
save_loren_frank_data(epoch_key, 'pos', save_data,
save_path=args.save_path)
if args.save_video:
logger.info('\tSaving video...')
animal, day, epoch = epoch_key
output_video_filename = f'{animal}_{day:02}_{epoch:02}_pos.avi'
make_video(video_filename, position.centroids,
position.head_position_mean,
position.head_orientation_mean,
output_video_filename=output_video_filename,
cm_to_pixels=config['cm_to_pixels'],
disable_progressbar=args.disable_progressbar)
| 44.9 | 75 | 0.629176 |
9c0ec0508501394467f3f067338f5029b07116ab
| 11,688 |
py
|
Python
|
train.py
|
FrankCAN/GAPointNet
|
9fb9fd4577950b29f996baa5135927e13df45408
|
[
"MIT"
] | 29 |
2019-10-27T07:23:58.000Z
|
2022-03-12T02:31:32.000Z
|
train.py
|
jtpils/ShufflePointNet
|
30cb1ab1e43ef042b8fbe3d9b6f82312320dd967
|
[
"MIT"
] | 8 |
2019-10-28T07:02:09.000Z
|
2021-04-06T04:06:33.000Z
|
train.py
|
jtpils/ShufflePointNet
|
30cb1ab1e43ef042b8fbe3d9b6f82312320dd967
|
[
"MIT"
] | 4 |
2019-11-12T15:28:58.000Z
|
2021-03-17T10:42:12.000Z
|
import argparse
import math
import h5py
import numpy as np
import tensorflow as tf
import socket
import importlib
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, 'models'))
sys.path.append(os.path.join(BASE_DIR, 'utils'))
import provider
# import tf_util
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='network', help='Model name: dgcnn')
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--num_point', type=int, default=1024, help='Point Number [256/512/1024/2048] [default: 1024]')
parser.add_argument('--max_epoch', type=int, default=250, help='Epoch to run [default: 250]')
parser.add_argument('--batch_size', type=int, default=32, help='Batch Size during training [default: 32]')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
parser.add_argument('--momentum', type=float, default=0.9, help='Initial learning rate [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]')
parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.8]')
FLAGS = parser.parse_args()
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate
MODEL = importlib.import_module(FLAGS.model) # import network module
MODEL_FILE = os.path.join(BASE_DIR, 'models', FLAGS.model+'.py')
LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR): os.makedirs(LOG_DIR)
os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def
os.system('cp train.py %s' % (LOG_DIR)) # bkp of train procedure
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
MAX_NUM_POINT = 2048
NUM_CLASSES = 40
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99
accuracy_max = []
HOSTNAME = socket.gethostname()
# ModelNet40 official train/test split
TRAIN_FILES = provider.getDataFiles( \
os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/train_files.txt'))
TEST_FILES = provider.getDataFiles(\
os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/test_files.txt'))
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
def get_learning_rate(batch):
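    # Exponentially decay the base learning rate every DECAY_STEP processed samples
    # (staircase schedule), with a floor of 1e-5.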
learning_rate = tf.train.exponential_decay(
BASE_LEARNING_RATE, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
DECAY_STEP, # Decay step.
DECAY_RATE, # Decay rate.
staircase=True)
learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!
return learning_rate
def get_bn_decay(batch):
bn_momentum = tf.train.exponential_decay(
BN_INIT_DECAY,
batch*BATCH_SIZE,
BN_DECAY_DECAY_STEP,
BN_DECAY_DECAY_RATE,
staircase=True)
bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)
return bn_decay
def train():
with tf.Graph().as_default():
with tf.device('/gpu:'+str(GPU_INDEX)):
pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
is_training_pl = tf.placeholder(tf.bool, shape=())
print(is_training_pl)
# Note the global_step=batch parameter to minimize.
# That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.
batch = tf.Variable(0)
bn_decay = get_bn_decay(batch)
tf.summary.scalar('bn_decay', bn_decay)
# Get model and loss
pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay)
loss = MODEL.get_loss(pred, labels_pl, end_points)
tf.summary.scalar('loss', loss)
correct = tf.equal(tf.argmax(pred, 1), tf.to_int64(labels_pl))
accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(BATCH_SIZE)
tf.summary.scalar('accuracy', accuracy)
# Get training operator
learning_rate = get_learning_rate(batch)
tf.summary.scalar('learning_rate', learning_rate)
if OPTIMIZER == 'momentum':
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
elif OPTIMIZER == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(loss, global_step=batch)
# Add ops to save and restore all the variables.
saver = tf.train.Saver(max_to_keep=250)
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess = tf.Session(config=config)
# Add summary writers
#merged = tf.merge_all_summaries()
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
sess.graph)
test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'))
# Init variables
init = tf.global_variables_initializer()
# To fix the bug introduced in TF 0.12.1 as in
# http://stackoverflow.com/questions/41543774/invalidargumenterror-for-tensor-bool-tensorflow-0-12-1
#sess.run(init)
sess.run(init, {is_training_pl: True})
ops = {'pointclouds_pl': pointclouds_pl,
'labels_pl': labels_pl,
'is_training_pl': is_training_pl,
'pred': pred,
'loss': loss,
'train_op': train_op,
'merged': merged,
'step': batch}
for epoch in range(MAX_EPOCH):
log_string('**** EPOCH %03d ****' % (epoch))
sys.stdout.flush()
train_one_epoch(sess, ops, train_writer, epoch)
avg_accuracy = eval_one_epoch(sess, ops, test_writer)
# Save the variables to disk.
# save_path = saver.save(sess, os.path.join(LOG_DIR, "epoch_" + str(epoch) + "_model.ckpt"))
# log_string("Model saved in file: %s" % save_path)
if avg_accuracy > 0.917:
save_path = saver.save(sess, os.path.join(LOG_DIR, "epoch_" + str(epoch) + "_model.ckpt"))
log_string("Model saved in file: %s" % save_path)
def train_one_epoch(sess, ops, train_writer, epoch):
""" ops: dict mapping from string to tf ops """
is_training = True
# Shuffle train files
train_file_idxs = np.arange(0, len(TRAIN_FILES))
np.random.shuffle(train_file_idxs)
for fn in range(len(TRAIN_FILES)):
log_string('----' + str(fn) + '-----')
current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]])
current_data = current_data[:, 0:NUM_POINT, :]
current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
current_label = np.squeeze(current_label)
file_size = current_data.shape[0]
num_batches = file_size // BATCH_SIZE
total_correct = 0
total_seen = 0
loss_sum = 0
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
# Augment batched point clouds by rotation and jittering
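            # Augmentation is skipped for the last 49 epochs (epoch >= MAX_EPOCH - 49),
            # where the raw point clouds are fed instead.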
if epoch < MAX_EPOCH - 49:
rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
jittered_data = provider.jitter_point_cloud(rotated_data)
jittered_data = provider.random_scale_point_cloud(jittered_data)
jittered_data = provider.rotate_perturbation_point_cloud(jittered_data)
jittered_data = provider.shift_point_cloud(jittered_data)
else:
jittered_data = current_data[start_idx:end_idx, :, :]
feed_dict = {ops['pointclouds_pl']: jittered_data,
ops['labels_pl']: current_label[start_idx:end_idx],
ops['is_training_pl']: is_training,}
summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)
train_writer.add_summary(summary, step)
pred_val = np.argmax(pred_val, 1)
correct = np.sum(pred_val == current_label[start_idx:end_idx])
total_correct += correct
total_seen += BATCH_SIZE
loss_sum += loss_val
log_string('mean loss: %f' % (loss_sum / float(num_batches)))
log_string('accuracy: %f' % (total_correct / float(total_seen)))
def eval_one_epoch(sess, ops, test_writer):
""" ops: dict mapping from string to tf ops """
is_training = False
total_correct = 0
total_seen = 0
loss_sum = 0
total_seen_class = [0 for _ in range(NUM_CLASSES)]
total_correct_class = [0 for _ in range(NUM_CLASSES)]
for fn in range(len(TEST_FILES)):
log_string('----' + str(fn) + '-----')
current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
current_data = current_data[:,0:NUM_POINT,:]
current_label = np.squeeze(current_label)
file_size = current_data.shape[0]
num_batches = file_size // BATCH_SIZE
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
ops['labels_pl']: current_label[start_idx:end_idx],
ops['is_training_pl']: is_training}
summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
ops['loss'], ops['pred']], feed_dict=feed_dict)
pred_val = np.argmax(pred_val, 1)
correct = np.sum(pred_val == current_label[start_idx:end_idx])
total_correct += correct
total_seen += BATCH_SIZE
loss_sum += (loss_val*BATCH_SIZE)
for i in range(start_idx, end_idx):
l = current_label[i]
total_seen_class[l] += 1
total_correct_class[l] += (pred_val[i-start_idx] == l)
log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float64))))
accuracy_max.append(total_correct / float(total_seen))
return total_correct / float(total_seen)
if __name__ == "__main__":
train()
log_string('maximum accuracy: %f' % (np.max(accuracy_max)))
LOG_FOUT.close()
| 42.043165 | 125 | 0.629363 |
e3bd827aed69bcbde357b51a8c78960d12ef4ce8
| 1,059 |
py
|
Python
|
facedetectr.py
|
vijanipiyawardana/kidsplaymate
|
5821eb0c6c63661de80a0348aba17d99e5b992f5
|
[
"MIT"
] | null | null | null |
facedetectr.py
|
vijanipiyawardana/kidsplaymate
|
5821eb0c6c63661de80a0348aba17d99e5b992f5
|
[
"MIT"
] | null | null | null |
facedetectr.py
|
vijanipiyawardana/kidsplaymate
|
5821eb0c6c63661de80a0348aba17d99e5b992f5
|
[
"MIT"
] | null | null | null |
import cv2
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')
# Defining a function that will do the detections
def detect(gray, frame):
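    # Draws a rectangle on `frame` for every detected face and returns the grayscale ROI
    # of the last face found (or None when no face is detected).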
gray1 = None
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = frame[y:y+h, x:x+w]
gray1 = roi_gray
return gray1
# Doing some Face Recognition with the webcam
video_capture = cv2.VideoCapture(0)
i = 0
while (i < 300):
print('vijani ' + str(i))
_, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
face = detect(gray, frame)
if face is not None:
print('/home/vijani/Desktop/movetopi/images/img'+ str(i) + '.jpg')
cv2.imwrite('/home/vijani/Desktop/movetopi/images/img'+ str(i) + '.jpg', face)
i = i + 1
'''
cv2.imshow('Video', face)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
    '''
video_capture.release()
cv2.destroyAllWindows()
| 27.868421 | 86 | 0.621341 |
8666c531d61415e8488993d4b92ddd3aa1a73dde
| 435 |
py
|
Python
|
py/guess-number-higher-or-lower-ii.py
|
ckclark/leetcode
|
844c6f18d06dcb397db76436e5f4b8ddcb1beddc
|
[
"Apache-2.0"
] | null | null | null |
py/guess-number-higher-or-lower-ii.py
|
ckclark/leetcode
|
844c6f18d06dcb397db76436e5f4b8ddcb1beddc
|
[
"Apache-2.0"
] | null | null | null |
py/guess-number-higher-or-lower-ii.py
|
ckclark/leetcode
|
844c6f18d06dcb397db76436e5f4b8ddcb1beddc
|
[
"Apache-2.0"
] | null | null | null |
class Solution(object):
def getMoneyAmount(self, n, table=dict()):
"""
:type n: int
:rtype: int
"""
def dp(L, U):
if (L, U) not in table:
if L + 1 >= U:
table[L, U] = 0
else:
                    table[L, U] = min(j + max(dp(L, j), dp(j + 1, U)) for j in range(L, U))
return table[L, U]
return dp(1, n + 1)
| 29 | 92 | 0.37931 |
181e90a23883297619da8c5bf92fa5f3988bd2e7
| 6,355 |
py
|
Python
|
utils/data_generator.py
|
SaverioVad/HAD_Net
|
67b2dfb6bae5b971c86b42d747975684d96e36b1
|
[
"Net-SNMP",
"Xnet"
] | 13 |
2021-04-01T10:42:21.000Z
|
2022-03-17T07:47:29.000Z
|
utils/data_generator.py
|
SaverioVad/HAD_Net
|
67b2dfb6bae5b971c86b42d747975684d96e36b1
|
[
"Net-SNMP",
"Xnet"
] | 2 |
2021-07-08T00:49:50.000Z
|
2021-11-26T00:02:17.000Z
|
utils/data_generator.py
|
SaverioVad/HAD_Net
|
67b2dfb6bae5b971c86b42d747975684d96e36b1
|
[
"Net-SNMP",
"Xnet"
] | 1 |
2021-09-08T13:32:38.000Z
|
2021-09-08T13:32:38.000Z
|
from torch.utils.data import Dataset, DataLoader
import torch
import os
import numpy as np
# Data generator class
class data_generator(Dataset):
def __init__(self, list_ids, root_path, semantics):
# Store important information.
self.list_ids = list_ids
self.root_path = root_path
self.semantics = semantics
def __len__(self):
return len(self.list_ids)
def __getitem__(self, idx):
# Extract semantic info.
set_x, set_y, set_z = self.semantics[0]
num_teacher_mods = self.semantics[1]
num_student_mods = self.semantics[2]
# Data folder path.
root_path = self.root_path
# Get folder path for this sample.
sample = (self.list_ids)[idx]
sample_path = os.path.join(root_path, sample)
# Get the list of modalities/sequences.
H_or_L = os.listdir(sample_path)
for hl in H_or_L:
if "HGG" in hl:
full_path = os.path.join(sample_path, "HGG")
elif "LGG" in hl:
full_path = os.path.join(sample_path, "LGG")
else:
raise Exception("ERROR: Empty folder.")
# Get and sort modalities/sequences.
modalities = os.listdir(full_path)
modalities = sorted(modalities)
# Holds the modalities/sequences.
x_teacher = []
x_student = []
##########################################
############### BRAIN MASK ###############
##########################################
# Get the brain mask.
mask_path = os.path.join(full_path, "mask.npy")
mask = np.load(mask_path)
unpadded_mask = np.copy(mask)
# Determine the required padding.
x_init,y_init,z_init = np.shape(mask)
x_diff = set_x - x_init
y_diff = set_y - y_init
z_diff = set_z - z_init
x_start = x_diff//2
x_end = x_diff - x_start
y_start = y_diff//2
y_end = y_diff - y_start
z_start = z_diff//2
z_end = z_diff - z_start
# Pad the brain mask.
mask = np.pad(mask,((x_start,x_end),(y_start,y_end),(z_start,z_end)))
x_fin, y_fin, z_fin = np.shape(mask)
if ((x_fin != set_x) or (y_fin != set_y) or (z_fin != set_z)):
raise Exception("Error: Wrong size after padding the brain mask.")
# Convert brain mask to tensor.
mask = torch.from_numpy(np.int16(mask))
##########################################
################# SEG MAP ################
##########################################
# Get the segmentation map.
seg_path = os.path.join(full_path, "seg.npy")
seg = np.load(seg_path)
seg[np.where(seg==4)] = 3
# Pad the ground truth segmentation map.
seg = np.pad(seg,((x_start,x_end),(y_start,y_end),(z_start,z_end)))
x_fin, y_fin, z_fin = np.shape(seg)
if ((x_fin != set_x) or (y_fin != set_y) or (z_fin != set_z)):
raise Exception("Error: Wrong size after padding the segmentation map.")
# Convert the segmentation map to tensor.
y = torch.from_numpy(np.int16(seg))
##########################################
################### MODS #################
##########################################
# Each folder contains 4 modalities/sequences, the brain mask, and the segmentation ground truth.
for modality_name in modalities:
# We only want the modalities/sequences (i.e., not the brain mask or the segmentation map).
if ".npy" in modality_name:
if (("mask" not in modality_name) and ("seg" not in modality_name)):
# Get modality.
mod_path = os.path.join(full_path, modality_name)
modality = np.load(mod_path)
modality = np.float32(modality)
# Normalize the modalities/sequences so that they have 0 mean and unit standard deviation.
brain_mask = np.where(unpadded_mask==1)
mu = modality[brain_mask].mean()
sigma = modality[brain_mask].std()
modality = (modality - mu) / sigma
modality = np.clip(modality, np.min(modality),3)
modality = (modality + (-np.min(modality))) / (3-np.min(modality))
# Pad the modality/sequence.
modality = np.pad(modality,((x_start,x_end),(y_start,y_end),(z_start,z_end)), 'constant', constant_values=(0))
x_fin, y_fin, z_fin = np.shape(modality)
if ((x_fin != set_x) or (y_fin != set_y) or (z_fin != set_z)):
raise Exception("Error: Wrong size after padding the modality.")
# Save the modality/sequence as a tensor.
modality = torch.from_numpy(modality)
modality = modality.unsqueeze(0)
# Append the modality/sequence to the list of teacher modalities/sequences.
x_teacher.append(modality)
# Append the modality/sequence to the list of student modalities/sequences, if it is not post-contrast.
if ("t1c" not in modality_name):
x_student.append(modality)
# Check lengths of the modality/sequence lists.
if(len(x_teacher)!=num_teacher_mods):
raise Exception("ERROR: length of x_teacher is not", num_teacher_mods,"! It is ", len(x_teacher),"!")
if(len(x_student)!=num_student_mods):
raise Exception("ERROR: length of x_student is not", num_student_mods,"! It is ", len(x_student),"!")
# Concatenate the input modalities.
x_cat_teacher = torch.cat((x_teacher[0],x_teacher[1],x_teacher[2],x_teacher[3]), dim=0)
x_cat_student = torch.cat((x_student[0],x_student[1],x_student[2]), dim=0)
# Return the inputs and target.
return(x_cat_teacher, x_cat_student, y, sample)
| 41.266234 | 130 | 0.521951 |
d9d3c7ddb7f04067a59f2832d3be81864f43b6e7
| 1,790 |
py
|
Python
|
awesome-python/projects/python-auto-login/core.py
|
fadhilsaheer/projects
|
976e4c175bd81563ec82b8445e9735c82146641c
|
[
"MIT"
] | 3 |
2021-02-08T09:45:54.000Z
|
2021-09-30T13:33:50.000Z
|
awesome-python/projects/python-auto-login/core.py
|
fadhilsaheer/projects
|
976e4c175bd81563ec82b8445e9735c82146641c
|
[
"MIT"
] | null | null | null |
awesome-python/projects/python-auto-login/core.py
|
fadhilsaheer/projects
|
976e4c175bd81563ec82b8445e9735c82146641c
|
[
"MIT"
] | 1 |
2021-05-15T09:25:52.000Z
|
2021-05-15T09:25:52.000Z
|
#importing module
import pyfiglet
#home page
def home_page():
home = pyfiglet.figlet_format("AUTOMATE")
print(home)
#option page
def option_page():
print("")
print("Gmail Login : a \n")
print("Facebook Login : b\n")
print("Instagram Login : c\n")
print("Twitter Login : d \n")
print("Github Login : e \n\n")
print("Exit : 1")
print("")
#pages
class page:
    # Used as a simple namespace: the helpers below are called directly on the class
    # (e.g. page.google(username, password)), so no per-instance state is needed.
def google(username, password):
import auth
auth.google(username, password)
def facebook(username, password):
import auth
auth.facebook(username, password)
def instagram(username, password):
import auth
auth.instagram(username, password)
def twitter(username, password):
import auth
auth.twitter(username, password)
def github(username, password):
import auth
auth.github(username, password)
def ask_input(keyword):
username = input(f"Enter Your {keyword} :) ")
password = input("Enter Your Password :) ")
array = [username, password]
return array
#checking
def option(opt):
if opt == "1":
quit()
if opt == "a":
data = ask_input("Gmail Id")
page.google(data[0], data[1])
if opt == "b":
data = ask_input("Facebook Id Or Email")
page.facebook(data[0], data[1])
if opt == "c":
data = ask_input("Instagram Id Or Email")
page.instagram(data[0], data[1])
if opt == "d":
data = ask_input("Twitter Id Or Email")
page.twitter(data[0], data[1])
if opt == "e":
data = ask_input("Githbum Id Or Email")
page.github(data[0], data[1])
else:
return False
| 20.340909 | 49 | 0.585475 |
05bfb490cb8a38d8a0b57e33371036d872227ada
| 14,641 |
py
|
Python
|
homeassistant/components/google/__init__.py
|
sebr/home-assistant
|
88ed2f3b3ed5a52ad5c94e170d981520940e977e
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/google/__init__.py
|
sebr/home-assistant
|
88ed2f3b3ed5a52ad5c94e170d981520940e977e
|
[
"Apache-2.0"
] | 5 |
2022-03-01T06:32:33.000Z
|
2022-03-31T07:06:35.000Z
|
homeassistant/components/google/__init__.py
|
sebr/home-assistant
|
88ed2f3b3ed5a52ad5c94e170d981520940e977e
|
[
"Apache-2.0"
] | null | null | null |
"""Support for Google - Calendar Event Devices."""
from datetime import datetime, timedelta, timezone
from enum import Enum
import logging
import os
from googleapiclient import discovery as google_discovery
import httplib2
from oauth2client.client import (
FlowExchangeError,
OAuth2DeviceCodeError,
OAuth2WebServerFlow,
)
from oauth2client.file import Storage
import voluptuous as vol
from voluptuous.error import Error as VoluptuousError
import yaml
from homeassistant.components import persistent_notification
from homeassistant.const import (
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
CONF_DEVICE_ID,
CONF_ENTITIES,
CONF_NAME,
CONF_OFFSET,
Platform,
)
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import generate_entity_id
from homeassistant.helpers.event import track_utc_time_change
from homeassistant.helpers.typing import ConfigType
from homeassistant.util import convert
_LOGGER = logging.getLogger(__name__)
DOMAIN = "google"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
CONF_TRACK_NEW = "track_new_calendar"
CONF_CAL_ID = "cal_id"
CONF_TRACK = "track"
CONF_SEARCH = "search"
CONF_IGNORE_AVAILABILITY = "ignore_availability"
CONF_MAX_RESULTS = "max_results"
CONF_CALENDAR_ACCESS = "calendar_access"
DEFAULT_CONF_TRACK_NEW = True
DEFAULT_CONF_OFFSET = "!!"
EVENT_CALENDAR_ID = "calendar_id"
EVENT_DESCRIPTION = "description"
EVENT_END_CONF = "end"
EVENT_END_DATE = "end_date"
EVENT_END_DATETIME = "end_date_time"
EVENT_IN = "in"
EVENT_IN_DAYS = "days"
EVENT_IN_WEEKS = "weeks"
EVENT_START_CONF = "start"
EVENT_START_DATE = "start_date"
EVENT_START_DATETIME = "start_date_time"
EVENT_SUMMARY = "summary"
EVENT_TYPES_CONF = "event_types"
NOTIFICATION_ID = "google_calendar_notification"
NOTIFICATION_TITLE = "Google Calendar Setup"
GROUP_NAME_ALL_CALENDARS = "Google Calendar Sensors"
SERVICE_SCAN_CALENDARS = "scan_for_calendars"
SERVICE_FOUND_CALENDARS = "found_calendar"
SERVICE_ADD_EVENT = "add_event"
DATA_INDEX = "google_calendars"
YAML_DEVICES = f"{DOMAIN}_calendars.yaml"
TOKEN_FILE = f".{DOMAIN}.token"
class FeatureAccess(Enum):
"""Class to represent different access scopes."""
read_only = "https://www.googleapis.com/auth/calendar.readonly"
read_write = "https://www.googleapis.com/auth/calendar"
def __init__(self, scope: str) -> None:
"""Init instance."""
self._scope = scope
@property
def scope(self) -> str:
"""Google calendar scope for the feature."""
return self._scope
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_CLIENT_ID): cv.string,
vol.Required(CONF_CLIENT_SECRET): cv.string,
vol.Optional(CONF_TRACK_NEW): cv.boolean,
vol.Optional(CONF_CALENDAR_ACCESS, default="read_write"): cv.enum(
FeatureAccess
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
_SINGLE_CALSEARCH_CONFIG = vol.All(
cv.deprecated(CONF_MAX_RESULTS),
vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_DEVICE_ID): cv.string,
vol.Optional(CONF_IGNORE_AVAILABILITY, default=True): cv.boolean,
vol.Optional(CONF_OFFSET): cv.string,
vol.Optional(CONF_SEARCH): cv.string,
vol.Optional(CONF_TRACK): cv.boolean,
vol.Optional(CONF_MAX_RESULTS): cv.positive_int, # Now unused
}
),
)
DEVICE_SCHEMA = vol.Schema(
{
vol.Required(CONF_CAL_ID): cv.string,
vol.Required(CONF_ENTITIES, None): vol.All(
cv.ensure_list, [_SINGLE_CALSEARCH_CONFIG]
),
},
extra=vol.ALLOW_EXTRA,
)
_EVENT_IN_TYPES = vol.Schema(
{
vol.Exclusive(EVENT_IN_DAYS, EVENT_TYPES_CONF): cv.positive_int,
vol.Exclusive(EVENT_IN_WEEKS, EVENT_TYPES_CONF): cv.positive_int,
}
)
ADD_EVENT_SERVICE_SCHEMA = vol.Schema(
{
vol.Required(EVENT_CALENDAR_ID): cv.string,
vol.Required(EVENT_SUMMARY): cv.string,
vol.Optional(EVENT_DESCRIPTION, default=""): cv.string,
vol.Exclusive(EVENT_START_DATE, EVENT_START_CONF): cv.date,
vol.Exclusive(EVENT_END_DATE, EVENT_END_CONF): cv.date,
vol.Exclusive(EVENT_START_DATETIME, EVENT_START_CONF): cv.datetime,
vol.Exclusive(EVENT_END_DATETIME, EVENT_END_CONF): cv.datetime,
vol.Exclusive(EVENT_IN, EVENT_START_CONF, EVENT_END_CONF): _EVENT_IN_TYPES,
}
)
def do_authentication(hass, hass_config, config):
"""Notify user of actions and authenticate.
Notify user of user_code and verification_url then poll
until we have an access token.
"""
oauth = OAuth2WebServerFlow(
client_id=config[CONF_CLIENT_ID],
client_secret=config[CONF_CLIENT_SECRET],
scope=config[CONF_CALENDAR_ACCESS].scope,
redirect_uri="Home-Assistant.io",
)
try:
dev_flow = oauth.step1_get_device_and_user_codes()
except OAuth2DeviceCodeError as err:
persistent_notification.create(
hass,
f"Error: {err}<br />You will need to restart hass after fixing." "",
title=NOTIFICATION_TITLE,
notification_id=NOTIFICATION_ID,
)
return False
persistent_notification.create(
hass,
(
f"In order to authorize Home-Assistant to view your calendars "
f'you must visit: <a href="{dev_flow.verification_url}" target="_blank">{dev_flow.verification_url}</a> and enter '
f"code: {dev_flow.user_code}"
),
title=NOTIFICATION_TITLE,
notification_id=NOTIFICATION_ID,
)
def step2_exchange(now):
"""Keep trying to validate the user_code until it expires."""
_LOGGER.debug("Attempting to validate user code")
# For some reason, oauth.step1_get_device_and_user_codes() returns a datetime
# object without tzinfo. For the comparison below to work, it needs one.
user_code_expiry = dev_flow.user_code_expiry.replace(tzinfo=timezone.utc)
if now >= user_code_expiry:
persistent_notification.create(
hass,
"Authentication code expired, please restart "
"Home-Assistant and try again",
title=NOTIFICATION_TITLE,
notification_id=NOTIFICATION_ID,
)
listener()
return
try:
credentials = oauth.step2_exchange(device_flow_info=dev_flow)
except FlowExchangeError:
# not ready yet, call again
return
storage = Storage(hass.config.path(TOKEN_FILE))
storage.put(credentials)
do_setup(hass, hass_config, config)
listener()
persistent_notification.create(
hass,
(
f"We are all setup now. Check {YAML_DEVICES} for calendars that have "
f"been found"
),
title=NOTIFICATION_TITLE,
notification_id=NOTIFICATION_ID,
)
listener = track_utc_time_change(
hass, step2_exchange, second=range(0, 60, dev_flow.interval)
)
return True
def setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the Google platform."""
if DATA_INDEX not in hass.data:
hass.data[DATA_INDEX] = {}
if not (conf := config.get(DOMAIN, {})):
# component is set up by tts platform
return True
token_file = hass.config.path(TOKEN_FILE)
if not os.path.isfile(token_file):
_LOGGER.debug("Token file does not exist, authenticating for first time")
do_authentication(hass, config, conf)
else:
if not check_correct_scopes(hass, token_file, conf):
_LOGGER.debug("Existing scopes are not sufficient, re-authenticating")
do_authentication(hass, config, conf)
else:
do_setup(hass, config, conf)
return True
def check_correct_scopes(hass, token_file, config):
"""Check for the correct scopes in file."""
creds = Storage(token_file).get()
if not creds or not creds.scopes:
return False
target_scope = config[CONF_CALENDAR_ACCESS].scope
return target_scope in creds.scopes
def setup_services(
hass, hass_config, config, track_new_found_calendars, calendar_service
):
"""Set up the service listeners."""
def _found_calendar(call: ServiceCall) -> None:
"""Check if we know about a calendar and generate PLATFORM_DISCOVER."""
calendar = get_calendar_info(hass, call.data)
if hass.data[DATA_INDEX].get(calendar[CONF_CAL_ID]) is not None:
return
hass.data[DATA_INDEX].update({calendar[CONF_CAL_ID]: calendar})
update_config(
hass.config.path(YAML_DEVICES), hass.data[DATA_INDEX][calendar[CONF_CAL_ID]]
)
discovery.load_platform(
hass,
Platform.CALENDAR,
DOMAIN,
hass.data[DATA_INDEX][calendar[CONF_CAL_ID]],
hass_config,
)
hass.services.register(DOMAIN, SERVICE_FOUND_CALENDARS, _found_calendar)
def _scan_for_calendars(call: ServiceCall) -> None:
"""Scan for new calendars."""
service = calendar_service.get()
cal_list = service.calendarList()
calendars = cal_list.list().execute()["items"]
for calendar in calendars:
calendar["track"] = track_new_found_calendars
hass.services.call(DOMAIN, SERVICE_FOUND_CALENDARS, calendar)
hass.services.register(DOMAIN, SERVICE_SCAN_CALENDARS, _scan_for_calendars)
def _add_event(call: ServiceCall) -> None:
"""Add a new event to calendar."""
service = calendar_service.get()
start = {}
end = {}
if EVENT_IN in call.data:
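            # "in" events are all-day events placed N days or N weeks from now, lasting one day.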
if EVENT_IN_DAYS in call.data[EVENT_IN]:
now = datetime.now()
start_in = now + timedelta(days=call.data[EVENT_IN][EVENT_IN_DAYS])
end_in = start_in + timedelta(days=1)
start = {"date": start_in.strftime("%Y-%m-%d")}
end = {"date": end_in.strftime("%Y-%m-%d")}
elif EVENT_IN_WEEKS in call.data[EVENT_IN]:
now = datetime.now()
start_in = now + timedelta(weeks=call.data[EVENT_IN][EVENT_IN_WEEKS])
end_in = start_in + timedelta(days=1)
start = {"date": start_in.strftime("%Y-%m-%d")}
end = {"date": end_in.strftime("%Y-%m-%d")}
elif EVENT_START_DATE in call.data:
start = {"date": str(call.data[EVENT_START_DATE])}
end = {"date": str(call.data[EVENT_END_DATE])}
elif EVENT_START_DATETIME in call.data:
start_dt = str(
call.data[EVENT_START_DATETIME].strftime("%Y-%m-%dT%H:%M:%S")
)
end_dt = str(call.data[EVENT_END_DATETIME].strftime("%Y-%m-%dT%H:%M:%S"))
start = {"dateTime": start_dt, "timeZone": str(hass.config.time_zone)}
end = {"dateTime": end_dt, "timeZone": str(hass.config.time_zone)}
event = {
"summary": call.data[EVENT_SUMMARY],
"description": call.data[EVENT_DESCRIPTION],
"start": start,
"end": end,
}
service_data = {"calendarId": call.data[EVENT_CALENDAR_ID], "body": event}
event = service.events().insert(**service_data).execute()
# Only expose the add event service if we have the correct permissions
if config.get(CONF_CALENDAR_ACCESS) is FeatureAccess.read_write:
hass.services.register(
DOMAIN, SERVICE_ADD_EVENT, _add_event, schema=ADD_EVENT_SERVICE_SCHEMA
)
return True
def do_setup(hass, hass_config, config):
"""Run the setup after we have everything configured."""
_LOGGER.debug("Setting up integration")
# Load calendars the user has configured
hass.data[DATA_INDEX] = load_config(hass.config.path(YAML_DEVICES))
calendar_service = GoogleCalendarService(hass.config.path(TOKEN_FILE))
track_new_found_calendars = convert(
config.get(CONF_TRACK_NEW), bool, DEFAULT_CONF_TRACK_NEW
)
setup_services(
hass, hass_config, config, track_new_found_calendars, calendar_service
)
for calendar in hass.data[DATA_INDEX].values():
discovery.load_platform(hass, Platform.CALENDAR, DOMAIN, calendar, hass_config)
# Look for any new calendars
hass.services.call(DOMAIN, SERVICE_SCAN_CALENDARS, None)
return True
class GoogleCalendarService:
"""Calendar service interface to Google."""
def __init__(self, token_file):
"""Init the Google Calendar service."""
self.token_file = token_file
def get(self):
"""Get the calendar service from the storage file token."""
credentials = Storage(self.token_file).get()
http = credentials.authorize(httplib2.Http())
service = google_discovery.build(
"calendar", "v3", http=http, cache_discovery=False
)
return service
def get_calendar_info(hass, calendar):
"""Convert data from Google into DEVICE_SCHEMA."""
calendar_info = DEVICE_SCHEMA(
{
CONF_CAL_ID: calendar["id"],
CONF_ENTITIES: [
{
CONF_TRACK: calendar["track"],
CONF_NAME: calendar["summary"],
CONF_DEVICE_ID: generate_entity_id(
"{}", calendar["summary"], hass=hass
),
}
],
}
)
return calendar_info
def load_config(path):
"""Load the google_calendar_devices.yaml."""
calendars = {}
try:
with open(path, encoding="utf8") as file:
data = yaml.safe_load(file)
            for calendar in data or ():
try:
calendars.update({calendar[CONF_CAL_ID]: DEVICE_SCHEMA(calendar)})
except VoluptuousError as exception:
# keep going
_LOGGER.warning("Calendar Invalid Data: %s", exception)
except FileNotFoundError:
        # The YAML file does not exist yet; it is created when the first calendar is found
return {}
return calendars
def update_config(path, calendar):
"""Write the google_calendar_devices.yaml."""
with open(path, "a", encoding="utf8") as out:
out.write("\n")
yaml.dump([calendar], out, default_flow_style=False)
| 32.753915 | 127 | 0.64777 |
9992fa1e7570a42484e4ceaad027811944f110bf
| 1,856 |
py
|
Python
|
eth/vm/forks/berlin/computation.py
|
Peppece/py-evm
|
db9ae3c1aa617e28525344db159db2a312a03033
|
[
"MIT"
] | null | null | null |
eth/vm/forks/berlin/computation.py
|
Peppece/py-evm
|
db9ae3c1aa617e28525344db159db2a312a03033
|
[
"MIT"
] | null | null | null |
eth/vm/forks/berlin/computation.py
|
Peppece/py-evm
|
db9ae3c1aa617e28525344db159db2a312a03033
|
[
"MIT"
] | null | null | null |
import math
from eth_utils.toolz import (
merge,
)
from eth.precompiles.modexp import (
compute_adjusted_exponent_length,
extract_lengths,
modexp,
)
from eth._utils.address import (
force_bytes_to_address,
)
from eth._utils.padding import (
zpad_right,
)
from eth.vm.forks.berlin import constants
from eth.vm.forks.muir_glacier.computation import (
MUIR_GLACIER_PRECOMPILES
)
from eth.vm.forks.muir_glacier.computation import (
MuirGlacierComputation,
)
from .opcodes import BERLIN_OPCODES
def _calculate_multiplication_complexity(base_length: int, modulus_length: int) -> int:
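    # EIP-2565 multiplication complexity: the square of the larger operand length,
    # measured in 64-bit (8-byte) words.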
max_length = max(base_length, modulus_length)
words = math.ceil(max_length / 8)
return words**2
def _compute_modexp_gas_fee_eip_2565(data: bytes) -> int:
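    # EIP-2565 pricing: max(200, multiplication_complexity * iteration_count // denominator),
    # i.e. a quadratic cost with a 200-gas floor for the modexp precompile.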
base_length, exponent_length, modulus_length = extract_lengths(data)
base_end_idx = 96 + base_length
exponent_end_idx = base_end_idx + exponent_length
exponent_bytes = zpad_right(
data[base_end_idx:exponent_end_idx],
to_size=exponent_length,
)
multiplication_complexity = _calculate_multiplication_complexity(base_length, modulus_length)
iteration_count = compute_adjusted_exponent_length(exponent_length, exponent_bytes)
return max(200,
multiplication_complexity * iteration_count
// constants.GAS_MOD_EXP_QUADRATIC_DENOMINATOR_EIP_2565)
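# Berlin keeps the Muir Glacier precompile set and only repoints the modexp precompile
# at address 0x05 to the EIP-2565 gas calculator defined above.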
BERLIN_PRECOMPILES = merge(
MUIR_GLACIER_PRECOMPILES,
{force_bytes_to_address(b'\x05'): modexp(gas_calculator=_compute_modexp_gas_fee_eip_2565)},
)
class BerlinComputation(MuirGlacierComputation):
"""
A class for all execution computations in the ``Berlin`` fork.
Inherits from :class:`~eth.vm.forks.muir_glacier.MuirGlacierComputation`
"""
# Override
opcodes = BERLIN_OPCODES
_precompiles = BERLIN_PRECOMPILES
| 26.898551 | 97 | 0.757004 |
a881c6404aa021f00ec38597cc814c4787412315
| 294 |
py
|
Python
|
easy/693.py
|
oneTaken/leetcode
|
f9357d839ac8fa6333b0d7eeb2028ba28a63764c
|
[
"Apache-2.0"
] | null | null | null |
easy/693.py
|
oneTaken/leetcode
|
f9357d839ac8fa6333b0d7eeb2028ba28a63764c
|
[
"Apache-2.0"
] | null | null | null |
easy/693.py
|
oneTaken/leetcode
|
f9357d839ac8fa6333b0d7eeb2028ba28a63764c
|
[
"Apache-2.0"
] | null | null | null |
class Solution:
def hasAlternatingBits(self, n):
"""
:type n: int
:rtype: bool
"""
num = bin(n)[2:]
start = '10'
if len(num) % 2 == 1:
start = '01'
num = '0' + num
return start * (len(num) // 2) == num
| 22.615385 | 45 | 0.394558 |
ca07175b8d5f683d30ea2ad530077ffc50fa9649
| 1,118 |
py
|
Python
|
danceschool/financial/migrations/0013_auto_20181219_2044.py
|
benjwrdill/django-danceschool
|
9ecb2754502e62d0f49aa23d08ca6de6cae3c99a
|
[
"BSD-3-Clause"
] | 32 |
2017-09-12T04:25:25.000Z
|
2022-03-21T10:48:07.000Z
|
danceschool/financial/migrations/0013_auto_20181219_2044.py
|
benjwrdill/django-danceschool
|
9ecb2754502e62d0f49aa23d08ca6de6cae3c99a
|
[
"BSD-3-Clause"
] | 97 |
2017-09-01T02:43:08.000Z
|
2022-01-03T18:20:34.000Z
|
danceschool/financial/migrations/0013_auto_20181219_2044.py
|
benjwrdill/django-danceschool
|
9ecb2754502e62d0f49aa23d08ca6de6cae3c99a
|
[
"BSD-3-Clause"
] | 19 |
2017-09-26T13:34:46.000Z
|
2022-03-21T10:48:10.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-12-20 01:44
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('financial', '0012_auto_20181216_1615'),
]
operations = [
migrations.RemoveField(
model_name='expenseitem',
name='payToLocation',
),
migrations.RemoveField(
model_name='expenseitem',
name='payToName',
),
migrations.RemoveField(
model_name='expenseitem',
name='payToUser',
),
migrations.RemoveField(
model_name='genericrepeatedexpense',
name='payToLocation',
),
migrations.RemoveField(
model_name='genericrepeatedexpense',
name='payToName',
),
migrations.RemoveField(
model_name='genericrepeatedexpense',
name='payToUser',
),
migrations.RemoveField(
model_name='revenueitem',
name='receivedFromName',
),
]
| 25.409091 | 49 | 0.563506 |
6f9420d6cf4273b8e0fd7374782585bc16e694d9
| 924 |
py
|
Python
|
networks/base.py
|
Jarvis73/DeepCNN
|
e52723e8d607c8cbf49080ea040bf7c3718c6326
|
[
"MIT"
] | 2 |
2019-08-21T03:03:07.000Z
|
2019-08-30T02:50:09.000Z
|
networks/base.py
|
Jarvis73/DeepCNN
|
e52723e8d607c8cbf49080ea040bf7c3718c6326
|
[
"MIT"
] | null | null | null |
networks/base.py
|
Jarvis73/DeepCNN
|
e52723e8d607c8cbf49080ea040bf7c3718c6326
|
[
"MIT"
] | 1 |
2019-08-30T03:53:53.000Z
|
2019-08-30T03:53:53.000Z
|
# Copyright 2019 Jianwei Zhang All Right Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =================================================================================
class BaseNet(object):
def __init__(self, name=None):
self.name = name + "/" if name else ""
def __call__(self, inputs, *args, **kwargs):
pass
def __repr__(self):
return self.__class__.__name__ + "()"
| 34.222222 | 83 | 0.646104 |
994e89e4a4da3efc5679e141a7294aa95d156b02
| 1,026 |
py
|
Python
|
backend/app/router/auth.py
|
kentayamada-dev/mercari-clone
|
37cfa97876ebd4a003c4ca339cb4cdc5e012d502
|
[
"MIT"
] | 6 |
2022-01-21T06:17:05.000Z
|
2022-03-27T05:45:23.000Z
|
backend/app/router/auth.py
|
kentayamada-dev/mercari-clone
|
37cfa97876ebd4a003c4ca339cb4cdc5e012d502
|
[
"MIT"
] | null | null | null |
backend/app/router/auth.py
|
kentayamada-dev/mercari-clone
|
37cfa97876ebd4a003c4ca339cb4cdc5e012d502
|
[
"MIT"
] | 1 |
2022-03-27T05:45:31.000Z
|
2022-03-27T05:45:31.000Z
|
from app.core.schema.jwt import Secret
from app.core.schema.message import Message
from app.core.utils.auth import auth
from app.crud.seller import authenticate_seller
from app.db.database import get_db
from fastapi import APIRouter, Depends, HTTPException, status
from fastapi.security import OAuth2PasswordRequestForm
from sqlalchemy.orm import Session
router = APIRouter()
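# POST /token implements the OAuth2 password flow: the submitted username/password are
# checked against the sellers table and, on success, a bearer JWT for that email is issued.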
@router.post(
"/token",
response_model=Secret,
responses={status.HTTP_401_UNAUTHORIZED: {"model": Message}},
)
def create_token(
form_data: OAuth2PasswordRequestForm = Depends(),
db: Session = Depends(get_db),
) -> dict[str, str]:
user = authenticate_seller(db, form_data.username, form_data.password)
if user is None:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="incorrect email or password",
headers={"WWW-Authenticate": "Bearer"},
)
token = auth.create_jwt_token(user.email)
return {"access_token": token, "token_type": "Bearer"}
| 33.096774 | 74 | 0.727096 |
adc7e813298c8320c34e8b3a2461bab897f6c38f
| 602 |
py
|
Python
|
questions/q317_non_repeating_numbers/code.py
|
aadhityasw/Competitive-Programs
|
901a48d35f024a3a87c32a45b7f4531e8004a203
|
[
"MIT"
] | null | null | null |
questions/q317_non_repeating_numbers/code.py
|
aadhityasw/Competitive-Programs
|
901a48d35f024a3a87c32a45b7f4531e8004a203
|
[
"MIT"
] | 1 |
2021-05-15T07:56:51.000Z
|
2021-05-15T07:56:51.000Z
|
questions/q317_non_repeating_numbers/code.py
|
aadhityasw/Competitive-Programs
|
901a48d35f024a3a87c32a45b7f4531e8004a203
|
[
"MIT"
] | null | null | null |
class Solution:
def singleNumber(self, nums):
xor = 0
for n in nums :
xor = xor ^ n
first = 0
second = 0
        # Isolate the lowest set bit of xor; the two unique numbers differ at this bit
val = xor & (- xor)
for n in nums :
if n & val :
first = first ^ n
else :
second = second ^ n
ans = [first, second]
ans.sort()
return ans
if __name__ == '__main__':
T=int(input())
for i in range(T):
n = int(input())
v = list(map(int,input().split()))
ob = Solution();
ans = ob.singleNumber(v)
for i in ans:
print(i, end = " ")
print()
| 18.242424 | 48 | 0.513289 |
0e780ea8a125a31824e00c422a3e6c5c9b4a20ab
| 891 |
py
|
Python
|
h2o-py/tests/testdir_apis/H2O_Module/pyunit_h2oimport_sql_table.py
|
My-Technical-Architect/h2o-3
|
d383802fb7f9c3ec9c72b7869fe636059a333d88
|
[
"Apache-2.0"
] | 1 |
2017-03-28T09:10:12.000Z
|
2017-03-28T09:10:12.000Z
|
h2o-py/tests/testdir_apis/H2O_Module/pyunit_h2oimport_sql_table.py
|
My-Technical-Architect/h2o-3
|
d383802fb7f9c3ec9c72b7869fe636059a333d88
|
[
"Apache-2.0"
] | null | null | null |
h2o-py/tests/testdir_apis/H2O_Module/pyunit_h2oimport_sql_table.py
|
My-Technical-Architect/h2o-3
|
d383802fb7f9c3ec9c72b7869fe636059a333d88
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import sys, os
sys.path.insert(1,"../../../")
from tests import pyunit_utils
import h2o
import inspect
def h2oimport_sql_table():
"""
Python API test: h2o.import_sql_table(connection_url, table, username, password, columns=None, optimize=True)
Not a real test, just make sure arguments are not changed.
"""
command_list = ['connection_url', 'table', 'username', 'password', 'columns', 'optimize']
try:
allargs = inspect.getargspec(h2o.import_sql_table)
for arg_name in command_list:
assert arg_name in allargs.args, "argument "+arg_name+" is missing from h2o.import_sql_table() command"
except Exception as e:
assert False, "h2o.import_sql_table() command is not working."
if __name__ == "__main__":
pyunit_utils.standalone_test(h2oimport_sql_table)
else:
h2oimport_sql_table()
| 34.269231 | 115 | 0.712682 |
a65bec2e39e006d2df3ecaac41a96a8bba78a253
| 9,775 |
py
|
Python
|
diplomacy_research/proto/tensorflow_serving/apis/classification_pb2.py
|
wwongkamjan/dipnet_press
|
787263c1b9484698904f525c8d78d0e333e1c0d9
|
[
"MIT"
] | 39 |
2019-09-06T13:42:24.000Z
|
2022-03-18T18:38:43.000Z
|
diplomacy_research/proto/tensorflow_serving/apis/classification_pb2.py
|
wwongkamjan/dipnet_press
|
787263c1b9484698904f525c8d78d0e333e1c0d9
|
[
"MIT"
] | 9 |
2019-09-19T22:35:32.000Z
|
2022-02-24T18:04:57.000Z
|
diplomacy_research/proto/tensorflow_serving/apis/classification_pb2.py
|
wwongkamjan/dipnet_press
|
787263c1b9484698904f525c8d78d0e333e1c0d9
|
[
"MIT"
] | 8 |
2019-10-16T21:09:14.000Z
|
2022-02-23T05:20:37.000Z
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow_serving/apis/classification.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow_serving.apis import input_pb2 as tensorflow__serving_dot_apis_dot_input__pb2
from tensorflow_serving.apis import model_pb2 as tensorflow__serving_dot_apis_dot_model__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow_serving/apis/classification.proto',
package='tensorflow.serving',
syntax='proto3',
serialized_options=_b('\370\001\001'),
serialized_pb=_b('\n,tensorflow_serving/apis/classification.proto\x12\x12tensorflow.serving\x1a#tensorflow_serving/apis/input.proto\x1a#tensorflow_serving/apis/model.proto\"%\n\x05\x43lass\x12\r\n\x05label\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\"=\n\x0f\x43lassifications\x12*\n\x07\x63lasses\x18\x01 \x03(\x0b\x32\x19.tensorflow.serving.Class\"T\n\x14\x43lassificationResult\x12<\n\x0f\x63lassifications\x18\x01 \x03(\x0b\x32#.tensorflow.serving.Classifications\"t\n\x15\x43lassificationRequest\x12\x31\n\nmodel_spec\x18\x01 \x01(\x0b\x32\x1d.tensorflow.serving.ModelSpec\x12(\n\x05input\x18\x02 \x01(\x0b\x32\x19.tensorflow.serving.Input\"\x85\x01\n\x16\x43lassificationResponse\x12\x31\n\nmodel_spec\x18\x02 \x01(\x0b\x32\x1d.tensorflow.serving.ModelSpec\x12\x38\n\x06result\x18\x01 \x01(\x0b\x32(.tensorflow.serving.ClassificationResultB\x03\xf8\x01\x01\x62\x06proto3')
,
dependencies=[tensorflow__serving_dot_apis_dot_input__pb2.DESCRIPTOR,tensorflow__serving_dot_apis_dot_model__pb2.DESCRIPTOR,])
_CLASS = _descriptor.Descriptor(
name='Class',
full_name='tensorflow.serving.Class',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='label', full_name='tensorflow.serving.Class.label', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='score', full_name='tensorflow.serving.Class.score', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=142,
serialized_end=179,
)
_CLASSIFICATIONS = _descriptor.Descriptor(
name='Classifications',
full_name='tensorflow.serving.Classifications',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='classes', full_name='tensorflow.serving.Classifications.classes', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=181,
serialized_end=242,
)
_CLASSIFICATIONRESULT = _descriptor.Descriptor(
name='ClassificationResult',
full_name='tensorflow.serving.ClassificationResult',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='classifications', full_name='tensorflow.serving.ClassificationResult.classifications', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=244,
serialized_end=328,
)
_CLASSIFICATIONREQUEST = _descriptor.Descriptor(
name='ClassificationRequest',
full_name='tensorflow.serving.ClassificationRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='model_spec', full_name='tensorflow.serving.ClassificationRequest.model_spec', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='input', full_name='tensorflow.serving.ClassificationRequest.input', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=330,
serialized_end=446,
)
_CLASSIFICATIONRESPONSE = _descriptor.Descriptor(
name='ClassificationResponse',
full_name='tensorflow.serving.ClassificationResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='model_spec', full_name='tensorflow.serving.ClassificationResponse.model_spec', index=0,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='result', full_name='tensorflow.serving.ClassificationResponse.result', index=1,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=449,
serialized_end=582,
)
_CLASSIFICATIONS.fields_by_name['classes'].message_type = _CLASS
_CLASSIFICATIONRESULT.fields_by_name['classifications'].message_type = _CLASSIFICATIONS
_CLASSIFICATIONREQUEST.fields_by_name['model_spec'].message_type = tensorflow__serving_dot_apis_dot_model__pb2._MODELSPEC
_CLASSIFICATIONREQUEST.fields_by_name['input'].message_type = tensorflow__serving_dot_apis_dot_input__pb2._INPUT
_CLASSIFICATIONRESPONSE.fields_by_name['model_spec'].message_type = tensorflow__serving_dot_apis_dot_model__pb2._MODELSPEC
_CLASSIFICATIONRESPONSE.fields_by_name['result'].message_type = _CLASSIFICATIONRESULT
DESCRIPTOR.message_types_by_name['Class'] = _CLASS
DESCRIPTOR.message_types_by_name['Classifications'] = _CLASSIFICATIONS
DESCRIPTOR.message_types_by_name['ClassificationResult'] = _CLASSIFICATIONRESULT
DESCRIPTOR.message_types_by_name['ClassificationRequest'] = _CLASSIFICATIONREQUEST
DESCRIPTOR.message_types_by_name['ClassificationResponse'] = _CLASSIFICATIONRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Class = _reflection.GeneratedProtocolMessageType('Class', (_message.Message,), dict(
DESCRIPTOR = _CLASS,
__module__ = 'tensorflow_serving.apis.classification_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.Class)
))
_sym_db.RegisterMessage(Class)
Classifications = _reflection.GeneratedProtocolMessageType('Classifications', (_message.Message,), dict(
DESCRIPTOR = _CLASSIFICATIONS,
__module__ = 'tensorflow_serving.apis.classification_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.Classifications)
))
_sym_db.RegisterMessage(Classifications)
ClassificationResult = _reflection.GeneratedProtocolMessageType('ClassificationResult', (_message.Message,), dict(
DESCRIPTOR = _CLASSIFICATIONRESULT,
__module__ = 'tensorflow_serving.apis.classification_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.ClassificationResult)
))
_sym_db.RegisterMessage(ClassificationResult)
ClassificationRequest = _reflection.GeneratedProtocolMessageType('ClassificationRequest', (_message.Message,), dict(
DESCRIPTOR = _CLASSIFICATIONREQUEST,
__module__ = 'tensorflow_serving.apis.classification_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.ClassificationRequest)
))
_sym_db.RegisterMessage(ClassificationRequest)
ClassificationResponse = _reflection.GeneratedProtocolMessageType('ClassificationResponse', (_message.Message,), dict(
DESCRIPTOR = _CLASSIFICATIONRESPONSE,
__module__ = 'tensorflow_serving.apis.classification_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.ClassificationResponse)
))
_sym_db.RegisterMessage(ClassificationResponse)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 38.035019 | 887 | 0.778312 |
6afcb7e7336e38c7a77be68995f715883c901e56
| 13,533 |
py
|
Python
|
boardfarm/lib/bft_logging.py
|
nickberry17/boardfarm
|
80f24fc97eff9a987250a6334b76eff08e001189
|
[
"BSD-3-Clause-Clear"
] | 17 |
2018-04-19T08:35:47.000Z
|
2021-11-01T01:38:33.000Z
|
boardfarm/lib/bft_logging.py
|
nickberry17/boardfarm
|
80f24fc97eff9a987250a6334b76eff08e001189
|
[
"BSD-3-Clause-Clear"
] | 190 |
2018-04-19T07:00:18.000Z
|
2022-02-11T01:42:51.000Z
|
boardfarm/lib/bft_logging.py
|
nickberry17/boardfarm
|
80f24fc97eff9a987250a6334b76eff08e001189
|
[
"BSD-3-Clause-Clear"
] | 30 |
2018-04-12T01:49:21.000Z
|
2022-02-11T14:53:19.000Z
|
"""Global functions related to logging messages."""
# Copyright (c) 2015
#
# All rights reserved.
#
# This file is distributed under the Clear BSD license.
# The full text can be found in LICENSE in the root directory.
import inspect
import logging
import os
import re
import sys
import time
import types
from functools import wraps
import debtcollector
from termcolor import colored
from boardfarm.lib.ConfigHelper import ConfigHelper
logger = logging.getLogger("bft")
def now_short(_format="%Y%m%d-%H%M%S"):
"""Get current date and time string.
:param _format: time stamp format, defaults to "%Y%m%d-%H%M%S"
:type _format: string, optional
:return: timestamp in YYYYMMDD-hhmmss
:rtype: string
"""
timeString = time.strftime(_format, time.localtime()) + "\t"
return timeString
def logfile_assert_message(s, condition, message):
"""Log and assert based on condition.
If condition True, log message as PASS to testcase log file.
If condition False, Assert and Print message with status FAIL.
:param s: Instance of the class
:type s: Class
:param condition: condition to validate
:type condition: Condition
:param message: Message to log and print
:type message: String
:raise assertion: Assert on condition is FALSE
"""
if not condition:
s.log_to_file += now_short() + message + ": FAIL\r\n"
assert 0, message + ": FAIL\r\n"
else:
log_message(s, message + ": PASS")
def write_test_log(t, output_dir):
"""Write detailed log file for given test."""
if t.log_to_file is not None and hasattr(t, "stop_time"):
filename = type(t).__name__ + "-" + time.strftime("%Y%m%d-%H%M%S") + ".txt"
testtime = t.stop_time - t.start_time
with open(os.path.join(output_dir, filename), "w") as log:
log.write("\t=======================================================")
log.write("\n\tTest case ID: %s" % (type(t).__name__))
log.write("\n\tTest case Description: %s" % (type(t).__doc__))
log.write("\n\t=======================================================\n")
log.write(t.log_to_file)
log.write("\n\t=======================================================")
log.write("\n\t%s test result: %s" % (type(t).__name__, t.result_grade))
log.write("\n\tTotal test time: %s seconds" % testtime)
log.write("\n\t=======================================================")
class LoggerMeta(type):
"""To wrap functions with logging messages."""
def __new__(cls, name, bases, attrs):
"""Magic method to create instance object reference.
Using this method you can customize the instance creation.
:param cls: Class to be instantiated(LoggerMeta)
:type cls: Class
:param name: name of the new Class instantiated
:type name: Class
:param bases: Tuple of base parent classes
:type bases: Class
:param attrs: Class attributes
:type attrs: Arguments(args)
:return: Return the instance object created
:rtype: Object
"""
if "model" in attrs:
cls.check_signature(name, bases, attrs)
for attr_name, attr_value in attrs.items():
if isinstance(attr_value, types.FunctionType):
attrs[attr_name] = cls.deco(attr_value)
return super(LoggerMeta, cls).__new__(cls, name, bases, attrs)
@classmethod
def deco(cls, func):
"""Write functions calls to log file with time.
:param cls: Instance of the class LoggerMeta
:type cls: Class
:param func: function called this method
:type func: Object
:return: Return of the called function
:rtype: string
"""
@wraps(func)
def wrapper(*args, **kwargs):
"""Parse the calling function arguments and send to logs with date time.
:param args: any number of extra arguments
:type args: Arguments(args)
:param kwargs: arguments, where you provide a name to the variable as you pass it into the function
:type kwargs: Arguments(args)
:return: String with parent class, calling/returning of function
:rtype: string
"""
if "pytest" in sys.modules:
# if in pytest bypass all this
return func(*args, **kwargs)
func_args_str = "%s %s" % (repr(args), repr(kwargs))
to_log = "%s.%s ( %s )" % (func.__module__, func.__name__, func_args_str)
args[0].log_calls += "[%.6f]calling %s\r\n" % (time.process_time(), to_log)
clsname = args[0].__class__.__name__
err_injection_dict = ConfigHelper().get("err_injection_dict", None)
if (
err_injection_dict
and clsname in err_injection_dict
and func.__name__ in err_injection_dict[clsname]
):
ret = err_injection_dict[clsname][func.__name__]
args[0].log_calls += "[%.6f]injecting %s = %s\r\n" % (
time.process_time(),
to_log,
repr(ret),
)
else:
ret = func(*args, **kwargs)
args[0].log_calls += "[%.6f]returned %s = %s\r\n" % (
time.process_time(),
to_log,
repr(ret),
)
return ret
return wrapper
@classmethod
def check_signature(cls, name, bases, attr):
"""function to check signatures with reference to parent class.
:param cls: Instance of the class LoggerMeta
:type cls: Class
:param name: name of the new Class instantiated
:type name: Class
:param bases: Tuple of base parent classes
:type bases: Class
:param attrs: Class attributes
:type attrs: Arguments(args)
:return: Return None
"""
check_bases = []
for base in bases:
all_bases = base.__mro__
for i in all_bases:
if (
i is not object
and "sign_check" in i.__dict__
and i not in check_bases
):
check_bases.append(i)
for methodName in attr:
f = attr[methodName]
if not isinstance(f, types.FunctionType):
continue
for baseClass in check_bases:
try:
fBase = getattr(baseClass, methodName)
if isinstance(fBase, types.FunctionType):
if not inspect.signature(f) == inspect.signature(fBase):
debtcollector.deprecate(
"{}.{} Method signature are not identical with base class {}".format(
name, methodName, baseClass
),
category=UserWarning,
)
break
else:
debtcollector.deprecate(
"{}.{} Method is not FunctionType in base class {}".format(
name, methodName, baseClass
),
category=UserWarning,
)
break
except AttributeError:
# This method was not defined in this base class,
# So just go to the next base class.
continue
def log_message(s, msg, header=False):
"""Write log messages to console and to log file(with timestamp).
:param s: Instance of the class
:type s: Class
:param msg: Message to log and print
:type msg: String
:param header: True or False, defaults to False. To display message as header
:type header: Boolean, Optional
"""
if s.log_to_file is None:
s.log_to_file = ""
line_sep = "=" * min(len(msg), 80)
full_msg = "\n\t\t" + line_sep + "\n\t\t" + msg + "\n\t\t" + line_sep + "\n"
if header:
logger.debug("\n\n\t\t\t***" + msg + "***\n\n")
s.log_to_file += now_short() + full_msg + "\r\n"
else:
logger.debug(full_msg)
s.log_to_file += now_short() + msg + "\r\n"
class o_helper(object):
"""Class to handle output logging."""
def __init__(self, parent, out, color):
"""Instance initialisation to handle the output logging.
:param parent: Parent class
:type parent: Class
:param out: Output stream (stdout)
:type out: Streams
:param color: text colour for the device(provided in Json)
:type color: String
"""
self.color = color
self.out = out
self.parent = parent
self.first_write = True
def write(self, string):
"""Write or stdout input messages in colored(if defined).
Create the file if not already present.
For example: <Testcase>.txt file creation
:param string: Message to write in the output file
:type string: String
"""
if self.out is not None:
if self.first_write:
self.first_write = False
string = "\r\n" + string
if self.color is not None:
self.out.write(colored(string, self.color))
else:
self.out.write(string)
# check for the split case
if (
len(self.parent.log) > 1
and self.parent.log[-1] == "\r"
and string[0] == "\n"
):
tmp = "\n[%.6f]" % time.process_time()
tmp += string[1:]
string = tmp
to_log = re.sub("\r\n", "\r\n[%.6f]" % time.process_time(), string)
self.parent.log += to_log
if hasattr(self.parent, "test_to_log"):
self.parent.test_to_log.log += re.sub(
r"\r\n\[", "\r\n%s: [" % self.parent.test_prefix, to_log
)
def extra_log(self, string):
"""Add process time with the log messages."""
if hasattr(self.parent, "log"):
self.parent.log += "\r\n[%s] " % time.process_time()
self.parent.log += string + "\r\n"
def flush(self):
"""Flushes the buffer storage in console before pexpect."""
if self.out is not None:
self.out.flush()
def create_file_logs(config, board, tests_to_run, logger):
"""Add and write log messages to a combined list."""
combined_list = []
def add_to_combined_list(log, name, combined_list=combined_list):
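        # Each log line is expected to look like "[<process_time>] text"; lines without a
        # timestamp are stored with time 0.0 so they sort to the front of the combined log.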
for line in log.split("\r\n"):
try:
if line == "":
continue
if line.startswith("\n"):
line = line[1:]
if line.startswith(" ["):
line = line[1:]
ts, text = line.split("]", 1)
timestamp = float(ts[1:-1])
else:
text = line
timestamp = 0.0
combined_list.append(
{"time": timestamp, "text": str(text), "name": name}
)
except Exception as error:
logger.error(error)
logger.debug("Failed to parse log line = %s" % repr(line))
idx = 1
console_combined = []
for console in board.consoles:
with open(os.path.join(config.output_dir, "console-%s.log" % idx), "w") as clog:
clog.write(console.log)
add_to_combined_list(console.log, "console-%s" % idx)
add_to_combined_list(console.log_calls, "console-%s" % idx)
add_to_combined_list(console.log, "", console_combined)
idx = idx + 1
def write_combined_log(combined_list, fname):
with open(os.path.join(config.output_dir, fname), "w") as clog:
for e in combined_list:
try:
if e["name"] == "":
clog.write("[%s]%s\r\n" % (e["time"], repr(e["text"])))
else:
clog.write(
"%s: [%s] %s\n" % (e["name"], e["time"], repr(e["text"]))
)
except Exception as error:
logger.error(error)
logger.debug("failed to parse line: %s" % repr(e))
import operator
console_combined.sort(key=operator.itemgetter("time"))
write_combined_log(console_combined, "console-combined.log")
for device in config.devices:
with open(os.path.join(config.output_dir, device + ".log"), "w") as clog:
d = getattr(config, device)
if hasattr(d, "log"):
clog.write(d.log)
add_to_combined_list(d.log, device)
add_to_combined_list(d.log_calls, device)
for test in tests_to_run:
if hasattr(test, "log") and test.log != "":
with open(
os.path.join(config.output_dir, "%s.log" % test.__class__.__name__), "w"
) as clog:
clog.write(test.log)
if hasattr(test, "log_calls"):
add_to_combined_list(test.log_calls, test.__class__.__name__)
combined_list.sort(key=operator.itemgetter("time"))
write_combined_log(combined_list, "all.log")
| 35.613158 | 111 | 0.532624 |
73b0bd3cfaf7edd518616d936627b95c93b563bb
| 1,551 |
py
|
Python
|
data_prep/01_billtags/tag_extraction.py
|
LegibleLegislation/CongressChallenge
|
43ce79cbbba42208f7062eb95435444de750214d
|
[
"CC0-1.0"
] | null | null | null |
data_prep/01_billtags/tag_extraction.py
|
LegibleLegislation/CongressChallenge
|
43ce79cbbba42208f7062eb95435444de750214d
|
[
"CC0-1.0"
] | null | null | null |
data_prep/01_billtags/tag_extraction.py
|
LegibleLegislation/CongressChallenge
|
43ce79cbbba42208f7062eb95435444de750214d
|
[
"CC0-1.0"
] | null | null | null |
import json
import sqlalchemy
import requests
import os
# Connect to local PostgreSQL
user = 'ubuntu'
password = ''
dbname = 'congress'
host = 'localhost'
local_port = '5432'
es = "postgresql+psycopg2://"+user+":"+password+"@/"+dbname+"?host="+host+"&port="+local_port
engine = sqlalchemy.create_engine(es)
print(engine)
# NOTE: CHANGE THESE PARAMETERS
# AFTER TESTING.
IN_TABLE = 'congress_intelligent_tags_test'
OUT_TABLE = 'congress_tagging_test'
LIMIT = 1
OFFSET = 0
with engine.connect() as conn:
rows = conn.execute("SELECT data_id,bill_id,data::jsonb FROM {0} LIMIT {1} OFFSET {2};".format(IN_TABLE,LIMIT,OFFSET))
print(rows.keys())
query = '''DROP TABLE IF EXISTS {0}'''.format(OUT_TABLE)
print(query)
conn.execute(query)
query = '''CREATE TABLE {0}
(bill_id TEXT,
social_tags TEXT,
relevance INT);
'''.format(OUT_TABLE)
print(query)
conn.execute(query)
for row in rows:
first = row[1]
for t in list(row[2]):
if 'SocialTag' in t:
name = row[2][t][u'name']
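                # Strip single quotes so the tag name can be embedded in the SQL literal below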
name = name.replace("'", '')
query = '''INSERT INTO
{0} (bill_id, social_tags, relevance)
VALUES
('{1}', '{2}','{3}');'''.format(OUT_TABLE,str(first),name,str(row[2][t][u'importance']))
print(query) # NOTE toggle for debugging
try:
conn.execute(query)
except:
print(query)
raise
| 25.42623 | 122 | 0.568665 |
9af869489c8c736c52ee5f961357df47b20078d7
| 839 |
py
|
Python
|
core/security_interface.py
|
alpha19/StockAnalysis
|
011b8392c1623c61233db6d08a2ff72010c51bb9
|
[
"MIT"
] | 2 |
2020-05-11T02:04:02.000Z
|
2020-05-11T02:04:08.000Z
|
core/security_interface.py
|
Spich9215/StockAnalysis
|
5679415cd4bc306b7ef7d06c3cccd18842a29f09
|
[
"MIT"
] | 9 |
2019-06-01T23:29:08.000Z
|
2021-12-13T20:49:46.000Z
|
core/security_interface.py
|
Spich9215/StockAnalysis
|
5679415cd4bc306b7ef7d06c3cccd18842a29f09
|
[
"MIT"
] | 1 |
2019-12-26T18:53:57.000Z
|
2019-12-26T18:53:57.000Z
|
from abc import ABCMeta, abstractmethod
import time
__author__ = 'kdedow'
class SecurityInterface(object):
"""
Interface for different types of securities
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, secTarget=""):
"""
:param secTarget:
:return:
"""
self.target = secTarget
self.dateStr = time.strftime("%m/%d/%Y")
self.timeStr = time.strftime("%H:%M")
@abstractmethod
def queryAPI(self):
"""
All subclasses must run some type of analysis
:return:
"""
pass
@abstractmethod
def getInfo(self):
"""
Return formatted info about security (to be printed to console)
:return:
"""
pass
@abstractmethod
def updateInfo(self):
pass
| 19.511628 | 71 | 0.567342 |
58c52a655be763974cfdfe23b0b277212b677cda
| 372 |
py
|
Python
|
den/helpers/color.py
|
MonliH/Den
|
9c2e69744dcf26ae01154eac32aa4ea8ff2adee3
|
[
"MIT"
] | null | null | null |
den/helpers/color.py
|
MonliH/Den
|
9c2e69744dcf26ae01154eac32aa4ea8ff2adee3
|
[
"MIT"
] | null | null | null |
den/helpers/color.py
|
MonliH/Den
|
9c2e69744dcf26ae01154eac32aa4ea8ff2adee3
|
[
"MIT"
] | null | null | null |
def init_color():
import os, sys
if sys.platform.lower() == "win32":
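        # Running the built-in "color" command is a common trick to switch the Windows
        # console into processing ANSI escape sequences, so the codes below render as colors.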
os.system("color")
class Color:
BLACK = "\033[30m"
BOLD = "\033[1m"
RED = "\033[31m"
GREEN = "\033[32m"
YELLOW = "\033[33m"
BLUE = "\033[34m"
MAGENTA = "\033[35m"
CYAN = "\033[36m"
WHITE = "\033[37m"
UNDERLINE = "\033[4m"
RESET = "\033[0m"
| 18.6 | 39 | 0.510753 |
454eef314ede3e47c98335fb76e79d440918f047
| 8,619 |
py
|
Python
|
src/python/pants/backend/project_info/tasks/idea_plugin_gen.py
|
mpopenko-exos/pants
|
47d27037c8b13291fc9023e56ddd1b1defdf1b8e
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/project_info/tasks/idea_plugin_gen.py
|
mpopenko-exos/pants
|
47d27037c8b13291fc9023e56ddd1b1defdf1b8e
|
[
"Apache-2.0"
] | 1 |
2018-09-04T17:37:34.000Z
|
2018-09-04T19:42:58.000Z
|
src/python/pants/backend/project_info/tasks/idea_plugin_gen.py
|
mpopenko-exos/pants
|
47d27037c8b13291fc9023e56ddd1b1defdf1b8e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import json
import logging
import os
import pkgutil
import re
import shutil
import subprocess
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.backend.python.targets.python_target import PythonTarget
from pants.base.build_environment import get_buildroot, get_scm
from pants.base.exceptions import TaskError
from pants.base.generator import Generator, TemplateData
from pants.task.console_task import ConsoleTask
from pants.util import desktop
from pants.util.contextutil import temporary_dir, temporary_file
from pants.util.dirutil import safe_mkdir
_TEMPLATE_BASEDIR = 'templates/idea'
# Follow `export.py` for versioning strategy.
IDEA_PLUGIN_VERSION = '0.0.4'
class IdeaPluginGen(ConsoleTask):
"""Invoke IntelliJ Pants plugin (installation required) to create a project.
The ideal workflow is to programmatically open idea -> select import -> import as pants project -> select project
path, but IDEA does not have CLI support for "select import" and "import as pants project" once it is opened.
Therefore, this task takes another approach to embed the target specs into a `iws` workspace file along
with an skeleton `ipr` project file.
Sample `iws`:
********************************************************
<?xml version="1.0"?>
<project version="4">
<component name="PropertiesComponent">
<property name="targets" value="["/Users/me/workspace/pants/testprojects/tests/scala/org/pantsbuild/testproject/cp-directories/::"]" />
<property name="project_path" value="/Users/me/workspace/pants/testprojects/tests/scala/org/pantsbuild/testproject/cp-directories/" />
</component>
</project>
********************************************************
Once pants plugin sees `targets` and `project_path`, it will simulate the import process on and populate the
existing skeleton project into a Pants project as if user is importing these targets.
"""
PROJECT_NAME_LIMIT = 200
@classmethod
def register_options(cls, register):
super().register_options(register)
# TODO: https://github.com/pantsbuild/pants/issues/3198
# scala/java-language level should use what Pants already knows.
register('--open', type=bool, default=True,
help='Attempts to open the generated project in IDEA.')
register('--incremental-import', type=int, default=None,
help='Enable incremental import of targets with the given graph depth. Supported '
'by IntelliJ Pants plugin versions `>= 1.9.2`.')
register('--dep-as-jar', type=bool, default=False,
help='If true, treat source dependencies as 3rdparty jars.')
register('--java-encoding', default='UTF-8',
help='Sets the file encoding for java files in this project.')
register('--open-with', type=str, default=None, recursive=True,
help='Program used to open the generated IntelliJ project.')
register('--debug_port', type=int, default=5005,
help='Port to use for launching tasks under the debugger.')
register('--java-jdk-name', default=None,
help='Sets the jdk used to compile the project\'s java sources. If unset the default '
'jdk name for the --java-language-level is used')
register('--java-language-level', type=int, default=8,
help='Sets the java language and jdk used to compile the project\'s java sources.')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.open = self.get_options().open
self.java_encoding = self.get_options().java_encoding
self.project_template = os.path.join(_TEMPLATE_BASEDIR,
'project-12.mustache')
self.workspace_template = os.path.join(_TEMPLATE_BASEDIR,
'workspace-12.mustache')
self.rootmodule_template = os.path.join(_TEMPLATE_BASEDIR,
'rootmodule-12.mustache')
self.java_language_level = self.get_options().java_language_level
if self.get_options().java_jdk_name:
self.java_jdk = self.get_options().java_jdk_name
else:
self.java_jdk = '1.{}'.format(self.java_language_level)
output_dir = os.path.join(get_buildroot(), ".idea", self.__class__.__name__)
safe_mkdir(output_dir)
with temporary_dir(root_dir=output_dir, cleanup=False) as output_project_dir:
project_name = self.get_project_name(self.context.options.specs)
self.gen_project_workdir = output_project_dir
self.project_filename = os.path.join(self.gen_project_workdir,
'{}.ipr'.format(project_name))
self.workspace_filename = os.path.join(self.gen_project_workdir,
'{}.iws'.format(project_name))
self.rootmodule_filename = os.path.join(self.gen_project_workdir,
'rootmodule.iml')
self.intellij_output_dir = os.path.join(self.gen_project_workdir, 'out')
@classmethod
def get_project_name(cls, target_specs):
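    # Sketch: ['examples/src/python/foo:bar'] -> 'examples.src.python.foo:bar', truncated
    # to PROJECT_NAME_LIMIT characters so the generated .ipr/.iws file names stay manageable.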
escaped_name = re.sub('[^0-9a-zA-Z:_]+', '.', '__'.join(target_specs))
# take up to PROJECT_NAME_LIMIT chars as project file name due to filesystem constraint.
return escaped_name[:cls.PROJECT_NAME_LIMIT]
# TODO: https://github.com/pantsbuild/pants/issues/3198
def generate_project(self):
outdir = os.path.abspath(self.intellij_output_dir)
if not os.path.exists(outdir):
os.makedirs(outdir)
scm = get_scm()
configured_project = TemplateData(
root_dir=get_buildroot(),
outdir=outdir,
git_root=scm.worktree if scm else None,
java=TemplateData(
encoding=self.java_encoding,
jdk=self.java_jdk,
language_level='JDK_1_{}'.format(self.java_language_level)
),
debug_port=self.get_options().debug_port,
)
abs_target_specs = [os.path.join(get_buildroot(), spec) for spec in self.context.options.specs]
configured_workspace = TemplateData(
targets=json.dumps(abs_target_specs),
project_path=os.path.join(get_buildroot(), abs_target_specs[0].split(':')[0]),
idea_plugin_version=IDEA_PLUGIN_VERSION,
incremental_import=self.get_options().incremental_import,
dep_as_jar=self.get_options().dep_as_jar,
)
# Generate (without merging in any extra components).
safe_mkdir(os.path.abspath(self.intellij_output_dir))
def gen_file(template_file_name, **mustache_kwargs):
return self._generate_to_tempfile(
Generator(pkgutil.get_data(__name__, template_file_name).decode(), **mustache_kwargs)
)
ipr = gen_file(self.project_template, project=configured_project)
iws = gen_file(self.workspace_template, workspace=configured_workspace)
iml_root = gen_file(self.workspace_template)
shutil.move(ipr, self.project_filename)
shutil.move(iws, self.workspace_filename)
shutil.move(iml_root, self.rootmodule_filename)
return self.project_filename
def _generate_to_tempfile(self, generator):
"""Applies the specified generator to a temp file and returns the path to that file.
We generate into a temp file so that we don't lose any manual customizations on error."""
with temporary_file(cleanup=False, binary_mode=False) as output:
generator.write(output)
return output.name
def console_output(self, _targets):
if not self.context.options.specs:
raise TaskError("No targets specified.")
# Heuristics to guess whether user tries to load a python project,
# in which case intellij project sdk has to be set up manually.
jvm_target_num = len([x for x in self.context.target_roots if isinstance(x, JvmTarget)])
python_target_num = len([x for x in self.context.target_roots if isinstance(x, PythonTarget)])
if python_target_num > jvm_target_num:
logging.warn('This is likely a python project. Please make sure to '
'select the proper python interpreter as Project SDK in IntelliJ.')
ide_file = self.generate_project()
yield self.gen_project_workdir
if ide_file and self.get_options().open:
open_with = self.get_options().open_with
if open_with:
null = open(os.devnull, 'wb')
subprocess.Popen([open_with, ide_file], stdout=null, stderr=null)
else:
try:
desktop.ui_open(ide_file)
except desktop.OpenError as e:
raise TaskError(e)
| 43.751269 | 153 | 0.69138 |
6033e4992f526791f441fc59f0ec335f4e4343e2
| 2,485 |
py
|
Python
|
tests/configs/realview64-switcheroo-full.py
|
mandaltj/gem5_chips
|
b9c0c602241ffda7851c1afb32fa01f295bb98fd
|
[
"BSD-3-Clause"
] | 30 |
2019-07-12T02:35:33.000Z
|
2022-02-22T03:34:35.000Z
|
tests/configs/realview64-switcheroo-full.py
|
mandaltj/gem5_chips
|
b9c0c602241ffda7851c1afb32fa01f295bb98fd
|
[
"BSD-3-Clause"
] | 8 |
2020-02-05T17:47:10.000Z
|
2021-09-06T03:58:56.000Z
|
tests/configs/realview64-switcheroo-full.py
|
mandaltj/gem5_chips
|
b9c0c602241ffda7851c1afb32fa01f295bb98fd
|
[
"BSD-3-Clause"
] | 25 |
2017-12-02T00:46:04.000Z
|
2022-02-18T19:28:53.000Z
|
# Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.objects import *
from arm_generic import *
import switcheroo
root = LinuxArmFSSwitcheroo(
machine_type='VExpress_EMM64',
mem_class=DDR3_1600_8x8,
cpu_classes=(AtomicSimpleCPU, TimingSimpleCPU, MinorCPU, DerivO3CPU)
).create_root()
# Setup a custom test method that uses the switcheroo tester that
# switches between CPU models.
run_test = switcheroo.run_test
| 48.72549 | 72 | 0.794769 |
dc575b2da59f73be75f86c4d83f82021c85633c4
| 369 |
py
|
Python
|
Cartwheel/lib/Python26/Lib/site-packages/OpenGL/GL/MESA/pack_invert.py
|
MontyThibault/centre-of-mass-awareness
|
58778f148e65749e1dfc443043e9fc054ca3ff4d
|
[
"MIT"
] | null | null | null |
Cartwheel/lib/Python26/Lib/site-packages/OpenGL/GL/MESA/pack_invert.py
|
MontyThibault/centre-of-mass-awareness
|
58778f148e65749e1dfc443043e9fc054ca3ff4d
|
[
"MIT"
] | null | null | null |
Cartwheel/lib/Python26/Lib/site-packages/OpenGL/GL/MESA/pack_invert.py
|
MontyThibault/centre-of-mass-awareness
|
58778f148e65749e1dfc443043e9fc054ca3ff4d
|
[
"MIT"
] | null | null | null |
'''OpenGL extension MESA.pack_invert
This module customises the behaviour of the
OpenGL.raw.GL.MESA.pack_invert to provide a more
Python-friendly API
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.MESA.pack_invert import *
### END AUTOGENERATED SECTION
| 30.75 | 56 | 0.807588 |
120317bb5bebfa3d7a949e8e8b445d3db4e63bf1
| 6,936 |
py
|
Python
|
autotest/osr/osr_metacrs.py
|
Esri/gdal
|
8d9af5086ddb96f707ed281786a1cd278066a7f2
|
[
"MIT"
] | 9 |
2019-05-30T17:01:56.000Z
|
2021-01-30T01:06:41.000Z
|
autotest/osr/osr_metacrs.py
|
Esri/gdal
|
8d9af5086ddb96f707ed281786a1cd278066a7f2
|
[
"MIT"
] | 4 |
2018-10-23T18:43:35.000Z
|
2019-07-01T19:29:49.000Z
|
autotest/osr/osr_metacrs.py
|
Esri/gdal
|
8d9af5086ddb96f707ed281786a1cd278066a7f2
|
[
"MIT"
] | 6 |
2019-02-03T14:19:32.000Z
|
2021-12-19T06:36:49.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test with MetaCRS TestSuite
# Author: Frank Warmerdam, [email protected]
#
###############################################################################
# Copyright (c) 2009, Frank Warmerdam <[email protected]>
# Copyright (c) 2009-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
import csv
sys.path.append('../pymod')
import gdaltest
from osgeo import osr, gdal
###############################################################################
# Class to perform the tests.
class MetaCRSTest:
def __init__(self, test_line):
self.test_line = test_line
def parse_line(self):
test_line = self.test_line
self.src_srs = self.build_srs(test_line['srcCrsAuth'],
test_line['srcCrs'])
try:
self.dst_srs = self.build_srs(test_line['tgtCrsAuth'],
test_line['tgtCrs'])
except:
# Old style
self.dst_srs = self.build_srs(test_line['tgtCrsType'],
test_line['tgtCrs'])
if self.src_srs is None or self.dst_srs is None:
return 'fail'
try:
self.src_xyz = (float(test_line['srcOrd1']),
float(test_line['srcOrd2']),
float(test_line['srcOrd3']))
except:
self.src_xyz = (float(test_line['srcOrd1']),
float(test_line['srcOrd2']),
0.0)
try:
self.dst_xyz = (float(test_line['tgtOrd1']),
float(test_line['tgtOrd2']),
float(test_line['tgtOrd3']))
except:
self.dst_xyz = (float(test_line['tgtOrd1']),
float(test_line['tgtOrd2']),
0.0)
try:
self.dst_error = max(float(test_line['tolOrd1']),
float(test_line['tolOrd2']),
float(test_line['tolOrd3']))
except:
self.dst_error = max(float(test_line['tolOrd1']),
float(test_line['tolOrd2']))
return 'success'
def build_srs(self, type, crstext):
if type == 'EPSG':
srs = osr.SpatialReference()
if srs.ImportFromEPSGA(int(crstext)) == 0:
return srs
else:
gdaltest.post_reason('failed to translate EPSG:' + crstext)
return None
else:
gdaltest.post_reason('unsupported srs type: ' + type)
return None
def testMetaCRS(self):
result = self.parse_line()
if result != 'success':
return result
try:
gdal.PushErrorHandler('CPLQuietErrorHandler')
ct = osr.CoordinateTransformation(self.src_srs, self.dst_srs)
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg().find('Unable to load PROJ.4') != -1:
gdaltest.post_reason('PROJ.4 missing, transforms not available.')
return 'skip'
except ValueError:
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg().find('Unable to load PROJ.4') != -1:
gdaltest.post_reason('PROJ.4 missing, transforms not available.')
return 'skip'
else:
gdaltest.post_reason('failed to create coordinate transformation. %s' % gdal.GetLastErrorMsg())
return 'fail'
except:
gdal.PopErrorHandler()
gdaltest.post_reason('failed to create coordinate transformation. %s' % gdal.GetLastErrorMsg())
return 'fail'
######################################################################
# Transform source point to destination SRS, swapping EPSG GEOGCS
# axes if needed.
if self.src_srs.EPSGTreatsAsLatLong():
self.src_xyz = (self.src_xyz[1], self.src_xyz[0], self.src_xyz[2])
result = ct.TransformPoint(self.src_xyz[0], self.src_xyz[1], self.src_xyz[2])
if self.src_srs.EPSGTreatsAsLatLong():
result = (result[1], result[0], result[2])
######################################################################
# Check results.
error = abs(result[0] - self.dst_xyz[0]) \
+ abs(result[1] - self.dst_xyz[1]) \
+ abs(result[2] - self.dst_xyz[2])
if error > self.dst_error:
err_msg = 'Dest error is %g, src=%g,%g,%g, dst=%g,%g,%g, exp=%g,%g,%g' \
% (error,
self.src_xyz[0], self.src_xyz[1], self.src_xyz[2],
result[0], result[1], result[2],
self.dst_xyz[0], self.dst_xyz[1], self.dst_xyz[2])
gdaltest.post_reason(err_msg)
gdal.Debug('OSR', 'Src SRS:\n%s\n\nDst SRS:\n%s\n'
% (self.src_srs.ExportToPrettyWkt(),
self.dst_srs.ExportToPrettyWkt()))
return 'fail'
return 'success'
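# --- Hedged sketch (not part of the original test file): the axis handling inside
# testMetaCRS() can be exercised on its own.  The EPSG codes and coordinates below are
# illustrative only and assume GDAL's osgeo bindings plus its EPSG tables are present.
def _example_axis_swap():
    src = osr.SpatialReference()
    src.ImportFromEPSGA(4326)      # geographic CRS whose EPSG axis order is lat/long
    dst = osr.SpatialReference()
    dst.ImportFromEPSGA(32631)     # projected CRS with easting/northing axes
    ct = osr.CoordinateTransformation(src, dst)
    point = (2.0, 49.0, 0.0)       # stored long/lat/z, like the CSV ordinates
    if src.EPSGTreatsAsLatLong():
        # same swap as testMetaCRS(): feed the transform lat/long order
        point = (point[1], point[0], point[2])
    return ct.TransformPoint(point[0], point[1], point[2])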
###############################################################################
# When imported build a list of units based on the files available.
gdaltest_list = []
csv_reader = csv.DictReader(open('data/Test_Data_File.csv', 'rt'))
for test in csv_reader:
ut = MetaCRSTest(test)
gdaltest_list.append((ut.testMetaCRS, test['testName']))
if __name__ == '__main__':
gdaltest.setup_run('osr_metacrs')
gdaltest.run_tests(gdaltest_list)
gdaltest.summarize()
| 37.901639 | 111 | 0.531574 |
1d446032e39cabfb02a069ce376b6a0a0fe79e4a
| 3,776 |
py
|
Python
|
GSF2App/GSF2App_data.py
|
rmca16/GSF2App
|
171a4a388e1f0e59cc11a709c2774826317a6f62
|
[
"MIT"
] | null | null | null |
GSF2App/GSF2App_data.py
|
rmca16/GSF2App
|
171a4a388e1f0e59cc11a709c2774826317a6f62
|
[
"MIT"
] | null | null | null |
GSF2App/GSF2App_data.py
|
rmca16/GSF2App
|
171a4a388e1f0e59cc11a709c2774826317a6f62
|
[
"MIT"
] | null | null | null |
###################################################################################
# Author: Ricardo Pereira
# Date: 06-06-2021
# Last Modified date: 10-09-2021
# Abstract: GSF2App: Dataset management file
###################################################################################
import numpy as np
import time
import glob
import cv2
import os
import sys  # needed for sys.exit() below
import torch
from torch.utils import data
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision.transforms as transforms
from YOLOv3.models import *
from YOLOv3.utils import *
class Dataset(data.Dataset):
def __init__(self, data_path):
'Initialization'
self.training_stage = 1
self.imgs_path = glob.glob(data_path+'*.png')
if not self.imgs_path:
print("No png images available")
sys.exit()
self.yolo = YOLOv3()
def __len__(self):
'Denotes the total number of samples'
return len(self.imgs_path)
def __getitem__(self, index):
'Generates one sample of data'
# Select sample
img_path = self.imgs_path[index]
img = cv2.imread(img_path, cv2.IMREAD_COLOR)
img = cv2.resize(img, (224, 224), fx = 1, fy = 1, interpolation = cv2.INTER_LINEAR)
# Load data and get label
X = torch.from_numpy(img).float()
X_data = X.permute(2,0,1)
		y = int(0)  # placeholder label; np.long was an alias for int and has been removed from NumPy
		print("PLEASE, ADD SOME CODE TO READ LABELS FROM THE DATASET")
if self.training_stage == 1:
return X_data, y
elif self.training_stage == 2:
yolo_img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
sfv = self.yolo.get_semantic_features(yolo_img)
sfv = sfv.astype(np.float16)
sfv = torch.from_numpy(sfv).float()
return X_data, sfv, y
class YOLOv3(object):
def __init__(self):
self.model_def = os.path.join('YOLOv3','config','yolov3.cfg')
self.weights_path = os.path.join('YOLOv3','weights','yolov3.weights')
self.class_path = os.path.join('YOLOv3','weights','coco.names')
self.conf_thres = 0.2
self.nms_thres = 0.45
self.img_size = 416
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Set up model
self.model = Darknet(self.model_def, img_size = self.img_size).to(self.device)
if self.weights_path.endswith(".weights"):
self.model.load_darknet_weights(self.weights_path)
else:
self.model.load_state_dict(torch.load(self.weights_path))
self.model.eval() # Set in evaluation mode
self.Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
self.yolo_classes = load_classes(self.class_path)
def get_semantic_features(self, image):
img_shape = image.shape
# Extract image as PyTorch tensor
img = transforms.ToTensor()(image)
# Pad to square resolution
img, _ = pad_to_square(img, 0)
# Resize
img = resize(img, self.img_size)
# Configure input
input_img = Variable(img.type(self.Tensor))
# Get detections
with torch.no_grad():
#print(input_img.unsqueeze(0).shape)
detections = self.model(input_img.unsqueeze(0))
detections = non_max_suppression(detections, self.conf_thres, self.nms_thres)
sfv = np.zeros((len(self.yolo_classes)), dtype = np.int8)
for idx, img_detections in enumerate(detections):
if img_detections is not None:
# Rescale boxes to original image
img_detections = rescale_boxes(img_detections, self.img_size, img_shape[:2])
unique_labels = img_detections[:, -1].cpu().unique()
for x1, y1, x2, y2, conf, cls_conf, cls_pred in img_detections:
sfv[int(cls_pred)] += 1
return sfv
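# --- Hedged usage sketch (not part of the original file): how the Dataset above is
# typically consumed for training stage 1.  The folder name and batch size are
# assumptions; it also presumes *.png files exist there and that the YOLOv3 config,
# weights and class files referenced in YOLOv3.__init__ are available.
if __name__ == '__main__':
	dataset = Dataset('data/rgb/')  # hypothetical folder containing *.png images
	loader = data.DataLoader(dataset, batch_size=4, shuffle=True)
	for images, labels in loader:
		# images: [4, 3, 224, 224] float tensor, labels: [4] placeholder labels
		print(images.shape, labels.shape)
		break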
| 30.95082 | 92 | 0.626324 |
848a57fc25a23118245585d7ad15faa4c36b3262
| 6,457 |
py
|
Python
|
controllers/contacts_helper.py
|
qwc-services/sogis-agdi
|
f278612c42f648da07448905f2b8021b279e66bc
|
[
"MIT"
] | null | null | null |
controllers/contacts_helper.py
|
qwc-services/sogis-agdi
|
f278612c42f648da07448905f2b8021b279e66bc
|
[
"MIT"
] | null | null | null |
controllers/contacts_helper.py
|
qwc-services/sogis-agdi
|
f278612c42f648da07448905f2b8021b279e66bc
|
[
"MIT"
] | null | null | null |
from sqlalchemy.orm import joinedload
class ContactsHelper:
"""Helper class for querying contacts"""
ROLE_RESPONSIBLE = "Verantwortlicher"
ROLE_DATA_OWNER = "Datenherr"
ROLE_SUPPLIER = "Lieferant"
def __init__(self, config_models, logger):
"""Constructor
:param ConfigModels config_models: Helper for ORM models
:param Logger logger: Application logger
"""
self.config_models = config_models
self.logger = logger
self.Contact = self.config_models.model('contact')
self.Person = self.config_models.model('person')
self.Organisation = self.config_models.model('organisation')
self.ContactRole = self.config_models.model('contact_role')
self.ResourceContact = self.config_models.model('resource_contact')
def contact_choices(self):
"""Return choices for a contact select field."""
choices = []
session = self.config_models.session()
# get contacts, organisations first
query = session.query(self.Contact).order_by(self.Contact.type.desc())
# order by organisation name then contact name
query = query.outerjoin(self.Contact.organisation) \
.order_by(self.Organisation.name, self.Contact.name)
# eager load relations
query = query.options(joinedload(self.Contact.organisation))
# collect person IDs and names
for contact in query.all():
name = contact.name
organisation = contact.organisation
if organisation:
name = "%s / %s" % (
contact.name, organisation.abbreviation or organisation.name
)
choices.append((contact.id, name))
session.close()
return choices
def person_choices(self):
"""Return choices for a person select field."""
choices = []
session = self.config_models.session()
# get persons
query = session.query(self.Person)
# order by organisation name then person name
query = query.outerjoin(self.Person.organisation) \
.order_by(self.Organisation.name, self.Person.name)
# eager load relations
query = query.options(joinedload(self.Person.organisation))
# collect person IDs and names
for person in query.all():
name = person.name
organisation = person.organisation
if organisation:
name = "%s / %s" % (
person.name, organisation.abbreviation or organisation.name
)
choices.append((person.id, name))
session.close()
return choices
def resource_contact_id(self, gdi_resource_id, role_type):
"""Return assigned contact ID for a GDI resource and role.
:param int gdi_resource_id: GDI resource ID
:param str role_type: Contact role type
"""
contact_id = None
# find resource_contact
session = self.config_models.session()
resource_contact = self.resource_contact(
gdi_resource_id, role_type, session
)
if resource_contact is not None:
# get contact ID
contact_id = resource_contact.id_contact
session.close()
return contact_id
def resource_contact(self, gdi_resource_id, role_type, session):
"""Return resource_contact for a GDI resource and role.
:param int gdi_resource_id: GDI resource ID
:param str role_type: Contact role type
:param Session session: DB session
"""
resource_contact = None
# find contact role
role = self.role(role_type, session)
if role is not None:
# find resource_contact
query = session.query(self.ResourceContact).filter_by(
id_contact_role=role.id,
gdi_oid_resource=gdi_resource_id
)
resource_contact = query.first()
return resource_contact
def update_resource_contact(self, gdi_resource_id, role_type, contact_id,
session):
"""Update resource_contact for a GDI resource
NOTE: Creates new contact role if missing
:param int gdi_resource_id: GDI resource ID
:param str role_type: Contact role type
:param int contact_id: Contact ID (set 0 to remove)
:param Session session: DB session
"""
# find existing resource_contact
resource_contact = self.resource_contact(
gdi_resource_id, role_type, session
)
if resource_contact is None:
if contact_id > 0:
# find contact role
role = self.role(role_type, session)
if role is None:
# create new contact role if missing
self.logger.info(
"Creating new contact role '%s'" % role_type
)
role = self.ContactRole(type=role_type)
session.add(role)
session.flush()
# create new resource_contact
resource_contact = self.ResourceContact(
id_contact_role=role.id,
id_contact=contact_id,
gdi_oid_resource=gdi_resource_id
)
session.add(resource_contact)
else:
if contact_id > 0:
# update existing resource_contact
resource_contact.id_contact = contact_id
else:
# remove existing resource_contact
session.delete(resource_contact)
def remove_resource_contacts(self, gdi_resource_id, session):
"""Remove all resource_contacts for a GDI resource.
:param int gdi_resource_id: GDI resource ID
:param Session session: DB session
"""
query = session.query(self.ResourceContact).filter_by(
gdi_oid_resource=gdi_resource_id
)
query.delete()
def role(self, role_type, session):
"""Return contact_role by role type.
:param str role_type: Contact role type
:param Session session: DB session
"""
# get contact_role from DB
query = session.query(self.ContactRole).filter_by(type=role_type)
role = query.first()
return role
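# --- Hedged usage sketch (not part of the original module): ContactsHelper is driven
# by the service's ConfigModels helper and a logger; both arguments below are
# assumptions, only the call pattern is illustrated.
def _example_assign_data_owner(config_models, logger, gdi_resource_id, contact_id):
    helper = ContactsHelper(config_models, logger)
    session = config_models.session()
    # assigns (or, with contact_id == 0, removes) the "Datenherr" contact
    helper.update_resource_contact(
        gdi_resource_id, ContactsHelper.ROLE_DATA_OWNER, contact_id, session
    )
    session.commit()
    session.close()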
| 33.984211 | 80 | 0.599659 |
0491101ed10e2ff22c342edfa26e81e7ca17547b
| 1,852 |
py
|
Python
|
tests/sklearn/test_SklearnTfidfVectorizerConverter.py
|
yuhonghong66/onnxmltools
|
a7cab9fe950fece6fcc1a84d1a60f3f99a68c22c
|
[
"MIT"
] | null | null | null |
tests/sklearn/test_SklearnTfidfVectorizerConverter.py
|
yuhonghong66/onnxmltools
|
a7cab9fe950fece6fcc1a84d1a60f3f99a68c22c
|
[
"MIT"
] | null | null | null |
tests/sklearn/test_SklearnTfidfVectorizerConverter.py
|
yuhonghong66/onnxmltools
|
a7cab9fe950fece6fcc1a84d1a60f3f99a68c22c
|
[
"MIT"
] | null | null | null |
"""
Tests scikit-learn's TfidfVectorizer converter.
"""
import unittest
import numpy
from sklearn.feature_extraction.text import TfidfVectorizer
from onnxmltools import convert_sklearn
from onnxmltools.convert.common.data_types import StringTensorType
from onnxmltools.utils import dump_data_and_model
class TestSklearnTfidfVectorizer(unittest.TestCase):
def test_model_tfidf_vectorizer11(self):
corpus = [
'This is the first document.',
'This document is the second document.',
'And this is the third one.',
'Is this the first document?',
]
vect = TfidfVectorizer(ngram_range=(1, 1))
vect.fit(corpus)
pred = vect.transform(corpus)
model_onnx = convert_sklearn(vect, 'scikit-learn count vectorizer', [('input', StringTensorType([1, 1]))])
self.assertTrue(model_onnx is not None)
# REVIEW: enable the test when the runtime implements the primitives.
# dump_data_and_model(corpus, vect, model_onnx, basename="SklearnTfidfVectorizer")
def test_model_tfidf_vectorizer13(self):
corpus = [
'This is the first document.',
'This document is the second document.',
'And this is the third one.',
'Is this the first document?',
]
vect = TfidfVectorizer(ngram_range=(1, 3))
vect.fit(corpus)
pred = vect.transform(corpus)
model_onnx = convert_sklearn(vect, 'scikit-learn count vectorizer', [('input', StringTensorType([1, 1]))])
self.assertTrue(model_onnx is not None)
# REVIEW: enable the test when the runtime implements the primitives.
# dump_data_and_model(corpus, vect, model_onnx, basename="SklearnTfidfVectorizer")
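# --- Hedged sketch (not part of the original tests): the two cases above differ only in
# ngram_range; this shows how the fitted vocabulary grows with larger n-grams, which is
# what the converted ONNX graph has to reproduce.  The corpus is illustrative only.
def _example_vocabulary_sizes():
    corpus = ['This is the first document.', 'Is this the first document?']
    unigrams = TfidfVectorizer(ngram_range=(1, 1)).fit(corpus)
    uni_to_tri = TfidfVectorizer(ngram_range=(1, 3)).fit(corpus)
    return len(unigrams.vocabulary_), len(uni_to_tri.vocabulary_)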
if __name__ == "__main__":
unittest.main()
| 39.404255 | 114 | 0.655508 |
3906ad6156ef40bfe5aa32808ab9c479332bbbf7
| 3,610 |
py
|
Python
|
cms/forms/utils.py
|
jinktv/django-cms
|
d8c689957f0d098a106829e896e0c91d0c1abd86
|
[
"BSD-3-Clause"
] | 1 |
2015-09-28T10:07:38.000Z
|
2015-09-28T10:07:38.000Z
|
cms/forms/utils.py
|
jinktv/django-cms
|
d8c689957f0d098a106829e896e0c91d0c1abd86
|
[
"BSD-3-Clause"
] | 1 |
2021-03-19T15:46:42.000Z
|
2021-03-19T15:46:42.000Z
|
cms/forms/utils.py
|
jinktv/django-cms
|
d8c689957f0d098a106829e896e0c91d0c1abd86
|
[
"BSD-3-Clause"
] | 1 |
2016-11-07T01:42:14.000Z
|
2016-11-07T01:42:14.000Z
|
# -*- coding: utf-8 -*-
from cms.models import Page
from cms.models.titlemodels import Title
from cms.utils import i18n
from collections import defaultdict
from cms.utils.conf import get_cms_setting
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.db.models.signals import post_save, post_delete
from django.utils import translation
from django.utils.datastructures import SortedDict
from django.utils.safestring import mark_safe
def update_site_and_page_choices(lang=None):
lang = lang or translation.get_language()
SITE_CHOICES_KEY = get_site_cache_key(lang)
PAGE_CHOICES_KEY = get_page_cache_key(lang)
title_queryset = (Title.objects.drafts()
.select_related('page', 'page__site')
.order_by('page__tree_id', 'page__lft', 'page__rght'))
pages = defaultdict(SortedDict)
sites = {}
for title in title_queryset:
page = pages[title.page.site.pk].get(title.page.pk, {})
page[title.language] = title
pages[title.page.site.pk][title.page.pk] = page
sites[title.page.site.pk] = title.page.site.name
site_choices = []
page_choices = [('', '----')]
language_order = [lang] + i18n.get_fallback_languages(lang)
for sitepk, sitename in sites.items():
site_choices.append((sitepk, sitename))
site_page_choices = []
for titles in pages[sitepk].values():
title = None
for language in language_order:
title = titles.get(language)
if title:
break
if not title:
continue
indent = u" " * title.page.level
page_title = mark_safe(u"%s%s" % (indent, title.title))
site_page_choices.append((title.page.pk, page_title))
page_choices.append((sitename, site_page_choices))
# We set it to 1 day here because we actively invalidate this cache.
cache.set(SITE_CHOICES_KEY, site_choices, 86400)
cache.set(PAGE_CHOICES_KEY, page_choices, 86400)
return site_choices, page_choices
def get_site_choices(lang=None):
lang = lang or translation.get_language()
site_choices = cache.get(get_site_cache_key(lang))
if site_choices is None:
site_choices, page_choices = update_site_and_page_choices(lang)
return site_choices
def get_page_choices(lang=None):
lang = lang or translation.get_language()
page_choices = cache.get(get_page_cache_key(lang))
if page_choices is None:
site_choices, page_choices = update_site_and_page_choices(lang)
return page_choices
def _get_key(prefix, lang):
return "%s-%s" % (prefix, lang)
def get_site_cache_key(lang):
return _get_key(get_cms_setting('SITE_CHOICES_CACHE_KEY'), lang)
def get_page_cache_key(lang):
return _get_key(get_cms_setting('PAGE_CHOICES_CACHE_KEY'), lang)
def _clean_many(prefix):
keys = []
for lang in [language[0] for language in settings.LANGUAGES]:
keys.append(_get_key(prefix, lang))
cache.delete_many(keys)
def clean_site_choices_cache(sender, **kwargs):
_clean_many(get_cms_setting('SITE_CHOICES_CACHE_KEY'))
def clean_page_choices_cache(sender, **kwargs):
_clean_many(get_cms_setting('PAGE_CHOICES_CACHE_KEY'))
post_save.connect(clean_page_choices_cache, sender=Page)
post_save.connect(clean_site_choices_cache, sender=Site)
post_delete.connect(clean_page_choices_cache, sender=Page)
post_delete.connect(clean_site_choices_cache, sender=Site)
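# --- Hedged usage sketch (not part of the original module): requires a configured
# django CMS project (settings, cache backend, Site/Page data).  The language code is
# illustrative; both calls hit the cache first and rebuild it on a miss via
# update_site_and_page_choices(), while the signal handlers above clear every
# per-language key whenever a Page or Site changes.
def _example_choice_fields(lang='en'):
    return get_site_choices(lang), get_page_choices(lang)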
| 36.464646 | 76 | 0.701385 |
b065f3be95cae0c74446d0d180ef9ba1431f24ad
| 3,957 |
py
|
Python
|
keymint_keymake/authorities.py
|
keymint/keymint_keymake
|
adc38e07ce5f16d6ba4b36294d7d2e8a361153f0
|
[
"Apache-2.0"
] | null | null | null |
keymint_keymake/authorities.py
|
keymint/keymint_keymake
|
adc38e07ce5f16d6ba4b36294d7d2e8a361153f0
|
[
"Apache-2.0"
] | null | null | null |
keymint_keymake/authorities.py
|
keymint/keymint_keymake
|
adc38e07ce5f16d6ba4b36294d7d2e8a361153f0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import os
# import datetime
from copy import deepcopy
# from xml.etree import cElementTree as ElementTree
# import xmlschema
# from .exceptions import InvalidPermissionsXML
# from .namespace import DDSNamespaceHelper
# from .schemas import get_dds_schema_path
from keymint_keymake.pki.asymmetric import AsymmetricHelper
from keymint_keymake.pki.certificate import CertificateHelper
from keymint_keymake.pki.certificate import get_ca_csr, get_ca_key
# from cryptography import x509
# from cryptography.hazmat.backends import default_backend
# from cryptography.hazmat.primitives import serialization
class AuthoritiesHelper:
"""Help build authorities into artifacts."""
def __init__(self):
pass
def init(self, context):
raise NotImplementedError
class DDSAuthoritiesHelper(AuthoritiesHelper):
"""Help build authorities into artifacts."""
def __init__(self):
self.dds_asymmetric_helper = AsymmetricHelper()
self.dds_certificate_helper = CertificateHelper()
def _build_key(self, context, key):
asymmetric_types = key.find('asymmetric_type')
asymmetric_type = asymmetric_types.getchildren()[0]
generator = getattr(self.dds_asymmetric_helper, asymmetric_type.tag)
dds_key = generator(context, asymmetric_type)
return dds_key
def _build_csr(self, context, authority, csr, dds_key):
dds_csr = self.dds_certificate_helper.build_csr(context, authority, csr, dds_key)
return dds_csr
def _build_authority(self, context, authority):
dds_authority = {}
dds_authority['name'] = authority.get('name')
key = authority.find('key')
dds_key = self._build_key(context, key)
dds_key_bytes = self.dds_asymmetric_helper.serialize(context, key, dds_key)
csr = authority.find('cert')
dds_csr = self._build_csr(context, authority, csr, dds_key)
dds_csr_bytes = self.dds_certificate_helper.serialize(context, csr, dds_csr)
dds_authority['dds_key'] = {'object': dds_key, 'bytes': dds_key_bytes}
dds_authority['dds_csr'] = {'object': dds_csr, 'bytes': dds_csr_bytes}
return dds_authority
def build_iter(self, context):
authorities = deepcopy(context.profile_manifest.authorities)
for authority in authorities.findall('authority'):
dds_authority = self._build_authority(context, authority)
yield dds_authority
return
def _install_authority(self, context, authority):
dds_authority = {}
dds_authority['name'] = authority.get('name')
cert = authority.find('cert')
dds_csr = get_ca_csr(context, dds_authority['name'])
dds_key = get_ca_key(context, dds_authority['name'])
dds_cert = self.dds_certificate_helper.install_cert(context, cert, dds_csr, dds_key)
dds_cert_bytes = self.dds_certificate_helper.serialize(context, cert, dds_cert)
dds_authority['dds_cert'] = {'object': dds_cert, 'bytes': dds_cert_bytes}
return dds_authority
def install_iter(self, context):
authorities = deepcopy(context.profile_manifest.authorities)
for authority in authorities.findall('authority'):
dds_authority = self._install_authority(context, authority)
yield dds_authority
return
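# --- Hedged usage sketch (not part of the original module): build_iter() walks the
# <authority> elements of the profile manifest carried by keymint's build context.
# The 'context' argument is an assumption (it comes from the surrounding keymint
# pipeline); only the iteration pattern and the returned dict layout are shown.
def _example_dump_authorities(context):
    helper = DDSAuthoritiesHelper()
    for dds_authority in helper.build_iter(context):
        # each item carries the serialized key and CSR bytes next to the objects
        print(dds_authority['name'],
              len(dds_authority['dds_key']['bytes']),
              len(dds_authority['dds_csr']['bytes']))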
| 33.533898 | 92 | 0.718979 |
37ee61417eeab3011f317c865efb982d6b6e214b
| 72,020 |
py
|
Python
|
pandas/tests/computation/test_eval.py
|
HQDragon/pandas
|
8713f2d1237a471a4f42f3fb547887bc022a5b94
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 2 |
2022-02-02T02:05:28.000Z
|
2022-02-02T02:09:37.000Z
|
pandas/tests/computation/test_eval.py
|
HQDragon/pandas
|
8713f2d1237a471a4f42f3fb547887bc022a5b94
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 2 |
2021-02-16T06:43:48.000Z
|
2021-03-19T00:07:02.000Z
|
pandas/tests/computation/test_eval.py
|
HQDragon/pandas
|
8713f2d1237a471a4f42f3fb547887bc022a5b94
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 1 |
2020-10-28T03:32:40.000Z
|
2020-10-28T03:32:40.000Z
|
from distutils.version import LooseVersion
from functools import reduce
from itertools import product
import operator
from typing import Dict, List, Type
import warnings
import numpy as np
import pytest
from pandas.compat import is_platform_windows, np_version_under1p17
from pandas.errors import PerformanceWarning
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_bool, is_list_like, is_scalar
import pandas as pd
from pandas import DataFrame, Series, compat, date_range
import pandas._testing as tm
from pandas.core.computation import pytables
from pandas.core.computation.check import NUMEXPR_VERSION
from pandas.core.computation.engines import ENGINES, NumExprClobberingError
import pandas.core.computation.expr as expr
from pandas.core.computation.expr import (
BaseExprVisitor,
PandasExprVisitor,
PythonExprVisitor,
)
from pandas.core.computation.expressions import NUMEXPR_INSTALLED, USE_NUMEXPR
from pandas.core.computation.ops import (
ARITH_OPS_SYMS,
SPECIAL_CASE_ARITH_OPS_SYMS,
_binary_math_ops,
_binary_ops_dict,
_unary_math_ops,
)
@pytest.fixture(
params=(
pytest.param(
engine,
marks=pytest.mark.skipif(
engine == "numexpr" and not USE_NUMEXPR,
reason=f"numexpr enabled->{USE_NUMEXPR}, "
f"installed->{NUMEXPR_INSTALLED}",
),
)
for engine in ENGINES
)
)
def engine(request):
return request.param
@pytest.fixture(params=expr.PARSERS)
def parser(request):
return request.param
@pytest.fixture
def ne_lt_2_6_9():
if NUMEXPR_INSTALLED and NUMEXPR_VERSION >= LooseVersion("2.6.9"):
pytest.skip("numexpr is >= 2.6.9")
return "numexpr"
def _get_unary_fns_for_ne():
if NUMEXPR_INSTALLED:
if NUMEXPR_VERSION >= LooseVersion("2.6.9"):
return list(_unary_math_ops)
else:
return [x for x in _unary_math_ops if x not in ["floor", "ceil"]]
return []
@pytest.fixture(params=_get_unary_fns_for_ne())
def unary_fns_for_ne(request):
return request.param
def engine_has_neg_frac(engine):
return ENGINES[engine].has_neg_frac
def _eval_single_bin(lhs, cmp1, rhs, engine):
c = _binary_ops_dict[cmp1]
if engine_has_neg_frac(engine):
try:
return c(lhs, rhs)
except ValueError as e:
if str(e).startswith(
"negative number cannot be raised to a fractional power"
):
return np.nan
raise
return c(lhs, rhs)
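# --- Hedged sketch (not part of the original tests): _eval_single_bin() simply looks the
# operator symbol up in _binary_ops_dict and applies it, so both lines below are expected
# to agree (values illustrative):
#
#   _eval_single_bin(2, "+", 3, "python")    # -> 5
#   pd.eval("2 + 3", engine="python")        # -> 5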
def _series_and_2d_ndarray(lhs, rhs):
return (
isinstance(lhs, Series) and isinstance(rhs, np.ndarray) and rhs.ndim > 1
) or (isinstance(rhs, Series) and isinstance(lhs, np.ndarray) and lhs.ndim > 1)
def _series_and_frame(lhs, rhs):
return (isinstance(lhs, Series) and isinstance(rhs, DataFrame)) or (
isinstance(rhs, Series) and isinstance(lhs, DataFrame)
)
def _bool_and_frame(lhs, rhs):
return isinstance(lhs, bool) and isinstance(rhs, pd.core.generic.NDFrame)
def _is_py3_complex_incompat(result, expected):
return isinstance(expected, (complex, np.complexfloating)) and np.isnan(result)
_good_arith_ops = sorted(set(ARITH_OPS_SYMS).difference(SPECIAL_CASE_ARITH_OPS_SYMS))
# TODO: using range(5) here is a kludge
@pytest.fixture(params=list(range(5)))
def lhs(request):
nan_df1 = DataFrame(np.random.rand(10, 5))
nan_df1[nan_df1 > 0.5] = np.nan
opts = (
DataFrame(np.random.randn(10, 5)),
Series(np.random.randn(5)),
Series([1, 2, np.nan, np.nan, 5]),
nan_df1,
np.random.randn(),
)
return opts[request.param]
rhs = lhs
midhs = lhs
@td.skip_if_no_ne
class TestEvalNumexprPandas:
exclude_cmp: List[str] = []
exclude_bool: List[str] = []
engine = "numexpr"
parser = "pandas"
@classmethod
def setup_class(cls):
import numexpr as ne
cls.ne = ne
@property
def current_engines(self):
return (engine for engine in ENGINES if engine != self.engine)
@pytest.mark.parametrize(
"cmp1",
["!=", "==", "<=", ">=", "<", ">"],
ids=["ne", "eq", "le", "ge", "lt", "gt"],
)
@pytest.mark.parametrize("cmp2", [">", "<"], ids=["gt", "lt"])
@pytest.mark.parametrize("binop", expr.BOOL_OPS_SYMS)
def test_complex_cmp_ops(self, cmp1, cmp2, binop, lhs, rhs):
if binop in self.exclude_bool:
pytest.skip()
lhs_new = _eval_single_bin(lhs, cmp1, rhs, self.engine)
rhs_new = _eval_single_bin(lhs, cmp2, rhs, self.engine)
expected = _eval_single_bin(lhs_new, binop, rhs_new, self.engine)
ex = f"(lhs {cmp1} rhs) {binop} (lhs {cmp2} rhs)"
result = pd.eval(ex, engine=self.engine, parser=self.parser)
self.check_equal(result, expected)
@pytest.mark.parametrize("cmp_op", expr.CMP_OPS_SYMS)
def test_simple_cmp_ops(self, cmp_op):
if cmp_op in self.exclude_cmp:
pytest.skip()
bool_lhses = (
DataFrame(tm.randbool(size=(10, 5))),
Series(tm.randbool((5,))),
tm.randbool(),
)
bool_rhses = (
DataFrame(tm.randbool(size=(10, 5))),
Series(tm.randbool((5,))),
tm.randbool(),
)
for lhs, rhs in product(bool_lhses, bool_rhses):
self.check_simple_cmp_op(lhs, cmp_op, rhs)
@pytest.mark.parametrize("op", _good_arith_ops)
def test_binary_arith_ops(self, op, lhs, rhs, request):
if (
op == "/"
and isinstance(lhs, DataFrame)
and isinstance(rhs, DataFrame)
and not lhs.isna().any().any()
and rhs.shape == (10, 5)
and np_version_under1p17
and is_platform_windows()
and compat.PY38
):
mark = pytest.mark.xfail(
reason="GH#37328 floating point precision on Windows builds"
)
request.node.add_marker(mark)
self.check_binary_arith_op(lhs, op, rhs)
def test_modulus(self, lhs, rhs):
self.check_modulus(lhs, "%", rhs)
def test_floor_division(self, lhs, rhs):
self.check_floor_division(lhs, "//", rhs)
@td.skip_if_windows
def test_pow(self, lhs, rhs):
# odd failure on win32 platform, so skip
self.check_pow(lhs, "**", rhs)
@pytest.mark.parametrize("op", expr.CMP_OPS_SYMS)
def test_single_invert_op(self, op, lhs):
if op in self.exclude_cmp:
pytest.skip()
self.check_single_invert_op(lhs, op)
@pytest.mark.parametrize("op", expr.CMP_OPS_SYMS)
def test_compound_invert_op(self, op, lhs, rhs):
if op in self.exclude_cmp:
pytest.skip()
self.check_compound_invert_op(lhs, op, rhs)
@pytest.mark.parametrize("cmp1", ["<", ">"])
@pytest.mark.parametrize("cmp2", ["<", ">"])
def test_chained_cmp_op(self, cmp1, cmp2, lhs, midhs, rhs):
self.check_chained_cmp_op(lhs, cmp1, midhs, cmp2, rhs)
def check_equal(self, result, expected):
if isinstance(result, DataFrame):
tm.assert_frame_equal(result, expected)
elif isinstance(result, Series):
tm.assert_series_equal(result, expected)
elif isinstance(result, np.ndarray):
tm.assert_numpy_array_equal(result, expected)
else:
assert result == expected
def check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, rhs):
def check_operands(left, right, cmp_op):
return _eval_single_bin(left, cmp_op, right, self.engine)
lhs_new = check_operands(lhs, mid, cmp1)
rhs_new = check_operands(mid, rhs, cmp2)
if lhs_new is not None and rhs_new is not None:
ex1 = f"lhs {cmp1} mid {cmp2} rhs"
ex2 = f"lhs {cmp1} mid and mid {cmp2} rhs"
ex3 = f"(lhs {cmp1} mid) & (mid {cmp2} rhs)"
expected = _eval_single_bin(lhs_new, "&", rhs_new, self.engine)
for ex in (ex1, ex2, ex3):
result = pd.eval(ex, engine=self.engine, parser=self.parser)
tm.assert_almost_equal(result, expected)
def check_simple_cmp_op(self, lhs, cmp1, rhs):
ex = f"lhs {cmp1} rhs"
msg = (
r"only list-like( or dict-like)? objects are allowed to be "
r"passed to (DataFrame\.)?isin\(\), you passed a "
r"(\[|')bool(\]|')|"
"argument of type 'bool' is not iterable"
)
if cmp1 in ("in", "not in") and not is_list_like(rhs):
with pytest.raises(TypeError, match=msg):
pd.eval(
ex,
engine=self.engine,
parser=self.parser,
local_dict={"lhs": lhs, "rhs": rhs},
)
else:
expected = _eval_single_bin(lhs, cmp1, rhs, self.engine)
result = pd.eval(ex, engine=self.engine, parser=self.parser)
self.check_equal(result, expected)
def check_binary_arith_op(self, lhs, arith1, rhs):
ex = f"lhs {arith1} rhs"
result = pd.eval(ex, engine=self.engine, parser=self.parser)
expected = _eval_single_bin(lhs, arith1, rhs, self.engine)
tm.assert_almost_equal(result, expected)
ex = f"lhs {arith1} rhs {arith1} rhs"
result = pd.eval(ex, engine=self.engine, parser=self.parser)
nlhs = _eval_single_bin(lhs, arith1, rhs, self.engine)
self.check_alignment(result, nlhs, rhs, arith1)
def check_alignment(self, result, nlhs, ghs, op):
try:
nlhs, ghs = nlhs.align(ghs)
except (ValueError, TypeError, AttributeError):
# ValueError: series frame or frame series align
# TypeError, AttributeError: series or frame with scalar align
pass
else:
# direct numpy comparison
expected = self.ne.evaluate(f"nlhs {op} ghs")
# Update assert statement due to unreliable numerical
# precision component (GH37328)
# TODO: update testing code so that assert_almost_equal statement
# can be replaced again by the assert_numpy_array_equal statement
tm.assert_almost_equal(result.values, expected)
# modulus, pow, and floor division require special casing
def check_modulus(self, lhs, arith1, rhs):
ex = f"lhs {arith1} rhs"
result = pd.eval(ex, engine=self.engine, parser=self.parser)
expected = lhs % rhs
tm.assert_almost_equal(result, expected)
expected = self.ne.evaluate(f"expected {arith1} rhs")
if isinstance(result, (DataFrame, Series)):
tm.assert_almost_equal(result.values, expected)
else:
tm.assert_almost_equal(result, expected.item())
def check_floor_division(self, lhs, arith1, rhs):
ex = f"lhs {arith1} rhs"
if self.engine == "python":
res = pd.eval(ex, engine=self.engine, parser=self.parser)
expected = lhs // rhs
self.check_equal(res, expected)
else:
msg = (
r"unsupported operand type\(s\) for //: 'VariableNode' and "
"'VariableNode'"
)
with pytest.raises(TypeError, match=msg):
pd.eval(
ex,
local_dict={"lhs": lhs, "rhs": rhs},
engine=self.engine,
parser=self.parser,
)
def get_expected_pow_result(self, lhs, rhs):
try:
expected = _eval_single_bin(lhs, "**", rhs, self.engine)
except ValueError as e:
if str(e).startswith(
"negative number cannot be raised to a fractional power"
):
if self.engine == "python":
pytest.skip(str(e))
else:
expected = np.nan
else:
raise
return expected
def check_pow(self, lhs, arith1, rhs):
ex = f"lhs {arith1} rhs"
expected = self.get_expected_pow_result(lhs, rhs)
result = pd.eval(ex, engine=self.engine, parser=self.parser)
if (
is_scalar(lhs)
and is_scalar(rhs)
and _is_py3_complex_incompat(result, expected)
):
msg = "(DataFrame.columns|numpy array) are different"
with pytest.raises(AssertionError, match=msg):
tm.assert_numpy_array_equal(result, expected)
else:
tm.assert_almost_equal(result, expected)
ex = f"(lhs {arith1} rhs) {arith1} rhs"
result = pd.eval(ex, engine=self.engine, parser=self.parser)
expected = self.get_expected_pow_result(
self.get_expected_pow_result(lhs, rhs), rhs
)
tm.assert_almost_equal(result, expected)
def check_single_invert_op(self, elem, cmp1):
# simple
try:
elb = elem.astype(bool)
except AttributeError:
elb = np.array([bool(elem)])
expected = ~elb
result = pd.eval("~elb", engine=self.engine, parser=self.parser)
tm.assert_almost_equal(expected, result)
for engine in self.current_engines:
tm.assert_almost_equal(
result, pd.eval("~elb", engine=engine, parser=self.parser)
)
def check_compound_invert_op(self, lhs, cmp1, rhs):
skip_these = ["in", "not in"]
ex = f"~(lhs {cmp1} rhs)"
msg = (
r"only list-like( or dict-like)? objects are allowed to be "
r"passed to (DataFrame\.)?isin\(\), you passed a "
r"(\[|')float(\]|')|"
"argument of type 'float' is not iterable"
)
if is_scalar(rhs) and cmp1 in skip_these:
with pytest.raises(TypeError, match=msg):
pd.eval(
ex,
engine=self.engine,
parser=self.parser,
local_dict={"lhs": lhs, "rhs": rhs},
)
else:
# compound
if is_scalar(lhs) and is_scalar(rhs):
lhs, rhs = map(lambda x: np.array([x]), (lhs, rhs))
expected = _eval_single_bin(lhs, cmp1, rhs, self.engine)
if is_scalar(expected):
expected = not expected
else:
expected = ~expected
result = pd.eval(ex, engine=self.engine, parser=self.parser)
tm.assert_almost_equal(expected, result)
# make sure the other engines work the same as this one
for engine in self.current_engines:
ev = pd.eval(ex, engine=self.engine, parser=self.parser)
tm.assert_almost_equal(ev, result)
def ex(self, op, var_name="lhs"):
return f"{op}{var_name}"
def test_frame_invert(self):
expr = self.ex("~")
# ~ ##
# frame
# float always raises
lhs = DataFrame(np.random.randn(5, 2))
if self.engine == "numexpr":
msg = "couldn't find matching opcode for 'invert_dd'"
with pytest.raises(NotImplementedError, match=msg):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
else:
msg = "ufunc 'invert' not supported for the input types"
with pytest.raises(TypeError, match=msg):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
# int raises on numexpr
lhs = DataFrame(np.random.randint(5, size=(5, 2)))
if self.engine == "numexpr":
msg = "couldn't find matching opcode for 'invert"
with pytest.raises(NotImplementedError, match=msg):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
else:
expect = ~lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
tm.assert_frame_equal(expect, result)
# bool always works
lhs = DataFrame(np.random.rand(5, 2) > 0.5)
expect = ~lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
tm.assert_frame_equal(expect, result)
# object raises
lhs = DataFrame({"b": ["a", 1, 2.0], "c": np.random.rand(3) > 0.5})
if self.engine == "numexpr":
with pytest.raises(ValueError, match="unknown type object"):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
else:
msg = "bad operand type for unary ~: 'str'"
with pytest.raises(TypeError, match=msg):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
def test_series_invert(self):
# ~ ####
expr = self.ex("~")
# series
# float raises
lhs = Series(np.random.randn(5))
if self.engine == "numexpr":
msg = "couldn't find matching opcode for 'invert_dd'"
with pytest.raises(NotImplementedError, match=msg):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
else:
msg = "ufunc 'invert' not supported for the input types"
with pytest.raises(TypeError, match=msg):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
# int raises on numexpr
lhs = Series(np.random.randint(5, size=5))
if self.engine == "numexpr":
msg = "couldn't find matching opcode for 'invert"
with pytest.raises(NotImplementedError, match=msg):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
else:
expect = ~lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
tm.assert_series_equal(expect, result)
# bool
lhs = Series(np.random.rand(5) > 0.5)
expect = ~lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
tm.assert_series_equal(expect, result)
# float
# int
# bool
# object
lhs = Series(["a", 1, 2.0])
if self.engine == "numexpr":
with pytest.raises(ValueError, match="unknown type object"):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
else:
msg = "bad operand type for unary ~: 'str'"
with pytest.raises(TypeError, match=msg):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
def test_frame_negate(self):
expr = self.ex("-")
# float
lhs = DataFrame(np.random.randn(5, 2))
expect = -lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
tm.assert_frame_equal(expect, result)
# int
lhs = DataFrame(np.random.randint(5, size=(5, 2)))
expect = -lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
tm.assert_frame_equal(expect, result)
# bool doesn't work with numexpr but works elsewhere
lhs = DataFrame(np.random.rand(5, 2) > 0.5)
if self.engine == "numexpr":
msg = "couldn't find matching opcode for 'neg_bb'"
with pytest.raises(NotImplementedError, match=msg):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
else:
expect = -lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
tm.assert_frame_equal(expect, result)
def test_series_negate(self):
expr = self.ex("-")
# float
lhs = Series(np.random.randn(5))
expect = -lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
tm.assert_series_equal(expect, result)
# int
lhs = Series(np.random.randint(5, size=5))
expect = -lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
tm.assert_series_equal(expect, result)
# bool doesn't work with numexpr but works elsewhere
lhs = Series(np.random.rand(5) > 0.5)
if self.engine == "numexpr":
msg = "couldn't find matching opcode for 'neg_bb'"
with pytest.raises(NotImplementedError, match=msg):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
else:
expect = -lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
tm.assert_series_equal(expect, result)
@pytest.mark.parametrize(
"lhs",
[
# Float
DataFrame(np.random.randn(5, 2)),
# Int
DataFrame(np.random.randint(5, size=(5, 2))),
# bool doesn't work with numexpr but works elsewhere
DataFrame(np.random.rand(5, 2) > 0.5),
],
)
def test_frame_pos(self, lhs):
expr = self.ex("+")
expect = lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
tm.assert_frame_equal(expect, result)
@pytest.mark.parametrize(
"lhs",
[
# Float
Series(np.random.randn(5)),
# Int
Series(np.random.randint(5, size=5)),
# bool doesn't work with numexpr but works elsewhere
Series(np.random.rand(5) > 0.5),
],
)
def test_series_pos(self, lhs):
expr = self.ex("+")
expect = lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
tm.assert_series_equal(expect, result)
def test_scalar_unary(self):
msg = "bad operand type for unary ~: 'float'"
with pytest.raises(TypeError, match=msg):
pd.eval("~1.0", engine=self.engine, parser=self.parser)
assert pd.eval("-1.0", parser=self.parser, engine=self.engine) == -1.0
assert pd.eval("+1.0", parser=self.parser, engine=self.engine) == +1.0
assert pd.eval("~1", parser=self.parser, engine=self.engine) == ~1
assert pd.eval("-1", parser=self.parser, engine=self.engine) == -1
assert pd.eval("+1", parser=self.parser, engine=self.engine) == +1
assert pd.eval("~True", parser=self.parser, engine=self.engine) == ~True
assert pd.eval("~False", parser=self.parser, engine=self.engine) == ~False
assert pd.eval("-True", parser=self.parser, engine=self.engine) == -True
assert pd.eval("-False", parser=self.parser, engine=self.engine) == -False
assert pd.eval("+True", parser=self.parser, engine=self.engine) == +True
assert pd.eval("+False", parser=self.parser, engine=self.engine) == +False
def test_unary_in_array(self):
# GH 11235
tm.assert_numpy_array_equal(
pd.eval(
"[-True, True, ~True, +True,"
"-False, False, ~False, +False,"
"-37, 37, ~37, +37]"
),
np.array(
[
-True,
True,
~True,
+True,
-False,
False,
~False,
+False,
-37,
37,
~37,
+37,
],
dtype=np.object_,
),
)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_float_comparison_bin_op(self, dtype):
# GH 16363
df = DataFrame({"x": np.array([0], dtype=dtype)})
res = df.eval("x < -0.1")
assert res.values == np.array([False])
res = df.eval("-5 > x")
assert res.values == np.array([False])
def test_disallow_scalar_bool_ops(self):
exprs = "1 or 2", "1 and 2"
exprs += "a and b", "a or b"
exprs += ("1 or 2 and (3 + 2) > 3",)
exprs += ("2 * x > 2 or 1 and 2",)
exprs += ("2 * df > 3 and 1 or a",)
x, a, b, df = np.random.randn(3), 1, 2, DataFrame(np.random.randn(3, 2)) # noqa
for ex in exprs:
msg = "cannot evaluate scalar only bool ops|'BoolOp' nodes are not"
with pytest.raises(NotImplementedError, match=msg):
pd.eval(ex, engine=self.engine, parser=self.parser)
def test_identical(self):
# see gh-10546
x = 1
result = pd.eval("x", engine=self.engine, parser=self.parser)
assert result == 1
assert is_scalar(result)
x = 1.5
result = pd.eval("x", engine=self.engine, parser=self.parser)
assert result == 1.5
assert is_scalar(result)
x = False
result = pd.eval("x", engine=self.engine, parser=self.parser)
assert not result
assert is_bool(result)
assert is_scalar(result)
x = np.array([1])
result = pd.eval("x", engine=self.engine, parser=self.parser)
tm.assert_numpy_array_equal(result, np.array([1]))
assert result.shape == (1,)
x = np.array([1.5])
result = pd.eval("x", engine=self.engine, parser=self.parser)
tm.assert_numpy_array_equal(result, np.array([1.5]))
assert result.shape == (1,)
x = np.array([False]) # noqa
result = pd.eval("x", engine=self.engine, parser=self.parser)
tm.assert_numpy_array_equal(result, np.array([False]))
assert result.shape == (1,)
def test_line_continuation(self):
# GH 11149
exp = """1 + 2 * \
5 - 1 + 2 """
result = pd.eval(exp, engine=self.engine, parser=self.parser)
assert result == 12
def test_float_truncation(self):
# GH 14241
exp = "1000000000.006"
result = pd.eval(exp, engine=self.engine, parser=self.parser)
expected = np.float64(exp)
assert result == expected
df = DataFrame({"A": [1000000000.0009, 1000000000.0011, 1000000000.0015]})
cutoff = 1000000000.0006
result = df.query(f"A < {cutoff:.4f}")
assert result.empty
cutoff = 1000000000.0010
result = df.query(f"A > {cutoff:.4f}")
expected = df.loc[[1, 2], :]
tm.assert_frame_equal(expected, result)
exact = 1000000000.0011
result = df.query(f"A == {exact:.4f}")
expected = df.loc[[1], :]
tm.assert_frame_equal(expected, result)
def test_disallow_python_keywords(self):
# GH 18221
df = DataFrame([[0, 0, 0]], columns=["foo", "bar", "class"])
msg = "Python keyword not valid identifier in numexpr query"
with pytest.raises(SyntaxError, match=msg):
df.query("class == 0")
df = DataFrame()
df.index.name = "lambda"
with pytest.raises(SyntaxError, match=msg):
df.query("lambda == 0")
@td.skip_if_no_ne
class TestEvalNumexprPython(TestEvalNumexprPandas):
exclude_cmp = ["in", "not in"]
exclude_bool = ["and", "or"]
engine = "numexpr"
parser = "python"
@classmethod
def setup_class(cls):
super().setup_class()
import numexpr as ne
cls.ne = ne
def check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, rhs):
ex1 = f"lhs {cmp1} mid {cmp2} rhs"
msg = "'BoolOp' nodes are not implemented"
with pytest.raises(NotImplementedError, match=msg):
pd.eval(ex1, engine=self.engine, parser=self.parser)
class TestEvalPythonPython(TestEvalNumexprPython):
engine = "python"
parser = "python"
def check_modulus(self, lhs, arith1, rhs):
ex = f"lhs {arith1} rhs"
result = pd.eval(ex, engine=self.engine, parser=self.parser)
expected = lhs % rhs
tm.assert_almost_equal(result, expected)
expected = _eval_single_bin(expected, arith1, rhs, self.engine)
tm.assert_almost_equal(result, expected)
def check_alignment(self, result, nlhs, ghs, op):
try:
nlhs, ghs = nlhs.align(ghs)
except (ValueError, TypeError, AttributeError):
# ValueError: series frame or frame series align
# TypeError, AttributeError: series or frame with scalar align
pass
else:
expected = eval(f"nlhs {op} ghs")
tm.assert_almost_equal(result, expected)
class TestEvalPythonPandas(TestEvalPythonPython):
engine = "python"
parser = "pandas"
def check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, rhs):
TestEvalNumexprPandas.check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, rhs)
f = lambda *args, **kwargs: np.random.randn()
# -------------------------------------
# gh-12388: Typecasting rules consistency with python
class TestTypeCasting:
@pytest.mark.parametrize("op", ["+", "-", "*", "**", "/"])
# maybe someday... numexpr has too many upcasting rules now
# chain(*(np.sctypes[x] for x in ['uint', 'int', 'float']))
@pytest.mark.parametrize("dt", [np.float32, np.float64])
def test_binop_typecasting(self, engine, parser, op, dt):
df = tm.makeCustomDataframe(5, 3, data_gen_f=f, dtype=dt)
s = f"df {op} 3"
res = pd.eval(s, engine=engine, parser=parser)
assert df.values.dtype == dt
assert res.values.dtype == dt
tm.assert_frame_equal(res, eval(s))
s = f"3 {op} df"
res = pd.eval(s, engine=engine, parser=parser)
assert df.values.dtype == dt
assert res.values.dtype == dt
tm.assert_frame_equal(res, eval(s))
# -------------------------------------
# Basic and complex alignment
def _is_datetime(x):
return issubclass(x.dtype.type, np.datetime64)
def should_warn(*args):
not_mono = not any(map(operator.attrgetter("is_monotonic"), args))
only_one_dt = reduce(operator.xor, map(_is_datetime, args))
return not_mono and only_one_dt
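# --- Hedged sketch (not part of the original tests): should_warn() is True only when no
# operand has a monotonic index and, for two operands, exactly one of them is
# datetime-typed, which is the alignment case pandas warns about.  Dates and sizes
# below are illustrative:
#
#   a = Series(range(3), index=date_range("2000", periods=3)[::-1])  # datetime, not monotonic
#   b = Series(range(3), index=[5, 4, 3])                            # integer, not monotonic
#   should_warn(a.index, b.index)                                    # -> True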
class TestAlignment:
index_types = ["i", "u", "dt"]
lhs_index_types = index_types + ["s"] # 'p'
def test_align_nested_unary_op(self, engine, parser):
s = "df * ~2"
df = tm.makeCustomDataframe(5, 3, data_gen_f=f)
res = pd.eval(s, engine=engine, parser=parser)
tm.assert_frame_equal(res, df * ~2)
@pytest.mark.parametrize("lr_idx_type", lhs_index_types)
@pytest.mark.parametrize("rr_idx_type", index_types)
@pytest.mark.parametrize("c_idx_type", index_types)
def test_basic_frame_alignment(
self, engine, parser, lr_idx_type, rr_idx_type, c_idx_type
):
with warnings.catch_warnings(record=True):
warnings.simplefilter("always", RuntimeWarning)
df = tm.makeCustomDataframe(
10, 10, data_gen_f=f, r_idx_type=lr_idx_type, c_idx_type=c_idx_type
)
df2 = tm.makeCustomDataframe(
20, 10, data_gen_f=f, r_idx_type=rr_idx_type, c_idx_type=c_idx_type
)
# only warns if not monotonic and not sortable
if should_warn(df.index, df2.index):
with tm.assert_produces_warning(RuntimeWarning):
res = pd.eval("df + df2", engine=engine, parser=parser)
else:
res = pd.eval("df + df2", engine=engine, parser=parser)
tm.assert_frame_equal(res, df + df2)
@pytest.mark.parametrize("r_idx_type", lhs_index_types)
@pytest.mark.parametrize("c_idx_type", lhs_index_types)
def test_frame_comparison(self, engine, parser, r_idx_type, c_idx_type):
df = tm.makeCustomDataframe(
10, 10, data_gen_f=f, r_idx_type=r_idx_type, c_idx_type=c_idx_type
)
res = pd.eval("df < 2", engine=engine, parser=parser)
tm.assert_frame_equal(res, df < 2)
df3 = DataFrame(np.random.randn(*df.shape), index=df.index, columns=df.columns)
res = pd.eval("df < df3", engine=engine, parser=parser)
tm.assert_frame_equal(res, df < df3)
@pytest.mark.parametrize("r1", lhs_index_types)
@pytest.mark.parametrize("c1", index_types)
@pytest.mark.parametrize("r2", index_types)
@pytest.mark.parametrize("c2", index_types)
def test_medium_complex_frame_alignment(self, engine, parser, r1, c1, r2, c2):
with warnings.catch_warnings(record=True):
warnings.simplefilter("always", RuntimeWarning)
df = tm.makeCustomDataframe(
3, 2, data_gen_f=f, r_idx_type=r1, c_idx_type=c1
)
df2 = tm.makeCustomDataframe(
4, 2, data_gen_f=f, r_idx_type=r2, c_idx_type=c2
)
df3 = tm.makeCustomDataframe(
5, 2, data_gen_f=f, r_idx_type=r2, c_idx_type=c2
)
if should_warn(df.index, df2.index, df3.index):
with tm.assert_produces_warning(RuntimeWarning):
res = pd.eval("df + df2 + df3", engine=engine, parser=parser)
else:
res = pd.eval("df + df2 + df3", engine=engine, parser=parser)
tm.assert_frame_equal(res, df + df2 + df3)
@pytest.mark.parametrize("index_name", ["index", "columns"])
@pytest.mark.parametrize("c_idx_type", index_types)
@pytest.mark.parametrize("r_idx_type", lhs_index_types)
def test_basic_frame_series_alignment(
self, engine, parser, index_name, r_idx_type, c_idx_type
):
def testit(r_idx_type, c_idx_type, index_name):
df = tm.makeCustomDataframe(
10, 10, data_gen_f=f, r_idx_type=r_idx_type, c_idx_type=c_idx_type
)
index = getattr(df, index_name)
s = Series(np.random.randn(5), index[:5])
if should_warn(df.index, s.index):
with tm.assert_produces_warning(RuntimeWarning):
res = pd.eval("df + s", engine=engine, parser=parser)
else:
res = pd.eval("df + s", engine=engine, parser=parser)
if r_idx_type == "dt" or c_idx_type == "dt":
expected = df.add(s) if engine == "numexpr" else df + s
else:
expected = df + s
tm.assert_frame_equal(res, expected)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always", RuntimeWarning)
testit(r_idx_type, c_idx_type, index_name)
@pytest.mark.parametrize("index_name", ["index", "columns"])
def test_basic_series_frame_alignment(self, engine, parser, index_name):
def testit(r_idx_type, c_idx_type, index_name):
df = tm.makeCustomDataframe(
10, 7, data_gen_f=f, r_idx_type=r_idx_type, c_idx_type=c_idx_type
)
index = getattr(df, index_name)
s = Series(np.random.randn(5), index[:5])
if should_warn(s.index, df.index):
with tm.assert_produces_warning(RuntimeWarning):
res = pd.eval("s + df", engine=engine, parser=parser)
else:
res = pd.eval("s + df", engine=engine, parser=parser)
if r_idx_type == "dt" or c_idx_type == "dt":
expected = df.add(s) if engine == "numexpr" else s + df
else:
expected = s + df
tm.assert_frame_equal(res, expected)
# only test dt with dt, otherwise weird joins result
args = product(["i", "u", "s"], ["i", "u", "s"])
with warnings.catch_warnings(record=True):
# avoid warning about comparing strings and ints
warnings.simplefilter("ignore", RuntimeWarning)
for r_idx_type, c_idx_type in args:
testit(r_idx_type, c_idx_type, index_name)
# dt with dt
args = product(["dt"], ["dt"])
with warnings.catch_warnings(record=True):
# avoid warning about comparing strings and ints
warnings.simplefilter("ignore", RuntimeWarning)
for r_idx_type, c_idx_type in args:
testit(r_idx_type, c_idx_type, index_name)
@pytest.mark.parametrize("c_idx_type", index_types)
@pytest.mark.parametrize("r_idx_type", lhs_index_types)
@pytest.mark.parametrize("index_name", ["index", "columns"])
@pytest.mark.parametrize("op", ["+", "*"])
def test_series_frame_commutativity(
self, engine, parser, index_name, op, r_idx_type, c_idx_type
):
with warnings.catch_warnings(record=True):
warnings.simplefilter("always", RuntimeWarning)
df = tm.makeCustomDataframe(
10, 10, data_gen_f=f, r_idx_type=r_idx_type, c_idx_type=c_idx_type
)
index = getattr(df, index_name)
s = Series(np.random.randn(5), index[:5])
lhs = f"s {op} df"
rhs = f"df {op} s"
if should_warn(df.index, s.index):
with tm.assert_produces_warning(RuntimeWarning):
a = pd.eval(lhs, engine=engine, parser=parser)
with tm.assert_produces_warning(RuntimeWarning):
b = pd.eval(rhs, engine=engine, parser=parser)
else:
a = pd.eval(lhs, engine=engine, parser=parser)
b = pd.eval(rhs, engine=engine, parser=parser)
if r_idx_type != "dt" and c_idx_type != "dt":
if engine == "numexpr":
tm.assert_frame_equal(a, b)
@pytest.mark.parametrize("r1", lhs_index_types)
@pytest.mark.parametrize("c1", index_types)
@pytest.mark.parametrize("r2", index_types)
@pytest.mark.parametrize("c2", index_types)
def test_complex_series_frame_alignment(self, engine, parser, r1, c1, r2, c2):
import random
n = 3
m1 = 5
m2 = 2 * m1
with warnings.catch_warnings(record=True):
warnings.simplefilter("always", RuntimeWarning)
index_name = random.choice(["index", "columns"])
obj_name = random.choice(["df", "df2"])
df = tm.makeCustomDataframe(
m1, n, data_gen_f=f, r_idx_type=r1, c_idx_type=c1
)
df2 = tm.makeCustomDataframe(
m2, n, data_gen_f=f, r_idx_type=r2, c_idx_type=c2
)
index = getattr(locals().get(obj_name), index_name)
ser = Series(np.random.randn(n), index[:n])
if r2 == "dt" or c2 == "dt":
if engine == "numexpr":
expected2 = df2.add(ser)
else:
expected2 = df2 + ser
else:
expected2 = df2 + ser
if r1 == "dt" or c1 == "dt":
if engine == "numexpr":
expected = expected2.add(df)
else:
expected = expected2 + df
else:
expected = expected2 + df
if should_warn(df2.index, ser.index, df.index):
with tm.assert_produces_warning(RuntimeWarning):
res = pd.eval("df2 + ser + df", engine=engine, parser=parser)
else:
res = pd.eval("df2 + ser + df", engine=engine, parser=parser)
assert res.shape == expected.shape
tm.assert_frame_equal(res, expected)
def test_performance_warning_for_poor_alignment(self, engine, parser):
df = DataFrame(np.random.randn(1000, 10))
s = Series(np.random.randn(10000))
if engine == "numexpr":
seen = PerformanceWarning
else:
seen = False
with tm.assert_produces_warning(seen):
pd.eval("df + s", engine=engine, parser=parser)
s = Series(np.random.randn(1000))
with tm.assert_produces_warning(False):
pd.eval("df + s", engine=engine, parser=parser)
df = DataFrame(np.random.randn(10, 10000))
s = Series(np.random.randn(10000))
with tm.assert_produces_warning(False):
pd.eval("df + s", engine=engine, parser=parser)
df = DataFrame(np.random.randn(10, 10))
s = Series(np.random.randn(10000))
is_python_engine = engine == "python"
if not is_python_engine:
wrn = PerformanceWarning
else:
wrn = False
with tm.assert_produces_warning(wrn) as w:
pd.eval("df + s", engine=engine, parser=parser)
if not is_python_engine:
assert len(w) == 1
msg = str(w[0].message)
loged = np.log10(s.size - df.shape[1])
expected = (
f"Alignment difference on axis 1 is larger "
f"than an order of magnitude on term 'df', "
f"by more than {loged:.4g}; performance may suffer"
)
assert msg == expected
# ------------------------------------
# Slightly more complex ops
@td.skip_if_no_ne
class TestOperationsNumExprPandas:
exclude_arith: List[str] = []
engine = "numexpr"
parser = "pandas"
@classmethod
def setup_class(cls):
cls.arith_ops = [
op
for op in expr.ARITH_OPS_SYMS + expr.CMP_OPS_SYMS
if op not in cls.exclude_arith
]
def eval(self, *args, **kwargs):
kwargs["engine"] = self.engine
kwargs["parser"] = self.parser
kwargs["level"] = kwargs.pop("level", 0) + 1
return pd.eval(*args, **kwargs)
def test_simple_arith_ops(self):
ops = (op for op in self.arith_ops if op != "//")
for op in ops:
ex = f"1 {op} 1"
ex2 = f"x {op} 1"
ex3 = f"1 {op} (x + 1)"
if op in ("in", "not in"):
msg = "argument of type 'int' is not iterable"
with pytest.raises(TypeError, match=msg):
pd.eval(ex, engine=self.engine, parser=self.parser)
else:
expec = _eval_single_bin(1, op, 1, self.engine)
x = self.eval(ex, engine=self.engine, parser=self.parser)
assert x == expec
expec = _eval_single_bin(x, op, 1, self.engine)
y = self.eval(
ex2, local_dict={"x": x}, engine=self.engine, parser=self.parser
)
assert y == expec
expec = _eval_single_bin(1, op, x + 1, self.engine)
y = self.eval(
ex3, local_dict={"x": x}, engine=self.engine, parser=self.parser
)
assert y == expec
@pytest.mark.parametrize("rhs", [True, False])
@pytest.mark.parametrize("lhs", [True, False])
@pytest.mark.parametrize("op", expr.BOOL_OPS_SYMS)
def test_simple_bool_ops(self, rhs, lhs, op):
ex = f"{lhs} {op} {rhs}"
res = self.eval(ex)
exp = eval(ex)
assert res == exp
@pytest.mark.parametrize("rhs", [True, False])
@pytest.mark.parametrize("lhs", [True, False])
@pytest.mark.parametrize("op", expr.BOOL_OPS_SYMS)
def test_bool_ops_with_constants(self, rhs, lhs, op):
ex = f"{lhs} {op} {rhs}"
res = self.eval(ex)
exp = eval(ex)
assert res == exp
def test_4d_ndarray_fails(self):
x = np.random.randn(3, 4, 5, 6)
y = Series(np.random.randn(10))
msg = "N-dimensional objects, where N > 2, are not supported with eval"
with pytest.raises(NotImplementedError, match=msg):
self.eval("x + y", local_dict={"x": x, "y": y})
def test_constant(self):
x = self.eval("1")
assert x == 1
def test_single_variable(self):
df = DataFrame(np.random.randn(10, 2))
df2 = self.eval("df", local_dict={"df": df})
tm.assert_frame_equal(df, df2)
def test_truediv(self):
s = np.array([1])
ex = "s / 1"
d = {"s": s} # noqa
# FutureWarning: The `truediv` parameter in pd.eval is deprecated and will be
# removed in a future version.
with tm.assert_produces_warning(FutureWarning):
res = self.eval(ex, truediv=False)
tm.assert_numpy_array_equal(res, np.array([1.0]))
with tm.assert_produces_warning(FutureWarning):
res = self.eval(ex, truediv=True)
tm.assert_numpy_array_equal(res, np.array([1.0]))
with tm.assert_produces_warning(FutureWarning):
res = self.eval("1 / 2", truediv=True)
expec = 0.5
assert res == expec
with tm.assert_produces_warning(FutureWarning):
res = self.eval("1 / 2", truediv=False)
expec = 0.5
assert res == expec
with tm.assert_produces_warning(FutureWarning):
res = self.eval("s / 2", truediv=False)
expec = 0.5
assert res == expec
with tm.assert_produces_warning(FutureWarning):
res = self.eval("s / 2", truediv=True)
expec = 0.5
assert res == expec
def test_failing_subscript_with_name_error(self):
df = DataFrame(np.random.randn(5, 3)) # noqa
with pytest.raises(NameError, match="name 'x' is not defined"):
self.eval("df[x > 2] > 2")
def test_lhs_expression_subscript(self):
df = DataFrame(np.random.randn(5, 3))
result = self.eval("(df + 1)[df > 2]", local_dict={"df": df})
expected = (df + 1)[df > 2]
tm.assert_frame_equal(result, expected)
def test_attr_expression(self):
df = DataFrame(np.random.randn(5, 3), columns=list("abc"))
expr1 = "df.a < df.b"
expec1 = df.a < df.b
expr2 = "df.a + df.b + df.c"
expec2 = df.a + df.b + df.c
expr3 = "df.a + df.b + df.c[df.b < 0]"
expec3 = df.a + df.b + df.c[df.b < 0]
exprs = expr1, expr2, expr3
expecs = expec1, expec2, expec3
for e, expec in zip(exprs, expecs):
tm.assert_series_equal(expec, self.eval(e, local_dict={"df": df}))
def test_assignment_fails(self):
df = DataFrame(np.random.randn(5, 3), columns=list("abc"))
df2 = DataFrame(np.random.randn(5, 3))
expr1 = "df = df2"
msg = "cannot assign without a target object"
with pytest.raises(ValueError, match=msg):
self.eval(expr1, local_dict={"df": df, "df2": df2})
def test_assignment_column(self):
df = DataFrame(np.random.randn(5, 2), columns=list("ab"))
orig_df = df.copy()
# multiple assignees
with pytest.raises(SyntaxError, match="invalid syntax"):
df.eval("d c = a + b")
# invalid assignees
msg = "left hand side of an assignment must be a single name"
with pytest.raises(SyntaxError, match=msg):
df.eval("d,c = a + b")
if compat.PY38:
msg = "cannot assign to function call"
else:
msg = "can't assign to function call"
with pytest.raises(SyntaxError, match=msg):
df.eval('Timestamp("20131001") = a + b')
# single assignment - existing variable
expected = orig_df.copy()
expected["a"] = expected["a"] + expected["b"]
df = orig_df.copy()
df.eval("a = a + b", inplace=True)
tm.assert_frame_equal(df, expected)
# single assignment - new variable
expected = orig_df.copy()
expected["c"] = expected["a"] + expected["b"]
df = orig_df.copy()
df.eval("c = a + b", inplace=True)
tm.assert_frame_equal(df, expected)
# with a local name overlap
def f():
df = orig_df.copy()
a = 1 # noqa
df.eval("a = 1 + b", inplace=True)
return df
df = f()
expected = orig_df.copy()
expected["a"] = 1 + expected["b"]
tm.assert_frame_equal(df, expected)
df = orig_df.copy()
def f():
a = 1 # noqa
old_a = df.a.copy()
df.eval("a = a + b", inplace=True)
result = old_a + df.b
tm.assert_series_equal(result, df.a, check_names=False)
assert result.name is None
f()
# multiple assignment
df = orig_df.copy()
df.eval("c = a + b", inplace=True)
msg = "can only assign a single expression"
with pytest.raises(SyntaxError, match=msg):
df.eval("c = a = b")
# explicit targets
df = orig_df.copy()
self.eval("c = df.a + df.b", local_dict={"df": df}, target=df, inplace=True)
expected = orig_df.copy()
expected["c"] = expected["a"] + expected["b"]
tm.assert_frame_equal(df, expected)
def test_column_in(self):
# GH 11235
df = DataFrame({"a": [11], "b": [-32]})
result = df.eval("a in [11, -32]")
expected = Series([True])
tm.assert_series_equal(result, expected)
def test_assignment_not_inplace(self):
# see gh-9297
df = DataFrame(np.random.randn(5, 2), columns=list("ab"))
actual = df.eval("c = a + b", inplace=False)
assert actual is not None
expected = df.copy()
expected["c"] = expected["a"] + expected["b"]
tm.assert_frame_equal(actual, expected)
def test_multi_line_expression(self):
# GH 11149
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
expected = df.copy()
expected["c"] = expected["a"] + expected["b"]
expected["d"] = expected["c"] + expected["b"]
ans = df.eval(
"""
c = a + b
d = c + b""",
inplace=True,
)
tm.assert_frame_equal(expected, df)
assert ans is None
expected["a"] = expected["a"] - 1
expected["e"] = expected["a"] + 2
ans = df.eval(
"""
a = a - 1
e = a + 2""",
inplace=True,
)
tm.assert_frame_equal(expected, df)
assert ans is None
# multi-line not valid if not all assignments
msg = "Multi-line expressions are only valid if all expressions contain"
with pytest.raises(ValueError, match=msg):
df.eval(
"""
a = b + 2
b - 2""",
inplace=False,
)
def test_multi_line_expression_not_inplace(self):
# GH 11149
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
expected = df.copy()
expected["c"] = expected["a"] + expected["b"]
expected["d"] = expected["c"] + expected["b"]
df = df.eval(
"""
c = a + b
d = c + b""",
inplace=False,
)
tm.assert_frame_equal(expected, df)
expected["a"] = expected["a"] - 1
expected["e"] = expected["a"] + 2
df = df.eval(
"""
a = a - 1
e = a + 2""",
inplace=False,
)
tm.assert_frame_equal(expected, df)
def test_multi_line_expression_local_variable(self):
# GH 15342
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
expected = df.copy()
local_var = 7
expected["c"] = expected["a"] * local_var
expected["d"] = expected["c"] + local_var
ans = df.eval(
"""
c = a * @local_var
d = c + @local_var
""",
inplace=True,
)
tm.assert_frame_equal(expected, df)
assert ans is None
def test_multi_line_expression_callable_local_variable(self):
# 26426
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
def local_func(a, b):
return b
expected = df.copy()
expected["c"] = expected["a"] * local_func(1, 7)
expected["d"] = expected["c"] + local_func(1, 7)
ans = df.eval(
"""
c = a * @local_func(1, 7)
d = c + @local_func(1, 7)
""",
inplace=True,
)
tm.assert_frame_equal(expected, df)
assert ans is None
def test_multi_line_expression_callable_local_variable_with_kwargs(self):
# 26426
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
def local_func(a, b):
return b
expected = df.copy()
expected["c"] = expected["a"] * local_func(b=7, a=1)
expected["d"] = expected["c"] + local_func(b=7, a=1)
ans = df.eval(
"""
c = a * @local_func(b=7, a=1)
d = c + @local_func(b=7, a=1)
""",
inplace=True,
)
tm.assert_frame_equal(expected, df)
assert ans is None
def test_assignment_in_query(self):
# GH 8664
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
df_orig = df.copy()
msg = "cannot assign without a target object"
with pytest.raises(ValueError, match=msg):
df.query("a = 1")
tm.assert_frame_equal(df, df_orig)
def test_query_inplace(self):
# see gh-11149
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
expected = df.copy()
expected = expected[expected["a"] == 2]
df.query("a == 2", inplace=True)
tm.assert_frame_equal(expected, df)
df = {}
expected = {"a": 3}
self.eval("a = 1 + 2", target=df, inplace=True)
tm.assert_dict_equal(df, expected)
@pytest.mark.parametrize("invalid_target", [1, "cat", [1, 2], np.array([]), (1, 3)])
@pytest.mark.filterwarnings("ignore::FutureWarning")
def test_cannot_item_assign(self, invalid_target):
msg = "Cannot assign expression output to target"
expression = "a = 1 + 2"
with pytest.raises(ValueError, match=msg):
self.eval(expression, target=invalid_target, inplace=True)
if hasattr(invalid_target, "copy"):
with pytest.raises(ValueError, match=msg):
self.eval(expression, target=invalid_target, inplace=False)
@pytest.mark.parametrize("invalid_target", [1, "cat", (1, 3)])
def test_cannot_copy_item(self, invalid_target):
msg = "Cannot return a copy of the target"
expression = "a = 1 + 2"
with pytest.raises(ValueError, match=msg):
self.eval(expression, target=invalid_target, inplace=False)
@pytest.mark.parametrize("target", [1, "cat", [1, 2], np.array([]), (1, 3), {1: 2}])
def test_inplace_no_assignment(self, target):
expression = "1 + 2"
assert self.eval(expression, target=target, inplace=False) == 3
msg = "Cannot operate inplace if there is no assignment"
with pytest.raises(ValueError, match=msg):
self.eval(expression, target=target, inplace=True)
def test_basic_period_index_boolean_expression(self):
df = tm.makeCustomDataframe(2, 2, data_gen_f=f, c_idx_type="p", r_idx_type="i")
e = df < 2
r = self.eval("df < 2", local_dict={"df": df})
x = df < 2
tm.assert_frame_equal(r, e)
tm.assert_frame_equal(x, e)
def test_basic_period_index_subscript_expression(self):
df = tm.makeCustomDataframe(2, 2, data_gen_f=f, c_idx_type="p", r_idx_type="i")
r = self.eval("df[df < 2 + 3]", local_dict={"df": df})
e = df[df < 2 + 3]
tm.assert_frame_equal(r, e)
def test_nested_period_index_subscript_expression(self):
df = tm.makeCustomDataframe(2, 2, data_gen_f=f, c_idx_type="p", r_idx_type="i")
r = self.eval("df[df[df < 2] < 2] + df * 2", local_dict={"df": df})
e = df[df[df < 2] < 2] + df * 2
tm.assert_frame_equal(r, e)
def test_date_boolean(self):
df = DataFrame(np.random.randn(5, 3))
df["dates1"] = date_range("1/1/2012", periods=5)
res = self.eval(
"df.dates1 < 20130101",
local_dict={"df": df},
engine=self.engine,
parser=self.parser,
)
expec = df.dates1 < "20130101"
tm.assert_series_equal(res, expec, check_names=False)
def test_simple_in_ops(self):
if self.parser != "python":
res = pd.eval("1 in [1, 2]", engine=self.engine, parser=self.parser)
assert res
res = pd.eval("2 in (1, 2)", engine=self.engine, parser=self.parser)
assert res
res = pd.eval("3 in (1, 2)", engine=self.engine, parser=self.parser)
assert not res
res = pd.eval("3 not in (1, 2)", engine=self.engine, parser=self.parser)
assert res
res = pd.eval("[3] not in (1, 2)", engine=self.engine, parser=self.parser)
assert res
res = pd.eval("[3] in ([3], 2)", engine=self.engine, parser=self.parser)
assert res
res = pd.eval("[[3]] in [[[3]], 2]", engine=self.engine, parser=self.parser)
assert res
res = pd.eval("(3,) in [(3,), 2]", engine=self.engine, parser=self.parser)
assert res
res = pd.eval(
"(3,) not in [(3,), 2]", engine=self.engine, parser=self.parser
)
assert not res
res = pd.eval(
"[(3,)] in [[(3,)], 2]", engine=self.engine, parser=self.parser
)
assert res
else:
msg = "'In' nodes are not implemented"
with pytest.raises(NotImplementedError, match=msg):
pd.eval("1 in [1, 2]", engine=self.engine, parser=self.parser)
with pytest.raises(NotImplementedError, match=msg):
pd.eval("2 in (1, 2)", engine=self.engine, parser=self.parser)
with pytest.raises(NotImplementedError, match=msg):
pd.eval("3 in (1, 2)", engine=self.engine, parser=self.parser)
with pytest.raises(NotImplementedError, match=msg):
pd.eval(
"[(3,)] in (1, 2, [(3,)])", engine=self.engine, parser=self.parser
)
msg = "'NotIn' nodes are not implemented"
with pytest.raises(NotImplementedError, match=msg):
pd.eval("3 not in (1, 2)", engine=self.engine, parser=self.parser)
with pytest.raises(NotImplementedError, match=msg):
pd.eval(
"[3] not in (1, 2, [[3]])", engine=self.engine, parser=self.parser
)
@td.skip_if_no_ne
class TestOperationsNumExprPython(TestOperationsNumExprPandas):
exclude_arith: List[str] = ["in", "not in"]
engine = "numexpr"
parser = "python"
def test_check_many_exprs(self):
a = 1 # noqa
expr = " * ".join("a" * 33)
expected = 1
res = pd.eval(expr, engine=self.engine, parser=self.parser)
assert res == expected
def test_fails_and(self):
df = DataFrame(np.random.randn(5, 3))
msg = "'BoolOp' nodes are not implemented"
with pytest.raises(NotImplementedError, match=msg):
pd.eval(
"df > 2 and df > 3",
local_dict={"df": df},
parser=self.parser,
engine=self.engine,
)
def test_fails_or(self):
df = DataFrame(np.random.randn(5, 3))
msg = "'BoolOp' nodes are not implemented"
with pytest.raises(NotImplementedError, match=msg):
pd.eval(
"df > 2 or df > 3",
local_dict={"df": df},
parser=self.parser,
engine=self.engine,
)
def test_fails_not(self):
df = DataFrame(np.random.randn(5, 3))
msg = "'Not' nodes are not implemented"
with pytest.raises(NotImplementedError, match=msg):
pd.eval(
"not df > 2",
local_dict={"df": df},
parser=self.parser,
engine=self.engine,
)
def test_fails_ampersand(self):
df = DataFrame(np.random.randn(5, 3)) # noqa
ex = "(df + 2)[df > 1] > 0 & (df > 0)"
msg = "cannot evaluate scalar only bool ops"
with pytest.raises(NotImplementedError, match=msg):
pd.eval(ex, parser=self.parser, engine=self.engine)
def test_fails_pipe(self):
df = DataFrame(np.random.randn(5, 3)) # noqa
ex = "(df + 2)[df > 1] > 0 | (df > 0)"
msg = "cannot evaluate scalar only bool ops"
with pytest.raises(NotImplementedError, match=msg):
pd.eval(ex, parser=self.parser, engine=self.engine)
@pytest.mark.parametrize("rhs", [True, False])
@pytest.mark.parametrize("lhs", [True, False])
@pytest.mark.parametrize("op", expr.BOOL_OPS_SYMS)
def test_bool_ops_with_constants(self, lhs, rhs, op):
ex = f"{lhs} {op} {rhs}"
if op in ("and", "or"):
msg = "'BoolOp' nodes are not implemented"
with pytest.raises(NotImplementedError, match=msg):
self.eval(ex)
else:
res = self.eval(ex)
exp = eval(ex)
assert res == exp
@pytest.mark.parametrize("rhs", [True, False])
@pytest.mark.parametrize("lhs", [True, False])
@pytest.mark.parametrize("op", expr.BOOL_OPS_SYMS)
def test_simple_bool_ops(self, lhs, rhs, op):
ex = f"lhs {op} rhs"
if op in ("and", "or"):
msg = "'BoolOp' nodes are not implemented"
with pytest.raises(NotImplementedError, match=msg):
pd.eval(ex, engine=self.engine, parser=self.parser)
else:
res = pd.eval(ex, engine=self.engine, parser=self.parser)
exp = eval(ex)
assert res == exp
class TestOperationsPythonPython(TestOperationsNumExprPython):
engine = "python"
parser = "python"
class TestOperationsPythonPandas(TestOperationsNumExprPandas):
exclude_arith: List[str] = []
engine = "python"
parser = "pandas"
@td.skip_if_no_ne
class TestMathPythonPython:
engine = "python"
parser = "pandas"
def eval(self, *args, **kwargs):
kwargs["engine"] = self.engine
kwargs["parser"] = self.parser
kwargs["level"] = kwargs.pop("level", 0) + 1
return pd.eval(*args, **kwargs)
def test_unary_functions(self, unary_fns_for_ne):
df = DataFrame({"a": np.random.randn(10)})
a = df.a
fn = unary_fns_for_ne
expr = f"{fn}(a)"
got = self.eval(expr)
with np.errstate(all="ignore"):
expect = getattr(np, fn)(a)
tm.assert_series_equal(got, expect, check_names=False)
@pytest.mark.parametrize("fn", ["floor", "ceil"])
def test_floor_and_ceil_functions_raise_error(self, ne_lt_2_6_9, fn):
msg = f'"{fn}" is not a supported function'
with pytest.raises(ValueError, match=msg):
expr = f"{fn}(100)"
self.eval(expr)
@pytest.mark.parametrize("fn", _binary_math_ops)
def test_binary_functions(self, fn):
df = DataFrame({"a": np.random.randn(10), "b": np.random.randn(10)})
a = df.a
b = df.b
expr = f"{fn}(a, b)"
got = self.eval(expr)
with np.errstate(all="ignore"):
expect = getattr(np, fn)(a, b)
tm.assert_almost_equal(got, expect, check_names=False)
def test_df_use_case(self):
df = DataFrame({"a": np.random.randn(10), "b": np.random.randn(10)})
df.eval(
"e = arctan2(sin(a), b)",
engine=self.engine,
parser=self.parser,
inplace=True,
)
got = df.e
expect = np.arctan2(np.sin(df.a), df.b)
tm.assert_series_equal(got, expect, check_names=False)
def test_df_arithmetic_subexpression(self):
df = DataFrame({"a": np.random.randn(10), "b": np.random.randn(10)})
df.eval("e = sin(a + b)", engine=self.engine, parser=self.parser, inplace=True)
got = df.e
expect = np.sin(df.a + df.b)
tm.assert_series_equal(got, expect, check_names=False)
def check_result_type(self, dtype, expect_dtype):
df = DataFrame({"a": np.random.randn(10).astype(dtype)})
assert df.a.dtype == dtype
df.eval("b = sin(a)", engine=self.engine, parser=self.parser, inplace=True)
got = df.b
expect = np.sin(df.a)
assert expect.dtype == got.dtype
assert expect_dtype == got.dtype
tm.assert_series_equal(got, expect, check_names=False)
def test_result_types(self):
self.check_result_type(np.int32, np.float64)
self.check_result_type(np.int64, np.float64)
self.check_result_type(np.float32, np.float32)
self.check_result_type(np.float64, np.float64)
@td.skip_if_windows
def test_result_complex128(self):
# xref https://github.com/pandas-dev/pandas/issues/12293
# this fails on Windows, apparently a floating point precision issue
# Did not test complex64 because DataFrame is converting it to
# complex128. Due to https://github.com/pandas-dev/pandas/issues/10952
self.check_result_type(np.complex128, np.complex128)
def test_undefined_func(self):
df = DataFrame({"a": np.random.randn(10)})
msg = '"mysin" is not a supported function'
with pytest.raises(ValueError, match=msg):
df.eval("mysin(a)", engine=self.engine, parser=self.parser)
def test_keyword_arg(self):
df = DataFrame({"a": np.random.randn(10)})
msg = 'Function "sin" does not support keyword arguments'
with pytest.raises(TypeError, match=msg):
df.eval("sin(x=a)", engine=self.engine, parser=self.parser)
class TestMathPythonPandas(TestMathPythonPython):
engine = "python"
parser = "pandas"
class TestMathNumExprPandas(TestMathPythonPython):
engine = "numexpr"
parser = "pandas"
class TestMathNumExprPython(TestMathPythonPython):
engine = "numexpr"
parser = "python"
_var_s = np.random.randn(10)
class TestScope:
def test_global_scope(self, engine, parser):
e = "_var_s * 2"
tm.assert_numpy_array_equal(
_var_s * 2, pd.eval(e, engine=engine, parser=parser)
)
def test_no_new_locals(self, engine, parser):
x = 1
lcls = locals().copy()
pd.eval("x + 1", local_dict=lcls, engine=engine, parser=parser)
lcls2 = locals().copy()
lcls2.pop("lcls")
assert lcls == lcls2
def test_no_new_globals(self, engine, parser):
x = 1 # noqa
gbls = globals().copy()
pd.eval("x + 1", engine=engine, parser=parser)
gbls2 = globals().copy()
assert gbls == gbls2
@td.skip_if_no_ne
def test_invalid_engine():
msg = "Invalid engine 'asdf' passed"
with pytest.raises(KeyError, match=msg):
pd.eval("x + y", local_dict={"x": 1, "y": 2}, engine="asdf")
@td.skip_if_no_ne
def test_invalid_parser():
msg = "Invalid parser 'asdf' passed"
with pytest.raises(KeyError, match=msg):
pd.eval("x + y", local_dict={"x": 1, "y": 2}, parser="asdf")
_parsers: Dict[str, Type[BaseExprVisitor]] = {
"python": PythonExprVisitor,
"pytables": pytables.PyTablesExprVisitor,
"pandas": PandasExprVisitor,
}
@pytest.mark.parametrize("engine", ENGINES)
@pytest.mark.parametrize("parser", _parsers)
def test_disallowed_nodes(engine, parser):
VisitorClass = _parsers[parser]
inst = VisitorClass("x + 1", engine, parser)
for ops in VisitorClass.unsupported_nodes:
msg = "nodes are not implemented"
with pytest.raises(NotImplementedError, match=msg):
getattr(inst, ops)()
def test_syntax_error_exprs(engine, parser):
e = "s +"
with pytest.raises(SyntaxError, match="invalid syntax"):
pd.eval(e, engine=engine, parser=parser)
def test_name_error_exprs(engine, parser):
e = "s + t"
msg = "name 's' is not defined"
with pytest.raises(NameError, match=msg):
pd.eval(e, engine=engine, parser=parser)
@pytest.mark.parametrize("express", ["a + @b", "@a + b", "@a + @b"])
def test_invalid_local_variable_reference(engine, parser, express):
a, b = 1, 2 # noqa
if parser != "pandas":
with pytest.raises(SyntaxError, match="The '@' prefix is only"):
pd.eval(express, engine=engine, parser=parser)
else:
with pytest.raises(SyntaxError, match="The '@' prefix is not"):
pd.eval(express, engine=engine, parser=parser)
def test_numexpr_builtin_raises(engine, parser):
sin, dotted_line = 1, 2
if engine == "numexpr":
msg = "Variables in expression .+"
with pytest.raises(NumExprClobberingError, match=msg):
pd.eval("sin + dotted_line", engine=engine, parser=parser)
else:
res = pd.eval("sin + dotted_line", engine=engine, parser=parser)
assert res == sin + dotted_line
def test_bad_resolver_raises(engine, parser):
cannot_resolve = 42, 3.0
with pytest.raises(TypeError, match="Resolver of type .+"):
pd.eval("1 + 2", resolvers=cannot_resolve, engine=engine, parser=parser)
def test_empty_string_raises(engine, parser):
# GH 13139
with pytest.raises(ValueError, match="expr cannot be an empty string"):
pd.eval("", engine=engine, parser=parser)
def test_more_than_one_expression_raises(engine, parser):
with pytest.raises(SyntaxError, match=("only a single expression is allowed")):
pd.eval("1 + 1; 2 + 2", engine=engine, parser=parser)
@pytest.mark.parametrize("cmp", ("and", "or"))
@pytest.mark.parametrize("lhs", (int, float))
@pytest.mark.parametrize("rhs", (int, float))
def test_bool_ops_fails_on_scalars(lhs, cmp, rhs, engine, parser):
gen = {int: lambda: np.random.randint(10), float: np.random.randn}
mid = gen[lhs]() # noqa
lhs = gen[lhs]()
rhs = gen[rhs]()
ex1 = f"lhs {cmp} mid {cmp} rhs"
ex2 = f"lhs {cmp} mid and mid {cmp} rhs"
ex3 = f"(lhs {cmp} mid) & (mid {cmp} rhs)"
for ex in (ex1, ex2, ex3):
msg = "cannot evaluate scalar only bool ops|'BoolOp' nodes are not"
with pytest.raises(NotImplementedError, match=msg):
pd.eval(ex, engine=engine, parser=parser)
@pytest.mark.parametrize(
"other",
[
"'x'",
pytest.param(
"...", marks=pytest.mark.xfail(not compat.PY38, reason="GH-28116")
),
],
)
def test_equals_various(other):
df = DataFrame({"A": ["a", "b", "c"]})
result = df.eval(f"A == {other}")
expected = Series([False, False, False], name="A")
if USE_NUMEXPR:
# https://github.com/pandas-dev/pandas/issues/10239
# lose name with numexpr engine. Remove when that's fixed.
expected.name = None
tm.assert_series_equal(result, expected)
def test_inf(engine, parser):
s = "inf + 1"
expected = np.inf
result = pd.eval(s, engine=engine, parser=parser)
assert result == expected
def test_truediv_deprecated(engine, parser):
# GH#29182
match = "The `truediv` parameter in pd.eval is deprecated"
with tm.assert_produces_warning(FutureWarning) as m:
pd.eval("1+1", engine=engine, parser=parser, truediv=True)
assert len(m) == 1
assert match in str(m[0].message)
with tm.assert_produces_warning(FutureWarning) as m:
pd.eval("1+1", engine=engine, parser=parser, truediv=False)
assert len(m) == 1
assert match in str(m[0].message)
def test_negate_lt_eq_le(engine, parser):
df = DataFrame([[0, 10], [1, 20]], columns=["cat", "count"])
expected = df[~(df.cat > 0)]
result = df.query("~(cat > 0)", engine=engine, parser=parser)
tm.assert_frame_equal(result, expected)
if parser == "python":
msg = "'Not' nodes are not implemented"
with pytest.raises(NotImplementedError, match=msg):
df.query("not (cat > 0)", engine=engine, parser=parser)
else:
result = df.query("not (cat > 0)", engine=engine, parser=parser)
tm.assert_frame_equal(result, expected)
class TestValidate:
@pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0])
def test_validate_bool_args(self, value):
msg = 'For argument "inplace" expected type bool, received type'
with pytest.raises(ValueError, match=msg):
pd.eval("2+2", inplace=value)
| 34.978145 | 88 | 0.574285 |
7d3642b524c3a214c44cd18481d0dd5cb11e9ca5
| 23,593 |
py
|
Python
|
vmtp/sshutils.py
|
schoksey/vmtp
|
5c2a5187c20953fd055cd7e56e72bcc342780153
|
[
"Apache-2.0"
] | null | null | null |
vmtp/sshutils.py
|
schoksey/vmtp
|
5c2a5187c20953fd055cd7e56e72bcc342780153
|
[
"Apache-2.0"
] | null | null | null |
vmtp/sshutils.py
|
schoksey/vmtp
|
5c2a5187c20953fd055cd7e56e72bcc342780153
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""High level ssh library.
Usage examples:
Execute command and get output:
ssh = sshclient.SSH('root', 'example.com', port=33)
status, stdout, stderr = ssh.execute('ps ax')
if status:
raise Exception('Command failed with non-zero status.')
print stdout.splitlines()
Execute command with huge output:
class PseudoFile(object):
def write(self, chunk):
if 'error' in chunk:
email_admin(chunk)
ssh = sshclient.SSH('root', 'example.com')
ssh.run('tail -f /var/log/syslog', stdout=PseudoFile(), timeout=False)
Execute local script on remote side:
ssh = sshclient.SSH('user', 'example.com')
status, out, err = ssh.execute('/bin/sh -s arg1 arg2',
stdin=open('~/myscript.sh', 'r'))
Upload file:
ssh = sshclient.SSH('user', 'example.com')
ssh.run('cat > ~/upload/file.gz', stdin=open('/store/file.gz', 'rb'))
Eventlet:
eventlet.monkey_patch(select=True, time=True)
or
eventlet.monkey_patch()
or
sshclient = eventlet.import_patched("openstack.common.sshclient")
"""
import re
import select
import socket
import StringIO
import sys
import time
from log import LOG
import paramiko
import scp
# from rally.openstack.common.gettextutils import _
class SSHError(Exception):
pass
class SSHTimeout(SSHError):
pass
# Check IPv4 address syntax - not completely fool proof but will catch
# some invalid formats
def is_ipv4(address):
try:
socket.inet_aton(address)
except socket.error:
return False
return True
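# Illustrative note (added for clarity, not part of the original module): because
# socket.inet_aton() accepts abbreviated dotted forms, is_ipv4() is deliberately loose.
# A minimal sketch of the behavior, assuming a standard CPython socket module:
#
#     is_ipv4('10.0.0.1')   # True
#     is_ipv4('10.0.0')     # also True - inet_aton accepts short forms
#     is_ipv4('not-an-ip')  # False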
class SSHAccess(object):
'''
A class to contain all the information needed to access a host
(native or virtual) using SSH
'''
def __init__(self, arg_value=None):
'''
decode user@host[:pwd]
'hugo@1.1.1.1:secret' -> ('hugo', '1.1.1.1', 'secret', None)
'huggy@2.2.2.2' -> ('huggy', '2.2.2.2', None, None)
None -> (None, None, None, None)
Examples of invalid inputs (the error field will be set):
'hugo@1.1.1.1.1' (invalid IP)
'@3.3.3.3' (missing username)
'hiro@' or 'buggy' (missing host IP)
The error field will be None in case of success or will
contain a string describing the error
'''
self.username = None
self.host = None
self.password = None
# name of the file that contains the private key
self.private_key_file = None
# this is the private key itself (a long string starting with
# -----BEGIN RSA PRIVATE KEY-----
# used when the private key is not saved in any file
self.private_key = None
self.public_key_file = None
self.port = 22
self.error = None
if not arg_value:
return
match = re.search(r'^([^@]+)@([0-9\.]+):?(.*)$', arg_value)
if not match:
self.error = 'Invalid argument: ' + arg_value
return
if not is_ipv4(match.group(2)):
self.error = 'Invalid IPv4 address ' + match.group(2)
return
(self.username, self.host, self.password) = match.groups()
def copy_from(self, ssh_access):
self.username = ssh_access.username
self.host = ssh_access.host
self.port = ssh_access.port
self.password = ssh_access.password
self.private_key = ssh_access.private_key
self.public_key_file = ssh_access.public_key_file
self.private_key_file = ssh_access.private_key_file
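# Illustrative sketch (added for clarity, not part of the original module): how an
# access string is decoded by SSHAccess. The credentials below are made-up values.
#
#     access = SSHAccess('hugo@1.1.1.1:secret')
#     assert access.error is None
#     assert (access.username, access.host, access.password) == \
#         ('hugo', '1.1.1.1', 'secret')
#
#     bad = SSHAccess('hiro@')       # missing host IP
#     assert bad.error is not None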
class SSH(object):
"""Represent ssh connection."""
def __init__(self, ssh_access,
connect_timeout=60,
connect_retry_count=30,
connect_retry_wait_sec=2):
"""Initialize SSH client.
:param user: ssh username
:param host: hostname or ip address of remote ssh server
:param port: remote ssh port
:param pkey: RSA or DSS private key string or file object
:param key_filename: private key filename
:param password: password
:param connect_timeout: timeout when connecting ssh
:param connect_retry_count: how many times to retry connecting
:param connect_retry_wait_sec: seconds to wait between retries
"""
self.ssh_access = ssh_access
if ssh_access.private_key:
self.pkey = self._get_pkey(ssh_access.private_key)
else:
self.pkey = None
self._client = False
self.connect_timeout = connect_timeout
self.connect_retry_count = connect_retry_count
self.connect_retry_wait_sec = connect_retry_wait_sec
self.distro_id = None
self.distro_id_like = None
self.distro_version = None
self.__get_distro()
def _get_pkey(self, key):
'''Get the binary form of the private key
from the text form
'''
if isinstance(key, basestring):
key = StringIO.StringIO(key)
errors = []
for key_class in (paramiko.rsakey.RSAKey, paramiko.dsskey.DSSKey):
try:
return key_class.from_private_key(key)
except paramiko.SSHException as exc:
errors.append(exc)
raise SSHError('Invalid pkey: %s' % (errors))
def _get_client(self):
if self._client:
return self._client
self._client = paramiko.SSHClient()
self._client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
for _ in range(self.connect_retry_count):
try:
self._client.connect(self.ssh_access.host,
username=self.ssh_access.username,
port=self.ssh_access.port,
pkey=self.pkey,
key_filename=self.ssh_access.private_key_file,
password=self.ssh_access.password,
timeout=self.connect_timeout)
return self._client
except (paramiko.AuthenticationException,
paramiko.BadHostKeyException,
paramiko.SSHException,
socket.error,
Exception):
time.sleep(self.connect_retry_wait_sec)
self._client = None
msg = '[%s] SSH Connection failed after %s attempts' % (self.ssh_access.host,
self.connect_retry_count)
raise SSHError(msg)
def close(self):
self._client.close()
self._client = False
def run(self, cmd, stdin=None, stdout=None, stderr=None,
raise_on_error=True, timeout=3600):
"""Execute specified command on the server.
:param cmd: Command to be executed.
:param stdin: Open file or string to pass to stdin.
:param stdout: Open file to connect to stdout.
:param stderr: Open file to connect to stderr.
:param raise_on_error: If False then the exit code will be returned. If True
then an exception will be raised on a non-zero exit code.
:param timeout: Timeout in seconds for command execution.
Default 1 hour. No timeout if set to 0.
"""
client = self._get_client()
if isinstance(stdin, basestring):
stdin = StringIO.StringIO(stdin)
return self._run(client, cmd, stdin=stdin, stdout=stdout,
stderr=stderr, raise_on_error=raise_on_error,
timeout=timeout)
def _run(self, client, cmd, stdin=None, stdout=None, stderr=None,
raise_on_error=True, timeout=3600):
transport = client.get_transport()
session = transport.open_session()
session.exec_command(cmd)
start_time = time.time()
data_to_send = ''
stderr_data = None
# If we have data to be sent to stdin then `select' should also
# check for stdin availability.
if stdin and not stdin.closed:
writes = [session]
else:
writes = []
while True:
# Block until data can be read/write.
select.select([session], writes, [session], 1)
if session.recv_ready():
data = session.recv(4096)
if stdout is not None:
stdout.write(data)
continue
if session.recv_stderr_ready():
stderr_data = session.recv_stderr(4096)
if stderr is not None:
stderr.write(stderr_data)
continue
if session.send_ready():
if stdin is not None and not stdin.closed:
if not data_to_send:
data_to_send = stdin.read(4096)
if not data_to_send:
stdin.close()
session.shutdown_write()
writes = []
continue
sent_bytes = session.send(data_to_send)
data_to_send = data_to_send[sent_bytes:]
if session.exit_status_ready():
break
if timeout and (time.time() - timeout) > start_time:
args = {'cmd': cmd, 'host': self.ssh_access.host}
raise SSHTimeout(('Timeout executing command '
'"%(cmd)s" on host %(host)s') % args)
# if e:
# raise SSHError('Socket error.')
exit_status = session.recv_exit_status()
if 0 != exit_status and raise_on_error:
fmt = ('Command "%(cmd)s" failed with exit_status %(status)d.')
details = fmt % {'cmd': cmd, 'status': exit_status}
if stderr_data:
details += (' Last stderr data: "%s".') % stderr_data
raise SSHError(details)
return exit_status
def execute(self, cmd, stdin=None, timeout=3600):
"""Execute the specified command on the server.
:param cmd: Command to be executed.
:param stdin: Open file to be sent on process stdin.
:param timeout: Timeout for execution of the command.
Return tuple (exit_status, stdout, stderr)
"""
stdout = StringIO.StringIO()
stderr = StringIO.StringIO()
exit_status = self.run(cmd, stderr=stderr,
stdout=stdout, stdin=stdin,
timeout=timeout, raise_on_error=False)
stdout.seek(0)
stderr.seek(0)
return (exit_status, stdout.read(), stderr.read())
def wait(self, timeout=120, interval=1):
"""Wait for the host will be available via ssh."""
start_time = time.time()
while True:
try:
return self.execute('uname')
except (socket.error, SSHError):
time.sleep(interval)
if time.time() > (start_time + timeout):
raise SSHTimeout(('Timeout waiting for "%s"') % self.ssh_access.host)
def __extract_property(self, name, input_str):
expr = name + r'="?([\w\.]*)"?'
match = re.search(expr, input_str)
if match:
return match.group(1)
return 'Unknown'
# Get the linux distro
def __get_distro(self):
'''cat /etc/*-release | grep ID
Ubuntu:
DISTRIB_ID=Ubuntu
ID=ubuntu
ID_LIKE=debian
VERSION_ID="14.04"
RHEL:
ID="rhel"
ID_LIKE="fedora"
VERSION_ID="7.0"
'''
distro_cmd = "grep ID /etc/*-release"
(status, distro_out, _) = self.execute(distro_cmd)
if status:
distro_out = ''
self.distro_id = self.__extract_property('ID', distro_out)
self.distro_id_like = self.__extract_property('ID_LIKE', distro_out)
self.distro_version = self.__extract_property('VERSION_ID', distro_out)
def pidof(self, proc_name):
'''
Return a list containing the pids of all processes of a given name
the list is empty if there is no pid
'''
# the path update is necessary for RHEL
cmd = "PATH=$PATH:/usr/sbin pidof " + proc_name
(status, cmd_output, _) = self.execute(cmd)
if status:
return []
cmd_output = cmd_output.strip()
result = cmd_output.split()
return result
# kill pids in the given list of pids
def kill_proc(self, pid_list):
cmd = "kill -9 " + ' '.join(pid_list)
self.execute(cmd)
# check stats for a given path
def stat(self, path):
(status, cmd_output, _) = self.execute('stat ' + path)
if status:
return None
return cmd_output
def ping_check(self, target_ip, ping_count=2, pass_threshold=80):
'''helper function to ping from one host to an IP address,
for a given count and pass_threshold;
Steps:
ssh to the host and then ping to the target IP
then match the output and verify that the loss% is
less than the pass_threshold%
Return 1 if the criteria passes
Return 0, if it fails
'''
cmd = "ping -c " + str(ping_count) + " " + str(target_ip)
(_, cmd_output, _) = self.execute(cmd)
match = re.search(r'(\d*)% packet loss', cmd_output)
pkt_loss = match.group(1)
if int(pkt_loss) < int(pass_threshold):
return 1
else:
LOG.error('Ping to %s failed: %s', target_ip, cmd_output)
return 0
def get_file_from_host(self, from_path, to_path):
'''
A wrapper api on top of paramiko scp module, to scp
a remote file to the local.
'''
sshcon = self._get_client()
scpcon = scp.SCPClient(sshcon.get_transport())
try:
scpcon.get(from_path, to_path)
except scp.SCPException as exp:
LOG.error("Receive failed: [%s]", exp)
return 0
return 1
def put_file_to_host(self, from_path, to_path):
'''
A wrapper api on top of paramiko scp module, to scp
a local file to the remote.
'''
sshcon = self._get_client()
scpcon = scp.SCPClient(sshcon.get_transport())
try:
scpcon.put(from_path, remote_path=to_path)
except scp.SCPException as exp:
LOG.error("Send failed: [%s]", exp)
return 0
return 1
def read_remote_file(self, from_path):
'''
Read a remote file and save it to a buffer.
'''
cmd = "cat " + from_path
(status, cmd_output, _) = self.execute(cmd)
if status:
return None
return cmd_output
def get_host_os_version(self):
'''
Identify the host distribution/relase.
'''
os_release_file = "/etc/os-release"
sys_release_file = "/etc/system-release"
name = ""
version = ""
if self.stat(os_release_file):
data = self.read_remote_file(os_release_file)
if data is None:
LOG.error("Failed to read file %s", os_release_file)
return None
for line in data.splitlines():
mobj = re.match(r'NAME=(.*)', line)
if mobj:
name = mobj.group(1).strip("\"")
mobj = re.match(r'VERSION_ID=(.*)', line)
if mobj:
version = mobj.group(1).strip("\"")
os_name = name + " " + version
return os_name
if self.stat(sys_release_file):
data = self.read_remote_file(sys_release_file)
if data is None:
LOG.error("Failed to read file %s", sys_release_file)
return None
for line in data.splitlines():
mobj = re.match(r'Red Hat.*', line)
if mobj:
return mobj.group(0)
return None
def check_rpm_package_installed(self, rpm_pkg):
'''
Given a host and a package name, check if it is installed on the
system.
'''
check_pkg_cmd = "rpm -qa | grep " + rpm_pkg
(status, cmd_output, _) = self.execute(check_pkg_cmd)
if status:
return None
pkg_pattern = ".*" + rpm_pkg + ".*"
rpm_pattern = re.compile(pkg_pattern, re.IGNORECASE)
for line in cmd_output.splitlines():
mobj = rpm_pattern.match(line)
if mobj:
return mobj.group(0)
LOG.info("%s pkg installed ", rpm_pkg)
return None
def get_openstack_release(self, ver_str):
'''
Get the release series name from the package version
Refer to here for release tables:
https://wiki.openstack.org/wiki/Releases
'''
ver_table = {"2015.1": "Kilo",
"2014.2": "Juno",
"2014.1": "Icehouse",
"2013.2": "Havana",
"2013.1": "Grizzly",
"2012.2": "Folsom",
"2012.1": "Essex",
"2011.3": "Diablo",
"2011.2": "Cactus",
"2011.1": "Bexar",
"2010.1": "Austin"}
ver_prefix = re.search(r"20\d\d\.\d", ver_str).group(0)
if ver_prefix in ver_table:
return ver_table[ver_prefix]
else:
return "Unknown"
def check_openstack_version(self):
'''
Identify the openstack version running on the controller.
'''
nova_cmd = "nova-manage --version"
(status, _, err_output) = self.execute(nova_cmd)
if status:
return "Unknown"
ver_str = err_output.strip()
release_str = self.get_openstack_release(err_output)
return release_str + " (" + ver_str + ")"
def get_cpu_info(self):
'''
Get the CPU info of the controller.
Note: Here we are assuming the controller node has the exact
hardware as the compute nodes.
'''
cmd = 'cat /proc/cpuinfo | grep -m1 "model name"'
(status, std_output, _) = self.execute(cmd)
if status:
return "Unknown"
model_name = re.search(r":\s(.*)", std_output).group(1)
cmd = 'cat /proc/cpuinfo | grep "model name" | wc -l'
(status, std_output, _) = self.execute(cmd)
if status:
return "Unknown"
cores = std_output.strip()
return (cores + " * " + model_name)
def get_nic_name(self, agent_type, encap, internal_iface_dict):
'''
Get the NIC info of the controller.
Note: Here we are assuming the controller node has the exact
hardware as the compute nodes.
'''
# The internal_iface_dict is a dictionary that contains the mapping between
# hostname and the internal interface name like below:
# {u'hh23-4': u'eth1', u'hh23-5': u'eth1', u'hh23-6': u'eth1'}
cmd = "hostname"
(status, std_output, _) = self.execute(cmd)
if status:
return "Unknown"
hostname = std_output.strip()
if hostname in internal_iface_dict:
iface = internal_iface_dict[hostname]
else:
return "Unknown"
# Figure out which interface is for internal traffic
if 'Linux bridge' in agent_type:
ifname = iface
elif 'Open vSwitch' in agent_type:
if encap == 'vlan':
# [root@hh23-10 ~]# ovs-vsctl list-ports br-inst
# eth1
# phy-br-inst
cmd = 'ovs-vsctl list-ports ' + iface + ' | grep -E "^[^phy].*"'
(status, std_output, _) = self.execute(cmd)
if status:
return "Unknown"
ifname = std_output.strip()
elif encap == 'vxlan' or encap == 'gre':
# This is complicated. We need to first get the local IP address on
# br-tun, then do a reverse lookup to get the physical interface.
#
# [root@hh23-4 ~]# ip addr show to "23.23.2.14"
# 3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
# inet 23.23.2.14/24 brd 23.23.2.255 scope global eth1
# valid_lft forever preferred_lft forever
cmd = "ip addr show to " + iface + " | awk -F: '{print $2}'"
(status, std_output, _) = self.execute(cmd)
if status:
return "Unknown"
ifname = std_output.strip()
else:
return "Unknown"
cmd = 'ethtool -i ' + ifname + ' | grep bus-info'
(status, std_output, _) = self.execute(cmd)
if status:
return "Unknown"
bus_info = re.search(r":\s(.*)", std_output).group(1)
cmd = 'lspci -s ' + bus_info
(status, std_output, _) = self.execute(cmd)
if status:
return "Unknown"
nic_name = re.search(r"Ethernet controller:\s(.*)", std_output).group(1)
return (nic_name)
def get_l2agent_version(self, agent_type):
'''
Get the L2 agent version of the controller.
Note: Here we are assuming the controller node has the exact
hardware as the compute nodes.
'''
if 'Linux bridge' in agent_type:
cmd = "brctl --version | awk -F',' '{print $2}'"
ver_string = "Linux Bridge "
elif 'Open vSwitch' in agent_type:
cmd = "ovs-vsctl --version | awk -F')' '{print $2}'"
ver_string = "OVS "
else:
return "Unknown"
(status, std_output, _) = self.execute(cmd)
if status:
return "Unknown"
return ver_string + std_output.strip()
##################################################
# Only invoke the module directly for test purposes. Should be
# invoked from pns script.
##################################################
def main():
# As argument pass the SSH access string in the form "<username>@<ipv4>[:<password>]"
test_ssh = SSH(SSHAccess(sys.argv[1]))
print 'ID=' + test_ssh.distro_id
print 'ID_LIKE=' + test_ssh.distro_id_like
print 'VERSION_ID=' + test_ssh.distro_version
# ssh.wait()
# print ssh.pidof('bash')
# print ssh.stat('/tmp')
print test_ssh.check_openstack_version()
print test_ssh.get_cpu_info()
print test_ssh.get_l2agent_version("Open vSwitch agent")
if __name__ == "__main__":
main()
| 34.442336 | 97 | 0.554698 |
583a186a132ddb239cb45e6b268c36d1d4633f9c
| 361 |
py
|
Python
|
receivers.py
|
HectorTa1989/MIMO-PHY-security-with-Eavesdropper
|
8a0b248f5e1324d1055ead6ed0a4ea38fc2df011
|
[
"MIT"
] | 1 |
2021-10-30T23:55:55.000Z
|
2021-10-30T23:55:55.000Z
|
receivers.py
|
HectorTa1989/MIMO-PHY-security-with-Eavesdropper
|
8a0b248f5e1324d1055ead6ed0a4ea38fc2df011
|
[
"MIT"
] | 1 |
2021-10-30T15:00:26.000Z
|
2021-10-30T15:00:26.000Z
|
receivers.py
|
HectorTa1989/MIMO-PHY-security-analysis-with-Eavesdropper
|
8a0b248f5e1324d1055ead6ed0a4ea38fc2df011
|
[
"MIT"
] | null | null | null |
import numpy as np
class Receiver:
def receive(self):
pass
class MMRC(Receiver):
def __init__(self, nr, channel):
self.Channel = channel
self.ChannelMatrix = channel.getChannel()
def receive(self, x):
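# Descriptive note added for clarity: this implements maximal-ratio combining -
# weight each received sample by the conjugate of its channel coefficient,
# then sum the branches so they add coherently.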
self.s = np.multiply(x,np.conj(self.ChannelMatrix))
self.ssum = sum(self.s)
return self.ssum
| 22.5625 | 59 | 0.617729 |
9063b110b91abce7864f14e325fd50d0330b515f
| 219 |
py
|
Python
|
flaskio.py
|
ricklon/audioplayer
|
407bc3858676ac638603fe941b946b91480a60b2
|
[
"Apache-2.0"
] | null | null | null |
flaskio.py
|
ricklon/audioplayer
|
407bc3858676ac638603fe941b946b91480a60b2
|
[
"Apache-2.0"
] | null | null | null |
flaskio.py
|
ricklon/audioplayer
|
407bc3858676ac638603fe941b946b91480a60b2
|
[
"Apache-2.0"
] | null | null | null |
from flask import Flask, render_template
from flask_socketio import SocketIO
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)
if __name__ == '__main__':
socketio.run(app)
| 16.846154 | 40 | 0.726027 |
3236f4093e1953cf7e9ea0dc7b8d31fa75069d99
| 21,138 |
py
|
Python
|
commands/iam_report.py
|
andrewkrug/cloudmapper
|
79a7d526331ed40c8eac43fb285cee03b3b94fbe
|
[
"BSD-3-Clause"
] | null | null | null |
commands/iam_report.py
|
andrewkrug/cloudmapper
|
79a7d526331ed40c8eac43fb285cee03b3b94fbe
|
[
"BSD-3-Clause"
] | null | null | null |
commands/iam_report.py
|
andrewkrug/cloudmapper
|
79a7d526331ed40c8eac43fb285cee03b3b94fbe
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import print_function
import argparse
import json
import datetime
import os.path
from abc import ABCMeta
from six import add_metaclass
from jinja2 import Template
from enum import Enum
from policyuniverse.policy import Policy
from shared.common import parse_arguments, get_regions
from shared.query import query_aws, get_parameter_file
from shared.nodes import Account, Region
__description__ = "Create IAM report"
class OutputFormat(Enum):
json = "json"
html = "html"
REPORT_OUTPUT_FILE = os.path.join("web", "account-data", "iam_report")
def tolink(s):
# TODO sanitize
return s
def load_credential_report(region):
users = []
json_blob = query_aws(region.account, "iam-get-credential-report", region)
csv_lines = json_blob["Content"].split("\n")
# Skip header
csv_lines.pop(0)
# Header:
# user,arn,user_creation_time,password_enabled,password_last_used,password_last_changed,
# password_next_rotation,mfa_active,access_key_1_active,access_key_1_last_rotated,
# access_key_1_last_used_date,access_key_1_last_used_region,access_key_1_last_used_service,
# access_key_2_active,access_key_2_last_rotated,access_key_2_last_used_date,
# access_key_2_last_used_region,access_key_2_last_used_service,cert_1_active,cert_1_last_rotated,
# cert_2_active,cert_2_last_rotated
for line in csv_lines:
parts = line.split(",")
user = {
"user": parts[0],
"arn": parts[1],
"user_creation_time": parts[2],
"password_enabled": parts[3],
"password_last_used": parts[4],
"password_last_changed": parts[5],
"password_next_rotation": parts[6],
"mfa_active": parts[7],
"access_key_1_active": parts[8],
"access_key_1_last_rotated": parts[9],
"access_key_1_last_used_date": parts[10],
"access_key_1_last_used_region": parts[11],
"access_key_1_last_used_service": parts[12],
"access_key_2_active": parts[13],
"access_key_2_last_rotated": parts[14],
"access_key_2_last_used_date": parts[15],
"access_key_2_last_used_region": parts[16],
"access_key_2_last_used_service": parts[17],
"cert_1_active": parts[18],
"cert_1_last_rotated": parts[19],
"cert_2_active": parts[20],
"cert_2_last_rotated": parts[21],
}
users.append(user)
return users
def get_access_advisor(region, principal_stats, json_account_auth_details, args):
for principal_auth in [
*json_account_auth_details["UserDetailList"],
*json_account_auth_details["RoleDetailList"],
]:
stats = {}
stats["auth"] = principal_auth
job_id = get_parameter_file(
region,
"iam",
"generate-service-last-accessed-details",
principal_auth["Arn"],
)["JobId"]
json_last_access_details = get_parameter_file(
region, "iam", "get-service-last-accessed-details", job_id
)
stats["last_access"] = json_last_access_details
stats["is_inactive"] = True
job_completion_date = datetime.datetime.strptime(
json_last_access_details["JobCompletionDate"][0:10], "%Y-%m-%d"
)
for service in json_last_access_details["ServicesLastAccessed"]:
if "LastAuthenticated" in service:
last_access_date = datetime.datetime.strptime(
service["LastAuthenticated"][0:10], "%Y-%m-%d"
)
service["days_since_last_use"] = (
job_completion_date - last_access_date
).days
if service["days_since_last_use"] < args.max_age:
stats["is_inactive"] = False
break
principal_stats[principal_auth["Arn"]] = stats
def get_service_count_and_used(service_last_accessed):
service_count = 0
service_used_count = 0
for service_last_access in service_last_accessed:
service_count += 1
if service_last_access["TotalAuthenticatedEntities"] > 0:
service_used_count += 1
return {"service_count": service_count, "service_used_count": service_used_count}
def html_service_chart(principal, services_used, services_granted):
chartid = "serviceChart" + principal
return (
'<div style="width:30%"><canvas id="{}" width="100" height="15"></canvas></div>'
+ '<script>makeServiceUnusedChart("{}", {}, {});</script>'
).format(chartid, chartid, services_used, services_granted - services_used)
@add_metaclass(ABCMeta)
class graph_node(object):
__key = ""
__children = None
__parents = None
__name = ""
def cytoscape_data(self):
response = {
"data": {"id": self.key(), "name": self.name(), "type": self.get_type()}
}
return response
def key(self):
return self.__key
def set_key(self, key):
self.__key = key
def set_name(self, name):
self.__name = name
def name(self):
if self.__name == "":
return self.key()
return self.__name
def is_principal(self):
pass
def get_type(self):
pass
def add_child(self, node):
self.__children.append(node)
def add_parent(self, node):
self.__parents.append(node)
def children(self):
return self.__children
def parents(self):
return self.__parents
def get_services_allowed(self):
services = {}
for child in self.children():
for service, source in child.get_services_allowed().items():
source_list = services.get(service, [])
if self.is_principal():
source_path = source
else:
source_path = []
for s in source:
source_path.append("{}.{}".format(self.name(), s))
source_list.extend(source_path)
services[service] = source_list
return services
def __init__(self):
self.__children = []
self.__parents = []
class user_node(graph_node):
__auth = None
def is_principal(self):
return True
def get_type(self):
return "user"
def __init__(self, auth, auth_graph):
super().__init__()
self.set_key(auth["Arn"])
self.set_name(auth["UserName"])
self.__auth = auth
for policy in auth["AttachedManagedPolicies"]:
policy_node = auth_graph[policy["PolicyArn"]]
self.add_child(policy_node)
policy_node.add_parent(self)
for policy in auth.get("UserPolicyList", []):
policy_node = inline_policy_node(self, policy)
auth_graph[policy_node.key()] = policy_node
for group_name in auth.get("GroupList", []):
group_key = self.key()[0:26] + "group" + auth['Path'] + group_name
group_node = auth_graph[group_key]
group_node.add_parent(self)
self.add_child(group_node)
class role_node(graph_node):
def is_principal(self):
return True
def get_type(self):
return "role"
def __init__(self, auth, auth_graph):
super().__init__()
self.set_key(auth["Arn"])
self.set_name(auth["RoleName"])
for policy in auth["AttachedManagedPolicies"]:
policy_node = auth_graph[policy["PolicyArn"]]
self.add_child(policy_node)
policy_node.add_parent(self)
for policy in auth.get("RolePolicyList", []):
policy_node = inline_policy_node(self, policy)
auth_graph[policy_node.key()] = policy_node
class group_node(graph_node):
def is_principal(self):
return False
def get_type(self):
return "group"
def __init__(self, auth, auth_graph):
super().__init__()
self.set_key(auth["Arn"])
self.set_name(auth["GroupName"])
for policy in auth["AttachedManagedPolicies"]:
policy_node = auth_graph[policy["PolicyArn"]]
self.add_child(policy_node)
policy_node.add_parent(self)
for policy in auth.get("GroupPolicyList", []):
policy_node = inline_policy_node(self, policy)
auth_graph[policy_node.key()] = policy_node
class policy_node(graph_node):
__policy_document = {}
__policy_summary = None
def is_principal(self):
return False
def get_services_allowed(self):
response = {}
services = self.__policy_summary.action_summary().keys()
for service in services:
response[service] = [self.name()]
return response
def set_policy_document(self, doc):
self.__policy_document = doc
self.__policy_summary = Policy(doc)
class managed_policy_node(policy_node):
def get_type(self):
return "managed policy"
def __init__(self, auth):
super().__init__()
self.set_key(auth["Arn"])
self.set_name(auth["PolicyName"])
for policy_doc in auth["PolicyVersionList"]:
if policy_doc["IsDefaultVersion"]:
self.set_policy_document(policy_doc["Document"])
class inline_policy_node(policy_node):
def get_type(self):
return "inline policy"
def __init__(self, parent, auth):
super().__init__()
self.set_key(parent.key() + "/policy/" + auth["PolicyName"])
self.set_name(auth["PolicyName"])
parent.add_child(self)
self.add_parent(parent)
self.set_policy_document(auth["PolicyDocument"])
def get_iam_graph(auth):
iam_graph = {}
for policy in auth["Policies"]:
iam_graph[policy["Arn"]] = managed_policy_node(policy)
for policy_version in policy["PolicyVersionList"]:
if policy_version["IsDefaultVersion"]:
iam_graph[policy["Arn"]].set_policy_document(policy_version["Document"])
for group in auth["GroupDetailList"]:
iam_graph[group["Arn"]] = group_node(group, iam_graph)
for user in auth["UserDetailList"]:
iam_graph[user["Arn"]] = user_node(user, iam_graph)
for role in auth["RoleDetailList"]:
iam_graph[role["Arn"]] = role_node(role, iam_graph)
return iam_graph
def build_cytoscape_graph(iam_graph):
cytoscape_json = []
for k in iam_graph:
node = iam_graph[k]
if len(node.children()) > 0 or len(node.parents()) > 0:
cytoscape_json.append(iam_graph[k].cytoscape_data())
for k in iam_graph:
node = iam_graph[k]
for child in node.children():
edge = {
"data": {"source": node.key(), "target": child.key(), "type": "edge"}
}
cytoscape_json.append(edge)
return cytoscape_json
def iam_report(accounts, config, args):
"""Create IAM report"""
principal_stats = {}
json_account_auth_details = None
# Ensure only one account is given
if len(accounts) > 1:
raise Exception("This command only works with one account at a time")
account = accounts.pop()
# Create directory for output file if it doesn't already exist
try:
os.mkdir(os.path.dirname(REPORT_OUTPUT_FILE))
except OSError:
# Already exists
pass
# Read template
with open(os.path.join("templates", "iam_report.html"), "r") as report_template:
template = Template(report_template.read())
# Data to be passed to the template
t = {}
account = Account(None, account)
principal_stats = {}
print("Creating IAM report for: {}".format(account.name))
t["account_name"] = account.name
t["account_id"] = account.local_id
t["report_generated_time"] = datetime.datetime.now().strftime("%Y-%m-%d")
t["graph"] = ""
if args.show_graph:
t["graph"] = '<br><iframe width=700 height=700 src="./map.html"></iframe>'
for region_json in get_regions(account):
region = Region(account, region_json)
if region.name == "us-east-1":
json_account_auth_details = query_aws(
region.account, "iam-get-account-authorization-details", region
)
get_access_advisor(region, principal_stats, json_account_auth_details, args)
users = []
roles = []
inactive_principals = []
for principal, stats in principal_stats.items():
if "RoleName" in stats["auth"]:
stats["short_name"] = stats["auth"]["RoleName"]
stats["type"] = "role"
if stats["is_inactive"]:
inactive_principals.append(principal)
continue
roles.append(principal)
else:
stats["short_name"] = stats["auth"]["UserName"]
stats["type"] = "user"
if stats["is_inactive"]:
inactive_principals.append(principal)
continue
users.append(principal)
print("* Generating IAM graph")
# This needs to be generated even if we don't show the graph,
# because this data is needed for other functionality in this command
iam_graph = get_iam_graph(json_account_auth_details)
cytoscape_json = build_cytoscape_graph(iam_graph)
with open(os.path.join("web", "account-data", "data.json"), "w") as outfile:
json.dump(cytoscape_json, outfile, indent=4)
print("* Generating the rest of the report")
t["users"] = []
for principal in sorted(users):
service_counts = get_service_count_and_used(
principal_stats[principal]["last_access"]["ServicesLastAccessed"]
)
t["users"].append(
{
"arn": principal,
"name": principal_stats[principal]["auth"]["UserName"],
"services_used": service_counts["service_used_count"],
"services_granted": service_counts["service_count"],
}
)
t["roles"] = []
for principal in sorted(roles):
service_counts = get_service_count_and_used(
principal_stats[principal]["last_access"]["ServicesLastAccessed"]
)
t["roles"].append(
{
"arn": principal,
"name": principal_stats[principal]["auth"]["RoleName"],
"services_used": service_counts["service_used_count"],
"services_granted": service_counts["service_count"],
}
)
t["inactive_principals"] = []
for principal in sorted(inactive_principals):
# Choose icon
icon = '<i class="fas fa-user-astronaut"></i>'
if principal_stats[principal]["type"] == "user":
icon = '<i class="fas fa-user"></i>'
t["inactive_principals"].append(
{
"arn": principal,
"icon": icon,
"name": principal_stats[principal]["short_name"],
}
)
t["principals"] = []
for principal, stats in principal_stats.items():
if stats["is_inactive"]:
continue
p = {}
p["arn"] = principal
if "RoleName" in stats["auth"]:
p["icon"] = '<i class="fas fa-user-astronaut"></i>'
p["arn"] = stats["auth"]["Arn"]
p["name"] = stats["auth"]["RoleName"]
if "UserName" in stats["auth"]:
p["icon"] = '<i class="fas fa-user"></i>'
p["arn"] = stats["auth"]["Arn"]
p["name"] = stats["auth"]["UserName"]
principal_node = iam_graph[stats["auth"]["Arn"]]
privilege_sources = principal_node.get_services_allowed()
# Show access advisor info
# Get collection date
report_date = datetime.datetime.strptime(
stats["last_access"]["JobCompletionDate"][0:10], "%Y-%m-%d"
)
# Show services
p["services"] = []
for service in stats["last_access"]["ServicesLastAccessed"]:
last_use = "-"
if service.get("LastAuthenticated", "-") != "-":
last_use = (
report_date
- datetime.datetime.strptime(
service["LastAuthenticated"][0:10], "%Y-%m-%d"
)
).days
style = ""
if last_use == "-" or last_use > 90:
style = "bad"
source = privilege_sources.get(service["ServiceNamespace"], ["unknown"])
source = ";".join(source)
p["services"].append(
{
"style": style,
"name": service["ServiceName"],
"last_use": last_use,
"source": source,
}
)
# List groups
groups = stats["auth"].get("GroupList", [])
p["groups"] = []
arn_prefix = stats["auth"]["Arn"][0:26]
for group in groups:
p["groups"].append(
{"link_id": tolink(arn_prefix + "group/" + group), "name": group}
)
# List attached policies
policies = stats["auth"]["AttachedManagedPolicies"]
p["managed_policies"] = []
for policy in policies:
p["managed_policies"].append(
{"link_id": tolink(policy["PolicyArn"]), "name": policy["PolicyName"]}
)
# Show inline policies
policies = stats["auth"].get("UserPolicyList", [])
policies.extend(stats["auth"].get("RolePolicyList", []))
p["inline_policies"] = []
for policy in policies:
p["inline_policies"].append(
{
"name": policy["PolicyName"],
"document": json.dumps(policy["PolicyDocument"], indent=4),
}
)
# Show AssumeRolePolicyDocument
if "RoleName" in stats["auth"]:
p["assume_role"] = json.dumps(
stats["auth"]["AssumeRolePolicyDocument"], indent=4
)
t["principals"].append(p)
t["groups"] = []
for group in json_account_auth_details["GroupDetailList"]:
g = {"link_id": tolink(group["Arn"]), "name": group["GroupName"]}
# List members
group_node = iam_graph[group["Arn"]]
g["members"] = []
for parent in group_node.parents():
g["members"].append(
{"link_id": tolink(parent.key()), "name": parent.name()}
)
g["managed_policies"] = []
for policy in group["AttachedManagedPolicies"]:
g["managed_policies"].append(
{"link_id": tolink(policy["PolicyArn"]), "name": policy["PolicyName"]}
)
g["inline_policies"] = []
for policy in group["GroupPolicyList"]:
g["inline_policies"].append(
{
"name": policy["PolicyName"],
"document": json.dumps(policy["PolicyDocument"], indent=4),
}
)
t["groups"].append(g)
t["policies"] = []
for policy in json_account_auth_details["Policies"]:
p = {
"link_id": tolink(policy["Arn"]),
"name": policy["PolicyName"],
"managed": "",
}
if "arn:aws:iam::aws:policy" in policy["Arn"]:
p["managed"] = '<i class="fab fa-amazon"></i>AWS managed policy<br>'
# Attachments
policy_node = iam_graph[policy["Arn"]]
p["attachments"] = []
for parent in policy_node.parents():
p["attachments"].append(
{"link_id": tolink(parent.key()), "name": parent.name()}
)
for version in policy["PolicyVersionList"]:
if version["IsDefaultVersion"]:
p["document"] = json.dumps(version["Document"], indent=4)
t["policies"].append(p)
# Generate report from template
if args.requested_output == OutputFormat.html:
with open("{}.html".format(REPORT_OUTPUT_FILE), "w") as f:
f.write(template.render(t=t))
elif args.requested_output == OutputFormat.json:
with open("{}.json".format(REPORT_OUTPUT_FILE), "w") as f:
json.dump(t, f)
print("Report written to {}.{}".format(REPORT_OUTPUT_FILE, args.requested_output.value))
def run(arguments):
parser = argparse.ArgumentParser()
parser.add_argument(
"--max-age",
help="Number of days a user or role hasn't been used before it's marked dead",
default=90,
type=int,
)
parser.add_argument(
"--graph",
help="Do not create and display a graph",
dest="show_graph",
action="store_true",
)
parser.add_argument(
"--output",
help="Set the output type for the report",
default=OutputFormat.html,
type=OutputFormat,
dest="requested_output"
)
parser.set_defaults(show_graph=False)
args, accounts, config = parse_arguments(arguments, parser)
iam_report(accounts, config, args)
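# Illustrative invocation of this command (editor's sketch, not part of the
# original module). `parse_arguments` is defined elsewhere in the project, so
# any account-selection arguments are assumptions; only --max-age, --graph and
# --output come from the parser above:
#
#     run(["--max-age", "60", "--output", "json"])
#
# This would treat principals unused for 60+ days as inactive and write the
# report to "{}.json".format(REPORT_OUTPUT_FILE) instead of rendering HTML.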
| 32.027273 | 101 | 0.584587 |
d196ee5724ce9ff0230d58f26569b5b29f807d32 | 430 | py | Python | Components/Component.py | RuoxiQin/Unmanned-Aerial-Vehicle-Tracking | 49a0a32abcce42fc6bf9e71f5b098ec708373153 | ["Apache-2.0"] | 13 | 2018-06-16T12:52:18.000Z | 2021-08-14T02:43:24.000Z | Components/Component.py | RuoxiQin/Unmanned-Aerial-Vehicle-Tracking | 49a0a32abcce42fc6bf9e71f5b098ec708373153 | ["Apache-2.0"] | null | null | null | Components/Component.py | RuoxiQin/Unmanned-Aerial-Vehicle-Tracking | 49a0a32abcce42fc6bf9e71f5b098ec708373153 | ["Apache-2.0"] | 6 | 2019-06-20T21:06:01.000Z | 2021-08-14T02:43:28.000Z |
#!/usr/bin/python
#-*-coding:utf-8-*-
class Component(object):
'''This is the component in the game.'''
_name = 'Component'
    def __init__(self, position, num, region_size):
        self.position = position
        self.num = num
        self._region_size = region_size
        super(Component, self).__init__()
    def __str__(self):
        return str(self._name) + str(self.num) + ' position' + str(self.position)
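# Minimal usage sketch (illustrative values only):
#
#     c = Component(position=(10, 20), num=1, region_size=(640, 480))
#     print(c)  # -> Component1 position(10, 20)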
| 30.714286 | 79 | 0.637209 |
70bf5d5135bfa9ea0752360374ef61b675b581a3 | 21,619 | py | Python | tests/integration/shell/test_call.py | ContextLogic/salt | f98839c72df2294cdd1670835d10904b12089622 | ["Apache-2.0"] | null | null | null | tests/integration/shell/test_call.py | ContextLogic/salt | f98839c72df2294cdd1670835d10904b12089622 | ["Apache-2.0"] | null | null | null | tests/integration/shell/test_call.py | ContextLogic/salt | f98839c72df2294cdd1670835d10904b12089622 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
'''
:codeauthor: Pedro Algarvio ([email protected])
tests.integration.shell.call
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
# Import python libs
from __future__ import absolute_import
import os
import sys
import re
import shutil
from datetime import datetime
import logging
# Import Salt Testing libs
from tests.support.case import ShellCase
from tests.support.unit import skipIf
from tests.support.paths import FILES, TMP
from tests.support.mixins import ShellCaseCommonTestsMixin
from tests.support.helpers import (
destructiveTest,
flaky,
skip_if_not_root,
)
from tests.integration.utils import testprogram
from tests.integration.states.test_pkg import _PKG_TARGETS
# Import salt libs
import salt.utils.files
import salt.utils.json
import salt.utils.platform
import salt.utils.yaml
from salt.ext import six
log = logging.getLogger(__name__)
class CallTest(ShellCase, testprogram.TestProgramCase, ShellCaseCommonTestsMixin):
_call_binary_ = 'salt-call'
def test_default_output(self):
out = self.run_call('-l quiet test.fib 3')
expect = ['local:',
' - 2']
self.assertEqual(expect, out[:-1])
def test_text_output(self):
out = self.run_call('-l quiet --out txt test.fib 3')
expect = [
'local: (2'
]
self.assertEqual(''.join(expect), ''.join(out).rsplit(",", 1)[0])
def test_json_out_indent(self):
out = self.run_call('test.ping -l quiet --out=json --out-indent=-1')
self.assertIn('"local": true', ''.join(out))
out = self.run_call('test.ping -l quiet --out=json --out-indent=0')
self.assertIn('"local": true', ''.join(out))
out = self.run_call('test.ping -l quiet --out=json --out-indent=1')
self.assertIn('"local": true', ''.join(out))
def test_local_sls_call(self):
fileroot = os.path.join(FILES, 'file', 'base')
out = self.run_call('--file-root {0} --local state.sls saltcalllocal'.format(fileroot))
self.assertIn('Name: test.echo', ''.join(out))
self.assertIn('Result: True', ''.join(out))
self.assertIn('hello', ''.join(out))
self.assertIn('Succeeded: 1', ''.join(out))
@destructiveTest
@skip_if_not_root
@skipIf(salt.utils.platform.is_windows(), 'This test does not apply on Windows')
def test_local_pkg_install(self):
'''
Test to ensure correct output when installing package
This also tests to make sure that salt call does not execute the
function twice, see https://github.com/saltstack/salt/pull/49552
'''
def _run_call(cmd):
cmd = '--out=json --local ' + cmd
return salt.utils.json.loads(''.join(self.run_call(cmd)))['local']
os_family = _run_call('grains.get os_family')
if os_family == 'RedHat':
# This test errors in odd ways on some distros (namely Fedora, CentOS).
# There is a bug somewhere either in the test suite or Python versions
# that causes a SyntaxError. This test was skipped entirely long ago,
# likely due to this same issue. For now, let's skip the test for these
# distros and let the other OSes catch regressions here.
# The actual commands work fine, it's the test suite that has problems.
# See https://github.com/saltstack/salt-jenkins/issues/1122 and also see
# https://github.com/saltstack/salt/pull/49552 for more info.
self.skipTest('Test throws SyntaxErrors due to deep bug. Skipping until '
'issue can be resolved.')
try:
target = _PKG_TARGETS.get(os_family, [])[0]
except IndexError:
self.skipTest(
'No package targets for os_family {0}'.format(os_family))
cur_pkgs = _run_call('pkg.list_pkgs')
if target in cur_pkgs:
self.fail('Target package \'{0}\' already installed'.format(target))
out = ''.join(self.run_call('--local pkg.install {0}'.format(target)))
self.assertIn('local: ----------', out)
self.assertIn('{0}: ----------'.format(target), out)
self.assertIn('new:', out)
self.assertIn('old:', out)
@skipIf(sys.platform.startswith('win'), 'This test does not apply on Win')
@flaky
def test_user_delete_kw_output(self):
ret = self.run_call('-l quiet -d user.delete')
assert 'salt \'*\' user.delete name remove=True force=True' in ''.join(ret)
def test_salt_documentation_too_many_arguments(self):
'''
Test to see if passing additional arguments shows an error
'''
data = self.run_call('-d virtualenv.create /tmp/ve', catch_stderr=True)
self.assertIn('You can only get documentation for one method at one time', '\n'.join(data[1]))
def test_issue_6973_state_highstate_exit_code(self):
'''
If there is no tops/master_tops or state file matches
for this minion, salt-call should exit non-zero if invoked with
option --retcode-passthrough
'''
src = os.path.join(FILES, 'file/base/top.sls')
dst = os.path.join(FILES, 'file/base/top.sls.bak')
shutil.move(src, dst)
expected_comment = 'No states found for this minion'
try:
stdout, retcode = self.run_call(
'-l quiet --retcode-passthrough state.highstate',
with_retcode=True
)
finally:
shutil.move(dst, src)
self.assertIn(expected_comment, ''.join(stdout))
self.assertNotEqual(0, retcode)
@skipIf(sys.platform.startswith('win'), 'This test does not apply on Win')
@skipIf(True, 'to be re-enabled when #23623 is merged')
def test_return(self):
self.run_call('cmd.run "echo returnTOmaster"')
jobs = [a for a in self.run_run('jobs.list_jobs')]
self.assertTrue(True in ['returnTOmaster' in j for j in jobs])
# lookback jid
first_match = [(i, j)
for i, j in enumerate(jobs)
if 'returnTOmaster' in j][0]
jid, idx = None, first_match[0]
while idx > 0:
jid = re.match("([0-9]+):", jobs[idx])
if jid:
jid = jid.group(1)
break
idx -= 1
assert idx > 0
assert jid
master_out = [
a for a in self.run_run('jobs.lookup_jid {0}'.format(jid))
]
self.assertTrue(True in ['returnTOmaster' in a for a in master_out])
@skipIf(sys.platform.startswith('win'), 'This test does not apply on Win')
@flaky
def test_issue_2731_masterless(self):
root_dir = os.path.join(TMP, 'issue-2731')
config_dir = os.path.join(root_dir, 'conf')
minion_config_file = os.path.join(config_dir, 'minion')
logfile = os.path.join(root_dir, 'minion_test_issue_2731')
if not os.path.isdir(config_dir):
os.makedirs(config_dir)
with salt.utils.files.fopen(self.get_config_file_path('master')) as fhr:
master_config = salt.utils.yaml.safe_load(fhr)
master_root_dir = master_config['root_dir']
this_minion_key = os.path.join(
master_root_dir, 'pki', 'master', 'minions', 'minion_test_issue_2731'
)
minion_config = {
'id': 'minion_test_issue_2731',
'master': 'localhost',
'master_port': 64506,
'root_dir': master_root_dir,
'pki_dir': 'pki',
'cachedir': 'cachedir',
'sock_dir': 'minion_sock',
'open_mode': True,
'log_file': logfile,
'log_level': 'quiet',
'log_level_logfile': 'info',
'transport': self.master_opts['transport'],
}
try:
# Remove existing logfile
if os.path.isfile(logfile):
os.unlink(logfile)
start = datetime.now()
# Let's first test with a master running
with salt.utils.files.fopen(minion_config_file, 'w') as fh_:
salt.utils.yaml.safe_dump(minion_config, fh_, default_flow_style=False)
ret = self.run_script(
'salt-call',
'--config-dir {0} cmd.run "echo foo"'.format(
config_dir
)
)
try:
self.assertIn('local:', ret)
except AssertionError:
if os.path.isfile(minion_config_file):
os.unlink(minion_config_file)
# Let's remove our key from the master
if os.path.isfile(this_minion_key):
os.unlink(this_minion_key)
raise
# Calculate the required timeout, since next will fail.
# I needed this because after many attempts, I was unable to catch:
# WARNING: Master hostname: salt not found. Retrying in 30 seconds
            elapsed = datetime.now() - start
            timeout = elapsed.seconds + 3
# Now let's remove the master configuration
minion_config.pop('master')
minion_config.pop('master_port')
with salt.utils.files.fopen(minion_config_file, 'w') as fh_:
salt.utils.yaml.safe_dump(minion_config, fh_, default_flow_style=False)
_, timed_out = self.run_script(
'salt-call',
'--config-dir {0} cmd.run "echo foo"'.format(
config_dir
),
timeout=timeout,
catch_timeout=True,
)
try:
self.assertTrue(timed_out)
except AssertionError:
if os.path.isfile(minion_config_file):
os.unlink(minion_config_file)
# Let's remove our key from the master
if os.path.isfile(this_minion_key):
os.unlink(this_minion_key)
raise
# Should work with --local
ret = self.run_script(
'salt-call',
'--config-dir {0} --local cmd.run "echo foo"'.format(
config_dir
),
timeout=60
)
try:
self.assertIn('local:', ret)
except AssertionError:
if os.path.isfile(minion_config_file):
os.unlink(minion_config_file)
# Let's remove our key from the master
if os.path.isfile(this_minion_key):
os.unlink(this_minion_key)
raise
# Should work with local file client
minion_config['file_client'] = 'local'
with salt.utils.files.fopen(minion_config_file, 'w') as fh_:
salt.utils.yaml.safe_dump(minion_config, fh_, default_flow_style=False)
ret = self.run_script(
'salt-call',
'--config-dir {0} cmd.run "echo foo"'.format(
config_dir
),
timeout=60
)
self.assertIn('local:', ret)
finally:
if os.path.isfile(minion_config_file):
os.unlink(minion_config_file)
# Let's remove our key from the master
if os.path.isfile(this_minion_key):
os.unlink(this_minion_key)
def test_issue_7754(self):
old_cwd = os.getcwd()
config_dir = os.path.join(TMP, 'issue-7754')
if not os.path.isdir(config_dir):
os.makedirs(config_dir)
os.chdir(config_dir)
with salt.utils.files.fopen(self.get_config_file_path('minion'), 'r') as fh_:
minion_config = salt.utils.yaml.safe_load(fh_)
minion_config['log_file'] = 'file:///dev/log/LOG_LOCAL3'
with salt.utils.files.fopen(os.path.join(config_dir, 'minion'), 'w') as fh_:
salt.utils.yaml.safe_dump(minion_config, fh_, default_flow_style=False)
ret = self.run_script(
'salt-call',
'--config-dir {0} cmd.run "echo foo"'.format(
config_dir
),
timeout=60,
catch_stderr=True,
with_retcode=True
)
try:
self.assertIn('local:', ret[0])
self.assertFalse(os.path.isdir(os.path.join(config_dir, 'file:')))
except AssertionError:
# We now fail when we're unable to properly set the syslog logger
self.assertIn(
'Failed to setup the Syslog logging handler', '\n'.join(ret[1])
)
self.assertEqual(ret[2], 2)
finally:
self.chdir(old_cwd)
if os.path.isdir(config_dir):
shutil.rmtree(config_dir)
def test_syslog_file_not_found(self):
'''
test when log_file is set to a syslog file that does not exist
'''
old_cwd = os.getcwd()
config_dir = os.path.join(TMP, 'log_file_incorrect')
if not os.path.isdir(config_dir):
os.makedirs(config_dir)
os.chdir(config_dir)
with salt.utils.files.fopen(self.get_config_file_path('minion'), 'r') as fh_:
minion_config = salt.utils.yaml.load(fh_.read())
minion_config['log_file'] = 'file:///dev/doesnotexist'
with salt.utils.files.fopen(os.path.join(config_dir, 'minion'), 'w') as fh_:
fh_.write(
salt.utils.yaml.dump(minion_config, default_flow_style=False)
)
ret = self.run_script(
'salt-call',
'--config-dir {0} cmd.run "echo foo"'.format(
config_dir
),
timeout=60,
catch_stderr=True,
with_retcode=True
)
try:
if sys.version_info >= (3, 5, 4):
self.assertIn('local:', ret[0])
self.assertIn('[WARNING ] The log_file does not exist. Logging not setup correctly or syslog service not started.', ret[1])
self.assertEqual(ret[2], 0)
else:
self.assertIn(
'Failed to setup the Syslog logging handler', '\n'.join(ret[1])
)
self.assertEqual(ret[2], 2)
finally:
self.chdir(old_cwd)
if os.path.isdir(config_dir):
shutil.rmtree(config_dir)
@skipIf(True, 'This test is unreliable. Need to investigate why more deeply.')
@flaky
def test_issue_15074_output_file_append(self):
output_file_append = os.path.join(TMP, 'issue-15074')
try:
# Let's create an initial output file with some data
_ = self.run_script(
'salt-call',
'-c {0} --output-file={1} test.versions'.format(
self.config_dir,
output_file_append
),
catch_stderr=True,
with_retcode=True
)
with salt.utils.files.fopen(output_file_append) as ofa:
output = ofa.read()
self.run_script(
'salt-call',
'-c {0} --output-file={1} --output-file-append test.versions'.format(
self.config_dir,
output_file_append
),
catch_stderr=True,
with_retcode=True
)
with salt.utils.files.fopen(output_file_append) as ofa:
self.assertEqual(ofa.read(), output + output)
finally:
if os.path.exists(output_file_append):
os.unlink(output_file_append)
@skipIf(True, 'This test is unreliable. Need to investigate why more deeply.')
@flaky
def test_issue_14979_output_file_permissions(self):
output_file = os.path.join(TMP, 'issue-14979')
with salt.utils.files.set_umask(0o077):
try:
# Let's create an initial output file with some data
self.run_script(
'salt-call',
'-c {0} --output-file={1} -l trace -g'.format(
self.config_dir,
output_file
),
catch_stderr=True,
with_retcode=True
)
try:
stat1 = os.stat(output_file)
except OSError:
self.fail('Failed to generate output file, see log for details')
# Let's change umask
os.umask(0o777) # pylint: disable=blacklisted-function
self.run_script(
'salt-call',
'-c {0} --output-file={1} --output-file-append -g'.format(
self.config_dir,
output_file
),
catch_stderr=True,
with_retcode=True
)
try:
stat2 = os.stat(output_file)
except OSError:
self.fail('Failed to generate output file, see log for details')
self.assertEqual(stat1.st_mode, stat2.st_mode)
                # Data was appended to the file
self.assertTrue(stat1.st_size < stat2.st_size)
# Let's remove the output file
os.unlink(output_file)
# Not appending data
self.run_script(
'salt-call',
'-c {0} --output-file={1} -g'.format(
self.config_dir,
output_file
),
catch_stderr=True,
with_retcode=True
)
try:
stat3 = os.stat(output_file)
except OSError:
self.fail('Failed to generate output file, see log for details')
# Mode must have changed since we're creating a new log file
self.assertNotEqual(stat1.st_mode, stat3.st_mode)
finally:
if os.path.exists(output_file):
os.unlink(output_file)
@skipIf(sys.platform.startswith('win'), 'This test does not apply on Win')
def test_42116_cli_pillar_override(self):
ret = self.run_call(
'state.apply issue-42116-cli-pillar-override '
'pillar=\'{"myhost": "localhost"}\''
)
for line in ret:
line = line.lstrip()
if line == 'Comment: Command "ping -c 2 localhost" run':
# Successful test
break
else:
log.debug('salt-call output:\n\n%s', '\n'.join(ret))
self.fail('CLI pillar override not found in pillar data')
def test_pillar_items_masterless(self):
'''
Test to ensure we get expected output
from pillar.items with salt-call
'''
get_items = self.run_call('pillar.items', local=True)
exp_out = [' - Lancelot', ' - Galahad', ' - Bedevere',
' monty:', ' python']
for out in exp_out:
self.assertIn(out, get_items)
def tearDown(self):
'''
Teardown method to remove installed packages
'''
user = ''
user_info = self.run_call('--local grains.get username')
if user_info and isinstance(user_info, (list, tuple)) and isinstance(user_info[-1], six.string_types):
user = user_info[-1].strip()
super(CallTest, self).tearDown()
# pylint: disable=invalid-name
def test_exit_status_unknown_argument(self):
'''
Ensure correct exit status when an unknown argument is passed to salt-call.
'''
call = testprogram.TestProgramSaltCall(
name='unknown_argument',
parent_dir=self._test_dir,
)
# Call setup here to ensure config and script exist
call.setup()
stdout, stderr, status = call.run(
args=['--unknown-argument'],
catch_stderr=True,
with_retcode=True,
)
self.assert_exit_status(
status, 'EX_USAGE',
message='unknown argument',
stdout=stdout, stderr=stderr
)
def test_masterless_highstate(self):
'''
test state.highstate in masterless mode
'''
ret = self.run_call('state.highstate', local=True)
destpath = os.path.join(TMP, 'testfile')
exp_out = [' Function: file.managed', ' Result: True',
' ID: {0}'.format(destpath)]
for out in exp_out:
self.assertIn(out, ret)
self.assertTrue(os.path.exists(destpath))
def test_exit_status_correct_usage(self):
'''
Ensure correct exit status when salt-call starts correctly.
'''
call = testprogram.TestProgramSaltCall(
name='correct_usage',
parent_dir=self._test_dir,
)
# Call setup here to ensure config and script exist
call.setup()
stdout, stderr, status = call.run(
args=['--local', 'test.true'],
catch_stderr=True,
with_retcode=True,
)
self.assert_exit_status(
status, 'EX_OK',
message='correct usage',
stdout=stdout, stderr=stderr
)
| 37.209983 | 139 | 0.548915 |
26ae75649b4bb25740b65f3df177015e217bdf68 | 884 | py | Python | venv/lib/python3.6/site-packages/django_otp/plugins/otp_static/lib.py | ostar0816/mc-crypto | 80ad9896aed1dc952f819a404a458ccfad207d8e | ["MIT"] | 4 | 2018-10-19T04:36:20.000Z | 2020-02-13T16:14:09.000Z | venv/lib/python3.6/site-packages/django_otp/plugins/otp_static/lib.py | ostar0816/mc-crypto | 80ad9896aed1dc952f819a404a458ccfad207d8e | ["MIT"] | null | null | null | venv/lib/python3.6/site-packages/django_otp/plugins/otp_static/lib.py | ostar0816/mc-crypto | 80ad9896aed1dc952f819a404a458ccfad207d8e | ["MIT"] | null | null | null |
from __future__ import absolute_import, division, print_function, unicode_literals
try:
from django.contrib.auth import get_user_model
except ImportError:
from django.contrib.auth.models import User
get_user_model = lambda: User
from .models import StaticDevice, StaticToken
def add_static_token(username, token=None):
"""
Adds a random static token to the identified user.
This is the implementation for the management command of a similar name.
Returns the StaticToken object created.
"""
user = get_user_model().objects.get_by_natural_key(username)
device = next(StaticDevice.objects.filter(user=user).iterator(), None)
if device is None:
device = StaticDevice.objects.create(user=user, name='Backup Code')
if token is None:
token = StaticToken.random_token()
return device.token_set.create(token=token)
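# Illustrative usage (the username below is hypothetical):
#
#     token_obj = add_static_token('alice')            # random backup code
#     token_obj = add_static_token('alice', '123456')  # explicit code
#     print(token_obj.token)
#
# A StaticDevice named 'Backup Code' is created for the user on first use and
# reused for subsequent tokens.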
| 29.466667 | 82 | 0.742081 |
8b679c9f87635974f180b2c84c93dc6d1ead216d | 1,860 | py | Python | src/models/text_pipeline.py | Honeyfy/semi-supervised-text-classification | b3f5b29be9fec4dc19807bc394be7251bbbee18c | ["BSD-3-Clause"] | null | null | null | src/models/text_pipeline.py | Honeyfy/semi-supervised-text-classification | b3f5b29be9fec4dc19807bc394be7251bbbee18c | ["BSD-3-Clause"] | null | null | null | src/models/text_pipeline.py | Honeyfy/semi-supervised-text-classification | b3f5b29be9fec4dc19807bc394be7251bbbee18c | ["BSD-3-Clause"] | null | null | null |
import dill
from src.models.processing_functions import proc_name_mapping
class TextPipeline:
def __init__(self,
pipeline,
language_processor=None):
self.language_processor = language_processor
# processing functions to apply for feature creation
self.pipeline = pipeline
self.transform_params = None
def fit_transform(self, X):
transform_params = {}
for name, fit_params in self.pipeline:
if name in proc_name_mapping.keys():
proc = proc_name_mapping[name]
else:
raise ValueError('unknown data transform %s' % name)
X, params = proc(X, fit=True, **fit_params)
transform_params[name] = params
self.transform_params = transform_params
return X
def transform(self, X):
for name, _ in self.pipeline:
transform_params = self.transform_params[name]
if name in proc_name_mapping.keys():
proc = proc_name_mapping[name]
else:
raise ValueError('unknown data transform %s' % name)
X, _ = proc(X, fit=False, **transform_params)
return X
def __getstate__(self):
"""Return state values to be pickled."""
odict = self.__dict__.copy()
if 'vector_model' in odict:
del odict['vector_model']
return odict
def __setstate__(self, state):
self.__dict__.update(state)
def save(self, filename, append=False):
file_mode = 'ab' if append else 'wb'
with open(filename, file_mode) as f:
dill.dump(self, f)
@staticmethod
def load(filename, offset=0):
with open(filename, 'rb') as f:
f.seek(offset)
text_pipeline = dill.load(f)
return text_pipeline
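# Minimal usage sketch. Step names must be keys of proc_name_mapping (defined
# in processing_functions); the names and parameters below are hypothetical
# placeholders rather than guaranteed entries of that mapping:
#
#     pipeline = TextPipeline(pipeline=[
#         ('clean_text', {}),
#         ('vectorize', {'max_features': 1000}),
#     ])
#     X_train_t = pipeline.fit_transform(X_train)  # fits and stores per-step params
#     X_test_t = pipeline.transform(X_test)        # reuses the stored params
#     pipeline.save('text_pipeline.pkl')
#     restored = TextPipeline.load('text_pipeline.pkl')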
| 29.52381 | 68 | 0.594086 |
4e7ed4095bddcf178926ce64489d50523aae7ba5 | 114 | py | Python | Codewars/Even_or _Odd - (7 kyu) .py | maxcohen31/A-bored-math-student | 007beb4dabf7b4406f48e9a3a967c29d032eab89 | ["MIT"] | null | null | null | Codewars/Even_or _Odd - (7 kyu) .py | maxcohen31/A-bored-math-student | 007beb4dabf7b4406f48e9a3a967c29d032eab89 | ["MIT"] | null | null | null | Codewars/Even_or _Odd - (7 kyu) .py | maxcohen31/A-bored-math-student | 007beb4dabf7b4406f48e9a3a967c29d032eab89 | ["MIT"] | null | null | null |
def even_or_odd(n):
if n % 2 == 0:
return 'Even'
else:
return 'Odd'
print(even_or_odd(7))
| 16.285714 | 21 | 0.526316 |
e65c8ade0cfc8dc4760f9ebc75cb582341c46586 | 2,122 | py | Python | scripts/cross_validate/hd_cnn.py | ysenarath/hate-detection-icsc-2020 | 9bd802209c7df982d63179e53628a89b14915446 | ["MIT"] | 2 | 2020-06-25T05:13:22.000Z | 2020-06-25T05:54:10.000Z | scripts/cross_validate/hd_cnn.py | ysenarath/hate-detection-icsc-2020 | 9bd802209c7df982d63179e53628a89b14915446 | ["MIT"] | null | null | null | scripts/cross_validate/hd_cnn.py | ysenarath/hate-detection-icsc-2020 | 9bd802209c7df982d63179e53628a89b14915446 | ["MIT"] | null | null | null |
# CNN CrossValidator
import numpy as np
from nltk import TweetTokenizer
from scripts.utils import scratch_path
from tklearn.datasets import load_fdcl18, load_dwmw17
from tklearn.model_selection import CrossValidator
from tklearn.neural_network import NeuralNetClassifier
from tklearn.neural_network.model import TextCNN
from tklearn.preprocessing.tweet import TweetPreprocessor
from tklearn.text.word_vec import load_word2vec
from tklearn.utils import pprint
DATASET = 'FDCL18'
if __name__ == '__main__':
# Load Dataset and Extract Features
if DATASET.lower().startswith('f'):
df = load_fdcl18(num_classes=2)
pprint({'dataset': 'FDCL18(num_classes=2)'})
else:
df = load_dwmw17(num_classes=2)
pprint({'dataset': 'DWMW17(num_classes=2)'})
df['clean_tweets'] = df.tweet.apply(TweetPreprocessor(normalize=['link', 'mention']).preprocess)
df['tokens'] = df.clean_tweets.apply(TweetTokenizer().tokenize)
# Load Resources
word2vec = load_word2vec()
# Hyperparameters
kwargs = {
'model': 'multichannel',
'epoch': 100,
'learning_rate': 0.01,
'max_sent_len': 50,
'batch_size': 50,
# 'word_dim': 300,
'filters': [3, 4, 5],
'filter_num': [100, 100, 100],
'dropout_prob': 0.5,
'norm_limit': 3,
}
pprint(kwargs)
# """ # Additional Parameters
kwargs['module'] = TextCNN
kwargs['corpus'] = df.tokens
kwargs['word_vectors'] = word2vec
# """ # Cross-validate and save predictions
scorers = ['accuracy', 'precision', 'recall', 'f1']
estimator = NeuralNetClassifier(**kwargs)
cv = CrossValidator(NeuralNetClassifier, kwargs, n_splits=5, scoring=scorers)
df['predictions'], cv_results = cv.cross_val_predict(df.tokens, df.label, return_scores=True)
# """ Print Scores
pprint(cv_results)
scores = {}
for scorer in scorers:
scores[scorer] = ['%.2f' % (np.average(cv_results[scorer]) * 100) + ',']
pprint(scores, type='table')
# """ Save Predictions #
df.to_excel(scratch_path('cnn_predictions.xlsx'))
# """ #
| 34.786885 | 100 | 0.666352 |
8ff5ddc49c13e0e16b9bcf18d0c4b7f24aaf6dc8 | 604 | py | Python | app/config/urls.py | MMartynamajchrzak/recipe-restAPI | 54ca39f7e7ff19f8cba902895ce69504a2fcaf99 | ["MIT"] | 1 | 2021-12-10T08:43:54.000Z | 2021-12-10T08:43:54.000Z | app/config/urls.py | MMartynamajchrzak/recipe-restAPI | 54ca39f7e7ff19f8cba902895ce69504a2fcaf99 | ["MIT"] | null | null | null | app/config/urls.py | MMartynamajchrzak/recipe-restAPI | 54ca39f7e7ff19f8cba902895ce69504a2fcaf99 | ["MIT"] | null | null | null |
from django.contrib import admin
from django.urls import path, include
from drf_spectacular.views import SpectacularAPIView, SpectacularSwaggerView
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('admin/', admin.site.urls),
path('api/user/', include('apps.user.urls')),
path('api/recipe/', include('apps.recipe.urls')),
path('docs/schema/', SpectacularAPIView.as_view(), name='docs'),
path('docs/', SpectacularSwaggerView.as_view(url_name='docs'), name='swagger-ui'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 43.142857 | 86 | 0.75 |
dbcb66ff3d94c4eaef6fa48345344bea7c94f3c3 | 4,950 | py | Python | tclambda/function.py | trustcruit/tclambda | 4413944243257d36088805d8e2f97b0d8b56b87d | ["MIT"] | null | null | null | tclambda/function.py | trustcruit/tclambda | 4413944243257d36088805d8e2f97b0d8b56b87d | ["MIT"] | 613 | 2019-06-05T10:49:01.000Z | 2021-08-03T03:23:18.000Z | tclambda/function.py | trustcruit/tclambda | 4413944243257d36088805d8e2f97b0d8b56b87d | ["MIT"] | null | null | null |
# https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
from __future__ import annotations
import asyncio
import json
import logging
import os
import time
from dataclasses import dataclass
from datetime import datetime
from uuid import uuid4
import boto3
from botocore.exceptions import ClientError
from .extras import timeme
s3client = boto3.client("s3")
sqsclient = boto3.client("sqs")
TC_QUEUE = os.getenv("TC_THIS_QUEUE")
TC_BUCKET = os.getenv("TC_THIS_BUCKET")
class LambdaFunction:
def __init__(self, queue_url=TC_QUEUE, s3_bucket=TC_BUCKET):
self.queue_url = queue_url
self.s3_bucket = s3_bucket
def __getattr__(self, function_name):
return LambdaWrapperFunction(self.queue_url, self.s3_bucket, function_name)
@dataclass
class Message:
result_store: str
message_body: str
def build_message(
function_name: str, args: tuple, kwargs: dict, s3_bucket: str, force_upload=False
) -> Message:
logger = logging.getLogger("tclambda.function.build_message")
key = f"{function_name}/{datetime.utcnow():%Y/%m/%d/%H%M%S}/{uuid4()}.json"
result_store = f"results/{key}"
proxy_store = f"proxy/{key}"
message_body = json.dumps(
{
"function": function_name,
"args": args,
"kwargs": kwargs,
"result_store": result_store,
}
)
logger.info(
f'Function "{function_name}", '
f'result_store: "{result_store}", '
f"message_body size: {sizeof_fmt(len(message_body))}"
)
if len(message_body) > 250000 or force_upload: # current maximum is 262144 bytes
logger.info("Uploading proxy for {function_name}")
with timeme() as dt:
s3client.put_object(Bucket=s3_bucket, Key=proxy_store, Body=message_body)
logger.info(f"Uploaded proxy for {function_name} in {dt.value}s")
message_body = json.dumps({"proxy": proxy_store})
return Message(result_store=result_store, message_body=message_body)
class LambdaWrapperFunction:
def __init__(self, queue_url, s3_bucket, function_name):
self.logger = logging.getLogger("tclambda.function.LambdaFunction")
self.queue_url = queue_url
self.s3_bucket = s3_bucket
self.function_name = function_name
def __call__(self, *args, **kwargs) -> LambdaResult:
message = build_message(
function_name=self.function_name,
args=args,
kwargs=kwargs,
s3_bucket=self.s3_bucket,
)
sqsclient.send_message(
QueueUrl=self.queue_url, MessageBody=message.message_body
)
return LambdaResult(s3_bucket=self.s3_bucket, key=message.result_store)
class LambdaResult:
def __init__(self, s3_bucket, key):
self.logger = logging.getLogger("tclambda.function.LambdaFunction")
self.s3_bucket = s3_bucket
self.key = key
self.waited = False
self._result = {}
def _iter_wait(self, delay: float, max_attempts: int):
if self.waited:
return
obj = None
start_time = time.monotonic()
for i in range(max_attempts):
try:
obj = s3client.get_object(Bucket=self.s3_bucket, Key=self.key)
end_time = time.monotonic()
self.logger.debug(
f"Found key {self.key} on {i+1} attempts and {end_time - start_time} seconds"
)
break
except ClientError as e:
if e.response["Error"]["Code"] == "NoSuchKey":
yield i
continue
raise
if not obj:
raise TimeoutError(
f"Result {self.key} not found within {delay*max_attempts} seconds"
)
self._result = json.load(obj["Body"])
self.waited = True
def wait(self, delay: int = 5, max_attempts=20):
for _ in self._iter_wait(delay, max_attempts):
time.sleep(delay)
async def async_wait(self, delay=5, max_attempts: int = 20):
for _ in self._iter_wait(delay, max_attempts):
await asyncio.sleep(delay)
def result(self, delay: int = 5, max_attempts: int = 20):
self.wait(delay, max_attempts)
try:
return self._result["result"]
except KeyError:
raise Exception(self._result["exception"]) from None
async def async_result(self, delay: int = 5, max_attempts: int = 20):
await self.async_wait(delay, max_attempts)
try:
return self._result["result"]
except KeyError:
raise Exception(self._result["exception"]) from None
def sizeof_fmt(num, suffix="B"):
for unit in ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, "Yi", suffix)
| 31.935484 | 97 | 0.622626 |
3d441009ff32914387b2b57e4f4a012260b91d70 | 8,338 | py | Python | utils/converters.py | zomatree/yert | bde7fb3db4501da9849bf4a11b611b5090cfca3b | ["MIT"] | null | null | null | utils/converters.py | zomatree/yert | bde7fb3db4501da9849bf4a11b611b5090cfca3b | ["MIT"] | null | null | null | utils/converters.py | zomatree/yert | bde7fb3db4501da9849bf4a11b611b5090cfca3b | ["MIT"] | null | null | null |
"""
MIT License
Copyright (c) 2020 - µYert
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from typing import Any
import re
from contextlib import suppress
from collections import namedtuple
from io import BytesIO
import discord
from discord.ext import commands
BetterUser = namedtuple('BetterUser', ['obj', 'http_dict'])
u_conv = commands.UserConverter()
m_conv = commands.MemberConverter()
emoji_regex = "(?:\U0001f1e6[\U0001f1e8-\U0001f1ec\U0001f1ee\U0001f1f1\U0001f1f2\U0001f1f4\U0001f1f6-\U0001f1fa\U0001f1fc\U0001f1fd\U0001f1ff])|(?:\U0001f1e7[\U0001f1e6\U0001f1e7\U0001f1e9-\U0001f1ef\U0001f1f1-\U0001f1f4\U0001f1f6-\U0001f1f9\U0001f1fb\U0001f1fc\U0001f1fe\U0001f1ff])|(?:\U0001f1e8[\U0001f1e6\U0001f1e8\U0001f1e9\U0001f1eb-\U0001f1ee\U0001f1f0-\U0001f1f5\U0001f1f7\U0001f1fa-\U0001f1ff])|(?:\U0001f1e9[\U0001f1ea\U0001f1ec\U0001f1ef\U0001f1f0\U0001f1f2\U0001f1f4\U0001f1ff])|(?:\U0001f1ea[\U0001f1e6\U0001f1e8\U0001f1ea\U0001f1ec\U0001f1ed\U0001f1f7-\U0001f1fa])|(?:\U0001f1eb[\U0001f1ee-\U0001f1f0\U0001f1f2\U0001f1f4\U0001f1f7])|(?:\U0001f1ec[\U0001f1e6\U0001f1e7\U0001f1e9-\U0001f1ee\U0001f1f1-\U0001f1f3\U0001f1f5-\U0001f1fa\U0001f1fc\U0001f1fe])|(?:\U0001f1ed[\U0001f1f0\U0001f1f2\U0001f1f3\U0001f1f7\U0001f1f9\U0001f1fa])|(?:\U0001f1ee[\U0001f1e8-\U0001f1ea\U0001f1f1-\U0001f1f4\U0001f1f6-\U0001f1f9])|(?:\U0001f1ef[\U0001f1ea\U0001f1f2\U0001f1f4\U0001f1f5])|(?:\U0001f1f0[\U0001f1ea\U0001f1ec-\U0001f1ee\U0001f1f2\U0001f1f3\U0001f1f5\U0001f1f7\U0001f1fc\U0001f1fe\U0001f1ff])|(?:\U0001f1f1[\U0001f1e6-\U0001f1e8\U0001f1ee\U0001f1f0\U0001f1f7-\U0001f1fb\U0001f1fe])|(?:\U0001f1f2[\U0001f1e6\U0001f1e8-\U0001f1ed\U0001f1f0-\U0001f1ff])|(?:\U0001f1f3[\U0001f1e6\U0001f1e8\U0001f1ea-\U0001f1ec\U0001f1ee\U0001f1f1\U0001f1f4\U0001f1f5\U0001f1f7\U0001f1fa\U0001f1ff])|\U0001f1f4\U0001f1f2|(?:\U0001f1f4[\U0001f1f2])|(?:\U0001f1f5[\U0001f1e6\U0001f1ea-\U0001f1ed\U0001f1f0-\U0001f1f3\U0001f1f7-\U0001f1f9\U0001f1fc\U0001f1fe])|\U0001f1f6\U0001f1e6|(?:\U0001f1f6[\U0001f1e6])|(?:\U0001f1f7[\U0001f1ea\U0001f1f4\U0001f1f8\U0001f1fa\U0001f1fc])|(?:\U0001f1f8[\U0001f1e6-\U0001f1ea\U0001f1ec-\U0001f1f4\U0001f1f7-\U0001f1f9\U0001f1fb\U0001f1fd-\U0001f1ff])|(?:\U0001f1f9[\U0001f1e6\U0001f1e8\U0001f1e9\U0001f1eb-\U0001f1ed\U0001f1ef-\U0001f1f4\U0001f1f7\U0001f1f9\U0001f1fb\U0001f1fc\U0001f1ff])|(?:\U0001f1fa[\U0001f1e6\U0001f1ec\U0001f1f2\U0001f1f8\U0001f1fe\U0001f1ff])|(?:\U0001f1fb[\U0001f1e6\U0001f1e8\U0001f1ea\U0001f1ec\U0001f1ee\U0001f1f3\U0001f1fa])|(?:\U0001f1fc[\U0001f1eb\U0001f1f8])|\U0001f1fd\U0001f1f0|(?:\U0001f1fd[\U0001f1f0])|(?:\U0001f1fe[\U0001f1ea\U0001f1f9])|(?:\U0001f1ff[\U0001f1e6\U0001f1f2\U0001f1fc])|(?:\U0001f3f3\ufe0f\u200d\U0001f308)|(?:\U0001f441\u200d\U0001f5e8)|(?:[\U0001f468\U0001f469]\u200d\u2764\ufe0f\u200d(?:\U0001f48b\u200d)?[\U0001f468\U0001f469])|(?:(?:(?:\U0001f468\u200d[\U0001f468\U0001f469])|(?:\U0001f469\u200d\U0001f469))(?:(?:\u200d\U0001f467(?:\u200d[\U0001f467\U0001f466])?)|(?:\u200d\U0001f466\u200d\U0001f466)))|(?:(?:(?:\U0001f468\u200d\U0001f468)|(?:\U0001f469\u200d\U0001f469))\u200d\U0001f466)|[\u2194-\u2199]|[\u23e9-\u23f3]|[\u23f8-\u23fa]|[\u25fb-\u25fe]|[\u2600-\u2604]|[\u2638-\u263a]|[\u2648-\u2653]|[\u2692-\u2694]|[\u26f0-\u26f5]|[\u26f7-\u26fa]|[\u2708-\u270d]|[\u2753-\u2755]|[\u2795-\u2797]|[\u2b05-\u2b07]|[\U0001f191-\U0001f19a]|[\U0001f1e6-\U0001f1ff]|[\U0001f232-\U0001f23a]|[\U0001f300-\U0001f321]|[\U0001f324-\U0001f393]|[\U0001f399-\U0001f39b]|[\U0001f39e-\U0001f3f0]|[\U0001f3f3-\U0001f3f5]|[\U0001f3f7-\U0001f3fa]|[\U0001f400-\U0001f4fd]|[\U0001f4ff-\U0001f53d]|[\U0001f549-\U0001f54e]|[\U0001f550-\U0001f567]|[\U0001f573-\U0001f57a]|[\U0001f58a-\U0001f58d]|[\U0001f5c2-\U0001f5c4]|[\U0001f5d1-\U0001f5d3]|[\U0001f5dc-\U0001f5de]|[\U0001f5fa-\U0001f64f]|[\U0001f680-\U0001f6c5]|[\U0001f6cb-\U0001f6d2]|[\U0001f6e0-\U0001f6e5]|[\U0001f6f3-\U0001f6f6]|[\U0001f910-\U0001f91e]|[\U0001f920-\U0001f927]|[\U0001f933-\U0001f93a]|[\U0001f93c-\U0001f93e]|[\U0001f940-\U0001
f945]|[\U0001f947-\U0001f94b]|[\U0001f950-\U0001f95e]|[\U0001f980-\U0001f991]|\u00a9|\u00ae|\u203c|\u2049|\u2122|\u2139|\u21a9|\u21aa|\u231a|\u231b|\u2328|\u23cf|\u24c2|\u25aa|\u25ab|\u25b6|\u25c0|\u260e|\u2611|\u2614|\u2615|\u2618|\u261d|\u2620|\u2622|\u2623|\u2626|\u262a|\u262e|\u262f|\u2660|\u2663|\u2665|\u2666|\u2668|\u267b|\u267f|\u2696|\u2697|\u2699|\u269b|\u269c|\u26a0|\u26a1|\u26aa|\u26ab|\u26b0|\u26b1|\u26bd|\u26be|\u26c4|\u26c5|\u26c8|\u26ce|\u26cf|\u26d1|\u26d3|\u26d4|\u26e9|\u26ea|\u26fd|\u2702|\u2705|\u270f|\u2712|\u2714|\u2716|\u271d|\u2721|\u2728|\u2733|\u2734|\u2744|\u2747|\u274c|\u274e|\u2757|\u2763|\u2764|\u27a1|\u27b0|\u27bf|\u2934|\u2935|\u2b1b|\u2b1c|\u2b50|\u2b55|\u3030|\u303d|\u3297|\u3299|\U0001f004|\U0001f0cf|\U0001f170|\U0001f171|\U0001f17e|\U0001f17f|\U0001f18e|\U0001f201|\U0001f202|\U0001f21a|\U0001f22f|\U0001f250|\U0001f251|\U0001f396|\U0001f397|\U0001f56f|\U0001f570|\U0001f587|\U0001f590|\U0001f595|\U0001f596|\U0001f5a4|\U0001f5a5|\U0001f5a8|\U0001f5b1|\U0001f5b2|\U0001f5bc|\U0001f5e1|\U0001f5e3|\U0001f5e8|\U0001f5ef|\U0001f5f3|\U0001f6e9|\U0001f6eb|\U0001f6ec|\U0001f6f0|\U0001f930|\U0001f9c0|[#|0-9]\u20e3"
class BetterUserConverter(commands.Converter):
async def convert(self, ctx, argument):
out = ctx.author if not argument else None
for converter in (m_conv, u_conv):
if out:
break
with suppress(Exception):
out = await converter.convert(ctx, argument)
if out is None:
try:
out = await ctx.bot.fetch_user(argument)
except discord.HTTPException:
raise commands.CommandError("Invalid user provided")
http_dict = await ctx.bot.http.get_user(out.id)
return BetterUser(obj=out, http_dict=http_dict)
def maybe_url(url: Any, /) -> str:
"""Returns an hyperlink version of an url if one is found, else the text"""
url = str(url)
if match := re.findall(r'//([^/]+)', url): # we get full urls, no need to go too overkill
return f"[{match[0]}]({url})"
else:
return url
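# Behaviour sketch for maybe_url, derived from the regex above:
#
#     maybe_url('https://example.com/some/page')
#     # -> '[example.com](https://example.com/some/page)'
#     maybe_url('no link here')
#     # -> 'no link here'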
class LinkConverter(commands.PartialEmojiConverter):
def __init__(self):
self.png_header = b'\x89PNG\r\n\x1a\n'
self.jpg_header = b'\xff\xd8\xff'
async def convert(self, ctx, argument: str) -> BytesIO:
try:
return BytesIO(await (await super().convert(ctx, argument)).url.read())
except commands.BadArgument:
if re.match(emoji_regex, argument):
unicode = "-".join(map(lambda x: x[2:], map(str, map(hex, map(ord, argument)))))
url = f"https://github.com/twitter/twemoji/blob/master/assets/72x72/{unicode}.png?raw=true"
async with ctx.bot.session.get(url) as res:
return BytesIO(await res.read())
argument = argument.replace('>', '').replace('<', '')
async with ctx.bot.session.get(argument, headers=ctx.bot._headers) as response:
raw_bytes = await response.read()
if raw_bytes.startswith(self.jpg_header) or raw_bytes.startswith(self.png_header):
async with ctx.bot.session.get(argument) as res:
img_bytes = BytesIO(await res.read())
return img_bytes
else:
raise commands.BadArgument("Unable to verify the link was an png or jpg")
| 88.702128 | 4,709 | 0.726313 |
541b4a601e160393635d10142dcf2df5d7e90ce3
| 987 |
py
|
Python
|
scripts/generate_test_ship_file.py
|
Oasixer/drive-and-survive
|
134ca725efed16fdd31df0bb76c520cd15d7b1ed
|
[
"MIT"
] | 1 |
2019-02-24T23:37:34.000Z
|
2019-02-24T23:37:34.000Z
|
scripts/generate_test_ship_file.py
|
Oasixer/drive-and-survive
|
134ca725efed16fdd31df0bb76c520cd15d7b1ed
|
[
"MIT"
] | null | null | null |
scripts/generate_test_ship_file.py
|
Oasixer/drive-and-survive
|
134ca725efed16fdd31df0bb76c520cd15d7b1ed
|
[
"MIT"
] | null | null | null |
import sys, os
# ok this is a little janky but I couldn't get it to import this shit because it's not technically in a package.
# should probably just make this whole game into a package but I'm too lazy today
# someone remind me to do that <3
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src'))
from entities.modules.cabin import CabinModule
from entities.modules.iron import IronModule
from entities.modules.module_base import ModSize
from entities.modules.shield import ShieldModule
from entities.vehicles.player_ship import PlayerShip
from utils.load_dump_data import generate_ship_file
def generate(shipNum=0):
if shipNum == 0:
modules = [
CabinModule((87, 177)),
ShieldModule(ModSize.medium, (187, 177)),
]
#IronModule(ModSize.small, (0, 140))
test_ship = PlayerShip(modules)
test_ship.cabin = modules[0] # Temp
generate_ship_file(test_ship)
if __name__ == '__main__':
generate(0)
| 35.25 | 110 | 0.716312 |
152d6511712d6c4fa3a23b0ec22780349b575d0a | 40,308 | py | Python | sdk/python/kfp/v2/compiler/pipeline_spec_builder.py | rahulsmehta/pipelines | a0a8f1da8cb7ca53cde7717aa78e666b634fec75 | ["Apache-2.0"] | 1 | 2022-01-28T23:27:52.000Z | 2022-01-28T23:27:52.000Z | sdk/python/kfp/v2/compiler/pipeline_spec_builder.py | rahulsmehta/pipelines | a0a8f1da8cb7ca53cde7717aa78e666b634fec75 | ["Apache-2.0"] | null | null | null | sdk/python/kfp/v2/compiler/pipeline_spec_builder.py | rahulsmehta/pipelines | a0a8f1da8cb7ca53cde7717aa78e666b634fec75 | ["Apache-2.0"] | null | null | null |
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for creating PipelineSpec proto objects."""
import json
from typing import List, Mapping, Optional, Tuple, Union
from google.protobuf import struct_pb2
from kfp.pipeline_spec import pipeline_spec_pb2
from kfp.v2.components import utils as component_utils
from kfp.v2.components import for_loop
from kfp.v2.components import pipeline_channel
from kfp.v2.components import pipeline_task
from kfp.v2.components import placeholders
from kfp.v2.components import tasks_group
from kfp.v2.components.types import artifact_types
from kfp.v2.components.types import type_utils
_GroupOrTask = Union[tasks_group.TasksGroup, pipeline_task.PipelineTask]
def _additional_input_name_for_pipeline_channel(
channel_or_name: Union[pipeline_channel.PipelineChannel, str]) -> str:
"""Gets the name for an additional (compiler-injected) input."""
# Adding a prefix to avoid (reduce chance of) name collision between the
# original component inputs and the injected input.
return 'pipelinechannel--' + (
channel_or_name.full_name if isinstance(
channel_or_name, pipeline_channel.PipelineChannel) else
channel_or_name)
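# Examples of the generated names (editor's sketch; a channel's `full_name`
# comes from pipeline_channel.PipelineChannel, which is defined elsewhere, so
# the channel example is an assumption):
#
#     _additional_input_name_for_pipeline_channel('msg')
#     # -> 'pipelinechannel--msg'
#     _additional_input_name_for_pipeline_channel(upstream_channel)
#     # -> 'pipelinechannel--' + upstream_channel.full_name,
#     #    e.g. 'pipelinechannel--producer-task-output'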
def _to_protobuf_value(value: type_utils.PARAMETER_TYPES) -> struct_pb2.Value:
"""Creates a google.protobuf.struct_pb2.Value message out of a provide
value.
Args:
value: The value to be converted to Value message.
Returns:
A google.protobuf.struct_pb2.Value message.
Raises:
ValueError if the given value is not one of the parameter types.
"""
    if isinstance(value, str):
        return struct_pb2.Value(string_value=value)
    elif isinstance(value, bool):
        # bool must be checked before (int, float): bool is a subclass of int,
        # so it would otherwise be serialized as a number_value.
        return struct_pb2.Value(bool_value=value)
    elif isinstance(value, (int, float)):
        return struct_pb2.Value(number_value=value)
elif isinstance(value, dict):
return struct_pb2.Value(
struct_value=struct_pb2.Struct(
fields={k: _to_protobuf_value(v) for k, v in value.items()}))
elif isinstance(value, list):
return struct_pb2.Value(
list_value=struct_pb2.ListValue(
values=[_to_protobuf_value(v) for v in value]))
else:
raise ValueError('Value must be one of the following types: '
'str, int, float, bool, dict, and list. Got: '
f'"{value}" of type "{type(value)}".')
def build_task_spec_for_task(
task: pipeline_task.PipelineTask,
parent_component_inputs: pipeline_spec_pb2.ComponentInputsSpec,
tasks_in_current_dag: List[str],
input_parameters_in_current_dag: List[str],
input_artifacts_in_current_dag: List[str],
) -> pipeline_spec_pb2.PipelineTaskSpec:
"""Builds PipelineTaskSpec for a pipeline task.
A task input may reference an output outside its immediate DAG.
For instance::
random_num = random_num_op(...)
with dsl.Condition(random_num.output > 5):
print_op('%s > 5' % random_num.output)
In this example, `dsl.Condition` forms a subDAG with one task from `print_op`
inside the subDAG. The task of `print_op` references output from `random_num`
task, which is outside the sub-DAG. When compiling to IR, such cross DAG
reference is disallowed. So we need to "punch a hole" in the sub-DAG to make
    the input available in the subDAG component inputs if it's not already there.
Next, we can call this method to fix the tasks inside the subDAG to make them
reference the component inputs instead of directly referencing the original
producer task.
Args:
task: The task to build a PipelineTaskSpec for.
parent_component_inputs: The task's parent component's input specs.
tasks_in_current_dag: The list of tasks names for tasks in the same dag.
input_parameters_in_current_dag: The list of input parameters in the DAG
component.
input_artifacts_in_current_dag: The list of input artifacts in the DAG
component.
Returns:
A PipelineTaskSpec object representing the task.
"""
pipeline_task_spec = pipeline_spec_pb2.PipelineTaskSpec()
pipeline_task_spec.task_info.name = (
task.task_spec.display_name or task.name)
# Use task.name for component_ref.name because we may customize component
# spec for individual tasks to work around the lack of optional inputs
# support in IR.
pipeline_task_spec.component_ref.name = (
component_utils.sanitize_component_name(task.name))
pipeline_task_spec.caching_options.enable_cache = (
task.task_spec.enable_caching)
for input_name, input_value in task.inputs.items():
input_type = task.component_spec.inputs[input_name].type
if isinstance(input_value, pipeline_channel.PipelineArtifactChannel):
if input_value.task_name:
# Value is produced by an upstream task.
if input_value.task_name in tasks_in_current_dag:
# Dependent task within the same DAG.
pipeline_task_spec.inputs.artifacts[
input_name].task_output_artifact.producer_task = (
component_utils.sanitize_task_name(
input_value.task_name))
pipeline_task_spec.inputs.artifacts[
input_name].task_output_artifact.output_artifact_key = (
input_value.name)
else:
# Dependent task not from the same DAG.
component_input_artifact = (
_additional_input_name_for_pipeline_channel(input_value)
)
assert component_input_artifact in parent_component_inputs.artifacts, \
'component_input_artifact: {} not found. All inputs: {}'.format(
component_input_artifact, parent_component_inputs)
pipeline_task_spec.inputs.artifacts[
input_name].component_input_artifact = (
component_input_artifact)
else:
raise RuntimeError(
f'Artifacts must be produced by a task. Got {input_value}.')
elif isinstance(input_value, pipeline_channel.PipelineParameterChannel):
if input_value.task_name:
# Value is produced by an upstream task.
if input_value.task_name in tasks_in_current_dag:
# Dependent task within the same DAG.
pipeline_task_spec.inputs.parameters[
input_name].task_output_parameter.producer_task = (
component_utils.sanitize_task_name(
input_value.task_name))
pipeline_task_spec.inputs.parameters[
input_name].task_output_parameter.output_parameter_key = (
input_value.name)
else:
# Dependent task not from the same DAG.
component_input_parameter = (
_additional_input_name_for_pipeline_channel(input_value)
)
assert component_input_parameter in parent_component_inputs.parameters, \
'component_input_parameter: {} not found. All inputs: {}'.format(
component_input_parameter, parent_component_inputs)
pipeline_task_spec.inputs.parameters[
input_name].component_input_parameter = (
component_input_parameter)
else:
# Value is from pipeline input.
component_input_parameter = input_value.full_name
if component_input_parameter not in parent_component_inputs.parameters:
component_input_parameter = (
_additional_input_name_for_pipeline_channel(input_value)
)
pipeline_task_spec.inputs.parameters[
input_name].component_input_parameter = (
component_input_parameter)
elif isinstance(input_value, for_loop.LoopArgument):
component_input_parameter = (
_additional_input_name_for_pipeline_channel(input_value))
assert component_input_parameter in parent_component_inputs.parameters, \
'component_input_parameter: {} not found. All inputs: {}'.format(
component_input_parameter, parent_component_inputs)
pipeline_task_spec.inputs.parameters[
input_name].component_input_parameter = (
component_input_parameter)
elif isinstance(input_value, for_loop.LoopArgumentVariable):
component_input_parameter = (
_additional_input_name_for_pipeline_channel(
input_value.loop_argument))
assert component_input_parameter in parent_component_inputs.parameters, \
'component_input_parameter: {} not found. All inputs: {}'.format(
component_input_parameter, parent_component_inputs)
pipeline_task_spec.inputs.parameters[
input_name].component_input_parameter = (
component_input_parameter)
pipeline_task_spec.inputs.parameters[
input_name].parameter_expression_selector = (
'parseJson(string_value)["{}"]'.format(
input_value.subvar_name))
elif isinstance(input_value, str):
# Handle extra input due to string concat
pipeline_channels = (
pipeline_channel.extract_pipeline_channels_from_any(input_value)
)
for channel in pipeline_channels:
# value contains PipelineChannel placeholders which needs to be
# replaced. And the input needs to be added to the task spec.
# Form the name for the compiler injected input, and make sure it
# doesn't collide with any existing input names.
additional_input_name = (
_additional_input_name_for_pipeline_channel(channel))
# We don't expect collision to happen because we prefix the name
# of additional input with 'pipelinechannel--'. But just in case
                # collision did happen, throw a RuntimeError so that we don't
                # get surprised at runtime.
for existing_input_name, _ in task.inputs.items():
if existing_input_name == additional_input_name:
raise RuntimeError(
'Name collision between existing input name '
'{} and compiler injected input name {}'.format(
existing_input_name, additional_input_name))
additional_input_placeholder = (
placeholders.input_parameter_placeholder(
additional_input_name))
input_value = input_value.replace(channel.pattern,
additional_input_placeholder)
if channel.task_name:
# Value is produced by an upstream task.
if channel.task_name in tasks_in_current_dag:
# Dependent task within the same DAG.
pipeline_task_spec.inputs.parameters[
additional_input_name].task_output_parameter.producer_task = (
component_utils.sanitize_task_name(
channel.task_name))
                        pipeline_task_spec.inputs.parameters[
                            additional_input_name].task_output_parameter.output_parameter_key = (
                                channel.name)
else:
# Dependent task not from the same DAG.
component_input_parameter = (
_additional_input_name_for_pipeline_channel(channel)
)
assert component_input_parameter in parent_component_inputs.parameters, \
'component_input_parameter: {} not found. All inputs: {}'.format(
component_input_parameter, parent_component_inputs)
pipeline_task_spec.inputs.parameters[
additional_input_name].component_input_parameter = (
component_input_parameter)
else:
# Value is from pipeline input. (or loop?)
component_input_parameter = channel.full_name
if component_input_parameter not in parent_component_inputs.parameters:
component_input_parameter = (
_additional_input_name_for_pipeline_channel(channel)
)
pipeline_task_spec.inputs.parameters[
additional_input_name].component_input_parameter = (
component_input_parameter)
pipeline_task_spec.inputs.parameters[
input_name].runtime_value.constant.string_value = input_value
elif isinstance(input_value, (str, int, float, bool, dict, list)):
pipeline_task_spec.inputs.parameters[
input_name].runtime_value.constant.CopyFrom(
_to_protobuf_value(input_value))
else:
raise ValueError(
'Input argument supports only the following types: '
'str, int, float, bool, dict, and list.'
f'Got {input_value} of type {type(input_value)}.')
return pipeline_task_spec
def build_component_spec_for_task(
task: pipeline_task.PipelineTask) -> pipeline_spec_pb2.ComponentSpec:
"""Builds ComponentSpec for a pipeline task.
Args:
task: The task to build a ComponentSpec for.
Returns:
A ComponentSpec object for the task.
"""
component_spec = pipeline_spec_pb2.ComponentSpec()
component_spec.executor_label = component_utils.sanitize_executor_label(
task.name)
for input_name, input_spec in (task.component_spec.inputs or {}).items():
# skip inputs not present, as a workaround to support optional inputs.
if input_name not in task.inputs:
continue
if type_utils.is_parameter_type(input_spec.type):
component_spec.input_definitions.parameters[
input_name].parameter_type = type_utils.get_parameter_type(
input_spec.type)
else:
component_spec.input_definitions.artifacts[
input_name].artifact_type.CopyFrom(
type_utils.get_artifact_type_schema(input_spec.type))
for output_name, output_spec in (task.component_spec.outputs or {}).items():
if type_utils.is_parameter_type(output_spec.type):
component_spec.output_definitions.parameters[
output_name].parameter_type = type_utils.get_parameter_type(
output_spec.type)
else:
component_spec.output_definitions.artifacts[
output_name].artifact_type.CopyFrom(
type_utils.get_artifact_type_schema(output_spec.type))
return component_spec
def build_importer_spec_for_task(
task: pipeline_task.PipelineTask
) -> pipeline_spec_pb2.PipelineDeploymentConfig.ImporterSpec:
"""Builds ImporterSpec for a pipeline task.
Args:
        task: The task to build an ImporterSpec for.
    Returns:
        An ImporterSpec object for the task.
"""
type_schema = type_utils.get_artifact_type_schema(task.importer_spec.type_schema)
importer_spec = pipeline_spec_pb2.PipelineDeploymentConfig.ImporterSpec(
type_schema=type_schema,
reimport=task.importer_spec.reimport)
if task.importer_spec.metadata:
metadata_protobuf_struct = struct_pb2.Struct()
metadata_protobuf_struct.update(task.importer_spec.metadata)
importer_spec.metadata.CopyFrom(metadata_protobuf_struct)
if isinstance(task.importer_spec.artifact_uri, pipeline_channel.PipelineParameterChannel):
importer_spec.artifact_uri.runtime_parameter = 'uri'
elif isinstance(task.importer_spec.artifact_uri, str):
importer_spec.artifact_uri.constant.string_value = task.importer_spec.artifact_uri
return importer_spec
def build_container_spec_for_task(
task: pipeline_task.PipelineTask
) -> pipeline_spec_pb2.PipelineDeploymentConfig.PipelineContainerSpec:
"""Builds PipelineContainerSpec for a pipeline task.
Args:
        task: The task to build a PipelineContainerSpec for.
Returns:
A PipelineContainerSpec object for the task.
"""
container_spec = (
pipeline_spec_pb2.PipelineDeploymentConfig.PipelineContainerSpec(
image=task.container_spec.image,
command=task.container_spec.commands,
args=task.container_spec.arguments,
env=[
pipeline_spec_pb2.PipelineDeploymentConfig.PipelineContainerSpec
.EnvVar(name=name, value=value)
for name, value in (task.container_spec.env or {}).items()
]
))
if task.container_spec.resources is not None:
container_spec.resources.cpu_limit = (
task.container_spec.resources.cpu_limit)
container_spec.resources.memory_limit = (
task.container_spec.resources.memory_limit)
if task.container_spec.resources.accelerator_count is not None:
container_spec.resources.accelerator.CopyFrom(
pipeline_spec_pb2.PipelineDeploymentConfig.PipelineContainerSpec
.ResourceSpec.AcceleratorConfig(
type=task.container_spec.resources.accelerator_type,
count=task.container_spec.resources.accelerator_count,
))
return container_spec
def _fill_in_component_input_default_value(
component_spec: pipeline_spec_pb2.ComponentSpec,
input_name: str,
default_value: Optional[type_utils.PARAMETER_TYPES],
) -> None:
"""Fills in the default of component input parameter.
Args:
component_spec: The ComponentSpec to update in place.
input_name: The name of the input parameter.
default_value: The default value of the input parameter.
"""
if default_value is None:
return
parameter_type = component_spec.input_definitions.parameters[
input_name].parameter_type
if pipeline_spec_pb2.ParameterType.NUMBER_INTEGER == parameter_type:
component_spec.input_definitions.parameters[
input_name].default_value.number_value = default_value
elif pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE == parameter_type:
component_spec.input_definitions.parameters[
input_name].default_value.number_value = default_value
elif pipeline_spec_pb2.ParameterType.STRING == parameter_type:
component_spec.input_definitions.parameters[
input_name].default_value.string_value = default_value
elif pipeline_spec_pb2.ParameterType.BOOLEAN == parameter_type:
component_spec.input_definitions.parameters[
input_name].default_value.bool_value = default_value
elif pipeline_spec_pb2.ParameterType.STRUCT == parameter_type:
component_spec.input_definitions.parameters[
input_name].default_value.CopyFrom(
_to_protobuf_value(default_value))
elif pipeline_spec_pb2.ParameterType.LIST == parameter_type:
component_spec.input_definitions.parameters[
input_name].default_value.CopyFrom(
_to_protobuf_value(default_value))
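# Illustrative usage sketch (not part of the kfp SDK): shows how the helper
# above writes a STRING default into the matching field of the proto. The
# input name 'text' and the default 'hello' are assumptions for the example.
def _example_fill_string_default():
    spec = pipeline_spec_pb2.ComponentSpec()
    spec.input_definitions.parameters[
        'text'].parameter_type = pipeline_spec_pb2.ParameterType.STRING
    _fill_in_component_input_default_value(
        component_spec=spec, input_name='text', default_value='hello')
    # The default now lives in default_value.string_value of the parameter spec.
    return spec.input_definitions.parameters['text'].default_value.string_value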
def build_component_spec_for_group(
pipeline_channels: List[pipeline_channel.PipelineChannel],
is_root_group: bool,
) -> pipeline_spec_pb2.ComponentSpec:
"""Builds ComponentSpec for a TasksGroup.
Args:
        pipeline_channels: The list of pipeline channels referenced by the group.
        is_root_group: Whether the group is the root group of the pipeline.
    Returns:
        A ComponentSpec object for the group.
"""
component_spec = pipeline_spec_pb2.ComponentSpec()
for channel in pipeline_channels:
input_name = (
channel.name if is_root_group else
_additional_input_name_for_pipeline_channel(channel))
if isinstance(channel, pipeline_channel.PipelineArtifactChannel):
component_spec.input_definitions.artifacts[
input_name].artifact_type.CopyFrom(
type_utils.get_artifact_type_schema(channel.channel_type))
else:
# channel is one of PipelineParameterChannel, LoopArgument, or
# LoopArgumentVariable.
component_spec.input_definitions.parameters[
input_name].parameter_type = type_utils.get_parameter_type(
channel.channel_type)
# TODO: should we fill in default value for all groups and tasks?
if is_root_group:
_fill_in_component_input_default_value(
component_spec=component_spec,
input_name=input_name,
default_value=channel.value,
)
return component_spec
def _pop_input_from_task_spec(
task_spec: pipeline_spec_pb2.PipelineTaskSpec,
input_name: str,
) -> None:
"""Removes an input from task spec inputs.
Args:
task_spec: The pipeline task spec to update in place.
        input_name: The name of the input, which could be an artifact or parameter.
"""
task_spec.inputs.artifacts.pop(input_name)
task_spec.inputs.parameters.pop(input_name)
if task_spec.inputs == pipeline_spec_pb2.TaskInputsSpec():
task_spec.ClearField('inputs')
def _update_task_spec_for_loop_group(
group: tasks_group.ParallelFor,
pipeline_task_spec: pipeline_spec_pb2.PipelineTaskSpec,
) -> None:
"""Updates PipelineTaskSpec for loop group.
Args:
group: The loop group to update task spec for.
pipeline_task_spec: The pipeline task spec to update in place.
"""
if group.items_is_pipeline_channel:
loop_items_channel = group.loop_argument.items_or_pipeline_channel
input_parameter_name = _additional_input_name_for_pipeline_channel(
loop_items_channel)
loop_argument_item_name = _additional_input_name_for_pipeline_channel(
group.loop_argument.full_name)
loop_arguments_item = '{}-{}'.format(
input_parameter_name, for_loop.LoopArgument.LOOP_ITEM_NAME_BASE)
assert loop_arguments_item == loop_argument_item_name
pipeline_task_spec.parameter_iterator.items.input_parameter = (
input_parameter_name)
pipeline_task_spec.parameter_iterator.item_input = (
loop_argument_item_name)
        # If the loop items channel is itself a loop arguments variable, handle
        # the subvar name.
if isinstance(loop_items_channel, for_loop.LoopArgumentVariable):
pipeline_task_spec.inputs.parameters[
input_parameter_name].parameter_expression_selector = (
'parseJson(string_value)["{}"]'.format(
loop_items_channel.subvar_name))
pipeline_task_spec.inputs.parameters[
input_parameter_name].component_input_parameter = (
_additional_input_name_for_pipeline_channel(
loop_items_channel.loop_argument))
remove_input_name = loop_argument_item_name
else:
input_parameter_name = _additional_input_name_for_pipeline_channel(
group.loop_argument)
raw_values = group.loop_argument.items_or_pipeline_channel
pipeline_task_spec.parameter_iterator.items.raw = json.dumps(
raw_values, sort_keys=True)
pipeline_task_spec.parameter_iterator.item_input = (
input_parameter_name)
_pop_input_from_task_spec(
task_spec=pipeline_task_spec,
input_name=pipeline_task_spec.parameter_iterator.item_input)
def _resolve_condition_operands(
left_operand: Union[str, pipeline_channel.PipelineChannel],
right_operand: Union[str, pipeline_channel.PipelineChannel],
) -> Tuple[str, str]:
"""Resolves values and PipelineChannels for condition operands.
Args:
left_operand: The left operand of a condition expression.
right_operand: The right operand of a condition expression.
Returns:
A tuple of the resolved operands values:
(left_operand_value, right_operand_value).
"""
    # Pre-scan the operands to get the type of the constant value, if there is any.
# The value_type can be used to backfill missing PipelineChannel.channel_type.
value_type = None
for value_or_reference in [left_operand, right_operand]:
if isinstance(value_or_reference, pipeline_channel.PipelineChannel):
parameter_type = type_utils.get_parameter_type(
value_or_reference.channel_type)
if parameter_type in [
pipeline_spec_pb2.ParameterType.STRUCT,
pipeline_spec_pb2.ParameterType.LIST,
pipeline_spec_pb2.ParameterType
.PARAMETER_TYPE_ENUM_UNSPECIFIED,
]:
input_name = _additional_input_name_for_pipeline_channel(
value_or_reference)
raise ValueError('Conditional requires scalar parameter values'
' for comparison. Found input "{}" of type {}'
' in pipeline definition instead.'.format(
input_name,
value_or_reference.channel_type))
parameter_types = set()
for value_or_reference in [left_operand, right_operand]:
if isinstance(value_or_reference, pipeline_channel.PipelineChannel):
parameter_type = type_utils.get_parameter_type(
value_or_reference.channel_type)
else:
parameter_type = type_utils.get_parameter_type(
type(value_or_reference).__name__)
parameter_types.add(parameter_type)
if len(parameter_types) == 2:
# Two different types being compared. The only possible types are
# String, Boolean, Double and Integer. We'll promote the other type
# using the following precedence:
# String > Boolean > Double > Integer
if pipeline_spec_pb2.ParameterType.STRING in parameter_types:
canonical_parameter_type = pipeline_spec_pb2.ParameterType.STRING
elif pipeline_spec_pb2.ParameterType.BOOLEAN in parameter_types:
canonical_parameter_type = pipeline_spec_pb2.ParameterType.BOOLEAN
else:
# Must be a double and int, promote to double.
assert pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE in parameter_types, \
'Types: {} [{} {}]'.format(
parameter_types, left_operand, right_operand)
assert pipeline_spec_pb2.ParameterType.NUMBER_INTEGER in parameter_types, \
'Types: {} [{} {}]'.format(
parameter_types, left_operand, right_operand)
canonical_parameter_type = pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE
elif len(parameter_types) == 1: # Both operands are the same type.
canonical_parameter_type = parameter_types.pop()
else:
# Probably shouldn't happen.
raise ValueError('Unable to determine operand types for'
' "{}" and "{}"'.format(left_operand, right_operand))
operand_values = []
for value_or_reference in [left_operand, right_operand]:
if isinstance(value_or_reference, pipeline_channel.PipelineChannel):
input_name = _additional_input_name_for_pipeline_channel(
value_or_reference)
operand_value = "inputs.parameter_values['{input_name}']".format(
input_name=input_name)
parameter_type = type_utils.get_parameter_type(
value_or_reference.channel_type)
if parameter_type == pipeline_spec_pb2.ParameterType.NUMBER_INTEGER:
operand_value = 'int({})'.format(operand_value)
elif isinstance(value_or_reference, str):
operand_value = "'{}'".format(value_or_reference)
parameter_type = pipeline_spec_pb2.ParameterType.STRING
elif isinstance(value_or_reference, bool):
# Booleans need to be compared as 'true' or 'false' in CEL.
operand_value = str(value_or_reference).lower()
parameter_type = pipeline_spec_pb2.ParameterType.BOOLEAN
elif isinstance(value_or_reference, int):
operand_value = str(value_or_reference)
parameter_type = pipeline_spec_pb2.ParameterType.NUMBER_INTEGER
else:
assert isinstance(value_or_reference, float), value_or_reference
operand_value = str(value_or_reference)
parameter_type = pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE
if parameter_type != canonical_parameter_type:
            # Type-cast so that CEL does not complain.
if canonical_parameter_type == pipeline_spec_pb2.ParameterType.STRING:
assert parameter_type in [
pipeline_spec_pb2.ParameterType.BOOLEAN,
pipeline_spec_pb2.ParameterType.NUMBER_INTEGER,
pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE,
]
operand_value = "'{}'".format(operand_value)
elif canonical_parameter_type == pipeline_spec_pb2.ParameterType.BOOLEAN:
assert parameter_type in [
pipeline_spec_pb2.ParameterType.NUMBER_INTEGER,
pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE,
]
                operand_value = 'false' if int(operand_value) == 0 else 'true'
else:
assert canonical_parameter_type == pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE
assert parameter_type == pipeline_spec_pb2.ParameterType.NUMBER_INTEGER
operand_value = 'double({})'.format(operand_value)
operand_values.append(operand_value)
return tuple(operand_values)
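# Standalone sketch (not kfp code) of the promotion order applied above when
# the two operands have different types: String > Boolean > Double > Integer.
# The function name and the plain Python type names are illustrative.
def _example_promotion_order(type_names):
    for canonical in ('str', 'bool', 'float', 'int'):
        if canonical in type_names:
            return canonical
    raise ValueError('unsupported operand types: {}'.format(type_names))
# _example_promotion_order({'int', 'float'}) -> 'float'  (double wins over int)
# _example_promotion_order({'bool', 'str'})  -> 'str'    (string wins over everything)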
def _update_task_spec_for_condition_group(
group: tasks_group.Condition,
pipeline_task_spec: pipeline_spec_pb2.PipelineTaskSpec,
) -> None:
"""Updates PipelineTaskSpec for condition group.
Args:
group: The condition group to update task spec for.
pipeline_task_spec: The pipeline task spec to update in place.
"""
left_operand_value, right_operand_value = _resolve_condition_operands(
group.condition.left_operand, group.condition.right_operand)
condition_string = (
f'{left_operand_value} {group.condition.operator} {right_operand_value}'
)
pipeline_task_spec.trigger_policy.CopyFrom(
pipeline_spec_pb2.PipelineTaskSpec.TriggerPolicy(
condition=condition_string))
def build_task_spec_for_exit_task(
task: pipeline_task.PipelineTask,
dependent_task: str,
pipeline_inputs: pipeline_spec_pb2.ComponentInputsSpec,
) -> pipeline_spec_pb2.PipelineTaskSpec:
"""Builds PipelineTaskSpec for an exit handler's exit task.
Args:
        task: The exit handler's exit task to build task spec for.
dependent_task: The dependent task name for the exit task, i.e. the name
of the exit handler group.
pipeline_inputs: The pipeline level input definitions.
Returns:
A PipelineTaskSpec object representing the exit task.
"""
pipeline_task_spec = build_task_spec_for_task(
task=task,
parent_component_inputs=pipeline_inputs,
tasks_in_current_dag=[], # Does not matter for exit task
input_parameters_in_current_dag=pipeline_inputs.parameters.keys(),
input_artifacts_in_current_dag=[],
)
pipeline_task_spec.dependent_tasks.extend([dependent_task])
pipeline_task_spec.trigger_policy.strategy = (
pipeline_spec_pb2.PipelineTaskSpec.TriggerPolicy.TriggerStrategy
.ALL_UPSTREAM_TASKS_COMPLETED)
return pipeline_task_spec
def build_task_spec_for_group(
group: tasks_group.TasksGroup,
pipeline_channels: List[pipeline_channel.PipelineChannel],
tasks_in_current_dag: List[str],
is_parent_component_root: bool,
) -> pipeline_spec_pb2.PipelineTaskSpec:
"""Builds PipelineTaskSpec for a group.
Args:
group: The group to build PipelineTaskSpec for.
pipeline_channels: The list of pipeline channels referenced by the group.
tasks_in_current_dag: The list of tasks names for tasks in the same dag.
is_parent_component_root: Whether the parent component is the pipeline's
root dag.
Returns:
A PipelineTaskSpec object representing the group.
"""
pipeline_task_spec = pipeline_spec_pb2.PipelineTaskSpec()
pipeline_task_spec.task_info.name = group.display_name or group.name
pipeline_task_spec.component_ref.name = (
component_utils.sanitize_component_name(group.name))
for channel in pipeline_channels:
channel_full_name = channel.full_name
subvar_name = None
if isinstance(channel, for_loop.LoopArgumentVariable):
channel_full_name = channel.loop_argument.full_name
subvar_name = channel.subvar_name
input_name = _additional_input_name_for_pipeline_channel(channel)
channel_name = channel.name
if subvar_name:
pipeline_task_spec.inputs.parameters[
input_name].parameter_expression_selector = (
'parseJson(string_value)["{}"]'.format(subvar_name))
if not channel.is_with_items_loop_argument:
channel_name = channel.items_or_pipeline_channel.name
if isinstance(channel, pipeline_channel.PipelineArtifactChannel):
if channel.task_name and channel.task_name in tasks_in_current_dag:
pipeline_task_spec.inputs.artifacts[
input_name].task_output_artifact.producer_task = (
component_utils.sanitize_task_name(channel.task_name))
pipeline_task_spec.inputs.artifacts[
input_name].task_output_artifact.output_artifact_key = (
channel_name)
else:
pipeline_task_spec.inputs.artifacts[
input_name].component_input_artifact = (
channel_full_name
if is_parent_component_root else input_name)
else:
# channel is one of PipelineParameterChannel, LoopArgument, or
# LoopArgumentVariable
if channel.task_name and channel.task_name in tasks_in_current_dag:
pipeline_task_spec.inputs.parameters[
input_name].task_output_parameter.producer_task = (
component_utils.sanitize_task_name(channel.task_name))
pipeline_task_spec.inputs.parameters[
input_name].task_output_parameter.output_parameter_key = (
channel_name)
else:
pipeline_task_spec.inputs.parameters[
input_name].component_input_parameter = (
channel_full_name if is_parent_component_root else
_additional_input_name_for_pipeline_channel(
channel_full_name))
if isinstance(group, tasks_group.ParallelFor):
_update_task_spec_for_loop_group(
group=group,
pipeline_task_spec=pipeline_task_spec,
)
elif isinstance(group, tasks_group.Condition):
_update_task_spec_for_condition_group(
group=group,
pipeline_task_spec=pipeline_task_spec,
)
return pipeline_task_spec
def populate_metrics_in_dag_outputs(
tasks: List[pipeline_task.PipelineTask],
task_name_to_parent_groups: Mapping[str, List[_GroupOrTask]],
task_name_to_task_spec: Mapping[str, pipeline_spec_pb2.PipelineTaskSpec],
task_name_to_component_spec: Mapping[str, pipeline_spec_pb2.ComponentSpec],
pipeline_spec: pipeline_spec_pb2.PipelineSpec,
) -> None:
"""Populates metrics artifacts in DAG outputs.
Args:
tasks: The list of tasks that may produce metrics outputs.
task_name_to_parent_groups: The dict of task name to parent groups.
Key is the task's name. Value is a list of ancestor groups including
            the task itself. The list for a given task is sorted so that the
            farthest ancestor group comes first and the task itself comes last.
task_name_to_task_spec: The dict of task name to PipelineTaskSpec.
task_name_to_component_spec: The dict of task name to ComponentSpec.
pipeline_spec: The pipeline_spec to update in-place.
"""
for task in tasks:
task_spec = task_name_to_task_spec[task.name]
component_spec = task_name_to_component_spec[task.name]
# Get the tuple of (component_name, task_name) of all its parent groups.
parent_components_and_tasks = [('_root', '')]
        # Skip the task itself and the root group, which cannot be retrieved via name.
for group_name in task_name_to_parent_groups[task.name][1:-1]:
parent_components_and_tasks.append(
(component_utils.sanitize_component_name(group_name),
component_utils.sanitize_task_name(group_name)))
        # Reverse the order so that the farthest group comes last.
parent_components_and_tasks.reverse()
for output_name, artifact_spec in \
component_spec.output_definitions.artifacts.items():
if artifact_spec.artifact_type.WhichOneof(
'kind'
) == 'schema_title' and artifact_spec.artifact_type.schema_title in [
artifact_types.Metrics.TYPE_NAME,
artifact_types.ClassificationMetrics.TYPE_NAME,
]:
unique_output_name = '{}-{}'.format(task.name, output_name)
sub_task_name = task.name
sub_task_output = output_name
for component_name, task_name in parent_components_and_tasks:
group_component_spec = (
pipeline_spec.root if component_name == '_root' else
pipeline_spec.components[component_name])
group_component_spec.output_definitions.artifacts[
unique_output_name].CopyFrom(artifact_spec)
group_component_spec.dag.outputs.artifacts[
unique_output_name].artifact_selectors.append(
pipeline_spec_pb2.DagOutputsSpec
.ArtifactSelectorSpec(
producer_subtask=sub_task_name,
output_artifact_key=sub_task_output,
))
sub_task_name = task_name
sub_task_output = unique_output_name
| 45.597285 | 97 | 0.65838 |
ca52f03b6db804092857015ac60e10e14d83ba92
| 3,559 |
py
|
Python
|
lib/wx/ogllib.py
|
tacaswell/pyepics
|
de865c9cbdfdaf9826165de2b5ba9f45d0939355
|
[
"OML"
] | null | null | null |
lib/wx/ogllib.py
|
tacaswell/pyepics
|
de865c9cbdfdaf9826165de2b5ba9f45d0939355
|
[
"OML"
] | null | null | null |
lib/wx/ogllib.py
|
tacaswell/pyepics
|
de865c9cbdfdaf9826165de2b5ba9f45d0939355
|
[
"OML"
] | null | null | null |
"""
wx OGL (2d graphics library) utility functions for Epics and wxPython
interaction
OGL is a (somewhat old-fashioned) 2D drawing library included with wxPython.
There are probably newer/better drawing libraries, but OGL works quite well
for drawing simple shapes or bitmaps.
"""
import wx.lib.ogl as ogl
from wxlib import PVMixin
class PVShapeMixin(PVMixin):
"""
Mixin for any Shape that has PV callback support
"""
def __init__(self, pv=None, pvname=None):
PVMixin.__init__(self, pv, pvname)
self.brushTranslations = {}
self.penTranslations = {}
self.shownTranslations = {}
def SetBrushTranslations(self, translations):
"""
Set a dictionary of value->brush translations that will be set automatically
when the PV value changes. The brush is used to paint the shape foreground
The argument should be a dictionary with keys as PV values (string if available), and values
as wx.Brush instances.
"""
self.brushTranslations = translations
def SetPenTranslations(self, translations):
"""
        Set a dictionary of value->pen translations that will be set automatically
        when the PV value changes. The pen is used to paint the shape outline.
        The argument should be a dictionary with keys as PV values (string if available), and values
        as wx.Pen instances.
"""
self.penTranslations = translations
def SetShownTranslations(self, translations):
"""
Set a dictionary of value->boolean 'Shown' translations that will be set automatically
when the PV value changes. The value is used to show/hide the shape.
"""
self.shownTranslations = translations
def OnPVChange(self, raw_value):
"""
        Do not override this method; override PVChanged if you would like to add
        any custom callback behaviour.
"""
if raw_value in self.brushTranslations:
self.SetBrush(self.brushTranslations[raw_value])
if raw_value in self.penTranslations:
self.SetPen(self.penTranslations[raw_value])
if raw_value in self.shownTranslations:
self.Show(self.shownTranslations[raw_value])
self.PVChanged(raw_value)
self.Invalidate()
def PVChanged(self, raw_value):
"""
Override this method if you want your shape to do any special processing when the
PV changes
Note that the shape will be automatically invalidated (redrawn) after this method is called.
"""
pass
def Invalidate(self):
"""
Invalidate the shape's area on the parent shape canvas to cause a redraw
(convenience method)
"""
(w, h) = self.GetBoundingBoxMax()
x = self.GetX()
y = self.GetY()
self.GetCanvas().RefreshRect((x-w/2, y-h/2, w, h))
class PVRectangle(ogl.RectangleShape, PVShapeMixin):
"""
A RectangleShape which is associated with a particular PV value
"""
def __init__(self, w, h, pv=None, pvname=None):
ogl.RectangleShape.__init__(self, w, h)
PVShapeMixin.__init__(self, pv, pvname)
class PVCircle(ogl.CircleShape, PVShapeMixin):
"""
A CircleShape which is associated with a particular PV value
"""
def __init__(self, diameter, pv=None, pvname=None):
ogl.CircleShape.__init__(self, diameter)
PVShapeMixin.__init__(self, pv, pvname)
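# Usage sketch (assumptions: a live EPICS PV named 'XXX:STATUS' whose enum
# string values include 'OK' and 'FAULT', and an existing ogl.ShapeCanvas;
# neither is provided by this module).
def _example_status_circle(canvas):
    """Add a circle whose fill colour follows the value of an enum PV."""
    import wx
    circle = PVCircle(40, pvname='XXX:STATUS')
    circle.SetBrushTranslations({'OK': wx.GREEN_BRUSH, 'FAULT': wx.RED_BRUSH})
    circle.SetCanvas(canvas)
    canvas.AddShape(circle)
    circle.Show(True)
    return circle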
| 31.776786 | 100 | 0.64934 |
1a3137073d8f07d24e2551a07bd100679fdf6451
| 2,103 |
py
|
Python
|
Eager/github-scraper/python/ghanalysis.py
|
UCSB-CS-RACELab/eager-appscale
|
d58fe64bb867ef58af19c1d84a5e1ec68ecddd3d
|
[
"Apache-2.0"
] | 3 |
2016-06-12T01:18:49.000Z
|
2018-07-16T18:20:23.000Z
|
Eager/github-scraper/python/ghanalysis.py
|
UCSB-CS-RACELab/eager-appscale
|
d58fe64bb867ef58af19c1d84a5e1ec68ecddd3d
|
[
"Apache-2.0"
] | null | null | null |
Eager/github-scraper/python/ghanalysis.py
|
UCSB-CS-RACELab/eager-appscale
|
d58fe64bb867ef58af19c1d84a5e1ec68ecddd3d
|
[
"Apache-2.0"
] | 1 |
2020-05-25T02:59:15.000Z
|
2020-05-25T02:59:15.000Z
|
from time import sleep
import datetime
import getpass
import keyring
import traceback
from github import Github
DEBUG = False
earliest = datetime.datetime(2012,1,1)
def getAERepos(username):
count = 0
try:
g = Github(username, getGithubPassword(username))
for repo in g.legacy_search_repos('app engine'):
count += 1
try:
#if repo.updated_at > earliest or repo.pushed_at > earliest:
if repo.pushed_at > earliest:
try:
print '{0};{1};{2};{3};{4};{5};{6};{7};{8}'.format(
repo.name,
repo.created_at.date(),
repo.updated_at.date(),
repo.pushed_at.date(),
repo.owner.login,
repo.language,
repo.forks,
repo.watchers,
repo.description)
except:
print 'ERROR unable to print description of repo {0}'.format(repo.name)
if DEBUG and count > 10:
break
if 'appscale' in repo.name.lower():
print '\tFound AppScale!'
except:
print 'ERROR1 unable to get repo'
sleep(2)
except:
print 'ERROR2 unable to get anything'
def printRepository(username):
g = Github(username, getGithubPassword(username))
user = g.get_user()
repositories = user.get_repos()
for repository in repositories:
print repository.name
printBranches(repository)
def printBranches(repository):
for branch in repository.get_branches():
print ' ', branch.name
tree = branch.commit.commit.tree
printTree(repository, tree, ' ')
def printTree(repository, tree, indent):
for element in tree.tree:
print indent, element.path
if element.type == 'tree':
printTree(repository, repository.get_git_tree(element.sha), indent + ' ')
def getGithubPassword(username):
service = 'github'
password = keyring.get_password(service, username)
if password == None:
print "Enter password for user", username
password = getpass.getpass()
keyring.set_password(service, username, password)
return password
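# Generic sketch of the keyring caching pattern used above: prompt once, then
# reuse the stored secret on later runs. The names below are illustrative.
def get_cached_secret(service, username):
    secret = keyring.get_password(service, username)
    if secret is None:
        secret = getpass.getpass('Password for {0}@{1}: '.format(username, service))
        keyring.set_password(service, username, secret)
    return secret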
# Pass your Github username as a parameter
#printRepository('ckrintz')
# step through the repos with keyword 'app engine'
getAERepos('ckrintz')
| 26.2875 | 77 | 0.681883 |
a6df70ee0b1530dad9e21d21e1f79cdd1911e91d
| 2,100 |
py
|
Python
|
datalad/core/local/repo.py
|
ypid/datalad
|
d50e6135a35edca9406faa44953a7c8ba61ee8c0
|
[
"MIT"
] | 298 |
2015-01-25T17:36:29.000Z
|
2022-03-20T03:38:47.000Z
|
datalad/core/local/repo.py
|
ypid/datalad
|
d50e6135a35edca9406faa44953a7c8ba61ee8c0
|
[
"MIT"
] | 6,387 |
2015-01-02T18:15:01.000Z
|
2022-03-31T20:58:58.000Z
|
datalad/core/local/repo.py
|
ypid/datalad
|
d50e6135a35edca9406faa44953a7c8ba61ee8c0
|
[
"MIT"
] | 109 |
2015-01-25T17:49:40.000Z
|
2022-03-06T06:54:54.000Z
|
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
""" Core repository-related functionality
"""
from datalad.support.exceptions import (
InvalidGitRepositoryError,
InvalidAnnexRepositoryError,
NoSuchPathError,
)
import logging
lgr = logging.getLogger('datalad.core.local.repo')
__all__ = ["repo_from_path"]
def repo_from_path(path):
"""Get a Repo instance from a path.
Parameters
----------
path : path-like
Root path of the repository.
Returns
-------
Repo
Repo instance matching the type of the repository at path.
Raises
------
ValueError
If no repository could be found at the path, or if its type could not
be determined.
"""
# keep the imports local for now until it is clearer what the module setup
# will be
from datalad.support.gitrepo import GitRepo
from datalad.support.annexrepo import AnnexRepo
repo = None
for cls, ckw, kw in (
# Non-initialized is okay. We want to figure the correct instance
# to represent what's there - that's it.
(AnnexRepo, {'allow_noninitialized': True}, {'init': False}),
(GitRepo, {}, {})
):
if not cls.is_valid_repo(path, **ckw):
continue
try:
lgr.log(5, "Detected %s at %s", cls, path)
repo = cls(path, create=False, **kw)
break
except (InvalidGitRepositoryError, NoSuchPathError,
InvalidAnnexRepositoryError) as exc:
lgr.log(
5,
"Ignore exception after inappropriate repository type guess: "
"%s", exc)
if repo is None:
raise ValueError('No repository at {}'.format(path))
return repo
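# Usage sketch (the default path below is an illustrative assumption; a real
# call needs an existing git or git-annex repository on disk).
def _example_flavor(path='/data/my-dataset'):
    """Return a short label for the repository flavor found at `path`."""
    from datalad.support.annexrepo import AnnexRepo
    repo = repo_from_path(path)
    return 'annex' if isinstance(repo, AnnexRepo) else 'git'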
| 28.378378 | 87 | 0.565238 |
b5e892caf0a0e81c866a6a3c22893dd0a4da1e19
| 2,138 |
gyp
|
Python
|
common_video/common_video_unittests.gyp
|
mostynb/webrtc
|
24a16656da7249c0d836247bc77dbc475f71f0af
|
[
"DOC",
"BSD-3-Clause"
] | 27 |
2016-04-27T01:02:03.000Z
|
2021-12-13T08:53:19.000Z
|
common_video/common_video_unittests.gyp
|
mostynb/webrtc
|
24a16656da7249c0d836247bc77dbc475f71f0af
|
[
"DOC",
"BSD-3-Clause"
] | 2 |
2017-03-09T09:00:50.000Z
|
2017-09-21T15:48:20.000Z
|
common_video/common_video_unittests.gyp
|
mostynb/webrtc
|
24a16656da7249c0d836247bc77dbc475f71f0af
|
[
"DOC",
"BSD-3-Clause"
] | 17 |
2016-04-27T02:06:39.000Z
|
2019-12-18T08:07:00.000Z
|
# Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
{
'includes': ['../build/common.gypi'],
'targets': [
{
'target_name': 'common_video_unittests',
'type': '<(gtest_target_type)',
'dependencies': [
'<(webrtc_root)/common_video/common_video.gyp:common_video',
'<(DEPTH)/testing/gtest.gyp:gtest',
'<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
'<(webrtc_root)/test/test.gyp:test_support_main',
'<(webrtc_root)/test/test.gyp:fake_video_frames',
],
'sources': [
'i420_buffer_pool_unittest.cc',
'i420_video_frame_unittest.cc',
'libyuv/libyuv_unittest.cc',
'libyuv/scaler_unittest.cc',
],
# Disable warnings to enable Win64 build, issue 1323.
'msvs_disabled_warnings': [
4267, # size_t to int truncation.
],
'conditions': [
['OS=="android"', {
'dependencies': [
'<(DEPTH)/testing/android/native_test.gyp:native_test_native_code',
],
}],
],
},
], # targets
'conditions': [
['OS=="android"', {
'targets': [
{
'target_name': 'common_video_unittests_apk_target',
'type': 'none',
'dependencies': [
'<(apk_tests_path):common_video_unittests_apk',
],
},
],
}],
['test_isolation_mode != "noop"', {
'targets': [
{
'target_name': 'common_video_unittests_run',
'type': 'none',
'dependencies': [
'common_video_unittests',
],
'includes': [
'../build/isolate.gypi',
],
'sources': [
'common_video_unittests.isolate',
],
},
],
}],
],
}
| 29.694444 | 79 | 0.561272 |
d36c7b83a5d28c23c66e10d2f59a8d5356d5d313
| 1,003 |
py
|
Python
|
tests/Python/OWASP_a1/cwe_78/safe/cwe_78__I_hardcoded_string_input__F_no_filtering__S_ls__1-4.7_File1.py
|
usnistgov/VTSG
|
f4477a78ec19f7e9757da0321cb5a69428e358cf
|
[
"MIT"
] | null | null | null |
tests/Python/OWASP_a1/cwe_78/safe/cwe_78__I_hardcoded_string_input__F_no_filtering__S_ls__1-4.7_File1.py
|
usnistgov/VTSG
|
f4477a78ec19f7e9757da0321cb5a69428e358cf
|
[
"MIT"
] | 1 |
2022-01-31T22:22:55.000Z
|
2022-01-31T22:22:55.000Z
|
tests/Python/OWASP_a1/cwe_78/safe/cwe_78__I_hardcoded_string_input__F_no_filtering__S_ls__1-4.7_File1.py
|
usnistgov/VTSG
|
f4477a78ec19f7e9757da0321cb5a69428e358cf
|
[
"MIT"
] | null | null | null |
'''
Hardcoded string input
no filtering
sink: run ls in a dir
'''
'''
Created by Paul E. Black and William Mentzer 2020
This software was developed at the National Institute of Standards and Technology
by employees of the Federal Government in the course of their official duties.
Pursuant to title 17 Section 105 of the United States Code the software is not
subject to copyright protection and are in the public domain.
We would appreciate acknowledgment if the software is used.
Paul E. Black [email protected]
William Mentzer [email protected]
'''
import math
import os
import sys
def main():
tainted_2 = None
tainted_3 = None
tainted_2 = "hardcoded"
tainted_3 = tainted_2
if((math.sqrt(42)<=42)):
# No filtering (sanitization)
tainted_3 = tainted_2
elif(not (math.sqrt(42)<=42)):
{}
os.system('ls ' + tainted_3);
if __name__ == '__main__':
main()
| 21.804348 | 81 | 0.656032 |
fd062110b6d9e963b2ad5e616c732b8751442b18
| 3,966 |
py
|
Python
|
2/src/ParamLearner.py
|
Trinkle23897/dip2018
|
c8a29f1d6dcc165906165caebd41389e1be23062
|
[
"MIT"
] | 4 |
2018-04-20T11:35:39.000Z
|
2019-04-10T02:43:18.000Z
|
2/src/ParamLearner.py
|
Trinkle23897/dip2018
|
c8a29f1d6dcc165906165caebd41389e1be23062
|
[
"MIT"
] | null | null | null |
2/src/ParamLearner.py
|
Trinkle23897/dip2018
|
c8a29f1d6dcc165906165caebd41389e1be23062
|
[
"MIT"
] | 3 |
2019-04-16T05:11:33.000Z
|
2019-05-01T13:22:52.000Z
|
#!/usr/bin/env python
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import torch.utils.model_zoo as model_zoo
import random
model_urls = {
'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',
}
class ParamLearner(models.AlexNet):
def __init__(self, hidden_size=4096, drop_rate=0.5):
super(ParamLearner, self).__init__()
self.drop_rate = drop_rate
self.load_state_dict(model_zoo.load_url(model_urls['alexnet']))
for param in self.parameters():
param.requires_grad = False
# Delete last fc layer
self.classifier.__delitem__(6)
# Define param learner
self.param_learner = nn.Linear(hidden_size, hidden_size)
# Initialized with identity matrix
self.param_learner.weight.data.copy_(torch.eye(hidden_size))
def forward(self, x, R):
x = self.features(x)
x = x.view(x.size(0), 256 * 6 * 6)
x = self.classifier(x)
# R is a (num_class, hidden_size) matrix, w is a (num_class, hidden_size) matrix
w = self.param_learner(R)
        dr = torch.nn.Dropout(p=self.drop_rate)  # note: this dropout module is created but never applied
x = torch.matmul(x, w.transpose(0, 1))
return x
def get_r(self, x):
x = self.features(x)
x = x.view(x.size(0), 256 * 6 * 6)
x = self.classifier(x)
return x
def forward_test(self, x, Rs):
x = self.features(x)
x = x.view(x.size(0), 256 * 6 * 6)
x = self.classifier(x)
logits = []
for i in range(x.shape[0]):
i_logits = []
for class_Rs in Rs:
# class_Rs is a (n, hidden_size) matrix, in which n is the number of training pictures of this class.
class_w = self.param_learner(class_Rs)
class_logit = torch.matmul(class_w, x[i])
i_logits.append(class_logit.max())
logits.append(torch.stack(i_logits))
return torch.stack(logits)
class ParamLearnerDataLoader(object):
def __init__(self, data_folder, num_classes):
self.data_folder = data_folder
self.data = []
self.num_classes = num_classes
for i in range(num_classes):
self.data.append([])
for i in range(data_folder.__len__()):
img, label = data_folder.__getitem__(i)
self.data[label].append(i)
self.max_batch = 1e9
for i in range(num_classes):
self.max_batch = min(self.max_batch, len(self.data[i]))
self.index = 0
def __next__(self):
if self.index >= self.max_batch:
self.index = 0
for i in range(self.num_classes):
random.shuffle(self.data[i])
raise StopIteration
inputs = []
labels = []
for i in range(self.num_classes):
            image, label = self.data_folder.__getitem__(self.data[i][self.index])
            inputs.append(image)
            labels.append(label)
        inputs = torch.stack(inputs)
        labels = torch.tensor(labels, dtype=torch.long)
        self.index += 1
return inputs, labels
def next(self):
if self.index >= self.max_batch:
self.index = 0
for i in range(self.num_classes):
random.shuffle(self.data[i])
raise StopIteration
inputs = []
labels = []
for i in range(self.num_classes):
image, label = self.data_folder.__getitem__(self.data[i][self.index])
inputs.append(image)
labels.append(label)
inputs = torch.stack(inputs)
labels = torch.tensor(labels, dtype=torch.long)
self.index += 1
return inputs, labels
def __len__(self):
return self.max_batch
def __iter__(self):
return self
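# Smoke-test sketch (batch size, class count and shapes are assumptions): one
# forward pass with random tensors to check the tensor plumbing; requires the
# pretrained AlexNet weights to be downloadable.
if __name__ == '__main__':
    model = ParamLearner()
    images = torch.randn(2, 3, 224, 224)  # two RGB crops at AlexNet input size
    R = torch.randn(5, 4096)              # one 4096-d reference vector per class
    logits = model(images, R)             # -> tensor of shape (2, 5)
    print(logits.shape)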
| 30.507692 | 117 | 0.587242 |
cc3031827fcd72aaf5fb21ef42f173f9701de4b2
| 194 |
py
|
Python
|
scripts/item/consume_2435456.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 54 |
2019-04-16T23:24:48.000Z
|
2021-12-18T11:41:50.000Z
|
scripts/item/consume_2435456.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 3 |
2019-05-19T15:19:41.000Z
|
2020-04-27T16:29:16.000Z
|
scripts/item/consume_2435456.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 49 |
2020-11-25T23:29:16.000Z
|
2022-03-26T16:20:24.000Z
|
# Lovely Damage Skin
success = sm.addDamageSkin(2435456)
if success:
sm.chat("The Lovely Damage Skin has been added to your account's damage skin collection.")
# sm.consumeItem(2435456)
| 32.333333 | 94 | 0.747423 |
6debe17b2778038801a630bc60e22f357f5769be
| 140,286 |
py
|
Python
|
isi_sdk_8_2_1/isi_sdk_8_2_1/models/target_report.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24 |
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_2_1/isi_sdk_8_2_1/models/target_report.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46 |
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_2_1/isi_sdk_8_2_1/models/target_report.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29 |
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 8
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_2_1.models.sync_job_phase import SyncJobPhase # noqa: F401,E501
from isi_sdk_8_2_1.models.sync_job_service_report_item import SyncJobServiceReportItem # noqa: F401,E501
class TargetReport(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'action': 'str',
'ads_streams_replicated': 'int',
'block_specs_replicated': 'int',
'bytes_recoverable': 'int',
'bytes_transferred': 'int',
'char_specs_replicated': 'int',
'committed_files': 'int',
'corrected_lins': 'int',
'dead_node': 'bool',
'directories_replicated': 'int',
'dirs_changed': 'int',
'dirs_deleted': 'int',
'dirs_moved': 'int',
'dirs_new': 'int',
'duration': 'int',
'encrypted': 'bool',
'end_time': 'int',
'error': 'str',
'error_checksum_files_skipped': 'int',
'error_io_files_skipped': 'int',
'error_net_files_skipped': 'int',
'errors': 'list[str]',
'failed_chunks': 'int',
'fifos_replicated': 'int',
'file_data_bytes': 'int',
'files_changed': 'int',
'files_linked': 'int',
'files_new': 'int',
'files_selected': 'int',
'files_transferred': 'int',
'files_unlinked': 'int',
'files_with_ads_replicated': 'int',
'flipped_lins': 'int',
'hard_links_replicated': 'int',
'hash_exceptions_fixed': 'int',
'hash_exceptions_found': 'int',
'id': 'str',
'job_id': 'int',
'lins_total': 'int',
'network_bytes_to_source': 'int',
'network_bytes_to_target': 'int',
'new_files_replicated': 'int',
'num_retransmitted_files': 'int',
'phases': 'list[SyncJobPhase]',
'policy_id': 'str',
'policy_name': 'str',
'quotas_deleted': 'int',
'regular_files_replicated': 'int',
'resynced_lins': 'int',
'retransmitted_files': 'list[str]',
'retry': 'int',
'running_chunks': 'int',
'service_report': 'list[SyncJobServiceReportItem]',
'sockets_replicated': 'int',
'source_bytes_recovered': 'int',
'source_directories_created': 'int',
'source_directories_deleted': 'int',
'source_directories_linked': 'int',
'source_directories_unlinked': 'int',
'source_directories_visited': 'int',
'source_files_deleted': 'int',
'source_files_linked': 'int',
'source_files_unlinked': 'int',
'source_host': 'str',
'sparse_data_bytes': 'int',
'start_time': 'int',
'state': 'str',
'subreport_count': 'int',
'succeeded_chunks': 'int',
'symlinks_replicated': 'int',
'sync_type': 'str',
'target_bytes_recovered': 'int',
'target_directories_created': 'int',
'target_directories_deleted': 'int',
'target_directories_linked': 'int',
'target_directories_unlinked': 'int',
'target_files_deleted': 'int',
'target_files_linked': 'int',
'target_files_unlinked': 'int',
'target_path': 'str',
'target_snapshots': 'list[str]',
'total_chunks': 'int',
'total_data_bytes': 'int',
'total_exported_services': 'int',
'total_files': 'int',
'total_network_bytes': 'int',
'total_phases': 'int',
'unchanged_data_bytes': 'int',
'up_to_date_files_skipped': 'int',
'updated_files_replicated': 'int',
'user_conflict_files_skipped': 'int',
'warnings': 'list[str]',
'worm_committed_file_conflicts': 'int'
}
attribute_map = {
'action': 'action',
'ads_streams_replicated': 'ads_streams_replicated',
'block_specs_replicated': 'block_specs_replicated',
'bytes_recoverable': 'bytes_recoverable',
'bytes_transferred': 'bytes_transferred',
'char_specs_replicated': 'char_specs_replicated',
'committed_files': 'committed_files',
'corrected_lins': 'corrected_lins',
'dead_node': 'dead_node',
'directories_replicated': 'directories_replicated',
'dirs_changed': 'dirs_changed',
'dirs_deleted': 'dirs_deleted',
'dirs_moved': 'dirs_moved',
'dirs_new': 'dirs_new',
'duration': 'duration',
'encrypted': 'encrypted',
'end_time': 'end_time',
'error': 'error',
'error_checksum_files_skipped': 'error_checksum_files_skipped',
'error_io_files_skipped': 'error_io_files_skipped',
'error_net_files_skipped': 'error_net_files_skipped',
'errors': 'errors',
'failed_chunks': 'failed_chunks',
'fifos_replicated': 'fifos_replicated',
'file_data_bytes': 'file_data_bytes',
'files_changed': 'files_changed',
'files_linked': 'files_linked',
'files_new': 'files_new',
'files_selected': 'files_selected',
'files_transferred': 'files_transferred',
'files_unlinked': 'files_unlinked',
'files_with_ads_replicated': 'files_with_ads_replicated',
'flipped_lins': 'flipped_lins',
'hard_links_replicated': 'hard_links_replicated',
'hash_exceptions_fixed': 'hash_exceptions_fixed',
'hash_exceptions_found': 'hash_exceptions_found',
'id': 'id',
'job_id': 'job_id',
'lins_total': 'lins_total',
'network_bytes_to_source': 'network_bytes_to_source',
'network_bytes_to_target': 'network_bytes_to_target',
'new_files_replicated': 'new_files_replicated',
'num_retransmitted_files': 'num_retransmitted_files',
'phases': 'phases',
'policy_id': 'policy_id',
'policy_name': 'policy_name',
'quotas_deleted': 'quotas_deleted',
'regular_files_replicated': 'regular_files_replicated',
'resynced_lins': 'resynced_lins',
'retransmitted_files': 'retransmitted_files',
'retry': 'retry',
'running_chunks': 'running_chunks',
'service_report': 'service_report',
'sockets_replicated': 'sockets_replicated',
'source_bytes_recovered': 'source_bytes_recovered',
'source_directories_created': 'source_directories_created',
'source_directories_deleted': 'source_directories_deleted',
'source_directories_linked': 'source_directories_linked',
'source_directories_unlinked': 'source_directories_unlinked',
'source_directories_visited': 'source_directories_visited',
'source_files_deleted': 'source_files_deleted',
'source_files_linked': 'source_files_linked',
'source_files_unlinked': 'source_files_unlinked',
'source_host': 'source_host',
'sparse_data_bytes': 'sparse_data_bytes',
'start_time': 'start_time',
'state': 'state',
'subreport_count': 'subreport_count',
'succeeded_chunks': 'succeeded_chunks',
'symlinks_replicated': 'symlinks_replicated',
'sync_type': 'sync_type',
'target_bytes_recovered': 'target_bytes_recovered',
'target_directories_created': 'target_directories_created',
'target_directories_deleted': 'target_directories_deleted',
'target_directories_linked': 'target_directories_linked',
'target_directories_unlinked': 'target_directories_unlinked',
'target_files_deleted': 'target_files_deleted',
'target_files_linked': 'target_files_linked',
'target_files_unlinked': 'target_files_unlinked',
'target_path': 'target_path',
'target_snapshots': 'target_snapshots',
'total_chunks': 'total_chunks',
'total_data_bytes': 'total_data_bytes',
'total_exported_services': 'total_exported_services',
'total_files': 'total_files',
'total_network_bytes': 'total_network_bytes',
'total_phases': 'total_phases',
'unchanged_data_bytes': 'unchanged_data_bytes',
'up_to_date_files_skipped': 'up_to_date_files_skipped',
'updated_files_replicated': 'updated_files_replicated',
'user_conflict_files_skipped': 'user_conflict_files_skipped',
'warnings': 'warnings',
'worm_committed_file_conflicts': 'worm_committed_file_conflicts'
}
def __init__(self, action=None, ads_streams_replicated=None, block_specs_replicated=None, bytes_recoverable=None, bytes_transferred=None, char_specs_replicated=None, committed_files=None, corrected_lins=None, dead_node=None, directories_replicated=None, dirs_changed=None, dirs_deleted=None, dirs_moved=None, dirs_new=None, duration=None, encrypted=None, end_time=None, error=None, error_checksum_files_skipped=None, error_io_files_skipped=None, error_net_files_skipped=None, errors=None, failed_chunks=None, fifos_replicated=None, file_data_bytes=None, files_changed=None, files_linked=None, files_new=None, files_selected=None, files_transferred=None, files_unlinked=None, files_with_ads_replicated=None, flipped_lins=None, hard_links_replicated=None, hash_exceptions_fixed=None, hash_exceptions_found=None, id=None, job_id=None, lins_total=None, network_bytes_to_source=None, network_bytes_to_target=None, new_files_replicated=None, num_retransmitted_files=None, phases=None, policy_id=None, policy_name=None, quotas_deleted=None, regular_files_replicated=None, resynced_lins=None, retransmitted_files=None, retry=None, running_chunks=None, service_report=None, sockets_replicated=None, source_bytes_recovered=None, source_directories_created=None, source_directories_deleted=None, source_directories_linked=None, source_directories_unlinked=None, source_directories_visited=None, source_files_deleted=None, source_files_linked=None, source_files_unlinked=None, source_host=None, sparse_data_bytes=None, start_time=None, state=None, subreport_count=None, succeeded_chunks=None, symlinks_replicated=None, sync_type=None, target_bytes_recovered=None, target_directories_created=None, target_directories_deleted=None, target_directories_linked=None, target_directories_unlinked=None, target_files_deleted=None, target_files_linked=None, target_files_unlinked=None, target_path=None, target_snapshots=None, total_chunks=None, total_data_bytes=None, total_exported_services=None, total_files=None, total_network_bytes=None, total_phases=None, unchanged_data_bytes=None, up_to_date_files_skipped=None, updated_files_replicated=None, user_conflict_files_skipped=None, warnings=None, worm_committed_file_conflicts=None): # noqa: E501
"""TargetReport - a model defined in Swagger""" # noqa: E501
self._action = None
self._ads_streams_replicated = None
self._block_specs_replicated = None
self._bytes_recoverable = None
self._bytes_transferred = None
self._char_specs_replicated = None
self._committed_files = None
self._corrected_lins = None
self._dead_node = None
self._directories_replicated = None
self._dirs_changed = None
self._dirs_deleted = None
self._dirs_moved = None
self._dirs_new = None
self._duration = None
self._encrypted = None
self._end_time = None
self._error = None
self._error_checksum_files_skipped = None
self._error_io_files_skipped = None
self._error_net_files_skipped = None
self._errors = None
self._failed_chunks = None
self._fifos_replicated = None
self._file_data_bytes = None
self._files_changed = None
self._files_linked = None
self._files_new = None
self._files_selected = None
self._files_transferred = None
self._files_unlinked = None
self._files_with_ads_replicated = None
self._flipped_lins = None
self._hard_links_replicated = None
self._hash_exceptions_fixed = None
self._hash_exceptions_found = None
self._id = None
self._job_id = None
self._lins_total = None
self._network_bytes_to_source = None
self._network_bytes_to_target = None
self._new_files_replicated = None
self._num_retransmitted_files = None
self._phases = None
self._policy_id = None
self._policy_name = None
self._quotas_deleted = None
self._regular_files_replicated = None
self._resynced_lins = None
self._retransmitted_files = None
self._retry = None
self._running_chunks = None
self._service_report = None
self._sockets_replicated = None
self._source_bytes_recovered = None
self._source_directories_created = None
self._source_directories_deleted = None
self._source_directories_linked = None
self._source_directories_unlinked = None
self._source_directories_visited = None
self._source_files_deleted = None
self._source_files_linked = None
self._source_files_unlinked = None
self._source_host = None
self._sparse_data_bytes = None
self._start_time = None
self._state = None
self._subreport_count = None
self._succeeded_chunks = None
self._symlinks_replicated = None
self._sync_type = None
self._target_bytes_recovered = None
self._target_directories_created = None
self._target_directories_deleted = None
self._target_directories_linked = None
self._target_directories_unlinked = None
self._target_files_deleted = None
self._target_files_linked = None
self._target_files_unlinked = None
self._target_path = None
self._target_snapshots = None
self._total_chunks = None
self._total_data_bytes = None
self._total_exported_services = None
self._total_files = None
self._total_network_bytes = None
self._total_phases = None
self._unchanged_data_bytes = None
self._up_to_date_files_skipped = None
self._updated_files_replicated = None
self._user_conflict_files_skipped = None
self._warnings = None
self._worm_committed_file_conflicts = None
self.discriminator = None
self.action = action
self.ads_streams_replicated = ads_streams_replicated
self.block_specs_replicated = block_specs_replicated
self.bytes_recoverable = bytes_recoverable
self.bytes_transferred = bytes_transferred
self.char_specs_replicated = char_specs_replicated
self.committed_files = committed_files
self.corrected_lins = corrected_lins
self.dead_node = dead_node
self.directories_replicated = directories_replicated
self.dirs_changed = dirs_changed
self.dirs_deleted = dirs_deleted
self.dirs_moved = dirs_moved
self.dirs_new = dirs_new
if duration is not None:
self.duration = duration
self.encrypted = encrypted
if end_time is not None:
self.end_time = end_time
self.error = error
self.error_checksum_files_skipped = error_checksum_files_skipped
self.error_io_files_skipped = error_io_files_skipped
self.error_net_files_skipped = error_net_files_skipped
self.errors = errors
self.failed_chunks = failed_chunks
self.fifos_replicated = fifos_replicated
self.file_data_bytes = file_data_bytes
self.files_changed = files_changed
self.files_linked = files_linked
self.files_new = files_new
self.files_selected = files_selected
self.files_transferred = files_transferred
self.files_unlinked = files_unlinked
self.files_with_ads_replicated = files_with_ads_replicated
self.flipped_lins = flipped_lins
self.hard_links_replicated = hard_links_replicated
self.hash_exceptions_fixed = hash_exceptions_fixed
self.hash_exceptions_found = hash_exceptions_found
self.id = id
if job_id is not None:
self.job_id = job_id
self.lins_total = lins_total
self.network_bytes_to_source = network_bytes_to_source
self.network_bytes_to_target = network_bytes_to_target
self.new_files_replicated = new_files_replicated
self.num_retransmitted_files = num_retransmitted_files
self.phases = phases
self.policy_id = policy_id
self.policy_name = policy_name
self.quotas_deleted = quotas_deleted
self.regular_files_replicated = regular_files_replicated
self.resynced_lins = resynced_lins
self.retransmitted_files = retransmitted_files
self.retry = retry
self.running_chunks = running_chunks
if service_report is not None:
self.service_report = service_report
self.sockets_replicated = sockets_replicated
self.source_bytes_recovered = source_bytes_recovered
self.source_directories_created = source_directories_created
self.source_directories_deleted = source_directories_deleted
self.source_directories_linked = source_directories_linked
self.source_directories_unlinked = source_directories_unlinked
self.source_directories_visited = source_directories_visited
self.source_files_deleted = source_files_deleted
self.source_files_linked = source_files_linked
self.source_files_unlinked = source_files_unlinked
self.source_host = source_host
self.sparse_data_bytes = sparse_data_bytes
if start_time is not None:
self.start_time = start_time
self.state = state
self.subreport_count = subreport_count
self.succeeded_chunks = succeeded_chunks
self.symlinks_replicated = symlinks_replicated
self.sync_type = sync_type
self.target_bytes_recovered = target_bytes_recovered
self.target_directories_created = target_directories_created
self.target_directories_deleted = target_directories_deleted
self.target_directories_linked = target_directories_linked
self.target_directories_unlinked = target_directories_unlinked
self.target_files_deleted = target_files_deleted
self.target_files_linked = target_files_linked
self.target_files_unlinked = target_files_unlinked
self.target_path = target_path
self.target_snapshots = target_snapshots
self.total_chunks = total_chunks
self.total_data_bytes = total_data_bytes
if total_exported_services is not None:
self.total_exported_services = total_exported_services
self.total_files = total_files
self.total_network_bytes = total_network_bytes
self.total_phases = total_phases
self.unchanged_data_bytes = unchanged_data_bytes
self.up_to_date_files_skipped = up_to_date_files_skipped
self.updated_files_replicated = updated_files_replicated
self.user_conflict_files_skipped = user_conflict_files_skipped
self.warnings = warnings
self.worm_committed_file_conflicts = worm_committed_file_conflicts
@property
def action(self):
"""Gets the action of this TargetReport. # noqa: E501
The action to be taken by this job. # noqa: E501
:return: The action of this TargetReport. # noqa: E501
:rtype: str
"""
return self._action
@action.setter
def action(self, action):
"""Sets the action of this TargetReport.
The action to be taken by this job. # noqa: E501
:param action: The action of this TargetReport. # noqa: E501
:type: str
"""
if action is None:
raise ValueError("Invalid value for `action`, must not be `None`") # noqa: E501
allowed_values = ["resync_prep", "allow_write", "allow_write_revert", "test", "run", "none"] # noqa: E501
if action not in allowed_values:
raise ValueError(
"Invalid value for `action` ({0}), must be one of {1}" # noqa: E501
.format(action, allowed_values)
)
self._action = action
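    # Behaviour sketch (illustrative values, not SDK code): assigning a value
    # outside `allowed_values` raises immediately, e.g.
    #     report.action = 'run'     # accepted
    #     report.action = 'bogus'   # ValueError listing the allowed actions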
@property
def ads_streams_replicated(self):
"""Gets the ads_streams_replicated of this TargetReport. # noqa: E501
The number of ads streams replicated by this job. # noqa: E501
:return: The ads_streams_replicated of this TargetReport. # noqa: E501
:rtype: int
"""
return self._ads_streams_replicated
@ads_streams_replicated.setter
def ads_streams_replicated(self, ads_streams_replicated):
"""Sets the ads_streams_replicated of this TargetReport.
The number of ads streams replicated by this job. # noqa: E501
:param ads_streams_replicated: The ads_streams_replicated of this TargetReport. # noqa: E501
:type: int
"""
if ads_streams_replicated is None:
raise ValueError("Invalid value for `ads_streams_replicated`, must not be `None`") # noqa: E501
if ads_streams_replicated is not None and ads_streams_replicated > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `ads_streams_replicated`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if ads_streams_replicated is not None and ads_streams_replicated < 0: # noqa: E501
raise ValueError("Invalid value for `ads_streams_replicated`, must be a value greater than or equal to `0`") # noqa: E501
self._ads_streams_replicated = ads_streams_replicated
@property
def block_specs_replicated(self):
"""Gets the block_specs_replicated of this TargetReport. # noqa: E501
The number of block specs replicated by this job. # noqa: E501
:return: The block_specs_replicated of this TargetReport. # noqa: E501
:rtype: int
"""
return self._block_specs_replicated
@block_specs_replicated.setter
def block_specs_replicated(self, block_specs_replicated):
"""Sets the block_specs_replicated of this TargetReport.
The number of block specs replicated by this job. # noqa: E501
:param block_specs_replicated: The block_specs_replicated of this TargetReport. # noqa: E501
:type: int
"""
if block_specs_replicated is None:
raise ValueError("Invalid value for `block_specs_replicated`, must not be `None`") # noqa: E501
if block_specs_replicated is not None and block_specs_replicated > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `block_specs_replicated`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if block_specs_replicated is not None and block_specs_replicated < 0: # noqa: E501
raise ValueError("Invalid value for `block_specs_replicated`, must be a value greater than or equal to `0`") # noqa: E501
self._block_specs_replicated = block_specs_replicated
@property
def bytes_recoverable(self):
"""Gets the bytes_recoverable of this TargetReport. # noqa: E501
The number of bytes recoverable by this job. # noqa: E501
:return: The bytes_recoverable of this TargetReport. # noqa: E501
:rtype: int
"""
return self._bytes_recoverable
@bytes_recoverable.setter
def bytes_recoverable(self, bytes_recoverable):
"""Sets the bytes_recoverable of this TargetReport.
The number of bytes recoverable by this job. # noqa: E501
:param bytes_recoverable: The bytes_recoverable of this TargetReport. # noqa: E501
:type: int
"""
if bytes_recoverable is None:
raise ValueError("Invalid value for `bytes_recoverable`, must not be `None`") # noqa: E501
if bytes_recoverable is not None and bytes_recoverable > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `bytes_recoverable`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if bytes_recoverable is not None and bytes_recoverable < 0: # noqa: E501
raise ValueError("Invalid value for `bytes_recoverable`, must be a value greater than or equal to `0`") # noqa: E501
self._bytes_recoverable = bytes_recoverable
@property
def bytes_transferred(self):
"""Gets the bytes_transferred of this TargetReport. # noqa: E501
The number of bytes that have been transferred by this job. # noqa: E501
:return: The bytes_transferred of this TargetReport. # noqa: E501
:rtype: int
"""
return self._bytes_transferred
@bytes_transferred.setter
def bytes_transferred(self, bytes_transferred):
"""Sets the bytes_transferred of this TargetReport.
The number of bytes that have been transferred by this job. # noqa: E501
:param bytes_transferred: The bytes_transferred of this TargetReport. # noqa: E501
:type: int
"""
if bytes_transferred is None:
raise ValueError("Invalid value for `bytes_transferred`, must not be `None`") # noqa: E501
if bytes_transferred is not None and bytes_transferred > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `bytes_transferred`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if bytes_transferred is not None and bytes_transferred < 0: # noqa: E501
raise ValueError("Invalid value for `bytes_transferred`, must be a value greater than or equal to `0`") # noqa: E501
self._bytes_transferred = bytes_transferred
@property
def char_specs_replicated(self):
"""Gets the char_specs_replicated of this TargetReport. # noqa: E501
The number of char specs replicated by this job. # noqa: E501
:return: The char_specs_replicated of this TargetReport. # noqa: E501
:rtype: int
"""
return self._char_specs_replicated
@char_specs_replicated.setter
def char_specs_replicated(self, char_specs_replicated):
"""Sets the char_specs_replicated of this TargetReport.
The number of char specs replicated by this job. # noqa: E501
:param char_specs_replicated: The char_specs_replicated of this TargetReport. # noqa: E501
:type: int
"""
if char_specs_replicated is None:
raise ValueError("Invalid value for `char_specs_replicated`, must not be `None`") # noqa: E501
if char_specs_replicated is not None and char_specs_replicated > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `char_specs_replicated`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if char_specs_replicated is not None and char_specs_replicated < 0: # noqa: E501
raise ValueError("Invalid value for `char_specs_replicated`, must be a value greater than or equal to `0`") # noqa: E501
self._char_specs_replicated = char_specs_replicated
@property
def committed_files(self):
"""Gets the committed_files of this TargetReport. # noqa: E501
The number of WORM committed files. # noqa: E501
:return: The committed_files of this TargetReport. # noqa: E501
:rtype: int
"""
return self._committed_files
@committed_files.setter
def committed_files(self, committed_files):
"""Sets the committed_files of this TargetReport.
The number of WORM committed files. # noqa: E501
:param committed_files: The committed_files of this TargetReport. # noqa: E501
:type: int
"""
if committed_files is None:
raise ValueError("Invalid value for `committed_files`, must not be `None`") # noqa: E501
if committed_files is not None and committed_files > 4294967295: # noqa: E501
raise ValueError("Invalid value for `committed_files`, must be a value less than or equal to `4294967295`") # noqa: E501
if committed_files is not None and committed_files < 0: # noqa: E501
raise ValueError("Invalid value for `committed_files`, must be a value greater than or equal to `0`") # noqa: E501
self._committed_files = committed_files
@property
def corrected_lins(self):
"""Gets the corrected_lins of this TargetReport. # noqa: E501
The number of LINs corrected by this job. # noqa: E501
:return: The corrected_lins of this TargetReport. # noqa: E501
:rtype: int
"""
return self._corrected_lins
@corrected_lins.setter
def corrected_lins(self, corrected_lins):
"""Sets the corrected_lins of this TargetReport.
The number of LINs corrected by this job. # noqa: E501
:param corrected_lins: The corrected_lins of this TargetReport. # noqa: E501
:type: int
"""
if corrected_lins is None:
raise ValueError("Invalid value for `corrected_lins`, must not be `None`") # noqa: E501
if corrected_lins is not None and corrected_lins > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `corrected_lins`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if corrected_lins is not None and corrected_lins < 0: # noqa: E501
raise ValueError("Invalid value for `corrected_lins`, must be a value greater than or equal to `0`") # noqa: E501
self._corrected_lins = corrected_lins
@property
def dead_node(self):
"""Gets the dead_node of this TargetReport. # noqa: E501
This field is true if the node running this job is dead. # noqa: E501
:return: The dead_node of this TargetReport. # noqa: E501
:rtype: bool
"""
return self._dead_node
@dead_node.setter
def dead_node(self, dead_node):
"""Sets the dead_node of this TargetReport.
This field is true if the node running this job is dead. # noqa: E501
:param dead_node: The dead_node of this TargetReport. # noqa: E501
:type: bool
"""
if dead_node is None:
raise ValueError("Invalid value for `dead_node`, must not be `None`") # noqa: E501
self._dead_node = dead_node
@property
def directories_replicated(self):
"""Gets the directories_replicated of this TargetReport. # noqa: E501
The number of directories replicated. # noqa: E501
:return: The directories_replicated of this TargetReport. # noqa: E501
:rtype: int
"""
return self._directories_replicated
@directories_replicated.setter
def directories_replicated(self, directories_replicated):
"""Sets the directories_replicated of this TargetReport.
The number of directories replicated. # noqa: E501
:param directories_replicated: The directories_replicated of this TargetReport. # noqa: E501
:type: int
"""
if directories_replicated is None:
raise ValueError("Invalid value for `directories_replicated`, must not be `None`") # noqa: E501
if directories_replicated is not None and directories_replicated > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `directories_replicated`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if directories_replicated is not None and directories_replicated < 0: # noqa: E501
raise ValueError("Invalid value for `directories_replicated`, must be a value greater than or equal to `0`") # noqa: E501
self._directories_replicated = directories_replicated
@property
def dirs_changed(self):
"""Gets the dirs_changed of this TargetReport. # noqa: E501
The number of directories changed by this job. # noqa: E501
:return: The dirs_changed of this TargetReport. # noqa: E501
:rtype: int
"""
return self._dirs_changed
@dirs_changed.setter
def dirs_changed(self, dirs_changed):
"""Sets the dirs_changed of this TargetReport.
The number of directories changed by this job. # noqa: E501
:param dirs_changed: The dirs_changed of this TargetReport. # noqa: E501
:type: int
"""
if dirs_changed is None:
raise ValueError("Invalid value for `dirs_changed`, must not be `None`") # noqa: E501
if dirs_changed is not None and dirs_changed > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `dirs_changed`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if dirs_changed is not None and dirs_changed < 0: # noqa: E501
raise ValueError("Invalid value for `dirs_changed`, must be a value greater than or equal to `0`") # noqa: E501
self._dirs_changed = dirs_changed
@property
def dirs_deleted(self):
"""Gets the dirs_deleted of this TargetReport. # noqa: E501
The number of directories deleted by this job. # noqa: E501
:return: The dirs_deleted of this TargetReport. # noqa: E501
:rtype: int
"""
return self._dirs_deleted
@dirs_deleted.setter
def dirs_deleted(self, dirs_deleted):
"""Sets the dirs_deleted of this TargetReport.
The number of directories deleted by this job. # noqa: E501
:param dirs_deleted: The dirs_deleted of this TargetReport. # noqa: E501
:type: int
"""
if dirs_deleted is None:
raise ValueError("Invalid value for `dirs_deleted`, must not be `None`") # noqa: E501
if dirs_deleted is not None and dirs_deleted > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `dirs_deleted`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if dirs_deleted is not None and dirs_deleted < 0: # noqa: E501
raise ValueError("Invalid value for `dirs_deleted`, must be a value greater than or equal to `0`") # noqa: E501
self._dirs_deleted = dirs_deleted
@property
def dirs_moved(self):
"""Gets the dirs_moved of this TargetReport. # noqa: E501
The number of directories moved by this job. # noqa: E501
:return: The dirs_moved of this TargetReport. # noqa: E501
:rtype: int
"""
return self._dirs_moved
@dirs_moved.setter
def dirs_moved(self, dirs_moved):
"""Sets the dirs_moved of this TargetReport.
The number of directories moved by this job. # noqa: E501
:param dirs_moved: The dirs_moved of this TargetReport. # noqa: E501
:type: int
"""
if dirs_moved is None:
raise ValueError("Invalid value for `dirs_moved`, must not be `None`") # noqa: E501
if dirs_moved is not None and dirs_moved > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `dirs_moved`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if dirs_moved is not None and dirs_moved < 0: # noqa: E501
raise ValueError("Invalid value for `dirs_moved`, must be a value greater than or equal to `0`") # noqa: E501
self._dirs_moved = dirs_moved
@property
def dirs_new(self):
"""Gets the dirs_new of this TargetReport. # noqa: E501
The number of directories created by this job. # noqa: E501
:return: The dirs_new of this TargetReport. # noqa: E501
:rtype: int
"""
return self._dirs_new
@dirs_new.setter
def dirs_new(self, dirs_new):
"""Sets the dirs_new of this TargetReport.
The number of directories created by this job. # noqa: E501
:param dirs_new: The dirs_new of this TargetReport. # noqa: E501
:type: int
"""
if dirs_new is None:
raise ValueError("Invalid value for `dirs_new`, must not be `None`") # noqa: E501
if dirs_new is not None and dirs_new > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `dirs_new`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if dirs_new is not None and dirs_new < 0: # noqa: E501
raise ValueError("Invalid value for `dirs_new`, must be a value greater than or equal to `0`") # noqa: E501
self._dirs_new = dirs_new
@property
def duration(self):
"""Gets the duration of this TargetReport. # noqa: E501
The amount of time in seconds between when the job was started and when it ended. If the job has not yet ended, this is the amount of time since the job started. This field is null if the job has not yet started. # noqa: E501
:return: The duration of this TargetReport. # noqa: E501
:rtype: int
"""
return self._duration
@duration.setter
def duration(self, duration):
"""Sets the duration of this TargetReport.
The amount of time in seconds between when the job was started and when it ended. If the job has not yet ended, this is the amount of time since the job started. This field is null if the job has not yet started. # noqa: E501
:param duration: The duration of this TargetReport. # noqa: E501
:type: int
"""
if duration is not None and duration > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `duration`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if duration is not None and duration < 0: # noqa: E501
raise ValueError("Invalid value for `duration`, must be a value greater than or equal to `0`") # noqa: E501
self._duration = duration
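    # Note (added for clarity, not generated): unlike most counters in this
    # model, duration is nullable -- its setter skips the `None` check because
    # the value is null until the job has started. end_time and job_id below
    # behave the same way. So `report.duration = None` is accepted here, while
    # `report.bytes_transferred = None` would raise ValueError.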
@property
def encrypted(self):
"""Gets the encrypted of this TargetReport. # noqa: E501
If true, syncs will be encrypted. # noqa: E501
:return: The encrypted of this TargetReport. # noqa: E501
:rtype: bool
"""
return self._encrypted
@encrypted.setter
def encrypted(self, encrypted):
"""Sets the encrypted of this TargetReport.
If true, syncs will be encrypted. # noqa: E501
:param encrypted: The encrypted of this TargetReport. # noqa: E501
:type: bool
"""
if encrypted is None:
raise ValueError("Invalid value for `encrypted`, must not be `None`") # noqa: E501
self._encrypted = encrypted
@property
def end_time(self):
"""Gets the end_time of this TargetReport. # noqa: E501
The time the job ended in unix epoch seconds. The field is null if the job hasn't ended. # noqa: E501
:return: The end_time of this TargetReport. # noqa: E501
:rtype: int
"""
return self._end_time
@end_time.setter
def end_time(self, end_time):
"""Sets the end_time of this TargetReport.
The time the job ended in unix epoch seconds. The field is null if the job hasn't ended. # noqa: E501
:param end_time: The end_time of this TargetReport. # noqa: E501
:type: int
"""
if end_time is not None and end_time > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `end_time`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if end_time is not None and end_time < 0: # noqa: E501
raise ValueError("Invalid value for `end_time`, must be a value greater than or equal to `0`") # noqa: E501
self._end_time = end_time
@property
def error(self):
"""Gets the error of this TargetReport. # noqa: E501
The primary error message for this job. # noqa: E501
:return: The error of this TargetReport. # noqa: E501
:rtype: str
"""
return self._error
@error.setter
def error(self, error):
"""Sets the error of this TargetReport.
The primary error message for this job. # noqa: E501
:param error: The error of this TargetReport. # noqa: E501
:type: str
"""
if error is None:
raise ValueError("Invalid value for `error`, must not be `None`") # noqa: E501
if error is not None and len(error) > 255:
raise ValueError("Invalid value for `error`, length must be less than or equal to `255`") # noqa: E501
if error is not None and len(error) < 0:
raise ValueError("Invalid value for `error`, length must be greater than or equal to `0`") # noqa: E501
self._error = error
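    # Illustrative sketch (not part of the generated code): string fields such
    # as error, id, policy_id and policy_name are validated only by length
    # (0-255 characters). A hypothetical caller would see:
    #
    #   report.error = "connection reset by peer"   # accepted
    #   report.error = "x" * 256                    # ValueError: longer than 255
    #   report.error = None                         # ValueError: must not be None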
@property
def error_checksum_files_skipped(self):
"""Gets the error_checksum_files_skipped of this TargetReport. # noqa: E501
The number of files with checksum errors skipped by this job. # noqa: E501
:return: The error_checksum_files_skipped of this TargetReport. # noqa: E501
:rtype: int
"""
return self._error_checksum_files_skipped
@error_checksum_files_skipped.setter
def error_checksum_files_skipped(self, error_checksum_files_skipped):
"""Sets the error_checksum_files_skipped of this TargetReport.
The number of files with checksum errors skipped by this job. # noqa: E501
:param error_checksum_files_skipped: The error_checksum_files_skipped of this TargetReport. # noqa: E501
:type: int
"""
if error_checksum_files_skipped is None:
raise ValueError("Invalid value for `error_checksum_files_skipped`, must not be `None`") # noqa: E501
if error_checksum_files_skipped is not None and error_checksum_files_skipped > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `error_checksum_files_skipped`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if error_checksum_files_skipped is not None and error_checksum_files_skipped < 0: # noqa: E501
raise ValueError("Invalid value for `error_checksum_files_skipped`, must be a value greater than or equal to `0`") # noqa: E501
self._error_checksum_files_skipped = error_checksum_files_skipped
@property
def error_io_files_skipped(self):
"""Gets the error_io_files_skipped of this TargetReport. # noqa: E501
The number of files with io errors skipped by this job. # noqa: E501
:return: The error_io_files_skipped of this TargetReport. # noqa: E501
:rtype: int
"""
return self._error_io_files_skipped
@error_io_files_skipped.setter
def error_io_files_skipped(self, error_io_files_skipped):
"""Sets the error_io_files_skipped of this TargetReport.
The number of files with io errors skipped by this job. # noqa: E501
:param error_io_files_skipped: The error_io_files_skipped of this TargetReport. # noqa: E501
:type: int
"""
if error_io_files_skipped is None:
raise ValueError("Invalid value for `error_io_files_skipped`, must not be `None`") # noqa: E501
if error_io_files_skipped is not None and error_io_files_skipped > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `error_io_files_skipped`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if error_io_files_skipped is not None and error_io_files_skipped < 0: # noqa: E501
raise ValueError("Invalid value for `error_io_files_skipped`, must be a value greater than or equal to `0`") # noqa: E501
self._error_io_files_skipped = error_io_files_skipped
@property
def error_net_files_skipped(self):
"""Gets the error_net_files_skipped of this TargetReport. # noqa: E501
The number of files with network errors skipped by this job. # noqa: E501
:return: The error_net_files_skipped of this TargetReport. # noqa: E501
:rtype: int
"""
return self._error_net_files_skipped
@error_net_files_skipped.setter
def error_net_files_skipped(self, error_net_files_skipped):
"""Sets the error_net_files_skipped of this TargetReport.
The number of files with network errors skipped by this job. # noqa: E501
:param error_net_files_skipped: The error_net_files_skipped of this TargetReport. # noqa: E501
:type: int
"""
if error_net_files_skipped is None:
raise ValueError("Invalid value for `error_net_files_skipped`, must not be `None`") # noqa: E501
if error_net_files_skipped is not None and error_net_files_skipped > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `error_net_files_skipped`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if error_net_files_skipped is not None and error_net_files_skipped < 0: # noqa: E501
raise ValueError("Invalid value for `error_net_files_skipped`, must be a value greater than or equal to `0`") # noqa: E501
self._error_net_files_skipped = error_net_files_skipped
@property
def errors(self):
"""Gets the errors of this TargetReport. # noqa: E501
A list of error messages for this job. # noqa: E501
:return: The errors of this TargetReport. # noqa: E501
:rtype: list[str]
"""
return self._errors
@errors.setter
def errors(self, errors):
"""Sets the errors of this TargetReport.
A list of error messages for this job. # noqa: E501
:param errors: The errors of this TargetReport. # noqa: E501
:type: list[str]
"""
if errors is None:
raise ValueError("Invalid value for `errors`, must not be `None`") # noqa: E501
self._errors = errors
@property
def failed_chunks(self):
"""Gets the failed_chunks of this TargetReport. # noqa: E501
The number of data chunks that failed transmission. # noqa: E501
:return: The failed_chunks of this TargetReport. # noqa: E501
:rtype: int
"""
return self._failed_chunks
@failed_chunks.setter
def failed_chunks(self, failed_chunks):
"""Sets the failed_chunks of this TargetReport.
The number of data chunks that failed transmission. # noqa: E501
:param failed_chunks: The failed_chunks of this TargetReport. # noqa: E501
:type: int
"""
if failed_chunks is None:
raise ValueError("Invalid value for `failed_chunks`, must not be `None`") # noqa: E501
if failed_chunks is not None and failed_chunks > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `failed_chunks`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if failed_chunks is not None and failed_chunks < 0: # noqa: E501
raise ValueError("Invalid value for `failed_chunks`, must be a value greater than or equal to `0`") # noqa: E501
self._failed_chunks = failed_chunks
@property
def fifos_replicated(self):
"""Gets the fifos_replicated of this TargetReport. # noqa: E501
The number of fifos replicated by this job. # noqa: E501
:return: The fifos_replicated of this TargetReport. # noqa: E501
:rtype: int
"""
return self._fifos_replicated
@fifos_replicated.setter
def fifos_replicated(self, fifos_replicated):
"""Sets the fifos_replicated of this TargetReport.
The number of fifos replicated by this job. # noqa: E501
:param fifos_replicated: The fifos_replicated of this TargetReport. # noqa: E501
:type: int
"""
if fifos_replicated is None:
raise ValueError("Invalid value for `fifos_replicated`, must not be `None`") # noqa: E501
if fifos_replicated is not None and fifos_replicated > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `fifos_replicated`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if fifos_replicated is not None and fifos_replicated < 0: # noqa: E501
raise ValueError("Invalid value for `fifos_replicated`, must be a value greater than or equal to `0`") # noqa: E501
self._fifos_replicated = fifos_replicated
@property
def file_data_bytes(self):
"""Gets the file_data_bytes of this TargetReport. # noqa: E501
The number of bytes transferred that belong to files. # noqa: E501
:return: The file_data_bytes of this TargetReport. # noqa: E501
:rtype: int
"""
return self._file_data_bytes
@file_data_bytes.setter
def file_data_bytes(self, file_data_bytes):
"""Sets the file_data_bytes of this TargetReport.
The number of bytes transferred that belong to files. # noqa: E501
:param file_data_bytes: The file_data_bytes of this TargetReport. # noqa: E501
:type: int
"""
if file_data_bytes is None:
raise ValueError("Invalid value for `file_data_bytes`, must not be `None`") # noqa: E501
if file_data_bytes is not None and file_data_bytes > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `file_data_bytes`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if file_data_bytes is not None and file_data_bytes < 0: # noqa: E501
raise ValueError("Invalid value for `file_data_bytes`, must be a value greater than or equal to `0`") # noqa: E501
self._file_data_bytes = file_data_bytes
@property
def files_changed(self):
"""Gets the files_changed of this TargetReport. # noqa: E501
The number of files changed by this job. # noqa: E501
:return: The files_changed of this TargetReport. # noqa: E501
:rtype: int
"""
return self._files_changed
@files_changed.setter
def files_changed(self, files_changed):
"""Sets the files_changed of this TargetReport.
The number of files changed by this job. # noqa: E501
:param files_changed: The files_changed of this TargetReport. # noqa: E501
:type: int
"""
if files_changed is None:
raise ValueError("Invalid value for `files_changed`, must not be `None`") # noqa: E501
if files_changed is not None and files_changed > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `files_changed`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if files_changed is not None and files_changed < 0: # noqa: E501
raise ValueError("Invalid value for `files_changed`, must be a value greater than or equal to `0`") # noqa: E501
self._files_changed = files_changed
@property
def files_linked(self):
"""Gets the files_linked of this TargetReport. # noqa: E501
The number of files linked by this job. # noqa: E501
:return: The files_linked of this TargetReport. # noqa: E501
:rtype: int
"""
return self._files_linked
@files_linked.setter
def files_linked(self, files_linked):
"""Sets the files_linked of this TargetReport.
The number of files linked by this job. # noqa: E501
:param files_linked: The files_linked of this TargetReport. # noqa: E501
:type: int
"""
if files_linked is None:
raise ValueError("Invalid value for `files_linked`, must not be `None`") # noqa: E501
if files_linked is not None and files_linked > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `files_linked`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if files_linked is not None and files_linked < 0: # noqa: E501
raise ValueError("Invalid value for `files_linked`, must be a value greater than or equal to `0`") # noqa: E501
self._files_linked = files_linked
@property
def files_new(self):
"""Gets the files_new of this TargetReport. # noqa: E501
The number of files created by this job. # noqa: E501
:return: The files_new of this TargetReport. # noqa: E501
:rtype: int
"""
return self._files_new
@files_new.setter
def files_new(self, files_new):
"""Sets the files_new of this TargetReport.
The number of files created by this job. # noqa: E501
:param files_new: The files_new of this TargetReport. # noqa: E501
:type: int
"""
if files_new is None:
raise ValueError("Invalid value for `files_new`, must not be `None`") # noqa: E501
if files_new is not None and files_new > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `files_new`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if files_new is not None and files_new < 0: # noqa: E501
raise ValueError("Invalid value for `files_new`, must be a value greater than or equal to `0`") # noqa: E501
self._files_new = files_new
@property
def files_selected(self):
"""Gets the files_selected of this TargetReport. # noqa: E501
The number of files selected by this job. # noqa: E501
:return: The files_selected of this TargetReport. # noqa: E501
:rtype: int
"""
return self._files_selected
@files_selected.setter
def files_selected(self, files_selected):
"""Sets the files_selected of this TargetReport.
The number of files selected by this job. # noqa: E501
:param files_selected: The files_selected of this TargetReport. # noqa: E501
:type: int
"""
if files_selected is None:
raise ValueError("Invalid value for `files_selected`, must not be `None`") # noqa: E501
if files_selected is not None and files_selected > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `files_selected`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if files_selected is not None and files_selected < 0: # noqa: E501
raise ValueError("Invalid value for `files_selected`, must be a value greater than or equal to `0`") # noqa: E501
self._files_selected = files_selected
@property
def files_transferred(self):
"""Gets the files_transferred of this TargetReport. # noqa: E501
The number of files transferred by this job. # noqa: E501
:return: The files_transferred of this TargetReport. # noqa: E501
:rtype: int
"""
return self._files_transferred
@files_transferred.setter
def files_transferred(self, files_transferred):
"""Sets the files_transferred of this TargetReport.
The number of files transferred by this job. # noqa: E501
:param files_transferred: The files_transferred of this TargetReport. # noqa: E501
:type: int
"""
if files_transferred is None:
raise ValueError("Invalid value for `files_transferred`, must not be `None`") # noqa: E501
if files_transferred is not None and files_transferred > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `files_transferred`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if files_transferred is not None and files_transferred < 0: # noqa: E501
raise ValueError("Invalid value for `files_transferred`, must be a value greater than or equal to `0`") # noqa: E501
self._files_transferred = files_transferred
@property
def files_unlinked(self):
"""Gets the files_unlinked of this TargetReport. # noqa: E501
The number of files unlinked by this job. # noqa: E501
:return: The files_unlinked of this TargetReport. # noqa: E501
:rtype: int
"""
return self._files_unlinked
@files_unlinked.setter
def files_unlinked(self, files_unlinked):
"""Sets the files_unlinked of this TargetReport.
The number of files unlinked by this job. # noqa: E501
:param files_unlinked: The files_unlinked of this TargetReport. # noqa: E501
:type: int
"""
if files_unlinked is None:
raise ValueError("Invalid value for `files_unlinked`, must not be `None`") # noqa: E501
if files_unlinked is not None and files_unlinked > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `files_unlinked`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if files_unlinked is not None and files_unlinked < 0: # noqa: E501
raise ValueError("Invalid value for `files_unlinked`, must be a value greater than or equal to `0`") # noqa: E501
self._files_unlinked = files_unlinked
@property
def files_with_ads_replicated(self):
"""Gets the files_with_ads_replicated of this TargetReport. # noqa: E501
The number of files with ads replicated by this job. # noqa: E501
:return: The files_with_ads_replicated of this TargetReport. # noqa: E501
:rtype: int
"""
return self._files_with_ads_replicated
@files_with_ads_replicated.setter
def files_with_ads_replicated(self, files_with_ads_replicated):
"""Sets the files_with_ads_replicated of this TargetReport.
The number of files with ads replicated by this job. # noqa: E501
:param files_with_ads_replicated: The files_with_ads_replicated of this TargetReport. # noqa: E501
:type: int
"""
if files_with_ads_replicated is None:
raise ValueError("Invalid value for `files_with_ads_replicated`, must not be `None`") # noqa: E501
if files_with_ads_replicated is not None and files_with_ads_replicated > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `files_with_ads_replicated`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if files_with_ads_replicated is not None and files_with_ads_replicated < 0: # noqa: E501
raise ValueError("Invalid value for `files_with_ads_replicated`, must be a value greater than or equal to `0`") # noqa: E501
self._files_with_ads_replicated = files_with_ads_replicated
@property
def flipped_lins(self):
"""Gets the flipped_lins of this TargetReport. # noqa: E501
The number of LINs flipped by this job. # noqa: E501
:return: The flipped_lins of this TargetReport. # noqa: E501
:rtype: int
"""
return self._flipped_lins
@flipped_lins.setter
def flipped_lins(self, flipped_lins):
"""Sets the flipped_lins of this TargetReport.
The number of LINs flipped by this job. # noqa: E501
:param flipped_lins: The flipped_lins of this TargetReport. # noqa: E501
:type: int
"""
if flipped_lins is None:
raise ValueError("Invalid value for `flipped_lins`, must not be `None`") # noqa: E501
if flipped_lins is not None and flipped_lins > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `flipped_lins`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if flipped_lins is not None and flipped_lins < 0: # noqa: E501
raise ValueError("Invalid value for `flipped_lins`, must be a value greater than or equal to `0`") # noqa: E501
self._flipped_lins = flipped_lins
@property
def hard_links_replicated(self):
"""Gets the hard_links_replicated of this TargetReport. # noqa: E501
The number of hard links replicated by this job. # noqa: E501
:return: The hard_links_replicated of this TargetReport. # noqa: E501
:rtype: int
"""
return self._hard_links_replicated
@hard_links_replicated.setter
def hard_links_replicated(self, hard_links_replicated):
"""Sets the hard_links_replicated of this TargetReport.
The number of hard links replicated by this job. # noqa: E501
:param hard_links_replicated: The hard_links_replicated of this TargetReport. # noqa: E501
:type: int
"""
if hard_links_replicated is None:
raise ValueError("Invalid value for `hard_links_replicated`, must not be `None`") # noqa: E501
if hard_links_replicated is not None and hard_links_replicated > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `hard_links_replicated`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if hard_links_replicated is not None and hard_links_replicated < 0: # noqa: E501
raise ValueError("Invalid value for `hard_links_replicated`, must be a value greater than or equal to `0`") # noqa: E501
self._hard_links_replicated = hard_links_replicated
@property
def hash_exceptions_fixed(self):
"""Gets the hash_exceptions_fixed of this TargetReport. # noqa: E501
The number of hash exceptions fixed by this job. # noqa: E501
:return: The hash_exceptions_fixed of this TargetReport. # noqa: E501
:rtype: int
"""
return self._hash_exceptions_fixed
@hash_exceptions_fixed.setter
def hash_exceptions_fixed(self, hash_exceptions_fixed):
"""Sets the hash_exceptions_fixed of this TargetReport.
The number of hash exceptions fixed by this job. # noqa: E501
:param hash_exceptions_fixed: The hash_exceptions_fixed of this TargetReport. # noqa: E501
:type: int
"""
if hash_exceptions_fixed is None:
raise ValueError("Invalid value for `hash_exceptions_fixed`, must not be `None`") # noqa: E501
if hash_exceptions_fixed is not None and hash_exceptions_fixed > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `hash_exceptions_fixed`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if hash_exceptions_fixed is not None and hash_exceptions_fixed < 0: # noqa: E501
raise ValueError("Invalid value for `hash_exceptions_fixed`, must be a value greater than or equal to `0`") # noqa: E501
self._hash_exceptions_fixed = hash_exceptions_fixed
@property
def hash_exceptions_found(self):
"""Gets the hash_exceptions_found of this TargetReport. # noqa: E501
The number of hash exceptions found by this job. # noqa: E501
:return: The hash_exceptions_found of this TargetReport. # noqa: E501
:rtype: int
"""
return self._hash_exceptions_found
@hash_exceptions_found.setter
def hash_exceptions_found(self, hash_exceptions_found):
"""Sets the hash_exceptions_found of this TargetReport.
The number of hash exceptions found by this job. # noqa: E501
:param hash_exceptions_found: The hash_exceptions_found of this TargetReport. # noqa: E501
:type: int
"""
if hash_exceptions_found is None:
raise ValueError("Invalid value for `hash_exceptions_found`, must not be `None`") # noqa: E501
if hash_exceptions_found is not None and hash_exceptions_found > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `hash_exceptions_found`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if hash_exceptions_found is not None and hash_exceptions_found < 0: # noqa: E501
raise ValueError("Invalid value for `hash_exceptions_found`, must be a value greater than or equal to `0`") # noqa: E501
self._hash_exceptions_found = hash_exceptions_found
@property
def id(self):
"""Gets the id of this TargetReport. # noqa: E501
A unique identifier for this object. # noqa: E501
:return: The id of this TargetReport. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this TargetReport.
A unique identifier for this object. # noqa: E501
:param id: The id of this TargetReport. # noqa: E501
:type: str
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
if id is not None and len(id) > 255:
raise ValueError("Invalid value for `id`, length must be less than or equal to `255`") # noqa: E501
if id is not None and len(id) < 0:
raise ValueError("Invalid value for `id`, length must be greater than or equal to `0`") # noqa: E501
self._id = id
@property
def job_id(self):
"""Gets the job_id of this TargetReport. # noqa: E501
The ID of the job. # noqa: E501
:return: The job_id of this TargetReport. # noqa: E501
:rtype: int
"""
return self._job_id
@job_id.setter
def job_id(self, job_id):
"""Sets the job_id of this TargetReport.
The ID of the job. # noqa: E501
:param job_id: The job_id of this TargetReport. # noqa: E501
:type: int
"""
if job_id is not None and job_id > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `job_id`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if job_id is not None and job_id < 0: # noqa: E501
raise ValueError("Invalid value for `job_id`, must be a value greater than or equal to `0`") # noqa: E501
self._job_id = job_id
@property
def lins_total(self):
"""Gets the lins_total of this TargetReport. # noqa: E501
The number of LINs transferred by this job. # noqa: E501
:return: The lins_total of this TargetReport. # noqa: E501
:rtype: int
"""
return self._lins_total
@lins_total.setter
def lins_total(self, lins_total):
"""Sets the lins_total of this TargetReport.
The number of LINs transferred by this job. # noqa: E501
:param lins_total: The lins_total of this TargetReport. # noqa: E501
:type: int
"""
if lins_total is None:
raise ValueError("Invalid value for `lins_total`, must not be `None`") # noqa: E501
if lins_total is not None and lins_total > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `lins_total`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if lins_total is not None and lins_total < 0: # noqa: E501
raise ValueError("Invalid value for `lins_total`, must be a value greater than or equal to `0`") # noqa: E501
self._lins_total = lins_total
@property
def network_bytes_to_source(self):
"""Gets the network_bytes_to_source of this TargetReport. # noqa: E501
The total number of bytes sent to the source by this job. # noqa: E501
:return: The network_bytes_to_source of this TargetReport. # noqa: E501
:rtype: int
"""
return self._network_bytes_to_source
@network_bytes_to_source.setter
def network_bytes_to_source(self, network_bytes_to_source):
"""Sets the network_bytes_to_source of this TargetReport.
The total number of bytes sent to the source by this job. # noqa: E501
:param network_bytes_to_source: The network_bytes_to_source of this TargetReport. # noqa: E501
:type: int
"""
if network_bytes_to_source is None:
raise ValueError("Invalid value for `network_bytes_to_source`, must not be `None`") # noqa: E501
if network_bytes_to_source is not None and network_bytes_to_source > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `network_bytes_to_source`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if network_bytes_to_source is not None and network_bytes_to_source < 0: # noqa: E501
raise ValueError("Invalid value for `network_bytes_to_source`, must be a value greater than or equal to `0`") # noqa: E501
self._network_bytes_to_source = network_bytes_to_source
@property
def network_bytes_to_target(self):
"""Gets the network_bytes_to_target of this TargetReport. # noqa: E501
The total number of bytes sent to the target by this job. # noqa: E501
:return: The network_bytes_to_target of this TargetReport. # noqa: E501
:rtype: int
"""
return self._network_bytes_to_target
@network_bytes_to_target.setter
def network_bytes_to_target(self, network_bytes_to_target):
"""Sets the network_bytes_to_target of this TargetReport.
The total number of bytes sent to the target by this job. # noqa: E501
:param network_bytes_to_target: The network_bytes_to_target of this TargetReport. # noqa: E501
:type: int
"""
if network_bytes_to_target is None:
raise ValueError("Invalid value for `network_bytes_to_target`, must not be `None`") # noqa: E501
if network_bytes_to_target is not None and network_bytes_to_target > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `network_bytes_to_target`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if network_bytes_to_target is not None and network_bytes_to_target < 0: # noqa: E501
raise ValueError("Invalid value for `network_bytes_to_target`, must be a value greater than or equal to `0`") # noqa: E501
self._network_bytes_to_target = network_bytes_to_target
@property
def new_files_replicated(self):
"""Gets the new_files_replicated of this TargetReport. # noqa: E501
The number of new files replicated by this job. # noqa: E501
:return: The new_files_replicated of this TargetReport. # noqa: E501
:rtype: int
"""
return self._new_files_replicated
@new_files_replicated.setter
def new_files_replicated(self, new_files_replicated):
"""Sets the new_files_replicated of this TargetReport.
The number of new files replicated by this job. # noqa: E501
:param new_files_replicated: The new_files_replicated of this TargetReport. # noqa: E501
:type: int
"""
if new_files_replicated is None:
raise ValueError("Invalid value for `new_files_replicated`, must not be `None`") # noqa: E501
if new_files_replicated is not None and new_files_replicated > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `new_files_replicated`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if new_files_replicated is not None and new_files_replicated < 0: # noqa: E501
raise ValueError("Invalid value for `new_files_replicated`, must be a value greater than or equal to `0`") # noqa: E501
self._new_files_replicated = new_files_replicated
@property
def num_retransmitted_files(self):
"""Gets the num_retransmitted_files of this TargetReport. # noqa: E501
The number of files that have been retransmitted by this job. # noqa: E501
:return: The num_retransmitted_files of this TargetReport. # noqa: E501
:rtype: int
"""
return self._num_retransmitted_files
@num_retransmitted_files.setter
def num_retransmitted_files(self, num_retransmitted_files):
"""Sets the num_retransmitted_files of this TargetReport.
The number of files that have been retransmitted by this job. # noqa: E501
:param num_retransmitted_files: The num_retransmitted_files of this TargetReport. # noqa: E501
:type: int
"""
if num_retransmitted_files is None:
raise ValueError("Invalid value for `num_retransmitted_files`, must not be `None`") # noqa: E501
if num_retransmitted_files is not None and num_retransmitted_files > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `num_retransmitted_files`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if num_retransmitted_files is not None and num_retransmitted_files < 0: # noqa: E501
raise ValueError("Invalid value for `num_retransmitted_files`, must be a value greater than or equal to `0`") # noqa: E501
self._num_retransmitted_files = num_retransmitted_files
@property
def phases(self):
"""Gets the phases of this TargetReport. # noqa: E501
Data for each phase of this job. # noqa: E501
:return: The phases of this TargetReport. # noqa: E501
:rtype: list[SyncJobPhase]
"""
return self._phases
@phases.setter
def phases(self, phases):
"""Sets the phases of this TargetReport.
Data for each phase of this job. # noqa: E501
:param phases: The phases of this TargetReport. # noqa: E501
:type: list[SyncJobPhase]
"""
if phases is None:
raise ValueError("Invalid value for `phases`, must not be `None`") # noqa: E501
self._phases = phases
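    # Note (added for clarity, not generated): list-valued fields such as errors
    # and phases are only checked for `None`; element types such as SyncJobPhase
    # are documented in the docstrings but not enforced by these setters.
    # service_report (further below) is assigned without any validation at all.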
@property
def policy_id(self):
"""Gets the policy_id of this TargetReport. # noqa: E501
The ID of the policy. # noqa: E501
:return: The policy_id of this TargetReport. # noqa: E501
:rtype: str
"""
return self._policy_id
@policy_id.setter
def policy_id(self, policy_id):
"""Sets the policy_id of this TargetReport.
The ID of the policy. # noqa: E501
:param policy_id: The policy_id of this TargetReport. # noqa: E501
:type: str
"""
if policy_id is None:
raise ValueError("Invalid value for `policy_id`, must not be `None`") # noqa: E501
if policy_id is not None and len(policy_id) > 255:
raise ValueError("Invalid value for `policy_id`, length must be less than or equal to `255`") # noqa: E501
if policy_id is not None and len(policy_id) < 0:
raise ValueError("Invalid value for `policy_id`, length must be greater than or equal to `0`") # noqa: E501
self._policy_id = policy_id
@property
def policy_name(self):
"""Gets the policy_name of this TargetReport. # noqa: E501
The name of the policy. # noqa: E501
:return: The policy_name of this TargetReport. # noqa: E501
:rtype: str
"""
return self._policy_name
@policy_name.setter
def policy_name(self, policy_name):
"""Sets the policy_name of this TargetReport.
The name of the policy. # noqa: E501
:param policy_name: The policy_name of this TargetReport. # noqa: E501
:type: str
"""
if policy_name is None:
raise ValueError("Invalid value for `policy_name`, must not be `None`") # noqa: E501
if policy_name is not None and len(policy_name) > 255:
raise ValueError("Invalid value for `policy_name`, length must be less than or equal to `255`") # noqa: E501
if policy_name is not None and len(policy_name) < 0:
raise ValueError("Invalid value for `policy_name`, length must be greater than or equal to `0`") # noqa: E501
self._policy_name = policy_name
@property
def quotas_deleted(self):
"""Gets the quotas_deleted of this TargetReport. # noqa: E501
The number of quotas removed from the target. # noqa: E501
:return: The quotas_deleted of this TargetReport. # noqa: E501
:rtype: int
"""
return self._quotas_deleted
@quotas_deleted.setter
def quotas_deleted(self, quotas_deleted):
"""Sets the quotas_deleted of this TargetReport.
The number of quotas removed from the target. # noqa: E501
:param quotas_deleted: The quotas_deleted of this TargetReport. # noqa: E501
:type: int
"""
if quotas_deleted is None:
raise ValueError("Invalid value for `quotas_deleted`, must not be `None`") # noqa: E501
if quotas_deleted is not None and quotas_deleted > 4294967295: # noqa: E501
raise ValueError("Invalid value for `quotas_deleted`, must be a value less than or equal to `4294967295`") # noqa: E501
if quotas_deleted is not None and quotas_deleted < 0: # noqa: E501
raise ValueError("Invalid value for `quotas_deleted`, must be a value greater than or equal to `0`") # noqa: E501
self._quotas_deleted = quotas_deleted
@property
def regular_files_replicated(self):
"""Gets the regular_files_replicated of this TargetReport. # noqa: E501
The number of regular files replicated by this job. # noqa: E501
:return: The regular_files_replicated of this TargetReport. # noqa: E501
:rtype: int
"""
return self._regular_files_replicated
@regular_files_replicated.setter
def regular_files_replicated(self, regular_files_replicated):
"""Sets the regular_files_replicated of this TargetReport.
The number of regular files replicated by this job. # noqa: E501
:param regular_files_replicated: The regular_files_replicated of this TargetReport. # noqa: E501
:type: int
"""
if regular_files_replicated is None:
raise ValueError("Invalid value for `regular_files_replicated`, must not be `None`") # noqa: E501
if regular_files_replicated is not None and regular_files_replicated > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `regular_files_replicated`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if regular_files_replicated is not None and regular_files_replicated < 0: # noqa: E501
raise ValueError("Invalid value for `regular_files_replicated`, must be a value greater than or equal to `0`") # noqa: E501
self._regular_files_replicated = regular_files_replicated
@property
def resynced_lins(self):
"""Gets the resynced_lins of this TargetReport. # noqa: E501
The number of LINs resynched by this job. # noqa: E501
:return: The resynced_lins of this TargetReport. # noqa: E501
:rtype: int
"""
return self._resynced_lins
@resynced_lins.setter
def resynced_lins(self, resynced_lins):
"""Sets the resynced_lins of this TargetReport.
The number of LINs resynched by this job. # noqa: E501
:param resynced_lins: The resynced_lins of this TargetReport. # noqa: E501
:type: int
"""
if resynced_lins is None:
raise ValueError("Invalid value for `resynced_lins`, must not be `None`") # noqa: E501
if resynced_lins is not None and resynced_lins > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `resynced_lins`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if resynced_lins is not None and resynced_lins < 0: # noqa: E501
raise ValueError("Invalid value for `resynced_lins`, must be a value greater than or equal to `0`") # noqa: E501
self._resynced_lins = resynced_lins
@property
def retransmitted_files(self):
"""Gets the retransmitted_files of this TargetReport. # noqa: E501
The files that have been retransmitted by this job. # noqa: E501
:return: The retransmitted_files of this TargetReport. # noqa: E501
:rtype: list[str]
"""
return self._retransmitted_files
@retransmitted_files.setter
def retransmitted_files(self, retransmitted_files):
"""Sets the retransmitted_files of this TargetReport.
The files that have been retransmitted by this job. # noqa: E501
:param retransmitted_files: The retransmitted_files of this TargetReport. # noqa: E501
:type: list[str]
"""
if retransmitted_files is None:
raise ValueError("Invalid value for `retransmitted_files`, must not be `None`") # noqa: E501
self._retransmitted_files = retransmitted_files
@property
def retry(self):
"""Gets the retry of this TargetReport. # noqa: E501
The number of times the job has been retried. # noqa: E501
:return: The retry of this TargetReport. # noqa: E501
:rtype: int
"""
return self._retry
@retry.setter
def retry(self, retry):
"""Sets the retry of this TargetReport.
The number of times the job has been retried. # noqa: E501
:param retry: The retry of this TargetReport. # noqa: E501
:type: int
"""
if retry is None:
raise ValueError("Invalid value for `retry`, must not be `None`") # noqa: E501
if retry is not None and retry > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `retry`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if retry is not None and retry < 0: # noqa: E501
raise ValueError("Invalid value for `retry`, must be a value greater than or equal to `0`") # noqa: E501
self._retry = retry
@property
def running_chunks(self):
"""Gets the running_chunks of this TargetReport. # noqa: E501
The number of data chunks currently being transmitted. # noqa: E501
:return: The running_chunks of this TargetReport. # noqa: E501
:rtype: int
"""
return self._running_chunks
@running_chunks.setter
def running_chunks(self, running_chunks):
"""Sets the running_chunks of this TargetReport.
The number of data chunks currently being transmitted. # noqa: E501
:param running_chunks: The running_chunks of this TargetReport. # noqa: E501
:type: int
"""
if running_chunks is None:
raise ValueError("Invalid value for `running_chunks`, must not be `None`") # noqa: E501
if running_chunks is not None and running_chunks > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `running_chunks`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if running_chunks is not None and running_chunks < 0: # noqa: E501
raise ValueError("Invalid value for `running_chunks`, must be a value greater than or equal to `0`") # noqa: E501
self._running_chunks = running_chunks
@property
def service_report(self):
"""Gets the service_report of this TargetReport. # noqa: E501
Data for each component exported as part of service replication. # noqa: E501
:return: The service_report of this TargetReport. # noqa: E501
:rtype: list[SyncJobServiceReportItem]
"""
return self._service_report
@service_report.setter
def service_report(self, service_report):
"""Sets the service_report of this TargetReport.
Data for each component exported as part of service replication. # noqa: E501
:param service_report: The service_report of this TargetReport. # noqa: E501
:type: list[SyncJobServiceReportItem]
"""
self._service_report = service_report
@property
def sockets_replicated(self):
"""Gets the sockets_replicated of this TargetReport. # noqa: E501
The number of sockets replicated by this job. # noqa: E501
:return: The sockets_replicated of this TargetReport. # noqa: E501
:rtype: int
"""
return self._sockets_replicated
@sockets_replicated.setter
def sockets_replicated(self, sockets_replicated):
"""Sets the sockets_replicated of this TargetReport.
The number of sockets replicated by this job. # noqa: E501
:param sockets_replicated: The sockets_replicated of this TargetReport. # noqa: E501
:type: int
"""
if sockets_replicated is None:
raise ValueError("Invalid value for `sockets_replicated`, must not be `None`") # noqa: E501
if sockets_replicated is not None and sockets_replicated > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `sockets_replicated`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if sockets_replicated is not None and sockets_replicated < 0: # noqa: E501
raise ValueError("Invalid value for `sockets_replicated`, must be a value greater than or equal to `0`") # noqa: E501
self._sockets_replicated = sockets_replicated
@property
def source_bytes_recovered(self):
"""Gets the source_bytes_recovered of this TargetReport. # noqa: E501
The number of bytes recovered on the source. # noqa: E501
:return: The source_bytes_recovered of this TargetReport. # noqa: E501
:rtype: int
"""
return self._source_bytes_recovered
@source_bytes_recovered.setter
def source_bytes_recovered(self, source_bytes_recovered):
"""Sets the source_bytes_recovered of this TargetReport.
The number of bytes recovered on the source. # noqa: E501
:param source_bytes_recovered: The source_bytes_recovered of this TargetReport. # noqa: E501
:type: int
"""
if source_bytes_recovered is None:
raise ValueError("Invalid value for `source_bytes_recovered`, must not be `None`") # noqa: E501
if source_bytes_recovered is not None and source_bytes_recovered > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `source_bytes_recovered`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if source_bytes_recovered is not None and source_bytes_recovered < 0: # noqa: E501
raise ValueError("Invalid value for `source_bytes_recovered`, must be a value greater than or equal to `0`") # noqa: E501
self._source_bytes_recovered = source_bytes_recovered
@property
def source_directories_created(self):
"""Gets the source_directories_created of this TargetReport. # noqa: E501
The number of directories created on the source. # noqa: E501
:return: The source_directories_created of this TargetReport. # noqa: E501
:rtype: int
"""
return self._source_directories_created
@source_directories_created.setter
def source_directories_created(self, source_directories_created):
"""Sets the source_directories_created of this TargetReport.
The number of directories created on the source. # noqa: E501
:param source_directories_created: The source_directories_created of this TargetReport. # noqa: E501
:type: int
"""
if source_directories_created is None:
raise ValueError("Invalid value for `source_directories_created`, must not be `None`") # noqa: E501
if source_directories_created is not None and source_directories_created > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `source_directories_created`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if source_directories_created is not None and source_directories_created < 0: # noqa: E501
raise ValueError("Invalid value for `source_directories_created`, must be a value greater than or equal to `0`") # noqa: E501
self._source_directories_created = source_directories_created
@property
def source_directories_deleted(self):
"""Gets the source_directories_deleted of this TargetReport. # noqa: E501
The number of directories deleted on the source. # noqa: E501
:return: The source_directories_deleted of this TargetReport. # noqa: E501
:rtype: int
"""
return self._source_directories_deleted
@source_directories_deleted.setter
def source_directories_deleted(self, source_directories_deleted):
"""Sets the source_directories_deleted of this TargetReport.
The number of directories deleted on the source. # noqa: E501
:param source_directories_deleted: The source_directories_deleted of this TargetReport. # noqa: E501
:type: int
"""
if source_directories_deleted is None:
raise ValueError("Invalid value for `source_directories_deleted`, must not be `None`") # noqa: E501
if source_directories_deleted is not None and source_directories_deleted > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `source_directories_deleted`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if source_directories_deleted is not None and source_directories_deleted < 0: # noqa: E501
raise ValueError("Invalid value for `source_directories_deleted`, must be a value greater than or equal to `0`") # noqa: E501
self._source_directories_deleted = source_directories_deleted
@property
def source_directories_linked(self):
"""Gets the source_directories_linked of this TargetReport. # noqa: E501
The number of directories linked on the source. # noqa: E501
:return: The source_directories_linked of this TargetReport. # noqa: E501
:rtype: int
"""
return self._source_directories_linked
@source_directories_linked.setter
def source_directories_linked(self, source_directories_linked):
"""Sets the source_directories_linked of this TargetReport.
The number of directories linked on the source. # noqa: E501
:param source_directories_linked: The source_directories_linked of this TargetReport. # noqa: E501
:type: int
"""
if source_directories_linked is None:
raise ValueError("Invalid value for `source_directories_linked`, must not be `None`") # noqa: E501
if source_directories_linked is not None and source_directories_linked > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `source_directories_linked`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if source_directories_linked is not None and source_directories_linked < 0: # noqa: E501
raise ValueError("Invalid value for `source_directories_linked`, must be a value greater than or equal to `0`") # noqa: E501
self._source_directories_linked = source_directories_linked
@property
def source_directories_unlinked(self):
"""Gets the source_directories_unlinked of this TargetReport. # noqa: E501
The number of directories unlinked on the source. # noqa: E501
:return: The source_directories_unlinked of this TargetReport. # noqa: E501
:rtype: int
"""
return self._source_directories_unlinked
@source_directories_unlinked.setter
def source_directories_unlinked(self, source_directories_unlinked):
"""Sets the source_directories_unlinked of this TargetReport.
The number of directories unlinked on the source. # noqa: E501
:param source_directories_unlinked: The source_directories_unlinked of this TargetReport. # noqa: E501
:type: int
"""
if source_directories_unlinked is None:
raise ValueError("Invalid value for `source_directories_unlinked`, must not be `None`") # noqa: E501
if source_directories_unlinked is not None and source_directories_unlinked > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `source_directories_unlinked`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if source_directories_unlinked is not None and source_directories_unlinked < 0: # noqa: E501
raise ValueError("Invalid value for `source_directories_unlinked`, must be a value greater than or equal to `0`") # noqa: E501
self._source_directories_unlinked = source_directories_unlinked
@property
def source_directories_visited(self):
"""Gets the source_directories_visited of this TargetReport. # noqa: E501
The number of directories visited on the source. # noqa: E501
:return: The source_directories_visited of this TargetReport. # noqa: E501
:rtype: int
"""
return self._source_directories_visited
@source_directories_visited.setter
def source_directories_visited(self, source_directories_visited):
"""Sets the source_directories_visited of this TargetReport.
The number of directories visited on the source. # noqa: E501
:param source_directories_visited: The source_directories_visited of this TargetReport. # noqa: E501
:type: int
"""
if source_directories_visited is None:
raise ValueError("Invalid value for `source_directories_visited`, must not be `None`") # noqa: E501
if source_directories_visited is not None and source_directories_visited > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `source_directories_visited`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if source_directories_visited is not None and source_directories_visited < 0: # noqa: E501
raise ValueError("Invalid value for `source_directories_visited`, must be a value greater than or equal to `0`") # noqa: E501
self._source_directories_visited = source_directories_visited
@property
def source_files_deleted(self):
"""Gets the source_files_deleted of this TargetReport. # noqa: E501
The number of files deleted on the source. # noqa: E501
:return: The source_files_deleted of this TargetReport. # noqa: E501
:rtype: int
"""
return self._source_files_deleted
@source_files_deleted.setter
def source_files_deleted(self, source_files_deleted):
"""Sets the source_files_deleted of this TargetReport.
The number of files deleted on the source. # noqa: E501
:param source_files_deleted: The source_files_deleted of this TargetReport. # noqa: E501
:type: int
"""
if source_files_deleted is None:
raise ValueError("Invalid value for `source_files_deleted`, must not be `None`") # noqa: E501
if source_files_deleted is not None and source_files_deleted > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `source_files_deleted`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if source_files_deleted is not None and source_files_deleted < 0: # noqa: E501
raise ValueError("Invalid value for `source_files_deleted`, must be a value greater than or equal to `0`") # noqa: E501
self._source_files_deleted = source_files_deleted
@property
def source_files_linked(self):
"""Gets the source_files_linked of this TargetReport. # noqa: E501
The number of files linked on the source. # noqa: E501
:return: The source_files_linked of this TargetReport. # noqa: E501
:rtype: int
"""
return self._source_files_linked
@source_files_linked.setter
def source_files_linked(self, source_files_linked):
"""Sets the source_files_linked of this TargetReport.
The number of files linked on the source. # noqa: E501
:param source_files_linked: The source_files_linked of this TargetReport. # noqa: E501
:type: int
"""
if source_files_linked is None:
raise ValueError("Invalid value for `source_files_linked`, must not be `None`") # noqa: E501
if source_files_linked is not None and source_files_linked > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `source_files_linked`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if source_files_linked is not None and source_files_linked < 0: # noqa: E501
raise ValueError("Invalid value for `source_files_linked`, must be a value greater than or equal to `0`") # noqa: E501
self._source_files_linked = source_files_linked
@property
def source_files_unlinked(self):
"""Gets the source_files_unlinked of this TargetReport. # noqa: E501
The number of files unlinked on the source. # noqa: E501
:return: The source_files_unlinked of this TargetReport. # noqa: E501
:rtype: int
"""
return self._source_files_unlinked
@source_files_unlinked.setter
def source_files_unlinked(self, source_files_unlinked):
"""Sets the source_files_unlinked of this TargetReport.
The number of files unlinked on the source. # noqa: E501
:param source_files_unlinked: The source_files_unlinked of this TargetReport. # noqa: E501
:type: int
"""
if source_files_unlinked is None:
raise ValueError("Invalid value for `source_files_unlinked`, must not be `None`") # noqa: E501
if source_files_unlinked is not None and source_files_unlinked > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `source_files_unlinked`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if source_files_unlinked is not None and source_files_unlinked < 0: # noqa: E501
raise ValueError("Invalid value for `source_files_unlinked`, must be a value greater than or equal to `0`") # noqa: E501
self._source_files_unlinked = source_files_unlinked
@property
def source_host(self):
"""Gets the source_host of this TargetReport. # noqa: E501
Hostname or IP address of sync source cluster. # noqa: E501
:return: The source_host of this TargetReport. # noqa: E501
:rtype: str
"""
return self._source_host
@source_host.setter
def source_host(self, source_host):
"""Sets the source_host of this TargetReport.
Hostname or IP address of sync source cluster. # noqa: E501
:param source_host: The source_host of this TargetReport. # noqa: E501
:type: str
"""
if source_host is None:
raise ValueError("Invalid value for `source_host`, must not be `None`") # noqa: E501
if source_host is not None and len(source_host) > 255:
raise ValueError("Invalid value for `source_host`, length must be less than or equal to `255`") # noqa: E501
if source_host is not None and len(source_host) < 0:
raise ValueError("Invalid value for `source_host`, length must be greater than or equal to `0`") # noqa: E501
self._source_host = source_host
@property
def sparse_data_bytes(self):
"""Gets the sparse_data_bytes of this TargetReport. # noqa: E501
The number of sparse data bytes transferred by this job. # noqa: E501
:return: The sparse_data_bytes of this TargetReport. # noqa: E501
:rtype: int
"""
return self._sparse_data_bytes
@sparse_data_bytes.setter
def sparse_data_bytes(self, sparse_data_bytes):
"""Sets the sparse_data_bytes of this TargetReport.
The number of sparse data bytes transferred by this job. # noqa: E501
:param sparse_data_bytes: The sparse_data_bytes of this TargetReport. # noqa: E501
:type: int
"""
if sparse_data_bytes is None:
raise ValueError("Invalid value for `sparse_data_bytes`, must not be `None`") # noqa: E501
if sparse_data_bytes is not None and sparse_data_bytes > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `sparse_data_bytes`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if sparse_data_bytes is not None and sparse_data_bytes < 0: # noqa: E501
raise ValueError("Invalid value for `sparse_data_bytes`, must be a value greater than or equal to `0`") # noqa: E501
self._sparse_data_bytes = sparse_data_bytes
@property
def start_time(self):
"""Gets the start_time of this TargetReport. # noqa: E501
The time the job started in unix epoch seconds. The field is null if the job hasn't started. # noqa: E501
:return: The start_time of this TargetReport. # noqa: E501
:rtype: int
"""
return self._start_time
@start_time.setter
def start_time(self, start_time):
"""Sets the start_time of this TargetReport.
The time the job started in unix epoch seconds. The field is null if the job hasn't started. # noqa: E501
:param start_time: The start_time of this TargetReport. # noqa: E501
:type: int
"""
if start_time is not None and start_time > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `start_time`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if start_time is not None and start_time < 0: # noqa: E501
raise ValueError("Invalid value for `start_time`, must be a value greater than or equal to `0`") # noqa: E501
self._start_time = start_time
@property
def state(self):
"""Gets the state of this TargetReport. # noqa: E501
The state of the job. # noqa: E501
:return: The state of this TargetReport. # noqa: E501
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this TargetReport.
The state of the job. # noqa: E501
:param state: The state of this TargetReport. # noqa: E501
:type: str
"""
if state is None:
raise ValueError("Invalid value for `state`, must not be `None`") # noqa: E501
allowed_values = ["scheduled", "running", "paused", "finished", "failed", "canceled", "needs_attention", "skipped", "pending", "unknown"] # noqa: E501
if state not in allowed_values:
raise ValueError(
"Invalid value for `state` ({0}), must be one of {1}" # noqa: E501
.format(state, allowed_values)
)
self._state = state
@property
def subreport_count(self):
"""Gets the subreport_count of this TargetReport. # noqa: E501
The number of subreports that are available for this job report. # noqa: E501
:return: The subreport_count of this TargetReport. # noqa: E501
:rtype: int
"""
return self._subreport_count
@subreport_count.setter
def subreport_count(self, subreport_count):
"""Sets the subreport_count of this TargetReport.
The number of subreports that are available for this job report. # noqa: E501
:param subreport_count: The subreport_count of this TargetReport. # noqa: E501
:type: int
"""
if subreport_count is None:
raise ValueError("Invalid value for `subreport_count`, must not be `None`") # noqa: E501
if subreport_count is not None and subreport_count > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `subreport_count`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if subreport_count is not None and subreport_count < 0: # noqa: E501
raise ValueError("Invalid value for `subreport_count`, must be a value greater than or equal to `0`") # noqa: E501
self._subreport_count = subreport_count
@property
def succeeded_chunks(self):
"""Gets the succeeded_chunks of this TargetReport. # noqa: E501
The number of data chunks that have been transmitted successfully. # noqa: E501
:return: The succeeded_chunks of this TargetReport. # noqa: E501
:rtype: int
"""
return self._succeeded_chunks
@succeeded_chunks.setter
def succeeded_chunks(self, succeeded_chunks):
"""Sets the succeeded_chunks of this TargetReport.
The number of data chunks that have been transmitted successfully. # noqa: E501
:param succeeded_chunks: The succeeded_chunks of this TargetReport. # noqa: E501
:type: int
"""
if succeeded_chunks is None:
raise ValueError("Invalid value for `succeeded_chunks`, must not be `None`") # noqa: E501
if succeeded_chunks is not None and succeeded_chunks > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `succeeded_chunks`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if succeeded_chunks is not None and succeeded_chunks < 0: # noqa: E501
raise ValueError("Invalid value for `succeeded_chunks`, must be a value greater than or equal to `0`") # noqa: E501
self._succeeded_chunks = succeeded_chunks
@property
def symlinks_replicated(self):
"""Gets the symlinks_replicated of this TargetReport. # noqa: E501
The number of symlinks replicated by this job. # noqa: E501
:return: The symlinks_replicated of this TargetReport. # noqa: E501
:rtype: int
"""
return self._symlinks_replicated
@symlinks_replicated.setter
def symlinks_replicated(self, symlinks_replicated):
"""Sets the symlinks_replicated of this TargetReport.
The number of symlinks replicated by this job. # noqa: E501
:param symlinks_replicated: The symlinks_replicated of this TargetReport. # noqa: E501
:type: int
"""
if symlinks_replicated is None:
raise ValueError("Invalid value for `symlinks_replicated`, must not be `None`") # noqa: E501
if symlinks_replicated is not None and symlinks_replicated > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `symlinks_replicated`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if symlinks_replicated is not None and symlinks_replicated < 0: # noqa: E501
raise ValueError("Invalid value for `symlinks_replicated`, must be a value greater than or equal to `0`") # noqa: E501
self._symlinks_replicated = symlinks_replicated
@property
def sync_type(self):
"""Gets the sync_type of this TargetReport. # noqa: E501
The type of sync being performed by this job. # noqa: E501
:return: The sync_type of this TargetReport. # noqa: E501
:rtype: str
"""
return self._sync_type
@sync_type.setter
def sync_type(self, sync_type):
"""Sets the sync_type of this TargetReport.
The type of sync being performed by this job. # noqa: E501
:param sync_type: The sync_type of this TargetReport. # noqa: E501
:type: str
"""
if sync_type is None:
raise ValueError("Invalid value for `sync_type`, must not be `None`") # noqa: E501
allowed_values = ["invalid", "legacy", "initial", "incremental", "upgrade", "fofb", "domainmark"] # noqa: E501
if sync_type not in allowed_values:
raise ValueError(
"Invalid value for `sync_type` ({0}), must be one of {1}" # noqa: E501
.format(sync_type, allowed_values)
)
self._sync_type = sync_type
@property
def target_bytes_recovered(self):
"""Gets the target_bytes_recovered of this TargetReport. # noqa: E501
The number of bytes recovered on the target. # noqa: E501
:return: The target_bytes_recovered of this TargetReport. # noqa: E501
:rtype: int
"""
return self._target_bytes_recovered
@target_bytes_recovered.setter
def target_bytes_recovered(self, target_bytes_recovered):
"""Sets the target_bytes_recovered of this TargetReport.
The number of bytes recovered on the target. # noqa: E501
:param target_bytes_recovered: The target_bytes_recovered of this TargetReport. # noqa: E501
:type: int
"""
if target_bytes_recovered is None:
raise ValueError("Invalid value for `target_bytes_recovered`, must not be `None`") # noqa: E501
if target_bytes_recovered is not None and target_bytes_recovered > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `target_bytes_recovered`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if target_bytes_recovered is not None and target_bytes_recovered < 0: # noqa: E501
raise ValueError("Invalid value for `target_bytes_recovered`, must be a value greater than or equal to `0`") # noqa: E501
self._target_bytes_recovered = target_bytes_recovered
@property
def target_directories_created(self):
"""Gets the target_directories_created of this TargetReport. # noqa: E501
The number of directories created on the target. # noqa: E501
:return: The target_directories_created of this TargetReport. # noqa: E501
:rtype: int
"""
return self._target_directories_created
@target_directories_created.setter
def target_directories_created(self, target_directories_created):
"""Sets the target_directories_created of this TargetReport.
The number of directories created on the target. # noqa: E501
:param target_directories_created: The target_directories_created of this TargetReport. # noqa: E501
:type: int
"""
if target_directories_created is None:
raise ValueError("Invalid value for `target_directories_created`, must not be `None`") # noqa: E501
if target_directories_created is not None and target_directories_created > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `target_directories_created`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if target_directories_created is not None and target_directories_created < 0: # noqa: E501
raise ValueError("Invalid value for `target_directories_created`, must be a value greater than or equal to `0`") # noqa: E501
self._target_directories_created = target_directories_created
@property
def target_directories_deleted(self):
"""Gets the target_directories_deleted of this TargetReport. # noqa: E501
The number of directories deleted on the target. # noqa: E501
:return: The target_directories_deleted of this TargetReport. # noqa: E501
:rtype: int
"""
return self._target_directories_deleted
@target_directories_deleted.setter
def target_directories_deleted(self, target_directories_deleted):
"""Sets the target_directories_deleted of this TargetReport.
The number of directories deleted on the target. # noqa: E501
:param target_directories_deleted: The target_directories_deleted of this TargetReport. # noqa: E501
:type: int
"""
if target_directories_deleted is None:
raise ValueError("Invalid value for `target_directories_deleted`, must not be `None`") # noqa: E501
if target_directories_deleted is not None and target_directories_deleted > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `target_directories_deleted`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if target_directories_deleted is not None and target_directories_deleted < 0: # noqa: E501
raise ValueError("Invalid value for `target_directories_deleted`, must be a value greater than or equal to `0`") # noqa: E501
self._target_directories_deleted = target_directories_deleted
@property
def target_directories_linked(self):
"""Gets the target_directories_linked of this TargetReport. # noqa: E501
The number of directories linked on the target. # noqa: E501
:return: The target_directories_linked of this TargetReport. # noqa: E501
:rtype: int
"""
return self._target_directories_linked
@target_directories_linked.setter
def target_directories_linked(self, target_directories_linked):
"""Sets the target_directories_linked of this TargetReport.
The number of directories linked on the target. # noqa: E501
:param target_directories_linked: The target_directories_linked of this TargetReport. # noqa: E501
:type: int
"""
if target_directories_linked is None:
raise ValueError("Invalid value for `target_directories_linked`, must not be `None`") # noqa: E501
if target_directories_linked is not None and target_directories_linked > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `target_directories_linked`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if target_directories_linked is not None and target_directories_linked < 0: # noqa: E501
raise ValueError("Invalid value for `target_directories_linked`, must be a value greater than or equal to `0`") # noqa: E501
self._target_directories_linked = target_directories_linked
@property
def target_directories_unlinked(self):
"""Gets the target_directories_unlinked of this TargetReport. # noqa: E501
The number of directories unlinked on the target. # noqa: E501
:return: The target_directories_unlinked of this TargetReport. # noqa: E501
:rtype: int
"""
return self._target_directories_unlinked
@target_directories_unlinked.setter
def target_directories_unlinked(self, target_directories_unlinked):
"""Sets the target_directories_unlinked of this TargetReport.
The number of directories unlinked on the target. # noqa: E501
:param target_directories_unlinked: The target_directories_unlinked of this TargetReport. # noqa: E501
:type: int
"""
if target_directories_unlinked is None:
raise ValueError("Invalid value for `target_directories_unlinked`, must not be `None`") # noqa: E501
if target_directories_unlinked is not None and target_directories_unlinked > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `target_directories_unlinked`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if target_directories_unlinked is not None and target_directories_unlinked < 0: # noqa: E501
raise ValueError("Invalid value for `target_directories_unlinked`, must be a value greater than or equal to `0`") # noqa: E501
self._target_directories_unlinked = target_directories_unlinked
@property
def target_files_deleted(self):
"""Gets the target_files_deleted of this TargetReport. # noqa: E501
The number of files deleted on the target. # noqa: E501
:return: The target_files_deleted of this TargetReport. # noqa: E501
:rtype: int
"""
return self._target_files_deleted
@target_files_deleted.setter
def target_files_deleted(self, target_files_deleted):
"""Sets the target_files_deleted of this TargetReport.
The number of files deleted on the target. # noqa: E501
:param target_files_deleted: The target_files_deleted of this TargetReport. # noqa: E501
:type: int
"""
if target_files_deleted is None:
raise ValueError("Invalid value for `target_files_deleted`, must not be `None`") # noqa: E501
if target_files_deleted is not None and target_files_deleted > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `target_files_deleted`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if target_files_deleted is not None and target_files_deleted < 0: # noqa: E501
raise ValueError("Invalid value for `target_files_deleted`, must be a value greater than or equal to `0`") # noqa: E501
self._target_files_deleted = target_files_deleted
@property
def target_files_linked(self):
"""Gets the target_files_linked of this TargetReport. # noqa: E501
The number of files linked on the target. # noqa: E501
:return: The target_files_linked of this TargetReport. # noqa: E501
:rtype: int
"""
return self._target_files_linked
@target_files_linked.setter
def target_files_linked(self, target_files_linked):
"""Sets the target_files_linked of this TargetReport.
The number of files linked on the target. # noqa: E501
:param target_files_linked: The target_files_linked of this TargetReport. # noqa: E501
:type: int
"""
if target_files_linked is None:
raise ValueError("Invalid value for `target_files_linked`, must not be `None`") # noqa: E501
if target_files_linked is not None and target_files_linked > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `target_files_linked`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if target_files_linked is not None and target_files_linked < 0: # noqa: E501
raise ValueError("Invalid value for `target_files_linked`, must be a value greater than or equal to `0`") # noqa: E501
self._target_files_linked = target_files_linked
@property
def target_files_unlinked(self):
"""Gets the target_files_unlinked of this TargetReport. # noqa: E501
The number of files unlinked on the target. # noqa: E501
:return: The target_files_unlinked of this TargetReport. # noqa: E501
:rtype: int
"""
return self._target_files_unlinked
@target_files_unlinked.setter
def target_files_unlinked(self, target_files_unlinked):
"""Sets the target_files_unlinked of this TargetReport.
The number of files unlinked on the target. # noqa: E501
:param target_files_unlinked: The target_files_unlinked of this TargetReport. # noqa: E501
:type: int
"""
if target_files_unlinked is None:
raise ValueError("Invalid value for `target_files_unlinked`, must not be `None`") # noqa: E501
if target_files_unlinked is not None and target_files_unlinked > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `target_files_unlinked`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if target_files_unlinked is not None and target_files_unlinked < 0: # noqa: E501
raise ValueError("Invalid value for `target_files_unlinked`, must be a value greater than or equal to `0`") # noqa: E501
self._target_files_unlinked = target_files_unlinked
@property
def target_path(self):
"""Gets the target_path of this TargetReport. # noqa: E501
Absolute filesystem path on the target cluster for the sync destination. # noqa: E501
:return: The target_path of this TargetReport. # noqa: E501
:rtype: str
"""
return self._target_path
@target_path.setter
def target_path(self, target_path):
"""Sets the target_path of this TargetReport.
Absolute filesystem path on the target cluster for the sync destination. # noqa: E501
:param target_path: The target_path of this TargetReport. # noqa: E501
:type: str
"""
if target_path is None:
raise ValueError("Invalid value for `target_path`, must not be `None`") # noqa: E501
if target_path is not None and len(target_path) > 255:
raise ValueError("Invalid value for `target_path`, length must be less than or equal to `255`") # noqa: E501
if target_path is not None and len(target_path) < 0:
raise ValueError("Invalid value for `target_path`, length must be greater than or equal to `0`") # noqa: E501
self._target_path = target_path
@property
def target_snapshots(self):
"""Gets the target_snapshots of this TargetReport. # noqa: E501
The target snapshots created by this job. # noqa: E501
:return: The target_snapshots of this TargetReport. # noqa: E501
:rtype: list[str]
"""
return self._target_snapshots
@target_snapshots.setter
def target_snapshots(self, target_snapshots):
"""Sets the target_snapshots of this TargetReport.
The target snapshots created by this job. # noqa: E501
:param target_snapshots: The target_snapshots of this TargetReport. # noqa: E501
:type: list[str]
"""
if target_snapshots is None:
raise ValueError("Invalid value for `target_snapshots`, must not be `None`") # noqa: E501
self._target_snapshots = target_snapshots
@property
def total_chunks(self):
"""Gets the total_chunks of this TargetReport. # noqa: E501
The total number of data chunks transmitted by this job. # noqa: E501
:return: The total_chunks of this TargetReport. # noqa: E501
:rtype: int
"""
return self._total_chunks
@total_chunks.setter
def total_chunks(self, total_chunks):
"""Sets the total_chunks of this TargetReport.
The total number of data chunks transmitted by this job. # noqa: E501
:param total_chunks: The total_chunks of this TargetReport. # noqa: E501
:type: int
"""
if total_chunks is None:
raise ValueError("Invalid value for `total_chunks`, must not be `None`") # noqa: E501
if total_chunks is not None and total_chunks > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `total_chunks`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if total_chunks is not None and total_chunks < 0: # noqa: E501
raise ValueError("Invalid value for `total_chunks`, must be a value greater than or equal to `0`") # noqa: E501
self._total_chunks = total_chunks
@property
def total_data_bytes(self):
"""Gets the total_data_bytes of this TargetReport. # noqa: E501
The total number of bytes transferred by this job. # noqa: E501
:return: The total_data_bytes of this TargetReport. # noqa: E501
:rtype: int
"""
return self._total_data_bytes
@total_data_bytes.setter
def total_data_bytes(self, total_data_bytes):
"""Sets the total_data_bytes of this TargetReport.
The total number of bytes transferred by this job. # noqa: E501
:param total_data_bytes: The total_data_bytes of this TargetReport. # noqa: E501
:type: int
"""
if total_data_bytes is None:
raise ValueError("Invalid value for `total_data_bytes`, must not be `None`") # noqa: E501
if total_data_bytes is not None and total_data_bytes > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `total_data_bytes`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if total_data_bytes is not None and total_data_bytes < 0: # noqa: E501
raise ValueError("Invalid value for `total_data_bytes`, must be a value greater than or equal to `0`") # noqa: E501
self._total_data_bytes = total_data_bytes
@property
def total_exported_services(self):
"""Gets the total_exported_services of this TargetReport. # noqa: E501
The total number of components exported as part of service replication. # noqa: E501
:return: The total_exported_services of this TargetReport. # noqa: E501
:rtype: int
"""
return self._total_exported_services
@total_exported_services.setter
def total_exported_services(self, total_exported_services):
"""Sets the total_exported_services of this TargetReport.
The total number of components exported as part of service replication. # noqa: E501
:param total_exported_services: The total_exported_services of this TargetReport. # noqa: E501
:type: int
"""
if total_exported_services is not None and total_exported_services > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `total_exported_services`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if total_exported_services is not None and total_exported_services < 0: # noqa: E501
raise ValueError("Invalid value for `total_exported_services`, must be a value greater than or equal to `0`") # noqa: E501
self._total_exported_services = total_exported_services
@property
def total_files(self):
"""Gets the total_files of this TargetReport. # noqa: E501
The number of files affected by this job. # noqa: E501
:return: The total_files of this TargetReport. # noqa: E501
:rtype: int
"""
return self._total_files
@total_files.setter
def total_files(self, total_files):
"""Sets the total_files of this TargetReport.
The number of files affected by this job. # noqa: E501
:param total_files: The total_files of this TargetReport. # noqa: E501
:type: int
"""
if total_files is None:
raise ValueError("Invalid value for `total_files`, must not be `None`") # noqa: E501
if total_files is not None and total_files > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `total_files`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if total_files is not None and total_files < 0: # noqa: E501
raise ValueError("Invalid value for `total_files`, must be a value greater than or equal to `0`") # noqa: E501
self._total_files = total_files
@property
def total_network_bytes(self):
"""Gets the total_network_bytes of this TargetReport. # noqa: E501
The total number of bytes sent over the network by this job. # noqa: E501
:return: The total_network_bytes of this TargetReport. # noqa: E501
:rtype: int
"""
return self._total_network_bytes
@total_network_bytes.setter
def total_network_bytes(self, total_network_bytes):
"""Sets the total_network_bytes of this TargetReport.
The total number of bytes sent over the network by this job. # noqa: E501
:param total_network_bytes: The total_network_bytes of this TargetReport. # noqa: E501
:type: int
"""
if total_network_bytes is None:
raise ValueError("Invalid value for `total_network_bytes`, must not be `None`") # noqa: E501
if total_network_bytes is not None and total_network_bytes > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `total_network_bytes`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if total_network_bytes is not None and total_network_bytes < 0: # noqa: E501
raise ValueError("Invalid value for `total_network_bytes`, must be a value greater than or equal to `0`") # noqa: E501
self._total_network_bytes = total_network_bytes
@property
def total_phases(self):
"""Gets the total_phases of this TargetReport. # noqa: E501
The total number of phases for this job. # noqa: E501
:return: The total_phases of this TargetReport. # noqa: E501
:rtype: int
"""
return self._total_phases
@total_phases.setter
def total_phases(self, total_phases):
"""Sets the total_phases of this TargetReport.
The total number of phases for this job. # noqa: E501
:param total_phases: The total_phases of this TargetReport. # noqa: E501
:type: int
"""
if total_phases is None:
raise ValueError("Invalid value for `total_phases`, must not be `None`") # noqa: E501
if total_phases is not None and total_phases > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `total_phases`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if total_phases is not None and total_phases < 0: # noqa: E501
raise ValueError("Invalid value for `total_phases`, must be a value greater than or equal to `0`") # noqa: E501
self._total_phases = total_phases
@property
def unchanged_data_bytes(self):
"""Gets the unchanged_data_bytes of this TargetReport. # noqa: E501
The number of bytes unchanged by this job. # noqa: E501
:return: The unchanged_data_bytes of this TargetReport. # noqa: E501
:rtype: int
"""
return self._unchanged_data_bytes
@unchanged_data_bytes.setter
def unchanged_data_bytes(self, unchanged_data_bytes):
"""Sets the unchanged_data_bytes of this TargetReport.
The number of bytes unchanged by this job. # noqa: E501
:param unchanged_data_bytes: The unchanged_data_bytes of this TargetReport. # noqa: E501
:type: int
"""
if unchanged_data_bytes is None:
raise ValueError("Invalid value for `unchanged_data_bytes`, must not be `None`") # noqa: E501
if unchanged_data_bytes is not None and unchanged_data_bytes > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `unchanged_data_bytes`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if unchanged_data_bytes is not None and unchanged_data_bytes < 0: # noqa: E501
raise ValueError("Invalid value for `unchanged_data_bytes`, must be a value greater than or equal to `0`") # noqa: E501
self._unchanged_data_bytes = unchanged_data_bytes
@property
def up_to_date_files_skipped(self):
"""Gets the up_to_date_files_skipped of this TargetReport. # noqa: E501
The number of up-to-date files skipped by this job. # noqa: E501
:return: The up_to_date_files_skipped of this TargetReport. # noqa: E501
:rtype: int
"""
return self._up_to_date_files_skipped
@up_to_date_files_skipped.setter
def up_to_date_files_skipped(self, up_to_date_files_skipped):
"""Sets the up_to_date_files_skipped of this TargetReport.
The number of up-to-date files skipped by this job. # noqa: E501
:param up_to_date_files_skipped: The up_to_date_files_skipped of this TargetReport. # noqa: E501
:type: int
"""
if up_to_date_files_skipped is None:
raise ValueError("Invalid value for `up_to_date_files_skipped`, must not be `None`") # noqa: E501
if up_to_date_files_skipped is not None and up_to_date_files_skipped > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `up_to_date_files_skipped`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if up_to_date_files_skipped is not None and up_to_date_files_skipped < 0: # noqa: E501
raise ValueError("Invalid value for `up_to_date_files_skipped`, must be a value greater than or equal to `0`") # noqa: E501
self._up_to_date_files_skipped = up_to_date_files_skipped
@property
def updated_files_replicated(self):
"""Gets the updated_files_replicated of this TargetReport. # noqa: E501
The number of updated files replicated by this job. # noqa: E501
:return: The updated_files_replicated of this TargetReport. # noqa: E501
:rtype: int
"""
return self._updated_files_replicated
@updated_files_replicated.setter
def updated_files_replicated(self, updated_files_replicated):
"""Sets the updated_files_replicated of this TargetReport.
The number of updated files replicated by this job. # noqa: E501
:param updated_files_replicated: The updated_files_replicated of this TargetReport. # noqa: E501
:type: int
"""
if updated_files_replicated is None:
raise ValueError("Invalid value for `updated_files_replicated`, must not be `None`") # noqa: E501
if updated_files_replicated is not None and updated_files_replicated > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `updated_files_replicated`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if updated_files_replicated is not None and updated_files_replicated < 0: # noqa: E501
raise ValueError("Invalid value for `updated_files_replicated`, must be a value greater than or equal to `0`") # noqa: E501
self._updated_files_replicated = updated_files_replicated
@property
def user_conflict_files_skipped(self):
"""Gets the user_conflict_files_skipped of this TargetReport. # noqa: E501
The number of files with user conflicts skipped by this job. # noqa: E501
:return: The user_conflict_files_skipped of this TargetReport. # noqa: E501
:rtype: int
"""
return self._user_conflict_files_skipped
@user_conflict_files_skipped.setter
def user_conflict_files_skipped(self, user_conflict_files_skipped):
"""Sets the user_conflict_files_skipped of this TargetReport.
The number of files with user conflicts skipped by this job. # noqa: E501
:param user_conflict_files_skipped: The user_conflict_files_skipped of this TargetReport. # noqa: E501
:type: int
"""
if user_conflict_files_skipped is None:
raise ValueError("Invalid value for `user_conflict_files_skipped`, must not be `None`") # noqa: E501
if user_conflict_files_skipped is not None and user_conflict_files_skipped > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `user_conflict_files_skipped`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if user_conflict_files_skipped is not None and user_conflict_files_skipped < 0: # noqa: E501
raise ValueError("Invalid value for `user_conflict_files_skipped`, must be a value greater than or equal to `0`") # noqa: E501
self._user_conflict_files_skipped = user_conflict_files_skipped
@property
def warnings(self):
"""Gets the warnings of this TargetReport. # noqa: E501
A list of warning messages for this job. # noqa: E501
:return: The warnings of this TargetReport. # noqa: E501
:rtype: list[str]
"""
return self._warnings
@warnings.setter
def warnings(self, warnings):
"""Sets the warnings of this TargetReport.
A list of warning messages for this job. # noqa: E501
:param warnings: The warnings of this TargetReport. # noqa: E501
:type: list[str]
"""
if warnings is None:
raise ValueError("Invalid value for `warnings`, must not be `None`") # noqa: E501
self._warnings = warnings
@property
def worm_committed_file_conflicts(self):
"""Gets the worm_committed_file_conflicts of this TargetReport. # noqa: E501
The number of WORM committed files which needed to be reverted. Since WORM committed files cannot be reverted, this is the number of files that were preserved in the compliance store. # noqa: E501
:return: The worm_committed_file_conflicts of this TargetReport. # noqa: E501
:rtype: int
"""
return self._worm_committed_file_conflicts
@worm_committed_file_conflicts.setter
def worm_committed_file_conflicts(self, worm_committed_file_conflicts):
"""Sets the worm_committed_file_conflicts of this TargetReport.
The number of WORM committed files which needed to be reverted. Since WORM committed files cannot be reverted, this is the number of files that were preserved in the compliance store. # noqa: E501
:param worm_committed_file_conflicts: The worm_committed_file_conflicts of this TargetReport. # noqa: E501
:type: int
"""
if worm_committed_file_conflicts is None:
raise ValueError("Invalid value for `worm_committed_file_conflicts`, must not be `None`") # noqa: E501
if worm_committed_file_conflicts is not None and worm_committed_file_conflicts > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `worm_committed_file_conflicts`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if worm_committed_file_conflicts is not None and worm_committed_file_conflicts < 0: # noqa: E501
raise ValueError("Invalid value for `worm_committed_file_conflicts`, must be a value greater than or equal to `0`") # noqa: E501
self._worm_committed_file_conflicts = worm_committed_file_conflicts
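    # The methods below are the standard swagger-codegen model utilities:
    # to_dict() recursively serializes nested models (including lists and dicts
    # of models), to_str()/__repr__() pretty-print that dict, and __eq__/__ne__
    # compare two TargetReport instances by their attribute dictionaries.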
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TargetReport):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
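# A minimal, hedged usage sketch (not from the original SDK source); it assumes
# the generated TargetReport constructor accepts keyword arguments for these
# fields, which is not shown in this excerpt:
#
#   report = TargetReport(state='running', total_files=10, ...)  # hypothetical kwargs
#   report.state = 'bogus'      # raises ValueError: not in allowed_values
#   report.total_files = -1     # raises ValueError: must be >= 0
#   print(report.to_dict())     # plain dict suitable for JSON serialization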
avg_line_length: 44.862808 | max_line_length: 2233 | alphanum_fraction: 0.673667
hexsha: 3cf8b38a4f7f92e3858644586f8a992ef9769cfc | size: 9847 | ext: py | lang: Python
path: tests/app/celery/test_contact_information_tasks.py
repo: department-of-veterans-affairs/notification-api @ 698bc98d8e78a13a0b2cfc432cfc718ff1016b06 | licenses: ["MIT"]
max_stars_count: 10 (2020-05-04T14:11:06.000Z to 2022-02-22T19:06:36.000Z)
max_issues_count: 554 (2020-05-07T21:56:24.000Z to 2022-03-31T23:04:51.000Z)
max_forks_count: 4 (2020-08-27T16:43:29.000Z to 2021-02-17T22:17:27.000Z)
import uuid
import pytest
from app.celery.contact_information_tasks import lookup_contact_info
from app.exceptions import NotificationTechnicalFailureException, NotificationPermanentFailureException
from app.models import Notification, RecipientIdentifier, NOTIFICATION_TECHNICAL_FAILURE, \
NOTIFICATION_PERMANENT_FAILURE, LETTER_TYPE, EMAIL_TYPE, SMS_TYPE
from app.va.identifier import IdentifierType
from app.va.va_profile import VAProfileClient, VAProfileNonRetryableException, \
VAProfileRetryableException, NoContactInfoException
EXAMPLE_VA_PROFILE_ID = '135'
notification_id = str(uuid.uuid4())
@pytest.fixture(scope='function')
def notification():
recipient_identifier = RecipientIdentifier(
notification_id=notification_id,
id_type=IdentifierType.VA_PROFILE_ID.value,
id_value=EXAMPLE_VA_PROFILE_ID
)
notification = Notification(id=notification_id)
notification.recipient_identifiers.set(recipient_identifier)
notification.notification_type = LETTER_TYPE
return notification
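# The fixture above gives every test a Notification whose recipient is identified
# only by a VA Profile ID (EXAMPLE_VA_PROFILE_ID); individual tests override
# notification_type to EMAIL_TYPE or SMS_TYPE as needed.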
def test_should_get_email_address_and_update_notification(client, mocker, notification):
notification.notification_type = EMAIL_TYPE
mocked_get_notification_by_id = mocker.patch(
'app.celery.contact_information_tasks.get_notification_by_id',
return_value=notification
)
mocked_va_profile_client = mocker.Mock(VAProfileClient)
mocked_va_profile_client.get_email = mocker.Mock(return_value='[email protected]')
mocker.patch(
'app.celery.contact_information_tasks.va_profile_client',
new=mocked_va_profile_client
)
mocked_update_notification = mocker.patch(
'app.celery.contact_information_tasks.dao_update_notification'
)
lookup_contact_info(notification.id)
mocked_get_notification_by_id.assert_called()
mocked_va_profile_client.get_email.assert_called_with(EXAMPLE_VA_PROFILE_ID)
mocked_update_notification.assert_called_with(notification)
assert notification.to == '[email protected]'
def test_should_get_phone_number_and_update_notification(client, mocker, notification):
notification.notification_type = SMS_TYPE
mocked_get_notification_by_id = mocker.patch(
'app.celery.contact_information_tasks.get_notification_by_id',
return_value=notification
)
mocked_va_profile_client = mocker.Mock(VAProfileClient)
mocked_va_profile_client.get_telephone = mocker.Mock(return_value='+15555555555')
mocker.patch(
'app.celery.contact_information_tasks.va_profile_client',
new=mocked_va_profile_client
)
mocked_update_notification = mocker.patch(
'app.celery.contact_information_tasks.dao_update_notification'
)
lookup_contact_info(notification.id)
mocked_get_notification_by_id.assert_called()
mocked_va_profile_client.get_telephone.assert_called_with(EXAMPLE_VA_PROFILE_ID)
mocked_update_notification.assert_called_with(notification)
assert notification.to == '+15555555555'
def test_should_not_retry_on_non_retryable_exception(client, mocker, notification):
notification.notification_type = EMAIL_TYPE
mocker.patch(
'app.celery.contact_information_tasks.get_notification_by_id',
return_value=notification
)
mocked_va_profile_client = mocker.Mock(VAProfileClient)
exception = VAProfileNonRetryableException
mocked_va_profile_client.get_email = mocker.Mock(side_effect=exception)
mocker.patch(
'app.celery.contact_information_tasks.va_profile_client',
new=mocked_va_profile_client
)
mocked_update_notification_status_by_id = mocker.patch(
'app.celery.contact_information_tasks.update_notification_status_by_id'
)
mocked_retry = mocker.patch('app.celery.contact_information_tasks.lookup_contact_info.retry')
with pytest.raises(NotificationPermanentFailureException):
lookup_contact_info(notification.id)
mocked_va_profile_client.get_email.assert_called_with(EXAMPLE_VA_PROFILE_ID)
mocked_update_notification_status_by_id.assert_called_with(
notification.id, NOTIFICATION_PERMANENT_FAILURE, status_reason=exception.failure_reason
)
mocked_retry.assert_not_called()
def test_should_retry_on_retryable_exception(client, mocker, notification):
notification.notification_type = EMAIL_TYPE
mocker.patch(
'app.celery.contact_information_tasks.get_notification_by_id',
return_value=notification
)
mocked_va_profile_client = mocker.Mock(VAProfileClient)
mocked_va_profile_client.get_email = mocker.Mock(side_effect=VAProfileRetryableException('some error'))
mocker.patch(
'app.celery.contact_information_tasks.va_profile_client',
new=mocked_va_profile_client
)
mocked_retry = mocker.patch('app.celery.contact_information_tasks.lookup_contact_info.retry')
lookup_contact_info(notification.id)
mocked_va_profile_client.get_email.assert_called_with(EXAMPLE_VA_PROFILE_ID)
mocked_retry.assert_called()
def test_should_update_notification_to_technical_failure_on_max_retries(client, mocker, notification):
notification.notification_type = EMAIL_TYPE
mocker.patch(
'app.celery.contact_information_tasks.get_notification_by_id',
return_value=notification
)
mocked_va_profile_client = mocker.Mock(VAProfileClient)
exception = VAProfileRetryableException(
'RETRY FAILED: Max retries reached. '
f'The task lookup_contact_info failed for notification {notification_id}. '
'Notification has been updated to technical-failure'
)
mocked_va_profile_client.get_email = mocker.Mock(side_effect=exception)
mocker.patch(
'app.celery.contact_information_tasks.va_profile_client',
new=mocked_va_profile_client
)
mocked_update_notification_status_by_id = mocker.patch(
'app.celery.contact_information_tasks.update_notification_status_by_id'
)
mocker.patch(
'app.celery.contact_information_tasks.lookup_contact_info.retry',
side_effect=lookup_contact_info.MaxRetriesExceededError
)
with pytest.raises(NotificationTechnicalFailureException):
lookup_contact_info(notification.id)
mocked_va_profile_client.get_email.assert_called_with(EXAMPLE_VA_PROFILE_ID)
mocked_update_notification_status_by_id.assert_called_with(
notification.id, NOTIFICATION_TECHNICAL_FAILURE, status_reason=exception.failure_reason
)
def test_should_update_notification_to_permanent_failure_on_no_contact_info_exception(client, mocker, notification):
notification.notification_type = EMAIL_TYPE
mocker.patch(
'app.celery.contact_information_tasks.get_notification_by_id',
return_value=notification
)
mocked_va_profile_client = mocker.Mock(VAProfileClient)
exception = NoContactInfoException
mocked_va_profile_client.get_email = mocker.Mock(side_effect=exception)
mocker.patch(
'app.celery.contact_information_tasks.va_profile_client',
new=mocked_va_profile_client
)
mocked_update_notification_status_by_id = mocker.patch(
'app.celery.contact_information_tasks.update_notification_status_by_id'
)
mocked_request = mocker.Mock()
mocked_chain = mocker.PropertyMock()
mocked_chain.return_value = ['some-task-to-be-executed-next']
type(mocked_request).chain = mocked_chain
mocker.patch(
'celery.app.task.Task.request',
new=mocked_request
)
lookup_contact_info(notification.id)
mocked_va_profile_client.get_email.assert_called_with(EXAMPLE_VA_PROFILE_ID)
mocked_update_notification_status_by_id.assert_called_with(
notification.id, NOTIFICATION_PERMANENT_FAILURE, status_reason=exception.failure_reason
)
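    # lookup_contact_info is expected to abort the rest of the Celery chain by
    # assigning request.chain = None; the PropertyMock above records that
    # assignment, which is what the final assertion checks.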
mocked_chain.assert_called_with(None)
@pytest.mark.parametrize(
'exception, throws_additional_exception, notification_status, exception_reason',
[
(
VAProfileRetryableException,
NotificationTechnicalFailureException,
NOTIFICATION_TECHNICAL_FAILURE,
VAProfileRetryableException.failure_reason
),
(
NoContactInfoException,
False,
NOTIFICATION_PERMANENT_FAILURE,
NoContactInfoException.failure_reason
),
(
VAProfileNonRetryableException,
NotificationPermanentFailureException,
NOTIFICATION_PERMANENT_FAILURE,
VAProfileNonRetryableException.failure_reason
)
]
)
def test_exception_sets_failure_reason_if_thrown(
client, mocker, notification, exception, throws_additional_exception, notification_status, exception_reason
):
notification.notification_type = EMAIL_TYPE
mocker.patch(
'app.celery.contact_information_tasks.get_notification_by_id',
return_value=notification
)
mocked_va_profile_client = mocker.Mock(VAProfileClient)
mocked_va_profile_client.get_email = mocker.Mock(side_effect=exception)
mocker.patch(
'app.celery.contact_information_tasks.va_profile_client',
new=mocked_va_profile_client
)
mocked_update_notification_status_by_id = mocker.patch(
'app.celery.contact_information_tasks.update_notification_status_by_id'
)
mocker.patch(
'app.celery.contact_information_tasks.lookup_contact_info.retry',
side_effect=lookup_contact_info.MaxRetriesExceededError
)
if throws_additional_exception:
with pytest.raises(throws_additional_exception):
lookup_contact_info(notification.id)
else:
lookup_contact_info(notification.id)
mocked_update_notification_status_by_id.assert_called_once_with(
notification.id, notification_status, status_reason=exception_reason
)
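# A hedged note, not part of the original module: these tests rely on the
# project's `client` fixture and pytest-mock's `mocker`; assuming both are
# configured, a typical invocation is:
#   pytest tests/app/celery/test_contact_information_tasks.py -q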
avg_line_length: 35.807273 | max_line_length: 116 | alphanum_fraction: 0.774855
hexsha: ca3207fb330836415b01e5759bdfa6fe9bfc96e5 | size: 3924 | ext: py | lang: Python
path: opennre/dataset/preprocess_dataset.py
repo: igorvlnascimento/DeepREF @ 0fed8120571e44e12ee3d1861289bc101c0a275f | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
import os
import json
import argparse
import subprocess
from opennre import config
from opennre.dataset.preprocess import Preprocess
class PreprocessDataset():
def __init__(self, dataset_name, preprocessing_type):
self.dataset_name = dataset_name
self.preprocessing_type = []
if len(preprocessing_type):
self.preprocessing_type = sorted(preprocessing_type)
self.preprocessing_type_str = "_".join(self.preprocessing_type)
self.output_path = os.path.join('benchmark', dataset_name, self.preprocessing_type_str)
def out(self, path): return os.path.join(self.output_path, path)
def makedir(self):
if not os.path.exists(os.path.join(self.output_path)):
os.makedirs(os.path.join(self.output_path))
def output_file_length(self, filename_path):
return len(open(filename_path).readlines())
def preprocess_dataset(self):
preprocessing_types = {
"entity_blinding": False,
"digit": False,
"punct": False,
"stopword": False,
"brackets": False,
"ner_blinding": False
}
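        # Map the short CLI codes to the flags above: nb=ner_blinding,
        # eb=entity_blinding, d=digit, p=punct, sw=stopword, b=brackets.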
if self.preprocessing_type is not None:
if "nb" in self.preprocessing_type:
preprocessing_types["ner_blinding"] = True
elif "eb" in self.preprocessing_type:
preprocessing_types["entity_blinding"] = True
if "d" in self.preprocessing_type:
preprocessing_types["digit"] = True
if "p" in self.preprocessing_type:
preprocessing_types["punct"] = True
if "sw" in self.preprocessing_type:
preprocessing_types["stopword"] = True
if "b" in self.preprocessing_type:
preprocessing_types["brackets"] = True
preprocess = Preprocess(self.dataset_name, preprocessing_types)
original_dataframe_names = [self.dataset_name + '_train', self.dataset_name + '_val', self.dataset_name + '_test']
self.makedir()
for original_df_name in original_dataframe_names:
ds_path = os.path.join('benchmark', self.dataset_name, 'original', original_df_name)
if not os.path.exists(os.path.join(ds_path + '_original.txt')) or \
not os.path.exists(os.path.join(ds_path + '_original.csv')):
print("preprocessing_type_str:",self.preprocessing_type_str)
subprocess.call(['bash', 'benchmark/download_{}.sh'.format(self.dataset_name)])
if not os.path.exists(self.out(original_df_name + '_{}.txt'.format(self.preprocessing_type_str))):
print("Preprocessing...")
original_ds = preprocess.preprocess(os.path.join(ds_path + '_original.csv'))
preprocess.write_into_txt(original_ds, self.out(original_df_name + '_{}.txt'.format(self.preprocessing_type_str)))
for original_df_name in original_dataframe_names:
print(self.output_file_length(os.path.join('benchmark', self.dataset_name, 'original', '{}_original.txt'.format(original_df_name))))
print(self.output_file_length(self.out('{}_{}.txt'.format(original_df_name, self.preprocessing_type_str))))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--dataset', type=str, required=True, choices=config.DATASETS,
help='Dataset name')
    # nargs='+' already yields a list of strings; type=list would split each
    # argument into single characters and break both the choices check and the
    # "_".join in PreprocessDataset.
    parser.add_argument('-p', '--preprocessing', type=str, required=True, choices=config.PREPROCESSING_COMBINATION, nargs='+',
                        help='Preprocessing types')
args = parser.parse_args()
with open(config.BEST_HPARAMS_FILE_PATH.format(args.dataset), 'r') as f:
best_hparams = json.load(f)
preprocess_dataset = PreprocessDataset(args.dataset, args.preprocessing)
preprocess_dataset.preprocess_dataset()
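# A hedged usage sketch (not part of the original script); it assumes
# "semeval2010" is one of config.DATASETS and that "eb" and "d" are valid
# entries of config.PREPROCESSING_COMBINATION:
#   python opennre/dataset/preprocess_dataset.py -d semeval2010 -p eb d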
avg_line_length: 44.089888 | max_line_length: 144 | alphanum_fraction: 0.649847
hexsha: ce8add9288ce475f24d8c47e019be1390a85bfee | size: 512 | ext: py | lang: Python
path: vsch/helpers/handling.py
repo: msmukowski/vision-system-counting-holes @ 9e154f50e5bc9d8df6d4a3cacdb84db218a9b522 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
import json
def loadData(path):
data = {}
image_list = []
with open(path,"r") as read_file:
data = json.load(read_file)
for scene, frame in enumerate(data):
image_list.append(frame)
return data, image_list
def saveData(path,data):
with open(path, 'w', encoding='utf8') as outfile:
str_ = json.dumps(data,
indent=4, sort_keys=True,
separators=(',', ': '), ensure_ascii=False)
outfile.write(str_)
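# A minimal, hedged round-trip sketch (not part of the original helper); the
# file names below are hypothetical:
#
#   data, image_list = loadData('scenes.json')
#   saveData('scenes_copy.json', data)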
avg_line_length: 30.117647 | max_line_length: 69 | alphanum_fraction: 0.558594
hexsha: 90dbb0ce15224bdc7aa19a76effd887eca8cb8ae | size: 15871 | ext: py | lang: Python
path: tests/test_build_epub.py
repo: danieleades/sphinx @ 1c98aea126919b218766371e1fdf308a1167e95f | licenses: ["BSD-2-Clause"]
max_stars_count: null
max_issues_count: 1662 (2015-01-02T11:45:27.000Z to 2015-01-03T12:21:29.000Z)
max_forks_count: null
"""Test the HTML builder and check output against XPath."""
import os
import subprocess
from subprocess import PIPE, CalledProcessError
from xml.etree import ElementTree
import pytest
# check given command is runnable
def runnable(command):
try:
subprocess.run(command, stdout=PIPE, stderr=PIPE, check=True)
return True
except (OSError, CalledProcessError):
return False # command not found or exit with non-zero
class EPUBElementTree:
"""Test helper for content.opf and toc.ncx"""
namespaces = {
'idpf': 'http://www.idpf.org/2007/opf',
'dc': 'http://purl.org/dc/elements/1.1/',
'ibooks': 'http://vocabulary.itunes.apple.com/rdf/ibooks/vocabulary-extensions-1.0/',
'ncx': 'http://www.daisy.org/z3986/2005/ncx/',
'xhtml': 'http://www.w3.org/1999/xhtml',
'epub': 'http://www.idpf.org/2007/ops'
}
def __init__(self, tree):
self.tree = tree
@classmethod
def fromstring(cls, string):
return cls(ElementTree.fromstring(string))
def find(self, match):
ret = self.tree.find(match, namespaces=self.namespaces)
if ret is not None:
return self.__class__(ret)
else:
return ret
def findall(self, match):
ret = self.tree.findall(match, namespaces=self.namespaces)
return [self.__class__(e) for e in ret]
def __getattr__(self, name):
return getattr(self.tree, name)
def __iter__(self):
for child in self.tree:
yield self.__class__(child)
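# EPUBElementTree lets the tests below query EPUB XML with the short namespace
# prefixes registered above, e.g.:
#   opf = EPUBElementTree.fromstring((app.outdir / 'content.opf').read_text(encoding='utf8'))
#   opf.find("./idpf:metadata/dc:title")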
@pytest.mark.sphinx('epub', testroot='basic')
def test_build_epub(app):
app.build()
assert (app.outdir / 'mimetype').read_text(encoding='utf8') == 'application/epub+zip'
assert (app.outdir / 'META-INF' / 'container.xml').exists()
# toc.ncx
toc = EPUBElementTree.fromstring((app.outdir / 'toc.ncx').read_text(encoding='utf8'))
assert toc.find("./ncx:docTitle/ncx:text").text == 'Python'
# toc.ncx / head
meta = list(toc.find("./ncx:head"))
assert meta[0].attrib == {'name': 'dtb:uid', 'content': 'unknown'}
assert meta[1].attrib == {'name': 'dtb:depth', 'content': '1'}
assert meta[2].attrib == {'name': 'dtb:totalPageCount', 'content': '0'}
assert meta[3].attrib == {'name': 'dtb:maxPageNumber', 'content': '0'}
# toc.ncx / navMap
navpoints = toc.findall("./ncx:navMap/ncx:navPoint")
assert len(navpoints) == 1
assert navpoints[0].attrib == {'id': 'navPoint1', 'playOrder': '1'}
assert navpoints[0].find("./ncx:content").attrib == {'src': 'index.xhtml'}
navlabel = navpoints[0].find("./ncx:navLabel/ncx:text")
assert navlabel.text == 'The basic Sphinx documentation for testing'
# content.opf
opf = EPUBElementTree.fromstring((app.outdir / 'content.opf').read_text(encoding='utf8'))
# content.opf / metadata
metadata = opf.find("./idpf:metadata")
assert metadata.find("./dc:language").text == 'en'
assert metadata.find("./dc:title").text == 'Python'
assert metadata.find("./dc:description").text == 'unknown'
assert metadata.find("./dc:creator").text == 'unknown'
assert metadata.find("./dc:contributor").text == 'unknown'
assert metadata.find("./dc:publisher").text == 'unknown'
assert metadata.find("./dc:rights").text is None
assert metadata.find("./idpf:meta[@property='ibooks:version']").text is None
assert metadata.find("./idpf:meta[@property='ibooks:specified-fonts']").text == 'true'
assert metadata.find("./idpf:meta[@property='ibooks:binding']").text == 'true'
assert metadata.find("./idpf:meta[@property='ibooks:scroll-axis']").text == 'vertical'
# content.opf / manifest
manifest = opf.find("./idpf:manifest")
items = list(manifest)
assert items[0].attrib == {'id': 'ncx',
'href': 'toc.ncx',
'media-type': 'application/x-dtbncx+xml'}
assert items[1].attrib == {'id': 'nav',
'href': 'nav.xhtml',
'media-type': 'application/xhtml+xml',
'properties': 'nav'}
assert items[2].attrib == {'id': 'epub-0',
'href': 'genindex.xhtml',
'media-type': 'application/xhtml+xml'}
assert items[3].attrib == {'id': 'epub-1',
'href': 'index.xhtml',
'media-type': 'application/xhtml+xml'}
for i, item in enumerate(items[2:]):
# items are named as epub-NN
assert item.get('id') == 'epub-%d' % i
# content.opf / spine
spine = opf.find("./idpf:spine")
itemrefs = list(spine)
assert spine.get('toc') == 'ncx'
assert spine.get('page-progression-direction') == 'ltr'
assert itemrefs[0].get('idref') == 'epub-1'
assert itemrefs[1].get('idref') == 'epub-0'
# content.opf / guide
reference = opf.find("./idpf:guide/idpf:reference")
assert reference.get('type') == 'toc'
assert reference.get('title') == 'Table of Contents'
assert reference.get('href') == 'index.xhtml'
# nav.xhtml
nav = EPUBElementTree.fromstring((app.outdir / 'nav.xhtml').read_text(encoding='utf8'))
assert nav.attrib == {'lang': 'en',
'{http://www.w3.org/XML/1998/namespace}lang': 'en'}
assert nav.find("./xhtml:head/xhtml:title").text == 'Table of Contents'
# nav.xhtml / nav
navlist = nav.find("./xhtml:body/xhtml:nav")
toc = navlist.findall("./xhtml:ol/xhtml:li")
assert navlist.find("./xhtml:h1").text == 'Table of Contents'
assert len(toc) == 1
assert toc[0].find("./xhtml:a").get("href") == 'index.xhtml'
assert toc[0].find("./xhtml:a").text == 'The basic Sphinx documentation for testing'
@pytest.mark.sphinx('epub', testroot='footnotes',
confoverrides={'epub_cover': ('_images/rimg.png', None)})
def test_epub_cover(app):
app.build()
# content.opf / metadata
opf = EPUBElementTree.fromstring((app.outdir / 'content.opf').read_text(encoding='utf8'))
cover_image = opf.find("./idpf:manifest/idpf:item[@href='%s']" % app.config.epub_cover[0])
cover = opf.find("./idpf:metadata/idpf:meta[@name='cover']")
assert cover
assert cover.get('content') == cover_image.get('id')
@pytest.mark.sphinx('epub', testroot='toctree')
def test_nested_toc(app):
app.build()
# toc.ncx
toc = EPUBElementTree.fromstring((app.outdir / 'toc.ncx').read_bytes())
assert toc.find("./ncx:docTitle/ncx:text").text == 'Python'
# toc.ncx / navPoint
def navinfo(elem):
label = elem.find("./ncx:navLabel/ncx:text")
content = elem.find("./ncx:content")
return (elem.get('id'), elem.get('playOrder'),
content.get('src'), label.text)
navpoints = toc.findall("./ncx:navMap/ncx:navPoint")
assert len(navpoints) == 4
assert navinfo(navpoints[0]) == ('navPoint1', '1', 'index.xhtml',
"Welcome to Sphinx Tests’s documentation!")
assert navpoints[0].findall("./ncx:navPoint") == []
# toc.ncx / nested navPoints
assert navinfo(navpoints[1]) == ('navPoint2', '2', 'foo.xhtml', 'foo')
navchildren = navpoints[1].findall("./ncx:navPoint")
assert len(navchildren) == 4
assert navinfo(navchildren[0]) == ('navPoint3', '2', 'foo.xhtml', 'foo')
assert navinfo(navchildren[1]) == ('navPoint4', '3', 'quux.xhtml', 'quux')
assert navinfo(navchildren[2]) == ('navPoint5', '4', 'foo.xhtml#foo-1', 'foo.1')
assert navinfo(navchildren[3]) == ('navPoint8', '6', 'foo.xhtml#foo-2', 'foo.2')
# nav.xhtml / nav
def navinfo(elem):
anchor = elem.find("./xhtml:a")
return (anchor.get('href'), anchor.text)
nav = EPUBElementTree.fromstring((app.outdir / 'nav.xhtml').read_bytes())
toc = nav.findall("./xhtml:body/xhtml:nav/xhtml:ol/xhtml:li")
assert len(toc) == 4
assert navinfo(toc[0]) == ('index.xhtml',
"Welcome to Sphinx Tests’s documentation!")
assert toc[0].findall("./xhtml:ol") == []
# nav.xhtml / nested toc
assert navinfo(toc[1]) == ('foo.xhtml', 'foo')
tocchildren = toc[1].findall("./xhtml:ol/xhtml:li")
assert len(tocchildren) == 3
assert navinfo(tocchildren[0]) == ('quux.xhtml', 'quux')
assert navinfo(tocchildren[1]) == ('foo.xhtml#foo-1', 'foo.1')
assert navinfo(tocchildren[2]) == ('foo.xhtml#foo-2', 'foo.2')
grandchild = tocchildren[1].findall("./xhtml:ol/xhtml:li")
assert len(grandchild) == 1
assert navinfo(grandchild[0]) == ('foo.xhtml#foo-1-1', 'foo.1-1')
@pytest.mark.sphinx('epub', testroot='need-escaped')
def test_escaped_toc(app):
app.build()
# toc.ncx
toc = EPUBElementTree.fromstring((app.outdir / 'toc.ncx').read_bytes())
assert toc.find("./ncx:docTitle/ncx:text").text == 'need <b>"escaped"</b> project'
# toc.ncx / navPoint
def navinfo(elem):
label = elem.find("./ncx:navLabel/ncx:text")
content = elem.find("./ncx:content")
return (elem.get('id'), elem.get('playOrder'),
content.get('src'), label.text)
navpoints = toc.findall("./ncx:navMap/ncx:navPoint")
assert len(navpoints) == 4
assert navinfo(navpoints[0]) == ('navPoint1', '1', 'index.xhtml',
"Welcome to Sphinx Tests's documentation!")
assert navpoints[0].findall("./ncx:navPoint") == []
# toc.ncx / nested navPoints
assert navinfo(navpoints[1]) == ('navPoint2', '2', 'foo.xhtml', '<foo>')
navchildren = navpoints[1].findall("./ncx:navPoint")
assert len(navchildren) == 4
assert navinfo(navchildren[0]) == ('navPoint3', '2', 'foo.xhtml', '<foo>')
assert navinfo(navchildren[1]) == ('navPoint4', '3', 'quux.xhtml', 'quux')
assert navinfo(navchildren[2]) == ('navPoint5', '4', 'foo.xhtml#foo-1', 'foo “1”')
assert navinfo(navchildren[3]) == ('navPoint8', '6', 'foo.xhtml#foo-2', 'foo.2')
# nav.xhtml / nav
def navinfo(elem):
anchor = elem.find("./xhtml:a")
return (anchor.get('href'), anchor.text)
nav = EPUBElementTree.fromstring((app.outdir / 'nav.xhtml').read_bytes())
toc = nav.findall("./xhtml:body/xhtml:nav/xhtml:ol/xhtml:li")
assert len(toc) == 4
assert navinfo(toc[0]) == ('index.xhtml',
"Welcome to Sphinx Tests's documentation!")
assert toc[0].findall("./xhtml:ol") == []
# nav.xhtml / nested toc
assert navinfo(toc[1]) == ('foo.xhtml', '<foo>')
tocchildren = toc[1].findall("./xhtml:ol/xhtml:li")
assert len(tocchildren) == 3
assert navinfo(tocchildren[0]) == ('quux.xhtml', 'quux')
assert navinfo(tocchildren[1]) == ('foo.xhtml#foo-1', 'foo “1”')
assert navinfo(tocchildren[2]) == ('foo.xhtml#foo-2', 'foo.2')
grandchild = tocchildren[1].findall("./xhtml:ol/xhtml:li")
assert len(grandchild) == 1
assert navinfo(grandchild[0]) == ('foo.xhtml#foo-1-1', 'foo.1-1')
@pytest.mark.sphinx('epub', testroot='basic')
def test_epub_writing_mode(app):
# horizontal (default)
app.build()
# horizontal / page-progression-direction
opf = EPUBElementTree.fromstring((app.outdir / 'content.opf').read_text(encoding='utf8'))
assert opf.find("./idpf:spine").get('page-progression-direction') == 'ltr'
# horizontal / ibooks:scroll-axis
metadata = opf.find("./idpf:metadata")
assert metadata.find("./idpf:meta[@property='ibooks:scroll-axis']").text == 'vertical'
# horizontal / writing-mode (CSS)
css = (app.outdir / '_static' / 'epub.css').read_text(encoding='utf8')
assert 'writing-mode: horizontal-tb;' in css
# vertical
app.config.epub_writing_mode = 'vertical'
    (app.outdir / 'index.xhtml').unlink()  # force a rebuild
app.build()
# vertical / page-progression-direction
opf = EPUBElementTree.fromstring((app.outdir / 'content.opf').read_text(encoding='utf8'))
assert opf.find("./idpf:spine").get('page-progression-direction') == 'rtl'
# vertical / ibooks:scroll-axis
metadata = opf.find("./idpf:metadata")
assert metadata.find("./idpf:meta[@property='ibooks:scroll-axis']").text == 'horizontal'
# vertical / writing-mode (CSS)
css = (app.outdir / '_static' / 'epub.css').read_text(encoding='utf8')
assert 'writing-mode: vertical-rl;' in css
@pytest.mark.sphinx('epub', testroot='epub-anchor-id')
def test_epub_anchor_id(app):
app.build()
html = (app.outdir / 'index.xhtml').read_text(encoding='utf8')
assert ('<p id="std-setting-STATICFILES_FINDERS">'
'blah blah blah</p>' in html)
assert ('<span id="std-setting-STATICFILES_SECTION"></span>'
'<h1>blah blah blah</h1>' in html)
assert 'see <a class="reference internal" href="#std-setting-STATICFILES_FINDERS">' in html
@pytest.mark.sphinx('epub', testroot='html_assets')
def test_epub_assets(app):
app.builder.build_all()
    # epub stylesheets (same as html_css_files)
content = (app.outdir / 'index.xhtml').read_text(encoding='utf8')
assert ('<link rel="stylesheet" type="text/css" href="_static/css/style.css" />'
in content)
assert ('<link media="print" rel="stylesheet" title="title" type="text/css" '
'href="https://example.com/custom.css" />' in content)
@pytest.mark.sphinx('epub', testroot='html_assets',
confoverrides={'epub_css_files': ['css/epub.css']})
def test_epub_css_files(app):
app.builder.build_all()
# epub_css_files
content = (app.outdir / 'index.xhtml').read_text(encoding='utf8')
assert '<link rel="stylesheet" type="text/css" href="_static/css/epub.css" />' in content
    # files in html_css_files are not output
assert ('<link rel="stylesheet" type="text/css" href="_static/css/style.css" />'
not in content)
assert ('<link media="print" rel="stylesheet" title="title" type="text/css" '
'href="https://example.com/custom.css" />' not in content)
@pytest.mark.sphinx('epub', testroot='roles-download')
def test_html_download_role(app, status, warning):
app.build()
assert not (app.outdir / '_downloads' / 'dummy.dat').exists()
content = (app.outdir / 'index.xhtml').read_text(encoding='utf8')
assert ('<li><p><code class="xref download docutils literal notranslate">'
'<span class="pre">dummy.dat</span></code></p></li>' in content)
assert ('<li><p><code class="xref download docutils literal notranslate">'
'<span class="pre">not_found.dat</span></code></p></li>' in content)
assert ('<li><p><code class="xref download docutils literal notranslate">'
'<span class="pre">Sphinx</span> <span class="pre">logo</span></code>'
'<span class="link-target"> [http://www.sphinx-doc.org/en/master'
'/_static/sphinxheader.png]</span></p></li>' in content)
@pytest.mark.sphinx('epub', testroot='toctree-duplicated')
def test_duplicated_toctree_entry(app, status, warning):
app.build()
assert 'WARNING: duplicated ToC entry found: foo.xhtml' in warning.getvalue()
@pytest.mark.skipif('DO_EPUBCHECK' not in os.environ,
reason='Skipped because DO_EPUBCHECK is not set')
@pytest.mark.sphinx('epub')
def test_run_epubcheck(app):
app.build()
epubcheck = os.environ.get('EPUBCHECK_PATH', '/usr/share/java/epubcheck.jar')
if runnable(['java', '-version']) and os.path.exists(epubcheck):
try:
subprocess.run(['java', '-jar', epubcheck, app.outdir / 'SphinxTests.epub'],
stdout=PIPE, stderr=PIPE, check=True)
except CalledProcessError as exc:
print(exc.stdout.decode('utf-8'))
print(exc.stderr.decode('utf-8'))
assert False, 'epubcheck exited with return code %s' % exc.returncode
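# To run the epubcheck test locally (a sketch; the jar path is an assumption):
#
#     DO_EPUBCHECK=1 EPUBCHECK_PATH=/opt/epubcheck/epubcheck.jar pytest -k test_run_epubcheck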
| 41.223377 | 95 | 0.617289 |
4a2ebacd83ba003a299b2c453488a92ea04ff5e4 | 11,846 | py | Python | moronbot.py | Heufneutje/PyMoronBot | 055abf0e685f3d2fc02863517952dc7fad9050f3 | ["MIT"] | null | null | null | moronbot.py | Heufneutje/PyMoronBot | 055abf0e685f3d2fc02863517952dc7fad9050f3 | ["MIT"] | null | null | null | moronbot.py | Heufneutje/PyMoronBot | 055abf0e685f3d2fc02863517952dc7fad9050f3 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
import os
import shelve
import sys
import platform
import datetime
import argparse
from twisted.words.protocols import irc
from twisted.internet import reactor, protocol
from IRCMessage import IRCMessage, IRCChannel, IRCUser
from IRCResponse import IRCResponse
import ModuleHandler
import GlobalVars
import ServerInfo
parser = argparse.ArgumentParser(description='An IRC bot written in Python.')
parser.add_argument('-s', '--server', help='the IRC server to connect to (required)', type=str, required=True)
parser.add_argument('-p', '--port', help='the port on the server to connect to (default 6667)', type=int, default=6667)
parser.add_argument('-c', '--channels', help='channels to join after connecting (default none)', type=str, nargs='+', default=[])
parser.add_argument('-n', '--nick', help='the nick the bot should use (default PyMoronBot)', type=str, default='PyMoronBot')
cmdArgs = parser.parse_args()
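# Example invocation (a sketch; server and channel names are placeholders):
#   python moronbot.py -s irc.example.org -p 6667 -c "#example" -n PyMoronBot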
restarting = False
startTime = datetime.datetime.utcnow()
class MoronBot(irc.IRCClient):
def __init__(self):
self.nickname = cmdArgs.nick
self.commandChar = '.'
self.realname = self.nickname
self.username = self.nickname
self.channels = {}
self.userModes = {}
self.fingerReply = GlobalVars.finger
self.versionName = self.nickname
self.versionNum = GlobalVars.version
self.versionEnv = platform.platform()
self.sourceURL = GlobalVars.source
# dataStore has to be before moduleHandler
dataStorePath = os.path.join('Data', cmdArgs.server)
if not os.path.exists(dataStorePath):
os.makedirs(dataStorePath)
self.dataStore = shelve.open(os.path.join(dataStorePath, 'shelve.db'), protocol=2, writeback=True)
self.moduleHandler = ModuleHandler.ModuleHandler(self)
self.moduleHandler.loadAll()
def quit(self, message=''):
self.dataStore.close()
irc.IRCClient.quit(self, message)
def signedOn(self):
for channel in cmdArgs.channels:
self.join(channel)
global startTime
startTime = datetime.datetime.utcnow()
def privmsg(self, user, channel, msg):
chan = self.getChannel(channel)
message = IRCMessage('PRIVMSG', user, chan, msg, self)
self.handleMessage(message)
def action(self, user, channel, msg):
chan = self.getChannel(channel)
message = IRCMessage('ACTION', user, chan, msg, self)
self.handleMessage(message)
def noticed(self, user, channel, msg):
chan = self.getChannel(channel)
message = IRCMessage('NOTICE', user, chan, msg, self)
self.handleMessage(message)
def irc_NICK(self, prefix, params):
userArray = prefix.split('!')
oldnick = userArray[0]
newnick = params[0]
for key in self.channels:
channel = self.channels[key]
for userKey in channel.Users:
user = channel.Users[userKey]
if userKey == oldnick:
channel.Users[newnick] = IRCUser('{0}!{1}@{2}'.format(newnick, user.User, user.Hostmask))
del channel.Users[oldnick]
if oldnick in channel.Ranks:
channel.Ranks[newnick] = channel.Ranks[oldnick]
del channel.Ranks[oldnick]
message = IRCMessage('NICK', prefix, channel, newnick, self)
self.handleMessage(message)
def nickChanged(self, nick):
self.nickname = nick
def irc_JOIN(self, prefix, params):
if params[0] in self.channels:
channel = self.channels[params[0]]
else:
channel = IRCChannel(params[0])
message = IRCMessage('JOIN', prefix, channel, u'', self)
if message.User.Name == self.nickname:
self.channels[message.ReplyTo] = channel
self.sendLine('WHO ' + message.ReplyTo)
self.sendLine('MODE ' + message.ReplyTo)
else:
channel.Users[message.User.Name] = message.User
self.handleMessage(message)
def irc_PART(self, prefix, params):
partMessage = u''
if len(params) > 1:
partMessage = u', message: ' + u' '.join(params[1:])
channel = self.channels[params[0]]
message = IRCMessage('PART', prefix, channel, partMessage, self)
if message.User.Name == self.nickname:
del self.channels[message.ReplyTo]
else:
del channel.Users[message.User.Name]
if message.User.Name in channel.Ranks:
del channel.Ranks[message.User.Name]
self.handleMessage(message)
def irc_KICK(self, prefix, params):
kickMessage = u''
if len(params) > 2:
kickMessage = u', message: ' + u' '.join(params[2:])
channel = self.channels[params[0]]
message = IRCMessage('KICK', prefix, channel, kickMessage, self)
message.Kickee = params[1]
if message.Kickee == self.nickname:
del self.channels[message.ReplyTo]
else:
del channel.Users[message.Kickee]
if message.Kickee in channel.Ranks:
del channel.Ranks[message.Kickee]
self.handleMessage(message)
def irc_QUIT(self, prefix, params):
quitMessage = u''
if len(params) > 0:
quitMessage = u', message: ' + u' '.join(params[0:])
for key in self.channels:
channel = self.channels[key]
message = IRCMessage('QUIT', prefix, channel, quitMessage, self)
if message.User.Name in channel.Users:
del channel.Users[message.User.Name]
if message.User.Name in channel.Ranks:
del channel.Ranks[message.User.Name]
self.handleMessage(message)
def irc_RPL_WHOREPLY(self, _, params):
user = IRCUser('{0}!{1}@{2}'.format(params[5], params[2], params[3]))
channel = self.channels[params[1]]
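        # params[6] holds WHO flags: "H"/"G" (here/gone), an optional "*" for IRC
        # operators, then any status symbols; keep only the status symbols.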
flags = params[6][2:] if '*' in params[6] else params[6][1:]
statusModes = ''
for flag in flags:
statusModes = statusModes + ServerInfo.StatusesReverse[flag]
channel.Users[user.Name] = user
channel.Ranks[user.Name] = statusModes
def irc_RPL_CHANNELMODEIS(self, _, params):
channel = self.channels[params[1]]
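        # params[2] is the channel mode string with a leading '+' (e.g. "+ntk"); strip it.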
modestring = params[2][1:]
modeparams = params[3:]
for mode in modestring:
if mode in ServerInfo.ChannelSetArgsModes or mode in ServerInfo.ChannelSetUnsetArgsModes:
# Mode takes an argument
channel.Modes[mode] = modeparams[0]
del modeparams[0]
else:
channel.Modes[mode] = None
def irc_RPL_MYINFO(self, prefix, params):
ServerInfo.UserModes = params[3]
def isupport(self, options):
for item in options:
if '=' in item:
option = item.split('=')
if option[0] == 'CHANTYPES':
ServerInfo.ChannelTypes = option[1]
elif option[0] == 'CHANMODES':
modes = option[1].split(',')
ServerInfo.ChannelListModes = modes[0]
ServerInfo.ChannelSetUnsetArgsModes = modes[1]
ServerInfo.ChannelSetArgsModes = modes[2]
ServerInfo.ChannelNormalModes = modes[3]
elif option[0] == 'PREFIX':
prefixes = option[1]
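                    # PREFIX typically looks like "(ov)@+": modes inside the parentheses
                    # map positionally to the status symbols that follow them.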
statusChars = prefixes[:prefixes.find(')')]
statusSymbols = prefixes[prefixes.find(')'):]
ServerInfo.StatusOrder = statusChars
for i in range(1, len(statusChars)):
ServerInfo.Statuses[statusChars[i]] = statusSymbols[i]
ServerInfo.StatusesReverse[statusSymbols[i]] = statusChars[i]
def modeChanged(self, user, channel, set, modes, args):
message = IRCMessage('MODE', user, self.getChannel(channel), u'', self)
if not message.Channel:
# Setting a usermode
for mode, arg in zip(modes, args):
if set:
self.userModes[mode] = arg
else:
del self.userModes[mode]
else:
# Setting a chanmode
for mode, arg in zip(modes, args):
if mode in ServerInfo.Statuses:
# Setting a status mode
if set:
if arg not in self.channels[channel].Ranks:
self.channels[channel].Ranks[arg] = mode
else:
self.channels[channel].Ranks[arg] = self.channels[channel].Ranks[arg] + mode
else:
                        self.channels[channel].Ranks[arg] = self.channels[channel].Ranks[arg].replace(mode, '')
else:
# Setting a normal chanmode
if set:
self.channels[channel].Modes[mode] = arg
else:
del self.channels[channel].Modes[mode]
message.ModeArgs = [arg for arg in args if arg is not None]
message.Modes = modes
message.ModeOperator = '+' if set else '-'
message.ReplyTo = message.ReplyTo if message.Channel else ''
self.handleMessage(message)
def getChannel(self, name):
if name in self.channels:
return self.channels[name]
else:
# This is a PM
return None
def topicUpdated(self, user, channel, newTopic):
self.channels[channel].Topic = newTopic
self.channels[channel].TopicSetBy = user
message = IRCMessage('TOPIC', user, self.getChannel(channel), newTopic, self)
self.handleMessage(message)
def handleMessage(self, message):
"""
@type message: IRCMessage
"""
        # restart command; can't restart within 10 seconds of starting (avoids chanhistory triggering another restart)
if (message.Command == 'restart' and
datetime.datetime.utcnow() > startTime + datetime.timedelta(seconds=10) and
message.User.Name in GlobalVars.admins):
global restarting
restarting = True
self.dataStore.close()
self.quit(message='restarting')
return
self.moduleHandler.handleMessage(message)
def sendResponse(self, response):
"""
@type response: IRCResponse
"""
self.moduleHandler.sendResponse(response)
class MoronBotFactory(protocol.ReconnectingClientFactory):
def __init__(self):
self.protocol = MoronBot
def startedConnecting(self, connector):
print '-#- Started to connect.'
def buildProtocol(self, addr):
print '-#- Connected.'
print '-#- Resetting reconnection delay'
self.resetDelay()
return MoronBot()
def clientConnectionLost(self, connector, reason):
print '-!- Lost connection. Reason:', reason
if restarting:
python = sys.executable
os.execl(python, python, *sys.argv)
# nothing beyond here will be executed if the bot is restarting, as the process itself is replaced
protocol.ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
def clientConnectionFailed(self, connector, reason):
print '-!- Connection failed. Reason:', reason
protocol.ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
if __name__ == '__main__':
moronbot = MoronBotFactory()
reactor.connectTCP(cmdArgs.server, cmdArgs.port, moronbot)
reactor.run()
| 36.561728 | 129 | 0.592183 |
40f1739a246d3458fc5d84495830dd0b0006f590 | 3,750 | py | Python | oteapi/strategies/parse/application_vnd_sqlite.py | EMMC-ASBL/oteapi-core | 5a034c7610c300b21e585f563debb43383375af0 | ["MIT"] | 3 | 2022-01-24T15:18:08.000Z | 2022-03-16T14:01:51.000Z | oteapi/strategies/parse/application_vnd_sqlite.py | EMMC-ASBL/oteapi-core | 5a034c7610c300b21e585f563debb43383375af0 | ["MIT"] | 117 | 2022-01-13T17:26:38.000Z | 2022-03-30T16:12:06.000Z | oteapi/strategies/parse/application_vnd_sqlite.py | EMMC-ASBL/oteapi-core | 5a034c7610c300b21e585f563debb43383375af0 | ["MIT"] | 3 | 2022-01-17T20:57:57.000Z | 2022-01-25T08:16:14.000Z |
"""Strategy class for application/vnd.sqlite3."""
# pylint: disable=unused-argument
import sqlite3
from pathlib import Path
from typing import TYPE_CHECKING, Optional
from pydantic import Field
from pydantic.dataclasses import dataclass
from oteapi.datacache import DataCache
from oteapi.models import AttrDict, DataCacheConfig, ResourceConfig, SessionUpdate
from oteapi.plugins import create_strategy
if TYPE_CHECKING: # pragma: no cover
from typing import Any, Dict
class SqliteParseConfig(AttrDict):
"""Configuration data model for
[`SqliteParseStrategy`][oteapi.strategies.parse.application_vnd_sqlite.SqliteParseStrategy]."""
sqlquery: str = Field("", description="A SQL query string.")
datacache_config: Optional[DataCacheConfig] = Field(
None,
description="Configuration options for the local data cache.",
)
class SqliteParserResourceConfig(ResourceConfig):
"""SQLite parse strategy resource config."""
mediaType: str = Field(
"application/vnd.sqlite3",
const=True,
description=ResourceConfig.__fields__["mediaType"].field_info.description,
)
configuration: SqliteParseConfig = Field(
SqliteParseConfig(), description="SQLite parse strategy-specific configuration."
)
def create_connection(db_file: Path) -> sqlite3.Connection:
"""Create a database connection to SQLite database.
Parameters:
db_file: Full path to SQLite database file.
Raises:
sqlite3.Error: If a DB connection cannot be made.
Returns:
Connection object.
"""
try:
return sqlite3.connect(db_file)
except sqlite3.Error as exc:
raise sqlite3.Error("Could not connect to given SQLite DB.") from exc
class SessionUpdateSqLiteParse(SessionUpdate):
"""Configuration model for SqLiteParse."""
result: list = Field(..., description="List of results from the query.")
@dataclass
class SqliteParseStrategy:
"""Parse strategy for SQLite.
**Registers strategies**:
- `("mediaType", "application/vnd.sqlite3")`
Purpose of this strategy: Download a SQLite database using `downloadUrl` and run a
SQL query on the database to return all relevant rows.
"""
parse_config: SqliteParserResourceConfig
def initialize(self, session: "Optional[Dict[str, Any]]" = None) -> SessionUpdate:
"""Initialize strategy."""
return SessionUpdate()
def get(
self, session: "Optional[Dict[str, Any]]" = None
) -> SessionUpdateSqLiteParse:
"""Parse SQLite query responses."""
if session:
self._use_filters(session)
session = session if session else {}
# Retrieve SQLite file
download_config = self.parse_config.copy()
del download_config.configuration
downloader = create_strategy("download", download_config)
session.update(downloader.initialize(session))
cache_key = downloader.get(session).get("key", "")
cache = DataCache(self.parse_config.configuration.datacache_config)
with cache.getfile(cache_key, suffix="db") as filename:
connection = create_connection(filename)
cursor = connection.cursor()
result = cursor.execute(self.parse_config.configuration.sqlquery).fetchall()
connection.close()
return SessionUpdateSqLiteParse(result=result)
def _use_filters(self, session: "Dict[str, Any]") -> None:
"""Update `config` according to filter values found in the session."""
if "sqlquery" in session and not self.parse_config.configuration.sqlquery:
# Use SQL query available in session
self.parse_config.configuration.sqlquery = session["sqlquery"]
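# Minimal usage sketch (an assumption, not part of the strategy itself): `downloadUrl`
# must point at a reachable SQLite file and the OTE-API plugins must be loaded.
#
#     config = SqliteParserResourceConfig(
#         downloadUrl="https://example.org/data.db",
#         mediaType="application/vnd.sqlite3",
#         configuration={"sqlquery": "SELECT * FROM my_table"},
#     )
#     strategy = SqliteParseStrategy(config)
#     session = strategy.initialize().dict()
#     rows = strategy.get(session).result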
| 32.894737 | 99 | 0.6968 |