body_hash (string, 64 chars) | body (string, 23-109k chars) | docstring (string, 1-57k chars) | path (string, 4-198 chars) | name (string, 1-115 chars) | repository_name (string, 7-111 chars) | repository_stars (float64, 0-191k) | lang (string, 1 class: python) | body_without_docstring (string, 14-108k chars) | unified (string, 45-133k chars)
---|---|---|---|---|---|---|---|---|---|
5a70439ef8ab75d2db8ab826964d38b04112b0be41e63560fe1f6dea690b5686
|
def USGS2netcdf(ncfile, meta, time, discharge):
'\n Convert the USGS files to netcdf4 format\n '
shpfile = (ncfile[:(- 2)] + 'shp')
varname = 'discharge'
longname = 'Stream Discharge Rate'
units = 'm3 s-1'
ncdict = []
ii = (- 1)
for (tt, dd) in zip(time, discharge):
ii += 1
timeout = othertime.MinutesSince(tt, basetime=datetime(1970, 1, 1))
ncdict = netcdfio.createObsDict(varname, longname, units, [dd], [timeout], [meta['Latitude'][ii]], [meta['Longitude'][ii]], [0.0], [meta['Station ID'][ii]], [meta['StationName'][ii]], ncdict=ncdict)
globalatts = {'Title': 'USGS stream gage discharge data'}
netcdfio.writePointData2Netcdf(ncfile, ncdict, globalatts)
netcdfio.pointNC2shp(ncfile, shpfile)
|
Convert the USGS files to netcdf4 format
|
sfoda/dataio/datadownload/getUSGSnwis.py
|
USGS2netcdf
|
mrayson/sfoda
| 1 |
python
|
def USGS2netcdf(ncfile, meta, time, discharge):
'\n \n '
shpfile = (ncfile[:(- 2)] + 'shp')
varname = 'discharge'
longname = 'Stream Discharge Rate'
units = 'm3 s-1'
ncdict = []
ii = (- 1)
for (tt, dd) in zip(time, discharge):
ii += 1
timeout = othertime.MinutesSince(tt, basetime=datetime(1970, 1, 1))
ncdict = netcdfio.createObsDict(varname, longname, units, [dd], [timeout], [meta['Latitude'][ii]], [meta['Longitude'][ii]], [0.0], [meta['Station ID'][ii]], [meta['StationName'][ii]], ncdict=ncdict)
globalatts = {'Title': 'USGS stream gage discharge data'}
netcdfio.writePointData2Netcdf(ncfile, ncdict, globalatts)
netcdfio.pointNC2shp(ncfile, shpfile)
|
def USGS2netcdf(ncfile, meta, time, discharge):
'\n \n '
shpfile = (ncfile[:(- 2)] + 'shp')
varname = 'discharge'
longname = 'Stream Discharge Rate'
units = 'm3 s-1'
ncdict = []
ii = (- 1)
for (tt, dd) in zip(time, discharge):
ii += 1
timeout = othertime.MinutesSince(tt, basetime=datetime(1970, 1, 1))
ncdict = netcdfio.createObsDict(varname, longname, units, [dd], [timeout], [meta['Latitude'][ii]], [meta['Longitude'][ii]], [0.0], [meta['Station ID'][ii]], [meta['StationName'][ii]], ncdict=ncdict)
globalatts = {'Title': 'USGS stream gage discharge data'}
netcdfio.writePointData2Netcdf(ncfile, ncdict, globalatts)
netcdfio.pointNC2shp(ncfile, shpfile)<|docstring|>Convert the USGS files to netcdf4 format<|endoftext|>
|
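The body above leans on two project helpers, othertime.MinutesSince and the netcdfio writers, which are not part of this excerpt. A minimal, self-contained sketch of the time conversion it performs (fractional minutes since 1970-01-01), assuming only the standard library; the helper name here is a stand-in, not the project's implementation:

from datetime import datetime

def minutes_since(times, basetime=datetime(1970, 1, 1)):
    # Stand-in for othertime.MinutesSince used above: express each datetime
    # as fractional minutes elapsed since basetime.
    return [(t - basetime).total_seconds() / 60.0 for t in times]

print(minutes_since([datetime(1970, 1, 2), datetime(1970, 1, 1, 0, 30)]))
# [1440.0, 30.0]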
7638813c7dc7c4cf54f562637e2ad176e786b209dd239bbd28b63e8d00bdc104
|
@commands.command()
async def market(self, ctx, *, query):
'Shows listings for an item from the Marketboard'
async with aiohttp.ClientSession() as session:
async with session.get(f'https://xivapi.com/search?string={query}&indexes=Item&limit=1') as req:
if (req.status != 200):
(await ctx.send('Something went wrong :('))
itemDump = (await req.json())
if (itemDump['Results'] == 0):
(await ctx.send(f'No results found for {query}'))
else:
itemID = itemDump['Results'][0]['ID']
itemName = itemDump['Results'][0]['Name']
itemIcon = itemDump['Results'][0]['Icon']
async with aiohttp.ClientSession() as session:
async with session.get(f'https://universalis.app/api/Light/{itemID}?&listings=5') as req:
if (req.status != 200):
(await ctx.send('Something went wrong :('))
marketDump = (await req.json())
embed = discord.Embed()
embed.add_field(name='Universalis Link:', value=f'https://universalis.app/market/{itemID}', inline=False)
embed.set_author(name=itemName, icon_url=('https://xivapi.com/' + itemIcon))
for listing in marketDump['listings']:
embed.add_field(name=listing['worldName'], value=(((str(listing['quantity']) + ' selling at ') + str(listing['pricePerUnit'])) + ' gil.'), inline=False)
(await ctx.send(embed=embed))
|
Shows listings for an item from the Marketboard
|
Mailbot/cogs/xiv.py
|
market
|
tekofu/Mailbot
| 0 |
python
|
@commands.command()
async def market(self, ctx, *, query):
async with aiohttp.ClientSession() as session:
async with session.get(f'https://xivapi.com/search?string={query}&indexes=Item&limit=1') as req:
if (req.status != 200):
(await ctx.send('Something went wrong :('))
itemDump = (await req.json())
if (itemDump['Results'] == 0):
(await ctx.send(f'No results found for {query}'))
else:
itemID = itemDump['Results'][0]['ID']
itemName = itemDump['Results'][0]['Name']
itemIcon = itemDump['Results'][0]['Icon']
async with aiohttp.ClientSession() as session:
async with session.get(f'https://universalis.app/api/Light/{itemID}?&listings=5') as req:
if (req.status != 200):
(await ctx.send('Something went wrong :('))
marketDump = (await req.json())
embed = discord.Embed()
embed.add_field(name='Universalis Link:', value=f'https://universalis.app/market/{itemID}', inline=False)
embed.set_author(name=itemName, icon_url=('https://xivapi.com/' + itemIcon))
for listing in marketDump['listings']:
embed.add_field(name=listing['worldName'], value=(((str(listing['quantity']) + ' selling at ') + str(listing['pricePerUnit'])) + ' gil.'), inline=False)
(await ctx.send(embed=embed))
|
@commands.command()
async def market(self, ctx, *, query):
async with aiohttp.ClientSession() as session:
async with session.get(f'https://xivapi.com/search?string={query}&indexes=Item&limit=1') as req:
if (req.status != 200):
(await ctx.send('Something went wrong :('))
itemDump = (await req.json())
if (itemDump['Results'] == 0):
(await ctx.send(f'No results found for {query}'))
else:
itemID = itemDump['Results'][0]['ID']
itemName = itemDump['Results'][0]['Name']
itemIcon = itemDump['Results'][0]['Icon']
async with aiohttp.ClientSession() as session:
async with session.get(f'https://universalis.app/api/Light/{itemID}?&listings=5') as req:
if (req.status != 200):
(await ctx.send('Something went wrong :('))
marketDump = (await req.json())
embed = discord.Embed()
embed.add_field(name='Universalis Link:', value=f'https://universalis.app/market/{itemID}', inline=False)
embed.set_author(name=itemName, icon_url=('https://xivapi.com/' + itemIcon))
for listing in marketDump['listings']:
embed.add_field(name=listing['worldName'], value=(((str(listing['quantity']) + ' selling at ') + str(listing['pricePerUnit'])) + ' gil.'), inline=False)
(await ctx.send(embed=embed))<|docstring|>Shows listings for an item from the Marketboard<|endoftext|>
|
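Both requests in the command above follow the same aiohttp pattern: open a session, issue a GET, check the status, parse the JSON body. A minimal sketch of that pattern in isolation (the URL in the usage note is only illustrative):

import asyncio
import aiohttp

async def fetch_json(url):
    # Same request pattern as the command above: session -> GET -> status check -> JSON.
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as req:
            if req.status != 200:
                return None
            return await req.json()

# Example (illustrative item id):
# data = asyncio.run(fetch_json('https://universalis.app/api/Light/5?&listings=5'))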
cfd8e7080a1883ff519b14ad3533caaa71ebf2b87186a39ec10ad85af89d456a
|
@commands.command()
async def char(self, ctx, forename, surname, world):
'Searches for a character on the Lodestone'
charRequest = f'{forename} {surname}'
async with aiohttp.ClientSession() as session:
async with session.get(f'https://xivapi.com/character/search?name= {charRequest}&server={world}') as req:
if (req.status != 200):
(await ctx.send('Something went wrong :('))
character = (await req.json())
embed = discord.Embed(description=('on ' + character['Results'][0]['Server']))
embed.set_image(url=character['Results'][0]['Avatar'])
lodestoneURL = ('https://eu.finalfantasyxiv.com/lodestone/character/' + str(character['Results'][0]['ID']))
embed.add_field(name='Lodestone', value=f'[Profile]({lodestoneURL})', inline=False)
embed.set_author(name=character['Results'][0]['Name'], icon_url=character['Results'][0]['Avatar'])
(await ctx.send(embed=embed))
|
Searches for a character on the Lodestone
|
Mailbot/cogs/xiv.py
|
char
|
tekofu/Mailbot
| 0 |
python
|
@commands.command()
async def char(self, ctx, forename, surname, world):
charRequest = f'{forename} {surname}'
async with aiohttp.ClientSession() as session:
async with session.get(f'https://xivapi.com/character/search?name= {charRequest}&server={world}') as req:
if (req.status != 200):
(await ctx.send('Something went wrong :('))
character = (await req.json())
embed = discord.Embed(description=('on ' + character['Results'][0]['Server']))
embed.set_image(url=character['Results'][0]['Avatar'])
lodestoneURL = ('https://eu.finalfantasyxiv.com/lodestone/character/' + str(character['Results'][0]['ID']))
embed.add_field(name='Lodestone', value=f'[Profile]({lodestoneURL})', inline=False)
embed.set_author(name=character['Results'][0]['Name'], icon_url=character['Results'][0]['Avatar'])
(await ctx.send(embed=embed))
|
@commands.command()
async def char(self, ctx, forename, surname, world):
charRequest = f'{forename} {surname}'
async with aiohttp.ClientSession() as session:
async with session.get(f'https://xivapi.com/character/search?name= {charRequest}&server={world}') as req:
if (req.status != 200):
(await ctx.send('Something went wrong :('))
character = (await req.json())
embed = discord.Embed(description=('on ' + character['Results'][0]['Server']))
embed.set_image(url=character['Results'][0]['Avatar'])
lodestoneURL = ('https://eu.finalfantasyxiv.com/lodestone/character/' + str(character['Results'][0]['ID']))
embed.add_field(name='Lodestone', value=f'[Profile]({lodestoneURL})', inline=False)
embed.set_author(name=character['Results'][0]['Name'], icon_url=character['Results'][0]['Avatar'])
(await ctx.send(embed=embed))<|docstring|>Searches for a character on the Lodestone<|endoftext|>
|
875885ac8de802bb250af390300b0e35cc742964a6cdb7947c1a7b2c1f6d07fa
|
def smart_text(s, encoding='utf-8', strings_only=False, errors='strict'):
"\n Returns a text object representing 's' -- unicode on Python 2 and str on\n Python 3. Treats bytestrings using the 'encoding' codec.\n If strings_only is True, don't convert (some) non-string-like objects.\n "
if isinstance(s, Promise):
return s
return force_text(s, encoding, strings_only, errors)
|
Returns a text object representing 's' -- unicode on Python 2 and str on
Python 3. Treats bytestrings using the 'encoding' codec.
If strings_only is True, don't convert (some) non-string-like objects.
|
string_demon/helpers.py
|
smart_text
|
guokr/string_demon
| 0 |
python
|
def smart_text(s, encoding='utf-8', strings_only=False, errors='strict'):
"\n Returns a text object representing 's' -- unicode on Python 2 and str on\n Python 3. Treats bytestrings using the 'encoding' codec.\n If strings_only is True, don't convert (some) non-string-like objects.\n "
if isinstance(s, Promise):
return s
return force_text(s, encoding, strings_only, errors)
|
def smart_text(s, encoding='utf-8', strings_only=False, errors='strict'):
"\n Returns a text object representing 's' -- unicode on Python 2 and str on\n Python 3. Treats bytestrings using the 'encoding' codec.\n If strings_only is True, don't convert (some) non-string-like objects.\n "
if isinstance(s, Promise):
return s
return force_text(s, encoding, strings_only, errors)<|docstring|>Returns a text object representing 's' -- unicode on Python 2 and str on
Python 3. Treats bytestrings using the 'encoding' codec.
If strings_only is True, don't convert (some) non-string-like objects.<|endoftext|>
|
3d19b21d03aef4c2792b7b6a6e418b9e8e1eda5c7e55470d944092924bb44a86
|
def is_protected_type(obj):
'Determine if the object instance is of a protected type.\n Objects of protected types are preserved as-is when passed to\n force_text(strings_only=True).\n '
return isinstance(obj, _PROTECTED_TYPES)
|
Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_text(strings_only=True).
|
string_demon/helpers.py
|
is_protected_type
|
guokr/string_demon
| 0 |
python
|
def is_protected_type(obj):
'Determine if the object instance is of a protected type.\n Objects of protected types are preserved as-is when passed to\n force_text(strings_only=True).\n '
return isinstance(obj, _PROTECTED_TYPES)
|
def is_protected_type(obj):
'Determine if the object instance is of a protected type.\n Objects of protected types are preserved as-is when passed to\n force_text(strings_only=True).\n '
return isinstance(obj, _PROTECTED_TYPES)<|docstring|>Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_text(strings_only=True).<|endoftext|>
|
d1c4cf6a1cd0106c666ccd7672c77827ddb46a0bbbea9e43c169e84d622d9e5a
|
def force_text(s, encoding='utf-8', strings_only=False, errors='strict'):
"\n Similar to smart_text, except that lazy instances are resolved to\n strings, rather than kept as lazy objects.\n If strings_only is True, don't convert (some) non-string-like objects.\n "
if issubclass(type(s), six.text_type):
return s
if (strings_only and is_protected_type(s)):
return s
try:
if (not issubclass(type(s), six.string_types)):
if six.PY3:
if isinstance(s, bytes):
s = six.text_type(s, encoding, errors)
else:
s = six.text_type(s)
elif hasattr(s, '__unicode__'):
s = six.text_type(s)
else:
s = six.text_type(bytes(s), encoding, errors)
else:
s = s.decode(encoding, errors)
except UnicodeDecodeError as e:
if (not isinstance(s, Exception)):
raise _UnicodeDecodeError(s, *e.args)
else:
s = ' '.join((force_text(arg, encoding, strings_only, errors) for arg in s))
return s
|
Similar to smart_text, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
|
string_demon/helpers.py
|
force_text
|
guokr/string_demon
| 0 |
python
|
def force_text(s, encoding='utf-8', strings_only=False, errors='strict'):
"\n Similar to smart_text, except that lazy instances are resolved to\n strings, rather than kept as lazy objects.\n If strings_only is True, don't convert (some) non-string-like objects.\n "
if issubclass(type(s), six.text_type):
return s
if (strings_only and is_protected_type(s)):
return s
try:
if (not issubclass(type(s), six.string_types)):
if six.PY3:
if isinstance(s, bytes):
s = six.text_type(s, encoding, errors)
else:
s = six.text_type(s)
elif hasattr(s, '__unicode__'):
s = six.text_type(s)
else:
s = six.text_type(bytes(s), encoding, errors)
else:
s = s.decode(encoding, errors)
except UnicodeDecodeError as e:
if (not isinstance(s, Exception)):
raise _UnicodeDecodeError(s, *e.args)
else:
s = ' '.join((force_text(arg, encoding, strings_only, errors) for arg in s))
return s
|
def force_text(s, encoding='utf-8', strings_only=False, errors='strict'):
"\n Similar to smart_text, except that lazy instances are resolved to\n strings, rather than kept as lazy objects.\n If strings_only is True, don't convert (some) non-string-like objects.\n "
if issubclass(type(s), six.text_type):
return s
if (strings_only and is_protected_type(s)):
return s
try:
if (not issubclass(type(s), six.string_types)):
if six.PY3:
if isinstance(s, bytes):
s = six.text_type(s, encoding, errors)
else:
s = six.text_type(s)
elif hasattr(s, '__unicode__'):
s = six.text_type(s)
else:
s = six.text_type(bytes(s), encoding, errors)
else:
s = s.decode(encoding, errors)
except UnicodeDecodeError as e:
if (not isinstance(s, Exception)):
raise _UnicodeDecodeError(s, *e.args)
else:
s = ' '.join((force_text(arg, encoding, strings_only, errors) for arg in s))
return s<|docstring|>Similar to smart_text, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.<|endoftext|>
|
62098ff2973a86de6c5baae71035c6c58578131b784a5e4ba74df578e973d1ec
|
def smart_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
"\n Returns a bytestring version of 's', encoded as specified in 'encoding'.\n If strings_only is True, don't convert (some) non-string-like objects.\n "
if isinstance(s, Promise):
return s
return force_bytes(s, encoding, strings_only, errors)
|
Returns a bytestring version of 's', encoded as specified in 'encoding'.
If strings_only is True, don't convert (some) non-string-like objects.
|
string_demon/helpers.py
|
smart_bytes
|
guokr/string_demon
| 0 |
python
|
def smart_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
"\n Returns a bytestring version of 's', encoded as specified in 'encoding'.\n If strings_only is True, don't convert (some) non-string-like objects.\n "
if isinstance(s, Promise):
return s
return force_bytes(s, encoding, strings_only, errors)
|
def smart_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
"\n Returns a bytestring version of 's', encoded as specified in 'encoding'.\n If strings_only is True, don't convert (some) non-string-like objects.\n "
if isinstance(s, Promise):
return s
return force_bytes(s, encoding, strings_only, errors)<|docstring|>Returns a bytestring version of 's', encoded as specified in 'encoding'.
If strings_only is True, don't convert (some) non-string-like objects.<|endoftext|>
|
92b1e8f7abaf4fd15bb2c6b57efe507dc9457aeadf5bb227fb2b03f4240bee74
|
def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
"\n Similar to smart_bytes, except that lazy instances are resolved to\n strings, rather than kept as lazy objects.\n If strings_only is True, don't convert (some) non-string-like objects.\n "
if isinstance(s, bytes):
if (encoding == 'utf-8'):
return s
else:
return s.decode('utf-8', errors).encode(encoding, errors)
if (strings_only and is_protected_type(s)):
return s
if isinstance(s, Promise):
return six.text_type(s).encode(encoding, errors)
if (not isinstance(s, six.string_types)):
try:
if six.PY3:
return six.text_type(s).encode(encoding)
else:
return bytes(s)
except UnicodeEncodeError:
if isinstance(s, Exception):
return b' '.join((force_bytes(arg, encoding, strings_only, errors) for arg in s))
return six.text_type(s).encode(encoding, errors)
else:
return s.encode(encoding, errors)
|
Similar to smart_bytes, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
|
string_demon/helpers.py
|
force_bytes
|
guokr/string_demon
| 0 |
python
|
def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
"\n Similar to smart_bytes, except that lazy instances are resolved to\n strings, rather than kept as lazy objects.\n If strings_only is True, don't convert (some) non-string-like objects.\n "
if isinstance(s, bytes):
if (encoding == 'utf-8'):
return s
else:
return s.decode('utf-8', errors).encode(encoding, errors)
if (strings_only and is_protected_type(s)):
return s
if isinstance(s, Promise):
return six.text_type(s).encode(encoding, errors)
if (not isinstance(s, six.string_types)):
try:
if six.PY3:
return six.text_type(s).encode(encoding)
else:
return bytes(s)
except UnicodeEncodeError:
if isinstance(s, Exception):
return b' '.join((force_bytes(arg, encoding, strings_only, errors) for arg in s))
return six.text_type(s).encode(encoding, errors)
else:
return s.encode(encoding, errors)
|
def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
"\n Similar to smart_bytes, except that lazy instances are resolved to\n strings, rather than kept as lazy objects.\n If strings_only is True, don't convert (some) non-string-like objects.\n "
if isinstance(s, bytes):
if (encoding == 'utf-8'):
return s
else:
return s.decode('utf-8', errors).encode(encoding, errors)
if (strings_only and is_protected_type(s)):
return s
if isinstance(s, Promise):
return six.text_type(s).encode(encoding, errors)
if (not isinstance(s, six.string_types)):
try:
if six.PY3:
return six.text_type(s).encode(encoding)
else:
return bytes(s)
except UnicodeEncodeError:
if isinstance(s, Exception):
return b' '.join((force_bytes(arg, encoding, strings_only, errors) for arg in s))
return six.text_type(s).encode(encoding, errors)
else:
return s.encode(encoding, errors)<|docstring|>Similar to smart_bytes, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.<|endoftext|>
|
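Taken together, the helpers above mirror Django's encoding utilities: the smart_* variants preserve lazy Promise objects, while the force_* variants resolve everything to text or bytes. A couple of illustrative calls, assuming these functions are importable from string_demon.helpers and running on Python 3:

from string_demon.helpers import force_text, force_bytes

print(force_text(b'caf\xc3\xa9'))   # 'café': bytes decoded with the default utf-8 codec
print(force_bytes('café'))          # b'caf\xc3\xa9': text encoded back to utf-8 bytes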
8e1fe1e0e450545f291d606386e297036c8289f8340eb2ca378c1667098d0331
|
def has_add_permission(self, request):
'\n Returns True if the given request has permission to add an object.\n Can be overridden by the user in subclasses.\n '
raise NotImplementedError
|
Returns True if the given request has permission to add an object.
Can be overridden by the user in subclasses.
|
admin/admins.py
|
has_add_permission
|
bpeschier/reek-admin
| 0 |
python
|
def has_add_permission(self, request):
'\n Returns True if the given request has permission to add an object.\n Can be overridden by the user in subclasses.\n '
raise NotImplementedError
|
def has_add_permission(self, request):
'\n Returns True if the given request has permission to add an object.\n Can be overridden by the user in subclasses.\n '
raise NotImplementedError<|docstring|>Returns True if the given request has permission to add an object.
Can be overridden by the user in subclasses.<|endoftext|>
|
0656052c529d55aafe2a7e74a0c85cb5309a1e662425848ed5954db495ecafa7
|
def has_change_permission(self, request, obj=None):
"\n Returns True if the given request has permission to change the given\n Django model instance, the default implementation doesn't examine the\n `obj` parameter.\n\n Can be overridden by the user in subclasses. In such case it should\n return True if the given request has permission to change the `obj`\n model instance. If `obj` is None, this should return True if the given\n request has permission to change *any* object of the given type.\n "
raise NotImplementedError
|
Returns True if the given request has permission to change the given
Django model instance, the default implementation doesn't examine the
`obj` parameter.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to change the `obj`
model instance. If `obj` is None, this should return True if the given
request has permission to change *any* object of the given type.
|
admin/admins.py
|
has_change_permission
|
bpeschier/reek-admin
| 0 |
python
|
def has_change_permission(self, request, obj=None):
"\n Returns True if the given request has permission to change the given\n Django model instance, the default implementation doesn't examine the\n `obj` parameter.\n\n Can be overridden by the user in subclasses. In such case it should\n return True if the given request has permission to change the `obj`\n model instance. If `obj` is None, this should return True if the given\n request has permission to change *any* object of the given type.\n "
raise NotImplementedError
|
def has_change_permission(self, request, obj=None):
"\n Returns True if the given request has permission to change the given\n Django model instance, the default implementation doesn't examine the\n `obj` parameter.\n\n Can be overridden by the user in subclasses. In such case it should\n return True if the given request has permission to change the `obj`\n model instance. If `obj` is None, this should return True if the given\n request has permission to change *any* object of the given type.\n "
raise NotImplementedError<|docstring|>Returns True if the given request has permission to change the given
Django model instance, the default implementation doesn't examine the
`obj` parameter.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to change the `obj`
model instance. If `obj` is None, this should return True if the given
request has permission to change *any* object of the given type.<|endoftext|>
|
3581ed5898a8ea2e8575583b1919de282cd9d3255182f0573142e226689c9049
|
def has_delete_permission(self, request, obj=None):
"\n Returns True if the given request has permission to change the given\n Django model instance, the default implementation doesn't examine the\n `obj` parameter.\n\n Can be overridden by the user in subclasses. In such case it should\n return True if the given request has permission to delete the `obj`\n model instance. If `obj` is None, this should return True if the given\n request has permission to delete *any* object of the given type.\n "
raise NotImplementedError
|
Returns True if the given request has permission to change the given
Django model instance, the default implementation doesn't examine the
`obj` parameter.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to delete the `obj`
model instance. If `obj` is None, this should return True if the given
request has permission to delete *any* object of the given type.
|
admin/admins.py
|
has_delete_permission
|
bpeschier/reek-admin
| 0 |
python
|
def has_delete_permission(self, request, obj=None):
"\n Returns True if the given request has permission to change the given\n Django model instance, the default implementation doesn't examine the\n `obj` parameter.\n\n Can be overridden by the user in subclasses. In such case it should\n return True if the given request has permission to delete the `obj`\n model instance. If `obj` is None, this should return True if the given\n request has permission to delete *any* object of the given type.\n "
raise NotImplementedError
|
def has_delete_permission(self, request, obj=None):
"\n Returns True if the given request has permission to change the given\n Django model instance, the default implementation doesn't examine the\n `obj` parameter.\n\n Can be overridden by the user in subclasses. In such case it should\n return True if the given request has permission to delete the `obj`\n model instance. If `obj` is None, this should return True if the given\n request has permission to delete *any* object of the given type.\n "
raise NotImplementedError<|docstring|>Returns True if the given request has permission to change the given
Django model instance, the default implementation doesn't examine the
`obj` parameter.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to delete the `obj`
model instance. If `obj` is None, this should return True if the given
request has permission to delete *any* object of the given type.<|endoftext|>
|
9d3422d8ed4cba8d1d94ac3ccda9501694dbfda352b137278e9cc77edcf3f45a
|
def get_permissions(self, request):
'\n Returns a dict of all perms for this model. This dict has the keys\n ``add``, ``change``, and ``delete`` mapping to the True/False for each\n of those actions.\n '
return {'add': self.has_add_permission(request), 'change': self.has_change_permission(request), 'delete': self.has_delete_permission(request)}
|
Returns a dict of all perms for this model. This dict has the keys
``add``, ``change``, and ``delete`` mapping to the True/False for each
of those actions.
|
admin/admins.py
|
get_permissions
|
bpeschier/reek-admin
| 0 |
python
|
def get_permissions(self, request):
'\n Returns a dict of all perms for this model. This dict has the keys\n ``add``, ``change``, and ``delete`` mapping to the True/False for each\n of those actions.\n '
return {'add': self.has_add_permission(request), 'change': self.has_change_permission(request), 'delete': self.has_delete_permission(request)}
|
def get_permissions(self, request):
'\n Returns a dict of all perms for this model. This dict has the keys\n ``add``, ``change``, and ``delete`` mapping to the True/False for each\n of those actions.\n '
return {'add': self.has_add_permission(request), 'change': self.has_change_permission(request), 'delete': self.has_delete_permission(request)}<|docstring|>Returns a dict of all perms for this model. This dict has the keys
``add``, ``change``, and ``delete`` mapping to the True/False for each
of those actions.<|endoftext|>
|
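The three has_*_permission hooks above are abstract, and get_permissions simply fans out to them. A self-contained sketch of how a subclass would wire them up; the class names here are hypothetical, since the owning class is not shown in this excerpt:

class PermissionMixin:
    # Hypothetical stand-in for the class that owns the methods above.
    def has_add_permission(self, request):
        raise NotImplementedError

    def has_change_permission(self, request, obj=None):
        raise NotImplementedError

    def has_delete_permission(self, request, obj=None):
        raise NotImplementedError

    def get_permissions(self, request):
        return {'add': self.has_add_permission(request),
                'change': self.has_change_permission(request),
                'delete': self.has_delete_permission(request)}


class ReadOnlyAdmin(PermissionMixin):
    # Example subclass: changes allowed, additions and deletions denied.
    def has_add_permission(self, request):
        return False

    def has_change_permission(self, request, obj=None):
        return True

    def has_delete_permission(self, request, obj=None):
        return False


print(ReadOnlyAdmin().get_permissions(request=None))
# {'add': False, 'change': True, 'delete': False}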
a04679b696eb6f4d477be7f637cb51c97d26128eb0b6a1029cefef8d9f3b4f7f
|
@staticmethod
def create_index():
'\n Creates index in ElasticSearch\n '
ElasticSearchUtils.remove_index()
es = Elasticsearch(hosts=[config['elasticsearch-host']], port=config['elasticsearch-port'])
request_body = {'settings': {'number_of_shards': 5, 'number_of_replicas': 1}, 'mappings': {'cadaster_doc': {'properties': {'location': {'type': 'geo_point'}, 'constructions': {'type': 'nested', 'properties': {'door': {'type': 'keyword'}, 'doorway': {'type': 'keyword'}, 'floor': {'type': 'keyword'}, 'reform': {'type': 'nested', 'properties': {'type': {'type': 'keyword'}, 'year': {'type': 'keyword'}}}, 'surface': {'type': 'keyword'}, 'use': {'type': 'keyword'}}}}, 'dynamic_templates': [{'strings': {'match_mapping_type': 'string', 'mapping': {'type': 'text', 'fields': {'keyword': {'type': 'keyword'}}}}}]}}}
logger.debug("Creating 'cadaster' index...")
try:
res = es.indices.create(index='cadaster', body=request_body)
logger.debug(res)
except Exception as e:
logger.debug(e)
es.transport.close()
|
Creates index in ElasticSearch
|
src/utils/elasticsearch_utils.py
|
create_index
|
jjmartinez-taiger/libreCatastro
| 9 |
python
|
@staticmethod
def create_index():
'\n \n '
ElasticSearchUtils.remove_index()
es = Elasticsearch(hosts=[config['elasticsearch-host']], port=config['elasticsearch-port'])
request_body = {'settings': {'number_of_shards': 5, 'number_of_replicas': 1}, 'mappings': {'cadaster_doc': {'properties': {'location': {'type': 'geo_point'}, 'constructions': {'type': 'nested', 'properties': {'door': {'type': 'keyword'}, 'doorway': {'type': 'keyword'}, 'floor': {'type': 'keyword'}, 'reform': {'type': 'nested', 'properties': {'type': {'type': 'keyword'}, 'year': {'type': 'keyword'}}}, 'surface': {'type': 'keyword'}, 'use': {'type': 'keyword'}}}}, 'dynamic_templates': [{'strings': {'match_mapping_type': 'string', 'mapping': {'type': 'text', 'fields': {'keyword': {'type': 'keyword'}}}}}]}}}
logger.debug("Creating 'cadaster' index...")
try:
res = es.indices.create(index='cadaster', body=request_body)
logger.debug(res)
except Exception as e:
logger.debug(e)
es.transport.close()
|
@staticmethod
def create_index():
'\n \n '
ElasticSearchUtils.remove_index()
es = Elasticsearch(hosts=[config['elasticsearch-host']], port=config['elasticsearch-port'])
request_body = {'settings': {'number_of_shards': 5, 'number_of_replicas': 1}, 'mappings': {'cadaster_doc': {'properties': {'location': {'type': 'geo_point'}, 'constructions': {'type': 'nested', 'properties': {'door': {'type': 'keyword'}, 'doorway': {'type': 'keyword'}, 'floor': {'type': 'keyword'}, 'reform': {'type': 'nested', 'properties': {'type': {'type': 'keyword'}, 'year': {'type': 'keyword'}}}, 'surface': {'type': 'keyword'}, 'use': {'type': 'keyword'}}}}, 'dynamic_templates': [{'strings': {'match_mapping_type': 'string', 'mapping': {'type': 'text', 'fields': {'keyword': {'type': 'keyword'}}}}}]}}}
logger.debug("Creating 'cadaster' index...")
try:
res = es.indices.create(index='cadaster', body=request_body)
logger.debug(res)
except Exception as e:
logger.debug(e)
es.transport.close()<|docstring|>Creates index in ElasticSearch<|endoftext|>
|
48db2e4d85a810e58f99fe481eed5227dc9475a8881b9a21dc71a3c4d4e6a1f9
|
@staticmethod
def remove_index():
'\n Removes index from ElasticSearch\n '
es = Elasticsearch(hosts=[config['elasticsearch-host']], port=config['elasticsearch-port'])
logger.debug("Deleting 'cadaster' index...")
try:
res = es.indices.delete(index='cadaster', ignore=[400, 404])
logger.debug(res)
except Exception as e:
logger.debug(e)
es.transport.close()
|
Removes index from ElasticSearch
|
src/utils/elasticsearch_utils.py
|
remove_index
|
jjmartinez-taiger/libreCatastro
| 9 |
python
|
@staticmethod
def remove_index():
'\n \n '
es = Elasticsearch(hosts=[config['elasticsearch-host']], port=config['elasticsearch-port'])
logger.debug("Deleting 'cadaster' index...")
try:
res = es.indices.delete(index='cadaster', ignore=[400, 404])
logger.debug(res)
except Exception as e:
logger.debug(e)
es.transport.close()
|
@staticmethod
def remove_index():
'\n \n '
es = Elasticsearch(hosts=[config['elasticsearch-host']], port=config['elasticsearch-port'])
logger.debug("Deleting 'cadaster' index...")
try:
res = es.indices.delete(index='cadaster', ignore=[400, 404])
logger.debug(res)
except Exception as e:
logger.debug(e)
es.transport.close()<|docstring|>Removes index from ElasticSearch<|endoftext|>
|
4cbb777acc9922f292d608fd76a49d9a1bdf20a6a5a3a1a7642f3caf32175eab
|
@staticmethod
def check_if_address_present(address, city_name, province_name):
'\n Checks if an address has been already scrapped (to skip it).\n :param address: full addres (including tipo de via, nombre de via ...)\n :param city_name: City Name\n :param province_name: Province Name\n :return: True if already scrapped, False otherwise\n '
res = False
query = {'query': {'bool': {'must': [{'prefix': {'address.full_address.keyword': '{}'.format(address)}}, {'match': {'address.province.keyword': '{}'.format(province_name)}}, {'match': {'address.city.keyword': '{}'.format(city_name)}}], 'must_not': [], 'should': []}}, 'from': 0, 'size': 11, 'sort': [], 'aggs': {}}
es = Elasticsearch(hosts=[config['elasticsearch-host']], port=config['elasticsearch-port'])
try:
res = es.search(config['elasticsearch-index'], config['elasticsearch-doc'], query)
hits = DotMap(res).hits.total
if (hits == DotMap()):
hits = 0
res = (hits > 0)
except Exception as e:
logger.debug(e)
es.transport.close()
return res
|
Checks if an address has been already scrapped (to skip it).
:param address: full addres (including tipo de via, nombre de via ...)
:param city_name: City Name
:param province_name: Province Name
:return: True if already scrapped, False otherwise
|
src/utils/elasticsearch_utils.py
|
check_if_address_present
|
jjmartinez-taiger/libreCatastro
| 9 |
python
|
@staticmethod
def check_if_address_present(address, city_name, province_name):
'\n Checks if an address has been already scrapped (to skip it).\n :param address: full addres (including tipo de via, nombre de via ...)\n :param city_name: City Name\n :param province_name: Province Name\n :return: True if already scrapped, False otherwise\n '
res = False
query = {'query': {'bool': {'must': [{'prefix': {'address.full_address.keyword': '{}'.format(address)}}, {'match': {'address.province.keyword': '{}'.format(province_name)}}, {'match': {'address.city.keyword': '{}'.format(city_name)}}], 'must_not': [], 'should': []}}, 'from': 0, 'size': 11, 'sort': [], 'aggs': {}}
es = Elasticsearch(hosts=[config['elasticsearch-host']], port=config['elasticsearch-port'])
try:
res = es.search(config['elasticsearch-index'], config['elasticsearch-doc'], query)
hits = DotMap(res).hits.total
if (hits == DotMap()):
hits = 0
res = (hits > 0)
except Exception as e:
logger.debug(e)
es.transport.close()
return res
|
@staticmethod
def check_if_address_present(address, city_name, province_name):
'\n Checks if an address has been already scrapped (to skip it).\n :param address: full addres (including tipo de via, nombre de via ...)\n :param city_name: City Name\n :param province_name: Province Name\n :return: True if already scrapped, False otherwise\n '
res = False
query = {'query': {'bool': {'must': [{'prefix': {'address.full_address.keyword': '{}'.format(address)}}, {'match': {'address.province.keyword': '{}'.format(province_name)}}, {'match': {'address.city.keyword': '{}'.format(city_name)}}], 'must_not': [], 'should': []}}, 'from': 0, 'size': 11, 'sort': [], 'aggs': {}}
es = Elasticsearch(hosts=[config['elasticsearch-host']], port=config['elasticsearch-port'])
try:
res = es.search(config['elasticsearch-index'], config['elasticsearch-doc'], query)
hits = DotMap(res).hits.total
if (hits == DotMap()):
hits = 0
res = (hits > 0)
except Exception as e:
logger.debug(e)
es.transport.close()
return res<|docstring|>Checks if an address has been already scrapped (to skip it).
:param address: full addres (including tipo de via, nombre de via ...)
:param city_name: City Name
:param province_name: Province Name
:return: True if already scrapped, False otherwise<|endoftext|>
|
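The hits handling above relies on DotMap returning an empty DotMap for missing keys, which is why the result is compared against DotMap(). A small illustration, assuming the dotmap package:

from dotmap import DotMap

res = DotMap({'hits': {'total': 3}})
print(res.hits.total)                     # 3
print(DotMap({}).hits.total == DotMap())  # True: missing keys resolve to an empty DotMap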
b9b3b36aed1f4b25316ef6ad0d112a943f7195b72bb2b5f5fb3017544d0387cf
|
def fairCandySwap(self, A, B):
'\n :type A: List[int]\n :type B: List[int]\n :rtype: List[int]\n '
(sum_A, sum_B) = (sum(A), sum(B))
counter_B = Counter(B)
diff = ((sum_A - sum_B) / 2)
for i in A:
if counter_B.has_key((i - diff)):
return [i, (i - diff)]
|
:type A: List[int]
:type B: List[int]
:rtype: List[int]
|
888.Fair-Candy-Swap.py
|
fairCandySwap
|
mickey0524/leetcode
| 18 |
python
|
def fairCandySwap(self, A, B):
'\n :type A: List[int]\n :type B: List[int]\n :rtype: List[int]\n '
(sum_A, sum_B) = (sum(A), sum(B))
counter_B = Counter(B)
diff = ((sum_A - sum_B) / 2)
for i in A:
if counter_B.has_key((i - diff)):
return [i, (i - diff)]
|
def fairCandySwap(self, A, B):
'\n :type A: List[int]\n :type B: List[int]\n :rtype: List[int]\n '
(sum_A, sum_B) = (sum(A), sum(B))
counter_B = Counter(B)
diff = ((sum_A - sum_B) / 2)
for i in A:
if counter_B.has_key((i - diff)):
return [i, (i - diff)]<|docstring|>:type A: List[int]
:type B: List[int]
:rtype: List[int]<|endoftext|>
|
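The body above is Python 2 (dict.has_key and plain integer division). The underlying arithmetic: if Alice gives x and Bob gives y, fairness needs sum(A) - x + y == sum(B) - y + x, i.e. y = x - (sum(A) - sum(B)) / 2. A Python 3 adaptation of the same idea (not the author's code):

from collections import Counter

def fair_candy_swap_py3(A, B):
    # y = x - diff must exist in B, where diff is half the size difference.
    diff = (sum(A) - sum(B)) // 2
    in_B = Counter(B)
    for x in A:
        if in_B[x - diff]:
            return [x, x - diff]

print(fair_candy_swap_py3([1, 1], [2, 2]))  # [1, 2]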
99552f9e80569af2b172b91829d4aa49b135ae50d066f587b06a9e2267135ef6
|
@property
def known_fields(self):
'\n Return a set of known fields that can be returned by complete() method.\n '
return set()
|
Return a set of known fields that can be returned by complete() method.
|
music_browser/plugins/deezer.py
|
known_fields
|
tms-studio/python-music-browser
| 0 |
python
|
@property
def known_fields(self):
'\n \n '
return set()
|
@property
def known_fields(self):
'\n \n '
return set()<|docstring|>Return a set of known fields that can be returned by complete() method.<|endoftext|>
|
5053a120801a41fef256eefe40d26104a8abea1fab72efbf6c890050937f15d8
|
def query(self, track: SimpleTrack) -> object:
'\n Complete metadata of a track based on simple track data like title, artist, or id.\n '
pass
|
Complete metadata of a track based on simple track data like title, artist, or id.
|
music_browser/plugins/deezer.py
|
query
|
tms-studio/python-music-browser
| 0 |
python
|
def query(self, track: SimpleTrack) -> object:
'\n \n '
pass
|
def query(self, track: SimpleTrack) -> object:
'\n \n '
pass<|docstring|>Complete metadata of a track based on simple track data like title, artist, or id.<|endoftext|>
|
3f75384fdb128b8276c52f7e8b904970bcc526c3a825e69e001886d01fa3c4ca
|
def search(self, query: str) -> List[SimpleTrack]:
'\n Return list of tracks matching the query.\n\n Parameters:\n query: String describing what track you are looking for.\n\n Returns:\n List of tracks known by Deezer that matches the query.\n '
response = requests.get(('https://api.deezer.com/search?' + urlencode({'q': query})))
tracks = []
for track_data in response.json()['data']:
tracks.append(SimpleTrack(album=track_data['album']['title'], artist=track_data['artist']['name'], title=track_data['title'], cover=('https://e-cdns-images.dzcdn.net/images/cover/%s/264x264-000000-80-0-0.jpg' % track_data['md5_image']), source={'id': str(track_data['id']), 'platform': 'deezer'}))
return tracks
|
Return list of tracks matching the query.
Parameters:
query: String describing what track you are looking for.
Returns:
List of tracks known by Deezer that matches the query.
|
music_browser/plugins/deezer.py
|
search
|
tms-studio/python-music-browser
| 0 |
python
|
def search(self, query: str) -> List[SimpleTrack]:
'\n Return list of tracks matching the query.\n\n Parameters:\n query: String describing what track you are looking for.\n\n Returns:\n List of tracks known by Deezer that matches the query.\n '
response = requests.get(('https://api.deezer.com/search?' + urlencode({'q': query})))
tracks = []
for track_data in response.json()['data']:
tracks.append(SimpleTrack(album=track_data['album']['title'], artist=track_data['artist']['name'], title=track_data['title'], cover=('https://e-cdns-images.dzcdn.net/images/cover/%s/264x264-000000-80-0-0.jpg' % track_data['md5_image']), source={'id': str(track_data['id']), 'platform': 'deezer'}))
return tracks
|
def search(self, query: str) -> List[SimpleTrack]:
'\n Return list of tracks matching the query.\n\n Parameters:\n query: String describing what track you are looking for.\n\n Returns:\n List of tracks known by Deezer that matches the query.\n '
response = requests.get(('https://api.deezer.com/search?' + urlencode({'q': query})))
tracks = []
for track_data in response.json()['data']:
tracks.append(SimpleTrack(album=track_data['album']['title'], artist=track_data['artist']['name'], title=track_data['title'], cover=('https://e-cdns-images.dzcdn.net/images/cover/%s/264x264-000000-80-0-0.jpg' % track_data['md5_image']), source={'id': str(track_data['id']), 'platform': 'deezer'}))
return tracks<|docstring|>Return list of tracks matching the query.
Parameters:
query: String describing what track you are looking for.
Returns:
List of tracks known by Deezer that matches the query.<|endoftext|>
|
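The only non-obvious part of the request above is the query-string construction; urlencode escapes the free-text query before it is appended to the Deezer search endpoint. For example:

from urllib.parse import urlencode

print('https://api.deezer.com/search?' + urlencode({'q': 'get lucky daft punk'}))
# https://api.deezer.com/search?q=get+lucky+daft+punk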
9b51de302c169e3865ce628b8a38f738a3f4b67924bee722fc6e3d745a211fbe
|
def gen_plot(self):
"\n color_sequence = [\n '#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c',\n '#98df8a', '#d62728', '#ff9896', '#9467bd', '#c5b0d5',\n '#8c564b', '#c49c94', '#e377c2', '#f7b6d2', '#7f7f7f',\n '#c7c7c7', '#bcbd22', '#dbdb8d', '#17becf', '#9edae5',\n '#9caae5', '#1cafe2']\n "
fig = plt.figure()
means = self.load_data(self.fnames)
np.save(((self.dir + '/') + 'allmean'), means, allow_pickle=False)
ax1 = fig.add_subplot(1, 1, 1)
maxval = (50 + (len(means) * 25))
no = list(range(50, maxval, 25))
no = np.array(no)
values = []
for i in range(len(means)):
values.append(np.max(means[i]))
values = np.array(values)
maxindx = argrelextrema(values, np.greater)
minindx = argrelextrema(values, np.less)
ax1.plot(no[maxindx], values[maxindx], label='Maxima', marker='^', linestyle='--', linewidth=2)
ax1.plot(no[minindx], values[minindx], label='Minima', marker='o', linestyle='--', linewidth=1)
ax1.plot(no, values, linewidth=1, label='Mean')
ax1.set_xlabel('No. of Agents')
ax1.set_ylabel('Performance')
ax1.set_title(self.title)
ax1.legend(fontsize='x-small')
plt.tight_layout()
fig.savefig((self.dir + '/agentsmean.pdf'))
fig.savefig((self.dir + '/agentsmean.png'))
plt.close(fig)
|
color_sequence = [
'#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c',
'#98df8a', '#d62728', '#ff9896', '#9467bd', '#c5b0d5',
'#8c564b', '#c49c94', '#e377c2', '#f7b6d2', '#7f7f7f',
'#c7c7c7', '#bcbd22', '#dbdb8d', '#17becf', '#9edae5',
'#9caae5', '#1cafe2']
|
swarms/utils/graph.py
|
gen_plot
|
aadeshnpn/swarm
| 9 |
python
|
def gen_plot(self):
"\n color_sequence = [\n '#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c',\n '#98df8a', '#d62728', '#ff9896', '#9467bd', '#c5b0d5',\n '#8c564b', '#c49c94', '#e377c2', '#f7b6d2', '#7f7f7f',\n '#c7c7c7', '#bcbd22', '#dbdb8d', '#17becf', '#9edae5',\n '#9caae5', '#1cafe2']\n "
fig = plt.figure()
means = self.load_data(self.fnames)
np.save(((self.dir + '/') + 'allmean'), means, allow_pickle=False)
ax1 = fig.add_subplot(1, 1, 1)
maxval = (50 + (len(means) * 25))
no = list(range(50, maxval, 25))
no = np.array(no)
values = []
for i in range(len(means)):
values.append(np.max(means[i]))
values = np.array(values)
maxindx = argrelextrema(values, np.greater)
minindx = argrelextrema(values, np.less)
ax1.plot(no[maxindx], values[maxindx], label='Maxima', marker='^', linestyle='--', linewidth=2)
ax1.plot(no[minindx], values[minindx], label='Minima', marker='o', linestyle='--', linewidth=1)
ax1.plot(no, values, linewidth=1, label='Mean')
ax1.set_xlabel('No. of Agents')
ax1.set_ylabel('Performance')
ax1.set_title(self.title)
ax1.legend(fontsize='x-small')
plt.tight_layout()
fig.savefig((self.dir + '/agentsmean.pdf'))
fig.savefig((self.dir + '/agentsmean.png'))
plt.close(fig)
|
def gen_plot(self):
"\n color_sequence = [\n '#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c',\n '#98df8a', '#d62728', '#ff9896', '#9467bd', '#c5b0d5',\n '#8c564b', '#c49c94', '#e377c2', '#f7b6d2', '#7f7f7f',\n '#c7c7c7', '#bcbd22', '#dbdb8d', '#17becf', '#9edae5',\n '#9caae5', '#1cafe2']\n "
fig = plt.figure()
means = self.load_data(self.fnames)
np.save(((self.dir + '/') + 'allmean'), means, allow_pickle=False)
ax1 = fig.add_subplot(1, 1, 1)
maxval = (50 + (len(means) * 25))
no = list(range(50, maxval, 25))
no = np.array(no)
values = []
for i in range(len(means)):
values.append(np.max(means[i]))
values = np.array(values)
maxindx = argrelextrema(values, np.greater)
minindx = argrelextrema(values, np.less)
ax1.plot(no[maxindx], values[maxindx], label='Maxima', marker='^', linestyle='--', linewidth=2)
ax1.plot(no[minindx], values[minindx], label='Minima', marker='o', linestyle='--', linewidth=1)
ax1.plot(no, values, linewidth=1, label='Mean')
ax1.set_xlabel('No. of Agents')
ax1.set_ylabel('Performance')
ax1.set_title(self.title)
ax1.legend(fontsize='x-small')
plt.tight_layout()
fig.savefig((self.dir + '/agentsmean.pdf'))
fig.savefig((self.dir + '/agentsmean.png'))
plt.close(fig)<|docstring|>color_sequence = [
'#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c',
'#98df8a', '#d62728', '#ff9896', '#9467bd', '#c5b0d5',
'#8c564b', '#c49c94', '#e377c2', '#f7b6d2', '#7f7f7f',
'#c7c7c7', '#bcbd22', '#dbdb8d', '#17becf', '#9edae5',
'#9caae5', '#1cafe2']<|endoftext|>
|
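gen_plot marks local maxima and minima with scipy.signal.argrelextrema, which returns a tuple of index arrays for points strictly greater (or less) than their neighbours, endpoints excluded. A small standalone example:

import numpy as np
from scipy.signal import argrelextrema

values = np.array([1.0, 3.0, 2.0, 5.0, 4.0])
print(argrelextrema(values, np.greater))  # (array([1, 3]),) local maxima
print(argrelextrema(values, np.less))     # (array([2]),)    local minima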
f1306ffe85796877af6f043957805e3d754d96eedc695caf649fbce8d25743e3
|
def assertEventEquals(self, expected, actual):
'Compares two events for equality, ignoring the timestamps.'
expected = dict(expected)
del expected['timestamp']
actual = dict(actual)
del actual['timestamp']
self.assertEquals(expected, actual)
|
Compares two events for equality, ignoring the timestamps.
|
test/testhelper.py
|
assertEventEquals
|
srault95/logcabin
| 0 |
python
|
def assertEventEquals(self, expected, actual):
expected = dict(expected)
del expected['timestamp']
actual = dict(actual)
del actual['timestamp']
self.assertEquals(expected, actual)
|
def assertEventEquals(self, expected, actual):
expected = dict(expected)
del expected['timestamp']
actual = dict(actual)
del actual['timestamp']
self.assertEquals(expected, actual)<|docstring|>Compares two events for equality, ignoring the timestamps.<|endoftext|>
|
701a8ebd5bb6278ac29548879a36e689e6766ef0dc4055149c4f7746e3df715c
|
def any(_):
'Matches anything'
return True
|
Matches anything
|
test/testhelper.py
|
any
|
srault95/logcabin
| 0 |
python
|
def any(_):
return True
|
def any(_):
return True<|docstring|>Matches anything<|endoftext|>
|
20bafeaeebee0b2c53f93e7f60db1246b402b0251b89ae1e87a82efef0fffbd5
|
@classmethod
def setup_class(cls):
' set-up class '
cls.client = UserAuth('https://cerberus.fake.com', 'testuser', 'hardtoguesspasswd')
cls.auth_resp = {'status': 'mfa_req', 'data': {'username': '[email protected]', 'state_token': '0127a384d305138d4e', 'client_token': 'None', 'user_id': '1325', 'devices': [{'id': '223', 'name': 'Google Authenticator'}]}}
cls.auth_resp_multi = {'status': 'mfa_req', 'data': {'username': '[email protected]', 'state_token': '0127a384d305138d4e', 'client_token': 'None', 'user_id': '1325', 'devices': [{'id': '223', 'name': 'Google Authenticator'}, {'id': '224', 'name': 'OTP Authenticator'}]}}
|
set-up class
|
tests/test_user_auth.py
|
setup_class
|
splintercat/cerberus-python-client
| 16 |
python
|
@classmethod
def setup_class(cls):
' '
cls.client = UserAuth('https://cerberus.fake.com', 'testuser', 'hardtoguesspasswd')
cls.auth_resp = {'status': 'mfa_req', 'data': {'username': '[email protected]', 'state_token': '0127a384d305138d4e', 'client_token': 'None', 'user_id': '1325', 'devices': [{'id': '223', 'name': 'Google Authenticator'}]}}
cls.auth_resp_multi = {'status': 'mfa_req', 'data': {'username': '[email protected]', 'state_token': '0127a384d305138d4e', 'client_token': 'None', 'user_id': '1325', 'devices': [{'id': '223', 'name': 'Google Authenticator'}, {'id': '224', 'name': 'OTP Authenticator'}]}}
|
@classmethod
def setup_class(cls):
' '
cls.client = UserAuth('https://cerberus.fake.com', 'testuser', 'hardtoguesspasswd')
cls.auth_resp = {'status': 'mfa_req', 'data': {'username': '[email protected]', 'state_token': '0127a384d305138d4e', 'client_token': 'None', 'user_id': '1325', 'devices': [{'id': '223', 'name': 'Google Authenticator'}]}}
cls.auth_resp_multi = {'status': 'mfa_req', 'data': {'username': '[email protected]', 'state_token': '0127a384d305138d4e', 'client_token': 'None', 'user_id': '1325', 'devices': [{'id': '223', 'name': 'Google Authenticator'}, {'id': '224', 'name': 'OTP Authenticator'}]}}<|docstring|>set-up class<|endoftext|>
|
e47b919919f8c1326ee65fd9f96f5e46aa2954c060438a42be2fb6ce67d79f96
|
def test_username(self):
' Testing to make sure username match '
assert_equals(self.client.username, 'testuser')
|
Testing to make sure username match
|
tests/test_user_auth.py
|
test_username
|
splintercat/cerberus-python-client
| 16 |
python
|
def test_username(self):
' '
assert_equals(self.client.username, 'testuser')
|
def test_username(self):
' '
assert_equals(self.client.username, 'testuser')<|docstring|>Testing to make sure username match<|endoftext|>
|
65a75a8b8fd44a44df55c9f1d1a43038850940860e2a404fc888a75bc124ac9b
|
@patch('cerberus.user_auth.UserAuth.get_auth')
def test_get_token(self, mock_get_auth):
' Test to make sure the correct token is returned '
mock_get_auth.return_value = {'status': 'success', 'data': {'client_token': {'client_token': '7f6808f1-ede3-2177-aa9d-45f507391310'}}}
token = self.client.get_token()
assert_equals(token, '7f6808f1-ede3-2177-aa9d-45f507391310')
|
Test to make sure the correct token is returned
|
tests/test_user_auth.py
|
test_get_token
|
splintercat/cerberus-python-client
| 16 |
python
|
@patch('cerberus.user_auth.UserAuth.get_auth')
def test_get_token(self, mock_get_auth):
' '
mock_get_auth.return_value = {'status': 'success', 'data': {'client_token': {'client_token': '7f6808f1-ede3-2177-aa9d-45f507391310'}}}
token = self.client.get_token()
assert_equals(token, '7f6808f1-ede3-2177-aa9d-45f507391310')
|
@patch('cerberus.user_auth.UserAuth.get_auth')
def test_get_token(self, mock_get_auth):
' '
mock_get_auth.return_value = {'status': 'success', 'data': {'client_token': {'client_token': '7f6808f1-ede3-2177-aa9d-45f507391310'}}}
token = self.client.get_token()
assert_equals(token, '7f6808f1-ede3-2177-aa9d-45f507391310')<|docstring|>Test to make sure the correct token is returned<|endoftext|>
|
5d30ba4efdb3dc9cd7c06e8c36ec631df8f47f45b4d0425ca66b9558d5dc07bb
|
@patch('requests.get')
def test_get_auth(self, mock_get):
'" Test that correct response is returned by get_auth '
mock_resp = self._mock_response(content=json.dumps(self.auth_resp))
mock_get.return_value = mock_resp
response = self.client.get_auth()
assert_dict_equal(response, self.auth_resp)
|
" Test that correct response is returned by get_auth
|
tests/test_user_auth.py
|
test_get_auth
|
splintercat/cerberus-python-client
| 16 |
python
|
@patch('requests.get')
def test_get_auth(self, mock_get):
' '
mock_resp = self._mock_response(content=json.dumps(self.auth_resp))
mock_get.return_value = mock_resp
response = self.client.get_auth()
assert_dict_equal(response, self.auth_resp)
|
@patch('requests.get')
def test_get_auth(self, mock_get):
' '
mock_resp = self._mock_response(content=json.dumps(self.auth_resp))
mock_get.return_value = mock_resp
response = self.client.get_auth()
assert_dict_equal(response, self.auth_resp)<|docstring|>" Test that correct response is returned by get_auth<|endoftext|>
|
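Several of the tests above build fake HTTP responses through a _mock_response helper that is not included in this excerpt. A plausible, hypothetical sketch of such a helper (the real one may differ):

import json
from unittest.mock import Mock

def _mock_response(status=200, reason='OK', content=''):
    # Hypothetical stand-in: a Mock shaped like requests.Response, exposing
    # status_code, reason, text and a json() method over the given content.
    resp = Mock()
    resp.status_code = status
    resp.reason = reason
    resp.text = content
    resp.json.return_value = json.loads(content) if content else {}
    return resp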
d7f8e0fb8a4a64b58878099b73a7a1392cd73aa7cdd91807154e95cfd85a35f5
|
@patch(input_module, return_value='0987654321')
@patch('requests.post')
def test_mfa_response(self, mock_post, mock_input=None):
' Testing that mfa_response returns the correct json '
mfa_data = {'status': 'success', 'data': {'user_id': '134', 'username': '[email protected]', 'state_token': None, 'devices': [], 'client_token': {'client_token': '61e3-f3f-6536-a3e6-b498161d', 'policies': ['cloud-events-owner', 'pixie-dust-owner'], 'metadata': {'groups': 'Rainbow.Playgroun.User,CareBear.users', 'is_admin': 'false', 'username': '[email protected]'}, 'lease_duration': 3600, 'renewable': True}}}
mock_post.return_value = self._mock_response(content=json.dumps(mfa_data))
response = self.client.get_mfa(self.auth_resp)
assert_dict_equal(response, mfa_data)
|
Testing that mfa_response returns the correct json
|
tests/test_user_auth.py
|
test_mfa_response
|
splintercat/cerberus-python-client
| 16 |
python
|
@patch(input_module, return_value='0987654321')
@patch('requests.post')
def test_mfa_response(self, mock_post, mock_input=None):
' '
mfa_data = {'status': 'success', 'data': {'user_id': '134', 'username': '[email protected]', 'state_token': None, 'devices': [], 'client_token': {'client_token': '61e3-f3f-6536-a3e6-b498161d', 'policies': ['cloud-events-owner', 'pixie-dust-owner'], 'metadata': {'groups': 'Rainbow.Playgroun.User,CareBear.users', 'is_admin': 'false', 'username': '[email protected]'}, 'lease_duration': 3600, 'renewable': True}}}
mock_post.return_value = self._mock_response(content=json.dumps(mfa_data))
response = self.client.get_mfa(self.auth_resp)
assert_dict_equal(response, mfa_data)
|
@patch(input_module, return_value='0987654321')
@patch('requests.post')
def test_mfa_response(self, mock_post, mock_input=None):
' '
mfa_data = {'status': 'success', 'data': {'user_id': '134', 'username': '[email protected]', 'state_token': None, 'devices': [], 'client_token': {'client_token': '61e3-f3f-6536-a3e6-b498161d', 'policies': ['cloud-events-owner', 'pixie-dust-owner'], 'metadata': {'groups': 'Rainbow.Playgroun.User,CareBear.users', 'is_admin': 'false', 'username': '[email protected]'}, 'lease_duration': 3600, 'renewable': True}}}
mock_post.return_value = self._mock_response(content=json.dumps(mfa_data))
response = self.client.get_mfa(self.auth_resp)
assert_dict_equal(response, mfa_data)<|docstring|>Testing that mfa_response returns the correct json<|endoftext|>
|
30c9efb88fa116713fd5936d0f83c20f5bed9ceb941faec32df77e98b1e60e79
|
@patch(input_module, side_effect=['1', '0987654321'])
@patch('requests.post')
def test_multi_mfa_response(self, mock_post, mock_input=None):
' Testing that mfa_response returns the correct json when there are multiple MFAs available '
mfa_data = {'status': 'success', 'data': {'user_id': '134', 'username': '[email protected]', 'state_token': None, 'devices': [], 'client_token': {'client_token': '61e3-f3f-6536-a3e6-b498161d', 'policies': ['cloud-events-owner', 'pixie-dust-owner'], 'metadata': {'groups': 'Rainbow.Playgroun.User,CareBear.users', 'is_admin': 'false', 'username': '[email protected]'}, 'lease_duration': 3600, 'renewable': True}}}
mock_post.return_value = self._mock_response(content=json.dumps(mfa_data))
response = self.client.get_mfa(self.auth_resp_multi)
assert_dict_equal(response, mfa_data)
|
Testing that mfa_response returns the correct json when there are multiple MFAs available
|
tests/test_user_auth.py
|
test_multi_mfa_response
|
splintercat/cerberus-python-client
| 16 |
python
|
@patch(input_module, side_effect=['1', '0987654321'])
@patch('requests.post')
def test_multi_mfa_response(self, mock_post, mock_input=None):
' '
mfa_data = {'status': 'success', 'data': {'user_id': '134', 'username': '[email protected]', 'state_token': None, 'devices': [], 'client_token': {'client_token': '61e3-f3f-6536-a3e6-b498161d', 'policies': ['cloud-events-owner', 'pixie-dust-owner'], 'metadata': {'groups': 'Rainbow.Playgroun.User,CareBear.users', 'is_admin': 'false', 'username': '[email protected]'}, 'lease_duration': 3600, 'renewable': True}}}
mock_post.return_value = self._mock_response(content=json.dumps(mfa_data))
response = self.client.get_mfa(self.auth_resp_multi)
assert_dict_equal(response, mfa_data)
|
@patch(input_module, side_effect=['1', '0987654321'])
@patch('requests.post')
def test_multi_mfa_response(self, mock_post, mock_input=None):
' '
mfa_data = {'status': 'success', 'data': {'user_id': '134', 'username': '[email protected]', 'state_token': None, 'devices': [], 'client_token': {'client_token': '61e3-f3f-6536-a3e6-b498161d', 'policies': ['cloud-events-owner', 'pixie-dust-owner'], 'metadata': {'groups': 'Rainbow.Playgroun.User,CareBear.users', 'is_admin': 'false', 'username': '[email protected]'}, 'lease_duration': 3600, 'renewable': True}}}
mock_post.return_value = self._mock_response(content=json.dumps(mfa_data))
response = self.client.get_mfa(self.auth_resp_multi)
assert_dict_equal(response, mfa_data)<|docstring|>Testing that mfa_response returns the correct json when there are multiple MFAs available<|endoftext|>
|
4700395166e6ebcf8b17338cd2e955d254c2a4b33722de7ba3cbaa8a557107e9
|
@raises(CerberusClientException)
@patch(input_module, return_value='a1')
def test_multi_mfa_response_text(self, mock_input=None):
' Testing improper inputs for Multiple MFA selections, (a1) '
response = self.client.get_mfa(self.auth_resp_multi)
|
Testing improper inputs for Multiple MFA selections, (a1)
|
tests/test_user_auth.py
|
test_multi_mfa_response_text
|
splintercat/cerberus-python-client
| 16 |
python
|
@raises(CerberusClientException)
@patch(input_module, return_value='a1')
def test_multi_mfa_response_text(self, mock_input=None):
' '
response = self.client.get_mfa(self.auth_resp_multi)
|
@raises(CerberusClientException)
@patch(input_module, return_value='a1')
def test_multi_mfa_response_text(self, mock_input=None):
' '
response = self.client.get_mfa(self.auth_resp_multi)<|docstring|>Testing improper inputs for Multiple MFA selections, (a1)<|endoftext|>
|
20bc788ea31ff39fb245d363d92c7409dfef6e60a5898d8f283bab4ea20fc0d4
|
@raises(CerberusClientException)
@patch(input_module, return_value='-1')
def test_multi_mfa_response_low(self, mock_input=None):
' Testing improper inputs for Multiple MFA selections, (-1) '
response = self.client.get_mfa(self.auth_resp_multi)
|
Testing improper inputs for Multiple MFA selections, (-1)
|
tests/test_user_auth.py
|
test_multi_mfa_response_low
|
splintercat/cerberus-python-client
| 16 |
python
|
@raises(CerberusClientException)
@patch(input_module, return_value='-1')
def test_multi_mfa_response_low(self, mock_input=None):
' '
response = self.client.get_mfa(self.auth_resp_multi)
|
@raises(CerberusClientException)
@patch(input_module, return_value='-1')
def test_multi_mfa_response_low(self, mock_input=None):
' '
response = self.client.get_mfa(self.auth_resp_multi)<|docstring|>Testing improper inputs for Multiple MFA selections, (-1)<|endoftext|>
|
677f230078c7b633972bef7b4ee3951f3f4ccb663deb32ff40da7f552cd39ab5
|
@raises(CerberusClientException)
@patch(input_module, return_value='2')
def test_multi_mfa_response_high(self, mock_input=None):
' Testing improper inputs for Multiple MFA selections, (2) '
response = self.client.get_mfa(self.auth_resp_multi)
|
Testing improper inputs for Multiple MFA selections, (2)
|
tests/test_user_auth.py
|
test_multi_mfa_response_high
|
splintercat/cerberus-python-client
| 16 |
python
|
@raises(CerberusClientException)
@patch(input_module, return_value='2')
def test_multi_mfa_response_high(self, mock_input=None):
' '
response = self.client.get_mfa(self.auth_resp_multi)
|
@raises(CerberusClientException)
@patch(input_module, return_value='2')
def test_multi_mfa_response_high(self, mock_input=None):
' '
response = self.client.get_mfa(self.auth_resp_multi)<|docstring|>Testing improper inputs for Multiple MFA selections, (2)<|endoftext|>
|
dd8581bf69d0b419e575b630cdd8acd9f865d99b914069fdd81b0188d3848a82
|
@raises(CerberusClientException)
@patch('requests.get')
def test_when_not_200_status_code(self, mock_get):
' test when 200 status code is not returned'
data = json.dumps({'error_id': '123', 'errors': []})
mock_resp = self._mock_response(status=404, reason='Not Found', content=data)
mock_get.return_value = mock_resp
self.client.get_auth()
|
test when 200 status code is not returned
|
tests/test_user_auth.py
|
test_when_not_200_status_code
|
splintercat/cerberus-python-client
| 16 |
python
|
@raises(CerberusClientException)
@patch('requests.get')
def test_when_not_200_status_code(self, mock_get):
' '
data = json.dumps({'error_id': '123', 'errors': []})
mock_resp = self._mock_response(status=404, reason='Not Found', content=data)
mock_get.return_value = mock_resp
self.client.get_auth()
|
@raises(CerberusClientException)
@patch('requests.get')
def test_when_not_200_status_code(self, mock_get):
' '
data = json.dumps({'error_id': '123', 'errors': []})
mock_resp = self._mock_response(status=404, reason='Not Found', content=data)
mock_get.return_value = mock_resp
self.client.get_auth()<|docstring|>test when 200 status code is not returned<|endoftext|>
|
1f562dbaa975bade676216ed2150aa374b136aa5461581441d08030e9c0e0514
|
def __init__(self, master, defPath=None, fileTypes=None, maxChar=30, callFunc=None, severity=opscore.RO.Constants.sevNormal, helpText=None, helpURL=None, **kargs):
'Creates a new Button.\n\n Inputs:\n - defPath: initial path; silently ignored if invalid or nonexistent\n - fileTypes: sequence of (label, pattern) tuples;\n use * as a pattern to allow all files of that labelled type;\n omit altogether to allow all files\n - maxChar: maximum # of characters of file path to display\n - callFunc callback function; the function receives one argument: self.\n It is called whenever the value changes (manually or via\n the associated variable being set).\n - severity initial severity; one of opscore.RO.Constants.sevNormal, sevWarning or sevError\n - helpText text for hot help\n - helpURL URL for longer help\n - all remaining keyword arguments are used to configure the Tkinter Button;\n command is supported, for the sake of conformity, but callFunc is preferred.\n '
self.fileTypes = fileTypes
self.maxChar = max(3, int(maxChar))
self.helpText = helpText
self.path = None
self.defPath = None
self.leftChar = 0
self.rightChar = ((self.maxChar - self.leftChar) - 1)
tkinter.Button.__init__(self, master=master, command=self._doChoose, **kargs)
opscore.RO.AddCallback.BaseMixin.__init__(self)
CtxMenuMixin.__init__(self, helpURL=helpURL)
SeverityActiveMixin.__init__(self, severity)
self._initPath(defPath)
if callFunc:
self.addCallback(callFunc, False)
|
Creates a new Button.
Inputs:
- defPath: initial path; silently ignored if invalid or nonexistent
- fileTypes: sequence of (label, pattern) tuples;
use * as a pattern to allow all files of that labelled type;
omit altogether to allow all files
- maxChar: maximum # of characters of file path to display
- callFunc callback function; the function receives one argument: self.
It is called whenever the value changes (manually or via
the associated variable being set).
- severity initial severity; one of opscore.RO.Constants.sevNormal, sevWarning or sevError
- helpText text for hot help
- helpURL URL for longer help
- all remaining keyword arguments are used to configure the Tkinter Button;
command is supported, for the sake of conformity, but callFunc is preferred.
|
python/opscore/RO/Wdg/PathWdg.py
|
__init__
|
sdss/opscore
| 0 |
python
|
def __init__(self, master, defPath=None, fileTypes=None, maxChar=30, callFunc=None, severity=opscore.RO.Constants.sevNormal, helpText=None, helpURL=None, **kargs):
'Creates a new Button.\n\n Inputs:\n - defPath: initial path; silently ignored if invalid or nonexistent\n - fileTypes: sequence of (label, pattern) tuples;\n use * as a pattern to allow all files of that labelled type;\n omit altogether to allow all files\n - maxChar: maximum # of characters of file path to display\n - callFunc callback function; the function receives one argument: self.\n It is called whenever the value changes (manually or via\n the associated variable being set).\n - severity initial severity; one of opscore.RO.Constants.sevNormal, sevWarning or sevError\n - helpText text for hot help\n - helpURL URL for longer help\n - all remaining keyword arguments are used to configure the Tkinter Button;\n command is supported, for the sake of conformity, but callFunc is preferred.\n '
self.fileTypes = fileTypes
self.maxChar = max(3, int(maxChar))
self.helpText = helpText
self.path = None
self.defPath = None
self.leftChar = 0
self.rightChar = ((self.maxChar - self.leftChar) - 1)
tkinter.Button.__init__(self, master=master, command=self._doChoose, **kargs)
opscore.RO.AddCallback.BaseMixin.__init__(self)
CtxMenuMixin.__init__(self, helpURL=helpURL)
SeverityActiveMixin.__init__(self, severity)
self._initPath(defPath)
if callFunc:
self.addCallback(callFunc, False)
|
def __init__(self, master, defPath=None, fileTypes=None, maxChar=30, callFunc=None, severity=opscore.RO.Constants.sevNormal, helpText=None, helpURL=None, **kargs):
'Creates a new Button.\n\n Inputs:\n - defPath: initial path; silently ignored if invalid or nonexistent\n - fileTypes: sequence of (label, pattern) tuples;\n use * as a pattern to allow all files of that labelled type;\n omit altogether to allow all files\n - maxChar: maximum # of characters of file path to display\n - callFunc callback function; the function receives one argument: self.\n It is called whenever the value changes (manually or via\n the associated variable being set).\n - severity initial severity; one of opscore.RO.Constants.sevNormal, sevWarning or sevError\n - helpText text for hot help\n - helpURL URL for longer help\n - all remaining keyword arguments are used to configure the Tkinter Button;\n command is supported, for the sake of conformity, but callFunc is preferred.\n '
self.fileTypes = fileTypes
self.maxChar = max(3, int(maxChar))
self.helpText = helpText
self.path = None
self.defPath = None
self.leftChar = 0
self.rightChar = ((self.maxChar - self.leftChar) - 1)
tkinter.Button.__init__(self, master=master, command=self._doChoose, **kargs)
opscore.RO.AddCallback.BaseMixin.__init__(self)
CtxMenuMixin.__init__(self, helpURL=helpURL)
SeverityActiveMixin.__init__(self, severity)
self._initPath(defPath)
if callFunc:
self.addCallback(callFunc, False)<|docstring|>Creates a new Button.
Inputs:
- defPath: initial path; silently ignored if invalid or nonexistent
- fileTypes: sequence of (label, pattern) tuples;
use * as a pattern to allow all files of that labelled type;
omit altogether to allow all files
- maxChar: maximum # of characters of file path to display
- callFunc callback function; the function receives one argument: self.
It is called whenever the value changes (manually or via
the associated variable being set).
- severity initial severity; one of opscore.RO.Constants.sevNormal, sevWarning or sevError
- helpText text for hot help
- helpURL URL for longer help
- all remaining keyword arguments are used to configure the Tkinter Button;
command is supported, for the sake of conformity, but callFunc is preferred.<|endoftext|>
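A hypothetical construction sketch for the widget above; the DirWdg subclass name and its import path are assumptions inferred from the file path in this record, not something shown here:
import tkinter
from opscore.RO.Wdg.PathWdg import DirWdg  # assumed module/class name

root = tkinter.Tk()

def report(wdg):
    # called whenever the chosen path changes
    print('path is now', wdg.getPath())

chooser = DirWdg(root, defPath='/tmp', maxChar=30, callFunc=report, helpText='Choose a directory')
chooser.pack()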
|
83383f72a2febbf74272f9a9cd1efd3515c426e41486d39fd7e6370f531fef17
|
def _doChoose(self):
'Put up a dialog to choose a new file.\n Subclasses must override.\n '
raise NotImplementedError('_doChoose must be implemented by a subclass')
|
Put up a dialog to choose a new file.
Subclasses must override.
|
python/opscore/RO/Wdg/PathWdg.py
|
_doChoose
|
sdss/opscore
| 0 |
python
|
def _doChoose(self):
'Put up a dialog to choose a new file.\n Subclasses must override.\n '
raise NotImplementedError('_doChoose must be implemented by a subclass')
|
def _doChoose(self):
'Put up a dialog to choose a new file.\n Subclasses must override.\n '
raise NotImplementedError('_doChoose must be implemented by a subclass')<|docstring|>Put up a dialog to choose a new file.
Subclasses must override.<|endoftext|>
|
6c142e891e92575afa81cc7a16026c7a717ff2c17263feb393866cf60cb4476d
|
def _initPath(self, defPath):
'During initialization set self.defPath and self.path.\n '
if defPath:
try:
self.checkPath(defPath)
defPath = os.path.abspath(defPath)
except ValueError:
defPath = None
self.defPath = defPath
self.setPath(defPath)
|
During initialization set self.defPath and self.path.
|
python/opscore/RO/Wdg/PathWdg.py
|
_initPath
|
sdss/opscore
| 0 |
python
|
def _initPath(self, defPath):
'\n '
if defPath:
try:
self.checkPath(defPath)
defPath = os.path.abspath(defPath)
except ValueError:
defPath = None
self.defPath = defPath
self.setPath(defPath)
|
def _initPath(self, defPath):
'\n '
if defPath:
try:
self.checkPath(defPath)
defPath = os.path.abspath(defPath)
except ValueError:
defPath = None
self.defPath = defPath
self.setPath(defPath)<|docstring|>During initialization set self.defPath and self.path.<|endoftext|>
|
6108a3be46a0d68404e93f95abbfc9280aa70441294214cb55e3ba20fab1ba4b
|
def checkPath(self, path):
'Raise ValueError if path not None and does not exist.\n Override from base class to make more specific.\n '
if (path and (not os.path.exists(path))):
raise ValueError(('Path %r does not exist' % (path,)))
|
Raise ValueError if path not None and does not exist.
Override from base class to make more specific.
|
python/opscore/RO/Wdg/PathWdg.py
|
checkPath
|
sdss/opscore
| 0 |
python
|
def checkPath(self, path):
'Raise ValueError if path not None and does not exist.\n Override from base class to make more specific.\n '
if (path and (not os.path.exists(path))):
raise ValueError(('Path %r does not exist' % (path,)))
|
def checkPath(self, path):
'Raise ValueError if path not None and does not exist.\n Override from base class to make more specific.\n '
if (path and (not os.path.exists(path))):
raise ValueError(('Path %r does not exist' % (path,)))<|docstring|>Raise ValueError if path not None and does not exist.
Override from base class to make more specific.<|endoftext|>
|
75e0e8d801ac47fced8077f22a2269cad57109326e455bf78711a3d776e65845
|
def setEnable(self, doEnable):
'Enable or disable widget\n\n Inputs:\n - doEnable: if True enable widget (set state to normal); otherwise set state to disabled\n\n Warning: if you want the state to be "active" you must set that explicitly.\n '
if doEnable:
self['state'] = tkinter.NORMAL
else:
self['state'] = tkinter.DISABLED
|
Enable or disable widget
Inputs:
- doEnable: if True enable widget (set state to normal); otherwise set state to disabled
Warning: if you want the state to be "active" you must set that explicitly.
|
python/opscore/RO/Wdg/PathWdg.py
|
setEnable
|
sdss/opscore
| 0 |
python
|
def setEnable(self, doEnable):
'Enable or disable widget\n\n Inputs:\n - doEnable: if True enable widget (set state to normal); otherwise set state to disabled\n\n Warning: if you want the state to be "active" you must set that explicitly.\n '
if doEnable:
self['state'] = tkinter.NORMAL
else:
self['state'] = tkinter.DISABLED
|
def setEnable(self, doEnable):
'Enable or disable widget\n\n Inputs:\n - doEnable: if True enable widget (set state to normal); otherwise set state to disabled\n\n Warning: if you want the state to be "active" you must set that explicitly.\n '
if doEnable:
self['state'] = tkinter.NORMAL
else:
self['state'] = tkinter.DISABLED<|docstring|>Enable or disable widget
Inputs:
- doEnable: if True enable widget (set state to normal); otherwise set state to disabled
Warning: if you want the state to be "active" you must set that explicitly.<|endoftext|>
|
7f30fda178745b866568a8ea36f33176f26b288a94d8fbeaeb7152f499e24b31
|
def getEnable(self):
'Return True if widget is enabled, False otherwise\n\n Enabled is defined as the state is not "disabled" (thus "enabled" or "active").\n '
return (self['state'] != tkinter.DISABLED)
|
Return True if widget is enabled, False otherwise
Enabled is defined as the state is not "disabled" (thus "enabled" or "active").
|
python/opscore/RO/Wdg/PathWdg.py
|
getEnable
|
sdss/opscore
| 0 |
python
|
def getEnable(self):
'Return True if widget is enabled, False otherwise\n\n Enabled is defined as the state is not "disabled" (thus "enabled" or "active").\n '
return (self['state'] != tkinter.DISABLED)
|
def getEnable(self):
'Return True if widget is enabled, False otherwise\n\n Enabled is defined as the state is not "disabled" (thus "enabled" or "active").\n '
return (self['state'] != tkinter.DISABLED)<|docstring|>Return True if widget is enabled, False otherwise
Enabled is defined as the state is not "disabled" (thus "enabled" or "active").<|endoftext|>
|
9d575e60b06519b3a8c5146145fe666991201b4c9e76a227b5977abcdd93b885
|
def setPath(self, path):
'Set self.path to normalized version of path.\n\n Inputs:\n - path: path; if None or "" then no path\n\n Raise ValueError if path invalid or nonexistent.\n '
if (not path):
dispStr = ''
else:
self.checkPath(path)
path = os.path.abspath(path)
if (len(path) > self.maxChar):
dispStr = ''.join((path[0:self.leftChar], u('…'), path[(- self.rightChar):]))
else:
dispStr = path
self.path = path
self['text'] = dispStr
self._doCallbacks()
|
Set self.path to normalized version of path.
Inputs:
- path: path; if None or "" then no path
Raise ValueError if path invalid or nonexistent.
|
python/opscore/RO/Wdg/PathWdg.py
|
setPath
|
sdss/opscore
| 0 |
python
|
def setPath(self, path):
'Set self.path to normalized version of path.\n\n        Inputs:\n        - path: path; if None or "" then no path\n\n        Raise ValueError if path invalid or nonexistent.\n        '
if (not path):
dispStr = ''
else:
self.checkPath(path)
path = os.path.abspath(path)
if (len(path) > self.maxChar):
dispStr = ''.join((path[0:self.leftChar], u('…'), path[(- self.rightChar):]))
else:
dispStr = path
self.path = path
self['text'] = dispStr
self._doCallbacks()
|
def setPath(self, path):
'Set self.path to normalized version of path.\n\n        Inputs:\n        - path: path; if None or "" then no path\n\n        Raise ValueError if path invalid or nonexistent.\n        '
if (not path):
dispStr = ''
else:
self.checkPath(path)
path = os.path.abspath(path)
if (len(path) > self.maxChar):
dispStr = ''.join((path[0:self.leftChar], u('…'), path[(- self.rightChar):]))
else:
dispStr = path
self.path = path
self['text'] = dispStr
self._doCallbacks()<|docstring|>Set self.path to normalized version of path.
Inputs:
- path: path; if None or "" then no path
Raise ValueError if path invalid or nonexistent.<|endoftext|>
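A worked example of the truncation rule setPath applies before displaying the path (the sample path and maxChar value are assumed):
path = '/very/long/example/path/file.txt'
maxChar, leftChar = 10, 0
rightChar = maxChar - leftChar - 1          # 9, matching the constructor above
if len(path) > maxChar:
    disp = path[0:leftChar] + '…' + path[-rightChar:]
else:
    disp = path
print(disp)   # '…/file.txt'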
|
d3d3dc47038034454b857de848531dc11330433f8c04ad14129c71cf48278ea4
|
def getPath(self):
'Return the current path (or None if no path).\n '
return self.path
|
Return the current path (or None if no path).
|
python/opscore/RO/Wdg/PathWdg.py
|
getPath
|
sdss/opscore
| 0 |
python
|
def getPath(self):
'\n '
return self.path
|
def getPath(self):
'\n '
return self.path<|docstring|>Return the current path (or None if no path).<|endoftext|>
|
68deab11050cfee50825c90f8493cf21ee68f3b99fbcc13985a93ec9e1c9d93e
|
def ctxConfigMenu(self, menu):
'Configure the contextual menu.\n Called just before the menu is posted.\n '
if (not self.getEnable()):
return True
if (self.path is None):
state = 'disabled'
else:
state = 'normal'
if self.path:
copyLabel = ' '.join(('Copy', self.path))
else:
copyLabel = 'Copy'
menu.add_command(label=copyLabel, command=self._copyToClip, state=state)
return True
|
Configure the contextual menu.
Called just before the menu is posted.
|
python/opscore/RO/Wdg/PathWdg.py
|
ctxConfigMenu
|
sdss/opscore
| 0 |
python
|
def ctxConfigMenu(self, menu):
'Configure the contextual menu.\n Called just before the menu is posted.\n '
if (not self.getEnable()):
return True
if (self.path is None):
state = 'disabled'
else:
state = 'normal'
if self.path:
copyLabel = ' '.join(('Copy', self.path))
else:
copyLabel = 'Copy'
menu.add_command(label=copyLabel, command=self._copyToClip, state=state)
return True
|
def ctxConfigMenu(self, menu):
'Configure the contextual menu.\n Called just before the menu is posted.\n '
if (not self.getEnable()):
return True
if (self.path is None):
state = 'disabled'
else:
state = 'normal'
if self.path:
copyLabel = ' '.join(('Copy', self.path))
else:
copyLabel = 'Copy'
menu.add_command(label=copyLabel, command=self._copyToClip, state=state)
return True<|docstring|>Configure the contextual menu.
Called just before the menu is posted.<|endoftext|>
|
71154e3bc701a52a2401dbc5ac0a9b41085f02636b78d0661af1a5028ceb3845
|
def _copyToClip(self):
'Copy the current path to the clipboard\n '
if (self.path is not None):
self.clipboard_clear()
self.clipboard_append(self.path)
|
Copy the current path to the clipboard
|
python/opscore/RO/Wdg/PathWdg.py
|
_copyToClip
|
sdss/opscore
| 0 |
python
|
def _copyToClip(self):
'\n '
if (self.path is not None):
self.clipboard_clear()
self.clipboard_append(self.path)
|
def _copyToClip(self):
'\n '
if (self.path is not None):
self.clipboard_clear()
self.clipboard_append(self.path)<|docstring|>Copy the current path to the clipboard<|endoftext|>
|
59d71efbaf899b3b9e3696070caa36dbbb0b98fc255d359766909a2f27caabb0
|
def _doChoose(self):
'Put up a dialog to choose a new file.\n '
if (self.path is not None):
startDir = self.path
else:
startDir = self.defPath
if (startDir and (not os.path.isdir(startDir))):
parDir = os.path.split(self.path)[0]
if os.path.isdir(parDir):
startDir = parDir
else:
startDir = None
kargs = {}
if self.fileTypes:
kargs['filetypes'] = self.fileTypes
newPath = tkinter.filedialog.askdirectory(initialdir=startDir, mustexist=True, title=self.helpText, **kargs)
if newPath:
newPath = opscore.RO.CnvUtil.asStr(newPath)
self.setPath(newPath)
|
Put up a dialog to choose a new file.
|
python/opscore/RO/Wdg/PathWdg.py
|
_doChoose
|
sdss/opscore
| 0 |
python
|
def _doChoose(self):
'\n '
if (self.path is not None):
startDir = self.path
else:
startDir = self.defPath
if (startDir and (not os.path.isdir(startDir))):
parDir = os.path.split(self.path)[0]
if os.path.isdir(parDir):
startDir = parDir
else:
startDir = None
kargs = {}
if self.fileTypes:
kargs['filetypes'] = self.fileTypes
newPath = tkinter.filedialog.askdirectory(initialdir=startDir, mustexist=True, title=self.helpText, **kargs)
if newPath:
newPath = opscore.RO.CnvUtil.asStr(newPath)
self.setPath(newPath)
|
def _doChoose(self):
'\n '
if (self.path is not None):
startDir = self.path
else:
startDir = self.defPath
if (startDir and (not os.path.isdir(startDir))):
parDir = os.path.split(self.path)[0]
if os.path.isdir(parDir):
startDir = parDir
else:
startDir = None
kargs = {}
if self.fileTypes:
kargs['filetypes'] = self.fileTypes
newPath = tkinter.filedialog.askdirectory(initialdir=startDir, mustexist=True, title=self.helpText, **kargs)
if newPath:
newPath = opscore.RO.CnvUtil.asStr(newPath)
self.setPath(newPath)<|docstring|>Put up a dialog to choose a new file.<|endoftext|>
|
8efccc8366293b61bd6d944608b5b771d87bd04f54ac25f7c4f9b231fe1b074b
|
def checkPath(self, path):
'Raise ValueError if path not None and not an existing directory'
if (path and (not os.path.isdir(path))):
raise ValueError(('Path %r is not an existing directory' % (path,)))
|
Raise ValueError if path not None and not an existing directory
|
python/opscore/RO/Wdg/PathWdg.py
|
checkPath
|
sdss/opscore
| 0 |
python
|
def checkPath(self, path):
if (path and (not os.path.isdir(path))):
raise ValueError(('Path %r is not an existing directory' % (path,)))
|
def checkPath(self, path):
if (path and (not os.path.isdir(path))):
raise ValueError(('Path %r is not an existing directory' % (path,)))<|docstring|>Raise ValueError if path not None and not an existing directory<|endoftext|>
|
456da0a439a30580888650396782833e3689394a5589f15862ba9735feb99098
|
def _doChoose(self):
'Put up a dialog to choose a new file.\n '
if (self.path is not None):
startPath = self.path
else:
startPath = self.defPath
if (startPath is not None):
(startDir, startFile) = os.path.split(self.path)
if (not os.path.isfile(startPath)):
startFile = None
else:
startFile = None
startDir = self.defDir
if ((startDir is not None) and (not os.path.isdir(startDir))):
startDir = startFile = None
kargs = {}
if self.fileTypes:
kargs['filetypes'] = self.fileTypes
newPath = tkinter.filedialog.askopenfilename(initialdir=startDir, initialfile=startFile, title=self.helpText, **kargs)
if newPath:
newPath = opscore.RO.CnvUtil.asStr(newPath)
self.setPath(newPath)
|
Put up a dialog to choose a new file.
|
python/opscore/RO/Wdg/PathWdg.py
|
_doChoose
|
sdss/opscore
| 0 |
python
|
def _doChoose(self):
'\n '
if (self.path is not None):
startPath = self.path
else:
startPath = self.defPath
if (startPath is not None):
(startDir, startFile) = os.path.split(self.path)
if (not os.path.isfile(startPath)):
startFile = None
else:
startFile = None
startDir = self.defDir
if ((startDir is not None) and (not os.path.isdir(startDir))):
startDir = startFile = None
kargs = {}
if self.fileTypes:
kargs['filetypes'] = self.fileTypes
newPath = tkinter.filedialog.askopenfilename(initialdir=startDir, initialfile=startFile, title=self.helpText, **kargs)
if newPath:
newPath = opscore.RO.CnvUtil.asStr(newPath)
self.setPath(newPath)
|
def _doChoose(self):
'\n '
if (self.path is not None):
startPath = self.path
else:
startPath = self.defPath
if (startPath is not None):
(startDir, startFile) = os.path.split(self.path)
if (not os.path.isfile(startPath)):
startFile = None
else:
startFile = None
startDir = self.defDir
if ((startDir is not None) and (not os.path.isdir(startDir))):
startDir = startFile = None
kargs = {}
if self.fileTypes:
kargs['filetypes'] = self.fileTypes
newPath = tkinter.filedialog.askopenfilename(initialdir=startDir, initialfile=startFile, title=self.helpText, **kargs)
if newPath:
newPath = opscore.RO.CnvUtil.asStr(newPath)
self.setPath(newPath)<|docstring|>Put up a dialog to choose a new file.<|endoftext|>
|
63efb029657549d639203f94db88e4e7be03629fbf1d39f5dc22f55f34aeb427
|
def _initPath(self, defPath):
'During initialization set self.defDir, self.defPath and self.path.\n '
defDir = None
if defPath:
if os.path.isfile(defPath):
defPath = os.path.abspath(defPath)
elif os.path.isdir(defPath):
defDir = os.path.abspath(defPath)
defPath = None
else:
parDir = os.path.split(defPath)[0]
if os.path.isdir(parDir):
defDir = parDir
defPath = None
self.defDir = defDir
self.defPath = defPath
self.setPath(defPath)
|
During initialization set self.defDir, self.defPath and self.path.
|
python/opscore/RO/Wdg/PathWdg.py
|
_initPath
|
sdss/opscore
| 0 |
python
|
def _initPath(self, defPath):
'\n '
defDir = None
if defPath:
if os.path.isfile(defPath):
defPath = os.path.abspath(defPath)
elif os.path.isdir(defPath):
defDir = os.path.abspath(defPath)
defPath = None
else:
parDir = os.path.split(defPath)[0]
if os.path.isdir(parDir):
defDir = parDir
defPath = None
self.defDir = defDir
self.defPath = defPath
self.setPath(defPath)
|
def _initPath(self, defPath):
'\n '
defDir = None
if defPath:
if os.path.isfile(defPath):
defPath = os.path.abspath(defPath)
elif os.path.isdir(defPath):
defDir = os.path.abspath(defPath)
defPath = None
else:
parDir = os.path.split(defPath)[0]
if os.path.isdir(parDir):
defDir = parDir
defPath = None
self.defDir = defDir
self.defPath = defPath
self.setPath(defPath)<|docstring|>During initialization set self.defDir, self.defPath and self.path.<|endoftext|>
|
080d7638559e985e3f1e4d13fcffd9b4a54ab2414525d11531abefb50ddb11c6
|
def checkPath(self, path):
'Raise ValueError if path not None and not an existing file'
if (path and (not os.path.isfile(path))):
raise ValueError(('Path %r is not an existing file' % (path,)))
|
Raise ValueError if path not None and not an existing file
|
python/opscore/RO/Wdg/PathWdg.py
|
checkPath
|
sdss/opscore
| 0 |
python
|
def checkPath(self, path):
if (path and (not os.path.isfile(path))):
raise ValueError(('Path %r is not an existing file' % (path,)))
|
def checkPath(self, path):
if (path and (not os.path.isfile(path))):
raise ValueError(('Path %r is not an existing file' % (path,)))<|docstring|>Raise ValueError if path not None and not an existing file<|endoftext|>
|
f0aa5cf90badbddbb1ebebcbaf89c4fd97227c2b42e5965ee56a81d23246bd17
|
def test_3d_multiple_properties(self):
'\n loading two modalities, grouping subject names only\n '
data_param = {'mr': {'path_to_search': IMAGE_PATH_3D, 'filename_contains': 'Lesion', 'pixdim': (4, 3, 4), 'axcodes': 'RAS'}, 'ct': {'path_to_search': IMAGE_PATH_3D, 'filename_contains': 'Lesion', 'pixdim': (4, 3, 4), 'axcodes': 'RAS'}}
reader = ImageReader().initialise(data_param)
self.assertDictEqual(reader.spatial_ranks, {'mr': 3, 'ct': 3})
self.assertEqual(reader.output_list[0]['mr'].output_pixdim, ((4.0, 3.0, 4.0),))
self.assertEqual(reader.output_list[0]['mr'].output_axcodes, (('R', 'A', 'S'),))
(idx, data, interp) = reader()
self.assertTrue(('mr' in data))
self.assertTrue(('ct' in data))
self.assertTrue((idx in range(len(reader.output_list))))
self.assertDictEqual(interp, {'mr': (1,), 'ct': (1,)})
self.assertAllClose(data['mr'].shape[:3], (62, 83, 62), atol=1)
self.assertAllClose(data['mr'].shape[3:], (1, 1))
self.assertAllClose(data['ct'].shape[:3], (62, 83, 62), atol=1)
self.assertAllClose(data['ct'].shape[3:], (1, 1))
|
loading two modalities, grouping subject names only
|
tests/reader_modular_test.py
|
test_3d_multiple_properties
|
tdml13/NiftyNet
| 1,403 |
python
|
def test_3d_multiple_properties(self):
'\n \n '
data_param = {'mr': {'path_to_search': IMAGE_PATH_3D, 'filename_contains': 'Lesion', 'pixdim': (4, 3, 4), 'axcodes': 'RAS'}, 'ct': {'path_to_search': IMAGE_PATH_3D, 'filename_contains': 'Lesion', 'pixdim': (4, 3, 4), 'axcodes': 'RAS'}}
reader = ImageReader().initialise(data_param)
self.assertDictEqual(reader.spatial_ranks, {'mr': 3, 'ct': 3})
self.assertEqual(reader.output_list[0]['mr'].output_pixdim, ((4.0, 3.0, 4.0),))
self.assertEqual(reader.output_list[0]['mr'].output_axcodes, (('R', 'A', 'S'),))
(idx, data, interp) = reader()
self.assertTrue(('mr' in data))
self.assertTrue(('ct' in data))
self.assertTrue((idx in range(len(reader.output_list))))
self.assertDictEqual(interp, {'mr': (1,), 'ct': (1,)})
self.assertAllClose(data['mr'].shape[:3], (62, 83, 62), atol=1)
self.assertAllClose(data['mr'].shape[3:], (1, 1))
self.assertAllClose(data['ct'].shape[:3], (62, 83, 62), atol=1)
self.assertAllClose(data['ct'].shape[3:], (1, 1))
|
def test_3d_multiple_properties(self):
'\n \n '
data_param = {'mr': {'path_to_search': IMAGE_PATH_3D, 'filename_contains': 'Lesion', 'pixdim': (4, 3, 4), 'axcodes': 'RAS'}, 'ct': {'path_to_search': IMAGE_PATH_3D, 'filename_contains': 'Lesion', 'pixdim': (4, 3, 4), 'axcodes': 'RAS'}}
reader = ImageReader().initialise(data_param)
self.assertDictEqual(reader.spatial_ranks, {'mr': 3, 'ct': 3})
self.assertEqual(reader.output_list[0]['mr'].output_pixdim, ((4.0, 3.0, 4.0),))
self.assertEqual(reader.output_list[0]['mr'].output_axcodes, (('R', 'A', 'S'),))
(idx, data, interp) = reader()
self.assertTrue(('mr' in data))
self.assertTrue(('ct' in data))
self.assertTrue((idx in range(len(reader.output_list))))
self.assertDictEqual(interp, {'mr': (1,), 'ct': (1,)})
self.assertAllClose(data['mr'].shape[:3], (62, 83, 62), atol=1)
self.assertAllClose(data['mr'].shape[3:], (1, 1))
self.assertAllClose(data['ct'].shape[:3], (62, 83, 62), atol=1)
self.assertAllClose(data['ct'].shape[3:], (1, 1))<|docstring|>loading two modalities, grouping subject names only<|endoftext|>
|
b82fdd74be55625f5d25489f86db42222f107d0a33519a85d08d9f0772ea3bb3
|
def test_3d_concat_properties(self):
'\n loading two modalities, grouping subject names only\n '
data_param = {'mr': {'path_to_search': IMAGE_PATH_3D, 'filename_contains': 'LesionFin', 'pixdim': (4, 3, 4), 'axcodes': 'RAS'}, 'ct': {'path_to_search': IMAGE_PATH_3D, 'filename_contains': 'FLAIR', 'pixdim': (4, 3, 4), 'axcodes': 'RAS'}}
grouping_param = {'image': ('mr', 'ct')}
reader = ImageReader().initialise(data_param, grouping_param)
self.assertDictEqual(reader.spatial_ranks, {'image': 3})
self.assertEqual(reader.output_list[0]['image'].output_pixdim, (((4.0, 3.0, 4.0),) * 2))
self.assertEqual(reader.output_list[0]['image'].output_axcodes, ((('R', 'A', 'S'),) * 2))
(idx, data, interp) = reader()
self.assertTrue(('image' in data))
self.assertTrue((idx in range(len(reader.output_list))))
self.assertDictEqual(interp, {'image': (1, 1)})
self.assertAllClose(data['image'].shape[:3], (62, 83, 62), atol=1)
self.assertAllClose(data['image'].shape[3:], (1, 2))
|
loading two modalities, grouping subject names only
|
tests/reader_modular_test.py
|
test_3d_concat_properties
|
tdml13/NiftyNet
| 1,403 |
python
|
def test_3d_concat_properties(self):
'\n \n '
data_param = {'mr': {'path_to_search': IMAGE_PATH_3D, 'filename_contains': 'LesionFin', 'pixdim': (4, 3, 4), 'axcodes': 'RAS'}, 'ct': {'path_to_search': IMAGE_PATH_3D, 'filename_contains': 'FLAIR', 'pixdim': (4, 3, 4), 'axcodes': 'RAS'}}
grouping_param = {'image': ('mr', 'ct')}
reader = ImageReader().initialise(data_param, grouping_param)
self.assertDictEqual(reader.spatial_ranks, {'image': 3})
self.assertEqual(reader.output_list[0]['image'].output_pixdim, (((4.0, 3.0, 4.0),) * 2))
self.assertEqual(reader.output_list[0]['image'].output_axcodes, ((('R', 'A', 'S'),) * 2))
(idx, data, interp) = reader()
self.assertTrue(('image' in data))
self.assertTrue((idx in range(len(reader.output_list))))
self.assertDictEqual(interp, {'image': (1, 1)})
self.assertAllClose(data['image'].shape[:3], (62, 83, 62), atol=1)
self.assertAllClose(data['image'].shape[3:], (1, 2))
|
def test_3d_concat_properties(self):
'\n \n '
data_param = {'mr': {'path_to_search': IMAGE_PATH_3D, 'filename_contains': 'LesionFin', 'pixdim': (4, 3, 4), 'axcodes': 'RAS'}, 'ct': {'path_to_search': IMAGE_PATH_3D, 'filename_contains': 'FLAIR', 'pixdim': (4, 3, 4), 'axcodes': 'RAS'}}
grouping_param = {'image': ('mr', 'ct')}
reader = ImageReader().initialise(data_param, grouping_param)
self.assertDictEqual(reader.spatial_ranks, {'image': 3})
self.assertEqual(reader.output_list[0]['image'].output_pixdim, (((4.0, 3.0, 4.0),) * 2))
self.assertEqual(reader.output_list[0]['image'].output_axcodes, ((('R', 'A', 'S'),) * 2))
(idx, data, interp) = reader()
self.assertTrue(('image' in data))
self.assertTrue((idx in range(len(reader.output_list))))
self.assertDictEqual(interp, {'image': (1, 1)})
self.assertAllClose(data['image'].shape[:3], (62, 83, 62), atol=1)
self.assertAllClose(data['image'].shape[3:], (1, 2))<|docstring|>loading two modalities, grouping subject names only<|endoftext|>
|
e1abadf3bc536cf627256a1468ee37a045dfcea40d37a88d760dce79fdaf56b8
|
def test_3d_multiple_properties(self):
'\n loading two modalities, grouping subject names only\n '
data_param = {'mr': {'path_to_search': IMAGE_PATH_3D_1, 'filename_contains': 'x_y_z_1_1', 'pixdim': (4, 3, 4), 'axcodes': 'RAS'}, 'ct': {'path_to_search': IMAGE_PATH_3D_1, 'filename_contains': 'x_y_z_1_1', 'pixdim': (4, 3, 4), 'axcodes': 'RAS'}}
reader = ImageReader().initialise(data_param)
self.assertDictEqual(reader.spatial_ranks, {'mr': 3, 'ct': 3})
self.assertEqual(reader.output_list[0]['mr'].output_pixdim, ((4.0, 3.0, 4.0),))
self.assertEqual(reader.output_list[0]['mr'].output_axcodes, (('R', 'A', 'S'),))
(idx, data, interp) = reader()
self.assertTrue(('mr' in data))
self.assertTrue(('ct' in data))
self.assertTrue((idx in range(len(reader.output_list))))
self.assertDictEqual(interp, {'mr': (1,), 'ct': (1,)})
self.assertAllClose(data['mr'].shape[:3], (12, 8, 10), atol=1)
self.assertAllClose(data['mr'].shape[3:], (1, 1))
self.assertAllClose(data['ct'].shape[:3], (12, 8, 10), atol=1)
self.assertAllClose(data['ct'].shape[3:], (1, 1))
|
loading two modalities, grouping subject names only
|
tests/reader_modular_test.py
|
test_3d_multiple_properties
|
tdml13/NiftyNet
| 1,403 |
python
|
def test_3d_multiple_properties(self):
'\n \n '
data_param = {'mr': {'path_to_search': IMAGE_PATH_3D_1, 'filename_contains': 'x_y_z_1_1', 'pixdim': (4, 3, 4), 'axcodes': 'RAS'}, 'ct': {'path_to_search': IMAGE_PATH_3D_1, 'filename_contains': 'x_y_z_1_1', 'pixdim': (4, 3, 4), 'axcodes': 'RAS'}}
reader = ImageReader().initialise(data_param)
self.assertDictEqual(reader.spatial_ranks, {'mr': 3, 'ct': 3})
self.assertEqual(reader.output_list[0]['mr'].output_pixdim, ((4.0, 3.0, 4.0),))
self.assertEqual(reader.output_list[0]['mr'].output_axcodes, (('R', 'A', 'S'),))
(idx, data, interp) = reader()
self.assertTrue(('mr' in data))
self.assertTrue(('ct' in data))
self.assertTrue((idx in range(len(reader.output_list))))
self.assertDictEqual(interp, {'mr': (1,), 'ct': (1,)})
self.assertAllClose(data['mr'].shape[:3], (12, 8, 10), atol=1)
self.assertAllClose(data['mr'].shape[3:], (1, 1))
self.assertAllClose(data['ct'].shape[:3], (12, 8, 10), atol=1)
self.assertAllClose(data['ct'].shape[3:], (1, 1))
|
def test_3d_multiple_properties(self):
'\n \n '
data_param = {'mr': {'path_to_search': IMAGE_PATH_3D_1, 'filename_contains': 'x_y_z_1_1', 'pixdim': (4, 3, 4), 'axcodes': 'RAS'}, 'ct': {'path_to_search': IMAGE_PATH_3D_1, 'filename_contains': 'x_y_z_1_1', 'pixdim': (4, 3, 4), 'axcodes': 'RAS'}}
reader = ImageReader().initialise(data_param)
self.assertDictEqual(reader.spatial_ranks, {'mr': 3, 'ct': 3})
self.assertEqual(reader.output_list[0]['mr'].output_pixdim, ((4.0, 3.0, 4.0),))
self.assertEqual(reader.output_list[0]['mr'].output_axcodes, (('R', 'A', 'S'),))
(idx, data, interp) = reader()
self.assertTrue(('mr' in data))
self.assertTrue(('ct' in data))
self.assertTrue((idx in range(len(reader.output_list))))
self.assertDictEqual(interp, {'mr': (1,), 'ct': (1,)})
self.assertAllClose(data['mr'].shape[:3], (12, 8, 10), atol=1)
self.assertAllClose(data['mr'].shape[3:], (1, 1))
self.assertAllClose(data['ct'].shape[:3], (12, 8, 10), atol=1)
self.assertAllClose(data['ct'].shape[3:], (1, 1))<|docstring|>loading two modalities, grouping subject names only<|endoftext|>
|
5a5bcf50cd262a0fdec15890e31c82c47e0241a94fa1ad32c75ba6477cce5c76
|
def test_3d_concat_properties(self):
'\n loading two modalities, grouping subject names only\n '
data_param = {'mr': {'path_to_search': IMAGE_PATH_3D_1, 'filename_contains': 'x_y_z_1_1', 'pixdim': (4, 3, 4), 'axcodes': 'RAS'}, 'ct': {'path_to_search': IMAGE_PATH_3D_1, 'filename_contains': 'x_y_z_1_1', 'pixdim': (4, 3, 4), 'axcodes': 'RAS'}}
grouping_param = {'image': ('mr', 'ct')}
reader = ImageReader().initialise(data_param, grouping_param)
self.assertDictEqual(reader.spatial_ranks, {'image': 3})
self.assertEqual(reader.output_list[0]['image'].output_pixdim, (((4.0, 3.0, 4.0),) * 2))
self.assertEqual(reader.output_list[0]['image'].output_axcodes, ((('R', 'A', 'S'),) * 2))
(idx, data, interp) = reader()
self.assertTrue(('image' in data))
self.assertTrue((idx in range(len(reader.output_list))))
self.assertDictEqual(interp, {'image': (1, 1)})
self.assertAllClose(data['image'].shape[:3], (12, 8, 10), atol=1)
self.assertAllClose(data['image'].shape[3:], (1, 2))
|
loading two modalities, grouping subject names only
|
tests/reader_modular_test.py
|
test_3d_concat_properties
|
tdml13/NiftyNet
| 1,403 |
python
|
def test_3d_concat_properties(self):
'\n \n '
data_param = {'mr': {'path_to_search': IMAGE_PATH_3D_1, 'filename_contains': 'x_y_z_1_1', 'pixdim': (4, 3, 4), 'axcodes': 'RAS'}, 'ct': {'path_to_search': IMAGE_PATH_3D_1, 'filename_contains': 'x_y_z_1_1', 'pixdim': (4, 3, 4), 'axcodes': 'RAS'}}
grouping_param = {'image': ('mr', 'ct')}
reader = ImageReader().initialise(data_param, grouping_param)
self.assertDictEqual(reader.spatial_ranks, {'image': 3})
self.assertEqual(reader.output_list[0]['image'].output_pixdim, (((4.0, 3.0, 4.0),) * 2))
self.assertEqual(reader.output_list[0]['image'].output_axcodes, ((('R', 'A', 'S'),) * 2))
(idx, data, interp) = reader()
self.assertTrue(('image' in data))
self.assertTrue((idx in range(len(reader.output_list))))
self.assertDictEqual(interp, {'image': (1, 1)})
self.assertAllClose(data['image'].shape[:3], (12, 8, 10), atol=1)
self.assertAllClose(data['image'].shape[3:], (1, 2))
|
def test_3d_concat_properties(self):
'\n \n '
data_param = {'mr': {'path_to_search': IMAGE_PATH_3D_1, 'filename_contains': 'x_y_z_1_1', 'pixdim': (4, 3, 4), 'axcodes': 'RAS'}, 'ct': {'path_to_search': IMAGE_PATH_3D_1, 'filename_contains': 'x_y_z_1_1', 'pixdim': (4, 3, 4), 'axcodes': 'RAS'}}
grouping_param = {'image': ('mr', 'ct')}
reader = ImageReader().initialise(data_param, grouping_param)
self.assertDictEqual(reader.spatial_ranks, {'image': 3})
self.assertEqual(reader.output_list[0]['image'].output_pixdim, (((4.0, 3.0, 4.0),) * 2))
self.assertEqual(reader.output_list[0]['image'].output_axcodes, ((('R', 'A', 'S'),) * 2))
(idx, data, interp) = reader()
self.assertTrue(('image' in data))
self.assertTrue((idx in range(len(reader.output_list))))
self.assertDictEqual(interp, {'image': (1, 1)})
self.assertAllClose(data['image'].shape[:3], (12, 8, 10), atol=1)
self.assertAllClose(data['image'].shape[3:], (1, 2))<|docstring|>loading two modalities, grouping subject names only<|endoftext|>
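A small shape sketch of the concatenation behaviour these tests assert, using toy arrays rather than the NiftyNet reader (array sizes assumed to match the second test above):
import numpy as np
mr = np.zeros((12, 8, 10, 1, 1))    # one single-channel modality
ct = np.zeros((12, 8, 10, 1, 1))    # another single-channel modality
image = np.concatenate([mr, ct], axis=-1)
print(image.shape)                  # (12, 8, 10, 1, 2) -> trailing (1, 2) as asserted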
|
f3ebec341da49512032eb86e9bb4b3c12ec238245826f11e26e6f1f6ed4ee14c
|
def _parse_args():
'\n Parses arguments for the main script.\n '
parser = ArgumentParser(description='utility script for converting a midi file in csv format into a text', prog='python csv_to_text.py')
parser.add_argument('-t', '--ticks', help='Ticks per each time step (default: 25). Make sure to remember its value when converting the text back to a csv.', type=int, default=25)
parser.add_argument('-v', '--verbose', help='make the process more verbose.', action='store_true')
parser.add_argument('csv', type=str, help='File path for the csv file to convert or path to the directory which contains one or more csv files to convert.')
parser.add_argument('text', nargs='?', type=str, help='File path for the resulting text file (Optional). By default, the text file will be generated in the same directory as the source csv file. If `csv` is a directory, this will be the resulting directory.')
if (len(sys.argv) == 1):
parser.print_help(sys.stderr)
sys.exit(1)
args = parser.parse_args()
if (not (args.ticks > 0)):
parser.error('The value for ticks per time step must be at least 1.')
csv_is_dir = osp.isdir(args.csv)
if ((not osp.isfile(args.csv)) and (not csv_is_dir)):
parser.error('The input csv file or directory does not exist. Please, check the path and try again.\n{}'.format(args.csv))
if (args.text and (not osp.isdir((args.text if csv_is_dir else osp.dirname(args.text))))):
parser.error('The result path does not exist. Please, use an existing directory.\n{}'.format((args.text if csv_is_dir else osp.dirname(args.text))))
return (args, csv_is_dir)
|
Parses arguments for the main script.
|
utils/csv_to_text.py
|
_parse_args
|
dragonoken/undertale_deltarune_soundtrack_generator
| 4 |
python
|
def _parse_args():
'\n \n '
parser = ArgumentParser(description='utility script for converting a midi file in csv format into a text', prog='python csv_to_text.py')
parser.add_argument('-t', '--ticks', help='Ticks per each time step (default: 25). Make sure to remember its value when converting the text back to a csv.', type=int, default=25)
parser.add_argument('-v', '--verbose', help='make the process more verbose.', action='store_true')
parser.add_argument('csv', type=str, help='File path for the csv file to convert or path to the directory which contains one or more csv files to convert.')
parser.add_argument('text', nargs='?', type=str, help='File path for the resulting text file (Optional). By default, the text file will be generated in the same directory as the source csv file. If `csv` is a directory, this will be the resulting directory.')
if (len(sys.argv) == 1):
parser.print_help(sys.stderr)
sys.exit(1)
args = parser.parse_args()
if (not (args.ticks > 0)):
parser.error('The value for ticks per time step must be at least 1.')
csv_is_dir = osp.isdir(args.csv)
if ((not osp.isfile(args.csv)) and (not csv_is_dir)):
parser.error('The input csv file or directory does not exist. Please, check the path and try again.\n{}'.format(args.csv))
if (args.text and (not osp.isdir((args.text if csv_is_dir else osp.dirname(args.text))))):
parser.error('The result path does not exist. Please, use an existing directory.\n{}'.format((args.text if csv_is_dir else osp.dirname(args.text))))
return (args, csv_is_dir)
|
def _parse_args():
'\n \n '
parser = ArgumentParser(description='utility script for converting a midi file in csv format into a text', prog='python csv_to_text.py')
parser.add_argument('-t', '--ticks', help='Ticks per each time step (default: 25). Make sure to remember its value when converting the text back to a csv.', type=int, default=25)
parser.add_argument('-v', '--verbose', help='make the process more verbose.', action='store_true')
parser.add_argument('csv', type=str, help='File path for the csv file to convert or path to the directory which contains one or more csv files to convert.')
parser.add_argument('text', nargs='?', type=str, help='File path for the resulting text file (Optional). By default, the text file will be generated in the same directory as the source csv file. If `csv` is a directory, this will be the resulting directory.')
if (len(sys.argv) == 1):
parser.print_help(sys.stderr)
sys.exit(1)
args = parser.parse_args()
if (not (args.ticks > 0)):
parser.error('The value for ticks per time step must be at least 1.')
csv_is_dir = osp.isdir(args.csv)
if ((not osp.isfile(args.csv)) and (not csv_is_dir)):
parser.error('The input csv file or directory does not exist. Please, check the path and try again.\n{}'.format(args.csv))
if (args.text and (not osp.isdir((args.text if csv_is_dir else osp.dirname(args.text))))):
parser.error('The result path does not exist. Please, use an existing directory.\n{}'.format((args.text if csv_is_dir else osp.dirname(args.text))))
return (args, csv_is_dir)<|docstring|>Parses arguments for the main script.<|endoftext|>
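Example invocations implied by the parser above (file and directory names are assumed):
# Convert one file; by default the text is written next to the source csv:
#   python csv_to_text.py -t 25 song.csv
# Convert every csv in a directory into an existing output directory, verbosely:
#   python csv_to_text.py -v midi_csvs/ texts/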
|
3effd058d868bbffc85ceee73ec3fbe236342d49ad989647fa7c602b57d0d9ab
|
def read_midi_csv(midi_file_path):
'\n Given the file path of a converted midi csv file, reads and returns the data as a pandas DataFrame.\n Also, during the process, removes double quotes in the 5th column (e.g. "major")\n so that it can be processed later without much problem.\n '
midi = pd.read_csv(midi_file_path, names=['Track', 'Time', 'Type', 'Val1', 'Val2', 'Val3', 'Val4'])
midi.iloc[:, 4] = midi.iloc[:, 4].apply((lambda val: (val.replace('"', '') if isinstance(val, str) else val)))
return midi
|
Given the file path of a converted midi csv file, reads and returns the data as a pandas DataFrame.
Also, during the process, removes double quotes in the 5th column (e.g. "major")
so that it can be processed later without much problem.
|
utils/csv_to_text.py
|
read_midi_csv
|
dragonoken/undertale_deltarune_soundtrack_generator
| 4 |
python
|
def read_midi_csv(midi_file_path):
'\n Given the file path of a converted midi csv file, reads and returns the data as a pandas DataFrame.\n Also, during the process, removes double quotes in the 5th column (e.g. "major")\n so that it can be processed later without much problem.\n '
midi = pd.read_csv(midi_file_path, names=['Track', 'Time', 'Type', 'Val1', 'Val2', 'Val3', 'Val4'])
midi.iloc[:, 4] = midi.iloc[:, 4].apply((lambda val: (val.replace('"', '') if isinstance(val, str) else val)))
return midi
|
def read_midi_csv(midi_file_path):
'\n Given the file path of a converted midi csv file, reads and returns the data as a pandas DataFrame.\n Also, during the process, removes double quotes in the 5th column (e.g. "major")\n so that it can be processed later without much problem.\n '
midi = pd.read_csv(midi_file_path, names=['Track', 'Time', 'Type', 'Val1', 'Val2', 'Val3', 'Val4'])
midi.iloc[:, 4] = midi.iloc[:, 4].apply((lambda val: (val.replace('"', '') if isinstance(val, str) else val)))
return midi<|docstring|>Given the file path of a converted midi csv file, reads and returns the data as a pandas DataFrame.
Also, during the process, removes double quotes in the 5th column (e.g. "major")
so that it can be processed later without much problem.<|endoftext|>
|
a8ebe48c39f5c09a433d0cd87051d8a0141ea41cb681336cce82a247de74332a
|
def drop_nonessentials(midi_dataframe):
'\n Drops all items except for those that are essential for the music.\n The resulting dataframe is returned.\n '
non_essential_list = ['header', 'end_of_file', 'start_track', 'end_track', 'tempo', 'note_on_c', 'note_off_c']
non_essentials = midi_dataframe.iloc[:, 2].apply((lambda str: (str.strip().lower() not in non_essential_list)))
return midi_dataframe.drop(index=midi_dataframe.loc[non_essentials].index).reset_index(drop=True)
|
Drops all items except for those that are essential for the music.
The resulting dataframe is returned.
|
utils/csv_to_text.py
|
drop_nonessentials
|
dragonoken/undertale_deltarune_soundtrack_generator
| 4 |
python
|
def drop_nonessentials(midi_dataframe):
'\n Drops all items except for those that are essential for the music.\n The resulting dataframe is returned.\n '
non_essential_list = ['header', 'end_of_file', 'start_track', 'end_track', 'tempo', 'note_on_c', 'note_off_c']
non_essentials = midi_dataframe.iloc[:, 2].apply((lambda str: (str.strip().lower() not in non_essential_list)))
return midi_dataframe.drop(index=midi_dataframe.loc[non_essentials].index).reset_index(drop=True)
|
def drop_nonessentials(midi_dataframe):
'\n Drops all items except for those that are essential for the music.\n The resulting dataframe is returned.\n '
non_essential_list = ['header', 'end_of_file', 'start_track', 'end_track', 'tempo', 'note_on_c', 'note_off_c']
non_essentials = midi_dataframe.iloc[:, 2].apply((lambda str: (str.strip().lower() not in non_essential_list)))
return midi_dataframe.drop(index=midi_dataframe.loc[non_essentials].index).reset_index(drop=True)<|docstring|>Drops all items except for those that are essential for the music.
The resulting dataframe is returned.<|endoftext|>
|
7361ba53c1fd5764986cf2a4932f33f341d2dd477fd03b13ee8b9fe697c8de7d
|
def time_adjustment(midi_dataframe):
"\n Drops 'Tempo' items and changes the time values accordingly.\n The resulting dataframe is returned.\n "
base_midi = midi_dataframe.copy()
base_midi.loc[:, 'index'] = base_midi.index
modified_midi = midi_dataframe.copy()
DEFAULT_TEMPO = 500000
tempos = base_midi[base_midi.iloc[:, 2].apply((lambda str: (str.strip().lower() == 'tempo')))].sort_values([base_midi.columns[1], 'index'])
tempo_change_time_points = tempos.iloc[:, 1].values.tolist()
tempo_change_time_points.insert(0, 0)
tempo_change_time_points.append((base_midi.iloc[:, 1].max() + 1))
interval_multipliers = (tempos.iloc[:, 3] / DEFAULT_TEMPO).values.tolist()
interval_multipliers.insert(0, 1.0)
last_time_point = tempo_change_time_points[0]
for tempo_idx in range((len(tempo_change_time_points) - 1)):
selecting_condition = ((base_midi.iloc[:, 1] > tempo_change_time_points[tempo_idx]) & (base_midi.iloc[:, 1] <= tempo_change_time_points[(tempo_idx + 1)]))
if (selecting_condition.sum() > 0):
multiplier = interval_multipliers[tempo_idx]
times_since_tempo = (base_midi.loc[(selecting_condition, base_midi.columns[1])] - tempo_change_time_points[tempo_idx])
scaled_times = (times_since_tempo * multiplier)
adjusted_times = (scaled_times + last_time_point).values
modified_midi.loc[(selecting_condition, base_midi.columns[1])] = adjusted_times
last_time_point = adjusted_times.max()
modified_midi.iloc[:, 1] = modified_midi.iloc[:, 1].round()
modified_midi.drop(index=modified_midi.loc[modified_midi.iloc[:, 2].apply((lambda str: (str.strip().lower() == 'tempo')))].index, inplace=True)
modified_midi.reset_index(drop=True, inplace=True)
return modified_midi
|
Drops 'Tempo' items and changes the time values accordingly.
The resulting dataframe is returned.
|
utils/csv_to_text.py
|
time_adjustment
|
dragonoken/undertale_deltarune_soundtrack_generator
| 4 |
python
|
def time_adjustment(midi_dataframe):
"\n Drops 'Tempo' items and changes the time values accordingly.\n The resulting dataframe is returned.\n "
base_midi = midi_dataframe.copy()
base_midi.loc[:, 'index'] = base_midi.index
modified_midi = midi_dataframe.copy()
DEFAULT_TEMPO = 500000
tempos = base_midi[base_midi.iloc[:, 2].apply((lambda str: (str.strip().lower() == 'tempo')))].sort_values([base_midi.columns[1], 'index'])
tempo_change_time_points = tempos.iloc[:, 1].values.tolist()
tempo_change_time_points.insert(0, 0)
tempo_change_time_points.append((base_midi.iloc[:, 1].max() + 1))
interval_multipliers = (tempos.iloc[:, 3] / DEFAULT_TEMPO).values.tolist()
interval_multipliers.insert(0, 1.0)
last_time_point = tempo_change_time_points[0]
for tempo_idx in range((len(tempo_change_time_points) - 1)):
selecting_condition = ((base_midi.iloc[:, 1] > tempo_change_time_points[tempo_idx]) & (base_midi.iloc[:, 1] <= tempo_change_time_points[(tempo_idx + 1)]))
if (selecting_condition.sum() > 0):
multiplier = interval_multipliers[tempo_idx]
times_since_tempo = (base_midi.loc[(selecting_condition, base_midi.columns[1])] - tempo_change_time_points[tempo_idx])
scaled_times = (times_since_tempo * multiplier)
adjusted_times = (scaled_times + last_time_point).values
modified_midi.loc[(selecting_condition, base_midi.columns[1])] = adjusted_times
last_time_point = adjusted_times.max()
modified_midi.iloc[:, 1] = modified_midi.iloc[:, 1].round()
modified_midi.drop(index=modified_midi.loc[modified_midi.iloc[:, 2].apply((lambda str: (str.strip().lower() == 'tempo')))].index, inplace=True)
modified_midi.reset_index(drop=True, inplace=True)
return modified_midi
|
def time_adjustment(midi_dataframe):
"\n Drops 'Tempo' items and changes the time values accordingly.\n The resulting dataframe is returned.\n "
base_midi = midi_dataframe.copy()
base_midi.loc[:, 'index'] = base_midi.index
modified_midi = midi_dataframe.copy()
DEFAULT_TEMPO = 500000
tempos = base_midi[base_midi.iloc[:, 2].apply((lambda str: (str.strip().lower() == 'tempo')))].sort_values([base_midi.columns[1], 'index'])
tempo_change_time_points = tempos.iloc[:, 1].values.tolist()
tempo_change_time_points.insert(0, 0)
tempo_change_time_points.append((base_midi.iloc[:, 1].max() + 1))
interval_multipliers = (tempos.iloc[:, 3] / DEFAULT_TEMPO).values.tolist()
interval_multipliers.insert(0, 1.0)
last_time_point = tempo_change_time_points[0]
for tempo_idx in range((len(tempo_change_time_points) - 1)):
selecting_condition = ((base_midi.iloc[:, 1] > tempo_change_time_points[tempo_idx]) & (base_midi.iloc[:, 1] <= tempo_change_time_points[(tempo_idx + 1)]))
if (selecting_condition.sum() > 0):
multiplier = interval_multipliers[tempo_idx]
times_since_tempo = (base_midi.loc[(selecting_condition, base_midi.columns[1])] - tempo_change_time_points[tempo_idx])
scaled_times = (times_since_tempo * multiplier)
adjusted_times = (scaled_times + last_time_point).values
modified_midi.loc[(selecting_condition, base_midi.columns[1])] = adjusted_times
last_time_point = adjusted_times.max()
modified_midi.iloc[:, 1] = modified_midi.iloc[:, 1].round()
modified_midi.drop(index=modified_midi.loc[modified_midi.iloc[:, 2].apply((lambda str: (str.strip().lower() == 'tempo')))].index, inplace=True)
modified_midi.reset_index(drop=True, inplace=True)
return modified_midi<|docstring|>Drops 'Tempo' items and changes the time values accordingly.
The resulting dataframe is returned.<|endoftext|>
|
d56801875d4738177dfd9077e5b98112ee10a3583ba1bc211f920cf49e8d37c4
|
def merge_tracks(midi_dataframe):
'\n Combines multiple tracks into one track (track 1), then sorts the items by the time values.\n Returns the resulting dataframe.\n '
midi = midi_dataframe.copy()
midi.loc[(midi.iloc[:, 2].apply((lambda str: (str.strip().lower() == 'header'))), midi.columns[4])] = 1
start_indices = midi.loc[midi.iloc[:, 2].apply((lambda str: (str.strip().lower() == 'start_track')))].index
end_indices = midi.loc[midi.iloc[:, 2].apply((lambda str: (str.strip().lower() == 'end_track')))].index
min_start_idx = midi.loc[(start_indices, midi.columns[1])].idxmin()
max_end_idx = midi.loc[(end_indices, midi.columns[1])].idxmax()
midi.drop(index=start_indices[(start_indices != min_start_idx)], inplace=True)
midi.drop(index=end_indices[(end_indices != max_end_idx)], inplace=True)
midi.loc[((midi.iloc[:, 0] > 1), midi.columns[0])] = 1
midi.loc[(midi.iloc[:, 0] == 1)] = midi.loc[(midi.iloc[:, 0] == 1)].sort_values(midi.columns[1], axis=0, ascending=True).values
midi.reset_index(drop=True, inplace=True)
return midi
|
Combines multiple tracks into one track (track 1), then sorts the items by the time values.
Returns the resulting dataframe.
|
utils/csv_to_text.py
|
merge_tracks
|
dragonoken/undertale_deltarune_soundtrack_generator
| 4 |
python
|
def merge_tracks(midi_dataframe):
'\n Combines multiple tracks into one track (track 1), then sorts the items by the time values.\n Returns the resulting dataframe.\n '
midi = midi_dataframe.copy()
midi.loc[(midi.iloc[:, 2].apply((lambda str: (str.strip().lower() == 'header'))), midi.columns[4])] = 1
start_indices = midi.loc[midi.iloc[:, 2].apply((lambda str: (str.strip().lower() == 'start_track')))].index
end_indices = midi.loc[midi.iloc[:, 2].apply((lambda str: (str.strip().lower() == 'end_track')))].index
min_start_idx = midi.loc[(start_indices, midi.columns[1])].idxmin()
max_end_idx = midi.loc[(end_indices, midi.columns[1])].idxmax()
midi.drop(index=start_indices[(start_indices != min_start_idx)], inplace=True)
midi.drop(index=end_indices[(end_indices != max_end_idx)], inplace=True)
midi.loc[((midi.iloc[:, 0] > 1), midi.columns[0])] = 1
midi.loc[(midi.iloc[:, 0] == 1)] = midi.loc[(midi.iloc[:, 0] == 1)].sort_values(midi.columns[1], axis=0, ascending=True).values
midi.reset_index(drop=True, inplace=True)
return midi
|
def merge_tracks(midi_dataframe):
'\n Combines multiple tracks into one track (track 1), then sorts the items by the time values.\n Returns the resulting dataframe.\n '
midi = midi_dataframe.copy()
midi.loc[(midi.iloc[:, 2].apply((lambda str: (str.strip().lower() == 'header'))), midi.columns[4])] = 1
start_indices = midi.loc[midi.iloc[:, 2].apply((lambda str: (str.strip().lower() == 'start_track')))].index
end_indices = midi.loc[midi.iloc[:, 2].apply((lambda str: (str.strip().lower() == 'end_track')))].index
min_start_idx = midi.loc[(start_indices, midi.columns[1])].idxmin()
max_end_idx = midi.loc[(end_indices, midi.columns[1])].idxmax()
midi.drop(index=start_indices[(start_indices != min_start_idx)], inplace=True)
midi.drop(index=end_indices[(end_indices != max_end_idx)], inplace=True)
midi.loc[((midi.iloc[:, 0] > 1), midi.columns[0])] = 1
midi.loc[(midi.iloc[:, 0] == 1)] = midi.loc[(midi.iloc[:, 0] == 1)].sort_values(midi.columns[1], axis=0, ascending=True).values
midi.reset_index(drop=True, inplace=True)
return midi<|docstring|>Combines multiple tracks into one track (track 1), then sorts the items by the time values.
Returns the resulting dataframe.<|endoftext|>
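A hedged usage sketch for merge_tracks (not part of the original record; the file name and read_csv options are assumptions, and real midicsv output with quoted text fields or extra columns may need more careful parsing):
import pandas as pd
# Read a CSV produced by the midicsv tool without a header, so columns stay
# positional: 0 = track, 1 = time, 2 = event type, then the event parameters.
midi_df = pd.read_csv('song.csv', header=None, names=range(7), skipinitialspace=True)
merged = merge_tracks(midi_df)
# Every event now sits on track 1, ordered by its time value.
print(merged.head())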
|
d34fd94f6009822cbb97ed6c704bc5872e7374f3251efa291d3b93f4f2ce2226
|
def constantize_velocities(midi_dataframe, velocity=80):
'\n Fixes all Note_on velocity values to a constant.\n Returns the resulting dataframe.\n Though it is not necessary, it can give you some sence as to how the machine generated musics would be.\n '
midi = midi_dataframe.copy()
note_on = midi.loc[midi.iloc[:, 2].apply((lambda str: (str.strip().lower() in ['note_on_c', 'note_off_c'])))]
nonzero_vel_idx = note_on.loc[note_on.iloc[:, 5].apply((lambda vel: (vel > 0)))].index
midi.loc[(nonzero_vel_idx, midi.columns[5])] = velocity
return midi
|
Fixes all Note_on velocity values to a constant.
Returns the resulting dataframe.
Though it is not necessary, it can give you some sense as to how the machine-generated music would sound.
|
utils/csv_to_text.py
|
constantize_velocities
|
dragonoken/undertale_deltarune_soundtrack_generator
| 4 |
python
|
def constantize_velocities(midi_dataframe, velocity=80):
'\n Fixes all Note_on velocity values to a constant.\n Returns the resulting dataframe.\n Though it is not necessary, it can give you some sence as to how the machine generated musics would be.\n '
midi = midi_dataframe.copy()
note_on = midi.loc[midi.iloc[:, 2].apply((lambda str: (str.strip().lower() in ['note_on_c', 'note_off_c'])))]
nonzero_vel_idx = note_on.loc[note_on.iloc[:, 5].apply((lambda vel: (vel > 0)))].index
midi.loc[(nonzero_vel_idx, midi.columns[5])] = velocity
return midi
|
def constantize_velocities(midi_dataframe, velocity=80):
'\n Fixes all Note_on velocity values to a constant.\n Returns the resulting dataframe.\n Though it is not necessary, it can give you some sence as to how the machine generated musics would be.\n '
midi = midi_dataframe.copy()
note_on = midi.loc[midi.iloc[:, 2].apply((lambda str: (str.strip().lower() in ['note_on_c', 'note_off_c'])))]
nonzero_vel_idx = note_on.loc[note_on.iloc[:, 5].apply((lambda vel: (vel > 0)))].index
midi.loc[(nonzero_vel_idx, midi.columns[5])] = velocity
return midi<|docstring|>Fixes all Note_on velocity values to a constant.
Returns the resulting dataframe.
Though it is not necessary, it can give you some sense as to how the machine-generated music would sound.<|endoftext|>
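A minimal usage sketch (assumes midi_df is a midicsv frame with positional integer columns, event type in column 2 and velocity in column 5, as in the earlier example):
flat = constantize_velocities(midi_df, velocity=64)
# Every non-zero note velocity is now 64; zero-velocity (note-off style) events are untouched.
print(flat.loc[flat[2].str.strip().str.lower() == 'note_on_c', 5].unique())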
|
1cc04a935d899c8f31fbdfbe40ba2e08bc964d4340583ddaf4ad91c61ba95650
|
def midicsv_to_text(midi_dataframe, ticks_per_step=25):
'\n Converts the given midi dataframe into a string form, where each line represents\n the notes that are pressed / being hold at that time step.\n Ticks per time step determines the length of each time step.\n The smaller the value is, the longer and more accurate the result will be.\n However, when putting it into a machine learning algorithm, consider how\n much the algorithm should take. Longer string will give the algorithm a hard time\n maintaining its memory.\n\n Returns the resulting string text.\n '
NUMBER_OF_PITCH = 128
TRACK_COL = 0
TIME_COL = 1
TYPE_COL = 2
CH_COL = 3
PITCH_COL = 4
VEL_COL = 5
base_midi = midi_dataframe.copy()
col_names = base_midi.columns
note_midi = base_midi.loc[base_midi.iloc[:, TYPE_COL].apply((lambda str: (str.strip().lower() in ['note_on_c', 'note_off_c'])))].copy()
note_midi.sort_values(col_names[TIME_COL], inplace=True)
note_midi.reset_index(drop=True, inplace=True)
note_midi = note_midi.append({col_names[TRACK_COL]: 1, col_names[TIME_COL]: (note_midi.iloc[:, 1].max() + 1920), col_names[TYPE_COL]: 'END', col_names[CH_COL]: 0, col_names[PITCH_COL]: 0, col_names[VEL_COL]: 0}, ignore_index=True)
note_midi.iloc[:, TIME_COL] = (note_midi.iloc[:, TIME_COL] / ticks_per_step).round()
n_steps = int(note_midi.iloc[:, TIME_COL].max())
note_time_matrix = np.zeros((NUMBER_OF_PITCH, n_steps), dtype=np.uint8)
for pitch in range(128):
note_pitch_match = note_midi.loc[(note_midi.iloc[:, PITCH_COL].astype(int) == pitch)]
if (len(note_pitch_match) > 0):
for note_idx in range((len(note_pitch_match) - 1)):
if (note_pitch_match.iloc[(note_idx, VEL_COL)] > 0):
start_from = int(note_pitch_match.iloc[(note_idx, TIME_COL)])
end_before = int(note_pitch_match.iloc[((note_idx + 1), TIME_COL)])
note_time_matrix[pitch, start_from:end_before] = 1
if (note_time_matrix[pitch, (start_from - 2):start_from].sum() == 2):
note_time_matrix[(pitch, (start_from - 1))] = 0
text_list = []
for time_step in range(n_steps):
for pitch in note_time_matrix[:, time_step].nonzero()[0]:
text_list.append(str((pitch + 1)))
text_list.append('0')
midi_text = ' '.join(text_list)
return midi_text
|
Converts the given midi dataframe into a string form, where each line represents
the notes that are pressed / being held at that time step.
Ticks per time step determines the length of each time step.
The smaller the value is, the longer and more accurate the result will be.
However, when putting it into a machine learning algorithm, consider how
much the algorithm should take. Longer string will give the algorithm a hard time
maintaining its memory.
Returns the resulting string text.
|
utils/csv_to_text.py
|
midicsv_to_text
|
dragonoken/undertale_deltarune_soundtrack_generator
| 4 |
python
|
def midicsv_to_text(midi_dataframe, ticks_per_step=25):
'\n Converts the given midi dataframe into a string form, where each line represents\n the notes that are pressed / being hold at that time step.\n Ticks per time step determines the length of each time step.\n The smaller the value is, the longer and more accurate the result will be.\n However, when putting it into a machine learning algorithm, consider how\n much the algorithm should take. Longer string will give the algorithm a hard time\n maintaining its memory.\n\n Returns the resulting string text.\n '
NUMBER_OF_PITCH = 128
TRACK_COL = 0
TIME_COL = 1
TYPE_COL = 2
CH_COL = 3
PITCH_COL = 4
VEL_COL = 5
base_midi = midi_dataframe.copy()
col_names = base_midi.columns
note_midi = base_midi.loc[base_midi.iloc[:, TYPE_COL].apply((lambda str: (str.strip().lower() in ['note_on_c', 'note_off_c'])))].copy()
note_midi.sort_values(col_names[TIME_COL], inplace=True)
note_midi.reset_index(drop=True, inplace=True)
note_midi = note_midi.append({col_names[TRACK_COL]: 1, col_names[TIME_COL]: (note_midi.iloc[:, 1].max() + 1920), col_names[TYPE_COL]: 'END', col_names[CH_COL]: 0, col_names[PITCH_COL]: 0, col_names[VEL_COL]: 0}, ignore_index=True)
note_midi.iloc[:, TIME_COL] = (note_midi.iloc[:, TIME_COL] / ticks_per_step).round()
n_steps = int(note_midi.iloc[:, TIME_COL].max())
note_time_matrix = np.zeros((NUMBER_OF_PITCH, n_steps), dtype=np.uint8)
for pitch in range(128):
note_pitch_match = note_midi.loc[(note_midi.iloc[:, PITCH_COL].astype(int) == pitch)]
if (len(note_pitch_match) > 0):
for note_idx in range((len(note_pitch_match) - 1)):
if (note_pitch_match.iloc[(note_idx, VEL_COL)] > 0):
start_from = int(note_pitch_match.iloc[(note_idx, TIME_COL)])
end_before = int(note_pitch_match.iloc[((note_idx + 1), TIME_COL)])
note_time_matrix[pitch, start_from:end_before] = 1
if (note_time_matrix[pitch, (start_from - 2):start_from].sum() == 2):
note_time_matrix[(pitch, (start_from - 1))] = 0
text_list = []
for time_step in range(n_steps):
for pitch in note_time_matrix[:, time_step].nonzero()[0]:
text_list.append(str((pitch + 1)))
text_list.append('0')
midi_text = ' '.join(text_list)
return midi_text
|
def midicsv_to_text(midi_dataframe, ticks_per_step=25):
'\n Converts the given midi dataframe into a string form, where each line represents\n the notes that are pressed / being hold at that time step.\n Ticks per time step determines the length of each time step.\n The smaller the value is, the longer and more accurate the result will be.\n However, when putting it into a machine learning algorithm, consider how\n much the algorithm should take. Longer string will give the algorithm a hard time\n maintaining its memory.\n\n Returns the resulting string text.\n '
NUMBER_OF_PITCH = 128
TRACK_COL = 0
TIME_COL = 1
TYPE_COL = 2
CH_COL = 3
PITCH_COL = 4
VEL_COL = 5
base_midi = midi_dataframe.copy()
col_names = base_midi.columns
note_midi = base_midi.loc[base_midi.iloc[:, TYPE_COL].apply((lambda str: (str.strip().lower() in ['note_on_c', 'note_off_c'])))].copy()
note_midi.sort_values(col_names[TIME_COL], inplace=True)
note_midi.reset_index(drop=True, inplace=True)
note_midi = note_midi.append({col_names[TRACK_COL]: 1, col_names[TIME_COL]: (note_midi.iloc[:, 1].max() + 1920), col_names[TYPE_COL]: 'END', col_names[CH_COL]: 0, col_names[PITCH_COL]: 0, col_names[VEL_COL]: 0}, ignore_index=True)
note_midi.iloc[:, TIME_COL] = (note_midi.iloc[:, TIME_COL] / ticks_per_step).round()
n_steps = int(note_midi.iloc[:, TIME_COL].max())
note_time_matrix = np.zeros((NUMBER_OF_PITCH, n_steps), dtype=np.uint8)
for pitch in range(128):
note_pitch_match = note_midi.loc[(note_midi.iloc[:, PITCH_COL].astype(int) == pitch)]
if (len(note_pitch_match) > 0):
for note_idx in range((len(note_pitch_match) - 1)):
if (note_pitch_match.iloc[(note_idx, VEL_COL)] > 0):
start_from = int(note_pitch_match.iloc[(note_idx, TIME_COL)])
end_before = int(note_pitch_match.iloc[((note_idx + 1), TIME_COL)])
note_time_matrix[pitch, start_from:end_before] = 1
if (note_time_matrix[pitch, (start_from - 2):start_from].sum() == 2):
note_time_matrix[(pitch, (start_from - 1))] = 0
text_list = []
for time_step in range(n_steps):
for pitch in note_time_matrix[:, time_step].nonzero()[0]:
text_list.append(str((pitch + 1)))
text_list.append('0')
midi_text = ' '.join(text_list)
return midi_text<|docstring|>Converts the given midi dataframe into a string form, where each line represents
the notes that are pressed / being held at that time step.
Ticks per time step determines the length of each time step.
The smaller the value is, the longer and more accurate the result will be.
However, when putting it into a machine learning algorithm, consider how
much the algorithm should take. Longer string will give the algorithm a hard time
maintaining its memory.
Returns the resulting string text.<|endoftext|>
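A sketch of the encoding this function produces (illustration only): each time step contributes the 1-based pitch numbers that are sounding, followed by a '0' separator, all joined with spaces, so a held C4 (MIDI pitch 60) over three steps followed by one silent step comes out as '61 0 61 0 61 0 0'. Hypothetical call, assuming midi_df is a parsed midicsv frame as in the earlier examples:
text = midicsv_to_text(merge_tracks(midi_df), ticks_per_step=25)
print(text[:200])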
|
2cfe26a139ca68f129f79a2c0fd407f3b90c5a022603f3050cdd6e0edfead3da
|
def all_columns(table, alias=None):
'alias all columns with a table name for Select.'
alias = (alias or table)
return ', '.join((('%s.%s AS "%s.%s"' % (alias, col, alias, col)) for col in columns(table)))
|
alias all columns with a table name for Select.
|
app/gbd/core/db.py
|
all_columns
|
gbd-consult/qwebgisapp
| 0 |
python
|
def all_columns(table, alias=None):
alias = (alias or table)
return ', '.join((('%s.%s AS "%s.%s"' % (alias, col, alias, col)) for col in columns(table)))
|
def all_columns(table, alias=None):
alias = (alias or table)
return ', '.join((('%s.%s AS "%s.%s"' % (alias, col, alias, col)) for col in columns(table)))<|docstring|>alias all columns with a table name for Select.<|endoftext|>
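A minimal sketch, assuming the module's columns() helper reports ['id', 'name'] for a hypothetical table users:
# all_columns('users', alias='u')
# -> 'u.id AS "u.id", u.name AS "u.name"'
sql = 'SELECT %s FROM users AS u' % all_columns('users', 'u')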
|
a68bdceddacc83073b0ac6e61090930158bb49b1d2f547232e2ed9ddbe512bf9
|
def star_like(key, value):
'Replace the meta-char * with %.'
value = value.replace('%', '').replace('_', '').replace('*', '%')
if ('%' in value):
return [(key + ' LIKE %s'), value]
return [(key + '=%s'), value]
|
Replace the meta-char * with %.
|
app/gbd/core/db.py
|
star_like
|
gbd-consult/qwebgisapp
| 0 |
python
|
def star_like(key, value):
value = value.replace('%', '').replace('_', '').replace('*', '%')
if ('%' in value):
return [(key + ' LIKE %s'), value]
return [(key + '=%s'), value]
|
def star_like(key, value):
value = value.replace('%', '').replace('_', '').replace('*', '%')
if ('%' in value):
return [(key + ' LIKE %s'), value]
return [(key + '=%s'), value]<|docstring|>Replace the meta-char * with %.<|endoftext|>
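A minimal sketch of how the returned (fragment, value) pair is meant to be used in a parameterised WHERE clause; the cursor call is illustrative only:
clause, value = star_like('name', 'Mai*')   # -> ['name LIKE %s', 'Mai%']
clause, value = star_like('name', 'Main')   # -> ['name=%s', 'Main']
# cursor.execute('SELECT * FROM layers WHERE ' + clause, [value])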
|
39f4e603d680d9e7be74010777bcf6d0469433b53be0b5b8d8ab395235f99871
|
def combine_fastqs(fps_in, fp_out):
'Combine multiple FASTQs into a single FASTQ.'
assert (len(fps_in) > 0)
if (len(fps_in) == 1):
assert os.path.exists(fps_in[0])
logging.info('Making a symlink: {} -> {}'.format(fps_in[0], fp_out))
os.symlink(fps_in[0], fp_out)
else:
logging.info('Combining {:,} FASTQ files'.format(len(fps_in)))
logging.info('Writing all inputs to {}'.format(fp_out))
with open(fp_out, 'wt') as fo:
for (fp_ix, f) in enumerate(fps_in):
logging.info('Adding {} to {}'.format(f, fp_out))
with open(f, 'rt') as fi:
for (line_ix, line) in enumerate(fi):
mod = (line_ix % 4)
if ((mod == 0) or (mod == 2)):
line = line.rstrip('\n')
fo.write('{}-{}\n'.format(line, fp_ix))
else:
fo.write(line)
|
Combine multiple FASTQs into a single FASTQ.
|
famli/fastq_helpers.py
|
combine_fastqs
|
FredHutch/FAMLI
| 14 |
python
|
def combine_fastqs(fps_in, fp_out):
assert (len(fps_in) > 0)
if (len(fps_in) == 1):
assert os.path.exists(fps_in[0])
logging.info('Making a symlink: {} -> {}'.format(fps_in[0], fp_out))
os.symlink(fps_in[0], fp_out)
else:
logging.info('Combining {:,} FASTQ files'.format(len(fps_in)))
logging.info('Writing all inputs to {}'.format(fp_out))
with open(fp_out, 'wt') as fo:
for (fp_ix, f) in enumerate(fps_in):
logging.info('Adding {} to {}'.format(f, fp_out))
with open(f, 'rt') as fi:
for (line_ix, line) in enumerate(fi):
mod = (line_ix % 4)
if ((mod == 0) or (mod == 2)):
line = line.rstrip('\n')
fo.write('{}-{}\n'.format(line, fp_ix))
else:
fo.write(line)
|
def combine_fastqs(fps_in, fp_out):
assert (len(fps_in) > 0)
if (len(fps_in) == 1):
assert os.path.exists(fps_in[0])
logging.info('Making a symlink: {} -> {}'.format(fps_in[0], fp_out))
os.symlink(fps_in[0], fp_out)
else:
logging.info('Combining {:,} FASTQ files'.format(len(fps_in)))
logging.info('Writing all inputs to {}'.format(fp_out))
with open(fp_out, 'wt') as fo:
for (fp_ix, f) in enumerate(fps_in):
logging.info('Adding {} to {}'.format(f, fp_out))
with open(f, 'rt') as fi:
for (line_ix, line) in enumerate(fi):
mod = (line_ix % 4)
if ((mod == 0) or (mod == 2)):
line = line.rstrip('\n')
fo.write('{}-{}\n'.format(line, fp_ix))
else:
fo.write(line)<|docstring|>Combine multiple FASTQs into a single FASTQ.<|endoftext|>
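A minimal usage sketch (file names are assumptions; a single input is symlinked, several inputs are concatenated with each read name tagged by its file index):
import logging, os, tempfile
logging.basicConfig(level=logging.INFO)
out_fp = os.path.join(tempfile.mkdtemp(), 'combined.fastq')
combine_fastqs(['sample_1.fastq', 'sample_2.fastq'], out_fp)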
|
a12250dbac9ca62ecb69dc3800c0d40ef7e0362a877a40ba65b8a2deadb416a4
|
def set_up_sra_cache_folder(temp_folder):
'Set up the fastq-dump cache folder within the temp folder.'
logging.info('Setting up fastq-dump cache within {}'.format(temp_folder))
for path in ['/root/ncbi', '/root/ncbi/public']:
if (os.path.exists(path) is False):
os.mkdir(path)
if os.path.exists('/root/ncbi/public/sra'):
shutil.rmtree('/root/ncbi/public/sra')
temp_cache = os.path.join(temp_folder, 'sra')
assert (os.path.exists(temp_cache) is False)
os.mkdir(temp_cache)
run_cmds(['ln', '-s', '-f', temp_cache, '/root/ncbi/public/sra'])
assert os.path.exists('/root/ncbi/public/sra')
|
Set up the fastq-dump cache folder within the temp folder.
|
famli/fastq_helpers.py
|
set_up_sra_cache_folder
|
FredHutch/FAMLI
| 14 |
python
|
def set_up_sra_cache_folder(temp_folder):
logging.info('Setting up fastq-dump cache within {}'.format(temp_folder))
for path in ['/root/ncbi', '/root/ncbi/public']:
if (os.path.exists(path) is False):
os.mkdir(path)
if os.path.exists('/root/ncbi/public/sra'):
shutil.rmtree('/root/ncbi/public/sra')
temp_cache = os.path.join(temp_folder, 'sra')
assert (os.path.exists(temp_cache) is False)
os.mkdir(temp_cache)
run_cmds(['ln', '-s', '-f', temp_cache, '/root/ncbi/public/sra'])
assert os.path.exists('/root/ncbi/public/sra')
|
def set_up_sra_cache_folder(temp_folder):
logging.info('Setting up fastq-dump cache within {}'.format(temp_folder))
for path in ['/root/ncbi', '/root/ncbi/public']:
if (os.path.exists(path) is False):
os.mkdir(path)
if os.path.exists('/root/ncbi/public/sra'):
shutil.rmtree('/root/ncbi/public/sra')
temp_cache = os.path.join(temp_folder, 'sra')
assert (os.path.exists(temp_cache) is False)
os.mkdir(temp_cache)
run_cmds(['ln', '-s', '-f', temp_cache, '/root/ncbi/public/sra'])
assert os.path.exists('/root/ncbi/public/sra')<|docstring|>Set up the fastq-dump cache folder within the temp folder.<|endoftext|>
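A minimal sketch. The helper assumes it runs as root inside the FAMLI container (it writes under /root/ncbi) and that the temp folder does not yet contain an 'sra' subdirectory:
import tempfile
temp_folder = tempfile.mkdtemp()
set_up_sra_cache_folder(temp_folder)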
|
cba9799e0c7881574ad54de3a953778d1f14b9c6bffb54f41c766a396cc8d55a
|
def get_reads_from_url(input_str, temp_folder, random_string=str(uuid.uuid4())[:8], min_qual=None):
'Get a set of reads from a URL -- return the downloaded filepath.'
fetched_reads_folder = os.path.join(temp_folder, 'fetched_reads')
cleaned_reads_folder = os.path.join(temp_folder, 'cleaned_reads')
trimmed_reads_folder = os.path.join(temp_folder, 'trimmed_reads')
for folder in [fetched_reads_folder, cleaned_reads_folder, trimmed_reads_folder]:
if (not os.path.exists(folder)):
logging.info('Making new folder {}'.format(folder))
os.mkdir(folder)
logging.info('Getting reads from {}'.format(input_str))
filename = input_str.split('/')[(- 1)]
local_path = os.path.join(fetched_reads_folder, filename)
logging.info(('Filename: ' + filename))
logging.info(('Local path: ' + local_path))
if (not input_str.startswith(('s3://', 'sra://', 'ftp://'))):
logging.info('Treating as local path')
msg = 'Input file does not exist ({})'.format(input_str)
assert os.path.exists(input_str), msg
logging.info('Making a symlink to temporary folder')
os.symlink(input_str, local_path)
elif input_str.startswith('s3://'):
logging.info('Getting reads from S3')
run_cmds(['aws', 's3', 'cp', '--quiet', '--sse', 'AES256', input_str, fetched_reads_folder])
elif input_str.startswith('ftp://'):
logging.info('Getting reads from FTP')
run_cmds(['wget', '-P', fetched_reads_folder, input_str])
elif input_str.startswith('sra://'):
accession = filename
logging.info(('Getting reads from SRA: ' + accession))
local_path = get_sra(accession, fetched_reads_folder)
else:
raise Exception(('Did not recognize prefix for input: ' + input_str))
logging.info('Cleaning up FASTQ headers')
cleaned_path = clean_fastq_headers(local_path, cleaned_reads_folder)
logging.info('Made new cleaned FASTQ file: {}'.format(cleaned_path))
logging.info('Deleting old file: {}'.format(local_path))
os.unlink(local_path)
if (min_qual is None):
return cleaned_path
else:
logging.info('Quality trimming the FASTQ (Q{})'.format(min_qual))
trimmed_path = quality_trim(cleaned_path, trimmed_reads_folder, min_qual)
logging.info('Made new quality trimmed FASTQ: {}'.format(trimmed_path))
logging.info('Deleting old file: {}'.format(cleaned_path))
os.unlink(cleaned_path)
return trimmed_path
|
Get a set of reads from a URL -- return the downloaded filepath.
|
famli/fastq_helpers.py
|
get_reads_from_url
|
FredHutch/FAMLI
| 14 |
python
|
def get_reads_from_url(input_str, temp_folder, random_string=str(uuid.uuid4())[:8], min_qual=None):
fetched_reads_folder = os.path.join(temp_folder, 'fetched_reads')
cleaned_reads_folder = os.path.join(temp_folder, 'cleaned_reads')
trimmed_reads_folder = os.path.join(temp_folder, 'trimmed_reads')
for folder in [fetched_reads_folder, cleaned_reads_folder, trimmed_reads_folder]:
if (not os.path.exists(folder)):
logging.info('Making new folder {}'.format(folder))
os.mkdir(folder)
logging.info('Getting reads from {}'.format(input_str))
filename = input_str.split('/')[(- 1)]
local_path = os.path.join(fetched_reads_folder, filename)
logging.info(('Filename: ' + filename))
logging.info(('Local path: ' + local_path))
if (not input_str.startswith(('s3://', 'sra://', 'ftp://'))):
logging.info('Treating as local path')
msg = 'Input file does not exist ({})'.format(input_str)
assert os.path.exists(input_str), msg
logging.info('Making a symlink to temporary folder')
os.symlink(input_str, local_path)
elif input_str.startswith('s3://'):
logging.info('Getting reads from S3')
run_cmds(['aws', 's3', 'cp', '--quiet', '--sse', 'AES256', input_str, fetched_reads_folder])
elif input_str.startswith('ftp://'):
logging.info('Getting reads from FTP')
run_cmds(['wget', '-P', fetched_reads_folder, input_str])
elif input_str.startswith('sra://'):
accession = filename
logging.info(('Getting reads from SRA: ' + accession))
local_path = get_sra(accession, fetched_reads_folder)
else:
raise Exception(('Did not recognize prefix for input: ' + input_str))
logging.info('Cleaning up FASTQ headers')
cleaned_path = clean_fastq_headers(local_path, cleaned_reads_folder)
logging.info('Made new cleaned FASTQ file: {}'.format(cleaned_path))
logging.info('Deleting old file: {}'.format(local_path))
os.unlink(local_path)
if (min_qual is None):
return cleaned_path
else:
logging.info('Quality trimming the FASTQ (Q{})'.format(min_qual))
trimmed_path = quality_trim(cleaned_path, trimmed_reads_folder, min_qual)
logging.info('Made new quality trimmed FASTQ: {}'.format(trimmed_path))
logging.info('Deleting old file: {}'.format(cleaned_path))
os.unlink(cleaned_path)
return trimmed_path
|
def get_reads_from_url(input_str, temp_folder, random_string=str(uuid.uuid4())[:8], min_qual=None):
fetched_reads_folder = os.path.join(temp_folder, 'fetched_reads')
cleaned_reads_folder = os.path.join(temp_folder, 'cleaned_reads')
trimmed_reads_folder = os.path.join(temp_folder, 'trimmed_reads')
for folder in [fetched_reads_folder, cleaned_reads_folder, trimmed_reads_folder]:
if (not os.path.exists(folder)):
logging.info('Making new folder {}'.format(folder))
os.mkdir(folder)
logging.info('Getting reads from {}'.format(input_str))
filename = input_str.split('/')[(- 1)]
local_path = os.path.join(fetched_reads_folder, filename)
logging.info(('Filename: ' + filename))
logging.info(('Local path: ' + local_path))
if (not input_str.startswith(('s3://', 'sra://', 'ftp://'))):
logging.info('Treating as local path')
msg = 'Input file does not exist ({})'.format(input_str)
assert os.path.exists(input_str), msg
logging.info('Making a symlink to temporary folder')
os.symlink(input_str, local_path)
elif input_str.startswith('s3://'):
logging.info('Getting reads from S3')
run_cmds(['aws', 's3', 'cp', '--quiet', '--sse', 'AES256', input_str, fetched_reads_folder])
elif input_str.startswith('ftp://'):
logging.info('Getting reads from FTP')
run_cmds(['wget', '-P', fetched_reads_folder, input_str])
elif input_str.startswith('sra://'):
accession = filename
logging.info(('Getting reads from SRA: ' + accession))
local_path = get_sra(accession, fetched_reads_folder)
else:
raise Exception(('Did not recognize prefix for input: ' + input_str))
logging.info('Cleaning up FASTQ headers')
cleaned_path = clean_fastq_headers(local_path, cleaned_reads_folder)
logging.info('Made new cleaned FASTQ file: {}'.format(cleaned_path))
logging.info('Deleting old file: {}'.format(local_path))
os.unlink(local_path)
if (min_qual is None):
return cleaned_path
else:
logging.info('Quality trimming the FASTQ (Q{})'.format(min_qual))
trimmed_path = quality_trim(cleaned_path, trimmed_reads_folder, min_qual)
logging.info('Made new quality trimmed FASTQ: {}'.format(trimmed_path))
logging.info('Deleting old file: {}'.format(cleaned_path))
os.unlink(cleaned_path)
return trimmed_path<|docstring|>Get a set of reads from a URL -- return the downloaded filepath.<|endoftext|>
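A minimal sketch of the supported input forms (the accession and paths are illustrative only):
import tempfile
temp_folder = tempfile.mkdtemp()
# Local paths, s3://, ftp:// and sra:// inputs all go through the same call.
fastq_fp = get_reads_from_url('sra://SRR000001', temp_folder, min_qual=20)
# With min_qual=None the quality-trimming step is skipped and the header-cleaned FASTQ is returned.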
|
562e95aa0a1ab7216fd5e3e8fc6a6df6d63e3945269baa8d416438131b30c319
|
def quality_trim(fp_in, folder_out, min_qual, min_len=30):
'Trim a FASTQ to a minimum quality score.'
assert os.path.exists(fp_in)
assert (fp_in.endswith('.gz') is False)
assert os.path.exists(folder_out)
assert isinstance(min_qual, int)
fp_out = os.path.join(folder_out, fp_in.split('/')[(- 1)])
run_cmds(['fastq_quality_trimmer', '-Q', '33', '-t', str(min_qual), '-i', fp_in, '-o', fp_out, '-l', str(min_len), '-v'])
assert os.path.exists(fp_out)
return fp_out
|
Trim a FASTQ to a minimum quality score.
|
famli/fastq_helpers.py
|
quality_trim
|
FredHutch/FAMLI
| 14 |
python
|
def quality_trim(fp_in, folder_out, min_qual, min_len=30):
assert os.path.exists(fp_in)
assert (fp_in.endswith('.gz') is False)
assert os.path.exists(folder_out)
assert isinstance(min_qual, int)
fp_out = os.path.join(folder_out, fp_in.split('/')[(- 1)])
run_cmds(['fastq_quality_trimmer', '-Q', '33', '-t', str(min_qual), '-i', fp_in, '-o', fp_out, '-l', str(min_len), '-v'])
assert os.path.exists(fp_out)
return fp_out
|
def quality_trim(fp_in, folder_out, min_qual, min_len=30):
assert os.path.exists(fp_in)
assert (fp_in.endswith('.gz') is False)
assert os.path.exists(folder_out)
assert isinstance(min_qual, int)
fp_out = os.path.join(folder_out, fp_in.split('/')[(- 1)])
run_cmds(['fastq_quality_trimmer', '-Q', '33', '-t', str(min_qual), '-i', fp_in, '-o', fp_out, '-l', str(min_len), '-v'])
assert os.path.exists(fp_out)
return fp_out<|docstring|>Trim a FASTQ to a minimum quality score.<|endoftext|>
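A minimal sketch, assuming fastq_quality_trimmer from the FASTX-Toolkit is on the PATH and the output folder already exists; bases below Q20 are trimmed and reads shorter than 30 nt are dropped:
trimmed_fp = quality_trim('reads.fastq', '/tmp/trimmed_reads', min_qual=20)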
|
e3a96ee6521321caf1db555ddc768fe885c6ff24225d69432acf9ad649281cd3
|
def get_sra(accession, temp_folder):
'Get the FASTQ for an SRA accession.'
logging.info('Downloading {} from SRA'.format(accession))
local_path = os.path.join(temp_folder, (accession + '.fastq'))
logging.info('Local path: {}'.format(local_path))
logging.info('Downloading via fastq-dump')
run_cmds(['prefetch', accession])
run_cmds(['fastq-dump', '--split-files', '--outdir', temp_folder, accession])
msg = 'File could not be downloaded from SRA: {}'.format(accession)
assert any([(fp.startswith(accession) and fp.endswith('fastq')) for fp in os.listdir(temp_folder)]), msg
logging.info('Concatenating output files')
with open((local_path + '.temp'), 'wt') as fo:
cmd = 'cat {}/{}*fastq'.format(temp_folder, accession)
cat = subprocess.Popen(cmd, shell=True, stdout=fo)
cat.wait()
for fp in os.listdir(temp_folder):
if (fp.startswith(accession) and fp.endswith('fastq')):
fp = os.path.join(temp_folder, fp)
logging.info('Removing {}'.format(fp))
os.unlink(fp)
cache_fp = '/root/ncbi/public/sra/{}.sra'.format(accession)
if os.path.exists(cache_fp):
logging.info('Removing {}'.format(cache_fp))
os.unlink(cache_fp)
run_cmds(['mv', (local_path + '.temp'), local_path])
logging.info(('Done fetching ' + accession))
return local_path
|
Get the FASTQ for an SRA accession.
|
famli/fastq_helpers.py
|
get_sra
|
FredHutch/FAMLI
| 14 |
python
|
def get_sra(accession, temp_folder):
logging.info('Downloading {} from SRA'.format(accession))
local_path = os.path.join(temp_folder, (accession + '.fastq'))
logging.info('Local path: {}'.format(local_path))
logging.info('Downloading via fastq-dump')
run_cmds(['prefetch', accession])
run_cmds(['fastq-dump', '--split-files', '--outdir', temp_folder, accession])
msg = 'File could not be downloaded from SRA: {}'.format(accession)
assert any([(fp.startswith(accession) and fp.endswith('fastq')) for fp in os.listdir(temp_folder)]), msg
logging.info('Concatenating output files')
with open((local_path + '.temp'), 'wt') as fo:
cmd = 'cat {}/{}*fastq'.format(temp_folder, accession)
cat = subprocess.Popen(cmd, shell=True, stdout=fo)
cat.wait()
for fp in os.listdir(temp_folder):
if (fp.startswith(accession) and fp.endswith('fastq')):
fp = os.path.join(temp_folder, fp)
logging.info('Removing {}'.format(fp))
os.unlink(fp)
cache_fp = '/root/ncbi/public/sra/{}.sra'.format(accession)
if os.path.exists(cache_fp):
logging.info('Removing {}'.format(cache_fp))
os.unlink(cache_fp)
run_cmds(['mv', (local_path + '.temp'), local_path])
logging.info(('Done fetching ' + accession))
return local_path
|
def get_sra(accession, temp_folder):
logging.info('Downloading {} from SRA'.format(accession))
local_path = os.path.join(temp_folder, (accession + '.fastq'))
logging.info('Local path: {}'.format(local_path))
logging.info('Downloading via fastq-dump')
run_cmds(['prefetch', accession])
run_cmds(['fastq-dump', '--split-files', '--outdir', temp_folder, accession])
msg = 'File could not be downloaded from SRA: {}'.format(accession)
assert any([(fp.startswith(accession) and fp.endswith('fastq')) for fp in os.listdir(temp_folder)]), msg
logging.info('Concatenating output files')
with open((local_path + '.temp'), 'wt') as fo:
cmd = 'cat {}/{}*fastq'.format(temp_folder, accession)
cat = subprocess.Popen(cmd, shell=True, stdout=fo)
cat.wait()
for fp in os.listdir(temp_folder):
if (fp.startswith(accession) and fp.endswith('fastq')):
fp = os.path.join(temp_folder, fp)
logging.info('Removing {}'.format(fp))
os.unlink(fp)
cache_fp = '/root/ncbi/public/sra/{}.sra'.format(accession)
if os.path.exists(cache_fp):
logging.info('Removing {}'.format(cache_fp))
os.unlink(cache_fp)
run_cmds(['mv', (local_path + '.temp'), local_path])
logging.info(('Done fetching ' + accession))
return local_path<|docstring|>Get the FASTQ for an SRA accession.<|endoftext|>
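A minimal sketch, assuming the SRA toolkit (prefetch and fastq-dump) is installed and the cache folder has been set up as above; the accession is only an example:
fastq_fp = get_sra('SRR000001', '/tmp/fetched_reads')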
|
8268dbccf57ee6dba503f00d5f785ab866ecdc594aac24d13cb3da415f7e6a2b
|
def clean_fastq_headers(fp_in, folder_out):
'Read in a FASTQ file and write out a copy with unique headers.'
fp_out = os.path.join(folder_out, fp_in.split('/')[(- 1)])
if fp_out.endswith('.gz'):
fp_out = fp_out[:(- 3)]
if fp_in.endswith('.gz'):
f_in = gzip.open(fp_in, 'rt')
else:
f_in = open(fp_in, 'rt')
f_out = open(fp_out, 'wt')
atcg = re.compile('[^ATCG\n]')
for (ix, line) in enumerate(f_in):
mod = (ix % 4)
if (mod == 0):
if (len(line) == 1):
continue
assert (line[0] == '@'), "Header lacks '@' ({})".format(line)
line = line.rstrip('\n').split(' ')[0].split('\t')[0]
line = '{}-r{}\n'.format(line, (1 + (ix / 4)))
header = line[1:]
elif (mod == 1):
assert (len(line) > 1)
line = atcg.sub('N', line)
elif (mod == 2):
assert (line[0] == '+')
line = ('+' + header)
elif (mod == 3):
assert (len(line) > 1)
f_out.write(line)
f_in.close()
f_out.close()
return fp_out
|
Read in a FASTQ file and write out a copy with unique headers.
|
famli/fastq_helpers.py
|
clean_fastq_headers
|
FredHutch/FAMLI
| 14 |
python
|
def clean_fastq_headers(fp_in, folder_out):
fp_out = os.path.join(folder_out, fp_in.split('/')[(- 1)])
if fp_out.endswith('.gz'):
fp_out = fp_out[:(- 3)]
if fp_in.endswith('.gz'):
f_in = gzip.open(fp_in, 'rt')
else:
f_in = open(fp_in, 'rt')
f_out = open(fp_out, 'wt')
atcg = re.compile('[^ATCG\n]')
for (ix, line) in enumerate(f_in):
mod = (ix % 4)
if (mod == 0):
if (len(line) == 1):
continue
assert (line[0] == '@'), "Header lacks '@' ({})".format(line)
line = line.rstrip('\n').split(' ')[0].split('\t')[0]
line = '{}-r{}\n'.format(line, (1 + (ix / 4)))
header = line[1:]
elif (mod == 1):
assert (len(line) > 1)
line = atcg.sub('N', line)
elif (mod == 2):
assert (line[0] == '+')
line = ('+' + header)
elif (mod == 3):
assert (len(line) > 1)
f_out.write(line)
f_in.close()
f_out.close()
return fp_out
|
def clean_fastq_headers(fp_in, folder_out):
fp_out = os.path.join(folder_out, fp_in.split('/')[(- 1)])
if fp_out.endswith('.gz'):
fp_out = fp_out[:(- 3)]
if fp_in.endswith('.gz'):
f_in = gzip.open(fp_in, 'rt')
else:
f_in = open(fp_in, 'rt')
f_out = open(fp_out, 'wt')
atcg = re.compile('[^ATCG\n]')
for (ix, line) in enumerate(f_in):
mod = (ix % 4)
if (mod == 0):
if (len(line) == 1):
continue
assert (line[0] == '@'), "Header lacks '@' ({})".format(line)
line = line.rstrip('\n').split(' ')[0].split('\t')[0]
line = '{}-r{}\n'.format(line, (1 + (ix / 4)))
header = line[1:]
elif (mod == 1):
assert (len(line) > 1)
line = atcg.sub('N', line)
elif (mod == 2):
assert (line[0] == '+')
line = ('+' + header)
elif (mod == 3):
assert (len(line) > 1)
f_out.write(line)
f_in.close()
f_out.close()
return fp_out<|docstring|>Read in a FASTQ file and write out a copy with unique headers.<|endoftext|>
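A sketch of the rewrite this performs (illustrative record): a header such as '@SRR000001.1 071112_SLXA-EAS1_s_7:5:1:817:345 length=36' keeps only the part before the first space and gains a running '-r<n>' read suffix, the '+' line repeats that cleaned header, and non-ACGT bases in the sequence line are replaced with N:
cleaned_fp = clean_fastq_headers('reads.fastq.gz', '/tmp/cleaned_reads')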
|
b4adc2c19a2717f14592720431b88b9bcce7ea9fe96f5a1738b500edd55c521d
|
@register(outgoing=True, pattern='^.sticker')
async def kang(args):
' For .kang command, kangs stickers or creates new ones. '
user = (await bot.get_me())
if (not user.username):
user.username = user.first_name
message = (await args.get_reply_message())
photo = None
emojibypass = False
is_anim = False
emoji = ''
(await args.edit('**🔁 Caricamento..**'))
if (message and message.media):
if isinstance(message.media, MessageMediaPhoto):
photo = io.BytesIO()
photo = (await bot.download_media(message.photo, photo))
elif ('image' in message.media.document.mime_type.split('/')):
photo = io.BytesIO()
(await bot.download_file(message.media.document, photo))
if (DocumentAttributeFilename(file_name='sticker.webp') in message.media.document.attributes):
emoji = message.media.document.attributes[1].alt
emojibypass = True
elif (DocumentAttributeFilename(file_name='AnimatedSticker.tgs') in message.media.document.attributes):
emoji = message.media.document.attributes[0].alt
emojibypass = True
is_anim = True
photo = 1
else:
(await args.edit('**❌ Errore:** `File non supportato!`'))
return
else:
(await args.edit('**❌ Errore:** `Rispondi ad uno sticker/media.`'))
return
if photo:
splat = args.text.split()
if (not emojibypass):
emoji = '🤔'
pack = 1
if (len(splat) == 3):
pack = splat[2]
emoji = splat[1]
elif (len(splat) == 2):
if splat[1].isnumeric():
pack = int(splat[1])
else:
emoji = splat[1]
packname = f'a{user.id}_by_{user.username}_{pack}'
packnick = f"@{user.username}'s pack {pack}"
cmd = '/newpack'
file = io.BytesIO()
if (not is_anim):
image = (await resize_photo(photo))
file.name = 'sticker.png'
image.save(file, 'PNG')
else:
packname += '_anim'
packnick += ' animated'
cmd = '/newanimated'
response = urllib.request.urlopen(urllib.request.Request(f'http://t.me/addstickers/{packname}'))
htmlstr = response.read().decode('utf8').split('\n')
if (' A <strong>Telegram</strong> user has created the <strong>Sticker Set</strong>.' not in htmlstr):
async with bot.conversation('Stickers') as conv:
(await conv.send_message('/addsticker'))
(await conv.get_response())
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.send_message(packname))
x = (await conv.get_response())
while (x.text == PACK_FULL):
pack += 1
packname = f'a{user.id}_by_{user.username}_{pack}'
packnick = f"@{user.username}'s pack {pack}"
(await args.edit((('`Spostando su ' + str(pack)) + " perchè non c'è più spazio.`")))
(await conv.send_message(packname))
x = (await conv.get_response())
if (x.text == '**Pack selezionato non valido.**'):
(await conv.send_message(cmd))
(await conv.get_response())
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.send_message(packnick))
(await conv.get_response())
(await bot.send_read_acknowledge(conv.chat_id))
if is_anim:
(await bot.forward_messages('Stickers', [message.id], args.chat_id))
else:
file.seek(0)
(await conv.send_file(file, force_document=True))
(await conv.get_response())
(await conv.send_message(emoji))
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.get_response())
(await conv.send_message('/publish'))
if is_anim:
(await conv.get_response())
(await conv.send_message(f'<{packnick}>'))
(await conv.get_response())
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.send_message('/skip'))
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.get_response())
(await conv.send_message(packname))
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.get_response())
(await bot.send_read_acknowledge(conv.chat_id))
(await args.edit(f'''**📚 Sticker aggiunto!**
**➡️ Per vederlo premi** [qui.](t.me/addstickers/{packname}).''', parse_mode='md'))
return
if is_anim:
(await bot.forward_messages('Stickers', [message.id], args.chat_id))
else:
file.seek(0)
(await conv.send_file(file, force_document=True))
(await conv.get_response())
(await conv.send_message(emoji))
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.get_response())
(await conv.send_message('/done'))
(await conv.get_response())
(await bot.send_read_acknowledge(conv.chat_id))
else:
(await args.edit('**🔁 Pack non trovato, ne sto creando uno nuovo...**'))
async with bot.conversation('Stickers') as conv:
(await conv.send_message(cmd))
(await conv.get_response())
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.send_message(packnick))
(await conv.get_response())
(await bot.send_read_acknowledge(conv.chat_id))
if is_anim:
(await bot.forward_messages('Stickers', [message.id], args.chat_id))
else:
file.seek(0)
(await conv.send_file(file, force_document=True))
(await conv.get_response())
(await conv.send_message(emoji))
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.get_response())
(await conv.send_message('/publish'))
if is_anim:
(await conv.get_response())
(await conv.send_message(f'<{packnick}>'))
(await conv.get_response())
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.send_message('/skip'))
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.get_response())
(await conv.send_message(packname))
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.get_response())
(await bot.send_read_acknowledge(conv.chat_id))
(await args.edit(f'''**📚 Sticker aggiunto!**
**➡️ Per vederlo premi** [qui.](t.me/addstickers/{packname})''', parse_mode='md'))
|
For .kang command, kangs stickers or creates new ones.
|
userbot/plugins/stickers.py
|
kang
|
znotash/X-tra-Telegram
| 2 |
python
|
@register(outgoing=True, pattern='^.sticker')
async def kang(args):
' '
user = (await bot.get_me())
if (not user.username):
user.username = user.first_name
message = (await args.get_reply_message())
photo = None
emojibypass = False
is_anim = False
emoji = ''
(await args.edit('**🔁 Caricamento..**'))
if (message and message.media):
if isinstance(message.media, MessageMediaPhoto):
photo = io.BytesIO()
photo = (await bot.download_media(message.photo, photo))
elif ('image' in message.media.document.mime_type.split('/')):
photo = io.BytesIO()
(await bot.download_file(message.media.document, photo))
if (DocumentAttributeFilename(file_name='sticker.webp') in message.media.document.attributes):
emoji = message.media.document.attributes[1].alt
emojibypass = True
elif (DocumentAttributeFilename(file_name='AnimatedSticker.tgs') in message.media.document.attributes):
emoji = message.media.document.attributes[0].alt
emojibypass = True
is_anim = True
photo = 1
else:
(await args.edit('**❌ Errore:** `File non supportato!`'))
return
else:
(await args.edit('**❌ Errore:** `Rispondi ad uno sticker/media.`'))
return
if photo:
splat = args.text.split()
if (not emojibypass):
emoji = '🤔'
pack = 1
if (len(splat) == 3):
pack = splat[2]
emoji = splat[1]
elif (len(splat) == 2):
if splat[1].isnumeric():
pack = int(splat[1])
else:
emoji = splat[1]
packname = f'a{user.id}_by_{user.username}_{pack}'
packnick = f"@{user.username}'s pack {pack}"
cmd = '/newpack'
file = io.BytesIO()
if (not is_anim):
image = (await resize_photo(photo))
file.name = 'sticker.png'
image.save(file, 'PNG')
else:
packname += '_anim'
packnick += ' animated'
cmd = '/newanimated'
response = urllib.request.urlopen(urllib.request.Request(f'http://t.me/addstickers/{packname}'))
htmlstr = response.read().decode('utf8').split('\n')
if (' A <strong>Telegram</strong> user has created the <strong>Sticker Set</strong>.' not in htmlstr):
async with bot.conversation('Stickers') as conv:
(await conv.send_message('/addsticker'))
(await conv.get_response())
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.send_message(packname))
x = (await conv.get_response())
while (x.text == PACK_FULL):
pack += 1
packname = f'a{user.id}_by_{user.username}_{pack}'
packnick = f"@{user.username}'s pack {pack}"
(await args.edit((('`Spostando su ' + str(pack)) + " perchè non c'è più spazio.`")))
(await conv.send_message(packname))
x = (await conv.get_response())
if (x.text == '**Pack selezionato non valido.**'):
(await conv.send_message(cmd))
(await conv.get_response())
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.send_message(packnick))
(await conv.get_response())
(await bot.send_read_acknowledge(conv.chat_id))
if is_anim:
(await bot.forward_messages('Stickers', [message.id], args.chat_id))
else:
file.seek(0)
(await conv.send_file(file, force_document=True))
(await conv.get_response())
(await conv.send_message(emoji))
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.get_response())
(await conv.send_message('/publish'))
if is_anim:
(await conv.get_response())
(await conv.send_message(f'<{packnick}>'))
(await conv.get_response())
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.send_message('/skip'))
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.get_response())
(await conv.send_message(packname))
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.get_response())
(await bot.send_read_acknowledge(conv.chat_id))
(await args.edit(f'''**📚 Sticker aggiunto!**
**➡️ Per vederlo premi** [qui.](t.me/addstickers/{packname}).''', parse_mode='md'))
return
if is_anim:
(await bot.forward_messages('Stickers', [message.id], args.chat_id))
else:
file.seek(0)
(await conv.send_file(file, force_document=True))
(await conv.get_response())
(await conv.send_message(emoji))
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.get_response())
(await conv.send_message('/done'))
(await conv.get_response())
(await bot.send_read_acknowledge(conv.chat_id))
else:
(await args.edit('**🔁 Pack non trovato, ne sto creando uno nuovo...**'))
async with bot.conversation('Stickers') as conv:
(await conv.send_message(cmd))
(await conv.get_response())
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.send_message(packnick))
(await conv.get_response())
(await bot.send_read_acknowledge(conv.chat_id))
if is_anim:
(await bot.forward_messages('Stickers', [message.id], args.chat_id))
else:
file.seek(0)
(await conv.send_file(file, force_document=True))
(await conv.get_response())
(await conv.send_message(emoji))
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.get_response())
(await conv.send_message('/publish'))
if is_anim:
(await conv.get_response())
(await conv.send_message(f'<{packnick}>'))
(await conv.get_response())
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.send_message('/skip'))
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.get_response())
(await conv.send_message(packname))
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.get_response())
(await bot.send_read_acknowledge(conv.chat_id))
(await args.edit(f'''**📚 Sticker aggiunto!**
**➡️ Per vederlo premi** [qui.](t.me/addstickers/{packname})''', parse_mode='md'))
|
@register(outgoing=True, pattern='^.sticker')
async def kang(args):
' '
user = (await bot.get_me())
if (not user.username):
user.username = user.first_name
message = (await args.get_reply_message())
photo = None
emojibypass = False
is_anim = False
emoji = ''
(await args.edit('**🔁 Caricamento..**'))
if (message and message.media):
if isinstance(message.media, MessageMediaPhoto):
photo = io.BytesIO()
photo = (await bot.download_media(message.photo, photo))
elif ('image' in message.media.document.mime_type.split('/')):
photo = io.BytesIO()
(await bot.download_file(message.media.document, photo))
if (DocumentAttributeFilename(file_name='sticker.webp') in message.media.document.attributes):
emoji = message.media.document.attributes[1].alt
emojibypass = True
elif (DocumentAttributeFilename(file_name='AnimatedSticker.tgs') in message.media.document.attributes):
emoji = message.media.document.attributes[0].alt
emojibypass = True
is_anim = True
photo = 1
else:
(await args.edit('**❌ Errore:** `File non supportato!`'))
return
else:
(await args.edit('**❌ Errore:** `Rispondi ad uno sticker/media.`'))
return
if photo:
splat = args.text.split()
if (not emojibypass):
emoji = '🤔'
pack = 1
if (len(splat) == 3):
pack = splat[2]
emoji = splat[1]
elif (len(splat) == 2):
if splat[1].isnumeric():
pack = int(splat[1])
else:
emoji = splat[1]
packname = f'a{user.id}_by_{user.username}_{pack}'
packnick = f"@{user.username}'s pack {pack}"
cmd = '/newpack'
file = io.BytesIO()
if (not is_anim):
image = (await resize_photo(photo))
file.name = 'sticker.png'
image.save(file, 'PNG')
else:
packname += '_anim'
packnick += ' animated'
cmd = '/newanimated'
response = urllib.request.urlopen(urllib.request.Request(f'http://t.me/addstickers/{packname}'))
htmlstr = response.read().decode('utf8').split('\n')
if (' A <strong>Telegram</strong> user has created the <strong>Sticker Set</strong>.' not in htmlstr):
async with bot.conversation('Stickers') as conv:
(await conv.send_message('/addsticker'))
(await conv.get_response())
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.send_message(packname))
x = (await conv.get_response())
while (x.text == PACK_FULL):
pack += 1
packname = f'a{user.id}_by_{user.username}_{pack}'
packnick = f"@{user.username}'s pack {pack}"
(await args.edit((('`Spostando su ' + str(pack)) + " perchè non c'è più spazio.`")))
(await conv.send_message(packname))
x = (await conv.get_response())
if (x.text == '**Pack selezionato non valido.**'):
(await conv.send_message(cmd))
(await conv.get_response())
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.send_message(packnick))
(await conv.get_response())
(await bot.send_read_acknowledge(conv.chat_id))
if is_anim:
(await bot.forward_messages('Stickers', [message.id], args.chat_id))
else:
file.seek(0)
(await conv.send_file(file, force_document=True))
(await conv.get_response())
(await conv.send_message(emoji))
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.get_response())
(await conv.send_message('/publish'))
if is_anim:
(await conv.get_response())
(await conv.send_message(f'<{packnick}>'))
(await conv.get_response())
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.send_message('/skip'))
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.get_response())
(await conv.send_message(packname))
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.get_response())
(await bot.send_read_acknowledge(conv.chat_id))
(await args.edit(f'''**📚 Sticker aggiunto!**
**➡️ Per vederlo premi** [qui.](t.me/addstickers/{packname}).''', parse_mode='md'))
return
if is_anim:
(await bot.forward_messages('Stickers', [message.id], args.chat_id))
else:
file.seek(0)
(await conv.send_file(file, force_document=True))
(await conv.get_response())
(await conv.send_message(emoji))
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.get_response())
(await conv.send_message('/done'))
(await conv.get_response())
(await bot.send_read_acknowledge(conv.chat_id))
else:
(await args.edit('**🔁 Pack non trovato, ne sto creando uno nuovo...**'))
async with bot.conversation('Stickers') as conv:
(await conv.send_message(cmd))
(await conv.get_response())
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.send_message(packnick))
(await conv.get_response())
(await bot.send_read_acknowledge(conv.chat_id))
if is_anim:
(await bot.forward_messages('Stickers', [message.id], args.chat_id))
else:
file.seek(0)
(await conv.send_file(file, force_document=True))
(await conv.get_response())
(await conv.send_message(emoji))
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.get_response())
(await conv.send_message('/publish'))
if is_anim:
(await conv.get_response())
(await conv.send_message(f'<{packnick}>'))
(await conv.get_response())
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.send_message('/skip'))
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.get_response())
(await conv.send_message(packname))
(await bot.send_read_acknowledge(conv.chat_id))
(await conv.get_response())
(await bot.send_read_acknowledge(conv.chat_id))
(await args.edit(f'''**📚 Sticker aggiunto!**
**➡️ Per vederlo premi** [qui.](t.me/addstickers/{packname})''', parse_mode='md'))<|docstring|>For .kang command, kangs stickers or creates new ones.<|endoftext|>
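A hedged usage sketch (sent as a Telegram message while replying to a sticker or image; the emoji and pack-number arguments are optional):
# .sticker        -> adds the media with the default 🤔 emoji to pack 1
# .sticker 😎      -> adds it with 😎
# .sticker 😎 2    -> adds it with 😎 to the user's pack number 2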
|
c380082369a2aa4a85b0a2226ee470008892e8151e1705d9e83a8f4e719b07d6
|
async def resize_photo(photo):
' Resize the given photo to 512x512 '
image = Image.open(photo)
maxsize = (512, 512)
if ((image.width and image.height) < 512):
size1 = image.width
size2 = image.height
if (image.width > image.height):
scale = (512 / size1)
size1new = 512
size2new = (size2 * scale)
else:
scale = (512 / size2)
size1new = (size1 * scale)
size2new = 512
size1new = math.floor(size1new)
size2new = math.floor(size2new)
sizenew = (size1new, size2new)
image = image.resize(sizenew)
else:
image.thumbnail(maxsize)
return image
|
Resize the given photo to 512x512
|
userbot/plugins/stickers.py
|
resize_photo
|
znotash/X-tra-Telegram
| 2 |
python
|
async def resize_photo(photo):
' '
image = Image.open(photo)
maxsize = (512, 512)
if ((image.width and image.height) < 512):
size1 = image.width
size2 = image.height
if (image.width > image.height):
scale = (512 / size1)
size1new = 512
size2new = (size2 * scale)
else:
scale = (512 / size2)
size1new = (size1 * scale)
size2new = 512
size1new = math.floor(size1new)
size2new = math.floor(size2new)
sizenew = (size1new, size2new)
image = image.resize(sizenew)
else:
image.thumbnail(maxsize)
return image
|
async def resize_photo(photo):
' '
image = Image.open(photo)
maxsize = (512, 512)
if ((image.width and image.height) < 512):
size1 = image.width
size2 = image.height
if (image.width > image.height):
scale = (512 / size1)
size1new = 512
size2new = (size2 * scale)
else:
scale = (512 / size2)
size1new = (size1 * scale)
size2new = 512
size1new = math.floor(size1new)
size2new = math.floor(size2new)
sizenew = (size1new, size2new)
image = image.resize(sizenew)
else:
image.thumbnail(maxsize)
return image<|docstring|>Resize the given photo to 512x512<|endoftext|>
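A minimal sketch (resize_photo is a coroutine, so it has to be awaited; the input path is an assumption):
import asyncio, io
image = asyncio.run(resize_photo('input.png'))
buf = io.BytesIO()
buf.name = 'sticker.png'
image.save(buf, 'PNG')   # sticker-sized PNG, longest side at most 512 px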
|
5839f7f474154064491d6909f604d289e06dbd79dd0a8669d59165b2ed31eb0e
|
def parse_service_provider_opt():
'Parse service definition opts and returns result.'
def validate_name(name):
if (len(name) > 255):
raise n_exc.Invalid((_('Provider name is limited by 255 characters: %s') % name))
svc_providers_opt = cfg.CONF.service_providers.service_provider
res = []
for prov_def in svc_providers_opt:
split = prov_def.split(':')
try:
(svc_type, name, driver) = split[:3]
except ValueError:
raise n_exc.Invalid(_('Invalid service provider format'))
validate_name(name)
name = normalize_provider_name(name)
default = False
if ((len(split) == 4) and split[3]):
if (split[3] == 'default'):
default = True
else:
msg = (_("Invalid provider format. Last part should be 'default' or empty: %s") % prov_def)
LOG.error(msg)
raise n_exc.Invalid(msg)
if (svc_type not in constants.ALLOWED_SERVICES):
msg = (_("Service type '%(svc_type)s' is not allowed, allowed types: %(allowed)s") % {'svc_type': svc_type, 'allowed': constants.ALLOWED_SERVICES})
LOG.error(msg)
raise n_exc.Invalid(msg)
res.append({'service_type': svc_type, 'name': name, 'driver': driver, 'default': default})
return res
|
Parse service definition opts and returns result.
|
neutron/services/provider_configuration.py
|
parse_service_provider_opt
|
leenheer/neutron
| 3 |
python
|
def parse_service_provider_opt():
def validate_name(name):
if (len(name) > 255):
raise n_exc.Invalid((_('Provider name is limited by 255 characters: %s') % name))
svc_providers_opt = cfg.CONF.service_providers.service_provider
res = []
for prov_def in svc_providers_opt:
split = prov_def.split(':')
try:
(svc_type, name, driver) = split[:3]
except ValueError:
raise n_exc.Invalid(_('Invalid service provider format'))
validate_name(name)
name = normalize_provider_name(name)
default = False
if ((len(split) == 4) and split[3]):
if (split[3] == 'default'):
default = True
else:
msg = (_("Invalid provider format. Last part should be 'default' or empty: %s") % prov_def)
LOG.error(msg)
raise n_exc.Invalid(msg)
if (svc_type not in constants.ALLOWED_SERVICES):
msg = (_("Service type '%(svc_type)s' is not allowed, allowed types: %(allowed)s") % {'svc_type': svc_type, 'allowed': constants.ALLOWED_SERVICES})
LOG.error(msg)
raise n_exc.Invalid(msg)
res.append({'service_type': svc_type, 'name': name, 'driver': driver, 'default': default})
return res
|
def parse_service_provider_opt():
def validate_name(name):
if (len(name) > 255):
raise n_exc.Invalid((_('Provider name is limited by 255 characters: %s') % name))
svc_providers_opt = cfg.CONF.service_providers.service_provider
res = []
for prov_def in svc_providers_opt:
split = prov_def.split(':')
try:
(svc_type, name, driver) = split[:3]
except ValueError:
raise n_exc.Invalid(_('Invalid service provider format'))
validate_name(name)
name = normalize_provider_name(name)
default = False
if ((len(split) == 4) and split[3]):
if (split[3] == 'default'):
default = True
else:
msg = (_("Invalid provider format. Last part should be 'default' or empty: %s") % prov_def)
LOG.error(msg)
raise n_exc.Invalid(msg)
if (svc_type not in constants.ALLOWED_SERVICES):
msg = (_("Service type '%(svc_type)s' is not allowed, allowed types: %(allowed)s") % {'svc_type': svc_type, 'allowed': constants.ALLOWED_SERVICES})
LOG.error(msg)
raise n_exc.Invalid(msg)
res.append({'service_type': svc_type, 'name': name, 'driver': driver, 'default': default})
return res<|docstring|>Parse service definition opts and returns result.<|endoftext|>
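A sketch of the neutron.conf entries this parser expects (values are illustrative only):
# [service_providers]
# service_provider = LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
# service_provider = VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver
# Each entry is '<service_type>:<name>:<driver>[:default]'; the parser returns one dict per entry,
# with the name passed through normalize_provider_name() and 'default' set to True only when the
# optional fourth part is exactly 'default'.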
|
7dfe7a29da5d9471d4daea1303bcdbc12d7b29e39abc49863f7d325c82788cf4
|
def main():
' Main '
table = PrettyTable(['Task', 'Optimistic', 'Nominal', 'Pessimistic', 'Expected Duration', 'Standard Deviation'])
add_task = True
while add_task:
task_name = input('Task name: ')
optimistic_estimate = utils.get_user_input_number('Optimistic estimate: ')
nominal_estimate = utils.get_user_input_number('Nominal estimate: ')
pessimistic_estimate = utils.get_user_input_number('Pessimistic estimate: ')
add_task = utils.get_user_input_boolean('Add task? (Y/N): ')
print('\n')
expected_duration = utils.calculate_expected_duration(optimistic_estimate, nominal_estimate, pessimistic_estimate)
standard_deviation = utils.calculate_standard_deviation(pessimistic_estimate, optimistic_estimate)
table.add_row([task_name, optimistic_estimate, nominal_estimate, pessimistic_estimate, expected_duration, standard_deviation])
print(table)
|
Main
|
pert_estimator/__main__.py
|
main
|
andrewdieken/pert_estimator
| 0 |
python
|
def main():
' '
table = PrettyTable(['Task', 'Optimistic', 'Nominal', 'Pessimistic', 'Expected Duration', 'Standard Deviation'])
add_task = True
while add_task:
task_name = input('Task name: ')
optimistic_estimate = utils.get_user_input_number('Optimistic estimate: ')
nominal_estimate = utils.get_user_input_number('Nominal estimate: ')
pessimistic_estimate = utils.get_user_input_number('Pessimistic estimate: ')
add_task = utils.get_user_input_boolean('Add task? (Y/N): ')
print('\n')
expected_duration = utils.calculate_expected_duration(optimistic_estimate, nominal_estimate, pessimistic_estimate)
standard_deviation = utils.calculate_standard_deviation(pessimistic_estimate, optimistic_estimate)
table.add_row([task_name, optimistic_estimate, nominal_estimate, pessimistic_estimate, expected_duration, standard_deviation])
print(table)
|
def main():
' '
table = PrettyTable(['Task', 'Optimistic', 'Nominal', 'Pessimistic', 'Expected Duration', 'Standard Deviation'])
add_task = True
while add_task:
task_name = input('Task name: ')
optimistic_estimate = utils.get_user_input_number('Optimistic estimate: ')
nominal_estimate = utils.get_user_input_number('Nominal estimate: ')
pessimistic_estimate = utils.get_user_input_number('Pessimistic estimate: ')
add_task = utils.get_user_input_boolean('Add task? (Y/N): ')
print('\n')
expected_duration = utils.calculate_expected_duration(optimistic_estimate, nominal_estimate, pessimistic_estimate)
standard_deviation = utils.calculate_standard_deviation(pessimistic_estimate, optimistic_estimate)
table.add_row([task_name, optimistic_estimate, nominal_estimate, pessimistic_estimate, expected_duration, standard_deviation])
print(table)<|docstring|>Main<|endoftext|>
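A worked sketch of the standard PERT formulas the utils helpers are assumed to implement (their actual implementation is not shown in this record):
O, N, P = 2, 4, 12
expected = (O + 4 * N + P) / 6   # (2 + 16 + 12) / 6 = 5.0
std_dev = (P - O) / 6            # (12 - 2) / 6 ≈ 1.67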
|
8bc5a965138f8e94977bc059a4f8b41ef6f7c10aa1735ec5a864cc8c7d5515c2
|
def __init__(self, metamap_home):
' Interface to MetaMap using subprocess. This creates a\n command line call to a specified metamap process.\n '
MetaMapLite.__init__(self, metamap_home=metamap_home)
|
Interface to MetaMap using subprocess. This creates a
command line call to a specified metamap process.
|
pymetamap/SubprocessBackendLite.py
|
__init__
|
liquet-ai/pymetamap
| 151 |
python
|
def __init__(self, metamap_home):
' Interface to MetaMap using subprocess. This creates a\n command line call to a specified metamap process.\n '
MetaMapLite.__init__(self, metamap_home=metamap_home)
|
def __init__(self, metamap_home):
' Interface to MetaMap using subprocess. This creates a\n command line call to a specified metamap process.\n '
MetaMapLite.__init__(self, metamap_home=metamap_home)<|docstring|>Interface to MetaMap using subprocess. This creates a
command line call to a specified metamap process.<|endoftext|>
|
15436ff0d77b32b618523e96c842c36c8ac7aeadee2767a543d49d1e8086b049
|
def extract_concepts(self, sentences=None, ids=None, filename=None, restrict_to_sts=None, restrict_to_sources=None):
' extract_concepts takes a list of sentences and ids(optional)\n then returns a list of Concept objects extracted via\n MetaMapLite.\n\n Supported Options:\n Restrict to Semantic Types --restrict_to_sts\n Restrict to Sources --restrict_to_sources\n\n For information about the available options visit\n http://metamap.nlm.nih.gov/.\n\n Note: If an error is encountered the process will be closed\n and whatever was processed, if anything, will be\n returned along with the error found.\n '
if (((sentences is not None) and (filename is not None)) or ((sentences is None) and (filename is None))):
raise ValueError('You must either pass a list of sentences OR a filename.')
input_file = None
if (sentences is not None):
input_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.mmi')
else:
input_file = open(filename, 'r')
output_file_name = None
error = None
try:
if (sentences is not None):
if (ids is not None):
for (identifier, sentence) in zip(ids, sentences):
input_file.write('{0!r}|{1}\n'.format(identifier, sentence).encode('utf8'))
else:
for sentence in sentences:
input_file.write('{0!r}\n'.format(sentence).encode('utf8'))
input_file.flush()
input_file.close()
command = ['bash', os.path.join(self.metamap_home, 'metamaplite.sh')]
if restrict_to_sts:
if isinstance(restrict_to_sts, str):
restrict_to_sts = [restrict_to_sts]
if (len(restrict_to_sts) > 0):
command.append('--restrict_to_sts={}'.format(str(','.join(restrict_to_sts))))
if restrict_to_sources:
if isinstance(restrict_to_sources, str):
restrict_to_sources = [restrict_to_sources]
if (len(restrict_to_sources) > 0):
command.append('--restrict_to_sources')
command.append(str(','.join(restrict_to_sources)))
if (ids is not None):
command.append('--inputformat=sldiwi')
command.append(input_file.name)
command.append('--overwrite')
(output_file_name, file_extension) = os.path.splitext(input_file.name)
output_file_name += ('.' + 'mmi')
(output_file_name, file_extension) = os.path.splitext(input_file.name)
output_file_name += ('.' + 'mmi')
metamap_process = subprocess.Popen(command, stdout=subprocess.PIPE)
while (metamap_process.poll() is None):
stdout = str(metamap_process.stdout.readline())
if ('ERROR' in stdout):
metamap_process.terminate()
error = stdout.rstrip()
(output_file_name, file_extension) = os.path.splitext(input_file.name)
output_file_name += ('.' + 'mmi')
with open(output_file_name) as fd:
output = fd.read()
except:
pass
concepts = CorpusLite.load(output.splitlines())
return (concepts, error)
|
extract_concepts takes a list of sentences and ids(optional)
then returns a list of Concept objects extracted via
MetaMapLite.
Supported Options:
Restrict to Semantic Types --restrict_to_sts
Restrict to Sources --restrict_to_sources
For information about the available options visit
http://metamap.nlm.nih.gov/.
Note: If an error is encountered the process will be closed
and whatever was processed, if anything, will be
returned along with the error found.
|
pymetamap/SubprocessBackendLite.py
|
extract_concepts
|
liquet-ai/pymetamap
| 151 |
python
|
def extract_concepts(self, sentences=None, ids=None, filename=None, restrict_to_sts=None, restrict_to_sources=None):
' extract_concepts takes a list of sentences and ids(optional)\n then returns a list of Concept objects extracted via\n MetaMapLite.\n\n Supported Options:\n Restrict to Semantic Types --restrict_to_sts\n Restrict to Sources --restrict_to_sources\n\n For information about the available options visit\n http://metamap.nlm.nih.gov/.\n\n Note: If an error is encountered the process will be closed\n and whatever was processed, if anything, will be\n returned along with the error found.\n '
if (((sentences is not None) and (filename is not None)) or ((sentences is None) and (filename is None))):
raise ValueError('You must either pass a list of sentences OR a filename.')
input_file = None
if (sentences is not None):
input_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.mmi')
else:
input_file = open(filename, 'r')
output_file_name = None
error = None
try:
if (sentences is not None):
if (ids is not None):
for (identifier, sentence) in zip(ids, sentences):
input_file.write('{0!r}|{1}\n'.format(identifier, sentence).encode('utf8'))
else:
for sentence in sentences:
input_file.write('{0!r}\n'.format(sentence).encode('utf8'))
input_file.flush()
input_file.close()
command = ['bash', os.path.join(self.metamap_home, 'metamaplite.sh')]
if restrict_to_sts:
if isinstance(restrict_to_sts, str):
restrict_to_sts = [restrict_to_sts]
if (len(restrict_to_sts) > 0):
command.append('--restrict_to_sts={}'.format(str(','.join(restrict_to_sts))))
if restrict_to_sources:
if isinstance(restrict_to_sources, str):
restrict_to_sources = [restrict_to_sources]
if (len(restrict_to_sources) > 0):
command.append('--restrict_to_sources')
command.append(str(','.join(restrict_to_sources)))
if (ids is not None):
command.append('--inputformat=sldiwi')
command.append(input_file.name)
command.append('--overwrite')
(output_file_name, file_extension) = os.path.splitext(input_file.name)
output_file_name += ('.' + 'mmi')
(output_file_name, file_extension) = os.path.splitext(input_file.name)
output_file_name += ('.' + 'mmi')
metamap_process = subprocess.Popen(command, stdout=subprocess.PIPE)
while (metamap_process.poll() is None):
stdout = str(metamap_process.stdout.readline())
if ('ERROR' in stdout):
metamap_process.terminate()
error = stdout.rstrip()
(output_file_name, file_extension) = os.path.splitext(input_file.name)
output_file_name += ('.' + 'mmi')
with open(output_file_name) as fd:
output = fd.read()
except:
pass
concepts = CorpusLite.load(output.splitlines())
return (concepts, error)
|
def extract_concepts(self, sentences=None, ids=None, filename=None, restrict_to_sts=None, restrict_to_sources=None):
' extract_concepts takes a list of sentences and ids(optional)\n then returns a list of Concept objects extracted via\n MetaMapLite.\n\n Supported Options:\n Restrict to Semantic Types --restrict_to_sts\n Restrict to Sources --restrict_to_sources\n\n For information about the available options visit\n http://metamap.nlm.nih.gov/.\n\n Note: If an error is encountered the process will be closed\n and whatever was processed, if anything, will be\n returned along with the error found.\n '
if (((sentences is not None) and (filename is not None)) or ((sentences is None) and (filename is None))):
raise ValueError('You must either pass a list of sentences OR a filename.')
input_file = None
if (sentences is not None):
input_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.mmi')
else:
input_file = open(filename, 'r')
output_file_name = None
error = None
try:
if (sentences is not None):
if (ids is not None):
for (identifier, sentence) in zip(ids, sentences):
input_file.write('{0!r}|{1}\n'.format(identifier, sentence).encode('utf8'))
else:
for sentence in sentences:
input_file.write('{0!r}\n'.format(sentence).encode('utf8'))
input_file.flush()
input_file.close()
command = ['bash', os.path.join(self.metamap_home, 'metamaplite.sh')]
if restrict_to_sts:
if isinstance(restrict_to_sts, str):
restrict_to_sts = [restrict_to_sts]
if (len(restrict_to_sts) > 0):
command.append('--restrict_to_sts={}'.format(str(','.join(restrict_to_sts))))
if restrict_to_sources:
if isinstance(restrict_to_sources, str):
restrict_to_sources = [restrict_to_sources]
if (len(restrict_to_sources) > 0):
command.append('--restrict_to_sources')
command.append(str(','.join(restrict_to_sources)))
if (ids is not None):
command.append('--inputformat=sldiwi')
command.append(input_file.name)
command.append('--overwrite')
(output_file_name, file_extension) = os.path.splitext(input_file.name)
output_file_name += ('.' + 'mmi')
(output_file_name, file_extension) = os.path.splitext(input_file.name)
output_file_name += ('.' + 'mmi')
metamap_process = subprocess.Popen(command, stdout=subprocess.PIPE)
while (metamap_process.poll() is None):
stdout = str(metamap_process.stdout.readline())
if ('ERROR' in stdout):
metamap_process.terminate()
error = stdout.rstrip()
(output_file_name, file_extension) = os.path.splitext(input_file.name)
output_file_name += ('.' + 'mmi')
with open(output_file_name) as fd:
output = fd.read()
except:
pass
concepts = CorpusLite.load(output.splitlines())
return (concepts, error)<|docstring|>extract_concepts takes a list of sentences and ids(optional)
then returns a list of Concept objects extracted via
MetaMapLite.
Supported Options:
Restrict to Semantic Types --restrict_to_sts
Restrict to Sources --restrict_to_sources
For information about the available options visit
http://metamap.nlm.nih.gov/.
Note: If an error is encountered the process will be closed
and whatever was processed, if anything, will be
returned along with the error found.<|endoftext|>
|
8ddb91d12175aee5d9d255a0c20326bd3b08f838eb1216646186946b4c641290
|
def subdict_by_prefix(flat, prefix, key=None):
"\n Put key-value pairs in `flat` dict prefixed by `prefix` in a sub-dict.\n\n >>> flat = dict(\n ... prefix_alpha=1,\n ... prefix_beta=2,\n ... gamma=3,\n ... )\n >>> assert subdict_by_prefix(flat, 'prefix_') == dict(\n ... prefix=dict(alpha=1, beta=2),\n ... gamma=3,\n ... )\n\n Key of the sub-dictionary can be explicitly specified:\n\n >>> assert subdict_by_prefix(flat, 'prefix_', 'delta') == dict(\n ... delta=dict(alpha=1, beta=2),\n ... gamma=3,\n ... )\n\n If the sub-dictionary already exists, it is copied and then\n extended:\n\n >>> flat['prefix'] = dict(theta=4)\n >>> assert subdict_by_prefix(flat, 'prefix_') == dict(\n ... prefix=dict(alpha=1, beta=2, theta=4),\n ... gamma=3,\n ... )\n >>> assert flat['prefix'] == dict(theta=4) # i.e., not modified\n\n "
if (key is None):
key = prefix.rstrip('_')
nested = {}
nested[key] = subdict = flat.get(key, {}).copy()
assert isinstance(subdict, dict)
for (k, v) in flat.items():
if (k == key):
pass
elif k.startswith(prefix):
subdict[k[len(prefix):]] = v
else:
nested[k] = v
return nested
|
Put key-value pairs in `flat` dict prefixed by `prefix` in a sub-dict.
>>> flat = dict(
... prefix_alpha=1,
... prefix_beta=2,
... gamma=3,
... )
>>> assert subdict_by_prefix(flat, 'prefix_') == dict(
... prefix=dict(alpha=1, beta=2),
... gamma=3,
... )
Key of the sub-dictionary can be explicitly specified:
>>> assert subdict_by_prefix(flat, 'prefix_', 'delta') == dict(
... delta=dict(alpha=1, beta=2),
... gamma=3,
... )
If the sub-dictionary already exists, it is copied and then
extended:
>>> flat['prefix'] = dict(theta=4)
>>> assert subdict_by_prefix(flat, 'prefix_') == dict(
... prefix=dict(alpha=1, beta=2, theta=4),
... gamma=3,
... )
>>> assert flat['prefix'] == dict(theta=4) # i.e., not modified
|
tc_gan/utils/dicts.py
|
subdict_by_prefix
|
ahmadianlab/tc-gan
| 4 |
python
|
def subdict_by_prefix(flat, prefix, key=None):
"\n Put key-value pairs in `flat` dict prefixed by `prefix` in a sub-dict.\n\n >>> flat = dict(\n ... prefix_alpha=1,\n ... prefix_beta=2,\n ... gamma=3,\n ... )\n >>> assert subdict_by_prefix(flat, 'prefix_') == dict(\n ... prefix=dict(alpha=1, beta=2),\n ... gamma=3,\n ... )\n\n Key of the sub-dictionary can be explicitly specified:\n\n >>> assert subdict_by_prefix(flat, 'prefix_', 'delta') == dict(\n ... delta=dict(alpha=1, beta=2),\n ... gamma=3,\n ... )\n\n If the sub-dictionary already exists, it is copied and then\n extended:\n\n >>> flat['prefix'] = dict(theta=4)\n >>> assert subdict_by_prefix(flat, 'prefix_') == dict(\n ... prefix=dict(alpha=1, beta=2, theta=4),\n ... gamma=3,\n ... )\n >>> assert flat['prefix'] == dict(theta=4) # i.e., not modified\n\n "
if (key is None):
key = prefix.rstrip('_')
nested = {}
nested[key] = subdict = flat.get(key, {}).copy()
assert isinstance(subdict, dict)
for (k, v) in flat.items():
if (k == key):
pass
elif k.startswith(prefix):
subdict[k[len(prefix):]] = v
else:
nested[k] = v
return nested
|
def subdict_by_prefix(flat, prefix, key=None):
"\n Put key-value pairs in `flat` dict prefixed by `prefix` in a sub-dict.\n\n >>> flat = dict(\n ... prefix_alpha=1,\n ... prefix_beta=2,\n ... gamma=3,\n ... )\n >>> assert subdict_by_prefix(flat, 'prefix_') == dict(\n ... prefix=dict(alpha=1, beta=2),\n ... gamma=3,\n ... )\n\n Key of the sub-dictionary can be explicitly specified:\n\n >>> assert subdict_by_prefix(flat, 'prefix_', 'delta') == dict(\n ... delta=dict(alpha=1, beta=2),\n ... gamma=3,\n ... )\n\n If the sub-dictionary already exists, it is copied and then\n extended:\n\n >>> flat['prefix'] = dict(theta=4)\n >>> assert subdict_by_prefix(flat, 'prefix_') == dict(\n ... prefix=dict(alpha=1, beta=2, theta=4),\n ... gamma=3,\n ... )\n >>> assert flat['prefix'] == dict(theta=4) # i.e., not modified\n\n "
if (key is None):
key = prefix.rstrip('_')
nested = {}
nested[key] = subdict = flat.get(key, {}).copy()
assert isinstance(subdict, dict)
for (k, v) in flat.items():
if (k == key):
pass
elif k.startswith(prefix):
subdict[k[len(prefix):]] = v
else:
nested[k] = v
return nested<|docstring|>Put key-value pairs in `flat` dict prefixed by `prefix` in a sub-dict.
>>> flat = dict(
... prefix_alpha=1,
... prefix_beta=2,
... gamma=3,
... )
>>> assert subdict_by_prefix(flat, 'prefix_') == dict(
... prefix=dict(alpha=1, beta=2),
... gamma=3,
... )
Key of the sub-dictionary can be explicitly specified:
>>> assert subdict_by_prefix(flat, 'prefix_', 'delta') == dict(
... delta=dict(alpha=1, beta=2),
... gamma=3,
... )
If the sub-dictionary already exists, it is copied and then
extended:
>>> flat['prefix'] = dict(theta=4)
>>> assert subdict_by_prefix(flat, 'prefix_') == dict(
... prefix=dict(alpha=1, beta=2, theta=4),
... gamma=3,
... )
>>> assert flat['prefix'] == dict(theta=4) # i.e., not modified<|endoftext|>
|
2d743c6724ba054b5f6cc9aadda787b74263dab49d258539768c7ba5f494af0d
|
def iteritemsdeep(dct):
"\n Works like ``dict.iteritems`` but iterate over all descendant items\n\n >>> dct = dict(a=1, b=2, c=dict(d=3, e=4))\n >>> sorted(iteritemsdeep(dct))\n [(('a',), 1), (('b',), 2), (('c', 'd'), 3), (('c', 'e'), 4)]\n\n "
for (key, val) in dct.items():
if isinstance(val, dict):
for (key_child, val_child) in iteritemsdeep(val):
(yield (((key,) + key_child), val_child))
else:
(yield ((key,), val))
|
Works like ``dict.iteritems`` but iterate over all descendant items
>>> dct = dict(a=1, b=2, c=dict(d=3, e=4))
>>> sorted(iteritemsdeep(dct))
[(('a',), 1), (('b',), 2), (('c', 'd'), 3), (('c', 'e'), 4)]
|
tc_gan/utils/dicts.py
|
iteritemsdeep
|
ahmadianlab/tc-gan
| 4 |
python
|
def iteritemsdeep(dct):
"\n Works like ``dict.iteritems`` but iterate over all descendant items\n\n >>> dct = dict(a=1, b=2, c=dict(d=3, e=4))\n >>> sorted(iteritemsdeep(dct))\n [(('a',), 1), (('b',), 2), (('c', 'd'), 3), (('c', 'e'), 4)]\n\n "
for (key, val) in dct.items():
if isinstance(val, dict):
for (key_child, val_child) in iteritemsdeep(val):
(yield (((key,) + key_child), val_child))
else:
(yield ((key,), val))
|
def iteritemsdeep(dct):
"\n Works like ``dict.iteritems`` but iterate over all descendant items\n\n >>> dct = dict(a=1, b=2, c=dict(d=3, e=4))\n >>> sorted(iteritemsdeep(dct))\n [(('a',), 1), (('b',), 2), (('c', 'd'), 3), (('c', 'e'), 4)]\n\n "
for (key, val) in dct.items():
if isinstance(val, dict):
for (key_child, val_child) in iteritemsdeep(val):
(yield (((key,) + key_child), val_child))
else:
(yield ((key,), val))<|docstring|>Works like ``dict.iteritems`` but iterate over all descendant items
>>> dct = dict(a=1, b=2, c=dict(d=3, e=4))
>>> sorted(iteritemsdeep(dct))
[(('a',), 1), (('b',), 2), (('c', 'd'), 3), (('c', 'e'), 4)]<|endoftext|>
|
1e593a67c974ab801b6d770b598bcb2a41021c377f74ca999cfc5c512dbcbeb1
|
def getdeep(dct, key):
"\n Get deeply nested value of a dict-like object `dct`.\n\n >>> dct = {'a': {'b': {'c': 1}}}\n >>> getdeep(dct, 'a.b.c')\n 1\n >>> getdeep(dct, 'a.b.d')\n Traceback (most recent call last):\n ...\n KeyError: 'd'\n\n "
if (not isinstance(key, tuple)):
key = key.split('.')
for k in key[:(- 1)]:
dct = dct[k]
return dct[key[(- 1)]]
|
Get deeply nested value of a dict-like object `dct`.
>>> dct = {'a': {'b': {'c': 1}}}
>>> getdeep(dct, 'a.b.c')
1
>>> getdeep(dct, 'a.b.d')
Traceback (most recent call last):
...
KeyError: 'd'
|
tc_gan/utils/dicts.py
|
getdeep
|
ahmadianlab/tc-gan
| 4 |
python
|
def getdeep(dct, key):
"\n Get deeply nested value of a dict-like object `dct`.\n\n >>> dct = {'a': {'b': {'c': 1}}}\n >>> getdeep(dct, 'a.b.c')\n 1\n >>> getdeep(dct, 'a.b.d')\n Traceback (most recent call last):\n ...\n KeyError: 'd'\n\n "
if (not isinstance(key, tuple)):
key = key.split('.')
for k in key[:(- 1)]:
dct = dct[k]
return dct[key[(- 1)]]
|
def getdeep(dct, key):
"\n Get deeply nested value of a dict-like object `dct`.\n\n >>> dct = {'a': {'b': {'c': 1}}}\n >>> getdeep(dct, 'a.b.c')\n 1\n >>> getdeep(dct, 'a.b.d')\n Traceback (most recent call last):\n ...\n KeyError: 'd'\n\n "
if (not isinstance(key, tuple)):
key = key.split('.')
for k in key[:(- 1)]:
dct = dct[k]
return dct[key[(- 1)]]<|docstring|>Get deeply nested value of a dict-like object `dct`.
>>> dct = {'a': {'b': {'c': 1}}}
>>> getdeep(dct, 'a.b.c')
1
>>> getdeep(dct, 'a.b.d')
Traceback (most recent call last):
...
KeyError: 'd'<|endoftext|>
|
1a25bc2d93cb27a9a14272531da45e7cbcf762533599ffae7fbd70a12eeabd69
|
def transform_entity_synonyms(synonyms, known_synonyms: Optional[Dict[(Text, Any)]]=None) -> Dict[(Text, Any)]:
'Transforms the entity synonyms into a text->value dictionary'
entity_synonyms = (known_synonyms if known_synonyms else {})
for s in synonyms:
if (('value' in s) and ('synonyms' in s)):
for synonym in s['synonyms']:
entity_synonyms[synonym] = s['value']
return entity_synonyms
|
Transforms the entity synonyms into a text->value dictionary
|
rasa/shared/nlu/training_data/util.py
|
transform_entity_synonyms
|
pranavdurai10/rasa
| 0 |
python
|
def transform_entity_synonyms(synonyms, known_synonyms: Optional[Dict[(Text, Any)]]=None) -> Dict[(Text, Any)]:
entity_synonyms = (known_synonyms if known_synonyms else {})
for s in synonyms:
if (('value' in s) and ('synonyms' in s)):
for synonym in s['synonyms']:
entity_synonyms[synonym] = s['value']
return entity_synonyms
|
def transform_entity_synonyms(synonyms, known_synonyms: Optional[Dict[(Text, Any)]]=None) -> Dict[(Text, Any)]:
entity_synonyms = (known_synonyms if known_synonyms else {})
for s in synonyms:
if (('value' in s) and ('synonyms' in s)):
for synonym in s['synonyms']:
entity_synonyms[synonym] = s['value']
return entity_synonyms<|docstring|>Transforms the entity synonyms into a text->value dictionary<|endoftext|>
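A small usage sketch for the helper above; the synonym payload is invented for illustration and the import path is inferred from the file path listed in this record:

from rasa.shared.nlu.training_data.util import transform_entity_synonyms  # assumed import path

synonyms = [
    {'value': 'New York City', 'synonyms': ['NYC', 'new york']},
    {'value': 'credit card', 'synonyms': ['credit cards']},
]
mapping = transform_entity_synonyms(synonyms)
# mapping == {'NYC': 'New York City', 'new york': 'New York City', 'credit cards': 'credit card'}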
|
f52976017f1ccc7a024070ec9ce9be666ff1588020ed7b9c8b9c8ed6d4fd448f
|
def remove_untrainable_entities_from(example: Dict[(Text, Any)]) -> None:
'Remove untrainable entities from serialised training example `example`.\n\n Entities with an untrainable extractor will be removed. Untrainable extractors\n are defined in `rasa.nlu.constants.PRETRAINED_EXTRACTORS`.\n\n Args:\n example: Serialised training example to inspect.\n '
example_entities = example.get(ENTITIES)
if (not example_entities):
return None
trainable_entities = []
for entity in example_entities:
if (entity.get(EXTRACTOR) in PRETRAINED_EXTRACTORS):
logger.debug(f"Excluding entity '{json.dumps(entity)}' from training data. Entity examples extracted by the following classes are not dumped to training data in markdown format: `{'`, `'.join(sorted(PRETRAINED_EXTRACTORS))}`.")
else:
trainable_entities.append(entity)
example[ENTITIES] = trainable_entities
|
Remove untrainable entities from serialised training example `example`.
Entities with an untrainable extractor will be removed. Untrainable extractors
are defined in `rasa.nlu.constants.PRETRAINED_EXTRACTORS`.
Args:
example: Serialised training example to inspect.
|
rasa/shared/nlu/training_data/util.py
|
remove_untrainable_entities_from
|
pranavdurai10/rasa
| 0 |
python
|
def remove_untrainable_entities_from(example: Dict[(Text, Any)]) -> None:
'Remove untrainable entities from serialised training example `example`.\n\n Entities with an untrainable extractor will be removed. Untrainable extractors\n are defined in `rasa.nlu.constants.PRETRAINED_EXTRACTORS`.\n\n Args:\n example: Serialised training example to inspect.\n '
example_entities = example.get(ENTITIES)
if (not example_entities):
return None
trainable_entities = []
for entity in example_entities:
if (entity.get(EXTRACTOR) in PRETRAINED_EXTRACTORS):
logger.debug(f"Excluding entity '{json.dumps(entity)}' from training data. Entity examples extracted by the following classes are not dumped to training data in markdown format: `{'`, `'.join(sorted(PRETRAINED_EXTRACTORS))}`.")
else:
trainable_entities.append(entity)
example[ENTITIES] = trainable_entities
|
def remove_untrainable_entities_from(example: Dict[(Text, Any)]) -> None:
'Remove untrainable entities from serialised training example `example`.\n\n Entities with an untrainable extractor will be removed. Untrainable extractors\n are defined in `rasa.nlu.constants.PRETRAINED_EXTRACTORS`.\n\n Args:\n example: Serialised training example to inspect.\n '
example_entities = example.get(ENTITIES)
if (not example_entities):
return None
trainable_entities = []
for entity in example_entities:
if (entity.get(EXTRACTOR) in PRETRAINED_EXTRACTORS):
logger.debug(f"Excluding entity '{json.dumps(entity)}' from training data. Entity examples extracted by the following classes are not dumped to training data in markdown format: `{'`, `'.join(sorted(PRETRAINED_EXTRACTORS))}`.")
else:
trainable_entities.append(entity)
example[ENTITIES] = trainable_entities<|docstring|>Remove untrainable entities from serialised training example `example`.
Entities with an untrainable extractor will be removed. Untrainable extractors
are defined in `rasa.nlu.constants.PRETRAINED_EXTRACTORS`.
Args:
example: Serialised training example to inspect.<|endoftext|>
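A hedged sketch of the filtering behaviour described above. The dictionary keys and the extractor names are assumptions: the real key names come from the ENTITIES and EXTRACTOR constants and the pretrained set from rasa.nlu.constants.PRETRAINED_EXTRACTORS, none of which are shown in this record:

# Hypothetical serialised example; 'entities'/'extractor' are assumed to be the
# values of the ENTITIES and EXTRACTOR constants, and 'DucklingEntityExtractor'
# is assumed to appear in PRETRAINED_EXTRACTORS.
example = {
    'text': 'book a table for 2 people',
    'entities': [
        {'entity': 'number', 'value': '2', 'extractor': 'DucklingEntityExtractor'},
        {'entity': 'item', 'value': 'table', 'extractor': 'DIETClassifier'},
    ],
}
remove_untrainable_entities_from(example)
# Only the DIETClassifier entity should remain in example['entities'];
# the pretrained-extractor entity is dropped and logged at debug level.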
|
ce20e87749f4c973724b42ecfefdd228205dc7e05e4e9e7eeda319f1e2200f19
|
def encode_string(s: Text) -> Text:
'Return an encoded python string.'
def replace(match: Match) -> Text:
return ESCAPE_DCT[match.group(GROUP_COMPLETE_MATCH)]
return ESCAPE.sub(replace, s)
|
Return an encoded python string.
|
rasa/shared/nlu/training_data/util.py
|
encode_string
|
pranavdurai10/rasa
| 0 |
python
|
def encode_string(s: Text) -> Text:
def replace(match: Match) -> Text:
return ESCAPE_DCT[match.group(GROUP_COMPLETE_MATCH)]
return ESCAPE.sub(replace, s)
|
def encode_string(s: Text) -> Text:
def replace(match: Match) -> Text:
return ESCAPE_DCT[match.group(GROUP_COMPLETE_MATCH)]
return ESCAPE.sub(replace, s)<|docstring|>Return an encoded python string.<|endoftext|>
|
f2ff3bf5ab14fc7dc0e5846ff1b4fb76a837cfabfc4b0c049aa7e1743b62af9d
|
def decode_string(s: Text) -> Text:
'Return a decoded python string.'
def replace(match: Match) -> Text:
return UNESCAPE_DCT[match.group(GROUP_COMPLETE_MATCH)]
return UNESCAPE.sub(replace, s)
|
Return a decoded python string.
|
rasa/shared/nlu/training_data/util.py
|
decode_string
|
pranavdurai10/rasa
| 0 |
python
|
def decode_string(s: Text) -> Text:
def replace(match: Match) -> Text:
return UNESCAPE_DCT[match.group(GROUP_COMPLETE_MATCH)]
return UNESCAPE.sub(replace, s)
|
def decode_string(s: Text) -> Text:
def replace(match: Match) -> Text:
return UNESCAPE_DCT[match.group(GROUP_COMPLETE_MATCH)]
return UNESCAPE.sub(replace, s)<|docstring|>Return a decoded python string.<|endoftext|>
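The two helpers above apply a regex substitution driven by lookup tables (ESCAPE/UNESCAPE and their dictionaries) defined elsewhere in the module and not shown here. A self-contained sketch of the same pattern, with an invented two-entry escape table, would look like this:

import re

ESCAPE_DCT = {'\n': '\\n', '\t': '\\t'}          # invented subset for illustration
UNESCAPE_DCT = {v: k for k, v in ESCAPE_DCT.items()}
ESCAPE = re.compile('|'.join(re.escape(k) for k in ESCAPE_DCT))
UNESCAPE = re.compile('|'.join(re.escape(k) for k in UNESCAPE_DCT))

def encode_string(s):
    # Replace each raw control character with its escaped form.
    return ESCAPE.sub(lambda m: ESCAPE_DCT[m.group(0)], s)

def decode_string(s):
    # Reverse the substitution.
    return UNESCAPE.sub(lambda m: UNESCAPE_DCT[m.group(0)], s)

assert decode_string(encode_string('a\tb\nc')) == 'a\tb\nc'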
|
4a30e974a370ef57fd2395a29b2e50c05f9b35b4d2564a37ad014fc56da4f6ed
|
def build_entity(start: int, end: int, value: Text, entity_type: Text, role: Optional[Text]=None, group: Optional[Text]=None, **kwargs: Any) -> Dict[(Text, Any)]:
'Builds a standard entity dictionary.\n\n Adds additional keyword parameters.\n\n Args:\n start: start position of entity\n end: end position of entity\n value: text value of the entity\n entity_type: name of the entity type\n role: role of the entity\n group: group of the entity\n **kwargs: additional parameters\n\n Returns:\n an entity dictionary\n '
entity = {ENTITY_ATTRIBUTE_START: start, ENTITY_ATTRIBUTE_END: end, ENTITY_ATTRIBUTE_VALUE: value, ENTITY_ATTRIBUTE_TYPE: entity_type}
if role:
entity[ENTITY_ATTRIBUTE_ROLE] = role
if group:
entity[ENTITY_ATTRIBUTE_GROUP] = group
entity.update(kwargs)
return entity
|
Builds a standard entity dictionary.
Adds additional keyword parameters.
Args:
start: start position of entity
end: end position of entity
value: text value of the entity
entity_type: name of the entity type
role: role of the entity
group: group of the entity
**kwargs: additional parameters
Returns:
an entity dictionary
|
rasa/shared/nlu/training_data/util.py
|
build_entity
|
pranavdurai10/rasa
| 0 |
python
|
def build_entity(start: int, end: int, value: Text, entity_type: Text, role: Optional[Text]=None, group: Optional[Text]=None, **kwargs: Any) -> Dict[(Text, Any)]:
'Builds a standard entity dictionary.\n\n Adds additional keyword parameters.\n\n Args:\n start: start position of entity\n end: end position of entity\n value: text value of the entity\n entity_type: name of the entity type\n role: role of the entity\n group: group of the entity\n **kwargs: additional parameters\n\n Returns:\n an entity dictionary\n '
entity = {ENTITY_ATTRIBUTE_START: start, ENTITY_ATTRIBUTE_END: end, ENTITY_ATTRIBUTE_VALUE: value, ENTITY_ATTRIBUTE_TYPE: entity_type}
if role:
entity[ENTITY_ATTRIBUTE_ROLE] = role
if group:
entity[ENTITY_ATTRIBUTE_GROUP] = group
entity.update(kwargs)
return entity
|
def build_entity(start: int, end: int, value: Text, entity_type: Text, role: Optional[Text]=None, group: Optional[Text]=None, **kwargs: Any) -> Dict[(Text, Any)]:
'Builds a standard entity dictionary.\n\n Adds additional keyword parameters.\n\n Args:\n start: start position of entity\n end: end position of entity\n value: text value of the entity\n entity_type: name of the entity type\n role: role of the entity\n group: group of the entity\n **kwargs: additional parameters\n\n Returns:\n an entity dictionary\n '
entity = {ENTITY_ATTRIBUTE_START: start, ENTITY_ATTRIBUTE_END: end, ENTITY_ATTRIBUTE_VALUE: value, ENTITY_ATTRIBUTE_TYPE: entity_type}
if role:
entity[ENTITY_ATTRIBUTE_ROLE] = role
if group:
entity[ENTITY_ATTRIBUTE_GROUP] = group
entity.update(kwargs)
return entity<|docstring|>Builds a standard entity dictionary.
Adds additional keyword parameters.
Args:
start: start position of entity
end: end position of entity
value: text value of the entity
entity_type: name of the entity type
role: role of the entity
group: group of the entity
**kwargs: additional parameters
Returns:
an entity dictionary<|endoftext|>
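A usage sketch for build_entity. The plain key names in the expected output ('start', 'end', 'value', 'entity', 'role', 'group') are assumptions about what the ENTITY_ATTRIBUTE_* constants resolve to; they are not confirmed by this record:

entity = build_entity(5, 8, 'NYC', 'city', role='destination', confidence=0.98)
# Assuming the constants map to the plain strings named above, the result is:
# {'start': 5, 'end': 8, 'value': 'NYC', 'entity': 'city',
#  'role': 'destination', 'confidence': 0.98}
# Extra keyword arguments (here, confidence) are merged in via entity.update(kwargs).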
|
2c8d45b6f9054f524c6ad6b619623e0e0f38a982a373a3584c5ba682464974be
|
def load_dict(path):
'Load JSON files at given path into a dictionary.\n\n Dynamically load all the json files at the root of this module into a\n dictionary attribute "schema".\n\n The Key is the name of the json file (without extension)\n The Value is the json object\n\n E.g.::\n\n from core_schemas import core2\n user_schema = core2.schema["user"]\n '
def load(_path):
try:
with open(_path) as f:
module_name = os.path.splitext(os.path.basename(_path))[0]
scim_logger.debug('Loading {module_name:s} by {__module__:s}.{__name__:s}'.format(module_name=module_name, __module__=Model.load.__module__, __name__=Model.load.__name__))
return Model.load(f)
except JSONDecodeError as jde:
assert (jde is None), 'Failed to load example: {} from {}'.format(_path, __package__)
return {schema.id: schema for schema in (load(path) for path in glob.glob(os.path.join(path, '*.json')))}
|
Load JSON files at given path into a dictionary.
Dynamically load all the json files at the root of this module into a
dictionary attribute "schema".
The Key is the name of the json file (without extension)
The Value is the json object
E.g.::
from core_schemas import core2
user_schema = core2.schema["user"]
|
src/scimschema/core_schemas/__init__.py
|
load_dict
|
datakurre/scimschema
| 1 |
python
|
def load_dict(path):
'Load JSON files at given path into a dictionary.\n\n Dynamically load all the json files at the root of this module into a\n dictionary attribute "schema".\n\n The Key is the name of the json file (without extension)\n The Value is the json object\n\n E.g.::\n\n from core_schemas import core2\n user_schema = core2.schema["user"]\n '
def load(_path):
try:
with open(_path) as f:
module_name = os.path.splitext(os.path.basename(_path))[0]
scim_logger.debug('Loading {module_name:s} by {__module__:s}.{__name__:s}'.format(module_name=module_name, __module__=Model.load.__module__, __name__=Model.load.__name__))
return Model.load(f)
except JSONDecodeError as jde:
assert (jde is None), 'Failed to load example: {} from {}'.format(_path, __package__)
return {schema.id: schema for schema in (load(path) for path in glob.glob(os.path.join(path, '*.json')))}
|
def load_dict(path):
'Load JSON files at given path into a dictionary.\n\n Dynamically load all the json files at the root of this module into a\n dictionary attribute "schema".\n\n The Key is the name of the json file (without extension)\n The Value is the json object\n\n E.g.::\n\n from core_schemas import core2\n user_schema = core2.schema["user"]\n '
def load(_path):
try:
with open(_path) as f:
module_name = os.path.splitext(os.path.basename(_path))[0]
scim_logger.debug('Loading {module_name:s} by {__module__:s}.{__name__:s}'.format(module_name=module_name, __module__=Model.load.__module__, __name__=Model.load.__name__))
return Model.load(f)
except JSONDecodeError as jde:
assert (jde is None), 'Failed to load example: {} from {}'.format(_path, __package__)
return {schema.id: schema for schema in (load(path) for path in glob.glob(os.path.join(path, '*.json')))}<|docstring|>Load JSON files at given path into a dictionary.
Dynamically load all the json files at the root of this module into a
dictionary attribute "schema".
The Key is the name of the json file (without extension)
The Value is the json object
E.g.::
from core_schemas import core2
user_schema = core2.schema["user"]<|endoftext|>
|
b67b9400af5a1e93b7cd1df7ca184b29193645fb38114540da2c8639203d83a2
|
def __init__(self, original_error: Exception) -> None:
'Initialize an UnexpectedProtocolError with an original error.'
super().__init__(str(original_error))
self.original_error: Exception = original_error
|
Initialize an UnexpectedProtocolError with an original error.
|
api/src/opentrons/protocol_engine/errors/exceptions.py
|
__init__
|
Opentrons/labware
| 2 |
python
|
def __init__(self, original_error: Exception) -> None:
super().__init__(str(original_error))
self.original_error: Exception = original_error
|
def __init__(self, original_error: Exception) -> None:
super().__init__(str(original_error))
self.original_error: Exception = original_error<|docstring|>Initialize an UnexpectedProtocolError with an original error.<|endoftext|>
|
29b2aac840252c160436c97fd724135da9011a4d44ff8dcf0c0c12eedef37757
|
def closed(self, reason: str) -> None:
'Close the writer when the spider closes.\n\n This function is called automatically by Scrapy upon closing the spider.\n '
self.writer.close()
|
Close the writer when the spider closes.
This function is called automatically by Scrapy upon closing the spider.
|
clicha_scrapy/clicha_scrapy/spiders/un.py
|
closed
|
aksh-n/CliChA
| 0 |
python
|
def closed(self, reason: str) -> None:
'Close the writer when the spider closes.\n\n This function is called automatically by Scrapy upon closing the spider.\n '
self.writer.close()
|
def closed(self, reason: str) -> None:
'Close the writer when the spider closes.\n\n This function is called automatically by Scrapy upon closing the spider.\n '
self.writer.close()<|docstring|>Close the writer when the spider closes.
This function is called automatically by Scrapy upon closing the spider.<|endoftext|>
|
0ca2b56752c8d54a719e5bd3ad46b5794ef793972d33b007876de42f0987ab7c
|
def start_requests(self) -> Iterator[Request]:
'Initiate the scraping process by yielding requests to each page containing\n links to articles to be crawled.\n\n This function is called automatically by Scrapy when scraping starts.\n '
base_url = 'https://news.un.org/en/news/topic/climate-change'
for page_num in range(1, 52):
(yield Request(((base_url + '?page=') + str(page_num)), callback=self.parse))
|
Initiate the scraping process by yielding requests to each page containing
links to articles to be crawled.
This function is called automatically by Scrapy when scraping starts.
|
clicha_scrapy/clicha_scrapy/spiders/un.py
|
start_requests
|
aksh-n/CliChA
| 0 |
python
|
def start_requests(self) -> Iterator[Request]:
'Initiate the scraping process by yielding requests to each page containing\n links to articles to be crawled.\n\n This function is called automatically by Scrapy when scraping starts.\n '
base_url = 'https://news.un.org/en/news/topic/climate-change'
for page_num in range(1, 52):
(yield Request(((base_url + '?page=') + str(page_num)), callback=self.parse))
|
def start_requests(self) -> Iterator[Request]:
'Initiate the scraping process by yielding requests to each page containing\n links to articles to be crawled.\n\n This function is called automatically by Scrapy when scraping starts.\n '
base_url = 'https://news.un.org/en/news/topic/climate-change'
for page_num in range(1, 52):
(yield Request(((base_url + '?page=') + str(page_num)), callback=self.parse))<|docstring|>Initiate the scraping process by yielding requests to each page containing
links to articles to be crawled.
This function is called automatically by Scrapy when scraping starts.<|endoftext|>
|
a2a5cfec6aab25e2b2c99153ce86930434e528f59b33ec4a8612fa6e8a808383
|
def parse(self, response: TextResponse) -> Iterator[Request]:
'Parse each catalog page and extract article links.'
for each in response.xpath('//div[@class="view-content"]//h1/a/@href').getall():
(yield response.follow(each, callback=self.parse_article))
|
Parse each catalog page and extract article links.
|
clicha_scrapy/clicha_scrapy/spiders/un.py
|
parse
|
aksh-n/CliChA
| 0 |
python
|
def parse(self, response: TextResponse) -> Iterator[Request]:
for each in response.xpath('//div[@class="view-content"]//h1/a/@href').getall():
(yield response.follow(each, callback=self.parse_article))
|
def parse(self, response: TextResponse) -> Iterator[Request]:
for each in response.xpath('//div[@class="view-content"]//h1/a/@href').getall():
(yield response.follow(each, callback=self.parse_article))<|docstring|>Parse each catalog page and extract article links.<|endoftext|>
|
1c34cc1de1aa7246ee669d669c834a6ca8a6a7b4e45197c5de3b60a1d0fc0b99
|
def parse_article(self, response: TextResponse) -> None:
'Parse each article to extract its content.'
title = response.xpath('//h1/text()').get().strip()
body_path = '(//div[@class="content"]//p | //div[@class="content"]//h3)/text()'
body = str.join(' ', (each.strip() for each in response.xpath(body_path).getall()))
if ((not body) or body.isspace()):
return
self.writer.append_article(((title + '\n') + body))
|
Parse each article to extract its content.
|
clicha_scrapy/clicha_scrapy/spiders/un.py
|
parse_article
|
aksh-n/CliChA
| 0 |
python
|
def parse_article(self, response: TextResponse) -> None:
title = response.xpath('//h1/text()').get().strip()
body_path = '(//div[@class="content"]//p | //div[@class="content"]//h3)/text()'
body = str.join(' ', (each.strip() for each in response.xpath(body_path).getall()))
if ((not body) or body.isspace()):
return
self.writer.append_article(((title + '\n') + body))
|
def parse_article(self, response: TextResponse) -> None:
title = response.xpath('//h1/text()').get().strip()
body_path = '(//div[@class="content"]//p | //div[@class="content"]//h3)/text()'
body = str.join(' ', (each.strip() for each in response.xpath(body_path).getall()))
if ((not body) or body.isspace()):
return
self.writer.append_article(((title + '\n') + body))<|docstring|>Parse each article to extract its content.<|endoftext|>
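The spider callbacks above rely on two XPath expressions: the article title from the h1 element and the union of p and h3 text nodes inside the content div. A standalone check of those selectors with scrapy's Selector on an invented HTML snippet (no crawl needed):

from scrapy.selector import Selector

html = '''<h1> Oceans are warming </h1>
<div class="content"><p>First paragraph.</p><h3>A subheading</h3><p>Second paragraph.</p></div>'''
sel = Selector(text=html)
title = sel.xpath('//h1/text()').get().strip()
body_path = '(//div[@class="content"]//p | //div[@class="content"]//h3)/text()'
body = ' '.join(each.strip() for each in sel.xpath(body_path).getall())
print(title)  # Oceans are warming
print(body)   # First paragraph. A subheading Second paragraph.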
|
5b9cca462f8466b14ee320c123d1ff452fbbe62fd6abd165d2654aa5da857ae0
|
def get_key_for_purpose_and_type(self, purpose, key_type):
'\n Gets a list of keys that match the purpose and key_type, and returns the first key in that list\n Note, if there are many keys that match the criteria, the one you get back will be random from that list\n :returns: A key object that matches the criteria\n '
key = [key for key in self.keys.values() if ((key.purpose == purpose) and (key.key_type == key_type))]
try:
return key[0]
except IndexError:
return None
|
Gets a list of keys that match the purpose and key_type, and returns the first key in that list
Note, if there are many keys that match the criteria, the one you get back will be random from that list
:returns: A key object that matches the criteria
|
sdc/crypto/key_store.py
|
get_key_for_purpose_and_type
|
uk-gov-mirror/ONSdigital.sdc-cryptography
| 3 |
python
|
def get_key_for_purpose_and_type(self, purpose, key_type):
'\n Gets a list of keys that match the purpose and key_type, and returns the first key in that list\n Note, if there are many keys that match the criteria, the one you get back will be random from that list\n :returns: A key object that matches the criteria\n '
key = [key for key in self.keys.values() if ((key.purpose == purpose) and (key.key_type == key_type))]
try:
return key[0]
except IndexError:
return None
|
def get_key_for_purpose_and_type(self, purpose, key_type):
'\n Gets a list of keys that match the purpose and key_type, and returns the first key in that list\n Note, if there are many keys that match the criteria, the one you get back will be random from that list\n :returns: A key object that matches the criteria\n '
key = [key for key in self.keys.values() if ((key.purpose == purpose) and (key.key_type == key_type))]
try:
return key[0]
except IndexError:
return None<|docstring|>Gets a list of keys that match the purpose and key_type, and returns the first key in that list
Note, if there are many keys that match the criteria, the one you get back will be random from that list
:returns: A key object that matches the criteria<|endoftext|>
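The lookup above simply filters self.keys by two attributes and returns the first hit, or None when nothing matches. A hedged illustration of that selection logic using simple stand-in key objects, since the construction of the real key store is not shown in this record:

from types import SimpleNamespace

# Stand-in key objects exposing the two attributes the method inspects.
keys = {
    'kid-1': SimpleNamespace(purpose='authentication', key_type='private'),
    'kid-2': SimpleNamespace(purpose='submission', key_type='public'),
}
matches = [k for k in keys.values() if k.purpose == 'submission' and k.key_type == 'public']
first = matches[0] if matches else None   # mirrors the try/except IndexError in the method
print(first.key_type)  # public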
|
bcb697ea11fcaefab78031c02a3e616c70cbc42288a268a149fb3e537b2bee9e
|
def load_annotations(self):
    'Load annotations for REDS dataset.\n\n Returns:\n dict: Returned dict for LQ and GT pairs.\n '
with open(self.ann_file, 'r') as fin:
keys = [v.strip().split('.')[0] for v in fin]
if (self.val_partition == 'REDS4'):
val_partition = [f'{v:03d}' for v in range(180, 184)]
elif (self.val_partition == 'official'):
val_partition = [f'{v:03d}' for v in range(180, 200)]
elif (self.val_partition == 'test'):
val_partition = [f'{v:03d}' for v in range(1, 11)]
elif (self.val_partition == 'test_extra_3'):
val_partition = [f'{v:03d}' for v in range(1, 8)]
elif (self.val_partition == 'test_extra_1'):
val_partition = [f'{v:03d}' for v in range(1, 5)]
elif (self.val_partition == 'test_extra'):
val_partition = [f'{v:03d}' for v in range(1, 18)]
elif (self.val_partition == 'val_1'):
val_partition = ['000']
elif (self.val_partition == 'test_1'):
val_partition = ['002']
elif (self.val_partition == 'train_1'):
val_partition = ['001']
elif (self.val_partition == 'train_s'):
val_partition = [f'{v:03d}' for v in range(1, 6)]
elif (self.val_partition == 'test_s'):
val_partition = [f'{v:03d}' for v in range(180, 184)]
elif (self.val_partition == 'train'):
val_partition = [f'{v:03d}' for v in range(1, 201)]
elif (self.val_partition == 'finetune'):
val_partition = [f'{v:03d}' for v in [35, 36, 60, 55, 19, 20, 27, 45, 1, 5, 117, 56]]
else:
raise ValueError(f'Wrong validation partition {self.val_partition}.Supported ones are ["official", "REDS4"]')
if self.test_mode:
keys = [v for v in keys if (v.split('/')[0] in val_partition)]
else:
keys = [v for v in keys if (v.split('/')[0] not in val_partition)]
data_infos = []
for key in keys:
data_infos.append(dict(lq_path=self.lq_folder, gt_path=self.gt_folder, key=key, max_frame_num=100, num_input_frames=self.num_input_frames))
return data_infos
|
Load annotations for REDS dataset.
Returns:
dict: Returned dict for LQ and GT pairs.
|
mmedit/datasets/sr_reds_dataset.py
|
load_annotations
|
chaowentao/ntire2021_compress
| 0 |
python
|
def load_annotations(self):
    'Load annotations for REDS dataset.\n\n Returns:\n dict: Returned dict for LQ and GT pairs.\n '
with open(self.ann_file, 'r') as fin:
keys = [v.strip().split('.')[0] for v in fin]
if (self.val_partition == 'REDS4'):
val_partition = [f'{v:03d}' for v in range(180, 184)]
elif (self.val_partition == 'official'):
val_partition = [f'{v:03d}' for v in range(180, 200)]
elif (self.val_partition == 'test'):
val_partition = [f'{v:03d}' for v in range(1, 11)]
elif (self.val_partition == 'test_extra_3'):
val_partition = [f'{v:03d}' for v in range(1, 8)]
elif (self.val_partition == 'test_extra_1'):
val_partition = [f'{v:03d}' for v in range(1, 5)]
elif (self.val_partition == 'test_extra'):
val_partition = [f'{v:03d}' for v in range(1, 18)]
elif (self.val_partition == 'val_1'):
val_partition = ['000']
elif (self.val_partition == 'test_1'):
val_partition = ['002']
elif (self.val_partition == 'train_1'):
val_partition = ['001']
elif (self.val_partition == 'train_s'):
val_partition = [f'{v:03d}' for v in range(1, 6)]
elif (self.val_partition == 'test_s'):
val_partition = [f'{v:03d}' for v in range(180, 184)]
elif (self.val_partition == 'train'):
val_partition = [f'{v:03d}' for v in range(1, 201)]
elif (self.val_partition == 'finetune'):
val_partition = [f'{v:03d}' for v in [35, 36, 60, 55, 19, 20, 27, 45, 1, 5, 117, 56]]
else:
raise ValueError(f'Wrong validation partition {self.val_partition}.Supported ones are ["official", "REDS4"]')
if self.test_mode:
keys = [v for v in keys if (v.split('/')[0] in val_partition)]
else:
keys = [v for v in keys if (v.split('/')[0] not in val_partition)]
data_infos = []
for key in keys:
data_infos.append(dict(lq_path=self.lq_folder, gt_path=self.gt_folder, key=key, max_frame_num=100, num_input_frames=self.num_input_frames))
return data_infos
|
def load_annotations(self):
    'Load annotations for REDS dataset.\n\n Returns:\n dict: Returned dict for LQ and GT pairs.\n '
with open(self.ann_file, 'r') as fin:
keys = [v.strip().split('.')[0] for v in fin]
if (self.val_partition == 'REDS4'):
val_partition = [f'{v:03d}' for v in range(180, 184)]
elif (self.val_partition == 'official'):
val_partition = [f'{v:03d}' for v in range(180, 200)]
elif (self.val_partition == 'test'):
val_partition = [f'{v:03d}' for v in range(1, 11)]
elif (self.val_partition == 'test_extra_3'):
val_partition = [f'{v:03d}' for v in range(1, 8)]
elif (self.val_partition == 'test_extra_1'):
val_partition = [f'{v:03d}' for v in range(1, 5)]
elif (self.val_partition == 'test_extra'):
val_partition = [f'{v:03d}' for v in range(1, 18)]
elif (self.val_partition == 'val_1'):
val_partition = ['000']
elif (self.val_partition == 'test_1'):
val_partition = ['002']
elif (self.val_partition == 'train_1'):
val_partition = ['001']
elif (self.val_partition == 'train_s'):
val_partition = [f'{v:03d}' for v in range(1, 6)]
elif (self.val_partition == 'test_s'):
val_partition = [f'{v:03d}' for v in range(180, 184)]
elif (self.val_partition == 'train'):
val_partition = [f'{v:03d}' for v in range(1, 201)]
elif (self.val_partition == 'finetune'):
val_partition = [f'{v:03d}' for v in [35, 36, 60, 55, 19, 20, 27, 45, 1, 5, 117, 56]]
else:
raise ValueError(f'Wrong validation partition {self.val_partition}.Supported ones are ["official", "REDS4"]')
if self.test_mode:
keys = [v for v in keys if (v.split('/')[0] in val_partition)]
else:
keys = [v for v in keys if (v.split('/')[0] not in val_partition)]
data_infos = []
for key in keys:
data_infos.append(dict(lq_path=self.lq_folder, gt_path=self.gt_folder, key=key, max_frame_num=100, num_input_frames=self.num_input_frames))
    return data_infos<|docstring|>Load annotations for REDS dataset.
Returns:
dict: Returned dict for LQ and GT pairs.<|endoftext|>
|
eab7e89f2fe930c252e9307491f0ec2151c09527a93d9f6ec70b92f42f2ffad3
|
def precision_recall(prediction, actual, include_f1=False, mode='total'):
"Calculate the precision and recall for a prediction on a dataset.\n\n Optionally calculate the f1 score as well.\n\n Args:\n prediction: A binary matrix representing the predictions on the\n dataset.\n actual: A binary matrix representing the actual true positives\n of the dataset.\n include_f1: Whether or not to include f1 in the return values.\n mode: One of 'total' or 'class'.\n In 'total' mode, the entire set is considered.\n In 'class' mode, the precision and recall is calculated\n for each class individually.\n Returns:\n A tuple containing (precision, recall).\n If include_f1 is True, the tuple contains (precision, recall, f1).\n "
if (mode == 'total'):
axis = None
elif (mode == 'class'):
axis = 0
else:
raise ValueError('The mode has to be either "total" or "class"')
truepos = np.logical_and(prediction, actual)
false = np.subtract(actual, prediction)
falsepos = (false < 0)
falseneg = (false > 0)
truepos = np.sum(truepos, axis=axis)
falsepos = np.sum(falsepos, axis=axis)
falseneg = np.sum(falseneg, axis=axis)
with np.errstate(divide='ignore', invalid='ignore'):
precision = (truepos / (truepos + falsepos))
recall = (truepos / (truepos + falseneg))
if (not np.isscalar(precision)):
precision[(~ np.isfinite(precision))] = 0
recall[(~ np.isfinite(recall))] = 0
if include_f1:
f1_score = ((2 * (precision * recall)) / (precision + recall))
if include_f1:
return (precision, recall, f1_score)
return (precision, recall)
|
Calculate the precision and recall for a prediction on a dataset.
Optionally calculate the f1 score as well.
Args:
prediction: A binary matrix representing the predictions on the
dataset.
actual: A binary matrix representing the actual true positives
of the dataset.
include_f1: Whether or not to include f1 in the return values.
mode: One of 'total' or 'class'.
In 'total' mode, the entire set is considered.
In 'class' mode, the precision and recall is calculated
for each class individually.
Returns:
A tuple containing (precision, recall).
If include_f1 is True, the tuple contains (precision, recall, f1).
|
testchallenge/scoring.py
|
precision_recall
|
CellProfiling/test-challenge
| 0 |
python
|
def precision_recall(prediction, actual, include_f1=False, mode='total'):
"Calculate the precision and recall for a prediction on a dataset.\n\n Optionally calculate the f1 score as well.\n\n Args:\n prediction: A binary matrix representing the predictions on the\n dataset.\n actual: A binary matrix representing the actual true positives\n of the dataset.\n include_f1: Whether or not to include f1 in the return values.\n mode: One of 'total' or 'class'.\n In 'total' mode, the entire set is considered.\n In 'class' mode, the precision and recall is calculated\n for each class individually.\n Returns:\n A tuple containing (precision, recall).\n If include_f1 is True, the tuple contains (precision, recall, f1).\n "
if (mode == 'total'):
axis = None
elif (mode == 'class'):
axis = 0
else:
raise ValueError('The mode has to be either "total" or "class"')
truepos = np.logical_and(prediction, actual)
false = np.subtract(actual, prediction)
falsepos = (false < 0)
falseneg = (false > 0)
truepos = np.sum(truepos, axis=axis)
falsepos = np.sum(falsepos, axis=axis)
falseneg = np.sum(falseneg, axis=axis)
with np.errstate(divide='ignore', invalid='ignore'):
precision = (truepos / (truepos + falsepos))
recall = (truepos / (truepos + falseneg))
if (not np.isscalar(precision)):
precision[(~ np.isfinite(precision))] = 0
recall[(~ np.isfinite(recall))] = 0
if include_f1:
f1_score = ((2 * (precision * recall)) / (precision + recall))
if include_f1:
return (precision, recall, f1_score)
return (precision, recall)
|
def precision_recall(prediction, actual, include_f1=False, mode='total'):
"Calculate the precision and recall for a prediction on a dataset.\n\n Optionally calculate the f1 score as well.\n\n Args:\n prediction: A binary matrix representing the predictions on the\n dataset.\n actual: A binary matrix representing the actual true positives\n of the dataset.\n include_f1: Whether or not to include f1 in the return values.\n mode: One of 'total' or 'class'.\n In 'total' mode, the entire set is considered.\n In 'class' mode, the precision and recall is calculated\n for each class individually.\n Returns:\n A tuple containing (precision, recall).\n If include_f1 is True, the tuple contains (precision, recall, f1).\n "
if (mode == 'total'):
axis = None
elif (mode == 'class'):
axis = 0
else:
raise ValueError('The mode has to be either "total" or "class"')
truepos = np.logical_and(prediction, actual)
false = np.subtract(actual, prediction)
falsepos = (false < 0)
falseneg = (false > 0)
truepos = np.sum(truepos, axis=axis)
falsepos = np.sum(falsepos, axis=axis)
falseneg = np.sum(falseneg, axis=axis)
with np.errstate(divide='ignore', invalid='ignore'):
precision = (truepos / (truepos + falsepos))
recall = (truepos / (truepos + falseneg))
if (not np.isscalar(precision)):
precision[(~ np.isfinite(precision))] = 0
recall[(~ np.isfinite(recall))] = 0
if include_f1:
f1_score = ((2 * (precision * recall)) / (precision + recall))
if include_f1:
return (precision, recall, f1_score)
return (precision, recall)<|docstring|>Calculate the precision and recall for a prediction on a dataset.
Optionally calculate the f1 score as well.
Args:
prediction: A binary matrix representing the predictions on the
dataset.
actual: A binary matrix representing the actual true positives
of the dataset.
include_f1: Whether or not to include f1 in the return values.
mode: One of 'total' or 'class'.
In 'total' mode, the entire set is considered.
In 'class' mode, the precision and recall is calculated
for each class individually.
Returns:
A tuple containing (precision, recall).
If include_f1 is True, the tuple contains (precision, recall, f1).<|endoftext|>
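A small worked check of precision_recall in its default 'total' mode, assuming the function is importable from testchallenge.scoring (the path listed in this record):

import numpy as np
from testchallenge.scoring import precision_recall  # assumed import path

prediction = np.array([[1, 0, 1],
                       [0, 1, 0]])
actual = np.array([[1, 0, 0],
                   [1, 1, 0]])
precision, recall, f1 = precision_recall(prediction, actual, include_f1=True)
# 2 true positives, 1 false positive, 1 false negative over the whole matrix,
# so precision == recall == f1 == 2/3.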
|
64a3f3e84bc6ee4fd6f4922d4e66bc36c7917fc994ae584e74cf2b909cbb875b
|
def jaccard_index(y_true, y_predict):
'Calculate the Jaccard index of the predictions on the true values.\n\n Also known as Jaccard similarity, Hamming score, or multi-label accuracy.\n\n Defined as:\n Let y_true=T, and y_predict=S.\n The Jaccard index is calculated as\n |intersection(T,S)|/|union(T,S)|\n\n Args:\n y_true: A list of binary vectors.\n The list should consist of the target vectors.\n\n y_predict: A list of binary vectors.\n The list should consist of the prediction vectors.\n Returns:\n The Jaccard index (jaccard similarity) of the predictions\n on the true labels.\n '
numerator = 0
denominator = 0
for (true_item, pred_item) in zip(y_true, y_predict):
if (len(true_item) != len(pred_item)):
raise ValueError('Array lengths do not agree')
true = set(np.where(true_item)[0])
pred = set(np.where(pred_item)[0])
intersection = true.intersection(pred)
union = true.union(pred)
numerator += len(intersection)
denominator += len(union)
return (numerator / denominator)
|
Calculate the Jaccard index of the predictions on the true values.
Also known as Jaccard similarity, Hamming score, or multi-label accuracy.
Defined as:
Let y_true=T, and y_predict=S.
The Jaccard index is calculated as
|intersection(T,S)|/|union(T,S)|
Args:
y_true: A list of binary vectors.
The list should consist of the target vectors.
y_predict: A list of binary vectors.
The list should consist of the prediction vectors.
Returns:
The Jaccard index (jaccard similarity) of the predictions
on the true labels.
|
testchallenge/scoring.py
|
jaccard_index
|
CellProfiling/test-challenge
| 0 |
python
|
def jaccard_index(y_true, y_predict):
'Calculate the Jaccard index of the predictions on the true values.\n\n Also known as Jaccard similarity, Hamming score, or multi-label accuracy.\n\n Defined as:\n Let y_true=T, and y_predict=S.\n The Jaccard index is calculated as\n |intersection(T,S)|/|union(T,S)|\n\n Args:\n y_true: A list of binary vectors.\n The list should consist of the target vectors.\n\n y_predict: A list of binary vectors.\n The list should consist of the prediction vectors.\n Returns:\n The Jaccard index (jaccard similarity) of the predictions\n on the true labels.\n '
numerator = 0
denominator = 0
for (true_item, pred_item) in zip(y_true, y_predict):
if (len(true_item) != len(pred_item)):
raise ValueError('Array lengths do not agree')
true = set(np.where(true_item)[0])
pred = set(np.where(pred_item)[0])
intersection = true.intersection(pred)
union = true.union(pred)
numerator += len(intersection)
denominator += len(union)
return (numerator / denominator)
|
def jaccard_index(y_true, y_predict):
'Calculate the Jaccard index of the predictions on the true values.\n\n Also known as Jaccard similarity, Hamming score, or multi-label accuracy.\n\n Defined as:\n Let y_true=T, and y_predict=S.\n The Jaccard index is calculated as\n |intersection(T,S)|/|union(T,S)|\n\n Args:\n y_true: A list of binary vectors.\n The list should consist of the target vectors.\n\n y_predict: A list of binary vectors.\n The list should consist of the prediction vectors.\n Returns:\n The Jaccard index (jaccard similarity) of the predictions\n on the true labels.\n '
numerator = 0
denominator = 0
for (true_item, pred_item) in zip(y_true, y_predict):
if (len(true_item) != len(pred_item)):
raise ValueError('Array lengths do not agree')
true = set(np.where(true_item)[0])
pred = set(np.where(pred_item)[0])
intersection = true.intersection(pred)
union = true.union(pred)
numerator += len(intersection)
denominator += len(union)
return (numerator / denominator)<|docstring|>Calculate the Jaccard index of the predictions on the true values.
Also known as Jaccard similarity, Hamming score, or multi-label accuracy.
Defined as:
Let y_true=T, and y_predict=S.
The Jaccard index is calculated as
|intersection(T,S)|/|union(T,S)|
Args:
y_true: A list of binary vectors.
The list should consist of the target vectors.
y_predict: A list of binary vectors.
The list should consist of the prediction vectors.
Returns:
The Jaccard index (jaccard similarity) of the predictions
on the true labels.<|endoftext|>
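A worked example of jaccard_index on two prediction/target pairs (again assuming the import path listed in this record):

from testchallenge.scoring import jaccard_index  # assumed import path

y_true = [[1, 0, 1, 0], [0, 1, 0, 0]]
y_pred = [[1, 0, 0, 0], [0, 1, 1, 0]]
print(jaccard_index(y_true, y_pred))
# Pair 1: intersection {0}, union {0, 2}; pair 2: intersection {1}, union {1, 2}.
# Summed over pairs: 2 / 4 = 0.5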
|
fe2335db708dda3fdbac5115110bd83d43308be67baaf3c9f45c998babb36bf7
|
def parse_solution_file(solution_file):
'Parse a solution file.'
ids = []
classes = []
with open(solution_file) as file_handle:
solution_reader = csv.reader(file_handle)
header = next(solution_reader, None)
if (header != HEADER):
raise ValueError('Incorrect header found: {}, should be: {}'.format(header, HEADER))
solution = sorted(list(solution_reader), key=(lambda x: x[0]))
for row in solution:
if (len(row) < 2):
raise ValueError('Bad row length: {}, should be at least {} for row {}'.format(len(row), len(HEADER), row))
row_classes = row[1:]
if any(((class_ not in POSSIBLE_CLASSES) for class_ in row_classes)):
raise ValueError('Unknown class found among: {}'.format(row_classes))
ids.append(row[0])
classes.append(row_classes)
return (ids, classes)
|
Parse a solution file.
|
testchallenge/scoring.py
|
parse_solution_file
|
CellProfiling/test-challenge
| 0 |
python
|
def parse_solution_file(solution_file):
ids = []
classes = []
with open(solution_file) as file_handle:
solution_reader = csv.reader(file_handle)
header = next(solution_reader, None)
if (header != HEADER):
raise ValueError('Incorrect header found: {}, should be: {}'.format(header, HEADER))
solution = sorted(list(solution_reader), key=(lambda x: x[0]))
for row in solution:
if (len(row) < 2):
raise ValueError('Bad row length: {}, should be at least {} for row {}'.format(len(row), len(HEADER), row))
row_classes = row[1:]
if any(((class_ not in POSSIBLE_CLASSES) for class_ in row_classes)):
raise ValueError('Unknown class found among: {}'.format(row_classes))
ids.append(row[0])
classes.append(row_classes)
return (ids, classes)
|
def parse_solution_file(solution_file):
ids = []
classes = []
with open(solution_file) as file_handle:
solution_reader = csv.reader(file_handle)
header = next(solution_reader, None)
if (header != HEADER):
raise ValueError('Incorrect header found: {}, should be: {}'.format(header, HEADER))
solution = sorted(list(solution_reader), key=(lambda x: x[0]))
for row in solution:
if (len(row) < 2):
raise ValueError('Bad row length: {}, should be at least {} for row {}'.format(len(row), len(HEADER), row))
row_classes = row[1:]
if any(((class_ not in POSSIBLE_CLASSES) for class_ in row_classes)):
raise ValueError('Unknown class found among: {}'.format(row_classes))
ids.append(row[0])
classes.append(row_classes)
return (ids, classes)<|docstring|>Parse a solution file.<|endoftext|>
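A hedged sketch of how the parser above is used. The exact column header and the allowed class labels come from the HEADER and POSSIBLE_CLASSES module constants, which are not shown in this record; the file layout below follows the format described in the help text of score() in the next record (a filename,cell_line header followed by ID,ANSWER rows):

from testchallenge.scoring import parse_solution_file  # assumed import path

# Hypothetical solution.csv (answers must be members of POSSIBLE_CLASSES,
# otherwise a ValueError is raised):
#   filename,cell_line
#   ID2,ANSWER2
#   ID1,ANSWER1
ids, classes = parse_solution_file('solution.csv')
# Rows are sorted by ID before being returned:
# ids == ['ID1', 'ID2'], classes == [['ANSWER1'], ['ANSWER2']]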
|
67bf8a6dde05017567521ea3b7f881ac5308c0fcad5f7bfb572f77a9712433f6
|
def score():
'Run script.'
parser = argparse.ArgumentParser(description='Scores precision, recall, and f1 score for a simple classification challenge.\r\nBoth solution files and prediction files should follow the general format of:\n\nfilename,cell_line\nID1,ANSWER1\nID2,ANSWER2\n...\n\nNote that it is required that all IDs are present in both files.', formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('solution', help='The gold standard solutions')
parser.add_argument('predictions', help='Predictions from the challenger')
parser.add_argument('-O', '--output-file', type=str, default=None, help='Saves output to the specified file in csv format.\nDefault behaviour is to print to stdout using spaces as separator characters')
args = parser.parse_args()
binarizer = Binarizer(POSSIBLE_CLASSES)
(solution_ids, solution_classes) = parse_solution_file(args.solution)
bin_solution = binarizer(solution_classes)
(prediction_ids, prediction_classes) = parse_solution_file(args.predictions)
if (solution_ids != prediction_ids):
print('The IDs in the two files are unordered or non-equal.')
print('IDs only in solution:', (set(solution_ids) - set(prediction_ids)))
print('IDs only in prediction:', (set(prediction_ids) - set(solution_ids)))
sys.exit((- 1))
bin_prediction = binarizer(prediction_classes)
overall_result = precision_recall(bin_prediction, bin_solution, True)
result = precision_recall(bin_prediction, bin_solution, True, 'class')
output_file = args.output_file
if output_file:
json_result = {'data': list(overall_result), 'additionalData': [[result[0][idx], result[1][idx], result[2][idx]] for (idx, _) in enumerate(binarizer.classes)]}
with open(args.output_file, 'w') as json_file:
simplejson.dump(json_result, json_file, ignore_nan=True, indent=2)
print(simplejson.dumps(json_result, ignore_nan=True, indent=2))
else:
print('class', 'pre', 'rec', 'f1')
for (i, class_) in enumerate(binarizer.classes):
print(class_, result[0][i], result[1][i], result[2][i])
print('Overall', overall_result[0], overall_result[1], overall_result[2])
|
Run script.
|
testchallenge/scoring.py
|
score
|
CellProfiling/test-challenge
| 0 |
python
|
def score():
parser = argparse.ArgumentParser(description='Scores precision, recall, and f1 score for a simple classification challenge.\r\nBoth solution files and prediction files should follow the general format of:\n\nfilename,cell_line\nID1,ANSWER1\nID2,ANSWER2\n...\n\nNote that it is required that all IDs are present in both files.', formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('solution', help='The gold standard solutions')
parser.add_argument('predictions', help='Predictions from the challenger')
parser.add_argument('-O', '--output-file', type=str, default=None, help='Saves output to the specified file in csv format.\nDefault behaviour is to print to stdout using spaces as separator characters')
args = parser.parse_args()
binarizer = Binarizer(POSSIBLE_CLASSES)
(solution_ids, solution_classes) = parse_solution_file(args.solution)
bin_solution = binarizer(solution_classes)
(prediction_ids, prediction_classes) = parse_solution_file(args.predictions)
if (solution_ids != prediction_ids):
print('The IDs in the two files are unordered or non-equal.')
print('IDs only in solution:', (set(solution_ids) - set(prediction_ids)))
print('IDs only in prediction:', (set(prediction_ids) - set(solution_ids)))
sys.exit((- 1))
bin_prediction = binarizer(prediction_classes)
overall_result = precision_recall(bin_prediction, bin_solution, True)
result = precision_recall(bin_prediction, bin_solution, True, 'class')
output_file = args.output_file
if output_file:
json_result = {'data': list(overall_result), 'additionalData': [[result[0][idx], result[1][idx], result[2][idx]] for (idx, _) in enumerate(binarizer.classes)]}
with open(args.output_file, 'w') as json_file:
simplejson.dump(json_result, json_file, ignore_nan=True, indent=2)
print(simplejson.dumps(json_result, ignore_nan=True, indent=2))
else:
print('class', 'pre', 'rec', 'f1')
for (i, class_) in enumerate(binarizer.classes):
print(class_, result[0][i], result[1][i], result[2][i])
print('Overall', overall_result[0], overall_result[1], overall_result[2])
|
def score():
parser = argparse.ArgumentParser(description='Scores precision, recall, and f1 score for a simple classification challenge.\r\nBoth solution files and prediction files should follow the general format of:\n\nfilename,cell_line\nID1,ANSWER1\nID2,ANSWER2\n...\n\nNote that it is required that all IDs are present in both files.', formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('solution', help='The gold standard solutions')
parser.add_argument('predictions', help='Predictions from the challenger')
parser.add_argument('-O', '--output-file', type=str, default=None, help='Saves output to the specified file in csv format.\nDefault behaviour is to print to stdout using spaces as separator characters')
args = parser.parse_args()
binarizer = Binarizer(POSSIBLE_CLASSES)
(solution_ids, solution_classes) = parse_solution_file(args.solution)
bin_solution = binarizer(solution_classes)
(prediction_ids, prediction_classes) = parse_solution_file(args.predictions)
if (solution_ids != prediction_ids):
print('The IDs in the two files are unordered or non-equal.')
print('IDs only in solution:', (set(solution_ids) - set(prediction_ids)))
print('IDs only in prediction:', (set(prediction_ids) - set(solution_ids)))
sys.exit((- 1))
bin_prediction = binarizer(prediction_classes)
overall_result = precision_recall(bin_prediction, bin_solution, True)
result = precision_recall(bin_prediction, bin_solution, True, 'class')
output_file = args.output_file
if output_file:
json_result = {'data': list(overall_result), 'additionalData': [[result[0][idx], result[1][idx], result[2][idx]] for (idx, _) in enumerate(binarizer.classes)]}
with open(args.output_file, 'w') as json_file:
simplejson.dump(json_result, json_file, ignore_nan=True, indent=2)
print(simplejson.dumps(json_result, ignore_nan=True, indent=2))
else:
print('class', 'pre', 'rec', 'f1')
for (i, class_) in enumerate(binarizer.classes):
print(class_, result[0][i], result[1][i], result[2][i])
print('Overall', overall_result[0], overall_result[1], overall_result[2])<|docstring|>Run script.<|endoftext|>
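A hedged invocation sketch for score(): the script name and file paths are placeholders; only the positional arguments and the -O/--output-file flag come from the argparse setup above.

import sys

# Placeholder file names; score() reads its arguments from sys.argv via argparse.
sys.argv = ["scoring.py", "solution_example.csv", "predictions_example.csv",
            "-O", "scores.json"]
score()   # with -O set, writes per-class and overall precision/recall/F1 to scores.json and prints the JSON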
|
6664248d38ed8e05e11ec960e89d97ef79d1b5955255561ebc0012e37df48020
|
def __init__(self, classes):
'Args:\n classes: A list of the classes that can be binarized.\n '
self.classes = sorted(set(classes))
self._index = dict(zip(self.classes, range(len(self.classes))))
self._reverse_index = dict(zip(range(len(self.classes)), self.classes))
|
Args:
classes: A list of the classes that can be binarized.
|
testchallenge/scoring.py
|
__init__
|
CellProfiling/test-challenge
| 0 |
python
|
def __init__(self, classes):
'Args:\n classes: A list of the classes that can be binarized.\n '
self.classes = sorted(set(classes))
self._index = dict(zip(self.classes, range(len(self.classes))))
self._reverse_index = dict(zip(range(len(self.classes)), self.classes))
|
def __init__(self, classes):
'Args:\n classes: A list of the classes that can be binarized.\n '
self.classes = sorted(set(classes))
self._index = dict(zip(self.classes, range(len(self.classes))))
self._reverse_index = dict(zip(range(len(self.classes)), self.classes))<|docstring|>Args:
classes: A list of the classes that can be binarized.<|endoftext|>
|
d332685635e75501a41c098aebf650d634a31a93f39a5d6973b54bc38fe13f05
|
def bin_label(self, item):
'Binarize a single item.\n\n If the item is iterable and is not a string, the item will be\n binarized as a multi-label item.\n '
bin_ = ([0] * len(self.classes))
if (isinstance(item, collections.Iterable) and (not isinstance(item, str))):
for class_ in item:
bin_[self._index[class_]] = 1
else:
bin_[self._index[item]] = 1
return bin_
|
Binarize a single item.
If the item is iterable and is not a string, the item will be
binarized as a multi-label item.
|
testchallenge/scoring.py
|
bin_label
|
CellProfiling/test-challenge
| 0 |
python
|
def bin_label(self, item):
'Binarize a single item.\n\n If the item is iterable and is not a string, the item will be\n binarized as a multi-label item.\n '
bin_ = ([0] * len(self.classes))
if (isinstance(item, collections.Iterable) and (not isinstance(item, str))):
for class_ in item:
bin_[self._index[class_]] = 1
else:
bin_[self._index[item]] = 1
return bin_
|
def bin_label(self, item):
'Binarize a single item.\n\n If the item is iterable and is not a string, the item will be\n binarized as a multi-label item.\n '
bin_ = ([0] * len(self.classes))
if (isinstance(item, collections.Iterable) and (not isinstance(item, str))):
for class_ in item:
bin_[self._index[class_]] = 1
else:
bin_[self._index[item]] = 1
return bin_<|docstring|>Binarize a single item.
If the item is iterable and is not a string, the item will be
binarized as a multi-label item.<|endoftext|>
|
f9d542eb16df9ba91bcb490d98ce6db5b1347f7d24f27562b6f5c6a8a5c8c637
|
def binarize(self, to_bin):
    'Binarize a list of labels.\n\n Args:\n to_bin: A list of labels to be binarized.\n Items in `to_bin` that are iterable (except strings)\n will be binarized as a multi-label item. All other items\n will be binarized as a single-label item.\n Returns:\n A list of binarized label lists.\n '
binarized = []
for item in to_bin:
bin_ = self.bin_label(item)
binarized.append(bin_)
return binarized
|
Binarize a list of labels.
Args:
to_bin: A list of labels to be binarized.
Items in `to_bin` that are iterable (except strings)
will be binarized as a multi-label item. All other items
will be binarized as a single-label item.
Returns:
A list of binarized label lists.
|
testchallenge/scoring.py
|
binarize
|
CellProfiling/test-challenge
| 0 |
python
|
def binarize(self, to_bin):
    'Binarize a list of labels.\n\n Args:\n to_bin: A list of labels to be binarized.\n Items in `to_bin` that are iterable (except strings)\n will be binarized as a multi-label item. All other items\n will be binarized as a single-label item.\n Returns:\n A list of binarized label lists.\n '
binarized = []
for item in to_bin:
bin_ = self.bin_label(item)
binarized.append(bin_)
return binarized
|
def binarize(self, to_bin):
    'Binarize a list of labels.\n\n Args:\n to_bin: A list of labels to be binarized.\n Items in `to_bin` that are iterable (except strings)\n will be binarized as a multi-label item. All other items\n will be binarized as a single-label item.\n Returns:\n A list of binarized label lists.\n '
binarized = []
for item in to_bin:
bin_ = self.bin_label(item)
binarized.append(bin_)
return binarized<|docstring|>Binarize a list of labels.
Args:
to_bin: A list of labels to be binarized.
Items in `to_bin` that are iterable (except strings)
will be binarized as a multi-label item. All other items
will be binarized as a single-label item.
Returns:
A list of binarized label lists.<|endoftext|>
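A small usage sketch for the Binarizer methods in these records; the cell-line names are placeholders rather than the real POSSIBLE_CLASSES.

# Placeholder classes; Binarizer sorts and de-duplicates them on construction.
binarizer = Binarizer(["U2OS", "HeLa", "A375"])
print(binarizer.classes)            # ['A375', 'HeLa', 'U2OS']

# Strings count as single labels, other iterables as multi-label items.
print(binarizer.bin_label("HeLa"))                              # [0, 1, 0]
print(binarizer.binarize([["HeLa"], "A375", ["A375", "U2OS"]])) # [[0, 1, 0], [1, 0, 0], [1, 0, 1]]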
|
0e4a2dc7b979f5eebf45ed5051783bcba3a4d01c9ebef9b29fc6402c024666cd
|
def unbin_label(self, item):
'Unbinarize a single item.'
unbin = []
    for idx, value in enumerate(item):
        if value:
            unbin.append(self._reverse_index[idx])
return unbin
|
Unbinarize a single item.
|
testchallenge/scoring.py
|
unbin_label
|
CellProfiling/test-challenge
| 0 |
python
|
def unbin_label(self, item):
unbin = []
    for idx, value in enumerate(item):
        if value:
            unbin.append(self._reverse_index[idx])
return unbin
|
def unbin_label(self, item):
unbin = []
    for idx, value in enumerate(item):
        if value:
            unbin.append(self._reverse_index[idx])
return unbin<|docstring|>Unbinarize a single item.<|endoftext|>
|
66b9d12f43d17decd7cc7cb583000559486b17f2cd6d29064e3a922204ffdac7
|
def unbinarize(self, from_bin):
'Unbinarize a list of binarized labels.'
unbinarized = []
for item in from_bin:
unbinarized.append(self.unbin_label(item))
return unbinarized
|
Unbinarize a list of binarized labels.
|
testchallenge/scoring.py
|
unbinarize
|
CellProfiling/test-challenge
| 0 |
python
|
def unbinarize(self, from_bin):
unbinarized = []
for item in from_bin:
unbinarized.append(self.unbin_label(item))
return unbinarized
|
def unbinarize(self, from_bin):
unbinarized = []
for item in from_bin:
unbinarized.append(self.unbin_label(item))
return unbinarized<|docstring|>Unbinarize a list of binarized labels.<|endoftext|>
|
7de3ae014922140cd4bcff84e7f9b529ad644fe39bb81fc72deec79777a5a606
|
def grouper(n, iterable, fillvalue=None):
"Group lists into lists of items.\n\n grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
args = ([iter(iterable)] * n)
return itertools.zip_longest(*args, fillvalue=fillvalue)
|
Group lists into lists of items.
grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx
|
crprofile/crprofile.py
|
grouper
|
zodpixel/SML-Cogs
| 17 |
python
|
def grouper(n, iterable, fillvalue=None):
"Group lists into lists of items.\n\n grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
args = ([iter(iterable)] * n)
return itertools.zip_longest(*args, fillvalue=fillvalue)
|
def grouper(n, iterable, fillvalue=None):
"Group lists into lists of items.\n\n grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
args = ([iter(iterable)] * n)
return itertools.zip_longest(*args, fillvalue=fillvalue)<|docstring|>Group lists into lists of items.
grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx<|endoftext|>
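A quick check of the grouper helper using the example from its own docstring; zip_longest yields tuples, so the groups are joined back into strings here.

import itertools  # grouper above relies on itertools.zip_longest

print(list(grouper(3, 'ABCDEFG', 'x')))
# [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
print([''.join(group) for group in grouper(3, 'ABCDEFG', 'x')])
# ['ABC', 'DEF', 'Gxx']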
|
0b108e36ce627a8974345699153c3760d08da37a707cdb8f44a401431b5ffc60
|
def nested_dict():
'Recursively nested defaultdict.'
return defaultdict(nested_dict)
|
Recursively nested defaultdict.
|
crprofile/crprofile.py
|
nested_dict
|
zodpixel/SML-Cogs
| 17 |
python
|
def nested_dict():
return defaultdict(nested_dict)
|
def nested_dict():
return defaultdict(nested_dict)<|docstring|>Recursively nested defaultdict.<|endoftext|>
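A minimal sketch of nested_dict in use; the keys are arbitrary placeholders.

from collections import defaultdict  # nested_dict above relies on defaultdict

stats = nested_dict()
stats['player']['trophies']['best'] = 6000   # intermediate dicts are created on demand
print(stats['player']['trophies']['best'])   # 6000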
|
de3c19298e9479dde6a00fd0df213ba83cce37b1ec013af714e859e295de0b7d
|
def random_discord_color():
'Return random color as an integer.'
color = ''.join([choice('0123456789ABCDEF') for x in range(6)])
color = int(color, 16)
return discord.Color(value=color)
|
Return random color as an integer.
|
crprofile/crprofile.py
|
random_discord_color
|
zodpixel/SML-Cogs
| 17 |
python
|
def random_discord_color():
    color = ''.join([choice('0123456789ABCDEF') for x in range(6)])
color = int(color, 16)
return discord.Color(value=color)
|
def random_discord_color():
    color = ''.join([choice('0123456789ABCDEF') for x in range(6)])
color = int(color, 16)
return discord.Color(value=color)<|docstring|>Return random color as an integer.<|endoftext|>
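A usage sketch for random_discord_color, assuming the discord.py package and a module-level `from random import choice` import, neither of which is shown in the record.

color = random_discord_color()   # a discord.Color built from a random 24-bit value
print(hex(color.value))          # e.g. 0x3a7bd5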
|