repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (string, 1 class: python) | code (string, 75-19.8k chars) | code_tokens (sequence) | docstring (string, 3-17.3k chars) | docstring_tokens (sequence) | sha (string, 40 chars) | url (string, 87-242 chars) | partition (string, 1 class: train) |
---|---|---|---|---|---|---|---|---|---|---|---|
hishnash/djangochannelsrestframework | djangochannelsrestframework/consumers.py | AsyncAPIConsumer.check_permissions | async def check_permissions(self, action: str, **kwargs):
"""
Check if the action should be permitted.
Raises an appropriate exception if the request is not permitted.
"""
for permission in await self.get_permissions(action=action, **kwargs):
if not await ensure_async(permission.has_permission)(
scope=self.scope, consumer=self, action=action, **kwargs):
raise PermissionDenied() | python | async def check_permissions(self, action: str, **kwargs):
"""
Check if the action should be permitted.
Raises an appropriate exception if the request is not permitted.
"""
for permission in await self.get_permissions(action=action, **kwargs):
if not await ensure_async(permission.has_permission)(
scope=self.scope, consumer=self, action=action, **kwargs):
raise PermissionDenied() | [
"async",
"def",
"check_permissions",
"(",
"self",
",",
"action",
":",
"str",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"permission",
"in",
"await",
"self",
".",
"get_permissions",
"(",
"action",
"=",
"action",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"await",
"ensure_async",
"(",
"permission",
".",
"has_permission",
")",
"(",
"scope",
"=",
"self",
".",
"scope",
",",
"consumer",
"=",
"self",
",",
"action",
"=",
"action",
",",
"*",
"*",
"kwargs",
")",
":",
"raise",
"PermissionDenied",
"(",
")"
] | Check if the action should be permitted.
Raises an appropriate exception if the request is not permitted. | [
"Check",
"if",
"the",
"action",
"should",
"be",
"permitted",
".",
"Raises",
"an",
"appropriate",
"exception",
"if",
"the",
"request",
"is",
"not",
"permitted",
"."
] | 19fdec7efd785b1a94d19612a8de934e1948e344 | https://github.com/hishnash/djangochannelsrestframework/blob/19fdec7efd785b1a94d19612a8de934e1948e344/djangochannelsrestframework/consumers.py#L93-L102 | train |
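
A minimal sketch of a permission object that `check_permissions` could iterate over. Only the `has_permission(scope=..., consumer=..., action=..., **kwargs)` call signature is taken from the row above; the class name and the user check are illustrative assumptions, not dataset content.

```python
# Hypothetical permission class; the has_permission signature mirrors the
# call made inside AsyncAPIConsumer.check_permissions above.
class IsAuthenticated:
    async def has_permission(self, scope, consumer, action, **kwargs) -> bool:
        # Assumes the connection scope carries a Django-style user object.
        user = scope.get("user")
        return bool(user and getattr(user, "is_authenticated", False))
```
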
hishnash/djangochannelsrestframework | djangochannelsrestframework/consumers.py | AsyncAPIConsumer.handle_exception | async def handle_exception(self, exc: Exception, action: str, request_id):
"""
Handle any exception that occurs by sending an appropriate message
"""
if isinstance(exc, APIException):
await self.reply(
action=action,
errors=self._format_errors(exc.detail),
status=exc.status_code,
request_id=request_id
)
elif exc == Http404 or isinstance(exc, Http404):
await self.reply(
action=action,
errors=self._format_errors('Not found'),
status=404,
request_id=request_id
)
else:
raise exc | python | async def handle_exception(self, exc: Exception, action: str, request_id):
"""
Handle any exception that occurs by sending an appropriate message
"""
if isinstance(exc, APIException):
await self.reply(
action=action,
errors=self._format_errors(exc.detail),
status=exc.status_code,
request_id=request_id
)
elif exc == Http404 or isinstance(exc, Http404):
await self.reply(
action=action,
errors=self._format_errors('Not found'),
status=404,
request_id=request_id
)
else:
raise exc | [
"async",
"def",
"handle_exception",
"(",
"self",
",",
"exc",
":",
"Exception",
",",
"action",
":",
"str",
",",
"request_id",
")",
":",
"if",
"isinstance",
"(",
"exc",
",",
"APIException",
")",
":",
"await",
"self",
".",
"reply",
"(",
"action",
"=",
"action",
",",
"errors",
"=",
"self",
".",
"_format_errors",
"(",
"exc",
".",
"detail",
")",
",",
"status",
"=",
"exc",
".",
"status_code",
",",
"request_id",
"=",
"request_id",
")",
"elif",
"exc",
"==",
"Http404",
"or",
"isinstance",
"(",
"exc",
",",
"Http404",
")",
":",
"await",
"self",
".",
"reply",
"(",
"action",
"=",
"action",
",",
"errors",
"=",
"self",
".",
"_format_errors",
"(",
"'Not found'",
")",
",",
"status",
"=",
"404",
",",
"request_id",
"=",
"request_id",
")",
"else",
":",
"raise",
"exc"
] | Handle any exception that occurs by sending an appropriate message | [
"Handle",
"any",
"exception",
"that",
"occurs",
"by",
"sending",
"an",
"appropriate",
"message"
] | 19fdec7efd785b1a94d19612a8de934e1948e344 | https://github.com/hishnash/djangochannelsrestframework/blob/19fdec7efd785b1a94d19612a8de934e1948e344/djangochannelsrestframework/consumers.py#L104-L123 | train |
hishnash/djangochannelsrestframework | djangochannelsrestframework/consumers.py | AsyncAPIConsumer.receive_json | async def receive_json(self, content: typing.Dict, **kwargs):
"""
Called with decoded JSON content.
"""
# TODO: assert the message format; if it does not match, return an error message.
request_id = content.pop('request_id')
action = content.pop('action')
await self.handle_action(action, request_id=request_id, **content) | python | async def receive_json(self, content: typing.Dict, **kwargs):
"""
Called with decoded JSON content.
"""
# TODO: assert the message format; if it does not match, return an error message.
request_id = content.pop('request_id')
action = content.pop('action')
await self.handle_action(action, request_id=request_id, **content) | [
"async",
"def",
"receive_json",
"(",
"self",
",",
"content",
":",
"typing",
".",
"Dict",
",",
"*",
"*",
"kwargs",
")",
":",
"# TODO assert format, if does not match return message.",
"request_id",
"=",
"content",
".",
"pop",
"(",
"'request_id'",
")",
"action",
"=",
"content",
".",
"pop",
"(",
"'action'",
")",
"await",
"self",
".",
"handle_action",
"(",
"action",
",",
"request_id",
"=",
"request_id",
",",
"*",
"*",
"content",
")"
] | Called with decoded JSON content. | [
"Called",
"with",
"decoded",
"JSON",
"content",
"."
] | 19fdec7efd785b1a94d19612a8de934e1948e344 | https://github.com/hishnash/djangochannelsrestframework/blob/19fdec7efd785b1a94d19612a8de934e1948e344/djangochannelsrestframework/consumers.py#L170-L177 | train |
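
Judging from the two `pop` calls in `receive_json`, every decoded frame must carry `action` and `request_id` keys, and whatever remains is forwarded to the handler as keyword arguments. A plausible payload, with made-up values:

```python
# Example of the decoded JSON dict receive_json expects; "pk" stands in for
# any extra handler kwarg and is purely illustrative.
message = {
    "action": "retrieve",  # popped and dispatched through handle_action
    "request_id": 42,      # popped and threaded through to replies
    "pk": 1,               # remaining keys are passed on via **content
}
```
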
hishnash/djangochannelsrestframework | djangochannelsrestframework/decorators.py | action | def action(atomic=None, **kwargs):
"""
Mark a method as an action.
"""
def decorator(func):
if atomic is None:
_atomic = getattr(settings, 'ATOMIC_REQUESTS', False)
else:
_atomic = atomic
func.action = True
func.kwargs = kwargs
if asyncio.iscoroutinefunction(func):
if _atomic:
raise ValueError('Only synchronous actions can be atomic')
return func
if _atomic:
# wrap function in atomic wrapper
func = transaction.atomic(func)
@wraps(func)
async def async_f(self: AsyncAPIConsumer,
*args, **_kwargs):
result, status = await database_sync_to_async(func)(
self, *args, **_kwargs
)
return result, status
async_f.action = True
async_f.kwargs = kwargs
async_f.__name__ = func.__name__
return async_f
return decorator | python | def action(atomic=None, **kwargs):
"""
Mark a method as an action.
"""
def decorator(func):
if atomic is None:
_atomic = getattr(settings, 'ATOMIC_REQUESTS', False)
else:
_atomic = atomic
func.action = True
func.kwargs = kwargs
if asyncio.iscoroutinefunction(func):
if _atomic:
raise ValueError('Only synchronous actions can be atomic')
return func
if _atomic:
# wrap function in atomic wrapper
func = transaction.atomic(func)
@wraps(func)
async def async_f(self: AsyncAPIConsumer,
*args, **_kwargs):
result, status = await database_sync_to_async(func)(
self, *args, **_kwargs
)
return result, status
async_f.action = True
async_f.kwargs = kwargs
async_f.__name__ = func.__name__
return async_f
return decorator | [
"def",
"action",
"(",
"atomic",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"if",
"atomic",
"is",
"None",
":",
"_atomic",
"=",
"getattr",
"(",
"settings",
",",
"'ATOMIC_REQUESTS'",
",",
"False",
")",
"else",
":",
"_atomic",
"=",
"atomic",
"func",
".",
"action",
"=",
"True",
"func",
".",
"kwargs",
"=",
"kwargs",
"if",
"asyncio",
".",
"iscoroutinefunction",
"(",
"func",
")",
":",
"if",
"_atomic",
":",
"raise",
"ValueError",
"(",
"'Only synchronous actions can be atomic'",
")",
"return",
"func",
"if",
"_atomic",
":",
"# wrap function in atomic wrapper",
"func",
"=",
"transaction",
".",
"atomic",
"(",
"func",
")",
"@",
"wraps",
"(",
"func",
")",
"async",
"def",
"async_f",
"(",
"self",
":",
"AsyncAPIConsumer",
",",
"*",
"args",
",",
"*",
"*",
"_kwargs",
")",
":",
"result",
",",
"status",
"=",
"await",
"database_sync_to_async",
"(",
"func",
")",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"_kwargs",
")",
"return",
"result",
",",
"status",
"async_f",
".",
"action",
"=",
"True",
"async_f",
".",
"kwargs",
"=",
"kwargs",
"async_f",
".",
"__name__",
"=",
"func",
".",
"__name__",
"return",
"async_f",
"return",
"decorator"
] | Mark a method as an action. | [
"Mark",
"a",
"method",
"as",
"an",
"action",
"."
] | 19fdec7efd785b1a94d19612a8de934e1948e344 | https://github.com/hishnash/djangochannelsrestframework/blob/19fdec7efd785b1a94d19612a8de934e1948e344/djangochannelsrestframework/decorators.py#L35-L72 | train |
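
A usage sketch for the `action` decorator above, assuming the consumer API from the earlier rows. The requirement that a synchronous action return a `(result, status)` pair follows from the wrapper's tuple unpacking; the consumer class, the `Thing` model, and the status codes are illustrative assumptions.

```python
from djangochannelsrestframework.consumers import AsyncAPIConsumer
from djangochannelsrestframework.decorators import action

class ThingConsumer(AsyncAPIConsumer):
    @action()  # synchronous, so it is wrapped with database_sync_to_async
    def retrieve(self, pk, **kwargs):
        thing = Thing.objects.get(pk=pk)  # Thing: hypothetical Django model
        return {"id": thing.pk}, 200      # must return a (result, status) pair

    @action(atomic=True)  # additionally wrapped in transaction.atomic
    def rename(self, pk, name, **kwargs):
        Thing.objects.filter(pk=pk).update(name=name)
        return {"id": pk, "name": name}, 200
```
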
blue-yonder/bonfire | bonfire/dateutils.py | datetime_parser | def datetime_parser(s):
"""
Parse timestamp s in local time. First the arrow parser is used; if it fails, the parsedatetime parser is used.
:param s:
:return:
"""
try:
ts = arrow.get(s)
# Convert UTC to local, result of get is UTC unless it specifies timezone, bonfire assumes
# all time to be machine local
if ts.tzinfo == arrow.get().tzinfo:
ts = ts.replace(tzinfo='local')
except:
c = pdt.Calendar()
result, what = c.parse(s)
ts = None
if what in (1, 2, 3):
ts = datetime.datetime(*result[:6])
ts = arrow.get(ts)
ts = ts.replace(tzinfo='local')
return ts
if ts is None:
raise ValueError("Cannot parse timestamp '" + s + "'")
return ts | python | def datetime_parser(s):
"""
Parse timestamp s in local time. First the arrow parser is used; if it fails, the parsedatetime parser is used.
:param s:
:return:
"""
try:
ts = arrow.get(s)
# Convert UTC to local, result of get is UTC unless it specifies timezone, bonfire assumes
# all time to be machine local
if ts.tzinfo == arrow.get().tzinfo:
ts = ts.replace(tzinfo='local')
except:
c = pdt.Calendar()
result, what = c.parse(s)
ts = None
if what in (1, 2, 3):
ts = datetime.datetime(*result[:6])
ts = arrow.get(ts)
ts = ts.replace(tzinfo='local')
return ts
if ts is None:
raise ValueError("Cannot parse timestamp '" + s + "'")
return ts | [
"def",
"datetime_parser",
"(",
"s",
")",
":",
"try",
":",
"ts",
"=",
"arrow",
".",
"get",
"(",
"s",
")",
"# Convert UTC to local, result of get is UTC unless it specifies timezone, bonfire assumes",
"# all time to be machine local",
"if",
"ts",
".",
"tzinfo",
"==",
"arrow",
".",
"get",
"(",
")",
".",
"tzinfo",
":",
"ts",
"=",
"ts",
".",
"replace",
"(",
"tzinfo",
"=",
"'local'",
")",
"except",
":",
"c",
"=",
"pdt",
".",
"Calendar",
"(",
")",
"result",
",",
"what",
"=",
"c",
".",
"parse",
"(",
"s",
")",
"ts",
"=",
"None",
"if",
"what",
"in",
"(",
"1",
",",
"2",
",",
"3",
")",
":",
"ts",
"=",
"datetime",
".",
"datetime",
"(",
"*",
"result",
"[",
":",
"6",
"]",
")",
"ts",
"=",
"arrow",
".",
"get",
"(",
"ts",
")",
"ts",
"=",
"ts",
".",
"replace",
"(",
"tzinfo",
"=",
"'local'",
")",
"return",
"ts",
"if",
"ts",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Cannot parse timestamp '\"",
"+",
"s",
"+",
"\"'\"",
")",
"return",
"ts"
] | Parse timestamp s in local time. First the arrow parser is used; if it fails, the parsedatetime parser is used.
:param s:
:return: | [
"Parse",
"timestamp",
"s",
"in",
"local",
"time",
".",
"First",
"the",
"arrow",
"parser",
"is",
"used",
"if",
"it",
"fails",
"the",
"parsedatetime",
"parser",
"is",
"used",
"."
] | d0af9ca10394f366cfa3c60f0741f1f0918011c2 | https://github.com/blue-yonder/bonfire/blob/d0af9ca10394f366cfa3c60f0741f1f0918011c2/bonfire/dateutils.py#L14-L42 | train |
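
Illustrative calls covering the two parsing paths (the inputs are examples, not dataset content): ISO-8601 strings are handled by `arrow.get`, natural-language phrases fall through to `parsedatetime`, and anything neither library understands raises `ValueError`.

```python
datetime_parser("2019-03-01T12:30:00")  # parsed by arrow, coerced to local time
datetime_parser("yesterday at noon")    # arrow fails; parsedatetime resolves it
datetime_parser("not a timestamp")      # neither parser succeeds -> ValueError
```
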
kyb3r/dhooks | dhooks/file.py | File.seek | def seek(self, offset: int = 0, *args, **kwargs):
"""
A shortcut to ``self.fp.seek``.
"""
return self.fp.seek(offset, *args, **kwargs) | python | def seek(self, offset: int = 0, *args, **kwargs):
"""
A shortcut to ``self.fp.seek``.
"""
return self.fp.seek(offset, *args, **kwargs) | [
"def",
"seek",
"(",
"self",
",",
"offset",
":",
"int",
"=",
"0",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"fp",
".",
"seek",
"(",
"offset",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | A shortcut to ``self.fp.seek``. | [
"A",
"shortcut",
"to",
"self",
".",
"fp",
".",
"seek",
"."
] | 2cde52b26cc94dcbf538ebcc4e17dfc3714d2827 | https://github.com/kyb3r/dhooks/blob/2cde52b26cc94dcbf538ebcc4e17dfc3714d2827/dhooks/file.py#L32-L38 | train |
kyb3r/dhooks | dhooks/embed.py | Embed.set_title | def set_title(self, title: str, url: str = None) -> None:
"""
Sets the title of the embed.
Parameters
----------
title: str
Title of the embed.
url: str or None, optional
URL hyperlink of the title.
"""
self.title = title
self.url = url | python | def set_title(self, title: str, url: str = None) -> None:
"""
Sets the title of the embed.
Parameters
----------
title: str
Title of the embed.
url: str or None, optional
URL hyperlink of the title.
"""
self.title = title
self.url = url | [
"def",
"set_title",
"(",
"self",
",",
"title",
":",
"str",
",",
"url",
":",
"str",
"=",
"None",
")",
"->",
"None",
":",
"self",
".",
"title",
"=",
"title",
"self",
".",
"url",
"=",
"url"
] | Sets the title of the embed.
Parameters
----------
title: str
Title of the embed.
url: str or None, optional
URL hyperlink of the title. | [
"Sets",
"the",
"title",
"of",
"the",
"embed",
"."
] | 2cde52b26cc94dcbf538ebcc4e17dfc3714d2827 | https://github.com/kyb3r/dhooks/blob/2cde52b26cc94dcbf538ebcc4e17dfc3714d2827/dhooks/embed.py#L82-L96 | train |
kyb3r/dhooks | dhooks/embed.py | Embed.set_timestamp | def set_timestamp(self, time: Union[str, datetime.datetime] = None,
now: bool = False) -> None:
"""
Sets the timestamp of the embed.
Parameters
----------
time: str or :class:`datetime.datetime`
The ``ISO 8601`` timestamp for the embed.
now: bool
Defaults to :class:`False`.
If set to :class:`True` the current time is used for the timestamp.
"""
if now:
self.timestamp = str(datetime.datetime.utcnow())
else:
self.timestamp = str(time) | python | def set_timestamp(self, time: Union[str, datetime.datetime] = None,
now: bool = False) -> None:
"""
Sets the timestamp of the embed.
Parameters
----------
time: str or :class:`datetime.datetime`
The ``ISO 8601`` timestamp for the embed.
now: bool
Defaults to :class:`False`.
If set to :class:`True` the current time is used for the timestamp.
"""
if now:
self.timestamp = str(datetime.datetime.utcnow())
else:
self.timestamp = str(time) | [
"def",
"set_timestamp",
"(",
"self",
",",
"time",
":",
"Union",
"[",
"str",
",",
"datetime",
".",
"datetime",
"]",
"=",
"None",
",",
"now",
":",
"bool",
"=",
"False",
")",
"->",
"None",
":",
"if",
"now",
":",
"self",
".",
"timestamp",
"=",
"str",
"(",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
")",
"else",
":",
"self",
".",
"timestamp",
"=",
"str",
"(",
"time",
")"
] | Sets the timestamp of the embed.
Parameters
----------
time: str or :class:`datetime.datetime`
The ``ISO 8601`` timestamp for the embed.
now: bool
Defaults to :class:`False`.
If set to :class:`True` the current time is used for the timestamp. | [
"Sets",
"the",
"timestamp",
"of",
"the",
"embed",
"."
] | 2cde52b26cc94dcbf538ebcc4e17dfc3714d2827 | https://github.com/kyb3r/dhooks/blob/2cde52b26cc94dcbf538ebcc4e17dfc3714d2827/dhooks/embed.py#L98-L116 | train |
kyb3r/dhooks | dhooks/embed.py | Embed.add_field | def add_field(self, name: str, value: str, inline: bool = True) -> None:
"""
Adds an embed field.
Parameters
----------
name: str
Name attribute of the embed field.
value: str
Value attribute of the embed field.
inline: bool
Defaults to :class:`True`.
Whether or not the embed should be inline.
"""
field = {
'name': name,
'value': value,
'inline': inline
}
self.fields.append(field) | python | def add_field(self, name: str, value: str, inline: bool = True) -> None:
"""
Adds an embed field.
Parameters
----------
name: str
Name attribute of the embed field.
value: str
Value attribute of the embed field.
inline: bool
Defaults to :class:`True`.
Whether or not the embed should be inline.
"""
field = {
'name': name,
'value': value,
'inline': inline
}
self.fields.append(field) | [
"def",
"add_field",
"(",
"self",
",",
"name",
":",
"str",
",",
"value",
":",
"str",
",",
"inline",
":",
"bool",
"=",
"True",
")",
"->",
"None",
":",
"field",
"=",
"{",
"'name'",
":",
"name",
",",
"'value'",
":",
"value",
",",
"'inline'",
":",
"inline",
"}",
"self",
".",
"fields",
".",
"append",
"(",
"field",
")"
] | Adds an embed field.
Parameters
----------
name: str
Name attribute of the embed field.
value: str
Value attribute of the embed field.
inline: bool
Defaults to :class:`True`.
Whether or not the embed should be inline. | [
"Adds",
"an",
"embed",
"field",
"."
] | 2cde52b26cc94dcbf538ebcc4e17dfc3714d2827 | https://github.com/kyb3r/dhooks/blob/2cde52b26cc94dcbf538ebcc4e17dfc3714d2827/dhooks/embed.py#L118-L140 | train |
kyb3r/dhooks | dhooks/embed.py | Embed.set_author | def set_author(self, name: str, icon_url: str = None, url: str = None) -> \
None:
"""
Sets the author of the embed.
Parameters
----------
name: str
The author's name.
icon_url: str, optional
URL for the author's icon.
url: str, optional
URL hyperlink for the author.
"""
self.author = {
'name': name,
'icon_url': icon_url,
'url': url
} | python | def set_author(self, name: str, icon_url: str = None, url: str = None) -> \
None:
"""
Sets the author of the embed.
Parameters
----------
name: str
The author's name.
icon_url: str, optional
URL for the author's icon.
url: str, optional
URL hyperlink for the author.
"""
self.author = {
'name': name,
'icon_url': icon_url,
'url': url
} | [
"def",
"set_author",
"(",
"self",
",",
"name",
":",
"str",
",",
"icon_url",
":",
"str",
"=",
"None",
",",
"url",
":",
"str",
"=",
"None",
")",
"->",
"None",
":",
"self",
".",
"author",
"=",
"{",
"'name'",
":",
"name",
",",
"'icon_url'",
":",
"icon_url",
",",
"'url'",
":",
"url",
"}"
] | Sets the author of the embed.
Parameters
----------
name: str
The author's name.
icon_url: str, optional
URL for the author's icon.
url: str, optional
URL hyperlink for the author. | [
"Sets",
"the",
"author",
"of",
"the",
"embed",
"."
] | 2cde52b26cc94dcbf538ebcc4e17dfc3714d2827 | https://github.com/kyb3r/dhooks/blob/2cde52b26cc94dcbf538ebcc4e17dfc3714d2827/dhooks/embed.py#L142-L163 | train |
kyb3r/dhooks | dhooks/embed.py | Embed.set_footer | def set_footer(self, text: str, icon_url: str = None) -> None:
"""
Sets the footer of the embed.
Parameters
----------
text: str
The footer text.
icon_url: str, optional
URL for the icon in the footer.
"""
self.footer = {
'text': text,
'icon_url': icon_url
} | python | def set_footer(self, text: str, icon_url: str = None) -> None:
"""
Sets the footer of the embed.
Parameters
----------
text: str
The footer text.
icon_url: str, optional
URL for the icon in the footer.
"""
self.footer = {
'text': text,
'icon_url': icon_url
} | [
"def",
"set_footer",
"(",
"self",
",",
"text",
":",
"str",
",",
"icon_url",
":",
"str",
"=",
"None",
")",
"->",
"None",
":",
"self",
".",
"footer",
"=",
"{",
"'text'",
":",
"text",
",",
"'icon_url'",
":",
"icon_url",
"}"
] | Sets the footer of the embed.
Parameters
----------
text: str
The footer text.
icon_url: str, optional
URL for the icon in the footer. | [
"Sets",
"the",
"footer",
"of",
"the",
"embed",
"."
] | 2cde52b26cc94dcbf538ebcc4e17dfc3714d2827 | https://github.com/kyb3r/dhooks/blob/2cde52b26cc94dcbf538ebcc4e17dfc3714d2827/dhooks/embed.py#L189-L205 | train |
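
Putting the `Embed` builder methods from the preceding rows together. The `Embed(color=...)` constructor call appears in the `examples/async.py` rows below; the import path, titles, URLs, and field values here are assumptions for illustration.

```python
from dhooks import Embed  # top-level import is an assumption

em = Embed(color=0x2ecc71)
em.set_title("Build finished", url="https://example.com/build/1")
em.set_author("ci-bot", icon_url="https://example.com/bot.png")
em.add_field(name="Duration", value="42s", inline=True)
em.set_footer("Host: build-01")
em.set_timestamp(now=True)  # stamps with datetime.datetime.utcnow()
```
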
kyb3r/dhooks | examples/async.py | init | async def init(app, loop):
"""Sends a message to the webhook channel when server starts."""
app.session = aiohttp.ClientSession(loop=loop) # to make web requests
app.webhook = Webhook.Async(webhook_url, session=app.session)
em = Embed(color=0x2ecc71)
em.set_author('[INFO] Starting Worker')
em.description = 'Host: {}'.format(socket.gethostname())
await app.webhook.send(embed=em) | python | async def init(app, loop):
"""Sends a message to the webhook channel when server starts."""
app.session = aiohttp.ClientSession(loop=loop) # to make web requests
app.webhook = Webhook.Async(webhook_url, session=app.session)
em = Embed(color=0x2ecc71)
em.set_author('[INFO] Starting Worker')
em.description = 'Host: {}'.format(socket.gethostname())
await app.webhook.send(embed=em) | [
"async",
"def",
"init",
"(",
"app",
",",
"loop",
")",
":",
"app",
".",
"session",
"=",
"aiohttp",
".",
"ClientSession",
"(",
"loop",
"=",
"loop",
")",
"# to make web requests",
"app",
".",
"webhook",
"=",
"Webhook",
".",
"Async",
"(",
"webhook_url",
",",
"session",
"=",
"app",
".",
"session",
")",
"em",
"=",
"Embed",
"(",
"color",
"=",
"0x2ecc71",
")",
"em",
".",
"set_author",
"(",
"'[INFO] Starting Worker'",
")",
"em",
".",
"description",
"=",
"'Host: {}'",
".",
"format",
"(",
"socket",
".",
"gethostname",
"(",
")",
")",
"await",
"app",
".",
"webhook",
".",
"send",
"(",
"embed",
"=",
"em",
")"
] | Sends a message to the webhook channel when the server starts. | [
"Sends",
"a",
"message",
"to",
"the",
"webhook",
"channel",
"when",
"server",
"starts",
"."
] | 2cde52b26cc94dcbf538ebcc4e17dfc3714d2827 | https://github.com/kyb3r/dhooks/blob/2cde52b26cc94dcbf538ebcc4e17dfc3714d2827/examples/async.py#L17-L26 | train |
kyb3r/dhooks | examples/async.py | server_stop | async def server_stop(app, loop):
"""Sends a message to the webhook channel when server stops."""
em = Embed(color=0xe67e22)
em.set_footer('Host: {}'.format(socket.gethostname()))
em.description = '[INFO] Server Stopped'
await app.webhook.send(embed=em)
await app.session.close() | python | async def server_stop(app, loop):
"""Sends a message to the webhook channel when server stops."""
em = Embed(color=0xe67e22)
em.set_footer('Host: {}'.format(socket.gethostname()))
em.description = '[INFO] Server Stopped'
await app.webhook.send(embed=em)
await app.session.close() | [
"async",
"def",
"server_stop",
"(",
"app",
",",
"loop",
")",
":",
"em",
"=",
"Embed",
"(",
"color",
"=",
"0xe67e22",
")",
"em",
".",
"set_footer",
"(",
"'Host: {}'",
".",
"format",
"(",
"socket",
".",
"gethostname",
"(",
")",
")",
")",
"em",
".",
"description",
"=",
"'[INFO] Server Stopped'",
"await",
"app",
".",
"webhook",
".",
"send",
"(",
"embed",
"=",
"em",
")",
"await",
"app",
".",
"session",
".",
"close",
"(",
")"
] | Sends a message to the webhook channel when the server stops. | [
"Sends",
"a",
"message",
"to",
"the",
"webhook",
"channel",
"when",
"server",
"stops",
"."
] | 2cde52b26cc94dcbf538ebcc4e17dfc3714d2827 | https://github.com/kyb3r/dhooks/blob/2cde52b26cc94dcbf538ebcc4e17dfc3714d2827/examples/async.py#L30-L37 | train |
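
The `init` and `server_stop` coroutines above are written as Sanic server listeners (note the `app, loop` parameters). A sketch of wiring them up; the app name and the use of `register_listener` are assumptions about the surrounding example, not shown in the dataset rows.

```python
from sanic import Sanic

app = Sanic("worker")  # app name is made up

# Fire the webhook notifications around the server lifecycle.
app.register_listener(init, "before_server_start")
app.register_listener(server_stop, "after_server_stop")
```
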
tantale/deprecated | deprecated/classic.py | ClassicAdapter.get_deprecated_msg | def get_deprecated_msg(self, wrapped, instance):
"""
Get the deprecation warning message for the user.
:param wrapped: Wrapped class or function.
:param instance: The object to which the wrapped function was bound when it was called.
:return: The warning message.
"""
if instance is None:
if inspect.isclass(wrapped):
fmt = "Call to deprecated class {name}."
else:
fmt = "Call to deprecated function (or staticmethod) {name}."
else:
if inspect.isclass(instance):
fmt = "Call to deprecated class method {name}."
else:
fmt = "Call to deprecated method {name}."
if self.reason:
fmt += " ({reason})"
if self.version:
fmt += " -- Deprecated since version {version}."
return fmt.format(name=wrapped.__name__,
reason=self.reason or "",
version=self.version or "") | python | def get_deprecated_msg(self, wrapped, instance):
"""
Get the deprecation warning message for the user.
:param wrapped: Wrapped class or function.
:param instance: The object to which the wrapped function was bound when it was called.
:return: The warning message.
"""
if instance is None:
if inspect.isclass(wrapped):
fmt = "Call to deprecated class {name}."
else:
fmt = "Call to deprecated function (or staticmethod) {name}."
else:
if inspect.isclass(instance):
fmt = "Call to deprecated class method {name}."
else:
fmt = "Call to deprecated method {name}."
if self.reason:
fmt += " ({reason})"
if self.version:
fmt += " -- Deprecated since version {version}."
return fmt.format(name=wrapped.__name__,
reason=self.reason or "",
version=self.version or "") | [
"def",
"get_deprecated_msg",
"(",
"self",
",",
"wrapped",
",",
"instance",
")",
":",
"if",
"instance",
"is",
"None",
":",
"if",
"inspect",
".",
"isclass",
"(",
"wrapped",
")",
":",
"fmt",
"=",
"\"Call to deprecated class {name}.\"",
"else",
":",
"fmt",
"=",
"\"Call to deprecated function (or staticmethod) {name}.\"",
"else",
":",
"if",
"inspect",
".",
"isclass",
"(",
"instance",
")",
":",
"fmt",
"=",
"\"Call to deprecated class method {name}.\"",
"else",
":",
"fmt",
"=",
"\"Call to deprecated method {name}.\"",
"if",
"self",
".",
"reason",
":",
"fmt",
"+=",
"\" ({reason})\"",
"if",
"self",
".",
"version",
":",
"fmt",
"+=",
"\" -- Deprecated since version {version}.\"",
"return",
"fmt",
".",
"format",
"(",
"name",
"=",
"wrapped",
".",
"__name__",
",",
"reason",
"=",
"self",
".",
"reason",
"or",
"\"\"",
",",
"version",
"=",
"self",
".",
"version",
"or",
"\"\"",
")"
] | Get the deprecation warning message for the user.
:param wrapped: Wrapped class or function.
:param instance: The object to which the wrapped function was bound when it was called.
:return: The warning message. | [
"Get",
"the",
"deprecation",
"warning",
"message",
"for",
"the",
"user",
"."
] | 3dc742c571de7cebbbdaaf4c554f2f36fc61b3db | https://github.com/tantale/deprecated/blob/3dc742c571de7cebbbdaaf4c554f2f36fc61b3db/deprecated/classic.py#L101-L127 | train |
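
Given the format strings in `get_deprecated_msg`, decorating a plain function through the package's public `@deprecated` decorator (which builds on this adapter) would warn as sketched in the comments below; the function names, reason, and version are made up.

```python
from deprecated import deprecated

@deprecated(reason="use new_fn instead", version="1.2.0")
def old_fn():
    pass

old_fn()
# Expected warning, per the format strings above:
# DeprecationWarning: Call to deprecated function (or staticmethod) old_fn.
# (use new_fn instead) -- Deprecated since version 1.2.0.
```
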
izdi/django-slack-oauth | django_slack_oauth/pipelines.py | slack_user | def slack_user(request, api_data):
"""
Pipeline for backward compatibility with versions prior to 1.0.0,
in case you're willing to maintain the `slack_user` table.
"""
if request.user.is_anonymous:
return request, api_data
data = deepcopy(api_data)
slacker, _ = SlackUser.objects.get_or_create(slacker=request.user)
slacker.access_token = data.pop('access_token')
slacker.extras = data
slacker.save()
messages.add_message(request, messages.SUCCESS, 'Your account has been successfully updated with '
'Slack. You can share your messages within your slack '
'domain.')
return request, api_data | python | def slack_user(request, api_data):
"""
Pipeline for backward compatibility with versions prior to 1.0.0,
in case you're willing to maintain the `slack_user` table.
"""
if request.user.is_anonymous:
return request, api_data
data = deepcopy(api_data)
slacker, _ = SlackUser.objects.get_or_create(slacker=request.user)
slacker.access_token = data.pop('access_token')
slacker.extras = data
slacker.save()
messages.add_message(request, messages.SUCCESS, 'Your account has been successfully updated with '
'Slack. You can share your messages within your slack '
'domain.')
return request, api_data | [
"def",
"slack_user",
"(",
"request",
",",
"api_data",
")",
":",
"if",
"request",
".",
"user",
".",
"is_anonymous",
":",
"return",
"request",
",",
"api_data",
"data",
"=",
"deepcopy",
"(",
"api_data",
")",
"slacker",
",",
"_",
"=",
"SlackUser",
".",
"objects",
".",
"get_or_create",
"(",
"slacker",
"=",
"request",
".",
"user",
")",
"slacker",
".",
"access_token",
"=",
"data",
".",
"pop",
"(",
"'access_token'",
")",
"slacker",
".",
"extras",
"=",
"data",
"slacker",
".",
"save",
"(",
")",
"messages",
".",
"add_message",
"(",
"request",
",",
"messages",
".",
"SUCCESS",
",",
"'Your account has been successfully updated with '",
"'Slack. You can share your messages within your slack '",
"'domain.'",
")",
"return",
"request",
",",
"api_data"
] | Pipeline for backward compatibility with versions prior to 1.0.0,
in case you're willing to maintain the `slack_user` table. | [
"Pipeline",
"for",
"backward",
"compatibility",
"prior",
"to",
"1",
".",
"0",
".",
"0",
"version",
".",
"In",
"case",
"if",
"you",
"re",
"willing",
"maintain",
"slack_user",
"table",
"."
] | 46e10f7c64407a018b9585f257224fc38888fbcb | https://github.com/izdi/django-slack-oauth/blob/46e10f7c64407a018b9585f257224fc38888fbcb/django_slack_oauth/pipelines.py#L26-L46 | train |
matplotlib/cmocean | cmocean/data.py | read | def read(varin, fname='MS2_L10.mat.txt'):
'''Read in dataset for variable var
:param varin: Variable for which to read in data.
'''
# # fname = 'MS09_L10.mat.txt'
# # fname = 'MS09_L05.mat.txt' # has PAR
# fname = 'MS2_L10.mat.txt' # empty PAR
d = np.loadtxt(fname, comments='*')
if fname == 'MS2_L10.mat.txt':
var = ['lat', 'lon', 'depth', 'temp', 'density', 'sigma', 'oxygen',
'voltage 2', 'voltage 3', 'fluorescence-CDOM', 'fluorescence-ECO',
'turbidity', 'pressure', 'salinity', 'RINKO temperature',
'RINKO DO - CTD temp', 'RINKO DO - RINKO temp', 'bottom', 'PAR']
elif (fname == 'MS09_L05.mat.txt') or (fname == 'MS09_L10.mat.txt') or (fname == 'MS08_L12.mat.txt'):
var = ['lat', 'lon', 'depth', 'temp', 'density', 'sigma', 'oxygen',
'voltage 2', 'voltage 3', 'voltage 4', 'fluorescence-CDOM', 'fluorescence-ECO',
'turbidity', 'pressure', 'salinity', 'RINKO temperature',
'RINKO DO - CTD temp', 'RINKO DO - RINKO temp', 'bottom', 'PAR']
# return data for variable varin
return d[:, 0], d[:, 1], d[:, 2], d[:, var.index(varin)] | python | def read(varin, fname='MS2_L10.mat.txt'):
'''Read in dataset for variable var
:param varin: Variable for which to read in data.
'''
# # fname = 'MS09_L10.mat.txt'
# # fname = 'MS09_L05.mat.txt' # has PAR
# fname = 'MS2_L10.mat.txt' # empty PAR
d = np.loadtxt(fname, comments='*')
if fname == 'MS2_L10.mat.txt':
var = ['lat', 'lon', 'depth', 'temp', 'density', 'sigma', 'oxygen',
'voltage 2', 'voltage 3', 'fluorescence-CDOM', 'fluorescence-ECO',
'turbidity', 'pressure', 'salinity', 'RINKO temperature',
'RINKO DO - CTD temp', 'RINKO DO - RINKO temp', 'bottom', 'PAR']
elif (fname == 'MS09_L05.mat.txt') or (fname == 'MS09_L10.mat.txt') or (fname == 'MS08_L12.mat.txt'):
var = ['lat', 'lon', 'depth', 'temp', 'density', 'sigma', 'oxygen',
'voltage 2', 'voltage 3', 'voltage 4', 'fluorescence-CDOM', 'fluorescence-ECO',
'turbidity', 'pressure', 'salinity', 'RINKO temperature',
'RINKO DO - CTD temp', 'RINKO DO - RINKO temp', 'bottom', 'PAR']
# return data for variable varin
return d[:, 0], d[:, 1], d[:, 2], d[:, var.index(varin)] | [
"def",
"read",
"(",
"varin",
",",
"fname",
"=",
"'MS2_L10.mat.txt'",
")",
":",
"# # fname = 'MS09_L10.mat.txt'",
"# # fname = 'MS09_L05.mat.txt' # has PAR",
"# fname = 'MS2_L10.mat.txt' # empty PAR",
"d",
"=",
"np",
".",
"loadtxt",
"(",
"fname",
",",
"comments",
"=",
"'*'",
")",
"if",
"fname",
"==",
"'MS2_L10.mat.txt'",
":",
"var",
"=",
"[",
"'lat'",
",",
"'lon'",
",",
"'depth'",
",",
"'temp'",
",",
"'density'",
",",
"'sigma'",
",",
"'oxygen'",
",",
"'voltage 2'",
",",
"'voltage 3'",
",",
"'fluorescence-CDOM'",
",",
"'fluorescence-ECO'",
",",
"'turbidity'",
",",
"'pressure'",
",",
"'salinity'",
",",
"'RINKO temperature'",
",",
"'RINKO DO - CTD temp'",
",",
"'RINKO DO - RINKO temp'",
",",
"'bottom'",
",",
"'PAR'",
"]",
"elif",
"(",
"fname",
"==",
"'MS09_L05.mat.txt'",
")",
"or",
"(",
"fname",
"==",
"'MS09_L10.mat.txt'",
")",
"or",
"(",
"fname",
"==",
"'MS08_L12.mat.txt'",
")",
":",
"var",
"=",
"[",
"'lat'",
",",
"'lon'",
",",
"'depth'",
",",
"'temp'",
",",
"'density'",
",",
"'sigma'",
",",
"'oxygen'",
",",
"'voltage 2'",
",",
"'voltage 3'",
",",
"'voltage 4'",
",",
"'fluorescence-CDOM'",
",",
"'fluorescence-ECO'",
",",
"'turbidity'",
",",
"'pressure'",
",",
"'salinity'",
",",
"'RINKO temperature'",
",",
"'RINKO DO - CTD temp'",
",",
"'RINKO DO - RINKO temp'",
",",
"'bottom'",
",",
"'PAR'",
"]",
"# return data for variable varin",
"return",
"d",
"[",
":",
",",
"0",
"]",
",",
"d",
"[",
":",
",",
"1",
"]",
",",
"d",
"[",
":",
",",
"2",
"]",
",",
"d",
"[",
":",
",",
"var",
".",
"index",
"(",
"varin",
")",
"]"
] | Read in dataset for variable var
:param varin: Variable for which to read in data. | [
"Read",
"in",
"dataset",
"for",
"variable",
"var"
] | 37edd4a209a733d87dea7fed9eb22adc1d5a57c8 | https://github.com/matplotlib/cmocean/blob/37edd4a209a733d87dea7fed9eb22adc1d5a57c8/cmocean/data.py#L15-L40 | train |
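
A usage sketch for `read`, assuming one of the hard-coded data files is present in the working directory:

```python
# Returns the lat/lon/depth columns plus the requested variable's column.
lat, lon, z, temp = read('temp', fname='MS2_L10.mat.txt')
```
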
matplotlib/cmocean | cmocean/data.py | show | def show(cmap, var, vmin=None, vmax=None):
'''Show a colormap for a chosen input variable var side by side with
black and white and jet colormaps.
:param cmap: Colormap instance
:param var: Variable to plot.
:param vmin=None: Min plot value.
:param vmax=None: Max plot value.
'''
# get variable data
lat, lon, z, data = read(var)
fig = plt.figure(figsize=(16, 12))
# Plot with grayscale
ax = fig.add_subplot(3, 1, 1)
map1 = ax.scatter(lon, -z, c=data, cmap='gray', s=10, linewidths=0., vmin=vmin, vmax=vmax)
plt.colorbar(map1, ax=ax)
# Plot with jet
ax = fig.add_subplot(3, 1, 2)
map1 = ax.scatter(lon, -z, c=data, cmap='jet', s=10, linewidths=0., vmin=vmin, vmax=vmax)
plt.colorbar(map1, ax=ax)
# Plot with cmap
ax = fig.add_subplot(3, 1, 3)
map1 = ax.scatter(lon, -z, c=data, cmap=cmap, s=10, linewidths=0., vmin=vmin, vmax=vmax)
ax.set_xlabel('Longitude [degrees]')
ax.set_ylabel('Depth [m]')
plt.colorbar(map1, ax=ax)
plt.suptitle(var) | python | def show(cmap, var, vmin=None, vmax=None):
'''Show a colormap for a chosen input variable var side by side with
black and white and jet colormaps.
:param cmap: Colormap instance
:param var: Variable to plot.
:param vmin=None: Min plot value.
:param vmax=None: Max plot value.
'''
# get variable data
lat, lon, z, data = read(var)
fig = plt.figure(figsize=(16, 12))
# Plot with grayscale
ax = fig.add_subplot(3, 1, 1)
map1 = ax.scatter(lon, -z, c=data, cmap='gray', s=10, linewidths=0., vmin=vmin, vmax=vmax)
plt.colorbar(map1, ax=ax)
# Plot with jet
ax = fig.add_subplot(3, 1, 2)
map1 = ax.scatter(lon, -z, c=data, cmap='jet', s=10, linewidths=0., vmin=vmin, vmax=vmax)
plt.colorbar(map1, ax=ax)
# Plot with cmap
ax = fig.add_subplot(3, 1, 3)
map1 = ax.scatter(lon, -z, c=data, cmap=cmap, s=10, linewidths=0., vmin=vmin, vmax=vmax)
ax.set_xlabel('Longitude [degrees]')
ax.set_ylabel('Depth [m]')
plt.colorbar(map1, ax=ax)
plt.suptitle(var) | [
"def",
"show",
"(",
"cmap",
",",
"var",
",",
"vmin",
"=",
"None",
",",
"vmax",
"=",
"None",
")",
":",
"# get variable data",
"lat",
",",
"lon",
",",
"z",
",",
"data",
"=",
"read",
"(",
"var",
")",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"16",
",",
"12",
")",
")",
"# Plot with grayscale",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"3",
",",
"1",
",",
"1",
")",
"map1",
"=",
"ax",
".",
"scatter",
"(",
"lon",
",",
"-",
"z",
",",
"c",
"=",
"data",
",",
"cmap",
"=",
"'gray'",
",",
"s",
"=",
"10",
",",
"linewidths",
"=",
"0.",
",",
"vmin",
"=",
"vmin",
",",
"vmax",
"=",
"vmax",
")",
"plt",
".",
"colorbar",
"(",
"map1",
",",
"ax",
"=",
"ax",
")",
"# Plot with jet",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"3",
",",
"1",
",",
"2",
")",
"map1",
"=",
"ax",
".",
"scatter",
"(",
"lon",
",",
"-",
"z",
",",
"c",
"=",
"data",
",",
"cmap",
"=",
"'jet'",
",",
"s",
"=",
"10",
",",
"linewidths",
"=",
"0.",
",",
"vmin",
"=",
"vmin",
",",
"vmax",
"=",
"vmax",
")",
"plt",
".",
"colorbar",
"(",
"map1",
",",
"ax",
"=",
"ax",
")",
"# Plot with cmap",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"3",
",",
"1",
",",
"3",
")",
"map1",
"=",
"ax",
".",
"scatter",
"(",
"lon",
",",
"-",
"z",
",",
"c",
"=",
"data",
",",
"cmap",
"=",
"cmap",
",",
"s",
"=",
"10",
",",
"linewidths",
"=",
"0.",
",",
"vmin",
"=",
"vmin",
",",
"vmax",
"=",
"vmax",
")",
"ax",
".",
"set_xlabel",
"(",
"'Longitude [degrees]'",
")",
"ax",
".",
"set_ylabel",
"(",
"'Depth [m]'",
")",
"plt",
".",
"colorbar",
"(",
"map1",
",",
"ax",
"=",
"ax",
")",
"plt",
".",
"suptitle",
"(",
"var",
")"
] | Show a colormap for a chosen input variable var side by side with
black and white and jet colormaps.
:param cmap: Colormap instance
:param var: Variable to plot.
:param vmin=None: Min plot value.
:param vmax=None: Max plot value. | [
"Show",
"a",
"colormap",
"for",
"a",
"chosen",
"input",
"variable",
"var",
"side",
"by",
"side",
"with",
"black",
"and",
"white",
"and",
"jet",
"colormaps",
"."
] | 37edd4a209a733d87dea7fed9eb22adc1d5a57c8 | https://github.com/matplotlib/cmocean/blob/37edd4a209a733d87dea7fed9eb22adc1d5a57c8/cmocean/data.py#L43-L76 | train |
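
`show` can then compare a colormap against grayscale and jet for a single variable, assuming the sample data file is available; the temperature limits below are the "reasonable values" listed in `plot_data` in the next row.

```python
import cmocean

show(cmocean.cm.thermal, 'temp', vmin=26, vmax=33)
```
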
matplotlib/cmocean | cmocean/data.py | plot_data | def plot_data():
'''Plot sample data up with the fancy colormaps.
'''
var = ['temp', 'oxygen', 'salinity', 'fluorescence-ECO', 'density', 'PAR', 'turbidity', 'fluorescence-CDOM']
# colorbar limits for each property
lims = np.array([[26, 33], [0, 10], [0, 36], [0, 6], [1005, 1025], [0, 0.6], [0, 2], [0, 9]]) # reasonable values
# lims = np.array([[20,36], [26,33], [1.5,5.6], [0,4], [0,9], [0,1.5]]) # values to show colormaps
for fname in fnames:
fig, axes = plt.subplots(nrows=4, ncols=2)
fig.set_size_inches(20, 10)
fig.subplots_adjust(top=0.95, bottom=0.01, left=0.2, right=0.99, wspace=0.0, hspace=0.07)
i = 0
for ax, Var, cmap in zip(axes.flat, var, cmaps): # loop through data to plot up
# get variable data
lat, lon, z, data = test.read(Var, fname)
map1 = ax.scatter(lat, -z, c=data, cmap=cmap, s=10, linewidths=0., vmin=lims[i, 0], vmax=lims[i, 1])
# no stupid offset
y_formatter = mpl.ticker.ScalarFormatter(useOffset=False)
ax.xaxis.set_major_formatter(y_formatter)
if i == 6:
ax.set_xlabel('Latitude [degrees]')
ax.set_ylabel('Depth [m]')
else:
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_ylim(-z.max(), 0)
ax.set_xlim(lat.min(), lat.max())
cb = plt.colorbar(map1, ax=ax, pad=0.02)
cb.set_label(cmap.name + ' [' + '$' + cmap.units + '$]')
i += 1
fig.savefig('figures/' + fname.split('.')[0] + '.png', bbox_inches='tight') | python | def plot_data():
'''Plot sample data up with the fancy colormaps.
'''
var = ['temp', 'oxygen', 'salinity', 'fluorescence-ECO', 'density', 'PAR', 'turbidity', 'fluorescence-CDOM']
# colorbar limits for each property
lims = np.array([[26, 33], [0, 10], [0, 36], [0, 6], [1005, 1025], [0, 0.6], [0, 2], [0, 9]]) # reasonable values
# lims = np.array([[20,36], [26,33], [1.5,5.6], [0,4], [0,9], [0,1.5]]) # values to show colormaps
for fname in fnames:
fig, axes = plt.subplots(nrows=4, ncols=2)
fig.set_size_inches(20, 10)
fig.subplots_adjust(top=0.95, bottom=0.01, left=0.2, right=0.99, wspace=0.0, hspace=0.07)
i = 0
for ax, Var, cmap in zip(axes.flat, var, cmaps): # loop through data to plot up
# get variable data
lat, lon, z, data = test.read(Var, fname)
map1 = ax.scatter(lat, -z, c=data, cmap=cmap, s=10, linewidths=0., vmin=lims[i, 0], vmax=lims[i, 1])
# no stupid offset
y_formatter = mpl.ticker.ScalarFormatter(useOffset=False)
ax.xaxis.set_major_formatter(y_formatter)
if i == 6:
ax.set_xlabel('Latitude [degrees]')
ax.set_ylabel('Depth [m]')
else:
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_ylim(-z.max(), 0)
ax.set_xlim(lat.min(), lat.max())
cb = plt.colorbar(map1, ax=ax, pad=0.02)
cb.set_label(cmap.name + ' [' + '$' + cmap.units + '$]')
i += 1
fig.savefig('figures/' + fname.split('.')[0] + '.png', bbox_inches='tight') | [
"def",
"plot_data",
"(",
")",
":",
"var",
"=",
"[",
"'temp'",
",",
"'oxygen'",
",",
"'salinity'",
",",
"'fluorescence-ECO'",
",",
"'density'",
",",
"'PAR'",
",",
"'turbidity'",
",",
"'fluorescence-CDOM'",
"]",
"# colorbar limits for each property",
"lims",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"26",
",",
"33",
"]",
",",
"[",
"0",
",",
"10",
"]",
",",
"[",
"0",
",",
"36",
"]",
",",
"[",
"0",
",",
"6",
"]",
",",
"[",
"1005",
",",
"1025",
"]",
",",
"[",
"0",
",",
"0.6",
"]",
",",
"[",
"0",
",",
"2",
"]",
",",
"[",
"0",
",",
"9",
"]",
"]",
")",
"# reasonable values",
"# lims = np.array([[20,36], [26,33], [1.5,5.6], [0,4], [0,9], [0,1.5]]) # values to show colormaps",
"for",
"fname",
"in",
"fnames",
":",
"fig",
",",
"axes",
"=",
"plt",
".",
"subplots",
"(",
"nrows",
"=",
"4",
",",
"ncols",
"=",
"2",
")",
"fig",
".",
"set_size_inches",
"(",
"20",
",",
"10",
")",
"fig",
".",
"subplots_adjust",
"(",
"top",
"=",
"0.95",
",",
"bottom",
"=",
"0.01",
",",
"left",
"=",
"0.2",
",",
"right",
"=",
"0.99",
",",
"wspace",
"=",
"0.0",
",",
"hspace",
"=",
"0.07",
")",
"i",
"=",
"0",
"for",
"ax",
",",
"Var",
",",
"cmap",
"in",
"zip",
"(",
"axes",
".",
"flat",
",",
"var",
",",
"cmaps",
")",
":",
"# loop through data to plot up",
"# get variable data",
"lat",
",",
"lon",
",",
"z",
",",
"data",
"=",
"test",
".",
"read",
"(",
"Var",
",",
"fname",
")",
"map1",
"=",
"ax",
".",
"scatter",
"(",
"lat",
",",
"-",
"z",
",",
"c",
"=",
"data",
",",
"cmap",
"=",
"cmap",
",",
"s",
"=",
"10",
",",
"linewidths",
"=",
"0.",
",",
"vmin",
"=",
"lims",
"[",
"i",
",",
"0",
"]",
",",
"vmax",
"=",
"lims",
"[",
"i",
",",
"1",
"]",
")",
"# no stupid offset",
"y_formatter",
"=",
"mpl",
".",
"ticker",
".",
"ScalarFormatter",
"(",
"useOffset",
"=",
"False",
")",
"ax",
".",
"xaxis",
".",
"set_major_formatter",
"(",
"y_formatter",
")",
"if",
"i",
"==",
"6",
":",
"ax",
".",
"set_xlabel",
"(",
"'Latitude [degrees]'",
")",
"ax",
".",
"set_ylabel",
"(",
"'Depth [m]'",
")",
"else",
":",
"ax",
".",
"set_xticklabels",
"(",
"[",
"]",
")",
"ax",
".",
"set_yticklabels",
"(",
"[",
"]",
")",
"ax",
".",
"set_ylim",
"(",
"-",
"z",
".",
"max",
"(",
")",
",",
"0",
")",
"ax",
".",
"set_xlim",
"(",
"lat",
".",
"min",
"(",
")",
",",
"lat",
".",
"max",
"(",
")",
")",
"cb",
"=",
"plt",
".",
"colorbar",
"(",
"map1",
",",
"ax",
"=",
"ax",
",",
"pad",
"=",
"0.02",
")",
"cb",
".",
"set_label",
"(",
"cmap",
".",
"name",
"+",
"' ['",
"+",
"'$'",
"+",
"cmap",
".",
"units",
"+",
"'$]'",
")",
"i",
"+=",
"1",
"fig",
".",
"savefig",
"(",
"'figures/'",
"+",
"fname",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"+",
"'.png'",
",",
"bbox_inches",
"=",
"'tight'",
")"
] | Plot sample data up with the fancy colormaps. | [
"Plot",
"sample",
"data",
"up",
"with",
"the",
"fancy",
"colormaps",
"."
] | 37edd4a209a733d87dea7fed9eb22adc1d5a57c8 | https://github.com/matplotlib/cmocean/blob/37edd4a209a733d87dea7fed9eb22adc1d5a57c8/cmocean/data.py#L79-L115 | train |
matplotlib/cmocean | cmocean/plots.py | plot_lightness | def plot_lightness(saveplot=False):
'''Plot lightness of colormaps together.
'''
from colorspacious import cspace_converter
dc = 1.
x = np.linspace(0.0, 1.0, 256)
locs = [] # locations for text labels
fig = plt.figure(figsize=(16, 5))
ax = fig.add_subplot(111)
fig.subplots_adjust(left=0.03, right=0.97)
ax.set_xlim(-0.1, len(cm.cmap_d)/2. + 0.1)
ax.set_ylim(0, 100)
ax.set_xlabel('Lightness for each colormap', fontsize=14)
for j, cmapname in enumerate(cm.cmapnames):
if '_r' in cmapname: # skip reversed versions for plot
continue
cmap = cm.cmap_d[cmapname] # get the colormap instance
rgb = cmap(x)[np.newaxis, :, :3]
lab = cspace_converter("sRGB1", "CAM02-UCS")(rgb)
L = lab[0, :, 0]
if L[-1] > L[0]:
ax.scatter(x+j*dc, L, c=x, cmap=cmap, s=200, linewidths=0.)
else:
ax.scatter(x+j*dc, L[::-1], c=x[::-1], cmap=cmap, s=200, linewidths=0.)
locs.append(x[-1]+j*dc) # store locations for colormap labels
# Set up labels for colormaps
ax.xaxis.set_ticks_position('top')
ticker = mpl.ticker.FixedLocator(locs)
ax.xaxis.set_major_locator(ticker)
formatter = mpl.ticker.FixedFormatter([cmapname for cmapname in cm.cmapnames])
ax.xaxis.set_major_formatter(formatter)
labels = ax.get_xticklabels()
for label in labels:
label.set_rotation(60)
if saveplot:
fig.savefig('figures/lightness.png', bbox_inches='tight')
fig.savefig('figures/lightness.pdf', bbox_inches='tight')
plt.show() | python | def plot_lightness(saveplot=False):
'''Plot lightness of colormaps together.
'''
from colorspacious import cspace_converter
dc = 1.
x = np.linspace(0.0, 1.0, 256)
locs = [] # locations for text labels
fig = plt.figure(figsize=(16, 5))
ax = fig.add_subplot(111)
fig.subplots_adjust(left=0.03, right=0.97)
ax.set_xlim(-0.1, len(cm.cmap_d)/2. + 0.1)
ax.set_ylim(0, 100)
ax.set_xlabel('Lightness for each colormap', fontsize=14)
for j, cmapname in enumerate(cm.cmapnames):
if '_r' in cmapname: # skip reversed versions for plot
continue
cmap = cm.cmap_d[cmapname] # get the colormap instance
rgb = cmap(x)[np.newaxis, :, :3]
lab = cspace_converter("sRGB1", "CAM02-UCS")(rgb)
L = lab[0, :, 0]
if L[-1] > L[0]:
ax.scatter(x+j*dc, L, c=x, cmap=cmap, s=200, linewidths=0.)
else:
ax.scatter(x+j*dc, L[::-1], c=x[::-1], cmap=cmap, s=200, linewidths=0.)
locs.append(x[-1]+j*dc) # store locations for colormap labels
# Set up labels for colormaps
ax.xaxis.set_ticks_position('top')
ticker = mpl.ticker.FixedLocator(locs)
ax.xaxis.set_major_locator(ticker)
formatter = mpl.ticker.FixedFormatter([cmapname for cmapname in cm.cmapnames])
ax.xaxis.set_major_formatter(formatter)
labels = ax.get_xticklabels()
for label in labels:
label.set_rotation(60)
if saveplot:
fig.savefig('figures/lightness.png', bbox_inches='tight')
fig.savefig('figures/lightness.pdf', bbox_inches='tight')
plt.show() | [
"def",
"plot_lightness",
"(",
"saveplot",
"=",
"False",
")",
":",
"from",
"colorspacious",
"import",
"cspace_converter",
"dc",
"=",
"1.",
"x",
"=",
"np",
".",
"linspace",
"(",
"0.0",
",",
"1.0",
",",
"256",
")",
"locs",
"=",
"[",
"]",
"# locations for text labels",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"16",
",",
"5",
")",
")",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"111",
")",
"fig",
".",
"subplots_adjust",
"(",
"left",
"=",
"0.03",
",",
"right",
"=",
"0.97",
")",
"ax",
".",
"set_xlim",
"(",
"-",
"0.1",
",",
"len",
"(",
"cm",
".",
"cmap_d",
")",
"/",
"2.",
"+",
"0.1",
")",
"ax",
".",
"set_ylim",
"(",
"0",
",",
"100",
")",
"ax",
".",
"set_xlabel",
"(",
"'Lightness for each colormap'",
",",
"fontsize",
"=",
"14",
")",
"for",
"j",
",",
"cmapname",
"in",
"enumerate",
"(",
"cm",
".",
"cmapnames",
")",
":",
"if",
"'_r'",
"in",
"cmapname",
":",
"# skip reversed versions for plot",
"continue",
"cmap",
"=",
"cm",
".",
"cmap_d",
"[",
"cmapname",
"]",
"# get the colormap instance",
"rgb",
"=",
"cmap",
"(",
"x",
")",
"[",
"np",
".",
"newaxis",
",",
":",
",",
":",
"3",
"]",
"lab",
"=",
"cspace_converter",
"(",
"\"sRGB1\"",
",",
"\"CAM02-UCS\"",
")",
"(",
"rgb",
")",
"L",
"=",
"lab",
"[",
"0",
",",
":",
",",
"0",
"]",
"if",
"L",
"[",
"-",
"1",
"]",
">",
"L",
"[",
"0",
"]",
":",
"ax",
".",
"scatter",
"(",
"x",
"+",
"j",
"*",
"dc",
",",
"L",
",",
"c",
"=",
"x",
",",
"cmap",
"=",
"cmap",
",",
"s",
"=",
"200",
",",
"linewidths",
"=",
"0.",
")",
"else",
":",
"ax",
".",
"scatter",
"(",
"x",
"+",
"j",
"*",
"dc",
",",
"L",
"[",
":",
":",
"-",
"1",
"]",
",",
"c",
"=",
"x",
"[",
":",
":",
"-",
"1",
"]",
",",
"cmap",
"=",
"cmap",
",",
"s",
"=",
"200",
",",
"linewidths",
"=",
"0.",
")",
"locs",
".",
"append",
"(",
"x",
"[",
"-",
"1",
"]",
"+",
"j",
"*",
"dc",
")",
"# store locations for colormap labels",
"# Set up labels for colormaps",
"ax",
".",
"xaxis",
".",
"set_ticks_position",
"(",
"'top'",
")",
"ticker",
"=",
"mpl",
".",
"ticker",
".",
"FixedLocator",
"(",
"locs",
")",
"ax",
".",
"xaxis",
".",
"set_major_locator",
"(",
"ticker",
")",
"formatter",
"=",
"mpl",
".",
"ticker",
".",
"FixedFormatter",
"(",
"[",
"cmapname",
"for",
"cmapname",
"in",
"cm",
".",
"cmapnames",
"]",
")",
"ax",
".",
"xaxis",
".",
"set_major_formatter",
"(",
"formatter",
")",
"labels",
"=",
"ax",
".",
"get_xticklabels",
"(",
")",
"for",
"label",
"in",
"labels",
":",
"label",
".",
"set_rotation",
"(",
"60",
")",
"if",
"saveplot",
":",
"fig",
".",
"savefig",
"(",
"'figures/lightness.png'",
",",
"bbox_inches",
"=",
"'tight'",
")",
"fig",
".",
"savefig",
"(",
"'figures/lightness.pdf'",
",",
"bbox_inches",
"=",
"'tight'",
")",
"plt",
".",
"show",
"(",
")"
] | Plot lightness of colormaps together. | [
"Plot",
"lightness",
"of",
"colormaps",
"together",
"."
] | 37edd4a209a733d87dea7fed9eb22adc1d5a57c8 | https://github.com/matplotlib/cmocean/blob/37edd4a209a733d87dea7fed9eb22adc1d5a57c8/cmocean/plots.py#L14-L61 | train |
matplotlib/cmocean | cmocean/plots.py | plot_gallery | def plot_gallery(saveplot=False):
'''Make plot of colormaps and labels, like in the matplotlib
gallery.
:param saveplot=False: Whether to save the plot or not.
'''
from colorspacious import cspace_converter
gradient = np.linspace(0, 1, 256)
gradient = np.vstack((gradient, gradient))
x = np.linspace(0.0, 1.0, 256)
fig, axes = plt.subplots(nrows=int(len(cm.cmap_d)/2), ncols=1, figsize=(6, 12))
fig.subplots_adjust(top=0.99, bottom=0.01, left=0.2, right=0.99, wspace=0.05)
for ax, cmapname in zip(axes, cm.cmapnames):
if '_r' in cmapname: # skip reversed versions for plot
continue
cmap = cm.cmap_d[cmapname] # get the colormap instance
rgb = cmap(x)[np.newaxis, :, :3]
# Find a good conversion to grayscale
jch = cspace_converter("sRGB1", "CAM02-UCS")(rgb) # Not sure why to use JCh instead so using this.
L = jch[0, :, 0]
L = np.float32(np.vstack((L, L, L)))
ax.imshow(gradient, aspect='auto', cmap=cmap)
pos1 = ax.get_position() # get the original position
pos2 = [pos1.x0, pos1.y0, pos1.width, pos1.height / 3.0]
axbw = fig.add_axes(pos2) # colorbar axes
axbw.set_axis_off()
axbw.imshow(L, aspect='auto', cmap=cm.gray, vmin=0, vmax=100.)
pos = list(ax.get_position().bounds)
x_text = pos[0] - 0.01
y_text = pos[1] + pos[3]/2.
fig.text(x_text, y_text, cmap.name, va='center', ha='right')
# Turn off *all* ticks & spines, not just the ones with colormaps.
for ax in axes:
ax.set_axis_off()
if saveplot:
fig.savefig('figures/gallery.pdf', bbox_inches='tight')
fig.savefig('figures/gallery.png', bbox_inches='tight')
plt.show() | python | def plot_gallery(saveplot=False):
'''Make plot of colormaps and labels, like in the matplotlib
gallery.
:param saveplot=False: Whether to save the plot or not.
'''
from colorspacious import cspace_converter
gradient = np.linspace(0, 1, 256)
gradient = np.vstack((gradient, gradient))
x = np.linspace(0.0, 1.0, 256)
fig, axes = plt.subplots(nrows=int(len(cm.cmap_d)/2), ncols=1, figsize=(6, 12))
fig.subplots_adjust(top=0.99, bottom=0.01, left=0.2, right=0.99, wspace=0.05)
for ax, cmapname in zip(axes, cm.cmapnames):
if '_r' in cmapname: # skip reversed versions for plot
continue
cmap = cm.cmap_d[cmapname] # get the colormap instance
rgb = cmap(x)[np.newaxis, :, :3]
# Find a good conversion to grayscale
jch = cspace_converter("sRGB1", "CAM02-UCS")(rgb) # Not sure why to use JCh instead so using this.
L = jch[0, :, 0]
L = np.float32(np.vstack((L, L, L)))
ax.imshow(gradient, aspect='auto', cmap=cmap)
pos1 = ax.get_position() # get the original position
pos2 = [pos1.x0, pos1.y0, pos1.width, pos1.height / 3.0]
axbw = fig.add_axes(pos2) # colorbar axes
axbw.set_axis_off()
axbw.imshow(L, aspect='auto', cmap=cm.gray, vmin=0, vmax=100.)
pos = list(ax.get_position().bounds)
x_text = pos[0] - 0.01
y_text = pos[1] + pos[3]/2.
fig.text(x_text, y_text, cmap.name, va='center', ha='right')
# Turn off *all* ticks & spines, not just the ones with colormaps.
for ax in axes:
ax.set_axis_off()
if saveplot:
fig.savefig('figures/gallery.pdf', bbox_inches='tight')
fig.savefig('figures/gallery.png', bbox_inches='tight')
plt.show() | [
"def",
"plot_gallery",
"(",
"saveplot",
"=",
"False",
")",
":",
"from",
"colorspacious",
"import",
"cspace_converter",
"gradient",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"1",
",",
"256",
")",
"gradient",
"=",
"np",
".",
"vstack",
"(",
"(",
"gradient",
",",
"gradient",
")",
")",
"x",
"=",
"np",
".",
"linspace",
"(",
"0.0",
",",
"1.0",
",",
"256",
")",
"fig",
",",
"axes",
"=",
"plt",
".",
"subplots",
"(",
"nrows",
"=",
"int",
"(",
"len",
"(",
"cm",
".",
"cmap_d",
")",
"/",
"2",
")",
",",
"ncols",
"=",
"1",
",",
"figsize",
"=",
"(",
"6",
",",
"12",
")",
")",
"fig",
".",
"subplots_adjust",
"(",
"top",
"=",
"0.99",
",",
"bottom",
"=",
"0.01",
",",
"left",
"=",
"0.2",
",",
"right",
"=",
"0.99",
",",
"wspace",
"=",
"0.05",
")",
"for",
"ax",
",",
"cmapname",
"in",
"zip",
"(",
"axes",
",",
"cm",
".",
"cmapnames",
")",
":",
"if",
"'_r'",
"in",
"cmapname",
":",
"# skip reversed versions for plot",
"continue",
"cmap",
"=",
"cm",
".",
"cmap_d",
"[",
"cmapname",
"]",
"# get the colormap instance",
"rgb",
"=",
"cmap",
"(",
"x",
")",
"[",
"np",
".",
"newaxis",
",",
":",
",",
":",
"3",
"]",
"# Find a good conversion to grayscale",
"jch",
"=",
"cspace_converter",
"(",
"\"sRGB1\"",
",",
"\"CAM02-UCS\"",
")",
"(",
"rgb",
")",
"# Not sure why to use JCh instead so using this.",
"L",
"=",
"jch",
"[",
"0",
",",
":",
",",
"0",
"]",
"L",
"=",
"np",
".",
"float32",
"(",
"np",
".",
"vstack",
"(",
"(",
"L",
",",
"L",
",",
"L",
")",
")",
")",
"ax",
".",
"imshow",
"(",
"gradient",
",",
"aspect",
"=",
"'auto'",
",",
"cmap",
"=",
"cmap",
")",
"pos1",
"=",
"ax",
".",
"get_position",
"(",
")",
"# get the original position",
"pos2",
"=",
"[",
"pos1",
".",
"x0",
",",
"pos1",
".",
"y0",
",",
"pos1",
".",
"width",
",",
"pos1",
".",
"height",
"/",
"3.0",
"]",
"axbw",
"=",
"fig",
".",
"add_axes",
"(",
"pos2",
")",
"# colorbar axes",
"axbw",
".",
"set_axis_off",
"(",
")",
"axbw",
".",
"imshow",
"(",
"L",
",",
"aspect",
"=",
"'auto'",
",",
"cmap",
"=",
"cm",
".",
"gray",
",",
"vmin",
"=",
"0",
",",
"vmax",
"=",
"100.",
")",
"pos",
"=",
"list",
"(",
"ax",
".",
"get_position",
"(",
")",
".",
"bounds",
")",
"x_text",
"=",
"pos",
"[",
"0",
"]",
"-",
"0.01",
"y_text",
"=",
"pos",
"[",
"1",
"]",
"+",
"pos",
"[",
"3",
"]",
"/",
"2.",
"fig",
".",
"text",
"(",
"x_text",
",",
"y_text",
",",
"cmap",
".",
"name",
",",
"va",
"=",
"'center'",
",",
"ha",
"=",
"'right'",
")",
"# Turn off *all* ticks & spines, not just the ones with colormaps.",
"for",
"ax",
"in",
"axes",
":",
"ax",
".",
"set_axis_off",
"(",
")",
"if",
"saveplot",
":",
"fig",
".",
"savefig",
"(",
"'figures/gallery.pdf'",
",",
"bbox_inches",
"=",
"'tight'",
")",
"fig",
".",
"savefig",
"(",
"'figures/gallery.png'",
",",
"bbox_inches",
"=",
"'tight'",
")",
"plt",
".",
"show",
"(",
")"
] | Make plot of colormaps and labels, like in the matplotlib
gallery.
:param saveplot=False: Whether to save the plot or not. | [
"Make",
"plot",
"of",
"colormaps",
"and",
"labels",
"like",
"in",
"the",
"matplotlib",
"gallery",
"."
] | 37edd4a209a733d87dea7fed9eb22adc1d5a57c8 | https://github.com/matplotlib/cmocean/blob/37edd4a209a733d87dea7fed9eb22adc1d5a57c8/cmocean/plots.py#L64-L115 | train |
matplotlib/cmocean | cmocean/plots.py | wrap_viscm | def wrap_viscm(cmap, dpi=100, saveplot=False):
'''Evaluate goodness of colormap using perceptual deltas.
:param cmap: Colormap instance.
:param dpi=100: dpi for saved image.
:param saveplot=False: Whether to save the plot or not.
'''
from viscm import viscm
viscm(cmap)
fig = plt.gcf()
fig.set_size_inches(22, 10)
plt.show()
if saveplot:
fig.savefig('figures/eval_' + cmap.name + '.png', bbox_inches='tight', dpi=dpi)
fig.savefig('figures/eval_' + cmap.name + '.pdf', bbox_inches='tight', dpi=dpi) | python | def wrap_viscm(cmap, dpi=100, saveplot=False):
'''Evaluate goodness of colormap using perceptual deltas.
:param cmap: Colormap instance.
:param dpi=100: dpi for saved image.
:param saveplot=False: Whether to save the plot or not.
'''
from viscm import viscm
viscm(cmap)
fig = plt.gcf()
fig.set_size_inches(22, 10)
plt.show()
if saveplot:
fig.savefig('figures/eval_' + cmap.name + '.png', bbox_inches='tight', dpi=dpi)
fig.savefig('figures/eval_' + cmap.name + '.pdf', bbox_inches='tight', dpi=dpi) | [
"def",
"wrap_viscm",
"(",
"cmap",
",",
"dpi",
"=",
"100",
",",
"saveplot",
"=",
"False",
")",
":",
"from",
"viscm",
"import",
"viscm",
"viscm",
"(",
"cmap",
")",
"fig",
"=",
"plt",
".",
"gcf",
"(",
")",
"fig",
".",
"set_size_inches",
"(",
"22",
",",
"10",
")",
"plt",
".",
"show",
"(",
")",
"if",
"saveplot",
":",
"fig",
".",
"savefig",
"(",
"'figures/eval_'",
"+",
"cmap",
".",
"name",
"+",
"'.png'",
",",
"bbox_inches",
"=",
"'tight'",
",",
"dpi",
"=",
"dpi",
")",
"fig",
".",
"savefig",
"(",
"'figures/eval_'",
"+",
"cmap",
".",
"name",
"+",
"'.pdf'",
",",
"bbox_inches",
"=",
"'tight'",
",",
"dpi",
"=",
"dpi",
")"
] | Evaluate goodness of colormap using perceptual deltas.
:param cmap: Colormap instance.
:param dpi=100: dpi for saved image.
:param saveplot=False: Whether to save the plot or not. | [
"Evaluate",
"goodness",
"of",
"colormap",
"using",
"perceptual",
"deltas",
"."
] | 37edd4a209a733d87dea7fed9eb22adc1d5a57c8 | https://github.com/matplotlib/cmocean/blob/37edd4a209a733d87dea7fed9eb22adc1d5a57c8/cmocean/plots.py#L118-L136 | train |
matplotlib/cmocean | cmocean/plots.py | quick_plot | def quick_plot(cmap, fname=None, fig=None, ax=None, N=10):
'''Show quick test of a colormap.
'''
x = np.linspace(0, 10, N)
X, _ = np.meshgrid(x, x)
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
mappable = ax.pcolor(X, cmap=cmap)
ax.set_title(cmap.name, fontsize=14)
ax.set_xticks([])
ax.set_yticks([])
plt.colorbar(mappable)
plt.show()
if fname is not None:
plt.savefig(fname + '.png', bbox_inches='tight') | python | def quick_plot(cmap, fname=None, fig=None, ax=None, N=10):
'''Show quick test of a colormap.
'''
x = np.linspace(0, 10, N)
X, _ = np.meshgrid(x, x)
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
mappable = ax.pcolor(X, cmap=cmap)
ax.set_title(cmap.name, fontsize=14)
ax.set_xticks([])
ax.set_yticks([])
plt.colorbar(mappable)
plt.show()
if fname is not None:
plt.savefig(fname + '.png', bbox_inches='tight') | [
"def",
"quick_plot",
"(",
"cmap",
",",
"fname",
"=",
"None",
",",
"fig",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"N",
"=",
"10",
")",
":",
"x",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"10",
",",
"N",
")",
"X",
",",
"_",
"=",
"np",
".",
"meshgrid",
"(",
"x",
",",
"x",
")",
"if",
"ax",
"is",
"None",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
")",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"111",
")",
"mappable",
"=",
"ax",
".",
"pcolor",
"(",
"X",
",",
"cmap",
"=",
"cmap",
")",
"ax",
".",
"set_title",
"(",
"cmap",
".",
"name",
",",
"fontsize",
"=",
"14",
")",
"ax",
".",
"set_xticks",
"(",
"[",
"]",
")",
"ax",
".",
"set_yticks",
"(",
"[",
"]",
")",
"plt",
".",
"colorbar",
"(",
"mappable",
")",
"plt",
".",
"show",
"(",
")",
"if",
"fname",
"is",
"not",
"None",
":",
"plt",
".",
"savefig",
"(",
"fname",
"+",
"'.png'",
",",
"bbox_inches",
"=",
"'tight'",
")"
] | Show quick test of a colormap. | [
"Show",
"quick",
"test",
"of",
"a",
"colormap",
"."
] | 37edd4a209a733d87dea7fed9eb22adc1d5a57c8 | https://github.com/matplotlib/cmocean/blob/37edd4a209a733d87dea7fed9eb22adc1d5a57c8/cmocean/plots.py#L164-L183 | train |
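A minimal usage sketch of quick_plot above; the colormap and file name are illustrative assumptions:

# Hypothetical usage; draws a 10x10 pcolor test image and saves 'thermal_test.png'.
import cmocean
from cmocean.plots import quick_plot

quick_plot(cmocean.cm.thermal, fname='thermal_test', N=10)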
matplotlib/cmocean | cmocean/tools.py | print_colormaps | def print_colormaps(cmaps, N=256, returnrgb=True, savefiles=False):
'''Print colormaps in 256 RGB colors to text files.
:param returnrgb=True: Whether or not to return the rgb array. Only makes sense if printing one colormap's rgb.
'''
rgb = []
for cmap in cmaps:
rgbtemp = cmap(np.linspace(0, 1, N))[np.newaxis, :, :3][0]
if savefiles:
np.savetxt(cmap.name + '-rgb.txt', rgbtemp)
rgb.append(rgbtemp)
if returnrgb:
return rgb | python | def print_colormaps(cmaps, N=256, returnrgb=True, savefiles=False):
'''Print colormaps in 256 RGB colors to text files.
:param returnrgb=True: Whether or not to return the rgb array. Only makes sense if printing one colormap's rgb.
'''
rgb = []
for cmap in cmaps:
rgbtemp = cmap(np.linspace(0, 1, N))[np.newaxis, :, :3][0]
if savefiles:
np.savetxt(cmap.name + '-rgb.txt', rgbtemp)
rgb.append(rgbtemp)
if returnrgb:
return rgb | [
"def",
"print_colormaps",
"(",
"cmaps",
",",
"N",
"=",
"256",
",",
"returnrgb",
"=",
"True",
",",
"savefiles",
"=",
"False",
")",
":",
"rgb",
"=",
"[",
"]",
"for",
"cmap",
"in",
"cmaps",
":",
"rgbtemp",
"=",
"cmap",
"(",
"np",
".",
"linspace",
"(",
"0",
",",
"1",
",",
"N",
")",
")",
"[",
"np",
".",
"newaxis",
",",
":",
",",
":",
"3",
"]",
"[",
"0",
"]",
"if",
"savefiles",
":",
"np",
".",
"savetxt",
"(",
"cmap",
".",
"name",
"+",
"'-rgb.txt'",
",",
"rgbtemp",
")",
"rgb",
".",
"append",
"(",
"rgbtemp",
")",
"if",
"returnrgb",
":",
"return",
"rgb"
] | Print colormaps in 256 RGB colors to text files.
:param returnrgb=True: Whether or not to return the rgb array. Only makes sense if printing one colormap's rgb.
"Print",
"colormaps",
"in",
"256",
"RGB",
"colors",
"to",
"text",
"files",
"."
] | 37edd4a209a733d87dea7fed9eb22adc1d5a57c8 | https://github.com/matplotlib/cmocean/blob/37edd4a209a733d87dea7fed9eb22adc1d5a57c8/cmocean/tools.py#L17-L34 | train |
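A minimal usage sketch of print_colormaps above; the colormap choices are illustrative assumptions:

# Hypothetical usage; writes '<name>-rgb.txt' in the working directory for each
# colormap and returns the list of (N, 3) rgb arrays.
import cmocean
from cmocean.tools import print_colormaps

rgb = print_colormaps([cmocean.cm.haline, cmocean.cm.ice],
                      N=256, returnrgb=True, savefiles=True)
print(rgb[0].shape)  # (256, 3)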
matplotlib/cmocean | cmocean/tools.py | cmap | def cmap(rgbin, N=256):
'''Input an array of rgb values to generate a colormap.
:param rgbin: An [mx3] array, where m is the number of input color triplets which
are interpolated between to make the colormap that is returned. Hex values
can be input instead, as [mx1] in single quotes with a #.
:param N=256: The number of levels to be interpolated to.
'''
# rgb inputs here
if not isinstance(rgbin[0], _string_types):
# normalize to be out of 1 if out of 256 instead
if rgbin.max() > 1:
rgbin = rgbin/256.
cmap = mpl.colors.LinearSegmentedColormap.from_list('mycmap', rgbin, N=N)
return cmap | python | def cmap(rgbin, N=256):
'''Input an array of rgb values to generate a colormap.
:param rgbin: An [mx3] array, where m is the number of input color triplets which
are interpolated between to make the colormap that is returned. Hex values
can be input instead, as [mx1] in single quotes with a #.
:param N=256: The number of levels to be interpolated to.
'''
# rgb inputs here
if not isinstance(rgbin[0], _string_types):
# normalize to be out of 1 if out of 256 instead
if rgbin.max() > 1:
rgbin = rgbin/256.
cmap = mpl.colors.LinearSegmentedColormap.from_list('mycmap', rgbin, N=N)
return cmap | [
"def",
"cmap",
"(",
"rgbin",
",",
"N",
"=",
"256",
")",
":",
"# rgb inputs here",
"if",
"not",
"isinstance",
"(",
"rgbin",
"[",
"0",
"]",
",",
"_string_types",
")",
":",
"# normalize to be out of 1 if out of 256 instead",
"if",
"rgbin",
".",
"max",
"(",
")",
">",
"1",
":",
"rgbin",
"=",
"rgbin",
"/",
"256.",
"cmap",
"=",
"mpl",
".",
"colors",
".",
"LinearSegmentedColormap",
".",
"from_list",
"(",
"'mycmap'",
",",
"rgbin",
",",
"N",
"=",
"N",
")",
"return",
"cmap"
] | Input an array of rgb values to generate a colormap.
:param rgbin: An [mx3] array, where m is the number of input color triplets which
are interpolated between to make the colormap that is returned. Hex values
can be input instead, as [mx1] in single quotes with a #.
:param N=256: The number of levels to be interpolated to.
"Input",
"an",
"array",
"of",
"rgb",
"values",
"to",
"generate",
"a",
"colormap",
"."
] | 37edd4a209a733d87dea7fed9eb22adc1d5a57c8 | https://github.com/matplotlib/cmocean/blob/37edd4a209a733d87dea7fed9eb22adc1d5a57c8/cmocean/tools.py#L73-L91 | train |
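A minimal usage sketch of the cmap helper above; the anchor colors are illustrative assumptions:

# Hypothetical usage; three 0-255 RGB anchors are normalized internally
# (divided by 256 when the max exceeds 1) and interpolated to 256 levels.
import numpy as np
from cmocean.tools import cmap

anchors = np.array([[0, 0, 128], [255, 255, 255], [128, 0, 0]])
mycmap = cmap(anchors, N=256)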
matplotlib/cmocean | cmocean/tools.py | lighten | def lighten(cmapin, alpha):
'''Lighten a colormap by adding alpha < 1.
:param cmap: A colormap object, like cmocean.cm.matter.
:param alpha: An alpha or transparency value to assign the colormap. Alpha
of 1 is opaque and alpha of 0 is fully transparent.
Outputs resultant colormap object.
This will lighten the appearance of a plot you make using the output
colormap object. It is also possible to lighten many plots in the
plotting function itself (e.g. pcolormesh or contourf).
'''
# set the alpha value while retaining the number of rows in original cmap
return cmap(cmapin(np.linspace(0,1,cmapin.N), alpha)) | python | def lighten(cmapin, alpha):
'''Lighten a colormap by adding alpha < 1.
:param cmap: A colormap object, like cmocean.cm.matter.
:param alpha: An alpha or transparency value to assign the colormap. Alpha
of 1 is opaque and alpha of 0 is fully transparent.
Outputs resultant colormap object.
This will lighten the appearance of a plot you make using the output
colormap object. It is also possible to lighten many plots in the
plotting function itself (e.g. pcolormesh or contourf).
'''
# set the alpha value while retaining the number of rows in original cmap
return cmap(cmapin(np.linspace(0,1,cmapin.N), alpha)) | [
"def",
"lighten",
"(",
"cmapin",
",",
"alpha",
")",
":",
"# set the alpha value while retaining the number of rows in original cmap",
"return",
"cmap",
"(",
"cmapin",
"(",
"np",
".",
"linspace",
"(",
"0",
",",
"1",
",",
"cmapin",
".",
"N",
")",
",",
"alpha",
")",
")"
] | Lighten a colormap by adding alpha < 1.
:param cmap: A colormap object, like cmocean.cm.matter.
:param alpha: An alpha or transparency value to assign the colormap. Alpha
of 1 is opaque and alpha of 0 is fully transparent.
Outputs resultant colormap object.
This will lighten the appearance of a plot you make using the output
colormap object. It is also possible to lighten many plots in the
plotting function itself (e.g. pcolormesh or contourf). | [
"Lighten",
"a",
"colormap",
"by",
"adding",
"alpha",
"<",
"1",
"."
] | 37edd4a209a733d87dea7fed9eb22adc1d5a57c8 | https://github.com/matplotlib/cmocean/blob/37edd4a209a733d87dea7fed9eb22adc1d5a57c8/cmocean/tools.py#L94-L109 | train |
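A minimal usage sketch of lighten above; alpha=1 is opaque, alpha=0 is fully transparent, and the colormap choice is an illustrative assumption:

# Hypothetical usage; builds a half-transparent copy of cmocean.cm.matter.
import cmocean
from cmocean.tools import lighten

light_matter = lighten(cmocean.cm.matter, 0.5)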
matplotlib/cmocean | cmocean/tools.py | crop_by_percent | def crop_by_percent(cmap, per, which='both', N=None):
'''Crop end or ends of a colormap by per percent.
:param cmap: A colormap object, like cmocean.cm.matter.
:param per: Percent of colormap to remove. If which=='both', take this
percent off both ends of colormap. If which=='min' or which=='max',
take percent only off the specified end of colormap.
:param which='both': which end or ends of colormap to cut off. which='both'
removes from both ends, which='min' from bottom end, and which='max'
from top end.
:param N=None: User can specify the number of rows for the outgoing colormap.
If unspecified, N from incoming colormap will be used and values will
be interpolated as needed to fill in rows.
Outputs resultant colormap object.
This is a wrapper around crop() to make it easier to use for cropping
based on percent.
Examples:
# example with oxy map: cut off yellow part which is top 20%
# compare with full colormap
vmin = 0; vmax = 10; pivot = 5
A = np.random.randint(vmin, vmax, (5,5))
fig, axes = plt.subplots(1, 2)
mappable = axes[0].pcolormesh(A, vmin=vmin, vmax=vmax, cmap=cmocean.cm.oxy)
fig.colorbar(mappable, ax=axes[0])
vmin = 0; vmax = 8; pivot = 5
newcmap = crop_by_percent(cmocean.cm.oxy, 20, which='max', N=None)
plt.figure()
plt.pcolormesh(A, vmin=vmin, vmax=vmax, cmap=newcmap)
plt.colorbar()
# example with oxy map: cut off red part which is bottom 20%
# compare with full colormap
vmin = 0; vmax = 10; pivot = 5
A = np.random.randint(vmin, vmax, (5,5))
fig, axes = plt.subplots(1, 2)
mappable = axes[0].pcolormesh(A, vmin=vmin, vmax=vmax, cmap=cmocean.cm.oxy)
fig.colorbar(mappable, ax=axes[0])
vmin = 2; vmax = 10; pivot = 5
A = np.random.randint(vmin, vmax, (5,5))
newcmap = crop_by_percent(cmocean.cm.oxy, 20, which='min', N=None)
plt.figure()
plt.pcolormesh(A, vmin=vmin, vmax=vmax, cmap=newcmap)
plt.colorbar()
# crop both dark ends off colormap to reduce range
newcmap = crop_by_percent(cmocean.cm.balance, 10, which='both', N=None)
plt.figure()
A = np.random.randint(-5, 5, (5,5))
plt.pcolormesh(A, vmin=vmin, vmax=vmax, cmap=newcmap)
plt.colorbar()
'''
if which == 'both': # take percent off both ends of cmap
vmin = -100; vmax = 100; pivot = 0
dmax = per
elif which == 'min': # take percent off bottom of cmap
vmax = 10; pivot = 5
vmin = (0 + per/100)*2*pivot
dmax = None
elif which == 'max': # take percent off top of cmap
vmin = 0; pivot = 5
vmax = (1 - per/100)*2*pivot
dmax = None
newcmap = crop(cmap, vmin, vmax, pivot, dmax=dmax, N=N)
return newcmap | python | def crop_by_percent(cmap, per, which='both', N=None):
'''Crop end or ends of a colormap by per percent.
:param cmap: A colormap object, like cmocean.cm.matter.
:param per: Percent of colormap to remove. If which=='both', take this
percent off both ends of colormap. If which=='min' or which=='max',
take percent only off the specified end of colormap.
:param which='both': which end or ends of colormap to cut off. which='both'
removes from both ends, which='min' from bottom end, and which='max'
from top end.
:param N=None: User can specify the number of rows for the outgoing colormap.
If unspecified, N from incoming colormap will be used and values will
be interpolated as needed to fill in rows.
Outputs resultant colormap object.
This is a wrapper around crop() to make it easier to use for cropping
based on percent.
Examples:
# example with oxy map: cut off yellow part which is top 20%
# compare with full colormap
vmin = 0; vmax = 10; pivot = 5
A = np.random.randint(vmin, vmax, (5,5))
fig, axes = plt.subplots(1, 2)
mappable = axes[0].pcolormesh(A, vmin=vmin, vmax=vmax, cmap=cmocean.cm.oxy)
fig.colorbar(mappable, ax=axes[0])
vmin = 0; vmax = 8; pivot = 5
newcmap = crop_by_percent(cmocean.cm.oxy, 20, which='max', N=None)
plt.figure()
plt.pcolormesh(A, vmin=vmin, vmax=vmax, cmap=newcmap)
plt.colorbar()
# example with oxy map: cut off red part which is bottom 20%
# compare with full colormap
vmin = 0; vmax = 10; pivot = 5
A = np.random.randint(vmin, vmax, (5,5))
fig, axes = plt.subplots(1, 2)
mappable = axes[0].pcolormesh(A, vmin=vmin, vmax=vmax, cmap=cmocean.cm.oxy)
fig.colorbar(mappable, ax=axes[0])
vmin = 2; vmax = 10; pivot = 5
A = np.random.randint(vmin, vmax, (5,5))
newcmap = crop_by_percent(cmocean.cm.oxy, 20, which='min', N=None)
plt.figure()
plt.pcolormesh(A, vmin=vmin, vmax=vmax, cmap=newcmap)
plt.colorbar()
# crop both dark ends off colormap to reduce range
newcmap = crop_by_percent(cmocean.cm.balance, 10, which='both', N=None)
plt.figure()
A = np.random.randint(-5, 5, (5,5))
plt.pcolormesh(A, vmin=vmin, vmax=vmax, cmap=newcmap)
plt.colorbar()
'''
if which == 'both': # take percent off both ends of cmap
vmin = -100; vmax = 100; pivot = 0
dmax = per
elif which == 'min': # take percent off bottom of cmap
vmax = 10; pivot = 5
vmin = (0 + per/100)*2*pivot
dmax = None
elif which == 'max': # take percent off top of cmap
vmin = 0; pivot = 5
vmax = (1 - per/100)*2*pivot
dmax = None
newcmap = crop(cmap, vmin, vmax, pivot, dmax=dmax, N=N)
return newcmap | [
"def",
"crop_by_percent",
"(",
"cmap",
",",
"per",
",",
"which",
"=",
"'both'",
",",
"N",
"=",
"None",
")",
":",
"if",
"which",
"==",
"'both'",
":",
"# take percent off both ends of cmap",
"vmin",
"=",
"-",
"100",
"vmax",
"=",
"100",
"pivot",
"=",
"0",
"dmax",
"=",
"per",
"elif",
"which",
"==",
"'min'",
":",
"# take percent off bottom of cmap",
"vmax",
"=",
"10",
"pivot",
"=",
"5",
"vmin",
"=",
"(",
"0",
"+",
"per",
"/",
"100",
")",
"*",
"2",
"*",
"pivot",
"dmax",
"=",
"None",
"elif",
"which",
"==",
"'max'",
":",
"# take percent off top of cmap",
"vmin",
"=",
"0",
"pivot",
"=",
"5",
"vmax",
"=",
"(",
"1",
"-",
"per",
"/",
"100",
")",
"*",
"2",
"*",
"pivot",
"dmax",
"=",
"None",
"newcmap",
"=",
"crop",
"(",
"cmap",
",",
"vmin",
",",
"vmax",
",",
"pivot",
",",
"dmax",
"=",
"dmax",
",",
"N",
"=",
"N",
")",
"return",
"newcmap"
] | Crop end or ends of a colormap by per percent.
:param cmap: A colormap object, like cmocean.cm.matter.
:param per: Percent of colormap to remove. If which=='both', take this
percent off both ends of colormap. If which=='min' or which=='max',
take percent only off the specified end of colormap.
:param which='both': which end or ends of colormap to cut off. which='both'
removes from both ends, which='min' from bottom end, and which='max'
from top end.
:param N=None: User can specify the number of rows for the outgoing colormap.
If unspecified, N from incoming colormap will be used and values will
be interpolated as needed to fill in rows.
Outputs resultant colormap object.
This is a wrapper around crop() to make it easier to use for cropping
based on percent.
Examples:
# example with oxy map: cut off yellow part which is top 20%
# compare with full colormap
vmin = 0; vmax = 10; pivot = 5
A = np.random.randint(vmin, vmax, (5,5))
fig, axes = plt.subplots(1, 2)
mappable = axes[0].pcolormesh(A, vmin=vmin, vmax=vmax, cmap=cmocean.cm.oxy)
fig.colorbar(mappable, ax=axes[0])
vmin = 0; vmax = 8; pivot = 5
newcmap = crop_by_percent(cmocean.cm.oxy, 20, which='max', N=None)
plt.figure()
plt.pcolormesh(A, vmin=vmin, vmax=vmax, cmap=newcmap)
plt.colorbar()
# example with oxy map: cut off red part which is bottom 20%
# compare with full colormap
vmin = 0; vmax = 10; pivot = 5
A = np.random.randint(vmin, vmax, (5,5))
fig, axes = plt.subplots(1, 2)
mappable = axes[0].pcolormesh(A, vmin=vmin, vmax=vmax, cmap=cmocean.cm.oxy)
fig.colorbar(mappable, ax=axes[0])
vmin = 2; vmax = 10; pivot = 5
A = np.random.randint(vmin, vmax, (5,5))
newcmap = crop_by_percent(cmocean.cm.oxy, 20, which='min', N=None)
plt.figure()
plt.pcolormesh(A, vmin=vmin, vmax=vmax, cmap=newcmap)
plt.colorbar()
# crop both dark ends off colormap to reduce range
newcmap = crop_by_percent(cmocean.cm.balance, 10, which='both', N=None)
plt.figure()
A = np.random.randint(-5, 5, (5,5))
plt.pcolormesh(A, vmin=vmin, vmax=vmax, cmap=newcmap)
plt.colorbar() | [
"Crop",
"end",
"or",
"ends",
"of",
"a",
"colormap",
"by",
"per",
"percent",
"."
] | 37edd4a209a733d87dea7fed9eb22adc1d5a57c8 | https://github.com/matplotlib/cmocean/blob/37edd4a209a733d87dea7fed9eb22adc1d5a57c8/cmocean/tools.py#L198-L270 | train |
enricobacis/wos | wos/client.py | WosClient._premium | def _premium(fn):
"""Premium decorator for APIs that require premium access level."""
@_functools.wraps(fn)
def _fn(self, *args, **kwargs):
if self._lite:
raise RuntimeError('Premium API not available in lite access.')
return fn(self, *args, **kwargs)
return _fn | python | def _premium(fn):
"""Premium decorator for APIs that require premium access level."""
@_functools.wraps(fn)
def _fn(self, *args, **kwargs):
if self._lite:
raise RuntimeError('Premium API not available in lite access.')
return fn(self, *args, **kwargs)
return _fn | [
"def",
"_premium",
"(",
"fn",
")",
":",
"@",
"_functools",
".",
"wraps",
"(",
"fn",
")",
"def",
"_fn",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_lite",
":",
"raise",
"RuntimeError",
"(",
"'Premium API not available in lite access.'",
")",
"return",
"fn",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"_fn"
] | Premium decorator for APIs that require premium access level. | [
"Premium",
"decorator",
"for",
"APIs",
"that",
"require",
"premium",
"access",
"level",
"."
] | a51f4d1a983c2c7529caac3e09606a432223630d | https://github.com/enricobacis/wos/blob/a51f4d1a983c2c7529caac3e09606a432223630d/wos/client.py#L70-L77 | train |
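A minimal sketch of how a gate like _premium behaves on an instance flag; the _Demo class is purely illustrative and not part of the wos package:

# Illustrative only: a stand-in class using the decorator defined above.
class _Demo:
    def __init__(self, lite):
        self._lite = lite

    @_premium
    def premium_only(self):
        return 'ok'

assert _Demo(lite=False).premium_only() == 'ok'
# _Demo(lite=True).premium_only() raises RuntimeError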
enricobacis/wos | wos/client.py | WosClient.make_retrieveParameters | def make_retrieveParameters(offset=1, count=100, name='RS', sort='D'):
"""Create retrieve parameters dictionary to be used with APIs.
:count: Number of records to display in the result. Cannot be less than
0 and cannot be greater than 100. If count is 0 then only the
summary information will be returned.
:offset: First record in results to return. Must be greater than zero
:name: Name of the field to order by. Use a two-character abbreviation
to specify the field ('AU': Author, 'CF': Conference Title,
'CG': Page, 'CW': Source, 'CV': Volume, 'LC': Local Times Cited,
'LD': Load Date, 'PG': Page, 'PY': Publication Year, 'RS':
Relevance, 'SO': Source, 'TC': Times Cited, 'VL': Volume)
:sort: Must be A (ascending) or D (descending). The sort parameter can
only be D for Relevance and TimesCited.
"""
return _OrderedDict([
('firstRecord', offset),
('count', count),
('sortField', _OrderedDict([('name', name), ('sort', sort)]))
]) | python | def make_retrieveParameters(offset=1, count=100, name='RS', sort='D'):
"""Create retrieve parameters dictionary to be used with APIs.
:count: Number of records to display in the result. Cannot be less than
0 and cannot be greater than 100. If count is 0 then only the
summary information will be returned.
:offset: First record in results to return. Must be greater than zero
:name: Name of the field to order by. Use a two-character abbreviation
to specify the field ('AU': Author, 'CF': Conference Title,
'CG': Page, 'CW': Source, 'CV': Volume, 'LC': Local Times Cited,
'LD': Load Date, 'PG': Page, 'PY': Publication Year, 'RS':
Relevance, 'SO': Source, 'TC': Times Cited, 'VL': Volume)
:sort: Must be A (ascending) or D (descending). The sort parameter can
only be D for Relevance and TimesCited.
"""
return _OrderedDict([
('firstRecord', offset),
('count', count),
('sortField', _OrderedDict([('name', name), ('sort', sort)]))
]) | [
"def",
"make_retrieveParameters",
"(",
"offset",
"=",
"1",
",",
"count",
"=",
"100",
",",
"name",
"=",
"'RS'",
",",
"sort",
"=",
"'D'",
")",
":",
"return",
"_OrderedDict",
"(",
"[",
"(",
"'firstRecord'",
",",
"offset",
")",
",",
"(",
"'count'",
",",
"count",
")",
",",
"(",
"'sortField'",
",",
"_OrderedDict",
"(",
"[",
"(",
"'name'",
",",
"name",
")",
",",
"(",
"'sort'",
",",
"sort",
")",
"]",
")",
")",
"]",
")"
] | Create retrieve parameters dictionary to be used with APIs.
:count: Number of records to display in the result. Cannot be less than
0 and cannot be greater than 100. If count is 0 then only the
summary information will be returned.
:offset: First record in results to return. Must be greater than zero
:name: Name of the field to order by. Use a two-character abbreviation
to specify the field ('AU': Author, 'CF': Conference Title,
'CG': Page, 'CW': Source, 'CV': Volume, 'LC': Local Times Cited,
'LD': Load Date, 'PG': Page, 'PY': Publication Year, 'RS':
Relevance, 'SO': Source, 'TC': Times Cited, 'VL': Volume)
:sort: Must be A (ascending) or D (descending). The sort parameter can
only be D for Relevance and TimesCited. | [
"Create",
"retrieve",
"parameters",
"dictionary",
"to",
"be",
"used",
"with",
"APIs",
"."
] | a51f4d1a983c2c7529caac3e09606a432223630d | https://github.com/enricobacis/wos/blob/a51f4d1a983c2c7529caac3e09606a432223630d/wos/client.py#L80-L102 | train |
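A minimal usage sketch of make_retrieveParameters above; calling it through the class assumes it is exposed as a static helper, which is not shown in this record:

# Hypothetical call: request records 101-200, sorted by Times Cited, descending.
from wos import WosClient

params = WosClient.make_retrieveParameters(offset=101, count=100,
                                           name='TC', sort='D')
# params -> OrderedDict([('firstRecord', 101), ('count', 100),
#                        ('sortField', OrderedDict([('name', 'TC'),
#                                                   ('sort', 'D')]))])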
enricobacis/wos | wos/client.py | WosClient.connect | def connect(self):
"""Authenticate to WOS and set the SID cookie."""
if not self._SID:
self._SID = self._auth.service.authenticate()
print('Authenticated (SID: %s)' % self._SID)
self._search.set_options(headers={'Cookie': 'SID="%s"' % self._SID})
self._auth.options.headers.update({'Cookie': 'SID="%s"' % self._SID})
return self._SID | python | def connect(self):
"""Authenticate to WOS and set the SID cookie."""
if not self._SID:
self._SID = self._auth.service.authenticate()
print('Authenticated (SID: %s)' % self._SID)
self._search.set_options(headers={'Cookie': 'SID="%s"' % self._SID})
self._auth.options.headers.update({'Cookie': 'SID="%s"' % self._SID})
return self._SID | [
"def",
"connect",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_SID",
":",
"self",
".",
"_SID",
"=",
"self",
".",
"_auth",
".",
"service",
".",
"authenticate",
"(",
")",
"print",
"(",
"'Authenticated (SID: %s)'",
"%",
"self",
".",
"_SID",
")",
"self",
".",
"_search",
".",
"set_options",
"(",
"headers",
"=",
"{",
"'Cookie'",
":",
"'SID=\"%s\"'",
"%",
"self",
".",
"_SID",
"}",
")",
"self",
".",
"_auth",
".",
"options",
".",
"headers",
".",
"update",
"(",
"{",
"'Cookie'",
":",
"'SID=\"%s\"'",
"%",
"self",
".",
"_SID",
"}",
")",
"return",
"self",
".",
"_SID"
] | Authenticate to WOS and set the SID cookie. | [
"Authenticate",
"to",
"WOS",
"and",
"set",
"the",
"SID",
"cookie",
"."
] | a51f4d1a983c2c7529caac3e09606a432223630d | https://github.com/enricobacis/wos/blob/a51f4d1a983c2c7529caac3e09606a432223630d/wos/client.py#L104-L112 | train |
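A minimal session sketch around connect above; the constructor arguments are placeholder assumptions, since only the method body is shown in this record:

# Hypothetical setup; replace the credentials with real ones.
from wos import WosClient

client = WosClient(user='USER', password='PASS')
sid = client.connect()  # prints 'Authenticated (SID: ...)' and returns the SID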
enricobacis/wos | wos/client.py | WosClient.close | def close(self):
"""The close operation loads the session if it is valid and then closes
it and releases the session seat. All the session data are deleted and
become invalid after the request is processed. The session ID can no
longer be used in subsequent requests."""
if self._SID:
self._auth.service.closeSession()
self._SID = None | python | def close(self):
"""The close operation loads the session if it is valid and then closes
it and releases the session seat. All the session data are deleted and
become invalid after the request is processed. The session ID can no
longer be used in subsequent requests."""
if self._SID:
self._auth.service.closeSession()
self._SID = None | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"_SID",
":",
"self",
".",
"_auth",
".",
"service",
".",
"closeSession",
"(",
")",
"self",
".",
"_SID",
"=",
"None"
] | The close operation loads the session if it is valid and then closes
it and releases the session seat. All the session data are deleted and
become invalid after the request is processed. The session ID can no
longer be used in subsequent requests. | [
"The",
"close",
"operation",
"loads",
"the",
"session",
"if",
"it",
"is",
"valid",
"and",
"then",
"closes",
"it",
"and",
"releases",
"the",
"session",
"seat",
".",
"All",
"the",
"session",
"data",
"are",
"deleted",
"and",
"become",
"invalid",
"after",
"the",
"request",
"is",
"processed",
".",
"The",
"session",
"ID",
"can",
"no",
"longer",
"be",
"used",
"in",
"subsequent",
"requests",
"."
] | a51f4d1a983c2c7529caac3e09606a432223630d | https://github.com/enricobacis/wos/blob/a51f4d1a983c2c7529caac3e09606a432223630d/wos/client.py#L114-L121 | train |
enricobacis/wos | wos/client.py | WosClient.search | def search(self, query, count=5, offset=1, editions=None,
symbolicTimeSpan=None, timeSpan=None, retrieveParameters=None):
"""The search operation submits a search query to the specified
database edition and retrieves data. This operation returns a query ID
that can be used in subsequent operations to retrieve more records.
:query: User query for requesting data. The query parser will return
errors for invalid queries
:count: Number of records to display in the result. Cannot be less than
0 and cannot be greater than 100. If count is 0 then only the
summary information will be returned.
:offset: First record in results to return. Must be greater than zero
:editions: List of editions to be searched. If None, user permissions
will be substituted.
Fields:
collection - Name of the collection
edition - Name of the edition
:symbolicTimeSpan: This element defines a range of load dates. The load
date is the date when a record was added to a
database. If symbolicTimeSpan is specified, the
timeSpan parameter must be omitted. If timeSpan and
symbolicTimeSpan are both omitted, then the maximum
publication date time span will be inferred from the
editions data.
Valid values:
'1week' - Specifies to use the end date as today and
the begin date as 1 week prior to today.
'2week' - Specifies to use the end date as today and
the begin date as 2 weeks prior to today.
'4week' - Specifies to use the end date as today and
the begin date as 4 weeks prior to today.
:timeSpan: This element specifies a range of publication dates.
If timeSpan is used, the symbolicTimeSpan parameter must be
omitted. If timeSpan and symbolicTimeSpan are both omitted,
then the maximum time span will be inferred from the
editions data.
Fields:
begin - Beginning date for this search. Format: YYYY-MM-DD
end - Ending date for this search. Format: YYYY-MM-DD
:retrieveParameters: Retrieve parameters. If omitted the result of
make_retrieveParameters(offset, count, 'RS', 'D')
is used.
"""
return self._search.service.search(
queryParameters=_OrderedDict([
('databaseId', 'WOS'),
('userQuery', query),
('editions', editions),
('symbolicTimeSpan', symbolicTimeSpan),
('timeSpan', timeSpan),
('queryLanguage', 'en')
]),
retrieveParameters=(retrieveParameters or
self.make_retrieveParameters(offset, count))
) | python | def search(self, query, count=5, offset=1, editions=None,
symbolicTimeSpan=None, timeSpan=None, retrieveParameters=None):
"""The search operation submits a search query to the specified
database edition and retrieves data. This operation returns a query ID
that can be used in subsequent operations to retrieve more records.
:query: User query for requesting data. The query parser will return
errors for invalid queries
:count: Number of records to display in the result. Cannot be less than
0 and cannot be greater than 100. If count is 0 then only the
summary information will be returned.
:offset: First record in results to return. Must be greater than zero
:editions: List of editions to be searched. If None, user permissions
will be substituted.
Fields:
collection - Name of the collection
edition - Name of the edition
:symbolicTimeSpan: This element defines a range of load dates. The load
date is the date when a record was added to a
database. If symbolicTimeSpan is specified, the
timeSpan parameter must be omitted. If timeSpan and
symbolicTimeSpan are both omitted, then the maximum
publication date time span will be inferred from the
editions data.
Valid values:
'1week' - Specifies to use the end date as today and
the begin date as 1 week prior to today.
'2week' - Specifies to use the end date as today and
the begin date as 2 weeks prior to today.
'4week' - Specifies to use the end date as today and
the begin date as 4 weeks prior to today.
:timeSpan: This element specifies a range of publication dates.
If timeSpan is used, the symbolicTimeSpan parameter must be
omitted. If timeSpan and symbolicTimeSpan are both omitted,
then the maximum time span will be inferred from the
editions data.
Fields:
begin - Beginning date for this search. Format: YYYY-MM-DD
end - Ending date for this search. Format: YYYY-MM-DD
:retrieveParameters: Retrieve parameters. If omitted the result of
make_retrieveParameters(offset, count, 'RS', 'D')
is used.
"""
return self._search.service.search(
queryParameters=_OrderedDict([
('databaseId', 'WOS'),
('userQuery', query),
('editions', editions),
('symbolicTimeSpan', symbolicTimeSpan),
('timeSpan', timeSpan),
('queryLanguage', 'en')
]),
retrieveParameters=(retrieveParameters or
self.make_retrieveParameters(offset, count))
) | [
"def",
"search",
"(",
"self",
",",
"query",
",",
"count",
"=",
"5",
",",
"offset",
"=",
"1",
",",
"editions",
"=",
"None",
",",
"symbolicTimeSpan",
"=",
"None",
",",
"timeSpan",
"=",
"None",
",",
"retrieveParameters",
"=",
"None",
")",
":",
"return",
"self",
".",
"_search",
".",
"service",
".",
"search",
"(",
"queryParameters",
"=",
"_OrderedDict",
"(",
"[",
"(",
"'databaseId'",
",",
"'WOS'",
")",
",",
"(",
"'userQuery'",
",",
"query",
")",
",",
"(",
"'editions'",
",",
"editions",
")",
",",
"(",
"'symbolicTimeSpan'",
",",
"symbolicTimeSpan",
")",
",",
"(",
"'timeSpan'",
",",
"timeSpan",
")",
",",
"(",
"'queryLanguage'",
",",
"'en'",
")",
"]",
")",
",",
"retrieveParameters",
"=",
"(",
"retrieveParameters",
"or",
"self",
".",
"make_retrieveParameters",
"(",
"offset",
",",
"count",
")",
")",
")"
] | The search operation submits a search query to the specified
database edition and retrieves data. This operation returns a query ID
that can be used in subsequent operations to retrieve more records.
:query: User query for requesting data. The query parser will return
errors for invalid queries
:count: Number of records to display in the result. Cannot be less than
0 and cannot be greater than 100. If count is 0 then only the
summary information will be returned.
:offset: First record in results to return. Must be greater than zero
:editions: List of editions to be searched. If None, user permissions
will be substituted.
Fields:
collection - Name of the collection
edition - Name of the edition
:symbolicTimeSpan: This element defines a range of load dates. The load
date is the date when a record was added to a
database. If symbolicTimeSpan is specified, the
timeSpan parameter must be omitted. If timeSpan and
symbolicTimeSpan are both omitted, then the maximum
publication date time span will be inferred from the
editions data.
Valid values:
'1week' - Specifies to use the end date as today and
the begin date as 1 week prior to today.
'2week' - Specifies to use the end date as today and
the begin date as 2 weeks prior to today.
'4week' - Specifies to use the end date as today and
the begin date as 4 weeks prior to today.
:timeSpan: This element specifies a range of publication dates.
If timeSpan is used, the symbolicTimeSpan parameter must be
omitted. If timeSpan and symbolicTimeSpan are both omitted,
then the maximum time span will be inferred from the
editions data.
Fields:
begin - Beginning date for this search. Format: YYYY-MM-DD
end - Ending date for this search. Format: YYYY-MM-DD
:retrieveParameters: Retrieve parameters. If omitted the result of
make_retrieveParameters(offset, count, 'RS', 'D')
is used. | [
"The",
"search",
"operation",
"submits",
"a",
"search",
"query",
"to",
"the",
"specified",
"database",
"edition",
"and",
"retrieves",
"data",
".",
"This",
"operation",
"returns",
"a",
"query",
"ID",
"that",
"can",
"be",
"used",
"in",
"subsequent",
"operations",
"to",
"retrieve",
"more",
"records",
"."
] | a51f4d1a983c2c7529caac3e09606a432223630d | https://github.com/enricobacis/wos/blob/a51f4d1a983c2c7529caac3e09606a432223630d/wos/client.py#L124-L187 | train |
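A minimal usage sketch of search above, assuming an authenticated client as in the connect sketch; the response field name is an assumption about the WOS SOAP schema:

# Hypothetical topic search for the first five records.
result = client.search('TS=(ocean AND colormap)', count=5, offset=1)
print(result.recordsFound)  # total matches reported by the service (assumed field)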
enricobacis/wos | wos/client.py | WosClient.citedReferences | def citedReferences(self, uid, count=100, offset=1,
retrieveParameters=None):
"""The citedReferences operation returns references cited by an article
identified by a unique identifier. You may specify only one identifier
per request.
:uid: Thomson Reuters unique record identifier
:count: Number of records to display in the result. Cannot be less than
0 and cannot be greater than 100. If count is 0 then only the
summary information will be returned.
:offset: First record in results to return. Must be greater than zero
:retrieveParameters: Retrieve parameters. If omitted the result of
make_retrieveParameters(offset, count, 'RS', 'D')
is used.
"""
return self._search.service.citedReferences(
databaseId='WOS',
uid=uid,
queryLanguage='en',
retrieveParameters=(retrieveParameters or
self.make_retrieveParameters(offset, count))
) | python | def citedReferences(self, uid, count=100, offset=1,
retrieveParameters=None):
"""The citedReferences operation returns references cited by an article
identified by a unique identifier. You may specify only one identifier
per request.
:uid: Thomson Reuters unique record identifier
:count: Number of records to display in the result. Cannot be less than
0 and cannot be greater than 100. If count is 0 then only the
summary information will be returned.
:offset: First record in results to return. Must be greater than zero
:retrieveParameters: Retrieve parameters. If omitted the result of
make_retrieveParameters(offset, count, 'RS', 'D')
is used.
"""
return self._search.service.citedReferences(
databaseId='WOS',
uid=uid,
queryLanguage='en',
retrieveParameters=(retrieveParameters or
self.make_retrieveParameters(offset, count))
) | [
"def",
"citedReferences",
"(",
"self",
",",
"uid",
",",
"count",
"=",
"100",
",",
"offset",
"=",
"1",
",",
"retrieveParameters",
"=",
"None",
")",
":",
"return",
"self",
".",
"_search",
".",
"service",
".",
"citedReferences",
"(",
"databaseId",
"=",
"'WOS'",
",",
"uid",
"=",
"uid",
",",
"queryLanguage",
"=",
"'en'",
",",
"retrieveParameters",
"=",
"(",
"retrieveParameters",
"or",
"self",
".",
"make_retrieveParameters",
"(",
"offset",
",",
"count",
")",
")",
")"
] | The citedReferences operation returns references cited by an article
identified by a unique identifier. You may specify only one identifier
per request.
:uid: Thomson Reuters unique record identifier
:count: Number of records to display in the result. Cannot be less than
0 and cannot be greater than 100. If count is 0 then only the
summary information will be returned.
:offset: First record in results to return. Must be greater than zero
:retrieveParameters: Retrieve parameters. If omitted the result of
make_retrieveParameters(offset, count, 'RS', 'D')
is used. | [
"The",
"citedReferences",
"operation",
"returns",
"references",
"cited",
"by",
"an",
"article",
"identified",
"by",
"a",
"unique",
"identifier",
".",
"You",
"may",
"specify",
"only",
"one",
"identifier",
"per",
"request",
"."
] | a51f4d1a983c2c7529caac3e09606a432223630d | https://github.com/enricobacis/wos/blob/a51f4d1a983c2c7529caac3e09606a432223630d/wos/client.py#L250-L274 | train |
enricobacis/wos | wos/client.py | WosClient.citedReferencesRetrieve | def citedReferencesRetrieve(self, queryId, count=100, offset=1,
retrieveParameters=None):
"""The citedReferencesRetrieve operation submits a query returned by a
previous citedReferences operation.
This operation is useful for overcoming the retrieval limit of 100
records per query. For example, a citedReferences operation may find
106 cited references, as revealed by the content of the recordsFound
element, but it returns only records 1-100. You could perform a
subsequent citedReferencesRetrieve operation to obtain records 101-106.
:queryId: The query ID from a previous citedReferences operation
:count: Number of records to display in the result. Cannot be less than
0 and cannot be greater than 100. If count is 0 then only the
summary information will be returned.
:offset: First record in results to return. Must be greater than zero
:retrieveParameters: Retrieve parameters. If omitted the result of
make_retrieveParameters(offset, count, 'RS', 'D')
is used.
"""
return self._search.service.citedReferencesRetrieve(
queryId=queryId,
retrieveParameters=(retrieveParameters or
self.make_retrieveParameters(offset, count))
) | python | def citedReferencesRetrieve(self, queryId, count=100, offset=1,
retrieveParameters=None):
"""The citedReferencesRetrieve operation submits a query returned by a
previous citedReferences operation.
This operation is useful for overcoming the retrieval limit of 100
records per query. For example, a citedReferences operation may find
106 cited references, as revealed by the content of the recordsFound
element, but it returns only records 1-100. You could perform a
subsequent citedReferencesRetrieve operation to obtain records 101-106.
:queryId: The query ID from a previous citedReferences operation
:count: Number of records to display in the result. Cannot be less than
0 and cannot be greater than 100. If count is 0 then only the
summary information will be returned.
:offset: First record in results to return. Must be greater than zero
:retrieveParameters: Retrieve parameters. If omitted the result of
make_retrieveParameters(offset, count, 'RS', 'D')
is used.
"""
return self._search.service.citedReferencesRetrieve(
queryId=queryId,
retrieveParameters=(retrieveParameters or
self.make_retrieveParameters(offset, count))
) | [
"def",
"citedReferencesRetrieve",
"(",
"self",
",",
"queryId",
",",
"count",
"=",
"100",
",",
"offset",
"=",
"1",
",",
"retrieveParameters",
"=",
"None",
")",
":",
"return",
"self",
".",
"_search",
".",
"service",
".",
"citedReferencesRetrieve",
"(",
"queryId",
"=",
"queryId",
",",
"retrieveParameters",
"=",
"(",
"retrieveParameters",
"or",
"self",
".",
"make_retrieveParameters",
"(",
"offset",
",",
"count",
")",
")",
")"
] | The citedReferencesRetrieve operation submits a query returned by a
previous citedReferences operation.
This operation is useful for overcoming the retrieval limit of 100
records per query. For example, a citedReferences operation may find
106 cited references, as revealed by the content of the recordsFound
element, but it returns only records 1-100. You could perform a
subsequent citedReferencesRetrieve operation to obtain records 101-106.
:queryId: The query ID from a previous citedReferences operation
:count: Number of records to display in the result. Cannot be less than
0 and cannot be greater than 100. If count is 0 then only the
summary information will be returned.
:offset: First record in results to return. Must be greater than zero
:retrieveParameters: Retrieve parameters. If omitted the result of
make_retrieveParameters(offset, count, 'RS', 'D')
is used. | [
"The",
"citedReferencesRetrieve",
"operation",
"submits",
"a",
"query",
"returned",
"by",
"a",
"previous",
"citedReferences",
"operation",
"."
] | a51f4d1a983c2c7529caac3e09606a432223630d | https://github.com/enricobacis/wos/blob/a51f4d1a983c2c7529caac3e09606a432223630d/wos/client.py#L278-L305 | train |
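A minimal pagination sketch combining citedReferences and citedReferencesRetrieve, as the docstring describes; the UID and the response field names (references, recordsFound, queryId) are assumptions, not confirmed by this record:

# Hypothetical loop: fetch cited references 100 at a time past the first page.
first = client.citedReferences('WOS:000270372400005', count=100, offset=1)
refs = list(first.references)
while len(refs) < first.recordsFound:
    more = client.citedReferencesRetrieve(first.queryId, count=100,
                                          offset=len(refs) + 1)
    refs.extend(more)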
enricobacis/wos | wos/utils.py | single | def single(wosclient, wos_query, xml_query=None, count=5, offset=1):
"""Perform a single Web of Science query and then XML query the results."""
result = wosclient.search(wos_query, count, offset)
xml = _re.sub(' xmlns="[^"]+"', '', result.records, count=1).encode('utf-8')
if xml_query:
xml = _ET.fromstring(xml)
return [el.text for el in xml.findall(xml_query)]
else:
return _minidom.parseString(xml).toprettyxml() | python | def single(wosclient, wos_query, xml_query=None, count=5, offset=1):
"""Perform a single Web of Science query and then XML query the results."""
result = wosclient.search(wos_query, count, offset)
xml = _re.sub(' xmlns="[^"]+"', '', result.records, count=1).encode('utf-8')
if xml_query:
xml = _ET.fromstring(xml)
return [el.text for el in xml.findall(xml_query)]
else:
return _minidom.parseString(xml).toprettyxml() | [
"def",
"single",
"(",
"wosclient",
",",
"wos_query",
",",
"xml_query",
"=",
"None",
",",
"count",
"=",
"5",
",",
"offset",
"=",
"1",
")",
":",
"result",
"=",
"wosclient",
".",
"search",
"(",
"wos_query",
",",
"count",
",",
"offset",
")",
"xml",
"=",
"_re",
".",
"sub",
"(",
"' xmlns=\"[^\"]+\"'",
",",
"''",
",",
"result",
".",
"records",
",",
"count",
"=",
"1",
")",
".",
"encode",
"(",
"'utf-8'",
")",
"if",
"xml_query",
":",
"xml",
"=",
"_ET",
".",
"fromstring",
"(",
"xml",
")",
"return",
"[",
"el",
".",
"text",
"for",
"el",
"in",
"xml",
".",
"findall",
"(",
"xml_query",
")",
"]",
"else",
":",
"return",
"_minidom",
".",
"parseString",
"(",
"xml",
")",
".",
"toprettyxml",
"(",
")"
] | Perform a single Web of Science query and then XML query the results. | [
"Perform",
"a",
"single",
"Web",
"of",
"Science",
"query",
"and",
"then",
"XML",
"query",
"the",
"results",
"."
] | a51f4d1a983c2c7529caac3e09606a432223630d | https://github.com/enricobacis/wos/blob/a51f4d1a983c2c7529caac3e09606a432223630d/wos/utils.py#L10-L18 | train |
enricobacis/wos | wos/utils.py | query | def query(wosclient, wos_query, xml_query=None, count=5, offset=1, limit=100):
"""Query Web of Science and XML query results with multiple requests."""
results = [single(wosclient, wos_query, xml_query, min(limit, count-x+1), x)
for x in range(offset, count+1, limit)]
if xml_query:
return [el for res in results for el in res]
else:
pattern = _re.compile(r'^<\?xml.*?\n<records>\n|\n</records>$.*')
return ('<?xml version="1.0" ?>\n<records>' +
'\n'.join(pattern.sub('', res) for res in results) +
'</records>') | python | def query(wosclient, wos_query, xml_query=None, count=5, offset=1, limit=100):
"""Query Web of Science and XML query results with multiple requests."""
results = [single(wosclient, wos_query, xml_query, min(limit, count-x+1), x)
for x in range(offset, count+1, limit)]
if xml_query:
return [el for res in results for el in res]
else:
pattern = _re.compile(r'^<\?xml.*?\n<records>\n|\n</records>$.*')
return ('<?xml version="1.0" ?>\n<records>' +
'\n'.join(pattern.sub('', res) for res in results) +
'</records>') | [
"def",
"query",
"(",
"wosclient",
",",
"wos_query",
",",
"xml_query",
"=",
"None",
",",
"count",
"=",
"5",
",",
"offset",
"=",
"1",
",",
"limit",
"=",
"100",
")",
":",
"results",
"=",
"[",
"single",
"(",
"wosclient",
",",
"wos_query",
",",
"xml_query",
",",
"min",
"(",
"limit",
",",
"count",
"-",
"x",
"+",
"1",
")",
",",
"x",
")",
"for",
"x",
"in",
"range",
"(",
"offset",
",",
"count",
"+",
"1",
",",
"limit",
")",
"]",
"if",
"xml_query",
":",
"return",
"[",
"el",
"for",
"res",
"in",
"results",
"for",
"el",
"in",
"res",
"]",
"else",
":",
"pattern",
"=",
"_re",
".",
"compile",
"(",
"r'^<\\?xml.*?\\n<records>\\n|\\n</records>$.*'",
")",
"return",
"(",
"'<?xml version=\"1.0\" ?>\\n<records>'",
"+",
"'\\n'",
".",
"join",
"(",
"pattern",
".",
"sub",
"(",
"''",
",",
"res",
")",
"for",
"res",
"in",
"results",
")",
"+",
"'</records>'",
")"
] | Query Web of Science and XML query results with multiple requests. | [
"Query",
"Web",
"of",
"Science",
"and",
"XML",
"query",
"results",
"with",
"multiple",
"requests",
"."
] | a51f4d1a983c2c7529caac3e09606a432223630d | https://github.com/enricobacis/wos/blob/a51f4d1a983c2c7529caac3e09606a432223630d/wos/utils.py#L21-L31 | train |
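A minimal usage sketch of query above, assuming an authenticated client; the XPath is an assumption about the WOS record layout:

# Hypothetical batched query: pull up to 250 titles in chunks of 100 records.
from wos.utils import query

titles = query(client, 'AU=Knuth',
               xml_query='./REC/static_data/summary/titles/title',
               count=250, offset=1, limit=100)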
enricobacis/wos | wos/utils.py | doi_to_wos | def doi_to_wos(wosclient, doi):
"""Convert DOI to WOS identifier."""
results = query(wosclient, 'DO="%s"' % doi, './REC/UID', count=1)
return results[0].lstrip('WOS:') if results else None | python | def doi_to_wos(wosclient, doi):
"""Convert DOI to WOS identifier."""
results = query(wosclient, 'DO="%s"' % doi, './REC/UID', count=1)
return results[0].lstrip('WOS:') if results else None | [
"def",
"doi_to_wos",
"(",
"wosclient",
",",
"doi",
")",
":",
"results",
"=",
"query",
"(",
"wosclient",
",",
"'DO=\"%s\"'",
"%",
"doi",
",",
"'./REC/UID'",
",",
"count",
"=",
"1",
")",
"return",
"results",
"[",
"0",
"]",
".",
"lstrip",
"(",
"'WOS:'",
")",
"if",
"results",
"else",
"None"
] | Convert DOI to WOS identifier. | [
"Convert",
"DOI",
"to",
"WOS",
"identifier",
"."
] | a51f4d1a983c2c7529caac3e09606a432223630d | https://github.com/enricobacis/wos/blob/a51f4d1a983c2c7529caac3e09606a432223630d/wos/utils.py#L34-L37 | train |
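One caveat worth noting about doi_to_wos above: str.lstrip('WOS:') strips any leading run of the characters W, O, S, and the colon, not the literal prefix, so an identifier that begins with those letters after the prefix would be over-stripped. A prefix-safe variant (an illustrative rewrite, not the library's code) could look like:

def _strip_wos_prefix(uid):
    # Remove only the literal 'WOS:' prefix, unlike str.lstrip.
    return uid[4:] if uid.startswith('WOS:') else uid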
adamchainz/django-perf-rec | django_perf_rec/sql.py | sql_fingerprint | def sql_fingerprint(query, hide_columns=True):
"""
Simplify a query, taking away exact values and fields selected.
Imperfect but better than super explicit, value-dependent queries.
"""
parsed_query = parse(query)[0]
sql_recursively_simplify(parsed_query, hide_columns=hide_columns)
return str(parsed_query) | python | def sql_fingerprint(query, hide_columns=True):
"""
Simplify a query, taking away exact values and fields selected.
Imperfect but better than super explicit, value-dependent queries.
"""
parsed_query = parse(query)[0]
sql_recursively_simplify(parsed_query, hide_columns=hide_columns)
return str(parsed_query) | [
"def",
"sql_fingerprint",
"(",
"query",
",",
"hide_columns",
"=",
"True",
")",
":",
"parsed_query",
"=",
"parse",
"(",
"query",
")",
"[",
"0",
"]",
"sql_recursively_simplify",
"(",
"parsed_query",
",",
"hide_columns",
"=",
"hide_columns",
")",
"return",
"str",
"(",
"parsed_query",
")"
] | Simplify a query, taking away exact values and fields selected.
Imperfect but better than super explicit, value-dependent queries. | [
"Simplify",
"a",
"query",
"taking",
"away",
"exact",
"values",
"and",
"fields",
"selected",
"."
] | 76a1874820b55bcbc2f95a85bbda3cb056584e2c | https://github.com/adamchainz/django-perf-rec/blob/76a1874820b55bcbc2f95a85bbda3cb056584e2c/django_perf_rec/sql.py#L7-L15 | train |
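A minimal usage sketch of sql_fingerprint above; the exact simplified string depends on the installed sqlparse version, so treat the equality, not the literal output, as the point:

# Hypothetical check: two queries differing only in a literal fingerprint alike.
from django_perf_rec.sql import sql_fingerprint

a = sql_fingerprint("SELECT id, name FROM users WHERE id = 1")
b = sql_fingerprint("SELECT id, name FROM users WHERE id = 42")
assert a == b  # literals and the selected column list are simplified away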
adamchainz/django-perf-rec | django_perf_rec/sql.py | match_keyword | def match_keyword(token, keywords):
"""
Checks if the given token represents one of the given keywords
"""
if not token:
return False
if not token.is_keyword:
return False
return token.value.upper() in keywords | python | def match_keyword(token, keywords):
"""
Checks if the given token represents one of the given keywords
"""
if not token:
return False
if not token.is_keyword:
return False
return token.value.upper() in keywords | [
"def",
"match_keyword",
"(",
"token",
",",
"keywords",
")",
":",
"if",
"not",
"token",
":",
"return",
"False",
"if",
"not",
"token",
".",
"is_keyword",
":",
"return",
"False",
"return",
"token",
".",
"value",
".",
"upper",
"(",
")",
"in",
"keywords"
] | Checks if the given token represents one of the given keywords | [
"Checks",
"if",
"the",
"given",
"token",
"represents",
"one",
"of",
"the",
"given",
"keywords"
] | 76a1874820b55bcbc2f95a85bbda3cb056584e2c | https://github.com/adamchainz/django-perf-rec/blob/76a1874820b55bcbc2f95a85bbda3cb056584e2c/django_perf_rec/sql.py#L84-L93 | train |
adamchainz/django-perf-rec | django_perf_rec/sql.py | _is_group | def _is_group(token):
"""
sqlparse 0.2.2 changed it from a callable to a bool property
"""
is_group = token.is_group
if isinstance(is_group, bool):
return is_group
else:
return is_group() | python | def _is_group(token):
"""
sqlparse 0.2.2 changed it from a callable to a bool property
"""
is_group = token.is_group
if isinstance(is_group, bool):
return is_group
else:
return is_group() | [
"def",
"_is_group",
"(",
"token",
")",
":",
"is_group",
"=",
"token",
".",
"is_group",
"if",
"isinstance",
"(",
"is_group",
",",
"bool",
")",
":",
"return",
"is_group",
"else",
":",
"return",
"is_group",
"(",
")"
] | sqlparse 0.2.2 changed it from a callable to a bool property | [
"sqlparse",
"0",
".",
"2",
".",
"2",
"changed",
"it",
"from",
"a",
"callable",
"to",
"a",
"bool",
"property"
] | 76a1874820b55bcbc2f95a85bbda3cb056584e2c | https://github.com/adamchainz/django-perf-rec/blob/76a1874820b55bcbc2f95a85bbda3cb056584e2c/django_perf_rec/sql.py#L96-L104 | train |
adamchainz/django-perf-rec | django_perf_rec/utils.py | sorted_names | def sorted_names(names):
"""
Sort a list of names but keep the word 'default' first if it's there.
"""
names = list(names)
have_default = False
if 'default' in names:
names.remove('default')
have_default = True
sorted_names = sorted(names)
if have_default:
sorted_names = ['default'] + sorted_names
return sorted_names | python | def sorted_names(names):
"""
Sort a list of names but keep the word 'default' first if it's there.
"""
names = list(names)
have_default = False
if 'default' in names:
names.remove('default')
have_default = True
sorted_names = sorted(names)
if have_default:
sorted_names = ['default'] + sorted_names
return sorted_names | [
"def",
"sorted_names",
"(",
"names",
")",
":",
"names",
"=",
"list",
"(",
"names",
")",
"have_default",
"=",
"False",
"if",
"'default'",
"in",
"names",
":",
"names",
".",
"remove",
"(",
"'default'",
")",
"have_default",
"=",
"True",
"sorted_names",
"=",
"sorted",
"(",
"names",
")",
"if",
"have_default",
":",
"sorted_names",
"=",
"[",
"'default'",
"]",
"+",
"sorted_names",
"return",
"sorted_names"
] | Sort a list of names but keep the word 'default' first if it's there. | [
"Sort",
"a",
"list",
"of",
"names",
"but",
"keep",
"the",
"word",
"default",
"first",
"if",
"it",
"s",
"there",
"."
] | 76a1874820b55bcbc2f95a85bbda3cb056584e2c | https://github.com/adamchainz/django-perf-rec/blob/76a1874820b55bcbc2f95a85bbda3cb056584e2c/django_perf_rec/utils.py#L82-L98 | train |
adamchainz/django-perf-rec | django_perf_rec/utils.py | record_diff | def record_diff(old, new):
"""
Generate a human-readable diff of two performance records.
"""
return '\n'.join(difflib.ndiff(
['%s: %s' % (k, v) for op in old for k, v in op.items()],
['%s: %s' % (k, v) for op in new for k, v in op.items()],
)) | python | def record_diff(old, new):
"""
Generate a human-readable diff of two performance records.
"""
return '\n'.join(difflib.ndiff(
['%s: %s' % (k, v) for op in old for k, v in op.items()],
['%s: %s' % (k, v) for op in new for k, v in op.items()],
)) | [
"def",
"record_diff",
"(",
"old",
",",
"new",
")",
":",
"return",
"'\\n'",
".",
"join",
"(",
"difflib",
".",
"ndiff",
"(",
"[",
"'%s: %s'",
"%",
"(",
"k",
",",
"v",
")",
"for",
"op",
"in",
"old",
"for",
"k",
",",
"v",
"in",
"op",
".",
"items",
"(",
")",
"]",
",",
"[",
"'%s: %s'",
"%",
"(",
"k",
",",
"v",
")",
"for",
"op",
"in",
"new",
"for",
"k",
",",
"v",
"in",
"op",
".",
"items",
"(",
")",
"]",
",",
")",
")"
] | Generate a human-readable diff of two performance records. | [
"Generate",
"a",
"human",
"-",
"readable",
"diff",
"of",
"two",
"performance",
"records",
"."
] | 76a1874820b55bcbc2f95a85bbda3cb056584e2c | https://github.com/adamchainz/django-perf-rec/blob/76a1874820b55bcbc2f95a85bbda3cb056584e2c/django_perf_rec/utils.py#L101-L108 | train |
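A minimal usage sketch of record_diff above; the operation dictionaries are illustrative:

# Hypothetical diff of two recorded operation lists.
from django_perf_rec.utils import record_diff

old = [{'db': 'SELECT ... FROM users WHERE id = #'}]
new = [{'db': 'SELECT ... FROM users WHERE id = #'},
       {'cache|get': 'user_#'}]
print(record_diff(old, new))  # ndiff-style output; '+' marks the added cache op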
reportportal/client-Python | reportportal_client/service_async.py | QueueListener.dequeue | def dequeue(self, block=True):
"""Dequeue a record and return item."""
return self.queue.get(block, self.queue_get_timeout) | python | def dequeue(self, block=True):
"""Dequeue a record and return item."""
return self.queue.get(block, self.queue_get_timeout) | [
"def",
"dequeue",
"(",
"self",
",",
"block",
"=",
"True",
")",
":",
"return",
"self",
".",
"queue",
".",
"get",
"(",
"block",
",",
"self",
".",
"queue_get_timeout",
")"
] | Dequeue a record and return item. | [
"Dequeue",
"a",
"record",
"and",
"return",
"item",
"."
] | 8d22445d0de73f46fb23d0c0e49ac309335173ce | https://github.com/reportportal/client-Python/blob/8d22445d0de73f46fb23d0c0e49ac309335173ce/reportportal_client/service_async.py#L39-L41 | train |
reportportal/client-Python | reportportal_client/service_async.py | QueueListener.start | def start(self):
"""Start the listener.
This starts up a background thread to monitor the queue for
items to process.
"""
self._thread = t = threading.Thread(target=self._monitor)
t.setDaemon(True)
t.start() | python | def start(self):
"""Start the listener.
This starts up a background thread to monitor the queue for
items to process.
"""
self._thread = t = threading.Thread(target=self._monitor)
t.setDaemon(True)
t.start() | [
"def",
"start",
"(",
"self",
")",
":",
"self",
".",
"_thread",
"=",
"t",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"_monitor",
")",
"t",
".",
"setDaemon",
"(",
"True",
")",
"t",
".",
"start",
"(",
")"
] | Start the listener.
This starts up a background thread to monitor the queue for
items to process. | [
"Start",
"the",
"listener",
"."
] | 8d22445d0de73f46fb23d0c0e49ac309335173ce | https://github.com/reportportal/client-Python/blob/8d22445d0de73f46fb23d0c0e49ac309335173ce/reportportal_client/service_async.py#L43-L51 | train |
reportportal/client-Python | reportportal_client/service_async.py | QueueListener.handle | def handle(self, record):
"""Handle an item.
This just loops through the handlers offering them the record
to handle.
"""
record = self.prepare(record)
for handler in self.handlers:
handler(record) | python | def handle(self, record):
"""Handle an item.
This just loops through the handlers offering them the record
to handle.
"""
record = self.prepare(record)
for handler in self.handlers:
handler(record) | [
"def",
"handle",
"(",
"self",
",",
"record",
")",
":",
"record",
"=",
"self",
".",
"prepare",
"(",
"record",
")",
"for",
"handler",
"in",
"self",
".",
"handlers",
":",
"handler",
"(",
"record",
")"
] | Handle an item.
This just loops through the handlers offering them the record
to handle. | [
"Handle",
"an",
"item",
"."
] | 8d22445d0de73f46fb23d0c0e49ac309335173ce | https://github.com/reportportal/client-Python/blob/8d22445d0de73f46fb23d0c0e49ac309335173ce/reportportal_client/service_async.py#L62-L70 | train |
reportportal/client-Python | reportportal_client/service_async.py | QueueListener._monitor | def _monitor(self):
"""Monitor the queue for items, and ask the handler to deal with them.
This method runs on a separate, internal thread.
The thread will terminate if it sees a sentinel object in the queue.
"""
err_msg = ("invalid internal state:"
" _stop_nowait can not be set if _stop is not set")
assert self._stop.isSet() or not self._stop_nowait.isSet(), err_msg
q = self.queue
has_task_done = hasattr(q, 'task_done')
while not self._stop.isSet():
try:
record = self.dequeue(True)
if record is self._sentinel_item:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
pass
# There might still be records in the queue,
# handle them unless _stop_nowait is set.
while not self._stop_nowait.isSet():
try:
record = self.dequeue(False)
if record is self._sentinel_item:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
break | python | def _monitor(self):
"""Monitor the queue for items, and ask the handler to deal with them.
This method runs on a separate, internal thread.
The thread will terminate if it sees a sentinel object in the queue.
"""
err_msg = ("invalid internal state:"
" _stop_nowait can not be set if _stop is not set")
assert self._stop.isSet() or not self._stop_nowait.isSet(), err_msg
q = self.queue
has_task_done = hasattr(q, 'task_done')
while not self._stop.isSet():
try:
record = self.dequeue(True)
if record is self._sentinel_item:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
pass
# There might still be records in the queue,
# handle them unless _stop_nowait is set.
while not self._stop_nowait.isSet():
try:
record = self.dequeue(False)
if record is self._sentinel_item:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
break | [
"def",
"_monitor",
"(",
"self",
")",
":",
"err_msg",
"=",
"(",
"\"invalid internal state:\"",
"\" _stop_nowait can not be set if _stop is not set\"",
")",
"assert",
"self",
".",
"_stop",
".",
"isSet",
"(",
")",
"or",
"not",
"self",
".",
"_stop_nowait",
".",
"isSet",
"(",
")",
",",
"err_msg",
"q",
"=",
"self",
".",
"queue",
"has_task_done",
"=",
"hasattr",
"(",
"q",
",",
"'task_done'",
")",
"while",
"not",
"self",
".",
"_stop",
".",
"isSet",
"(",
")",
":",
"try",
":",
"record",
"=",
"self",
".",
"dequeue",
"(",
"True",
")",
"if",
"record",
"is",
"self",
".",
"_sentinel_item",
":",
"break",
"self",
".",
"handle",
"(",
"record",
")",
"if",
"has_task_done",
":",
"q",
".",
"task_done",
"(",
")",
"except",
"queue",
".",
"Empty",
":",
"pass",
"# There might still be records in the queue,",
"# handle then unless _stop_nowait is set.",
"while",
"not",
"self",
".",
"_stop_nowait",
".",
"isSet",
"(",
")",
":",
"try",
":",
"record",
"=",
"self",
".",
"dequeue",
"(",
"False",
")",
"if",
"record",
"is",
"self",
".",
"_sentinel_item",
":",
"break",
"self",
".",
"handle",
"(",
"record",
")",
"if",
"has_task_done",
":",
"q",
".",
"task_done",
"(",
")",
"except",
"queue",
".",
"Empty",
":",
"break"
] | Monitor the queue for items, and ask the handler to deal with them.
This method runs on a separate, internal thread.
The thread will terminate if it sees a sentinel object in the queue. | [
"Monitor",
"the",
"queue",
"for",
"items",
"and",
"ask",
"the",
"handler",
"to",
"deal",
"with",
"them",
"."
] | 8d22445d0de73f46fb23d0c0e49ac309335173ce | https://github.com/reportportal/client-Python/blob/8d22445d0de73f46fb23d0c0e49ac309335173ce/reportportal_client/service_async.py#L72-L106 | train |
reportportal/client-Python | reportportal_client/service_async.py | QueueListener.stop | def stop(self, nowait=False):
"""Stop the listener.
This asks the thread to terminate, and then waits for it to do so.
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
If nowait is False then the thread will handle remaining items in the
queue and stop.
If nowait is True then the thread will be stopped even if the queue
still contains items.
"""
self._stop.set()
if nowait:
self._stop_nowait.set()
self.queue.put_nowait(self._sentinel_item)
if (self._thread.isAlive() and
self._thread is not threading.currentThread()):
self._thread.join()
self._thread = None | python | def stop(self, nowait=False):
"""Stop the listener.
This asks the thread to terminate, and then waits for it to do so.
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
If nowait is False then the thread will handle remaining items in the
queue and stop.
If nowait is True then the thread will be stopped even if the queue
still contains items.
"""
self._stop.set()
if nowait:
self._stop_nowait.set()
self.queue.put_nowait(self._sentinel_item)
if (self._thread.isAlive() and
self._thread is not threading.currentThread()):
self._thread.join()
self._thread = None | [
"def",
"stop",
"(",
"self",
",",
"nowait",
"=",
"False",
")",
":",
"self",
".",
"_stop",
".",
"set",
"(",
")",
"if",
"nowait",
":",
"self",
".",
"_stop_nowait",
".",
"set",
"(",
")",
"self",
".",
"queue",
".",
"put_nowait",
"(",
"self",
".",
"_sentinel_item",
")",
"if",
"(",
"self",
".",
"_thread",
".",
"isAlive",
"(",
")",
"and",
"self",
".",
"_thread",
"is",
"not",
"threading",
".",
"currentThread",
"(",
")",
")",
":",
"self",
".",
"_thread",
".",
"join",
"(",
")",
"self",
".",
"_thread",
"=",
"None"
] | Stop the listener.
This asks the thread to terminate, and then waits for it to do so.
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
If nowait is False then the thread will handle remaining items in the
queue and stop.
If nowait is True then the thread will be stopped even if the queue
still contains items. | [
"Stop",
"the",
"listener",
"."
] | 8d22445d0de73f46fb23d0c0e49ac309335173ce | https://github.com/reportportal/client-Python/blob/8d22445d0de73f46fb23d0c0e49ac309335173ce/reportportal_client/service_async.py#L108-L126 | train |
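Note that stop() enqueues the sentinel in both modes: with nowait=True it short-circuits the drain loop, and with nowait=False it wakes a monitor thread that is blocked on a plain get(). Usage, assuming listener is an already-started QueueListener:

listener.stop(nowait=False)   # drain remaining items, then join the thread
# listener.stop(nowait=True)  # or: stop as soon as possible, dropping items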
reportportal/client-Python | reportportal_client/service_async.py | ReportPortalServiceAsync.terminate | def terminate(self, nowait=False):
"""Finalize and stop service
Args:
nowait: set to True to terminate immediately and skip processing
messages still in the queue
"""
logger.debug("Acquiring lock for service termination")
with self.lock:
logger.debug("Terminating service")
if not self.listener:
logger.warning("Service already stopped.")
return
self.listener.stop(nowait)
try:
if not nowait:
self._post_log_batch()
except Exception:
if self.error_handler:
self.error_handler(sys.exc_info())
else:
raise
finally:
self.queue = None
self.listener = None | python | def terminate(self, nowait=False):
"""Finalize and stop service
Args:
nowait: set to True to terminate immediately and skip processing
messages still in the queue
"""
logger.debug("Acquiring lock for service termination")
with self.lock:
logger.debug("Terminating service")
if not self.listener:
logger.warning("Service already stopped.")
return
self.listener.stop(nowait)
try:
if not nowait:
self._post_log_batch()
except Exception:
if self.error_handler:
self.error_handler(sys.exc_info())
else:
raise
finally:
self.queue = None
self.listener = None | [
"def",
"terminate",
"(",
"self",
",",
"nowait",
"=",
"False",
")",
":",
"logger",
".",
"debug",
"(",
"\"Acquiring lock for service termination\"",
")",
"with",
"self",
".",
"lock",
":",
"logger",
".",
"debug",
"(",
"\"Terminating service\"",
")",
"if",
"not",
"self",
".",
"listener",
":",
"logger",
".",
"warning",
"(",
"\"Service already stopped.\"",
")",
"return",
"self",
".",
"listener",
".",
"stop",
"(",
"nowait",
")",
"try",
":",
"if",
"not",
"nowait",
":",
"self",
".",
"_post_log_batch",
"(",
")",
"except",
"Exception",
":",
"if",
"self",
".",
"error_handler",
":",
"self",
".",
"error_handler",
"(",
"sys",
".",
"exc_info",
"(",
")",
")",
"else",
":",
"raise",
"finally",
":",
"self",
".",
"queue",
"=",
"None",
"self",
".",
"listener",
"=",
"None"
] | Finalize and stop service
Args:
nowait: set to True to terminate immediately and skip processing
messages still in the queue | [
"Finalize",
"and",
"stop",
"service"
] | 8d22445d0de73f46fb23d0c0e49ac309335173ce | https://github.com/reportportal/client-Python/blob/8d22445d0de73f46fb23d0c0e49ac309335173ce/reportportal_client/service_async.py#L169-L196 | train |
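A sketch of a typical lifecycle around terminate(); the constructor arguments follow the pre-5.0 client (endpoint, project, token) and should be treated as assumptions here:

def on_error(exc_info):
    print("ReportPortal worker failed:", exc_info[1])

service = ReportPortalServiceAsync(
    endpoint="http://rp.example.com", project="demo_project",
    token="00000000-0000-0000-0000-000000000000",
    error_handler=on_error)           # constructor signature assumed
# ... start_launch / start_test_item / log calls are queued here ...
service.terminate()                   # flush the pending log batch, then stop
# service.terminate(nowait=True)      # or: stop immediately, skipping the flush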
reportportal/client-Python | reportportal_client/service_async.py | ReportPortalServiceAsync.process_log | def process_log(self, **log_item):
"""Special handler for log messages.
Accumulate incoming log messages and post them in batch.
"""
logger.debug("Processing log item: %s", log_item)
self.log_batch.append(log_item)
if len(self.log_batch) >= self.log_batch_size:
self._post_log_batch() | python | def process_log(self, **log_item):
"""Special handler for log messages.
Accumulate incoming log messages and post them in batch.
"""
logger.debug("Processing log item: %s", log_item)
self.log_batch.append(log_item)
if len(self.log_batch) >= self.log_batch_size:
self._post_log_batch() | [
"def",
"process_log",
"(",
"self",
",",
"*",
"*",
"log_item",
")",
":",
"logger",
".",
"debug",
"(",
"\"Processing log item: %s\"",
",",
"log_item",
")",
"self",
".",
"log_batch",
".",
"append",
"(",
"log_item",
")",
"if",
"len",
"(",
"self",
".",
"log_batch",
")",
">=",
"self",
".",
"log_batch_size",
":",
"self",
".",
"_post_log_batch",
"(",
")"
] | Special handler for log messages.
Accumulate incoming log messages and post them in batch. | [
"Special",
"handler",
"for",
"log",
"messages",
"."
] | 8d22445d0de73f46fb23d0c0e49ac309335173ce | https://github.com/reportportal/client-Python/blob/8d22445d0de73f46fb23d0c0e49ac309335173ce/reportportal_client/service_async.py#L206-L214 | train |
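The batching rule is simply "append, then flush once the batch reaches log_batch_size". A self-contained illustration of that accounting (in the record above the actual flush is _post_log_batch):

class Batcher:
    def __init__(self, size, flush):
        self.size, self.flush, self.items = size, flush, []

    def add(self, item):
        self.items.append(item)
        if len(self.items) >= self.size:
            self.flush(self.items)
            self.items = []

b = Batcher(3, lambda batch: print("posting", len(batch), "records"))
for i in range(7):
    b.add({"message": "log %d" % i})  # flushes at 3 and 6; one record remains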
reportportal/client-Python | reportportal_client/service_async.py | ReportPortalServiceAsync.process_item | def process_item(self, item):
"""Main item handler.
Called by queue listener.
"""
logger.debug("Processing item: %s (queue size: %s)", item,
self.queue.qsize())
method, kwargs = item
if method not in self.supported_methods:
raise Error("Not expected service method: {}".format(method))
try:
if method == "log":
self.process_log(**kwargs)
else:
self._post_log_batch()
getattr(self.rp_client, method)(**kwargs)
except Exception:
if self.error_handler:
self.error_handler(sys.exc_info())
else:
self.terminate(nowait=True)
raise | python | def process_item(self, item):
"""Main item handler.
Called by queue listener.
"""
logger.debug("Processing item: %s (queue size: %s)", item,
self.queue.qsize())
method, kwargs = item
if method not in self.supported_methods:
raise Error("Not expected service method: {}".format(method))
try:
if method == "log":
self.process_log(**kwargs)
else:
self._post_log_batch()
getattr(self.rp_client, method)(**kwargs)
except Exception:
if self.error_handler:
self.error_handler(sys.exc_info())
else:
self.terminate(nowait=True)
raise | [
"def",
"process_item",
"(",
"self",
",",
"item",
")",
":",
"logger",
".",
"debug",
"(",
"\"Processing item: %s (queue size: %s)\"",
",",
"item",
",",
"self",
".",
"queue",
".",
"qsize",
"(",
")",
")",
"method",
",",
"kwargs",
"=",
"item",
"if",
"method",
"not",
"in",
"self",
".",
"supported_methods",
":",
"raise",
"Error",
"(",
"\"Not expected service method: {}\"",
".",
"format",
"(",
"method",
")",
")",
"try",
":",
"if",
"method",
"==",
"\"log\"",
":",
"self",
".",
"process_log",
"(",
"*",
"*",
"kwargs",
")",
"else",
":",
"self",
".",
"_post_log_batch",
"(",
")",
"getattr",
"(",
"self",
".",
"rp_client",
",",
"method",
")",
"(",
"*",
"*",
"kwargs",
")",
"except",
"Exception",
":",
"if",
"self",
".",
"error_handler",
":",
"self",
".",
"error_handler",
"(",
"sys",
".",
"exc_info",
"(",
")",
")",
"else",
":",
"self",
".",
"terminate",
"(",
"nowait",
"=",
"True",
")",
"raise"
] | Main item handler.
Called by queue listener. | [
"Main",
"item",
"handler",
"."
] | 8d22445d0de73f46fb23d0c0e49ac309335173ce | https://github.com/reportportal/client-Python/blob/8d22445d0de73f46fb23d0c0e49ac309335173ce/reportportal_client/service_async.py#L216-L239 | train |
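Queue items are (method_name, kwargs) tuples dispatched by name onto the synchronous client; "log" is intercepted for batching, and any other method first flushes the pending batch so log ordering is preserved. The shape of an item, with an assumed supported-method set:

supported = {"log", "start_launch", "finish_launch",
             "start_test_item", "finish_test_item"}   # contents assumed
item = ("finish_test_item", {"end_time": "1556042730000", "status": "PASSED"})
method, kwargs = item
if method not in supported:
    raise ValueError("Not expected service method: {}".format(method))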
reportportal/client-Python | reportportal_client/service_async.py | ReportPortalServiceAsync.log | def log(self, time, message, level=None, attachment=None):
"""Logs a message with attachment.
The attachment is a dict of:
name: name of attachment
data: file content
mime: content type for attachment
"""
logger.debug("log queued")
args = {
"time": time,
"message": message,
"level": level,
"attachment": attachment,
}
self.queue.put_nowait(("log", args)) | python | def log(self, time, message, level=None, attachment=None):
"""Logs a message with attachment.
The attachment is a dict of:
name: name of attachment
data: file content
mime: content type for attachment
"""
logger.debug("log queued")
args = {
"time": time,
"message": message,
"level": level,
"attachment": attachment,
}
self.queue.put_nowait(("log", args)) | [
"def",
"log",
"(",
"self",
",",
"time",
",",
"message",
",",
"level",
"=",
"None",
",",
"attachment",
"=",
"None",
")",
":",
"logger",
".",
"debug",
"(",
"\"log queued\"",
")",
"args",
"=",
"{",
"\"time\"",
":",
"time",
",",
"\"message\"",
":",
"message",
",",
"\"level\"",
":",
"level",
",",
"\"attachment\"",
":",
"attachment",
",",
"}",
"self",
".",
"queue",
".",
"put_nowait",
"(",
"(",
"\"log\"",
",",
"args",
")",
")"
] | Logs a message with attachment.
The attachment is a dict of:
name: name of attachment
data: file content
mime: content type for attachment | [
"Logs",
"a",
"message",
"with",
"attachment",
"."
] | 8d22445d0de73f46fb23d0c0e49ac309335173ce | https://github.com/reportportal/client-Python/blob/8d22445d0de73f46fb23d0c0e49ac309335173ce/reportportal_client/service_async.py#L296-L312 | train |
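A hedged example of calling the asynchronous log(); the epoch-milliseconds string timestamp follows the usual ReportPortal convention but is an assumption here, as is the file name:

import time

def timestamp():
    return str(int(time.time() * 1000))   # epoch millis as a string

with open("screenshot.png", "rb") as fh:  # hypothetical attachment file
    service.log(time=timestamp(),
                message="step failed, screenshot attached",
                level="ERROR",
                attachment={"name": "screenshot.png",
                            "data": fh.read(),
                            "mime": "image/png"})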
reportportal/client-Python | reportportal_client/service.py | ReportPortalService.log_batch | def log_batch(self, log_data):
"""Logs batch of messages with attachment.
Args:
log_data: list of log records.
log record is a dict of:
time, message, level, attachment
attachment is a dict of:
name: name of attachment
data: fileobj or content
mime: content type for attachment
"""
url = uri_join(self.base_url, "log")
attachments = []
for log_item in log_data:
log_item["item_id"] = self.stack[-1]
attachment = log_item.get("attachment", None)
if "attachment" in log_item:
del log_item["attachment"]
if attachment:
if not isinstance(attachment, collections.Mapping):
attachment = {"data": attachment}
name = attachment.get("name", str(uuid.uuid4()))
log_item["file"] = {"name": name}
attachments.append(("file", (
name,
attachment["data"],
attachment.get("mime", "application/octet-stream")
)))
files = [(
"json_request_part", (
None,
json.dumps(log_data),
"application/json"
)
)]
files.extend(attachments)
from reportportal_client import POST_LOGBATCH_RETRY_COUNT
for i in range(POST_LOGBATCH_RETRY_COUNT):
try:
r = self.session.post(
url=url,
files=files,
verify=self.verify_ssl
)
except KeyError:
if i < POST_LOGBATCH_RETRY_COUNT - 1:
continue
else:
raise
break
logger.debug("log_batch - Stack: %s", self.stack)
logger.debug("log_batch response: %s", r.text)
return _get_data(r) | python | def log_batch(self, log_data):
"""Logs batch of messages with attachment.
Args:
log_data: list of log records.
log record is a dict of:
time, message, level, attachment
attachment is a dict of:
name: name of attachment
data: fileobj or content
mime: content type for attachment
"""
url = uri_join(self.base_url, "log")
attachments = []
for log_item in log_data:
log_item["item_id"] = self.stack[-1]
attachment = log_item.get("attachment", None)
if "attachment" in log_item:
del log_item["attachment"]
if attachment:
if not isinstance(attachment, collections.Mapping):
attachment = {"data": attachment}
name = attachment.get("name", str(uuid.uuid4()))
log_item["file"] = {"name": name}
attachments.append(("file", (
name,
attachment["data"],
attachment.get("mime", "application/octet-stream")
)))
files = [(
"json_request_part", (
None,
json.dumps(log_data),
"application/json"
)
)]
files.extend(attachments)
from reportportal_client import POST_LOGBATCH_RETRY_COUNT
for i in range(POST_LOGBATCH_RETRY_COUNT):
try:
r = self.session.post(
url=url,
files=files,
verify=self.verify_ssl
)
except KeyError:
if i < POST_LOGBATCH_RETRY_COUNT - 1:
continue
else:
raise
break
logger.debug("log_batch - Stack: %s", self.stack)
logger.debug("log_batch response: %s", r.text)
return _get_data(r) | [
"def",
"log_batch",
"(",
"self",
",",
"log_data",
")",
":",
"url",
"=",
"uri_join",
"(",
"self",
".",
"base_url",
",",
"\"log\"",
")",
"attachments",
"=",
"[",
"]",
"for",
"log_item",
"in",
"log_data",
":",
"log_item",
"[",
"\"item_id\"",
"]",
"=",
"self",
".",
"stack",
"[",
"-",
"1",
"]",
"attachment",
"=",
"log_item",
".",
"get",
"(",
"\"attachment\"",
",",
"None",
")",
"if",
"\"attachment\"",
"in",
"log_item",
":",
"del",
"log_item",
"[",
"\"attachment\"",
"]",
"if",
"attachment",
":",
"if",
"not",
"isinstance",
"(",
"attachment",
",",
"collections",
".",
"Mapping",
")",
":",
"attachment",
"=",
"{",
"\"data\"",
":",
"attachment",
"}",
"name",
"=",
"attachment",
".",
"get",
"(",
"\"name\"",
",",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
")",
"log_item",
"[",
"\"file\"",
"]",
"=",
"{",
"\"name\"",
":",
"name",
"}",
"attachments",
".",
"append",
"(",
"(",
"\"file\"",
",",
"(",
"name",
",",
"attachment",
"[",
"\"data\"",
"]",
",",
"attachment",
".",
"get",
"(",
"\"mime\"",
",",
"\"application/octet-stream\"",
")",
")",
")",
")",
"files",
"=",
"[",
"(",
"\"json_request_part\"",
",",
"(",
"None",
",",
"json",
".",
"dumps",
"(",
"log_data",
")",
",",
"\"application/json\"",
")",
")",
"]",
"files",
".",
"extend",
"(",
"attachments",
")",
"from",
"reportportal_client",
"import",
"POST_LOGBATCH_RETRY_COUNT",
"for",
"i",
"in",
"range",
"(",
"POST_LOGBATCH_RETRY_COUNT",
")",
":",
"try",
":",
"r",
"=",
"self",
".",
"session",
".",
"post",
"(",
"url",
"=",
"url",
",",
"files",
"=",
"files",
",",
"verify",
"=",
"self",
".",
"verify_ssl",
")",
"except",
"KeyError",
":",
"if",
"i",
"<",
"POST_LOGBATCH_RETRY_COUNT",
"-",
"1",
":",
"continue",
"else",
":",
"raise",
"break",
"logger",
".",
"debug",
"(",
"\"log_batch - Stack: %s\"",
",",
"self",
".",
"stack",
")",
"logger",
".",
"debug",
"(",
"\"log_batch response: %s\"",
",",
"r",
".",
"text",
")",
"return",
"_get_data",
"(",
"r",
")"
] | Logs batch of messages with attachment.
Args:
log_data: list of log records.
log record is a dict of;
time, message, level, attachment
attachment is a dict of:
name: name of attachment
data: fileobj or content
mime: content type for attachment | [
"Logs",
"batch",
"of",
"messages",
"with",
"attachment",
"."
] | 8d22445d0de73f46fb23d0c0e49ac309335173ce | https://github.com/reportportal/client-Python/blob/8d22445d0de73f46fb23d0c0e49ac309335173ce/reportportal_client/service.py#L250-L312 | train |
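log_batch sends a single multipart/form-data request: one json_request_part listing the records (each attachment replaced by a file-name reference under "file") plus one additional part per attachment. A minimal reconstruction of the payload shape; URL and token are placeholders:

import json
import requests

records = [{"item_id": "item-1", "time": "1556042730000", "level": "ERROR",
            "message": "boom", "file": {"name": "trace.txt"}}]
files = [("json_request_part",
          (None, json.dumps(records), "application/json")),
         ("file", ("trace.txt", b"Traceback ...", "text/plain"))]
# requests.post("http://rp.example.com/api/v1/demo_project/log",
#               files=files, headers={"Authorization": "bearer <token>"})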
saltstack/pytest-salt | versioneer.py | git_versions_from_keywords | def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date, "branch": None}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None,
"branch": None} | python | def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date, "branch": None}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None,
"branch": None} | [
"def",
"git_versions_from_keywords",
"(",
"keywords",
",",
"tag_prefix",
",",
"verbose",
")",
":",
"if",
"not",
"keywords",
":",
"raise",
"NotThisMethod",
"(",
"\"no keywords at all, weird\"",
")",
"date",
"=",
"keywords",
".",
"get",
"(",
"\"date\"",
")",
"if",
"date",
"is",
"not",
"None",
":",
"# git-2.2.0 added \"%cI\", which expands to an ISO-8601 -compliant",
"# datestamp. However we prefer \"%ci\" (which expands to an \"ISO-8601",
"# -like\" string, which we must then edit to make compliant), because",
"# it's been around since git-1.5.3, and it's too difficult to",
"# discover which version we're using, or to work around using an",
"# older one.",
"date",
"=",
"date",
".",
"strip",
"(",
")",
".",
"replace",
"(",
"\" \"",
",",
"\"T\"",
",",
"1",
")",
".",
"replace",
"(",
"\" \"",
",",
"\"\"",
",",
"1",
")",
"refnames",
"=",
"keywords",
"[",
"\"refnames\"",
"]",
".",
"strip",
"(",
")",
"if",
"refnames",
".",
"startswith",
"(",
"\"$Format\"",
")",
":",
"if",
"verbose",
":",
"print",
"(",
"\"keywords are unexpanded, not using\"",
")",
"raise",
"NotThisMethod",
"(",
"\"unexpanded keywords, not a git-archive tarball\"",
")",
"refs",
"=",
"set",
"(",
"[",
"r",
".",
"strip",
"(",
")",
"for",
"r",
"in",
"refnames",
".",
"strip",
"(",
"\"()\"",
")",
".",
"split",
"(",
"\",\"",
")",
"]",
")",
"# starting in git-1.8.3, tags are listed as \"tag: foo-1.0\" instead of",
"# just \"foo-1.0\". If we see a \"tag: \" prefix, prefer those.",
"TAG",
"=",
"\"tag: \"",
"tags",
"=",
"set",
"(",
"[",
"r",
"[",
"len",
"(",
"TAG",
")",
":",
"]",
"for",
"r",
"in",
"refs",
"if",
"r",
".",
"startswith",
"(",
"TAG",
")",
"]",
")",
"if",
"not",
"tags",
":",
"# Either we're using git < 1.8.3, or there really are no tags. We use",
"# a heuristic: assume all version tags have a digit. The old git %d",
"# expansion behaves like git log --decorate=short and strips out the",
"# refs/heads/ and refs/tags/ prefixes that would let us distinguish",
"# between branches and tags. By ignoring refnames without digits, we",
"# filter out many common branch names like \"release\" and",
"# \"stabilization\", as well as \"HEAD\" and \"master\".",
"tags",
"=",
"set",
"(",
"[",
"r",
"for",
"r",
"in",
"refs",
"if",
"re",
".",
"search",
"(",
"r'\\d'",
",",
"r",
")",
"]",
")",
"if",
"verbose",
":",
"print",
"(",
"\"discarding '%s', no digits\"",
"%",
"\",\"",
".",
"join",
"(",
"refs",
"-",
"tags",
")",
")",
"if",
"verbose",
":",
"print",
"(",
"\"likely tags: %s\"",
"%",
"\",\"",
".",
"join",
"(",
"sorted",
"(",
"tags",
")",
")",
")",
"for",
"ref",
"in",
"sorted",
"(",
"tags",
")",
":",
"# sorting will prefer e.g. \"2.0\" over \"2.0rc1\"",
"if",
"ref",
".",
"startswith",
"(",
"tag_prefix",
")",
":",
"r",
"=",
"ref",
"[",
"len",
"(",
"tag_prefix",
")",
":",
"]",
"if",
"verbose",
":",
"print",
"(",
"\"picking %s\"",
"%",
"r",
")",
"return",
"{",
"\"version\"",
":",
"r",
",",
"\"full-revisionid\"",
":",
"keywords",
"[",
"\"full\"",
"]",
".",
"strip",
"(",
")",
",",
"\"dirty\"",
":",
"False",
",",
"\"error\"",
":",
"None",
",",
"\"date\"",
":",
"date",
",",
"\"branch\"",
":",
"None",
"}",
"# no suitable tags, so version is \"0+unknown\", but full hex is still there",
"if",
"verbose",
":",
"print",
"(",
"\"no suitable tags, using unknown + full revision id\"",
")",
"return",
"{",
"\"version\"",
":",
"\"0+unknown\"",
",",
"\"full-revisionid\"",
":",
"keywords",
"[",
"\"full\"",
"]",
".",
"strip",
"(",
")",
",",
"\"dirty\"",
":",
"False",
",",
"\"error\"",
":",
"\"no suitable tags\"",
",",
"\"date\"",
":",
"None",
",",
"\"branch\"",
":",
"None",
"}"
] | Get version information from git keywords. | [
"Get",
"version",
"information",
"from",
"git",
"keywords",
"."
] | 3e8c379b3636c64707e7a08b8eb6c9af20a1ac4d | https://github.com/saltstack/pytest-salt/blob/3e8c379b3636c64707e7a08b8eb6c9af20a1ac4d/versioneer.py#L1042-L1094 | train |
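The keywords dict comes from git's export-subst expansion inside a git archive. A worked example of input and output (values invented, tag_prefix "v"):

keywords = {
    "refnames": " (HEAD -> master, tag: v1.2.0, origin/master)",
    "full": "8d22445d0de73f46fb23d0c0e49ac309335173ce",
    "date": "2019-04-23 12:34:56 +0200",
}
info = git_versions_from_keywords(keywords, tag_prefix="v", verbose=False)
# info["version"] == "1.2.0"
# info["date"] == "2019-04-23T12:34:56+0200"
# info["full-revisionid"] == keywords["full"]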
saltstack/pytest-salt | versioneer.py | render_pep440_branch_based | def render_pep440_branch_based(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.BRANCH_gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.BRANCH_gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.BRANCH_gHEX[.dirty]
"""
replacements = ([' ', '.'], ['(', ''], [')', ''], ['\\', '.'], ['/', '.'])
branch_name = pieces.get('branch') or ''
if branch_name:
for old, new in replacements:
branch_name = branch_name.replace(old, new)
else:
branch_name = 'unknown_branch'
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += '.dev0' + plus_or_dot(pieces)
rendered += "%d.%s.g%s" % (
pieces["distance"],
branch_name,
pieces['short']
)
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.%s.g%s" % (
pieces["distance"],
branch_name,
pieces['short']
)
if pieces["dirty"]:
rendered += ".dirty"
return rendered | python | def render_pep440_branch_based(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.BRANCH_gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.BRANCH_gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.BRANCH_gHEX[.dirty]
"""
replacements = ([' ', '.'], ['(', ''], [')', ''], ['\\', '.'], ['/', '.'])
branch_name = pieces.get('branch') or ''
if branch_name:
for old, new in replacements:
branch_name = branch_name.replace(old, new)
else:
branch_name = 'unknown_branch'
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += '.dev0' + plus_or_dot(pieces)
rendered += "%d.%s.g%s" % (
pieces["distance"],
branch_name,
pieces['short']
)
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.%s.g%s" % (
pieces["distance"],
branch_name,
pieces['short']
)
if pieces["dirty"]:
rendered += ".dirty"
return rendered | [
"def",
"render_pep440_branch_based",
"(",
"pieces",
")",
":",
"replacements",
"=",
"(",
"[",
"' '",
",",
"'.'",
"]",
",",
"[",
"'('",
",",
"''",
"]",
",",
"[",
"')'",
",",
"''",
"]",
",",
"[",
"'\\\\'",
",",
"'.'",
"]",
",",
"[",
"'/'",
",",
"'.'",
"]",
")",
"branch_name",
"=",
"pieces",
".",
"get",
"(",
"'branch'",
")",
"or",
"''",
"if",
"branch_name",
":",
"for",
"old",
",",
"new",
"in",
"replacements",
":",
"branch_name",
"=",
"branch_name",
".",
"replace",
"(",
"old",
",",
"new",
")",
"else",
":",
"branch_name",
"=",
"'unknown_branch'",
"if",
"pieces",
"[",
"\"closest-tag\"",
"]",
":",
"rendered",
"=",
"pieces",
"[",
"\"closest-tag\"",
"]",
"if",
"pieces",
"[",
"\"distance\"",
"]",
"or",
"pieces",
"[",
"\"dirty\"",
"]",
":",
"rendered",
"+=",
"'.dev0'",
"+",
"plus_or_dot",
"(",
"pieces",
")",
"rendered",
"+=",
"\"%d.%s.g%s\"",
"%",
"(",
"pieces",
"[",
"\"distance\"",
"]",
",",
"branch_name",
",",
"pieces",
"[",
"'short'",
"]",
")",
"if",
"pieces",
"[",
"\"dirty\"",
"]",
":",
"rendered",
"+=",
"\".dirty\"",
"else",
":",
"# exception #1",
"rendered",
"=",
"\"0+untagged.%d.%s.g%s\"",
"%",
"(",
"pieces",
"[",
"\"distance\"",
"]",
",",
"branch_name",
",",
"pieces",
"[",
"'short'",
"]",
")",
"if",
"pieces",
"[",
"\"dirty\"",
"]",
":",
"rendered",
"+=",
"\".dirty\"",
"return",
"rendered"
] | Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.BRANCH_gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.BRANCH_gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.BRANCH_gHEX[.dirty] | [
"Build",
"up",
"version",
"string",
"with",
"post",
"-",
"release",
"local",
"version",
"identifier",
"."
] | 3e8c379b3636c64707e7a08b8eb6c9af20a1ac4d | https://github.com/saltstack/pytest-salt/blob/3e8c379b3636c64707e7a08b8eb6c9af20a1ac4d/versioneer.py#L1458-L1495 | train |
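A worked example of the branch-based style; the branch name is sanitized first (spaces, parentheses, slashes, and backslashes become dots) before it enters the local version segment:

pieces = {"closest-tag": "1.0", "distance": 3, "dirty": True,
          "branch": "feature/foo", "short": "abc1234"}
# render_pep440_branch_based(pieces)
# -> "1.0.dev0+3.feature.foo.gabc1234.dirty"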
saltstack/pytest-salt | versioneer.py | render | def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "pep440-branch-based":
rendered = render_pep440_branch_based(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")} | python | def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "pep440-branch-based":
rendered = render_pep440_branch_based(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")} | [
"def",
"render",
"(",
"pieces",
",",
"style",
")",
":",
"if",
"pieces",
"[",
"\"error\"",
"]",
":",
"return",
"{",
"\"version\"",
":",
"\"unknown\"",
",",
"\"full-revisionid\"",
":",
"pieces",
".",
"get",
"(",
"\"long\"",
")",
",",
"\"dirty\"",
":",
"None",
",",
"\"error\"",
":",
"pieces",
"[",
"\"error\"",
"]",
",",
"\"date\"",
":",
"None",
"}",
"if",
"not",
"style",
"or",
"style",
"==",
"\"default\"",
":",
"style",
"=",
"\"pep440\"",
"# the default",
"if",
"style",
"==",
"\"pep440\"",
":",
"rendered",
"=",
"render_pep440",
"(",
"pieces",
")",
"elif",
"style",
"==",
"\"pep440-pre\"",
":",
"rendered",
"=",
"render_pep440_pre",
"(",
"pieces",
")",
"elif",
"style",
"==",
"\"pep440-post\"",
":",
"rendered",
"=",
"render_pep440_post",
"(",
"pieces",
")",
"elif",
"style",
"==",
"\"pep440-old\"",
":",
"rendered",
"=",
"render_pep440_old",
"(",
"pieces",
")",
"elif",
"style",
"==",
"\"pep440-branch-based\"",
":",
"rendered",
"=",
"render_pep440_branch_based",
"(",
"pieces",
")",
"elif",
"style",
"==",
"\"git-describe\"",
":",
"rendered",
"=",
"render_git_describe",
"(",
"pieces",
")",
"elif",
"style",
"==",
"\"git-describe-long\"",
":",
"rendered",
"=",
"render_git_describe_long",
"(",
"pieces",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"unknown style '%s'\"",
"%",
"style",
")",
"return",
"{",
"\"version\"",
":",
"rendered",
",",
"\"full-revisionid\"",
":",
"pieces",
"[",
"\"long\"",
"]",
",",
"\"dirty\"",
":",
"pieces",
"[",
"\"dirty\"",
"]",
",",
"\"error\"",
":",
"None",
",",
"\"date\"",
":",
"pieces",
".",
"get",
"(",
"\"date\"",
")",
"}"
] | Render the given version pieces into the requested style. | [
"Render",
"the",
"given",
"version",
"pieces",
"into",
"the",
"requested",
"style",
"."
] | 3e8c379b3636c64707e7a08b8eb6c9af20a1ac4d | https://github.com/saltstack/pytest-salt/blob/3e8c379b3636c64707e7a08b8eb6c9af20a1ac4d/versioneer.py#L1498-L1529 | train |
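render() is the single dispatch point for all styles, and it also owns the error path. Driving it directly with a hand-built pieces dict (normally produced by the VCS scan):

pieces = {"closest-tag": "1.2.0", "distance": 0, "dirty": False,
          "long": "8d22445d0de73f46fb23d0c0e49ac309335173ce",
          "short": "8d22445", "error": None,
          "date": "2019-04-23T12:34:56+0200", "branch": "master"}
print(render(pieces, "pep440")["version"])   # -> 1.2.0
print(render({"error": "unable to find git", "long": None},
             "pep440")["version"])           # -> unknown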
saltstack/pytest-salt | versioneer.py | do_setup | def do_setup():
"""Do main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (EnvironmentError, configparser.NoSectionError,
configparser.NoOptionError) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg",
file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
"__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET_RE.search(old) is None:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
cfg.versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-subst keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0 | python | def do_setup():
"""Do main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (EnvironmentError, configparser.NoSectionError,
configparser.NoOptionError) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg",
file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
"__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET_RE.search(old) is None:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
cfg.versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-subst keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0 | [
"def",
"do_setup",
"(",
")",
":",
"root",
"=",
"get_root",
"(",
")",
"try",
":",
"cfg",
"=",
"get_config_from_root",
"(",
"root",
")",
"except",
"(",
"EnvironmentError",
",",
"configparser",
".",
"NoSectionError",
",",
"configparser",
".",
"NoOptionError",
")",
"as",
"e",
":",
"if",
"isinstance",
"(",
"e",
",",
"(",
"EnvironmentError",
",",
"configparser",
".",
"NoSectionError",
")",
")",
":",
"print",
"(",
"\"Adding sample versioneer config to setup.cfg\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"\"setup.cfg\"",
")",
",",
"\"a\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"SAMPLE_CONFIG",
")",
"print",
"(",
"CONFIG_ERROR",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"return",
"1",
"print",
"(",
"\" creating %s\"",
"%",
"cfg",
".",
"versionfile_source",
")",
"with",
"open",
"(",
"cfg",
".",
"versionfile_source",
",",
"\"w\"",
")",
"as",
"f",
":",
"LONG",
"=",
"LONG_VERSION_PY",
"[",
"cfg",
".",
"VCS",
"]",
"f",
".",
"write",
"(",
"LONG",
"%",
"{",
"\"DOLLAR\"",
":",
"\"$\"",
",",
"\"STYLE\"",
":",
"cfg",
".",
"style",
",",
"\"TAG_PREFIX\"",
":",
"cfg",
".",
"tag_prefix",
",",
"\"PARENTDIR_PREFIX\"",
":",
"cfg",
".",
"parentdir_prefix",
",",
"\"VERSIONFILE_SOURCE\"",
":",
"cfg",
".",
"versionfile_source",
",",
"}",
")",
"ipy",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"cfg",
".",
"versionfile_source",
")",
",",
"\"__init__.py\"",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"ipy",
")",
":",
"try",
":",
"with",
"open",
"(",
"ipy",
",",
"\"r\"",
")",
"as",
"f",
":",
"old",
"=",
"f",
".",
"read",
"(",
")",
"except",
"EnvironmentError",
":",
"old",
"=",
"\"\"",
"if",
"INIT_PY_SNIPPET_RE",
".",
"search",
"(",
"old",
")",
"is",
"None",
":",
"print",
"(",
"\" appending to %s\"",
"%",
"ipy",
")",
"with",
"open",
"(",
"ipy",
",",
"\"a\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"INIT_PY_SNIPPET",
")",
"else",
":",
"print",
"(",
"\" %s unmodified\"",
"%",
"ipy",
")",
"else",
":",
"print",
"(",
"\" %s doesn't exist, ok\"",
"%",
"ipy",
")",
"ipy",
"=",
"None",
"# Make sure both the top-level \"versioneer.py\" and versionfile_source",
"# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so",
"# they'll be copied into source distributions. Pip won't be able to",
"# install the package without this.",
"manifest_in",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"\"MANIFEST.in\"",
")",
"simple_includes",
"=",
"set",
"(",
")",
"try",
":",
"with",
"open",
"(",
"manifest_in",
",",
"\"r\"",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"if",
"line",
".",
"startswith",
"(",
"\"include \"",
")",
":",
"for",
"include",
"in",
"line",
".",
"split",
"(",
")",
"[",
"1",
":",
"]",
":",
"simple_includes",
".",
"add",
"(",
"include",
")",
"except",
"EnvironmentError",
":",
"pass",
"# That doesn't cover everything MANIFEST.in can do",
"# (http://docs.python.org/2/distutils/sourcedist.html#commands), so",
"# it might give some false negatives. Appending redundant 'include'",
"# lines is safe, though.",
"if",
"\"versioneer.py\"",
"not",
"in",
"simple_includes",
":",
"print",
"(",
"\" appending 'versioneer.py' to MANIFEST.in\"",
")",
"with",
"open",
"(",
"manifest_in",
",",
"\"a\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"\"include versioneer.py\\n\"",
")",
"else",
":",
"print",
"(",
"\" 'versioneer.py' already in MANIFEST.in\"",
")",
"if",
"cfg",
".",
"versionfile_source",
"not",
"in",
"simple_includes",
":",
"print",
"(",
"\" appending versionfile_source ('%s') to MANIFEST.in\"",
"%",
"cfg",
".",
"versionfile_source",
")",
"with",
"open",
"(",
"manifest_in",
",",
"\"a\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"\"include %s\\n\"",
"%",
"cfg",
".",
"versionfile_source",
")",
"else",
":",
"print",
"(",
"\" versionfile_source already in MANIFEST.in\"",
")",
"# Make VCS-specific changes. For git, this means creating/changing",
"# .gitattributes to mark _version.py for export-subst keyword",
"# substitution.",
"do_vcs_install",
"(",
"manifest_in",
",",
"cfg",
".",
"versionfile_source",
",",
"ipy",
")",
"return",
"0"
] | Do main VCS-independent setup function for installing Versioneer. | [
"Do",
"main",
"VCS",
"-",
"independent",
"setup",
"function",
"for",
"installing",
"Versioneer",
"."
] | 3e8c379b3636c64707e7a08b8eb6c9af20a1ac4d | https://github.com/saltstack/pytest-salt/blob/3e8c379b3636c64707e7a08b8eb6c9af20a1ac4d/versioneer.py#L1846-L1925 | train |
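After do_setup() succeeds, MANIFEST.in is guaranteed to contain at least the two include lines below; the second path depends on versionfile_source in setup.cfg, and "src/mypkg/_version.py" is only an example:

include versioneer.py
include src/mypkg/_version.py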
saltstack/pytest-salt | versioneer.py | scan_setup_py | def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass(" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors | python | def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass(" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors | [
"def",
"scan_setup_py",
"(",
")",
":",
"found",
"=",
"set",
"(",
")",
"setters",
"=",
"False",
"errors",
"=",
"0",
"with",
"open",
"(",
"\"setup.py\"",
",",
"\"r\"",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
".",
"readlines",
"(",
")",
":",
"if",
"\"import versioneer\"",
"in",
"line",
":",
"found",
".",
"add",
"(",
"\"import\"",
")",
"if",
"\"versioneer.get_cmdclass(\"",
"in",
"line",
":",
"found",
".",
"add",
"(",
"\"cmdclass\"",
")",
"if",
"\"versioneer.get_version()\"",
"in",
"line",
":",
"found",
".",
"add",
"(",
"\"get_version\"",
")",
"if",
"\"versioneer.VCS\"",
"in",
"line",
":",
"setters",
"=",
"True",
"if",
"\"versioneer.versionfile_source\"",
"in",
"line",
":",
"setters",
"=",
"True",
"if",
"len",
"(",
"found",
")",
"!=",
"3",
":",
"print",
"(",
"\"\"",
")",
"print",
"(",
"\"Your setup.py appears to be missing some important items\"",
")",
"print",
"(",
"\"(but I might be wrong). Please make sure it has something\"",
")",
"print",
"(",
"\"roughly like the following:\"",
")",
"print",
"(",
"\"\"",
")",
"print",
"(",
"\" import versioneer\"",
")",
"print",
"(",
"\" setup( version=versioneer.get_version(),\"",
")",
"print",
"(",
"\" cmdclass=versioneer.get_cmdclass(), ...)\"",
")",
"print",
"(",
"\"\"",
")",
"errors",
"+=",
"1",
"if",
"setters",
":",
"print",
"(",
"\"You should remove lines like 'versioneer.VCS = ' and\"",
")",
"print",
"(",
"\"'versioneer.versionfile_source = ' . This configuration\"",
")",
"print",
"(",
"\"now lives in setup.cfg, and should be removed from setup.py\"",
")",
"print",
"(",
"\"\"",
")",
"errors",
"+=",
"1",
"return",
"errors"
] | Validate the contents of setup.py against Versioneer's expectations. | [
"Validate",
"the",
"contents",
"of",
"setup",
".",
"py",
"against",
"Versioneer",
"s",
"expectations",
"."
] | 3e8c379b3636c64707e7a08b8eb6c9af20a1ac4d | https://github.com/saltstack/pytest-salt/blob/3e8c379b3636c64707e7a08b8eb6c9af20a1ac4d/versioneer.py#L1928-L1962 | train |
saltstack/pytest-salt | setup.py | read | def read(fname):
'''
Read a file from the directory where setup.py resides
'''
file_path = os.path.join(SETUP_DIRNAME, fname)
with codecs.open(file_path, encoding='utf-8') as rfh:
return rfh.read() | python | def read(fname):
'''
Read a file from the directory where setup.py resides
'''
file_path = os.path.join(SETUP_DIRNAME, fname)
with codecs.open(file_path, encoding='utf-8') as rfh:
return rfh.read() | [
"def",
"read",
"(",
"fname",
")",
":",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"SETUP_DIRNAME",
",",
"fname",
")",
"with",
"codecs",
".",
"open",
"(",
"file_path",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"rfh",
":",
"return",
"rfh",
".",
"read",
"(",
")"
] | Read a file from the directory where setup.py resides | [
"Read",
"a",
"file",
"from",
"the",
"directory",
"where",
"setup",
".",
"py",
"resides"
] | 3e8c379b3636c64707e7a08b8eb6c9af20a1ac4d | https://github.com/saltstack/pytest-salt/blob/3e8c379b3636c64707e7a08b8eb6c9af20a1ac4d/setup.py#L23-L29 | train |
jeongyoonlee/Kaggler | kaggler/model/nn.py | NN.func | def func(self, w, *args):
"""Return the costs of the neural network for predictions.
Args:
w (array of float): weight vectors such that:
w[:-h1] -- weights between the input and h layers
w[-h1:] -- weights between the h and output layers
args: feature matrices for the two sample groups (args[0] and args[1])
Returns:
combined cost of pairwise squared error and L2 regularization on both weight layers
"""
x0 = args[0]
x1 = args[1]
n0 = x0.shape[0]
n1 = x1.shape[0]
# n -- number of pairs to evaluate
n = max(n0, n1) * 10
idx0 = np.random.choice(range(n0), size=n)
idx1 = np.random.choice(range(n1), size=n)
# b -- bias for the input and h layers
b0 = np.ones((n0, 1))
b1 = np.ones((n1, 1))
i1 = self.i + 1
h = self.h
h1 = h + 1
# Predict for features -- cannot use predict_raw() because here
# different weights can be used.
if sparse.issparse(x0):
p0 = np.hstack((sigm(sparse.hstack((x0, b0)).dot(w[:-h1].reshape(
i1, h))), b0)).dot(w[-h1:].reshape(h1, 1))
p1 = np.hstack((sigm(sparse.hstack((x1, b1)).dot(w[:-h1].reshape(
i1, h))), b1)).dot(w[-h1:].reshape(h1, 1))
else:
p0 = np.hstack((sigm(np.hstack((x0, b0)).dot(w[:-h1].reshape(
i1, h))), b0)).dot(w[-h1:].reshape(h1, 1))
p1 = np.hstack((sigm(np.hstack((x1, b1)).dot(w[:-h1].reshape(
i1, h))), b1)).dot(w[-h1:].reshape(h1, 1))
p0 = p0[idx0]
p1 = p1[idx1]
# Return the cost that consists of the sum of squared error +
# L2-regularization for weights between the input and h layers +
# L2-regularization for weights between the h and output layers.
#return .5 * (sum((1 - sigm(p1 - p0)) ** 2) + self.l1 * sum(w[:-h1] ** 2) +
return .5 * (sum((1 - p1 + p0) ** 2) / n +
self.l1 * sum(w[:-h1] ** 2) / (i1 * h) +
self.l2 * sum(w[-h1:] ** 2) / h1) | python | def func(self, w, *args):
"""Return the costs of the neural network for predictions.
Args:
w (array of float): weight vectors such that:
w[:-h1] -- weights between the input and h layers
w[-h1:] -- weights between the h and output layers
args: feature matrices for the two sample groups (args[0] and args[1])
Returns:
combined cost of pairwise squared error and L2 regularization on both weight layers
"""
x0 = args[0]
x1 = args[1]
n0 = x0.shape[0]
n1 = x1.shape[0]
# n -- number of pairs to evaluate
n = max(n0, n1) * 10
idx0 = np.random.choice(range(n0), size=n)
idx1 = np.random.choice(range(n1), size=n)
# b -- bias for the input and h layers
b0 = np.ones((n0, 1))
b1 = np.ones((n1, 1))
i1 = self.i + 1
h = self.h
h1 = h + 1
# Predict for features -- cannot use predict_raw() because here
# different weights can be used.
if sparse.issparse(x0):
p0 = np.hstack((sigm(sparse.hstack((x0, b0)).dot(w[:-h1].reshape(
i1, h))), b0)).dot(w[-h1:].reshape(h1, 1))
p1 = np.hstack((sigm(sparse.hstack((x1, b1)).dot(w[:-h1].reshape(
i1, h))), b1)).dot(w[-h1:].reshape(h1, 1))
else:
p0 = np.hstack((sigm(np.hstack((x0, b0)).dot(w[:-h1].reshape(
i1, h))), b0)).dot(w[-h1:].reshape(h1, 1))
p1 = np.hstack((sigm(np.hstack((x1, b1)).dot(w[:-h1].reshape(
i1, h))), b1)).dot(w[-h1:].reshape(h1, 1))
p0 = p0[idx0]
p1 = p1[idx1]
# Return the cost that consists of the sum of squared error +
# L2-regularization for weights between the input and h layers +
# L2-regularization for weights between the h and output layers.
#return .5 * (sum((1 - sigm(p1 - p0)) ** 2) + self.l1 * sum(w[:-h1] ** 2) +
return .5 * (sum((1 - p1 + p0) ** 2) / n +
self.l1 * sum(w[:-h1] ** 2) / (i1 * h) +
self.l2 * sum(w[-h1:] ** 2) / h1) | [
"def",
"func",
"(",
"self",
",",
"w",
",",
"*",
"args",
")",
":",
"x0",
"=",
"args",
"[",
"0",
"]",
"x1",
"=",
"args",
"[",
"1",
"]",
"n0",
"=",
"x0",
".",
"shape",
"[",
"0",
"]",
"n1",
"=",
"x1",
".",
"shape",
"[",
"0",
"]",
"# n -- number of pairs to evaluate",
"n",
"=",
"max",
"(",
"n0",
",",
"n1",
")",
"*",
"10",
"idx0",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"range",
"(",
"n0",
")",
",",
"size",
"=",
"n",
")",
"idx1",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"range",
"(",
"n1",
")",
",",
"size",
"=",
"n",
")",
"# b -- bias for the input and h layers",
"b0",
"=",
"np",
".",
"ones",
"(",
"(",
"n0",
",",
"1",
")",
")",
"b1",
"=",
"np",
".",
"ones",
"(",
"(",
"n1",
",",
"1",
")",
")",
"i1",
"=",
"self",
".",
"i",
"+",
"1",
"h",
"=",
"self",
".",
"h",
"h1",
"=",
"h",
"+",
"1",
"# Predict for features -- cannot use predict_raw() because here",
"# different weights can be used.",
"if",
"sparse",
".",
"issparse",
"(",
"x0",
")",
":",
"p0",
"=",
"np",
".",
"hstack",
"(",
"(",
"sigm",
"(",
"sparse",
".",
"hstack",
"(",
"(",
"x0",
",",
"b0",
")",
")",
".",
"dot",
"(",
"w",
"[",
":",
"-",
"h1",
"]",
".",
"reshape",
"(",
"i1",
",",
"h",
")",
")",
")",
",",
"b0",
")",
")",
".",
"dot",
"(",
"w",
"[",
"-",
"h1",
":",
"]",
".",
"reshape",
"(",
"h1",
",",
"1",
")",
")",
"p1",
"=",
"np",
".",
"hstack",
"(",
"(",
"sigm",
"(",
"sparse",
".",
"hstack",
"(",
"(",
"x1",
",",
"b1",
")",
")",
".",
"dot",
"(",
"w",
"[",
":",
"-",
"h1",
"]",
".",
"reshape",
"(",
"i1",
",",
"h",
")",
")",
")",
",",
"b1",
")",
")",
".",
"dot",
"(",
"w",
"[",
"-",
"h1",
":",
"]",
".",
"reshape",
"(",
"h1",
",",
"1",
")",
")",
"else",
":",
"p0",
"=",
"np",
".",
"hstack",
"(",
"(",
"sigm",
"(",
"np",
".",
"hstack",
"(",
"(",
"x0",
",",
"b0",
")",
")",
".",
"dot",
"(",
"w",
"[",
":",
"-",
"h1",
"]",
".",
"reshape",
"(",
"i1",
",",
"h",
")",
")",
")",
",",
"b0",
")",
")",
".",
"dot",
"(",
"w",
"[",
"-",
"h1",
":",
"]",
".",
"reshape",
"(",
"h1",
",",
"1",
")",
")",
"p1",
"=",
"np",
".",
"hstack",
"(",
"(",
"sigm",
"(",
"np",
".",
"hstack",
"(",
"(",
"x1",
",",
"b1",
")",
")",
".",
"dot",
"(",
"w",
"[",
":",
"-",
"h1",
"]",
".",
"reshape",
"(",
"i1",
",",
"h",
")",
")",
")",
",",
"b1",
")",
")",
".",
"dot",
"(",
"w",
"[",
"-",
"h1",
":",
"]",
".",
"reshape",
"(",
"h1",
",",
"1",
")",
")",
"p0",
"=",
"p0",
"[",
"idx0",
"]",
"p1",
"=",
"p1",
"[",
"idx1",
"]",
"# Return the cost that consists of the sum of squared error +",
"# L2-regularization for weights between the input and h layers +",
"# L2-regularization for weights between the h and output layers.",
"#return .5 * (sum((1 - sigm(p1 - p0)) ** 2) + self.l1 * sum(w[:-h1] ** 2) +",
"return",
".5",
"*",
"(",
"sum",
"(",
"(",
"1",
"-",
"p1",
"+",
"p0",
")",
"**",
"2",
")",
"/",
"n",
"+",
"self",
".",
"l1",
"*",
"sum",
"(",
"w",
"[",
":",
"-",
"h1",
"]",
"**",
"2",
")",
"/",
"(",
"i1",
"*",
"h",
")",
"+",
"self",
".",
"l2",
"*",
"sum",
"(",
"w",
"[",
"-",
"h1",
":",
"]",
"**",
"2",
")",
"/",
"h1",
")"
] | Return the costs of the neural network for predictions.
Args:
w (array of float): weight vectors such that:
w[:-h1] -- weights between the input and h layers
w[-h1:] -- weights between the h and output layers
args: feature matrices for the two sample groups (args[0] and args[1])
Returns:
combined cost of pairwise squared error and L2 regularization on both weight layers | [
"Return",
"the",
"costs",
"of",
"the",
"neural",
"network",
"for",
"predictions",
"."
] | 20661105b61958dc9a3c529c1d3b2313ab23ae32 | https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/model/nn.py#L204-L256 | train |
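In formula form (a reconstruction from the code above, not from any project documentation), with $w_1$ the $(i{+}1)\times h$ input-to-hidden weights, $w_2$ the $(h{+}1)$-vector of hidden-to-output weights, $\lambda_1$ = self.l1, $\lambda_2$ = self.l2, and $\tilde{x}$ denoting $x$ with a bias column appended, the sampled pairwise cost is

$$
J(w) = \frac{1}{2}\left[\frac{1}{n}\sum_{k=1}^{n}\Big(1 - \big(p_1^{(k)} - p_0^{(k)}\big)\Big)^2
+ \frac{\lambda_1}{(i+1)\,h}\,\lVert w_1 \rVert_2^2
+ \frac{\lambda_2}{h+1}\,\lVert w_2 \rVert_2^2\right],
\qquad p = \big[\sigma(\tilde{x}\,w_1),\ 1\big]\, w_2,
$$

so the optimizer pushes each sampled $p_1$ above its paired $p_0$ by a margin of 1 while shrinking both weight layers.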
jeongyoonlee/Kaggler | kaggler/model/nn.py | NN.fprime | def fprime(self, w, *args):
"""Return the derivatives of the cost function for predictions.
Args:
w (array of float): weight vectors such that:
w[:-h1] -- weights between the input and h layers
w[-h1:] -- weights between the h and output layers
args: feature matrices for the two sample groups (args[0] and args[1])
Returns:
gradients of the cost function for predictions
"""
x0 = args[0]
x1 = args[1]
n0 = x0.shape[0]
n1 = x1.shape[0]
# n -- number of pairs to evaluate
n = max(n0, n1) * 10
idx0 = np.random.choice(range(n0), size=n)
idx1 = np.random.choice(range(n1), size=n)
# b -- bias for the input and h layers
b = np.ones((n, 1))
i1 = self.i + 1
h = self.h
h1 = h + 1
w2 = w[-h1:].reshape(h1, 1)
w1 = w[:-h1].reshape(i1, h)
if sparse.issparse(x0):
x0 = x0.tocsr()[idx0]
x1 = x1.tocsr()[idx1]
xb0 = sparse.hstack((x0, b))
xb1 = sparse.hstack((x1, b))
else:
x0 = x0[idx0]
x1 = x1[idx1]
xb0 = np.hstack((x0, b))
xb1 = np.hstack((x1, b))
z0 = np.hstack((sigm(xb0.dot(w1)), b))
z1 = np.hstack((sigm(xb1.dot(w1)), b))
y0 = z0.dot(w2)
y1 = z1.dot(w2)
#e = 1 - sigm(y1 - y0)
#dy = e * dsigm(y1 - y0)
e = 1 - (y1 - y0)
dy = e / n
# Calculate the derivative of the cost function w.r.t. w1 and w2 where:
# w1 -- weights between the input and h layers
# w2 -- weights between the h and output layers
dw1 = -(xb1.T.dot(dy.dot(w2[:-1].reshape(1, h)) * dsigm(xb1.dot(w1))) -
xb0.T.dot(dy.dot(w2[:-1].reshape(1, h)) * dsigm(xb0.dot(w1)))
).reshape(i1 * h) + self.l1 * w[:-h1] / (i1 * h)
dw2 = -(z1 - z0).T.dot(dy).reshape(h1) + self.l2 * w[-h1:] / h1
return np.append(dw1, dw2) | python | def fprime(self, w, *args):
"""Return the derivatives of the cost function for predictions.
Args:
w (array of float): weight vectors such that:
w[:-h1] -- weights between the input and h layers
w[-h1:] -- weights between the h and output layers
args: feature matrices for the two sample groups (args[0] and args[1])
Returns:
gradients of the cost function for predictions
"""
x0 = args[0]
x1 = args[1]
n0 = x0.shape[0]
n1 = x1.shape[0]
# n -- number of pairs to evaluate
n = max(n0, n1) * 10
idx0 = np.random.choice(range(n0), size=n)
idx1 = np.random.choice(range(n1), size=n)
# b -- bias for the input and h layers
b = np.ones((n, 1))
i1 = self.i + 1
h = self.h
h1 = h + 1
w2 = w[-h1:].reshape(h1, 1)
w1 = w[:-h1].reshape(i1, h)
if sparse.issparse(x0):
x0 = x0.tocsr()[idx0]
x1 = x1.tocsr()[idx1]
xb0 = sparse.hstack((x0, b))
xb1 = sparse.hstack((x1, b))
else:
x0 = x0[idx0]
x1 = x1[idx1]
xb0 = np.hstack((x0, b))
xb1 = np.hstack((x1, b))
z0 = np.hstack((sigm(xb0.dot(w1)), b))
z1 = np.hstack((sigm(xb1.dot(w1)), b))
y0 = z0.dot(w2)
y1 = z1.dot(w2)
#e = 1 - sigm(y1 - y0)
#dy = e * dsigm(y1 - y0)
e = 1 - (y1 - y0)
dy = e / n
# Calculate the derivative of the cost function w.r.t. w1 and w2 where:
# w1 -- weights between the input and h layers
# w2 -- weights between the h and output layers
dw1 = -(xb1.T.dot(dy.dot(w2[:-1].reshape(1, h)) * dsigm(xb1.dot(w1))) -
xb0.T.dot(dy.dot(w2[:-1].reshape(1, h)) * dsigm(xb0.dot(w1)))
).reshape(i1 * h) + self.l1 * w[:-h1] / (i1 * h)
dw2 = -(z1 - z0).T.dot(dy).reshape(h1) + self.l2 * w[-h1:] / h1
return np.append(dw1, dw2) | [
"def",
"fprime",
"(",
"self",
",",
"w",
",",
"*",
"args",
")",
":",
"x0",
"=",
"args",
"[",
"0",
"]",
"x1",
"=",
"args",
"[",
"1",
"]",
"n0",
"=",
"x0",
".",
"shape",
"[",
"0",
"]",
"n1",
"=",
"x1",
".",
"shape",
"[",
"0",
"]",
"# n -- number of pairs to evaluate",
"n",
"=",
"max",
"(",
"n0",
",",
"n1",
")",
"*",
"10",
"idx0",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"range",
"(",
"n0",
")",
",",
"size",
"=",
"n",
")",
"idx1",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"range",
"(",
"n1",
")",
",",
"size",
"=",
"n",
")",
"# b -- bias for the input and h layers",
"b",
"=",
"np",
".",
"ones",
"(",
"(",
"n",
",",
"1",
")",
")",
"i1",
"=",
"self",
".",
"i",
"+",
"1",
"h",
"=",
"self",
".",
"h",
"h1",
"=",
"h",
"+",
"1",
"w2",
"=",
"w",
"[",
"-",
"h1",
":",
"]",
".",
"reshape",
"(",
"h1",
",",
"1",
")",
"w1",
"=",
"w",
"[",
":",
"-",
"h1",
"]",
".",
"reshape",
"(",
"i1",
",",
"h",
")",
"if",
"sparse",
".",
"issparse",
"(",
"x0",
")",
":",
"x0",
"=",
"x0",
".",
"tocsr",
"(",
")",
"[",
"idx0",
"]",
"x1",
"=",
"x1",
".",
"tocsr",
"(",
")",
"[",
"idx1",
"]",
"xb0",
"=",
"sparse",
".",
"hstack",
"(",
"(",
"x0",
",",
"b",
")",
")",
"xb1",
"=",
"sparse",
".",
"hstack",
"(",
"(",
"x1",
",",
"b",
")",
")",
"else",
":",
"x0",
"=",
"x0",
"[",
"idx0",
"]",
"x1",
"=",
"x1",
"[",
"idx1",
"]",
"xb0",
"=",
"np",
".",
"hstack",
"(",
"(",
"x0",
",",
"b",
")",
")",
"xb1",
"=",
"np",
".",
"hstack",
"(",
"(",
"x1",
",",
"b",
")",
")",
"z0",
"=",
"np",
".",
"hstack",
"(",
"(",
"sigm",
"(",
"xb0",
".",
"dot",
"(",
"w1",
")",
")",
",",
"b",
")",
")",
"z1",
"=",
"np",
".",
"hstack",
"(",
"(",
"sigm",
"(",
"xb1",
".",
"dot",
"(",
"w1",
")",
")",
",",
"b",
")",
")",
"y0",
"=",
"z0",
".",
"dot",
"(",
"w2",
")",
"y1",
"=",
"z1",
".",
"dot",
"(",
"w2",
")",
"#e = 1 - sigm(y1 - y0)",
"#dy = e * dsigm(y1 - y0)",
"e",
"=",
"1",
"-",
"(",
"y1",
"-",
"y0",
")",
"dy",
"=",
"e",
"/",
"n",
"# Calculate the derivative of the cost function w.r.t. F and w2 where:",
"# F -- weights between the input and h layers",
"# w2 -- weights between the h and output layers",
"dw1",
"=",
"-",
"(",
"xb1",
".",
"T",
".",
"dot",
"(",
"dy",
".",
"dot",
"(",
"w2",
"[",
":",
"-",
"1",
"]",
".",
"reshape",
"(",
"1",
",",
"h",
")",
")",
"*",
"dsigm",
"(",
"xb1",
".",
"dot",
"(",
"w1",
")",
")",
")",
"-",
"xb0",
".",
"T",
".",
"dot",
"(",
"dy",
".",
"dot",
"(",
"w2",
"[",
":",
"-",
"1",
"]",
".",
"reshape",
"(",
"1",
",",
"h",
")",
")",
"*",
"dsigm",
"(",
"xb0",
".",
"dot",
"(",
"w1",
")",
")",
")",
")",
".",
"reshape",
"(",
"i1",
"*",
"h",
")",
"+",
"self",
".",
"l1",
"*",
"w",
"[",
":",
"-",
"h1",
"]",
"/",
"(",
"i1",
"*",
"h",
")",
"dw2",
"=",
"-",
"(",
"z1",
"-",
"z0",
")",
".",
"T",
".",
"dot",
"(",
"dy",
")",
".",
"reshape",
"(",
"h1",
")",
"+",
"self",
".",
"l2",
"*",
"w",
"[",
"-",
"h1",
":",
"]",
"/",
"h1",
"return",
"np",
".",
"append",
"(",
"dw1",
",",
"dw2",
")"
] | Return the derivatives of the cost function for predictions.
Args:
w (array of float): weight vectors such that:
w[:-h1] -- weights between the input and h layers
w[-h1:] -- weights between the h and output layers
args: features (args[0]) and target (args[1])
Returns:
gradients of the cost function for predictions | [
"Return",
"the",
"derivatives",
"of",
"the",
"cost",
"function",
"for",
"predictions",
"."
] | 20661105b61958dc9a3c529c1d3b2313ab23ae32 | https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/model/nn.py#L258-L320 | train |
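The `fprime` above returns one flat gradient vector matching the flat weight vector `w`, which is exactly the contract SciPy's quasi-Newton optimizers expect. A minimal sketch of that wiring, using a toy quadratic cost in place of the network's pairwise objective (all names here are illustrative, not taken from the record):

```python
import numpy as np
from scipy.optimize import check_grad, fmin_l_bfgs_b

def f(w, A, b):
    # toy stand-in for the cost: 0.5 * ||A w - b||^2
    r = A.dot(w) - b
    return .5 * r.dot(r)

def fprime(w, A, b):
    # analytic gradient, flat like the record's np.append(dw1, dw2)
    return A.T.dot(A.dot(w) - b)

rng = np.random.RandomState(0)
A, b = rng.rand(20, 5), rng.rand(20)
w0 = np.zeros(5)
print(check_grad(f, fprime, w0, A, b))  # tiny value: gradient matches cost
w_opt, cost, info = fmin_l_bfgs_b(f, w0, fprime=fprime, args=(A, b))
```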
jeongyoonlee/Kaggler | kaggler/preprocessing/data.py | Normalizer._transform_col | def _transform_col(self, x, col):
"""Normalize one numerical column.
Args:
x (numpy.array): a numerical column to normalize
col (int): column index
Returns:
A normalized feature vector.
"""
return norm.ppf(self.ecdfs[col](x) * .998 + .001) | python | def _transform_col(self, x, col):
"""Normalize one numerical column.
Args:
x (numpy.array): a numerical column to normalize
col (int): column index
Returns:
A normalized feature vector.
"""
return norm.ppf(self.ecdfs[col](x) * .998 + .001) | [
"def",
"_transform_col",
"(",
"self",
",",
"x",
",",
"col",
")",
":",
"return",
"norm",
".",
"ppf",
"(",
"self",
".",
"ecdfs",
"[",
"col",
"]",
"(",
"x",
")",
"*",
".998",
"+",
".001",
")"
] | Normalize one numerical column.
Args:
x (numpy.array): a numerical column to normalize
col (int): column index
Returns:
A normalized feature vector. | [
"Normalize",
"one",
"numerical",
"column",
"."
] | 20661105b61958dc9a3c529c1d3b2313ab23ae32 | https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/preprocessing/data.py#L66-L77 | train |
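The transform sends each value through its column's empirical CDF and then through the inverse Gaussian CDF, so any marginal distribution comes out approximately standard normal; the `* .998 + .001` squeeze keeps the ECDF output strictly inside (0, 1) so `norm.ppf` never returns infinities. A self-contained sketch of the same idea (using statsmodels' `ECDF` as an assumption about how `self.ecdfs` is fitted):

```python
import numpy as np
from scipy.stats import norm
from statsmodels.distributions.empirical_distribution import ECDF

x = np.random.RandomState(0).exponential(size=1000)     # heavily skewed column
ecdf = ECDF(x)                                          # fit step
x_norm = norm.ppf(ecdf(x) * .998 + .001)                # transform step
print(round(x_norm.mean(), 2), round(x_norm.std(), 2))  # close to 0 and 1
```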
jeongyoonlee/Kaggler | kaggler/preprocessing/data.py | LabelEncoder._get_label_encoder_and_max | def _get_label_encoder_and_max(self, x):
"""Return a mapping from values and its maximum of a column to integer labels.
Args:
x (pandas.Series): a categorical column to encode.
Returns:
label_encoder (dict): mapping from values of features to integers
max_label (int): maximum label
"""
# NaN cannot be used as a key for dict. So replace it with a random integer.
label_count = x.fillna(NAN_INT).value_counts()
n_uniq = label_count.shape[0]
label_count = label_count[label_count >= self.min_obs]
n_uniq_new = label_count.shape[0]
# If every label appears at least min_obs times, new label starts from 0.
# Otherwise, new label starts from 1 and 0 is used for all old labels
# that appear less than min_obs.
offset = 0 if n_uniq == n_uniq_new else 1
label_encoder = pd.Series(np.arange(n_uniq_new) + offset, index=label_count.index)
max_label = label_encoder.max()
label_encoder = label_encoder.to_dict()
return label_encoder, max_label | python | def _get_label_encoder_and_max(self, x):
"""Return a mapping from values and its maximum of a column to integer labels.
Args:
x (pandas.Series): a categorical column to encode.
Returns:
label_encoder (dict): mapping from values of features to integers
max_label (int): maximum label
"""
# NaN cannot be used as a key for dict. So replace it with a random integer.
label_count = x.fillna(NAN_INT).value_counts()
n_uniq = label_count.shape[0]
label_count = label_count[label_count >= self.min_obs]
n_uniq_new = label_count.shape[0]
# If every label appears at least min_obs times, new label starts from 0.
# Otherwise, new label starts from 1 and 0 is used for all old labels
# that appear less than min_obs.
offset = 0 if n_uniq == n_uniq_new else 1
label_encoder = pd.Series(np.arange(n_uniq_new) + offset, index=label_count.index)
max_label = label_encoder.max()
label_encoder = label_encoder.to_dict()
return label_encoder, max_label | [
"def",
"_get_label_encoder_and_max",
"(",
"self",
",",
"x",
")",
":",
"# NaN cannot be used as a key for dict. So replace it with a random integer.",
"label_count",
"=",
"x",
".",
"fillna",
"(",
"NAN_INT",
")",
".",
"value_counts",
"(",
")",
"n_uniq",
"=",
"label_count",
".",
"shape",
"[",
"0",
"]",
"label_count",
"=",
"label_count",
"[",
"label_count",
">=",
"self",
".",
"min_obs",
"]",
"n_uniq_new",
"=",
"label_count",
".",
"shape",
"[",
"0",
"]",
"# If every label appears more than min_obs, new label starts from 0.",
"# Otherwise, new label starts from 1 and 0 is used for all old labels",
"# that appear less than min_obs.",
"offset",
"=",
"0",
"if",
"n_uniq",
"==",
"n_uniq_new",
"else",
"1",
"label_encoder",
"=",
"pd",
".",
"Series",
"(",
"np",
".",
"arange",
"(",
"n_uniq_new",
")",
"+",
"offset",
",",
"index",
"=",
"label_count",
".",
"index",
")",
"max_label",
"=",
"label_encoder",
".",
"max",
"(",
")",
"label_encoder",
"=",
"label_encoder",
".",
"to_dict",
"(",
")",
"return",
"label_encoder",
",",
"max_label"
] | Return a mapping from values and its maximum of a column to integer labels.
Args:
x (pandas.Series): a categorical column to encode.
Returns:
label_encoder (dict): mapping from values of features to integers
max_label (int): maximum label | [
"Return",
"a",
"mapping",
"from",
"values",
"and",
"its",
"maximum",
"of",
"a",
"column",
"to",
"integer",
"labels",
"."
] | 20661105b61958dc9a3c529c1d3b2313ab23ae32 | https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/preprocessing/data.py#L101-L128 | train |
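A worked example of the `min_obs` logic: labels seen fewer than `min_obs` times collapse into the reserved label 0 at transform time, so the offset of 1 only appears when something was actually filtered out. This sketch inlines the same steps on a tiny column, with `-1` standing in for the record's `NAN_INT`:

```python
import numpy as np
import pandas as pd

x = pd.Series(['a', 'a', 'a', 'b', 'b', 'c', None])
min_obs = 2
label_count = x.fillna(-1).value_counts()           # a:3, b:2, plus two singletons
kept = label_count[label_count >= min_obs]          # a:3, b:2
offset = 0 if len(kept) == len(label_count) else 1  # 1: 'c' and NaN were filtered
encoder = pd.Series(np.arange(len(kept)) + offset, index=kept.index).to_dict()
print(encoder)                                      # {'a': 1, 'b': 2}
```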
jeongyoonlee/Kaggler | kaggler/preprocessing/data.py | LabelEncoder._transform_col | def _transform_col(self, x, i):
"""Encode one categorical column into labels.
Args:
x (pandas.Series): a categorical column to encode
i (int): column index
Returns:
x (pandas.Series): a column with labels.
"""
return x.fillna(NAN_INT).map(self.label_encoders[i]).fillna(0) | python | def _transform_col(self, x, i):
"""Encode one categorical column into labels.
Args:
x (pandas.Series): a categorical column to encode
i (int): column index
Returns:
x (pandas.Series): a column with labels.
"""
return x.fillna(NAN_INT).map(self.label_encoders[i]).fillna(0) | [
"def",
"_transform_col",
"(",
"self",
",",
"x",
",",
"i",
")",
":",
"return",
"x",
".",
"fillna",
"(",
"NAN_INT",
")",
".",
"map",
"(",
"self",
".",
"label_encoders",
"[",
"i",
"]",
")",
".",
"fillna",
"(",
"0",
")"
] | Encode one categorical column into labels.
Args:
x (pandas.Series): a categorical column to encode
i (int): column index
Returns:
x (pandas.Series): a column with labels. | [
"Encode",
"one",
"categorical",
"column",
"into",
"labels",
"."
] | 20661105b61958dc9a3c529c1d3b2313ab23ae32 | https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/preprocessing/data.py#L130-L140 | train |
jeongyoonlee/Kaggler | kaggler/preprocessing/data.py | OneHotEncoder._transform_col | def _transform_col(self, x, i):
"""Encode one categorical column into sparse matrix with one-hot-encoding.
Args:
x (pandas.Series): a categorical column to encode
i (int): column index
Returns:
X (scipy.sparse.coo_matrix): sparse matrix encoding a categorical
variable into dummy variables
"""
labels = self.label_encoder._transform_col(x, i)
label_max = self.label_encoder.label_maxes[i]
# build row and column index for non-zero values of a sparse matrix
index = np.array(range(len(labels)))
i = index[labels > 0]
j = labels[labels > 0] - 1 # column index starts from 0
if len(i) > 0:
return sparse.coo_matrix((np.ones_like(i), (i, j)),
shape=(x.shape[0], label_max))
else:
# if there is no non-zero value, return no matrix
return None | python | def _transform_col(self, x, i):
"""Encode one categorical column into sparse matrix with one-hot-encoding.
Args:
x (pandas.Series): a categorical column to encode
i (int): column index
Returns:
X (scipy.sparse.coo_matrix): sparse matrix encoding a categorical
variable into dummy variables
"""
labels = self.label_encoder._transform_col(x, i)
label_max = self.label_encoder.label_maxes[i]
# build row and column index for non-zero values of a sparse matrix
index = np.array(range(len(labels)))
i = index[labels > 0]
j = labels[labels > 0] - 1 # column index starts from 0
if len(i) > 0:
return sparse.coo_matrix((np.ones_like(i), (i, j)),
shape=(x.shape[0], label_max))
else:
# if there is no non-zero value, return no matrix
return None | [
"def",
"_transform_col",
"(",
"self",
",",
"x",
",",
"i",
")",
":",
"labels",
"=",
"self",
".",
"label_encoder",
".",
"_transform_col",
"(",
"x",
",",
"i",
")",
"label_max",
"=",
"self",
".",
"label_encoder",
".",
"label_maxes",
"[",
"i",
"]",
"# build row and column index for non-zero values of a sparse matrix",
"index",
"=",
"np",
".",
"array",
"(",
"range",
"(",
"len",
"(",
"labels",
")",
")",
")",
"i",
"=",
"index",
"[",
"labels",
">",
"0",
"]",
"j",
"=",
"labels",
"[",
"labels",
">",
"0",
"]",
"-",
"1",
"# column index starts from 0",
"if",
"len",
"(",
"i",
")",
">",
"0",
":",
"return",
"sparse",
".",
"coo_matrix",
"(",
"(",
"np",
".",
"ones_like",
"(",
"i",
")",
",",
"(",
"i",
",",
"j",
")",
")",
",",
"shape",
"=",
"(",
"x",
".",
"shape",
"[",
"0",
"]",
",",
"label_max",
")",
")",
"else",
":",
"# if there is no non-zero value, return no matrix",
"return",
"None"
] | Encode one categorical column into sparse matrix with one-hot-encoding.
Args:
x (pandas.Series): a categorical column to encode
i (int): column index
Returns:
X (scipy.sparse.coo_matrix): sparse matrix encoding a categorical
variable into dummy variables | [
"Encode",
"one",
"categorical",
"column",
"into",
"sparse",
"matrix",
"with",
"one",
"-",
"hot",
"-",
"encoding",
"."
] | 20661105b61958dc9a3c529c1d3b2313ab23ae32 | https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/preprocessing/data.py#L212-L237 | train |
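Because label 0 is the bucket for rare or unseen values, the encoder simply emits no column for it: only positions with `label > 0` become non-zeros, at column index `label - 1`. A tiny standalone illustration of that COO construction:

```python
import numpy as np
from scipy import sparse

labels = np.array([1, 0, 2, 1])            # 0 = rare/unseen bucket
rows = np.arange(len(labels))[labels > 0]  # [0, 2, 3]
cols = labels[labels > 0] - 1              # [0, 1, 0]
X = sparse.coo_matrix((np.ones_like(rows), (rows, cols)), shape=(4, 2))
print(X.toarray())
# [[1 0]
#  [0 0]
#  [0 1]
#  [1 0]]
```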
jeongyoonlee/Kaggler | kaggler/preprocessing/data.py | OneHotEncoder.transform | def transform(self, X):
"""Encode categorical columns into sparse matrix with one-hot-encoding.
Args:
X (pandas.DataFrame): categorical columns to encode
Returns:
X_new (scipy.sparse.coo_matrix): sparse matrix encoding categorical
variables into dummy variables
"""
for i, col in enumerate(X.columns):
X_col = self._transform_col(X[col], i)
if X_col is not None:
if i == 0:
X_new = X_col
else:
X_new = sparse.hstack((X_new, X_col))
logger.debug('{} --> {} features'.format(
col, self.label_encoder.label_maxes[i])
)
return X_new | python | def transform(self, X):
"""Encode categorical columns into sparse matrix with one-hot-encoding.
Args:
X (pandas.DataFrame): categorical columns to encode
Returns:
X_new (scipy.sparse.coo_matrix): sparse matrix encoding categorical
variables into dummy variables
"""
for i, col in enumerate(X.columns):
X_col = self._transform_col(X[col], i)
if X_col is not None:
if i == 0:
X_new = X_col
else:
X_new = sparse.hstack((X_new, X_col))
logger.debug('{} --> {} features'.format(
col, self.label_encoder.label_maxes[i])
)
return X_new | [
"def",
"transform",
"(",
"self",
",",
"X",
")",
":",
"for",
"i",
",",
"col",
"in",
"enumerate",
"(",
"X",
".",
"columns",
")",
":",
"X_col",
"=",
"self",
".",
"_transform_col",
"(",
"X",
"[",
"col",
"]",
",",
"i",
")",
"if",
"X_col",
"is",
"not",
"None",
":",
"if",
"i",
"==",
"0",
":",
"X_new",
"=",
"X_col",
"else",
":",
"X_new",
"=",
"sparse",
".",
"hstack",
"(",
"(",
"X_new",
",",
"X_col",
")",
")",
"logger",
".",
"debug",
"(",
"'{} --> {} features'",
".",
"format",
"(",
"col",
",",
"self",
".",
"label_encoder",
".",
"label_maxes",
"[",
"i",
"]",
")",
")",
"return",
"X_new"
] | Encode categorical columns into sparse matrix with one-hot-encoding.
Args:
X (pandas.DataFrame): categorical columns to encode
Returns:
X_new (scipy.sparse.coo_matrix): sparse matrix encoding categorical
variables into dummy variables | [
"Encode",
"categorical",
"columns",
"into",
"sparse",
"matrix",
"with",
"one",
"-",
"hot",
"-",
"encoding",
"."
] | 20661105b61958dc9a3c529c1d3b2313ab23ae32 | https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/preprocessing/data.py#L244-L267 | train |
jeongyoonlee/Kaggler | kaggler/online_model/DecisionTree/OnlineClassificationTree.py | ClassificationTree.predict | def predict(self, x):
"""
Make prediction recursively. Use both the samples inside the current
node and the statistics inherited from parent.
"""
if self._is_leaf():
d1 = self.predict_initialize['count_dict']
d2 = count_dict(self.Y)
for key, value in d1.items():
if key in d2:
d2[key] += value
else:
d2[key] = value
return argmax(d2)
else:
if self.criterion(x):
return self.right.predict(x)
else:
return self.left.predict(x) | python | def predict(self, x):
"""
Make prediction recursively. Use both the samples inside the current
node and the statistics inherited from parent.
"""
if self._is_leaf():
d1 = self.predict_initialize['count_dict']
d2 = count_dict(self.Y)
for key, value in d1.items():
if key in d2:
d2[key] += value
else:
d2[key] = value
return argmax(d2)
else:
if self.criterion(x):
return self.right.predict(x)
else:
return self.left.predict(x) | [
"def",
"predict",
"(",
"self",
",",
"x",
")",
":",
"if",
"self",
".",
"_is_leaf",
"(",
")",
":",
"d1",
"=",
"self",
".",
"predict_initialize",
"[",
"'count_dict'",
"]",
"d2",
"=",
"count_dict",
"(",
"self",
".",
"Y",
")",
"for",
"key",
",",
"value",
"in",
"d1",
".",
"iteritems",
"(",
")",
":",
"if",
"key",
"in",
"d2",
":",
"d2",
"[",
"key",
"]",
"+=",
"value",
"else",
":",
"d2",
"[",
"key",
"]",
"=",
"value",
"return",
"argmax",
"(",
"d2",
")",
"else",
":",
"if",
"self",
".",
"criterion",
"(",
"x",
")",
":",
"return",
"self",
".",
"right",
".",
"predict",
"(",
"x",
")",
"else",
":",
"return",
"self",
".",
"left",
".",
"predict",
"(",
"x",
")"
] | Make prediction recursively. Use both the samples inside the current
node and the statistics inherited from parent. | [
"Make",
"prediction",
"recursively",
".",
"Use",
"both",
"the",
"samples",
"inside",
"the",
"current",
"node",
"and",
"the",
"statistics",
"inherited",
"from",
"parent",
"."
] | 20661105b61958dc9a3c529c1d3b2313ab23ae32 | https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/online_model/DecisionTree/OnlineClassificationTree.py#L74-L92 | train |
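At a leaf, the prediction merges two tallies, the class counts inherited from the parent at split time and the counts of samples that reached the leaf afterwards, then takes the majority class. Assuming `count_dict` and `argmax` behave like `collections.Counter` and a keyed `max` (the record does not show their definitions), the merge reduces to:

```python
from collections import Counter

parent = Counter({0: 5, 1: 2})      # counts inherited from the parent node
leaf = Counter([1, 1, 0])           # labels of samples seen at this leaf
merged = parent + leaf              # Counter({0: 6, 1: 4})
print(max(merged, key=merged.get))  # 0, the majority class
```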
jeongyoonlee/Kaggler | kaggler/ensemble/linear.py | netflix | def netflix(es, ps, e0, l=.0001):
"""
Combine predictions with the optimal weights to minimize RMSE.
Args:
es (list of float): RMSEs of predictions
ps (list of np.array): predictions
e0 (float): RMSE of all zero prediction
l (float): lambda as in the ridge regression
Returns:
Ensemble prediction (np.array) and weights (np.array) for input predictions
"""
m = len(es)
n = len(ps[0])
X = np.stack(ps).T
pTy = .5 * (n * e0**2 + (X**2).sum(axis=0) - n * np.array(es)**2)
w = np.linalg.pinv(X.T.dot(X) + l * n * np.eye(m)).dot(pTy)
return X.dot(w), w | python | def netflix(es, ps, e0, l=.0001):
"""
Combine predictions with the optimal weights to minimize RMSE.
Args:
es (list of float): RMSEs of predictions
ps (list of np.array): predictions
e0 (float): RMSE of all zero prediction
l (float): lambda as in the ridge regression
Returns:
Ensemble prediction (np.array) and weights (np.array) for input predictions
"""
m = len(es)
n = len(ps[0])
X = np.stack(ps).T
pTy = .5 * (n * e0**2 + (X**2).sum(axis=0) - n * np.array(es)**2)
w = np.linalg.pinv(X.T.dot(X) + l * n * np.eye(m)).dot(pTy)
return X.dot(w), w | [
"def",
"netflix",
"(",
"es",
",",
"ps",
",",
"e0",
",",
"l",
"=",
".0001",
")",
":",
"m",
"=",
"len",
"(",
"es",
")",
"n",
"=",
"len",
"(",
"ps",
"[",
"0",
"]",
")",
"X",
"=",
"np",
".",
"stack",
"(",
"ps",
")",
".",
"T",
"pTy",
"=",
".5",
"*",
"(",
"n",
"*",
"e0",
"**",
"2",
"+",
"(",
"X",
"**",
"2",
")",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
"-",
"n",
"*",
"np",
".",
"array",
"(",
"es",
")",
"**",
"2",
")",
"w",
"=",
"np",
".",
"linalg",
".",
"pinv",
"(",
"X",
".",
"T",
".",
"dot",
"(",
"X",
")",
"+",
"l",
"*",
"n",
"*",
"np",
".",
"eye",
"(",
"m",
")",
")",
".",
"dot",
"(",
"pTy",
")",
"return",
"X",
".",
"dot",
"(",
"w",
")",
",",
"w"
] | Combine predictions with the optimal weights to minimize RMSE.
Args:
es (list of float): RMSEs of predictions
ps (list of np.array): predictions
e0 (float): RMSE of all zero prediction
l (float): lambda as in the ridge regression
Returns:
Ensemble prediction (np.array) and weights (np.array) for input predictions | [
"Combine",
"predictions",
"with",
"the",
"optimal",
"weights",
"to",
"minimize",
"RMSE",
"."
] | 20661105b61958dc9a3c529c1d3b2313ab23ae32 | https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/ensemble/linear.py#L7-L28 | train |
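The trick behind `pTy` is that the dot product of a prediction with the hidden target can be recovered from RMSEs alone: since n e^2 = ||p - y||^2 = ||p||^2 - 2 p^T y + ||y||^2 and n e0^2 = ||y||^2, it follows that p^T y = (n e0^2 + ||p||^2 - n e^2) / 2, so leaderboard scores are enough to fit the ridge weights without ever seeing `y`. A usage sketch, assuming the `netflix` function above is in scope (e.g. imported from `kaggler.ensemble`):

```python
import numpy as np

rng = np.random.RandomState(0)
n = 10000
y = rng.rand(n)
p1 = y + rng.normal(0, .10, n)  # two noisy base predictions
p2 = y + rng.normal(0, .15, n)

def rmse(a, b):
    return np.sqrt(np.mean((a - b) ** 2))

es = [rmse(y, p1), rmse(y, p2)]
e0 = rmse(y, np.zeros(n))       # RMSE of the all-zero prediction
p_ens, w = netflix(es, [p1, p2], e0)
print(w, rmse(y, p_ens))        # the blend typically beats both inputs
```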
jeongyoonlee/Kaggler | kaggler/data_io.py | save_data | def save_data(X, y, path):
"""Save data as a CSV, LibSVM or HDF5 file based on the file extension.
Args:
X (numpy or scipy sparse matrix): Data matrix
y (numpy array): Target vector. If None, all zero vector will be saved.
path (str): Path to the CSV, LibSVM or HDF5 file to save data.
"""
catalog = {'.csv': save_csv, '.sps': save_libsvm, '.h5': save_hdf5}
ext = os.path.splitext(path)[1]
func = catalog[ext]
if y is None:
y = np.zeros((X.shape[0], ))
func(X, y, path) | python | def save_data(X, y, path):
"""Save data as a CSV, LibSVM or HDF5 file based on the file extension.
Args:
X (numpy or scipy sparse matrix): Data matrix
y (numpy array): Target vector. If None, all zero vector will be saved.
path (str): Path to the CSV, LibSVM or HDF5 file to save data.
"""
catalog = {'.csv': save_csv, '.sps': save_libsvm, '.h5': save_hdf5}
ext = os.path.splitext(path)[1]
func = catalog[ext]
if y is None:
y = np.zeros((X.shape[0], ))
func(X, y, path) | [
"def",
"save_data",
"(",
"X",
",",
"y",
",",
"path",
")",
":",
"catalog",
"=",
"{",
"'.csv'",
":",
"save_csv",
",",
"'.sps'",
":",
"save_libsvm",
",",
"'.h5'",
":",
"save_hdf5",
"}",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"path",
")",
"[",
"1",
"]",
"func",
"=",
"catalog",
"[",
"ext",
"]",
"if",
"y",
"is",
"None",
":",
"y",
"=",
"np",
".",
"zeros",
"(",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
",",
")",
")",
"func",
"(",
"X",
",",
"y",
",",
"path",
")"
] | Save data as a CSV, LibSVM or HDF5 file based on the file extension.
Args:
X (numpy or scipy sparse matrix): Data matrix
y (numpy array): Target vector. If None, all zero vector will be saved.
path (str): Path to the CSV, LibSVM or HDF5 file to save data. | [
"Save",
"data",
"as",
"a",
"CSV",
"LibSVM",
"or",
"HDF5",
"file",
"based",
"on",
"the",
"file",
"extension",
"."
] | 20661105b61958dc9a3c529c1d3b2313ab23ae32 | https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/data_io.py#L34-L50 | train |
jeongyoonlee/Kaggler | kaggler/data_io.py | save_csv | def save_csv(X, y, path):
"""Save data as a CSV file.
Args:
X (numpy or scipy sparse matrix): Data matrix
y (numpy array): Target vector.
path (str): Path to the CSV file to save data.
"""
if sparse.issparse(X):
X = X.todense()
np.savetxt(path, np.hstack((y.reshape((-1, 1)), X)), delimiter=',') | python | def save_csv(X, y, path):
"""Save data as a CSV file.
Args:
X (numpy or scipy sparse matrix): Data matrix
y (numpy array): Target vector.
path (str): Path to the CSV file to save data.
"""
if sparse.issparse(X):
X = X.todense()
np.savetxt(path, np.hstack((y.reshape((-1, 1)), X)), delimiter=',') | [
"def",
"save_csv",
"(",
"X",
",",
"y",
",",
"path",
")",
":",
"if",
"sparse",
".",
"issparse",
"(",
"X",
")",
":",
"X",
"=",
"X",
".",
"todense",
"(",
")",
"np",
".",
"savetxt",
"(",
"path",
",",
"np",
".",
"hstack",
"(",
"(",
"y",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
",",
"X",
")",
")",
",",
"delimiter",
"=",
"','",
")"
] | Save data as a CSV file.
Args:
X (numpy or scipy sparse matrix): Data matrix
y (numpy array): Target vector.
path (str): Path to the CSV file to save data. | [
"Save",
"data",
"as",
"a",
"CSV",
"file",
"."
] | 20661105b61958dc9a3c529c1d3b2313ab23ae32 | https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/data_io.py#L53-L65 | train |
jeongyoonlee/Kaggler | kaggler/data_io.py | save_libsvm | def save_libsvm(X, y, path):
"""Save data as a LibSVM file.
Args:
X (numpy or scipy sparse matrix): Data matrix
y (numpy array): Target vector.
path (str): Path to the CSV file to save data.
"""
dump_svmlight_file(X, y, path, zero_based=False) | python | def save_libsvm(X, y, path):
"""Save data as a LibSVM file.
Args:
X (numpy or scipy sparse matrix): Data matrix
y (numpy array): Target vector.
path (str): Path to the CSV file to save data.
"""
dump_svmlight_file(X, y, path, zero_based=False) | [
"def",
"save_libsvm",
"(",
"X",
",",
"y",
",",
"path",
")",
":",
"dump_svmlight_file",
"(",
"X",
",",
"y",
",",
"path",
",",
"zero_based",
"=",
"False",
")"
] | Save data as a LibSVM file.
Args:
X (numpy or scipy sparse matrix): Data matrix
y (numpy array): Target vector.
path (str): Path to the CSV file to save data. | [
"Save",
"data",
"as",
"a",
"LibSVM",
"file",
"."
] | 20661105b61958dc9a3c529c1d3b2313ab23ae32 | https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/data_io.py#L68-L77 | train |
jeongyoonlee/Kaggler | kaggler/data_io.py | save_hdf5 | def save_hdf5(X, y, path):
"""Save data as a HDF5 file.
Args:
X (numpy or scipy sparse matrix): Data matrix
y (numpy array): Target vector.
path (str): Path to the HDF5 file to save data.
"""
with h5py.File(path, 'w') as f:
is_sparse = 1 if sparse.issparse(X) else 0
f['issparse'] = is_sparse
f['target'] = y
if is_sparse:
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
f['shape'] = np.array(X.shape)
f['data'] = X.data
f['indices'] = X.indices
f['indptr'] = X.indptr
else:
f['data'] = X | python | def save_hdf5(X, y, path):
"""Save data as a HDF5 file.
Args:
X (numpy or scipy sparse matrix): Data matrix
y (numpy array): Target vector.
path (str): Path to the HDF5 file to save data.
"""
with h5py.File(path, 'w') as f:
is_sparse = 1 if sparse.issparse(X) else 0
f['issparse'] = is_sparse
f['target'] = y
if is_sparse:
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
f['shape'] = np.array(X.shape)
f['data'] = X.data
f['indices'] = X.indices
f['indptr'] = X.indptr
else:
f['data'] = X | [
"def",
"save_hdf5",
"(",
"X",
",",
"y",
",",
"path",
")",
":",
"with",
"h5py",
".",
"File",
"(",
"path",
",",
"'w'",
")",
"as",
"f",
":",
"is_sparse",
"=",
"1",
"if",
"sparse",
".",
"issparse",
"(",
"X",
")",
"else",
"0",
"f",
"[",
"'issparse'",
"]",
"=",
"is_sparse",
"f",
"[",
"'target'",
"]",
"=",
"y",
"if",
"is_sparse",
":",
"if",
"not",
"sparse",
".",
"isspmatrix_csr",
"(",
"X",
")",
":",
"X",
"=",
"X",
".",
"tocsr",
"(",
")",
"f",
"[",
"'shape'",
"]",
"=",
"np",
".",
"array",
"(",
"X",
".",
"shape",
")",
"f",
"[",
"'data'",
"]",
"=",
"X",
".",
"data",
"f",
"[",
"'indices'",
"]",
"=",
"X",
".",
"indices",
"f",
"[",
"'indptr'",
"]",
"=",
"X",
".",
"indptr",
"else",
":",
"f",
"[",
"'data'",
"]",
"=",
"X"
] | Save data as a HDF5 file.
Args:
X (numpy or scipy sparse matrix): Data matrix
y (numpy array): Target vector.
path (str): Path to the HDF5 file to save data. | [
"Save",
"data",
"as",
"a",
"HDF5",
"file",
"."
] | 20661105b61958dc9a3c529c1d3b2313ab23ae32 | https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/data_io.py#L80-L103 | train |
jeongyoonlee/Kaggler | kaggler/data_io.py | load_data | def load_data(path, dense=False):
"""Load data from a CSV, LibSVM or HDF5 file based on the file extension.
Args:
path (str): A path to the CSV, LibSVM or HDF5 format file containing data.
dense (boolean): An optional variable indicating if the return matrix
should be dense. By default, it is false.
Returns:
Data matrix X and target vector y
"""
catalog = {'.csv': load_csv, '.sps': load_svmlight_file, '.h5': load_hdf5}
ext = os.path.splitext(path)[1]
func = catalog[ext]
X, y = func(path)
if dense and sparse.issparse(X):
X = X.todense()
return X, y | python | def load_data(path, dense=False):
"""Load data from a CSV, LibSVM or HDF5 file based on the file extension.
Args:
path (str): A path to the CSV, LibSVM or HDF5 format file containing data.
dense (boolean): An optional variable indicating if the return matrix
should be dense. By default, it is false.
Returns:
Data matrix X and target vector y
"""
catalog = {'.csv': load_csv, '.sps': load_svmlight_file, '.h5': load_hdf5}
ext = os.path.splitext(path)[1]
func = catalog[ext]
X, y = func(path)
if dense and sparse.issparse(X):
X = X.todense()
return X, y | [
"def",
"load_data",
"(",
"path",
",",
"dense",
"=",
"False",
")",
":",
"catalog",
"=",
"{",
"'.csv'",
":",
"load_csv",
",",
"'.sps'",
":",
"load_svmlight_file",
",",
"'.h5'",
":",
"load_hdf5",
"}",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"path",
")",
"[",
"1",
"]",
"func",
"=",
"catalog",
"[",
"ext",
"]",
"X",
",",
"y",
"=",
"func",
"(",
"path",
")",
"if",
"dense",
"and",
"sparse",
".",
"issparse",
"(",
"X",
")",
":",
"X",
"=",
"X",
".",
"todense",
"(",
")",
"return",
"X",
",",
"y"
] | Load data from a CSV, LibSVM or HDF5 file based on the file extension.
Args:
path (str): A path to the CSV, LibSVM or HDF5 format file containing data.
dense (boolean): An optional variable indicating if the return matrix
should be dense. By default, it is false.
Returns:
Data matrix X and target vector y | [
"Load",
"data",
"from",
"a",
"CSV",
"LibSVM",
"or",
"HDF5",
"file",
"based",
"on",
"the",
"file",
"extension",
"."
] | 20661105b61958dc9a3c529c1d3b2313ab23ae32 | https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/data_io.py#L106-L127 | train |
jeongyoonlee/Kaggler | kaggler/data_io.py | load_csv | def load_csv(path):
"""Load data from a CSV file.
Args:
path (str): A path to the CSV format file containing data.
Returns:
Data matrix X and target vector y
"""
with open(path) as f:
line = f.readline().strip()
X = np.loadtxt(path, delimiter=',',
skiprows=0 if is_number(line.split(',')[0]) else 1)
y = np.array(X[:, 0]).flatten()
X = X[:, 1:]
return X, y | python | def load_csv(path):
"""Load data from a CSV file.
Args:
path (str): A path to the CSV format file containing data.
Returns:
Data matrix X and target vector y
"""
with open(path) as f:
line = f.readline().strip()
X = np.loadtxt(path, delimiter=',',
skiprows=0 if is_number(line.split(',')[0]) else 1)
y = np.array(X[:, 0]).flatten()
X = X[:, 1:]
return X, y | [
"def",
"load_csv",
"(",
"path",
")",
":",
"with",
"open",
"(",
"path",
")",
"as",
"f",
":",
"line",
"=",
"f",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
"X",
"=",
"np",
".",
"loadtxt",
"(",
"path",
",",
"delimiter",
"=",
"','",
",",
"skiprows",
"=",
"0",
"if",
"is_number",
"(",
"line",
".",
"split",
"(",
"','",
")",
"[",
"0",
"]",
")",
"else",
"1",
")",
"y",
"=",
"np",
".",
"array",
"(",
"X",
"[",
":",
",",
"0",
"]",
")",
".",
"flatten",
"(",
")",
"X",
"=",
"X",
"[",
":",
",",
"1",
":",
"]",
"return",
"X",
",",
"y"
] | Load data from a CSV file.
Args:
path (str): A path to the CSV format file containing data.
Returns:
Data matrix X and target vector y | [
"Load",
"data",
"from",
"a",
"CSV",
"file",
"."
] | 20661105b61958dc9a3c529c1d3b2313ab23ae32 | https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/data_io.py#L130-L151 | train |
jeongyoonlee/Kaggler | kaggler/data_io.py | load_hdf5 | def load_hdf5(path):
"""Load data from a HDF5 file.
Args:
path (str): A path to the HDF5 format file containing data.
Returns:
Data matrix X and target vector y
"""
with h5py.File(path, 'r') as f:
is_sparse = f['issparse'][...]
if is_sparse:
shape = tuple(f['shape'][...])
data = f['data'][...]
indices = f['indices'][...]
indptr = f['indptr'][...]
X = sparse.csr_matrix((data, indices, indptr), shape=shape)
else:
X = f['data'][...]
y = f['target'][...]
return X, y | python | def load_hdf5(path):
"""Load data from a HDF5 file.
Args:
path (str): A path to the HDF5 format file containing data.
Returns:
Data matrix X and target vector y
"""
with h5py.File(path, 'r') as f:
is_sparse = f['issparse'][...]
if is_sparse:
shape = tuple(f['shape'][...])
data = f['data'][...]
indices = f['indices'][...]
indptr = f['indptr'][...]
X = sparse.csr_matrix((data, indices, indptr), shape=shape)
else:
X = f['data'][...]
y = f['target'][...]
return X, y | [
"def",
"load_hdf5",
"(",
"path",
")",
":",
"with",
"h5py",
".",
"File",
"(",
"path",
",",
"'r'",
")",
"as",
"f",
":",
"is_sparse",
"=",
"f",
"[",
"'issparse'",
"]",
"[",
"...",
"]",
"if",
"is_sparse",
":",
"shape",
"=",
"tuple",
"(",
"f",
"[",
"'shape'",
"]",
"[",
"...",
"]",
")",
"data",
"=",
"f",
"[",
"'data'",
"]",
"[",
"...",
"]",
"indices",
"=",
"f",
"[",
"'indices'",
"]",
"[",
"...",
"]",
"indptr",
"=",
"f",
"[",
"'indptr'",
"]",
"[",
"...",
"]",
"X",
"=",
"sparse",
".",
"csr_matrix",
"(",
"(",
"data",
",",
"indices",
",",
"indptr",
")",
",",
"shape",
"=",
"shape",
")",
"else",
":",
"X",
"=",
"f",
"[",
"'data'",
"]",
"[",
"...",
"]",
"y",
"=",
"f",
"[",
"'target'",
"]",
"[",
"...",
"]",
"return",
"X",
",",
"y"
] | Load data from a HDF5 file.
Args:
path (str): A path to the HDF5 format file containing data.
dense (boolean): An optional variable indicating if the return matrix
should be dense. By default, it is false.
Returns:
Data matrix X and target vector y | [
"Load",
"data",
"from",
"a",
"HDF5",
"file",
"."
] | 20661105b61958dc9a3c529c1d3b2313ab23ae32 | https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/data_io.py#L154-L179 | train |
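The save/load pairs above are symmetric: `save_data` routes on the file extension and `load_data` reverses it, with the HDF5 path storing a CSR matrix as its `data`/`indices`/`indptr`/`shape` pieces plus an `issparse` flag. A round-trip sketch, assuming both functions are importable (e.g. from `kaggler.data_io`):

```python
import numpy as np
from scipy import sparse

X = sparse.random(100, 20, density=.1, format='csr', random_state=0)
y = np.random.RandomState(0).rand(100)
save_data(X, y, 'train.h5')     # dispatched to save_hdf5 by extension
X2, y2 = load_data('train.h5')
assert (X != X2).nnz == 0       # identical sparse matrix back
assert np.allclose(y, y2)
```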
jeongyoonlee/Kaggler | kaggler/data_io.py | read_sps | def read_sps(path):
"""Read a LibSVM file line-by-line.
Args:
path (str): A path to the LibSVM file to read.
Yields:
data (list) and target (int).
"""
for line in open(path):
# parse x
xs = line.rstrip().split(' ')
yield xs[1:], int(xs[0]) | python | def read_sps(path):
"""Read a LibSVM file line-by-line.
Args:
path (str): A path to the LibSVM file to read.
Yields:
data (list) and target (int).
"""
for line in open(path):
# parse x
xs = line.rstrip().split(' ')
yield xs[1:], int(xs[0]) | [
"def",
"read_sps",
"(",
"path",
")",
":",
"for",
"line",
"in",
"open",
"(",
"path",
")",
":",
"# parse x",
"xs",
"=",
"line",
".",
"rstrip",
"(",
")",
".",
"split",
"(",
"' '",
")",
"yield",
"xs",
"[",
"1",
":",
"]",
",",
"int",
"(",
"xs",
"[",
"0",
"]",
")"
] | Read a LibSVM file line-by-line.
Args:
path (str): A path to the LibSVM file to read.
Yields:
data (list) and target (int). | [
"Read",
"a",
"LibSVM",
"file",
"line",
"-",
"by",
"-",
"line",
"."
] | 20661105b61958dc9a3c529c1d3b2313ab23ae32 | https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/data_io.py#L182-L196 | train |
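`read_sps` yields the label with the raw `index:value` feature strings still unparsed, leaving the split to the caller. A hedged sketch of that parsing; the file contents are illustrative, and the `- 1` undoes the 1-based indices that `zero_based=False` produced in `save_libsvm` above:

```python
with open('train.sps', 'w') as f:
    f.write('1 2:0.5 7:1.25\n0 1:3.0\n')

for xs, y in read_sps('train.sps'):        # read_sps from the record above
    pairs = [feat.split(':') for feat in xs]
    idx = [int(i) - 1 for i, _ in pairs]   # back to 0-based column indices
    val = [float(v) for _, v in pairs]
    print(y, idx, val)                     # 1 [1, 6] [0.5, 1.25], then 0 [0] [3.0]
```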
jeongyoonlee/Kaggler | kaggler/metrics/regression.py | gini | def gini(y, p):
"""Normalized Gini Coefficient.
Args:
y (numpy.array): target
p (numpy.array): prediction
Returns:
e (numpy.float64): normalized Gini coefficient
"""
# check and get number of samples
assert y.shape == p.shape
n_samples = y.shape[0]
# sort rows on the target and prediction columns
# (from largest to smallest)
arr = np.array([y, p]).transpose()
true_order = arr[arr[:,0].argsort()][::-1,0]
pred_order = arr[arr[:,1].argsort()][::-1,0]
# get Lorenz curves
l_true = np.cumsum(true_order) / np.sum(true_order)
l_pred = np.cumsum(pred_order) / np.sum(pred_order)
l_ones = np.linspace(1/n_samples, 1, n_samples)
# get Gini coefficients (area between curves)
g_true = np.sum(l_ones - l_true)
g_pred = np.sum(l_ones - l_pred)
# normalize to true Gini coefficient
return g_pred / g_true | python | def gini(y, p):
"""Normalized Gini Coefficient.
Args:
y (numpy.array): target
p (numpy.array): prediction
Returns:
e (numpy.float64): normalized Gini coefficient
"""
# check and get number of samples
assert y.shape == p.shape
n_samples = y.shape[0]
# sort rows on the target and prediction columns
# (from largest to smallest)
arr = np.array([y, p]).transpose()
true_order = arr[arr[:,0].argsort()][::-1,0]
pred_order = arr[arr[:,1].argsort()][::-1,0]
# get Lorenz curves
l_true = np.cumsum(true_order) / np.sum(true_order)
l_pred = np.cumsum(pred_order) / np.sum(pred_order)
l_ones = np.linspace(1/n_samples, 1, n_samples)
# get Gini coefficients (area between curves)
g_true = np.sum(l_ones - l_true)
g_pred = np.sum(l_ones - l_pred)
# normalize to true Gini coefficient
return g_pred / g_true | [
"def",
"gini",
"(",
"y",
",",
"p",
")",
":",
"# check and get number of samples",
"assert",
"y",
".",
"shape",
"==",
"p",
".",
"shape",
"n_samples",
"=",
"y",
".",
"shape",
"[",
"0",
"]",
"# sort rows on prediction column",
"# (from largest to smallest)",
"arr",
"=",
"np",
".",
"array",
"(",
"[",
"y",
",",
"p",
"]",
")",
".",
"transpose",
"(",
")",
"true_order",
"=",
"arr",
"[",
"arr",
"[",
":",
",",
"0",
"]",
".",
"argsort",
"(",
")",
"]",
"[",
":",
":",
"-",
"1",
",",
"0",
"]",
"pred_order",
"=",
"arr",
"[",
"arr",
"[",
":",
",",
"1",
"]",
".",
"argsort",
"(",
")",
"]",
"[",
":",
":",
"-",
"1",
",",
"0",
"]",
"# get Lorenz curves",
"l_true",
"=",
"np",
".",
"cumsum",
"(",
"true_order",
")",
"/",
"np",
".",
"sum",
"(",
"true_order",
")",
"l_pred",
"=",
"np",
".",
"cumsum",
"(",
"pred_order",
")",
"/",
"np",
".",
"sum",
"(",
"pred_order",
")",
"l_ones",
"=",
"np",
".",
"linspace",
"(",
"1",
"/",
"n_samples",
",",
"1",
",",
"n_samples",
")",
"# get Gini coefficients (area between curves)",
"g_true",
"=",
"np",
".",
"sum",
"(",
"l_ones",
"-",
"l_true",
")",
"g_pred",
"=",
"np",
".",
"sum",
"(",
"l_ones",
"-",
"l_pred",
")",
"# normalize to true Gini coefficient",
"return",
"g_pred",
"/",
"g_true"
] | Normalized Gini Coefficient.
Args:
y (numpy.array): target
p (numpy.array): prediction
Returns:
e (numpy.float64): normalized Gini coefficient | [
"Normalized",
"Gini",
"Coefficient",
"."
] | 20661105b61958dc9a3c529c1d3b2313ab23ae32 | https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/metrics/regression.py#L46-L78 | train |
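Normalized Gini only depends on the ranking induced by `p`, not its scale: dividing the model's area between Lorenz curves by the oracle's makes a perfect ordering score exactly 1. Two quick checks, assuming `gini` from the record above is in scope:

```python
import numpy as np

y = np.array([0, 0, 1, 1])
print(gini(y, y.astype(float)))             # 1.0, a perfect ranking
print(gini(y, np.array([.2, .8, .9, .6])))  # 0.5, one positive is misranked
```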
jeongyoonlee/Kaggler | kaggler/metrics/classification.py | logloss | def logloss(y, p):
"""Bounded log loss error.
Args:
y (numpy.array): target
p (numpy.array): prediction
Returns:
bounded log loss error
"""
p[p < EPS] = EPS
p[p > 1 - EPS] = 1 - EPS
return log_loss(y, p) | python | def logloss(y, p):
"""Bounded log loss error.
Args:
y (numpy.array): target
p (numpy.array): prediction
Returns:
bounded log loss error
"""
p[p < EPS] = EPS
p[p > 1 - EPS] = 1 - EPS
return log_loss(y, p) | [
"def",
"logloss",
"(",
"y",
",",
"p",
")",
":",
"p",
"[",
"p",
"<",
"EPS",
"]",
"=",
"EPS",
"p",
"[",
"p",
">",
"1",
"-",
"EPS",
"]",
"=",
"1",
"-",
"EPS",
"return",
"log_loss",
"(",
"y",
",",
"p",
")"
] | Bounded log loss error.
Args:
y (numpy.array): target
p (numpy.array): prediction
Returns:
bounded log loss error | [
"Bounded",
"log",
"loss",
"error",
"."
] | 20661105b61958dc9a3c529c1d3b2313ab23ae32 | https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/metrics/classification.py#L10-L23 | train |
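The only thing `logloss` adds over `sklearn.metrics.log_loss` is explicit clipping, which keeps `log(0)` out of the sum when a prediction is exactly 0 or 1. The same guard in isolation; the `EPS` value is an assumption, since the record does not show it:

```python
import numpy as np
from sklearn.metrics import log_loss

EPS = 1e-15                   # assumed; defined elsewhere in the module
y = np.array([0, 1, 1])
p = np.array([0., 1., .8])    # raw 0/1 probabilities
p = np.clip(p, EPS, 1 - EPS)  # bounded away from log(0)
print(log_loss(y, p))
```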
vividvilla/csvtotable | csvtotable/convert.py | convert | def convert(input_file_name, **kwargs):
"""Convert CSV file to HTML table"""
delimiter = kwargs["delimiter"] or ","
quotechar = kwargs["quotechar"] or "|"
if six.PY2:
delimiter = delimiter.encode("utf-8")
quotechar = quotechar.encode("utf-8")
# Read CSV and form a header and rows list
with open(input_file_name, "rb") as input_file:
reader = csv.reader(input_file,
encoding="utf-8",
delimiter=delimiter,
quotechar=quotechar)
csv_headers = []
if not kwargs.get("no_header"):
# Read header from first line
csv_headers = next(reader)
csv_rows = [row for row in reader if row]
# Set default column name if header is not present
if not csv_headers and len(csv_rows) > 0:
end = len(csv_rows[0]) + 1
csv_headers = ["Column {}".format(n) for n in range(1, end)]
# Render csv to HTML
html = render_template(csv_headers, csv_rows, **kwargs)
# Freeze all JS files in template
return freeze_js(html) | python | def convert(input_file_name, **kwargs):
"""Convert CSV file to HTML table"""
delimiter = kwargs["delimiter"] or ","
quotechar = kwargs["quotechar"] or "|"
if six.PY2:
delimiter = delimiter.encode("utf-8")
quotechar = quotechar.encode("utf-8")
# Read CSV and form a header and rows list
with open(input_file_name, "rb") as input_file:
reader = csv.reader(input_file,
encoding="utf-8",
delimiter=delimiter,
quotechar=quotechar)
csv_headers = []
if not kwargs.get("no_header"):
# Read header from first line
csv_headers = next(reader)
csv_rows = [row for row in reader if row]
# Set default column name if header is not present
if not csv_headers and len(csv_rows) > 0:
end = len(csv_rows[0]) + 1
csv_headers = ["Column {}".format(n) for n in range(1, end)]
# Render csv to HTML
html = render_template(csv_headers, csv_rows, **kwargs)
# Freeze all JS files in template
return freeze_js(html) | [
"def",
"convert",
"(",
"input_file_name",
",",
"*",
"*",
"kwargs",
")",
":",
"delimiter",
"=",
"kwargs",
"[",
"\"delimiter\"",
"]",
"or",
"\",\"",
"quotechar",
"=",
"kwargs",
"[",
"\"quotechar\"",
"]",
"or",
"\"|\"",
"if",
"six",
".",
"PY2",
":",
"delimiter",
"=",
"delimiter",
".",
"encode",
"(",
"\"utf-8\"",
")",
"quotechar",
"=",
"quotechar",
".",
"encode",
"(",
"\"utf-8\"",
")",
"# Read CSV and form a header and rows list",
"with",
"open",
"(",
"input_file_name",
",",
"\"rb\"",
")",
"as",
"input_file",
":",
"reader",
"=",
"csv",
".",
"reader",
"(",
"input_file",
",",
"encoding",
"=",
"\"utf-8\"",
",",
"delimiter",
"=",
"delimiter",
",",
"quotechar",
"=",
"quotechar",
")",
"csv_headers",
"=",
"[",
"]",
"if",
"not",
"kwargs",
".",
"get",
"(",
"\"no_header\"",
")",
":",
"# Read header from first line",
"csv_headers",
"=",
"next",
"(",
"reader",
")",
"csv_rows",
"=",
"[",
"row",
"for",
"row",
"in",
"reader",
"if",
"row",
"]",
"# Set default column name if header is not present",
"if",
"not",
"csv_headers",
"and",
"len",
"(",
"csv_rows",
")",
">",
"0",
":",
"end",
"=",
"len",
"(",
"csv_rows",
"[",
"0",
"]",
")",
"+",
"1",
"csv_headers",
"=",
"[",
"\"Column {}\"",
".",
"format",
"(",
"n",
")",
"for",
"n",
"in",
"range",
"(",
"1",
",",
"end",
")",
"]",
"# Render csv to HTML",
"html",
"=",
"render_template",
"(",
"csv_headers",
",",
"csv_rows",
",",
"*",
"*",
"kwargs",
")",
"# Freeze all JS files in template",
"return",
"freeze_js",
"(",
"html",
")"
] | Convert CSV file to HTML table | [
"Convert",
"CSV",
"file",
"to",
"HTML",
"table"
] | d894dca1fcc1071c9a52260a9194f8cc3b327905 | https://github.com/vividvilla/csvtotable/blob/d894dca1fcc1071c9a52260a9194f8cc3b327905/csvtotable/convert.py#L36-L68 | train |
vividvilla/csvtotable | csvtotable/convert.py | save | def save(file_name, content):
"""Save content to a file"""
with open(file_name, "w", encoding="utf-8") as output_file:
output_file.write(content)
return output_file.name | python | def save(file_name, content):
"""Save content to a file"""
with open(file_name, "w", encoding="utf-8") as output_file:
output_file.write(content)
return output_file.name | [
"def",
"save",
"(",
"file_name",
",",
"content",
")",
":",
"with",
"open",
"(",
"file_name",
",",
"\"w\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"output_file",
":",
"output_file",
".",
"write",
"(",
"content",
")",
"return",
"output_file",
".",
"name"
] | Save content to a file | [
"Save",
"content",
"to",
"a",
"file"
] | d894dca1fcc1071c9a52260a9194f8cc3b327905 | https://github.com/vividvilla/csvtotable/blob/d894dca1fcc1071c9a52260a9194f8cc3b327905/csvtotable/convert.py#L71-L75 | train |
vividvilla/csvtotable | csvtotable/convert.py | serve | def serve(content):
"""Write content to a temp file and serve it in browser"""
temp_folder = tempfile.gettempdir()
temp_file_name = tempfile.gettempprefix() + str(uuid.uuid4()) + ".html"
# Generate a file path with a random name in temporary dir
temp_file_path = os.path.join(temp_folder, temp_file_name)
# save content to temp file
save(temp_file_path, content)
# Open the temp file in a browser
webbrowser.open("file://{}".format(temp_file_path))
# Block the thread while content is served
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
# cleanup the temp file
os.remove(temp_file_path) | python | def serve(content):
"""Write content to a temp file and serve it in browser"""
temp_folder = tempfile.gettempdir()
temp_file_name = tempfile.gettempprefix() + str(uuid.uuid4()) + ".html"
# Generate a file path with a random name in temporary dir
temp_file_path = os.path.join(temp_folder, temp_file_name)
# save content to temp file
save(temp_file_path, content)
# Open the temp file in a browser
webbrowser.open("file://{}".format(temp_file_path))
# Block the thread while content is served
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
# cleanup the temp file
os.remove(temp_file_path) | [
"def",
"serve",
"(",
"content",
")",
":",
"temp_folder",
"=",
"tempfile",
".",
"gettempdir",
"(",
")",
"temp_file_name",
"=",
"tempfile",
".",
"gettempprefix",
"(",
")",
"+",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"+",
"\".html\"",
"# Generate a file path with a random name in temporary dir",
"temp_file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"temp_folder",
",",
"temp_file_name",
")",
"# save content to temp file",
"save",
"(",
"temp_file_path",
",",
"content",
")",
"# Open templfile in a browser",
"webbrowser",
".",
"open",
"(",
"\"file://{}\"",
".",
"format",
"(",
"temp_file_path",
")",
")",
"# Block the thread while content is served",
"try",
":",
"while",
"True",
":",
"time",
".",
"sleep",
"(",
"1",
")",
"except",
"KeyboardInterrupt",
":",
"# cleanup the temp file",
"os",
".",
"remove",
"(",
"temp_file_path",
")"
] | Write content to a temp file and serve it in browser | [
"Write",
"content",
"to",
"a",
"temp",
"file",
"and",
"serve",
"it",
"in",
"browser"
] | d894dca1fcc1071c9a52260a9194f8cc3b327905 | https://github.com/vividvilla/csvtotable/blob/d894dca1fcc1071c9a52260a9194f8cc3b327905/csvtotable/convert.py#L78-L97 | train |
vividvilla/csvtotable | csvtotable/convert.py | render_template | def render_template(table_headers, table_items, **options):
"""
Render Jinja2 template
"""
caption = options.get("caption") or "Table"
display_length = options.get("display_length") or -1
height = options.get("height") or "70vh"
default_length_menu = [-1, 10, 25, 50]
pagination = options.get("pagination")
virtual_scroll_limit = options.get("virtual_scroll")
# Change % to vh
height = height.replace("%", "vh")
# Header columns
columns = []
for header in table_headers:
columns.append({"title": header})
# Data table options
datatable_options = {
"columns": columns,
"data": table_items,
"iDisplayLength": display_length,
"sScrollX": "100%",
"sScrollXInner": "100%"
}
# Enable virtual scroll when the row count exceeds the configured limit
is_paging = pagination
virtual_scroll = False
scroll_y = height
if virtual_scroll_limit:
if virtual_scroll_limit != -1 and len(table_items) > virtual_scroll_limit:
virtual_scroll = True
display_length = -1
fmt = ("\nVirtual scroll is enabled since number of rows exceeds {limit}."
" You can set custom row limit by setting flag -vs, --virtual-scroll."
" Virtual scroll can be disabled by setting the value to -1 and set it to 0 to always enable.")
logger.warn(fmt.format(limit=virtual_scroll_limit))
if not is_paging:
fmt = "\nPagination can not be disabled in virtual scroll mode."
logger.warn(fmt)
is_paging = True
if is_paging and not virtual_scroll:
# Add display length to the default display length menu
length_menu = []
if display_length != -1:
length_menu = sorted(default_length_menu + [display_length])
else:
length_menu = default_length_menu
# Set label as "All" it display length is -1
length_menu_label = [str("All") if i == -1 else i for i in length_menu]
datatable_options["lengthMenu"] = [length_menu, length_menu_label]
datatable_options["iDisplayLength"] = display_length
if is_paging:
datatable_options["paging"] = True
else:
datatable_options["paging"] = False
if scroll_y:
datatable_options["scrollY"] = scroll_y
if virtual_scroll:
datatable_options["scroller"] = True
datatable_options["bPaginate"] = False
datatable_options["deferRender"] = True
datatable_options["bLengthChange"] = False
enable_export = options.get("export")
if enable_export:
if options["export_options"]:
allowed = list(options["export_options"])
else:
allowed = ["copy", "csv", "json", "print"]
datatable_options["dom"] = "Bfrtip"
datatable_options["buttons"] = allowed
datatable_options_json = json.dumps(datatable_options,
separators=(",", ":"))
return template.render(title=caption or "Table",
caption=caption,
datatable_options=datatable_options_json,
virtual_scroll=virtual_scroll,
enable_export=enable_export) | python | def render_template(table_headers, table_items, **options):
"""
Render Jinja2 template
"""
caption = options.get("caption") or "Table"
display_length = options.get("display_length") or -1
height = options.get("height") or "70vh"
default_length_menu = [-1, 10, 25, 50]
pagination = options.get("pagination")
virtual_scroll_limit = options.get("virtual_scroll")
# Change % to vh
height = height.replace("%", "vh")
# Header columns
columns = []
for header in table_headers:
columns.append({"title": header})
# Data table options
datatable_options = {
"columns": columns,
"data": table_items,
"iDisplayLength": display_length,
"sScrollX": "100%",
"sScrollXInner": "100%"
}
# Enable virtual scroll when the row count exceeds the configured limit
is_paging = pagination
virtual_scroll = False
scroll_y = height
if virtual_scroll_limit:
if virtual_scroll_limit != -1 and len(table_items) > virtual_scroll_limit:
virtual_scroll = True
display_length = -1
fmt = ("\nVirtual scroll is enabled since number of rows exceeds {limit}."
" You can set custom row limit by setting flag -vs, --virtual-scroll."
" Virtual scroll can be disabled by setting the value to -1 and set it to 0 to always enable.")
logger.warn(fmt.format(limit=virtual_scroll_limit))
if not is_paging:
fmt = "\nPagination can not be disabled in virtual scroll mode."
logger.warn(fmt)
is_paging = True
if is_paging and not virtual_scroll:
# Add display length to the default display length menu
length_menu = []
if display_length != -1:
length_menu = sorted(default_length_menu + [display_length])
else:
length_menu = default_length_menu
# Set label as "All" it display length is -1
length_menu_label = [str("All") if i == -1 else i for i in length_menu]
datatable_options["lengthMenu"] = [length_menu, length_menu_label]
datatable_options["iDisplayLength"] = display_length
if is_paging:
datatable_options["paging"] = True
else:
datatable_options["paging"] = False
if scroll_y:
datatable_options["scrollY"] = scroll_y
if virtual_scroll:
datatable_options["scroller"] = True
datatable_options["bPaginate"] = False
datatable_options["deferRender"] = True
datatable_options["bLengthChange"] = False
enable_export = options.get("export")
if enable_export:
if options["export_options"]:
allowed = list(options["export_options"])
else:
allowed = ["copy", "csv", "json", "print"]
datatable_options["dom"] = "Bfrtip"
datatable_options["buttons"] = allowed
datatable_options_json = json.dumps(datatable_options,
separators=(",", ":"))
return template.render(title=caption or "Table",
caption=caption,
datatable_options=datatable_options_json,
virtual_scroll=virtual_scroll,
enable_export=enable_export) | [
"def",
"render_template",
"(",
"table_headers",
",",
"table_items",
",",
"*",
"*",
"options",
")",
":",
"caption",
"=",
"options",
".",
"get",
"(",
"\"caption\"",
")",
"or",
"\"Table\"",
"display_length",
"=",
"options",
".",
"get",
"(",
"\"display_length\"",
")",
"or",
"-",
"1",
"height",
"=",
"options",
".",
"get",
"(",
"\"height\"",
")",
"or",
"\"70vh\"",
"default_length_menu",
"=",
"[",
"-",
"1",
",",
"10",
",",
"25",
",",
"50",
"]",
"pagination",
"=",
"options",
".",
"get",
"(",
"\"pagination\"",
")",
"virtual_scroll_limit",
"=",
"options",
".",
"get",
"(",
"\"virtual_scroll\"",
")",
"# Change % to vh",
"height",
"=",
"height",
".",
"replace",
"(",
"\"%\"",
",",
"\"vh\"",
")",
"# Header columns",
"columns",
"=",
"[",
"]",
"for",
"header",
"in",
"table_headers",
":",
"columns",
".",
"append",
"(",
"{",
"\"title\"",
":",
"header",
"}",
")",
"# Data table options",
"datatable_options",
"=",
"{",
"\"columns\"",
":",
"columns",
",",
"\"data\"",
":",
"table_items",
",",
"\"iDisplayLength\"",
":",
"display_length",
",",
"\"sScrollX\"",
":",
"\"100%\"",
",",
"\"sScrollXInner\"",
":",
"\"100%\"",
"}",
"# Enable virtual scroll for rows bigger than 1000 rows",
"is_paging",
"=",
"pagination",
"virtual_scroll",
"=",
"False",
"scroll_y",
"=",
"height",
"if",
"virtual_scroll_limit",
":",
"if",
"virtual_scroll_limit",
"!=",
"-",
"1",
"and",
"len",
"(",
"table_items",
")",
">",
"virtual_scroll_limit",
":",
"virtual_scroll",
"=",
"True",
"display_length",
"=",
"-",
"1",
"fmt",
"=",
"(",
"\"\\nVirtual scroll is enabled since number of rows exceeds {limit}.\"",
"\" You can set custom row limit by setting flag -vs, --virtual-scroll.\"",
"\" Virtual scroll can be disabled by setting the value to -1 and set it to 0 to always enable.\"",
")",
"logger",
".",
"warn",
"(",
"fmt",
".",
"format",
"(",
"limit",
"=",
"virtual_scroll_limit",
")",
")",
"if",
"not",
"is_paging",
":",
"fmt",
"=",
"\"\\nPagination can not be disabled in virtual scroll mode.\"",
"logger",
".",
"warn",
"(",
"fmt",
")",
"is_paging",
"=",
"True",
"if",
"is_paging",
"and",
"not",
"virtual_scroll",
":",
"# Add display length to the default display length menu",
"length_menu",
"=",
"[",
"]",
"if",
"display_length",
"!=",
"-",
"1",
":",
"length_menu",
"=",
"sorted",
"(",
"default_length_menu",
"+",
"[",
"display_length",
"]",
")",
"else",
":",
"length_menu",
"=",
"default_length_menu",
"# Set label as \"All\" it display length is -1",
"length_menu_label",
"=",
"[",
"str",
"(",
"\"All\"",
")",
"if",
"i",
"==",
"-",
"1",
"else",
"i",
"for",
"i",
"in",
"length_menu",
"]",
"datatable_options",
"[",
"\"lengthMenu\"",
"]",
"=",
"[",
"length_menu",
",",
"length_menu_label",
"]",
"datatable_options",
"[",
"\"iDisplayLength\"",
"]",
"=",
"display_length",
"if",
"is_paging",
":",
"datatable_options",
"[",
"\"paging\"",
"]",
"=",
"True",
"else",
":",
"datatable_options",
"[",
"\"paging\"",
"]",
"=",
"False",
"if",
"scroll_y",
":",
"datatable_options",
"[",
"\"scrollY\"",
"]",
"=",
"scroll_y",
"if",
"virtual_scroll",
":",
"datatable_options",
"[",
"\"scroller\"",
"]",
"=",
"True",
"datatable_options",
"[",
"\"bPaginate\"",
"]",
"=",
"False",
"datatable_options",
"[",
"\"deferRender\"",
"]",
"=",
"True",
"datatable_options",
"[",
"\"bLengthChange\"",
"]",
"=",
"False",
"enable_export",
"=",
"options",
".",
"get",
"(",
"\"export\"",
")",
"if",
"enable_export",
":",
"if",
"options",
"[",
"\"export_options\"",
"]",
":",
"allowed",
"=",
"list",
"(",
"options",
"[",
"\"export_options\"",
"]",
")",
"else",
":",
"allowed",
"=",
"[",
"\"copy\"",
",",
"\"csv\"",
",",
"\"json\"",
",",
"\"print\"",
"]",
"datatable_options",
"[",
"\"dom\"",
"]",
"=",
"\"Bfrtip\"",
"datatable_options",
"[",
"\"buttons\"",
"]",
"=",
"allowed",
"datatable_options_json",
"=",
"json",
".",
"dumps",
"(",
"datatable_options",
",",
"separators",
"=",
"(",
"\",\"",
",",
"\":\"",
")",
")",
"return",
"template",
".",
"render",
"(",
"title",
"=",
"caption",
"or",
"\"Table\"",
",",
"caption",
"=",
"caption",
",",
"datatable_options",
"=",
"datatable_options_json",
",",
"virtual_scroll",
"=",
"virtual_scroll",
",",
"enable_export",
"=",
"enable_export",
")"
] | Render Jinja2 template | [
"Render",
"Jinja2",
"template"
] | d894dca1fcc1071c9a52260a9194f8cc3b327905 | https://github.com/vividvilla/csvtotable/blob/d894dca1fcc1071c9a52260a9194f8cc3b327905/csvtotable/convert.py#L100-L193 | train |
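All of the branching above just assembles one options dict that DataTables consumes; the `separators=(",", ":")` argument strips the default spaces so the JSON embedded in the page stays compact. For instance:

```python
import json

opts = {"columns": [{"title": "Name"}], "paging": True, "scrollY": "70vh"}
print(json.dumps(opts, separators=(",", ":")))
# {"columns":[{"title":"Name"}],"paging":true,"scrollY":"70vh"}
```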
vividvilla/csvtotable | csvtotable/convert.py | freeze_js | def freeze_js(html):
"""
Freeze all JS assets to the rendered html itself.
"""
matches = js_src_pattern.finditer(html)
if not matches:
return html
# Reverse regex matches to replace match string with respective JS content
for match in reversed(tuple(matches)):
# JS file name
file_name = match.group(1)
file_path = os.path.join(js_files_path, file_name)
with open(file_path, "r", encoding="utf-8") as f:
file_content = f.read()
# Replace matched string with inline JS
fmt = '<script type="text/javascript">{}</script>'
js_content = fmt.format(file_content)
html = html[:match.start()] + js_content + html[match.end():]
return html | python | def freeze_js(html):
"""
Freeze all JS assets to the rendered html itself.
"""
matches = js_src_pattern.finditer(html)
if not matches:
return html
# Reverse regex matches to replace match string with respective JS content
for match in reversed(tuple(matches)):
# JS file name
file_name = match.group(1)
file_path = os.path.join(js_files_path, file_name)
with open(file_path, "r", encoding="utf-8") as f:
file_content = f.read()
# Replace matched string with inline JS
fmt = '<script type="text/javascript">{}</script>'
js_content = fmt.format(file_content)
html = html[:match.start()] + js_content + html[match.end():]
return html | [
"def",
"freeze_js",
"(",
"html",
")",
":",
"matches",
"=",
"js_src_pattern",
".",
"finditer",
"(",
"html",
")",
"if",
"not",
"matches",
":",
"return",
"html",
"# Reverse regex matches to replace match string with respective JS content",
"for",
"match",
"in",
"reversed",
"(",
"tuple",
"(",
"matches",
")",
")",
":",
"# JS file name",
"file_name",
"=",
"match",
".",
"group",
"(",
"1",
")",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"js_files_path",
",",
"file_name",
")",
"with",
"open",
"(",
"file_path",
",",
"\"r\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"f",
":",
"file_content",
"=",
"f",
".",
"read",
"(",
")",
"# Replace matched string with inline JS",
"fmt",
"=",
"'<script type=\"text/javascript\">{}</script>'",
"js_content",
"=",
"fmt",
".",
"format",
"(",
"file_content",
")",
"html",
"=",
"html",
"[",
":",
"match",
".",
"start",
"(",
")",
"]",
"+",
"js_content",
"+",
"html",
"[",
"match",
".",
"end",
"(",
")",
":",
"]",
"return",
"html"
] | Freeze all JS assets to the rendered html itself. | [
"Freeze",
"all",
"JS",
"assets",
"to",
"the",
"rendered",
"html",
"itself",
"."
] | d894dca1fcc1071c9a52260a9194f8cc3b327905 | https://github.com/vividvilla/csvtotable/blob/d894dca1fcc1071c9a52260a9194f8cc3b327905/csvtotable/convert.py#L196-L218 | train |
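A minimal usage sketch for freeze_js; the js/ directory behind js_files_path and the exact js_src_pattern are defined elsewhere in the module and assumed here. Note that re.finditer returns an iterator, which is always truthy, so the early "if not matches" guard never fires; the no-match case is actually handled by the loop iterating zero times.

    html = '<html><script type="text/javascript" src="app.min.js"></script></html>'
    frozen = freeze_js(html)
    # frozen now inlines the file body:
    # '<html><script type="text/javascript">/* contents of app.min.js */</script></html>'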
vividvilla/csvtotable | csvtotable/cli.py | cli | def cli(*args, **kwargs):
"""
CSVtoTable commandline utility.
"""
# Convert CSV file
content = convert.convert(kwargs["input_file"], **kwargs)
# Serve the temporary file in browser.
if kwargs["serve"]:
convert.serve(content)
# Write to output file
elif kwargs["output_file"]:
# Check if file can be overwrite
if (not kwargs["overwrite"] and
not prompt_overwrite(kwargs["output_file"])):
raise click.Abort()
convert.save(kwargs["output_file"], content)
click.secho("File converted successfully: {}".format(
kwargs["output_file"]), fg="green")
else:
# If its not server and output file is missing then raise error
raise click.BadOptionUsage("Missing argument \"output_file\".") | python | def cli(*args, **kwargs):
"""
CSVtoTable commandline utility.
"""
# Convert CSV file
content = convert.convert(kwargs["input_file"], **kwargs)
# Serve the temporary file in browser.
if kwargs["serve"]:
convert.serve(content)
# Write to output file
elif kwargs["output_file"]:
# Check if file can be overwrite
if (not kwargs["overwrite"] and
not prompt_overwrite(kwargs["output_file"])):
raise click.Abort()
convert.save(kwargs["output_file"], content)
click.secho("File converted successfully: {}".format(
kwargs["output_file"]), fg="green")
else:
# If its not server and output file is missing then raise error
raise click.BadOptionUsage("Missing argument \"output_file\".") | [
"def",
"cli",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Convert CSV file",
"content",
"=",
"convert",
".",
"convert",
"(",
"kwargs",
"[",
"\"input_file\"",
"]",
",",
"*",
"*",
"kwargs",
")",
"# Serve the temporary file in browser.",
"if",
"kwargs",
"[",
"\"serve\"",
"]",
":",
"convert",
".",
"serve",
"(",
"content",
")",
"# Write to output file",
"elif",
"kwargs",
"[",
"\"output_file\"",
"]",
":",
"# Check if file can be overwrite",
"if",
"(",
"not",
"kwargs",
"[",
"\"overwrite\"",
"]",
"and",
"not",
"prompt_overwrite",
"(",
"kwargs",
"[",
"\"output_file\"",
"]",
")",
")",
":",
"raise",
"click",
".",
"Abort",
"(",
")",
"convert",
".",
"save",
"(",
"kwargs",
"[",
"\"output_file\"",
"]",
",",
"content",
")",
"click",
".",
"secho",
"(",
"\"File converted successfully: {}\"",
".",
"format",
"(",
"kwargs",
"[",
"\"output_file\"",
"]",
")",
",",
"fg",
"=",
"\"green\"",
")",
"else",
":",
"# If its not server and output file is missing then raise error",
"raise",
"click",
".",
"BadOptionUsage",
"(",
"\"Missing argument \\\"output_file\\\".\"",
")"
] | CSVtoTable commandline utility. | [
"CSVtoTable",
"commandline",
"utility",
"."
] | d894dca1fcc1071c9a52260a9194f8cc3b327905 | https://github.com/vividvilla/csvtotable/blob/d894dca1fcc1071c9a52260a9194f8cc3b327905/csvtotable/cli.py#L54-L76 | train |
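A hedged command-line sketch; only the option keys (serve, overwrite, export, ...) are visible in the code above, so the console-script name and flag spellings are assumptions:

    csvtotable data.csv data.html              # convert and write data.html
    csvtotable data.csv data.html --overwrite  # skip the overwrite prompt
    csvtotable data.csv --serve                # preview in the browser; no output file
    csvtotable data.csv                        # error: neither serve nor output_file given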
django-userena-ce/django-userena-ce | userena/views.py | activate_retry | def activate_retry(request, activation_key,
template_name='userena/activate_retry_success.html',
extra_context=None):
"""
Reissue a new ``activation_key`` for the user with the expired
``activation_key``.
If ``activation_key`` does not exists, or ``USERENA_ACTIVATION_RETRY`` is
set to False and for any other error condition user is redirected to
:func:`activate` for error message display.
:param activation_key:
String of a SHA1 string of 40 characters long. A SHA1 is always 160bit
long, with 4 bits per character this makes it --160/4-- 40 characters
long.
:param template_name:
String containing the template name that is used when new
``activation_key`` has been created. Defaults to
``userena/activate_retry_success.html``.
:param extra_context:
Dictionary containing variables which could be added to the template
context. Default to an empty dictionary.
"""
if not userena_settings.USERENA_ACTIVATION_RETRY:
return redirect(reverse('userena_activate', args=(activation_key,)))
try:
if UserenaSignup.objects.check_expired_activation(activation_key):
new_key = UserenaSignup.objects.reissue_activation(activation_key)
if new_key:
if not extra_context: extra_context = dict()
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
else:
return redirect(reverse('userena_activate',args=(activation_key,)))
else:
return redirect(reverse('userena_activate',args=(activation_key,)))
except UserenaSignup.DoesNotExist:
return redirect(reverse('userena_activate',args=(activation_key,))) | python | def activate_retry(request, activation_key,
template_name='userena/activate_retry_success.html',
extra_context=None):
"""
Reissue a new ``activation_key`` for the user with the expired
``activation_key``.
If ``activation_key`` does not exists, or ``USERENA_ACTIVATION_RETRY`` is
set to False and for any other error condition user is redirected to
:func:`activate` for error message display.
:param activation_key:
String of a SHA1 string of 40 characters long. A SHA1 is always 160bit
long, with 4 bits per character this makes it --160/4-- 40 characters
long.
:param template_name:
String containing the template name that is used when new
``activation_key`` has been created. Defaults to
``userena/activate_retry_success.html``.
:param extra_context:
Dictionary containing variables which could be added to the template
context. Default to an empty dictionary.
"""
if not userena_settings.USERENA_ACTIVATION_RETRY:
return redirect(reverse('userena_activate', args=(activation_key,)))
try:
if UserenaSignup.objects.check_expired_activation(activation_key):
new_key = UserenaSignup.objects.reissue_activation(activation_key)
if new_key:
if not extra_context: extra_context = dict()
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
else:
return redirect(reverse('userena_activate',args=(activation_key,)))
else:
return redirect(reverse('userena_activate',args=(activation_key,)))
except UserenaSignup.DoesNotExist:
return redirect(reverse('userena_activate',args=(activation_key,))) | [
"def",
"activate_retry",
"(",
"request",
",",
"activation_key",
",",
"template_name",
"=",
"'userena/activate_retry_success.html'",
",",
"extra_context",
"=",
"None",
")",
":",
"if",
"not",
"userena_settings",
".",
"USERENA_ACTIVATION_RETRY",
":",
"return",
"redirect",
"(",
"reverse",
"(",
"'userena_activate'",
",",
"args",
"=",
"(",
"activation_key",
",",
")",
")",
")",
"try",
":",
"if",
"UserenaSignup",
".",
"objects",
".",
"check_expired_activation",
"(",
"activation_key",
")",
":",
"new_key",
"=",
"UserenaSignup",
".",
"objects",
".",
"reissue_activation",
"(",
"activation_key",
")",
"if",
"new_key",
":",
"if",
"not",
"extra_context",
":",
"extra_context",
"=",
"dict",
"(",
")",
"return",
"ExtraContextTemplateView",
".",
"as_view",
"(",
"template_name",
"=",
"template_name",
",",
"extra_context",
"=",
"extra_context",
")",
"(",
"request",
")",
"else",
":",
"return",
"redirect",
"(",
"reverse",
"(",
"'userena_activate'",
",",
"args",
"=",
"(",
"activation_key",
",",
")",
")",
")",
"else",
":",
"return",
"redirect",
"(",
"reverse",
"(",
"'userena_activate'",
",",
"args",
"=",
"(",
"activation_key",
",",
")",
")",
")",
"except",
"UserenaSignup",
".",
"DoesNotExist",
":",
"return",
"redirect",
"(",
"reverse",
"(",
"'userena_activate'",
",",
"args",
"=",
"(",
"activation_key",
",",
")",
")",
")"
] | Reissue a new ``activation_key`` for the user with the expired
``activation_key``.
If ``activation_key`` does not exist, if ``USERENA_ACTIVATION_RETRY`` is
set to False, or on any other error condition, the user is redirected to
:func:`activate` for error message display.
:param activation_key:
A SHA1 string of 40 characters. A SHA1 is always 160 bits
long; at 4 bits per hex character this makes it --160/4-- 40 characters
long.
:param template_name:
String containing the template name that is used when a new
``activation_key`` has been created. Defaults to
``userena/activate_retry_success.html``.
:param extra_context:
Dictionary containing variables which could be added to the template
context. Defaults to an empty dictionary. | [
"Reissue",
"a",
"new",
"activation_key",
"for",
"the",
"user",
"with",
"the",
"expired",
"activation_key",
"."
] | 2d8b745eed25128134e961ca96c270802e730256 | https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/views.py#L227-L267 | train |
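A hedged URL-configuration sketch for wiring the view up; the pattern and the name 'userena_activate_retry' are assumptions:

    from django.urls import re_path
    from userena import views

    urlpatterns = [
        re_path(r'^activate/retry/(?P<activation_key>\w+)/$',
                views.activate_retry,
                name='userena_activate_retry'),
    ]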
django-userena-ce/django-userena-ce | userena/views.py | disabled_account | def disabled_account(request, username, template_name, extra_context=None):
"""
Checks if the account is disabled, if so, returns the disabled account template.
:param username:
String defining the username of the user that made the action.
:param template_name:
String defining the name of the template to use. Defaults to
``userena/signup_complete.html``.
**Keyword arguments**
``extra_context``
A dictionary containing extra variables that should be passed to the
rendered template. The ``account`` key is always the ``User``
that completed the action.
**Extra context**
``viewed_user``
The currently :class:`User` that is viewed.
``profile``
Profile of the viewed user.
"""
user = get_object_or_404(get_user_model(), username__iexact=username)
if user.is_active:
raise Http404
if not extra_context: extra_context = dict()
extra_context['viewed_user'] = user
extra_context['profile'] = get_user_profile(user=user)
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request) | python | def disabled_account(request, username, template_name, extra_context=None):
"""
Checks if the account is disabled, if so, returns the disabled account template.
:param username:
String defining the username of the user that made the action.
:param template_name:
String defining the name of the template to use. Defaults to
``userena/signup_complete.html``.
**Keyword arguments**
``extra_context``
A dictionary containing extra variables that should be passed to the
rendered template. The ``account`` key is always the ``User``
that completed the action.
**Extra context**
``viewed_user``
The currently :class:`User` that is viewed.
``profile``
Profile of the viewed user.
"""
user = get_object_or_404(get_user_model(), username__iexact=username)
if user.is_active:
raise Http404
if not extra_context: extra_context = dict()
extra_context['viewed_user'] = user
extra_context['profile'] = get_user_profile(user=user)
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request) | [
"def",
"disabled_account",
"(",
"request",
",",
"username",
",",
"template_name",
",",
"extra_context",
"=",
"None",
")",
":",
"user",
"=",
"get_object_or_404",
"(",
"get_user_model",
"(",
")",
",",
"username__iexact",
"=",
"username",
")",
"if",
"user",
".",
"is_active",
":",
"raise",
"Http404",
"if",
"not",
"extra_context",
":",
"extra_context",
"=",
"dict",
"(",
")",
"extra_context",
"[",
"'viewed_user'",
"]",
"=",
"user",
"extra_context",
"[",
"'profile'",
"]",
"=",
"get_user_profile",
"(",
"user",
"=",
"user",
")",
"return",
"ExtraContextTemplateView",
".",
"as_view",
"(",
"template_name",
"=",
"template_name",
",",
"extra_context",
"=",
"extra_context",
")",
"(",
"request",
")"
] | Checks if the account is disabled; if so, returns the disabled account template.
:param username:
String defining the username of the user that made the action.
:param template_name:
String defining the name of the template to use. Defaults to
``userena/signup_complete.html``.
**Keyword arguments**
``extra_context``
A dictionary containing extra variables that should be passed to the
rendered template. The ``account`` key is always the ``User``
that completed the action.
**Extra context**
``viewed_user``
The :class:`User` that is currently being viewed.
``profile``
Profile of the viewed user. | [
"Checks",
"if",
"the",
"account",
"is",
"disabled",
"if",
"so",
"returns",
"the",
"disabled",
"account",
"template",
"."
] | 2d8b745eed25128134e961ca96c270802e730256 | https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/views.py#L354-L390 | train |
django-userena-ce/django-userena-ce | userena/views.py | profile_list | def profile_list(request, page=1, template_name='userena/profile_list.html',
paginate_by=50, extra_context=None, **kwargs): # pragma: no cover
"""
Returns a list of all profiles that are public.
It's possible to disable this by changing ``USERENA_DISABLE_PROFILE_LIST``
to ``True`` in your settings.
:param page:
Integer of the active page used for pagination. Defaults to the first
page.
:param template_name:
String defining the name of the template that is used to render the
list of all users. Defaults to ``userena/list.html``.
:param paginate_by:
Integer defining the amount of displayed profiles per page. Defaults to
50 profiles per page.
:param extra_context:
Dictionary of variables that are passed on to the ``template_name``
template.
**Context**
``profile_list``
A list of profiles.
``is_paginated``
A boolean representing whether the results are paginated.
If the result is paginated. It will also contain the following variables.
``paginator``
An instance of ``django.core.paginator.Paginator``.
``page_obj``
An instance of ``django.core.paginator.Page``.
"""
warnings.warn("views.profile_list is deprecated. Use ProfileListView instead", DeprecationWarning, stacklevel=2)
try:
page = int(request.GET.get('page', None))
except (TypeError, ValueError):
page = page
if userena_settings.USERENA_DISABLE_PROFILE_LIST \
and not request.user.is_staff:
raise Http404
profile_model = get_profile_model()
queryset = profile_model.objects.get_visible_profiles(request.user)
if not extra_context: extra_context = dict()
return ProfileListView.as_view(queryset=queryset,
paginate_by=paginate_by,
page=page,
template_name=template_name,
extra_context=extra_context,
**kwargs)(request) | python | def profile_list(request, page=1, template_name='userena/profile_list.html',
paginate_by=50, extra_context=None, **kwargs): # pragma: no cover
"""
Returns a list of all profiles that are public.
It's possible to disable this by changing ``USERENA_DISABLE_PROFILE_LIST``
to ``True`` in your settings.
:param page:
Integer of the active page used for pagination. Defaults to the first
page.
:param template_name:
String defining the name of the template that is used to render the
list of all users. Defaults to ``userena/list.html``.
:param paginate_by:
Integer defining the amount of displayed profiles per page. Defaults to
50 profiles per page.
:param extra_context:
Dictionary of variables that are passed on to the ``template_name``
template.
**Context**
``profile_list``
A list of profiles.
``is_paginated``
A boolean representing whether the results are paginated.
If the result is paginated. It will also contain the following variables.
``paginator``
An instance of ``django.core.paginator.Paginator``.
``page_obj``
An instance of ``django.core.paginator.Page``.
"""
warnings.warn("views.profile_list is deprecated. Use ProfileListView instead", DeprecationWarning, stacklevel=2)
try:
page = int(request.GET.get('page', None))
except (TypeError, ValueError):
page = page
if userena_settings.USERENA_DISABLE_PROFILE_LIST \
and not request.user.is_staff:
raise Http404
profile_model = get_profile_model()
queryset = profile_model.objects.get_visible_profiles(request.user)
if not extra_context: extra_context = dict()
return ProfileListView.as_view(queryset=queryset,
paginate_by=paginate_by,
page=page,
template_name=template_name,
extra_context=extra_context,
**kwargs)(request) | [
"def",
"profile_list",
"(",
"request",
",",
"page",
"=",
"1",
",",
"template_name",
"=",
"'userena/profile_list.html'",
",",
"paginate_by",
"=",
"50",
",",
"extra_context",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# pragma: no cover",
"warnings",
".",
"warn",
"(",
"\"views.profile_list is deprecated. Use ProfileListView instead\"",
",",
"DeprecationWarning",
",",
"stacklevel",
"=",
"2",
")",
"try",
":",
"page",
"=",
"int",
"(",
"request",
".",
"GET",
".",
"get",
"(",
"'page'",
",",
"None",
")",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"page",
"=",
"page",
"if",
"userena_settings",
".",
"USERENA_DISABLE_PROFILE_LIST",
"and",
"not",
"request",
".",
"user",
".",
"is_staff",
":",
"raise",
"Http404",
"profile_model",
"=",
"get_profile_model",
"(",
")",
"queryset",
"=",
"profile_model",
".",
"objects",
".",
"get_visible_profiles",
"(",
"request",
".",
"user",
")",
"if",
"not",
"extra_context",
":",
"extra_context",
"=",
"dict",
"(",
")",
"return",
"ProfileListView",
".",
"as_view",
"(",
"queryset",
"=",
"queryset",
",",
"paginate_by",
"=",
"paginate_by",
",",
"page",
"=",
"page",
",",
"template_name",
"=",
"template_name",
",",
"extra_context",
"=",
"extra_context",
",",
"*",
"*",
"kwargs",
")",
"(",
"request",
")"
] | Returns a list of all profiles that are public.
It's possible to disable this by changing ``USERENA_DISABLE_PROFILE_LIST``
to ``True`` in your settings.
:param page:
Integer of the active page used for pagination. Defaults to the first
page.
:param template_name:
String defining the name of the template that is used to render the
list of all users. Defaults to ``userena/profile_list.html``.
:param paginate_by:
Integer defining the amount of displayed profiles per page. Defaults to
50 profiles per page.
:param extra_context:
Dictionary of variables that are passed on to the ``template_name``
template.
**Context**
``profile_list``
A list of profiles.
``is_paginated``
A boolean representing whether the results are paginated.
If the result is paginated. It will also contain the following variables.
``paginator``
An instance of ``django.core.paginator.Paginator``.
``page_obj``
An instance of ``django.core.paginator.Page``. | [
"Returns",
"a",
"list",
"of",
"all",
"profiles",
"that",
"are",
"public",
"."
] | 2d8b745eed25128134e961ca96c270802e730256 | https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/views.py#L757-L818 | train |
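Since the view itself warns that it is deprecated in favour of ProfileListView, a hedged sketch of the class-based replacement (URL pattern and name assumed):

    from django.urls import re_path
    from userena.views import ProfileListView

    urlpatterns = [
        re_path(r'^accounts/$',
                ProfileListView.as_view(paginate_by=50),
                name='userena_profile_list'),
    ]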
django-userena-ce/django-userena-ce | userena/contrib/umessages/managers.py | MessageContactManager.get_or_create | def get_or_create(self, um_from_user, um_to_user, message):
"""
Get or create a Contact
We override Django's :func:`get_or_create` because we want contact to
be unique in a bi-directional manner.
"""
created = False
try:
contact = self.get(Q(um_from_user=um_from_user, um_to_user=um_to_user) |
Q(um_from_user=um_to_user, um_to_user=um_from_user))
except self.model.DoesNotExist:
created = True
contact = self.create(um_from_user=um_from_user,
um_to_user=um_to_user,
latest_message=message)
return (contact, created) | python | def get_or_create(self, um_from_user, um_to_user, message):
"""
Get or create a Contact
We override Django's :func:`get_or_create` because we want contact to
be unique in a bi-directional manner.
"""
created = False
try:
contact = self.get(Q(um_from_user=um_from_user, um_to_user=um_to_user) |
Q(um_from_user=um_to_user, um_to_user=um_from_user))
except self.model.DoesNotExist:
created = True
contact = self.create(um_from_user=um_from_user,
um_to_user=um_to_user,
latest_message=message)
return (contact, created) | [
"def",
"get_or_create",
"(",
"self",
",",
"um_from_user",
",",
"um_to_user",
",",
"message",
")",
":",
"created",
"=",
"False",
"try",
":",
"contact",
"=",
"self",
".",
"get",
"(",
"Q",
"(",
"um_from_user",
"=",
"um_from_user",
",",
"um_to_user",
"=",
"um_to_user",
")",
"|",
"Q",
"(",
"um_from_user",
"=",
"um_to_user",
",",
"um_to_user",
"=",
"um_from_user",
")",
")",
"except",
"self",
".",
"model",
".",
"DoesNotExist",
":",
"created",
"=",
"True",
"contact",
"=",
"self",
".",
"create",
"(",
"um_from_user",
"=",
"um_from_user",
",",
"um_to_user",
"=",
"um_to_user",
",",
"latest_message",
"=",
"message",
")",
"return",
"(",
"contact",
",",
"created",
")"
] | Get or create a Contact
We override Django's :func:`get_or_create` because we want contacts to
be unique in a bi-directional manner. | [
"Get",
"or",
"create",
"a",
"Contact"
] | 2d8b745eed25128134e961ca96c270802e730256 | https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/contrib/umessages/managers.py#L11-L30 | train |
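A minimal sketch of the bi-directional uniqueness this method provides; the concrete model name MessageContact (behind self.model) and the user and message objects are assumptions:

    contact, created = MessageContact.objects.get_or_create(alice, bob, first_message)
    # created is True: no contact existed for this pair yet.

    same, created = MessageContact.objects.get_or_create(bob, alice, reply_message)
    # created is False and same.pk == contact.pk: the reversed pair matches
    # the same row, so a single contact serves both directions.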
django-userena-ce/django-userena-ce | userena/contrib/umessages/managers.py | MessageContactManager.update_contact | def update_contact(self, um_from_user, um_to_user, message):
""" Get or update a contacts information """
contact, created = self.get_or_create(um_from_user,
um_to_user,
message)
# If the contact already existed, update the message
if not created:
contact.latest_message = message
contact.save()
return contact | python | def update_contact(self, um_from_user, um_to_user, message):
""" Get or update a contacts information """
contact, created = self.get_or_create(um_from_user,
um_to_user,
message)
# If the contact already existed, update the message
if not created:
contact.latest_message = message
contact.save()
return contact | [
"def",
"update_contact",
"(",
"self",
",",
"um_from_user",
",",
"um_to_user",
",",
"message",
")",
":",
"contact",
",",
"created",
"=",
"self",
".",
"get_or_create",
"(",
"um_from_user",
",",
"um_to_user",
",",
"message",
")",
"# If the contact already existed, update the message",
"if",
"not",
"created",
":",
"contact",
".",
"latest_message",
"=",
"message",
"contact",
".",
"save",
"(",
")",
"return",
"contact"
] | Get or update a contact's information | [
"Get",
"or",
"update",
"a",
"contacts",
"information"
] | 2d8b745eed25128134e961ca96c270802e730256 | https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/contrib/umessages/managers.py#L32-L42 | train |
django-userena-ce/django-userena-ce | userena/contrib/umessages/managers.py | MessageContactManager.get_contacts_for | def get_contacts_for(self, user):
"""
Returns the contacts for this user.
Contacts are other users that this user has received messages
from or send messages to.
:param user:
The :class:`User` which to get the contacts for.
"""
contacts = self.filter(Q(um_from_user=user) | Q(um_to_user=user))
return contacts | python | def get_contacts_for(self, user):
"""
Returns the contacts for this user.
Contacts are other users that this user has received messages
from or send messages to.
:param user:
The :class:`User` which to get the contacts for.
"""
contacts = self.filter(Q(um_from_user=user) | Q(um_to_user=user))
return contacts | [
"def",
"get_contacts_for",
"(",
"self",
",",
"user",
")",
":",
"contacts",
"=",
"self",
".",
"filter",
"(",
"Q",
"(",
"um_from_user",
"=",
"user",
")",
"|",
"Q",
"(",
"um_to_user",
"=",
"user",
")",
")",
"return",
"contacts"
] | Returns the contacts for this user.
Contacts are other users that this user has received messages
from or sent messages to.
:param user:
The :class:`User` for which to get the contacts. | [
"Returns",
"the",
"contacts",
"for",
"this",
"user",
"."
] | 2d8b745eed25128134e961ca96c270802e730256 | https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/contrib/umessages/managers.py#L44-L56 | train |
django-userena-ce/django-userena-ce | userena/contrib/umessages/managers.py | MessageManager.send_message | def send_message(self, sender, um_to_user_list, body):
"""
Send a message from a user, to a user.
:param sender:
The :class:`User` which sends the message.
:param um_to_user_list:
A list which elements are :class:`User` to whom the message is for.
:param message:
String containing the message.
"""
msg = self.model(sender=sender,
body=body)
msg.save()
# Save the recipients
msg.save_recipients(um_to_user_list)
msg.update_contacts(um_to_user_list)
signals.email_sent.send(sender=None,msg=msg)
return msg | python | def send_message(self, sender, um_to_user_list, body):
"""
Send a message from a user, to a user.
:param sender:
The :class:`User` which sends the message.
:param um_to_user_list:
A list which elements are :class:`User` to whom the message is for.
:param message:
String containing the message.
"""
msg = self.model(sender=sender,
body=body)
msg.save()
# Save the recipients
msg.save_recipients(um_to_user_list)
msg.update_contacts(um_to_user_list)
signals.email_sent.send(sender=None,msg=msg)
return msg | [
"def",
"send_message",
"(",
"self",
",",
"sender",
",",
"um_to_user_list",
",",
"body",
")",
":",
"msg",
"=",
"self",
".",
"model",
"(",
"sender",
"=",
"sender",
",",
"body",
"=",
"body",
")",
"msg",
".",
"save",
"(",
")",
"# Save the recipients",
"msg",
".",
"save_recipients",
"(",
"um_to_user_list",
")",
"msg",
".",
"update_contacts",
"(",
"um_to_user_list",
")",
"signals",
".",
"email_sent",
".",
"send",
"(",
"sender",
"=",
"None",
",",
"msg",
"=",
"msg",
")",
"return",
"msg"
] | Send a message from a user to a user.
:param sender:
The :class:`User` which sends the message.
:param um_to_user_list:
A list whose elements are the :class:`User` recipients of the message.
:param body:
String containing the message. | [
"Send",
"a",
"message",
"from",
"a",
"user",
"to",
"a",
"user",
"."
] | 2d8b745eed25128134e961ca96c270802e730256 | https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/contrib/umessages/managers.py#L61-L84 | train |
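A hedged usage sketch; Message as the concrete model behind this manager is assumed:

    msg = Message.objects.send_message(
        sender=alice,
        um_to_user_list=[bob, carol],
        body="Meeting moved to three o'clock.",
    )
    # One Message row is created, a recipient row is saved per user, the
    # contacts are updated, and the email_sent signal fires once.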
django-userena-ce/django-userena-ce | userena/contrib/umessages/managers.py | MessageManager.get_conversation_between | def get_conversation_between(self, um_from_user, um_to_user):
""" Returns a conversation between two users """
messages = self.filter(Q(sender=um_from_user, recipients=um_to_user,
sender_deleted_at__isnull=True) |
Q(sender=um_to_user, recipients=um_from_user,
messagerecipient__deleted_at__isnull=True))
return messages | python | def get_conversation_between(self, um_from_user, um_to_user):
""" Returns a conversation between two users """
messages = self.filter(Q(sender=um_from_user, recipients=um_to_user,
sender_deleted_at__isnull=True) |
Q(sender=um_to_user, recipients=um_from_user,
messagerecipient__deleted_at__isnull=True))
return messages | [
"def",
"get_conversation_between",
"(",
"self",
",",
"um_from_user",
",",
"um_to_user",
")",
":",
"messages",
"=",
"self",
".",
"filter",
"(",
"Q",
"(",
"sender",
"=",
"um_from_user",
",",
"recipients",
"=",
"um_to_user",
",",
"sender_deleted_at__isnull",
"=",
"True",
")",
"|",
"Q",
"(",
"sender",
"=",
"um_to_user",
",",
"recipients",
"=",
"um_from_user",
",",
"messagerecipient__deleted_at__isnull",
"=",
"True",
")",
")",
"return",
"messages"
] | Returns a conversation between two users | [
"Returns",
"a",
"conversation",
"between",
"two",
"users"
] | 2d8b745eed25128134e961ca96c270802e730256 | https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/contrib/umessages/managers.py#L86-L92 | train |
django-userena-ce/django-userena-ce | userena/contrib/umessages/managers.py | MessageRecipientManager.count_unread_messages_for | def count_unread_messages_for(self, user):
"""
Returns the amount of unread messages for this user
:param user:
A Django :class:`User`
:return:
An integer with the amount of unread messages.
"""
unread_total = self.filter(user=user,
read_at__isnull=True,
deleted_at__isnull=True).count()
return unread_total | python | def count_unread_messages_for(self, user):
"""
Returns the amount of unread messages for this user
:param user:
A Django :class:`User`
:return:
An integer with the amount of unread messages.
"""
unread_total = self.filter(user=user,
read_at__isnull=True,
deleted_at__isnull=True).count()
return unread_total | [
"def",
"count_unread_messages_for",
"(",
"self",
",",
"user",
")",
":",
"unread_total",
"=",
"self",
".",
"filter",
"(",
"user",
"=",
"user",
",",
"read_at__isnull",
"=",
"True",
",",
"deleted_at__isnull",
"=",
"True",
")",
".",
"count",
"(",
")",
"return",
"unread_total"
] | Returns the amount of unread messages for this user
:param user:
A Django :class:`User`
:return:
An integer with the amount of unread messages. | [
"Returns",
"the",
"amount",
"of",
"unread",
"messages",
"for",
"this",
"user"
] | 2d8b745eed25128134e961ca96c270802e730256 | https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/contrib/umessages/managers.py#L97-L112 | train |
django-userena-ce/django-userena-ce | userena/contrib/umessages/managers.py | MessageRecipientManager.count_unread_messages_between | def count_unread_messages_between(self, um_to_user, um_from_user):
"""
Returns the amount of unread messages between two users
:param um_to_user:
A Django :class:`User` for who the messages are for.
:param um_from_user:
A Django :class:`User` from whom the messages originate from.
:return:
An integer with the amount of unread messages.
"""
unread_total = self.filter(message__sender=um_from_user,
user=um_to_user,
read_at__isnull=True,
deleted_at__isnull=True).count()
return unread_total | python | def count_unread_messages_between(self, um_to_user, um_from_user):
"""
Returns the amount of unread messages between two users
:param um_to_user:
A Django :class:`User` for who the messages are for.
:param um_from_user:
A Django :class:`User` from whom the messages originate from.
:return:
An integer with the amount of unread messages.
"""
unread_total = self.filter(message__sender=um_from_user,
user=um_to_user,
read_at__isnull=True,
deleted_at__isnull=True).count()
return unread_total | [
"def",
"count_unread_messages_between",
"(",
"self",
",",
"um_to_user",
",",
"um_from_user",
")",
":",
"unread_total",
"=",
"self",
".",
"filter",
"(",
"message__sender",
"=",
"um_from_user",
",",
"user",
"=",
"um_to_user",
",",
"read_at__isnull",
"=",
"True",
",",
"deleted_at__isnull",
"=",
"True",
")",
".",
"count",
"(",
")",
"return",
"unread_total"
] | Returns the amount of unread messages between two users
:param um_to_user:
A Django :class:`User` for whom the messages are intended.
:param um_from_user:
A Django :class:`User` from whom the messages originate.
:return:
An integer with the amount of unread messages. | [
"Returns",
"the",
"amount",
"of",
"unread",
"messages",
"between",
"two",
"users"
] | 2d8b745eed25128134e961ca96c270802e730256 | https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/contrib/umessages/managers.py#L114-L133 | train |
django-userena-ce/django-userena-ce | userena/managers.py | UserenaManager.reissue_activation | def reissue_activation(self, activation_key):
"""
Creates a new ``activation_key`` resetting activation timeframe when
users let the previous key expire.
:param activation_key:
String containing the secret SHA1 activation key.
"""
try:
userena = self.get(activation_key=activation_key)
except self.model.DoesNotExist:
return False
try:
salt, new_activation_key = generate_sha1(userena.user.username)
userena.activation_key = new_activation_key
userena.save(using=self._db)
userena.user.date_joined = get_datetime_now()
userena.user.save(using=self._db)
userena.send_activation_email()
return True
except Exception:
return False | python | def reissue_activation(self, activation_key):
"""
Creates a new ``activation_key`` resetting activation timeframe when
users let the previous key expire.
:param activation_key:
String containing the secret SHA1 activation key.
"""
try:
userena = self.get(activation_key=activation_key)
except self.model.DoesNotExist:
return False
try:
salt, new_activation_key = generate_sha1(userena.user.username)
userena.activation_key = new_activation_key
userena.save(using=self._db)
userena.user.date_joined = get_datetime_now()
userena.user.save(using=self._db)
userena.send_activation_email()
return True
except Exception:
return False | [
"def",
"reissue_activation",
"(",
"self",
",",
"activation_key",
")",
":",
"try",
":",
"userena",
"=",
"self",
".",
"get",
"(",
"activation_key",
"=",
"activation_key",
")",
"except",
"self",
".",
"model",
".",
"DoesNotExist",
":",
"return",
"False",
"try",
":",
"salt",
",",
"new_activation_key",
"=",
"generate_sha1",
"(",
"userena",
".",
"user",
".",
"username",
")",
"userena",
".",
"activation_key",
"=",
"new_activation_key",
"userena",
".",
"save",
"(",
"using",
"=",
"self",
".",
"_db",
")",
"userena",
".",
"user",
".",
"date_joined",
"=",
"get_datetime_now",
"(",
")",
"userena",
".",
"user",
".",
"save",
"(",
"using",
"=",
"self",
".",
"_db",
")",
"userena",
".",
"send_activation_email",
"(",
")",
"return",
"True",
"except",
"Exception",
":",
"return",
"False"
] | Creates a new ``activation_key`` resetting activation timeframe when
users let the previous key expire.
:param activation_key:
String containing the secret SHA1 activation key. | [
"Creates",
"a",
"new",
"activation_key",
"resetting",
"activation",
"timeframe",
"when",
"users",
"let",
"the",
"previous",
"key",
"expire",
"."
] | 2d8b745eed25128134e961ca96c270802e730256 | https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/managers.py#L106-L128 | train |
django-userena-ce/django-userena-ce | userena/managers.py | UserenaManager.check_expired_activation | def check_expired_activation(self, activation_key):
"""
Check if ``activation_key`` is still valid.
Raises a ``self.model.DoesNotExist`` exception if key is not present or
``activation_key`` is not a valid string
:param activation_key:
String containing the secret SHA1 for a valid activation.
:return:
True if the key has expired, False if still valid.
"""
if SHA1_RE.search(activation_key):
userena = self.get(activation_key=activation_key)
return userena.activation_key_expired()
raise self.model.DoesNotExist | python | def check_expired_activation(self, activation_key):
"""
Check if ``activation_key`` is still valid.
Raises a ``self.model.DoesNotExist`` exception if key is not present or
``activation_key`` is not a valid string
:param activation_key:
String containing the secret SHA1 for a valid activation.
:return:
True if the key has expired, False if still valid.
"""
if SHA1_RE.search(activation_key):
userena = self.get(activation_key=activation_key)
return userena.activation_key_expired()
raise self.model.DoesNotExist | [
"def",
"check_expired_activation",
"(",
"self",
",",
"activation_key",
")",
":",
"if",
"SHA1_RE",
".",
"search",
"(",
"activation_key",
")",
":",
"userena",
"=",
"self",
".",
"get",
"(",
"activation_key",
"=",
"activation_key",
")",
"return",
"userena",
".",
"activation_key_expired",
"(",
")",
"raise",
"self",
".",
"model",
".",
"DoesNotExist"
] | Check if ``activation_key`` is still valid.
Raises a ``self.model.DoesNotExist`` exception if the key is not present or
``activation_key`` is not a valid string.
:param activation_key:
String containing the secret SHA1 for a valid activation.
:return:
True if the key has expired, False if still valid. | [
"Check",
"if",
"activation_key",
"is",
"still",
"valid",
"."
] | 2d8b745eed25128134e961ca96c270802e730256 | https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/managers.py#L163-L180 | train |
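A minimal caller sketch, mirroring how activate_retry above consumes this method; note that both a malformed key (one failing the SHA1 regex) and an unknown key surface as UserenaSignup.DoesNotExist:

    try:
        if UserenaSignup.objects.check_expired_activation(activation_key):
            # The key exists but has expired: offer the user a fresh one.
            UserenaSignup.objects.reissue_activation(activation_key)
    except UserenaSignup.DoesNotExist:
        pass  # bad or unknown key: fall back to the normal activation error path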
django-userena-ce/django-userena-ce | userena/managers.py | UserenaManager.check_permissions | def check_permissions(self):
"""
Checks that all permissions are set correctly for the users.
:return: A set of users whose permissions were wrong.
"""
# Variable to supply some feedback
changed_permissions = []
changed_users = []
warnings = []
# Check that all the permissions are available.
for model, perms in ASSIGNED_PERMISSIONS.items():
if model == 'profile':
model_obj = get_profile_model()
else: model_obj = get_user_model()
model_content_type = ContentType.objects.get_for_model(model_obj)
for perm in perms:
try:
Permission.objects.get(codename=perm[0],
content_type=model_content_type)
except Permission.DoesNotExist:
changed_permissions.append(perm[1])
Permission.objects.create(name=perm[1],
codename=perm[0],
content_type=model_content_type)
# it is safe to rely on settings.ANONYMOUS_USER_NAME since it is a
# requirement of django-guardian
for user in get_user_model().objects.exclude(username=settings.ANONYMOUS_USER_NAME):
try:
user_profile = get_user_profile(user=user)
except ObjectDoesNotExist:
warnings.append(_("No profile found for %(username)s") \
% {'username': user.username})
else:
all_permissions = get_perms(user, user_profile) + get_perms(user, user)
for model, perms in ASSIGNED_PERMISSIONS.items():
if model == 'profile':
perm_object = get_user_profile(user=user)
else: perm_object = user
for perm in perms:
if perm[0] not in all_permissions:
assign_perm(perm[0], user, perm_object)
changed_users.append(user)
return (changed_permissions, changed_users, warnings) | python | def check_permissions(self):
"""
Checks that all permissions are set correctly for the users.
:return: A set of users whose permissions were wrong.
"""
# Variable to supply some feedback
changed_permissions = []
changed_users = []
warnings = []
# Check that all the permissions are available.
for model, perms in ASSIGNED_PERMISSIONS.items():
if model == 'profile':
model_obj = get_profile_model()
else: model_obj = get_user_model()
model_content_type = ContentType.objects.get_for_model(model_obj)
for perm in perms:
try:
Permission.objects.get(codename=perm[0],
content_type=model_content_type)
except Permission.DoesNotExist:
changed_permissions.append(perm[1])
Permission.objects.create(name=perm[1],
codename=perm[0],
content_type=model_content_type)
# it is safe to rely on settings.ANONYMOUS_USER_NAME since it is a
# requirement of django-guardian
for user in get_user_model().objects.exclude(username=settings.ANONYMOUS_USER_NAME):
try:
user_profile = get_user_profile(user=user)
except ObjectDoesNotExist:
warnings.append(_("No profile found for %(username)s") \
% {'username': user.username})
else:
all_permissions = get_perms(user, user_profile) + get_perms(user, user)
for model, perms in ASSIGNED_PERMISSIONS.items():
if model == 'profile':
perm_object = get_user_profile(user=user)
else: perm_object = user
for perm in perms:
if perm[0] not in all_permissions:
assign_perm(perm[0], user, perm_object)
changed_users.append(user)
return (changed_permissions, changed_users, warnings) | [
"def",
"check_permissions",
"(",
"self",
")",
":",
"# Variable to supply some feedback",
"changed_permissions",
"=",
"[",
"]",
"changed_users",
"=",
"[",
"]",
"warnings",
"=",
"[",
"]",
"# Check that all the permissions are available.",
"for",
"model",
",",
"perms",
"in",
"ASSIGNED_PERMISSIONS",
".",
"items",
"(",
")",
":",
"if",
"model",
"==",
"'profile'",
":",
"model_obj",
"=",
"get_profile_model",
"(",
")",
"else",
":",
"model_obj",
"=",
"get_user_model",
"(",
")",
"model_content_type",
"=",
"ContentType",
".",
"objects",
".",
"get_for_model",
"(",
"model_obj",
")",
"for",
"perm",
"in",
"perms",
":",
"try",
":",
"Permission",
".",
"objects",
".",
"get",
"(",
"codename",
"=",
"perm",
"[",
"0",
"]",
",",
"content_type",
"=",
"model_content_type",
")",
"except",
"Permission",
".",
"DoesNotExist",
":",
"changed_permissions",
".",
"append",
"(",
"perm",
"[",
"1",
"]",
")",
"Permission",
".",
"objects",
".",
"create",
"(",
"name",
"=",
"perm",
"[",
"1",
"]",
",",
"codename",
"=",
"perm",
"[",
"0",
"]",
",",
"content_type",
"=",
"model_content_type",
")",
"# it is safe to rely on settings.ANONYMOUS_USER_NAME since it is a",
"# requirement of django-guardian",
"for",
"user",
"in",
"get_user_model",
"(",
")",
".",
"objects",
".",
"exclude",
"(",
"username",
"=",
"settings",
".",
"ANONYMOUS_USER_NAME",
")",
":",
"try",
":",
"user_profile",
"=",
"get_user_profile",
"(",
"user",
"=",
"user",
")",
"except",
"ObjectDoesNotExist",
":",
"warnings",
".",
"append",
"(",
"_",
"(",
"\"No profile found for %(username)s\"",
")",
"%",
"{",
"'username'",
":",
"user",
".",
"username",
"}",
")",
"else",
":",
"all_permissions",
"=",
"get_perms",
"(",
"user",
",",
"user_profile",
")",
"+",
"get_perms",
"(",
"user",
",",
"user",
")",
"for",
"model",
",",
"perms",
"in",
"ASSIGNED_PERMISSIONS",
".",
"items",
"(",
")",
":",
"if",
"model",
"==",
"'profile'",
":",
"perm_object",
"=",
"get_user_profile",
"(",
"user",
"=",
"user",
")",
"else",
":",
"perm_object",
"=",
"user",
"for",
"perm",
"in",
"perms",
":",
"if",
"perm",
"[",
"0",
"]",
"not",
"in",
"all_permissions",
":",
"assign_perm",
"(",
"perm",
"[",
"0",
"]",
",",
"user",
",",
"perm_object",
")",
"changed_users",
".",
"append",
"(",
"user",
")",
"return",
"(",
"changed_permissions",
",",
"changed_users",
",",
"warnings",
")"
] | Checks that all permissions are set correctly for the users.
:return: A set of users whose permissions were wrong. | [
"Checks",
"that",
"all",
"permissions",
"are",
"set",
"correctly",
"for",
"the",
"users",
"."
] | 2d8b745eed25128134e961ca96c270802e730256 | https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/managers.py#L236-L287 | train |
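A hedged sketch of calling the repair routine, assuming the manager is attached to UserenaSignup as in the records above; userena also exposes it through a management command (command name assumed):

    changed_permissions, changed_users, warnings = (
        UserenaSignup.objects.check_permissions())

    # or, from the shell:
    #   python manage.py check_permissions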
django-userena-ce/django-userena-ce | userena/contrib/umessages/templatetags/umessages_tags.py | get_unread_message_count_for | def get_unread_message_count_for(parser, token):
"""
Returns the unread message count for a user.
Syntax::
{% get_unread_message_count_for [user] as [var_name] %}
Example usage::
{% get_unread_message_count_for pero as message_count %}
"""
try:
tag_name, arg = token.contents.split(None, 1)
except ValueError:
raise template.TemplateSyntaxError("%s tag requires arguments" % token.contents.split()[0])
m = re.search(r'(.*?) as (\w+)', arg)
if not m:
raise template.TemplateSyntaxError("%s tag had invalid arguments" % tag_name)
user, var_name = m.groups()
return MessageCount(user, var_name) | python | def get_unread_message_count_for(parser, token):
"""
Returns the unread message count for a user.
Syntax::
{% get_unread_message_count_for [user] as [var_name] %}
Example usage::
{% get_unread_message_count_for pero as message_count %}
"""
try:
tag_name, arg = token.contents.split(None, 1)
except ValueError:
raise template.TemplateSyntaxError("%s tag requires arguments" % token.contents.split()[0])
m = re.search(r'(.*?) as (\w+)', arg)
if not m:
raise template.TemplateSyntaxError("%s tag had invalid arguments" % tag_name)
user, var_name = m.groups()
return MessageCount(user, var_name) | [
"def",
"get_unread_message_count_for",
"(",
"parser",
",",
"token",
")",
":",
"try",
":",
"tag_name",
",",
"arg",
"=",
"token",
".",
"contents",
".",
"split",
"(",
"None",
",",
"1",
")",
"except",
"ValueError",
":",
"raise",
"template",
".",
"TemplateSyntaxError",
"(",
"\"%s tag requires arguments\"",
"%",
"token",
".",
"contents",
".",
"split",
"(",
")",
"[",
"0",
"]",
")",
"m",
"=",
"re",
".",
"search",
"(",
"r'(.*?) as (\\w+)'",
",",
"arg",
")",
"if",
"not",
"m",
":",
"raise",
"template",
".",
"TemplateSyntaxError",
"(",
"\"%s tag had invalid arguments\"",
"%",
"tag_name",
")",
"user",
",",
"var_name",
"=",
"m",
".",
"groups",
"(",
")",
"return",
"MessageCount",
"(",
"user",
",",
"var_name",
")"
] | Returns the unread message count for a user.
Syntax::
{% get_unread_message_count_for [user] as [var_name] %}
Example usage::
{% get_unread_message_count_for pero as message_count %} | [
"Returns",
"the",
"unread",
"message",
"count",
"for",
"a",
"user",
"."
] | 2d8b745eed25128134e961ca96c270802e730256 | https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/contrib/umessages/templatetags/umessages_tags.py#L40-L61 | train |
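A fuller template sketch around the documented syntax, assuming the tag library loads under its module name umessages_tags and that request.user is available in the context:

    {% load umessages_tags %}
    {% get_unread_message_count_for request.user as message_count %}
    <p>You have {{ message_count }} unread message{{ message_count|pluralize }}.</p>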
django-userena-ce/django-userena-ce | userena/contrib/umessages/templatetags/umessages_tags.py | get_unread_message_count_between | def get_unread_message_count_between(parser, token):
"""
Returns the unread message count between two users.
Syntax::
{% get_unread_message_count_between [user] and [user] as [var_name] %}
Example usage::
{% get_unread_message_count_between funky and wunki as message_count %}
"""
try:
tag_name, arg = token.contents.split(None, 1)
except ValueError:
raise template.TemplateSyntaxError("%s tag requires arguments" % token.contents.split()[0])
m = re.search(r'(.*?) and (.*?) as (\w+)', arg)
if not m:
raise template.TemplateSyntaxError("%s tag had invalid arguments" % tag_name)
um_from_user, um_to_user, var_name = m.groups()
return MessageCount(um_from_user, var_name, um_to_user) | python | def get_unread_message_count_between(parser, token):
"""
Returns the unread message count between two users.
Syntax::
{% get_unread_message_count_between [user] and [user] as [var_name] %}
Example usage::
{% get_unread_message_count_between funky and wunki as message_count %}
"""
try:
tag_name, arg = token.contents.split(None, 1)
except ValueError:
raise template.TemplateSyntaxError("%s tag requires arguments" % token.contents.split()[0])
m = re.search(r'(.*?) and (.*?) as (\w+)', arg)
if not m:
raise template.TemplateSyntaxError("%s tag had invalid arguments" % tag_name)
um_from_user, um_to_user, var_name = m.groups()
return MessageCount(um_from_user, var_name, um_to_user) | [
"def",
"get_unread_message_count_between",
"(",
"parser",
",",
"token",
")",
":",
"try",
":",
"tag_name",
",",
"arg",
"=",
"token",
".",
"contents",
".",
"split",
"(",
"None",
",",
"1",
")",
"except",
"ValueError",
":",
"raise",
"template",
".",
"TemplateSyntaxError",
"(",
"\"%s tag requires arguments\"",
"%",
"token",
".",
"contents",
".",
"split",
"(",
")",
"[",
"0",
"]",
")",
"m",
"=",
"re",
".",
"search",
"(",
"r'(.*?) and (.*?) as (\\w+)'",
",",
"arg",
")",
"if",
"not",
"m",
":",
"raise",
"template",
".",
"TemplateSyntaxError",
"(",
"\"%s tag had invalid arguments\"",
"%",
"tag_name",
")",
"um_from_user",
",",
"um_to_user",
",",
"var_name",
"=",
"m",
".",
"groups",
"(",
")",
"return",
"MessageCount",
"(",
"um_from_user",
",",
"var_name",
",",
"um_to_user",
")"
] | Returns the unread message count between two users.
Syntax::
{% get_unread_message_count_between [user] and [user] as [var_name] %}
Example usage::
{% get_unread_message_count_between funky and wunki as message_count %} | [
"Returns",
"the",
"unread",
"message",
"count",
"between",
"two",
"users",
"."
] | 2d8b745eed25128134e961ca96c270802e730256 | https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/contrib/umessages/templatetags/umessages_tags.py#L64-L85 | train |
django-userena-ce/django-userena-ce | userena/models.py | upload_to_mugshot | def upload_to_mugshot(instance, filename):
"""
Uploads a mugshot for a user to the ``USERENA_MUGSHOT_PATH`` and saving it
under unique hash for the image. This is for privacy reasons so others
can't just browse through the mugshot directory.
"""
extension = filename.split('.')[-1].lower()
salt, hash = generate_sha1(instance.pk)
path = userena_settings.USERENA_MUGSHOT_PATH % {'username': instance.user.username,
'id': instance.user.id,
'date': instance.user.date_joined,
'date_now': get_datetime_now().date()}
return '%(path)s%(hash)s.%(extension)s' % {'path': path,
'hash': hash[:10],
'extension': extension} | python | def upload_to_mugshot(instance, filename):
"""
Uploads a mugshot for a user to the ``USERENA_MUGSHOT_PATH`` and saving it
under unique hash for the image. This is for privacy reasons so others
can't just browse through the mugshot directory.
"""
extension = filename.split('.')[-1].lower()
salt, hash = generate_sha1(instance.pk)
path = userena_settings.USERENA_MUGSHOT_PATH % {'username': instance.user.username,
'id': instance.user.id,
'date': instance.user.date_joined,
'date_now': get_datetime_now().date()}
return '%(path)s%(hash)s.%(extension)s' % {'path': path,
'hash': hash[:10],
'extension': extension} | [
"def",
"upload_to_mugshot",
"(",
"instance",
",",
"filename",
")",
":",
"extension",
"=",
"filename",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
".",
"lower",
"(",
")",
"salt",
",",
"hash",
"=",
"generate_sha1",
"(",
"instance",
".",
"pk",
")",
"path",
"=",
"userena_settings",
".",
"USERENA_MUGSHOT_PATH",
"%",
"{",
"'username'",
":",
"instance",
".",
"user",
".",
"username",
",",
"'id'",
":",
"instance",
".",
"user",
".",
"id",
",",
"'date'",
":",
"instance",
".",
"user",
".",
"date_joined",
",",
"'date_now'",
":",
"get_datetime_now",
"(",
")",
".",
"date",
"(",
")",
"}",
"return",
"'%(path)s%(hash)s.%(extension)s'",
"%",
"{",
"'path'",
":",
"path",
",",
"'hash'",
":",
"hash",
"[",
":",
"10",
"]",
",",
"'extension'",
":",
"extension",
"}"
] | Uploads a mugshot for a user to the ``USERENA_MUGSHOT_PATH`` and saves it
under a unique hash for the image. This is for privacy reasons so others
can't just browse through the mugshot directory. | [
"Uploads",
"a",
"mugshot",
"for",
"a",
"user",
"to",
"the",
"USERENA_MUGSHOT_PATH",
"and",
"saving",
"it",
"under",
"unique",
"hash",
"for",
"the",
"image",
".",
"This",
"is",
"for",
"privacy",
"reasons",
"so",
"others",
"can",
"t",
"just",
"browse",
"through",
"the",
"mugshot",
"directory",
"."
] | 2d8b745eed25128134e961ca96c270802e730256 | https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/models.py#L24-L39 | train |
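A hedged illustration of the resulting storage path, assuming USERENA_MUGSHOT_PATH is set to 'mugshots/%(username)s/'; the hash shown is illustrative:

    path = upload_to_mugshot(profile, "Holiday.JPG")
    # e.g. "mugshots/alice/1f09d30c17.jpg": the setting supplies the directory,
    # the first 10 characters of a salted SHA1 hide the original file name,
    # and the extension is kept but lower-cased.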
django-userena-ce/django-userena-ce | userena/contrib/umessages/views.py | message_compose | def message_compose(request, recipients=None, compose_form=ComposeForm,
success_url=None, template_name="umessages/message_form.html",
recipient_filter=None, extra_context=None):
"""
Compose a new message
:recipients:
String containing the usernames to whom the message is send to. Can be
multiple username by seperating them with a ``+`` sign.
:param compose_form:
The form that is used for getting neccesary information. Defaults to
:class:`ComposeForm`.
:param success_url:
String containing the named url which to redirect to after successfull
sending a message. Defaults to ``userena_umessages_list`` if there are
multiple recipients. If there is only one recipient, will redirect to
``userena_umessages_detail`` page, showing the conversation.
:param template_name:
String containing the name of the template that is used.
:param recipient_filter:
A list of :class:`User` that don"t want to receive any messages.
:param extra_context:
Dictionary with extra variables supplied to the template.
**Context**
``form``
The form that is used.
"""
initial_data = dict()
if recipients:
username_list = [r.strip() for r in recipients.split("+")]
recipients = [u for u in get_user_model().objects.filter(username__in=username_list)]
initial_data["to"] = recipients
form = compose_form(initial=initial_data)
if request.method == "POST":
form = compose_form(request.POST)
if form.is_valid():
requested_redirect = request.GET.get(REDIRECT_FIELD_NAME,
request.POST.get(REDIRECT_FIELD_NAME, False))
message = form.save(request.user)
recipients = form.cleaned_data['to']
if userena_settings.USERENA_USE_MESSAGES:
messages.success(request, _('Message is sent.'),
fail_silently=True)
# Redirect mechanism
redirect_to = reverse('userena_umessages_list')
if requested_redirect: redirect_to = requested_redirect
elif success_url: redirect_to = success_url
elif len(recipients) == 1:
redirect_to = reverse('userena_umessages_detail',
kwargs={'username': recipients[0].username})
return redirect(redirect_to)
if not extra_context: extra_context = dict()
extra_context["form"] = form
extra_context["recipients"] = recipients
return render(request, template_name, extra_context) | python | def message_compose(request, recipients=None, compose_form=ComposeForm,
success_url=None, template_name="umessages/message_form.html",
recipient_filter=None, extra_context=None):
"""
Compose a new message
:recipients:
String containing the usernames to whom the message is send to. Can be
multiple username by seperating them with a ``+`` sign.
:param compose_form:
The form that is used for getting neccesary information. Defaults to
:class:`ComposeForm`.
:param success_url:
String containing the named url which to redirect to after successfull
sending a message. Defaults to ``userena_umessages_list`` if there are
multiple recipients. If there is only one recipient, will redirect to
``userena_umessages_detail`` page, showing the conversation.
:param template_name:
String containing the name of the template that is used.
:param recipient_filter:
A list of :class:`User` that don"t want to receive any messages.
:param extra_context:
Dictionary with extra variables supplied to the template.
**Context**
``form``
The form that is used.
"""
initial_data = dict()
if recipients:
username_list = [r.strip() for r in recipients.split("+")]
recipients = [u for u in get_user_model().objects.filter(username__in=username_list)]
initial_data["to"] = recipients
form = compose_form(initial=initial_data)
if request.method == "POST":
form = compose_form(request.POST)
if form.is_valid():
requested_redirect = request.GET.get(REDIRECT_FIELD_NAME,
request.POST.get(REDIRECT_FIELD_NAME, False))
message = form.save(request.user)
recipients = form.cleaned_data['to']
if userena_settings.USERENA_USE_MESSAGES:
messages.success(request, _('Message is sent.'),
fail_silently=True)
# Redirect mechanism
redirect_to = reverse('userena_umessages_list')
if requested_redirect: redirect_to = requested_redirect
elif success_url: redirect_to = success_url
elif len(recipients) == 1:
redirect_to = reverse('userena_umessages_detail',
kwargs={'username': recipients[0].username})
return redirect(redirect_to)
if not extra_context: extra_context = dict()
extra_context["form"] = form
extra_context["recipients"] = recipients
return render(request, template_name, extra_context) | [
"def",
"message_compose",
"(",
"request",
",",
"recipients",
"=",
"None",
",",
"compose_form",
"=",
"ComposeForm",
",",
"success_url",
"=",
"None",
",",
"template_name",
"=",
"\"umessages/message_form.html\"",
",",
"recipient_filter",
"=",
"None",
",",
"extra_context",
"=",
"None",
")",
":",
"initial_data",
"=",
"dict",
"(",
")",
"if",
"recipients",
":",
"username_list",
"=",
"[",
"r",
".",
"strip",
"(",
")",
"for",
"r",
"in",
"recipients",
".",
"split",
"(",
"\"+\"",
")",
"]",
"recipients",
"=",
"[",
"u",
"for",
"u",
"in",
"get_user_model",
"(",
")",
".",
"objects",
".",
"filter",
"(",
"username__in",
"=",
"username_list",
")",
"]",
"initial_data",
"[",
"\"to\"",
"]",
"=",
"recipients",
"form",
"=",
"compose_form",
"(",
"initial",
"=",
"initial_data",
")",
"if",
"request",
".",
"method",
"==",
"\"POST\"",
":",
"form",
"=",
"compose_form",
"(",
"request",
".",
"POST",
")",
"if",
"form",
".",
"is_valid",
"(",
")",
":",
"requested_redirect",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"REDIRECT_FIELD_NAME",
",",
"request",
".",
"POST",
".",
"get",
"(",
"REDIRECT_FIELD_NAME",
",",
"False",
")",
")",
"message",
"=",
"form",
".",
"save",
"(",
"request",
".",
"user",
")",
"recipients",
"=",
"form",
".",
"cleaned_data",
"[",
"'to'",
"]",
"if",
"userena_settings",
".",
"USERENA_USE_MESSAGES",
":",
"messages",
".",
"success",
"(",
"request",
",",
"_",
"(",
"'Message is sent.'",
")",
",",
"fail_silently",
"=",
"True",
")",
"# Redirect mechanism",
"redirect_to",
"=",
"reverse",
"(",
"'userena_umessages_list'",
")",
"if",
"requested_redirect",
":",
"redirect_to",
"=",
"requested_redirect",
"elif",
"success_url",
":",
"redirect_to",
"=",
"success_url",
"elif",
"len",
"(",
"recipients",
")",
"==",
"1",
":",
"redirect_to",
"=",
"reverse",
"(",
"'userena_umessages_detail'",
",",
"kwargs",
"=",
"{",
"'username'",
":",
"recipients",
"[",
"0",
"]",
".",
"username",
"}",
")",
"return",
"redirect",
"(",
"redirect_to",
")",
"if",
"not",
"extra_context",
":",
"extra_context",
"=",
"dict",
"(",
")",
"extra_context",
"[",
"\"form\"",
"]",
"=",
"form",
"extra_context",
"[",
"\"recipients\"",
"]",
"=",
"recipients",
"return",
"render",
"(",
"request",
",",
"template_name",
",",
"extra_context",
")"
] | Compose a new message
:recipients:
String containing the usernames to whom the message is sent. Can be
multiple usernames separated by a ``+`` sign.
:param compose_form:
The form that is used for getting the necessary information. Defaults to
:class:`ComposeForm`.
:param success_url:
String containing the named URL to redirect to after successfully
sending a message. Defaults to ``userena_umessages_list`` if there are
multiple recipients. If there is only one recipient, will redirect to
the ``userena_umessages_detail`` page, showing the conversation.
:param template_name:
String containing the name of the template that is used.
:param recipient_filter:
A list of :class:`User` that don't want to receive any messages.
:param extra_context:
Dictionary with extra variables supplied to the template.
**Context**
``form``
The form that is used. | [
"Compose",
"a",
"new",
"message"
] | 2d8b745eed25128134e961ca96c270802e730256 | https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/contrib/umessages/views.py#L73-L141 | train |
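A hedged URL sketch; the patterns and names are assumptions built on the '+'-separated recipients convention in the docstring:

    from django.urls import re_path
    from userena.contrib.umessages.views import message_compose

    urlpatterns = [
        re_path(r'^messages/compose/$',
                message_compose, name='userena_umessages_compose'),
        re_path(r'^messages/compose/(?P<recipients>[\+\.\w-]+)/$',
                message_compose, name='userena_umessages_compose_to'),
    ]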
django-userena-ce/django-userena-ce | userena/contrib/umessages/views.py | message_remove | def message_remove(request, undo=False):
"""
A ``POST`` to remove messages.
:param undo:
A Boolean that, if ``True``, restores previously removed messages.
POST can have the following keys:
``message_pks``
List of message ids that should be deleted.
``next``
String containing the URI to redirect to after the messages are
removed. Redirects to the inbox view by default.
The ``next`` value can also be supplied in the URI with ``?next=<value>``.
"""
message_pks = request.POST.getlist('message_pks')
redirect_to = request.GET.get(REDIRECT_FIELD_NAME,
request.POST.get(REDIRECT_FIELD_NAME, False))
if message_pks:
# Check that all values are integers.
valid_message_pk_list = set()
for pk in message_pks:
try: valid_pk = int(pk)
except (TypeError, ValueError): pass
else:
valid_message_pk_list.add(valid_pk)
# Delete all the messages, if they belong to the user.
now = get_datetime_now()
changed_message_list = set()
for pk in valid_message_pk_list:
message = get_object_or_404(Message, pk=pk)
# Check if the user is the owner
if message.sender == request.user:
if undo:
message.sender_deleted_at = None
else:
message.sender_deleted_at = now
message.save()
changed_message_list.add(message.pk)
# Check if the user is a recipient of the message
if request.user in message.recipients.all():
mr = message.messagerecipient_set.get(user=request.user,
message=message)
if undo:
mr.deleted_at = None
else:
mr.deleted_at = now
mr.save()
changed_message_list.add(message.pk)
# Send messages
if (len(changed_message_list) > 0) and userena_settings.USERENA_USE_MESSAGES:
if undo:
message = ungettext('Message is successfully restored.',
'Messages are successfully restored.',
len(changed_message_list))
else:
message = ungettext('Message is successfully removed.',
'Messages are successfully removed.',
len(changed_message_list))
messages.success(request, message, fail_silently=True)
if redirect_to: return redirect(redirect_to)
else: return redirect(reverse('userena_umessages_list')) | python | def message_remove(request, undo=False):
"""
A ``POST`` to remove messages.
:param undo:
A Boolean that, if ``True``, restores previously removed messages.
POST can have the following keys:
``message_pks``
List of message ids that should be deleted.
``next``
String containing the URI to redirect to after the messages are
removed. Redirects to the inbox view by default.
The ``next`` value can also be supplied in the URI with ``?next=<value>``.
"""
message_pks = request.POST.getlist('message_pks')
redirect_to = request.GET.get(REDIRECT_FIELD_NAME,
request.POST.get(REDIRECT_FIELD_NAME, False))
if message_pks:
# Check that all values are integers.
valid_message_pk_list = set()
for pk in message_pks:
try: valid_pk = int(pk)
except (TypeError, ValueError): pass
else:
valid_message_pk_list.add(valid_pk)
# Delete all the messages, if they belong to the user.
now = get_datetime_now()
changed_message_list = set()
for pk in valid_message_pk_list:
message = get_object_or_404(Message, pk=pk)
# Check if the user is the owner
if message.sender == request.user:
if undo:
message.sender_deleted_at = None
else:
message.sender_deleted_at = now
message.save()
changed_message_list.add(message.pk)
# Check if the user is a recipient of the message
if request.user in message.recipients.all():
mr = message.messagerecipient_set.get(user=request.user,
message=message)
if undo:
mr.deleted_at = None
else:
mr.deleted_at = now
mr.save()
changed_message_list.add(message.pk)
# Send messages
if (len(changed_message_list) > 0) and userena_settings.USERENA_USE_MESSAGES:
if undo:
message = ungettext('Message is successfully restored.',
'Messages are successfully restored.',
len(changed_message_list))
else:
message = ungettext('Message is successfully removed.',
'Messages are successfully removed.',
len(changed_message_list))
messages.success(request, message, fail_silently=True)
if redirect_to: return redirect(redirect_to)
else: return redirect(reverse('userena_umessages_list')) | [
"def",
"message_remove",
"(",
"request",
",",
"undo",
"=",
"False",
")",
":",
"message_pks",
"=",
"request",
".",
"POST",
".",
"getlist",
"(",
"'message_pks'",
")",
"redirect_to",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"REDIRECT_FIELD_NAME",
",",
"request",
".",
"POST",
".",
"get",
"(",
"REDIRECT_FIELD_NAME",
",",
"False",
")",
")",
"if",
"message_pks",
":",
"# Check that all values are integers.",
"valid_message_pk_list",
"=",
"set",
"(",
")",
"for",
"pk",
"in",
"message_pks",
":",
"try",
":",
"valid_pk",
"=",
"int",
"(",
"pk",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"pass",
"else",
":",
"valid_message_pk_list",
".",
"add",
"(",
"valid_pk",
")",
"# Delete all the messages, if they belong to the user.",
"now",
"=",
"get_datetime_now",
"(",
")",
"changed_message_list",
"=",
"set",
"(",
")",
"for",
"pk",
"in",
"valid_message_pk_list",
":",
"message",
"=",
"get_object_or_404",
"(",
"Message",
",",
"pk",
"=",
"pk",
")",
"# Check if the user is the owner",
"if",
"message",
".",
"sender",
"==",
"request",
".",
"user",
":",
"if",
"undo",
":",
"message",
".",
"sender_deleted_at",
"=",
"None",
"else",
":",
"message",
".",
"sender_deleted_at",
"=",
"now",
"message",
".",
"save",
"(",
")",
"changed_message_list",
".",
"add",
"(",
"message",
".",
"pk",
")",
"# Check if the user is a recipient of the message",
"if",
"request",
".",
"user",
"in",
"message",
".",
"recipients",
".",
"all",
"(",
")",
":",
"mr",
"=",
"message",
".",
"messagerecipient_set",
".",
"get",
"(",
"user",
"=",
"request",
".",
"user",
",",
"message",
"=",
"message",
")",
"if",
"undo",
":",
"mr",
".",
"deleted_at",
"=",
"None",
"else",
":",
"mr",
".",
"deleted_at",
"=",
"now",
"mr",
".",
"save",
"(",
")",
"changed_message_list",
".",
"add",
"(",
"message",
".",
"pk",
")",
"# Send messages",
"if",
"(",
"len",
"(",
"changed_message_list",
")",
">",
"0",
")",
"and",
"userena_settings",
".",
"USERENA_USE_MESSAGES",
":",
"if",
"undo",
":",
"message",
"=",
"ungettext",
"(",
"'Message is succesfully restored.'",
",",
"'Messages are succesfully restored.'",
",",
"len",
"(",
"changed_message_list",
")",
")",
"else",
":",
"message",
"=",
"ungettext",
"(",
"'Message is successfully removed.'",
",",
"'Messages are successfully removed.'",
",",
"len",
"(",
"changed_message_list",
")",
")",
"messages",
".",
"success",
"(",
"request",
",",
"message",
",",
"fail_silently",
"=",
"True",
")",
"if",
"redirect_to",
":",
"return",
"redirect",
"(",
"redirect_to",
")",
"else",
":",
"return",
"redirect",
"(",
"reverse",
"(",
"'userena_umessages_list'",
")",
")"
] | A ``POST`` to remove messages.
:param undo:
A Boolean that, if ``True``, restores previously removed messages.
POST can have the following keys:
``message_pks``
List of message ids that should be deleted.
``next``
String containing the URI to redirect to after the messages are
removed. Redirects to the inbox view by default.
The ``next`` value can also be supplied in the URI with ``?next=<value>``. | [
"A",
"POST",
"to",
"remove",
"messages",
"."
] | 2d8b745eed25128134e961ca96c270802e730256 | https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/contrib/umessages/views.py#L145-L217 | train |
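Usage note for the ``message_remove`` record above: the view expects ``message_pks`` (and optionally ``next``) in the POST body and always answers with a redirect. A minimal sketch with Django's test client follows — the URL, credentials, and redirect target are assumptions for illustration only:

# Hypothetical POST against message_remove via Django's test client.
from django.test import Client

client = Client()
client.login(username="alice", password="secret")  # assumed credentials

response = client.post(
    "/messages/remove/",  # assumed URL routed to message_remove
    {
        # "oops" is silently skipped by the int() check; messages 1 and 2
        # must exist, since the view fetches them with get_object_or_404.
        "message_pks": ["1", "2", "oops"],
        "next": "/messages/inbox/",  # optional redirect target
    },
)
assert response.status_code == 302  # the view redirects after processing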