Dataset schema (one row per function):

| column | type | length / values |
|---|---|---|
| id | int32 | 0 – 252k |
| repo | string | 7 – 55 chars |
| path | string | 4 – 127 chars |
| func_name | string | 1 – 88 chars |
| original_string | string | 75 – 19.8k chars |
| language | string (1 class) | "python" |
| code | string | 75 – 19.8k chars |
| code_tokens | sequence | token split of `code` |
| docstring | string | 3 – 17.3k chars |
| docstring_tokens | sequence | token split of `docstring` |
| sha | string | 40 chars |
| url | string | 87 – 242 chars |

In every row shown here, `code` is identical to `original_string`, and the `*_tokens` columns are token-level splits of `code` and `docstring`. Each record below lists the id, repo, path, function name and language, the code (docstring included), and the pinning sha and source url.
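A minimal sketch of browsing such a corpus with the Hugging Face `datasets` library; the hub id `code_search_net` and the row index are assumptions, not something this dump states:

```python
# Hedged sketch: assumes the `datasets` package is installed and that this
# corpus is the one published on the Hugging Face hub as "code_search_net".
from datasets import load_dataset

ds = load_dataset("code_search_net", "python", split="train")

row = ds[5700]  # hypothetical index; the ids below start at 5,700
print(row["repo"], row["path"], row["func_name"])
print(row["docstring"])
print(row["code"])  # identical to row["original_string"] in every row here
```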
5,700 | arraylabs/pymyq | pymyq/device.py | MyQDevice.open_allowed | python

```python
def open_allowed(self) -> bool:
    """Door can be opened unattended."""
    return next(
        attr['Value'] for attr in self._device_json.get('Attributes', [])
        if attr.get('AttributeDisplayName') == 'isunattendedopenallowed')\
        == "1"
```

sha 413ae01ca23568f7b5f698a87e872f456072356b | https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/device.py#L89-L94
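Because `next()` is called here without a default, a device record with no `isunattendedopenallowed` attribute raises `StopIteration`. A standalone sketch of the same lookup with a sentinel default — `attributes` is a stand-in for `self._device_json.get('Attributes', [])`, not pymyq's API:

```python
def open_allowed(attributes) -> bool:
    # Passing a default to next() avoids StopIteration on a missing attribute.
    value = next(
        (attr['Value'] for attr in attributes
         if attr.get('AttributeDisplayName') == 'isunattendedopenallowed'),
        None)
    return value == "1"

print(open_allowed([{'AttributeDisplayName': 'isunattendedopenallowed',
                     'Value': '1'}]))  # True
print(open_allowed([]))                # False instead of an exception
```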
5,701 | arraylabs/pymyq | pymyq/device.py | MyQDevice.close_allowed | python

```python
def close_allowed(self) -> bool:
    """Door can be closed unattended."""
    return next(
        attr['Value'] for attr in self._device_json.get('Attributes', [])
        if attr.get('AttributeDisplayName') == 'isunattendedcloseallowed')\
        == "1"
```

sha 413ae01ca23568f7b5f698a87e872f456072356b | https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/device.py#L97-L102
5,702 | arraylabs/pymyq | pymyq/device.py | MyQDevice._update_state | python

```python
def _update_state(self, value: str) -> None:
    """Update state temporary during open or close."""
    attribute = next(attr for attr in self._device['device_info'].get(
        'Attributes', []) if attr.get(
            'AttributeDisplayName') == 'doorstate')
    if attribute is not None:
        attribute['Value'] = value
```

sha 413ae01ca23568f7b5f698a87e872f456072356b | https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/device.py#L113-L119
5,703 | arraylabs/pymyq | pymyq/device.py | MyQDevice._coerce_state_from_string | python

```python
def _coerce_state_from_string(value: Union[int, str]) -> str:
    """Return a proper state from a string input."""
    try:
        return STATE_MAP[int(value)]
    except KeyError:
        _LOGGER.error('Unknown state: %s', value)
        return STATE_UNKNOWN
```

sha 413ae01ca23568f7b5f698a87e872f456072356b | https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/device.py#L127-L133
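The helper only catches `KeyError`, so a non-numeric input would still escape as a `ValueError` from `int(value)`. A self-contained sketch of the lookup — the `STATE_MAP` entries are illustrative placeholders, not pymyq's real table:

```python
STATE_UNKNOWN = 'unknown'
STATE_MAP = {          # hypothetical numeric door states
    1: 'open',
    2: 'closed',
    4: 'opening',
    5: 'closing',
}

def coerce_state(value):
    try:
        return STATE_MAP[int(value)]
    except KeyError:
        return STATE_UNKNOWN  # logging elided from this sketch

print(coerce_state('2'))  # 'closed'
print(coerce_state(9))    # 'unknown'
```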
5,704 | arraylabs/pymyq | pymyq/device.py | MyQDevice._set_state | python

```python
async def _set_state(self, state: int) -> bool:
    """Set the state of the device."""
    try:
        set_state_resp = await self.api._request(
            'put',
            DEVICE_SET_ENDPOINT,
            json={
                'attributeName': 'desireddoorstate',
                'myQDeviceId': self.device_id,
                'AttributeValue': state,
            })
    except RequestError as err:
        _LOGGER.error('%s: Setting state failed (and halting): %s',
                      self.name, err)
        return False

    if set_state_resp is None:
        return False

    if int(set_state_resp.get('ReturnCode', 1)) != 0:
        _LOGGER.error(
            '%s: Error setting the device state: %s', self.name,
            set_state_resp.get('ErrorMessage', 'Unknown Error'))
        return False

    return True
```

sha 413ae01ca23568f7b5f698a87e872f456072356b | https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/device.py#L136-L161
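Note the defensive default in `set_state_resp.get('ReturnCode', 1)`: a response without a `ReturnCode` counts as failure because the fallback is nonzero. The check in isolation, with made-up response dicts:

```python
def succeeded(resp):
    # None or a missing/nonzero ReturnCode is treated as failure.
    return resp is not None and int(resp.get('ReturnCode', 1)) == 0

print(succeeded({'ReturnCode': '0'}))  # True - the string "0" coerces to 0
print(succeeded({}))                   # False - missing key defaults to 1
print(succeeded(None))                 # False
```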
5,705 | arraylabs/pymyq | pymyq/device.py | MyQDevice.close | python

```python
async def close(self) -> bool:
    """Close the device."""
    _LOGGER.debug('%s: Sending close command', self.name)
    if not await self._set_state(0):
        return False

    # Do not allow update of this device's state for 10 seconds.
    self.next_allowed_update = datetime.utcnow() + timedelta(seconds=10)

    # Ensure state is closed or closing.
    if self.state not in (STATE_CLOSED, STATE_CLOSING):
        # Set state to closing.
        self._update_state('5')
        self._device_json = self._device['device_info']

    _LOGGER.debug('%s: Close command send', self.name)
    return True
```

sha 413ae01ca23568f7b5f698a87e872f456072356b | https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/device.py#L163-L179
5,706 | arraylabs/pymyq | pymyq/device.py | MyQDevice.update | python

```python
async def update(self) -> None:
    """Retrieve updated device state."""
    if self.next_allowed_update is not None and \
            datetime.utcnow() < self.next_allowed_update:
        return

    self.next_allowed_update = None
    await self.api._update_device_state()
    self._device_json = self._device['device_info']
```

sha 413ae01ca23568f7b5f698a87e872f456072356b | https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/device.py#L200-L208
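`close()` (row 5,705) stamps `next_allowed_update` and `update()` honors it, forming a cooldown so a freshly issued command is not overwritten by a stale poll. The pattern reduced to a synchronous sketch with illustrative names:

```python
from datetime import datetime, timedelta

class Throttled:
    def __init__(self):
        self.next_allowed_update = None

    def mark_busy(self, seconds=10):
        # A command was just sent; suppress polling for a while.
        self.next_allowed_update = datetime.utcnow() + timedelta(seconds=seconds)

    def update(self):
        if (self.next_allowed_update is not None
                and datetime.utcnow() < self.next_allowed_update):
            return False  # still inside the cooldown window
        self.next_allowed_update = None
        return True       # safe to fetch fresh state

t = Throttled()
t.mark_busy()
print(t.update())  # False immediately after a command
```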
5,707 | sporestack/bitcash | bitcash/network/services.py | NetworkAPI.get_transaction | python

```python
def get_transaction(cls, txid):
    """Gets the full transaction details.

    :param txid: The transaction id in question.
    :type txid: ``str``
    :raises ConnectionError: If all API services fail.
    :rtype: ``Transaction``
    """
    for api_call in cls.GET_TX_MAIN:
        try:
            return api_call(txid)
        except cls.IGNORED_ERRORS:
            pass

    raise ConnectionError('All APIs are unreachable.')
```

sha c7a18b9d82af98f1000c456dd06131524c260b7f | https://github.com/sporestack/bitcash/blob/c7a18b9d82af98f1000c456dd06131524c260b7f/bitcash/network/services.py#L346-L361
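The failover idiom used here — try each backend in order, swallow only a known error set, raise once all fail — as a standalone sketch with fake backends in place of bitcash's `GET_TX_MAIN` callables:

```python
IGNORED_ERRORS = (ConnectionError, TimeoutError)

def flaky_api(txid):
    raise ConnectionError('backend down')

def working_api(txid):
    return {'txid': txid, 'confirmations': 3}

def get_transaction(txid, backends=(flaky_api, working_api)):
    for api_call in backends:
        try:
            return api_call(txid)
        except IGNORED_ERRORS:
            pass  # fall through to the next service
    raise ConnectionError('All APIs are unreachable.')

print(get_transaction('deadbeef'))  # served by the second backend
```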
5,708 | sporestack/bitcash | bitcash/network/services.py | NetworkAPI.get_tx_amount | python

```python
def get_tx_amount(cls, txid, txindex):
    """Gets the amount of a given transaction output.

    :param txid: The transaction id in question.
    :type txid: ``str``
    :param txindex: The transaction index in question.
    :type txindex: ``int``
    :raises ConnectionError: If all API services fail.
    :rtype: ``Decimal``
    """
    for api_call in cls.GET_TX_AMOUNT_MAIN:
        try:
            return api_call(txid, txindex)
        except cls.IGNORED_ERRORS:
            pass

    raise ConnectionError('All APIs are unreachable.')
```

sha c7a18b9d82af98f1000c456dd06131524c260b7f | https://github.com/sporestack/bitcash/blob/c7a18b9d82af98f1000c456dd06131524c260b7f/bitcash/network/services.py#L383-L400
5,709 | sporestack/bitcash | bitcash/network/fees.py | get_fee | python

```python
def get_fee(speed=FEE_SPEED_MEDIUM):
    """Gets the recommended satoshi per byte fee.

    :param speed: One of: 'fast', 'medium', 'slow'.
    :type speed: ``string``
    :rtype: ``int``
    """
    if speed == FEE_SPEED_FAST:
        return DEFAULT_FEE_FAST
    elif speed == FEE_SPEED_MEDIUM:
        return DEFAULT_FEE_MEDIUM
    elif speed == FEE_SPEED_SLOW:
        return DEFAULT_FEE_SLOW
    else:
        raise ValueError('Invalid speed argument.')
```

sha c7a18b9d82af98f1000c456dd06131524c260b7f | https://github.com/sporestack/bitcash/blob/c7a18b9d82af98f1000c456dd06131524c260b7f/bitcash/network/fees.py#L15-L29
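An equivalent table-driven form of the same dispatch; the numeric fees are placeholders, not bitcash's actual `DEFAULT_FEE_*` values:

```python
FEE_SPEED_FAST, FEE_SPEED_MEDIUM, FEE_SPEED_SLOW = 'fast', 'medium', 'slow'
DEFAULT_FEES = {
    FEE_SPEED_FAST: 2,    # hypothetical satoshi/byte values
    FEE_SPEED_MEDIUM: 1,
    FEE_SPEED_SLOW: 1,
}

def get_fee(speed=FEE_SPEED_MEDIUM):
    try:
        return DEFAULT_FEES[speed]
    except KeyError:
        raise ValueError('Invalid speed argument.')

print(get_fee('fast'))  # 2
```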
5,710 | Groundworkstech/pybfd | setup.py | CustomBuildExtension.find_binutils_libs | python

```python
def find_binutils_libs(self, libdir, lib_ext):
    """Find Binutils libraries."""
    bfd_expr = re.compile("(lib(?:bfd)|(?:opcodes))(.*?)\%s" % lib_ext)
    libs = {}
    for root, dirs, files in os.walk(libdir):
        for f in files:
            m = bfd_expr.search(f)
            if m:
                lib, version = m.groups()
                fp = os.path.join(root, f)
                if version in libs:
                    libs[version].append(fp)
                else:
                    libs[version] = [fp, ]

    # first, search for multiarch files.
    # check if we found more than one version of the multiarch libs.
    multiarch_libs = dict([(v, _l) for v, _l in libs.items()
                           if v.find("multiarch") != -1])
    if len(multiarch_libs) > 1:
        print "[W] Multiple binutils versions detected. Trying to build with default..."
        return multiarch_libs.values()[0]
    if len(multiarch_libs) == 1:
        return multiarch_libs.values()[0]

    # or use the default libs, or .. none
    return libs.get("", [])
```

sha 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/setup.py#L117-L142
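The interpolated pattern `"...\%s" % lib_ext` only escapes the extension's leading dot; `re.escape()` handles arbitrary extensions. A runnable sketch of the same match (note the alternation matches bare `opcodes` as well as `libbfd`):

```python
import re

def lib_pattern(lib_ext):
    # Same capture groups as above: library stem, then version suffix.
    return re.compile(r"(lib(?:bfd)|(?:opcodes))(.*?)%s$" % re.escape(lib_ext))

m = lib_pattern(".so").search("libbfd-2.34-multiarch.so")
print(m.groups())  # ('libbfd', '-2.34-multiarch')
```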
5,711 | Groundworkstech/pybfd | setup.py | CustomBuildExtension.generate_source_files | python

```python
def generate_source_files( self ):
    """
    Genertate source files to be used during the compile process of the
    extension module.

    This is better than just hardcoding the values on python files because
    header definitions might change along differente Binutils versions and
    we'll be able to catch the changes and keep the correct values.
    """
    from pybfd.gen_supported_disasm import get_supported_architectures, \
                                           get_supported_machines, \
                                           generate_supported_architectures_source, \
                                           generate_supported_disassembler_header, \
                                           gen_supported_archs

    #
    # Step 1 . Get the patch to libopcodes and nm utility for further
    #           usage.
    #
    libs_dirs = [os.path.dirname(lib) for lib in self.libs]
    libopcodes = [lib for lib in self.libs if os.path.basename(lib).startswith("libopcodes")][0]

    print "[+] Detecting libbfd/libopcodes compiled architectures"
    if self.with_static_binutils: # use the nm from the binutils distro
        nms = [
            os.path.join( libs_dir, "..", "bin", "nm" ), # default name of nm
            os.path.join( libs_dir, "..", "bin", "gnm" ) # in OSX brew install binutils's nm as gnm.
        ]
        path_to_nm = None
        for nm_fullpath in nms:
            if os.path.isfile( nm_fullpath ):
                path_to_nm = nm_fullpath
                break
        if path_to_nm == None:
            raise Exception("no suitable 'nm' found.")
    else:
        path_to_nm = "nm" # Use the nm in the $PATH (TODO: its assume that nm exists)

    #
    # Step 2 .
    #
    # Prepare the libs to be used as option of the compiler.
    path_to_bfd_header = os.path.join( self.includes, "bfd.h")
    supported_machines = get_supported_machines(path_to_bfd_header)

    supported_archs = get_supported_architectures(
        path_to_nm,
        libopcodes,
        supported_machines,
        self.with_static_binutils == None)

    source_bfd_archs_c = generate_supported_architectures_source(supported_archs, supported_machines)
    print "[+] Generating .C files..."
    gen_file = os.path.join(PACKAGE_DIR, "gen_bfd_archs.c")
    with open(gen_file, "w+") as fd:
        fd.write(source_bfd_archs_c)
    print "[+] %s" % gen_file

    if self.with_static_binutils:
        link_to_libs = [] # ...
    else:
        link_to_libs = [self.prepare_libs_for_cc(os.path.basename(lib)) for lib in self.libs]

    c_compiler = new_compiler()
    objects = c_compiler.compile(
        [os.path.join(PACKAGE_DIR, "gen_bfd_archs.c"), ],
        include_dirs = [self.includes,]
    )
    program = c_compiler.link_executable(
        objects,
        libraries = link_to_libs,
        library_dirs = libs_dirs,
        output_progname = "gen_bfd_archs",
        output_dir = PACKAGE_DIR
    )

    gen_tool = os.path.join(PACKAGE_DIR, "gen_bfd_archs")
    gen_file = os.path.join(self.build_lib, PACKAGE_DIR, "bfd_archs.py")
    cmd = "%s > %s" % (
        gen_tool,
        gen_file )

    print "[+] Generating .py files..."
    # generate C dependent definitions
    os.system( cmd )
    # generate python specific data
    with open(gen_file, "a") as f:
        f.write( gen_supported_archs(supported_archs) )

    # Remove unused files.
    for obj in objects:
        os.unlink(obj)
    os.unlink(gen_tool)

    print "[+] %s" % gen_file

    #
    # Step 3 . Generate header file to be used by the PyBFD extension
    #           modules bfd.c and opcodes.c.
    #
    gen_source = generate_supported_disassembler_header(supported_archs)

    if len(supported_archs) == 0:
        raise Exception("Unable to determine libopcodes' supported " \
                        "platforms from '%s'" % libopcodes)

    print "[+] Generating .h files..."
    gen_file = os.path.join(PACKAGE_DIR, "supported_disasm.h")
    with open(gen_file, "w+") as fd:
        fd.write(gen_source)
    print "[+] %s" % gen_file

    return supported_archs
```

sha 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/setup.py#L152-L265
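The build step above drives the distutils compiler API directly. Stripped of the pybfd context, the calls look like this — file names and libraries are hypothetical, and the C source must already exist for the commands to succeed:

```python
from distutils.ccompiler import new_compiler  # distutils was removed in Python 3.12

cc = new_compiler()
# compile() returns the list of produced object files.
objects = cc.compile(["gen_bfd_archs.c"], include_dirs=["include"])
cc.link_executable(objects, "gen_bfd_archs",
                   libraries=["opcodes", "bfd"],
                   library_dirs=["/usr/lib"])
```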
5,712 | Groundworkstech/pybfd | setup.py | CustomBuildExtension._darwin_current_arch | python

```python
def _darwin_current_arch(self):
    """Add Mac OS X support."""
    if sys.platform == "darwin":
        if sys.maxsize > 2 ** 32: # 64bits.
            return platform.mac_ver()[2] # Both Darwin and Python are 64bits.
        else: # Python 32 bits
            return platform.processor()
```

sha 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/setup.py#L267-L273
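The bitness probe relies on `sys.maxsize`, which is 2\*\*63 − 1 on a 64-bit CPython build and 2\*\*31 − 1 on a 32-bit one:

```python
import sys
print(sys.maxsize > 2 ** 32)  # True on a 64-bit interpreter
```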
5,713 | Groundworkstech/pybfd | pybfd/objdump.py | init_parser | python

```python
def init_parser():
    """Initialize option parser."""
    usage = "Usage: %(prog)s <option(s)> <file(s)>"
    description = " Display information from object <file(s)>.\n"
    description += " At least one of the following switches must be given:"

    #
    # Create an argument parser and an exclusive group.
    #
    parser = ArgumentParser(
        usage=usage, description=description, add_help=False)
    group = parser.add_mutually_exclusive_group()

    #
    # Add objdump parameters.
    #
    group.add_argument("-a", "--archive-headers",
        action=DumpArchieveHeadersAction,
        type=FileType("r"), nargs="+",
        help="Display archive header information")
    group.add_argument("-f", "--file-headers",
        action=DumpFileHeadersAction,
        type=FileType("r"), nargs="+",
        help="Display the contents of the overall file header")
    #group.add_argument("-p", "--private-headers", action="store", type=FileType("r"), nargs="+", help="Display object format specific file header contents")
    #group.add_argument("-P", "--private=OPT,OPT...", action="store", type=FileType("r"), nargs="+", help="Display object format specific contents")
    group.add_argument("-h", "--section-headers",
        action=DumpSectionHeadersAction,
        type=FileType("r"), nargs="+",
        help="Display the contents of the section headers")
    #group.add_argument("-x", "--all-headers", action="store", type=FileType("r"), nargs="+", help="Display the contents of all headers")
    group.add_argument("-d", "--disassemble",
        action=DisassembleSectionAction,
        type=FileType("r"), nargs="+",
        help="Display assembler contents of executable sections")
    group.add_argument("-D", "--disassemble-all",
        action=DisassembleSectionsAction,
        type=FileType("r"), nargs="+",
        help="Display assembler contents of executable sections")
    #group.add_argument("-S", "--source", action="store", type=FileType("r"), nargs="+", help="Intermix source code with disassembly")
    group.add_argument("-s", "--full-contents",
        action=DumpSectionContentAction,
        type=FileType("r"), nargs="+",
        help="Display the full contents of all sections requested")
    #group.add_argument("-g", "--debugging", action="store", type=FileType("r"), nargs="+", help="Display debug information in object file")
    #group.add_argument("-e", "--debugging-tags", action="store", type=FileType("r"), nargs="+", help="Display debug information using ctags style")
    #group.add_argument("-G", "--stabs", action="store", type=FileType("r"), nargs="+", help="Display (in raw form) any STABS info in the file")
    #-W[lLiaprmfFsoRt] or")
    #--dwarf[=rawline,=decodedline,=info,=abbrev,=pubnames,=aranges,=macro,=frames,")
    #          =frames-interp,=str,=loc,=Ranges,=pubtypes,")
    #          =gdb_index,=trace_info,=trace_abbrev,=trace_aranges]")
    #                         Display DWARF info in the file")
    group.add_argument("-t", "--syms",
        action=DumpFileSymbols,
        type=FileType("r"), nargs="+",
        help="Display the contents of the symbol table(s)")
    #-T, --dynamic-syms       Display the contents of the dynamic symbol table")
    #-r, --reloc              Display the relocation entries in the file")
    #-R, --dynamic-reloc      Display the dynamic relocation entries in the file")
    group.add_argument("-v", "--version", action="version",
        version="%%(prog)s %s (%s)" % (__version__, __description__),
        help="Display this program's version number")
    group.add_argument("-i", "--info",
        action=ListFormatAndArchitecturesInformationAction,
        nargs=REMAINDER,
        help="List object formats and architectures supported")
    group.add_argument("-H", "--help", action="store_true", default=False,
        help="Display this information")

    return parser
```

sha 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/objdump.py#L254-L341
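Two argparse details carry this function: `add_help=False` frees `-h` for `--section-headers` (with `-H` taking over help), and the mutually exclusive group makes the top-level switches reject each other. A minimal reproduction of just that scaffolding:

```python
from argparse import ArgumentParser

parser = ArgumentParser(add_help=False)       # reclaim -h for our own flag
group = parser.add_mutually_exclusive_group()
group.add_argument("-h", "--section-headers", action="store_true")
group.add_argument("-H", "--help", action="store_true")

print(parser.parse_args(["-h"]).section_headers)  # True
# parser.parse_args(["-h", "-H"]) would exit with a mutual-exclusion error.
```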
5,714 | Groundworkstech/pybfd | pybfd/objdump.py | DumpSectionContentAction.dump | python

```python
def dump(self, src, length=16, start=0, preffix=""):
    """Dump the specified buffer in hex + ASCII format."""
    FILTER = \
        "".join([(len(repr(chr(x)))==3) and chr(x) or '.' \
            for x in xrange(256)])
    result = list()

    for i in xrange(0, len(src), length):
        s = src[i : i + length]
        hexa = " ".join(["%02X" % ord(x) for x in s])
        printable = s.translate(FILTER)
        result.append("%s%08X %-*s %s\n" % \
            (preffix, start + i, length * 3, hexa, printable))

    return ''.join(result)
```

sha 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/objdump.py#L208-L224
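The helper is Python 2 throughout (`xrange`, `ord()` over `str`, `str.translate` with a 256-character table). A Python 3 counterpart over `bytes`, kept close to the output format above:

```python
def hexdump(src: bytes, length: int = 16, start: int = 0, prefix: str = "") -> str:
    lines = []
    for i in range(0, len(src), length):
        chunk = src[i:i + length]
        hexa = " ".join("%02X" % b for b in chunk)
        # Printable ASCII passes through; everything else becomes a dot.
        printable = "".join(chr(b) if 32 <= b < 127 else "." for b in chunk)
        lines.append("%s%08X %-*s %s" % (prefix, start + i, length * 3, hexa, printable))
    return "\n".join(lines)

print(hexdump(b"pybfd\x00\x01\xff", start=0x1000))
```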
5,715 | Groundworkstech/pybfd | pybfd/section.py | BfdSection.content | python

```python
def content(self):
    """Return the entire section content."""
    return _bfd.section_get_content(self.bfd, self._ptr, 0, self.size)
```

sha 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/section.py#L473-L475
5,716 | Groundworkstech/pybfd | pybfd/section.py | BfdSection.get_content | python

```python
def get_content(self, offset, size):
    """Return the specified number of bytes from the current section."""
    return _bfd.section_get_content(self.bfd, self._ptr, offset, size)
```

sha 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/section.py#L477-L479
5,717 | Groundworkstech/pybfd | pybfd/opcodes.py | main | python

```python
def main():
    """Test case for simple opcode disassembly."""
    test_targets = (
        [ARCH_I386, MACH_I386_I386_INTEL_SYNTAX, ENDIAN_MONO, "\x55\x89\xe5\xE8\xB8\xFF\xFF\xFF", 0x1000],
        [ARCH_I386, MACH_X86_64_INTEL_SYNTAX, ENDIAN_MONO, "\x55\x48\x89\xe5\xE8\xA3\xFF\xFF\xFF", 0x1000],
        [ARCH_ARM, MACH_ARM_2, ENDIAN_LITTLE, "\x04\xe0\x2d\xe5\xED\xFF\xFF\xEB", 0x1000],
        [ARCH_MIPS, MACH_MIPSISA32, ENDIAN_BIG, "\x0C\x10\x00\x97\x00\x00\x00\x00", 0x1000],
        [ARCH_POWERPC, MACH_PPC, ENDIAN_BIG, "\x94\x21\xFF\xE8\x7C\x08\x02\xA6", 0x1000],
        #[ARCH_XTENSA, MACH_XTENSA, ENDIAN_BIG, "\x6C\x10\x06\xD7\x10", 0x1000],
    )

    for target_arch, target_mach, target_endian, binary, address in test_targets:
        #
        # Initialize libopcodes with the current architecture.
        #
        opcodes = Opcodes(target_arch, target_mach, target_endian)

        # Print some architecture-specific information.
        print "\n[+] Architecture %s - Machine %d" % \
            (opcodes.architecture_name, opcodes.machine)

        print "[+] Disassembly:"
        # Print all the disassembled instructions.
        for vma, size, disasm in opcodes.disassemble(binary, address):
            print "0x%X (size=%d)\t %s" % (vma, size, disasm)
```

sha 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/opcodes.py#L186-L211
5,718 | Groundworkstech/pybfd | pybfd/opcodes.py | Opcodes.initialize_bfd | python

```python
def initialize_bfd(self, abfd):
    """Initialize underlying libOpcodes library using BFD."""
    self._ptr = _opcodes.initialize_bfd(abfd._ptr)

    # Already done inside opcodes.c
    #self.architecture = abfd.architecture
    #self.machine = abfd.machine
    #self.endian = abfd.endian

    # force intel syntax
    if self.architecture == ARCH_I386:
        if abfd.arch_size == 32:
            self.machine = MACH_I386_I386_INTEL_SYNTAX
            #abfd.machine = MACH_I386_I386_INTEL_SYNTAX
        elif abfd.arch_size == 64:
            self.machine = MACH_X86_64_INTEL_SYNTAX
```

sha 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/opcodes.py#L84-L99
5,719 | Groundworkstech/pybfd | pybfd/opcodes.py | Opcodes.initialize_non_bfd | python

```python
def initialize_non_bfd(self, architecture=None, machine=None,
                       endian=ENDIAN_UNKNOWN):
    """Initialize underlying libOpcodes library not using BFD."""
    if None in [architecture, machine, endian]:
        return

    self.architecture = architecture
    self.machine = machine
    self.endian = endian
```

sha 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/opcodes.py#L102-L111
5,720 | Groundworkstech/pybfd | pybfd/opcodes.py | Opcodes.initialize_smart_disassemble | def initialize_smart_disassemble(self, data, start_address=0):
"""
Set the binary buffer to disassemble with other related information
ready for an instruction by instruction disassembly session.
"""
_opcodes.initialize_smart_disassemble(
self._ptr, data, start_address) | python | def initialize_smart_disassemble(self, data, start_address=0):
"""
Set the binary buffer to disassemble with other related information
ready for an instruction by instruction disassembly session.
"""
_opcodes.initialize_smart_disassemble(
self._ptr, data, start_address) | [
"def",
"initialize_smart_disassemble",
"(",
"self",
",",
"data",
",",
"start_address",
"=",
"0",
")",
":",
"_opcodes",
".",
"initialize_smart_disassemble",
"(",
"self",
".",
"_ptr",
",",
"data",
",",
"start_address",
")"
] | Set the binary buffer to disassemble with other related information
ready for an instruction by instruction disassembly session. | [
"Set",
"the",
"binary",
"buffer",
"to",
"disassemble",
"with",
"other",
"related",
"information",
"ready",
"for",
"an",
"instruction",
"by",
"instruction",
"disassembly",
"session",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/opcodes.py#L113-L120 |
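A sketch of staging a buffer with the method above, continuing the Opcodes instance from the earlier sketch; the four bytes are a hand-assembled x86 prologue and the base address is arbitrary.

# Nothing is decoded yet; the buffer is only staged for the
# per-instruction walk driven by a callback (see the next record).
code = "\x55\x89\xe5\xc3"    # push ebp; mov ebp, esp; ret
opcodes.initialize_smart_disassemble(code, start_address=0x1000)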
5,721 | Groundworkstech/pybfd | pybfd/opcodes.py | Opcodes.print_single_instruction_callback | def print_single_instruction_callback(self, address, size, branch_delay_insn,
insn_type, target, target2, disassembly):
"""
        Callback on each disassembled instruction to print its information.
"""
print "0x%X SZ=%d BD=%d IT=%d\t%s" % \
(address, size, branch_delay_insn, insn_type, disassembly)
return PYBFD_DISASM_CONTINUE | python | def print_single_instruction_callback(self, address, size, branch_delay_insn,
insn_type, target, target2, disassembly):
"""
        Callback on each disassembled instruction to print its information.
"""
print "0x%X SZ=%d BD=%d IT=%d\t%s" % \
(address, size, branch_delay_insn, insn_type, disassembly)
return PYBFD_DISASM_CONTINUE | [
"def",
"print_single_instruction_callback",
"(",
"self",
",",
"address",
",",
"size",
",",
"branch_delay_insn",
",",
"insn_type",
",",
"target",
",",
"target2",
",",
"disassembly",
")",
":",
"print",
"\"0x%X SZ=%d BD=%d IT=%d\\t%s\"",
"%",
"(",
"address",
",",
"size",
",",
"branch_delay_insn",
",",
"insn_type",
",",
"disassembly",
")",
"return",
"PYBFD_DISASM_CONTINUE"
] | Callback on each disassembled instruction to print its information. | [
"Callack",
"on",
"each",
"disassembled",
"instruction",
"to",
"print",
"its",
"information",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/opcodes.py#L131-L140 |
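A sketch of a caller-defined callback with the same signature as the record above, stopping at the first ret. Only PYBFD_DISASM_CONTINUE appears in the record; PYBFD_DISASM_STOP and the start_smart_disassemble() registration call are assumptions about the counterpart API.

from pybfd.opcodes import PYBFD_DISASM_CONTINUE, PYBFD_DISASM_STOP  # STOP assumed

def stop_at_ret(address, size, branch_delay_insn, insn_type,
                target, target2, disassembly):
    print "0x%X\t%s" % (address, disassembly)
    if disassembly.startswith("ret"):
        return PYBFD_DISASM_STOP     # assumed halt sentinel
    return PYBFD_DISASM_CONTINUE

opcodes.start_smart_disassemble(0, stop_at_ret)  # assumed entry point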
5,722 | Groundworkstech/pybfd | pybfd/opcodes.py | Opcodes.disassemble | def disassemble(self, data, start_address=0):
"""
Return a list containing the virtual memory address, instruction length
and disassembly code for the given binary buffer.
"""
return _opcodes.disassemble(self._ptr, data, start_address) | python | def disassemble(self, data, start_address=0):
"""
Return a list containing the virtual memory address, instruction length
and disassembly code for the given binary buffer.
"""
return _opcodes.disassemble(self._ptr, data, start_address) | [
"def",
"disassemble",
"(",
"self",
",",
"data",
",",
"start_address",
"=",
"0",
")",
":",
"return",
"_opcodes",
".",
"disassemble",
"(",
"self",
".",
"_ptr",
",",
"data",
",",
"start_address",
")"
] | Return a list containing the virtual memory address, instruction length
and disassembly code for the given binary buffer. | [
"Return",
"a",
"list",
"containing",
"the",
"virtual",
"memory",
"address",
"instruction",
"length",
"and",
"disassembly",
"code",
"for",
"the",
"given",
"binary",
"buffer",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/opcodes.py#L142-L148 |
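The one-shot counterpart to the smart session, per the record above. Unpacking each result as (address, length, text) is an assumption consistent with the docstring's ordering.

for vma, length, text in opcodes.disassemble("\x55\x89\xe5\xc3",
                                             start_address=0x400000):
    print "0x%X (%d bytes)\t%s" % (vma, length, text)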
5,723 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.open | def open(self, _file, target=DEFAULT_TARGET):
"""
Open the existing file for reading.
        @param _file : A filename or file descriptor.
        @param target: A user-specified BFD target name.
@return : None
"""
# Close any existing BFD structure instance.
self.close()
#
# STEP 1. Open the BFD pointer.
#
# Determine if the user passed a file-descriptor or a _file and
# proceed accordingly.
if type(_file) is FileType:
# The user specified a file descriptor.
filename = _file.name
if islink(filename):
raise BfdException("Symlinks file-descriptors are not valid")
try:
self._ptr = _bfd.fdopenr(filename, target, dup(_file.fileno()))
except Exception, err:
raise BfdException(
"Unable to open file-descriptor %s : %s" % (filename, err))
elif type(_file) is StringType:
            # The user specified a filename so first check if the file exists.
filename = _file
try:
with open(_file): pass
except IOError:
raise BfdException("File %s does not exist." % filename)
#
# Proceed to open the specified file and create a new BFD.
#
try:
self._ptr = _bfd.openr(filename, target)
except (TypeError, IOError), err:
raise BfdException(
"Unable to open file %s : %s" % (filename, err))
elif type(_file) is IntType:
# The user specified an already-open BFD pointer so we avoid any
# further open operation and move on to file format recognition.
self._ptr = _file
else:
raise BfdException(
"Invalid file type specified for open operation (%r)" % _file)
#
# STEP 2. Determine file format of the BFD.
#
# Now that the BFD is open we'll proceed to determine its file format.
# We'll use the objdump logic to determine it and raise an error in
# case we were unable to get it right.
#
try:
            # Try opening it as an archive and if it succeeds then check
# subfiles.
if _bfd.check_format(self._ptr, BfdFormat.ARCHIVE):
# Set current format and store the inner file list.
self.file_format = BfdFormat.ARCHIVE
self.__populate_archive_files()
else:
                # DO NOT USE bfd_check_format_matches() because it's not tested.
                # An implementation example is in objdump.c at function
# display_bfd().
if _bfd.check_format(self._ptr, BfdFormat.OBJECT):
self.file_format = BfdFormat.OBJECT
elif _bfd.check_format(self._ptr, BfdFormat.CORE):
self.file_format = BfdFormat.CORE
else:
pass
raise BfdException(_bfd.get_last_error_message())
except TypeError, err:
raise BfdException(
"Unable to initialize file format : %s" % err)
#
# STEP 3. Extract inner sections and symbolic information.
#
if self._ptr is not None:
# If the file is a valid BFD file format but not an archive then
# get its sections and symbolic information (if any).
if self.file_format in [BfdFormat.OBJECT, BfdFormat.CORE]:
self.__populate_sections()
self.__populate_symbols() | python | def open(self, _file, target=DEFAULT_TARGET):
"""
Open the existing file for reading.
        @param _file : A filename or file descriptor.
        @param target: A user-specified BFD target name.
@return : None
"""
# Close any existing BFD structure instance.
self.close()
#
# STEP 1. Open the BFD pointer.
#
# Determine if the user passed a file-descriptor or a _file and
# proceed accordingly.
if type(_file) is FileType:
# The user specified a file descriptor.
filename = _file.name
if islink(filename):
raise BfdException("Symlinks file-descriptors are not valid")
try:
self._ptr = _bfd.fdopenr(filename, target, dup(_file.fileno()))
except Exception, err:
raise BfdException(
"Unable to open file-descriptor %s : %s" % (filename, err))
elif type(_file) is StringType:
            # The user specified a filename so first check if the file exists.
filename = _file
try:
with open(_file): pass
except IOError:
raise BfdException("File %s does not exist." % filename)
#
# Proceed to open the specified file and create a new BFD.
#
try:
self._ptr = _bfd.openr(filename, target)
except (TypeError, IOError), err:
raise BfdException(
"Unable to open file %s : %s" % (filename, err))
elif type(_file) is IntType:
# The user specified an already-open BFD pointer so we avoid any
# further open operation and move on to file format recognition.
self._ptr = _file
else:
raise BfdException(
"Invalid file type specified for open operation (%r)" % _file)
#
# STEP 2. Determine file format of the BFD.
#
# Now that the BFD is open we'll proceed to determine its file format.
# We'll use the objdump logic to determine it and raise an error in
# case we were unable to get it right.
#
try:
            # Try opening it as an archive and if it succeeds then check
# subfiles.
if _bfd.check_format(self._ptr, BfdFormat.ARCHIVE):
# Set current format and store the inner file list.
self.file_format = BfdFormat.ARCHIVE
self.__populate_archive_files()
else:
                # DO NOT USE bfd_check_format_matches() because it's not tested.
                # An implementation example is in objdump.c at function
# display_bfd().
if _bfd.check_format(self._ptr, BfdFormat.OBJECT):
self.file_format = BfdFormat.OBJECT
elif _bfd.check_format(self._ptr, BfdFormat.CORE):
self.file_format = BfdFormat.CORE
else:
pass
raise BfdException(_bfd.get_last_error_message())
except TypeError, err:
raise BfdException(
"Unable to initialize file format : %s" % err)
#
# STEP 3. Extract inner sections and symbolic information.
#
if self._ptr is not None:
# If the file is a valid BFD file format but not an archive then
# get its sections and symbolic information (if any).
if self.file_format in [BfdFormat.OBJECT, BfdFormat.CORE]:
self.__populate_sections()
self.__populate_symbols() | [
"def",
"open",
"(",
"self",
",",
"_file",
",",
"target",
"=",
"DEFAULT_TARGET",
")",
":",
"# Close any existing BFD structure instance. ",
"self",
".",
"close",
"(",
")",
"#",
"# STEP 1. Open the BFD pointer.",
"#",
"# Determine if the user passed a file-descriptor or a _file and",
"# proceed accordingly.",
"if",
"type",
"(",
"_file",
")",
"is",
"FileType",
":",
"# The user specified a file descriptor.",
"filename",
"=",
"_file",
".",
"name",
"if",
"islink",
"(",
"filename",
")",
":",
"raise",
"BfdException",
"(",
"\"Symlinks file-descriptors are not valid\"",
")",
"try",
":",
"self",
".",
"_ptr",
"=",
"_bfd",
".",
"fdopenr",
"(",
"filename",
",",
"target",
",",
"dup",
"(",
"_file",
".",
"fileno",
"(",
")",
")",
")",
"except",
"Exception",
",",
"err",
":",
"raise",
"BfdException",
"(",
"\"Unable to open file-descriptor %s : %s\"",
"%",
"(",
"filename",
",",
"err",
")",
")",
"elif",
"type",
"(",
"_file",
")",
"is",
"StringType",
":",
"# The user spcified a filaname so first check if file exists.",
"filename",
"=",
"_file",
"try",
":",
"with",
"open",
"(",
"_file",
")",
":",
"pass",
"except",
"IOError",
":",
"raise",
"BfdException",
"(",
"\"File %s does not exist.\"",
"%",
"filename",
")",
"#",
"# Proceed to open the specified file and create a new BFD.",
"#",
"try",
":",
"self",
".",
"_ptr",
"=",
"_bfd",
".",
"openr",
"(",
"filename",
",",
"target",
")",
"except",
"(",
"TypeError",
",",
"IOError",
")",
",",
"err",
":",
"raise",
"BfdException",
"(",
"\"Unable to open file %s : %s\"",
"%",
"(",
"filename",
",",
"err",
")",
")",
"elif",
"type",
"(",
"_file",
")",
"is",
"IntType",
":",
"# The user specified an already-open BFD pointer so we avoid any",
"# further open operation and move on to file format recognition.",
"self",
".",
"_ptr",
"=",
"_file",
"else",
":",
"raise",
"BfdException",
"(",
"\"Invalid file type specified for open operation (%r)\"",
"%",
"_file",
")",
"#",
"# STEP 2. Determine file format of the BFD.",
"#",
"# Now that the BFD is open we'll proceed to determine its file format.",
"# We'll use the objdump logic to determine it and raise an error in",
"# case we were unable to get it right.",
"#",
"try",
":",
"# Type opening it as an archieve and if it success then check",
"# subfiles.",
"if",
"_bfd",
".",
"check_format",
"(",
"self",
".",
"_ptr",
",",
"BfdFormat",
".",
"ARCHIVE",
")",
":",
"# Set current format and store the inner file list.",
"self",
".",
"file_format",
"=",
"BfdFormat",
".",
"ARCHIVE",
"self",
".",
"__populate_archive_files",
"(",
")",
"else",
":",
"# DO NOT USE bfd_check_format_matches() becuase its not tested.",
"# An implementation example if on objdump.c at function",
"# display_bfd().",
"if",
"_bfd",
".",
"check_format",
"(",
"self",
".",
"_ptr",
",",
"BfdFormat",
".",
"OBJECT",
")",
":",
"self",
".",
"file_format",
"=",
"BfdFormat",
".",
"OBJECT",
"elif",
"_bfd",
".",
"check_format",
"(",
"self",
".",
"_ptr",
",",
"BfdFormat",
".",
"CORE",
")",
":",
"self",
".",
"file_format",
"=",
"BfdFormat",
".",
"CORE",
"else",
":",
"pass",
"raise",
"BfdException",
"(",
"_bfd",
".",
"get_last_error_message",
"(",
")",
")",
"except",
"TypeError",
",",
"err",
":",
"raise",
"BfdException",
"(",
"\"Unable to initialize file format : %s\"",
"%",
"err",
")",
"#",
"# STEP 3. Extract inner sections and symbolic information.",
"#",
"if",
"self",
".",
"_ptr",
"is",
"not",
"None",
":",
"# If the file is a valid BFD file format but not an archive then",
"# get its sections and symbolic information (if any).",
"if",
"self",
".",
"file_format",
"in",
"[",
"BfdFormat",
".",
"OBJECT",
",",
"BfdFormat",
".",
"CORE",
"]",
":",
"self",
".",
"__populate_sections",
"(",
")",
"self",
".",
"__populate_symbols",
"(",
")"
] | Open the existing file for reading.
        @param _file : A filename or file descriptor.
        @param target: A user-specified BFD target name.
@return : None | [
"Open",
"the",
"existing",
"file",
"for",
"reading",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L118-L215 |
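A caller-side sketch of the three-step flow above: open a path, then branch on the detected format. Importing BfdFormat alongside Bfd, and Bfd(path) calling open() from its constructor, are assumptions.

from pybfd.bfd import Bfd, BfdFormat   # BfdFormat import path assumed

bfd = Bfd("/bin/ls")
if bfd.file_format == BfdFormat.ARCHIVE:
    for inner in bfd.archive_files:        # filled in by STEP 2
        print inner.filename
elif bfd.file_format in (BfdFormat.OBJECT, BfdFormat.CORE):
    print bfd.sections.keys()              # filled in by STEP 3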
5,724 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.__populate_archive_files | def __populate_archive_files(self):
"""Store the list of files inside an archive file."""
self.archive_files = []
for _ptr in _bfd.archive_list_files(self._ptr):
try:
self.archive_files.append(Bfd(_ptr))
except BfdException, err:
#print "Error populating archive file list : %s" % err
#print_exc()
pass | python | def __populate_archive_files(self):
"""Store the list of files inside an archive file."""
self.archive_files = []
for _ptr in _bfd.archive_list_files(self._ptr):
try:
self.archive_files.append(Bfd(_ptr))
except BfdException, err:
#print "Error populating archive file list : %s" % err
#print_exc()
pass | [
"def",
"__populate_archive_files",
"(",
"self",
")",
":",
"self",
".",
"archive_files",
"=",
"[",
"]",
"for",
"_ptr",
"in",
"_bfd",
".",
"archive_list_files",
"(",
"self",
".",
"_ptr",
")",
":",
"try",
":",
"self",
".",
"archive_files",
".",
"append",
"(",
"Bfd",
"(",
"_ptr",
")",
")",
"except",
"BfdException",
",",
"err",
":",
"#print \"Error populating archive file list : %s\" % err",
"#print_exc()",
"pass"
] | Store the list of files inside an archive file. | [
"Store",
"the",
"list",
"of",
"files",
"inside",
"an",
"archive",
"file",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L217-L226 |
5,725 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.archive_filenames | def archive_filenames(self):
"""Return the list of files inside an archive file."""
try:
return _bfd.archive_list_filenames(self._ptr)
except TypeError, err:
raise BfdException(err) | python | def archive_filenames(self):
"""Return the list of files inside an archive file."""
try:
return _bfd.archive_list_filenames(self._ptr)
except TypeError, err:
raise BfdException(err) | [
"def",
"archive_filenames",
"(",
"self",
")",
":",
"try",
":",
"return",
"_bfd",
".",
"archive_list_filenames",
"(",
"self",
".",
"_ptr",
")",
"except",
"TypeError",
",",
"err",
":",
"raise",
"BfdException",
"(",
"err",
")"
] | Return the list of files inside an archive file. | [
"Return",
"the",
"list",
"of",
"files",
"inside",
"an",
"archive",
"file",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L239-L244 |
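A sketch covering the two archive records above: archive_filenames returns plain names cheaply, while archive_files holds full Bfd wrappers built by __populate_archive_files(). The .a path is hypothetical; imports as in the earlier sketches.

bfd = Bfd("/usr/lib/libexample.a")     # hypothetical static archive
print bfd.archive_filenames            # e.g. ['a.o', 'b.o']
for member in bfd.archive_files:       # one Bfd instance per member
    print member.filename, member.file_format_name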
5,726 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.file_format_name | def file_format_name(self):
"""Return the current format name of the open bdf."""
try:
return BfdFormatNamesLong[self.file_format]
except IndexError, err:
raise BfdException("Invalid format specified (%d)" % self.file_format) | python | def file_format_name(self):
"""Return the current format name of the open bdf."""
try:
return BfdFormatNamesLong[self.file_format]
except IndexError, err:
raise BfdException("Invalid format specified (%d)" % self.file_format) | [
"def",
"file_format_name",
"(",
"self",
")",
":",
"try",
":",
"return",
"BfdFormatNamesLong",
"[",
"self",
".",
"file_format",
"]",
"except",
"IndexError",
",",
"err",
":",
"raise",
"BfdException",
"(",
"\"Invalid format specified (%d)\"",
"%",
"self",
".",
"file_format",
")"
] | Return the current format name of the open bfd. | [
"Return",
"the",
"current",
"format",
"name",
"of",
"the",
"open",
"bdf",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L272-L277 |
5,727 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.__populate_sections | def __populate_sections(self):
"""Get a list of the section present in the bfd to populate our
internal list.
"""
if not self._ptr:
raise BfdException("BFD not initialized")
for section in _bfd.get_sections_list(self._ptr):
try:
bfd_section = BfdSection(self._ptr, section)
self._sections[bfd_section.name] = bfd_section
except BfdSectionException, err:
#print "Exception during section pasing : %s" % err
pass | python | def __populate_sections(self):
"""Get a list of the section present in the bfd to populate our
internal list.
"""
if not self._ptr:
raise BfdException("BFD not initialized")
for section in _bfd.get_sections_list(self._ptr):
try:
bfd_section = BfdSection(self._ptr, section)
self._sections[bfd_section.name] = bfd_section
except BfdSectionException, err:
#print "Exception during section pasing : %s" % err
pass | [
"def",
"__populate_sections",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_ptr",
":",
"raise",
"BfdException",
"(",
"\"BFD not initialized\"",
")",
"for",
"section",
"in",
"_bfd",
".",
"get_sections_list",
"(",
"self",
".",
"_ptr",
")",
":",
"try",
":",
"bfd_section",
"=",
"BfdSection",
"(",
"self",
".",
"_ptr",
",",
"section",
")",
"self",
".",
"_sections",
"[",
"bfd_section",
".",
"name",
"]",
"=",
"bfd_section",
"except",
"BfdSectionException",
",",
"err",
":",
"#print \"Exception during section pasing : %s\" % err",
"pass"
] | Get a list of the sections present in the bfd to populate our
internal list. | [
"Get",
"a",
"list",
"of",
"the",
"section",
"present",
"in",
"the",
"bfd",
"to",
"populate",
"our",
"internal",
"list",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L295-L309 |
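Once populated, sections maps section names to BfdSection objects whose vma and index attributes are used by the symbol code in the next record. A sketch, assuming an ELF input with a .text section and the imports from the earlier sketches:

bfd = Bfd("/bin/ls")
text = bfd.sections[".text"]       # KeyError if the section is absent
print hex(text.vma), text.index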
5,728 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.__populate_symbols | def __populate_symbols(self):
"""Get a list of the symbols present in the bfd to populate our
internal list.
"""
if not self._ptr:
raise BfdException("BFD not initialized")
try:
symbols = _bfd.get_symbols(self._ptr)
# Temporary dictionary ordered by section index. This is necessary
            # because the symbolic information returns the section index it belongs
# to.
sections = {}
for section in self.sections:
sections[self.sections[section].index] = self.sections[section]
for symbol in symbols:
# Extract each field for further processing.
symbol_section_index = symbol[0]
symbol_name = symbol[1]
symbol_value = symbol[2]
symbol_flags = symbol[3]
                # Decompose the flags bitmask into a tuple of matching flags.
symbol_flags = tuple(
[f for f in SYMBOL_FLAGS_LIST if symbol_flags & f == f] )
# Create a new symbol instance to hold symbolic information.
new_symbol = Symbol(
sections.get(symbol_section_index, None),
symbol_name,
symbol_value,
symbol_flags)
if new_symbol.section is None:
continue
symbol_address = new_symbol.section.vma + new_symbol.value
#if new_symbol.flags in \
# [SymbolFlags.LOCAL , SymbolFlags.GLOBAL , SymbolFlags.EXPORT]:
# symbol_address = new_symbol.section.vma + new_symbol.value
#else:
# # TODO: Enhance this!
# # Discard any other symbol information.
# continue
self._symbols[symbol_address] = new_symbol
del sections
except BfdSectionException, err:
raise BfdException("Exception on symbolic ifnormation parsing.") | python | def __populate_symbols(self):
"""Get a list of the symbols present in the bfd to populate our
internal list.
"""
if not self._ptr:
raise BfdException("BFD not initialized")
try:
symbols = _bfd.get_symbols(self._ptr)
# Temporary dictionary ordered by section index. This is necessary
            # because the symbolic information returns the section index it belongs
# to.
sections = {}
for section in self.sections:
sections[self.sections[section].index] = self.sections[section]
for symbol in symbols:
# Extract each field for further processing.
symbol_section_index = symbol[0]
symbol_name = symbol[1]
symbol_value = symbol[2]
symbol_flags = symbol[3]
                # Decompose the flags bitmask into a tuple of matching flags.
symbol_flags = tuple(
[f for f in SYMBOL_FLAGS_LIST if symbol_flags & f == f] )
# Create a new symbol instance to hold symbolic information.
new_symbol = Symbol(
sections.get(symbol_section_index, None),
symbol_name,
symbol_value,
symbol_flags)
if new_symbol.section is None:
continue
symbol_address = new_symbol.section.vma + new_symbol.value
#if new_symbol.flags in \
# [SymbolFlags.LOCAL , SymbolFlags.GLOBAL , SymbolFlags.EXPORT]:
# symbol_address = new_symbol.section.vma + new_symbol.value
#else:
# # TODO: Enhance this!
# # Discard any other symbol information.
# continue
self._symbols[symbol_address] = new_symbol
del sections
except BfdSectionException, err:
raise BfdException("Exception on symbolic ifnormation parsing.") | [
"def",
"__populate_symbols",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_ptr",
":",
"raise",
"BfdException",
"(",
"\"BFD not initialized\"",
")",
"try",
":",
"symbols",
"=",
"_bfd",
".",
"get_symbols",
"(",
"self",
".",
"_ptr",
")",
"# Temporary dictionary ordered by section index. This is necessary",
"# because the symbolic information return the section index it belongs",
"# to.",
"sections",
"=",
"{",
"}",
"for",
"section",
"in",
"self",
".",
"sections",
":",
"sections",
"[",
"self",
".",
"sections",
"[",
"section",
"]",
".",
"index",
"]",
"=",
"self",
".",
"sections",
"[",
"section",
"]",
"for",
"symbol",
"in",
"symbols",
":",
"# Extract each field for further processing.",
"symbol_section_index",
"=",
"symbol",
"[",
"0",
"]",
"symbol_name",
"=",
"symbol",
"[",
"1",
"]",
"symbol_value",
"=",
"symbol",
"[",
"2",
"]",
"symbol_flags",
"=",
"symbol",
"[",
"3",
"]",
"# Get the effective address of the current symbol.",
"symbol_flags",
"=",
"tuple",
"(",
"[",
"f",
"for",
"f",
"in",
"SYMBOL_FLAGS_LIST",
"if",
"symbol_flags",
"&",
"f",
"==",
"f",
"]",
")",
"# Create a new symbol instance to hold symbolic information.",
"new_symbol",
"=",
"Symbol",
"(",
"sections",
".",
"get",
"(",
"symbol_section_index",
",",
"None",
")",
",",
"symbol_name",
",",
"symbol_value",
",",
"symbol_flags",
")",
"if",
"new_symbol",
".",
"section",
"is",
"None",
":",
"continue",
"symbol_address",
"=",
"new_symbol",
".",
"section",
".",
"vma",
"+",
"new_symbol",
".",
"value",
"#if new_symbol.flags in \\",
"# [SymbolFlags.LOCAL , SymbolFlags.GLOBAL , SymbolFlags.EXPORT]:",
"# symbol_address = new_symbol.section.vma + new_symbol.value",
"#else:",
"# # TODO: Enhance this!",
"# # Discard any other symbol information.",
"# continue",
"self",
".",
"_symbols",
"[",
"symbol_address",
"]",
"=",
"new_symbol",
"del",
"sections",
"except",
"BfdSectionException",
",",
"err",
":",
"raise",
"BfdException",
"(",
"\"Exception on symbolic ifnormation parsing.\"",
")"
] | Get a list of the symbols present in the bfd to populate our
internal list. | [
"Get",
"a",
"list",
"of",
"the",
"symbols",
"present",
"in",
"the",
"bfd",
"to",
"populate",
"our",
"internal",
"list",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L311-L362 |
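The flag handling above keeps every flag whose bits are fully contained in the raw bitmask. A self-contained illustration with placeholder flag values:

SYMBOL_FLAGS = [0x01, 0x02, 0x04, 0x08]    # placeholder constants
raw = 0x05                                 # pretend raw symbol flags
decomposed = tuple(f for f in SYMBOL_FLAGS if raw & f == f)
print decomposed                           # (1, 4)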
5,729 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.close | def close(self):
"""Close any existing BFD structure before open a new one."""
if self._ptr:
#try:
# # Release inner BFD files in case we're an archive BFD.
# if self.is_archive:
# [inner_bfd.close() for inner_bfd in self.archive_files]
#except TypeError, err:
# pass
try:
_bfd.close(self._ptr)
except TypeError, err:
raise BfdException("Unable to close bfd (%s)" % err)
finally:
self._ptr = None | python | def close(self):
"""Close any existing BFD structure before open a new one."""
if self._ptr:
#try:
# # Release inner BFD files in case we're an archive BFD.
# if self.is_archive:
# [inner_bfd.close() for inner_bfd in self.archive_files]
#except TypeError, err:
# pass
try:
_bfd.close(self._ptr)
except TypeError, err:
raise BfdException("Unable to close bfd (%s)" % err)
finally:
self._ptr = None | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"_ptr",
":",
"#try:",
"# # Release inner BFD files in case we're an archive BFD.",
"# if self.is_archive:",
"# [inner_bfd.close() for inner_bfd in self.archive_files]",
"#except TypeError, err:",
"# pass",
"try",
":",
"_bfd",
".",
"close",
"(",
"self",
".",
"_ptr",
")",
"except",
"TypeError",
",",
"err",
":",
"raise",
"BfdException",
"(",
"\"Unable to close bfd (%s)\"",
"%",
"err",
")",
"finally",
":",
"self",
".",
"_ptr",
"=",
"None"
] | Close any existing BFD structure before opening a new one. | [
"Close",
"any",
"existing",
"BFD",
"structure",
"before",
"open",
"a",
"new",
"one",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L364-L379 |
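Since close() nulls _ptr in a finally block, a second call is a harmless no-op, which makes the usual try/finally pairing safe. A sketch, with imports as in the earlier sketches:

bfd = Bfd("/bin/ls")
try:
    print bfd.symbols_count
finally:
    bfd.close()     # _ptr is None afterwards; repeat calls do nothing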
5,730 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.filename | def filename(self):
"""Return the filename of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.FILENAME) | python | def filename(self):
"""Return the filename of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.FILENAME) | [
"def",
"filename",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_ptr",
":",
"raise",
"BfdException",
"(",
"\"BFD not initialized\"",
")",
"return",
"_bfd",
".",
"get_bfd_attribute",
"(",
"self",
".",
"_ptr",
",",
"BfdAttributes",
".",
"FILENAME",
")"
] | Return the filename of the BFD file being processed. | [
"Return",
"the",
"filename",
"of",
"the",
"BFD",
"file",
"being",
"processed",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L390-L395 |
5,731 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.cacheable | def cacheable(self):
"""Return the cacheable attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.CACHEABLE) | python | def cacheable(self):
"""Return the cacheable attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.CACHEABLE) | [
"def",
"cacheable",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_ptr",
":",
"raise",
"BfdException",
"(",
"\"BFD not initialized\"",
")",
"return",
"_bfd",
".",
"get_bfd_attribute",
"(",
"self",
".",
"_ptr",
",",
"BfdAttributes",
".",
"CACHEABLE",
")"
] | Return the cacheable attribute of the BFD file being processed. | [
"Return",
"the",
"cacheable",
"attribute",
"of",
"the",
"BFD",
"file",
"being",
"processed",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L398-L403 |
5,732 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.format | def format(self):
"""Return the format attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.FORMAT) | python | def format(self):
"""Return the format attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.FORMAT) | [
"def",
"format",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_ptr",
":",
"raise",
"BfdException",
"(",
"\"BFD not initialized\"",
")",
"return",
"_bfd",
".",
"get_bfd_attribute",
"(",
"self",
".",
"_ptr",
",",
"BfdAttributes",
".",
"FORMAT",
")"
] | Return the format attribute of the BFD file being processed. | [
"Return",
"the",
"format",
"attribute",
"of",
"the",
"BFD",
"file",
"being",
"processed",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L406-L411 |
5,733 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.target | def target(self):
"""Return the target of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.TARGET) | python | def target(self):
"""Return the target of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.TARGET) | [
"def",
"target",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_ptr",
":",
"raise",
"BfdException",
"(",
"\"BFD not initialized\"",
")",
"return",
"_bfd",
".",
"get_bfd_attribute",
"(",
"self",
".",
"_ptr",
",",
"BfdAttributes",
".",
"TARGET",
")"
] | Return the target of the BFD file being processed. | [
"Return",
"the",
"target",
"of",
"the",
"BFD",
"file",
"being",
"processed",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L414-L419 |
5,734 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.machine | def machine(self):
"""Return the flavour attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.FLAVOUR) | python | def machine(self):
"""Return the flavour attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.FLAVOUR) | [
"def",
"machine",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_ptr",
":",
"raise",
"BfdException",
"(",
"\"BFD not initialized\"",
")",
"return",
"_bfd",
".",
"get_bfd_attribute",
"(",
"self",
".",
"_ptr",
",",
"BfdAttributes",
".",
"FLAVOUR",
")"
] | Return the flavour attribute of the BFD file being processed. | [
"Return",
"the",
"flavour",
"attribute",
"of",
"the",
"BFD",
"file",
"being",
"processed",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L435-L440 |
5,735 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.family_coff | def family_coff(self):
"""Return the family_coff attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.FAMILY_COFF) | python | def family_coff(self):
"""Return the family_coff attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.FAMILY_COFF) | [
"def",
"family_coff",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_ptr",
":",
"raise",
"BfdException",
"(",
"\"BFD not initialized\"",
")",
"return",
"_bfd",
".",
"get_bfd_attribute",
"(",
"self",
".",
"_ptr",
",",
"BfdAttributes",
".",
"FAMILY_COFF",
")"
] | Return the family_coff attribute of the BFD file being processed. | [
"Return",
"the",
"family_coff",
"attribute",
"of",
"the",
"BFD",
"file",
"being",
"processed",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L450-L455 |
5,736 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.big_endian | def big_endian(self):
"""Return the big endian attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.IS_BIG_ENDIAN) | python | def big_endian(self):
"""Return the big endian attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.IS_BIG_ENDIAN) | [
"def",
"big_endian",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_ptr",
":",
"raise",
"BfdException",
"(",
"\"BFD not initialized\"",
")",
"return",
"_bfd",
".",
"get_bfd_attribute",
"(",
"self",
".",
"_ptr",
",",
"BfdAttributes",
".",
"IS_BIG_ENDIAN",
")"
] | Return the big endian attribute of the BFD file being processed. | [
"Return",
"the",
"big",
"endian",
"attribute",
"of",
"the",
"BFD",
"file",
"being",
"processed",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L466-L471 |
5,737 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.little_endian | def little_endian(self):
"""
Return the little_endian attribute of the BFD file being processed.
"""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.IS_LITTLE_ENDIAN) | python | def little_endian(self):
"""
Return the little_endian attribute of the BFD file being processed.
"""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.IS_LITTLE_ENDIAN) | [
"def",
"little_endian",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_ptr",
":",
"raise",
"BfdException",
"(",
"\"BFD not initialized\"",
")",
"return",
"_bfd",
".",
"get_bfd_attribute",
"(",
"self",
".",
"_ptr",
",",
"BfdAttributes",
".",
"IS_LITTLE_ENDIAN",
")"
] | Return the little_endian attribute of the BFD file being processed. | [
"Return",
"the",
"little_endian",
"attribute",
"of",
"the",
"BFD",
"file",
"being",
"processed",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L474-L481 |
5,738 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.header_big_endian | def header_big_endian(self):
"""
Return the header_big_endian attribute of the BFD file being processed.
"""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(
self._ptr, BfdAttributes.HEADER_BIG_ENDIAN) | python | def header_big_endian(self):
"""
Return the header_big_endian attribute of the BFD file being processed.
"""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(
self._ptr, BfdAttributes.HEADER_BIG_ENDIAN) | [
"def",
"header_big_endian",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_ptr",
":",
"raise",
"BfdException",
"(",
"\"BFD not initialized\"",
")",
"return",
"_bfd",
".",
"get_bfd_attribute",
"(",
"self",
".",
"_ptr",
",",
"BfdAttributes",
".",
"HEADER_BIG_ENDIAN",
")"
] | Return the header_big_endian attribute of the BFD file being processed. | [
"Return",
"the",
"header_big_endian",
"attribute",
"of",
"the",
"BFD",
"file",
"being",
"processed",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L484-L493 |
5,739 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.header_little_endian | def header_little_endian(self):
"""Return the header_little_endian attribute of the BFD file being
processed.
"""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(
self._ptr, BfdAttributes.HEADER_LITTLE_ENDIAN) | python | def header_little_endian(self):
"""Return the header_little_endian attribute of the BFD file being
processed.
"""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(
self._ptr, BfdAttributes.HEADER_LITTLE_ENDIAN) | [
"def",
"header_little_endian",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_ptr",
":",
"raise",
"BfdException",
"(",
"\"BFD not initialized\"",
")",
"return",
"_bfd",
".",
"get_bfd_attribute",
"(",
"self",
".",
"_ptr",
",",
"BfdAttributes",
".",
"HEADER_LITTLE_ENDIAN",
")"
] | Return the header_little_endian attribute of the BFD file being
processed. | [
"Return",
"the",
"header_little_endian",
"attribute",
"of",
"the",
"BFD",
"file",
"being",
"processed",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L496-L505 |
5,740 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.file_flags | def file_flags(self):
"""Return the file flags attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.FILE_FLAGS) | python | def file_flags(self):
"""Return the file flags attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.FILE_FLAGS) | [
"def",
"file_flags",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_ptr",
":",
"raise",
"BfdException",
"(",
"\"BFD not initialized\"",
")",
"return",
"_bfd",
".",
"get_bfd_attribute",
"(",
"self",
".",
"_ptr",
",",
"BfdAttributes",
".",
"FILE_FLAGS",
")"
] | Return the file flags attribute of the BFD file being processed. | [
"Return",
"the",
"file",
"flags",
"attribute",
"of",
"the",
"BFD",
"file",
"being",
"processed",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L508-L513 |
5,741 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.file_flags | def file_flags(self, _file_flags):
"""Set the new file flags attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.set_file_flags(self._ptr, _file_flags) | python | def file_flags(self, _file_flags):
"""Set the new file flags attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.set_file_flags(self._ptr, _file_flags) | [
"def",
"file_flags",
"(",
"self",
",",
"_file_flags",
")",
":",
"if",
"not",
"self",
".",
"_ptr",
":",
"raise",
"BfdException",
"(",
"\"BFD not initialized\"",
")",
"return",
"_bfd",
".",
"set_file_flags",
"(",
"self",
".",
"_ptr",
",",
"_file_flags",
")"
] | Set the new file flags attribute of the BFD file being processed. | [
"Set",
"the",
"new",
"file",
"flags",
"attribute",
"of",
"the",
"BFD",
"file",
"being",
"processed",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L516-L521 |
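The property pair above is asymmetric: reads go through _bfd.get_bfd_attribute() while writes go through _bfd.set_file_flags(). A round-trip sketch that writes back the value just read, to avoid assuming any specific flag constant:

bfd = Bfd("/bin/ls")
flags = bfd.file_flags      # getter path
bfd.file_flags = flags      # setter path
assert bfd.file_flags == flags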
5,742 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.applicable_file_flags | def applicable_file_flags(self):
"""
Return the applicable file flags attribute of the BFD file being
processed.
"""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(
self._ptr, BfdAttributes.APPLICABLE_FILE_FLAGS) | python | def applicable_file_flags(self):
"""
Return the applicable file flags attribute of the BFD file being
processed.
"""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(
self._ptr, BfdAttributes.APPLICABLE_FILE_FLAGS) | [
"def",
"applicable_file_flags",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_ptr",
":",
"raise",
"BfdException",
"(",
"\"BFD not initialized\"",
")",
"return",
"_bfd",
".",
"get_bfd_attribute",
"(",
"self",
".",
"_ptr",
",",
"BfdAttributes",
".",
"APPLICABLE_FILE_FLAGS",
")"
] | Return the applicable file flags attribute of the BFD file being
processed. | [
"Return",
"the",
"applicable",
"file",
"flags",
"attribute",
"of",
"the",
"BFD",
"file",
"being",
"processed",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L524-L534 |
5,743 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.my_archieve | def my_archieve(self):
"""Return the my archieve attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.MY_ARCHIEVE) | python | def my_archieve(self):
"""Return the my archieve attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.MY_ARCHIEVE) | [
"def",
"my_archieve",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_ptr",
":",
"raise",
"BfdException",
"(",
"\"BFD not initialized\"",
")",
"return",
"_bfd",
".",
"get_bfd_attribute",
"(",
"self",
".",
"_ptr",
",",
"BfdAttributes",
".",
"MY_ARCHIEVE",
")"
] | Return the my archieve attribute of the BFD file being processed. | [
"Return",
"the",
"my",
"archieve",
"attribute",
"of",
"the",
"BFD",
"file",
"being",
"processed",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L537-L542 |
5,744 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.has_map | def has_map(self):
"""Return the has map attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.HAS_MAP) | python | def has_map(self):
"""Return the has map attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.HAS_MAP) | [
"def",
"has_map",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_ptr",
":",
"raise",
"BfdException",
"(",
"\"BFD not initialized\"",
")",
"return",
"_bfd",
".",
"get_bfd_attribute",
"(",
"self",
".",
"_ptr",
",",
"BfdAttributes",
".",
"HAS_MAP",
")"
] | Return the has map attribute of the BFD file being processed. | [
"Return",
"the",
"has",
"map",
"attribute",
"of",
"the",
"BFD",
"file",
"being",
"processed",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L545-L550 |
5,745 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.is_thin_archieve | def is_thin_archieve(self):
"""
Return the is thin archieve attribute of the BFD file being processed.
"""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(
self._ptr, BfdAttributes.IS_THIN_ARCHIEVE) | python | def is_thin_archieve(self):
"""
Return the is thin archieve attribute of the BFD file being processed.
"""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(
self._ptr, BfdAttributes.IS_THIN_ARCHIEVE) | [
"def",
"is_thin_archieve",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_ptr",
":",
"raise",
"BfdException",
"(",
"\"BFD not initialized\"",
")",
"return",
"_bfd",
".",
"get_bfd_attribute",
"(",
"self",
".",
"_ptr",
",",
"BfdAttributes",
".",
"IS_THIN_ARCHIEVE",
")"
] | Return the is thin archieve attribute of the BFD file being processed. | [
"Return",
"the",
"is",
"thin",
"archieve",
"attribute",
"of",
"the",
"BFD",
"file",
"being",
"processed",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L553-L561 |
5,746 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.has_gap_in_elf_shndx | def has_gap_in_elf_shndx(self):
"""Return the has gap in elf shndx attribute of the BFD file being
processed.
"""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(
self._ptr, BfdAttributes.HAS_GAP_IN_ELF_SHNDX) | python | def has_gap_in_elf_shndx(self):
"""Return the has gap in elf shndx attribute of the BFD file being
processed.
"""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(
self._ptr, BfdAttributes.HAS_GAP_IN_ELF_SHNDX) | [
"def",
"has_gap_in_elf_shndx",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_ptr",
":",
"raise",
"BfdException",
"(",
"\"BFD not initialized\"",
")",
"return",
"_bfd",
".",
"get_bfd_attribute",
"(",
"self",
".",
"_ptr",
",",
"BfdAttributes",
".",
"HAS_GAP_IN_ELF_SHNDX",
")"
] | Return the has gap in elf shndx attribute of the BFD file being
processed. | [
"Return",
"the",
"has",
"gap",
"in",
"elf",
"shndx",
"attribute",
"of",
"the",
"BFD",
"file",
"being",
"processed",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L564-L572 |
5,747 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.valid_reloction_types | def valid_reloction_types(self):
"""Return the valid_reloc_types attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(
self._ptr, BfdAttributes.VALID_RELOC_TYPES) | python | def valid_reloction_types(self):
"""Return the valid_reloc_types attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(
self._ptr, BfdAttributes.VALID_RELOC_TYPES) | [
"def",
"valid_reloction_types",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_ptr",
":",
"raise",
"BfdException",
"(",
"\"BFD not initialized\"",
")",
"return",
"_bfd",
".",
"get_bfd_attribute",
"(",
"self",
".",
"_ptr",
",",
"BfdAttributes",
".",
"VALID_RELOC_TYPES",
")"
] | Return the valid_reloc_types attribute of the BFD file being processed. | [
"Return",
"the",
"valid_reloc_types",
"attribute",
"of",
"the",
"BFD",
"file",
"being",
"processed",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L575-L581 |
5,748 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.user_data | def user_data(self):
"""Return the usrdata attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.USRDATA) | python | def user_data(self):
"""Return the usrdata attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.USRDATA) | [
"def",
"user_data",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_ptr",
":",
"raise",
"BfdException",
"(",
"\"BFD not initialized\"",
")",
"return",
"_bfd",
".",
"get_bfd_attribute",
"(",
"self",
".",
"_ptr",
",",
"BfdAttributes",
".",
"USRDATA",
")"
] | Return the usrdata attribute of the BFD file being processed. | [
"Return",
"the",
"usrdata",
"attribute",
"of",
"the",
"BFD",
"file",
"being",
"processed",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L584-L589 |
5,749 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.start_address | def start_address(self):
"""Return the start address attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.START_ADDRESS) | python | def start_address(self):
"""Return the start address attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.START_ADDRESS) | [
"def",
"start_address",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_ptr",
":",
"raise",
"BfdException",
"(",
"\"BFD not initialized\"",
")",
"return",
"_bfd",
".",
"get_bfd_attribute",
"(",
"self",
".",
"_ptr",
",",
"BfdAttributes",
".",
"START_ADDRESS",
")"
] | Return the start address attribute of the BFD file being processed. | [
"Return",
"the",
"start",
"address",
"attribute",
"of",
"the",
"BFD",
"file",
"being",
"processed",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L592-L597 |
5,750 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.symbols_count | def symbols_count(self):
"""Return the symcount attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.SYMCOUNT) | python | def symbols_count(self):
"""Return the symcount attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.SYMCOUNT) | [
"def",
"symbols_count",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_ptr",
":",
"raise",
"BfdException",
"(",
"\"BFD not initialized\"",
")",
"return",
"_bfd",
".",
"get_bfd_attribute",
"(",
"self",
".",
"_ptr",
",",
"BfdAttributes",
".",
"SYMCOUNT",
")"
] | Return the symcount attribute of the BFD file being processed. | [
"Return",
"the",
"symcount",
"attribute",
"of",
"the",
"BFD",
"file",
"being",
"processed",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L630-L635 |
5,751 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.out_symbols | def out_symbols(self):
"""Return the out symbols attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.OUTSYMBOLS) | python | def out_symbols(self):
"""Return the out symbols attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.OUTSYMBOLS) | [
"def",
"out_symbols",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_ptr",
":",
"raise",
"BfdException",
"(",
"\"BFD not initialized\"",
")",
"return",
"_bfd",
".",
"get_bfd_attribute",
"(",
"self",
".",
"_ptr",
",",
"BfdAttributes",
".",
"OUTSYMBOLS",
")"
] | Return the out symbols attribute of the BFD file being processed. | [
"Return",
"the",
"out",
"symbols",
"attribute",
"of",
"the",
"BFD",
"file",
"being",
"processed",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L638-L643 |
5,752 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.sections_count | def sections_count(self):
"""Return the sections_count attribute of the BFD file being processed."""
        # This should match the 'sections' attribute length, so callers
        # should instead use:
#
# len(bfd.sections)
#
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.COUNT_SECTIONS) | python | def sections_count(self):
"""Return the sections_count attribute of the BFD file being processed."""
        # This should match the 'sections' attribute length, so callers
        # should instead use:
#
# len(bfd.sections)
#
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.COUNT_SECTIONS) | [
"def",
"sections_count",
"(",
"self",
")",
":",
"# This should match the 'sections' attribute length so instead should",
"# use :",
"#",
"# len(bfd.sections)",
"#",
"if",
"not",
"self",
".",
"_ptr",
":",
"raise",
"BfdException",
"(",
"\"BFD not initialized\"",
")",
"return",
"_bfd",
".",
"get_bfd_attribute",
"(",
"self",
".",
"_ptr",
",",
"BfdAttributes",
".",
"COUNT_SECTIONS",
")"
] | Return the sections_count attribute of the BFD file being processed. | [
"Return",
"the",
"sections_count",
"attribute",
"of",
"the",
"BFD",
"file",
"being",
"processed",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L646-L656 |
5,753 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.dynamic_symbols_count | def dynamic_symbols_count(self):
"""Return the dynamic symbols count attribute of the BFD file being
processed.
"""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(
self._ptr, BfdAttributes.DYNAMIC_SYMCOUNT) | python | def dynamic_symbols_count(self):
"""Return the dynamic symbols count attribute of the BFD file being
processed.
"""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(
self._ptr, BfdAttributes.DYNAMIC_SYMCOUNT) | [
"def",
"dynamic_symbols_count",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_ptr",
":",
"raise",
"BfdException",
"(",
"\"BFD not initialized\"",
")",
"return",
"_bfd",
".",
"get_bfd_attribute",
"(",
"self",
".",
"_ptr",
",",
"BfdAttributes",
".",
"DYNAMIC_SYMCOUNT",
")"
] | Return the dynamic symbols count attribute of the BFD file being
processed. | [
"Return",
"the",
"dynamic",
"symbols",
"count",
"attribute",
"of",
"the",
"BFD",
"file",
"being",
"processed",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L659-L668 |
5,754 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.symbol_leading_char | def symbol_leading_char(self):
"""Return the symbol leading char attribute of the BFD file being
processed.
"""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(
self._ptr, BfdAttributes.SYMBOL_LEADING_CHAR) | python | def symbol_leading_char(self):
"""Return the symbol leading char attribute of the BFD file being
processed.
"""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(
self._ptr, BfdAttributes.SYMBOL_LEADING_CHAR) | [
"def",
"symbol_leading_char",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_ptr",
":",
"raise",
"BfdException",
"(",
"\"BFD not initialized\"",
")",
"return",
"_bfd",
".",
"get_bfd_attribute",
"(",
"self",
".",
"_ptr",
",",
"BfdAttributes",
".",
"SYMBOL_LEADING_CHAR",
")"
] | Return the symbol leading char attribute of the BFD file being
processed. | [
"Return",
"the",
"symbol",
"leading",
"char",
"attribute",
"of",
"the",
"BFD",
"file",
"being",
"processed",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L671-L680 |
5,755 | Groundworkstech/pybfd | pybfd/bfd.py | Bfd.arch_size | def arch_size(self):
"""Return the architecure size in bits."""
if not self._ptr:
raise BfdException("BFD not initialized")
try:
return _bfd.get_arch_size(self._ptr)
except Exception, err:
raise BfdException("Unable to determine architeure size.") | python | def arch_size(self):
"""Return the architecure size in bits."""
if not self._ptr:
raise BfdException("BFD not initialized")
try:
return _bfd.get_arch_size(self._ptr)
except Exception, err:
raise BfdException("Unable to determine architeure size.") | [
"def",
"arch_size",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_ptr",
":",
"raise",
"BfdException",
"(",
"\"BFD not initialized\"",
")",
"try",
":",
"return",
"_bfd",
".",
"get_arch_size",
"(",
"self",
".",
"_ptr",
")",
"except",
"Exception",
",",
"err",
":",
"raise",
"BfdException",
"(",
"\"Unable to determine architeure size.\"",
")"
] | Return the architecture size in bits. | [
"Return",
"the",
"architecure",
"size",
"in",
"bits",
"."
] | 9e722435929b4ad52212043a6f1e9e9ce60b5d72 | https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L697-L705 |
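arch_size is the value that Opcodes.initialize_bfd, in the first record of this batch, branches on to choose the 32- or 64-bit Intel-syntax machine. A sketch, with imports as before:

bfd = Bfd("/bin/ls")
print "%d-bit image" % bfd.arch_size   # 32 or 64, via _bfd.get_arch_size()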
5,756 | getsenic/nuimo-linux-python | nuimo/nuimo.py | Controller.display_matrix | def display_matrix(self, matrix, interval=2.0, brightness=1.0, fading=False, ignore_duplicates=False):
"""
Displays an LED matrix on Nuimo's LED matrix display.
:param matrix: the matrix to display
:param interval: interval in seconds until the matrix disappears again
:param brightness: led brightness between 0..1
:param fading: if True, the previous matrix fades into the new matrix
:param ignore_duplicates: if True, the matrix is not sent again if already being displayed
"""
self._matrix_writer.write(
matrix=matrix,
interval=interval,
brightness=brightness,
fading=fading,
ignore_duplicates=ignore_duplicates
) | python | def display_matrix(self, matrix, interval=2.0, brightness=1.0, fading=False, ignore_duplicates=False):
"""
Displays an LED matrix on Nuimo's LED matrix display.
:param matrix: the matrix to display
:param interval: interval in seconds until the matrix disappears again
:param brightness: led brightness between 0..1
:param fading: if True, the previous matrix fades into the new matrix
:param ignore_duplicates: if True, the matrix is not sent again if already being displayed
"""
self._matrix_writer.write(
matrix=matrix,
interval=interval,
brightness=brightness,
fading=fading,
ignore_duplicates=ignore_duplicates
) | [
"def",
"display_matrix",
"(",
"self",
",",
"matrix",
",",
"interval",
"=",
"2.0",
",",
"brightness",
"=",
"1.0",
",",
"fading",
"=",
"False",
",",
"ignore_duplicates",
"=",
"False",
")",
":",
"self",
".",
"_matrix_writer",
".",
"write",
"(",
"matrix",
"=",
"matrix",
",",
"interval",
"=",
"interval",
",",
"brightness",
"=",
"brightness",
",",
"fading",
"=",
"fading",
",",
"ignore_duplicates",
"=",
"ignore_duplicates",
")"
] | Displays an LED matrix on Nuimo's LED matrix display.
:param matrix: the matrix to display
:param interval: interval in seconds until the matrix disappears again
:param brightness: LED brightness between 0..1
:param fading: if True, the previous matrix fades into the new matrix
:param ignore_duplicates: if True, the matrix is not sent again if already being displayed | [
"Displays",
"an",
"LED",
"matrix",
"on",
"Nuimo",
"s",
"LED",
"matrix",
"display",
"."
] | 1918e6e51ad6569eb134904e891122479fafa2d6 | https://github.com/getsenic/nuimo-linux-python/blob/1918e6e51ad6569eb134904e891122479fafa2d6/nuimo/nuimo.py#L208-L224 |
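A hedged usage sketch for display_matrix (editor's addition). The MAC address and adapter name are placeholders, and the ControllerManager/connect/run flow follows the package README of the time; real code usually calls display_matrix from a ControllerListener callback once connected, which is elided here:

import nuimo

manager = nuimo.ControllerManager(adapter_name='hci0')           # placeholder adapter
controller = nuimo.Controller(mac_address='AA:BB:CC:DD:EE:FF',   # placeholder MAC
                              manager=manager)
controller.connect()
# 9x9 LED matrix string: '*' = on, ' ' = off (81 characters total)
matrix = nuimo.LedMatrix(
    '         '
    '  *   *  '
    '         '
    '         '
    '*       *'
    ' *     * '
    '  *****  '
    '         '
    '         ')
controller.display_matrix(matrix, interval=5.0, brightness=0.5, fading=True)
manager.run()  # BLE I/O requires the event loop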
5,757 | secynic/ipwhois | ipwhois/net.py | Net.get_asn_origin_whois | def get_asn_origin_whois(self, asn_registry='radb', asn=None,
retry_count=3, server=None, port=43):
"""
The function for retrieving CIDR info for an ASN via whois.
Args:
asn_registry (:obj:`str`): The source to run the query against
(asn.ASN_ORIGIN_WHOIS).
asn (:obj:`str`): The AS number (required).
retry_count (:obj:`int`): The number of times to retry in case
socket errors, timeouts, connection resets, etc. are
encountered. Defaults to 3.
server (:obj:`str`): An optional server to connect to.
port (:obj:`int`): The network port to connect on. Defaults to 43.
Returns:
str: The raw ASN origin whois data.
Raises:
WhoisLookupError: The ASN origin whois lookup failed.
WhoisRateLimitError: The ASN origin Whois request was rate limited and
retries were exhausted.
"""
try:
if server is None:
server = ASN_ORIGIN_WHOIS[asn_registry]['server']
# Create the connection for the whois query.
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.settimeout(self.timeout)
log.debug('ASN origin WHOIS query for {0} at {1}:{2}'.format(
asn, server, port))
conn.connect((server, port))
# Prep the query.
query = ' -i origin {0}{1}'.format(asn, '\r\n')
# Query the whois server, and store the results.
conn.send(query.encode())
response = ''
while True:
d = conn.recv(4096).decode()
response += d
if not d:
break
conn.close()
# TODO: this was taken from get_whois(). Need to test rate limiting
if 'Query rate limit exceeded' in response: # pragma: no cover
if retry_count > 0:
log.debug('ASN origin WHOIS query rate limit exceeded. '
'Waiting...')
sleep(1)
return self.get_asn_origin_whois(
asn_registry=asn_registry, asn=asn,
retry_count=retry_count-1,
server=server, port=port
)
else:
raise WhoisRateLimitError(
'ASN origin Whois lookup failed for {0}. Rate limit '
'exceeded, wait and try again (possibly a '
'temporary block).'.format(asn))
elif ('error 501' in response or 'error 230' in response
): # pragma: no cover
log.debug('ASN origin WHOIS query error: {0}'.format(response))
raise ValueError
return str(response)
except (socket.timeout, socket.error) as e:
log.debug('ASN origin WHOIS query socket error: {0}'.format(e))
if retry_count > 0:
log.debug('ASN origin WHOIS query retrying (count: {0})'
''.format(str(retry_count)))
return self.get_asn_origin_whois(
asn_registry=asn_registry, asn=asn,
retry_count=retry_count-1, server=server, port=port
)
else:
raise WhoisLookupError(
'ASN origin WHOIS lookup failed for {0}.'.format(asn)
)
except WhoisRateLimitError: # pragma: no cover
raise
except: # pragma: no cover
raise WhoisLookupError(
'ASN origin WHOIS lookup failed for {0}.'.format(asn)
) | python | def get_asn_origin_whois(self, asn_registry='radb', asn=None,
retry_count=3, server=None, port=43):
"""
The function for retrieving CIDR info for an ASN via whois.
Args:
asn_registry (:obj:`str`): The source to run the query against
(asn.ASN_ORIGIN_WHOIS).
asn (:obj:`str`): The AS number (required).
retry_count (:obj:`int`): The number of times to retry in case
socket errors, timeouts, connection resets, etc. are
encountered. Defaults to 3.
server (:obj:`str`): An optional server to connect to.
port (:obj:`int`): The network port to connect on. Defaults to 43.
Returns:
str: The raw ASN origin whois data.
Raises:
WhoisLookupError: The ASN origin whois lookup failed.
WhoisRateLimitError: The ASN origin Whois request was rate limited and
retries were exhausted.
"""
try:
if server is None:
server = ASN_ORIGIN_WHOIS[asn_registry]['server']
# Create the connection for the whois query.
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.settimeout(self.timeout)
log.debug('ASN origin WHOIS query for {0} at {1}:{2}'.format(
asn, server, port))
conn.connect((server, port))
# Prep the query.
query = ' -i origin {0}{1}'.format(asn, '\r\n')
# Query the whois server, and store the results.
conn.send(query.encode())
response = ''
while True:
d = conn.recv(4096).decode()
response += d
if not d:
break
conn.close()
# TODO: this was taken from get_whois(). Need to test rate limiting
if 'Query rate limit exceeded' in response: # pragma: no cover
if retry_count > 0:
log.debug('ASN origin WHOIS query rate limit exceeded. '
'Waiting...')
sleep(1)
return self.get_asn_origin_whois(
asn_registry=asn_registry, asn=asn,
retry_count=retry_count-1,
server=server, port=port
)
else:
raise WhoisRateLimitError(
'ASN origin Whois lookup failed for {0}. Rate limit '
'exceeded, wait and try again (possibly a '
'temporary block).'.format(asn))
elif ('error 501' in response or 'error 230' in response
): # pragma: no cover
log.debug('ASN origin WHOIS query error: {0}'.format(response))
raise ValueError
return str(response)
except (socket.timeout, socket.error) as e:
log.debug('ASN origin WHOIS query socket error: {0}'.format(e))
if retry_count > 0:
log.debug('ASN origin WHOIS query retrying (count: {0})'
''.format(str(retry_count)))
return self.get_asn_origin_whois(
asn_registry=asn_registry, asn=asn,
retry_count=retry_count-1, server=server, port=port
)
else:
raise WhoisLookupError(
'ASN origin WHOIS lookup failed for {0}.'.format(asn)
)
except WhoisRateLimitError: # pragma: no cover
raise
except: # pragma: no cover
raise WhoisLookupError(
'ASN origin WHOIS lookup failed for {0}.'.format(asn)
) | [
"def",
"get_asn_origin_whois",
"(",
"self",
",",
"asn_registry",
"=",
"'radb'",
",",
"asn",
"=",
"None",
",",
"retry_count",
"=",
"3",
",",
"server",
"=",
"None",
",",
"port",
"=",
"43",
")",
":",
"try",
":",
"if",
"server",
"is",
"None",
":",
"server",
"=",
"ASN_ORIGIN_WHOIS",
"[",
"asn_registry",
"]",
"[",
"'server'",
"]",
"# Create the connection for the whois query.",
"conn",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
"conn",
".",
"settimeout",
"(",
"self",
".",
"timeout",
")",
"log",
".",
"debug",
"(",
"'ASN origin WHOIS query for {0} at {1}:{2}'",
".",
"format",
"(",
"asn",
",",
"server",
",",
"port",
")",
")",
"conn",
".",
"connect",
"(",
"(",
"server",
",",
"port",
")",
")",
"# Prep the query.",
"query",
"=",
"' -i origin {0}{1}'",
".",
"format",
"(",
"asn",
",",
"'\\r\\n'",
")",
"# Query the whois server, and store the results.",
"conn",
".",
"send",
"(",
"query",
".",
"encode",
"(",
")",
")",
"response",
"=",
"''",
"while",
"True",
":",
"d",
"=",
"conn",
".",
"recv",
"(",
"4096",
")",
".",
"decode",
"(",
")",
"response",
"+=",
"d",
"if",
"not",
"d",
":",
"break",
"conn",
".",
"close",
"(",
")",
"# TODO: this was taken from get_whois(). Need to test rate limiting",
"if",
"'Query rate limit exceeded'",
"in",
"response",
":",
"# pragma: no cover",
"if",
"retry_count",
">",
"0",
":",
"log",
".",
"debug",
"(",
"'ASN origin WHOIS query rate limit exceeded. '",
"'Waiting...'",
")",
"sleep",
"(",
"1",
")",
"return",
"self",
".",
"get_asn_origin_whois",
"(",
"asn_registry",
"=",
"asn_registry",
",",
"asn",
"=",
"asn",
",",
"retry_count",
"=",
"retry_count",
"-",
"1",
",",
"server",
"=",
"server",
",",
"port",
"=",
"port",
")",
"else",
":",
"raise",
"WhoisRateLimitError",
"(",
"'ASN origin Whois lookup failed for {0}. Rate limit '",
"'exceeded, wait and try again (possibly a '",
"'temporary block).'",
".",
"format",
"(",
"asn",
")",
")",
"elif",
"(",
"'error 501'",
"in",
"response",
"or",
"'error 230'",
"in",
"response",
")",
":",
"# pragma: no cover",
"log",
".",
"debug",
"(",
"'ASN origin WHOIS query error: {0}'",
".",
"format",
"(",
"response",
")",
")",
"raise",
"ValueError",
"return",
"str",
"(",
"response",
")",
"except",
"(",
"socket",
".",
"timeout",
",",
"socket",
".",
"error",
")",
"as",
"e",
":",
"log",
".",
"debug",
"(",
"'ASN origin WHOIS query socket error: {0}'",
".",
"format",
"(",
"e",
")",
")",
"if",
"retry_count",
">",
"0",
":",
"log",
".",
"debug",
"(",
"'ASN origin WHOIS query retrying (count: {0})'",
"''",
".",
"format",
"(",
"str",
"(",
"retry_count",
")",
")",
")",
"return",
"self",
".",
"get_asn_origin_whois",
"(",
"asn_registry",
"=",
"asn_registry",
",",
"asn",
"=",
"asn",
",",
"retry_count",
"=",
"retry_count",
"-",
"1",
",",
"server",
"=",
"server",
",",
"port",
"=",
"port",
")",
"else",
":",
"raise",
"WhoisLookupError",
"(",
"'ASN origin WHOIS lookup failed for {0}.'",
".",
"format",
"(",
"asn",
")",
")",
"except",
"WhoisRateLimitError",
":",
"# pragma: no cover",
"raise",
"except",
":",
"# pragma: no cover",
"raise",
"WhoisLookupError",
"(",
"'ASN origin WHOIS lookup failed for {0}.'",
".",
"format",
"(",
"asn",
")",
")"
] | The function for retrieving CIDR info for an ASN via whois.
Args:
asn_registry (:obj:`str`): The source to run the query against
(asn.ASN_ORIGIN_WHOIS).
asn (:obj:`str`): The AS number (required).
retry_count (:obj:`int`): The number of times to retry in case
socket errors, timeouts, connection resets, etc. are
encountered. Defaults to 3.
server (:obj:`str`): An optional server to connect to.
port (:obj:`int`): The network port to connect on. Defaults to 43.
Returns:
str: The raw ASN origin whois data.
Raises:
WhoisLookupError: The ASN origin whois lookup failed.
WhoisRateLimitError: The ASN origin Whois request was rate limited and
retries were exhausted. | [
"The",
"function",
"for",
"retrieving",
"CIDR",
"info",
"for",
"an",
"ASN",
"via",
"whois",
"."
] | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/net.py#L431-L541 |
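A minimal sketch of driving the method above (editor's addition). The IP and AS number are illustrative, and the call performs a live whois query against RADb by default, so network access is required:

from ipwhois.net import Net

net = Net('74.125.225.229')                    # example address used in ipwhois docs
raw = net.get_asn_origin_whois(asn='AS15169')  # asn is required; registry defaults to 'radb'
print(raw[:200])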
5,758 | secynic/ipwhois | ipwhois/net.py | Net.get_http_json | def get_http_json(self, url=None, retry_count=3, rate_limit_timeout=120,
headers=None):
"""
The function for retrieving a json result via HTTP.
Args:
url (:obj:`str`): The URL to retrieve (required).
retry_count (:obj:`int`): The number of times to retry in case
socket errors, timeouts, connection resets, etc. are
encountered. Defaults to 3.
rate_limit_timeout (:obj:`int`): The number of seconds to wait
before retrying when a rate limit notice is returned via
rdap+json or HTTP error 429. Defaults to 120.
headers (:obj:`dict`): The HTTP headers. The Accept header
defaults to 'application/rdap+json'.
Returns:
dict: The data in json format.
Raises:
HTTPLookupError: The HTTP lookup failed.
HTTPRateLimitError: The HTTP request was rate limited and retries
were exhausted.
"""
if headers is None:
headers = {'Accept': 'application/rdap+json'}
try:
# Create the connection for the whois query.
log.debug('HTTP query for {0} at {1}'.format(
self.address_str, url))
conn = Request(url, headers=headers)
data = self.opener.open(conn, timeout=self.timeout)
try:
d = json.loads(data.readall().decode('utf-8', 'ignore'))
except AttributeError: # pragma: no cover
d = json.loads(data.read().decode('utf-8', 'ignore'))
try:
# Tests written but commented out. I do not want to send a
# flood of requests on every test.
for tmp in d['notices']: # pragma: no cover
if tmp['title'] == 'Rate Limit Notice':
log.debug('RDAP query rate limit exceeded.')
if retry_count > 0:
log.debug('Waiting {0} seconds...'.format(
str(rate_limit_timeout)))
sleep(rate_limit_timeout)
return self.get_http_json(
url=url, retry_count=retry_count-1,
rate_limit_timeout=rate_limit_timeout,
headers=headers
)
else:
raise HTTPRateLimitError(
'HTTP lookup failed for {0}. Rate limit '
'exceeded, wait and try again (possibly a '
'temporary block).'.format(url))
except (KeyError, IndexError): # pragma: no cover
pass
return d
except HTTPError as e: # pragma: no cover
# RIPE is producing this HTTP error rather than a JSON error.
if e.code == 429:
log.debug('HTTP query rate limit exceeded.')
if retry_count > 0:
log.debug('Waiting {0} seconds...'.format(
str(rate_limit_timeout)))
sleep(rate_limit_timeout)
return self.get_http_json(
url=url, retry_count=retry_count - 1,
rate_limit_timeout=rate_limit_timeout,
headers=headers
)
else:
raise HTTPRateLimitError(
'HTTP lookup failed for {0}. Rate limit '
'exceeded, wait and try again (possibly a '
'temporary block).'.format(url))
else:
raise HTTPLookupError('HTTP lookup failed for {0} with error '
'code {1}.'.format(url, str(e.code)))
except (URLError, socket.timeout, socket.error) as e:
log.debug('HTTP query socket error: {0}'.format(e))
if retry_count > 0:
log.debug('HTTP query retrying (count: {0})'.format(
str(retry_count)))
return self.get_http_json(
url=url, retry_count=retry_count-1,
rate_limit_timeout=rate_limit_timeout, headers=headers
)
else:
raise HTTPLookupError('HTTP lookup failed for {0}.'.format(
url))
except (HTTPLookupError, HTTPRateLimitError) as e: # pragma: no cover
raise e
except: # pragma: no cover
raise HTTPLookupError('HTTP lookup failed for {0}.'.format(url)) | python | def get_http_json(self, url=None, retry_count=3, rate_limit_timeout=120,
headers=None):
"""
The function for retrieving a json result via HTTP.
Args:
url (:obj:`str`): The URL to retrieve (required).
retry_count (:obj:`int`): The number of times to retry in case
socket errors, timeouts, connection resets, etc. are
encountered. Defaults to 3.
rate_limit_timeout (:obj:`int`): The number of seconds to wait
before retrying when a rate limit notice is returned via
rdap+json or HTTP error 429. Defaults to 120.
headers (:obj:`dict`): The HTTP headers. The Accept header
defaults to 'application/rdap+json'.
Returns:
dict: The data in json format.
Raises:
HTTPLookupError: The HTTP lookup failed.
HTTPRateLimitError: The HTTP request was rate limited and retries
were exhausted.
"""
if headers is None:
headers = {'Accept': 'application/rdap+json'}
try:
# Create the connection for the whois query.
log.debug('HTTP query for {0} at {1}'.format(
self.address_str, url))
conn = Request(url, headers=headers)
data = self.opener.open(conn, timeout=self.timeout)
try:
d = json.loads(data.readall().decode('utf-8', 'ignore'))
except AttributeError: # pragma: no cover
d = json.loads(data.read().decode('utf-8', 'ignore'))
try:
# Tests written but commented out. I do not want to send a
# flood of requests on every test.
for tmp in d['notices']: # pragma: no cover
if tmp['title'] == 'Rate Limit Notice':
log.debug('RDAP query rate limit exceeded.')
if retry_count > 0:
log.debug('Waiting {0} seconds...'.format(
str(rate_limit_timeout)))
sleep(rate_limit_timeout)
return self.get_http_json(
url=url, retry_count=retry_count-1,
rate_limit_timeout=rate_limit_timeout,
headers=headers
)
else:
raise HTTPRateLimitError(
'HTTP lookup failed for {0}. Rate limit '
'exceeded, wait and try again (possibly a '
'temporary block).'.format(url))
except (KeyError, IndexError): # pragma: no cover
pass
return d
except HTTPError as e: # pragma: no cover
# RIPE is producing this HTTP error rather than a JSON error.
if e.code == 429:
log.debug('HTTP query rate limit exceeded.')
if retry_count > 0:
log.debug('Waiting {0} seconds...'.format(
str(rate_limit_timeout)))
sleep(rate_limit_timeout)
return self.get_http_json(
url=url, retry_count=retry_count - 1,
rate_limit_timeout=rate_limit_timeout,
headers=headers
)
else:
raise HTTPRateLimitError(
'HTTP lookup failed for {0}. Rate limit '
'exceeded, wait and try again (possibly a '
'temporary block).'.format(url))
else:
raise HTTPLookupError('HTTP lookup failed for {0} with error '
'code {1}.'.format(url, str(e.code)))
except (URLError, socket.timeout, socket.error) as e:
log.debug('HTTP query socket error: {0}'.format(e))
if retry_count > 0:
log.debug('HTTP query retrying (count: {0})'.format(
str(retry_count)))
return self.get_http_json(
url=url, retry_count=retry_count-1,
rate_limit_timeout=rate_limit_timeout, headers=headers
)
else:
raise HTTPLookupError('HTTP lookup failed for {0}.'.format(
url))
except (HTTPLookupError, HTTPRateLimitError) as e: # pragma: no cover
raise e
except: # pragma: no cover
raise HTTPLookupError('HTTP lookup failed for {0}.'.format(url)) | [
"def",
"get_http_json",
"(",
"self",
",",
"url",
"=",
"None",
",",
"retry_count",
"=",
"3",
",",
"rate_limit_timeout",
"=",
"120",
",",
"headers",
"=",
"None",
")",
":",
"if",
"headers",
"is",
"None",
":",
"headers",
"=",
"{",
"'Accept'",
":",
"'application/rdap+json'",
"}",
"try",
":",
"# Create the connection for the whois query.",
"log",
".",
"debug",
"(",
"'HTTP query for {0} at {1}'",
".",
"format",
"(",
"self",
".",
"address_str",
",",
"url",
")",
")",
"conn",
"=",
"Request",
"(",
"url",
",",
"headers",
"=",
"headers",
")",
"data",
"=",
"self",
".",
"opener",
".",
"open",
"(",
"conn",
",",
"timeout",
"=",
"self",
".",
"timeout",
")",
"try",
":",
"d",
"=",
"json",
".",
"loads",
"(",
"data",
".",
"readall",
"(",
")",
".",
"decode",
"(",
"'utf-8'",
",",
"'ignore'",
")",
")",
"except",
"AttributeError",
":",
"# pragma: no cover",
"d",
"=",
"json",
".",
"loads",
"(",
"data",
".",
"read",
"(",
")",
".",
"decode",
"(",
"'utf-8'",
",",
"'ignore'",
")",
")",
"try",
":",
"# Tests written but commented out. I do not want to send a",
"# flood of requests on every test.",
"for",
"tmp",
"in",
"d",
"[",
"'notices'",
"]",
":",
"# pragma: no cover",
"if",
"tmp",
"[",
"'title'",
"]",
"==",
"'Rate Limit Notice'",
":",
"log",
".",
"debug",
"(",
"'RDAP query rate limit exceeded.'",
")",
"if",
"retry_count",
">",
"0",
":",
"log",
".",
"debug",
"(",
"'Waiting {0} seconds...'",
".",
"format",
"(",
"str",
"(",
"rate_limit_timeout",
")",
")",
")",
"sleep",
"(",
"rate_limit_timeout",
")",
"return",
"self",
".",
"get_http_json",
"(",
"url",
"=",
"url",
",",
"retry_count",
"=",
"retry_count",
"-",
"1",
",",
"rate_limit_timeout",
"=",
"rate_limit_timeout",
",",
"headers",
"=",
"headers",
")",
"else",
":",
"raise",
"HTTPRateLimitError",
"(",
"'HTTP lookup failed for {0}. Rate limit '",
"'exceeded, wait and try again (possibly a '",
"'temporary block).'",
".",
"format",
"(",
"url",
")",
")",
"except",
"(",
"KeyError",
",",
"IndexError",
")",
":",
"# pragma: no cover",
"pass",
"return",
"d",
"except",
"HTTPError",
"as",
"e",
":",
"# pragma: no cover",
"# RIPE is producing this HTTP error rather than a JSON error.",
"if",
"e",
".",
"code",
"==",
"429",
":",
"log",
".",
"debug",
"(",
"'HTTP query rate limit exceeded.'",
")",
"if",
"retry_count",
">",
"0",
":",
"log",
".",
"debug",
"(",
"'Waiting {0} seconds...'",
".",
"format",
"(",
"str",
"(",
"rate_limit_timeout",
")",
")",
")",
"sleep",
"(",
"rate_limit_timeout",
")",
"return",
"self",
".",
"get_http_json",
"(",
"url",
"=",
"url",
",",
"retry_count",
"=",
"retry_count",
"-",
"1",
",",
"rate_limit_timeout",
"=",
"rate_limit_timeout",
",",
"headers",
"=",
"headers",
")",
"else",
":",
"raise",
"HTTPRateLimitError",
"(",
"'HTTP lookup failed for {0}. Rate limit '",
"'exceeded, wait and try again (possibly a '",
"'temporary block).'",
".",
"format",
"(",
"url",
")",
")",
"else",
":",
"raise",
"HTTPLookupError",
"(",
"'HTTP lookup failed for {0} with error '",
"'code {1}.'",
".",
"format",
"(",
"url",
",",
"str",
"(",
"e",
".",
"code",
")",
")",
")",
"except",
"(",
"URLError",
",",
"socket",
".",
"timeout",
",",
"socket",
".",
"error",
")",
"as",
"e",
":",
"log",
".",
"debug",
"(",
"'HTTP query socket error: {0}'",
".",
"format",
"(",
"e",
")",
")",
"if",
"retry_count",
">",
"0",
":",
"log",
".",
"debug",
"(",
"'HTTP query retrying (count: {0})'",
".",
"format",
"(",
"str",
"(",
"retry_count",
")",
")",
")",
"return",
"self",
".",
"get_http_json",
"(",
"url",
"=",
"url",
",",
"retry_count",
"=",
"retry_count",
"-",
"1",
",",
"rate_limit_timeout",
"=",
"rate_limit_timeout",
",",
"headers",
"=",
"headers",
")",
"else",
":",
"raise",
"HTTPLookupError",
"(",
"'HTTP lookup failed for {0}.'",
".",
"format",
"(",
"url",
")",
")",
"except",
"(",
"HTTPLookupError",
",",
"HTTPRateLimitError",
")",
"as",
"e",
":",
"# pragma: no cover",
"raise",
"e",
"except",
":",
"# pragma: no cover",
"raise",
"HTTPLookupError",
"(",
"'HTTP lookup failed for {0}.'",
".",
"format",
"(",
"url",
")",
")"
] | The function for retrieving a json result via HTTP.
Args:
url (:obj:`str`): The URL to retrieve (required).
retry_count (:obj:`int`): The number of times to retry in case
socket errors, timeouts, connection resets, etc. are
encountered. Defaults to 3.
rate_limit_timeout (:obj:`int`): The number of seconds to wait
before retrying when a rate limit notice is returned via
rdap+json or HTTP error 429. Defaults to 120.
headers (:obj:`dict`): The HTTP headers. The Accept header
defaults to 'application/rdap+json'.
Returns:
dict: The data in json format.
Raises:
HTTPLookupError: The HTTP lookup failed.
HTTPRateLimitError: The HTTP request was rate limited and retries
were exhausted. | [
"The",
"function",
"for",
"retrieving",
"a",
"json",
"result",
"via",
"HTTP",
"."
] | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/net.py#L672-L793 |
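A hedged sketch for get_http_json. The RDAP URL is illustrative (ARIN's public RDAP endpoint for the same example address), and 'handle' is a standard RDAP field but an assumption about this particular response:

from ipwhois.net import Net

net = Net('74.125.225.229')
result = net.get_http_json(
    url='https://rdap.arin.net/registry/ip/74.125.225.229',  # illustrative RDAP URL
    retry_count=1)
print(result.get('handle'))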
5,759 | secynic/ipwhois | ipwhois/net.py | Net.get_host | def get_host(self, retry_count=3):
"""
The function for retrieving host information for an IP address.
Args:
retry_count (:obj:`int`): The number of times to retry in case
socket errors, timeouts, connection resets, etc. are
encountered. Defaults to 3.
Returns:
namedtuple:
:hostname (str): The hostname returned mapped to the given IP
address.
:aliaslist (list): Alternate names for the given IP address.
:ipaddrlist (list): IPv4/v6 addresses mapped to the same hostname.
Raises:
HostLookupError: The host lookup failed.
"""
try:
default_timeout_set = False
if not socket.getdefaulttimeout():
socket.setdefaulttimeout(self.timeout)
default_timeout_set = True
log.debug('Host query for {0}'.format(self.address_str))
ret = socket.gethostbyaddr(self.address_str)
if default_timeout_set: # pragma: no cover
socket.setdefaulttimeout(None)
results = namedtuple('get_host_results', 'hostname, aliaslist, '
'ipaddrlist')
return results(ret)
except (socket.timeout, socket.error) as e:
log.debug('Host query socket error: {0}'.format(e))
if retry_count > 0:
log.debug('Host query retrying (count: {0})'.format(
str(retry_count)))
return self.get_host(retry_count - 1)
else:
raise HostLookupError(
'Host lookup failed for {0}.'.format(self.address_str)
)
except: # pragma: no cover
raise HostLookupError(
'Host lookup failed for {0}.'.format(self.address_str)
) | python | def get_host(self, retry_count=3):
"""
The function for retrieving host information for an IP address.
Args:
retry_count (:obj:`int`): The number of times to retry in case
socket errors, timeouts, connection resets, etc. are
encountered. Defaults to 3.
Returns:
namedtuple:
:hostname (str): The hostname returned mapped to the given IP
address.
:aliaslist (list): Alternate names for the given IP address.
:ipaddrlist (list): IPv4/v6 addresses mapped to the same hostname.
Raises:
HostLookupError: The host lookup failed.
"""
try:
default_timeout_set = False
if not socket.getdefaulttimeout():
socket.setdefaulttimeout(self.timeout)
default_timeout_set = True
log.debug('Host query for {0}'.format(self.address_str))
ret = socket.gethostbyaddr(self.address_str)
if default_timeout_set: # pragma: no cover
socket.setdefaulttimeout(None)
results = namedtuple('get_host_results', 'hostname, aliaslist, '
'ipaddrlist')
return results(ret)
except (socket.timeout, socket.error) as e:
log.debug('Host query socket error: {0}'.format(e))
if retry_count > 0:
log.debug('Host query retrying (count: {0})'.format(
str(retry_count)))
return self.get_host(retry_count - 1)
else:
raise HostLookupError(
'Host lookup failed for {0}.'.format(self.address_str)
)
except: # pragma: no cover
raise HostLookupError(
'Host lookup failed for {0}.'.format(self.address_str)
) | [
"def",
"get_host",
"(",
"self",
",",
"retry_count",
"=",
"3",
")",
":",
"try",
":",
"default_timeout_set",
"=",
"False",
"if",
"not",
"socket",
".",
"getdefaulttimeout",
"(",
")",
":",
"socket",
".",
"setdefaulttimeout",
"(",
"self",
".",
"timeout",
")",
"default_timeout_set",
"=",
"True",
"log",
".",
"debug",
"(",
"'Host query for {0}'",
".",
"format",
"(",
"self",
".",
"address_str",
")",
")",
"ret",
"=",
"socket",
".",
"gethostbyaddr",
"(",
"self",
".",
"address_str",
")",
"if",
"default_timeout_set",
":",
"# pragma: no cover",
"socket",
".",
"setdefaulttimeout",
"(",
"None",
")",
"results",
"=",
"namedtuple",
"(",
"'get_host_results'",
",",
"'hostname, aliaslist, '",
"'ipaddrlist'",
")",
"return",
"results",
"(",
"ret",
")",
"except",
"(",
"socket",
".",
"timeout",
",",
"socket",
".",
"error",
")",
"as",
"e",
":",
"log",
".",
"debug",
"(",
"'Host query socket error: {0}'",
".",
"format",
"(",
"e",
")",
")",
"if",
"retry_count",
">",
"0",
":",
"log",
".",
"debug",
"(",
"'Host query retrying (count: {0})'",
".",
"format",
"(",
"str",
"(",
"retry_count",
")",
")",
")",
"return",
"self",
".",
"get_host",
"(",
"retry_count",
"-",
"1",
")",
"else",
":",
"raise",
"HostLookupError",
"(",
"'Host lookup failed for {0}.'",
".",
"format",
"(",
"self",
".",
"address_str",
")",
")",
"except",
":",
"# pragma: no cover",
"raise",
"HostLookupError",
"(",
"'Host lookup failed for {0}.'",
".",
"format",
"(",
"self",
".",
"address_str",
")",
")"
] | The function for retrieving host information for an IP address.
Args:
retry_count (:obj:`int`): The number of times to retry in case
socket errors, timeouts, connection resets, etc. are
encountered. Defaults to 3.
Returns:
namedtuple:
:hostname (str): The hostname returned mapped to the given IP
address.
:aliaslist (list): Alternate names for the given IP address.
:ipaddrlist (list): IPv4/v6 addresses mapped to the same hostname.
Raises:
HostLookupError: The host lookup failed. | [
"The",
"function",
"for",
"retrieving",
"host",
"information",
"for",
"an",
"IP",
"address",
"."
] | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/net.py#L795-L855 |
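A short sketch for get_host; with the results(*ret) fix above, the gethostbyaddr triple unpacks cleanly into the namedtuple. It assumes reverse DNS resolves for the example address:

from ipwhois.net import Net

net = Net('74.125.225.229')
host = net.get_host(retry_count=1)
print(host.hostname, host.ipaddrlist)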
5,760 | secynic/ipwhois | ipwhois/net.py | Net.get_http_raw | def get_http_raw(self, url=None, retry_count=3, headers=None,
request_type='GET', form_data=None):
"""
The function for retrieving a raw HTML result via HTTP.
Args:
url (:obj:`str`): The URL to retrieve (required).
retry_count (:obj:`int`): The number of times to retry in case
socket errors, timeouts, connection resets, etc. are
encountered. Defaults to 3.
headers (:obj:`dict`): The HTTP headers. The Accept header
defaults to 'text/html'.
request_type (:obj:`str`): Request type 'GET' or 'POST'. Defaults
to 'GET'.
form_data (:obj:`dict`): Optional form POST data.
Returns:
str: The raw data.
Raises:
HTTPLookupError: The HTTP lookup failed.
"""
if headers is None:
headers = {'Accept': 'text/html'}
enc_form_data = None
if form_data:
enc_form_data = urlencode(form_data)
try:
# Py 2 inspection will alert on the encoding arg, no harm done.
enc_form_data = bytes(enc_form_data, encoding='ascii')
except TypeError: # pragma: no cover
pass
try:
# Create the connection for the HTTP query.
log.debug('HTTP query for {0} at {1}'.format(
self.address_str, url))
try:
# Py 2 inspection alert bypassed by using kwargs dict.
conn = Request(url=url, data=enc_form_data, headers=headers,
**{'method': request_type})
except TypeError: # pragma: no cover
conn = Request(url=url, data=enc_form_data, headers=headers)
data = self.opener.open(conn, timeout=self.timeout)
try:
d = data.readall().decode('ascii', 'ignore')
except AttributeError: # pragma: no cover
d = data.read().decode('ascii', 'ignore')
return str(d)
except (URLError, socket.timeout, socket.error) as e:
log.debug('HTTP query socket error: {0}'.format(e))
if retry_count > 0:
log.debug('HTTP query retrying (count: {0})'.format(
str(retry_count)))
return self.get_http_raw(
url=url, retry_count=retry_count - 1, headers=headers,
request_type=request_type, form_data=form_data
)
else:
raise HTTPLookupError('HTTP lookup failed for {0}.'.format(
url))
except HTTPLookupError as e: # pragma: no cover
raise e
except Exception: # pragma: no cover
raise HTTPLookupError('HTTP lookup failed for {0}.'.format(url)) | python | def get_http_raw(self, url=None, retry_count=3, headers=None,
request_type='GET', form_data=None):
"""
The function for retrieving a raw HTML result via HTTP.
Args:
url (:obj:`str`): The URL to retrieve (required).
retry_count (:obj:`int`): The number of times to retry in case
socket errors, timeouts, connection resets, etc. are
encountered. Defaults to 3.
headers (:obj:`dict`): The HTTP headers. The Accept header
defaults to 'text/html'.
request_type (:obj:`str`): Request type 'GET' or 'POST'. Defaults
to 'GET'.
form_data (:obj:`dict`): Optional form POST data.
Returns:
str: The raw data.
Raises:
HTTPLookupError: The HTTP lookup failed.
"""
if headers is None:
headers = {'Accept': 'text/html'}
enc_form_data = None
if form_data:
enc_form_data = urlencode(form_data)
try:
# Py 2 inspection will alert on the encoding arg, no harm done.
enc_form_data = bytes(enc_form_data, encoding='ascii')
except TypeError: # pragma: no cover
pass
try:
# Create the connection for the HTTP query.
log.debug('HTTP query for {0} at {1}'.format(
self.address_str, url))
try:
# Py 2 inspection alert bypassed by using kwargs dict.
conn = Request(url=url, data=enc_form_data, headers=headers,
**{'method': request_type})
except TypeError: # pragma: no cover
conn = Request(url=url, data=enc_form_data, headers=headers)
data = self.opener.open(conn, timeout=self.timeout)
try:
d = data.readall().decode('ascii', 'ignore')
except AttributeError: # pragma: no cover
d = data.read().decode('ascii', 'ignore')
return str(d)
except (URLError, socket.timeout, socket.error) as e:
log.debug('HTTP query socket error: {0}'.format(e))
if retry_count > 0:
log.debug('HTTP query retrying (count: {0})'.format(
str(retry_count)))
return self.get_http_raw(
url=url, retry_count=retry_count - 1, headers=headers,
request_type=request_type, form_data=form_data
)
else:
raise HTTPLookupError('HTTP lookup failed for {0}.'.format(
url))
except HTTPLookupError as e: # pragma: no cover
raise e
except Exception: # pragma: no cover
raise HTTPLookupError('HTTP lookup failed for {0}.'.format(url)) | [
"def",
"get_http_raw",
"(",
"self",
",",
"url",
"=",
"None",
",",
"retry_count",
"=",
"3",
",",
"headers",
"=",
"None",
",",
"request_type",
"=",
"'GET'",
",",
"form_data",
"=",
"None",
")",
":",
"if",
"headers",
"is",
"None",
":",
"headers",
"=",
"{",
"'Accept'",
":",
"'text/html'",
"}",
"enc_form_data",
"=",
"None",
"if",
"form_data",
":",
"enc_form_data",
"=",
"urlencode",
"(",
"form_data",
")",
"try",
":",
"# Py 2 inspection will alert on the encoding arg, no harm done.",
"enc_form_data",
"=",
"bytes",
"(",
"enc_form_data",
",",
"encoding",
"=",
"'ascii'",
")",
"except",
"TypeError",
":",
"# pragma: no cover",
"pass",
"try",
":",
"# Create the connection for the HTTP query.",
"log",
".",
"debug",
"(",
"'HTTP query for {0} at {1}'",
".",
"format",
"(",
"self",
".",
"address_str",
",",
"url",
")",
")",
"try",
":",
"# Py 2 inspection alert bypassed by using kwargs dict.",
"conn",
"=",
"Request",
"(",
"url",
"=",
"url",
",",
"data",
"=",
"enc_form_data",
",",
"headers",
"=",
"headers",
",",
"*",
"*",
"{",
"'method'",
":",
"request_type",
"}",
")",
"except",
"TypeError",
":",
"# pragma: no cover",
"conn",
"=",
"Request",
"(",
"url",
"=",
"url",
",",
"data",
"=",
"enc_form_data",
",",
"headers",
"=",
"headers",
")",
"data",
"=",
"self",
".",
"opener",
".",
"open",
"(",
"conn",
",",
"timeout",
"=",
"self",
".",
"timeout",
")",
"try",
":",
"d",
"=",
"data",
".",
"readall",
"(",
")",
".",
"decode",
"(",
"'ascii'",
",",
"'ignore'",
")",
"except",
"AttributeError",
":",
"# pragma: no cover",
"d",
"=",
"data",
".",
"read",
"(",
")",
".",
"decode",
"(",
"'ascii'",
",",
"'ignore'",
")",
"return",
"str",
"(",
"d",
")",
"except",
"(",
"URLError",
",",
"socket",
".",
"timeout",
",",
"socket",
".",
"error",
")",
"as",
"e",
":",
"log",
".",
"debug",
"(",
"'HTTP query socket error: {0}'",
".",
"format",
"(",
"e",
")",
")",
"if",
"retry_count",
">",
"0",
":",
"log",
".",
"debug",
"(",
"'HTTP query retrying (count: {0})'",
".",
"format",
"(",
"str",
"(",
"retry_count",
")",
")",
")",
"return",
"self",
".",
"get_http_raw",
"(",
"url",
"=",
"url",
",",
"retry_count",
"=",
"retry_count",
"-",
"1",
",",
"headers",
"=",
"headers",
",",
"request_type",
"=",
"request_type",
",",
"form_data",
"=",
"form_data",
")",
"else",
":",
"raise",
"HTTPLookupError",
"(",
"'HTTP lookup failed for {0}.'",
".",
"format",
"(",
"url",
")",
")",
"except",
"HTTPLookupError",
"as",
"e",
":",
"# pragma: no cover",
"raise",
"e",
"except",
"Exception",
":",
"# pragma: no cover",
"raise",
"HTTPLookupError",
"(",
"'HTTP lookup failed for {0}.'",
".",
"format",
"(",
"url",
")",
")"
] | The function for retrieving a raw HTML result via HTTP.
Args:
url (:obj:`str`): The URL to retrieve (required).
retry_count (:obj:`int`): The number of times to retry in case
socket errors, timeouts, connection resets, etc. are
encountered. Defaults to 3.
headers (:obj:`dict`): The HTTP headers. The Accept header
defaults to 'text/html'.
request_type (:obj:`str`): Request type 'GET' or 'POST'. Defaults
to 'GET'.
form_data (:obj:`dict`): Optional form POST data.
Returns:
str: The raw data.
Raises:
HTTPLookupError: The HTTP lookup failed. | [
"The",
"function",
"for",
"retrieving",
"a",
"raw",
"HTML",
"result",
"via",
"HTTP",
"."
] | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/net.py#L857-L936 |
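A minimal sketch for get_http_raw; the URL is a placeholder, and the commented POST variant only illustrates the form_data path with made-up fields:

from ipwhois.net import Net

net = Net('74.125.225.229')
html = net.get_http_raw(url='https://example.com/', retry_count=1)  # GET (placeholder URL)
# POST variant with illustrative form fields:
# html = net.get_http_raw(url='https://example.com/search',
#                         request_type='POST', form_data={'q': 'AS15169'})
print(html[:200])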
5,761 | secynic/ipwhois | ipwhois/scripts/ipwhois_cli.py | generate_output | def generate_output(line='0', short=None, name=None, value=None,
is_parent=False, colorize=True):
"""
The function for formatting CLI output results.
Args:
line (:obj:`str`): The line number (0-4). Determines indentation.
Defaults to '0'.
short (:obj:`str`): The optional abbreviated name for a field.
See hr.py for values.
name (:obj:`str`): The optional name for a field. See hr.py for values.
value (:obj:`str`): The field data (required).
is_parent (:obj:`bool`): Set to True if the field value has sub-items
(dicts/lists). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI colors.
Defaults to True.
Returns:
str: The generated output.
"""
# TODO: so ugly
output = '{0}{1}{2}{3}{4}{5}{6}{7}\n'.format(
LINES['{0}{1}'.format(line, 'C' if colorize else '')] if (
line in LINES.keys()) else '',
COLOR_DEPTH[line] if (colorize and line in COLOR_DEPTH) else '',
ANSI['b'],
short if short is not None else (
name if (name is not None) else ''
),
'' if (name is None or short is None) else ' ({0})'.format(
name),
'' if (name is None and short is None) else ': ',
ANSI['end'] if colorize else '',
'' if is_parent else value
)
return output | python | def generate_output(line='0', short=None, name=None, value=None,
is_parent=False, colorize=True):
"""
The function for formatting CLI output results.
Args:
line (:obj:`str`): The line number (0-4). Determines indentation.
Defaults to '0'.
short (:obj:`str`): The optional abbreviated name for a field.
See hr.py for values.
name (:obj:`str`): The optional name for a field. See hr.py for values.
value (:obj:`str`): The field data (required).
is_parent (:obj:`bool`): Set to True if the field value has sub-items
(dicts/lists). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI colors.
Defaults to True.
Returns:
str: The generated output.
"""
# TODO: so ugly
output = '{0}{1}{2}{3}{4}{5}{6}{7}\n'.format(
LINES['{0}{1}'.format(line, 'C' if colorize else '')] if (
line in LINES.keys()) else '',
COLOR_DEPTH[line] if (colorize and line in COLOR_DEPTH) else '',
ANSI['b'],
short if short is not None else (
name if (name is not None) else ''
),
'' if (name is None or short is None) else ' ({0})'.format(
name),
'' if (name is None and short is None) else ': ',
ANSI['end'] if colorize else '',
'' if is_parent else value
)
return output | [
"def",
"generate_output",
"(",
"line",
"=",
"'0'",
",",
"short",
"=",
"None",
",",
"name",
"=",
"None",
",",
"value",
"=",
"None",
",",
"is_parent",
"=",
"False",
",",
"colorize",
"=",
"True",
")",
":",
"# TODO: so ugly",
"output",
"=",
"'{0}{1}{2}{3}{4}{5}{6}{7}\\n'",
".",
"format",
"(",
"LINES",
"[",
"'{0}{1}'",
".",
"format",
"(",
"line",
",",
"'C'",
"if",
"colorize",
"else",
"''",
")",
"]",
"if",
"(",
"line",
"in",
"LINES",
".",
"keys",
"(",
")",
")",
"else",
"''",
",",
"COLOR_DEPTH",
"[",
"line",
"]",
"if",
"(",
"colorize",
"and",
"line",
"in",
"COLOR_DEPTH",
")",
"else",
"''",
",",
"ANSI",
"[",
"'b'",
"]",
",",
"short",
"if",
"short",
"is",
"not",
"None",
"else",
"(",
"name",
"if",
"(",
"name",
"is",
"not",
"None",
")",
"else",
"''",
")",
",",
"''",
"if",
"(",
"name",
"is",
"None",
"or",
"short",
"is",
"None",
")",
"else",
"' ({0})'",
".",
"format",
"(",
"name",
")",
",",
"''",
"if",
"(",
"name",
"is",
"None",
"and",
"short",
"is",
"None",
")",
"else",
"': '",
",",
"ANSI",
"[",
"'end'",
"]",
"if",
"colorize",
"else",
"''",
",",
"''",
"if",
"is_parent",
"else",
"value",
")",
"return",
"output"
] | The function for formatting CLI output results.
Args:
line (:obj:`str`): The line number (0-4). Determines indentation.
Defaults to '0'.
short (:obj:`str`): The optional abbreviated name for a field.
See hr.py for values.
name (:obj:`str`): The optional name for a field. See hr.py for values.
value (:obj:`str`): The field data (required).
is_parent (:obj:`bool`): Set to True if the field value has sub-items
(dicts/lists). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI colors.
Defaults to True.
Returns:
str: The generated output. | [
"The",
"function",
"for",
"formatting",
"CLI",
"output",
"results",
"."
] | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/scripts/ipwhois_cli.py#L313-L350 |
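A sketch of the formatter above. Caveat: the CLI script builds and parses its argparse arguments at module level in some versions, so importing generate_output directly may not work outside the script context; treat the import line as an assumption:

from ipwhois.scripts.ipwhois_cli import generate_output  # may require script context

text = generate_output(line='0', short='ASN', name='Autonomous System Number',
                       value='15169', colorize=False)
print(text, end='')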
5,762 | secynic/ipwhois | ipwhois/scripts/ipwhois_cli.py | IPWhoisCLI.generate_output_header | def generate_output_header(self, query_type='RDAP'):
"""
The function for generating the CLI output header.
Args:
query_type (:obj:`str`): The IPWhois query type. Defaults to
'RDAP'.
Returns:
str: The generated output.
"""
output = '\n{0}{1}{2} query for {3}:{4}\n\n'.format(
ANSI['ul'],
ANSI['b'],
query_type,
self.obj.address_str,
ANSI['end']
)
return output | python | def generate_output_header(self, query_type='RDAP'):
"""
The function for generating the CLI output header.
Args:
query_type (:obj:`str`): The IPWhois query type. Defaults to
'RDAP'.
Returns:
str: The generated output.
"""
output = '\n{0}{1}{2} query for {3}:{4}\n\n'.format(
ANSI['ul'],
ANSI['b'],
query_type,
self.obj.address_str,
ANSI['end']
)
return output | [
"def",
"generate_output_header",
"(",
"self",
",",
"query_type",
"=",
"'RDAP'",
")",
":",
"output",
"=",
"'\\n{0}{1}{2} query for {3}:{4}\\n\\n'",
".",
"format",
"(",
"ANSI",
"[",
"'ul'",
"]",
",",
"ANSI",
"[",
"'b'",
"]",
",",
"query_type",
",",
"self",
".",
"obj",
".",
"address_str",
",",
"ANSI",
"[",
"'end'",
"]",
")",
"return",
"output"
] | The function for generating the CLI output header.
Args:
query_type (:obj:`str`): The IPWhois query type. Defaults to
'RDAP'.
Returns:
str: The generated output. | [
"The",
"function",
"for",
"generating",
"the",
"CLI",
"output",
"header",
"."
] | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/scripts/ipwhois_cli.py#L406-L426 |
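A sketch assuming `cli` is an already-constructed IPWhoisCLI instance (its constructor takes the target address and performs setup, so it is not reproduced here):

header = cli.generate_output_header(query_type='RDAP')
print(header, end='')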
5,763 | secynic/ipwhois | ipwhois/scripts/ipwhois_cli.py | IPWhoisCLI.generate_output_newline | def generate_output_newline(self, line='0', colorize=True):
"""
The function for generating a CLI output new line.
Args:
line (:obj:`str`): The line number (0-4). Determines indentation.
Defaults to '0'.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output.
"""
return generate_output(
line=line,
is_parent=True,
colorize=colorize
) | python | def generate_output_newline(self, line='0', colorize=True):
"""
The function for generating a CLI output new line.
Args:
line (:obj:`str`): The line number (0-4). Determines indentation.
Defaults to '0'.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output.
"""
return generate_output(
line=line,
is_parent=True,
colorize=colorize
) | [
"def",
"generate_output_newline",
"(",
"self",
",",
"line",
"=",
"'0'",
",",
"colorize",
"=",
"True",
")",
":",
"return",
"generate_output",
"(",
"line",
"=",
"line",
",",
"is_parent",
"=",
"True",
",",
"colorize",
"=",
"colorize",
")"
] | The function for generating a CLI output new line.
Args:
line (:obj:`str`): The line number (0-4). Determines indentation.
Defaults to '0'.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output. | [
"The",
"function",
"for",
"generating",
"a",
"CLI",
"output",
"new",
"line",
"."
] | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/scripts/ipwhois_cli.py#L428-L446 |
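Continuing with the same assumed `cli` instance, a loop showing the indentation produced at successive depths:

for depth in ('0', '1', '2'):
    print(cli.generate_output_newline(line=depth, colorize=False), end='')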
5,764 | secynic/ipwhois | ipwhois/scripts/ipwhois_cli.py | IPWhoisCLI.generate_output_asn | def generate_output_asn(self, json_data=None, hr=True, show_name=False,
colorize=True):
"""
The function for generating CLI output ASN results.
Args:
json_data (:obj:`dict`): The data to process. Defaults to None.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output.
"""
if json_data is None:
json_data = {}
keys = {'asn', 'asn_cidr', 'asn_country_code', 'asn_date',
'asn_registry', 'asn_description'}.intersection(json_data)
output = ''
for key in keys:
output += generate_output(
line='0',
short=HR_ASN[key]['_short'] if hr else key,
name=HR_ASN[key]['_name'] if (hr and show_name) else None,
value=(json_data[key] if (
json_data[key] is not None and
len(json_data[key]) > 0 and
json_data[key] != 'NA') else 'None'),
colorize=colorize
)
return output | python | def generate_output_asn(self, json_data=None, hr=True, show_name=False,
colorize=True):
"""
The function for generating CLI output ASN results.
Args:
json_data (:obj:`dict`): The data to process. Defaults to None.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output.
"""
if json_data is None:
json_data = {}
keys = {'asn', 'asn_cidr', 'asn_country_code', 'asn_date',
'asn_registry', 'asn_description'}.intersection(json_data)
output = ''
for key in keys:
output += generate_output(
line='0',
short=HR_ASN[key]['_short'] if hr else key,
name=HR_ASN[key]['_name'] if (hr and show_name) else None,
value=(json_data[key] if (
json_data[key] is not None and
len(json_data[key]) > 0 and
json_data[key] != 'NA') else 'None'),
colorize=colorize
)
return output | [
"def",
"generate_output_asn",
"(",
"self",
",",
"json_data",
"=",
"None",
",",
"hr",
"=",
"True",
",",
"show_name",
"=",
"False",
",",
"colorize",
"=",
"True",
")",
":",
"if",
"json_data",
"is",
"None",
":",
"json_data",
"=",
"{",
"}",
"keys",
"=",
"{",
"'asn'",
",",
"'asn_cidr'",
",",
"'asn_country_code'",
",",
"'asn_date'",
",",
"'asn_registry'",
",",
"'asn_description'",
"}",
".",
"intersection",
"(",
"json_data",
")",
"output",
"=",
"''",
"for",
"key",
"in",
"keys",
":",
"output",
"+=",
"generate_output",
"(",
"line",
"=",
"'0'",
",",
"short",
"=",
"HR_ASN",
"[",
"key",
"]",
"[",
"'_short'",
"]",
"if",
"hr",
"else",
"key",
",",
"name",
"=",
"HR_ASN",
"[",
"key",
"]",
"[",
"'_name'",
"]",
"if",
"(",
"hr",
"and",
"show_name",
")",
"else",
"None",
",",
"value",
"=",
"(",
"json_data",
"[",
"key",
"]",
"if",
"(",
"json_data",
"[",
"key",
"]",
"is",
"not",
"None",
"and",
"len",
"(",
"json_data",
"[",
"key",
"]",
")",
">",
"0",
"and",
"json_data",
"[",
"key",
"]",
"!=",
"'NA'",
")",
"else",
"'None'",
")",
",",
"colorize",
"=",
"colorize",
")",
"return",
"output"
] | The function for generating CLI output ASN results.
Args:
json_data (:obj:`dict`): The data to process. Defaults to None.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output. | [
"The",
"function",
"for",
"generating",
"CLI",
"output",
"ASN",
"results",
"."
] | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/scripts/ipwhois_cli.py#L448-L487 |
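A sketch with an illustrative ASN result dict (the field values are made up); colorize=False keeps the output ANSI-free:

asn_data = {'asn': '15169', 'asn_cidr': '74.125.225.0/24',
            'asn_country_code': 'US', 'asn_date': '2007-03-13',
            'asn_registry': 'arin', 'asn_description': 'GOOGLE'}
print(cli.generate_output_asn(json_data=asn_data, hr=True, colorize=False), end='')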
5,765 | secynic/ipwhois | ipwhois/scripts/ipwhois_cli.py | IPWhoisCLI.generate_output_entities | def generate_output_entities(self, json_data=None, hr=True,
show_name=False, colorize=True):
"""
The function for generating CLI output RDAP entity results.
Args:
json_data (:obj:`dict`): The data to process. Defaults to None.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output.
"""
output = ''
short = HR_RDAP['entities']['_short'] if hr else 'entities'
name = HR_RDAP['entities']['_name'] if (hr and show_name) else None
output += generate_output(
line='0',
short=short,
name=name,
is_parent=False if (json_data is None or
json_data['entities'] is None) else True,
value='None' if (json_data is None or
json_data['entities'] is None) else None,
colorize=colorize
)
if json_data is not None:
for ent in json_data['entities']:
output += generate_output(
line='1',
value=ent,
colorize=colorize
)
return output | python | def generate_output_entities(self, json_data=None, hr=True,
show_name=False, colorize=True):
"""
The function for generating CLI output RDAP entity results.
Args:
json_data (:obj:`dict`): The data to process. Defaults to None.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output.
"""
output = ''
short = HR_RDAP['entities']['_short'] if hr else 'entities'
name = HR_RDAP['entities']['_name'] if (hr and show_name) else None
output += generate_output(
line='0',
short=short,
name=name,
is_parent=False if (json_data is None or
json_data['entities'] is None) else True,
value='None' if (json_data is None or
json_data['entities'] is None) else None,
colorize=colorize
)
if json_data is not None:
for ent in json_data['entities']:
output += generate_output(
line='1',
value=ent,
colorize=colorize
)
return output | [
"def",
"generate_output_entities",
"(",
"self",
",",
"json_data",
"=",
"None",
",",
"hr",
"=",
"True",
",",
"show_name",
"=",
"False",
",",
"colorize",
"=",
"True",
")",
":",
"output",
"=",
"''",
"short",
"=",
"HR_RDAP",
"[",
"'entities'",
"]",
"[",
"'_short'",
"]",
"if",
"hr",
"else",
"'entities'",
"name",
"=",
"HR_RDAP",
"[",
"'entities'",
"]",
"[",
"'_name'",
"]",
"if",
"(",
"hr",
"and",
"show_name",
")",
"else",
"None",
"output",
"+=",
"generate_output",
"(",
"line",
"=",
"'0'",
",",
"short",
"=",
"short",
",",
"name",
"=",
"name",
",",
"is_parent",
"=",
"False",
"if",
"(",
"json_data",
"is",
"None",
"or",
"json_data",
"[",
"'entities'",
"]",
"is",
"None",
")",
"else",
"True",
",",
"value",
"=",
"'None'",
"if",
"(",
"json_data",
"is",
"None",
"or",
"json_data",
"[",
"'entities'",
"]",
"is",
"None",
")",
"else",
"None",
",",
"colorize",
"=",
"colorize",
")",
"if",
"json_data",
"is",
"not",
"None",
":",
"for",
"ent",
"in",
"json_data",
"[",
"'entities'",
"]",
":",
"output",
"+=",
"generate_output",
"(",
"line",
"=",
"'1'",
",",
"value",
"=",
"ent",
",",
"colorize",
"=",
"colorize",
")",
"return",
"output"
] | The function for generating CLI output RDAP entity results.
Args:
json_data (:obj:`dict`): The data to process. Defaults to None.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output. | [
"The",
"function",
"for",
"generating",
"CLI",
"output",
"RDAP",
"entity",
"results",
"."
] | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/scripts/ipwhois_cli.py#L489-L532 |
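A sketch with hypothetical RDAP entity handles, again assuming the `cli` instance from above:

ents = {'entities': ['GOGL', 'ZG39-ARIN']}  # hypothetical handles
print(cli.generate_output_entities(json_data=ents, colorize=False), end='')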
5,766 | secynic/ipwhois | ipwhois/scripts/ipwhois_cli.py | IPWhoisCLI.generate_output_events | def generate_output_events(self, source, key, val, line='2', hr=True,
show_name=False, colorize=True):
"""
The function for generating CLI output RDAP events results.
Args:
source (:obj:`str`): The parent key 'network' or 'objects'
(required).
key (:obj:`str`): The event key 'events' or 'events_actor'
(required).
val (:obj:`dict`): The event dictionary (required).
line (:obj:`str`): The line number (0-4). Determines indentation.
Defaults to '0'.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output.
"""
output = generate_output(
line=line,
short=HR_RDAP[source][key]['_short'] if hr else key,
name=HR_RDAP[source][key]['_name'] if (hr and show_name) else None,
is_parent=False if (val is None or
len(val) == 0) else True,
value='None' if (val is None or
len(val) == 0) else None,
colorize=colorize
)
if val is not None:
count = 0
for item in val:
try:
action = item['action']
except KeyError:
action = None
try:
timestamp = item['timestamp']
except KeyError:
timestamp = None
try:
actor = item['actor']
except KeyError:
actor = None
if count > 0:
output += generate_output(
line=str(int(line)+1),
is_parent=True,
colorize=colorize
)
output += generate_output(
line=str(int(line)+1),
short=HR_RDAP_COMMON[key]['action'][
'_short'] if hr else 'action',
name=HR_RDAP_COMMON[key]['action'][
'_name'] if (hr and show_name) else None,
value=action,
colorize=colorize
)
output += generate_output(
line=str(int(line)+1),
short=HR_RDAP_COMMON[key]['timestamp'][
'_short'] if hr else 'timestamp',
name=HR_RDAP_COMMON[key]['timestamp'][
'_name'] if (hr and show_name) else None,
value=timestamp,
colorize=colorize
)
output += generate_output(
line=str(int(line)+1),
short=HR_RDAP_COMMON[key]['actor'][
'_short'] if hr else 'actor',
name=HR_RDAP_COMMON[key]['actor'][
'_name'] if (hr and show_name) else None,
value=actor,
colorize=colorize
)
count += 1
return output | python | def generate_output_events(self, source, key, val, line='2', hr=True,
show_name=False, colorize=True):
"""
The function for generating CLI output RDAP events results.
Args:
source (:obj:`str`): The parent key 'network' or 'objects'
(required).
key (:obj:`str`): The event key 'events' or 'events_actor'
(required).
val (:obj:`dict`): The event dictionary (required).
line (:obj:`str`): The line number (0-4). Determines indentation.
Defaults to '0'.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output.
"""
output = generate_output(
line=line,
short=HR_RDAP[source][key]['_short'] if hr else key,
name=HR_RDAP[source][key]['_name'] if (hr and show_name) else None,
is_parent=False if (val is None or
len(val) == 0) else True,
value='None' if (val is None or
len(val) == 0) else None,
colorize=colorize
)
if val is not None:
count = 0
for item in val:
try:
action = item['action']
except KeyError:
action = None
try:
timestamp = item['timestamp']
except KeyError:
timestamp = None
try:
actor = item['actor']
except KeyError:
actor = None
if count > 0:
output += generate_output(
line=str(int(line)+1),
is_parent=True,
colorize=colorize
)
output += generate_output(
line=str(int(line)+1),
short=HR_RDAP_COMMON[key]['action'][
'_short'] if hr else 'action',
name=HR_RDAP_COMMON[key]['action'][
'_name'] if (hr and show_name) else None,
value=action,
colorize=colorize
)
output += generate_output(
line=str(int(line)+1),
short=HR_RDAP_COMMON[key]['timestamp'][
'_short'] if hr else 'timestamp',
name=HR_RDAP_COMMON[key]['timestamp'][
'_name'] if (hr and show_name) else None,
value=timestamp,
colorize=colorize
)
output += generate_output(
line=str(int(line)+1),
short=HR_RDAP_COMMON[key]['actor'][
'_short'] if hr else 'actor',
name=HR_RDAP_COMMON[key]['actor'][
'_name'] if (hr and show_name) else None,
value=actor,
colorize=colorize
)
count += 1
return output | [
"def",
"generate_output_events",
"(",
"self",
",",
"source",
",",
"key",
",",
"val",
",",
"line",
"=",
"'2'",
",",
"hr",
"=",
"True",
",",
"show_name",
"=",
"False",
",",
"colorize",
"=",
"True",
")",
":",
"output",
"=",
"generate_output",
"(",
"line",
"=",
"line",
",",
"short",
"=",
"HR_RDAP",
"[",
"source",
"]",
"[",
"key",
"]",
"[",
"'_short'",
"]",
"if",
"hr",
"else",
"key",
",",
"name",
"=",
"HR_RDAP",
"[",
"source",
"]",
"[",
"key",
"]",
"[",
"'_name'",
"]",
"if",
"(",
"hr",
"and",
"show_name",
")",
"else",
"None",
",",
"is_parent",
"=",
"False",
"if",
"(",
"val",
"is",
"None",
"or",
"len",
"(",
"val",
")",
"==",
"0",
")",
"else",
"True",
",",
"value",
"=",
"'None'",
"if",
"(",
"val",
"is",
"None",
"or",
"len",
"(",
"val",
")",
"==",
"0",
")",
"else",
"None",
",",
"colorize",
"=",
"colorize",
")",
"if",
"val",
"is",
"not",
"None",
":",
"count",
"=",
"0",
"for",
"item",
"in",
"val",
":",
"try",
":",
"action",
"=",
"item",
"[",
"'action'",
"]",
"except",
"KeyError",
":",
"action",
"=",
"None",
"try",
":",
"timestamp",
"=",
"item",
"[",
"'timestamp'",
"]",
"except",
"KeyError",
":",
"timestamp",
"=",
"None",
"try",
":",
"actor",
"=",
"item",
"[",
"'actor'",
"]",
"except",
"KeyError",
":",
"actor",
"=",
"None",
"if",
"count",
">",
"0",
":",
"output",
"+=",
"generate_output",
"(",
"line",
"=",
"str",
"(",
"int",
"(",
"line",
")",
"+",
"1",
")",
",",
"is_parent",
"=",
"True",
",",
"colorize",
"=",
"colorize",
")",
"output",
"+=",
"generate_output",
"(",
"line",
"=",
"str",
"(",
"int",
"(",
"line",
")",
"+",
"1",
")",
",",
"short",
"=",
"HR_RDAP_COMMON",
"[",
"key",
"]",
"[",
"'action'",
"]",
"[",
"'_short'",
"]",
"if",
"hr",
"else",
"'action'",
",",
"name",
"=",
"HR_RDAP_COMMON",
"[",
"key",
"]",
"[",
"'action'",
"]",
"[",
"'_name'",
"]",
"if",
"(",
"hr",
"and",
"show_name",
")",
"else",
"None",
",",
"value",
"=",
"action",
",",
"colorize",
"=",
"colorize",
")",
"output",
"+=",
"generate_output",
"(",
"line",
"=",
"str",
"(",
"int",
"(",
"line",
")",
"+",
"1",
")",
",",
"short",
"=",
"HR_RDAP_COMMON",
"[",
"key",
"]",
"[",
"'timestamp'",
"]",
"[",
"'_short'",
"]",
"if",
"hr",
"else",
"'timestamp'",
",",
"name",
"=",
"HR_RDAP_COMMON",
"[",
"key",
"]",
"[",
"'timestamp'",
"]",
"[",
"'_name'",
"]",
"if",
"(",
"hr",
"and",
"show_name",
")",
"else",
"None",
",",
"value",
"=",
"timestamp",
",",
"colorize",
"=",
"colorize",
")",
"output",
"+=",
"generate_output",
"(",
"line",
"=",
"str",
"(",
"int",
"(",
"line",
")",
"+",
"1",
")",
",",
"short",
"=",
"HR_RDAP_COMMON",
"[",
"key",
"]",
"[",
"'actor'",
"]",
"[",
"'_short'",
"]",
"if",
"hr",
"else",
"'actor'",
",",
"name",
"=",
"HR_RDAP_COMMON",
"[",
"key",
"]",
"[",
"'actor'",
"]",
"[",
"'_name'",
"]",
"if",
"(",
"hr",
"and",
"show_name",
")",
"else",
"None",
",",
"value",
"=",
"actor",
",",
"colorize",
"=",
"colorize",
")",
"count",
"+=",
"1",
"return",
"output"
] | The function for generating CLI output RDAP events results.
Args:
source (:obj:`str`): The parent key 'network' or 'objects'
(required).
key (:obj:`str`): The event key 'events' or 'events_actor'
(required).
val (:obj:`list`): The list of event dictionaries (required).
line (:obj:`str`): The line number (0-4). Determines indentation.
Defaults to '2'.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output. | [
"The",
"function",
"for",
"generating",
"CLI",
"output",
"RDAP",
"events",
"results",
"."
] | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/scripts/ipwhois_cli.py#L534-L628 |
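A minimal sketch of the event list this method iterates over; the field names ('action', 'timestamp', 'actor') come from the parsing loop above, and the values are invented for illustration:

events = [
    {'action': 'registration', 'timestamp': '2005-07-20T00:00:00-04:00'},
    {'action': 'last changed', 'timestamp': '2019-03-08T10:00:00-04:00', 'actor': 'admin'},
]
for item in events:
    try:
        actor = item['actor']  # same KeyError guard the method uses for each field
    except KeyError:
        actor = None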
5,767 | secynic/ipwhois | ipwhois/scripts/ipwhois_cli.py | IPWhoisCLI.generate_output_list | def generate_output_list(self, source, key, val, line='2', hr=True,
show_name=False, colorize=True):
"""
The function for generating CLI output RDAP list results.
Args:
source (:obj:`str`): The parent key 'network' or 'objects'
(required).
key (:obj:`str`): The event key 'events' or 'events_actor'
(required).
val (:obj:`dict`): The event dictionary (required).
line (:obj:`str`): The line number (0-4). Determines indentation.
Defaults to '0'.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output.
"""
output = generate_output(
line=line,
short=HR_RDAP[source][key]['_short'] if hr else key,
name=HR_RDAP[source][key]['_name'] if (hr and show_name) else None,
is_parent=False if (val is None or
len(val) == 0) else True,
value='None' if (val is None or
len(val) == 0) else None,
colorize=colorize
)
if val is not None:
for item in val:
output += generate_output(
line=str(int(line)+1),
value=item,
colorize=colorize
)
return output | python | def generate_output_list(self, source, key, val, line='2', hr=True,
show_name=False, colorize=True):
"""
The function for generating CLI output RDAP list results.
Args:
source (:obj:`str`): The parent key 'network' or 'objects'
(required).
key (:obj:`str`): The list key, e.g. 'links' or 'status'
(required).
val (:obj:`list`): The list of values (required).
line (:obj:`str`): The line number (0-4). Determines indentation.
Defaults to '2'.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output.
"""
output = generate_output(
line=line,
short=HR_RDAP[source][key]['_short'] if hr else key,
name=HR_RDAP[source][key]['_name'] if (hr and show_name) else None,
is_parent=False if (val is None or
len(val) == 0) else True,
value='None' if (val is None or
len(val) == 0) else None,
colorize=colorize
)
if val is not None:
for item in val:
output += generate_output(
line=str(int(line)+1),
value=item,
colorize=colorize
)
return output | [
"def",
"generate_output_list",
"(",
"self",
",",
"source",
",",
"key",
",",
"val",
",",
"line",
"=",
"'2'",
",",
"hr",
"=",
"True",
",",
"show_name",
"=",
"False",
",",
"colorize",
"=",
"True",
")",
":",
"output",
"=",
"generate_output",
"(",
"line",
"=",
"line",
",",
"short",
"=",
"HR_RDAP",
"[",
"source",
"]",
"[",
"key",
"]",
"[",
"'_short'",
"]",
"if",
"hr",
"else",
"key",
",",
"name",
"=",
"HR_RDAP",
"[",
"source",
"]",
"[",
"key",
"]",
"[",
"'_name'",
"]",
"if",
"(",
"hr",
"and",
"show_name",
")",
"else",
"None",
",",
"is_parent",
"=",
"False",
"if",
"(",
"val",
"is",
"None",
"or",
"len",
"(",
"val",
")",
"==",
"0",
")",
"else",
"True",
",",
"value",
"=",
"'None'",
"if",
"(",
"val",
"is",
"None",
"or",
"len",
"(",
"val",
")",
"==",
"0",
")",
"else",
"None",
",",
"colorize",
"=",
"colorize",
")",
"if",
"val",
"is",
"not",
"None",
":",
"for",
"item",
"in",
"val",
":",
"output",
"+=",
"generate_output",
"(",
"line",
"=",
"str",
"(",
"int",
"(",
"line",
")",
"+",
"1",
")",
",",
"value",
"=",
"item",
",",
"colorize",
"=",
"colorize",
")",
"return",
"output"
] | The function for generating CLI output RDAP list results.
Args:
source (:obj:`str`): The parent key 'network' or 'objects'
(required).
key (:obj:`str`): The list key, e.g. 'links' or 'status'
(required).
val (:obj:`list`): The list of values (required).
line (:obj:`str`): The line number (0-4). Determines indentation.
Defaults to '2'.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output. | [
"The",
"function",
"for",
"generating",
"CLI",
"output",
"RDAP",
"list",
"results",
"."
] | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/scripts/ipwhois_cli.py#L630-L673 |
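A small sketch of the flat list this renderer consumes (RDAP 'status' or 'links' values, per the routing shown later in generate_output_network); the handle and URLs are illustrative only:

links = [
    'https://rdap.arin.net/registry/ip/192.0.2.0',
    'https://whois.arin.net/rest/net/NET-192-0-2-0-1',
]
for item in links:
    print('    ' + item)  # stand-in for the per-item generate_output call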
5,768 | secynic/ipwhois | ipwhois/scripts/ipwhois_cli.py | IPWhoisCLI.generate_output_notices | def generate_output_notices(self, source, key, val, line='1', hr=True,
show_name=False, colorize=True):
"""
The function for generating CLI output RDAP notices results.
Args:
source (:obj:`str`): The parent key 'network' or 'objects'
(required).
key (:obj:`str`): The notices key, 'notices' or 'remarks'
(required).
val (:obj:`list`): The list of notice dictionaries (required).
line (:obj:`str`): The line number (0-4). Determines indentation.
Defaults to '1'.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output.
"""
output = generate_output(
line=line,
short=HR_RDAP[source][key]['_short'] if hr else key,
name=HR_RDAP[source][key]['_name'] if (hr and show_name) else None,
is_parent=False if (val is None or
len(val) == 0) else True,
value='None' if (val is None or
len(val) == 0) else None,
colorize=colorize
)
if val is not None:
count = 0
for item in val:
title = item['title']
description = item['description']
links = item['links']
if count > 0:
output += generate_output(
line=str(int(line)+1),
is_parent=True,
colorize=colorize
)
output += generate_output(
line=str(int(line)+1),
short=HR_RDAP_COMMON[key]['title']['_short'] if hr else (
'title'),
name=HR_RDAP_COMMON[key]['title']['_name'] if (
hr and show_name) else None,
value=title,
colorize=colorize
)
output += generate_output(
line=str(int(line)+1),
short=HR_RDAP_COMMON[key]['description'][
'_short'] if hr else 'description',
name=HR_RDAP_COMMON[key]['description'][
'_name'] if (hr and show_name) else None,
value=description.replace(
'\n',
'\n{0}'.format(generate_output(line='3'))
),
colorize=colorize
)
output += self.generate_output_list(
source=source,
key='links',
val=links,
line=str(int(line)+1),
hr=hr,
show_name=show_name,
colorize=colorize
)
count += 1
return output | python | def generate_output_notices(self, source, key, val, line='1', hr=True,
show_name=False, colorize=True):
"""
The function for generating CLI output RDAP notices results.
Args:
source (:obj:`str`): The parent key 'network' or 'objects'
(required).
key (:obj:`str`): The notices key, 'notices' or 'remarks'
(required).
val (:obj:`list`): The list of notice dictionaries (required).
line (:obj:`str`): The line number (0-4). Determines indentation.
Defaults to '1'.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output.
"""
output = generate_output(
line=line,
short=HR_RDAP[source][key]['_short'] if hr else key,
name=HR_RDAP[source][key]['_name'] if (hr and show_name) else None,
is_parent=False if (val is None or
len(val) == 0) else True,
value='None' if (val is None or
len(val) == 0) else None,
colorize=colorize
)
if val is not None:
count = 0
for item in val:
title = item['title']
description = item['description']
links = item['links']
if count > 0:
output += generate_output(
line=str(int(line)+1),
is_parent=True,
colorize=colorize
)
output += generate_output(
line=str(int(line)+1),
short=HR_RDAP_COMMON[key]['title']['_short'] if hr else (
'title'),
name=HR_RDAP_COMMON[key]['title']['_name'] if (
hr and show_name) else None,
value=title,
colorize=colorize
)
output += generate_output(
line=str(int(line)+1),
short=HR_RDAP_COMMON[key]['description'][
'_short'] if hr else 'description',
name=HR_RDAP_COMMON[key]['description'][
'_name'] if (hr and show_name) else None,
value=description.replace(
'\n',
'\n{0}'.format(generate_output(line='3'))
),
colorize=colorize
)
output += self.generate_output_list(
source=source,
key='links',
val=links,
line=str(int(line)+1),
hr=hr,
show_name=show_name,
colorize=colorize
)
count += 1
return output | [
"def",
"generate_output_notices",
"(",
"self",
",",
"source",
",",
"key",
",",
"val",
",",
"line",
"=",
"'1'",
",",
"hr",
"=",
"True",
",",
"show_name",
"=",
"False",
",",
"colorize",
"=",
"True",
")",
":",
"output",
"=",
"generate_output",
"(",
"line",
"=",
"line",
",",
"short",
"=",
"HR_RDAP",
"[",
"source",
"]",
"[",
"key",
"]",
"[",
"'_short'",
"]",
"if",
"hr",
"else",
"key",
",",
"name",
"=",
"HR_RDAP",
"[",
"source",
"]",
"[",
"key",
"]",
"[",
"'_name'",
"]",
"if",
"(",
"hr",
"and",
"show_name",
")",
"else",
"None",
",",
"is_parent",
"=",
"False",
"if",
"(",
"val",
"is",
"None",
"or",
"len",
"(",
"val",
")",
"==",
"0",
")",
"else",
"True",
",",
"value",
"=",
"'None'",
"if",
"(",
"val",
"is",
"None",
"or",
"len",
"(",
"val",
")",
"==",
"0",
")",
"else",
"None",
",",
"colorize",
"=",
"colorize",
")",
"if",
"val",
"is",
"not",
"None",
":",
"count",
"=",
"0",
"for",
"item",
"in",
"val",
":",
"title",
"=",
"item",
"[",
"'title'",
"]",
"description",
"=",
"item",
"[",
"'description'",
"]",
"links",
"=",
"item",
"[",
"'links'",
"]",
"if",
"count",
">",
"0",
":",
"output",
"+=",
"generate_output",
"(",
"line",
"=",
"str",
"(",
"int",
"(",
"line",
")",
"+",
"1",
")",
",",
"is_parent",
"=",
"True",
",",
"colorize",
"=",
"colorize",
")",
"output",
"+=",
"generate_output",
"(",
"line",
"=",
"str",
"(",
"int",
"(",
"line",
")",
"+",
"1",
")",
",",
"short",
"=",
"HR_RDAP_COMMON",
"[",
"key",
"]",
"[",
"'title'",
"]",
"[",
"'_short'",
"]",
"if",
"hr",
"else",
"(",
"'title'",
")",
",",
"name",
"=",
"HR_RDAP_COMMON",
"[",
"key",
"]",
"[",
"'title'",
"]",
"[",
"'_name'",
"]",
"if",
"(",
"hr",
"and",
"show_name",
")",
"else",
"None",
",",
"value",
"=",
"title",
",",
"colorize",
"=",
"colorize",
")",
"output",
"+=",
"generate_output",
"(",
"line",
"=",
"str",
"(",
"int",
"(",
"line",
")",
"+",
"1",
")",
",",
"short",
"=",
"HR_RDAP_COMMON",
"[",
"key",
"]",
"[",
"'description'",
"]",
"[",
"'_short'",
"]",
"if",
"hr",
"else",
"'description'",
",",
"name",
"=",
"HR_RDAP_COMMON",
"[",
"key",
"]",
"[",
"'description'",
"]",
"[",
"'_name'",
"]",
"if",
"(",
"hr",
"and",
"show_name",
")",
"else",
"None",
",",
"value",
"=",
"description",
".",
"replace",
"(",
"'\\n'",
",",
"'\\n{0}'",
".",
"format",
"(",
"generate_output",
"(",
"line",
"=",
"'3'",
")",
")",
")",
",",
"colorize",
"=",
"colorize",
")",
"output",
"+=",
"self",
".",
"generate_output_list",
"(",
"source",
"=",
"source",
",",
"key",
"=",
"'links'",
",",
"val",
"=",
"links",
",",
"line",
"=",
"str",
"(",
"int",
"(",
"line",
")",
"+",
"1",
")",
",",
"hr",
"=",
"hr",
",",
"show_name",
"=",
"show_name",
",",
"colorize",
"=",
"colorize",
")",
"count",
"+=",
"1",
"return",
"output"
] | The function for generating CLI output RDAP notices results.
Args:
source (:obj:`str`): The parent key 'network' or 'objects'
(required).
key (:obj:`str`): The notices key, 'notices' or 'remarks'
(required).
val (:obj:`list`): The list of notice dictionaries (required).
line (:obj:`str`): The line number (0-4). Determines indentation.
Defaults to '1'.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output. | [
"The",
"function",
"for",
"generating",
"CLI",
"output",
"RDAP",
"notices",
"results",
"."
] | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/scripts/ipwhois_cli.py#L675-L760 |
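A sketch of a single notice dictionary as this method expects it. Unlike the events parser, 'title', 'description', and 'links' are indexed directly, so a missing key would raise KeyError; the values below are invented:

notice = {
    'title': 'Terms of Service',
    'description': 'Line one.\nLine two.',
    'links': ['https://www.example.net/tou/'],
}
# Multi-line descriptions are re-indented by replacing each newline,
# mirroring the description.replace('\n', ...) call in the method.
indented = notice['description'].replace('\n', '\n    ')
print(indented)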
5,769 | secynic/ipwhois | ipwhois/scripts/ipwhois_cli.py | IPWhoisCLI.generate_output_network | def generate_output_network(self, json_data=None, hr=True, show_name=False,
colorize=True):
"""
The function for generating CLI output RDAP network results.
Args:
json_data (:obj:`dict`): The data to process. Defaults to None.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output.
"""
if json_data is None:
json_data = {}
output = generate_output(
line='0',
short=HR_RDAP['network']['_short'] if hr else 'network',
name=HR_RDAP['network']['_name'] if (hr and show_name) else None,
is_parent=True,
colorize=colorize
)
for key, val in json_data['network'].items():
if key in ['links', 'status']:
output += self.generate_output_list(
source='network',
key=key,
val=val,
line='1',
hr=hr,
show_name=show_name,
colorize=colorize
)
elif key in ['notices', 'remarks']:
output += self.generate_output_notices(
source='network',
key=key,
val=val,
line='1',
hr=hr,
show_name=show_name,
colorize=colorize
)
elif key == 'events':
output += self.generate_output_events(
source='network',
key=key,
val=val,
line='1',
hr=hr,
show_name=show_name,
colorize=colorize
)
elif key not in ['raw']:
output += generate_output(
line='1',
short=HR_RDAP['network'][key]['_short'] if hr else key,
name=HR_RDAP['network'][key]['_name'] if (
hr and show_name) else None,
value=val,
colorize=colorize
)
return output | python | def generate_output_network(self, json_data=None, hr=True, show_name=False,
colorize=True):
"""
The function for generating CLI output RDAP network results.
Args:
json_data (:obj:`dict`): The data to process. Defaults to None.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output.
"""
if json_data is None:
json_data = {}
output = generate_output(
line='0',
short=HR_RDAP['network']['_short'] if hr else 'network',
name=HR_RDAP['network']['_name'] if (hr and show_name) else None,
is_parent=True,
colorize=colorize
)
for key, val in json_data['network'].items():
if key in ['links', 'status']:
output += self.generate_output_list(
source='network',
key=key,
val=val,
line='1',
hr=hr,
show_name=show_name,
colorize=colorize
)
elif key in ['notices', 'remarks']:
output += self.generate_output_notices(
source='network',
key=key,
val=val,
line='1',
hr=hr,
show_name=show_name,
colorize=colorize
)
elif key == 'events':
output += self.generate_output_events(
source='network',
key=key,
val=val,
line='1',
hr=hr,
show_name=show_name,
colorize=colorize
)
elif key not in ['raw']:
output += generate_output(
line='1',
short=HR_RDAP['network'][key]['_short'] if hr else key,
name=HR_RDAP['network'][key]['_name'] if (
hr and show_name) else None,
value=val,
colorize=colorize
)
return output | [
"def",
"generate_output_network",
"(",
"self",
",",
"json_data",
"=",
"None",
",",
"hr",
"=",
"True",
",",
"show_name",
"=",
"False",
",",
"colorize",
"=",
"True",
")",
":",
"if",
"json_data",
"is",
"None",
":",
"json_data",
"=",
"{",
"}",
"output",
"=",
"generate_output",
"(",
"line",
"=",
"'0'",
",",
"short",
"=",
"HR_RDAP",
"[",
"'network'",
"]",
"[",
"'_short'",
"]",
"if",
"hr",
"else",
"'network'",
",",
"name",
"=",
"HR_RDAP",
"[",
"'network'",
"]",
"[",
"'_name'",
"]",
"if",
"(",
"hr",
"and",
"show_name",
")",
"else",
"None",
",",
"is_parent",
"=",
"True",
",",
"colorize",
"=",
"colorize",
")",
"for",
"key",
",",
"val",
"in",
"json_data",
"[",
"'network'",
"]",
".",
"items",
"(",
")",
":",
"if",
"key",
"in",
"[",
"'links'",
",",
"'status'",
"]",
":",
"output",
"+=",
"self",
".",
"generate_output_list",
"(",
"source",
"=",
"'network'",
",",
"key",
"=",
"key",
",",
"val",
"=",
"val",
",",
"line",
"=",
"'1'",
",",
"hr",
"=",
"hr",
",",
"show_name",
"=",
"show_name",
",",
"colorize",
"=",
"colorize",
")",
"elif",
"key",
"in",
"[",
"'notices'",
",",
"'remarks'",
"]",
":",
"output",
"+=",
"self",
".",
"generate_output_notices",
"(",
"source",
"=",
"'network'",
",",
"key",
"=",
"key",
",",
"val",
"=",
"val",
",",
"line",
"=",
"'1'",
",",
"hr",
"=",
"hr",
",",
"show_name",
"=",
"show_name",
",",
"colorize",
"=",
"colorize",
")",
"elif",
"key",
"==",
"'events'",
":",
"output",
"+=",
"self",
".",
"generate_output_events",
"(",
"source",
"=",
"'network'",
",",
"key",
"=",
"key",
",",
"val",
"=",
"val",
",",
"line",
"=",
"'1'",
",",
"hr",
"=",
"hr",
",",
"show_name",
"=",
"show_name",
",",
"colorize",
"=",
"colorize",
")",
"elif",
"key",
"not",
"in",
"[",
"'raw'",
"]",
":",
"output",
"+=",
"generate_output",
"(",
"line",
"=",
"'1'",
",",
"short",
"=",
"HR_RDAP",
"[",
"'network'",
"]",
"[",
"key",
"]",
"[",
"'_short'",
"]",
"if",
"hr",
"else",
"key",
",",
"name",
"=",
"HR_RDAP",
"[",
"'network'",
"]",
"[",
"key",
"]",
"[",
"'_name'",
"]",
"if",
"(",
"hr",
"and",
"show_name",
")",
"else",
"None",
",",
"value",
"=",
"val",
",",
"colorize",
"=",
"colorize",
")",
"return",
"output"
] | The function for generating CLI output RDAP network results.
Args:
json_data (:obj:`dict`): The data to process. Defaults to None.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output. | [
"The",
"function",
"for",
"generating",
"CLI",
"output",
"RDAP",
"network",
"results",
"."
] | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/scripts/ipwhois_cli.py#L762-L840 |
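An assumed minimal json_data shape for this method, inferred from the keys it routes; the values are placeholders:

json_data = {'network': {
    'handle': 'NET-192-0-2-0-1',   # scalar -> plain key/value line
    'status': ['active'],          # 'links'/'status' -> generate_output_list
    'notices': [], 'remarks': [],  # -> generate_output_notices
    'events': [],                  # -> generate_output_events
    'raw': None,                   # explicitly skipped by the final elif
}}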
5,770 | secynic/ipwhois | ipwhois/scripts/ipwhois_cli.py | IPWhoisCLI.generate_output_whois_nets | def generate_output_whois_nets(self, json_data=None, hr=True,
show_name=False, colorize=True):
"""
The function for generating CLI output Legacy Whois networks results.
Args:
json_data (:obj:`dict`): The data to process. Defaults to None.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output.
"""
if json_data is None:
json_data = {}
output = generate_output(
line='0',
short=HR_WHOIS['nets']['_short'] if hr else 'nets',
name=HR_WHOIS['nets']['_name'] if (hr and show_name) else None,
is_parent=True,
colorize=colorize
)
count = 0
for net in json_data['nets']:
if count > 0:
output += self.generate_output_newline(
line='1',
colorize=colorize
)
count += 1
output += generate_output(
line='1',
short=net['handle'],
is_parent=True,
colorize=colorize
)
for key, val in net.items():
if val and '\n' in val:
output += generate_output(
line='2',
short=HR_WHOIS['nets'][key]['_short'] if hr else key,
name=HR_WHOIS['nets'][key]['_name'] if (
hr and show_name) else None,
is_parent=False if (val is None or
len(val) == 0) else True,
value='None' if (val is None or
len(val) == 0) else None,
colorize=colorize
)
for v in val.split('\n'):
output += generate_output(
line='3',
value=v,
colorize=colorize
)
else:
output += generate_output(
line='2',
short=HR_WHOIS['nets'][key]['_short'] if hr else key,
name=HR_WHOIS['nets'][key]['_name'] if (
hr and show_name) else None,
value=val,
colorize=colorize
)
return output | python | def generate_output_whois_nets(self, json_data=None, hr=True,
show_name=False, colorize=True):
"""
The function for generating CLI output Legacy Whois networks results.
Args:
json_data (:obj:`dict`): The data to process. Defaults to None.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output.
"""
if json_data is None:
json_data = {}
output = generate_output(
line='0',
short=HR_WHOIS['nets']['_short'] if hr else 'nets',
name=HR_WHOIS['nets']['_name'] if (hr and show_name) else None,
is_parent=True,
colorize=colorize
)
count = 0
for net in json_data['nets']:
if count > 0:
output += self.generate_output_newline(
line='1',
colorize=colorize
)
count += 1
output += generate_output(
line='1',
short=net['handle'],
is_parent=True,
colorize=colorize
)
for key, val in net.items():
if val and '\n' in val:
output += generate_output(
line='2',
short=HR_WHOIS['nets'][key]['_short'] if hr else key,
name=HR_WHOIS['nets'][key]['_name'] if (
hr and show_name) else None,
is_parent=False if (val is None or
len(val) == 0) else True,
value='None' if (val is None or
len(val) == 0) else None,
colorize=colorize
)
for v in val.split('\n'):
output += generate_output(
line='3',
value=v,
colorize=colorize
)
else:
output += generate_output(
line='2',
short=HR_WHOIS['nets'][key]['_short'] if hr else key,
name=HR_WHOIS['nets'][key]['_name'] if (
hr and show_name) else None,
value=val,
colorize=colorize
)
return output | [
"def",
"generate_output_whois_nets",
"(",
"self",
",",
"json_data",
"=",
"None",
",",
"hr",
"=",
"True",
",",
"show_name",
"=",
"False",
",",
"colorize",
"=",
"True",
")",
":",
"if",
"json_data",
"is",
"None",
":",
"json_data",
"=",
"{",
"}",
"output",
"=",
"generate_output",
"(",
"line",
"=",
"'0'",
",",
"short",
"=",
"HR_WHOIS",
"[",
"'nets'",
"]",
"[",
"'_short'",
"]",
"if",
"hr",
"else",
"'nets'",
",",
"name",
"=",
"HR_WHOIS",
"[",
"'nets'",
"]",
"[",
"'_name'",
"]",
"if",
"(",
"hr",
"and",
"show_name",
")",
"else",
"None",
",",
"is_parent",
"=",
"True",
",",
"colorize",
"=",
"colorize",
")",
"count",
"=",
"0",
"for",
"net",
"in",
"json_data",
"[",
"'nets'",
"]",
":",
"if",
"count",
">",
"0",
":",
"output",
"+=",
"self",
".",
"generate_output_newline",
"(",
"line",
"=",
"'1'",
",",
"colorize",
"=",
"colorize",
")",
"count",
"+=",
"1",
"output",
"+=",
"generate_output",
"(",
"line",
"=",
"'1'",
",",
"short",
"=",
"net",
"[",
"'handle'",
"]",
",",
"is_parent",
"=",
"True",
",",
"colorize",
"=",
"colorize",
")",
"for",
"key",
",",
"val",
"in",
"net",
".",
"items",
"(",
")",
":",
"if",
"val",
"and",
"'\\n'",
"in",
"val",
":",
"output",
"+=",
"generate_output",
"(",
"line",
"=",
"'2'",
",",
"short",
"=",
"HR_WHOIS",
"[",
"'nets'",
"]",
"[",
"key",
"]",
"[",
"'_short'",
"]",
"if",
"hr",
"else",
"key",
",",
"name",
"=",
"HR_WHOIS",
"[",
"'nets'",
"]",
"[",
"key",
"]",
"[",
"'_name'",
"]",
"if",
"(",
"hr",
"and",
"show_name",
")",
"else",
"None",
",",
"is_parent",
"=",
"False",
"if",
"(",
"val",
"is",
"None",
"or",
"len",
"(",
"val",
")",
"==",
"0",
")",
"else",
"True",
",",
"value",
"=",
"'None'",
"if",
"(",
"val",
"is",
"None",
"or",
"len",
"(",
"val",
")",
"==",
"0",
")",
"else",
"None",
",",
"colorize",
"=",
"colorize",
")",
"for",
"v",
"in",
"val",
".",
"split",
"(",
"'\\n'",
")",
":",
"output",
"+=",
"generate_output",
"(",
"line",
"=",
"'3'",
",",
"value",
"=",
"v",
",",
"colorize",
"=",
"colorize",
")",
"else",
":",
"output",
"+=",
"generate_output",
"(",
"line",
"=",
"'2'",
",",
"short",
"=",
"HR_WHOIS",
"[",
"'nets'",
"]",
"[",
"key",
"]",
"[",
"'_short'",
"]",
"if",
"hr",
"else",
"key",
",",
"name",
"=",
"HR_WHOIS",
"[",
"'nets'",
"]",
"[",
"key",
"]",
"[",
"'_name'",
"]",
"if",
"(",
"hr",
"and",
"show_name",
")",
"else",
"None",
",",
"value",
"=",
"val",
",",
"colorize",
"=",
"colorize",
")",
"return",
"output"
] | The function for generating CLI output Legacy Whois networks results.
Args:
json_data (:obj:`dict`): The data to process. Defaults to None.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output. | [
"The",
"function",
"for",
"generating",
"CLI",
"output",
"Legacy",
"Whois",
"networks",
"results",
"."
] | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/scripts/ipwhois_cli.py#L1085-L1164 |
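An assumed shape for json_data['nets'], based on the fields the loop touches: 'handle' becomes the per-net parent line, and any value containing '\n' is split across child lines. Values are invented:

json_data = {'nets': [{
    'handle': 'NET-192-0-2-0-1',           # printed as the per-net parent line
    'cidr': '192.0.2.0/24',
    'address': '123 Example St\nSuite 4',  # '\n' values get one child line per part
}]}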
5,771 | secynic/ipwhois | ipwhois/scripts/ipwhois_cli.py | IPWhoisCLI.generate_output_nir | def generate_output_nir(self, json_data=None, hr=True, show_name=False,
colorize=True):
"""
The function for generating CLI output NIR network results.
Args:
json_data (:obj:`dict`): The data to process. Defaults to None.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output.
"""
if json_data is None:
json_data = {}
output = generate_output(
line='0',
short=HR_WHOIS_NIR['nets']['_short'] if hr else 'nir_nets',
name=HR_WHOIS_NIR['nets']['_name'] if (hr and show_name) else None,
is_parent=True,
colorize=colorize
)
count = 0
if json_data['nir']:
for net in json_data['nir']['nets']:
if count > 0:
output += self.generate_output_newline(
line='1',
colorize=colorize
)
count += 1
output += generate_output(
line='1',
short=net['handle'],
is_parent=True,
colorize=colorize
)
for key, val in net.items():
if val and (isinstance(val, dict) or '\n' in val or
key == 'nameservers'):
output += generate_output(
line='2',
short=(
HR_WHOIS_NIR['nets'][key]['_short'] if (
hr) else key
),
name=HR_WHOIS_NIR['nets'][key]['_name'] if (
hr and show_name) else None,
is_parent=False if (val is None or
len(val) == 0) else True,
value='None' if (val is None or
len(val) == 0) else None,
colorize=colorize
)
if key == 'contacts':
for k, v in val.items():
if v:
output += generate_output(
line='3',
is_parent=False if (
len(v) == 0) else True,
name=k,
colorize=colorize
)
for contact_key, contact_val in v.items():
if v is not None:
tmp_out = '{0}{1}{2}'.format(
contact_key,
': ',
contact_val
)
output += generate_output(
line='4',
value=tmp_out,
colorize=colorize
)
elif key == 'nameservers':
for v in val:
output += generate_output(
line='3',
value=v,
colorize=colorize
)
else:
for v in val.split('\n'):
output += generate_output(
line='3',
value=v,
colorize=colorize
)
else:
output += generate_output(
line='2',
short=(
HR_WHOIS_NIR['nets'][key]['_short'] if (
hr) else key
),
name=HR_WHOIS_NIR['nets'][key]['_name'] if (
hr and show_name) else None,
value=val,
colorize=colorize
)
else:
output += 'None'
return output | python | def generate_output_nir(self, json_data=None, hr=True, show_name=False,
colorize=True):
"""
The function for generating CLI output NIR network results.
Args:
json_data (:obj:`dict`): The data to process. Defaults to None.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output.
"""
if json_data is None:
json_data = {}
output = generate_output(
line='0',
short=HR_WHOIS_NIR['nets']['_short'] if hr else 'nir_nets',
name=HR_WHOIS_NIR['nets']['_name'] if (hr and show_name) else None,
is_parent=True,
colorize=colorize
)
count = 0
if json_data['nir']:
for net in json_data['nir']['nets']:
if count > 0:
output += self.generate_output_newline(
line='1',
colorize=colorize
)
count += 1
output += generate_output(
line='1',
short=net['handle'],
is_parent=True,
colorize=colorize
)
for key, val in net.items():
if val and (isinstance(val, dict) or '\n' in val or
key == 'nameservers'):
output += generate_output(
line='2',
short=(
HR_WHOIS_NIR['nets'][key]['_short'] if (
hr) else key
),
name=HR_WHOIS_NIR['nets'][key]['_name'] if (
hr and show_name) else None,
is_parent=False if (val is None or
len(val) == 0) else True,
value='None' if (val is None or
len(val) == 0) else None,
colorize=colorize
)
if key == 'contacts':
for k, v in val.items():
if v:
output += generate_output(
line='3',
is_parent=False if (
len(v) == 0) else True,
name=k,
colorize=colorize
)
for contact_key, contact_val in v.items():
if v is not None:
tmp_out = '{0}{1}{2}'.format(
contact_key,
': ',
contact_val
)
output += generate_output(
line='4',
value=tmp_out,
colorize=colorize
)
elif key == 'nameservers':
for v in val:
output += generate_output(
line='3',
value=v,
colorize=colorize
)
else:
for v in val.split('\n'):
output += generate_output(
line='3',
value=v,
colorize=colorize
)
else:
output += generate_output(
line='2',
short=(
HR_WHOIS_NIR['nets'][key]['_short'] if (
hr) else key
),
name=HR_WHOIS_NIR['nets'][key]['_name'] if (
hr and show_name) else None,
value=val,
colorize=colorize
)
else:
output += 'None'
return output | [
"def",
"generate_output_nir",
"(",
"self",
",",
"json_data",
"=",
"None",
",",
"hr",
"=",
"True",
",",
"show_name",
"=",
"False",
",",
"colorize",
"=",
"True",
")",
":",
"if",
"json_data",
"is",
"None",
":",
"json_data",
"=",
"{",
"}",
"output",
"=",
"generate_output",
"(",
"line",
"=",
"'0'",
",",
"short",
"=",
"HR_WHOIS_NIR",
"[",
"'nets'",
"]",
"[",
"'_short'",
"]",
"if",
"hr",
"else",
"'nir_nets'",
",",
"name",
"=",
"HR_WHOIS_NIR",
"[",
"'nets'",
"]",
"[",
"'_name'",
"]",
"if",
"(",
"hr",
"and",
"show_name",
")",
"else",
"None",
",",
"is_parent",
"=",
"True",
",",
"colorize",
"=",
"colorize",
")",
"count",
"=",
"0",
"if",
"json_data",
"[",
"'nir'",
"]",
":",
"for",
"net",
"in",
"json_data",
"[",
"'nir'",
"]",
"[",
"'nets'",
"]",
":",
"if",
"count",
">",
"0",
":",
"output",
"+=",
"self",
".",
"generate_output_newline",
"(",
"line",
"=",
"'1'",
",",
"colorize",
"=",
"colorize",
")",
"count",
"+=",
"1",
"output",
"+=",
"generate_output",
"(",
"line",
"=",
"'1'",
",",
"short",
"=",
"net",
"[",
"'handle'",
"]",
",",
"is_parent",
"=",
"True",
",",
"colorize",
"=",
"colorize",
")",
"for",
"key",
",",
"val",
"in",
"net",
".",
"items",
"(",
")",
":",
"if",
"val",
"and",
"(",
"isinstance",
"(",
"val",
",",
"dict",
")",
"or",
"'\\n'",
"in",
"val",
"or",
"key",
"==",
"'nameservers'",
")",
":",
"output",
"+=",
"generate_output",
"(",
"line",
"=",
"'2'",
",",
"short",
"=",
"(",
"HR_WHOIS_NIR",
"[",
"'nets'",
"]",
"[",
"key",
"]",
"[",
"'_short'",
"]",
"if",
"(",
"hr",
")",
"else",
"key",
")",
",",
"name",
"=",
"HR_WHOIS_NIR",
"[",
"'nets'",
"]",
"[",
"key",
"]",
"[",
"'_name'",
"]",
"if",
"(",
"hr",
"and",
"show_name",
")",
"else",
"None",
",",
"is_parent",
"=",
"False",
"if",
"(",
"val",
"is",
"None",
"or",
"len",
"(",
"val",
")",
"==",
"0",
")",
"else",
"True",
",",
"value",
"=",
"'None'",
"if",
"(",
"val",
"is",
"None",
"or",
"len",
"(",
"val",
")",
"==",
"0",
")",
"else",
"None",
",",
"colorize",
"=",
"colorize",
")",
"if",
"key",
"==",
"'contacts'",
":",
"for",
"k",
",",
"v",
"in",
"val",
".",
"items",
"(",
")",
":",
"if",
"v",
":",
"output",
"+=",
"generate_output",
"(",
"line",
"=",
"'3'",
",",
"is_parent",
"=",
"False",
"if",
"(",
"len",
"(",
"v",
")",
"==",
"0",
")",
"else",
"True",
",",
"name",
"=",
"k",
",",
"colorize",
"=",
"colorize",
")",
"for",
"contact_key",
",",
"contact_val",
"in",
"v",
".",
"items",
"(",
")",
":",
"if",
"v",
"is",
"not",
"None",
":",
"tmp_out",
"=",
"'{0}{1}{2}'",
".",
"format",
"(",
"contact_key",
",",
"': '",
",",
"contact_val",
")",
"output",
"+=",
"generate_output",
"(",
"line",
"=",
"'4'",
",",
"value",
"=",
"tmp_out",
",",
"colorize",
"=",
"colorize",
")",
"elif",
"key",
"==",
"'nameservers'",
":",
"for",
"v",
"in",
"val",
":",
"output",
"+=",
"generate_output",
"(",
"line",
"=",
"'3'",
",",
"value",
"=",
"v",
",",
"colorize",
"=",
"colorize",
")",
"else",
":",
"for",
"v",
"in",
"val",
".",
"split",
"(",
"'\\n'",
")",
":",
"output",
"+=",
"generate_output",
"(",
"line",
"=",
"'3'",
",",
"value",
"=",
"v",
",",
"colorize",
"=",
"colorize",
")",
"else",
":",
"output",
"+=",
"generate_output",
"(",
"line",
"=",
"'2'",
",",
"short",
"=",
"(",
"HR_WHOIS_NIR",
"[",
"'nets'",
"]",
"[",
"key",
"]",
"[",
"'_short'",
"]",
"if",
"(",
"hr",
")",
"else",
"key",
")",
",",
"name",
"=",
"HR_WHOIS_NIR",
"[",
"'nets'",
"]",
"[",
"key",
"]",
"[",
"'_name'",
"]",
"if",
"(",
"hr",
"and",
"show_name",
")",
"else",
"None",
",",
"value",
"=",
"val",
",",
"colorize",
"=",
"colorize",
")",
"else",
":",
"output",
"+=",
"'None'",
"return",
"output"
] | The function for generating CLI output NIR network results.
Args:
json_data (:obj:`dict`): The data to process. Defaults to None.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output. | [
"The",
"function",
"for",
"generating",
"CLI",
"output",
"NIR",
"network",
"results",
"."
] | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/scripts/ipwhois_cli.py#L1234-L1368 |
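An assumed json_data['nir'] shape, based on the branches above ('contacts' dictionaries get nested key: value lines, 'nameservers' lists get one line each); handles and names are invented:

json_data = {'nir': {'nets': [{
    'handle': 'EXAMPLE-KR',                           # invented handle
    'contacts': {'admin': {'name': 'Hong Gildong'}},  # dict branch -> nested lines
    'nameservers': ['ns1.example.kr'],                # list branch -> one line each
}]}}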
5,772 | secynic/ipwhois | ipwhois/asn.py | IPASN.parse_fields_whois | def parse_fields_whois(self, response):
"""
The function for parsing ASN fields from a whois response.
Args:
response (:obj:`str`): The response from the ASN whois server.
Returns:
dict: The ASN lookup results
::
{
'asn' (str) - The Autonomous System Number
'asn_date' (str) - The ASN Allocation date
'asn_registry' (str) - The assigned ASN registry
'asn_cidr' (str) - The assigned ASN CIDR
'asn_country_code' (str) - The assigned ASN country code
'asn_description' (str) - The ASN description
}
Raises:
ASNRegistryError: The ASN registry is not known.
ASNParseError: ASN parsing failed.
"""
try:
temp = response.split('|')
# Parse out the ASN information.
ret = {'asn_registry': temp[4].strip(' \n')}
if ret['asn_registry'] not in self.rir_whois.keys():
raise ASNRegistryError(
'ASN registry {0} is not known.'.format(
ret['asn_registry'])
)
ret['asn'] = temp[0].strip(' \n')
ret['asn_cidr'] = temp[2].strip(' \n')
ret['asn_country_code'] = temp[3].strip(' \n').upper()
ret['asn_date'] = temp[5].strip(' \n')
ret['asn_description'] = temp[6].strip(' \n')
except ASNRegistryError:
raise
except Exception as e:
raise ASNParseError('Parsing failed for "{0}" with exception: {1}.'
''.format(response, e)[:100])
return ret | python | def parse_fields_whois(self, response):
"""
The function for parsing ASN fields from a whois response.
Args:
response (:obj:`str`): The response from the ASN whois server.
Returns:
dict: The ASN lookup results
::
{
'asn' (str) - The Autonomous System Number
'asn_date' (str) - The ASN Allocation date
'asn_registry' (str) - The assigned ASN registry
'asn_cidr' (str) - The assigned ASN CIDR
'asn_country_code' (str) - The assigned ASN country code
'asn_description' (str) - The ASN description
}
Raises:
ASNRegistryError: The ASN registry is not known.
ASNParseError: ASN parsing failed.
"""
try:
temp = response.split('|')
# Parse out the ASN information.
ret = {'asn_registry': temp[4].strip(' \n')}
if ret['asn_registry'] not in self.rir_whois.keys():
raise ASNRegistryError(
'ASN registry {0} is not known.'.format(
ret['asn_registry'])
)
ret['asn'] = temp[0].strip(' \n')
ret['asn_cidr'] = temp[2].strip(' \n')
ret['asn_country_code'] = temp[3].strip(' \n').upper()
ret['asn_date'] = temp[5].strip(' \n')
ret['asn_description'] = temp[6].strip(' \n')
except ASNRegistryError:
raise
except Exception as e:
raise ASNParseError('Parsing failed for "{0}" with exception: {1}.'
''.format(response, e)[:100])
return ret | [
"def",
"parse_fields_whois",
"(",
"self",
",",
"response",
")",
":",
"try",
":",
"temp",
"=",
"response",
".",
"split",
"(",
"'|'",
")",
"# Parse out the ASN information.",
"ret",
"=",
"{",
"'asn_registry'",
":",
"temp",
"[",
"4",
"]",
".",
"strip",
"(",
"' \\n'",
")",
"}",
"if",
"ret",
"[",
"'asn_registry'",
"]",
"not",
"in",
"self",
".",
"rir_whois",
".",
"keys",
"(",
")",
":",
"raise",
"ASNRegistryError",
"(",
"'ASN registry {0} is not known.'",
".",
"format",
"(",
"ret",
"[",
"'asn_registry'",
"]",
")",
")",
"ret",
"[",
"'asn'",
"]",
"=",
"temp",
"[",
"0",
"]",
".",
"strip",
"(",
"' \\n'",
")",
"ret",
"[",
"'asn_cidr'",
"]",
"=",
"temp",
"[",
"2",
"]",
".",
"strip",
"(",
"' \\n'",
")",
"ret",
"[",
"'asn_country_code'",
"]",
"=",
"temp",
"[",
"3",
"]",
".",
"strip",
"(",
"' \\n'",
")",
".",
"upper",
"(",
")",
"ret",
"[",
"'asn_date'",
"]",
"=",
"temp",
"[",
"5",
"]",
".",
"strip",
"(",
"' \\n'",
")",
"ret",
"[",
"'asn_description'",
"]",
"=",
"temp",
"[",
"6",
"]",
".",
"strip",
"(",
"' \\n'",
")",
"except",
"ASNRegistryError",
":",
"raise",
"except",
"Exception",
"as",
"e",
":",
"raise",
"ASNParseError",
"(",
"'Parsing failed for \"{0}\" with exception: {1}.'",
"''",
".",
"format",
"(",
"response",
",",
"e",
")",
"[",
":",
"100",
"]",
")",
"return",
"ret"
] | The function for parsing ASN fields from a whois response.
Args:
response (:obj:`str`): The response from the ASN whois server.
Returns:
dict: The ASN lookup results
::
{
'asn' (str) - The Autonomous System Number
'asn_date' (str) - The ASN Allocation date
'asn_registry' (str) - The assigned ASN registry
'asn_cidr' (str) - The assigned ASN CIDR
'asn_country_code' (str) - The assigned ASN country code
'asn_description' (str) - The ASN description
}
Raises:
ASNRegistryError: The ASN registry is not known.
ASNParseError: ASN parsing failed. | [
"The",
"function",
"for",
"parsing",
"ASN",
"fields",
"from",
"a",
"whois",
"response",
"."
] | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/asn.py#L239-L294 |
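A worked example of the pipe-delimited layout the parser indexes (ASN | IP | CIDR | country | registry | date | description, matching temp[0] through temp[6]); the sample values are illustrative:

sample = '15169 | 8.8.8.8 | 8.8.8.0/24 | US | arin | 1992-12-01 | GOOGLE, US'
temp = sample.split('|')
asn_registry = temp[4].strip(' \n')              # 'arin'
asn_cidr = temp[2].strip(' \n')                  # '8.8.8.0/24'
asn_country_code = temp[3].strip(' \n').upper()  # 'US'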
5,773 | secynic/ipwhois | ipwhois/asn.py | IPASN.parse_fields_http | def parse_fields_http(self, response, extra_org_map=None):
"""
The function for parsing ASN fields from a http response.
Args:
response (:obj:`str`): The response from the ASN http server.
extra_org_map (:obj:`dict`): Dictionary mapping org handles to
RIRs. This is for limited cases where ARIN REST (ASN fallback
HTTP lookup) does not show an RIR as the org handle e.g., DNIC
(which is now the built in ORG_MAP) e.g., {'DNIC': 'arin'}.
Valid RIR values are (note the case-sensitive - this is meant
to match the REST result): 'ARIN', 'RIPE', 'apnic', 'lacnic',
'afrinic'. Defaults to None.
Returns:
dict: The ASN lookup results
::
{
'asn' (None) - Cannot retrieve with this method.
'asn_date' (None) - Cannot retrieve with this method.
'asn_registry' (str) - The assigned ASN registry
'asn_cidr' (None) - Cannot retrieve with this method.
'asn_country_code' (None) - Cannot retrieve with this
method.
'asn_description' (None) - Cannot retrieve with this
method.
}
Raises:
ASNRegistryError: The ASN registry is not known.
ASNParseError: ASN parsing failed.
"""
# Set the org_map. Map the orgRef handle to an RIR.
org_map = self.org_map.copy()
try:
org_map.update(extra_org_map)
except (TypeError, ValueError, IndexError, KeyError):
pass
try:
asn_data = {
'asn_registry': None,
'asn': None,
'asn_cidr': None,
'asn_country_code': None,
'asn_date': None,
'asn_description': None
}
try:
net_list = response['nets']['net']
if not isinstance(net_list, list):
net_list = [net_list]
except (KeyError, TypeError):
log.debug('No networks found')
net_list = []
for n in reversed(net_list):
try:
asn_data['asn_registry'] = (
org_map[n['orgRef']['@handle'].upper()]
)
except KeyError as e:
log.debug('Could not parse ASN registry via HTTP: '
'{0}'.format(str(e)))
continue
break
if not asn_data['asn_registry']:
log.debug('Could not parse ASN registry via HTTP')
raise ASNRegistryError('ASN registry lookup failed.')
except ASNRegistryError:
raise
except Exception as e: # pragma: no cover
raise ASNParseError('Parsing failed for "{0}" with exception: {1}.'
''.format(response, e)[:100])
return asn_data | python | def parse_fields_http(self, response, extra_org_map=None):
"""
The function for parsing ASN fields from a http response.
Args:
response (:obj:`str`): The response from the ASN http server.
extra_org_map (:obj:`dict`): Dictionary mapping org handles to
RIRs. This is for limited cases where ARIN REST (ASN fallback
HTTP lookup) does not show an RIR as the org handle e.g., DNIC
(which is now the built-in ORG_MAP) e.g., {'DNIC': 'arin'}.
Valid RIR values are (note the case sensitivity - this is meant
to match the REST result): 'ARIN', 'RIPE', 'apnic', 'lacnic',
'afrinic'. Defaults to None.
Returns:
dict: The ASN lookup results
::
{
'asn' (None) - Cannot retrieve with this method.
'asn_date' (None) - Cannot retrieve with this method.
'asn_registry' (str) - The assigned ASN registry
'asn_cidr' (None) - Cannot retrieve with this method.
'asn_country_code' (None) - Cannot retrieve with this
method.
'asn_description' (None) - Cannot retrieve with this
method.
}
Raises:
ASNRegistryError: The ASN registry is not known.
ASNParseError: ASN parsing failed.
"""
# Set the org_map. Map the orgRef handle to an RIR.
org_map = self.org_map.copy()
try:
org_map.update(extra_org_map)
except (TypeError, ValueError, IndexError, KeyError):
pass
try:
asn_data = {
'asn_registry': None,
'asn': None,
'asn_cidr': None,
'asn_country_code': None,
'asn_date': None,
'asn_description': None
}
try:
net_list = response['nets']['net']
if not isinstance(net_list, list):
net_list = [net_list]
except (KeyError, TypeError):
log.debug('No networks found')
net_list = []
for n in reversed(net_list):
try:
asn_data['asn_registry'] = (
org_map[n['orgRef']['@handle'].upper()]
)
except KeyError as e:
log.debug('Could not parse ASN registry via HTTP: '
'{0}'.format(str(e)))
continue
break
if not asn_data['asn_registry']:
log.debug('Could not parse ASN registry via HTTP')
raise ASNRegistryError('ASN registry lookup failed.')
except ASNRegistryError:
raise
except Exception as e: # pragma: no cover
raise ASNParseError('Parsing failed for "{0}" with exception: {1}.'
''.format(response, e)[:100])
return asn_data | [
"def",
"parse_fields_http",
"(",
"self",
",",
"response",
",",
"extra_org_map",
"=",
"None",
")",
":",
"# Set the org_map. Map the orgRef handle to an RIR.",
"org_map",
"=",
"self",
".",
"org_map",
".",
"copy",
"(",
")",
"try",
":",
"org_map",
".",
"update",
"(",
"extra_org_map",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
",",
"IndexError",
",",
"KeyError",
")",
":",
"pass",
"try",
":",
"asn_data",
"=",
"{",
"'asn_registry'",
":",
"None",
",",
"'asn'",
":",
"None",
",",
"'asn_cidr'",
":",
"None",
",",
"'asn_country_code'",
":",
"None",
",",
"'asn_date'",
":",
"None",
",",
"'asn_description'",
":",
"None",
"}",
"try",
":",
"net_list",
"=",
"response",
"[",
"'nets'",
"]",
"[",
"'net'",
"]",
"if",
"not",
"isinstance",
"(",
"net_list",
",",
"list",
")",
":",
"net_list",
"=",
"[",
"net_list",
"]",
"except",
"(",
"KeyError",
",",
"TypeError",
")",
":",
"log",
".",
"debug",
"(",
"'No networks found'",
")",
"net_list",
"=",
"[",
"]",
"for",
"n",
"in",
"reversed",
"(",
"net_list",
")",
":",
"try",
":",
"asn_data",
"[",
"'asn_registry'",
"]",
"=",
"(",
"org_map",
"[",
"n",
"[",
"'orgRef'",
"]",
"[",
"'@handle'",
"]",
".",
"upper",
"(",
")",
"]",
")",
"except",
"KeyError",
"as",
"e",
":",
"log",
".",
"debug",
"(",
"'Could not parse ASN registry via HTTP: '",
"'{0}'",
".",
"format",
"(",
"str",
"(",
"e",
")",
")",
")",
"continue",
"break",
"if",
"not",
"asn_data",
"[",
"'asn_registry'",
"]",
":",
"log",
".",
"debug",
"(",
"'Could not parse ASN registry via HTTP'",
")",
"raise",
"ASNRegistryError",
"(",
"'ASN registry lookup failed.'",
")",
"except",
"ASNRegistryError",
":",
"raise",
"except",
"Exception",
"as",
"e",
":",
"# pragma: no cover",
"raise",
"ASNParseError",
"(",
"'Parsing failed for \"{0}\" with exception: {1}.'",
"''",
".",
"format",
"(",
"response",
",",
"e",
")",
"[",
":",
"100",
"]",
")",
"return",
"asn_data"
] | The function for parsing ASN fields from a http response.
Args:
response (:obj:`str`): The response from the ASN http server.
extra_org_map (:obj:`dict`): Dictionary mapping org handles to
RIRs. This is for limited cases where ARIN REST (ASN fallback
HTTP lookup) does not show an RIR as the org handle e.g., DNIC
(which is now the built in ORG_MAP) e.g., {'DNIC': 'arin'}.
Valid RIR values are (note the case-sensitive - this is meant
to match the REST result): 'ARIN', 'RIPE', 'apnic', 'lacnic',
'afrinic'. Defaults to None.
Returns:
dict: The ASN lookup results
::
{
'asn' (None) - Cannot retrieve with this method.
'asn_date' (None) - Cannot retrieve with this method.
'asn_registry' (str) - The assigned ASN registry
'asn_cidr' (None) - Cannot retrieve with this method.
'asn_country_code' (None) - Cannot retrieve with this
method.
'asn_description' (None) - Cannot retrieve with this
method.
}
Raises:
ASNRegistryError: The ASN registry is not known.
ASNParseError: ASN parsing failed. | [
"The",
"function",
"for",
"parsing",
"ASN",
"fields",
"from",
"a",
"http",
"response",
"."
] | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/asn.py#L306-L404 |
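A toy ARIN-REST-style payload showing why the isinstance check exists: a single net arrives as a dict rather than a list. The handle 'RIPE' is chosen to match a key the built-in org_map is documented to carry:

response = {'nets': {'net': {'orgRef': {'@handle': 'RIPE'}}}}
net_list = response['nets']['net']
if not isinstance(net_list, list):  # a single net arrives as a dict, not a list
    net_list = [net_list]
handle = net_list[0]['orgRef']['@handle'].upper()  # 'RIPE'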
5,774 | secynic/ipwhois | ipwhois/asn.py | ASNOrigin.get_nets_radb | def get_nets_radb(self, response, is_http=False):
"""
The function for parsing network blocks from ASN origin data.
Args:
response (:obj:`str`): The response from the RADB whois/http
server.
is_http (:obj:`bool`): If the query is RADB HTTP instead of whois,
set to True. Defaults to False.
Returns:
list: A list of network block dictionaries
::
[{
'cidr' (str) - The assigned CIDR
'start' (int) - The index for the start of the parsed
network block
'end' (int) - The index for the end of the parsed network
block
}]
"""
nets = []
if is_http:
regex = r'route(?:6)?:[^\S\n]+(?P<val>.+?)<br>'
else:
regex = r'^route(?:6)?:[^\S\n]+(?P<val>.+|.+)$'
# Iterate through all of the networks found, storing the CIDR value
# and the start and end positions.
for match in re.finditer(
regex,
response,
re.MULTILINE
):
try:
net = copy.deepcopy(BASE_NET)
net['cidr'] = match.group(1).strip()
net['start'] = match.start()
net['end'] = match.end()
nets.append(net)
except ValueError: # pragma: no cover
pass
return nets | python | def get_nets_radb(self, response, is_http=False):
"""
The function for parsing network blocks from ASN origin data.
Args:
response (:obj:`str`): The response from the RADB whois/http
server.
is_http (:obj:`bool`): If the query is RADB HTTP instead of whois,
set to True. Defaults to False.
Returns:
list: A list of network block dictionaries
::
[{
'cidr' (str) - The assigned CIDR
'start' (int) - The index for the start of the parsed
network block
'end' (int) - The index for the end of the parsed network
block
}]
"""
nets = []
if is_http:
regex = r'route(?:6)?:[^\S\n]+(?P<val>.+?)<br>'
else:
regex = r'^route(?:6)?:[^\S\n]+(?P<val>.+|.+)$'
# Iterate through all of the networks found, storing the CIDR value
# and the start and end positions.
for match in re.finditer(
regex,
response,
re.MULTILINE
):
try:
net = copy.deepcopy(BASE_NET)
net['cidr'] = match.group(1).strip()
net['start'] = match.start()
net['end'] = match.end()
nets.append(net)
except ValueError: # pragma: no cover
pass
return nets | [
"def",
"get_nets_radb",
"(",
"self",
",",
"response",
",",
"is_http",
"=",
"False",
")",
":",
"nets",
"=",
"[",
"]",
"if",
"is_http",
":",
"regex",
"=",
"r'route(?:6)?:[^\\S\\n]+(?P<val>.+?)<br>'",
"else",
":",
"regex",
"=",
"r'^route(?:6)?:[^\\S\\n]+(?P<val>.+|.+)$'",
"# Iterate through all of the networks found, storing the CIDR value",
"# and the start and end positions.",
"for",
"match",
"in",
"re",
".",
"finditer",
"(",
"regex",
",",
"response",
",",
"re",
".",
"MULTILINE",
")",
":",
"try",
":",
"net",
"=",
"copy",
".",
"deepcopy",
"(",
"BASE_NET",
")",
"net",
"[",
"'cidr'",
"]",
"=",
"match",
".",
"group",
"(",
"1",
")",
".",
"strip",
"(",
")",
"net",
"[",
"'start'",
"]",
"=",
"match",
".",
"start",
"(",
")",
"net",
"[",
"'end'",
"]",
"=",
"match",
".",
"end",
"(",
")",
"nets",
".",
"append",
"(",
"net",
")",
"except",
"ValueError",
":",
"# pragma: no cover",
"pass",
"return",
"nets"
] | The function for parsing network blocks from ASN origin data.
Args:
response (:obj:`str`): The response from the RADB whois/http
server.
is_http (:obj:`bool`): If the query is RADB HTTP instead of whois,
set to True. Defaults to False.
Returns:
list: A list of network block dictionaries
::
[{
'cidr' (str) - The assigned CIDR
'start' (int) - The index for the start of the parsed
network block
'end' (int) - The index for the end of the parsed network
block
}] | [
"The",
"function",
"for",
"parsing",
"network",
"blocks",
"from",
"ASN",
"origin",
"data",
"."
] | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/asn.py#L719-L770 |
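The whois-branch regex from above, run over a made-up RADB response fragment to show that both route: and route6: objects are captured:

import re

sample = (
    'route:      198.51.100.0/24\n'
    'origin:     AS64496\n'
    'route6:     2001:db8::/32\n'
)
for match in re.finditer(r'^route(?:6)?:[^\S\n]+(?P<val>.+|.+)$',
                         sample, re.MULTILINE):
    print(match.group(1).strip())  # 198.51.100.0/24, then 2001:db8::/32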
5,775 | secynic/ipwhois | ipwhois/nir.py | NIRWhois.get_nets_jpnic | def get_nets_jpnic(self, response):
"""
The function for parsing network blocks from jpnic whois data.
Args:
response (:obj:`str`): The response from the jpnic server.
Returns:
list of dict: Mapping of networks with start and end positions.
::
[{
'cidr' (str) - The network routing block
'start' (int) - The starting point of the network
'end' (int) - The end point of the network
}]
"""
nets = []
# Iterate through all of the networks found, storing the CIDR value
# and the start and end positions.
for match in re.finditer(
r'^.*?(\[Network Number\])[^\S\n]+.+?>(?P<val>.+?)</A>$',
response,
re.MULTILINE
):
try:
net = copy.deepcopy(BASE_NET)
tmp = ip_network(match.group(2))
try: # pragma: no cover
network_address = tmp.network_address
except AttributeError: # pragma: no cover
network_address = tmp.ip
pass
try: # pragma: no cover
broadcast_address = tmp.broadcast_address
except AttributeError: # pragma: no cover
broadcast_address = tmp.broadcast
pass
net['range'] = '{0} - {1}'.format(
network_address + 1, broadcast_address
)
cidr = ip_network(match.group(2).strip()).__str__()
net['cidr'] = cidr
net['start'] = match.start()
net['end'] = match.end()
nets.append(net)
except (ValueError, TypeError):
pass
return nets | python | def get_nets_jpnic(self, response):
"""
The function for parsing network blocks from jpnic whois data.
Args:
response (:obj:`str`): The response from the jpnic server.
Returns:
list of dict: Mapping of networks with start and end positions.
::
[{
'cidr' (str) - The network routing block
'start' (int) - The starting point of the network
'end' (int) - The end point of the network
}]
"""
nets = []
# Iterate through all of the networks found, storing the CIDR value
# and the start and end positions.
for match in re.finditer(
r'^.*?(\[Network Number\])[^\S\n]+.+?>(?P<val>.+?)</A>$',
response,
re.MULTILINE
):
try:
net = copy.deepcopy(BASE_NET)
tmp = ip_network(match.group(2))
try: # pragma: no cover
network_address = tmp.network_address
except AttributeError: # pragma: no cover
network_address = tmp.ip
pass
try: # pragma: no cover
broadcast_address = tmp.broadcast_address
except AttributeError: # pragma: no cover
broadcast_address = tmp.broadcast
pass
net['range'] = '{0} - {1}'.format(
network_address + 1, broadcast_address
)
cidr = ip_network(match.group(2).strip()).__str__()
net['cidr'] = cidr
net['start'] = match.start()
net['end'] = match.end()
nets.append(net)
except (ValueError, TypeError):
pass
return nets | [
"def",
"get_nets_jpnic",
"(",
"self",
",",
"response",
")",
":",
"nets",
"=",
"[",
"]",
"# Iterate through all of the networks found, storing the CIDR value",
"# and the start and end positions.",
"for",
"match",
"in",
"re",
".",
"finditer",
"(",
"r'^.*?(\\[Network Number\\])[^\\S\\n]+.+?>(?P<val>.+?)</A>$'",
",",
"response",
",",
"re",
".",
"MULTILINE",
")",
":",
"try",
":",
"net",
"=",
"copy",
".",
"deepcopy",
"(",
"BASE_NET",
")",
"tmp",
"=",
"ip_network",
"(",
"match",
".",
"group",
"(",
"2",
")",
")",
"try",
":",
"# pragma: no cover",
"network_address",
"=",
"tmp",
".",
"network_address",
"except",
"AttributeError",
":",
"# pragma: no cover",
"network_address",
"=",
"tmp",
".",
"ip",
"pass",
"try",
":",
"# pragma: no cover",
"broadcast_address",
"=",
"tmp",
".",
"broadcast_address",
"except",
"AttributeError",
":",
"# pragma: no cover",
"broadcast_address",
"=",
"tmp",
".",
"broadcast",
"pass",
"net",
"[",
"'range'",
"]",
"=",
"'{0} - {1}'",
".",
"format",
"(",
"network_address",
"+",
"1",
",",
"broadcast_address",
")",
"cidr",
"=",
"ip_network",
"(",
"match",
".",
"group",
"(",
"2",
")",
".",
"strip",
"(",
")",
")",
".",
"__str__",
"(",
")",
"net",
"[",
"'cidr'",
"]",
"=",
"cidr",
"net",
"[",
"'start'",
"]",
"=",
"match",
".",
"start",
"(",
")",
"net",
"[",
"'end'",
"]",
"=",
"match",
".",
"end",
"(",
")",
"nets",
".",
"append",
"(",
"net",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"pass",
"return",
"nets"
] | The function for parsing network blocks from jpnic whois data.
Args:
response (:obj:`str`): The response from the jpnic server.
Returns:
list of dict: Mapping of networks with start and end positions.
::
[{
'cidr' (str) - The network routing block
'start' (int) - The starting point of the network
'end' (int) - The endpoint point of the network
}] | [
"The",
"function",
"for",
"parsing",
"network",
"blocks",
"from",
"jpnic",
"whois",
"data",
"."
] | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/nir.py#L299-L360 |
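The same pattern applied to a single made-up line in the JPNIC HTML layout the parser expects (an anchor tag whose text is the CIDR); the '+ 1' on the network address mirrors the range formatting above:

import re
from ipaddress import ip_network

sample = 'a. [Network Number]   <A HREF="/?key=192.0.2.0/24">192.0.2.0/24</A>'
m = re.search(r'^.*?(\[Network Number\])[^\S\n]+.+?>(?P<val>.+?)</A>$',
              sample, re.MULTILINE)
net = ip_network(m.group(2))
print('{0} - {1}'.format(net.network_address + 1, net.broadcast_address))
# 192.0.2.1 - 192.0.2.255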
5,776 | secynic/ipwhois | ipwhois/nir.py | NIRWhois.get_contact | def get_contact(self, response=None, nir=None, handle=None,
retry_count=3, dt_format=None):
"""
The function for retrieving and parsing NIR whois data based on
NIR_WHOIS contact_fields.
Args:
response (:obj:`str`): Optional response object, this bypasses the
lookup.
nir (:obj:`str`): The NIR to query ('jpnic' or 'krnic'). Required
if response is None.
handle (:obj:`str`): For NIRs that have separate contact queries
(JPNIC), this is the contact handle to use in the query.
Defaults to None.
retry_count (:obj:`int`): The number of times to retry in case
socket errors, timeouts, connection resets, etc. are
encountered. Defaults to 3.
dt_format (:obj:`str`): The format of datetime fields if known.
Defaults to None.
Returns:
dict: Mapping of the fields provided in contact_fields, to their
parsed results.
"""
if response or nir == 'krnic':
contact_response = response
else:
# Retrieve the whois data.
contact_response = self._net.get_http_raw(
url=str(NIR_WHOIS[nir]['url']).format(handle),
retry_count=retry_count,
headers=NIR_WHOIS[nir]['request_headers'],
request_type=NIR_WHOIS[nir]['request_type']
)
return self.parse_fields(
response=contact_response,
fields_dict=NIR_WHOIS[nir]['contact_fields'],
dt_format=dt_format,
hourdelta=int(NIR_WHOIS[nir]['dt_hourdelta']),
is_contact=True
) | python | def get_contact(self, response=None, nir=None, handle=None,
retry_count=3, dt_format=None):
"""
The function for retrieving and parsing NIR whois data based on
NIR_WHOIS contact_fields.
Args:
response (:obj:`str`): Optional response object, this bypasses the
lookup.
nir (:obj:`str`): The NIR to query ('jpnic' or 'krnic'). Required
if response is None.
handle (:obj:`str`): For NIRs that have separate contact queries
(JPNIC), this is the contact handle to use in the query.
Defaults to None.
retry_count (:obj:`int`): The number of times to retry in case
socket errors, timeouts, connection resets, etc. are
encountered. Defaults to 3.
dt_format (:obj:`str`): The format of datetime fields if known.
Defaults to None.
Returns:
dict: Mapping of the fields provided in contact_fields, to their
parsed results.
"""
if response or nir == 'krnic':
contact_response = response
else:
# Retrieve the whois data.
contact_response = self._net.get_http_raw(
url=str(NIR_WHOIS[nir]['url']).format(handle),
retry_count=retry_count,
headers=NIR_WHOIS[nir]['request_headers'],
request_type=NIR_WHOIS[nir]['request_type']
)
return self.parse_fields(
response=contact_response,
fields_dict=NIR_WHOIS[nir]['contact_fields'],
dt_format=dt_format,
hourdelta=int(NIR_WHOIS[nir]['dt_hourdelta']),
is_contact=True
) | [
"def",
"get_contact",
"(",
"self",
",",
"response",
"=",
"None",
",",
"nir",
"=",
"None",
",",
"handle",
"=",
"None",
",",
"retry_count",
"=",
"3",
",",
"dt_format",
"=",
"None",
")",
":",
"if",
"response",
"or",
"nir",
"==",
"'krnic'",
":",
"contact_response",
"=",
"response",
"else",
":",
"# Retrieve the whois data.",
"contact_response",
"=",
"self",
".",
"_net",
".",
"get_http_raw",
"(",
"url",
"=",
"str",
"(",
"NIR_WHOIS",
"[",
"nir",
"]",
"[",
"'url'",
"]",
")",
".",
"format",
"(",
"handle",
")",
",",
"retry_count",
"=",
"retry_count",
",",
"headers",
"=",
"NIR_WHOIS",
"[",
"nir",
"]",
"[",
"'request_headers'",
"]",
",",
"request_type",
"=",
"NIR_WHOIS",
"[",
"nir",
"]",
"[",
"'request_type'",
"]",
")",
"return",
"self",
".",
"parse_fields",
"(",
"response",
"=",
"contact_response",
",",
"fields_dict",
"=",
"NIR_WHOIS",
"[",
"nir",
"]",
"[",
"'contact_fields'",
"]",
",",
"dt_format",
"=",
"dt_format",
",",
"hourdelta",
"=",
"int",
"(",
"NIR_WHOIS",
"[",
"nir",
"]",
"[",
"'dt_hourdelta'",
"]",
")",
",",
"is_contact",
"=",
"True",
")"
] | The function for retrieving and parsing NIR whois data based on
NIR_WHOIS contact_fields.
Args:
response (:obj:`str`): Optional response object, this bypasses the
lookup.
nir (:obj:`str`): The NIR to query ('jpnic' or 'krnic'). Required
if response is None.
handle (:obj:`str`): For NIRs that have separate contact queries
(JPNIC), this is the contact handle to use in the query.
Defaults to None.
retry_count (:obj:`int`): The number of times to retry in case
socket errors, timeouts, connection resets, etc. are
encountered. Defaults to 3.
dt_format (:obj:`str`): The format of datetime fields if known.
Defaults to None.
Returns:
dict: Mapping of the fields provided in contact_fields, to their
parsed results. | [
"The",
"function",
"for",
"retrieving",
"and",
"parsing",
"NIR",
"whois",
"data",
"based",
"on",
"NIR_WHOIS",
"contact_fields",
"."
] | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/nir.py#L447-L492 |
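A usage sketch for the method above; `nir_whois` and the handle value are assumed for illustration (they are not from the source). For JPNIC the handle is interpolated into the query URL; for KRNIC the contact data already sits in the main response, so `response` is passed in directly:

```python
# Usage sketch; instance name and handle are illustrative assumptions.
contact = nir_whois.get_contact(
    nir='jpnic',
    handle='JP00000001',  # hypothetical JPNIC contact handle
    retry_count=3,
)
print(contact)  # dict keyed by NIR_WHOIS['jpnic']['contact_fields']
```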
5,777 | secynic/ipwhois | ipwhois/rdap.py | _RDAPContact._parse_address | def _parse_address(self, val):
"""
The function for parsing the vcard address.
Args:
val (:obj:`list`): The value to parse.
"""
ret = {
'type': None,
'value': None
}
try:
ret['type'] = val[1]['type']
except (KeyError, ValueError, TypeError):
pass
try:
ret['value'] = val[1]['label']
except (KeyError, ValueError, TypeError):
ret['value'] = '\n'.join(val[3]).strip()
try:
self.vars['address'].append(ret)
except AttributeError:
self.vars['address'] = []
self.vars['address'].append(ret) | python | def _parse_address(self, val):
"""
The function for parsing the vcard address.
Args:
val (:obj:`list`): The value to parse.
"""
ret = {
'type': None,
'value': None
}
try:
ret['type'] = val[1]['type']
except (KeyError, ValueError, TypeError):
pass
try:
ret['value'] = val[1]['label']
except (KeyError, ValueError, TypeError):
ret['value'] = '\n'.join(val[3]).strip()
try:
self.vars['address'].append(ret)
except AttributeError:
self.vars['address'] = []
self.vars['address'].append(ret) | [
"def",
"_parse_address",
"(",
"self",
",",
"val",
")",
":",
"ret",
"=",
"{",
"'type'",
":",
"None",
",",
"'value'",
":",
"None",
"}",
"try",
":",
"ret",
"[",
"'type'",
"]",
"=",
"val",
"[",
"1",
"]",
"[",
"'type'",
"]",
"except",
"(",
"KeyError",
",",
"ValueError",
",",
"TypeError",
")",
":",
"pass",
"try",
":",
"ret",
"[",
"'value'",
"]",
"=",
"val",
"[",
"1",
"]",
"[",
"'label'",
"]",
"except",
"(",
"KeyError",
",",
"ValueError",
",",
"TypeError",
")",
":",
"ret",
"[",
"'value'",
"]",
"=",
"'\\n'",
".",
"join",
"(",
"val",
"[",
"3",
"]",
")",
".",
"strip",
"(",
")",
"try",
":",
"self",
".",
"vars",
"[",
"'address'",
"]",
".",
"append",
"(",
"ret",
")",
"except",
"AttributeError",
":",
"self",
".",
"vars",
"[",
"'address'",
"]",
"=",
"[",
"]",
"self",
".",
"vars",
"[",
"'address'",
"]",
".",
"append",
"(",
"ret",
")"
] | The function for parsing the vcard address.
Args:
val (:obj:`list`): The value to parse. | [
"The",
"function",
"for",
"parsing",
"the",
"vcard",
"address",
"."
] | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/rdap.py#L112-L148 |
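The fallback logic above (prefer the pre-formatted `label`, otherwise join the structured address parts) can be exercised standalone on a sample jCard `adr` entry; the sample data below is illustrative, not from the source:

```python
# Sample jCard 'adr' entry (illustrative data).
val = ['adr', {'type': 'work'}, 'text',
       ['', '', '123 Example St', 'Example City', 'CA', '00000', 'US']]

ret = {'type': None, 'value': None}
try:
    ret['type'] = val[1]['type']
except (KeyError, ValueError, TypeError):
    pass
try:
    ret['value'] = val[1]['label']            # preferred: formatted label
except (KeyError, ValueError, TypeError):
    ret['value'] = '\n'.join(val[3]).strip()  # fallback: join the parts
print(ret)
# {'type': 'work', 'value': '123 Example St\nExample City\nCA\n00000\nUS'}
```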
5,778 | secynic/ipwhois | ipwhois/rdap.py | _RDAPContact._parse_phone | def _parse_phone(self, val):
"""
The function for parsing the vcard phone numbers.
Args:
val (:obj:`list`): The value to parse.
"""
ret = {
'type': None,
'value': None
}
try:
ret['type'] = val[1]['type']
except (IndexError, KeyError, ValueError, TypeError):
pass
ret['value'] = val[3].strip()
try:
self.vars['phone'].append(ret)
except AttributeError:
self.vars['phone'] = []
self.vars['phone'].append(ret) | python | def _parse_phone(self, val):
"""
The function for parsing the vcard phone numbers.
Args:
val (:obj:`list`): The value to parse.
"""
ret = {
'type': None,
'value': None
}
try:
ret['type'] = val[1]['type']
except (IndexError, KeyError, ValueError, TypeError):
pass
ret['value'] = val[3].strip()
try:
self.vars['phone'].append(ret)
except AttributeError:
self.vars['phone'] = []
self.vars['phone'].append(ret) | [
"def",
"_parse_phone",
"(",
"self",
",",
"val",
")",
":",
"ret",
"=",
"{",
"'type'",
":",
"None",
",",
"'value'",
":",
"None",
"}",
"try",
":",
"ret",
"[",
"'type'",
"]",
"=",
"val",
"[",
"1",
"]",
"[",
"'type'",
"]",
"except",
"(",
"IndexError",
",",
"KeyError",
",",
"ValueError",
",",
"TypeError",
")",
":",
"pass",
"ret",
"[",
"'value'",
"]",
"=",
"val",
"[",
"3",
"]",
".",
"strip",
"(",
")",
"try",
":",
"self",
".",
"vars",
"[",
"'phone'",
"]",
".",
"append",
"(",
"ret",
")",
"except",
"AttributeError",
":",
"self",
".",
"vars",
"[",
"'phone'",
"]",
"=",
"[",
"]",
"self",
".",
"vars",
"[",
"'phone'",
"]",
".",
"append",
"(",
"ret",
")"
] | The function for parsing the vcard phone numbers.
Args:
val (:obj:`list`): The value to parse. | [
"The",
"function",
"for",
"parsing",
"the",
"vcard",
"phone",
"numbers",
"."
] | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/rdap.py#L150-L180 |
5,779 | secynic/ipwhois | ipwhois/rdap.py | _RDAPContact._parse_email | def _parse_email(self, val):
"""
The function for parsing the vcard email addresses.
Args:
val (:obj:`list`): The value to parse.
"""
ret = {
'type': None,
'value': None
}
try:
ret['type'] = val[1]['type']
except (KeyError, ValueError, TypeError):
pass
ret['value'] = val[3].strip()
try:
self.vars['email'].append(ret)
except AttributeError:
self.vars['email'] = []
self.vars['email'].append(ret) | python | def _parse_email(self, val):
"""
The function for parsing the vcard email addresses.
Args:
val (:obj:`list`): The value to parse.
"""
ret = {
'type': None,
'value': None
}
try:
ret['type'] = val[1]['type']
except (KeyError, ValueError, TypeError):
pass
ret['value'] = val[3].strip()
try:
self.vars['email'].append(ret)
except AttributeError:
self.vars['email'] = []
self.vars['email'].append(ret) | [
"def",
"_parse_email",
"(",
"self",
",",
"val",
")",
":",
"ret",
"=",
"{",
"'type'",
":",
"None",
",",
"'value'",
":",
"None",
"}",
"try",
":",
"ret",
"[",
"'type'",
"]",
"=",
"val",
"[",
"1",
"]",
"[",
"'type'",
"]",
"except",
"(",
"KeyError",
",",
"ValueError",
",",
"TypeError",
")",
":",
"pass",
"ret",
"[",
"'value'",
"]",
"=",
"val",
"[",
"3",
"]",
".",
"strip",
"(",
")",
"try",
":",
"self",
".",
"vars",
"[",
"'email'",
"]",
".",
"append",
"(",
"ret",
")",
"except",
"AttributeError",
":",
"self",
".",
"vars",
"[",
"'email'",
"]",
"=",
"[",
"]",
"self",
".",
"vars",
"[",
"'email'",
"]",
".",
"append",
"(",
"ret",
")"
] | The function for parsing the vcard email addresses.
Args:
val (:obj:`list`): The value to parse. | [
"The",
"function",
"for",
"parsing",
"the",
"vcard",
"email",
"addresses",
"."
] | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/rdap.py#L182-L212 |
5,780 | secynic/ipwhois | ipwhois/rdap.py | _RDAPContact.parse | def parse(self):
"""
The function for parsing the vcard to the vars dictionary.
"""
keys = {
'fn': self._parse_name,
'kind': self._parse_kind,
'adr': self._parse_address,
'tel': self._parse_phone,
'email': self._parse_email,
'role': self._parse_role,
'title': self._parse_title
}
for val in self.vcard:
try:
parser = keys.get(val[0])
parser(val)
except (KeyError, ValueError, TypeError):
pass | python | def parse(self):
"""
The function for parsing the vcard to the vars dictionary.
"""
keys = {
'fn': self._parse_name,
'kind': self._parse_kind,
'adr': self._parse_address,
'tel': self._parse_phone,
'email': self._parse_email,
'role': self._parse_role,
'title': self._parse_title
}
for val in self.vcard:
try:
parser = keys.get(val[0])
parser(val)
except (KeyError, ValueError, TypeError):
pass | [
"def",
"parse",
"(",
"self",
")",
":",
"keys",
"=",
"{",
"'fn'",
":",
"self",
".",
"_parse_name",
",",
"'kind'",
":",
"self",
".",
"_parse_kind",
",",
"'adr'",
":",
"self",
".",
"_parse_address",
",",
"'tel'",
":",
"self",
".",
"_parse_phone",
",",
"'email'",
":",
"self",
".",
"_parse_email",
",",
"'role'",
":",
"self",
".",
"_parse_role",
",",
"'title'",
":",
"self",
".",
"_parse_title",
"}",
"for",
"val",
"in",
"self",
".",
"vcard",
":",
"try",
":",
"parser",
"=",
"keys",
".",
"get",
"(",
"val",
"[",
"0",
"]",
")",
"parser",
"(",
"val",
")",
"except",
"(",
"KeyError",
",",
"ValueError",
",",
"TypeError",
")",
":",
"pass"
] | The function for parsing the vcard to the vars dictionary. | [
"The",
"function",
"for",
"parsing",
"the",
"vcard",
"to",
"the",
"vars",
"dictionary",
"."
] | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/rdap.py#L234-L258 |
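The method above is a dispatch table: each vCard entry is routed to a handler keyed by its first element, and entries without a handler fall through because calling `None` raises a `TypeError` that is caught. A standalone sketch of the same pattern, with illustrative entries and handlers:

```python
# Dispatch-table sketch (illustrative data, not from the source).
entries = [['fn', {}, 'text', 'Jane Doe'],
           ['x-unknown', {}, 'text', 'ignored']]
handlers = {'fn': lambda val: print('name:', val[3])}

for val in entries:
    try:
        handler = handlers.get(val[0])
        handler(val)   # None for unknown keys -> TypeError -> skipped
    except (KeyError, ValueError, TypeError):
        pass
```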
5,781 | secynic/ipwhois | ipwhois/utils.py | ipv4_lstrip_zeros | def ipv4_lstrip_zeros(address):
"""
The function to strip leading zeros in each octet of an IPv4 address.
Args:
address (:obj:`str`): An IPv4 address.
Returns:
str: The modified IPv4 address.
"""
# Split the octets.
obj = address.strip().split('.')
for x, y in enumerate(obj):
# Strip leading zeros. Split / here in case CIDR is attached.
obj[x] = y.split('/')[0].lstrip('0')
if obj[x] in ['', None]:
obj[x] = '0'
return '.'.join(obj) | python | def ipv4_lstrip_zeros(address):
"""
The function to strip leading zeros in each octet of an IPv4 address.
Args:
address (:obj:`str`): An IPv4 address.
Returns:
str: The modified IPv4 address.
"""
# Split the octets.
obj = address.strip().split('.')
for x, y in enumerate(obj):
# Strip leading zeros. Split / here in case CIDR is attached.
obj[x] = y.split('/')[0].lstrip('0')
if obj[x] in ['', None]:
obj[x] = '0'
return '.'.join(obj) | [
"def",
"ipv4_lstrip_zeros",
"(",
"address",
")",
":",
"# Split the octets.",
"obj",
"=",
"address",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'.'",
")",
"for",
"x",
",",
"y",
"in",
"enumerate",
"(",
"obj",
")",
":",
"# Strip leading zeros. Split / here in case CIDR is attached.",
"obj",
"[",
"x",
"]",
"=",
"y",
".",
"split",
"(",
"'/'",
")",
"[",
"0",
"]",
".",
"lstrip",
"(",
"'0'",
")",
"if",
"obj",
"[",
"x",
"]",
"in",
"[",
"''",
",",
"None",
"]",
":",
"obj",
"[",
"x",
"]",
"=",
"'0'",
"return",
"'.'",
".",
"join",
"(",
"obj",
")"
] | The function to strip leading zeros in each octet of an IPv4 address.
Args:
address (:obj:`str`): An IPv4 address.
Returns:
str: The modified IPv4 address. | [
"The",
"function",
"to",
"strip",
"leading",
"zeros",
"in",
"each",
"octet",
"of",
"an",
"IPv4",
"address",
"."
] | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/utils.py#L117-L138 |
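Expected behavior of the function above on a couple of illustrative inputs; note that the per-octet `/` split also drops any attached CIDR suffix:

```python
from ipwhois.utils import ipv4_lstrip_zeros

print(ipv4_lstrip_zeros('074.034.001.100'))  # 74.34.1.100
print(ipv4_lstrip_zeros('010.000.000.001'))  # 10.0.0.1
```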
5,782 | secynic/ipwhois | ipwhois/utils.py | get_countries | def get_countries(is_legacy_xml=False):
"""
The function to generate a dictionary containing ISO_3166-1 country codes
to names.
Args:
is_legacy_xml (:obj:`bool`): Whether to use the older country code
list (iso_3166-1_list_en.xml).
Returns:
dict: A mapping of country codes as the keys to the country names as
the values.
"""
# Initialize the countries dictionary.
countries = {}
# Set the data directory based on if the script is a frozen executable.
if sys.platform == 'win32' and getattr(sys, 'frozen', False):
data_dir = path.dirname(sys.executable) # pragma: no cover
else:
data_dir = path.dirname(__file__)
if is_legacy_xml:
log.debug('Opening country code legacy XML: {0}'.format(
str(data_dir) + '/data/iso_3166-1_list_en.xml'))
# Create the country codes file object.
f = io.open(str(data_dir) + '/data/iso_3166-1_list_en.xml', 'r',
encoding='ISO-8859-1')
# Read the file.
data = f.read()
# Check if there is data.
if not data: # pragma: no cover
return {}
# Parse the data to get the DOM.
dom = parseString(data)
# Retrieve the country entries.
entries = dom.getElementsByTagName('ISO_3166-1_Entry')
# Iterate through the entries and add to the countries dictionary.
for entry in entries:
# Retrieve the country code and name from the DOM.
code = entry.getElementsByTagName(
'ISO_3166-1_Alpha-2_Code_element')[0].firstChild.data
name = entry.getElementsByTagName(
'ISO_3166-1_Country_name')[0].firstChild.data
# Add to the countries dictionary.
countries[code] = name.title()
else:
log.debug('Opening country code CSV: {0}'.format(
str(data_dir) + '/data/iso_3166-1.csv'))
# Create the country codes file object.
f = io.open(str(data_dir) + '/data/iso_3166-1.csv', 'r',
encoding='utf-8')
# Create csv reader object.
csv_reader = csv.reader(f, delimiter=',', quotechar='"')
# Iterate through the rows and add to the countries dictionary.
for row in csv_reader:
# Retrieve the country code and name columns.
code = row[0]
name = row[1]
# Add to the countries dictionary.
countries[code] = name
return countries | python | def get_countries(is_legacy_xml=False):
"""
The function to generate a dictionary containing ISO_3166-1 country codes
to names.
Args:
is_legacy_xml (:obj:`bool`): Whether to use the older country code
list (iso_3166-1_list_en.xml).
Returns:
dict: A mapping of country codes as the keys to the country names as
the values.
"""
# Initialize the countries dictionary.
countries = {}
# Set the data directory based on if the script is a frozen executable.
if sys.platform == 'win32' and getattr(sys, 'frozen', False):
data_dir = path.dirname(sys.executable) # pragma: no cover
else:
data_dir = path.dirname(__file__)
if is_legacy_xml:
log.debug('Opening country code legacy XML: {0}'.format(
str(data_dir) + '/data/iso_3166-1_list_en.xml'))
# Create the country codes file object.
f = io.open(str(data_dir) + '/data/iso_3166-1_list_en.xml', 'r',
encoding='ISO-8859-1')
# Read the file.
data = f.read()
# Check if there is data.
if not data: # pragma: no cover
return {}
# Parse the data to get the DOM.
dom = parseString(data)
# Retrieve the country entries.
entries = dom.getElementsByTagName('ISO_3166-1_Entry')
# Iterate through the entries and add to the countries dictionary.
for entry in entries:
# Retrieve the country code and name from the DOM.
code = entry.getElementsByTagName(
'ISO_3166-1_Alpha-2_Code_element')[0].firstChild.data
name = entry.getElementsByTagName(
'ISO_3166-1_Country_name')[0].firstChild.data
# Add to the countries dictionary.
countries[code] = name.title()
else:
log.debug('Opening country code CSV: {0}'.format(
str(data_dir) + '/data/iso_3166-1.csv'))
# Create the country codes file object.
f = io.open(str(data_dir) + '/data/iso_3166-1.csv', 'r',
encoding='utf-8')
# Create csv reader object.
csv_reader = csv.reader(f, delimiter=',', quotechar='"')
# Iterate through the rows and add to the countries dictionary.
for row in csv_reader:
# Retrieve the country code and name columns.
code = row[0]
name = row[1]
# Add to the countries dictionary.
countries[code] = name
return countries | [
"def",
"get_countries",
"(",
"is_legacy_xml",
"=",
"False",
")",
":",
"# Initialize the countries dictionary.",
"countries",
"=",
"{",
"}",
"# Set the data directory based on if the script is a frozen executable.",
"if",
"sys",
".",
"platform",
"==",
"'win32'",
"and",
"getattr",
"(",
"sys",
",",
"'frozen'",
",",
"False",
")",
":",
"data_dir",
"=",
"path",
".",
"dirname",
"(",
"sys",
".",
"executable",
")",
"# pragma: no cover",
"else",
":",
"data_dir",
"=",
"path",
".",
"dirname",
"(",
"__file__",
")",
"if",
"is_legacy_xml",
":",
"log",
".",
"debug",
"(",
"'Opening country code legacy XML: {0}'",
".",
"format",
"(",
"str",
"(",
"data_dir",
")",
"+",
"'/data/iso_3166-1_list_en.xml'",
")",
")",
"# Create the country codes file object.",
"f",
"=",
"io",
".",
"open",
"(",
"str",
"(",
"data_dir",
")",
"+",
"'/data/iso_3166-1_list_en.xml'",
",",
"'r'",
",",
"encoding",
"=",
"'ISO-8859-1'",
")",
"# Read the file.",
"data",
"=",
"f",
".",
"read",
"(",
")",
"# Check if there is data.",
"if",
"not",
"data",
":",
"# pragma: no cover",
"return",
"{",
"}",
"# Parse the data to get the DOM.",
"dom",
"=",
"parseString",
"(",
"data",
")",
"# Retrieve the country entries.",
"entries",
"=",
"dom",
".",
"getElementsByTagName",
"(",
"'ISO_3166-1_Entry'",
")",
"# Iterate through the entries and add to the countries dictionary.",
"for",
"entry",
"in",
"entries",
":",
"# Retrieve the country code and name from the DOM.",
"code",
"=",
"entry",
".",
"getElementsByTagName",
"(",
"'ISO_3166-1_Alpha-2_Code_element'",
")",
"[",
"0",
"]",
".",
"firstChild",
".",
"data",
"name",
"=",
"entry",
".",
"getElementsByTagName",
"(",
"'ISO_3166-1_Country_name'",
")",
"[",
"0",
"]",
".",
"firstChild",
".",
"data",
"# Add to the countries dictionary.",
"countries",
"[",
"code",
"]",
"=",
"name",
".",
"title",
"(",
")",
"else",
":",
"log",
".",
"debug",
"(",
"'Opening country code CSV: {0}'",
".",
"format",
"(",
"str",
"(",
"data_dir",
")",
"+",
"'/data/iso_3166-1_list_en.xml'",
")",
")",
"# Create the country codes file object.",
"f",
"=",
"io",
".",
"open",
"(",
"str",
"(",
"data_dir",
")",
"+",
"'/data/iso_3166-1.csv'",
",",
"'r'",
",",
"encoding",
"=",
"'utf-8'",
")",
"# Create csv reader object.",
"csv_reader",
"=",
"csv",
".",
"reader",
"(",
"f",
",",
"delimiter",
"=",
"','",
",",
"quotechar",
"=",
"'\"'",
")",
"# Iterate through the rows and add to the countries dictionary.",
"for",
"row",
"in",
"csv_reader",
":",
"# Retrieve the country code and name columns.",
"code",
"=",
"row",
"[",
"0",
"]",
"name",
"=",
"row",
"[",
"1",
"]",
"# Add to the countries dictionary.",
"countries",
"[",
"code",
"]",
"=",
"name",
"return",
"countries"
] | The function to generate a dictionary containing ISO_3166-1 country codes
to names.
Args:
is_legacy_xml (:obj:`bool`): Whether to use the older country code
list (iso_3166-1_list_en.xml).
Returns:
dict: A mapping of country codes as the keys to the country names as
the values. | [
"The",
"function",
"to",
"generate",
"a",
"dictionary",
"containing",
"ISO_3166",
"-",
"1",
"country",
"codes",
"to",
"names",
"."
] | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/utils.py#L178-L261 |
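A minimal usage sketch; the exact country-name strings depend on the bundled ISO 3166-1 data file:

```python
from ipwhois.utils import get_countries

countries = get_countries()   # parses the bundled iso_3166-1.csv
print(countries.get('US'))    # e.g. 'United States'
print(countries.get('JP'))    # e.g. 'Japan'
```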
5,783 | secynic/ipwhois | ipwhois/utils.py | unique_everseen | def unique_everseen(iterable, key=None):
"""
The generator to list unique elements, preserving the order. Remember all
elements ever seen. This was taken from the itertools recipes.
Args:
iterable (:obj:`iter`): An iterable to process.
key (:obj:`callable`): Optional function to run when checking
elements (e.g., str.lower)
Yields:
The next unique element found.
"""
seen = set()
seen_add = seen.add
if key is None:
for element in filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element | python | def unique_everseen(iterable, key=None):
"""
The generator to list unique elements, preserving the order. Remember all
elements ever seen. This was taken from the itertools recipes.
Args:
iterable (:obj:`iter`): An iterable to process.
key (:obj:`callable`): Optional function to run when checking
elements (e.g., str.lower)
Yields:
The next unique element found.
"""
seen = set()
seen_add = seen.add
if key is None:
for element in filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element | [
"def",
"unique_everseen",
"(",
"iterable",
",",
"key",
"=",
"None",
")",
":",
"seen",
"=",
"set",
"(",
")",
"seen_add",
"=",
"seen",
".",
"add",
"if",
"key",
"is",
"None",
":",
"for",
"element",
"in",
"filterfalse",
"(",
"seen",
".",
"__contains__",
",",
"iterable",
")",
":",
"seen_add",
"(",
"element",
")",
"yield",
"element",
"else",
":",
"for",
"element",
"in",
"iterable",
":",
"k",
"=",
"key",
"(",
"element",
")",
"if",
"k",
"not",
"in",
"seen",
":",
"seen_add",
"(",
"k",
")",
"yield",
"element"
] | The generator to list unique elements, preserving the order. Remember all
elements ever seen. This was taken from the itertools recipes.
Args:
iterable (:obj:`iter`): An iterable to process.
key (:obj:`callable`): Optional function to run when checking
elements (e.g., str.lower)
Yields:
The next unique element found. | [
"The",
"generator",
"to",
"list",
"unique",
"elements",
"preserving",
"the",
"order",
".",
"Remember",
"all",
"elements",
"ever",
"seen",
".",
"This",
"was",
"taken",
"from",
"the",
"itertools",
"recipes",
"."
] | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/utils.py#L424-L457 |
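Expected behavior, matching the itertools recipe the function is taken from:

```python
from ipwhois.utils import unique_everseen

print(list(unique_everseen('AAAABBBCCDAABBB')))         # ['A', 'B', 'C', 'D']
print(list(unique_everseen('ABBCcAD', key=str.lower)))  # ['A', 'B', 'C', 'D']
```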
5,784 | secynic/ipwhois | ipwhois/whois.py | Whois.get_nets_arin | def get_nets_arin(self, response):
"""
The function for parsing network blocks from ARIN whois data.
Args:
response (:obj:`str`): The response from the ARIN whois server.
Returns:
list of dict: Mapping of networks with start and end positions.
::
[{
'cidr' (str) - The network routing block
'start' (int) - The starting point of the network
'end' (int) - The end point of the network
}]
"""
nets = []
# Find the first NetRange value.
pattern = re.compile(
r'^NetRange:[^\S\n]+(.+)$',
re.MULTILINE
)
temp = pattern.search(response)
net_range = None
net_range_start = None
if temp is not None:
net_range = temp.group(1).strip()
net_range_start = temp.start()
# Iterate through all of the networks found, storing the CIDR value
# and the start and end positions.
for match in re.finditer(
r'^CIDR:[^\S\n]+(.+?,[^\S\n].+|.+)$',
response,
re.MULTILINE
):
try:
net = copy.deepcopy(BASE_NET)
if len(nets) > 0:
temp = pattern.search(response, match.start())
net_range = None
net_range_start = None
if temp is not None:
net_range = temp.group(1).strip()
net_range_start = temp.start()
if net_range is not None:
if net_range_start < match.start() or len(nets) > 0:
try:
net['range'] = '{0} - {1}'.format(
ip_network(net_range)[0].__str__(),
ip_network(net_range)[-1].__str__()
) if '/' in net_range else net_range
except ValueError: # pragma: no cover
net['range'] = net_range
net['cidr'] = ', '.join(
[ip_network(c.strip()).__str__()
for c in match.group(1).split(', ')]
)
net['start'] = match.start()
net['end'] = match.end()
nets.append(net)
except ValueError:
pass
return nets | python | def get_nets_arin(self, response):
"""
The function for parsing network blocks from ARIN whois data.
Args:
response (:obj:`str`): The response from the ARIN whois server.
Returns:
list of dict: Mapping of networks with start and end positions.
::
[{
'cidr' (str) - The network routing block
'start' (int) - The starting point of the network
'end' (int) - The end point of the network
}]
"""
nets = []
# Find the first NetRange value.
pattern = re.compile(
r'^NetRange:[^\S\n]+(.+)$',
re.MULTILINE
)
temp = pattern.search(response)
net_range = None
net_range_start = None
if temp is not None:
net_range = temp.group(1).strip()
net_range_start = temp.start()
# Iterate through all of the networks found, storing the CIDR value
# and the start and end positions.
for match in re.finditer(
r'^CIDR:[^\S\n]+(.+?,[^\S\n].+|.+)$',
response,
re.MULTILINE
):
try:
net = copy.deepcopy(BASE_NET)
if len(nets) > 0:
temp = pattern.search(response, match.start())
net_range = None
net_range_start = None
if temp is not None:
net_range = temp.group(1).strip()
net_range_start = temp.start()
if net_range is not None:
if net_range_start < match.start() or len(nets) > 0:
try:
net['range'] = '{0} - {1}'.format(
ip_network(net_range)[0].__str__(),
ip_network(net_range)[-1].__str__()
) if '/' in net_range else net_range
except ValueError: # pragma: no cover
net['range'] = net_range
net['cidr'] = ', '.join(
[ip_network(c.strip()).__str__()
for c in match.group(1).split(', ')]
)
net['start'] = match.start()
net['end'] = match.end()
nets.append(net)
except ValueError:
pass
return nets | [
"def",
"get_nets_arin",
"(",
"self",
",",
"response",
")",
":",
"nets",
"=",
"[",
"]",
"# Find the first NetRange value.",
"pattern",
"=",
"re",
".",
"compile",
"(",
"r'^NetRange:[^\\S\\n]+(.+)$'",
",",
"re",
".",
"MULTILINE",
")",
"temp",
"=",
"pattern",
".",
"search",
"(",
"response",
")",
"net_range",
"=",
"None",
"net_range_start",
"=",
"None",
"if",
"temp",
"is",
"not",
"None",
":",
"net_range",
"=",
"temp",
".",
"group",
"(",
"1",
")",
".",
"strip",
"(",
")",
"net_range_start",
"=",
"temp",
".",
"start",
"(",
")",
"# Iterate through all of the networks found, storing the CIDR value",
"# and the start and end positions.",
"for",
"match",
"in",
"re",
".",
"finditer",
"(",
"r'^CIDR:[^\\S\\n]+(.+?,[^\\S\\n].+|.+)$'",
",",
"response",
",",
"re",
".",
"MULTILINE",
")",
":",
"try",
":",
"net",
"=",
"copy",
".",
"deepcopy",
"(",
"BASE_NET",
")",
"if",
"len",
"(",
"nets",
")",
">",
"0",
":",
"temp",
"=",
"pattern",
".",
"search",
"(",
"response",
",",
"match",
".",
"start",
"(",
")",
")",
"net_range",
"=",
"None",
"net_range_start",
"=",
"None",
"if",
"temp",
"is",
"not",
"None",
":",
"net_range",
"=",
"temp",
".",
"group",
"(",
"1",
")",
".",
"strip",
"(",
")",
"net_range_start",
"=",
"temp",
".",
"start",
"(",
")",
"if",
"net_range",
"is",
"not",
"None",
":",
"if",
"net_range_start",
"<",
"match",
".",
"start",
"(",
")",
"or",
"len",
"(",
"nets",
")",
">",
"0",
":",
"try",
":",
"net",
"[",
"'range'",
"]",
"=",
"'{0} - {1}'",
".",
"format",
"(",
"ip_network",
"(",
"net_range",
")",
"[",
"0",
"]",
".",
"__str__",
"(",
")",
",",
"ip_network",
"(",
"net_range",
")",
"[",
"-",
"1",
"]",
".",
"__str__",
"(",
")",
")",
"if",
"'/'",
"in",
"net_range",
"else",
"net_range",
"except",
"ValueError",
":",
"# pragma: no cover",
"net",
"[",
"'range'",
"]",
"=",
"net_range",
"net",
"[",
"'cidr'",
"]",
"=",
"', '",
".",
"join",
"(",
"[",
"ip_network",
"(",
"c",
".",
"strip",
"(",
")",
")",
".",
"__str__",
"(",
")",
"for",
"c",
"in",
"match",
".",
"group",
"(",
"1",
")",
".",
"split",
"(",
"', '",
")",
"]",
")",
"net",
"[",
"'start'",
"]",
"=",
"match",
".",
"start",
"(",
")",
"net",
"[",
"'end'",
"]",
"=",
"match",
".",
"end",
"(",
")",
"nets",
".",
"append",
"(",
"net",
")",
"except",
"ValueError",
":",
"pass",
"return",
"nets"
] | The function for parsing network blocks from ARIN whois data.
Args:
response (:obj:`str`): The response from the ARIN whois server.
Returns:
list of dict: Mapping of networks with start and end positions.
::
[{
'cidr' (str) - The network routing block
'start' (int) - The starting point of the network
'end' (int) - The end point of the network
}] | [
"The",
"function",
"for",
"parsing",
"network",
"blocks",
"from",
"ARIN",
"whois",
"data",
"."
] | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/whois.py#L337-L416 |
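A sketch with a fabricated ARIN-style response; `whois_parser` is an assumed `Whois` instance and the addresses use a documentation prefix:

```python
# Fabricated response (illustrative); `whois_parser` is an assumed Whois object.
response = ('NetRange:       192.0.2.0 - 192.0.2.255\n'
            'CIDR:           192.0.2.0/24\n')
nets = whois_parser.get_nets_arin(response)
# -> [{'cidr': '192.0.2.0/24', 'range': '192.0.2.0 - 192.0.2.255',
#      'start': ..., 'end': ...}]
```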
5,785 | secynic/ipwhois | ipwhois/whois.py | Whois.get_nets_lacnic | def get_nets_lacnic(self, response):
"""
The function for parsing network blocks from LACNIC whois data.
Args:
response (:obj:`str`): The response from the LACNIC whois server.
Returns:
list of dict: Mapping of networks with start and end positions.
::
[{
'cidr' (str) - The network routing block
'start' (int) - The starting point of the network
'end' (int) - The end point of the network
}]
"""
nets = []
# Iterate through all of the networks found, storing the CIDR value
# and the start and end positions.
for match in re.finditer(
r'^(inetnum|inet6num|route):[^\S\n]+(.+?,[^\S\n].+|.+)$',
response,
re.MULTILINE
):
try:
net = copy.deepcopy(BASE_NET)
net_range = match.group(2).strip()
try:
net['range'] = '{0} - {1}'.format(
ip_network(net_range)[0].__str__(),
ip_network(net_range)[-1].__str__()
) if '/' in net_range else net_range
except ValueError: # pragma: no cover
net['range'] = net_range
temp = []
for addr in net_range.split(', '):
count = addr.count('.')
if count != 0 and count < 4:
addr_split = addr.strip().split('/')
for i in range(count + 1, 4):
addr_split[0] += '.0'
addr = '/'.join(addr_split)
temp.append(ip_network(addr.strip()).__str__())
net['cidr'] = ', '.join(temp)
net['start'] = match.start()
net['end'] = match.end()
nets.append(net)
except ValueError:
pass
return nets | python | def get_nets_lacnic(self, response):
"""
The function for parsing network blocks from LACNIC whois data.
Args:
response (:obj:`str`): The response from the LACNIC whois server.
Returns:
list of dict: Mapping of networks with start and end positions.
::
[{
'cidr' (str) - The network routing block
'start' (int) - The starting point of the network
'end' (int) - The end point of the network
}]
"""
nets = []
# Iterate through all of the networks found, storing the CIDR value
# and the start and end positions.
for match in re.finditer(
r'^(inetnum|inet6num|route):[^\S\n]+(.+?,[^\S\n].+|.+)$',
response,
re.MULTILINE
):
try:
net = copy.deepcopy(BASE_NET)
net_range = match.group(2).strip()
try:
net['range'] = '{0} - {1}'.format(
ip_network(net_range)[0].__str__(),
ip_network(net_range)[-1].__str__()
) if '/' in net_range else net_range
except ValueError: # pragma: no cover
net['range'] = net_range
temp = []
for addr in net_range.split(', '):
count = addr.count('.')
if count != 0 and count < 4:
addr_split = addr.strip().split('/')
for i in range(count + 1, 4):
addr_split[0] += '.0'
addr = '/'.join(addr_split)
temp.append(ip_network(addr.strip()).__str__())
net['cidr'] = ', '.join(temp)
net['start'] = match.start()
net['end'] = match.end()
nets.append(net)
except ValueError:
pass
return nets | [
"def",
"get_nets_lacnic",
"(",
"self",
",",
"response",
")",
":",
"nets",
"=",
"[",
"]",
"# Iterate through all of the networks found, storing the CIDR value",
"# and the start and end positions.",
"for",
"match",
"in",
"re",
".",
"finditer",
"(",
"r'^(inetnum|inet6num|route):[^\\S\\n]+(.+?,[^\\S\\n].+|.+)$'",
",",
"response",
",",
"re",
".",
"MULTILINE",
")",
":",
"try",
":",
"net",
"=",
"copy",
".",
"deepcopy",
"(",
"BASE_NET",
")",
"net_range",
"=",
"match",
".",
"group",
"(",
"2",
")",
".",
"strip",
"(",
")",
"try",
":",
"net",
"[",
"'range'",
"]",
"=",
"net",
"[",
"'range'",
"]",
"=",
"'{0} - {1}'",
".",
"format",
"(",
"ip_network",
"(",
"net_range",
")",
"[",
"0",
"]",
".",
"__str__",
"(",
")",
",",
"ip_network",
"(",
"net_range",
")",
"[",
"-",
"1",
"]",
".",
"__str__",
"(",
")",
")",
"if",
"'/'",
"in",
"net_range",
"else",
"net_range",
"except",
"ValueError",
":",
"# pragma: no cover",
"net",
"[",
"'range'",
"]",
"=",
"net_range",
"temp",
"=",
"[",
"]",
"for",
"addr",
"in",
"net_range",
".",
"split",
"(",
"', '",
")",
":",
"count",
"=",
"addr",
".",
"count",
"(",
"'.'",
")",
"if",
"count",
"is",
"not",
"0",
"and",
"count",
"<",
"4",
":",
"addr_split",
"=",
"addr",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'/'",
")",
"for",
"i",
"in",
"range",
"(",
"count",
"+",
"1",
",",
"4",
")",
":",
"addr_split",
"[",
"0",
"]",
"+=",
"'.0'",
"addr",
"=",
"'/'",
".",
"join",
"(",
"addr_split",
")",
"temp",
".",
"append",
"(",
"ip_network",
"(",
"addr",
".",
"strip",
"(",
")",
")",
".",
"__str__",
"(",
")",
")",
"net",
"[",
"'cidr'",
"]",
"=",
"', '",
".",
"join",
"(",
"temp",
")",
"net",
"[",
"'start'",
"]",
"=",
"match",
".",
"start",
"(",
")",
"net",
"[",
"'end'",
"]",
"=",
"match",
".",
"end",
"(",
")",
"nets",
".",
"append",
"(",
"net",
")",
"except",
"ValueError",
":",
"pass",
"return",
"nets"
] | The function for parsing network blocks from LACNIC whois data.
Args:
response (:obj:`str`): The response from the LACNIC whois server.
Returns:
list of dict: Mapping of networks with start and end positions.
::
[{
'cidr' (str) - The network routing block
'start' (int) - The starting point of the network
'end' (int) - The end point of the network
}] | [
"The",
"function",
"for",
"parsing",
"network",
"blocks",
"from",
"LACNIC",
"whois",
"data",
"."
] | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/whois.py#L428-L496 |
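The octet-padding step above exists because LACNIC can return abbreviated IPv4 prefixes; a standalone demo of that step with an illustrative input:

```python
from ipaddress import ip_network

addr = '200.7/16'                        # abbreviated two-octet prefix
count = addr.count('.')                  # 1
if count != 0 and count < 4:
    addr_split = addr.strip().split('/')
    for i in range(count + 1, 4):
        addr_split[0] += '.0'            # pad the missing octets with zeros
    addr = '/'.join(addr_split)
print(ip_network(addr))                  # 200.7.0.0/16
```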
5,786 | secynic/ipwhois | ipwhois/whois.py | Whois.get_nets_other | def get_nets_other(self, response):
"""
The function for parsing network blocks from generic whois data.
Args:
response (:obj:`str`): The response from the whois/rwhois server.
Returns:
list of dict: Mapping of networks with start and end positions.
::
[{
'cidr' (str) - The network routing block
'start' (int) - The starting point of the network
'end' (int) - The end point of the network
}]
"""
nets = []
# Iterate through all of the networks found, storing the CIDR value
# and the start and end positions.
for match in re.finditer(
r'^(inetnum|inet6num|route):[^\S\n]+((.+?)[^\S\n]-[^\S\n](.+)|'
'.+)$',
response,
re.MULTILINE
):
try:
net = copy.deepcopy(BASE_NET)
net_range = match.group(2).strip()
try:
net['range'] = '{0} - {1}'.format(
ip_network(net_range)[0].__str__(),
ip_network(net_range)[-1].__str__()
) if '/' in net_range else net_range
except ValueError: # pragma: no cover
net['range'] = net_range
if match.group(3) and match.group(4):
addrs = []
addrs.extend(summarize_address_range(
ip_address(match.group(3).strip()),
ip_address(match.group(4).strip())))
cidr = ', '.join(
[i.__str__() for i in collapse_addresses(addrs)]
)
else:
cidr = ip_network(net_range).__str__()
net['cidr'] = cidr
net['start'] = match.start()
net['end'] = match.end()
nets.append(net)
except (ValueError, TypeError):
pass
return nets | python | def get_nets_other(self, response):
"""
The function for parsing network blocks from generic whois data.
Args:
response (:obj:`str`): The response from the whois/rwhois server.
Returns:
list of dict: Mapping of networks with start and end positions.
::
[{
'cidr' (str) - The network routing block
'start' (int) - The starting point of the network
'end' (int) - The end point of the network
}]
"""
nets = []
# Iterate through all of the networks found, storing the CIDR value
# and the start and end positions.
for match in re.finditer(
r'^(inetnum|inet6num|route):[^\S\n]+((.+?)[^\S\n]-[^\S\n](.+)|'
'.+)$',
response,
re.MULTILINE
):
try:
net = copy.deepcopy(BASE_NET)
net_range = match.group(2).strip()
try:
net['range'] = '{0} - {1}'.format(
ip_network(net_range)[0].__str__(),
ip_network(net_range)[-1].__str__()
) if '/' in net_range else net_range
except ValueError: # pragma: no cover
net['range'] = net_range
if match.group(3) and match.group(4):
addrs = []
addrs.extend(summarize_address_range(
ip_address(match.group(3).strip()),
ip_address(match.group(4).strip())))
cidr = ', '.join(
[i.__str__() for i in collapse_addresses(addrs)]
)
else:
cidr = ip_network(net_range).__str__()
net['cidr'] = cidr
net['start'] = match.start()
net['end'] = match.end()
nets.append(net)
except (ValueError, TypeError):
pass
return nets | [
"def",
"get_nets_other",
"(",
"self",
",",
"response",
")",
":",
"nets",
"=",
"[",
"]",
"# Iterate through all of the networks found, storing the CIDR value",
"# and the start and end positions.",
"for",
"match",
"in",
"re",
".",
"finditer",
"(",
"r'^(inetnum|inet6num|route):[^\\S\\n]+((.+?)[^\\S\\n]-[^\\S\\n](.+)|'",
"'.+)$'",
",",
"response",
",",
"re",
".",
"MULTILINE",
")",
":",
"try",
":",
"net",
"=",
"copy",
".",
"deepcopy",
"(",
"BASE_NET",
")",
"net_range",
"=",
"match",
".",
"group",
"(",
"2",
")",
".",
"strip",
"(",
")",
"try",
":",
"net",
"[",
"'range'",
"]",
"=",
"net",
"[",
"'range'",
"]",
"=",
"'{0} - {1}'",
".",
"format",
"(",
"ip_network",
"(",
"net_range",
")",
"[",
"0",
"]",
".",
"__str__",
"(",
")",
",",
"ip_network",
"(",
"net_range",
")",
"[",
"-",
"1",
"]",
".",
"__str__",
"(",
")",
")",
"if",
"'/'",
"in",
"net_range",
"else",
"net_range",
"except",
"ValueError",
":",
"# pragma: no cover",
"net",
"[",
"'range'",
"]",
"=",
"net_range",
"if",
"match",
".",
"group",
"(",
"3",
")",
"and",
"match",
".",
"group",
"(",
"4",
")",
":",
"addrs",
"=",
"[",
"]",
"addrs",
".",
"extend",
"(",
"summarize_address_range",
"(",
"ip_address",
"(",
"match",
".",
"group",
"(",
"3",
")",
".",
"strip",
"(",
")",
")",
",",
"ip_address",
"(",
"match",
".",
"group",
"(",
"4",
")",
".",
"strip",
"(",
")",
")",
")",
")",
"cidr",
"=",
"', '",
".",
"join",
"(",
"[",
"i",
".",
"__str__",
"(",
")",
"for",
"i",
"in",
"collapse_addresses",
"(",
"addrs",
")",
"]",
")",
"else",
":",
"cidr",
"=",
"ip_network",
"(",
"net_range",
")",
".",
"__str__",
"(",
")",
"net",
"[",
"'cidr'",
"]",
"=",
"cidr",
"net",
"[",
"'start'",
"]",
"=",
"match",
".",
"start",
"(",
")",
"net",
"[",
"'end'",
"]",
"=",
"match",
".",
"end",
"(",
")",
"nets",
".",
"append",
"(",
"net",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"pass",
"return",
"nets"
] | The function for parsing network blocks from generic whois data.
Args:
response (:obj:`str`): The response from the whois/rwhois server.
Returns:
list of dict: Mapping of networks with start and end positions.
::
[{
'cidr' (str) - The network routing block
'start' (int) - The starting point of the network
'end' (int) - The end point of the network
}] | [
"The",
"function",
"for",
"parsing",
"network",
"blocks",
"from",
"generic",
"whois",
"data",
"."
] | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/whois.py#L508-L578 |
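The range-to-CIDR conversion above relies on two stdlib helpers; a standalone demo with illustrative addresses:

```python
from ipaddress import ip_address, summarize_address_range, collapse_addresses

addrs = list(summarize_address_range(ip_address('192.0.2.0'),
                                     ip_address('192.0.2.130')))
print(', '.join(str(n) for n in collapse_addresses(addrs)))
# -> 192.0.2.0/25, 192.0.2.128/31, 192.0.2.130/32
```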
5,787 | klen/marshmallow-peewee | marshmallow_peewee/convert.py | ModelConverter.convert_default | def convert_default(self, field, **params):
"""Return raw field."""
for klass, ma_field in self.TYPE_MAPPING:
if isinstance(field, klass):
return ma_field(**params)
return fields.Raw(**params) | python | def convert_default(self, field, **params):
"""Return raw field."""
for klass, ma_field in self.TYPE_MAPPING:
if isinstance(field, klass):
return ma_field(**params)
return fields.Raw(**params) | [
"def",
"convert_default",
"(",
"self",
",",
"field",
",",
"*",
"*",
"params",
")",
":",
"for",
"klass",
",",
"ma_field",
"in",
"self",
".",
"TYPE_MAPPING",
":",
"if",
"isinstance",
"(",
"field",
",",
"klass",
")",
":",
"return",
"ma_field",
"(",
"*",
"*",
"params",
")",
"return",
"fields",
".",
"Raw",
"(",
"*",
"*",
"params",
")"
] | Return raw field. | [
"Return",
"raw",
"field",
"."
] | a5985daa4072605882a9c7c41d74881631943953 | https://github.com/klen/marshmallow-peewee/blob/a5985daa4072605882a9c7c41d74881631943953/marshmallow_peewee/convert.py#L78-L83 |
5,788 | klen/marshmallow-peewee | marshmallow_peewee/schema.py | ModelSchema.make_instance | def make_instance(self, data):
"""Build object from data."""
if not self.opts.model:
return data
if self.instance is not None:
for key, value in data.items():
setattr(self.instance, key, value)
return self.instance
return self.opts.model(**data) | python | def make_instance(self, data):
"""Build object from data."""
if not self.opts.model:
return data
if self.instance is not None:
for key, value in data.items():
setattr(self.instance, key, value)
return self.instance
return self.opts.model(**data) | [
"def",
"make_instance",
"(",
"self",
",",
"data",
")",
":",
"if",
"not",
"self",
".",
"opts",
".",
"model",
":",
"return",
"data",
"if",
"self",
".",
"instance",
"is",
"not",
"None",
":",
"for",
"key",
",",
"value",
"in",
"data",
".",
"items",
"(",
")",
":",
"setattr",
"(",
"self",
".",
"instance",
",",
"key",
",",
"value",
")",
"return",
"self",
".",
"instance",
"return",
"self",
".",
"opts",
".",
"model",
"(",
"*",
"*",
"data",
")"
] | Build object from data. | [
"Build",
"object",
"from",
"data",
"."
] | a5985daa4072605882a9c7c41d74881631943953 | https://github.com/klen/marshmallow-peewee/blob/a5985daa4072605882a9c7c41d74881631943953/marshmallow_peewee/schema.py#L68-L78 |
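A hypothetical usage sketch; `User` and `UserSchema` are illustrative names, the schema is assumed to accept an `instance` argument (as suggested by `self.instance` above), and the exact `load` return shape depends on the marshmallow version:

```python
# Hypothetical schema/model names; sketch only.
schema = UserSchema(instance=existing_user)
updated = schema.load({'name': 'new name'})    # updates existing_user in place
fresh = UserSchema().load({'name': 'other'})   # builds a new User(**data)
```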
5,789 | tyarkoni/pliers | pliers/extractors/text.py | ComplexTextExtractor._extract | def _extract(self, stim):
''' Returns all words. '''
props = [(e.text, e.onset, e.duration) for e in stim.elements]
vals, onsets, durations = map(list, zip(*props))
return ExtractorResult(vals, stim, self, ['word'], onsets, durations) | python | def _extract(self, stim):
''' Returns all words. '''
props = [(e.text, e.onset, e.duration) for e in stim.elements]
vals, onsets, durations = map(list, zip(*props))
return ExtractorResult(vals, stim, self, ['word'], onsets, durations) | [
"def",
"_extract",
"(",
"self",
",",
"stim",
")",
":",
"props",
"=",
"[",
"(",
"e",
".",
"text",
",",
"e",
".",
"onset",
",",
"e",
".",
"duration",
")",
"for",
"e",
"in",
"stim",
".",
"elements",
"]",
"vals",
",",
"onsets",
",",
"durations",
"=",
"map",
"(",
"list",
",",
"zip",
"(",
"*",
"props",
")",
")",
"return",
"ExtractorResult",
"(",
"vals",
",",
"stim",
",",
"self",
",",
"[",
"'word'",
"]",
",",
"onsets",
",",
"durations",
")"
] | Returns all words. | [
"Returns",
"all",
"words",
"."
] | 5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b | https://github.com/tyarkoni/pliers/blob/5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b/pliers/extractors/text.py#L44-L48 |
5,790 | tyarkoni/pliers | pliers/stimuli/base.py | Stim.get_filename | def get_filename(self):
''' Return the source filename of the current Stim. '''
if self.filename is None or not os.path.exists(self.filename):
tf = tempfile.mktemp() + self._default_file_extension
self.save(tf)
yield tf
os.remove(tf)
else:
yield self.filename | python | def get_filename(self):
''' Return the source filename of the current Stim. '''
if self.filename is None or not os.path.exists(self.filename):
tf = tempfile.mktemp() + self._default_file_extension
self.save(tf)
yield tf
os.remove(tf)
else:
yield self.filename | [
"def",
"get_filename",
"(",
"self",
")",
":",
"if",
"self",
".",
"filename",
"is",
"None",
"or",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"filename",
")",
":",
"tf",
"=",
"tempfile",
".",
"mktemp",
"(",
")",
"+",
"self",
".",
"_default_file_extension",
"self",
".",
"save",
"(",
"tf",
")",
"yield",
"tf",
"os",
".",
"remove",
"(",
"tf",
")",
"else",
":",
"yield",
"self",
".",
"filename"
] | Return the source filename of the current Stim. | [
"Return",
"the",
"source",
"filename",
"of",
"the",
"current",
"Stim",
"."
] | 5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b | https://github.com/tyarkoni/pliers/blob/5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b/pliers/stimuli/base.py#L55-L63 |
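Since the method above is a generator that may create and then delete a temporary file, it is meant to be used as a context manager (in pliers it is wrapped accordingly); `stim` and `process` below are illustrative assumptions:

```python
# Usage sketch; `stim` is an assumed Stim instance, `process` a hypothetical
# consumer that needs a real file path on disk.
with stim.get_filename() as filename:
    process(filename)
# Any temporary file created for the Stim has been removed by this point.
```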
5,791 | tyarkoni/pliers | pliers/stimuli/compound.py | CompoundStim.get_stim | def get_stim(self, type_, return_all=False):
''' Returns component elements of the specified type.
Args:
type_ (str or Stim class): the desired Stim subclass to return.
return_all (bool): when True, returns all elements that match the
specified type as a list. When False (default), returns only
the first matching Stim.
Returns:
If return_all is True, a list of matching elements (or an empty
list if no elements match). If return_all is False, returns the
first matching Stim, or None if no elements match.
'''
if isinstance(type_, string_types):
type_ = _get_stim_class(type_)
matches = []
for s in self.elements:
if isinstance(s, type_):
if not return_all:
return s
matches.append(s)
if not matches:
return [] if return_all else None
return matches | python | def get_stim(self, type_, return_all=False):
''' Returns component elements of the specified type.
Args:
type_ (str or Stim class): the desired Stim subclass to return.
return_all (bool): when True, returns all elements that match the
specified type as a list. When False (default), returns only
the first matching Stim.
Returns:
If return_all is True, a list of matching elements (or an empty
list if no elements match). If return_all is False, returns the
first matching Stim, or None if no elements match.
'''
if isinstance(type_, string_types):
type_ = _get_stim_class(type_)
matches = []
for s in self.elements:
if isinstance(s, type_):
if not return_all:
return s
matches.append(s)
if not matches:
return [] if return_all else None
return matches | [
"def",
"get_stim",
"(",
"self",
",",
"type_",
",",
"return_all",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"type_",
",",
"string_types",
")",
":",
"type_",
"=",
"_get_stim_class",
"(",
"type_",
")",
"matches",
"=",
"[",
"]",
"for",
"s",
"in",
"self",
".",
"elements",
":",
"if",
"isinstance",
"(",
"s",
",",
"type_",
")",
":",
"if",
"not",
"return_all",
":",
"return",
"s",
"matches",
".",
"append",
"(",
"s",
")",
"if",
"not",
"matches",
":",
"return",
"[",
"]",
"if",
"return_all",
"else",
"None",
"return",
"matches"
] | Returns component elements of the specified type.
Args:
type_ (str or Stim class): the desired Stim subclass to return.
return_all (bool): when True, returns all elements that match the
specified type as a list. When False (default), returns only
the first matching Stim.
Returns:
If return_all is True, a list of matching elements (or an empty
list if no elements match). If return_all is False, returns the
first matching Stim, or None if no elements match. | [
"Returns",
"component",
"elements",
"of",
"the",
"specified",
"type",
"."
] | 5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b | https://github.com/tyarkoni/pliers/blob/5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b/pliers/stimuli/compound.py#L57-L81 |
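A usage sketch; `stim` is an assumed `CompoundStim` holding mixed elements, and the string form is resolved to a class via `_get_stim_class` (the exact accepted spellings depend on pliers):

```python
from pliers.stimuli import TextStim

first_text = stim.get_stim(TextStim)                  # first match or None
all_text = stim.get_stim(TextStim, return_all=True)   # list, possibly empty
```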
5,792 | tyarkoni/pliers | pliers/stimuli/compound.py | CompoundStim.has_types | def has_types(self, types, all_=True):
''' Check whether the current component list matches all Stim types
in the types argument.
Args:
types (Stim, list): a Stim class or iterable of Stim classes.
all_ (bool): if True, all input types must match; if False, at
least one input type must match.
Returns:
True if all passed types match at least one Stim in the component
list, otherwise False.
'''
func = all if all_ else any
return func([self.get_stim(t) for t in listify(types)]) | python | def has_types(self, types, all_=True):
''' Check whether the current component list matches all Stim types
in the types argument.
Args:
types (Stim, list): a Stim class or iterable of Stim classes.
all_ (bool): if True, all input types must match; if False, at
least one input type must match.
Returns:
True if all passed types match at least one Stim in the component
list, otherwise False.
'''
func = all if all_ else any
return func([self.get_stim(t) for t in listify(types)]) | [
"def",
"has_types",
"(",
"self",
",",
"types",
",",
"all_",
"=",
"True",
")",
":",
"func",
"=",
"all",
"if",
"all_",
"else",
"any",
"return",
"func",
"(",
"[",
"self",
".",
"get_stim",
"(",
"t",
")",
"for",
"t",
"in",
"listify",
"(",
"types",
")",
"]",
")"
] | Check whether the current component list matches all Stim types
in the types argument.
Args:
types (Stim, list): a Stim class or iterable of Stim classes.
all_ (bool): if True, all input types must match; if False, at
least one input type must match.
Returns:
True if all passed types match at least one Stim in the component
list, otherwise False. | [
"Check",
"whether",
"the",
"current",
"component",
"list",
"matches",
"all",
"Stim",
"types",
"in",
"the",
"types",
"argument",
"."
] | 5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b | https://github.com/tyarkoni/pliers/blob/5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b/pliers/stimuli/compound.py#L87-L101 |
5,793 | tyarkoni/pliers | pliers/stimuli/audio.py | AudioStim.save | def save(self, path):
''' Save clip data to file.
Args:
path (str): Filename to save audio data to.
'''
self.clip.write_audiofile(path, fps=self.sampling_rate) | python | def save(self, path):
''' Save clip data to file.
Args:
path (str): Filename to save audio data to.
'''
self.clip.write_audiofile(path, fps=self.sampling_rate) | [
"def",
"save",
"(",
"self",
",",
"path",
")",
":",
"self",
".",
"clip",
".",
"write_audiofile",
"(",
"path",
",",
"fps",
"=",
"self",
".",
"sampling_rate",
")"
] | Save clip data to file.
Args:
path (str): Filename to save audio data to. | [
"Save",
"clip",
"data",
"to",
"file",
"."
] | 5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b | https://github.com/tyarkoni/pliers/blob/5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b/pliers/stimuli/audio.py#L77-L83 |
5,794 | tyarkoni/pliers | pliers/converters/base.py | get_converter | def get_converter(in_type, out_type, *args, **kwargs):
''' Scans the list of available Converters and returns an instantiation
of the first one whose input and output types match those passed in.
Args:
in_type (type): The type of input the converter must have.
out_type (type): The type of output the converter must have.
args, kwargs: Optional positional and keyword arguments to pass onto
matching Converter's initializer.
'''
convs = pliers.converters.__all__
# If config includes default converters for this combination, try them
# first
out_type = listify(out_type)[::-1]
default_convs = config.get_option('default_converters')
for ot in out_type:
conv_str = '%s->%s' % (in_type.__name__, ot.__name__)
if conv_str in default_convs:
convs = list(default_convs[conv_str]) + convs
for name in convs:
cls = getattr(pliers.converters, name)
if not issubclass(cls, Converter):
continue
available = cls.available if issubclass(
cls, EnvironmentKeyMixin) else True
if cls._input_type == in_type and cls._output_type in out_type \
and available:
conv = cls(*args, **kwargs)
return conv
return None | python | def get_converter(in_type, out_type, *args, **kwargs):
''' Scans the list of available Converters and returns an instantiation
of the first one whose input and output types match those passed in.
Args:
in_type (type): The type of input the converter must have.
out_type (type): The type of output the converter must have.
args, kwargs: Optional positional and keyword arguments to pass onto
matching Converter's initializer.
'''
convs = pliers.converters.__all__
# If config includes default converters for this combination, try them
# first
out_type = listify(out_type)[::-1]
default_convs = config.get_option('default_converters')
for ot in out_type:
conv_str = '%s->%s' % (in_type.__name__, ot.__name__)
if conv_str in default_convs:
convs = list(default_convs[conv_str]) + convs
for name in convs:
cls = getattr(pliers.converters, name)
if not issubclass(cls, Converter):
continue
available = cls.available if issubclass(
cls, EnvironmentKeyMixin) else True
if cls._input_type == in_type and cls._output_type in out_type \
and available:
conv = cls(*args, **kwargs)
return conv
return None | [
"def",
"get_converter",
"(",
"in_type",
",",
"out_type",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"convs",
"=",
"pliers",
".",
"converters",
".",
"__all__",
"# If config includes default converters for this combination, try them",
"# first",
"out_type",
"=",
"listify",
"(",
"out_type",
")",
"[",
":",
":",
"-",
"1",
"]",
"default_convs",
"=",
"config",
".",
"get_option",
"(",
"'default_converters'",
")",
"for",
"ot",
"in",
"out_type",
":",
"conv_str",
"=",
"'%s->%s'",
"%",
"(",
"in_type",
".",
"__name__",
",",
"ot",
".",
"__name__",
")",
"if",
"conv_str",
"in",
"default_convs",
":",
"convs",
"=",
"list",
"(",
"default_convs",
"[",
"conv_str",
"]",
")",
"+",
"convs",
"for",
"name",
"in",
"convs",
":",
"cls",
"=",
"getattr",
"(",
"pliers",
".",
"converters",
",",
"name",
")",
"if",
"not",
"issubclass",
"(",
"cls",
",",
"Converter",
")",
":",
"continue",
"available",
"=",
"cls",
".",
"available",
"if",
"issubclass",
"(",
"cls",
",",
"EnvironmentKeyMixin",
")",
"else",
"True",
"if",
"cls",
".",
"_input_type",
"==",
"in_type",
"and",
"cls",
".",
"_output_type",
"in",
"out_type",
"and",
"available",
":",
"conv",
"=",
"cls",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"conv",
"return",
"None"
] | Scans the list of available Converters and returns an instantiation
of the first one whose input and output types match those passed in.
Args:
in_type (type): The type of input the converter must have.
out_type (type): The type of output the converter must have.
args, kwargs: Optional positional and keyword arguments to pass onto
matching Converter's initializer. | [
"Scans",
"the",
"list",
"of",
"available",
"Converters",
"and",
"returns",
"an",
"instantiation",
"of",
"the",
"first",
"one",
"whose",
"input",
"and",
"output",
"types",
"match",
"those",
"passed",
"in",
"."
] | 5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b | https://github.com/tyarkoni/pliers/blob/5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b/pliers/converters/base.py#L27-L61 |
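A usage sketch, assuming `get_converter` is importable from `pliers.converters` (it is defined in `pliers/converters/base.py`); `video_stim` is an assumed `VideoStim` instance:

```python
from pliers.converters import get_converter
from pliers.stimuli import AudioStim, VideoStim

conv = get_converter(VideoStim, AudioStim)
if conv is not None:
    audio = conv.transform(video_stim)   # assumed VideoStim instance
```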
5,795 | tyarkoni/pliers | pliers/external/tensorflow/classify_image.py | create_graph | def create_graph():
"""Creates a graph from saved GraphDef file and returns a saver."""
# Creates graph from saved graph_def.pb.
with tf.gfile.FastGFile(os.path.join(
FLAGS.model_dir, 'classify_image_graph_def.pb'), 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='') | python | def create_graph():
"""Creates a graph from saved GraphDef file and returns a saver."""
# Creates graph from saved graph_def.pb.
with tf.gfile.FastGFile(os.path.join(
FLAGS.model_dir, 'classify_image_graph_def.pb'), 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='') | [
"def",
"create_graph",
"(",
")",
":",
"# Creates graph from saved graph_def.pb.",
"with",
"tf",
".",
"gfile",
".",
"FastGFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"FLAGS",
".",
"model_dir",
",",
"'classify_image_graph_def.pb'",
")",
",",
"'rb'",
")",
"as",
"f",
":",
"graph_def",
"=",
"tf",
".",
"GraphDef",
"(",
")",
"graph_def",
".",
"ParseFromString",
"(",
"f",
".",
"read",
"(",
")",
")",
"_",
"=",
"tf",
".",
"import_graph_def",
"(",
"graph_def",
",",
"name",
"=",
"''",
")"
] | Creates a graph from saved GraphDef file and returns a saver. | [
"Creates",
"a",
"graph",
"from",
"saved",
"GraphDef",
"file",
"and",
"returns",
"a",
"saver",
"."
] | 5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b | https://github.com/tyarkoni/pliers/blob/5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b/pliers/external/tensorflow/classify_image.py#L120-L127 |
5,796 | tyarkoni/pliers | pliers/external/tensorflow/classify_image.py | run_inference_on_image | def run_inference_on_image(image):
"""Runs inference on an image.
Args:
image: Image file name.
Returns:
Nothing
"""
if not tf.gfile.Exists(image):
tf.logging.fatal('File does not exist %s', image)
image_data = tf.gfile.FastGFile(image, 'rb').read()
# Creates graph from saved GraphDef.
create_graph()
with tf.Session() as sess:
# Some useful tensors:
# 'softmax:0': A tensor containing the normalized prediction across
# 1000 labels.
# 'pool_3:0': A tensor containing the next-to-last layer containing 2048
# float description of the image.
# 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG
# encoding of the image.
# Runs the softmax tensor by feeding the image_data as input to the graph.
softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
predictions = sess.run(softmax_tensor,
{'DecodeJpeg/contents:0': image_data})
predictions = np.squeeze(predictions)
# Creates node ID --> English string lookup.
node_lookup = NodeLookup()
top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1]
for node_id in top_k:
human_string = node_lookup.id_to_string(node_id)
score = predictions[node_id]
print('%s (score = %.5f)' % (human_string, score)) | python | def run_inference_on_image(image):
"""Runs inference on an image.
Args:
image: Image file name.
Returns:
Nothing
"""
if not tf.gfile.Exists(image):
tf.logging.fatal('File does not exist %s', image)
image_data = tf.gfile.FastGFile(image, 'rb').read()
# Creates graph from saved GraphDef.
create_graph()
with tf.Session() as sess:
# Some useful tensors:
# 'softmax:0': A tensor containing the normalized prediction across
# 1000 labels.
# 'pool_3:0': A tensor containing the next-to-last layer containing 2048
# float description of the image.
# 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG
# encoding of the image.
# Runs the softmax tensor by feeding the image_data as input to the graph.
softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
predictions = sess.run(softmax_tensor,
{'DecodeJpeg/contents:0': image_data})
predictions = np.squeeze(predictions)
# Creates node ID --> English string lookup.
node_lookup = NodeLookup()
top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1]
for node_id in top_k:
human_string = node_lookup.id_to_string(node_id)
score = predictions[node_id]
print('%s (score = %.5f)' % (human_string, score)) | [
"def",
"run_inference_on_image",
"(",
"image",
")",
":",
"if",
"not",
"tf",
".",
"gfile",
".",
"Exists",
"(",
"image",
")",
":",
"tf",
".",
"logging",
".",
"fatal",
"(",
"'File does not exist %s'",
",",
"image",
")",
"image_data",
"=",
"tf",
".",
"gfile",
".",
"FastGFile",
"(",
"image",
",",
"'rb'",
")",
".",
"read",
"(",
")",
"# Creates graph from saved GraphDef.",
"create_graph",
"(",
")",
"with",
"tf",
".",
"Session",
"(",
")",
"as",
"sess",
":",
"# Some useful tensors:",
"# 'softmax:0': A tensor containing the normalized prediction across",
"# 1000 labels.",
"# 'pool_3:0': A tensor containing the next-to-last layer containing 2048",
"# float description of the image.",
"# 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG",
"# encoding of the image.",
"# Runs the softmax tensor by feeding the image_data as input to the graph.",
"softmax_tensor",
"=",
"sess",
".",
"graph",
".",
"get_tensor_by_name",
"(",
"'softmax:0'",
")",
"predictions",
"=",
"sess",
".",
"run",
"(",
"softmax_tensor",
",",
"{",
"'DecodeJpeg/contents:0'",
":",
"image_data",
"}",
")",
"predictions",
"=",
"np",
".",
"squeeze",
"(",
"predictions",
")",
"# Creates node ID --> English string lookup.",
"node_lookup",
"=",
"NodeLookup",
"(",
")",
"top_k",
"=",
"predictions",
".",
"argsort",
"(",
")",
"[",
"-",
"FLAGS",
".",
"num_top_predictions",
":",
"]",
"[",
":",
":",
"-",
"1",
"]",
"for",
"node_id",
"in",
"top_k",
":",
"human_string",
"=",
"node_lookup",
".",
"id_to_string",
"(",
"node_id",
")",
"score",
"=",
"predictions",
"[",
"node_id",
"]",
"print",
"(",
"'%s (score = %.5f)'",
"%",
"(",
"human_string",
",",
"score",
")",
")"
] | Runs inference on an image.
Args:
image: Image file name.
Returns:
Nothing | [
"Runs",
"inference",
"on",
"an",
"image",
"."
] | 5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b | https://github.com/tyarkoni/pliers/blob/5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b/pliers/external/tensorflow/classify_image.py#L130-L167 |
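Calling the function above end to end — a sketch only; cropped_panda.jpg is the sample image bundled with the Inception tarball this script downloads, and that filename is an assumption here:

import os

run_inference_on_image(os.path.join(FLAGS.model_dir, 'cropped_panda.jpg'))
# prints FLAGS.num_top_predictions lines such as:
#   giant panda, panda, ... (score = 0.89...)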
5,797 | tyarkoni/pliers | pliers/external/tensorflow/classify_image.py | NodeLookup.load | def load(self, label_lookup_path, uid_lookup_path):
"""Loads a human readable English name for each softmax node.
Args:
label_lookup_path: string UID to integer node ID.
uid_lookup_path: string UID to human-readable string.
Returns:
dict from integer node ID to human-readable string.
"""
if not tf.gfile.Exists(uid_lookup_path):
tf.logging.fatal('File does not exist %s', uid_lookup_path)
if not tf.gfile.Exists(label_lookup_path):
tf.logging.fatal('File does not exist %s', label_lookup_path)
# Loads mapping from string UID to human-readable string
proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()
uid_to_human = {}
p = re.compile(r'[n\d]*[ \S,]*')
for line in proto_as_ascii_lines:
parsed_items = p.findall(line)
uid = parsed_items[0]
human_string = parsed_items[2]
uid_to_human[uid] = human_string
# Loads mapping from string UID to integer node ID.
node_id_to_uid = {}
proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()
for line in proto_as_ascii:
if line.startswith(' target_class:'):
target_class = int(line.split(': ')[1])
if line.startswith(' target_class_string:'):
target_class_string = line.split(': ')[1]
node_id_to_uid[target_class] = target_class_string[1:-2]
# Loads the final mapping of integer node ID to human-readable string
node_id_to_name = {}
for key, val in node_id_to_uid.items():
if val not in uid_to_human:
tf.logging.fatal('Failed to locate: %s', val)
name = uid_to_human[val]
node_id_to_name[key] = name
return node_id_to_name | python | def load(self, label_lookup_path, uid_lookup_path):
"""Loads a human readable English name for each softmax node.
Args:
label_lookup_path: string UID to integer node ID.
uid_lookup_path: string UID to human-readable string.
Returns:
dict from integer node ID to human-readable string.
"""
if not tf.gfile.Exists(uid_lookup_path):
tf.logging.fatal('File does not exist %s', uid_lookup_path)
if not tf.gfile.Exists(label_lookup_path):
tf.logging.fatal('File does not exist %s', label_lookup_path)
# Loads mapping from string UID to human-readable string
proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()
uid_to_human = {}
p = re.compile(r'[n\d]*[ \S,]*')
for line in proto_as_ascii_lines:
parsed_items = p.findall(line)
uid = parsed_items[0]
human_string = parsed_items[2]
uid_to_human[uid] = human_string
# Loads mapping from string UID to integer node ID.
node_id_to_uid = {}
proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()
for line in proto_as_ascii:
if line.startswith(' target_class:'):
target_class = int(line.split(': ')[1])
if line.startswith(' target_class_string:'):
target_class_string = line.split(': ')[1]
node_id_to_uid[target_class] = target_class_string[1:-2]
# Loads the final mapping of integer node ID to human-readable string
node_id_to_name = {}
for key, val in node_id_to_uid.items():
if val not in uid_to_human:
tf.logging.fatal('Failed to locate: %s', val)
name = uid_to_human[val]
node_id_to_name[key] = name
return node_id_to_name | [
"def",
"load",
"(",
"self",
",",
"label_lookup_path",
",",
"uid_lookup_path",
")",
":",
"if",
"not",
"tf",
".",
"gfile",
".",
"Exists",
"(",
"uid_lookup_path",
")",
":",
"tf",
".",
"logging",
".",
"fatal",
"(",
"'File does not exist %s'",
",",
"uid_lookup_path",
")",
"if",
"not",
"tf",
".",
"gfile",
".",
"Exists",
"(",
"label_lookup_path",
")",
":",
"tf",
".",
"logging",
".",
"fatal",
"(",
"'File does not exist %s'",
",",
"label_lookup_path",
")",
"# Loads mapping from string UID to human-readable string",
"proto_as_ascii_lines",
"=",
"tf",
".",
"gfile",
".",
"GFile",
"(",
"uid_lookup_path",
")",
".",
"readlines",
"(",
")",
"uid_to_human",
"=",
"{",
"}",
"p",
"=",
"re",
".",
"compile",
"(",
"r'[n\\d]*[ \\S,]*'",
")",
"for",
"line",
"in",
"proto_as_ascii_lines",
":",
"parsed_items",
"=",
"p",
".",
"findall",
"(",
"line",
")",
"uid",
"=",
"parsed_items",
"[",
"0",
"]",
"human_string",
"=",
"parsed_items",
"[",
"2",
"]",
"uid_to_human",
"[",
"uid",
"]",
"=",
"human_string",
"# Loads mapping from string UID to integer node ID.",
"node_id_to_uid",
"=",
"{",
"}",
"proto_as_ascii",
"=",
"tf",
".",
"gfile",
".",
"GFile",
"(",
"label_lookup_path",
")",
".",
"readlines",
"(",
")",
"for",
"line",
"in",
"proto_as_ascii",
":",
"if",
"line",
".",
"startswith",
"(",
"' target_class:'",
")",
":",
"target_class",
"=",
"int",
"(",
"line",
".",
"split",
"(",
"': '",
")",
"[",
"1",
"]",
")",
"if",
"line",
".",
"startswith",
"(",
"' target_class_string:'",
")",
":",
"target_class_string",
"=",
"line",
".",
"split",
"(",
"': '",
")",
"[",
"1",
"]",
"node_id_to_uid",
"[",
"target_class",
"]",
"=",
"target_class_string",
"[",
"1",
":",
"-",
"2",
"]",
"# Loads the final mapping of integer node ID to human-readable string",
"node_id_to_name",
"=",
"{",
"}",
"for",
"key",
",",
"val",
"in",
"node_id_to_uid",
".",
"items",
"(",
")",
":",
"if",
"val",
"not",
"in",
"uid_to_human",
":",
"tf",
".",
"logging",
".",
"fatal",
"(",
"'Failed to locate: %s'",
",",
"val",
")",
"name",
"=",
"uid_to_human",
"[",
"val",
"]",
"node_id_to_name",
"[",
"key",
"]",
"=",
"name",
"return",
"node_id_to_name"
] | Loads a human readable English name for each softmax node.
Args:
label_lookup_path: string UID to integer node ID.
uid_lookup_path: string UID to human-readable string.
Returns:
dict from integer node ID to human-readable string. | [
"Loads",
"a",
"human",
"readable",
"English",
"name",
"for",
"each",
"softmax",
"node",
"."
] | 5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b | https://github.com/tyarkoni/pliers/blob/5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b/pliers/external/tensorflow/classify_image.py#L69-L112 |
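A hedged sketch of NodeLookup.load(). It assumes the class constructor wires these two metadata files (shipped in the same Inception archive) as defaults — neither the constructor nor the exact filenames appear in this row:

import os

lookup = NodeLookup()
mapping = lookup.load(
    os.path.join(FLAGS.model_dir,
                 'imagenet_2012_challenge_label_map_proto.pbtxt'),
    os.path.join(FLAGS.model_dir,
                 'imagenet_synset_to_human_label_map.txt'))
print(len(mapping))  # roughly 1000 integer node IDs mapped to readable labels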
5,798 | tyarkoni/pliers | pliers/datasets/text.py | fetch_dictionary | def fetch_dictionary(name, url=None, format=None, index=0, rename=None,
save=True, force_retrieve=False):
''' Retrieve a dictionary of text norms from the web or local storage.
Args:
name (str): The name of the dictionary. If no url is passed, this must
match either one of the keys in the predefined dictionary file (see
dictionaries.json), or the name assigned to a previous dictionary
retrieved from a specific URL.
url (str): The URL of dictionary file to retrieve. Optional if name
matches an existing dictionary.
format (str): One of 'csv', 'tsv', 'xls', or None. Used to read data
appropriately. Note that most forms of compression will be detected
and handled automatically, so the format string refers only to the
format of the decompressed file. When format is None, the format
will be inferred from the filename.
        index (str, int): The name or numeric index of the column to use as
the dictionary index. Passed directly to pd.ix.
rename (dict): An optional dictionary passed to pd.rename(); can be
used to rename columns in the loaded dictionary. Note that the
locally-saved dictionary will retain the renamed columns.
save (bool): Whether or not to save the dictionary locally the first
time it is retrieved.
force_retrieve (bool): If True, remote dictionary will always be
downloaded, even if a local copy exists (and the local copy will
be overwritten).
Returns: A pandas DataFrame indexed by strings (typically words).
'''
file_path = os.path.join(_get_dictionary_path(), name + '.csv')
if not force_retrieve and os.path.exists(file_path):
df = pd.read_csv(file_path)
index = datasets[name].get('index', df.columns[index])
return df.set_index(index)
if name in datasets:
url = datasets[name]['url']
format = datasets[name].get('format', format)
index = datasets[name].get('index', index)
rename = datasets.get('rename', rename)
if url is None:
raise ValueError("Dataset '%s' not found in local storage or presets, "
"and no download URL provided." % name)
data = _download_dictionary(url, format=format, rename=rename)
if isinstance(index, int):
index = data.columns[index]
data = data.set_index(index)
if save:
file_path = os.path.join(_get_dictionary_path(), name + '.csv')
data.to_csv(file_path, encoding='utf-8')
return data | python | def fetch_dictionary(name, url=None, format=None, index=0, rename=None,
save=True, force_retrieve=False):
''' Retrieve a dictionary of text norms from the web or local storage.
Args:
name (str): The name of the dictionary. If no url is passed, this must
match either one of the keys in the predefined dictionary file (see
dictionaries.json), or the name assigned to a previous dictionary
retrieved from a specific URL.
url (str): The URL of dictionary file to retrieve. Optional if name
matches an existing dictionary.
format (str): One of 'csv', 'tsv', 'xls', or None. Used to read data
appropriately. Note that most forms of compression will be detected
and handled automatically, so the format string refers only to the
format of the decompressed file. When format is None, the format
will be inferred from the filename.
        index (str, int): The name or numeric index of the column to use as
the dictionary index. Passed directly to pd.ix.
rename (dict): An optional dictionary passed to pd.rename(); can be
used to rename columns in the loaded dictionary. Note that the
locally-saved dictionary will retain the renamed columns.
save (bool): Whether or not to save the dictionary locally the first
time it is retrieved.
force_retrieve (bool): If True, remote dictionary will always be
downloaded, even if a local copy exists (and the local copy will
be overwritten).
Returns: A pandas DataFrame indexed by strings (typically words).
'''
file_path = os.path.join(_get_dictionary_path(), name + '.csv')
if not force_retrieve and os.path.exists(file_path):
df = pd.read_csv(file_path)
index = datasets[name].get('index', df.columns[index])
return df.set_index(index)
if name in datasets:
url = datasets[name]['url']
format = datasets[name].get('format', format)
index = datasets[name].get('index', index)
rename = datasets.get('rename', rename)
if url is None:
raise ValueError("Dataset '%s' not found in local storage or presets, "
"and no download URL provided." % name)
data = _download_dictionary(url, format=format, rename=rename)
if isinstance(index, int):
index = data.columns[index]
data = data.set_index(index)
if save:
file_path = os.path.join(_get_dictionary_path(), name + '.csv')
data.to_csv(file_path, encoding='utf-8')
return data | [
"def",
"fetch_dictionary",
"(",
"name",
",",
"url",
"=",
"None",
",",
"format",
"=",
"None",
",",
"index",
"=",
"0",
",",
"rename",
"=",
"None",
",",
"save",
"=",
"True",
",",
"force_retrieve",
"=",
"False",
")",
":",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"_get_dictionary_path",
"(",
")",
",",
"name",
"+",
"'.csv'",
")",
"if",
"not",
"force_retrieve",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"file_path",
")",
":",
"df",
"=",
"pd",
".",
"read_csv",
"(",
"file_path",
")",
"index",
"=",
"datasets",
"[",
"name",
"]",
".",
"get",
"(",
"'index'",
",",
"df",
".",
"columns",
"[",
"index",
"]",
")",
"return",
"df",
".",
"set_index",
"(",
"index",
")",
"if",
"name",
"in",
"datasets",
":",
"url",
"=",
"datasets",
"[",
"name",
"]",
"[",
"'url'",
"]",
"format",
"=",
"datasets",
"[",
"name",
"]",
".",
"get",
"(",
"'format'",
",",
"format",
")",
"index",
"=",
"datasets",
"[",
"name",
"]",
".",
"get",
"(",
"'index'",
",",
"index",
")",
"rename",
"=",
"datasets",
".",
"get",
"(",
"'rename'",
",",
"rename",
")",
"if",
"url",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Dataset '%s' not found in local storage or presets, \"",
"\"and no download URL provided.\"",
"%",
"name",
")",
"data",
"=",
"_download_dictionary",
"(",
"url",
",",
"format",
"=",
"format",
",",
"rename",
"=",
"rename",
")",
"if",
"isinstance",
"(",
"index",
",",
"int",
")",
":",
"index",
"=",
"data",
".",
"columns",
"[",
"index",
"]",
"data",
"=",
"data",
".",
"set_index",
"(",
"index",
")",
"if",
"save",
":",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"_get_dictionary_path",
"(",
")",
",",
"name",
"+",
"'.csv'",
")",
"data",
".",
"to_csv",
"(",
"file_path",
",",
"encoding",
"=",
"'utf-8'",
")",
"return",
"data"
] | Retrieve a dictionary of text norms from the web or local storage.
Args:
name (str): The name of the dictionary. If no url is passed, this must
match either one of the keys in the predefined dictionary file (see
dictionaries.json), or the name assigned to a previous dictionary
retrieved from a specific URL.
url (str): The URL of dictionary file to retrieve. Optional if name
matches an existing dictionary.
format (str): One of 'csv', 'tsv', 'xls', or None. Used to read data
appropriately. Note that most forms of compression will be detected
and handled automatically, so the format string refers only to the
format of the decompressed file. When format is None, the format
will be inferred from the filename.
        index (str, int): The name or numeric index of the column to use as
the dictionary index. Passed directly to pd.ix.
rename (dict): An optional dictionary passed to pd.rename(); can be
used to rename columns in the loaded dictionary. Note that the
locally-saved dictionary will retain the renamed columns.
save (bool): Whether or not to save the dictionary locally the first
time it is retrieved.
force_retrieve (bool): If True, remote dictionary will always be
downloaded, even if a local copy exists (and the local copy will
be overwritten).
Returns: A pandas DataFrame indexed by strings (typically words). | [
"Retrieve",
"a",
"dictionary",
"of",
"text",
"norms",
"from",
"the",
"web",
"or",
"local",
"storage",
"."
] | 5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b | https://github.com/tyarkoni/pliers/blob/5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b/pliers/datasets/text.py#L57-L111 |
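Usage sketch for fetch_dictionary(); 'affect' stands in for whatever keys pliers' dictionaries.json actually defines, so treat the name as an assumption. Note also that the row's line rename = datasets.get('rename', rename) consults the top-level datasets dict rather than datasets[name], so preset rename maps appear never to be applied — that reads like an upstream bug, not extraction damage.

norms = fetch_dictionary('affect')             # download once, then cached CSV
print(norms.head())                            # DataFrame indexed by word
norms = fetch_dictionary('affect', force_retrieve=True)  # refresh local copy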
5,799 | tyarkoni/pliers | pliers/extractors/api/google.py | GoogleVisionAPIFaceExtractor._to_df | def _to_df(self, result, handle_annotations=None):
'''
Converts a Google API Face JSON response into a Pandas Dataframe.
Args:
result (ExtractorResult): Result object from which to parse out a
Dataframe.
handle_annotations (str): How returned face annotations should be
handled in cases where there are multiple faces.
'first' indicates to only use the first face JSON object, all
other values will default to including every face.
'''
annotations = result._data
if handle_annotations == 'first':
annotations = [annotations[0]]
face_results = []
for i, annotation in enumerate(annotations):
data_dict = {}
for field, val in annotation.items():
if 'Confidence' in field:
data_dict['face_' + field] = val
elif 'oundingPoly' in field:
for j, vertex in enumerate(val['vertices']):
for dim in ['x', 'y']:
name = '%s_vertex%d_%s' % (field, j+1, dim)
val = vertex[dim] if dim in vertex else np.nan
data_dict[name] = val
elif field == 'landmarks':
for lm in val:
name = 'landmark_' + lm['type'] + '_%s'
lm_pos = {name %
k: v for (k, v) in lm['position'].items()}
data_dict.update(lm_pos)
else:
data_dict[field] = val
face_results.append(data_dict)
return pd.DataFrame(face_results) | python | def _to_df(self, result, handle_annotations=None):
'''
Converts a Google API Face JSON response into a Pandas Dataframe.
Args:
result (ExtractorResult): Result object from which to parse out a
Dataframe.
handle_annotations (str): How returned face annotations should be
handled in cases where there are multiple faces.
'first' indicates to only use the first face JSON object, all
other values will default to including every face.
'''
annotations = result._data
if handle_annotations == 'first':
annotations = [annotations[0]]
face_results = []
for i, annotation in enumerate(annotations):
data_dict = {}
for field, val in annotation.items():
if 'Confidence' in field:
data_dict['face_' + field] = val
elif 'oundingPoly' in field:
for j, vertex in enumerate(val['vertices']):
for dim in ['x', 'y']:
name = '%s_vertex%d_%s' % (field, j+1, dim)
val = vertex[dim] if dim in vertex else np.nan
data_dict[name] = val
elif field == 'landmarks':
for lm in val:
name = 'landmark_' + lm['type'] + '_%s'
lm_pos = {name %
k: v for (k, v) in lm['position'].items()}
data_dict.update(lm_pos)
else:
data_dict[field] = val
face_results.append(data_dict)
return pd.DataFrame(face_results) | [
"def",
"_to_df",
"(",
"self",
",",
"result",
",",
"handle_annotations",
"=",
"None",
")",
":",
"annotations",
"=",
"result",
".",
"_data",
"if",
"handle_annotations",
"==",
"'first'",
":",
"annotations",
"=",
"[",
"annotations",
"[",
"0",
"]",
"]",
"face_results",
"=",
"[",
"]",
"for",
"i",
",",
"annotation",
"in",
"enumerate",
"(",
"annotations",
")",
":",
"data_dict",
"=",
"{",
"}",
"for",
"field",
",",
"val",
"in",
"annotation",
".",
"items",
"(",
")",
":",
"if",
"'Confidence'",
"in",
"field",
":",
"data_dict",
"[",
"'face_'",
"+",
"field",
"]",
"=",
"val",
"elif",
"'oundingPoly'",
"in",
"field",
":",
"for",
"j",
",",
"vertex",
"in",
"enumerate",
"(",
"val",
"[",
"'vertices'",
"]",
")",
":",
"for",
"dim",
"in",
"[",
"'x'",
",",
"'y'",
"]",
":",
"name",
"=",
"'%s_vertex%d_%s'",
"%",
"(",
"field",
",",
"j",
"+",
"1",
",",
"dim",
")",
"val",
"=",
"vertex",
"[",
"dim",
"]",
"if",
"dim",
"in",
"vertex",
"else",
"np",
".",
"nan",
"data_dict",
"[",
"name",
"]",
"=",
"val",
"elif",
"field",
"==",
"'landmarks'",
":",
"for",
"lm",
"in",
"val",
":",
"name",
"=",
"'landmark_'",
"+",
"lm",
"[",
"'type'",
"]",
"+",
"'_%s'",
"lm_pos",
"=",
"{",
"name",
"%",
"k",
":",
"v",
"for",
"(",
"k",
",",
"v",
")",
"in",
"lm",
"[",
"'position'",
"]",
".",
"items",
"(",
")",
"}",
"data_dict",
".",
"update",
"(",
"lm_pos",
")",
"else",
":",
"data_dict",
"[",
"field",
"]",
"=",
"val",
"face_results",
".",
"append",
"(",
"data_dict",
")",
"return",
"pd",
".",
"DataFrame",
"(",
"face_results",
")"
] | Converts a Google API Face JSON response into a Pandas Dataframe.
Args:
result (ExtractorResult): Result object from which to parse out a
Dataframe.
handle_annotations (str): How returned face annotations should be
handled in cases where there are multiple faces.
'first' indicates to only use the first face JSON object, all
other values will default to including every face. | [
"Converts",
"a",
"Google",
"API",
"Face",
"JSON",
"response",
"into",
"a",
"Pandas",
"Dataframe",
"."
] | 5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b | https://github.com/tyarkoni/pliers/blob/5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b/pliers/extractors/api/google.py#L50-L89 |
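To see the frame _to_df() builds, a stubbed result object suffices — the keys below mirror the Google Vision face-annotation fields the method branches on, and every value is made up:

from types import SimpleNamespace

stub = SimpleNamespace(_data=[{
    'detectionConfidence': 0.98,
    'boundingPoly': {'vertices': [{'x': 10, 'y': 20}, {'x': 30}]},  # missing 'y' -> NaN
    'landmarks': [{'type': 'LEFT_EYE',
                   'position': {'x': 12.0, 'y': 18.5, 'z': 0.2}}],
    'joyLikelihood': 'VERY_LIKELY',
}])
df = GoogleVisionAPIFaceExtractor._to_df(None, stub)  # self is never used here
print(df.columns.tolist())
# ['face_detectionConfidence', 'boundingPoly_vertex1_x', 'boundingPoly_vertex1_y',
#  'boundingPoly_vertex2_x', 'boundingPoly_vertex2_y', 'landmark_LEFT_EYE_x',
#  'landmark_LEFT_EYE_y', 'landmark_LEFT_EYE_z', 'joyLikelihood']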