docstring (string, 52–499 chars) | function (string, 67–35.2k chars) | __index_level_0__ (int64, 52.6k–1.16M)
Accepts a path to search for modules. The method filters out files
that end in .pyc and files that start with __.
Arguments:
p (string): The path to search
Returns:
list of file names
|
def get_module_names(p):
mods = [f.split('.')[0] for f in listdir(p)
if isfile(join(p, f)) and not f.endswith('.pyc') and not f.startswith('__')]
print(len(mods))
return mods
| 266,384 |
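A usage sketch, assuming get_module_names above is in scope (with its listdir/isfile/join imports); the directory path is hypothetical:

# Lists the importable module names in a package directory, skipping
# compiled .pyc files and dunder files such as __init__.py.
print(get_module_names('/tmp/mypackage'))  # e.g. ['foo', 'bar']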
Imports a module into the current runtime environment
This function emulates the Python import system that allows for
importing full path modules. It will break down the module and
import each part (or skip if it is already loaded in cache).
Args:
name (str): The name of the module to import. This should be
the full path of the module
Returns:
The module that was imported
|
def import_module(name):
parts = name.split('.')
path = None
module_name = ''
fhandle = None
for index, part in enumerate(parts):
module_name = part if index == 0 else '%s.%s' % (module_name, part)
path = [path] if path is not None else path
try:
fhandle, path, descr = imp.find_module(part, path)
if module_name in sys.modules:
# since imp.load_module works like reload, need to be sure not
# to reload a previously loaded module
mod = sys.modules[module_name]
else:
mod = imp.load_module(module_name, fhandle, path, descr)
finally:
# lets be sure to clean up after ourselves
if fhandle:
fhandle.close()
return mod
| 266,389 |
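The imp module used above is deprecated and was removed in Python 3.12; as a sketch, importlib provides the same dotted-path import with sys.modules caching:

import importlib

# import_module resolves the full dotted path and reuses any
# already-loaded parts from the sys.modules cache.
mod = importlib.import_module('os.path')
print(mod.join('a', 'b'))  # 'a/b' on POSIX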
Log a message to syslog and stderr
Args:
text (str): The string object to print
|
def debug(text):
frame = inspect.currentframe().f_back
module = frame.f_globals['__name__']
func = frame.f_code.co_name
msg = "%s.%s: %s" % (module, func, text)
_LOGGER.debug(msg)
| 266,391 |
Converts the supplied value to a list object
This function will inspect the supplied value and return an
iterable in the form of a list.
Args:
value (object): A valid Python object
Returns:
An iterable object of type list
|
def make_iterable(value):
if sys.version_info <= (3, 0):
# Convert unicode values to strings for Python 2
if isinstance(value, unicode):
value = str(value)
if isinstance(value, str) or isinstance(value, dict):
value = [value]
if not isinstance(value, collections.Iterable):
raise TypeError('value must be an iterable object')
return value
| 266,392 |
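A minimal Python 3 rendering of the same check, as a sketch (the Iterable ABC now lives in collections.abc, and unicode is gone):

from collections.abc import Iterable

def make_iterable_py3(value):
    # Wrap bare strings and dicts so callers can always iterate.
    if isinstance(value, (str, dict)):
        value = [value]
    if not isinstance(value, Iterable):
        raise TypeError('value must be an iterable object')
    return value

print(make_iterable_py3('tg1'))           # ['tg1']
print(make_iterable_py3(['tg1', 'tg2']))  # unchanged: ['tg1', 'tg2']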
Scans the specified config and parses the switchport mode value
Args:
config (str): The interface configuration block to scan
Returns:
dict: A Python dict object with the value of switchport mode.
The dict returned is intended to be merged into the resource
dict
|
def _parse_mode(self, config):
value = re.search(r'switchport mode (\w+)', config, re.M)
return dict(mode=value.group(1))
| 266,399 |
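The regex can be exercised standalone on a made-up interface block:

import re

config = '''interface Ethernet1
   switchport mode trunk
   switchport trunk allowed vlan 10,20'''
value = re.search(r'switchport mode (\w+)', config, re.M)
print(dict(mode=value.group(1)))  # {'mode': 'trunk'}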
Scans the specified config and parses the trunk group values
Args:
config (str): The interface configuration block
Returns:
A dict object with the trunk group values that can be merged
into the resource dict
|
def _parse_trunk_groups(self, config):
values = re.findall(r'switchport trunk group ([^\s]+)', config, re.M)
return dict(trunk_groups=values)
| 266,400 |
Scans the specified config and parses the access-vlan value
Args:
config (str): The interface configuration block to scan
Returns:
dict: A Python dict object with the value of switchport access
value. The dict returned is intended to be merged into the
resource dict
|
def _parse_access_vlan(self, config):
value = re.search(r'switchport access vlan (\d+)', config)
return dict(access_vlan=value.group(1))
| 266,401 |
Scans the specified config and parses the trunk native vlan value
Args:
config (str): The interface configuration block to scan
Returns:
dict: A Python dict object with the value of switchport trunk
native vlan value. The dict returned is intended to be
merged into the resource dict
|
def _parse_trunk_native_vlan(self, config):
match = re.search(r'switchport trunk native vlan (\d+)', config)
return dict(trunk_native_vlan=match.group(1))
| 266,402 |
Scans the specified config and parses the trunk allowed vlans value
Args:
config (str): The interface configuration block to scan
Returns:
dict: A Python dict object with the value of switchport trunk
allowed vlans value. The dict returned is intended to be
merged into the resource dict
|
def _parse_trunk_allowed_vlans(self, config):
match = re.search(r'switchport trunk allowed vlan (.+)$', config, re.M)
return dict(trunk_allowed_vlans=match.group(1))
| 266,403 |
Configures the switchport trunk group value
Args:
intf (str): The interface identifier to configure.
value (str): The set of values to configure the trunk group
default (bool): Configures the trunk group default value
disable (bool): Negates all trunk group settings
Returns:
True if the config operation succeeds otherwise False
|
def set_trunk_groups(self, intf, value=None, default=False, disable=False):
if default:
cmd = 'default switchport trunk group'
return self.configure_interface(intf, cmd)
if disable:
cmd = 'no switchport trunk group'
return self.configure_interface(intf, cmd)
current_value = self.get(intf)['trunk_groups']
failure = False
value = make_iterable(value)
for name in set(value).difference(current_value):
if not self.add_trunk_group(intf, name):
failure = True
for name in set(current_value).difference(value):
if not self.remove_trunk_group(intf, name):
failure = True
return not failure
| 266,406 |
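The reconciliation above is plain set arithmetic; a standalone sketch with made-up trunk group names:

current_value = {'tg1', 'tg2'}
value = {'tg2', 'tg3'}
print(set(value).difference(current_value))  # {'tg3'}: passed to add_trunk_group
print(set(current_value).difference(value))  # {'tg1'}: passed to remove_trunk_group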
Adds the specified trunk group to the interface
Args:
intf (str): The interface name to apply the trunk group to
value (str): The trunk group value to apply to the interface
Returns:
True if the operation was successfully applied, otherwise False
|
def add_trunk_group(self, intf, value):
string = 'switchport trunk group {}'.format(value)
return self.configure_interface(intf, string)
| 266,407 |
Removes a specified trunk group from the interface
Args:
intf (str): The interface name to remove the trunk group from
value (str): The trunk group value
Returns:
True if the operation was successfully applied, otherwise False
|
def remove_trunk_group(self, intf, value):
string = 'no switchport trunk group {}'.format(value)
return self.configure_interface(intf, string)
| 266,408 |
Scans the specified config block and returns the description value
Args:
config (str): The interface config block to scan
Returns:
dict: Returns a dict object with the description value retrieved
from the config block. If the description value is not
configured, None is returned as the value. The returned dict
is intended to be merged into the interface resource dict.
|
def _parse_description(self, config):
value = None
match = re.search(r'description (.+)$', config, re.M)
if match:
value = match.group(1)
return dict(description=value)
| 266,413 |
Scans the config block and returns the flowcontrol send value
Args:
config (str): The interface config block to scan
Returns:
dict: Returns a dict object with the flowcontrol send value
retrieved from the config block. The returned dict object
is intended to be merged into the interface resource dict
|
def _parse_flowcontrol_send(self, config):
value = 'off'
match = re.search(r'flowcontrol send (\w+)$', config, re.M)
if match:
value = match.group(1)
return dict(flowcontrol_send=value)
| 266,418 |
Scans the config block and returns the flowcontrol receive value
Args:
config (str): The interface config block to scan
Returns:
dict: Returns a dict object with the flowcontrol receive value
retrieved from the config block. The returned dict object
is intended to be merged into the interface resource dict
|
def _parse_flowcontrol_receive(self, config):
value = 'off'
match = re.search(r'flowcontrol receive (\w+)$', config, re.M)
if match:
value = match.group(1)
return dict(flowcontrol_receive=value)
| 266,419 |
Configures the sFlow state on the interface
Args:
name (string): The interface identifier. It must be a full
interface name (ie Ethernet, not Et)
value (boolean): True if sFlow should be enabled otherwise False
default (boolean): Specifies the default value for sFlow
disable (boolean): Specifies to disable sFlow
Returns:
True if the operation succeeds otherwise False is returned
|
def set_sflow(self, name, value=None, default=False, disable=False):
if value not in [True, False, None]:
raise ValueError('value must be True, False, or None')
commands = ['interface %s' % name]
commands.append(self.command_builder('sflow enable', value=value,
default=default, disable=disable))
return self.configure(commands)
| 266,423 |
Returns the LACP mode for the specified Port-Channel interface
Args:
name(str): The Port-Channel interface name to return the LACP
mode for from the configuration
Returns:
The configured LACP mode for the interface. Valid mode values
are 'on', 'passive', 'active'
|
def get_lacp_mode(self, name):
members = self.get_members(name)
if not members:
return DEFAULT_LACP_MODE
for member in self.get_members(name):
match = re.search(r'channel-group\s\d+\smode\s(?P<value>.+)',
self.get_block('^interface %s' % member))
return match.group('value')
| 266,429 |
Returns the member interfaces for the specified Port-Channel
Args:
name(str): The Port-channel interface name to return the member
interfaces for
Returns:
A list of physical interface names that belong to the specified
interface
|
def get_members(self, name):
grpid = re.search(r'(\d+)', name).group()
command = 'show port-channel %s all-ports' % grpid
config = self.node.enable(command, 'text')
return re.findall(r'\b(?!Peer)Ethernet[\d/]*\b',
config[0]['result']['output'])
| 266,430 |
Configures the LACP mode of the member interfaces
Args:
name(str): The Port-Channel interface name to configure the
LACP mode
mode(str): The LACP mode to configure the member interfaces to.
Valid values are 'on', 'passive', 'active'
Returns:
True if the operation succeeds otherwise False
|
def set_lacp_mode(self, name, mode):
if mode not in ['on', 'passive', 'active']:
return False
grpid = re.search(r'(\d+)', name).group()
remove_commands = list()
add_commands = list()
for member in self.get_members(name):
remove_commands.append('interface %s' % member)
remove_commands.append('no channel-group %s' % grpid)
add_commands.append('interface %s' % member)
add_commands.append('channel-group %s mode %s' % (grpid, mode))
return self.configure(remove_commands + add_commands)
| 266,432 |
Configures the Port-Channel LACP fallback timeout
The fallback timeout configures the period an interface in
fallback mode remains in LACP mode without receiving a PDU.
Args:
name(str): The Port-Channel interface name
value(int): port-channel lacp fallback timeout in seconds
Returns:
True if the operation succeeds otherwise False is returned
|
def set_lacp_timeout(self, name, value=None):
commands = ['interface %s' % name]
string = 'port-channel lacp fallback timeout'
commands.append(self.command_builder(string, value=value))
return self.configure(commands)
| 266,434 |
Parses the conf block and returns the vxlan source-interface value
Parses the provided configuration block and returns the value of
vxlan source-interface. If the value is not configured, this method
will return DEFAULT_SRC_INTF instead.
Args:
config (str): The Vxlan config block to scan
Return:
dict: A dict object intended to be merged into the resource dict
|
def _parse_source_interface(self, config):
match = re.search(r'vxlan source-interface ([^\s]+)', config)
value = match.group(1) if match else self.DEFAULT_SRC_INTF
return dict(source_interface=value)
| 266,436 |
Adds a new VTEP endpoint to the global or local flood list
EosVersion:
4.13.7M
Args:
name (str): The name of the interface to configure
vtep (str): The IP address of the remote VTEP endpoint to add
vlan (str): The VLAN ID associated with this VTEP. If the VLAN
keyword is used, then the VTEP is configured as a local flood
endpoint
Returns:
True if the command completes successfully
|
def add_vtep(self, name, vtep, vlan=None):
if not vlan:
cmd = 'vxlan flood vtep add {}'.format(vtep)
else:
cmd = 'vxlan vlan {} flood vtep add {}'.format(vlan, vtep)
return self.configure_interface(name, cmd)
| 266,441 |
Removes a VTEP endpoint from the global or local flood list
EosVersion:
4.13.7M
Args:
name (str): The name of the interface to configure
vtep (str): The IP address of the remote VTEP endpoint to remove
vlan (str): The VLAN ID associated with this VTEP. If the VLAN
keyword is used, then the VTEP is configured as a local flood
endpoint
Returns:
True if the command completes successfully
|
def remove_vtep(self, name, vtep, vlan=None):
if not vlan:
cmd = 'vxlan flood vtep remove {}'.format(vtep)
else:
cmd = 'vxlan vlan {} flood vtep remove {}'.format(vlan, vtep)
return self.configure_interface(name, cmd)
| 266,442 |
Adds a new vlan to vni mapping for the interface
EosVersion:
4.13.7M
Args:
name (str): The name of the interface to configure
vid (str, int): The vlan id to map to the vni
vni (str, int): The vni value to use
Returns:
True if the command completes successfully
|
def update_vlan(self, name, vid, vni):
cmd = 'vxlan vlan %s vni %s' % (vid, vni)
return self.configure_interface(name, cmd)
| 266,443 |
Scans the config block and returns the username as a dict
Args:
config (tuple): The parsed username fields (name, privilege, role,
nopassword flag, secret format, secret, sshkey)
Returns:
dict: A resource dict that is intended to be merged into the
user resource
|
def _parse_username(self, config):
(username, priv, role, nopass, fmt, secret, sshkey) = config
resource = dict()
resource['privilege'] = priv
resource['role'] = role
resource['nopassword'] = nopass == 'nopassword'
resource['format'] = fmt
resource['secret'] = secret
resource['sshkey'] = sshkey
return {username: resource}
| 266,445 |
Creates a new user on the local node
Args:
name (str): The name of the user to create
secret (str): The secret (password) to assign to this user
encryption (str): Specifies how the secret is encoded. Valid
values are "cleartext", "md5", "sha512". The default is
"cleartext"
Returns:
True if the operation was successful otherwise False
|
def create_with_secret(self, name, secret, encryption):
try:
encryption = encryption or DEFAULT_ENCRYPTION
enc = ENCRYPTION_MAP[encryption]
except KeyError:
raise TypeError('encryption must be one of "cleartext", "md5"'
' or "sha512"')
cmd = 'username %s secret %s %s' % (name, enc, secret)
return self.configure(cmd)
| 266,447 |
Configures the user privilege value in EOS
Args:
name (str): The name of the user to create
value (int): The privilege value to assign to the user. Valid
values are in the range of 0 to 15
Returns:
True if the operation was successful otherwise False
Raises:
TypeError: if the value is not in the valid range
|
def set_privilege(self, name, value=None):
cmd = 'username %s' % name
if value is not None:
if not isprivilege(value):
raise TypeError('privilege value must be between 0 and 15')
cmd += ' privilege %s' % value
else:
cmd += ' privilege 1'
return self.configure(cmd)
| 266,448 |
Configures the user role value in EOS
Args:
name (str): The name of the user to create
value (str): The value to configure for the user role
default (bool): Configure the user role using the EOS CLI
default command
disable (bool): Negate the user role using the EOS CLI no command
Returns:
True if the operation was successful otherwise False
|
def set_role(self, name, value=None, default=False, disable=False):
cmd = self.command_builder('username %s role' % name, value=value,
default=default, disable=disable)
return self.configure(cmd)
| 266,449 |
Reads the file specified by filename
This method will load the eapi.conf file specified by filename into
the instance object. It will also add the default connection localhost
if it was not defined in the eapi.conf file
Args:
filename (str): The full path to the file to load
|
def read(self, filename):
try:
SafeConfigParser.read(self, filename)
except SafeConfigParserError as exc:
# Ignore file and syslog a message on SafeConfigParser errors
msg = ("%s: parsing error in eapi conf file: %s" %
(type(exc).__name__, filename))
debug(msg)
self._add_default_connection()
for name in self.sections():
if name.startswith('connection:') and \
'host' not in dict(self.items(name)):
self.set(name, 'host', name.split(':')[1])
self.generate_tags()
| 266,456 |
Returns a section of the config
Args:
regex (str): A valid regular expression used to select sections
of configuration to return
config (str): The configuration to return. Valid values for config
are "running_config" or "startup_config". The default value
is "running_config"
Returns:
The configuration section as a string object.
|
def section(self, regex, config='running_config'):
if config in ['running_config', 'startup_config']:
config = getattr(self, config)
match = re.search(regex, config, re.M)
if not match:
raise TypeError('config section not found')
block_start, line_end = match.regs[0]
match = re.search(r'^[^\s]', config[line_end:], re.M)
if not match:
raise TypeError('could not find end block')
_, block_end = match.regs[0]
block_end = line_end + block_end
return config[block_start:block_end]
| 266,469 |
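The two-step search (find the section header, then the next non-indented line) can be sketched standalone on a made-up running config; this simplified version uses match.start()/end() instead of the regs bookkeeping above:

import re

running_config = ('interface Ethernet1\n'
                  '   description uplink\n'
                  '   switchport mode trunk\n'
                  'interface Ethernet2\n'
                  '   shutdown\n')
head = re.search(r'^interface Ethernet1', running_config, re.M)
tail = re.search(r'^[^\s]', running_config[head.end():], re.M)
print(running_config[head.start():head.end() + tail.start()])
# interface Ethernet1
#    description uplink
#    switchport mode trunk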
create a parameter ensemble from parfiles. Accepts parfiles with fewer
parameters than the control file (these get NaNs in the ensemble) or extra
parameters in the parfiles (these get dropped)
Parameters:
pst : pyemu.Pst
parfile_names : list of str
par file names
real_names : list of str
optional list of realization names. If None, a single integer counter is used
Returns:
pyemu.ParameterEnsemble
|
def from_parfiles(cls,pst,parfile_names,real_names=None):
if isinstance(pst,str):
pst = pyemu.Pst(pst)
dfs = {}
if real_names is not None:
assert len(real_names) == len(parfile_names)
else:
real_names = np.arange(len(parfile_names))
for rname,pfile in zip(real_names,parfile_names):
assert os.path.exists(pfile), "ParameterEnsemble.read_parfiles() error: " + \
"file: {0} not found".format(pfile)
df = read_parfile(pfile)
#check for scale differences - I don't know who is dumb enough
#to change scale between par files and pst...
diff = df.scale - pst.parameter_data.scale
if diff.apply(np.abs).sum() > 0.0:
warnings.warn("differences in scale detected, applying scale in par file",
PyemuWarning)
#df.loc[:,"parval1"] *= df.scale
dfs[rname] = df.parval1.values
df_all = pd.DataFrame(data=dfs).T
df_all.columns = df.index
if len(pst.par_names) != df_all.shape[1]:
#if len(pst.par_names) < df_all.shape[1]:
# raise Exception("pst is not compatible with par files")
pset = set(pst.par_names)
dset = set(df_all.columns)
diff = pset.difference(dset)
if len(diff) > 0:
warnings.warn("the following parameters are not in the par files (getting NaNs) :{0}".
format(','.join(diff)),PyemuWarning)
blank_df = pd.DataFrame(index=df_all.index,columns=diff)
df_all = pd.concat([df_all,blank_df],axis=1)
diff = dset.difference(pset)
if len(diff) > 0:
warnings.warn("the following par file parameters are not in the control (being dropped):{0}".
format(','.join(diff)),PyemuWarning)
df_all = df_all.loc[:, pst.par_names]
return ParameterEnsemble.from_dataframe(df=df_all,pst=pst)
| 266,564 |
Make a template file from a list of parameter names, the values of which
should be listed in order in a model input file
Args:
parnames: list of names from which to make a template file
tplfilename: filename for TPL file (default: model.input.tpl)
Returns:
writes a file <tplfilename> with each parameter name on a line
|
def simple_tpl_from_pars(parnames, tplfilename='model.input.tpl'):
with open(tplfilename, 'w') as ofp:
ofp.write('ptf ~\n')
[ofp.write('~{0:^12}~\n'.format(cname)) for cname in parnames]
| 266,758 |
Writes an instruction file for reading the values named in obsnames, in
order, one per line from a model output file
Args:
obsnames: list of obsnames to read in
insfilename: filename for INS file (default: model.output.ins)
Returns:
writes a file <insfilename> with each observation read off a line
|
def simple_ins_from_obs(obsnames, insfilename='model.output.ins'):
with open(insfilename, 'w') as ofp:
ofp.write('pif ~\n')
[ofp.write('!{0}!\n'.format(cob)) for cob in obsnames]
| 266,759 |
Creates a Pst object from a list of parameter names and a list of observation names.
Default values are provided for the TPL and INS
Args:
parnames: list of names from which to make a template file
obsnames: list of obsnames to read in
tplfilename: filename for TPL file (default: model.input.tpl)
insfilename: filename for INS file (default: model.output.ins)
Returns:
Pst object
|
def pst_from_parnames_obsnames(parnames, obsnames,
tplfilename='model.input.tpl', insfilename='model.output.ins'):
simple_tpl_from_pars(parnames, tplfilename)
simple_ins_from_obs(obsnames, insfilename)
modelinputfilename = tplfilename.replace('.tpl','')
modeloutputfilename = insfilename.replace('.ins','')
return pyemu.Pst.from_io_files(tplfilename, modelinputfilename, insfilename, modeloutputfilename)
| 266,760 |
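For concreteness, the two helper writers above produce files like the following for a couple of made-up names (12-wide centred parameter fields in the TPL; bang-delimited observation markers in the INS):

with open('model.input.tpl', 'w') as ofp:
    ofp.write('ptf ~\n')
    for cname in ['k1', 'k2']:
        ofp.write('~{0:^12}~\n'.format(cname))
# model.input.tpl now reads:
# ptf ~
# ~     k1     ~
# ~     k2     ~

with open('model.output.ins', 'w') as ofp:
    ofp.write('pif ~\n')
    for cob in ['h1', 'h2']:
        ofp.write('!{0}!\n'.format(cob))
# model.output.ins now reads:
# pif ~
# !h1!
# !h2!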
unsubscribe_event(self, event_id) -> None
Unsubscribes a client from receiving the event specified by event_id.
Parameters :
- event_id : (int) is the event identifier returned by the
DeviceProxy::subscribe_event(). Unlike in
TangoC++ we check that the event_id has been
subscribed in this DeviceProxy.
Return : None
Throws : EventSystemFailed
|
def __DeviceProxy__unsubscribe_event(self, event_id):
events_del = set()
timestamp = time.time()
se = self.__get_event_map()
with self.__get_event_map_lock():
# first delete event callbacks that have expired
for evt_id, (_, expire_time) in self._pending_unsubscribe.items():
if expire_time <= timestamp:
events_del.add(evt_id)
for evt_id in events_del:
del self._pending_unsubscribe[evt_id]
# unsubscribe and put the callback in the pending unsubscribe callbacks
try:
evt_info = se[event_id]
except KeyError:
raise KeyError("This device proxy does not own this subscription " + str(event_id))
del se[event_id]
self._pending_unsubscribe[event_id] = evt_info[0], timestamp + _UNSUBSCRIBE_LIFETIME
self.__unsubscribe_event(event_id)
| 269,597 |
remove_attribute(self, attr_name) -> None
Remove one attribute from the device attribute list.
Parameters :
- attr_name : (str) attribute name
Return : None
Throws : DevFailed
|
def __DeviceImpl__remove_attribute(self, attr_name):
try:
# Call this method in a try/except in case remove_attribute
# is called during the DS shutdown sequence
cl = self.get_device_class()
except:
return
dev_list = cl.get_device_list()
nb_dev = len(dev_list)
if nb_dev == 1:
self._remove_attr_meth(attr_name)
else:
nb_except = 0
for dev in dev_list:
try:
dev.get_device_attr().get_attr_by_name(attr_name)
except:
nb_except += 1
if nb_except == nb_dev - 1:
self._remove_attr_meth(attr_name)
self._remove_attribute(attr_name)
| 269,711 |
add_command(self, cmd, device_level=True) -> Command
Add a new command to the device command list.
Parameters :
- cmd : the new command to be added to the list
- device_level : Set this flag to true if the command must be added
for only this device
Return : Command
Throws : DevFailed
|
def __DeviceImpl__add_command(self, cmd, device_level=True):
add_name_in_list = False # This flag is always False, what use is it?
try:
config = dict(cmd.__tango_command__[1][2])
if config and ("Display level" in config):
disp_level = config["Display level"]
else:
disp_level = DispLevel.OPERATOR
self._add_command(cmd.__name__, cmd.__tango_command__[1], disp_level,
device_level)
if add_name_in_list:
cl = self.get_device_class()
cl.dyn_cmd_added_methods.append(cmd.__name__)
except:
if add_name_in_list:
self._remove_cmd(cmd.__name__)
raise
return cmd
| 269,713 |
remove_command(self, cmd_name, free_it=False, clean_db=True) -> None
Remove one command from the device command list.
Parameters :
- cmd_name : (str) command name to be removed from the list
- free_it : Boolean set to true if the command object must be freed.
- clean_db : Clean command related information (included polling info
if the command is polled) from database.
Return : None
Throws : DevFailed
|
def __DeviceImpl__remove_command(self, cmd_name, free_it=False, clean_db=True):
try:
# Call this method in a try/except in case remove
# is called during the DS shutdown sequence
cl = self.get_device_class()
except:
return
if cl.dyn_cmd_added_methods.count(cmd_name) != 0:
cl.dyn_cmd_added_methods.remove(cmd_name)
self._remove_command(cmd_name, free_it, clean_db)
| 269,714 |
log(self, level, msg, *args) -> None
Sends the given message to the selected tango stream.
Parameters :
- level: (Level.LevelLevel) Log level
- msg : (str) the message to be sent to the stream
- args: (seq<str>) list of optional message arguments
Return : None
.. versionchanged:
|
def __Logger__log(self, level, msg, *args):
self.__log(level, msg % args)
| 269,721 |
log_unconditionally(self, level, msg, *args) -> None
Sends the given message to the selected tango stream,
without checking the level.
Parameters :
- level: (Level.LevelLevel) Log level
- msg : (str) the message to be sent to the stream
- args: (seq<str>) list of optional message arguments
Return : None
|
def __Logger__log_unconditionally(self, level, msg, *args):
self.__log_unconditionally(level, msg % args)
| 269,722 |
set_enum_labels(self, enum_labels) -> None
Set default enumeration labels.
Parameters :
- enum_labels : (seq<str>) list of enumeration labels
New in PyTango 9.2.0
|
def __UserDefaultAttrProp_set_enum_labels(self, enum_labels):
elbls = StdStringVector()
for enu in enum_labels:
elbls.append(enu)
return self._set_enum_labels(elbls)
| 269,723 |
export_server(self, dev_info) -> None
Export a group of devices to the database.
Parameters :
- devinfo : (sequence<DbDevExportInfo> | DbDevExportInfos | DbDevExportInfo)
containing the device(s) to export information
Return : None
Throws : ConnectionFailed, CommunicationFailed, DevFailed from device (DB_SQLError)
|
def __Database__export_server(self, dev_info):
if not isinstance(dev_info, collections_abc.Sequence) and \
not isinstance(dev_info, DbDevExportInfo):
raise TypeError(
'Value must be a DbDevExportInfos, a seq<DbDevExportInfo> or '
'a DbDevExportInfo')
if isinstance(dev_info, DbDevExportInfos):
pass
elif isinstance(dev_info, DbDevExportInfo):
dev_info = seq_2_DbDevExportInfos((dev_info,))
else:
dev_info = seq_2_DbDevExportInfos(dev_info)
self._export_server(dev_info)
| 269,863 |
delete_device(self, device_name) -> None
Deletes an existing device from the database and from this running
server
Throws tango.DevFailed:
- the device name doesn't exist in the database
- the device name doesn't exist in this DS.
New in PyTango 7.1.2
Parameters :
- device_name : (str) the device name
Return : None
|
def __DeviceClass__delete_device(self, device_name):
util = Util.instance()
util.delete_device(self.get_name(), device_name)
| 269,930 |
set_default_property_values(self, dev_class, class_prop, dev_prop) -> None
Sets the default property values
Parameters :
- dev_class : (DeviceClass) device class object
- class_prop : (dict<str,>) class properties
- dev_prop : (dict<str,>) device properties
Return : None
|
def set_default_property_values(self, dev_class, class_prop, dev_prop):
for name in class_prop:
type = self.get_property_type(name, class_prop)
val = self.get_property_values(name, class_prop)
val = self.values2string(val, type)
desc = self.get_property_description(name, class_prop)
dev_class.add_wiz_class_prop(name, desc, val)
for name in dev_prop:
type = self.get_property_type(name, dev_prop)
val = self.get_property_values(name, dev_prop)
val = self.values2string(val, type)
desc = self.get_property_description(name, dev_prop)
dev_class.add_wiz_dev_prop(name, desc, val)
| 269,935 |
get_class_properties(self, dev_class, class_prop) -> None
Returns the class properties
Parameters :
- dev_class : (DeviceClass) the DeviceClass object
- class_prop : [in, out] (dict<str, None>) the property names. Will be filled
with property values
Return : None
|
def get_class_properties(self, dev_class, class_prop):
# initialize default values
if class_prop == {} or not Util._UseDb:
return
# call database to get properties
props = self.db.get_class_property(dev_class.get_name(), list(class_prop.keys()))
# if value defined in database, store it
for name in class_prop:
if props[name]:
type = self.get_property_type(name, class_prop)
values = self.stringArray2values(props[name], type)
self.set_property_values(name, class_prop, values)
else:
print(name + " property NOT found in database")
| 269,936 |
get_device_properties(self, dev, class_prop, dev_prop) -> None
Returns the device properties
Parameters :
- dev : (DeviceImpl) the device object
- class_prop : (dict<str, obj>) the class properties
- dev_prop : [in,out] (dict<str, None>) the device property names
Return : None
|
def get_device_properties(self, dev, class_prop, dev_prop):
# initialize default properties
if dev_prop == {} or not Util._UseDb:
return
# Call database to get properties
props = self.db.get_device_property(dev.get_name(), list(dev_prop.keys()))
# if value defined in database, store it
for name in dev_prop:
prop_value = props[name]
if len(prop_value):
data_type = self.get_property_type(name, dev_prop)
values = self.stringArray2values(prop_value, data_type)
if not self.is_empty_seq(values):
self.set_property_values(name, dev_prop, values)
else:
# Try to get it from class property
values = self.get_property_values(name, class_prop)
if not self.is_empty_seq(values):
if not self.is_seq(values):
values = [values]
data_type = self.get_property_type(name, class_prop)
values = self.stringArray2values(values, data_type)
if not self.is_empty_seq(values):
self.set_property_values(name, dev_prop, values)
else:
# Try to get it from class property
values = self.get_property_values(name, class_prop)
if not self.is_empty_seq(values):
if not self.is_seq(values):
values = [values]
data_type = self.get_property_type(name, class_prop)
values = self.stringArray2values(values, data_type)
if not self.is_empty_seq(values):
self.set_property_values(name, dev_prop, values)
| 269,937 |
get_property_type(self, prop_name, properties) -> CmdArgType
Gets the property type for the given property name using the
information given in properties
Parameters :
- prop_name : (str) property name
- properties : (dict<str,data>) property data
Return : (CmdArgType) the tango type for the given property
|
def get_property_type(self, prop_name, properties):
try:
tg_type = properties[prop_name][0]
except:
tg_type = CmdArgType.DevVoid
return tg_type
| 269,938 |
get_property_values(self, prop_name, properties) -> obj
Gets the property value
Parameters :
- prop_name : (str) property name
- properties : (dict<str,obj>) properties
Return : (obj) the value for the given property name
|
def get_property_values(self, prop_name, properties):
try:
tg_type = self.get_property_type(prop_name, properties)
val = properties[prop_name][2]
except:
val = []
if is_array(tg_type) or (isinstance(val, collections_abc.Sequence) and not len(val)):
return val
else:
if is_non_str_seq(val):
return val[0]
else:
return val
| 269,939 |
delete_device(self, klass_name, device_name) -> None
Deletes an existing device from the database and from this running
server
Throws tango.DevFailed:
- the device name doesn't exist in the database
- the device name doesn't exist in this DS.
New in PyTango 7.1.2
Parameters :
- klass_name : (str) the device class name
- device_name : (str) the device name
Return : None
|
def __Util__delete_device(self, klass_name, device_name):
db = self.get_database()
device_name = __simplify_device_name(device_name)
device_exists = True
try:
db.import_device(device_name)
except DevFailed as df:
device_exists = not df.args[0].reason == "DB_DeviceNotDefined"
# 1 - Make sure device name exists in the database
if not device_exists:
Except.throw_exception("PyAPI_DeviceNotDefined",
"The device %s is not defined in the database" % device_name,
"Util.delete_device")
# 2 - Make sure device name is defined in this server
class_device_name = "%s::%s" % (klass_name, device_name)
ds = self.get_dserver_device()
dev_names = ds.query_device()
device_exists = False
for dev_name in dev_names:
p = dev_name.index("::")
dev_name = dev_name[:p] + dev_name[p:].lower()
if dev_name == class_device_name:
device_exists = True
break
if not device_exists:
Except.throw_exception("PyAPI_DeviceNotDefinedInServer",
"The device %s is not defined in this server" % class_device_name,
"Util.delete_device")
db.delete_device(device_name)
dimpl = self.get_device_by_name(device_name)
dc = dimpl.get_device_class()
dc.device_destroyer(device_name)
| 270,046 |
Performs pre-emphasis on the signal.
Args:
signal (array): The input signal.
shift (int): The shift step.
cof (float): The pre-emphasis coefficient; 0 means no filtering.
Returns:
array: The pre-emphasized signal.
|
def preemphasis(signal, shift=1, cof=0.98):
rolled_signal = np.roll(signal, shift)
return signal - cof * rolled_signal
| 270,781 |
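A quick numeric check of the filter (np.roll wraps the last sample around to the front, so the first output value is an edge artefact):

import numpy as np

signal = np.array([1.0, 2.0, 3.0, 4.0])
rolled = np.roll(signal, 1)    # [4., 1., 2., 3.]
print(signal - 0.98 * rolled)  # [-2.92  1.02  1.04  1.06]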
This function extracts the derivative features.
Args:
feat (array): The main feature vector(For returning the second
order derivative it can be first-order derivative).
DeltaWindows (int): The value of DeltaWindows is set using
the configuration parameter DELTAWINDOW.
Returns:
array: Derivative feature vector - A NUMFRAMESxNUMFEATURES numpy
array which is the derivative features along the features.
|
def derivative_extraction(feat, DeltaWindows):
# Getting the shape of the vector.
rows, cols = feat.shape
# Defining the vector of differences.
DIF = np.zeros(feat.shape, dtype=feat.dtype)
Scale = 0
# Pad only along features in the vector.
FEAT = np.lib.pad(feat, ((0, 0), (DeltaWindows, DeltaWindows)), 'edge')
for i in range(DeltaWindows):
# Start index
offset = DeltaWindows
# The dynamic range
Range = i + 1
dif = Range * (FEAT[:, offset + Range:offset + Range + cols]
- FEAT[:, offset - Range:offset - Range + cols])
Scale += 2 * np.power(Range, 2)
DIF += dif
return DIF / Scale
| 270,785 |
This function is aimed to perform global cepstral mean and
variance normalization (CMVN) on input feature vector "vec".
The code assumes that there is one observation per row.
Args:
vec (array): input feature matrix
(size:(num_observation,num_features))
variance_normalization (bool): If the variance
normalization should be performed or not.
Return:
array: The mean(or mean+variance) normalized feature vector.
|
def cmvn(vec, variance_normalization=False):
eps = 2**-30
rows, cols = vec.shape
# Mean calculation
norm = np.mean(vec, axis=0)
norm_vec = np.tile(norm, (rows, 1))
# Mean subtraction
mean_subtracted = vec - norm_vec
# Variance normalization
if variance_normalization:
stdev = np.std(mean_subtracted, axis=0)
stdev_vec = np.tile(stdev, (rows, 1))
output = mean_subtracted / (stdev_vec + eps)
else:
output = mean_subtracted
return output
| 270,786 |
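A small worked example of the mean-subtraction path; each column of the output has zero mean:

import numpy as np

vec = np.array([[1.0, 10.0],
                [3.0, 20.0]])
norm = np.mean(vec, axis=0)  # [ 2. 15.]
print(vec - norm)            # [[-1. -5.]
                             #  [ 1.  5.]]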
This function extracts temporal derivative features which are
first and second derivatives.
Args:
feature (array): The feature vector which its size is: N x M
Return:
array: The feature cube vector which contains the static, first and second derivative features of size: N x M x 3
|
def extract_derivative_feature(feature):
first_derivative_feature = processing.derivative_extraction(
feature, DeltaWindows=2)
second_derivative_feature = processing.derivative_extraction(
first_derivative_feature, DeltaWindows=2)
# Creating the feature cube for each file
feature_cube = np.concatenate(
(feature[:, :, None], first_derivative_feature[:, :, None],
second_derivative_feature[:, :, None]),
axis=2)
return feature_cube
| 270,794 |
Checks if a user has permissions for a given object.
Args:
permissions: The permissions the current user must be compliant with
obj: The object for which the permissions apply
Returns:
True if the user complies with all the permissions for the given
object; an empty string otherwise (or False if no object is given).
|
def checkPermissions(permissions=[], obj=None):
if not obj:
return False
sm = getSecurityManager()
for perm in permissions:
if not sm.checkPermission(perm, obj):
return ''
return True
| 273,171 |
Changes into a given directory and cleans up after it is done
Args:
new_directory: The directory to change to
clean_up: A method to clean up the working directory once done
|
def cd(new_directory, clean_up=lambda: True): # pylint: disable=invalid-name
previous_directory = os.getcwd()
os.chdir(os.path.expanduser(new_directory))
try:
yield
finally:
os.chdir(previous_directory)
clean_up()
| 273,598 |
Return a checkered image of size width x height.
Arguments:
* width: image width
* height: image height
* c1: first color (RGBA)
* c2: second color (RGBA)
* s: size of the squares
|
def create_checkered_image(width, height, c1=(154, 154, 154, 255),
c2=(100, 100, 100, 255), s=6):
im = Image.new("RGBA", (width, height), c1)
draw = ImageDraw.Draw(im, "RGBA")
for i in range(s, width, 2 * s):
for j in range(0, height, 2 * s):
draw.rectangle(((i, j), ((i + s - 1, j + s - 1))), fill=c2)
for i in range(0, width, 2 * s):
for j in range(s, height, 2 * s):
draw.rectangle(((i, j), ((i + s - 1, j + s - 1))), fill=c2)
return im
| 273,724 |
This returns the Adyen API endpoint based on the provided platform,
service and action.
Args:
platform (str): Adyen platform, ie 'live' or 'test'.
service (str): API service to place request through.
action (str): the API action to perform.
|
def _determine_api_url(self, platform, service, action):
base_uri = settings.BASE_PAL_URL.format(platform)
if service == "Recurring":
api_version = settings.API_RECURRING_VERSION
elif service == "Payout":
api_version = settings.API_PAYOUT_VERSION
else:
api_version = settings.API_PAYMENT_VERSION
return '/'.join([base_uri, service, api_version, action])
| 273,839 |
This returns the Adyen HPP endpoint based on the provided platform,
and action.
Args:
platform (str): Adyen platform, ie 'live' or 'test'.
action (str): the HPP action to perform.
possible actions: select, pay, skipDetails, directory
|
def _determine_hpp_url(self, platform, action):
base_uri = settings.BASE_HPP_URL.format(platform)
service = action + '.shtml'
result = '/'.join([base_uri, service])
return result
| 273,840 |
This returns the Adyen API endpoint based on the provided platform,
service and action.
Args:
platform (str): Adyen platform, ie 'live' or 'test'.
action (str): the API action to perform.
|
def _determine_checkout_url(self, platform, action):
api_version = settings.API_CHECKOUT_VERSION
if platform == "test":
base_uri = settings.ENDPOINT_CHECKOUT_TEST
elif self.live_endpoint_prefix is not None and platform == "live":
base_uri = settings.ENDPOINT_CHECKOUT_LIVE_SUFFIX.format(
self.live_endpoint_prefix)
elif self.live_endpoint_prefix is None and platform == "live":
errorstring = ('Please set the live_endpoint_prefix when using the live platform.')
raise AdyenEndpointInvalidFormat(errorstring)
if action == "paymentsDetails":
action = "payments/details"
if action == "paymentsResult":
action = "payments/result"
if action == "originKeys":
api_version = settings.API_CHECKOUT_UTILITY_VERSION
return '/'.join([base_uri, api_version, action])
| 273,841 |
Concatenate anything into a list.
Args:
a: the first thing
b: the second thing
Returns:
list. All the things in a list.
|
def list_and_add(a, b):
if not isinstance(b, list):
b = [b]
if not isinstance(a, list):
a = [a]
return a + b
| 273,902 |
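Usage, assuming list_and_add above is in scope:

print(list_and_add(1, 2))       # [1, 2]
print(list_and_add([1, 2], 3))  # [1, 2, 3]
print(list_and_add(1, [2, 3]))  # [1, 2, 3]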
Find the array value, or index of the array value, closest to some given
value.
Args:
a (ndarray)
value (float)
index (bool): whether to return the index instead of the array value.
Returns:
float. The array value (or index, as int) nearest the specified value.
|
def find_nearest(a, value, index=False):
i = np.abs(a - value).argmin()
if index:
return i
else:
return a[i]
| 273,905 |
From ``bruges``
Normalize an array to [0,1] or to arbitrary new min and max.
Args:
a (ndarray)
new_min (float): the new min, default 0.
new_max (float): the new max, default 1.
Returns:
ndarray. The normalized array.
|
def normalize(a, new_min=0.0, new_max=1.0):
n = (a - np.amin(a)) / np.amax(a - np.amin(a))
return n * (new_max - new_min) + new_min
| 273,908 |
Remove the NaNs from the top and tail (only) of a well log.
Args:
a (ndarray): An array.
Returns:
ndarray: The top and tailed array.
|
def top_and_tail(a):
if np.all(np.isnan(a)):
return np.array([])
nans = np.where(~np.isnan(a))[0]
last = None if nans[-1]+1 == a.size else nans[-1]+1
return a[nans[0]:last]
| 273,912 |
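A quick check of the behaviour, assuming top_and_tail above is in scope: interior NaNs survive; only the leading and trailing runs are stripped:

import numpy as np

a = np.array([np.nan, np.nan, 1.0, 2.0, np.nan, 3.0, np.nan])
print(top_and_tail(a))  # [ 1.  2. nan  3.]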
Decimal degrees to DMS.
Args:
dd (float). Decimal degrees.
Return:
tuple. Degrees, minutes, and seconds.
|
def dd2dms(dd):
m, s = divmod(dd * 3600, 60)
d, m = divmod(m, 60)
return int(d), int(m), s
| 273,913 |
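A worked example of the divmod chain, assuming dd2dms above is in scope:

# 123.456 dd -> 444441.6 total seconds
# divmod(444441.6, 60) -> (7407.0 min, ~21.6 s)
# divmod(7407.0, 60)   -> (123 deg, 27 min)
print(dd2dms(123.456))  # (123, 27, ~21.6)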
A Ricker wavelet.
Args:
f (float): frequency in Hz, e.g. 25 Hz.
length (float): Length in s, e.g. 0.128.
dt (float): sample interval in s, e.g. 0.001.
Returns:
tuple. time basis, amplitude values.
|
def ricker(f, length, dt):
t = np.linspace(-length/2, (length-dt)/2, int(length/dt))
y = (1. - 2.*(np.pi**2)*(f**2)*(t**2))*np.exp(-(np.pi**2)*(f**2)*(t**2))
return t, y
| 273,914 |
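A usage sketch, assuming ricker above (with the linspace fix) is in scope:

t, y = ricker(f=25, length=0.128, dt=0.001)
print(t[0], t[-1])  # ~ -0.064 0.0635 (a 128 ms window)
print(y.max())      # ~1.0: the wavelet peaks near t = 0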
Utility function to convert hex to (r,g,b) triples.
http://ageo.co/1CFxXpO
Args:
hexx (str): A hexadecimal colour, starting with '#'.
Returns:
tuple: The equivalent RGB triple, in the range 0 to 255.
|
def hex_to_rgb(hexx):
h = hexx.strip('#')
l = len(h)
return tuple(int(h[i:i+l//3], 16) for i in range(0, l, l//3))
| 273,915 |
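For example, assuming hex_to_rgb above is in scope:

print(hex_to_rgb('#9a9a9a'))  # (154, 154, 154)
print(hex_to_rgb('#ff0080'))  # (255, 0, 128)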
Function to decide if a hex colour is dark.
Args:
hexx (str): A hexadecimal colour, starting with '#'.
Returns:
bool: True if the colour's brightness is less than the given percent.
|
def hex_is_dark(hexx, percent=50):
r, g, b = hex_to_rgb(hexx)
luma = (0.2126 * r + 0.7152 * g + 0.0722 * b) / 2.55 # per ITU-R BT.709
return (luma < percent)
| 273,916 |
Function to decide what colour to use for a given hex colour.
Args:
hexx (str): A hexadecimal colour, starting with '#'.
Returns:
str: A light colour if the background colour is dark, otherwise a dark colour.
|
def text_colour_for_hex(hexx, percent=50, dark='#000000', light='#ffffff'):
return light if hex_is_dark(hexx, percent=percent) else dark
| 273,917 |
Make a Location object from a lasio object. Assumes we're starting
with a lasio object, l.
Args:
l (lasio).
remap (dict): Optional. A dict of 'old': 'new' LAS field names.
funcs (dict): Optional. A dict of 'las field': function() for
implementing a transform before loading. Can be a lambda.
Returns:
Location. An instance of this class.
|
def from_lasio(cls, l, remap=None, funcs=None):
params = {}
funcs = funcs or {}
funcs['location'] = str
for field, (sect, code) in las_fields['location'].items():
params[field] = utils.lasio_get(l,
sect,
code,
remap=remap,
funcs=funcs)
return cls(params)
| 273,925 |
Provides a transformation and interpolation function that converts
MD to TVD.
Args:
kind (str): The kind of interpolation to do, e.g. 'linear',
'cubic', 'nearest'.
Returns:
function.
|
def md2tvd(self, kind='linear'):
if self.position is None:
return lambda x: x
return interp1d(self.md, self.tvd,
kind=kind,
assume_sorted=True,
fill_value="extrapolate",
bounds_error=False)
| 273,927 |
Looks at all the wells in turn and returns the highest-priority
mnemonic from the alias table for each requested curve.
Args:
mnemonics (list)
alias (dict)
Returns:
list. A list of lists.
|
def get_mnemonics(self, mnemonics, uwis=None, alias=None):
# Let's not do the nested comprehension...
uwis = uwis or self.uwis
wells = [w for w in self.__list if w.uwi in uwis]
all_wells = []
for w in wells:
this_well = [w.get_mnemonic(m, alias=alias) for m in mnemonics]
all_wells.append(this_well)
return all_wells
| 273,940 |
Plot KDEs for all curves with the given name.
Args:
mnemonic (str): the name of the curve to look for.
alias (dict): a welly alias dictionary.
uwi_regex (str): a regex pattern. Only this part of the UWI will be displayed
on the plot of KDEs.
return_fig (bool): whether to return the matplotlib figure object.
Returns:
None or figure.
|
def plot_kdes(self, mnemonic, alias=None, uwi_regex=None, return_fig=False):
wells = self.find_wells_with_curve(mnemonic, alias=alias)
fig, axs = plt.subplots(len(self), 1, figsize=(10, 1.5*len(self)))
curves = [w.get_curve(mnemonic, alias=alias) for w in wells]
all_data = np.hstack(curves)
all_data = all_data[~np.isnan(all_data)]
# Find values for common axis to exclude outliers.
amax = np.percentile(all_data, 99)
amin = np.percentile(all_data, 1)
for i, w in enumerate(self):
c = w.get_curve(mnemonic, alias=alias)
if uwi_regex is not None:
label = re.sub(uwi_regex, r'\1', w.uwi)
else:
label = w.uwi
if c is not None:
axs[i] = c.plot_kde(ax=axs[i], amax=amax, amin=amin, label=label+'-'+str(c.mnemonic))
else:
continue
if return_fig:
return fig
else:
return
| 273,943 |
Returns a new Project with only the wells which have the named curve.
Args:
mnemonic (str): the name of the curve to look for.
alias (dict): a welly alias dictionary.
Returns:
project.
|
def find_wells_with_curve(self, mnemonic, alias=None):
return Project([w for w in self if w.get_curve(mnemonic, alias=alias) is not None])
| 273,944 |
Returns a new Project with only the wells which DO NOT have the named curve.
Args:
mnemonic (str): the name of the curve to look for.
alias (dict): a welly alias dictionary.
Returns:
project.
|
def find_wells_without_curve(self, mnemonic, alias=None):
return Project([w for w in self if w.get_curve(mnemonic, alias=alias) is None])
| 273,945 |
Returns a new Project with only the wells named by UWI.
Args:
uwis (list): list or tuple of UWI strings.
Returns:
project.
|
def get_wells(self, uwis=None):
if uwis is None:
return Project(self.__list)
return Project([w for w in self if w.uwi in uwis])
| 273,946 |
Returns a Well object identified by UWI
Args:
uwi (string): the UWI string for the well.
Returns:
well
|
def get_well(self, uwi):
if uwi is None:
raise ValueError('a UWI must be provided')
matching_wells = [w for w in self if w.uwi == uwi]
return matching_wells[0] if len(matching_wells) >= 1 else None
| 273,948 |
Returns a new Project object containing wells from self where
curves from the wells on the right have been added. Matching between
wells in self and right is based on UWI, and only wells in self
are considered
Args:
right (Project): the project to take curves from.
keys (list): the curve keys to copy; if None, all of them.
Returns:
project
|
def merge_wells(self, right, keys=None):
wells = []
for w in self:
rw = right.get_well(w.uwi)
if rw is not None:
if keys is None:
keys = list(rw.data.keys())
for k in keys:
try:
w.data[k] = rw.data[k]
except:
pass
wells.append(w)
return Project(wells)
| 273,949 |
Makes a pandas DataFrame containing Curve data for all the wells
in the Project. The DataFrame has a dual index of well UWI and
curve Depths. Requires `pandas`.
Args:
No arguments.
Returns:
`pandas.DataFrame`.
|
def df(self):
import pandas as pd
return pd.concat([w.df(uwi=True) for w in self])
| 273,950 |
Plot a curve.
Args:
ax (ax): A matplotlib axis.
legend (striplog.legend): A legend. Optional.
return_fig (bool): whether to return the matplotlib figure.
Default False.
kwargs: Arguments for ``ax.set()``
Returns:
ax. If you passed in an ax, otherwise None.
|
def plot(self, ax=None, legend=None, return_fig=False, **kwargs):
if ax is None:
fig = plt.figure(figsize=(2, 10))
ax = fig.add_subplot(111)
return_ax = False
else:
return_ax = True
d = None
if legend is not None:
try:
d = legend.get_decor(self)
except:
pass
if d is not None:
kwargs['color'] = d.colour
kwargs['lw'] = getattr(d, 'lineweight', None) or getattr(d, 'lw', 1)
kwargs['ls'] = getattr(d, 'linestyle', None) or getattr(d, 'ls', '-')
# Attempt to get axis parameters from decor.
axkwargs = {}
xlim = getattr(d, 'xlim', None)
if xlim is not None:
axkwargs['xlim'] = list(map(float, xlim.split(',')))
xticks = getattr(d, 'xticks', None)
if xticks is not None:
axkwargs['xticks'] = list(map(float, xticks.split(',')))
xscale = getattr(d, 'xscale', None)
if xscale is not None:
axkwargs['xscale'] = xscale
ax.set(**axkwargs)
ax.plot(self, self.basis, **kwargs)
ax.set_title(self.mnemonic) # no longer needed
ax.set_xlabel(self.units)
if False: # labeltop of axes?
ax.xaxis.tick_top()
if True: # rotate x-tick labels
labels = ax.get_xticklabels()
for label in labels:
label.set_rotation(90)
ax.set_ylim([self.stop, self.start])
ax.grid('on', color='k', alpha=0.33, lw=0.33, linestyle='-')
if return_ax:
return ax
elif return_fig:
return fig
else:
return None
| 273,961 |
Make a new curve in a new basis, given an existing one. Wraps
``to_basis()``.
Pass in a curve or the basis of a curve.
Args:
basis (ndarray): A basis, but can also be a Curve instance.
Returns:
Curve. The current instance in the new basis.
|
def to_basis_like(self, basis):
try: # To treat as a curve.
curve = basis
basis = curve.basis
undefined = curve.null
except:
undefined = None
return self.to_basis(basis=basis,
undefined=undefined)
| 273,964 |
Make a new curve in a new basis, given a basis, or a new start, step,
and/or stop. You only need to set the parameters you want to change.
If the new extents go beyond the current extents, the curve is padded
with the ``undefined`` parameter.
Args:
basis (ndarray)
start (float)
stop (float)
step (float)
undefined (float)
Returns:
Curve. The current instance in the new basis.
|
def to_basis(self, basis=None,
start=None,
stop=None,
step=None,
undefined=None):
if basis is None:
if start is None:
new_start = self.start
else:
new_start = start
new_step = step or self.step
new_stop = stop or self.stop
# new_adj_stop = new_stop + new_step/100 # To guarantee inclusion.
# basis = np.arange(new_start, new_adj_stop, new_step)
steps = 1 + (new_stop - new_start) / new_step
basis = np.linspace(new_start, new_stop, int(steps), endpoint=True)
else:
new_start = basis[0]
new_step = basis[1] - basis[0]
if undefined is None:
undefined = np.nan
interp = interp1d(self.basis, self,
bounds_error=False,
fill_value=undefined)
data = interp(basis)
params = self.__dict__.copy()
params['step'] = float(new_step)
params['start'] = float(new_start)
return Curve(data, params=params)
| 273,965 |
Private function. Implements read_at() for a single depth.
Args:
d (float)
interpolation (str)
index(bool)
return_basis (bool)
Returns:
float
|
def _read_at(self, d,
interpolation='linear',
index=False,
return_basis=False):
method = {'linear': utils.linear,
'none': None}
i, d = utils.find_previous(self.basis,
d,
index=True,
return_distance=True)
if index:
return i
else:
return method[interpolation](self[i], self[i+1], d)
| 273,966 |
Read the log at a specific depth or an array of depths.
Args:
d (float or array-like)
interpolation (str)
index(bool)
return_basis (bool)
Returns:
float or ndarray.
|
def read_at(self, d, **kwargs):
try:
return np.array([self._read_at(depth, **kwargs) for depth in d])
except:
return self._read_at(d, **kwargs)
| 273,967 |
Run a series of tests and return the corresponding results.
Args:
tests (list): a list of functions.
alias (dict): a dictionary mapping mnemonics to lists of mnemonics.
Returns:
list. The results. Stick to booleans (True = pass) or ints.
|
def quality(self, tests, alias=None):
# Gather the tests.
# First, anything called 'all', 'All', or 'ALL'.
# Second, anything with the name of the curve we're in now.
# Third, anything that the alias list has for this curve.
# (This requires a reverse look-up so it's a bit messy.)
this_tests =\
tests.get('each', [])+tests.get('Each', [])+tests.get('EACH', [])\
+ tests.get(self.mnemonic, [])\
+ utils.flatten_list([tests.get(a) for a in self.get_alias(alias=alias)])
this_tests = filter(None, this_tests)
# If we explicitly set zero tests for a particular key, then this
# overrides the 'all' and 'alias' tests.
if not tests.get(self.mnemonic, 1):
this_tests = []
return {test.__name__: test(self) for test in this_tests}
| 273,968 |
Run a series of tests and return the normalized score.
1.0: Passed all tests.
(0-1): Passed a fraction of tests.
0.0: Passed no tests.
-1.0: Took no tests.
Args:
tests (list): a list of functions.
alias (dict): a dictionary mapping mnemonics to lists of mnemonics.
Returns:
float. The fraction of tests passed, or -1 for 'took no tests'.
|
def quality_score(self, tests, alias=None):
results = self.quality(tests, alias=alias).values()
if results:
return sum(results) / len(results)
return -1
| 273,969 |
Block a log based on number of bins, or on cutoffs.
Args:
cutoffs (array)
values (array): the values to map to. Defaults to [0, 1, 2,...]
n_bins (int)
right (bool)
function (function): transform the log if you want.
Returns:
Curve.
|
def block(self,
cutoffs=None,
values=None,
n_bins=0,
right=False,
function=None):
# We'll return a copy.
params = self.__dict__.copy()
if (values is not None) and (cutoffs is None):
cutoffs = values[1:]
if (cutoffs is None) and (n_bins == 0):
cutoffs = np.mean(self)
if (n_bins != 0) and (cutoffs is None):
mi, ma = np.amin(self), np.amax(self)
cutoffs = np.linspace(mi, ma, n_bins+1)
cutoffs = cutoffs[:-1]
try: # To use cutoff as a list.
data = np.digitize(self, cutoffs, right)
except ValueError: # It's just a number.
data = np.digitize(self, [cutoffs], right)
if (function is None) and (values is None):
return Curve(data, params=params)
data = data.astype(float)
# Set the function for reducing.
f = function or utils.null
# Find the tops of the 'zones'.
tops, vals = utils.find_edges(data)
# End of array trick... adding this should remove the
# need for the marked lines below. But it doesn't.
# np.append(tops, None)
# np.append(vals, None)
if values is None:
# Transform each segment in turn, then deal with the last segment.
for top, base in zip(tops[:-1], tops[1:]):
data[top:base] = f(np.copy(self[top:base]))
data[base:] = f(np.copy(self[base:])) # See above
else:
for top, base, val in zip(tops[:-1], tops[1:], vals[:-1]):
data[top:base] = values[int(val)]
data[base:] = values[int(vals[-1])] # See above
return Curve(data, params=params)
| 273,970 |
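The binning at the heart of block() is np.digitize; a standalone sketch with made-up cutoffs:

import numpy as np

log = np.array([0.1, 0.4, 0.9, 0.2])
print(np.digitize(log, [0.3, 0.6]))  # [0 1 2 0]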
Private function. Smoother for other smoothing/conditioning functions.
Args:
window_length (int): the window length.
func1d (function): a function that takes a 1D array and returns a
scalar.
step (int): if you want to skip samples in the shifted versions.
Don't use this for smoothing, you will get strange results.
Returns:
ndarray: the resulting array.
|
def _rolling_window(self, window_length, func1d, step=1, return_rolled=False):
# Force odd.
if window_length % 2 == 0:
window_length += 1
shape = self.shape[:-1] + (self.shape[-1], window_length)
strides = self.strides + (step*self.strides[-1],)
data = np.nan_to_num(self)
data = np.pad(data, int(step*window_length//2), mode='edge')
rolled = np.lib.stride_tricks.as_strided(data,
shape=shape,
strides=strides)
result = np.apply_along_axis(func1d, -1, rolled)
result[np.isnan(self)] = np.nan
if return_rolled:
return result, rolled
else:
return result
| 273,971 |
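The same rolling-window idea can be sketched with NumPy's supported sliding_window_view helper (NumPy >= 1.20) rather than raw as_strided:

import numpy as np

a = np.arange(6, dtype=float)
window_length = 3
padded = np.pad(a, window_length // 2, mode='edge')
rolled = np.lib.stride_tricks.sliding_window_view(padded, window_length)
print(rolled.mean(axis=-1))  # [0.333... 1. 2. 3. 4. 4.666...], same length as a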
Runs any kind of function over a window.
Args:
window_length (int): the window length. Required.
samples (bool): window length is in samples. Use False for a window
length given in metres.
func1d (function): a function that takes a 1D array and returns a
scalar. Default: ``np.mean()``.
Returns:
Curve.
|
def apply(self, window_length, samples=True, func1d=None):
window_length /= 1 if samples else self.step
if func1d is None:
func1d = np.mean
params = self.__dict__.copy()
out = self._rolling_window(int(window_length), func1d)
return Curve(out, params=params)
| 273,973 |
Plot a synthetic.
Args:
ax (ax): A matplotlib axis.
legend (Legend): For now, only here to match API for other plot
methods.
return_fig (bool): whether to return the matplotlib figure.
Default False.
Returns:
ax. If you passed in an ax, otherwise None.
|
def plot(self, ax=None, return_fig=False, **kwargs):
if ax is None:
fig = plt.figure(figsize=(2, 10))
ax = fig.add_subplot(111)
return_ax = False
else:
return_ax = True
hypertime = np.linspace(self.start, self.stop, (10 * self.size - 1) + 1)
hyperamp = np.interp(hypertime, self.basis, self)
ax.plot(hyperamp, hypertime, 'k')
ax.fill_betweenx(hypertime, hyperamp, 0, hyperamp > 0.0, facecolor='k', lw=0)
ax.invert_yaxis()
ax.set_title(self.name)
if return_ax:
return ax
elif return_fig:
return fig
else:
return None
| 273,989 |
Given a LAS file, add curves from it to the current well instance.
Essentially just wraps ``add_curves_from_lasio()``.
Args:
fname (str): The path of the LAS file to read curves from.
remap (dict): Optional. A dict of 'old': 'new' LAS field names.
funcs (dict): Optional. A dict of 'las field': function() for
implementing a transform before loading. Can be a lambda.
Returns:
None. Works in place.
|
def add_curves_from_las(self, fname, remap=None, funcs=None):
try: # To treat as a single file
self.add_curves_from_lasio(lasio.read(fname),
remap=remap,
funcs=funcs
)
except: # It's a list!
for f in fname:
self.add_curves_from_lasio(lasio.read(f),
remap=remap,
funcs=funcs
)
return None
| 274,013 |
Given a lasio object, add curves from it to the current well instance.
Args:
l (lasio): The lasio object to read curves from.
remap (dict): Optional. A dict of 'old': 'new' LAS field names.
funcs (dict): Optional. A dict of 'las field': function() for
implementing a transform before loading. Can be a lambda.
Returns:
None. Works in place.
|
def add_curves_from_lasio(self, l, remap=None, funcs=None):
params = {}
for field, (sect, code) in LAS_FIELDS['data'].items():
params[field] = utils.lasio_get(l,
sect,
code,
remap=remap,
funcs=funcs)
curves = {c.mnemonic: Curve.from_lasio_curve(c, **params)
for c in l.curves}
# This will clobber anything with the same key!
self.data.update(curves)
return None
| 274,014 |
Private function. Depth track plotting.
Args:
ax (ax): A matplotlib axis.
md (ndarray): The measured depths of the track.
kind (str): The kind of track to plot.
Returns:
ax.
|
def _plot_depth_track(self, ax, md, kind='MD'):
if kind == 'MD':
ax.set_yscale('bounded', vmin=md.min(), vmax=md.max())
# ax.set_ylim([md.max(), md.min()])
elif kind == 'TVD':
tvd = self.location.md2tvd(md)
ax.set_yscale('piecewise', x=tvd, y=md)
# ax.set_ylim([tvd.max(), tvd.min()])
else:
raise Exception("Kind must be MD or TVD")
for sp in ax.spines.values():
sp.set_color('gray')
if ax.is_first_col():
pad = -10
ax.spines['left'].set_color('none')
ax.yaxis.set_ticks_position('right')
for label in ax.get_yticklabels():
label.set_horizontalalignment('right')
elif ax.is_last_col():
pad = -10
ax.spines['right'].set_color('none')
ax.yaxis.set_ticks_position('left')
for label in ax.get_yticklabels():
label.set_horizontalalignment('left')
else:
pad = -30
for label in ax.get_yticklabels():
label.set_horizontalalignment('center')
ax.tick_params(axis='y', colors='gray', labelsize=12, pad=pad)
ax.set_xticks([])
ax.depth_track = True
return ax
| 274,015 |
Look at the basis of all the curves in ``well.data`` and return a
basis with the minimum start, maximum depth, and minimum step.
Args:
keys (list): List of strings: the keys of the data items to
survey, if not all of them.
alias (dict): a dictionary mapping mnemonics to lists of mnemonics.
step (float): a new step, if you want to change it.
Returns:
ndarray. The most complete common basis.
|
def survey_basis(self, keys=None, alias=None, step=None):
if keys is None:
keys = [k for k, v in self.data.items() if isinstance(v, Curve)]
else:
keys = utils.flatten_list(keys)
starts, stops, steps = [], [], []
for k in keys:
d = self.get_curve(k, alias=alias)
if keys and (d is None):
continue
try:
starts.append(d.basis[0])
stops.append(d.basis[-1])
steps.append(d.basis[1] - d.basis[0])
except Exception as e:
pass
if starts and stops and steps:
step = step or min(steps)
return np.arange(min(starts), max(stops)+1e-9, step)
else:
return None
| 274,017 |
Give everything, or everything in the list of keys, the same basis.
If you don't provide a basis, welly will try to get one using
``survey_basis()``.
Args:
basis (ndarray): A basis: the regularly sampled depths at which
you want the samples.
keys (list): List of strings: the keys of the data items to
unify, if not all of them.
Returns:
None. Works in place.
|
def unify_basis(self, keys=None, basis=None):
if keys is None:
keys = [k for k, v in self.data.items() if isinstance(v, Curve)]
else:
keys = utils.flatten_list(keys)
if basis is None:
basis = self.survey_basis(keys=keys)
if basis is None:
m = "No basis was provided and welly could not retrieve common basis."
raise WellError(m)
for k in keys:
if keys and (k not in keys):
continue
try: # To treat as a curve.
self.data[k] = self.data[k].to_basis(basis)
except: # It's probably a striplog.
continue
return
| 274,018 |
Run tests on a cohort of curves.
Args:
tests (dict): a dictionary of tests, keyed by mnemonic; the key
'all' (or 'All', 'ALL') applies to every curve.
alias (dict): an alias dictionary, mapping mnemonics to lists of
mnemonics.
Returns:
dict.
|
def qc_curve_group(self, tests, alias=None):
keys = [k for k, v in self.data.items() if isinstance(v, Curve)]
if not keys:
return {}
all_tests = tests.get('all', tests.get('All', tests.get('ALL', [])))
data = {test.__name__: test(self, keys, alias) for test in all_tests}
results = {}
for i, key in enumerate(keys):
this = {}
for test, result in data.items():
this[test] = result[i]
results[key] = this
return results
| 274,026 |
Run a series of tests against the data and return the corresponding
results.
Args:
tests (list): a list of functions.
Returns:
list. The results. Stick to booleans (True = pass) or ints.
|
def qc_data(self, tests, alias=None):
# We'll get a result for each curve here.
r = {m: c.quality(tests, alias) for m, c in self.data.items()}
s = self.qc_curve_group(tests, alias=alias)
for m, results in r.items():
if m in s:
results.update(s[m])
return r
| 274,027 |
Turn a PROJ.4 string into a mapping of parameters. Bare parameters
like "+no_defs" are given a value of ``True``. All keys are checked
against the ``all_proj_keys`` list.
Args:
prjs (str): A PROJ4 string.
|
def from_string(cls, prjs):
def parse(v):
try:
return int(v)
except ValueError:
pass
try:
return float(v)
except ValueError:
return v
parts = [o.lstrip('+') for o in prjs.strip().split()]
items = map(
lambda kv: len(kv) == 2 and (kv[0], parse(kv[1])) or (kv[0], True),
(p.split('=') for p in parts))
return cls({k: v for k, v in items if '+'+k in PROJ4_PARAMS.keys()})
| 274,031 |
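The tokenizing can be sketched standalone; this skips the PROJ4_PARAMS key check and the int/float coercion of the real method:

prjs = '+proj=utm +zone=15 +ellps=GRS80 +no_defs'
parts = [o.lstrip('+') for o in prjs.strip().split()]
items = [tuple(p.split('=')) if '=' in p else (p, True) for p in parts]
print(dict(items))
# {'proj': 'utm', 'zone': '15', 'ellps': 'GRS80', 'no_defs': True}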
Turn a CRS dict into a PROJ.4 string. Mapping keys are tested against
``all_proj_keys`` list. Values of ``True`` are omitted, leaving the key
bare: {'no_defs': True} -> "+no_defs" and items where the value is
otherwise not a str, int, or float are omitted.
Args:
crs: A CRS dict as used in Location.
Returns:
str. The string representation.
|
def to_string(self):
def filt(x):
return '+'+x[0] in PROJ4_PARAMS.keys() and x[1] is not False
items = []
for k, v in sorted(filter(filt, self.items())):
items.append(
"+" + "=".join(
map(str, filter(
lambda y: (y or y == 0) and y is not True, (k, v)))))
return " ".join(items)
| 274,033 |
Return the collection of handlers handling the exception in arguments.
Args:
node (astroid.NodeNG): A node that is potentially wrapped in a try except.
exception (builtin.Exception or str): exception or name of the exception.
Returns:
list: the collection of handlers that are handling the exception or None.
|
def get_exception_handlers(
node: astroid.node_classes.NodeNG, exception=Exception
) -> List[astroid.ExceptHandler]:
context = find_try_except_wrapper_node(node)
if isinstance(context, astroid.TryExcept):
return [
handler for handler in context.handlers if error_of_type(handler, exception)
]
return None
| 274,490 |
Check if the node is directly under a Try/Except statement.
(but not under an ExceptHandler!)
Args:
node (astroid.Raise): the node raising the exception.
Returns:
bool: True if the node is inside a try/except statement, False otherwise.
|
def is_node_inside_try_except(node: astroid.Raise) -> bool:
context = find_try_except_wrapper_node(node)
return isinstance(context, astroid.TryExcept)
| 274,491 |