<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_host_lun(self, lun_or_snap, cg_member=None):
"""Gets the host lun of a lun, lun snap, cg snap or a member snap of cg snap. :param lun_or_snap: can be lun, lun snap, cg snap or a member snap of cg snap. :param cg_member: the member lun of cg if `lun_or_snap` is cg snap. :return: the host lun object. """ |
import storops.unity.resource.lun as lun_module
import storops.unity.resource.snap as snap_module
which = None
if isinstance(lun_or_snap, lun_module.UnityLun):
which = self._get_host_luns(lun=lun_or_snap)
elif isinstance(lun_or_snap, snap_module.UnitySnap):
if lun_or_snap.is_cg_snap():
if cg_member is None:
log.debug('No host lun for CG snap {}. '
'Use its member snap instead or pass in '
'cg_member.'.format(lun_or_snap.id))
return None
lun_or_snap = lun_or_snap.get_member_snap(cg_member)
which = self._get_host_luns(lun=cg_member, snap=lun_or_snap)
else:
which = self._get_host_luns(snap=lun_or_snap)
if not which:
log.debug('Resource(LUN or Snap) {} is not attached to host {}'
.format(lun_or_snap.name, self.name))
return None
return which[0] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_hlu(self, resource, cg_member=None):
"""Gets the hlu number of a lun, lun snap, cg snap or a member snap of cg snap. :param resource: can be lun, lun snap, cg snap or a member snap of cg snap. :param cg_member: the member lun of cg if `lun_or_snap` is cg snap. :return: the hlu number. """ |
host_lun = self.get_host_lun(resource, cg_member=cg_member)
return host_lun if host_lun is None else host_lun.hlu |
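A minimal usage sketch follows; the system address, credentials, and the host/lun names are hypothetical, and it assumes the lun is already attached to the host:

```
from storops import UnitySystem

unity = UnitySystem('10.0.0.1', 'admin', 'password')  # hypothetical system
host = unity.get_host(name='my_host')                 # hypothetical host
lun = unity.get_lun(name='my_lun')                    # hypothetical lun
hlu = host.get_hlu(lun)
if hlu is None:
    print('lun is not attached to this host')
else:
    print('attached at host lun number {}'.format(hlu))
```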
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_files(tag=None, sat_id=None, data_path=None, format_str=None, supported_tags=None, fake_daily_files_from_monthly=False, two_digit_year_break=None):
"""Return a Pandas Series of every file for chosen satellite data. This routine is intended to be used by pysat instrument modules supporting a particular NASA CDAWeb dataset. Parameters tag : (string or NoneType) Denotes type of file to load. Accepted types are <tag strings>. (default=None) sat_id : (string or NoneType) Specifies the satellite ID for a constellation. Not used. (default=None) data_path : (string or NoneType) Path to data directory. If None is specified, the value previously set in Instrument.files.data_path is used. (default=None) format_str : (string or NoneType) User specified file format. If None is specified, the default formats associated with the supplied tags are used. (default=None) supported_tags : (dict or NoneType) keys are tags supported by list_files routine. Values are the default format_str values for key. (default=None) fake_daily_files_from_monthly : bool Some CDAWeb instrument data files are stored by month, interfering with pysat's functionality of loading by day. This flag, when true, appends daily dates to monthly files internally. These dates are used by load routine in this module to provide data by day. Returns -------- pysat.Files.from_os : (pysat._files.Files) A class containing the verified available files Examples -------- :: fname = 'cnofs_vefi_bfield_1sec_{year:04d}{month:02d}{day:02d}_v05.cdf' supported_tags = {'dc_b':fname} list_files = functools.partial(nasa_cdaweb_methods.list_files, supported_tags=supported_tags) ivm_fname = 'cnofs_cindi_ivm_500ms_{year:4d}{month:02d}{day:02d}_v01.cdf' supported_tags = {'':ivm_fname} list_files = functools.partial(cdw.list_files, supported_tags=supported_tags) """ |
if data_path is not None:
if format_str is None:
try:
format_str = supported_tags[sat_id][tag]
except KeyError:
raise ValueError('Unknown tag')
out = pysat.Files.from_os(data_path=data_path,
format_str=format_str)
if (not out.empty) and fake_daily_files_from_monthly:
out.loc[out.index[-1] + pds.DateOffset(months=1) -
pds.DateOffset(days=1)] = out.iloc[-1]
out = out.asfreq('D', 'pad')
out = out + '_' + out.index.strftime('%Y-%m-%d')
return out
return out
else:
estr = 'A directory must be passed to the loading routine for <Instrument Code>'
raise ValueError(estr) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(fnames, tag=None, sat_id=None, fake_daily_files_from_monthly=False, flatten_twod=True):
"""Load NASA CDAWeb CDF files. This routine is intended to be used by pysat instrument modules supporting a particular NASA CDAWeb dataset. Parameters fnames : (pandas.Series) Series of filenames tag : (str or NoneType) tag or None (default=None) sat_id : (str or NoneType) satellite id or None (default=None) fake_daily_files_from_monthly : bool Some CDAWeb instrument data files are stored by month, interfering with pysat's functionality of loading by day. This flag, when true, parses of daily dates to monthly files that were added internally by the list_files routine, when flagged. These dates are used here to provide data by day. Returns --------- data : (pandas.DataFrame) Object containing satellite data meta : (pysat.Meta) Object containing metadata such as column names and units Examples -------- :: # within the new instrument module, at the top level define # a new variable named load, and set it equal to this load method # code below taken from cnofs_ivm.py. # support load routine # use the default CDAWeb method load = cdw.load """ |
import pysatCDF
if len(fnames) <= 0:
return pysat.DataFrame(None), None
else:
# going to use pysatCDF to load the CDF and format
# data and metadata for pysat using some assumptions.
# Depending upon your needs the resulting pandas DataFrame may
# need modification
# currently only loads one file, which handles more situations via pysat
# than you may initially think
if fake_daily_files_from_monthly:
# parse out date from filename
fname = fnames[0][0:-11]
date = pysat.datetime.strptime(fnames[0][-10:], '%Y-%m-%d')
with pysatCDF.CDF(fname) as cdf:
# convert data to pysat format
data, meta = cdf.to_pysat(flatten_twod=flatten_twod)
# select data from monthly
data = data.loc[date:date + pds.DateOffset(days=1) - pds.DateOffset(microseconds=1), :]
return data, meta
else:
# basic data return
with pysatCDF.CDF(fnames[0]) as cdf:
return cdf.to_pysat(flatten_twod=flatten_twod) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def download(supported_tags, date_array, tag, sat_id, ftp_site='cdaweb.gsfc.nasa.gov', data_path=None, user=None, password=None, fake_daily_files_from_monthly=False):
"""Routine to download NASA CDAWeb CDF data. This routine is intended to be used by pysat instrument modules supporting a particular NASA CDAWeb dataset. Parameters supported_tags : dict dict of dicts. Keys are supported tag names for download. Value is a dict with 'dir', 'remote_fname', 'local_fname'. Inteded to be pre-set with functools.partial then assigned to new instrument code. date_array : array_like Array of datetimes to download data for. Provided by pysat. tag : (str or NoneType) tag or None (default=None) sat_id : (str or NoneType) satellite id or None (default=None) data_path : (string or NoneType) Path to data directory. If None is specified, the value previously set in Instrument.files.data_path is used. (default=None) user : (string or NoneType) Username to be passed along to resource with relevant data. (default=None) password : (string or NoneType) User password to be passed along to resource with relevant data. (default=None) fake_daily_files_from_monthly : bool Some CDAWeb instrument data files are stored by month.This flag, when true, accomodates this reality with user feedback on a monthly time frame. Returns -------- Void : (NoneType) Downloads data to disk. Examples -------- :: # download support added to cnofs_vefi.py using code below rn = '{year:4d}/cnofs_vefi_bfield_1sec_{year:4d}{month:02d}{day:02d}_v05.cdf' ln = 'cnofs_vefi_bfield_1sec_{year:4d}{month:02d}{day:02d}_v05.cdf' dc_b_tag = {'dir':'/pub/data/cnofs/vefi/bfield_1sec', 'remote_fname':rn, 'local_fname':ln} supported_tags = {'dc_b':dc_b_tag} download = functools.partial(nasa_cdaweb_methods.download, supported_tags=supported_tags) """ |
import os
import sys
import ftplib
# connect to CDAWeb default port
ftp = ftplib.FTP(ftp_site)
# user anonymous, passwd anonymous@
ftp.login()
try:
ftp_dict = supported_tags[tag]
except KeyError:
raise ValueError('Tag name unknown.')
# path to relevant file on CDAWeb
ftp.cwd(ftp_dict['dir'])
# naming scheme for files on the CDAWeb server
remote_fname = ftp_dict['remote_fname']
# naming scheme for local files, should be closely related
# to CDAWeb scheme, though directory structures may be reduced
# if desired
local_fname = ftp_dict['local_fname']
for date in date_array:
# format files for specific dates and download location
formatted_remote_fname = remote_fname.format(year=date.year,
month=date.month, day=date.day)
formatted_local_fname = local_fname.format(year=date.year,
month=date.month, day=date.day)
saved_local_fname = os.path.join(data_path,formatted_local_fname)
# perform download
try:
print('Attempting to download file for '+date.strftime('%x'))
sys.stdout.flush()
ftp.retrbinary('RETR '+formatted_remote_fname, open(saved_local_fname,'wb').write)
print('Finished.')
except ftplib.error_perm as exception:
# if exception[0][0:3] != '550':
if str(exception.args[0]).split(" ", 1)[0] != '550':
raise
else:
os.remove(saved_local_fname)
print('File not available for '+ date.strftime('%x'))
ftp.close() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_pool(self, name, raid_groups, description=None, **kwargs):
"""Create pool based on RaidGroupParameter. :param name: pool name :param raid_groups: a list of *RaidGroupParameter* :param description: pool description :param alert_threshold: Threshold at which the system will generate alerts about the free space in the pool, specified as a percentage. :param is_harvest_enabled: True - Enable pool harvesting for the pool. False - Disable pool harvesting for the pool. :param is_snap_harvest_enabled: True - Enable snapshot harvesting for the pool. False - Disable snapshot harvesting for the pool. :param pool_harvest_high_threshold: Pool used space high threshold at which the system will automatically starts to delete snapshots in the pool :param pool_harvest_low_threshold: Pool used space low threshold under which the system will automatically stop deletion of snapshots in the pool :param snap_harvest_high_threshold: Snapshot used space high threshold at which the system automatically starts to delete snapshots in the pool :param snap_harvest_low_threshold: Snapshot used space low threshold below which the system will stop automatically deleting snapshots in the pool :param is_fast_cache_enabled: True - FAST Cache will be enabled for this pool. False - FAST Cache will be disabled for this pool. :param is_fastvp_enabled: True - Enable scheduled data relocations for the pool. False - Disable scheduled data relocations for the pool. :param pool_type: StoragePoolTypeEnum.TRADITIONAL - Create traditional pool. StoragePoolTypeEnum.DYNAMIC - Create dynamic pool. (default) """ |
return UnityPool.create(self._cli, name=name, description=description,
raid_groups=raid_groups, **kwargs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_file_port(self):
"""Returns ports list can be used by File File ports includes ethernet ports and link aggregation ports. """ |
eths = self.get_ethernet_port(bond=False)
las = self.get_link_aggregation()
return eths + las |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_files(tag='', sat_id=None, data_path=None, format_str=None):
"""Return a Pandas Series of every file for chosen SuperMAG data Parameters tag : (string or NoneType) Denotes type of file to load. Accepted types are 'indices', 'all', 'stations', and '' (for just magnetometer measurements). (default='') sat_id : (string or NoneType) Specifies the satellite ID for a constellation. Not used. (default=None) data_path : (string or NoneType) Path to data directory. If None is specified, the value previously set in Instrument.files.data_path is used. (default=None) format_str : (string or NoneType) User specified file format. If None is specified, the default formats associated with the supplied tags are used. (default=None) Returns -------- pysat.Files.from_os : (pysat._files.Files) A class containing the verified available files """ |
if format_str is None and data_path is not None:
file_base = 'supermag_magnetometer'
if tag == "indices" or tag == "all":
file_base += '_all' # Can't just download indices
if tag == "indices":
psplit = path.split(data_path[:-1])
data_path = path.join(psplit[0], "all", "")
if tag == "stations":
min_fmt = '_'.join([file_base, '{year:4d}.???'])
doff = pds.DateOffset(years=1)
else:
min_fmt = '_'.join([file_base, '{year:4d}{month:02d}{day:02d}.???'])
doff = pds.DateOffset(days=1)
files = pysat.Files.from_os(data_path=data_path, format_str=min_fmt)
# station files are once per year but we need to
# create the illusion there is a file per year
if not files.empty:
files = files.sort_index()
if tag == "stations":
orig_files = files.copy()
new_files = []
# Assigns the validity of each station file to be 1 year
for orig in orig_files.iteritems():
files.loc[orig[0] + doff - pds.DateOffset(days=1)] = orig[1]
files = files.sort_index()
new_files.append(files.loc[orig[0]: orig[0] + doff -
pds.DateOffset(days=1)].asfreq('D', method='pad'))
files = pds.concat(new_files)
files = files.dropna()
files = files.sort_index()
# add the date to the filename
files = files + '_' + files.index.strftime('%Y-%m-%d')
return files
elif format_str is None:
estr = 'A directory must be passed to the loading routine for SuperMAG'
raise ValueError(estr)
else:
return pysat.Files.from_os(data_path=data_path, format_str=format_str) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_csv_data(fname, tag):
"""Load data from a comma separated SuperMAG file Parameters fname : (str) CSV SuperMAG file name tag : (str) Denotes type of file to load. Accepted types are 'indices', 'all', 'stations', and '' (for just magnetometer measurements). Returns -------- data : (pandas.DataFrame) Pandas DataFrame """ |
import re
if tag == "stations":
# Because there may be multiple operators, the default pandas reader
# cannot be used.
ddict = dict()
dkeys = list()
date_list = list()
# Open and read the file
with open(fname, "r") as fopen:
dtime = pds.datetime.strptime(fname.split("_")[-1].split(".")[0],
"%Y")
for fline in fopen.readlines():
sline = [ll for ll in re.split(r'[,\n]+', fline) if len(ll) > 0]
if len(ddict.items()) == 0:
for kk in sline:
kk = re.sub("-", "_", kk)
ddict[kk] = list()
dkeys.append(kk)
else:
date_list.append(dtime)
for i,ll in enumerate(sline):
if i >= 1 and i <= 4:
ddict[dkeys[i]].append(float(ll))
elif i == 6:
ddict[dkeys[i]].append(int(ll))
elif i < len(dkeys):
ddict[dkeys[i]].append(ll)
else:
ddict[dkeys[-1]][-1] += " {:s}".format(ll)
# Create a data frame for this file
data = pds.DataFrame(ddict, index=date_list, columns=ddict.keys())
else:
# Define the date parser
def parse_smag_date(dd):
return pysat.datetime.strptime(dd, "%Y-%m-%d %H:%M:%S")
# Load the file into a data frame
data = pds.read_csv(fname, parse_dates={'datetime':[0]},
date_parser=parse_smag_date, index_col='datetime')
return data |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def format_baseline_list(baseline_list):
"""Format the list of baseline information from the loaded files into a cohesive, informative string Parameters baseline_list : (list) List of strings specifying the baseline information for each SuperMAG file Returns --------- base_string : (str) Single string containing the relevent data """ |
uniq_base = dict()
uniq_delta = dict()
for bline in baseline_list:
bsplit = bline.split()
bdate = " ".join(bsplit[2:])
if bsplit[0] not in uniq_base.keys():
uniq_base[bsplit[0]] = ""
if bsplit[1] not in uniq_delta.keys():
uniq_delta[bsplit[1]] = ""
uniq_base[bsplit[0]] += "{:s}, ".format(bdate)
uniq_delta[bsplit[1]] += "{:s}, ".format(bdate)
if len(uniq_base.items()) == 1:
base_string = "Baseline {:s}".format(list(uniq_base.keys())[0])
else:
base_string = "Baseline "
for i,kk in enumerate(uniq_base.keys()):
if i == 1:
base_string += "{:s}: {:s}".format(kk, uniq_base[kk][:-2])
else:
base_string += " {:s}: {:s}".format(kk,
uniq_base[kk][:-2])
else:
base_string += "unknown"
if len(uniq_delta.items()) == 1:
base_string += "\nDelta {:s}".format(list(uniq_delta.keys())[0])
else:
base_string += "\nDelta "
for i,kk in enumerate(uniq_delta.keys()):
if i == 1:
base_string += "{:s}: {:s}".format(kk, uniq_delta[kk][:-2])
else:
base_string += " {:s}: {:s}".format(kk,
uniq_delta[kk][:-2])
else:
base_string += "unknown"
return base_string |
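A small illustration of the expected output, assuming each baseline string has the layout '<baseline> <delta> <date...>' (the sample values are hypothetical):

```
blist = ['none none 2015-01-01 00:00',
         'none none 2015-01-02 00:00']
print(format_baseline_list(blist))
# Baseline none
# Delta none
```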
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def append_ascii_data(file_strings, tag):
""" Append data from multiple files for the same time period Parameters file_strings : array-like Lists or arrays of strings, where each string contains one file of data tag : string String denoting the type of file to load, accepted values are 'indices', 'all', 'stations', and None (for only magnetometer data) Returns ------- out_string : string String with all data, ready for output to a file """ |
import re
# Start with data from the first list element
out_lines = file_strings[0].split('\n')
iparam = -1 # Index for the parameter line
ihead = -1 # Index for the last header line
idates = list() # Indices for the date lines
date_list = list() # List of dates
num_stations = list() # Number of stations for each date line
ind_num = 2 if tag in ['all', 'indices', ''] else 0
# ind_num = 2 if tag == '' else ind_num
# Find the index information for the data
for i,line in enumerate(out_lines):
if line == "Selected parameters:":
iparam = i + 1
elif line.count("=") == len(line) and len(line) > 2:
ihead = i
break
# Find the time indices and number of stations for each date line
i = ihead + 1
while i < len(out_lines) - 1:
idates.append(i)
lsplit = re.split('\t+', out_lines[i])
dtime = pds.datetime.strptime(" ".join(lsplit[0:-1]),
"%Y %m %d %H %M %S")
date_list.append(dtime)
num_stations.append(int(lsplit[-1]))
i += num_stations[-1] + 1 + ind_num
idates = np.array(idates)
# Initialize a list of station names
station_names = list()
# Cycle through each additional set of file strings
for ff in range(len(file_strings)-1):
file_lines = file_strings[ff+1].split('\n')
# Find the index information for the data
head = True
snum = 0
for i,line in enumerate(file_lines):
if head:
if line.count("=") == len(line) and len(line) > 2:
head = False
elif len(line) > 0:
lsplit = re.split('\t+', line)
if snum == 0:
dtime = pds.datetime.strptime(" ".join(lsplit[0:-1]),
"%Y %m %d %H %M %S")
try:
idate = date_list.index(dtime)
except ValueError:
# SuperMAG outputs date lines regardless of the
# number of stations. These files shouldn't be
# appended together.
raise ValueError("Unexpected date ", dtime)
snum = int(lsplit[-1])
onum = num_stations[idate]
inum = ind_num
# Adjust reference data for new number of station lines
idates[idate+1:] += snum
num_stations[idate] += snum
# Adjust date line for new number of station lines
oline = "{:s}\t{:d}".format( \
dtime.strftime("%Y\t%m\t%d\t%H\t%M\t%S"),
num_stations[idate])
out_lines[idates[idate]] = oline
else:
if inum > 0:
inum -= 1
else:
# Insert the station line to the end of the date section
onum += 1
snum -= 1
out_lines.insert(idates[idate]+onum, line)
# Save the station name to update the parameter line
if not lsplit[0] in station_names:
station_names.append(lsplit[0])
# Update the parameter line
out_lines[iparam] += "," + ",".join(station_names)
# Join the output lines into a single string
out_string = "\n".join(out_lines)
return out_string |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def append_csv_data(file_strings):
""" Append data from multiple csv files for the same time period Parameters file_strings : array-like Lists or arrays of strings, where each string contains one file of data Returns ------- out_string : string String with all data, ready for output to a file """ |
# Start with data from the first list element
out_lines = list()
head_line = None
# Cycle through the lists of file strings, creating a list of line strings
for fstrings in file_strings:
file_lines = fstrings.split('\n')
# Remove and save the header line
head_line = file_lines.pop(0)
# Save the data lines
out_lines.extend(file_lines)
# Sort the output lines by date and station (first two columns) in place
out_lines.sort()
# Remove all zero-length lines from front, add one to back, and add header
i = 0
while len(out_lines[i]) == 0:
out_lines.pop(i)
out_lines.insert(0, head_line)
out_lines.append('')
# Join the output lines into a single string
out_string = "\n".join(out_lines)
return out_string |
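A quick sketch of the append behavior; the column names and station codes below are hypothetical stand-ins for SuperMAG CSV content:

```
f1 = 'Date_UTC,IAGA,N,E,Z\n2015-01-01 00:00:00,ABK,1.0,2.0,3.0\n'
f2 = 'Date_UTC,IAGA,N,E,Z\n2015-01-01 00:00:00,AND,4.0,5.0,6.0\n'
print(append_csv_data([f1, f2]))
# Date_UTC,IAGA,N,E,Z
# 2015-01-01 00:00:00,ABK,1.0,2.0,3.0
# 2015-01-01 00:00:00,AND,4.0,5.0,6.0
```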
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_files(tag=None, sat_id=None, data_path=None, format_str=None):
"""Produce a fake list of files spanning a year""" |
index = pds.date_range(pysat.datetime(2017, 12, 1), pysat.datetime(2018, 12, 1))
# file list is effectively just the date in string format - '%D' works only on Mac; '%x' works on both Windows and Mac
names = [data_path + date.strftime('%Y-%m-%d') + '.nofile' for date in index]
return pysat.Series(names, index=index) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def enable_log(level=logging.DEBUG):
"""Enable console logging. This is a utils method for try run with storops. :param level: log level, default to DEBUG """ |
logger = logging.getLogger(__name__)
logger.setLevel(level)
if not logger.handlers:
logger.info('enabling logging to console.')
logger.addHandler(logging.StreamHandler(sys.stdout)) |
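A sketch of typical use; the import path is an assumption, so adjust it to wherever enable_log lives in your tree:

```
import logging

from storops import enable_log  # assumed export; may live elsewhere

enable_log(logging.INFO)  # INFO and above go to stdout
```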
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def round_60(value):
""" round the number to the multiple of 60 Say a random value is represented by: 60 * n + r n is an integer and r is an integer between 0 and 60. if r < 30, the result is 60 * n. otherwise, the result is 60 * (n + 1) The use of this function is that the counter refreshment on VNX is always 1 minute. So the delta time between samples of counters must be the multiple of 60. :param value: the value to be rounded. :return: result """ |
t = 60
if value is not None:
r = value % t
if r > t / 2:
ret = value + (t - r)
else:
ret = value - r
else:
ret = NaN
return ret |
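A few worked values showing the rounding rule (NaN here is whatever the module aliases it to, e.g. float('nan')):

```
>>> round_60(89)    # 89 = 60 * 1 + 29, r <= 30, round down
60
>>> round_60(91)    # 91 = 60 * 1 + 31, r > 30, round up
120
>>> round_60(None)
nan
```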
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def utilization(prev, curr, counters):
""" calculate the utilization delta_busy = curr.busy - prev.busy delta_idle = curr.idle - prev.idle utilization = delta_busy / (delta_busy + delta_idle) :param prev: previous resource :param curr: current resource :param counters: list of two, busy ticks and idle ticks :return: value, NaN if invalid. """ |
busy_prop, idle_prop = counters
pb = getattr(prev, busy_prop)
pi = getattr(prev, idle_prop)
cb = getattr(curr, busy_prop)
ci = getattr(curr, idle_prop)
db = minus(cb, pb)
di = minus(ci, pi)
return mul(div(db, add(db, di)), 100) |
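A sketch of the calculation with stub objects; it assumes the NaN-safe helpers (`minus`, `add`, `div`, `mul`) reduce to plain arithmetic for valid numbers, and `Sample` is a hypothetical stand-in for a resource:

```
from collections import namedtuple

Sample = namedtuple('Sample', 'busy_ticks idle_ticks')
prev = Sample(busy_ticks=1000, idle_ticks=1000)
curr = Sample(busy_ticks=1600, idle_ticks=1400)
# delta_busy = 600, delta_idle = 400
# utilization = 600 / (600 + 400) * 100 = 60.0
print(utilization(prev, curr, ('busy_ticks', 'idle_ticks')))
```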
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delta_ps(prev, curr, counters):
""" calculate the delta per second of one counter formula: (curr - prev) / delta_time :param prev: previous resource :param curr: current resource :param counters: the counter to do delta and per second, one only :return: value, NaN if invalid. """ |
counter = get_counter(counters)
pv = getattr(prev, counter)
cv = getattr(curr, counter)
return minus(cv, pv) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def io_size_kb(prev, curr, counters):
""" calculate the io size based on bandwidth and throughput formula: average_io_size = bandwidth / throughput :param prev: prev resource, not used :param curr: current resource :param counters: two stats, bandwidth in MB and throughput count :return: value, NaN if invalid """ |
bw_stats, io_stats = counters
size_mb = div(getattr(curr, bw_stats), getattr(curr, io_stats))
return mul(size_mb, 1024) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _assign_funcs(self, by_name=False, inst_module=None):
"""Assign all external science instrument methods to Instrument object. """ |
import importlib
# set defaults
self._list_rtn = self._pass_func
self._load_rtn = self._pass_func
self._default_rtn = self._pass_func
self._clean_rtn = self._pass_func
self._init_rtn = self._pass_func
self._download_rtn = self._pass_func
# default params
self.directory_format = None
self.file_format = None
self.multi_file_day = False
self.orbit_info = None
if by_name:
# look for code with filename name, any errors passed up
inst = importlib.import_module(''.join(('.', self.platform, '_',
self.name)),
package='pysat.instruments')
elif inst_module is not None:
# user supplied an object with relevant instrument routines
inst = inst_module
else:
# no module or name info, default pass functions assigned
return
try:
self._load_rtn = inst.load
self._list_rtn = inst.list_files
self._download_rtn = inst.download
except AttributeError:
estr = 'A load, list_files, and download routine are required for '
raise AttributeError('{:s}every instrument.'.format(estr))
try:
self._default_rtn = inst.default
except AttributeError:
pass
try:
self._init_rtn = inst.init
except AttributeError:
pass
try:
self._clean_rtn = inst.clean
except AttributeError:
pass
# look for instrument default parameters
try:
self.directory_format = inst.directory_format
except AttributeError:
pass
try:
self.multi_file_day = inst.multi_file_day
except AttributeError:
pass
try:
self.orbit_info = inst.orbit_info
except AttributeError:
pass
return |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _load_data(self, date=None, fid=None):
""" Load data for an instrument on given date or fid, dependng upon input. Parameters date : (dt.datetime.date object or NoneType) file date fid : (int or NoneType) filename index value Returns -------- data : (pds.DataFrame) pysat data meta : (pysat.Meta) pysat meta data """ |
if fid is not None:
# get filename based off of index value
fname = self.files[fid:fid+1]
elif date is not None:
fname = self.files[date: date+pds.DateOffset(days=1)]
else:
raise ValueError('Must supply either a date or file id number.')
if len(fname) > 0:
load_fname = [os.path.join(self.files.data_path, f) for f in fname]
data, mdata = self._load_rtn(load_fname, tag=self.tag,
sat_id=self.sat_id, **self.kwargs)
# ensure units and name are named consistently in new Meta
# object as specified by user upon Instrument instantiation
mdata.accept_default_labels(self)
else:
data = DataFrame(None)
mdata = _meta.Meta(units_label=self.units_label, name_label=self.name_label,
notes_label=self.notes_label, desc_label=self.desc_label,
plot_label=self.plot_label, axis_label=self.axis_label,
scale_label=self.scale_label, min_label=self.min_label,
max_label=self.max_label, fill_label=self.fill_label)
output_str = '{platform} {name} {tag} {sat_id}'
output_str = output_str.format(platform=self.platform,
name=self.name, tag=self.tag,
sat_id=self.sat_id)
if not data.empty:
if not isinstance(data, DataFrame):
raise TypeError(' '.join(('Data returned by instrument load',
'routine must be a pandas.DataFrame')))
if not isinstance(mdata, _meta.Meta):
raise TypeError('Metadata returned must be a pysat.Meta object')
if date is not None:
output_str = ' '.join(('Returning', output_str, 'data for',
date.strftime('%x')))
else:
if len(fname) == 1:
# this check was zero
output_str = ' '.join(('Returning', output_str, 'data from',
fname[0]))
else:
output_str = ' '.join(('Returning', output_str, 'data from',
fname[0], '::', fname[-1]))
else:
# no data signal
output_str = ' '.join(('No', output_str, 'data for',
date.strftime('%m/%d/%y')))
# remove extra spaces, if any
output_str = " ".join(output_str.split())
print(output_str)
return data, mdata |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_var_type_code(self, coltype):
'''Determines the two-character type code for a given variable type
Parameters
----------
coltype : type or np.dtype
The type of the variable
Returns
-------
str
The variable type code for the given type'''
if type(coltype) is np.dtype:
var_type = coltype.kind + str(coltype.itemsize)
return var_type
else:
if coltype is np.int64:
return 'i8'
elif coltype is np.int32:
return 'i4'
elif coltype is np.int16:
return 'i2'
elif coltype is np.int8:
return 'i1'
elif coltype is np.uint64:
return 'u8'
elif coltype is np.uint32:
return 'u4'
elif coltype is np.uint16:
return 'u2'
elif coltype is np.uint8:
return 'u1'
elif coltype is np.float64:
return 'f8'
elif coltype is np.float32:
return 'f4'
elif issubclass(coltype, basestring):
return 'S1'
else:
raise TypeError('Unknown Variable Type: ' + str(coltype)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_data_info(self, data, file_format):
"""Support file writing by determiniing data type and other options Parameters data : pandas object Data to be written file_format : basestring String indicating netCDF3 or netCDF4 Returns ------- data_flag, datetime_flag, old_format """ |
# get type of data
data_type = data.dtype
# check if older file_format
# if file_format[:7] == 'NETCDF3':
if file_format != 'NETCDF4':
old_format = True
else:
old_format = False
# check for object type
if data_type != np.dtype('O'):
# simple data, not an object
# no 64bit ints in netCDF3
if (data_type == np.int64) & old_format:
data = data.astype(np.int32)
data_type = np.int32
if data_type == np.dtype('<M8[ns]'):
if not old_format:
data_type = np.int64
else:
data_type = np.float
datetime_flag = True
else:
datetime_flag = False
else:
# dealing with a more complicated object
# iterate over elements until we hit something that is something,
# and not NaN
data_type = type(data.iloc[0])
for i in np.arange(len(data)):
if len(data.iloc[i]) > 0:
data_type = type(data.iloc[i])
if not isinstance(data.iloc[i], np.float):  # NaN entries are floats; break on a real element
break
datetime_flag = False
return data, data_type, datetime_flag |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generic_meta_translator(self, meta_to_translate):
'''Translates the metadata contained in an object into a dictionary
suitable for export.
Parameters
----------
meta_to_translate : Meta
The metadata object to translate
Returns
-------
dict
A dictionary of the metadata for each variable of an output file
e.g. netcdf4'''
export_dict = {}
if self._meta_translation_table is not None:
# Create a translation table for the actual values of the meta labels.
# The instrument specific translation table only stores the names of the
# attributes that hold the various meta labels
translation_table = {}
for key in self._meta_translation_table:
translation_table[getattr(self, key)] = self._meta_translation_table[key]
else:
translation_table = None
#First Order Data
for key in meta_to_translate.data.index:
if translation_table is None:
export_dict[key] = meta_to_translate.data.loc[key].to_dict()
else:
# Translate each key if a translation is provided
export_dict[key] = {}
meta_dict = meta_to_translate.data.loc[key].to_dict()
for original_key in meta_dict:
if original_key in translation_table:
for translated_key in translation_table[original_key]:
export_dict[key][translated_key] = meta_dict[original_key]
else:
export_dict[key][original_key] = meta_dict[original_key]
#Higher Order Data
for key in meta_to_translate.ho_data:
if key not in export_dict:
export_dict[key] = {}
for ho_key in meta_to_translate.ho_data[key].data.index:
if translation_table is None:
export_dict[key+'_'+ho_key] = meta_to_translate.ho_data[key].data.loc[ho_key].to_dict()
else:
#Translate each key if a translation is provided
export_dict[key+'_'+ho_key] = {}
meta_dict = meta_to_translate.ho_data[key].data.loc[ho_key].to_dict()
for original_key in meta_dict:
if original_key in translation_table:
for translated_key in translation_table[original_key]:
export_dict[key+'_'+ho_key][translated_key] = meta_dict[original_key]
else:
export_dict[key+'_'+ho_key][original_key] = meta_dict[original_key]
return export_dict |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_alu_hlu_map(input_str):
"""Converter for alu hlu map Convert following input into a alu -> hlu map: Sample input: ``` HLU Number ALU Number 0 12 1 23 ``` ALU stands for array LUN number hlu stands for host LUN number :param input_str: raw input from naviseccli :return: alu -> hlu map """ |
ret = {}
if input_str is not None:
pattern = re.compile(r'(\d+)\s*(\d+)')
for line in input_str.split('\n'):
line = line.strip()
if len(line) == 0:
continue
matched = re.search(pattern, line)
if matched is None or len(matched.groups()) < 2:
continue
else:
hlu = matched.group(1)
alu = matched.group(2)
ret[int(alu)] = int(hlu)
return ret |
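A short example with a hand-written naviseccli-style block (whitespace is flexible, since the regex only needs two runs of digits per line):

```
>>> output = '''
... HLU Number  ALU Number
... 0           12
... 1           23
... '''
>>> to_alu_hlu_map(output)
{12: 0, 23: 1}
```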
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_disk_indices(value):
"""Convert following input to disk indices Sample input: ``` Disks: Bus 0 Enclosure 0 Disk 9 Bus 1 Enclosure 0 Disk 12 Bus 1 Enclosure 0 Disk 9 Bus 0 Enclosure 0 Disk 4 Bus 0 Enclosure 0 Disk 7 ``` :param value: disk list :return: disk indices in list """ |
ret = []
p = re.compile(r'Bus\s+(\w+)\s+Enclosure\s+(\w+)\s+Disk\s+(\w+)')
if value is not None:
for line in value.split('\n'):
line = line.strip()
if len(line) == 0:
continue
matched = re.search(p, line)
if matched is None or len(matched.groups()) < 3:
continue
else:
ret.append('{}_{}_{}'.format(*matched.groups()))
return ret |
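And a matching example for the disk list converter:

```
>>> output = '''
... Disks:
... Bus 0 Enclosure 0 Disk 9
... Bus 1 Enclosure 0 Disk 12
... '''
>>> to_disk_indices(output)
['0_0_9', '1_0_12']
```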
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ipv4_prefix_to_mask(prefix):
""" ipv4 cidr prefix to net mask :param prefix: cidr prefix , rang in (0, 32) :type prefix: int :return: dot separated ipv4 net mask code, eg: 255.255.255.0 :rtype: str """ |
if prefix > 32 or prefix < 0:
raise ValueError("invalid cidr prefix for ipv4")
else:
mask = ((1 << 32) - 1) ^ ((1 << (32 - prefix)) - 1)
eight_ones = 255 # 0b11111111
mask_str = ''
for i in range(0, 4):
mask_str = str(mask & eight_ones) + mask_str
mask = mask >> 8
if i != 3:
mask_str = '.' + mask_str
return mask_str |
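Two sample conversions:

```
>>> ipv4_prefix_to_mask(24)
'255.255.255.0'
>>> ipv4_prefix_to_mask(28)
'255.255.255.240'
```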
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ipv6_prefix_to_mask(prefix):
""" ipv6 cidr prefix to net mask :param prefix: cidr prefix, rang in (0, 128) :type prefix: int :return: comma separated ipv6 net mask code, eg: ffff:ffff:ffff:ffff:0000:0000:0000:0000 :rtype: str """ |
if prefix > 128 or prefix < 0:
raise ValueError("invalid cidr prefix for ipv6")
else:
mask = ((1 << 128) - 1) ^ ((1 << (128 - prefix)) - 1)
f = 15 # 0xf or 0b1111
hex_mask_str = ''
for i in range(0, 32):
hex_mask_str = format((mask & f), 'x') + hex_mask_str
mask = mask >> 4
if i != 31 and i & 3 == 3:
hex_mask_str = ':' + hex_mask_str
return hex_mask_str |
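And for the ipv6 variant:

```
>>> ipv6_prefix_to_mask(64)
'ffff:ffff:ffff:ffff:0000:0000:0000:0000'
>>> ipv6_prefix_to_mask(60)
'ffff:ffff:ffff:fff0:0000:0000:0000:0000'
```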
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def expand(self, new_size):
""" expand the LUN to a new size :param new_size: new size in bytes. :return: the old size """ |
ret = self.size_total
resp = self.modify(size=new_size)
resp.raise_if_err()
return ret |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def replicate(self, dst_lun_id, max_time_out_of_sync, replication_name=None, replicate_existing_snaps=None, remote_system=None):
""" Creates a replication session with a existing lun as destination. :param dst_lun_id: destination lun id. :param max_time_out_of_sync: maximum time to wait before syncing the source and destination. Value `-1` means the automatic sync is not performed. `0` means it is a sync replication. :param replication_name: replication name. :param replicate_existing_snaps: whether to replicate existing snaps. :param remote_system: `UnityRemoteSystem` object. The remote system to which the replication is being configured. When not specified, it defaults to local system. :return: created replication session. """ |
return UnityReplicationSession.create(
self._cli, self.get_id(), dst_lun_id, max_time_out_of_sync,
name=replication_name,
replicate_existing_snaps=replicate_existing_snaps,
remote_system=remote_system) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def replicate_with_dst_resource_provisioning(self, max_time_out_of_sync, dst_pool_id, dst_lun_name=None, remote_system=None, replication_name=None, dst_size=None, dst_sp=None, is_dst_thin=None, dst_tiering_policy=None, is_dst_compression=None):
""" Creates a replication session with destination lun provisioning. :param max_time_out_of_sync: maximum time to wait before syncing the source and destination. Value `-1` means the automatic sync is not performed. `0` means it is a sync replication. :param dst_pool_id: id of pool to allocate destination lun. :param dst_lun_name: destination lun name. :param remote_system: `UnityRemoteSystem` object. The remote system to which the replication is being configured. When not specified, it defaults to local system. :param replication_name: replication name. :param dst_size: destination lun size. :param dst_sp: `NodeEnum` value. Default storage processor of destination lun. :param is_dst_thin: indicates whether destination lun is thin or not. :param dst_tiering_policy: `TieringPolicyEnum` value. Tiering policy of destination lun. :param is_dst_compression: indicates whether destination lun is compression enabled or not. :return: created replication session. """ |
dst_size = self.size_total if dst_size is None else dst_size
dst_resource = UnityResourceConfig.to_embedded(
name=dst_lun_name, pool_id=dst_pool_id,
size=dst_size, default_sp=dst_sp,
tiering_policy=dst_tiering_policy, is_thin_enabled=is_dst_thin,
is_compression_enabled=is_dst_compression)
return UnityReplicationSession.create_with_dst_resource_provisioning(
self._cli, self.get_id(), dst_resource, max_time_out_of_sync,
remote_system=remote_system, name=replication_name) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_physical_port(self):
"""Returns the link aggregation object or the ethernet port object.""" |
obj = None
if self.is_link_aggregation():
obj = UnityLinkAggregation.get(self._cli, self.get_id())
else:
obj = UnityEthernetPort.get(self._cli, self.get_id())
return obj |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_embedded(pool_id=None, is_thin_enabled=None, is_deduplication_enabled=None, is_compression_enabled=None, is_backup_only=None, size=None, tiering_policy=None, request_id=None, src_id=None, name=None, default_sp=None, replication_resource_type=None):
""" Constructs an embeded object of `UnityResourceConfig`. :param pool_id: storage pool of the resource. :param is_thin_enabled: is thin type or not. :param is_deduplication_enabled: is deduplication enabled or not. :param is_compression_enabled: is in-line compression (ILC) enabled or not. :param is_backup_only: is backup only or not. :param size: size of the resource. :param tiering_policy: `TieringPolicyEnum` value. Tiering policy for the resource. :param request_id: unique request ID for the configuration. :param src_id: storage resource if it already exists. :param name: name of the storage resource. :param default_sp: `NodeEnum` value. Default storage processor for the resource. :param replication_resource_type: `ReplicationEndpointResourceTypeEnum` value. Replication resource type. :return: """ |
return {'poolId': pool_id, 'isThinEnabled': is_thin_enabled,
'isDeduplicationEnabled': is_deduplication_enabled,
'isCompressionEnabled': is_compression_enabled,
'isBackupOnly': is_backup_only, 'size': size,
'tieringPolicy': tiering_policy, 'requestId': request_id,
'srcId': src_id, 'name': name, 'defaultSP': default_sp,
'replicationResourceType': replication_resource_type} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create(cls, cli, src_resource_id, dst_resource_id, max_time_out_of_sync, name=None, members=None, auto_initiate=None, hourly_snap_replication_policy=None, daily_snap_replication_policy=None, replicate_existing_snaps=None, remote_system=None, src_spa_interface=None, src_spb_interface=None, dst_spa_interface=None, dst_spb_interface=None):
""" Creates a replication session. :param cli: the rest cli. :param src_resource_id: id of the replication source, could be lun/fs/cg. :param dst_resource_id: id of the replication destination. :param max_time_out_of_sync: maximum time to wait before syncing the source and destination. Value `-1` means the automatic sync is not performed. `0` means it is a sync replication. :param name: name of the replication. :param members: list of `UnityLunMemberReplication` object. If `src_resource` is cg, `lunMemberReplication` list need to pass in to this parameter as member lun pairing between source and destination cg. :param auto_initiate: indicates whether to perform the first replication sync automatically. True - perform the first replication sync automatically. False - perform the first replication sync manually. :param hourly_snap_replication_policy: `UnitySnapReplicationPolicy` object. The policy for replicating hourly scheduled snaps of the source resource. :param daily_snap_replication_policy: `UnitySnapReplicationPolicy` object. The policy for replicating daily scheduled snaps of the source resource. :param replicate_existing_snaps: indicates whether or not to replicate snapshots already existing on the resource. :param remote_system: `UnityRemoteSystem` object. The remote system of remote replication. :param src_spa_interface: `UnityRemoteInterface` object. The replication interface for source SPA. :param src_spb_interface: `UnityRemoteInterface` object. The replication interface for source SPB. :param dst_spa_interface: `UnityRemoteInterface` object. The replication interface for destination SPA. :param dst_spb_interface: `UnityRemoteInterface` object. The replication interface for destination SPB. :return: the newly created replication session. """ |
req_body = cli.make_body(
srcResourceId=src_resource_id, dstResourceId=dst_resource_id,
maxTimeOutOfSync=max_time_out_of_sync, members=members,
autoInitiate=auto_initiate, name=name,
hourlySnapReplicationPolicy=hourly_snap_replication_policy,
dailySnapReplicationPolicy=daily_snap_replication_policy,
replicateExistingSnaps=replicate_existing_snaps,
remoteSystem=remote_system,
srcSPAInterface=src_spa_interface,
srcSPBInterface=src_spb_interface,
dstSPAInterface=dst_spa_interface,
dstSPBInterface=dst_spb_interface)
resp = cli.post(cls().resource_class, **req_body)
resp.raise_if_err()
return cls.get(cli, resp.resource_id) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_with_dst_resource_provisioning( cls, cli, src_resource_id, dst_resource_config, max_time_out_of_sync, name=None, remote_system=None, src_spa_interface=None, src_spb_interface=None, dst_spa_interface=None, dst_spb_interface=None, dst_resource_element_configs=None, auto_initiate=None, hourly_snap_replication_policy=None, daily_snap_replication_policy=None, replicate_existing_snaps=None):
""" Create a replication session along with destination resource provisioning. :param cli: the rest cli. :param src_resource_id: id of the replication source, could be lun/fs/cg. :param dst_resource_config: `UnityResourceConfig` object. The user chosen config for destination resource provisioning. `pool_id` and `size` are required for creation. :param max_time_out_of_sync: maximum time to wait before syncing the source and destination. Value `-1` means the automatic sync is not performed. `0` means it is a sync replication. :param name: name of the replication. :param remote_system: `UnityRemoteSystem` object. The remote system to which the replication is being configured. When not specified, it defaults to local system. :param src_spa_interface: `UnityRemoteInterface` object. The replication interface for source SPA. :param src_spb_interface: `UnityRemoteInterface` object. The replication interface for source SPB. :param dst_spa_interface: `UnityRemoteInterface` object. The replication interface for destination SPA. :param dst_spb_interface: `UnityRemoteInterface` object. The replication interface for destination SPB. :param dst_resource_element_configs: List of `UnityResourceConfig` objects. The user chose config for each of the member element of the destination resource. :param auto_initiate: indicates whether to perform the first replication sync automatically. True - perform the first replication sync automatically. False - perform the first replication sync manually. :param hourly_snap_replication_policy: `UnitySnapReplicationPolicy` object. The policy for replicating hourly scheduled snaps of the source resource. :param daily_snap_replication_policy: `UnitySnapReplicationPolicy` object. The policy for replicating daily scheduled snaps of the source resource. :param replicate_existing_snaps: indicates whether or not to replicate snapshots already existing on the resource. :return: the newly created replication session. """ |
req_body = cli.make_body(
srcResourceId=src_resource_id,
dstResourceConfig=dst_resource_config,
maxTimeOutOfSync=max_time_out_of_sync,
name=name, remoteSystem=remote_system,
srcSPAInterface=src_spa_interface,
srcSPBInterface=src_spb_interface,
dstSPAInterface=dst_spa_interface,
dstSPBInterface=dst_spb_interface,
dstResourceElementConfigs=dst_resource_element_configs,
autoInitiate=auto_initiate,
hourlySnapReplicationPolicy=hourly_snap_replication_policy,
dailySnapReplicationPolicy=daily_snap_replication_policy,
replicateExistingSnaps=replicate_existing_snaps)
resp = cli.type_action(
cls().resource_class,
'createReplicationSessionWDestResProvisioning',
**req_body)
resp.raise_if_err()
# response is like:
# "content": {
# "id": {
# "id": "42949676351_FNM00150600267_xxxx"
# }
session_resp = resp.first_content['id']
return cls.get(cli, _id=session_resp['id']) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def modify(self, max_time_out_of_sync=None, name=None, hourly_snap_replication_policy=None, daily_snap_replication_policy=None, src_spa_interface=None, src_spb_interface=None, dst_spa_interface=None, dst_spb_interface=None):
""" Modifies properties of a replication session. :param max_time_out_of_sync: same as the one in `create` method. :param name: same as the one in `create` method. :param hourly_snap_replication_policy: same as the one in `create` method. :param daily_snap_replication_policy: same as the one in `create` method. :param src_spa_interface: same as the one in `create` method. :param src_spb_interface: same as the one in `create` method. :param dst_spa_interface: same as the one in `create` method. :param dst_spb_interface: same as the one in `create` method. """ |
req_body = self._cli.make_body(
maxTimeOutOfSync=max_time_out_of_sync, name=name,
hourlySnapReplicationPolicy=hourly_snap_replication_policy,
dailySnapReplicationPolicy=daily_snap_replication_policy,
srcSPAInterface=src_spa_interface,
srcSPBInterface=src_spb_interface,
dstSPAInterface=dst_spa_interface,
dstSPBInterface=dst_spb_interface)
resp = self.action('modify', **req_body)
resp.raise_if_err()
return resp |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def resume(self, force_full_copy=None, src_spa_interface=None, src_spb_interface=None, dst_spa_interface=None, dst_spb_interface=None):
""" Resumes a replication session. This can be applied on replication session when it's operational status is reported as Failed over, or Paused. :param force_full_copy: needed when replication session goes out of sync due to a fault. True - replicate all data. False - replicate changed data only. :param src_spa_interface: same as the one in `create` method. :param src_spb_interface: same as the one in `create` method. :param dst_spa_interface: same as the one in `create` method. :param dst_spb_interface: same as the one in `create` method. """ |
req_body = self._cli.make_body(forceFullCopy=force_full_copy,
srcSPAInterface=src_spa_interface,
srcSPBInterface=src_spb_interface,
dstSPAInterface=dst_spa_interface,
dstSPBInterface=dst_spb_interface)
resp = self.action('resume', **req_body)
resp.raise_if_err()
return resp |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def failover(self, sync=None, force=None):
""" Fails over a replication session. :param sync: True - sync the source and destination resources before failing over the asynchronous replication session or keep them in sync after failing over the synchronous replication session. False - don't sync. :param force: True - skip pre-checks on file system(s) replication sessions of a NAS server when a replication failover is issued from the source NAS server. False - don't skip pre-checks. """ |
req_body = self._cli.make_body(sync=sync, force=force)
resp = self.action('failover', **req_body)
resp.raise_if_err()
return resp |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def failback(self, force_full_copy=None):
""" Fails back a replication session. This can be applied on a replication session that is failed over. Fail back will synchronize the changes done to original destination back to original source site and will restore the original direction of session. :param force_full_copy: indicates whether to sync back all data from the destination SP to the source SP during the failback session. True - Sync back all data. False - Sync back changed data only. """ |
req_body = self._cli.make_body(forceFullCopy=force_full_copy)
resp = self.action('failback', **req_body)
resp.raise_if_err()
return resp |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _calcOrbits(self):
"""Prepares data structure for breaking data into orbits. Not intended for end user.""" |
# if the breaks between orbit have not been defined, define them
# also, store the data so that grabbing different orbits does not
# require reloads of whole dataset
if len(self._orbit_breaks) == 0:
# determine orbit breaks
self._detBreaks()
# store a copy of data
self._fullDayData = self.sat.data.copy()
# set current orbit counter to zero (default)
self._current = 0 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _polarBreaks(self):
"""Determine where breaks in a polar orbiting satellite orbit occur. Looks for sign changes in latitude (magnetic or geographic) as well as breaks in UT. """ |
if self.orbit_index is None:
raise ValueError('Orbit properties must be defined at ' +
'pysat.Instrument object instantiation. ' +
'See Instrument docs.')
else:
try:
self.sat[self.orbit_index]
except ValueError:
raise ValueError('Provided orbit index does not appear to ' +
'exist in loaded data')
# determine where orbit index goes from positive to negative
pos = (self.sat[self.orbit_index] >= 0)
npos = ~pos
change = (pos.values[:-1] & npos.values[1:]) | (npos.values[:-1] &
pos.values[1:])
ind, = np.where(change)
ind += 1
ut_diff = Series(self.sat.data.index).diff()
ut_ind, = np.where(ut_diff / self.orbit_period > 0.95)
if len(ut_ind) > 0:
ind = np.hstack((ind, ut_ind))
ind = np.sort(ind)
ind = np.unique(ind)
# create orbitbreak index, ensure first element is always 0
if ind[0] != 0:
ind = np.hstack((np.array([0]), ind))
# number of orbits
num_orbits = len(ind)
# set index of orbit breaks
self._orbit_breaks = ind
# set number of orbits for the day
self.num = num_orbits |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _orbitNumberBreaks(self):
"""Determine where orbital breaks in a dataset with orbit numbers occur. Looks for changes in unique values. """ |
if self.orbit_index is None:
raise ValueError('Orbit properties must be defined at ' +
'pysat.Instrument object instantiation. ' +
'See Instrument docs.')
else:
try:
self.sat[self.orbit_index]
except ValueError:
raise ValueError('Provided orbit index does not appear to ' +
'exist in loaded data')
# determine where the orbit index changes from one value to the next
uniq_vals = self.sat[self.orbit_index].unique()
orbit_index = []
for val in uniq_vals:
idx, = np.where(val == self.sat[self.orbit_index].values)
orbit_index.append(idx[0])
# create orbitbreak index, ensure first element is always 0
if orbit_index[0] != 0:
ind = np.hstack((np.array([0]), orbit_index))
else:
ind = orbit_index
# number of orbits
num_orbits = len(ind)
# set index of orbit breaks
self._orbit_breaks = ind
# set number of orbits for the day
self.num = num_orbits |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse(yr, mo, day):
""" Basic parser to deal with date format of the Kp file. """ |
yr = '20'+yr
yr = int(yr)
mo = int(mo)
day = int(day)
return pds.datetime(yr, mo, day) |
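For example, the fields '15', '01', '09' from a Kp file parse to 2015-01-09 (in the pandas versions this code targets, `pds.datetime` is an alias of `datetime.datetime`):

```
>>> _parse('15', '01', '09')
datetime.datetime(2015, 1, 9, 0, 0)
```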
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def download(date_array, tag, sat_id, data_path, user=None, password=None):
"""Routine to download Kp index data Parameters tag : (string or NoneType) Denotes type of file to load. Accepted types are '1min' and '5min'. (default=None) sat_id : (string or NoneType) Specifies the satellite ID for a constellation. Not used. (default=None) data_path : (string or NoneType) Path to data directory. If None is specified, the value previously set in Instrument.files.data_path is used. (default=None) Returns -------- Void : (NoneType) data downloaded to disk, if available. Notes ----- Called by pysat. Not intended for direct use by user. """ |
import ftplib
from ftplib import FTP
import sys
ftp = FTP('ftp.gfz-potsdam.de') # connect to host, default port
ftp.login() # user anonymous, passwd anonymous@
ftp.cwd('/pub/home/obs/kp-ap/tab')
for date in date_array:
fname = 'kp{year:02d}{month:02d}.tab'
fname = fname.format(year=date.year % 100, month=date.month)
local_fname = fname
saved_fname = os.path.join(data_path,local_fname)
try:
print('Downloading file for '+date.strftime('%D'))
sys.stdout.flush()
        with open(saved_fname, 'wb') as fobj:
            ftp.retrbinary('RETR ' + fname, fobj.write)
except ftplib.error_perm as exception:
if str(exception.args[0]).split(" ", 1)[0] != '550':
raise
else:
os.remove(saved_fname)
print('File not available for '+date.strftime('%D'))
ftp.close()
return |
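# Worked example of the month-file naming used above (year modulo 100):
import datetime
example_date = datetime.date(2019, 3, 1)
print('kp{year:02d}{month:02d}.tab'.format(
    year=example_date.year % 100, month=example_date.month))  # kp1903.tab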
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_converter(self, converter_str):
"""find converter function reference by name find converter by name, converter name follows this convention: Class.method or: method The first type of converter class/function must be available in current module. The second type of converter must be available in `__builtin__` (or `builtins` in python3) module. :param converter_str: string representation of the converter func :return: function reference """ |
ret = None
if converter_str is not None:
converter_desc_list = converter_str.split('.')
if len(converter_desc_list) == 1:
converter = converter_desc_list[0]
# default to `converter`
ret = getattr(cvt, converter, None)
if ret is None:
# try module converter
ret = self.get_converter(converter)
if ret is None:
ret = self.get_resource_clz_by_name(converter)
if ret is None:
ret = self.get_enum_by_name(converter)
if ret is None:
# try parser config
ret = self.get(converter)
if ret is None and converter_str is not None:
raise ValueError(
'Specified converter not supported: {}'.format(
converter_str))
return ret |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def copy_file_to_remote(self, local_path, remote_path):
"""scp the local file to remote folder. :param local_path: local path :param remote_path: remote path """ |
sftp_client = self.transport.open_sftp_client()
LOG.debug('Copy the local file to remote. '
          'Source=%(src)s. Target=%(target)s.',
          {'src': local_path, 'target': remote_path})
try:
sftp_client.put(local_path, remote_path)
except Exception as ex:
    LOG.error('Failed to copy the local file to remote. '
              'Reason: %s.', six.text_type(ex))
raise SFtpExecutionError(err=ex) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_remote_file(self, remote_path, local_path):
"""Fetch remote File. :param remote_path: remote path :param local_path: local path """ |
sftp_client = self.transport.open_sftp_client()
LOG.debug('Get the remote file. '
          'Source=%(src)s. Target=%(target)s.',
          {'src': remote_path, 'target': local_path})
try:
sftp_client.get(remote_path, local_path)
except Exception as ex:
    LOG.error('Failed to secure copy. Reason: %s.',
              six.text_type(ex))
raise SFtpExecutionError(err=ex) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def close(self):
"""Closes the ssh connection.""" |
if 'isLive' in self.__dict__ and self.isLive:
self.transport.close()
self.isLive = False |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def xml_request(check_object=False, check_invalid_data_mover=False):
""" indicate the return value is a xml api request :param check_invalid_data_mover: :param check_object: :return: the response of this request """ |
def decorator(f):
@functools.wraps(f)
def func_wrapper(self, *argv, **kwargs):
request = f(self, *argv, **kwargs)
return self.request(
request, check_object=check_object,
check_invalid_data_mover=check_invalid_data_mover)
return func_wrapper
return decorator |
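# Hedged usage sketch; `DemoClient` and `build_query` are illustrative
# names, not part of the real API. The decorated method only builds the
# request; the wrapper sends it through self.request():
class DemoClient(object):
    def request(self, request, check_object=False,
                check_invalid_data_mover=False):
        return 'sent: {}'.format(request)   # stand-in transport layer

    @xml_request(check_object=True)
    def build_query(self):
        return '<RequestPacket>...</RequestPacket>'

print(DemoClient().build_query())  # sent: <RequestPacket>...</RequestPacket>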
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nas_command(f):
""" indicate it's a command of nas command run with ssh :param f: function that returns the command in list :return: command execution result """ |
@functools.wraps(f)
def func_wrapper(self, *argv, **kwargs):
commands = f(self, *argv, **kwargs)
return self.ssh_execute(['env', 'NAS_DB=/nas'] + commands)
return func_wrapper |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def restore(self, backup=None, delete_backup=False):
"""Restore the snapshot to the associated storage resource. :param backup: name of the backup snapshot :param delete_backup: Whether to delete the backup snap after a successful restore. """ |
resp = self._cli.action(self.resource_class, self.get_id(),
'restore', copyName=backup)
resp.raise_if_err()
backup = resp.first_content['backup']
backup_snap = UnitySnap(_id=backup['id'], cli=self._cli)
if delete_backup:
log.info("Deleting the backup snap {} as the restoration "
"succeeded.".format(backup['id']))
backup_snap.delete()
return backup_snap |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete(self, async_mode=False, even_attached=False):
"""Deletes the snapshot. :param async_mode: whether to delete the snapshot in async mode. :param even_attached: whether to delete the snapshot even it is attached to hosts. """ |
try:
return super(UnitySnap, self).delete(async_mode=async_mode)
except UnityDeleteAttachedSnapError:
if even_attached:
log.debug("Force delete the snapshot even if it is attached. "
"First detach the snapshot from hosts, then delete "
"again.")
# Currently `detach_from` doesn't process `host` parameter.
# It always detaches the snapshot from all hosts. So pass in
# `None` here.
self.detach_from(None)
return super(UnitySnap, self).delete(async_mode=async_mode)
else:
raise |
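# Hypothetical usage, assuming `snap` is a UnitySnap attached to hosts:
#
#     snap.delete(even_attached=True)   # detach from all hosts, then delete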
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _flat_vports(self, connection_port):
"""Flat the virtual ports.""" |
vports = []
for vport in connection_port.virtual_ports:
self._set_child_props(connection_port, vport)
vports.append(vport)
return vports |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def has_snap(self):
""" This method won't count the snaps in "destroying" state! :return: false if no snaps or all snaps are destroying. """ |
return len(list(filter(lambda s: s.state != SnapStateEnum.DESTROYING,
self.snapshots))) > 0 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def median2D(const, bin1, label1, bin2, label2, data_label, returnData=False):
"""Return a 2D average of data_label over a season and label1, label2. Parameters const: Constellation or Instrument bin#: [min, max, number of bins] label#: string identifies data product for bin# data_label: list-like contains strings identifying data product(s) to be averaged Returns ------- median : dictionary 2D median accessed by data_label as a function of label1 and label2 over the season delineated by bounds of passed instrument objects. Also includes 'count' and 'avg_abs_dev' as well as the values of the bin edges in 'bin_x' and 'bin_y'. """ |
# const is either an Instrument or a Constellation, and we want to
# iterate over it.
# If it's a Constellation, then we can do that as is, but if it's
# an Instrument, we just have to put that Instrument into something
# that will yield that Instrument, like a list.
if isinstance(const, pysat.Instrument):
const = [const]
elif not isinstance(const, pysat.Constellation):
raise ValueError("Parameter must be an Instrument or a Constellation.")
# create bins
# bin boundaries used for sorting data into bins
binx = np.linspace(bin1[0], bin1[1], bin1[2]+1)
biny = np.linspace(bin2[0], bin2[1], bin2[2]+1)
# number of bins along each axis
numx = len(binx)-1
numy = len(biny)-1
# number of different data products
numz = len(data_label)
# create array to store all values before taking median
# index arrays over bins and data products, used for looping
yarr = np.arange(numy)
xarr = np.arange(numx)
zarr = np.arange(numz)
# 3D list of deques; stores the data sorted into each (z, y, x) bin
ans = [[[collections.deque() for i in xarr] for j in yarr] for k in zarr]
for instrument in const:
    # do loop to iterate over instrument season
    # (nominally iterates by date, depending on the configuration of
    # that particular instrument; either way, it loads successive data
    # between the start and end bounds)
    for inst in instrument:
        # collect data in bins for averaging
        if len(inst.data) != 0:
            # sort the data into bins (x) based on label1
            # (stores bin indexes in xind)
            xind = np.digitize(inst.data[label1], binx)-1
            # for each possible x index
            for xi in xarr:
                # get the indices of those pieces of data in that bin
                xindex, = np.where(xind == xi)
                if len(xindex) > 0:
                    # look up the data along y (label2) at those indices
                    yData = inst.data.iloc[xindex]
                    # digitize that, to sort data into bins along y (label2)
                    yind = np.digitize(yData[label2], biny)-1
                    # for each possible y index
                    for yj in yarr:
                        # select data with this y index (already filtered
                        # for this x index)
                        yindex, = np.where(yind == yj)
                        if len(yindex) > 0:
                            # for each data product label zk, filter the
                            # x-selected data by y, pick out the data
                            # product, and extend the deque (.ix is
                            # deprecated, so select the column by label
                            # and the rows by position)
                            for zk in zarr:
                                ans[zk][yj][xi].extend(
                                    yData[data_label[zk]].iloc[yindex].tolist())
return _calc_2d_median(ans, data_label, binx, biny, xarr, yarr, zarr, numx, numy, numz, returnData) |
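# Hedged usage sketch; instrument name, bounds, and data labels are
# assumptions for illustration, and real use needs downloaded data:
#
#     inst = pysat.Instrument('cnofs', 'ivm', clean_level='clean')
#     inst.bounds = (start_date, stop_date)
#     result = median2D(inst, [0., 24., 24], 'mlt',
#                       [-60., 60., 20], 'mlat', ['ionVelmeridional'])
#     result['ionVelmeridional']['median']   # 20 x 24 grid of medians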
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_rsc_list_2(self, rsc_clz_list=None):
"""get the list of resource list to collect based on clz list :param rsc_clz_list: the list of classes to collect :return: filtered list of resource list, like [VNXLunList(), VNXDiskList()] """ |
rsc_list_2 = self._default_rsc_list_with_perf_stats()
if rsc_clz_list is None:
rsc_clz_list = ResourceList.get_rsc_clz_list(rsc_list_2)
return [rsc_list
for rsc_list in rsc_list_2
if rsc_list.get_resource_class() in rsc_clz_list] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(cosmicFiles, tag=None, sat_id=None):
""" cosmic data load routine, called by pysat """ |
import netCDF4
num = len(cosmicFiles)
# make sure there are files to read
if num != 0:
    # call separate load_files routine, segmented for possible
    # multiprocessor load; not included, and only benefits about 20%
output = pysat.DataFrame(load_files(cosmicFiles, tag=tag, sat_id=sat_id))
output.index = pysat.utils.create_datetime_index(year=output.year,
month=output.month, day=output.day,
uts=output.hour*3600.+output.minute*60.+output.second)
# make sure UTS strictly increasing
output.sort_index(inplace=True)
# use the first available file to pick out meta information
meta = pysat.Meta()
ind = 0
repeat = True
while repeat:
try:
data = netCDF4.Dataset(cosmicFiles[ind])
ncattrsList = data.ncattrs()
for d in ncattrsList:
meta[d] = {'units':'', 'long_name':d}
keys = data.variables.keys()
for key in keys:
meta[key] = {'units':data.variables[key].units,
'long_name':data.variables[key].long_name}
            data.close()
            repeat = False
        except RuntimeError:
            # file was empty or unreadable; try the next one
            ind += 1
return output, meta
else:
# no data
return pysat.DataFrame(None), pysat.Meta() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clean(self):
"""Routine to return DMSP IVM data cleaned to the specified level 'Clean' enforces that both RPA and DM flags are <= 1 'Dusty' <= 2 'Dirty' <= 3 'None' None Routine is called by pysat, and not by the end user directly. Parameters inst : (pysat.Instrument) Instrument class object, whose attribute clean_level is used to return the desired level of data selectivity. Returns -------- Void : (NoneType) data in inst is modified in-place. Notes -------- Supports 'clean', 'dusty', 'dirty' """ |
if self.clean_level == 'clean':
idx, = np.where((self['rpa_flag_ut'] <= 1) & (self['idm_flag_ut'] <= 1))
elif self.clean_level == 'dusty':
idx, = np.where((self['rpa_flag_ut'] <= 2) & (self['idm_flag_ut'] <= 2))
elif self.clean_level == 'dirty':
idx, = np.where((self['rpa_flag_ut'] <= 3) & (self['idm_flag_ut'] <= 3))
else:
    # no cleaning requested; retain all data
    return
# downselect data based upon cleaning conditions above
self.data = self[idx]
return |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def modify(self, management_address=None, username=None, password=None, connection_type=None):
""" Modifies a remote system for remote replication. :param management_address: same as the one in `create` method. :param username: username for accessing the remote system. :param password: password for accessing the remote system. :param connection_type: same as the one in `create` method. """ |
req_body = self._cli.make_body(
managementAddress=management_address, username=username,
password=password, connectionType=connection_type)
resp = self.action('modify', **req_body)
resp.raise_if_err()
return resp |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def verify(self, connection_type=None):
""" Verifies and update the remote system settings. :param connection_type: same as the one in `create` method. """ |
req_body = self._cli.make_body(connectionType=connection_type)
resp = self.action('verify', **req_body)
resp.raise_if_err()
return resp |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def modify(self, sp=None, ip_port=None, ip_address=None, netmask=None, v6_prefix_length=None, gateway=None, vlan_id=None):
""" Modifies a replication interface. :param sp: same as the one in `create` method. :param ip_port: same as the one in `create` method. :param ip_address: same as the one in `create` method. :param netmask: same as the one in `create` method. :param v6_prefix_length: same as the one in `create` method. :param gateway: same as the one in `create` method. :param vlan_id: same as the one in `create` method. """ |
req_body = self._cli.make_body(sp=sp, ipPort=ip_port,
ipAddress=ip_address, netmask=netmask,
v6PrefixLength=v6_prefix_length,
gateway=gateway, vlanId=vlan_id)
resp = self.action('modify', **req_body)
resp.raise_if_err()
return resp |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sp_sum_values(self):
""" return sp level values input: "values": { "spa": { "19": "385", "18": "0", "20": "0", "17": "0", "16": "0" }, "spb": { "19": "101", "18": "101", "20": "101", "17": "101", "16": "101" } }, return: "values": { "spa": 385, "spb": 505 }, """ |
if self.values is None:
ret = IdValues()
else:
ret = IdValues({k: sum(int(x) for x in v.values()) for k, v in
self.values.items()})
return ret |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add(self, function, kind='add', at_pos='end',*args, **kwargs):
"""Add a function to custom processing queue. Custom functions are applied automatically to associated pysat instrument whenever instrument.load command called. Parameters function : string or function object name of function or function object to be added to queue kind : {'add', 'modify', 'pass} add Adds data returned from function to instrument object. A copy of pysat instrument object supplied to routine. modify pysat instrument object supplied to routine. Any and all changes to object are retained. pass A copy of pysat object is passed to function. No data is accepted from return. at_pos : string or int insert at position. (default, insert at end). args : extra arguments extra arguments are passed to the custom function (once) kwargs : extra keyword arguments extra keyword args are passed to the custom function (once) Note ---- Allowed `add` function returns: - {'data' : pandas Series/DataFrame/array_like, 'units' : string/array_like of strings, 'long_name' : string/array_like of strings, 'name' : string/array_like of strings (iff data array_like)} - pandas DataFrame, names of columns are used - pandas Series, .name required - (string/list of strings, numpy array/list of arrays) """ |
if isinstance(function, str):
    # convert string to function object; note that eval assumes the
    # named function is visible in the current namespace
    function = eval(function)
if (at_pos == 'end') or (at_pos == len(self._functions)):
# store function object
self._functions.append(function)
self._args.append(args)
self._kwargs.append(kwargs)
self._kind.append(kind.lower())
elif at_pos < len(self._functions):
# user picked a specific location to insert
self._functions.insert(at_pos, function)
self._args.insert(at_pos, args)
self._kwargs.insert(at_pos, kwargs)
    self._kind.insert(at_pos, kind.lower())
else:
raise TypeError('Must enter an index between 0 and %i' %
len(self._functions)) |
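# Hedged sketch of registering an 'add' function; 'mlt' and the function
# name are illustrative assumptions, not fixed API:
def double_mlt(inst):
    # return a Series; its .name becomes the new column label
    out = 2.0 * inst['mlt']
    out.name = 'double_mlt'
    return out

# inst.custom.add(double_mlt, 'add')       # append to end of the queue
# inst.custom.add(double_mlt, 'add', 0)    # or insert at position 0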
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _apply_all(self, sat):
""" Apply all of the custom functions to the satellite data object. """ |
if len(self._functions) > 0:
for func, arg, kwarg, kind in zip(self._functions, self._args,
self._kwargs, self._kind):
if len(sat.data) > 0:
if kind == 'add':
# apply custom functions that add data to the
# instrument object
tempd = sat.copy()
newData = func(tempd, *arg, **kwarg)
del tempd
                # process the different types of data returned by the
                # function; if a dict is returned, the data is under 'data'
                if isinstance(newData, dict):
# if DataFrame returned, add Frame to existing frame
if isinstance(newData['data'], pds.DataFrame):
sat[newData['data'].columns] = newData
# if a series is returned, add it as a column
elif isinstance(newData['data'], pds.Series):
# look for name attached to series first
if newData['data'].name is not None:
sat[newData['data'].name] = newData
# look if name is provided as part of dict
# returned from function
elif 'name' in newData.keys():
name = newData.pop('name')
sat[name] = newData
# couldn't find name information
else:
raise ValueError('Must assign a name to ' +
'Series or return a ' +
'"name" in dictionary.')
# some kind of iterable was returned
elif hasattr(newData['data'], '__iter__'):
# look for name in returned dict
if 'name' in newData.keys():
name = newData.pop('name')
sat[name] = newData
else:
raise ValueError('Must include "name" in ' +
'returned dictionary.')
# bare DataFrame is returned
elif isinstance(newData, pds.DataFrame):
sat[newData.columns] = newData
# bare Series is returned, name must be attached to
# Series
elif isinstance(newData, pds.Series):
sat[newData.name] = newData
# some kind of iterable returned,
# presuming (name, data)
# or ([name1,...], [data1,...])
elif hasattr(newData, '__iter__'):
# falling back to older behavior
# unpack tuple/list that was returned
newName = newData[0]
newData = newData[1]
                    if len(newData) > 0:
# doesn't really check ensure data, there could
# be multiple empty arrays returned, [[],[]]
if isinstance(newName, str):
# one item to add
sat[newName] = newData
else:
# multiple items
for name, data in zip(newName, newData):
                                if len(data) > 0:
# fixes up the incomplete check
# from before
sat[name] = data
else:
raise ValueError("kernel doesn't know what to do " +
"with returned data.")
# modifying loaded data
if kind == 'modify':
                t = func(sat, *arg, **kwarg)
if t is not None:
raise ValueError('Modify functions should not ' +
'return any information via ' +
'return. Information may only be' +
' propagated back by modifying ' +
'supplied pysat object.')
# pass function (function runs, no data allowed back)
if kind == 'pass':
tempd = sat.copy()
                t = func(tempd, *arg, **kwarg)
del tempd
if t is not None:
raise ValueError('Pass functions should not ' +
'return any information via ' +
'return.') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clear(self):
"""Clear custom function list.""" |
self._functions = []
self._args = []
self._kwargs = []
self._kind = []
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def download(date_array, tag, sat_id, data_path, user=None, password=None):
""" Download SuperDARN data from Virginia Tech organized for loading by pysat. """ |
import sys
import os
import pysftp
import davitpy
if user is None:
user = os.environ['DBREADUSER']
if password is None:
password = os.environ['DBREADPASS']
with pysftp.Connection(
os.environ['VTDB'],
username=user,
password=password) as sftp:
for date in date_array:
myDir = '/data/'+date.strftime("%Y")+'/grdex/'+tag+'/'
fname = date.strftime("%Y%m%d")+'.' + tag + '.grdex'
local_fname = fname+'.bz2'
saved_fname = os.path.join(data_path,local_fname)
full_fname = os.path.join(data_path,fname)
try:
print('Downloading file for '+date.strftime('%D'))
sys.stdout.flush()
sftp.get(myDir+local_fname, saved_fname)
os.system('bunzip2 -c '+saved_fname+' > '+ full_fname)
os.system('rm ' + saved_fname)
except IOError:
print('File not available for '+date.strftime('%D'))
return |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def same_disks(self, count=2):
""" filter self to the required number of disks with same size and type Select the disks with the same type and same size. If not enough disks available, set self to empty. :param count: number of disks to retrieve :return: disk list """ |
ret = self
if len(self) > 0:
type_counter = Counter(self.drive_type)
drive_type, counts = type_counter.most_common()[0]
self.set_drive_type(drive_type)
if len(self) > 0:
size_counter = Counter(self.capacity)
size, counts = size_counter.most_common()[0]
self.set_capacity(size)
if len(self) >= count:
indices = self.index[:count]
self.set_indices(indices)
else:
self.set_indices('N/A')
return ret |
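# Hypothetical usage on a VNX disk list (storops object names assumed):
#
#     disks = vnx.get_disk()
#     candidates = disks.same_disks(count=4)
#     len(candidates)   # 4 if enough same-type/same-size disks, else 0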
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_bounds(self, start, stop):
""" Sets boundaries for all instruments in constellation """ |
for instrument in self.instruments:
instrument.bounds = (start, stop) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def data_mod(self, *args, **kwargs):
""" Register a function to modify data of member Instruments. The function is not partially applied to modify member data. When the Constellation receives a function call to register a function for data modification, it passes the call to each instrument and registers it in the instrument's pysat.Custom queue. (Wraps pysat.Custom.add; documentation of that function is reproduced here.) Parameters function : string or function object name of function or function object to be added to queue kind : {'add, 'modify', 'pass'} add Adds data returned from fuction to instrument object. modify pysat instrument object supplied to routine. Any and all changes to object are retained. pass A copy of pysat object is passed to function. No data is accepted from return. at_pos : string or int insert at position. (default, insert at end). args : extra arguments Note ---- Allowed `add` function returns: - {'data' : pandas Series/DataFrame/array_like, 'units' : string/array_like of strings, 'long_name' : string/array_like of strings, 'name' : string/array_like of strings (iff data array_like)} - pandas DataFrame, names of columns are used - pandas Series, .name required - (string/list of strings, numpy array/list of arrays) """ |
for instrument in self.instruments:
instrument.custom.add(*args, **kwargs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(self, *args, **kwargs):
""" Load instrument data into instrument object.data (Wraps pysat.Instrument.load; documentation of that function is reproduced here.) Parameters --------- yr : integer Year for desired data doy : integer day of year data : datetime object date to load fname : 'string' filename to be loaded verifyPad : boolean if true, padding data not removed (debug purposes) """ |
for instrument in self.instruments:
instrument.load(*args, **kwargs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add(self, bounds1, label1, bounds2, label2, bin3, label3, data_label):
""" Combines signals from multiple instruments within given bounds. Parameters bounds1 : (min, max) Bounds for selecting data on the axis of label1 Data points with label1 in [min, max) will be considered. label1 : string Data label for bounds1 to act on. bounds2 : (min, max) Bounds for selecting data on the axis of label2 Data points with label1 in [min, max) will be considered. label2 : string Data label for bounds2 to act on. bin3 : (min, max, #bins) Min and max bounds and number of bins for third axis. label3 : string Data label for third axis. data_label : array of strings Data label(s) for data product(s) to be averaged. Returns ------- median : dictionary Dictionary indexed by data label, each value of which is a dictionary with keys 'median', 'count', 'avg_abs_dev', and 'bin' (the values of the bin edges.) """ |
# TODO Update for 2.7 compatibility.
if isinstance(data_label, str):
data_label = [data_label, ]
elif not isinstance(data_label, collections.Sequence):
raise ValueError("Please pass data_label as a string or "
"collection of strings.")
# Modeled after pysat.ssnl.median2D
# Make bin boundaries.
# y: values at label3
# z: *data_labels
biny = np.linspace(bin3[0], bin3[1], bin3[2]+1)
numy = len(biny)-1
numz = len(data_label)
# Ranges
yarr, zarr = map(np.arange, (numy, numz))
# Store data here.
ans = [[[collections.deque()] for j in yarr] for k in zarr]
# Filter data by bounds and bin it.
# Idiom for loading all of the data in an instrument's bounds.
for instrument in self:
    for inst in instrument:
        if len(inst.data) != 0:
            # Select indices for each piece of data we're interested in.
# Not all of this data is in bounds on label3 but we'll
# sort this later.
min1, max1 = bounds1
min2, max2 = bounds2
data1 = inst.data[label1]
data2 = inst.data[label2]
in_bounds, = np.where((min1 <= data1) & (data1 < max1) &
(min2 <= data2) & (data2 < max2))
# Grab the data in bounds on data1, data2.
data_considered = inst.data.iloc[in_bounds]
y_indexes = np.digitize(data_considered[label3], biny) - 1
# Iterate over the bins along y
for yj in yarr:
                # Indices of data in this bin
yindex, = np.where(y_indexes == yj)
# If there's data in this bin
if len(yindex) > 0:
# For each data label, add the points.
for zk in zarr:
                        # .ix is deprecated; select the column by label
                        # and the rows by position
                        ans[zk][yj][0].extend(
                            data_considered[data_label[zk]].iloc[yindex].tolist())
# Now for the averaging.
# Pack the answers in the shape expected by the 2D helper.
numx = 1
xarr = np.arange(numx)
binx = None
# TODO modify output
out_2d = _calc_2d_median(ans, data_label, binx, biny, xarr, yarr, zarr, numx, numy, numz)
# Transform output
output = {}
for i, label in enumerate(data_label):
median = [r[0] for r in out_2d[label]['median']]
count = [r[0] for r in out_2d[label]['count']]
dev = [r[0] for r in out_2d[label]['avg_abs_dev']]
output[label] = {'median': median,
'count': count,
'avg_abs_dev': dev,
'bin': out_2d[label]['bin_y']}
return output |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def difference(self, instrument1, instrument2, bounds, data_labels, cost_function):
""" Calculates the difference in signals from multiple instruments within the given bounds. Parameters instrument1 : Instrument Information must already be loaded into the instrument. instrument2 : Instrument Information must already be loaded into the instrument. bounds : list of tuples in the form (inst1_label, inst2_label, min, max, max_difference) inst1_label are inst2_label are labels for the data in instrument1 and instrument2 min and max are bounds on the data considered max_difference is the maximum difference between two points for the difference to be calculated data_labels : list of tuples of data labels The first key is used to access data in s1 and the second data in s2. cost_function : function function that operates on two rows of the instrument data. used to determine the distance between two points for finding closest points Returns ------- data_df: pandas DataFrame Each row has a point from instrument1, with the keys preceded by '1_', and a point within bounds on that point from instrument2 with the keys preceded by '2_', and the difference between the instruments' data for all the labels in data_labels Created as part of a Spring 2018 UTDesign project. """ |
"""
Draft Pseudocode
----------------
Check integrity of inputs.
Let STD_LABELS be the constant tuple:
("time", "lat", "long", "alt")
Note: modify so that user can override labels for time,
lat, long, data for each satellite.
// We only care about the data currently loaded
into each object.
Let start be the later of the datetime of the
first piece of data loaded into s1, the first
piece of data loaded into s2, and the user
supplied start bound.
Let end be the earlier of the datetime of the
last piece of data loaded into s1, the last piece
of data loaded into s2, and the user supplied
end bound.
If start is after end, raise an error.
// Let data be the 2D array of deques holding each piece
// of data, sorted into bins by lat/long/alt.
Let s1_data (resp s2_data) be data from s1.data, s2.data
filtered by user-provided lat/long/alt bounds, time bounds
calculated.
Let data be a dictionary of lists with the keys
[ dl1 for dl1, dl2 in data_labels ] +
STD_LABELS +
[ lb+"2" for lb in STD_LABELS ]
For each piece of data s1_point in s1_data:
# Hopefully np.where is very good, because this
# runs O(n) times.
# We could try reusing selections, maybe, if needed.
# This would probably involve binning.
Let s2_near be the data from s2.data within certain
bounds on lat/long/alt/time using 8 statements to
numpy.where. We can probably get those defaults from
the user or handy constants / config?
# We could try a different algorithm for closest pairs
# of points.
Let distance be the numpy array representing the
distance between s1_point and each point in s2_near.
# S: Difference for others: change this line.
For each of those, calculate the spatial difference
from the s1 using lat/long/alt. If s2_near is
empty; break loop.
Let s2_nearest be the point in s2_near corresponding
to the lowest distance.
Append to data: a point, indexed by the time from
s1_point, containing the following data:
# note
Let n be the length of data["time"].
For each key in data:
Assert len(data[key]) == n
End for.
# Create data row to pass to pandas.
Let row be an empty dict.
For dl1, dl2 in data_labels:
Append s1_point[dl1] - s2_nearest[dl2] to data[dl1].
For key in STD_LABELS:
Append s1_point[translate[key]] to data[key]
key = key+"2"
Append s2_nearest[translate[key]] to data[key]
Let data_df be a pandas dataframe created from the data
in data.
return { 'data': data_df, 'start':start, 'end':end }
"""
labels = [dl1 for dl1, dl2 in data_labels] + ['1_'+b[0] for b in bounds] + ['2_'+b[1] for b in bounds] + ['dist']
data = {label: [] for label in labels}
# Apply bounds
inst1 = instrument1.data
inst2 = instrument2.data
for b in bounds:
label1 = b[0]
label2 = b[1]
low = b[2]
high = b[3]
data1 = inst1[label1]
    ind1, = np.where((data1 >= low) & (data1 < high))
    inst1 = inst1.iloc[ind1]
data2 = inst2[label2]
    ind2, = np.where((data2 >= low) & (data2 < high))
    inst2 = inst2.iloc[ind2]
for i, s1_point in inst1.iterrows():
# Gets points in instrument2 within the given bounds
s2_near = instrument2.data
for b in bounds:
label1 = b[0]
label2 = b[1]
s1_val = s1_point[label1]
max_dist = b[4]
minbound = s1_val - max_dist
maxbound = s1_val + max_dist
data2 = s2_near[label2]
indices = np.where((data2 >= minbound) & (data2 < maxbound))
s2_near = s2_near.iloc[indices]
# Finds nearest point to s1_point in s2_near
s2_nearest = None
min_dist = float('NaN')
for j, s2_point in s2_near.iterrows():
dist = cost_function(s1_point, s2_point)
        # min_dist != min_dist is True only when min_dist is NaN (first hit)
        if dist < min_dist or min_dist != min_dist:
min_dist = dist
s2_nearest = s2_point
data['dist'].append(min_dist)
# Append difference to data dict
for dl1, dl2 in data_labels:
if s2_nearest is not None:
data[dl1].append(s1_point[dl1] - s2_nearest[dl2])
else:
data[dl1].append(float('NaN'))
# Append the rest of the row
for b in bounds:
label1 = b[0]
label2 = b[1]
data['1_'+label1].append(s1_point[label1])
if s2_nearest is not None:
data['2_'+label2].append(s2_nearest[label2])
else:
data['2_'+label2].append(float('NaN'))
data_df = pds.DataFrame(data=data)
return data_df |
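# Sketch of a cost function suitable for `difference`; the column names
# ('latitude', 'longitude', 'altitude') are assumptions for illustration:
import numpy as np

def spatial_cost(p1, p2):
    # naive Euclidean distance in (lat, long, alt); ignores Earth curvature
    return np.sqrt((p1['latitude'] - p2['latitude'])**2 +
                   (p1['longitude'] - p2['longitude'])**2 +
                   (p1['altitude'] - p2['altitude'])**2)

# bounds = [('longitude', 'longitude', 0., 360., 10.),
#           ('latitude', 'latitude', -90., 90., 5.),
#           ('altitude', 'altitude', 300., 1000., 100.)]
# diff = const.difference(inst1, inst2, bounds, [('ti', 'ti')], spatial_cost)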
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def computational_form(data):
""" Input Series of numbers, Series, or DataFrames repackaged for calculation. Parameters data : pandas.Series Series of numbers, Series, DataFrames Returns ------- pandas.Series, DataFrame, or Panel repacked data, aligned by indices, ready for calculation """ |
if isinstance(data.iloc[0], DataFrame):
    # note: pandas.Panel was removed in pandas 0.25; this branch assumes
    # an older pandas where Panel is still available (xrange replaced
    # with range for Python 3 compatibility)
    dslice = Panel.from_dict(dict([(i, data.iloc[i])
                                   for i in range(len(data))]))
elif isinstance(data.iloc[0], Series):
dslice = DataFrame(data.tolist())
dslice.index = data.index
else:
dslice = data
return dslice |
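# Toy demonstration (assumed data): a Series of Series is repacked into a
# DataFrame aligned on the original time index.
import pandas as pds
from pandas import Series

nested = Series([Series({'a': 1.0, 'b': 2.0}),
                 Series({'a': 3.0, 'b': 4.0})],
                index=pds.to_datetime(['2019-01-01', '2019-01-02']))
flat = computational_form(nested)
print(flat['a'].tolist())   # [1.0, 3.0]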
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_data_dir(path=None, store=None):
""" Set the top level directory pysat uses to look for data and reload. Parameters path : string valid path to directory pysat uses to look for data store : bool if True, store data directory for future runs """ |
import sys
import os
import pysat
if sys.version_info[0] >= 3:
if sys.version_info[1] < 4:
import imp
re_load = imp.reload
else:
import importlib
re_load = importlib.reload
else:
re_load = reload
if store is None:
store = True
if os.path.isdir(path):
if store:
with open(os.path.join(os.path.expanduser('~'), '.pysat',
'data_path.txt'), 'w') as f:
f.write(path)
pysat.data_dir = path
pysat._files = re_load(pysat._files)
pysat._instrument = re_load(pysat._instrument)
else:
raise ValueError('Path %s does not lead to a valid directory.' % path) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getyrdoy(date):
"""Return a tuple of year, day of year for a supplied datetime object.""" |
try:
    doy = date.toordinal() - datetime(date.year, 1, 1).toordinal() + 1
except AttributeError:
raise AttributeError("Must supply a pandas datetime object or " +
"equivalent")
else:
return date.year, doy |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def season_date_range(start, stop, freq='D'):
""" Return array of datetime objects using input frequency from start to stop Supports single datetime object or list, tuple, ndarray of start and stop dates. freq codes correspond to pandas date_range codes, D daily, M monthly, S secondly """ |
if hasattr(start, '__iter__'):
    # note: entries are assumed to be datetime-like; no explicit check
    season = pds.date_range(start[0], stop[0], freq=freq)
    for (sta, stp) in zip(start[1:], stop[1:]):
        season = season.append(pds.date_range(sta, stp, freq=freq))
else:
season = pds.date_range(start, stop, freq=freq)
return season |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_datetime_index(year=None, month=None, day=None, uts=None):
"""Create a timeseries index using supplied year, month, day, and ut in seconds. Parameters year : array_like of ints month : array_like of ints or None day : array_like of ints for day (default) or day of year (use month=None) uts : array_like of floats Returns ------- Pandas timeseries index. Note ---- Leap seconds have no meaning here. """ |
# need a timeseries index for storing satellite data in pandas but
# creating a datetime object for everything is too slow
# so I calculate the number of nanoseconds elapsed since first sample,
# and create timeseries index from that.
# Factor of 20 improvement compared to previous method,
# which itself was an order of magnitude faster than datetime.
# get the list of unique years and months
if not hasattr(year, '__iter__'):
raise ValueError('Must provide an iterable for all inputs.')
if len(year) == 0:
raise ValueError('Length of array must be larger than 0.')
year = year.astype(int)
if month is None:
month = np.ones(len(year), dtype=int)
else:
month = month.astype(int)
if uts is None:
uts = np.zeros(len(year))
if day is None:
day = np.ones(len(year))
day = day.astype(int)
# track changes in seconds
uts_del = uts.copy().astype(float)
# determine where there are changes in year and month that need to be
# accounted for
_, idx = np.unique(year*100.+month, return_index=True)
# create another index array for faster algorithm below
idx2 = np.hstack((idx,len(year)+1))
# computes UTC seconds offset for each unique set of year and month
for _idx, _idx2 in zip(idx[1:], idx2[2:]):
temp = (datetime(year[_idx],month[_idx],1)
- datetime(year[0],month[0],1))
uts_del[_idx:_idx2] += temp.total_seconds()
# add in UTC seconds for days, ignores existence of leap seconds
uts_del += (day-1)*86400
# add in seconds since unix epoch to first day
uts_del += (datetime(year[0],month[0],1)-datetime(1970,1,1)).total_seconds()
# going to use routine that defaults to nanoseconds for epoch
uts_del *= 1E9
return pds.to_datetime(uts_del) |
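# Worked example with toy arrays; note day and month boundaries are handled:
import numpy as np

yr = np.array([2009, 2009, 2009])
mo = np.array([1, 1, 2])
dy = np.array([1, 2, 1])
ut = np.array([0.0, 30.0, 86399.0])
idx = create_datetime_index(year=yr, month=mo, day=dy, uts=ut)
# -> 2009-01-01 00:00:00, 2009-01-02 00:00:30, 2009-02-01 23:59:59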
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nan_circmean(samples, high=2.0*np.pi, low=0.0, axis=None):
"""NaN insensitive version of scipy's circular mean routine Parameters samples : array_like Input array low : float or int Lower boundary for circular standard deviation range (default=0) high: float or int Upper boundary for circular standard deviation range (default=2 pi) axis : int or NoneType Axis along which standard deviations are computed. The default is to compute the standard deviation of the flattened array Returns -------- circmean : float Circular mean """ |
samples = np.asarray(samples)
# note: dropping NaNs flattens the array, so `axis` only has an effect
# on NaN-free multi-dimensional input
samples = samples[~np.isnan(samples)]
if samples.size == 0:
return np.nan
# Ensure the samples are in radians
ang = (samples - low) * 2.0 * np.pi / (high - low)
# Calculate the means of the sine and cosine, as well as the length
# of their unit vector
ssum = np.sin(ang).sum(axis=axis)
csum = np.cos(ang).sum(axis=axis)
res = np.arctan2(ssum, csum)
# Bring the range of the result between 0 and 2 pi
mask = res < 0.0
if mask.ndim > 0:
res[mask] += 2.0 * np.pi
elif mask:
res += 2.0 * np.pi
# Scale the result back into the original [low, high) range
circmean = res * (high - low) / (2.0 * np.pi) + low
return circmean |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nan_circstd(samples, high=2.0*np.pi, low=0.0, axis=None):
"""NaN insensitive version of scipy's circular standard deviation routine Parameters samples : array_like Input array low : float or int Lower boundary for circular standard deviation range (default=0) high: float or int Upper boundary for circular standard deviation range (default=2 pi) axis : int or NoneType Axis along which standard deviations are computed. The default is to compute the standard deviation of the flattened array Returns -------- circstd : float Circular standard deviation """ |
samples = np.asarray(samples)
# note: dropping NaNs flattens the array, so `axis` only has an effect
# on NaN-free multi-dimensional input
samples = samples[~np.isnan(samples)]
if samples.size == 0:
return np.nan
# Ensure the samples are in radians
ang = (samples - low) * 2.0 * np.pi / (high - low)
# Calculate the means of the sine and cosine, as well as the length
# of their unit vector
smean = np.sin(ang).mean(axis=axis)
cmean = np.cos(ang).mean(axis=axis)
rmean = np.sqrt(smean**2 + cmean**2)
# Calculate the circular standard deviation
circstd = (high - low) * np.sqrt(-2.0 * np.log(rmean)) / (2.0 * np.pi)
return circstd |
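# Worked example (toy data): angles straddling the 0/360 wrap point.
import numpy as np

angles = np.array([355.0, 5.0, np.nan, 358.0])
print(nan_circmean(angles, high=360.0, low=0.0))   # ~359.3, not ~239.3
print(nan_circstd(angles, high=360.0, low=0.0))    # ~4.2 degrees, NaN ignored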
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def default(inst):
"""Default routine to be applied when loading data. Removes redundant naming """ |
import pysat.instruments.icon_ivm as icivm
inst.tag = 'level_2'
icivm.remove_icon_names(inst, target='ICON_L2_EUV_Daytime_OP_') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_files(tag=None, sat_id=None, data_path=None, format_str=None):
"""Produce a list of ICON EUV files. Notes ----- Currently fixed to level-2 """ |
desc = None
level = tag
if level == 'level_1':
code = 'L1'
desc = None
elif level == 'level_2':
code = 'L2'
desc = None
else:
raise ValueError('Unsupported level supplied: ' + level)
if format_str is None:
    format_str = 'ICON_' + code + '_EUV_Daytime'
    if desc is not None:
        # single separator only; the date segment below supplies its own
        format_str += '_' + desc
    format_str += '_{year:4d}-{month:02d}-{day:02d}_v{version:02d}r{revision:03d}.NC'
return pysat.Files.from_os(data_path=data_path,
format_str=format_str) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def shadow_copy(self):
""" Return a copy of the resource with same raw data :return: copy of the resource """ |
ret = self.__class__()
if not self._is_updated():
# before copy, make sure source is updated.
self.update()
ret._parsed_resource = self._parsed_resource
return ret |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(fnames, tag=None, sat_id=None, **kwargs):
"""Loads data using pysat.utils.load_netcdf4 . This routine is called as needed by pysat. It is not intended for direct user interaction. Parameters fnames : array-like iterable of filename strings, full path, to data files to be loaded. This input is nominally provided by pysat itself. tag : string tag name used to identify particular data set to be loaded. This input is nominally provided by pysat itself. sat_id : string Satellite ID used to identify particular data set to be loaded. This input is nominally provided by pysat itself. **kwargs : extra keywords Passthrough for additional keyword arguments specified when instantiating an Instrument object. These additional keywords are passed through to this routine by pysat. Returns ------- data, metadata Data and Metadata are formatted for pysat. Data is a pandas DataFrame while metadata is a pysat.Meta instance. Note ---- Any additional keyword arguments passed to pysat.Instrument upon instantiation are passed along to this routine and through to the load_netcdf4 call. Examples -------- :: inst = pysat.Instrument('sport', 'ivm') inst.load(2019,1) # create quick Instrument object for a new, random netCDF4 file # define filename template string to identify files # this is normally done by instrument code, but in this case # there is no built in pysat instrument support # presumes files are named default_2019-01-01.NC format_str = 'default_{year:04d}-{month:02d}-{day:02d}.NC' inst = pysat.Instrument('netcdf', 'pandas', custom_kwarg='test' data_path='./', format_str=format_str) inst.load(2019,1) """ |
return pysat.utils.load_netcdf4(fnames, **kwargs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_files(tag=None, sat_id=None, data_path=None, format_str=None):
"""Produce a list of files corresponding to format_str located at data_path. This routine is invoked by pysat and is not intended for direct use by the end user. Multiple data levels may be supported via the 'tag' and 'sat_id' input strings. Parameters tag : string ('') tag name used to identify particular data set to be loaded. This input is nominally provided by pysat itself. sat_id : string ('') Satellite ID used to identify particular data set to be loaded. This input is nominally provided by pysat itself. data_path : string Full path to directory containing files to be loaded. This is provided by pysat. The user may specify their own data path at Instrument instantiation and it will appear here. format_str : string (None) String template used to parse the datasets filenames. If a user supplies a template string at Instrument instantiation then it will appear here, otherwise defaults to None. Returns ------- pandas.Series Series of filename strings, including the path, indexed by datetime. Examples -------- :: If a filename is SPORT_L2_IVM_2019-01-01_v01r0000.NC then the template is 'SPORT_L2_IVM_{year:04d}-{month:02d}-{day:02d}_v{version:02d}r{revision:04d}.NC' Note ---- The returned Series should not have any duplicate datetimes. If there are multiple versions of a file the most recent version should be kept and the rest discarded. This routine uses the pysat.Files.from_os constructor, thus the returned files are up to pysat specifications. Normally the format_str for each supported tag and sat_id is defined within this routine. However, as this is a generic routine, those definitions can't be made here. This method could be used in an instrument specific module where the list_files routine in the new package defines the format_str based upon inputs, then calls this routine passing both data_path and format_str. Alternately, the list_files routine in nasa_cdaweb_methods may also be used and has more built in functionality. Supported tages and format strings may be defined within the new instrument module and passed as arguments to nasa_cdaweb_methods.list_files . For an example on using this routine, see pysat/instrument/cnofs_ivm.py or cnofs_vefi, cnofs_plp, omni_hro, timed_see, etc. """ |
return pysat.Files.from_os(data_path=data_path, format_str=format_str) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def command(f):
""" indicate it's a command of naviseccli :param f: function that returns the command in list :return: command execution result """ |
@functools.wraps(f)
def func_wrapper(self, *argv, **kwargs):
    ip = kwargs.pop('ip', None)
commands = _get_commands(f, self, *argv, **kwargs)
return self.execute(commands, ip=ip)
return func_wrapper |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def duel_command(f):
""" indicate it's a command need to be called on both SP :param f: function that returns the command in list :return: command execution result on both sps (tuple of 2) """ |
@functools.wraps(f)
def func_wrapper(self, *argv, **kwargs):
commands = _get_commands(f, self, *argv, **kwargs)
return self.execute_dual(commands)
return func_wrapper |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def supplement_filesystem(old_size, user_cap=False):
"""Return new size accounting for the metadata.""" |
new_size = old_size
if user_cap:
if old_size <= _GiB_to_Byte(1.5):
new_size = _GiB_to_Byte(3)
else:
new_size += _GiB_to_Byte(1.5)
return int(new_size) |
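# Worked examples of the supplement rule above (values in bytes):
print(supplement_filesystem(2 ** 30, user_cap=True))       # 1 GiB -> 3 GiB floor
print(supplement_filesystem(10 * 2 ** 30, user_cap=True))  # 10 GiB -> 11.5 GiB
print(supplement_filesystem(10 * 2 ** 30))                 # unchanged without user_cap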
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def synchronized(cls, obj=None):
""" synchronize on obj if obj is supplied. :param obj: the obj to lock on. if none, lock to the function :return: return of the func. """ |
def get_key(f, o):
if o is None:
key = hash(f)
else:
key = hash(o)
return key
def get_lock(f, o):
key = get_key(f, o)
if key not in cls.lock_map:
with cls.lock_map_lock:
if key not in cls.lock_map:
cls.lock_map[key] = _init_lock()
return cls.lock_map[key]
def wrap(f):
@functools.wraps(f)
def new_func(*args, **kw):
with get_lock(f, obj):
return f(*args, **kw)
return new_func
return wrap |
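# Hedged usage sketch; `Locker` is an assumed host class exposing this
# classmethod, and `resource` is any hashable object to serialize on:
#
#     @Locker.synchronized(obj=resource)
#     def mutate(resource):
#         ...   # at most one thread runs this for `resource` at a time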
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def re_enqueue(self, item):
"""Re-enqueue till reach max retries.""" |
if 'retries' in item:
retries = item['retries']
if retries >= self.MAX_RETRIES:
log.warn("Failed to execute {} after {} retries, give it "
" up.".format(item['method'], retries))
else:
retries += 1
item['retries'] = retries
self._q.put_nowait(item)
else:
item['retries'] = 1
self._q.put_nowait(item) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _support_op(*args):
"""Internal decorator to define an criteria compare operations.""" |
def inner(func):
for one_arg in args:
_op_mapping_[one_arg] = func
return func
return inner |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clean(inst):
"""Routine to return VEFI data cleaned to the specified level Parameters inst : (pysat.Instrument) Instrument class object, whose attribute clean_level is used to return the desired level of data selectivity. Returns -------- Void : (NoneType) data in inst is modified in-place. Notes -------- 'dusty' or 'clean' removes data when interpolation flag is set to 1 """ |
if (inst.clean_level == 'dusty') or (inst.clean_level == 'clean'):
idx, = np.where(inst['B_flag'] == 0)
inst.data = inst[idx, :]
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_icon_names(inst, target=None):
"""Removes leading text on ICON project variable names Parameters inst : pysat.Instrument ICON associated pysat.Instrument object target : str Leading string to remove. If none supplied, ICON project standards are used to identify and remove leading text Returns ------- None Modifies Instrument object in place """ |
if target is None:
lev = inst.tag
if lev == 'level_2':
lev = 'L2'
elif lev == 'level_0':
lev = 'L0'
elif lev == 'level_0p':
lev = 'L0P'
elif lev == 'level_1.5':
lev = 'L1-5'
elif lev == 'level_1':
lev = 'L1'
else:
        raise ValueError('Unknown ICON data level')
# get instrument code
sid = inst.sat_id.lower()
if sid == 'a':
sid = 'IVM_A'
elif sid == 'b':
sid = 'IVM_B'
else:
raise ValueError('Unknown ICON satellite ID')
prepend_str = '_'.join(('ICON', lev, sid)) + '_'
else:
prepend_str = target
inst.data.rename(columns=lambda x: x.split(prepend_str)[-1], inplace=True)
inst.meta.data.rename(index=lambda x: x.split(prepend_str)[-1], inplace=True)
orig_keys = inst.meta.keys_nD()
for key in orig_keys:
new_key = key.split(prepend_str)[-1]
new_meta = inst.meta.pop(key)
new_meta.data.rename(index=lambda x: x.split(prepend_str)[-1], inplace=True)
inst.meta[new_key] = new_meta
return |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def time_shift_to_magnetic_poles(inst):
""" OMNI data is time-shifted to bow shock. Time shifted again to intersections with magnetic pole. Parameters inst : Instrument class object Instrument with OMNI HRO data Notes --------- Time shift calculated using distance to bow shock nose (BSN) and velocity of solar wind along x-direction. Warnings -------- Use at own risk. """ |
# need to fill in Vx to get an estimate of what is going on
inst['Vx'] = inst['Vx'].interpolate('nearest')
inst['Vx'] = inst['Vx'].fillna(method='backfill')
inst['Vx'] = inst['Vx'].fillna(method='pad')
inst['BSN_x'] = inst['BSN_x'].interpolate('nearest')
inst['BSN_x'] = inst['BSN_x'].fillna(method='backfill')
inst['BSN_x'] = inst['BSN_x'].fillna(method='pad')
# make sure there are no gaps larger than a minute
inst.data = inst.data.resample('1T').interpolate('time')
time_x = inst['BSN_x']*6371.2/-inst['Vx']
idx, = np.where(np.isnan(time_x))
if len(idx) > 0:
    # NaNs remain after interpolation; surface them for inspection
    print('Warning: NaN values remain in the computed time shift:')
    print(time_x[idx])
time_x_offset = [pds.DateOffset(seconds=time)
                 for time in time_x.astype(int)]
new_index=[]
for i, time in enumerate(time_x_offset):
new_index.append(inst.data.index[i] + time)
inst.data.index = new_index
inst.data = inst.data.sort_index()
return |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calculate_clock_angle(inst):
""" Calculate IMF clock angle and magnitude of IMF in GSM Y-Z plane Parameters inst : pysat.Instrument Instrument with OMNI HRO data """ |
# Calculate clock angle in degrees
clock_angle = np.degrees(np.arctan2(inst['BY_GSM'], inst['BZ_GSM']))
clock_angle[clock_angle < 0.0] += 360.0
inst['clock_angle'] = pds.Series(clock_angle, index=inst.data.index)
# Calculate magnitude of IMF in Y-Z plane
inst['BYZ_GSM'] = pds.Series(np.sqrt(inst['BY_GSM']**2 +
inst['BZ_GSM']**2),
index=inst.data.index)
return |
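# Worked check of the convention above: arctan2(BY, BZ) with BY=BZ=1
# gives 45 degrees, and negative angles are wrapped into [0, 360):
import numpy as np
print(np.degrees(np.arctan2(1.0, 1.0)))            # 45.0
print(np.degrees(np.arctan2(-1.0, 0.0)) % 360.0)   # 270.0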
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calculate_imf_steadiness(inst, steady_window=15, min_window_frac=0.75, max_clock_angle_std=90.0/np.pi, max_bmag_cv=0.5):
""" Calculate IMF steadiness using clock angle standard deviation and the coefficient of variation of the IMF magnitude in the GSM Y-Z plane Parameters inst : pysat.Instrument Instrument with OMNI HRO data steady_window : int Window for calculating running statistical moments in min (default=15) min_window_frac : float Minimum fraction of points in a window for steadiness to be calculated (default=0.75) max_clock_angle_std : float Maximum standard deviation of the clock angle in degrees (default=22.5) max_bmag_cv : float Maximum coefficient of variation of the IMF magnitude in the GSM Y-Z plane (default=0.5) """ |
# We are not going to interpolate through missing values
sample_rate = int(inst.tag[0])
max_wnum = np.floor(steady_window / sample_rate)
if max_wnum != steady_window / sample_rate:
steady_window = max_wnum * sample_rate
print("WARNING: sample rate is not a factor of the statistical window")
print("new statistical window is {:.1f}".format(steady_window))
min_wnum = int(np.ceil(max_wnum * min_window_frac))
# Calculate the running coefficient of variation of the BYZ magnitude
byz_mean = inst['BYZ_GSM'].rolling(min_periods=min_wnum, center=True,
window=steady_window).mean()
byz_std = inst['BYZ_GSM'].rolling(min_periods=min_wnum, center=True,
window=steady_window).std()
inst['BYZ_CV'] = pds.Series(byz_std / byz_mean, index=inst.data.index)
# Calculate the running circular standard deviation of the clock angle
circ_kwargs = {'high': 360.0, 'low': 0.0}
ca_std = inst['clock_angle'].rolling(min_periods=min_wnum,
                                     window=steady_window,
                                     center=True).apply(
    pysat.utils.nan_circstd, kwargs=circ_kwargs)
inst['clock_angle_std'] = pds.Series(ca_std, index=inst.data.index)
# Determine how long the clock angle and IMF magnitude are steady
imf_steady = np.zeros(shape=inst.data.index.shape)
steady = False
for i, cv in enumerate(inst.data['BYZ_CV']):
if steady:
del_min = int((inst.data.index[i] -
inst.data.index[i-1]).total_seconds() / 60.0)
if np.isnan(cv) or np.isnan(ca_std[i]) or del_min > sample_rate:
# Reset the steadiness flag if fill values are encountered, or
# if an entry is missing
steady = False
if cv <= max_bmag_cv and ca_std[i] <= max_clock_angle_std:
# Steadiness conditions have been met
if steady:
imf_steady[i] = imf_steady[i-1]
imf_steady[i] += sample_rate
steady = True
inst['IMF_Steady'] = pds.Series(imf_steady, index=inst.data.index)
return |
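To illustrate what the rolling coefficient of variation responds to, here is a self-contained toy computation on synthetic 1-min data; the values, window, and min_periods are illustrative only and do not come from the routine above.

import numpy as np
import pandas as pd

# 60 minutes of synthetic BYZ magnitude: steady, then an abrupt drop
idx = pd.date_range('2010-01-01', periods=60, freq='1T')
byz = pd.Series(np.r_[np.full(30, 5.0), np.full(30, 1.0)], index=idx)

roll = byz.rolling(window=15, min_periods=12, center=True)
cv = roll.std() / roll.mean()
# cv stays near zero inside each steady interval and peaks across the jump
print(cv.max())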
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clear_access(self, white_list=None):
""" clear all ace entries of the share :param white_list: list of username whose access entry won't be cleared :return: sid list of ace entries removed successfully """ |
access_entries = self.get_ace_list()
sid_list = access_entries.sid_list
if white_list:
sid_white_list = [UnityAclUser.get_sid(self._cli,
user,
self.cifs_server.domain)
for user in white_list]
sid_list = list(set(sid_list) - set(sid_white_list))
resp = self.delete_ace(sid=sid_list)
resp.raise_if_err()
return sid_list |
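A minimal usage sketch, assuming a reachable Unity array; the address, credentials, share name, and white-listed user below are all hypothetical.

from storops import UnitySystem

unity = UnitySystem('10.0.0.1', 'admin', 'secret')  # hypothetical endpoint
share = unity.get_cifs_share(name='share1')         # hypothetical share
# remove every ACE except those of the users we want to keep
removed = share.clear_access(white_list=['administrator'])
print(removed)  # SIDs of the entries that were removed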
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete_ace(self, domain=None, user=None, sid=None):
""" delete ACE for the share delete ACE for the share. User could either supply the domain and username or the sid of the user. :param domain: domain of the user :param user: username :param sid: sid of the user or sid list of the user :return: REST API response """ |
if sid is None:
if domain is None:
domain = self.cifs_server.domain
sid = UnityAclUser.get_sid(self._cli, user=user, domain=domain)
if isinstance(sid, six.string_types):
sid = [sid]
ace_list = [self._make_remove_ace_entry(s) for s in sid]
resp = self.action("setACEs", cifsShareACEs=ace_list)
resp.raise_if_err()
return resp |
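Continuing the sketch above, either form of lookup works; the username and SIDs here are made up for illustration.

# by name: the domain falls back to the share's CIFS server domain
share.delete_ace(user='bob')

# or by SID, singly or as a list
share.delete_ace(sid=['S-1-5-21-1111111111-2222222222-3333333333-1001',
                      'S-1-5-21-1111111111-2222222222-3333333333-1002'])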
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def luns(self):
"""Aggregator for ioclass_luns and ioclass_snapshots.""" |
lun_list, smp_list = [], []
if self.ioclass_luns:
lun_list = map(lambda l: VNXLun(lun_id=l.lun_id, name=l.name,
cli=self._cli), self.ioclass_luns)
if self.ioclass_snapshots:
smp_list = map(lambda smp: VNXLun(name=smp.name, cli=self._cli),
self.ioclass_snapshots)
return list(lun_list) + list(smp_list) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def policy(self):
"""Returns policy which contains this ioclass.""" |
policies = VNXIOPolicy.get(cli=self._cli)
ret = None
for policy in policies:
contained = policy.ioclasses.name
if self._get_name() in contained:
ret = VNXIOPolicy.get(name=policy.name, cli=self._cli)
break
return ret |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def modify(self, new_name=None, iotype=None, lun_ids=None, smp_names=None, ctrlmethod=None, minsize=None, maxsize=None):
"""Overwrite the current properties for a VNX ioclass. :param new_name: new name for the ioclass :param iotype: can be 'rw', 'r' or 'w' :param lun_ids: list of LUN IDs :param smp_names: list of Snapshot Mount Point names :param ctrlmethod: the new CtrlMethod :param minsize: minimal size in kb :param maxsize: maximum size in kb """ |
if not any([new_name, iotype, lun_ids, smp_names, ctrlmethod,
            minsize, maxsize]):
    raise ValueError('Cannot apply modification, please specify '
                     'parameters to modify.')
def _do_modify():
out = self._cli.modify_ioclass(
self._get_name(), new_name, iotype, lun_ids, smp_names,
ctrlmethod, minsize, maxsize)
ex.raise_if_err(out, default=ex.VNXIOClassError)
try:
_do_modify()
except ex.VNXIOCLassRunningError:
with restart_policy(self.policy):
_do_modify()
return VNXIOClass(new_name if new_name else self._get_name(),
self._cli) |
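A minimal sketch of calling modify on a live class; the array address, credentials, and accessor names (get_ioclass in particular) are assumptions.

from storops import VNXSystem

vnx = VNXSystem('192.168.1.10', 'sysadmin', 'sysadmin')  # hypothetical array
ioclass = vnx.get_ioclass(name='gold')                   # hypothetical class
# restrict the class to reads on LUNs 1 and 2; if the owning policy is
# running, the wrapper restarts it around the change automatically
ioclass = ioclass.modify(iotype='r', lun_ids=[1, 2])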
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_lun(self, luns):
"""A wrapper for modify method. .. note:: This API only append luns to existing luns. """ |
curr_lun_ids, curr_smp_names = self._get_current_names()
luns = normalize_lun(luns, self._cli)
new_ids, new_smps = convert_lun(luns)
if new_ids:
curr_lun_ids.extend(new_ids)
if new_smps:
curr_smp_names.extend(new_smps)
return self.modify(lun_ids=curr_lun_ids, smp_names=curr_smp_names) |
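Continuing the VNX sketch above, appending members keeps the current ones in place; the LUN names are hypothetical.

lun = vnx.get_lun(name='lun_1')        # hypothetical LUN
smp = vnx.get_lun(name='smp_1')        # a snapshot mount point also works
ioclass = ioclass.add_lun([lun, smp])  # existing members are preserved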