Code | Summary
---|---|
Please provide a description of the function:def delete(self, id):
project = db.session.query(Project).filter_by(id=id).first()
if project is None:
response = jsonify({
'projects': None,
'message': 'No interface defined for URL.'
})
return response, 404
db.session.delete(project)
db.session.commit()
return jsonify({
'project': project.serialize
}) | [
"delete."
] |
Please provide a description of the function:def serialize(self):
arguments = []
if isinstance(json.loads(self.data), dict):
for k, v in json.loads(self.data).items():
arguments.append({
'resultId': self.result_id,
'key': k,
'value': str(v) if v is not None else None,
})
return arguments | [
"serialize."
] |
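A minimal sketch of the list this property is assumed to yield; the JSON payload and result id below are hypothetical.

    import json

    data = '{"lr": 0.01, "seed": null}'   # hypothetical content of self.data
    result_id = 7                          # hypothetical self.result_id
    args = [
        {'resultId': result_id, 'key': k, 'value': str(v) if v is not None else None}
        for k, v in json.loads(data).items()
    ]
    print(args)
    # [{'resultId': 7, 'key': 'lr', 'value': '0.01'},
    #  {'resultId': 7, 'key': 'seed', 'value': None}]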
Please provide a description of the function:def collect_assets(result, force=False):
path_name = result.path_name
info_path = os.path.join(path_name, summary.CHAINERUI_ASSETS_METAFILE_NAME)
if not os.path.isfile(info_path):
return
start_idx = len(result.assets)
file_modified_at = datetime.datetime.fromtimestamp(os.path.getmtime(
info_path))
if start_idx > 0:
if result.assets[-1].file_modified_at == file_modified_at:
return
with open(info_path, 'r') as f:
info_list = json.load(f, object_pairs_hook=OrderedDict)
if len(info_list) < start_idx:
start_idx = 0
result.assets = []
for base_info in info_list[start_idx:]:
asset_path = base_info.pop('images', {})
asset_path.update(base_info.pop('audios', {}))
asset = Asset.create(
result_id=result.id, summary=base_info,
file_modified_at=file_modified_at)
for key, path in asset_path.items():
with open(os.path.join(path_name, path), 'rb') as f:
data = f.read()
content = Bindata(
asset_id=asset.id, name=path, tag=key, content=data)
asset.content_list.append(content)
result.assets.append(asset)
db.session.commit() | [
"collect assets from meta file\n\n Collecting assets only when the metafile is updated. If number of assets\n are decreased, assets are reset and re-collect the assets.\n "
] |
Please provide a description of the function:def save_args(conditions, out_path):
if isinstance(conditions, argparse.Namespace):
args = vars(conditions)
else:
args = conditions
try:
os.makedirs(out_path)
except OSError:
pass
with tempdir(prefix='args', dir=out_path) as tempd:
path = os.path.join(tempd, 'args.json')
with open(path, 'w') as f:
json.dump(args, f, indent=4)
new_path = os.path.join(out_path, 'args')
shutil.move(path, new_path) | [
"A util function to save experiment condition for job table.\n\n Args:\n conditions (:class:`argparse.Namespace` or dict): Experiment conditions\n to show on a job table. Keys are show as table header and values\n are show at a job row.\n out_path (str): Output directory name to save conditions.\n\n "
] |
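A usage sketch for the helper above; the import path and the output directory name are assumptions about how the function is typically exposed and used.

    import argparse
    from chainerui.utils import save_args   # assumed import path

    parser = argparse.ArgumentParser()
    parser.add_argument('--batchsize', type=int, default=128)
    parser.add_argument('--lr', type=float, default=0.01)
    args = parser.parse_args([])

    # writes <out_path>/args as a JSON file that the job table can pick up
    save_args(args, 'result/')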
Please provide a description of the function:def _list_result_paths(target_path, log_file_name='log'):
result_list = []
for root, _dirs, _files in os.walk(os.path.abspath(target_path)):
for name in _files:
if name == log_file_name:
result_list.append(root)
return result_list | [
"list_result_paths."
] |
Please provide a description of the function:def collect_results(project, force=False):
if not project.crawlable:
return project
now = datetime.datetime.now()
if (now - project.updated_at).total_seconds() < 4 and (not force):
return project
result_paths = []
if os.path.isdir(project.path_name):
result_paths.extend(_list_result_paths(project.path_name))
registered_results = db.session.query(Result.path_name).filter_by(
project_id=project.id
).all()
registered_paths = {r.path_name for r in registered_results}
for result_path in result_paths:
if result_path not in registered_paths:
_register_result(project.id, result_path)
project.updated_at = datetime.datetime.now()
db.session.commit()
return project | [
"collect_results."
] |
Please provide a description of the function:def server_handler(args):
if not db.setup(url=args.db, echo=args.db_echo):
return
if not _check_db_revision():
return
app = create_app()
listener = '{:s}:{:d}'.format(args.host, args.port)
if args.debug:
logging.getLogger('werkzeug').disabled = True
set_loglevel(logging.DEBUG)
app.config['ENV'] = 'development'
app.debug = True
_show_banner_debug(app, listener)
from werkzeug.serving import run_simple
run_simple(
args.host, args.port, app, use_reloader=True, use_debugger=True,
threaded=True)
else:
app.config['ENV'] = 'production'
import gevent
from gevent.pywsgi import WSGIServer
http_server = WSGIServer(listener, application=app, log=None)
def stop_server():
if http_server.started:
http_server.stop()
gevent.signal(signal.SIGTERM, stop_server)
gevent.signal(signal.SIGINT, stop_server)
logger.info(' * Environment: {}'.format(app.config['ENV']))
logger.info(' * Running on http://{}/ (Press CTRL+C to quit)'.format(
listener))
try:
http_server.serve_forever()
except (KeyboardInterrupt, SystemExit):
stop_server() | [
"server_handler."
] |
Please provide a description of the function:def db_handler(args):
if args.type == 'create':
if args.db is None:
db.init_db()
return
if not db.setup(url=args.db, echo=args.db_echo):
return
if args.type == 'status':
current_rev = db_revision.current_db_revision()
print('The current DB schema version:', current_rev)
if args.type == 'upgrade':
db.upgrade()
if args.type == 'revision':
db_revision.new_revision()
if args.type == 'drop':
if args.db is not None:
db.downgrade()
db.remove_db() | [
"db_handler."
] |
Please provide a description of the function:def project_create_handler(args):
if not db.setup(url=args.db, echo=args.db_echo):
return
if not _check_db_revision():
return
project_path = os.path.abspath(args.project_dir)
project_name = args.project_name
project = db.session.query(Project).\
filter_by(path_name=project_path).first()
if project is None:
project = Project.create(project_path, project_name)
else:
print("Path '{}' has already registered.".format(project.path_name)) | [
"project_create_handler."
] |
Please provide a description of the function:def main():
parser = create_parser()
args = parser.parse_args()
if hasattr(args, 'handler'):
args.handler(args)
else:
parser.print_help() | [
"main."
] |
Please provide a description of the function:def get(self, id=None, project_id=None):
logs_limit = request.args.get('logs_limit', default=-1, type=int)
project = db.session.query(Project).filter_by(
id=project_id).first()
if project is None:
return jsonify({
'project': None,
'message': 'No interface defined for URL.'
}), 404
if id is None:
path = request.args.get('path_name', default=None)
if path is not None:
result = db.session.query(Result).filter_by(
path_name=path).first()
if result is None:
return jsonify({
'result': None,
'message': 'Result path \'%s\' is not found' % path
}), 400
return jsonify({'result': result.serialize})
collect_results(project)
results = db.session.query(Result).\
filter_by(project_id=project_id).\
filter_by(is_unregistered=False).\
all()
# NOTE: To improve performance, aggregate the commit phase. By setting
# `commit=False`, the implicit transaction is not closed and the UPDATE
# query is not committed. Consequently the serializing process does not
# have to issue a SELECT query again.
for result in results:
crawl_result(result, commit=False)
db.session.commit()
rs = [r.serialize_with_sampled_logs(logs_limit) for r in results]
return jsonify({'results': rs})
else:
result = db.session.query(Result).\
filter_by(id=id).\
filter_by(is_unregistered=False).\
first()
if result is None:
return jsonify({
'result': None,
'message': 'No interface defined for URL.'
}), 404
result = crawl_result(result)
return jsonify({
'result': result.serialize_with_sampled_logs(logs_limit)
}) | [
"get."
] |
Please provide a description of the function:def put(self, id, project_id=None):
result = db.session.query(Result).filter_by(id=id).first()
if result is None:
response = jsonify({
'result': None, 'message': 'No interface defined for URL.'
})
return response, 404
request_json = request.get_json()
request_result = request_json.get('result')
name = request_result.get('name', None)
if name is not None:
result.name = name
is_unregistered = request_result.get('isUnregistered', None)
if is_unregistered is not None:
result.is_unregistered = is_unregistered
db.session.add(result)
db.session.commit()
return jsonify({'result': result.serialize}) | [
"put."
] |
Please provide a description of the function:def delete(self, id, project_id=None):
result = db.session.query(Result).filter_by(id=id).first()
if result is None:
response = jsonify({
'result': None, 'message': 'No interface defined for URL.'
})
return response, 404
db.session.delete(result)
db.session.commit()
return jsonify({'result': result.serialize}) | [
"delete."
] |
Please provide a description of the function:def _path_insensitive(path):
path = str(path)
if path == '' or os.path.exists(path):
return path
base = os.path.basename(path) # may be a directory or a file
dirname = os.path.dirname(path)
suffix = ''
if not base: # dir ends with a slash?
if len(dirname) < len(path):
suffix = path[:len(path) - len(dirname)]
base = os.path.basename(dirname)
dirname = os.path.dirname(dirname)
if not os.path.exists(dirname):
dirname = _path_insensitive(dirname)
if not dirname:
return
# at this point, the directory exists but not the file
try: # we are expecting dirname to be a directory, but it could be a file
files = os.listdir(dirname)
except OSError:
return
baselow = base.lower()
try:
basefinal = next(fl for fl in files if fl.lower() == baselow)
except StopIteration:
return
if basefinal:
return os.path.join(dirname, basefinal) + suffix
else:
return | [
"\n Recursive part of path_insensitive to do the work.\n "
] |
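A small illustration of the helper above; the directory layout and paths are hypothetical and assume a case-sensitive filesystem.

    # hypothetical layout: /tmp/Data/Log.TXT exists, queried with the wrong case
    print(_path_insensitive('/tmp/data/log.txt'))      # -> '/tmp/Data/Log.TXT'
    print(_path_insensitive('/tmp/data/missing.txt'))  # -> None (no match in any case)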
Please provide a description of the function:def url_is_alive(url):
request = urllib.request.Request(url)
request.get_method = lambda: 'HEAD'
try:
urllib.request.urlopen(request)
return True
except urllib.request.HTTPError:
return False | [
"\n Checks that a given URL is reachable.\n :param url: A URL\n :rtype: bool\n "
] |
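A usage sketch; note that only HTTPError is caught above, so non-HTTP failures (for example a urllib.error.URLError raised on a DNS error) would propagate to the caller.

    if url_is_alive('https://example.com/'):
        print('URL responds to a HEAD request')
    else:
        print('URL returned an HTTP error status')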
Please provide a description of the function:def form_option(str_opt):
'''generate option name based suffix for URL
:param str_opt: opt name
:type str_opt: str
:return: URL suffix for the specified option
:rtype: str
'''
str_base = '#cmdoption-arg-'
str_opt_x = str_base+str_opt.lower()\
.replace('_', '-')\
.replace('(', '-')\
.replace(')', '')
return str_opt_x | [] |
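Two illustrative calls (the option names are just examples) showing how the URL suffix is normalised.

    print(form_option('SnowUse'))      # -> '#cmdoption-arg-snowuse'
    print(form_option('Tmin_(degC)'))  # -> '#cmdoption-arg-tmin--degc'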
Please provide a description of the function:def gen_url_option(
str_opt,
set_site=set_site,
set_runcontrol=set_runcontrol,
set_initcond=set_initcond,
source='docs'):
'''construct a URL for option based on source
:param str_opt: option name, defaults to ''
:type str_opt: str, optional
:param source: URL source: 'docs' for readthedocs.org; 'github' for github repo, defaults to 'docs'
:type source: str, optional
:return: a valid URL pointing to the option related resources
:rtype: urlpath.URL
'''
dict_base = {
'docs': URL('https://suews-docs.readthedocs.io/en/latest/input_files/'),
'github': URL('https://github.com/Urban-Meteorology-Reading/SUEWS-Docs/raw/master/docs/source/input_files/'),
}
url_base = dict_base[source]
url_page = choose_page(
str_opt, set_site, set_runcontrol, set_initcond, source=source)
# print('str_opt', str_opt, url_base, url_page)
str_opt_x = form_option(str_opt)
url_opt = url_base/(url_page+str_opt_x)
return url_opt | [] |
Please provide a description of the function:def gen_df_forcing(
path_csv_in='SSss_YYYY_data_tt.csv',
url_base=url_repo_input,)->pd.DataFrame:
'''Generate description info of supy forcing data into a dataframe
Parameters
----------
path_csv_in : str, optional
path to the input csv file relative to url_base (the default is 'SSss_YYYY_data_tt.csv')
url_base : urlpath.URL, optional
URL to the input files of repo base (the default is url_repo_input, which is defined at the top of this file)
Returns
-------
pd.DataFrame
Description info of supy forcing data
'''
try:
# load info from SUEWS docs repo
# this is regarded as the official source
urlpath_table = url_base/path_csv_in
df_var_info = pd.read_csv(urlpath_table)
except:
print(f'{urlpath_table} does not exist!')
else:
# clean info dataframe
df_var_forcing = df_var_info.drop(['No.', 'Use'], axis=1)
# set index with `Column name`
df_var_forcing = df_var_forcing.set_index('Column Name')
df_var_forcing.index = df_var_forcing.index\
.map(lambda x: x.replace('`', ''))\
.rename('variable')
# add `Second` info
df_var_forcing.loc['isec'] = 'Second [S]'
return df_var_forcing | [] |
Please provide a description of the function:def gen_df_output(
list_csv_in=[
'SSss_YYYY_SUEWS_TT.csv',
'SSss_DailyState.csv',
'SSss_YYYY_snow_TT.csv',
],
url_base=url_repo_output)->pd.DataFrame:
'''Generate description info of supy output results into dataframe
Parameters
----------
list_csv_in : list, optional
list of file names for csv files with meta info (the default is ['SSss_YYYY_SUEWS_TT.csv', 'SSss_DailyState.csv', 'SSss_YYYY_snow_TT.csv'])
url_base : [type], optional
URL to the output dir of repo base (the default is url_repo_output, which is defined at the top of this file)
Returns
-------
pd.DataFrame
Description info of supy output results
'''
# list of URLs
list_url_table = [
url_base/table for table in list_csv_in
]
try:
df_var_info = pd.concat(
[pd.read_csv(f) for f in list_url_table],
sort=False)
except:
for url in list_url_table:
if not url.get().ok:
print(f'{url} does not exist!')
else:
# clean meta info
df_var_info_x = df_var_info\
.set_index('Name')\
.loc[:, ['Description']]\
.drop_duplicates()
df_var_output = df_var_info_x\
.copy()\
.assign(lower=df_var_info_x.index.str.lower())\
.reset_index()\
.set_index('lower')
df_var_group = df_output_sample.columns.to_frame()
df_var_group.index = df_var_group.index.droplevel(0).rename('Name')
# wrap into a dataframe
df_var_output = df_var_group\
.merge(
df_var_output.set_index('Name'),
left_on='Name',
right_on='Name')\
.rename(columns={
'var': 'variable',
'group': 'Group',
})\
.set_index('variable')\
.drop_duplicates()
return df_var_output | [] |
Please provide a description of the function:def gen_opt_str(ser_rec: pd.Series)->str:
'''generate rst option string
Parameters
----------
ser_rec : pd.Series
record for specifications
Returns
-------
str
rst string
'''
name = ser_rec.name
indent = r' '
str_opt = f'.. option:: {name}'+'\n\n'
for spec in ser_rec.sort_index().index:
str_opt += indent+f':{spec}:'+'\n'
spec_content = ser_rec[spec]
str_opt += indent+indent+f'{spec_content}'+'\n'
return str_opt | [] |
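A minimal sketch with a hypothetical specification record; the index entries of the Series become the RST field names under the option directive.

    import pandas as pd

    # hypothetical option name and specification fields
    ser_rec = pd.Series(
        {'Description': 'Leaf area index', 'Requirement': 'Required'},
        name='LAI_obs')
    print(gen_opt_str(ser_rec))
    # emits roughly:
    # .. option:: LAI_obs
    #
    #     :Description:
    #         Leaf area index
    #     :Requirement:
    #         Required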
Please provide a description of the function:def plot_day_clm(df_var, fig=None, ax=None, **kwargs):
if fig is None and ax is None:
fig, ax = plt.subplots()
elif fig is None:
fig = ax.get_figure()
elif ax is None:
ax = fig.gca()
# plt.clf()
# group by hour and minute
grp_sdf_var = df_var.groupby(
[df_var.index.hour.rename('hr'),
df_var.index.minute.rename('min')])
# get index
idx = [pd.datetime(2014, 1, 1, h, m)
for h, m in sorted(grp_sdf_var.groups.keys())]
idx = pd.date_range(idx[0], idx[-1], periods=len(idx))
idx = mdates.date2num(idx)
# calculate quartiles
quar_sel_pos_clm = grp_sdf_var.quantile(
[.75, .5, .25]).unstack().set_index(idx)
# fig, ax = plt.subplots(1)
for var in quar_sel_pos_clm.columns.levels[0]:
df_x = quar_sel_pos_clm.loc[:, var]
y0 = df_x[0.5]
y1, y2 = df_x[0.75], df_x[0.25]
y0.plot(ax=ax, label=var).fill_between(
quar_sel_pos_clm.index, y1, y2, alpha=0.3)
# add legend
ax.legend(title='variable')
# adjust xtick format
ax.xaxis.set_major_locator(mdates.HourLocator(byhour=np.arange(0, 23, 3)))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
return fig, ax | [
"Short summary.\n\n Parameters\n ----------\n df_var : pd.DataFrame\n DataFrame containing variables to plot with datetime as index\n\n Returns\n -------\n MPL.figure\n figure showing median lines and IQR in shadings\n\n "
] |
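A usage sketch with synthetic data; in practice `df_plot` would be a slice of SuPy output, and the variable names and values here are purely hypothetical.

    import numpy as np
    import pandas as pd

    # synthetic example: two weeks of hourly data for two hypothetical variables
    idx = pd.date_range('2014-01-01', periods=24 * 14, freq='h')
    df_plot = pd.DataFrame({
        'QE': np.sin(np.linspace(0, 14 * 2 * np.pi, idx.size)) + np.random.rand(idx.size),
        'QH': np.cos(np.linspace(0, 14 * 2 * np.pi, idx.size)) + np.random.rand(idx.size),
    }, index=idx)
    fig, ax = plot_day_clm(df_plot)
    ax.set_ylabel('Flux')
    fig.savefig('diurnal_climatology.png')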
Please provide a description of the function:def plot_comp(df_var, fig=None, ax=None, **kwargs):
if fig is None and ax is None:
fig, ax = plt.subplots()
elif fig is None:
fig = ax.get_figure()
elif ax is None:
ax = fig.gca()
# plt.clf()
# plt.cla()
# ax = sns.regplot(
# x='Obs', y='Sim',
# data=df_var,
# fit_reg=True)
# add regression expression
df_var_fit = df_var.dropna(how='any')
# regr = linear_model.LinearRegression()
# val_x = df_var_fit['Obs'].values.reshape(-1, 1)
# val_y = df_var_fit['Sim'].values.reshape(-1, 1)
# regr.fit(val_x, val_y)
val_x = df_var_fit['Obs']
val_y = df_var_fit['Sim']
slope, intercept, r_value, p_value, std_err = stats.linregress(
val_x, val_y)
mae = (val_y - val_x).abs().mean()
sns.regplot(
x='Obs', y='Sim',
data=df_var,
ax=ax,
fit_reg=True,
line_kws={
'label': "y={0:.2f}x{1}{2:.2f}".format(slope, '+' if intercept>0 else '', intercept) +
'\n' + '$R^2$={0:.4f}'.format(r_value**2) +
'\n' + 'MAE={0:.2f}'.format(mae) +
'\n' + 'n={}'.format(df_var.shape[0])
},
**kwargs
)
# ax.plot(val_x, y_pred, color='red', linewidth=2,
# label='r2= ' + str("%.3f" % r2) + '\n' +
# 'y=' + str("%.3f" % a[0][0]) + 'x+' + str("%.2f" % b[0]))
# ax.legend(fontsize=15)
ax.legend()
# ax.set_title(var + '_' + title)
# set equal plotting range
x0, x1 = ax.get_xlim()
y0, y1 = ax.get_ylim()
lim_low, lim_high = np.min([x0, y0]), np.max([x1, y1])
ax.set_xlim(lim_low, lim_high)
ax.set_ylim(lim_low, lim_high)
# set 1:1 aspect ratio
ax.set_aspect('equal')
# add 1:1 line
ax.plot([lim_low, lim_high], [lim_low, lim_high],
color='red', linewidth=1, zorder=0)
# fig = ax.figure
return fig, ax | [
"Short summary.\n\n Parameters\n ----------\n df_var : pd.DataFrame\n DataFrame containing variables to plot with datetime as index\n\n Returns\n -------\n MPL.figure\n figure showing 1:1 line plot\n\n "
] |
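A usage sketch with hypothetical observation/simulation pairs; the frame must carry columns named 'Obs' and 'Sim'.

    import pandas as pd

    # hypothetical observation/simulation pairs
    df_val = pd.DataFrame({
        'Obs': [1.0, 2.1, 2.9, 4.2, 5.1],
        'Sim': [1.2, 1.9, 3.1, 4.0, 5.3],
    })
    fig, ax = plot_comp(df_val)
    fig.savefig('obs_vs_sim.png')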
Please provide a description of the function:def init_supy(path_init: str)->pd.DataFrame:
'''Initialise supy by loading initial model states.
Parameters
----------
path_init : str
Path to a file that can initialise SuPy, which can be either of the follows:
* SUEWS :ref:`RunControl.nml<suews:RunControl.nml>`: a namelist file for SUEWS configurations
* SuPy `df_state.csv`: a CSV file including model states produced by a SuPy run via :py:func:`supy.save_supy`
Returns
-------
df_state_init: pandas.DataFrame
Initial model states.
See `df_state_var` for details.
Examples
--------
1. Use :ref:`RunControl.nml<suews:RunControl.nml>` to initialise SuPy
>>> path_init = "~/SUEWS_sims/RunControl.nml"
>>> df_state_init = supy.init_supy(path_init)
2. Use ``df_state.csv`` to initialise SuPy
>>> path_init = "~/SuPy_res/df_state_test.csv"
>>> df_state_init = supy.init_supy(path_init)
'''
try:
path_init_x = Path(path_init).expanduser().resolve()
except FileNotFoundError:
print('{path} does not exist!'.format(path=path_init))
else:
if path_init_x.suffix == '.nml':
# SUEWS `RunControl.nml`:
df_state_init = load_InitialCond_grid_df(path_init_x)
elif path_init_x.suffix == '.csv':
# SuPy `df_state.csv`:
df_state_init = load_df_state(path_init_x)
else:
print('{path} is NOT a valid file to initialise SuPy!'.format(
path=path_init_x))
sys.exit()
return df_state_init | [] |
Please provide a description of the function:def load_forcing_grid(path_runcontrol: str, grid: int)->pd.DataFrame:
'''Load forcing data for a specific grid included in the index of `df_state_init </data-structure/supy-io.ipynb#df_state_init:-model-initial-states>`.
Parameters
----------
path_runcontrol : str
Path to SUEWS :ref:`RunControl.nml <suews:RunControl.nml>`
grid : int
Grid number
Returns
-------
df_forcing: pandas.DataFrame
Forcing data. See `df_forcing_var` for details.
Examples
--------
>>> path_runcontrol = "~/SUEWS_sims/RunControl.nml" # a valid path to `RunControl.nml`
>>> df_state_init = supy.init_supy(path_runcontrol) # get `df_state_init`
>>> grid = df_state_init.index[0] # first grid number included in `df_state_init`
>>> df_forcing = supy.load_forcing_grid(path_runcontrol, grid) # get df_forcing
'''
try:
path_runcontrol = Path(path_runcontrol).expanduser().resolve()
except FileNotFoundError:
print('{path} does not exist!'.format(path=path_runcontrol))
else:
dict_mod_cfg = load_SUEWS_dict_ModConfig(path_runcontrol)
df_state_init = init_supy(path_runcontrol)
# load setting variables from dict_mod_cfg
(
filecode,
kdownzen,
tstep_met_in,
tstep_ESTM_in,
multiplemetfiles,
multipleestmfiles,
dir_input_cfg
) = (dict_mod_cfg[x] for x in
[
'filecode',
'kdownzen',
'resolutionfilesin',
'resolutionfilesinestm',
'multiplemetfiles',
'multipleestmfiles',
'fileinputpath'
]
)
tstep_mod, lat, lon, alt, timezone = df_state_init.loc[
grid,
[(x, '0') for x in ['tstep', 'lat', 'lng', 'alt', 'timezone']]
].values
path_site = path_runcontrol.parent
path_input = path_site / dict_mod_cfg['fileinputpath']
# load raw data
# met forcing
df_forcing_met = load_SUEWS_Forcing_met_df_raw(
path_input, filecode, grid, tstep_met_in, multiplemetfiles)
# resample raw data from tstep_in to tstep_mod
df_forcing_met_tstep = resample_forcing_met(
df_forcing_met, tstep_met_in, tstep_mod,
lat, lon, alt, timezone, kdownzen)
# merge forcing datasets (met and ESTM)
df_forcing_tstep = df_forcing_met_tstep.copy()
# disable the AnOHM and ESTM components for now and for better performance
# |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
# TS 28 Dec 2018
# pack all records of `id` into `metforcingdata_grid` for AnOHM
# df_grp = df_forcing_tstep.groupby('id')
# dict_id_all = {xid: df_grp.get_group(xid)
# for xid in df_forcing_tstep['id'].unique()}
# id_all = df_forcing_tstep['id'].apply(lambda xid: dict_id_all[xid])
# df_forcing_tstep = df_forcing_tstep.merge(
# id_all.to_frame(name='metforcingdata_grid'),
# left_index=True,
# right_index=True)
# # add Ts forcing for ESTM
# if np.asscalar(df_state_init.iloc[0]['storageheatmethod'].values) == 4:
# # load ESTM forcing
# df_forcing_estm = load_SUEWS_Forcing_ESTM_df_raw(
# path_input, filecode, grid, tstep_ESTM_in, multipleestmfiles)
# # resample raw data from tstep_in to tstep_mod
# df_forcing_estm_tstep = resample_linear(
# df_forcing_estm, tstep_met_in, tstep_mod)
# df_forcing_tstep = df_forcing_tstep.merge(
# df_forcing_estm_tstep,
# left_on=['iy', 'id', 'it', 'imin'],
# right_on=['iy', 'id', 'it', 'imin'])
# # insert `ts5mindata_ir` into df_forcing_tstep
# ts_col = df_forcing_estm.columns[4:]
# df_forcing_tstep['ts5mindata_ir'] = (
# df_forcing_tstep.loc[:, ts_col].values.tolist())
# df_forcing_tstep['ts5mindata_ir'] = df_forcing_tstep[
# 'ts5mindata_ir'].map(lambda x: np.array(x, order='F'))
# else:
# # insert some placeholder values
# df_forcing_tstep['ts5mindata_ir'] = df_forcing_tstep['Tair']
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# disable the AnOHM and ESTM components for now and for better performance
# coerced precision here to prevent numerical errors inside Fortran
df_forcing = np.around(df_forcing_tstep, decimals=10)
# new columns for later use in main calculation
df_forcing[['iy', 'id', 'it', 'imin']] = df_forcing[[
'iy', 'id', 'it', 'imin']].astype(np.int64)
return df_forcing | [] |
Please provide a description of the function:def load_SampleData()->Tuple[pandas.DataFrame, pandas.DataFrame]:
'''Load sample data for quickly starting a demo run.
Returns
-------
df_state_init, df_forcing: Tuple[pandas.DataFrame, pandas.DataFrame]
- df_state_init: `initial model states <df_state_var>`
- df_forcing: `forcing data <df_forcing_var>`
Examples
--------
>>> df_state_init, df_forcing = supy.load_SampleData()
'''
path_SampleData = Path(path_supy_module) / 'sample_run'
path_runcontrol = path_SampleData / 'RunControl.nml'
df_state_init = init_supy(path_runcontrol)
# path_input = path_runcontrol.parent / ser_mod_cfg['fileinputpath']
df_forcing = load_forcing_grid(
path_runcontrol,
df_state_init.index[0]
)
return df_state_init, df_forcing | [] |
Please provide a description of the function:def run_supy(
df_forcing: pandas.DataFrame,
df_state_init: pandas.DataFrame,
save_state=False,
n_yr=10,
)->Tuple[pandas.DataFrame, pandas.DataFrame]:
'''Perform supy simulation.
Parameters
----------
df_forcing : pandas.DataFrame
forcing data for all grids in `df_state_init`.
df_state_init : pandas.DataFrame
initial model states;
or a collection of model states with multiple timestamps, whose last temporal record will be used as the initial model states.
save_state : bool, optional
flag for saving model states at each time step, which can be useful in diagnosing model runtime performance or performing a restart run.
(the default is False, which instructs supy not to save runtime model states).
n_yr : int, optional
chunk size (`n_yr` years) to split simulation periods so memory usage can be reduced.
(the default is 10, which implies 10-year forcing chunks used in simulations).
Returns
-------
df_output, df_state_final : Tuple[pandas.DataFrame, pandas.DataFrame]
- df_output: `output results <df_output_var>`
- df_state_final: `final model states <df_state_var>`
Examples
--------
>>> df_output, df_state_final = supy.run_supy(df_forcing, df_state_init)
'''
# save df_init without changing its original data
# df.copy() in pandas does work as a standard python deepcopy
df_init = df_state_init.copy()
# retrieve the last temporal record as `df_init`
# if a `datetime` level existing in the index
if df_init.index.nlevels > 1:
idx_dt = df_init.index.get_level_values('datetime').unique()
dt_last = idx_dt.max()
df_init = df_init.loc[dt_last]
# add placeholder variables for df_forcing
# `metforcingdata_grid` and `ts5mindata_ir` are used by AnOHM and ESTM, respectively
# they are now temporarily disabled in supy
df_forcing = df_forcing\
.assign(
metforcingdata_grid=0,
ts5mindata_ir=0,
)\
.rename(
# rename is a workaround to resolve naming inconsistency between
# suews fortran code interface and input forcing file headers
columns={
'%' + 'iy': 'iy',
'id': 'id',
'it': 'it',
'imin': 'imin',
'qn': 'qn1_obs',
'qh': 'qh_obs',
'qe': 'qe',
'qs': 'qs_obs',
'qf': 'qf_obs',
'U': 'avu1',
'RH': 'avrh',
'Tair': 'temp_c',
'pres': 'press_hpa',
'rain': 'precip',
'kdown': 'avkdn',
'snow': 'snow_obs',
'ldown': 'ldown_obs',
'fcld': 'fcld_obs',
'Wuh': 'wu_m3',
'xsmd': 'xsmd',
'lai': 'lai_obs',
'kdiff': 'kdiff',
'kdir': 'kdir',
'wdir': 'wdir',
}
)
# grid list determined by initial states
list_grid = df_init.index
# initialise dicts for holding results and model states
dict_state = {}
dict_output = {}
# initial and final tsteps retrieved from forcing data
tstep_init = df_forcing.index[0]
tstep_final = df_forcing.index[-1]
# tstep size retrieved from forcing data
freq = df_forcing.index.freq
# dict_state is used to save model states for later use
dict_state = {
# (t_start, grid): series_state_init.to_dict()
(tstep_init, grid): pack_grid_dict(series_state_init)
for grid, series_state_init
in df_init.iterrows()
}
# remove 'problems.txt'
if Path('problems.txt').exists():
os.remove('problems.txt')
if save_state:
# use slower more functional single step wrapper
# convert df to dict with `itertuples` for better performance
dict_forcing = {row.Index: row._asdict()
for row in df_forcing.itertuples()}
for tstep in df_forcing.index:
# temporal loop
# initialise output of tstep:
# load met_forcing if the same across all grids:
met_forcing_tstep = dict_forcing[tstep]
# spatial loop
for grid in list_grid:
dict_state_start = dict_state[(tstep, grid)]
# calculation at one step:
# series_state_end, series_output_tstep = suews_cal_tstep_df(
# series_state_start, met_forcing_tstep)
dict_state_end, dict_output_tstep = suews_cal_tstep(
dict_state_start, met_forcing_tstep)
# update output & model state at tstep for the current grid
dict_output.update({(tstep, grid): dict_output_tstep})
dict_state.update({(tstep + 1*freq, grid): dict_state_end})
# pack results as easier DataFrames
df_output = pack_df_output(dict_output).swaplevel(0, 1)
# drop unnecessary 'datetime' as it is already included in the index
df_output = df_output.drop(columns=['datetime'], level=0)
df_state_final = pack_df_state(dict_state).swaplevel(0, 1)
else:
# for multi-year run, reduce the whole df_forcing into {n_yr}-year chunks for less memory consumption
grp_forcing_yr = df_forcing.groupby(df_forcing.index.year // n_yr)
if len(grp_forcing_yr) > 1:
df_state_init_yr = df_state_init.copy()
list_df_output = []
list_df_state = []
for grp in grp_forcing_yr.groups:
# get forcing of a specific year
df_forcing_yr = grp_forcing_yr.get_group(grp)
# run supy: actual execution done in the `else` clause below
df_output_yr, df_state_final_yr = run_supy(
df_forcing_yr, df_state_init_yr)
df_state_init_yr = df_state_final_yr.copy()
# collect results
list_df_output.append(df_output_yr)
list_df_state.append(df_state_final_yr)
# re-organise results of each year
df_output = pd.concat(list_df_output).sort_index()
df_state_final = pd.concat(
list_df_state).sort_index().drop_duplicates()
return df_output, df_state_final
else:
# for single-chunk run (1 chunk = {n_yr} years), directly put df_forcing into supy_driver for calculation
# use higher level wrapper that calculate at a `block` level
# for better performance
# for grid in list_grid:
# dict_state_start_grid = dict_state[(tstep_init, grid)]
# dict_state_end, dict_output_array = suews_cal_tstep_multi(
# dict_state_start_grid,
# df_forcing)
# # update output & model state at tstep for the current grid
# dict_output.update({grid: dict_output_array})
# # model state for the next run
# dict_state.update({(tstep_final + freq, grid): dict_state_end})
# # parallel run of grid_list for better efficiency
# if os.name == 'nt':
# if __name__ == '__main__':
# p = Pool(min([len(list_grid), cpu_count()]))
# else:
# p = Pool(min([len(list_grid), cpu_count()]))
# # construct input list for `Pool.starmap`
# construct input list for `dask.bag`
list_input = [
# (dict_state[(tstep_init, grid)], df_forcing)
dict_state[(tstep_init, grid)]
for grid in list_grid
]
# on windows `processes` has issues when importing
# so set `threads` here
method_parallel = 'threads' if os.name == 'nt' else 'processes'
list_res = db.from_sequence(list_input)\
.map(suews_cal_tstep_multi, df_forcing)\
.compute(scheduler=method_parallel)
list_state_end, list_output_array = zip(*list_res)
# collect output arrays
dict_output = {
grid: dict_output_array
for grid, dict_output_array in zip(list_grid, list_output_array)
}
# collect final states
dict_state_final_tstep = {
(tstep_final + freq, grid): dict_state_end
for grid, dict_state_end in zip(list_grid, list_state_end)
}
dict_state.update(dict_state_final_tstep)
# save results as time-aware DataFrame
df_output0 = pack_df_output_array(dict_output, df_forcing)
df_output = df_output0.replace(-999., np.nan)
df_state_final = pack_df_state(dict_state).swaplevel(0, 1)
# drop ESTM for now as it is not supported yet
# select only those supported output groups
df_output = df_output.loc[:, ['SUEWS', 'snow', 'DailyState']]
# trim multiindex based columns
df_output.columns = df_output.columns.remove_unused_levels()
# pack final model states into a proper dataframe
df_state_final = pack_df_state_final(df_state_final, df_init)
return df_output, df_state_final | [] |
Please provide a description of the function:def save_supy(
df_output: pandas.DataFrame,
df_state_final: pandas.DataFrame,
freq_s: int = 3600,
site: str = '',
path_dir_save: str = Path('.'),
path_runcontrol: str = None,)->list:
'''Save SuPy run results to files
Parameters
----------
df_output : pandas.DataFrame
DataFrame of output
df_state_final : pandas.DataFrame
DataFrame of final model states
freq_s : int, optional
Output frequency in seconds (the default is 3600, which indicates hourly output)
site : str, optional
Site identifier (the default is '', which indicates site identifier will be left empty)
path_dir_save : str, optional
Path to directory for saving the files (the default is Path('.'), which indicates the current working directory)
path_runcontrol : str, optional
Path to SUEWS :ref:`RunControl.nml <suews:RunControl.nml>`, which, if set, will be preferably used to derive `freq_s`, `site` and `path_dir_save`.
(the default is None, which is unset)
Returns
-------
list
a list of paths of saved files
Examples
--------
1. save results of a supy run to the current working directory with default settings
>>> list_path_save = supy.save_supy(df_output, df_state_final)
2. save results according to settings in :ref:`RunControl.nml <suews:RunControl.nml>`
>>> list_path_save = supy.save_supy(df_output, df_state_final, path_runcontrol='path/to/RunControl.nml')
3. save results of a supy run at resampling frequency of 1800 s (i.e., half-hourly results) under the site code ``Test`` to a customised location 'path/to/some/dir'
>>> list_path_save = supy.save_supy(df_output, df_state_final, freq_s=1800, site='Test', path_dir_save='path/to/some/dir')
'''
# get necessary information for saving procedure
if path_runcontrol is not None:
freq_s, path_dir_save, site = get_save_info(path_runcontrol)
# save df_output to several files
list_path_save = save_df_output(df_output, freq_s, site, path_dir_save)
# save df_state
path_state_save = save_df_state(df_state_final, site, path_dir_save)
# update list_path_save
list_path_save.append(path_state_save)
return list_path_save | [] |
Please provide a description of the function:def load_SUEWS_Forcing_met_df_pattern(path_input, forcingfile_met_pattern):
# list of met forcing files
path_input = path_input.resolve()
# forcingfile_met_pattern = os.path.abspath(forcingfile_met_pattern)
list_file_MetForcing = sorted([
f for f in path_input.glob(forcingfile_met_pattern)
if 'ESTM' not in f.name])
# print(forcingfile_met_pattern)
# print(list_file_MetForcing)
# load raw data
# read in forcing with dask.dataframe in parallel
dd_forcing_met = dd.read_csv(
list_file_MetForcing,
delim_whitespace=True,
comment='!',
error_bad_lines=True
)
# convert to normal pandas dataframe
df_forcing_met = dd_forcing_met.compute()
# `drop_duplicates` in case some duplicates mixed
df_forcing_met = df_forcing_met.drop_duplicates()
col_suews_met_forcing = [
'iy', 'id', 'it', 'imin',
'qn', 'qh', 'qe', 'qs', 'qf',
'U', 'RH', 'Tair', 'pres', 'rain', 'kdown',
'snow', 'ldown', 'fcld',
'Wuh', 'xsmd', 'lai', 'kdiff', 'kdir', 'wdir'
]
# rename these columns to match variables via the driver interface
df_forcing_met.columns = col_suews_met_forcing
# convert unit from kPa to hPa
df_forcing_met['pres'] *= 10
# add `isec` for WRF-SUEWS interface
df_forcing_met['isec'] = 0
# set correct data types
df_forcing_met[['iy', 'id', 'it', 'imin', 'isec']] = df_forcing_met[[
'iy', 'id', 'it', 'imin', 'isec']].astype(np.int64)
# set timestamp as index
idx_dt = pd.date_range(
*df_forcing_met.iloc[[0, -1], :4].astype(int).astype(str).apply(
lambda ser: ser.str.cat(sep=' '), axis=1).map(
lambda dt: pd.Timestamp.strptime(dt, '%Y %j %H %M')),
periods=df_forcing_met.shape[0])
df_forcing_met = df_forcing_met.set_index(idx_dt)
return df_forcing_met | [
"Short summary.\n\n Parameters\n ----------\n forcingfile_met_pattern : type\n Description of parameter `forcingfile_met_pattern`.\n\n Returns\n -------\n type\n Description of returned object.\n\n "
] |
Please provide a description of the function:def load_df_state(path_csv: Path)->pd.DataFrame:
'''load `df_state` from `path_csv`
Parameters
----------
path_csv : Path
path to the csv file that stores `df_state` produced by a supy run
Returns
-------
pd.DataFrame
`df_state` produced by a supy run
'''
df_state = pd.read_csv(
path_csv,
header=[0, 1],
index_col=[0, 1],
parse_dates=True,
infer_datetime_format=True,
)
return df_state | [] |
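A round-trip sketch: a `df_state` csv written by `save_supy`/`save_df_state` can be read back and reused as the initial state of a follow-up run; the file name here is hypothetical.

    # 'df_state_test.csv' is a hypothetical file produced by an earlier run
    df_state_init = load_df_state('df_state_test.csv')
    print(df_state_init.shape)
    # df_output, df_state_final = run_supy(df_forcing, df_state_init)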
Please provide a description of the function:def extract_var_suews(dict_var_full: dict, var_supy: str)->list:
'''extract related SUEWS variables for a supy variable `var_supy`
Parameters
----------
dict_var_full : dict
dict_var_full = sp.supy_load.exp_dict_full(sp.supy_load.dict_var2SiteSelect)
var_supy : str
supy variable name
Returns
-------
list
related SUEWS variables for `var_supy`
'''
x = sp.supy_load.flatten_list(dict_var_full[var_supy])
x = np.unique(x)
x = [
xx for xx in x
if xx not in ['base', 'const', '0.0'] + [str(x) for x in range(24)]
]
x = [xx for xx in x if 'Code' not in xx]
return x | [] |
Please provide a description of the function:def gen_df_site(
list_csv_in=list_table,
url_base=url_repo_input_site)->pd.DataFrame:
'''Generate description info of supy site characteristics variables as a dataframe
Parameters
----------
list_csv_in : list, optional
list of file names for csv files with meta info (the default is list_table, which is defined at the top of this file)
url_base : URL, optional
URL to the site-input dir of repo base (the default is url_repo_input_site, which is defined at the top of this file)
Returns
-------
pd.DataFrame
Description info of supy site characteristics variables
'''
# list of URLs
list_url_table = [
url_base/table for table in list_csv_in
]
try:
df_var_info = pd.concat([pd.read_csv(f) for f in list_url_table])
# df_var_info = pd.concat(
# [pd.read_csv(f) for f in list_url_table],
# sort=False)
except:
for url in list_url_table:
if not url.get().ok:
print(f'{url} does not exist!')
else:
# clean meta info
df_var_info_x = df_var_info\
.drop(['No.', 'Use'], axis=1)\
.set_index('Column Name')
df_var_info_x.index = df_var_info_x.index.map(
lambda x: x.replace('`', ''))
# retrieve SUEWS-related variables
dict_var_full = sp.supy_load.exp_dict_full(
sp.supy_load.dict_var2SiteSelect)
dict_var_ref_suews = {
k: extract_var_suews(dict_var_full, k)
for k in dict_var_full
}
df_var_ref_suews = pd.DataFrame(
{k: ', '.join(dict_var_ref_suews[k])
for k in dict_var_ref_suews},
index=[0]).T.rename({
0: 'SUEWS-related variables'
}, axis=1)
# retrive supy variable description
dict_var_desc = {
k: '\n'.join(df_var_info_x.loc[v].values.flatten())
for k, v in dict_var_ref_suews.items()
}
df_var_desc = pd.DataFrame(dict_var_desc, index=[0]).T\
.rename(columns={0: 'Description'})
# retrieve variable dimensionality
df_var_dim = gen_df_dim(df_init_sample)
df_var_site_raw = pd.concat(
[df_var_dim, df_var_desc, df_var_ref_suews],
axis=1, sort=False)
df_var_site = df_var_site_raw.filter(items=set_input, axis=0).dropna()
return df_var_site | [] |
Please provide a description of the function:def gen_rst_url_split_opts(opts_str):
if opts_str != 'None':
list_opts = opts_str.split(',')
# list_rst = [gen_rst_url_opt(opt.strip()) for opt in list_opts]
list_rst = [opt.strip() for opt in list_opts]
# list_rst = [f'`{opt}`' for opt in list_rst]
# more properly handle SUEWS options by explicitly adding prefix `suews`:
list_rst = [f':option:`{opt} <suews:{opt}>`' for opt in list_rst]
list_url_rst = ', '.join(list_rst)
else:
list_url_rst = 'None'
return list_url_rst | [
"generate option list for RST docs\n\n Parameters\n ----------\n opts_str : str\n a string including all SUEWS related options/variables.\n e.g. 'SUEWS_a, SUEWS_b'\n\n\n Returns\n -------\n list\n a list of parsed RST `:ref:` roles.\n e.g. [':option:`SUEWS_a <suews:SUEWS_a>`']\n "
] |
Please provide a description of the function:def gen_df_state(
list_table: list,
set_initcond: set,
set_runcontrol: set,
set_input_runcontrol: set)->pd.DataFrame:
'''generate dataframe of all state variables used by supy
Parameters
----------
list_table : list
csv files for site info: `SUEWS_xx.csv` on github SUEWS-docs repo
set_initcond : set
initial condition related variables
set_runcontrol : set
runcontrol related variables
set_input_runcontrol : set
runcontrol related variables used as supy input
Returns
-------
pd.DataFrame
Description of all state variables used by supy
'''
# generate a base df for site characteristics related variables
df_var_site = gen_df_site(list_table)
# generate a base df for runcontrol related variables
df_var_runcontrol = gen_df_runcontrol(
set_initcond, set_runcontrol, set_input_runcontrol)
# generate a base df for initial condition related variables
df_var_initcond = gen_df_initcond(set_initcond, set_runcontrol)
# further processing by modifying several entries
df_var_state = proc_df_state(
df_var_site, df_var_runcontrol, df_var_initcond)
# reorganising the result:
df_var_state = df_var_state.sort_index()
# delete duplicates while considering the variable name (stored as index)
df_var_state = df_var_state.reset_index()
df_var_state = df_var_state.drop_duplicates()
# convert index back
df_var_state = df_var_state.set_index('variable')
return df_var_state | [] |
Please provide a description of the function:def gen_df_save(df_grid_group: pd.DataFrame)->pd.DataFrame:
'''generate a dataframe for saving
Parameters
----------
df_output_grid_group : pd.DataFrame
an output dataframe of a single group and grid
Returns
-------
pd.DataFrame
a dataframe with date time info prepended for saving
'''
# generate df_datetime for prepending
idx_dt = df_grid_group.index
ser_year = pd.Series(idx_dt.year, index=idx_dt, name='Year')
ser_DOY = pd.Series(idx_dt.dayofyear, index=idx_dt, name='DOY')
ser_hour = pd.Series(idx_dt.hour, index=idx_dt, name='Hour')
ser_min = pd.Series(idx_dt.minute, index=idx_dt, name='Min')
df_datetime = pd.concat([
ser_year,
ser_DOY,
ser_hour,
ser_min,
], axis=1)
df_datetime['Dectime'] = ser_DOY-1+idx_dt.to_perioddelta(
'd').total_seconds()/(24*60*60)
df_save = pd.concat([df_datetime, df_grid_group], axis=1)
return df_save | [] |
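A minimal sketch with a synthetic hourly frame, assuming a pandas version where `DatetimeIndex.to_perioddelta` is still available; gen_df_save prepends Year/DOY/Hour/Min plus a decimal-time column derived from the DatetimeIndex.

    import pandas as pd

    # synthetic single-variable frame indexed by datetime
    idx = pd.date_range('2019-01-01', periods=3, freq='h')
    df_grid_group = pd.DataFrame({'T2': [1.0, 1.5, 2.0]}, index=idx)
    print(gen_df_save(df_grid_group).round(4))
    #                      Year  DOY  Hour  Min  Dectime   T2
    # 2019-01-01 00:00:00  2019    1     0    0   0.0000  1.0
    # 2019-01-01 01:00:00  2019    1     1    0   0.0417  1.5
    # 2019-01-01 02:00:00  2019    1     2    0   0.0833  2.0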
Please provide a description of the function:def save_df_output(
df_output: pd.DataFrame,
freq_s: int = 3600,
site: str = '',
path_dir_save: Path = Path('.'),)->list:
'''save supy output dataframe to txt files
Parameters
----------
df_output : pd.DataFrame
output dataframe of supy simulation
freq_s : int, optional
output frequency in seconds (the default is 3600, which indicates a txt file with hourly values)
path_dir_save : Path, optional
directory to save txt files (the default is '.', which is the current working directory)
site : str, optional
site code used for filename (the default is '', which indicates no site name prepended to the filename)
path_runcontrol : str or anything that can be parsed as `Path`, optional
path to SUEWS 'RunControl.nml' file (the default is None, which indicates necessary saving options should be specified via other parameters)
Returns
-------
list
a list of `Path` objects for saved txt files
'''
list_path_save = []
list_group = df_output.columns.get_level_values('group').unique()
list_grid = df_output.index.get_level_values('grid').unique()
for grid in list_grid:
for group in list_group:
df_output_grid_group = df_output\
.loc[grid, group]\
.dropna(how='all', axis=0)
# save output at the runtime frequency (usually 5 min)
# 'DailyState' group will be saved at a daily frequency
path_save = save_df_grid_group(
df_output_grid_group, grid, group,
site=site, dir_save=path_dir_save)
list_path_save.append(path_save)
# resample output if freq_s is different from runtime freq (usually 5 min)
freq_save = pd.Timedelta(freq_s, 's')
# resample `df_output` at `freq_save`
df_rsmp = resample_output(df_output, freq_save)
# 'DailyState' group will be dropped in `resample_output` as resampling is not needed
df_rsmp = df_rsmp.drop(columns='DailyState')
list_group = df_rsmp.columns.get_level_values('group').unique()
list_grid = df_rsmp.index.get_level_values('grid').unique()
# save output at the resampling frequency
for grid in list_grid:
for group in list_group:
df_output_grid_group = df_rsmp.loc[grid, group]
path_save = save_df_grid_group(
df_output_grid_group, grid, group,
site=site, dir_save=path_dir_save)
list_path_save.append(path_save)
return list_path_save | [] |
Please provide a description of the function:def save_df_state(
df_state: pd.DataFrame,
site: str = '',
path_dir_save: Path = Path('.'),)->Path:
'''save `df_state` to a csv file
Parameters
----------
df_state : pd.DataFrame
a dataframe of model states produced by a supy run
site : str, optional
site identifier (the default is '', which indicates an empty site code)
path_dir_save : Path, optional
path to directory to save results (the default is Path('.'), which is the current working directory)
Returns
-------
Path
path to the saved csv file
'''
file_state_save = 'df_state_{site}.csv'.format(site=site)
# trim filename if site == ''
file_state_save = file_state_save.replace('_.csv', '.csv')
path_state_save = path_dir_save/file_state_save
print('writing out: {path_out}'.format(path_out=path_state_save))
df_state.to_csv(path_state_save)
return path_state_save | [] |
Please provide a description of the function:def get_save_info(path_runcontrol: str)->Tuple[int, Path, str]:
'''get necessary information for saving supy results, which are (freq_s, dir_save, site)
Parameters
----------
path_runcontrol : Path
Path to SUEWS :ref:`RunControl.nml <suews:RunControl.nml>`
Returns
-------
tuple
A tuple including (freq_s, dir_save, site):
freq_s: output frequency in seconds
dir_save: directory name to save results
site: site identifier
'''
try:
path_runcontrol = Path(path_runcontrol).expanduser().resolve()
except FileNotFoundError:
print('{path} does not exists!'.format(path=path_runcontrol))
else:
dict_mod_cfg = load_SUEWS_dict_ModConfig(path_runcontrol)
freq_s, dir_save, site = [
dict_mod_cfg[x] for x in
[
'resolutionfilesout',
'fileoutputpath',
'filecode',
]
]
dir_save = path_runcontrol.parent/dir_save
if not dir_save.exists():
dir_save.mkdir()
return freq_s, dir_save, site | [] |
Please provide a description of the function:def gen_FS_DF(df_output):
df_day = pd.pivot_table(
df_output,
values=['T2', 'U10', 'Kdown', 'RH2'],
index=['Year', 'Month', 'Day'],
aggfunc=[min, max, np.mean, ])
df_day_all_year = pd.pivot_table(
df_output,
values=['T2', 'U10', 'Kdown', 'RH2'],
index=['Month', 'Day'],
aggfunc=[min, max, np.mean, ])
array_yr_mon = df_day.index.droplevel(
'Day').to_frame().drop_duplicates().values
df_fs = pd.DataFrame(
{(yr, mon):
(df_day.loc[(yr, mon)].apply(gen_score_ser) -
df_day_all_year.loc[mon].apply(gen_score_ser)).abs().mean()
for yr, mon in array_yr_mon})
return df_fs | [
"generate DataFrame of scores.\n\n Parameters\n ----------\n df_WS_data : type\n Description of parameter `df_WS_data`.\n\n Returns\n -------\n type\n Description of returned object.\n\n "
] |
Please provide a description of the function:def gen_WS_DF(df_WS_data):
df_fs = gen_FS_DF(df_WS_data)
list_index = [('mean', 'T2'), ('max', 'T2'), ('min', 'T2'),
('mean', 'U10'), ('max', 'U10'), ('min', 'U10'),
('mean', 'RH2'), ('max', 'RH2'), ('min', 'RH2'),
('mean', 'Kdown')]
list_const = [getattr(const, attr)
for attr in ['T_MEAN', 'T_MAX', 'T_MIN',
'WIND_MEAN', 'WIND_MAX', 'WIND_MIN',
'RH_MEAN', 'RH_MAX', 'RH_MIN',
'SOLAR_RADIATION_GLOBAL']]
list_ws = [df_fs.loc[idx] * cst
for idx, cst
in zip(list_index, list_const)]
df_ws = pd.concat(list_ws, axis=1).sum(axis=1).unstack().dropna()
return df_ws | [
"generate DataFrame of weighted sums.\n\n Parameters\n ----------\n df_WS_data : type\n Description of parameter `df_WS_data`.\n\n Returns\n -------\n type\n Description of returned object.\n\n "
] |
Please provide a description of the function:def gen_TMY(df_output):
'''generate TMY (typical meteorological year) from SuPy output.
Parameters
----------
df_output : pandas.DataFrame
Output from `run_supy`: longterm (e.g., >10 years) simulation results, otherwise not very useful.
'''
# calculate weighted score
ws = gen_WS_DF(df_output)
# select year
year_sel = pick_year(ws, df_output, n=5)
# generate TMY data
df_TMY = pd.concat(
# shift(1) here is to conform the convention that
# timestamps refer to the preceding period
# [df_output.shift(1).groupby(['Month', 'Year']).get_group(grp)
# shift(1) is not necessary
[df_output.groupby(['Month', 'Year']).get_group(grp)
for grp in year_sel.items()])
# df_TMY = df_TMY.rename(columns=dict_supy_epw)
return df_TMY | [] |
Please provide a description of the function:def _geoid_radius(latitude: float) -> float:
lat = deg2rad(latitude)
return sqrt(1/(cos(lat) ** 2 / Rmax_WGS84 ** 2 + sin(lat) ** 2 / Rmin_WGS84 ** 2)) | [
"Calculates the GEOID radius at a given latitude\n\n Parameters\n ----------\n latitude : float\n Latitude (degrees)\n\n Returns\n -------\n R : float\n GEOID Radius (meters)\n "
] |
Please provide a description of the function:def geometric2geopotential(z: float, latitude: float) -> float:
twolat = deg2rad(2 * latitude)
g = 9.80616 * (1 - 0.002637*cos(twolat) + 0.0000059*cos(twolat)**2)
re = _geoid_radius(latitude)
return z * g * re / (re + z) | [
"Converts geometric height to geopoential height\n\n Parameters\n ----------\n z : float\n Geometric height (meters)\n latitude : float\n Latitude (degrees)\n\n Returns\n -------\n h : float\n Geopotential Height (meters) above the reference ellipsoid\n "
] |
Please provide a description of the function:def geopotential2geometric(h: float, latitude: float) -> float:
twolat = deg2rad(2 * latitude)
g = 9.80616 * (1 - 0.002637*cos(twolat) + 0.0000059*cos(twolat)**2)
re = _geoid_radius(latitude)
return h * re / (g * re - h) | [
"Converts geopoential height to geometric height\n\n Parameters\n ----------\n h : float\n Geopotential height (meters)\n latitude : float\n Latitude (degrees)\n\n Returns\n -------\n z : float\n Geometric Height (meters) above the reference ellipsoid\n "
] |
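A small consistency sketch for the two converters above: converting a geometric height to geopotential height and back should recover the input (the latitude and height values are illustrative).

    z = 500.0                                   # geometric height, metres
    lat = 51.5                                  # latitude, degrees
    h = geometric2geopotential(z, lat)
    z_back = geopotential2geometric(h, lat)
    print(h, z_back)                            # z_back should be very close to z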
Please provide a description of the function:def get_ser_val_alt(lat: float, lon: float,
da_alt_x: xr.DataArray,
da_alt: xr.DataArray, da_val: xr.DataArray)->pd.Series:
'''interpolate atmospheric variable to a specified altitude
Parameters
----------
lat : float
latitude of specified site
lon : float
longitude of specified site
da_alt_x : xr.DataArray
desired altitude to interpolate variable at
da_alt : xr.DataArray
altitude associated with `da_val`: variable array to interpolate
da_val : xr.DataArray
atmospheric variable to interpolate
Returns
-------
pd.Series
interpolated values at the specified altitude of site positioned by [`lat`, `lon`]
'''
alt_t_1d = da_alt.sel(
latitude=lat, longitude=lon, method='nearest')
val_t_1d = da_val.sel(
latitude=lat, longitude=lon, method='nearest')
alt_x = da_alt_x.sel(
latitude=lat, longitude=lon, method='nearest')[0]
val_alt = np.array(
[interp1d(alt_1d, val_1d)(alt_x)
for alt_1d, val_1d
in zip(alt_t_1d, val_t_1d)])
ser_alt = pd.Series(
val_alt,
index=da_val.time.values,
name=da_val.name,
)
return ser_alt | [] |
Please provide a description of the function:def get_df_val_alt(lat: float, lon: float, da_alt_meas: xr.DataArray, ds_val: xr.Dataset):
'''interpolate atmospheric variables to a specified altitude
Parameters
----------
lat : float
latitude of specified site
lon : float
longitude of specified site
da_alt_x : xr.DataArray
desired altitude to interpolate variable at
da_alt : xr.DataArray
altitude associated with `da_val`: variable array to interpolate
da_val : xr.DataArray
atmospheric variable to interpolate
Returns
-------
pd.DataFrame
interpolated values at the specified altitude of site positioned by [`lat`, `lon`]
'''
da_alt = geopotential2geometric(ds_val.z, ds_val.latitude)
# generate pressure series for grid x
da_alt_x = da_alt.sel(
latitude=lat, longitude=lon, method='nearest')
alt_meas_x = da_alt_meas.sel(
latitude=lat, longitude=lon, method='nearest')[0]
val_pres = np.array([interp1d(alt, da_alt_x.level)(alt_meas_x)
for alt in da_alt_x])
df_val_alt = pd.concat(
[get_ser_val_alt(
lat, lon, da_alt_meas, da_alt, ds_val[var])
for var in ds_val.data_vars],
axis=1
)
# add pressure
df_val_alt['p'] = val_pres
df_val_alt.index = df_val_alt.index.set_names('time')
df_val_alt.columns = df_val_alt.columns.set_names('var')
return df_val_alt | [] |
Please provide a description of the function:def gen_req_sfc(lat_x, lon_x, start, end, grid=[0.125, 0.125], scale=0):
'''generate a dict of reqs kwargs for (lat_x,lon_x) spanning [start, end]
Parameters
----------
lat_x : [type]
[description]
lon_x : [type]
[description]
start : [type]
[description]
end : [type]
[description]
grid : list, optional
[description] (the default is [0.125, 0.125], which [default_description])
scale : int, optional
[description] (the default is 0, which [default_description])
Returns
-------
[type]
[description]
Examples
--------
>>> gen_req_sfc(28, 116, '2015-01', '2015-01-31 23', grid=[0.125, 0.125], scale=0)
'''
# scale is a factor to rescale grid size
size = grid[0]*scale
# generate pd.Series for timestamps
ser_datetime = pd.date_range(start, end, freq='1h').to_series()
# surface requests
lat_c, lon_c = (roundPartial(x, grid[0]) for x in [lat_x, lon_x])
area = [lat_c+size, lon_c-size, lat_c-size, lon_c+size]
dict_req_sfc = {
'variable': list_var_sfc,
'product_type': 'reanalysis',
'area': area,
'grid': grid,
'format': 'netcdf'
}
list_dict_req_sfc = [
{**dict_req_sfc, **dict_dt}
for dict_dt
in list(gen_dict_dt_sub(ser_datetime).values())
]
dict_req_sfc = {
gen_fn(dict_req): gen_dict_proc(dict_req)
for dict_req in list_dict_req_sfc
}
return dict_req_sfc | [] |
Please provide a description of the function:def sel_list_pres(ds_sfc_x):
'''
select proper levels for model level data download
'''
p_min, p_max = ds_sfc_x.sp.min().values, ds_sfc_x.sp.max().values
list_pres_level = [
'1', '2', '3',
'5', '7', '10',
'20', '30', '50',
'70', '100', '125',
'150', '175', '200',
'225', '250', '300',
'350', '400', '450',
'500', '550', '600',
'650', '700', '750',
'775', '800', '825',
'850', '875', '900',
'925', '950', '975',
'1000',
]
ser_pres_level = pd.Series(list_pres_level).map(int)*100
pos_lev_max, pos_lev_min = (
ser_pres_level[ser_pres_level > p_max].idxmin(),
ser_pres_level[ser_pres_level < p_min].idxmax()
)
list_pres_sel = ser_pres_level.loc[pos_lev_min:pos_lev_max]/100
list_pres_sel = list_pres_sel.map(int).map(str).to_list()
return list_pres_sel | [] |
Please provide a description of the function:def load_world(filename):
import ecell4_base
vinfo = ecell4_base.core.load_version_information(filename)
if vinfo.startswith("ecell4-bd"):
return ecell4_base.bd.World(filename)
elif vinfo.startswith("ecell4-egfrd"):
return ecell4_base.egfrd.World(filename)
elif vinfo.startswith("ecell4-meso"):
return ecell4_base.meso.World(filename)
elif vinfo.startswith("ecell4-ode"):
return ecell4_base.ode.World(filename)
elif vinfo.startswith("ecell4-gillespie"):
return ecell4_base.gillespie.World(filename)
elif vinfo.startswith("ecell4-spatiocyte"):
return ecell4_base.spatiocyte.World(filename)
elif vinfo == "":
raise RuntimeError("No version information was found in [{0}]".format(filename))
raise RuntimeError("Unkown version information [{0}]".format(vinfo)) | [
"\n Load a world from the given HDF5 filename.\n The return type is determined by ``ecell4_base.core.load_version_information``.\n\n Parameters\n ----------\n filename : str\n A HDF5 filename.\n\n Returns\n -------\n w : World\n Return one from ``BDWorld``, ``EGFRDWorld``, ``MesoscopicWorld``,\n ``ODEWorld``, ``GillespieWorld`` and ``SpatiocyteWorld``.\n\n "
] |
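A hedged usage sketch; 'world.h5' is a hypothetical file previously written by one of the ecell4 World classes (for example via its save method), and the returned type depends on the version string stored in the file.

    w = load_world('world.h5')          # hypothetical HDF5 file from an earlier run
    print(type(w).__module__, w.t())    # e.g. 'ecell4_base.gillespie' and the stored time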
Please provide a description of the function:def run_simulation(
t, y0=None, volume=1.0, model=None, solver='ode',
is_netfree=False, species_list=None, without_reset=False,
return_type='matplotlib', opt_args=(), opt_kwargs=None,
structures=None, observers=(), progressbar=0, rndseed=None,
factory=None, ## deprecated
**kwargs):
y0 = y0 or {}
opt_kwargs = opt_kwargs or {}
structures = structures or {}
for key, value in kwargs.items():
if key == 'r':
return_type = value
elif key == 'v':
volume = value
elif key == 's':
solver = value
elif key == 'm':
model = value
else:
raise ValueError(
"An unknown keyword argument was given [{}={}]".format(key, value))
import ecell4_base
if unit.HAS_PINT:
if isinstance(t, unit._Quantity):
if unit.STRICT and not unit.check_dimensionality(t, '[time]'):
raise ValueError("Cannot convert [t] from '{}' ({}) to '[time]'".format(t.dimensionality, t.u))
t = t.to_base_units().magnitude
if isinstance(volume, unit._Quantity):
if unit.STRICT:
if isinstance(volume.magnitude, ecell4_base.core.Real3) and not unit.check_dimensionality(volume, '[length]'):
raise ValueError("Cannot convert [volume] from '{}' ({}) to '[length]'".format(
volume.dimensionality, volume.u))
elif not unit.check_dimensionality(volume, '[volume]'):
raise ValueError("Cannot convert [volume] from '{}' ({}) to '[volume]'".format(
volume.dimensionality, volume.u))
volume = volume.to_base_units().magnitude
if not isinstance(solver, str) and isinstance(solver, collections.Iterable):
solver = [
value.to_base_units().magnitude if isinstance(value, unit._Quantity) else value
for value in solver]
if factory is not None:
# f = factory #XXX: will be deprecated in the future. just use solver
raise ValueError(
"Argument 'factory' is no longer available. Use 'solver' instead.")
elif isinstance(solver, str):
f = get_factory(solver)
elif isinstance(solver, collections.Iterable):
f = get_factory(*solver)
else:
f = solver
if rndseed is not None:
f = f.rng(ecell4_base.core.GSLRandomNumberGenerator(rndseed))
if model is None:
model = get_model(is_netfree, without_reset)
w = f.world(volume)
edge_lengths = w.edge_lengths()
if unit.HAS_PINT:
y0 = y0.copy()
for key, value in y0.items():
if isinstance(value, unit._Quantity):
if not unit.STRICT:
y0[key] = value.to_base_units().magnitude
elif unit.check_dimensionality(value, '[substance]'):
y0[key] = value.to_base_units().magnitude
elif unit.check_dimensionality(value, '[concentration]'):
volume = w.volume() if not isinstance(w, ecell4_base.spatiocyte.SpatiocyteWorld) else w.actual_volume()
y0[key] = value.to_base_units().magnitude * volume
else:
raise ValueError(
"Cannot convert a quantity for [{}] from '{}' ({}) to '[substance]'".format(
key, value.dimensionality, value.u))
if not isinstance(w, ecell4_base.ode.ODEWorld):
w.bind_to(model)
for (name, shape) in (structures.items() if isinstance(structures, dict) else structures):
if isinstance(shape, str):
w.add_structure(ecell4_base.core.Species(name), get_shape(shape))
elif isinstance(shape, collections.Iterable):
w.add_structure(ecell4_base.core.Species(name), get_shape(*shape))
else:
w.add_structure(ecell4_base.core.Species(name), shape)
if isinstance(w, ecell4_base.ode.ODEWorld):
# w.bind_to(model) # stop binding for ode
for serial, n in y0.items():
w.set_value(ecell4_base.core.Species(serial), n)
else:
# w.bind_to(model)
for serial, n in y0.items():
w.add_molecules(ecell4_base.core.Species(serial), n)
if not isinstance(t, collections.Iterable):
t = [float(t) * i / 100 for i in range(101)]
if species_list is not None:
obs = ecell4_base.core.TimingNumberObserver(t, species_list)
else:
obs = ecell4_base.core.TimingNumberObserver(t)
sim = f.simulator(w, model)
# sim = f.simulator(w)
if not isinstance(observers, collections.Iterable):
observers = (observers, )
if return_type not in ('world', 'none', None):
observers = (obs, ) + tuple(observers)
if progressbar > 0:
from .progressbar import progressbar as pb
pb(sim, timeout=progressbar, flush=True).run(t[-1], observers)
else:
sim.run(t[-1], observers)
if return_type in ('matplotlib', 'm'):
if isinstance(opt_args, (list, tuple)):
viz.plot_number_observer(obs, *opt_args, **opt_kwargs)
elif isinstance(opt_args, dict):
# opt_kwargs is ignored
viz.plot_number_observer(obs, **opt_args)
else:
raise ValueError('opt_args [{}] must be list or dict.'.format(
repr(opt_args)))
elif return_type in ('nyaplot', 'n'):
if isinstance(opt_args, (list, tuple)):
viz.plot_number_observer_with_nya(obs, *opt_args, **opt_kwargs)
elif isinstance(opt_args, dict):
# opt_kwargs is ignored
viz.plot_number_observer_with_nya(obs, **opt_args)
else:
raise ValueError('opt_args [{}] must be list or dict.'.format(
repr(opt_args)))
elif return_type in ('observer', 'o'):
return obs
elif return_type in ('array', 'a'):
return obs.data()
elif return_type in ('dataframe', 'd'):
import pandas
import numpy
data = numpy.array(obs.data()).T
return pandas.concat([
pandas.DataFrame(dict(Time=data[0], Value=data[i + 1],
Species=sp.serial(), **opt_kwargs))
for i, sp in enumerate(obs.targets())])
elif return_type in ('world', 'w'):
return sim.world()
elif return_type is None or return_type in ('none', ):
return
else:
raise ValueError(
'An invalid value for "return_type" was given [{}]. '.format(str(return_type))
+ 'Use "none" if you need nothing to be returned.') | [
"Run a simulation with the given model and plot the result on IPython\n notebook with matplotlib.\n\n Parameters\n ----------\n t : array or Real\n A sequence of time points for which to solve for 'm'.\n y0 : dict\n Initial condition.\n volume : Real or Real3, optional\n A size of the simulation volume.\n Keyword 'v' is a shortcut for specifying 'volume'.\n model : Model, optional\n Keyword 'm' is a shortcut for specifying 'model'.\n solver : str, tuple or Factory, optional\n Solver type. Choose one from 'ode', 'gillespie', 'spatiocyte', 'meso',\n 'bd' and 'egfrd'. Default is 'ode'.\n When tuple is given, the first value must be str as explained above.\n All the rest is used as arguments for the corresponding factory class.\n Keyword 's' is a shortcut for specifying 'solver'.\n species_list : list of str, optional\n A list of names of Species observed. If None, log all.\n Default is None.\n return_type : str, optional\n Choose a type of return value from 'array', 'observer',\n 'matplotlib', 'nyaplot', 'world', 'dataframe', 'none' or None.\n If None or 'none', return and plot nothing. Default is 'matplotlib'.\n 'dataframe' requires numpy and pandas libraries.\n Keyword 'r' is a shortcut for specifying 'return_type'.\n opt_args: list, tuple or dict, optional\n Arguments for plotting. If return_type suggests no plotting, just ignored.\n opt_kwargs: dict, optional\n Arguments for plotting. If return_type suggests no plotting or\n opt_args is a list or tuple, just ignored.\n i.e.) viz.plot_number_observer(obs, *opt_args, **opt_kwargs)\n is_netfree: bool, optional\n Whether the model is netfree or not. When a model is given as an\n argument, just ignored. Default is False.\n structures : list or dict, optional\n A dictionary which gives pairs of a name and shape of structures.\n Not fully supported yet.\n observers : Observer or list, optional\n A list of extra observer references.\n progressbar : float, optional\n A timeout for a progress bar in seconds.\n When the value is not more than 0, show nothing.\n Default is 0.\n rndseed : int, optional\n A random seed for a simulation.\n This argument will be ignored when 'solver' is given NOT as a string.\n\n Returns\n -------\n value : list, TimingNumberObserver, World or None\n Return a value suggested by ``return_type``.\n When ``return_type`` is 'array', return a time course data.\n When ``return_type`` is 'observer', return an observer.\n When ``return_type`` is 'world', return the last state of ``World``.\n Return nothing if else.\n\n "
] |
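A minimal sketch of calling run_simulation, mirroring the singlerun example defined later in this document; the species names, initial amounts and rate constants are illustrative only.

import ecell4.util.decorator
import ecell4.util.simulation

# Define a reversible binding model A + B <=> C with the reaction_rules decorator.
with ecell4.util.decorator.reaction_rules():
    A + B == C | (0.01, 0.3)

# Run for 1.0 time unit with the Gillespie solver and return the raw time course.
data = ecell4.util.simulation.run_simulation(
    1.0, y0={'A': 60, 'B': 60, 'C': 0},
    solver='gillespie', return_type='array')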
Please provide a description of the function:def number_observer(t=None, targets=None):
from ecell4_base.core import NumberObserver, FixedIntervalNumberObserver, TimingNumberObserver
if t is None:
return NumberObserver(targets)
elif isinstance(t, numbers.Number):
return FixedIntervalNumberObserver(t, targets)
elif hasattr(t, '__iter__'):
if targets is not None:
return TimingNumberObserver(t, targets)
else:
return TimingNumberObserver(t)
else:
raise TypeError("An invalid type was given. Either number or iterable is required.") | [
"\n Return a number observer. If t is None, return NumberObserver. If t is a number,\n return FixedIntervalNumberObserver. If t is an iterable (a list of numbers), return\n TimingNumberObserver.\n\n Parameters\n ----------\n t : float, list or tuple, optional. default None\n A timing of the observation. See above.\n targets : list or tuple, optional. default None\n A list of strings suggesting Species observed.\n\n Returns\n -------\n obs : NumberObserver, FixedIntervalNumberObserver or TimingNumberObserver\n "
] |
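A short sketch of the three dispatch branches described above, assuming this helper is exposed as ecell4.util.simulation.number_observer.

from ecell4.util.simulation import number_observer

obs_every_step = number_observer(targets=['A', 'B'])      # NumberObserver
obs_interval = number_observer(0.1, ['A', 'B'])           # FixedIntervalNumberObserver
obs_timings = number_observer([0.0, 0.5, 1.0], ['A'])     # TimingNumberObserver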
Please provide a description of the function:def show(target, *args, **kwargs):
if isinstance(target, (ecell4_base.core.FixedIntervalNumberObserver, ecell4_base.core.NumberObserver, ecell4_base.core.TimingNumberObserver, )):
plot_number_observer(target, *args, **kwargs)
elif isinstance(target, (ecell4_base.core.FixedIntervalTrajectoryObserver, ecell4_base.core.FixedIntervalTrackingObserver)):
plot_trajectory(target, *args, **kwargs)
elif isinstance(target, (ecell4_base.ode.ODEWorld, ecell4_base.gillespie.GillespieWorld, ecell4_base.spatiocyte.SpatiocyteWorld, ecell4_base.meso.MesoscopicWorld, ecell4_base.bd.BDWorld, ecell4_base.egfrd.EGFRDWorld)):
plot_world(target, *args, **kwargs)
elif isinstance(target, (ecell4_base.core.Model, ecell4_base.core.NetworkModel, ecell4_base.core.NetfreeModel)):
dump_model(target)
elif isinstance(target, str):
try:
w = simulation.load_world(target)
except RuntimeError as e:
raise ValueError("The given target [{}] is not supported.".format(repr(target)))
else:
show(w, *args, **kwargs)
else:
raise ValueError("The given target [{}] is not supported.".format(repr(target))) | [
"\n An utility function to display the given target object in the proper way.\n\n Paramters\n ---------\n target : NumberObserver, TrajectoryObserver, World, str\n When a NumberObserver object is given, show it with viz.plot_number_observer.\n When a TrajectoryObserver object is given, show it with viz.plot_trajectory_observer.\n When a World or a filename suggesting HDF5 is given, show it with viz.plot_world.\n\n "
] |
Please provide a description of the function:def run(self, duration, obs):
from ecell4_base.core import TimeoutObserver
timeout = TimeoutObserver(self.__timeout)
if isinstance(obs, collections.Iterable):
obs = tuple(obs) + (timeout, )
else:
obs = (obs, timeout)
p = ProgressBar(**self.__kwargs)
p.animate(0.0)
tstart = self.__sim.t()
upto = tstart + duration
while self.__sim.t() < upto:
self.__sim.run(upto - self.__sim.t(), obs)
p.animate((self.__sim.t() - tstart) / duration, timeout.accumulation())
if self.__flush:
p.flush()
else:
print() | [
"Run the simulation.\n\n Parameters\n ----------\n duration : Real\n a duration for running a simulation.\n A simulation is expected to be stopped at t() + duration.\n observers : list of Obeservers, optional\n observers\n\n "
] |
Please provide a description of the function:def run(self, duration, obs=None):
from ecell4_base.core import TimeoutObserver
timeout = TimeoutObserver(self.__timeout)
if obs is None:
obs = (timeout, )
elif isinstance(obs, collections.Iterable):
obs = tuple(obs) + (timeout, )
else:
obs = (obs, timeout)
from ipywidgets import FloatProgress, HBox, HTML
from IPython.display import display
from time import sleep
fp = FloatProgress(min=0, max=100)
ptext = HTML()
display(HBox(children=[fp, ptext]))
tstart = self.__sim.t()
upto = tstart + duration
while self.__sim.t() < upto:
self.__sim.run(upto - self.__sim.t(), obs)
value = (self.__sim.t() - tstart) / duration
fp.value = value * 100
ptext.value = self.get_text(value, timeout.accumulation())
sleep(self.__wait)
fp.value = 100
ptext.value = self.get_text(1, timeout.accumulation()) | [
"Run the simulation.\n\n Parameters\n ----------\n duration : Real\n a duration for running a simulation.\n A simulation is expected to be stopped at t() + duration.\n obs : list of Obeservers, optional\n observers\n\n "
] |
Please provide a description of the function:def print_batch_exception(batch_exception):
_log.error('-------------------------------------------')
_log.error('Exception encountered:')
if batch_exception.error and \
batch_exception.error.message and \
batch_exception.error.message.value:
_log.error(batch_exception.error.message.value)
if batch_exception.error.values:
_log.error('')
for mesg in batch_exception.error.values:
_log.error('{}:\t{}'.format(mesg.key, mesg.value))
_log.error('-------------------------------------------') | [
"Prints the contents of the specified Batch exception.\n\n :param batch_exception:\n "
] |
Please provide a description of the function:def upload_file_to_container(block_blob_client, container_name, file_path):
blob_name = os.path.basename(file_path)
_log.info('Uploading file {} to container [{}]...'.format(file_path, container_name))
block_blob_client.create_blob_from_path(container_name,
blob_name,
file_path)
sas_token = block_blob_client.generate_blob_shared_access_signature(
container_name,
blob_name,
permission=azureblob.BlobPermissions.READ,
expiry=datetime.datetime.utcnow() + datetime.timedelta(hours=2))
sas_url = block_blob_client.make_blob_url(container_name,
blob_name,
sas_token=sas_token)
return batchmodels.ResourceFile(http_url=sas_url, file_path=blob_name) | [
"Uploads a local file to an Azure Blob storage container.\n\n :param block_blob_client: A blob service client.\n :type block_blob_client: `azure.storage.blob.BlockBlobService`\n :param str container_name: The name of the Azure Blob storage container.\n :param str file_path: The local path to the file.\n :rtype: `azure.batch.models.ResourceFile`\n :return: A ResourceFile initialized with a SAS URL appropriate for Batch\n tasks.\n "
] |
Please provide a description of the function:def get_container_sas_token(block_blob_client,
container_name, blob_permissions):
# Obtain the SAS token for the container, setting the expiry time and
# permissions. In this case, no start time is specified, so the shared
# access signature becomes valid immediately.
container_sas_token = \
block_blob_client.generate_container_shared_access_signature(
container_name,
permission=blob_permissions,
expiry=datetime.datetime.utcnow() + datetime.timedelta(hours=2))
return container_sas_token | [
"Obtains a shared access signature granting the specified permissions to the\n container.\n\n :param block_blob_client: A blob service client.\n :type block_blob_client: `azure.storage.blob.BlockBlobService`\n :param str container_name: The name of the Azure Blob storage container.\n :param BlobPermissions blob_permissions:\n :rtype: str\n :return: A SAS token granting the specified permissions to the container.\n "
] |
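Illustrative usage, mirroring how run_azure below requests write access for task output; the account name, key and container name are placeholders.

import azure.storage.blob as azureblob

blob_client = azureblob.BlockBlobService(
    account_name='mystorageaccount', account_key='<storage key>')
output_container_sas_token = get_container_sas_token(
    blob_client, 'output', azureblob.BlobPermissions.WRITE)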
Please provide a description of the function:def wrap_commands_in_shell(ostype, commands):
if ostype.lower() == 'linux':
return '/bin/bash -c \'set -e; set -o pipefail; {}; wait\''.format(
';'.join(commands))
elif ostype.lower() == 'windows':
return 'cmd.exe /c "{}"'.format('&'.join(commands))
else:
raise ValueError('unknown ostype: {}'.format(ostype)) | [
"Wrap commands in a shell\n Originally in azure-batch-samples.Python.Batch.common.helpers\n\n :param list commands: list of commands to wrap\n :param str ostype: OS type, linux or windows\n :rtype: str\n :return: a shell wrapping commands\n "
] |
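For example, with two illustrative commands the Linux branch produces a single bash invocation:

cmd = wrap_commands_in_shell('linux', ['echo start', 'python task.py'])
# -> /bin/bash -c 'set -e; set -o pipefail; echo start;python task.py; wait'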
Please provide a description of the function:def create_pool(batch_service_client, pool_id,
resource_files, publisher, offer, sku,
task_file, vm_size, node_count):
_log.info('Creating pool [{}]...'.format(pool_id))
# Create a new pool of Linux compute nodes using an Azure Virtual Machines
# Marketplace image. For more information about creating pools of Linux
# nodes, see:
# https://azure.microsoft.com/documentation/articles/batch-linux-nodes/
# Specify the commands for the pool's start task. The start task is run
# on each node as it joins the pool, and when it's rebooted or re-imaged.
# We use the start task to prep the node for running our task script.
task_commands = [
# Copy the python_tutorial_task.py script to the "shared" directory
# that all tasks that run on the node have access to. Note that
# we are using the -p flag with cp to preserve the file uid/gid,
# otherwise since this start task is run as an admin, it would not
# be accessible by tasks run as a non-admin user.
'cp -p {} $AZ_BATCH_NODE_SHARED_DIR'.format(os.path.basename(task_file)),
# Install pip
'curl -fSsL https://bootstrap.pypa.io/get-pip.py | python',
# Install the azure-storage module so that the task script can access
# Azure Blob storage, pre-cryptography version
'pip install azure-storage==0.32.0',
# Install E-Cell 4
'pip install https://1028-6348303-gh.circle-artifacts.com/0/root/circle/wheelhouse/ecell-4.1.2-cp27-cp27mu-manylinux1_x86_64.whl']
# Get the node agent SKU and image reference for the virtual machine
# configuration.
# For more information about the virtual machine configuration, see:
# https://azure.microsoft.com/documentation/articles/batch-linux-nodes/
sku_to_use, image_ref_to_use = \
select_latest_verified_vm_image_with_node_agent_sku(
batch_service_client, publisher, offer, sku)
user = batchmodels.AutoUserSpecification(
scope=batchmodels.AutoUserScope.pool,
elevation_level=batchmodels.ElevationLevel.admin)
new_pool = batch.models.PoolAddParameter(
id=pool_id,
virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
image_reference=image_ref_to_use,
node_agent_sku_id=sku_to_use),
vm_size=vm_size,
target_dedicated_nodes=0,
target_low_priority_nodes=node_count,
start_task=batch.models.StartTask(
command_line=wrap_commands_in_shell('linux', task_commands),
user_identity=batchmodels.UserIdentity(auto_user=user),
wait_for_success=True,
resource_files=resource_files),
)
try:
batch_service_client.pool.add(new_pool)
except batchmodels.BatchErrorException as err:
print_batch_exception(err)
raise | [
"Creates a pool of compute nodes with the specified OS settings.\n\n :param batch_service_client: A Batch service client.\n :type batch_service_client: `azure.batch.BatchServiceClient`\n :param str pool_id: An ID for the new pool.\n :param list resource_files: A collection of resource files for the pool's\n start task.\n :param str publisher: Marketplace image publisher\n :param str offer: Marketplace image offer\n :param str sku: Marketplace image sku\n :param str task_file: A file name of the script\n :param str vm_size: A type of vm\n :param str node_count: The number of nodes\n "
] |
Please provide a description of the function:def create_job(batch_service_client, job_id, pool_id):
print('Creating job [{}]...'.format(job_id))
job = batch.models.JobAddParameter(
id=job_id,
pool_info=batch.models.PoolInformation(pool_id=pool_id))
try:
batch_service_client.job.add(job)
except batchmodels.batch_error.BatchErrorException as err:
print_batch_exception(err)
raise | [
"Creates a job with the specified ID, associated with the specified pool.\n\n :param batch_service_client: A Batch service client.\n :type batch_service_client: `azure.batch.BatchServiceClient`\n :param str job_id: The ID for the job.\n :param str pool_id: The ID for the pool.\n "
] |
Please provide a description of the function:def add_tasks(batch_service_client, job_id, loads,
output_container_name, output_container_sas_token,
task_file, account_name):
_log.info('Adding {} tasks to job [{}]...'.format(len(loads), job_id))
# _log.info('Adding {} tasks to job [{}]...'.format(len(input_files), job_id))
tasks = list()
for (input_file, output_file, i, j) in loads:
command = ['python $AZ_BATCH_NODE_SHARED_DIR/{} '
'--filepath {} --output {} --storageaccount {} '
'--task_id {} --job_id {} '
'--storagecontainer {} --sastoken "{}"'.format(
os.path.basename(task_file),
input_file.file_path,
output_file,
account_name,
i, j,
output_container_name,
output_container_sas_token)]
_log.debug('CMD : "{}"'.format(command[0]))
tasks.append(batch.models.TaskAddParameter(
id='topNtask{}-{}'.format(i, j),
command_line=command,
resource_files=[input_file]
)
)
batch_service_client.task.add_collection(job_id, tasks)
task_ids = [task.id for task in tasks]
_log.info('{} tasks were added.'.format(len(task_ids)))
return task_ids | [
"Adds a task for each input file in the collection to the specified job.\n\n :param batch_service_client: A Batch service client.\n :type batch_service_client: `azure.batch.BatchServiceClient`\n :param str job_id: The ID of the job to which to add the tasks.\n :param list input_files: A collection of input files. One task will be\n created for each input file.\n :param output_container_name: The ID of an Azure Blob storage container to\n which the tasks will upload their results.\n :param output_container_sas_token: A SAS token granting write access to\n the specified Azure Blob storage container.\n :param str task_file: A file name of the script\n :param str account_name: A storage account\n "
] |
Please provide a description of the function:def wait_for_tasks_to_complete(batch_service_client, job_ids, timeout):
timeout_expiration = datetime.datetime.now() + timeout
print("Monitoring all tasks for 'Completed' state, timeout in {}...".format(timeout), end='')
while datetime.datetime.now() < timeout_expiration:
print('.', end='')
sys.stdout.flush()
# tasks = batch_service_client.task.list(job_id)
# incomplete_tasks = [task for task in tasks if
# task.state != batchmodels.TaskState.completed]
for (job_id, _) in job_ids:
tasks = batch_service_client.task.list(job_id)
incomplete_tasks = [task for task in tasks if
task.state != batchmodels.TaskState.completed]
if incomplete_tasks:
break
if not incomplete_tasks:
print()
return True
else:
time.sleep(1)
raise RuntimeError("ERROR: Tasks did not reach 'Completed' state within "
"timeout period of " + str(timeout)) | [
"Returns when all tasks in the specified job reach the Completed state.\n\n :param batch_service_client: A Batch service client.\n :type batch_service_client: `azure.batch.BatchServiceClient`\n :param str job_id: The id of the job whose tasks should be to monitored.\n :param timedelta timeout: The duration to wait for task completion. If all\n tasks in the specified job do not reach Completed state within this time\n period, an exception will be raised.\n "
] |
Please provide a description of the function:def download_blobs_from_container(block_blob_client,
container_name, directory_path,
prefix=None):
_log.info('Downloading all files from container [{}]...'.format(container_name))
container_blobs = block_blob_client.list_blobs(container_name, prefix=prefix)
_log.info('{} blobs are found [{}]'.format(len(tuple(container_blobs)), ', '.join(blob.name for blob in container_blobs.items)))
for blob in container_blobs.items:
destination_file_path = os.path.join(directory_path, blob.name)
block_blob_client.get_blob_to_path(container_name,
blob.name,
destination_file_path)
_log.info(' Downloaded blob [{}] from container [{}] to {}'.format(
blob.name,
container_name,
destination_file_path))
_log.info(' Download complete!') | [
"Downloads all blobs from the specified Azure Blob storage container.\n\n :param block_blob_client: A blob service client.\n :type block_blob_client: `azure.storage.blob.BlockBlobService`\n :param container_name: The Azure Blob storage container from which to\n download files.\n :param directory_path: The local directory to which to download the files.\n :param str prefix: A name prefix to filter blobs. None as its default\n "
] |
Please provide a description of the function:def _read_stream_as_string(stream, encoding):
output = io.BytesIO()
try:
for data in stream:
output.write(data)
if encoding is None:
encoding = 'utf-8'
return output.getvalue().decode(encoding)
finally:
output.close()
raise RuntimeError('could not write data to stream or decode bytes') | [
"Read stream as string\n Originally in azure-batch-samples.Python.Batch.common.helpers\n\n :param stream: input stream generator\n :param str encoding: The encoding of the file. The default is utf-8.\n :return: The file content.\n :rtype: str\n "
] |
Please provide a description of the function:def read_task_file_as_string(
batch_client, job_id, task_id, file_name, encoding=None):
stream = batch_client.file.get_from_task(job_id, task_id, file_name)
return _read_stream_as_string(stream, encoding) | [
"Reads the specified file as a string.\n Originally in azure-batch-samples.Python.Batch.common.helpers\n\n :param batch_client: The batch client to use.\n :type batch_client: `batchserviceclient.BatchServiceClient`\n :param str job_id: The id of the job.\n :param str task_id: The id of the task.\n :param str file_name: The name of the file to read.\n :param str encoding: The encoding of the file. The default is utf-8.\n :return: The file content.\n :rtype: str\n "
] |
Please provide a description of the function:def print_task_output(batch_client, job_id, task_ids, encoding=None):
for task_id in task_ids:
file_text = read_task_file_as_string(
batch_client,
job_id,
task_id,
_STANDARD_OUT_FILE_NAME,
encoding)
print("{} content for task {}: ".format(
_STANDARD_OUT_FILE_NAME,
task_id))
print(file_text)
file_text = read_task_file_as_string(
batch_client,
job_id,
task_id,
_STANDARD_ERROR_FILE_NAME,
encoding)
print("{} content for task {}: ".format(
_STANDARD_ERROR_FILE_NAME,
task_id))
print(file_text) | [
"Prints the stdout and stderr for each task specified.\n Originally in azure-batch-samples.Python.Batch.common.helpers\n\n :param batch_client: The batch client to use.\n :type batch_client: `batchserviceclient.BatchServiceClient`\n :param str job_id: The id of the job to monitor.\n :param task_ids: The collection of tasks to print the output for.\n :type task_ids: `list`\n :param str encoding: The encoding to use when downloading the file.\n "
] |
Please provide a description of the function:def run_azure(target, jobs, n=1, path='.', delete=True, config=None):
if config is None:
raise ValueError('Argument \'config\' must be given.')
elif isinstance(config, str):
if not os.path.isfile(config):
raise FileNotFoundError('A file [{}] could not be found.'.format(config))
config_filename = config
config = configparser.ConfigParser()
config.sections()
config.read(config_filename)
config.sections()
elif not isinstance(config, configparser.ConfigParser):
raise ValueError('\'config\' must be eighter str or ConfigParser. [{}] was given.'.format(repr(config)))
if 'azure' not in config:
raise KeyError('Key \'azure\' could not be found in the given config.')
for key in ('batch.name', 'batch.key', 'batch.url', 'storage.name', 'storage.key', 'pool.nodecount'):
if key not in config['azure']:
raise KeyError('Key \'{}\' could not be found in the \'azure\' section.'.format(key))
# Update the Batch and Storage account credential strings below with the values
# unique to your accounts. These are used when constructing connection strings
# for the Batch and Storage client objects.
_BATCH_ACCOUNT_NAME = config['azure']['batch.name']
_BATCH_ACCOUNT_KEY = config['azure']['batch.key']
_BATCH_ACCOUNT_URL = config['azure']['batch.url']
_STORAGE_ACCOUNT_NAME = config['azure']['storage.name']
_STORAGE_ACCOUNT_KEY = config['azure']['storage.key']
_POOL_NODE_COUNT = config['azure']['pool.nodecount']
_POOL_ID = config['azure'].get('pool.id', 'MyPool')
_POOL_VM_SIZE = config['azure'].get('pool.vmsize', 'Standard_D11_v2')
_NODE_OS_PUBLISHER = config['azure'].get('os.publisher', 'Canonical')
_NODE_OS_OFFER = config['azure'].get('os.offer', 'UbuntuServer')
_NODE_OS_SKU = config['azure'].get('os.sku', '16')
_JOB_ID = config['azure'].get('job.id', 'MyJob')
if not _POOL_NODE_COUNT.isdigit():
raise ValueError('The wrong pool node count was given [{}]. This must be an integer'.format(_POOL_NODE_COUNT))
proc_per_node = 2 #XXX: Does this depend on pool vm?
nproc = int(_POOL_NODE_COUNT) * proc_per_node
code_header =
code_footer =
# src = textwrap.dedent(inspect.getsource(target)).replace(r'"', r'\"')
src = textwrap.dedent(inspect.getsource(target))
if re.match('[\s\t]+', src.split('\n')[0]) is not None:
raise RuntimeError(
"Wrong indentation was found in the source translated")
code = code_header
code += src
code += 'res = {}(inputs, args.task_id, args.job_id)'.format(target.__name__)
code += code_footer
target = code
suffix = binascii.hexlify(os.urandom(4)).decode()
start_time = datetime.datetime.now().replace(microsecond=0)
_log.info('Sample start: {}'.format(start_time))
if not os.path.isdir(path):
os.mkdir(path)
# task_file = target
# task_file = 'task-{}.py'.format(suffix)
task_file = '{}/task-{}.py'.format(path, suffix)
with open(task_file, 'w') as fout:
fout.write(target)
# Prepare input pickle files
input_file_names = []
output_file_names = []
for i, job in enumerate(jobs):
filename = '{}/input-{}_{}.pickle'.format(path, suffix, i)
input_file_names.append(filename)
for j in range(n):
output_file_names.append('output-{}_{}.{}.pickle'.format(suffix, i, j + 1))
with open(filename, mode='wb') as fout:
pickle.dump(job, fout, protocol=2)
# Create the blob client, for use in obtaining references to
# blob storage containers and uploading files to containers.
blob_client = azureblob.BlockBlobService(
account_name=_STORAGE_ACCOUNT_NAME,
account_key=_STORAGE_ACCOUNT_KEY)
n_jobs = -(-(len(jobs) * n) // nproc) # ceil for int
_log.info('{} jobs will be created.'.format(n_jobs))
res = None
try:
# Use the blob client to create the containers in Azure Storage if they
# don't yet exist.
app_container_name = 'application-{}'.format(suffix)
input_container_name = 'input-{}'.format(suffix)
output_container_name = 'output-{}'.format(suffix)
# app_container_name = 'application'
# input_container_name = 'input'
# output_container_name = 'output'
blob_client.create_container(app_container_name, fail_on_exist=False)
blob_client.create_container(input_container_name, fail_on_exist=False)
blob_client.create_container(output_container_name, fail_on_exist=False)
# Paths to the task script. This script will be executed by the tasks that
# run on the compute nodes.
application_file_paths = [os.path.realpath(task_file)]
# The collection of data files that are to be processed by the tasks.
input_file_paths = [os.path.realpath(filename) for filename in input_file_names]
# Upload the application script to Azure Storage. This is the script that
# will process the data files, and is executed by each of the tasks on the
# compute nodes.
application_files = [
upload_file_to_container(blob_client, app_container_name, file_path)
for file_path in application_file_paths]
# Upload the data files. This is the data that will be processed by each of
# the tasks executed on the compute nodes in the pool.
input_files = [
upload_file_to_container(blob_client, input_container_name, file_path)
for file_path in input_file_paths]
# Obtain a shared access signature that provides write access to the output
# container to which the tasks will upload their output.
output_container_sas_token = get_container_sas_token(
blob_client,
output_container_name,
azureblob.BlobPermissions.WRITE)
# Create a Batch service client. We'll now be interacting with the Batch
# service in addition to Storage
credentials = batchauth.SharedKeyCredentials(_BATCH_ACCOUNT_NAME,
_BATCH_ACCOUNT_KEY)
#print(_BATCH_ACCOUNT_URL)
batch_client = batch.BatchServiceClient(
credentials,
batch_url=_BATCH_ACCOUNT_URL)
# Create the pool that will contain the compute nodes that will execute the
# tasks. The resource files we pass in are used for configuring the pool's
# start task, which is executed each time a node first joins the pool (or
# is rebooted or re-imaged).
create_pool(batch_client,
_POOL_ID + '-' + suffix,
application_files,
_NODE_OS_PUBLISHER,
_NODE_OS_OFFER,
_NODE_OS_SKU,
task_file,
_POOL_VM_SIZE, _POOL_NODE_COUNT)
# Create the job that will run the tasks.
loads = []
for i, input_file in enumerate(input_files):
for j, output_file in enumerate(output_file_names[i * n: (i + 1) * n]):
loads.append((input_file, output_file, i + 1, j + 1))
assert n_jobs == -(-len(loads) // nproc) # ceil for int
job_names = []
for i in range(n_jobs):
job_name = '{}-{}-{}'.format(_JOB_ID, suffix, i + 1)
create_job(batch_client, job_name, _POOL_ID + '-' + suffix)
# Add the tasks to the job. We need to supply a container shared access
# signature (SAS) token for the tasks so that they can upload their output
# to Azure Storage.
task_ids = add_tasks(batch_client,
job_name,
loads[i * nproc: (i + 1) * nproc],
output_container_name,
output_container_sas_token,
task_file,
_STORAGE_ACCOUNT_NAME)
job_names.append((job_name, task_ids))
# Pause execution until tasks reach Completed state.
wait_for_tasks_to_complete(batch_client,
job_names,
datetime.timedelta(minutes=20))
_log.info(" Success! All tasks reached the 'Completed' state within the specified timeout period.")
# Download the task output files from the output Storage container to a
# local directory. Note that we could have also downloaded the output
# files directly from the compute nodes themselves.
download_blobs_from_container(blob_client,
output_container_name,
os.path.abspath(path))
for job_id, task_ids in job_names:
print_task_output(batch_client, job_id, task_ids)
# Print out some timing info
end_time = datetime.datetime.now().replace(microsecond=0)
_log.info('Sample end: {}'.format(end_time))
_log.info('Elapsed time: {}'.format(end_time - start_time))
res = []
for output_file in output_file_names:
with open(os.path.join(path, output_file), mode='rb') as fin:
res.append(pickle.load(fin))
res = [res[i * n: (i + 1) * n] for i in range(len(jobs))]
finally:
# Clean up storage resources
_log.info('Deleting containers...')
blob_client.delete_container(app_container_name)
blob_client.delete_container(input_container_name)
blob_client.delete_container(output_container_name)
# Clean up Batch resources (if the user so chooses).
for i in range(n_jobs):
job_name = '{}-{}-{}'.format(_JOB_ID, suffix, i + 1)
_log.info('Deleting job [{}] ...'.format(job_name))
batch_client.job.delete(job_name)
_log.info('Deleting pool...')
batch_client.pool.delete(_POOL_ID + '-' + suffix)
if delete:
_log.info('Deleting temporary files...')
for filename in output_file_names:
filename = os.path.join(path, filename)
if os.path.isfile(filename):
os.remove(filename)
for filename in itertools.chain(input_file_paths, application_file_paths):
if os.path.isfile(filename):
os.remove(filename)
return res | [
"Execute a function for multiple sets of arguments on Microsoft Azure,\n and return the results as a list.\n\n :param function target: A target function.\n :param list jobs: A list of sets of arguments given to the target.\n :param int n: The number of repeats running the target. 1 as default.\n :param str path: A path to save temp files. The current path as default.\n :param bool delete: Delete temp files after finishing jobs, or not. True as default.\n :param config: str or configparser.ConfigParser. A config file. An example is the following:\n\n ```\n [azure]\n batch.name = foo\n batch.key = bar\n batch.url = hoge\n storage.name = fuga\n storage.key = spam\n pool.nodecount = 2\n # pool.id = MyPool\n # pool.vmsize = Standard_D11_v2\n # os.publisher = Canonical\n # os.offer = UbuntuServer\n # os.sku = 16\n # job.id = MyJob\n ```\n\n :return: A list of results corresponding the `jobs` list.\n :rtype: list\n ",
"\nfrom __future__ import print_function\nimport argparse\nimport os\nimport string\nimport azure.storage.blob as azureblob\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--filepath', required=True,\n help='The path to the text file to process. The path'\n 'may include a compute node\\\\'s environment'\n 'variables, such as'\n '$AZ_BATCH_NODE_SHARED_DIR/filename.txt')\nparser.add_argument('--output', required=True,\n help='The path to the output.')\nparser.add_argument('--job_id', type=int, required=True)\nparser.add_argument('--task_id', type=int, required=True)\nparser.add_argument('--storageaccount', required=True,\n help='The name the Azure Storage account that owns the'\n 'blob storage container to which to upload'\n 'results.')\nparser.add_argument('--storagecontainer', required=True,\n help='The Azure Blob storage container to which to'\n 'upload results.')\nparser.add_argument('--sastoken', required=True,\n help='The SAS token providing write access to the'\n 'Storage container.')\nargs = parser.parse_args()\n\ninput_file = os.path.realpath(args.filepath)\noutput_file = args.output\n\nimport pickle\nwith open(input_file, mode='rb') as fin:\n inputs = pickle.load(fin)\n\n",
"\n\nwith open(output_file, mode='wb') as fout:\n pickle.dump(res, fout, protocol=2)\n\n# Create the blob client using the container's SAS token.\n# This allows us to create a client that provides write\n# access only to the container.\nblob_client = azureblob.BlockBlobService(account_name=args.storageaccount,\n sas_token=args.sastoken)\noutput_file_path = os.path.realpath(output_file)\nblob_client.create_blob_from_path(args.storagecontainer,\n output_file,\n output_file_path)\n"
] |
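A hedged end-to-end sketch: 'azure.ini' stands for a config file in the format shown in the docstring above, and singlerun is the example task defined just below.

jobs = [(60, 60, 0), (30, 30, 0)]   # one tuple of arguments per job
results = run_azure(singlerun, jobs, n=2, path='./tmp', config='azure.ini')
# results[i][j] holds the return value of the j-th repeat of jobs[i]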
Please provide a description of the function:def singlerun(job, task_id=0, job_id=0):
import ecell4_base
import ecell4
import ecell4.util.simulation
import ecell4.util.decorator
print('ecell4_base.__version__ = {:s}'.format(ecell4_base.__version__))
print('ecell4.__version__ = {:s}'.format(ecell4.__version__))
print('job={}, task_id={}, job_id={}'.format(str(job), task_id, job_id))
with ecell4.util.decorator.reaction_rules():
A + B == C | (0.01, 0.3)
res = ecell4.util.simulation.run_simulation(
1.0,
y0={'A': job[0], 'B': job[1], 'C': job[2]},
rndseed=job_id,
solver='gillespie',
return_type='array')
print('A simulation was successfully done.')
return res | [
"This task is for an example."
] |
Please provide a description of the function:def plot_number_observer(*args, **kwargs):
interactive = kwargs.pop('interactive', False)
if interactive:
plot_number_observer_with_nya(*args, **kwargs)
# elif __on_ipython_notebook():
# kwargs['to_png'] = True
# plot_number_observer_with_nya(*args, **kwargs)
else:
if kwargs.pop('to_png', None) is not None:
#XXX: Remove an option available only on nyaplot for the consistency
import warnings
warnings.warn(
"An option 'to_png' is not available with matplotlib. Just ignored.")
plot_number_observer_with_matplotlib(*args, **kwargs) | [
"\n Generate a plot from NumberObservers and show it.\n See plot_number_observer_with_matplotlib and _with_nya for details.\n\n Parameters\n ----------\n obs : NumberObserver (e.g. FixedIntervalNumberObserver)\n interactive : bool, default False\n Choose a visualizer. If False, show the plot with matplotlib.\n If True (only available on IPython Notebook), show it with nyaplot.\n\n Examples\n --------\n >>> plot_number_observer(obs1)\n >>> plot_number_observer(obs1, interactive=True)\n\n "
] |
Please provide a description of the function:def plot_world(*args, **kwargs):
interactive = kwargs.pop('interactive', True)
if interactive:
plot_world_with_elegans(*args, **kwargs)
else:
plot_world_with_matplotlib(*args, **kwargs) | [
"\n Generate a plot from received instance of World and show it.\n See also plot_world_with_elegans and plot_world_with_matplotlib.\n\n Parameters\n ----------\n world : World or str\n World or a HDF5 filename to render.\n interactive : bool, default True\n Choose a visualizer. If False, show the plot with matplotlib.\n If True (only available on IPython Notebook), show it with elegans.\n\n Examples\n --------\n >>> plot_world(w)\n >>> plot_world(w, interactive=False)\n\n "
] |
Please provide a description of the function:def plot_movie(*args, **kwargs):
interactive = kwargs.pop('interactive', False)
if interactive:
plot_movie_with_elegans(*args, **kwargs)
else:
plot_movie_with_matplotlib(*args, **kwargs) | [
"\n Generate a movie from received instances of World and show them.\n See also plot_movie_with_elegans and plot_movie_with_matplotlib.\n\n Parameters\n ----------\n worlds : list of World\n Worlds to render.\n interactive : bool, default True\n Choose a visualizer. If False, show the plot with matplotlib.\n If True (only available on IPython Notebook), show it with elegans.\n\n "
] |
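For instance, given a list of World snapshots saved during a run (the variable name is illustrative), the dispatch works as follows:

plot_movie(worlds)                    # matplotlib renderer by default
plot_movie(worlds, interactive=True)  # elegans renderer on IPython Notebook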
Please provide a description of the function:def plot_trajectory(*args, **kwargs):
interactive = kwargs.pop('interactive', True)
if interactive:
plot_trajectory_with_elegans(*args, **kwargs)
else:
plot_trajectory_with_matplotlib(*args, **kwargs) | [
"\n Generate a plot from received instance of TrajectoryObserver and show it\n See also plot_trajectory_with_elegans and plot_trajectory_with_matplotlib.\n\n Parameters\n ----------\n obs : TrajectoryObserver\n TrajectoryObserver to render.\n interactive : bool, default True\n Choose a visualizer. If False, show the plot with matplotlib.\n If True (only available on IPython Notebook), show it with elegans.\n\n Examples\n --------\n >>> plot_trajectory(obs)\n >>> plot_trajectory(obs, interactive=False)\n\n "
] |
Please provide a description of the function:def plot_number_observer_with_matplotlib(*args, **kwargs):
import matplotlib.pylab as plt
import numpy
import collections
special_keys = ("xlim", "ylim", "xlabel", "ylabel", "legend", "x", "y", "filename")
plot_opts = {key: value for key, value in kwargs.items()
if key not in special_keys}
if 'axes.prop_cycle' in plt.rcParams.keys():
color_cycle = [prop['color'] for prop in plt.rcParams['axes.prop_cycle']]
else:
color_cycle = plt.rcParams['axes.color_cycle']
if "y" in kwargs.keys() and isinstance(kwargs["y"], str):
kwargs["y"] = (kwargs["y"], )
fig = plt.figure()
ax = fig.add_subplot(111)
if len(args) > 1 and isinstance(args[1], str):
if len(args) % 2 == 0:
observers = [(args[i], args[i + 1]) for i in range(0, len(args), 2)]
else:
observers = [(args[i], args[i + 1]) for i in range(0, len(args) - 1, 2)]
observers.append((args[-1], None))
else:
observers = [(obs, None) for obs in args]
color_map = {}
data, xidx = None, 0
for obs, fmt in observers:
if isinstance(obs, types.FunctionType):
if data is None:
raise ValueError("A function must be given after an observer.")
y = [obs(xi) for xi in data[xidx]]
opts = plot_opts.copy()
label = obs.__name__
opts["label"] = label
if label not in color_map.keys():
color_map[label] = color_cycle[len(color_map) % len(color_cycle)]
opts["label"] = label
opts["color"] = color_map[label]
if fmt is None:
ax.plot(data[xidx], y, **opts)
else:
ax.plot(data[xidx], y, fmt, **opts)
continue
data = numpy.array(obs.data()).T
try:
err = obs.error().T
except AttributeError:
err = None
if "x" in kwargs.keys():
targets = [sp.serial() for sp in obs.targets()]
if kwargs["x"] not in targets:
raise ValueError("[{0}] given as 'x' was not found.".fomrat(kwargs["x"]))
xidx = targets.index(kwargs["x"]) + 1
else:
xidx = 0
if "y" in kwargs.keys():
targets = [sp.serial() for sp in obs.targets()]
targets = [(targets.index(serial), serial)
for serial in kwargs["y"] if serial in targets]
else:
targets = [sp.serial() for sp in obs.targets()]
targets = list(enumerate(targets))
# targets.sort(key=lambda x: x[1])
for idx, serial in targets:
opts = plot_opts.copy()
label = serial
if len(label) > 0 and label[0] == '_':
label = '$\_$' + label[1:] # XXX: lazy escaping for a special character
if label not in color_map.keys():
color_map[label] = color_cycle[len(color_map) % len(color_cycle)]
opts["label"] = label
opts["color"] = color_map[label]
if err is None:
if fmt is None:
ax.plot(data[xidx], data[idx + 1], **opts)
else:
ax.plot(data[xidx], data[idx + 1], fmt, **opts)
else:
if fmt is None:
ax.errorbar(data[xidx], data[idx + 1],
xerr=(None if xidx == 0 else err[xidx]), yerr=err[idx + 1],
**opts)
else:
ax.errorbar(data[xidx], data[idx + 1],
xerr=(None if xidx == 0 else err[xidx]), yerr=err[idx + 1],
fmt=fmt, **opts)
# if "legend" not in kwargs.keys() or kwargs["legend"]:
# ax.legend(*ax.get_legend_handles_labels(), loc="best", shadow=True)
if "legend" not in kwargs.keys() or (kwargs["legend"] is not None and kwargs["legend"] is not False):
legend_opts = {"loc": "best", "shadow": True}
if "legend" in kwargs and isinstance(kwargs["legend"], dict):
legend_opts.update(kwargs["legend"])
ax.legend(*ax.get_legend_handles_labels(), **legend_opts)
if "xlabel" in kwargs.keys():
ax.set_xlabel(kwargs["xlabel"])
elif "x" in kwargs.keys():
ax.set_xlabel("The Number of Molecules [{0}]".format(kwargs["x"]))
else:
ax.set_xlabel("Time")
if "ylabel" in kwargs.keys():
ax.set_ylabel(kwargs["ylabel"])
else:
ax.set_ylabel("The Number of Molecules")
if "xlim" in kwargs.keys():
ax.set_xlim(kwargs["xlim"])
if "ylim" in kwargs.keys():
ax.set_ylim(kwargs["ylim"])
if "filename" in kwargs.keys():
plt.savefig(kwargs["filename"])
else:
plt.show() | [
"\n Generate a plot from NumberObservers and show it on IPython notebook\n with matplotlib.\n\n Parameters\n ----------\n obs : NumberObserver (e.g. FixedIntervalNumberObserver)\n fmt : str, optional\n opt : dict, optional\n matplotlib plot options.\n\n Examples\n --------\n >>> plot_number_observer(obs1)\n >>> plot_number_observer(obs1, 'o')\n >>> plot_number_observer(obs1, obs2, obs3, {'linewidth': 2})\n >>> plot_number_observer(obs1, 'k-', obs2, 'k--')\n\n "
] |
Please provide a description of the function:def plot_number_observer_with_nya(obs, config=None, width=600, height=400, x=None, y=None, to_png=False):
config = config or {}
from IPython.core.display import display, HTML
import numpy
color_scale = default_color_scale(config=config)
data1, data2 = [], []
data = numpy.array(obs.data())
if x is None:
xidx = 0
else:
tmp = [sp.serial() for sp in obs.targets()]
if x not in tmp:
raise ValueError("[{0}] given as 'x' was not found.".fomrat(x))
xidx = tmp.index(x) + 1
if y is None:
targets = [sp.serial() for sp in obs.targets()]
targets = list(enumerate(targets))
# targets.sort(key=lambda x: x[1])
else:
if isinstance(y, str):
y = (y, )
targets = [sp.serial() for sp in obs.targets()]
targets = [(targets.index(serial), serial)
for serial in y if serial in targets]
for line in data:
tmp = {"x": line[xidx]}
for i, (idx, serial) in enumerate(targets):
tmp["y{0}".format(i + 1)] = line[idx + 1]
data1.append(tmp)
for i, (idx, serial) in enumerate(targets):
label = serial
tmp = {"type": "line", "data": "data1",
"options": {"x": "x", "y": "y{0}".format(i + 1),
"stroke_width": 2, "title": label,
"color": color_scale.get_color(label)}}
data2.append(tmp)
xmin, xmax = data.T[xidx].min(), data.T[xidx].max()
yview = data.T.take([idx + 1 for idx, serial in targets], axis=0)
ymin, ymax = yview.min(), yview.max()
model = {
"data": {"data1": data1},
"panes": [{"type": 'rectangular',
"diagrams": data2,
"options": {"width": width, "height": height, "xrange": [xmin, xmax],
"yrange": [ymin, ymax], "legend": True, "zoom": True}}]}
model_id = 'viz{0:s}'.format(str(uuid.uuid4()))
display(HTML(generate_html(
{'model': json.dumps(model), 'model_id': model_id, 'to_png': json.dumps(to_png)},
'templates/nya.tmpl'))) | [
"\n Generate a plot from NumberObservers and show it on IPython notebook\n with nyaplot.\n\n Parameters\n ----------\n obs : NumberObserver (e.g. FixedIntervalNumberObserver)\n config : dict, optional\n A config data for coloring. The dictionary will be updated during this plot.\n width : int, optional\n height : int, optional\n x : str, optional\n A serial for x-axis. If None, x-axis corresponds time.\n y : str or list of str\n Serials for y axis.\n\n "
] |
Please provide a description of the function:def __parse_world(
world, radius=None, species_list=None, max_count=None,
predicator=None):
from ecell4_base.core import Species
if species_list is None:
species_list = [
p.species().serial() for pid, p in world.list_particles()]
species_list = sorted(
set(species_list), key=species_list.index) # XXX: pick unique ones
species = []
for name in species_list:
particles = [
{'pos': p.position(), 'r': p.radius()}
for pid, p in world.list_particles(Species(name))
if predicator is None or predicator(pid, p)]
# particles = [
# {'pos': p.position(), 'r': p.radius()}
# for pid, p in world.list_particles()
# if (p.species().serial() == name and
# (predicator is None or predicator(pid, p)))]
if len(particles) == 0:
continue
if max_count is not None and len(particles) > max_count:
particles = random.sample(particles, max_count)
data = {
'x': [p['pos'][0] for p in particles],
'y': [p['pos'][1] for p in particles],
'z': [p['pos'][2] for p in particles]
}
# assume that all particles belong to one species have the same radius
r = max([p['r'] for p in particles]) if radius is None else radius
r = r if r > 0 else min(world.edge_lengths()) * 0.005
size = 30.0 / max(world.edge_lengths()) * r
species.append({
'name': name,
'data': data,
'size': size
})
return species | [
"\n Private function to parse world. Return infomation about particles\n (name, coordinates and particle size) for each species.\n\n "
] |
Please provide a description of the function:def plot_movie_with_elegans(
worlds, radius=None, width=500, height=500, config=None, grid=False,
species_list=None):
config = config or {}
from IPython.core.display import display, HTML
from jinja2 import Template
data = {}
sizes = {}
for i, world in enumerate(worlds):
species = __parse_world(world, radius, species_list)
for species_info in species:
if data.get(species_info['name']) is None:
data[species_info['name']] = []
data[species_info['name']].append({
'df': species_info['data'],
't': i
})
sizes[species_info['name']] = species_info['size']
options = {
'player': True,
'autorange': False,
'space_mode': 'wireframe',
'grid': grid,
'range': __get_range_of_world(worlds[0])
}
model_id = '"movie' + str(uuid.uuid4()) + '"'
color_scale = default_color_scale(config=config)
display(HTML(generate_html({
'model_id': model_id,
'names': json.dumps(list(data.keys())),
'data': json.dumps(list(data.values())),
'colors': json.dumps([color_scale.get_color(name)
for name in data.keys()]),
'sizes': json.dumps([sizes[name] for name in data.keys()]),
'options': json.dumps(options)
}, 'templates/movie.tmpl'))) | [
"\n Generate a movie from received instances of World and show them\n on IPython notebook.\n\n Parameters\n ----------\n worlds : list of World\n Worlds to render.\n radius : float, default None\n If this value is set, all particles in the world will be rendered\n as if their radius are the same.\n width : float, default 500\n Width of the plotting area.\n height : float, default 500\n Height of the plotting area.\n config : dict, default {}\n Dict for configure default colors. Its values are colors unique\n to each speices. The dictionary will be updated during this plot.\n Colors included in config dict will never be used for other speices.\n species_list : array of string, default None\n If set, plot_movie will not search the list of species\n\n "
] |
Please provide a description of the function:def plot_world_with_elegans(
world, radius=None, width=350, height=350, config=None, grid=True,
wireframe=False, species_list=None, debug=None, max_count=1000,
camera_position=(-22, 23, 32), camera_rotation=(-0.6, 0.5, 0.6),
return_id=False, predicator=None):
config = config or {}
from IPython.core.display import display, HTML
from .simulation import load_world
if isinstance(world, str):
world = load_world(world)
species = __parse_world(world, radius, species_list, max_count, predicator)
color_scale = default_color_scale(config=config)
plots = []
for species_info in species:
plots.append({
'type': 'Particles',
'data': species_info['data'],
'options': {
'name': species_info['name'],
'color': color_scale.get_color(species_info['name']),
'size': species_info['size']
}
})
if debug is not None:
data = {'type': [], 'x': [], 'y': [], 'z': [], 'options': []}
for obj in debug:
for k, v in obj.items():
data[k].append(v)
plots.append({
'type': 'DebugObject',
'data': data,
'options': {}
})
model = {
'plots': plots,
'options': {
'world_width': width,
'world_height': height,
'range': __get_range_of_world(world),
'autorange': False,
'grid': grid,
'save_image': True
# 'save_image': False
}
}
if wireframe:
model['options']['space_mode'] = 'wireframe'
model_id = '"viz' + str(uuid.uuid4()) + '"'
display(HTML(generate_html(
{'model': json.dumps(model), 'model_id': model_id,
'px': camera_position[0], 'py': camera_position[1], 'pz': camera_position[2],
'rx': camera_rotation[0], 'ry': camera_rotation[1], 'rz': camera_rotation[2]},
'templates/particles.tmpl')))
if return_id:
return model_id | [
"\n Generate a plot from received instance of World and show it on IPython notebook.\n This method returns the instance of dict that indicates color setting\n for each speices. You can use the dict as the parameter of plot_world,\n in order to use the same colors in another plot.\n\n Parameters\n ----------\n world : World or str\n World or a HDF5 filename to render.\n radius : float, default None\n If this value is set, all particles in the world will be rendered\n as if their radius are the same.\n width : float, default 350\n Width of the plotting area.\n height : float, default 350\n Height of the plotting area.\n config : dict, default {}\n Dict for configure default colors. Its values are colors unique\n to each speices. The dictionary will be updated during this plot.\n Colors included in config dict will never be used for other speices.\n species_list : array of string, default None\n If set, plot_world will not search the list of species.\n max_count : Integer, default 1000\n The maximum number of particles to show for each species.\n debug : array of dict, default []\n *** EXPERIMENTAL IMPRIMENTATION ***\n Example:\n >> [{'type': 'box', 'x': 10, 'y': 10, 'z': 10, 'options': {'width': 1, 'height': 1}}]\n type: 'box', 'plane', 'sphere', and 'cylinder'\n x, y, z: float\n options:\n box: width, height, depth\n plane: width, height\n sphere: radius\n cylinder: radius, height\n camera_position : tuple, default (-22, 23, 32)\n camera_rotaiton : tuple, default (-0.6, 0.5, 0.6)\n Initial position and rotation of camera.\n return_id : bool, default False\n If True, return a model id, which is required for `to_png` function.\n\n "
] |
Please provide a description of the function:def plot_dense_array(
arr, length=256, ranges=None, colors=("#a6cee3", "#fb9a99"), grid=False, camera_position=(-22, 23, 32), camera_rotation=(-0.6, 0.5, 0.6)):
import numpy
from PIL import Image
from base64 import b64encode
from tempfile import TemporaryFile
from math import sqrt
from IPython.core.display import display, HTML
from functools import reduce
# unfold 3d box into 2d grid
def unfold(arr, dtype=None):
dtype = arr.dtype if dtype is None else dtype
i = sqrt(arr.shape[2])
f_per_row, f_per_column = i, i
# single channel (luminance)
try:
depth, height, width = arr.shape[:]
arr = arr.reshape((depth*height, width))
new_arr = numpy.empty((height*f_per_column, width*f_per_row), dtype=dtype)
# multi channel (RGB)
except ValueError:
depth, height, width, channel = arr.shape
arr = arr.reshape((depth*height, width, channel))
new_arr = numpy.empty((height*f_per_column, width*f_per_row, channel), dtype=dtype)
for h in range(0, int(f_per_column)):
for w in range(0, int(f_per_row)):
val = arr[(f_per_row*h+w)*height : (f_per_row*h+w+1)*height]
new_arr[h*height : (h+1)*height, w*width : (w+1)*width] = val
return new_arr
def hist(arr, ranges, length, color):
# create sample
hist, bins = numpy.histogramdd(arr, bins=tuple([length]*3), range=tuple(ranges))
# standardize value
colors = [int(color[1:][i*2:(i+1)*2], 16) for i in range(0, 3)]
len1d = reduce(lambda val, memo: memo*val, hist.shape, 1)
arr = [((val/numpy.max(hist))*(hist.copy())).reshape(len1d) for val in colors]
# add blue and green
return numpy.array(arr, dtype=numpy.int8).transpose().reshape(tuple(list(hist.shape) + [3]))
ranges = ranges if ranges is not None else [(numpy.min(a), numpy.max(a)) for a in numpy.array(arr).reshape((sum(map(len, arr)), 3)).transpose()]
hist_arr = [hist(a, ranges, length, colors[i]) for i, a in enumerate(arr)]
compressed = reduce(lambda p, n: p+n, hist_arr)
img = Image.fromarray(unfold(compressed), "RGB")
fp = TemporaryFile("r+b")
img.save(fp, "PNG")
fp.seek(0)
encoded_url = "data:image/png;base64," + b64encode(fp.read())
model = {
'plots': [{
'type': 'Volume',
'data': encoded_url,
'options': {
'name': "",
'width': length,
'height': length,
'depth': length,
'f_per_row': sqrt(length),
'f_per_column': sqrt(length)
}
}],
'options': {
'grid': grid,
'save_image': True
}
}
model_id = '"viz' + str(uuid.uuid4()) + '"'
display(HTML(generate_html(
{'model': json.dumps(model), 'model_id': model_id,
'px': camera_position[0], 'py': camera_position[1], 'pz': camera_position[2],
'rx': camera_rotation[0], 'ry': camera_rotation[1], 'rz': camera_rotation[2]},
'templates/particles.tmpl'))) | [
"\n Volume renderer\n\n Parameters\n ----------\n arr : list of numpy.array\n i.e. [array([[1,2,3], [2,3,4]]), array([[1,2,3]])]\n ranges : list of tuple\n ranges for x, y, and z axis\n i.e. [(-100, 100), (-100, 100), (-100, 100)]\n colors : list of string\n colors for species\n length : int\n length of the texture\n 256 or 64\n camera_position : tuple, default (-22, 23, 32)\n camera_rotaiton : tuple, default (-0.6, 0.5, 0.6)\n Initial position and rotation of camera.\n\n "
] |
Please provide a description of the function:def generate_html(keywords, tmpl_path, package_name='ecell4.util'):
from jinja2 import Template
import pkgutil
template = Template(pkgutil.get_data(package_name, tmpl_path).decode())
# path = os.path.abspath(os.path.dirname(__file__)) + tmpl_path
# template = Template(open(path).read())
html = template.render(**keywords)
return html | [
"\n Generate static html file from JSON model and its own id.\n\n Parameters\n ----------\n model : dict\n JSON model from which ecell4.viz generates a plot.\n model_id : string\n Unique id for the plot.\n\n Returns\n -------\n html :\n A HTML object\n "
] |
Please provide a description of the function:def plot_trajectory_with_elegans(
obs, width=350, height=350, config=None, grid=True, wireframe=False,
max_count=10, camera_position=(-22, 23, 32), camera_rotation=(-0.6, 0.5, 0.6),
plot_range=None):
config = config or {}
from IPython.core.display import display, HTML
color_scale = default_color_scale(config=config)
plots = []
xmin, xmax, ymin, ymax, zmin, zmax = None, None, None, None, None, None
data = obs.data()
if max_count is not None and len(data) > max_count:
data = random.sample(data, max_count)
for i, y in enumerate(data):
xarr, yarr, zarr = [], [], []
for pos in y:
xarr.append(pos[0])
yarr.append(pos[1])
zarr.append(pos[2])
if xmin is None:
if len(y) > 0:
xmin, xmax = min(xarr), max(xarr)
ymin, ymax = min(yarr), max(yarr)
zmin, zmax = min(zarr), max(zarr)
else:
xmin, xmax = min([xmin] + xarr), max([xmax] + xarr)
ymin, ymax = min([ymin] + yarr), max([ymax] + yarr)
zmin, zmax = min([zmin] + zarr), max([zmax] + zarr)
name = str(i + 1)
c = color_scale.get_color(name)
plots.append({
'type': 'Line',
'data': {'x': xarr, 'y': yarr, 'z': zarr},
'options': {
'name': name,
'thickness': 2, # XXX: 'thikness' doesn't work on Windows
'colors': [c, c]}
})
if plot_range is None:
if xmin is None:
xmin, xmax, ymin, ymax, zmin, zmax = 0, 1, 0, 1, 0, 1
max_length = max(xmax - xmin, ymax - ymin, zmax - zmin)
rangex = [(xmin + xmax - max_length) * 0.5,
(xmin + xmax + max_length) * 0.5]
rangey = [(ymin + ymax - max_length) * 0.5,
(ymin + ymax + max_length) * 0.5]
rangez = [(zmin + zmax - max_length) * 0.5,
(zmin + zmax + max_length) * 0.5]
wrange = {'x': rangex, 'y': rangey, 'z': rangez}
else:
wrange = __get_range_of_trajectories(None, plot_range)
model = {
'plots': plots,
'options': {
'world_width': width,
'world_height': height,
'range': wrange,
'autorange': False,
'grid': grid,
'save_image': True
}
}
if wireframe:
model['options']['space_mode'] = 'wireframe'
model_id = '"viz' + str(uuid.uuid4()) + '"'
display(HTML(generate_html(
{'model': json.dumps(model), 'model_id': model_id,
'px': camera_position[0], 'py': camera_position[1], 'pz': camera_position[2],
'rx': camera_rotation[0], 'ry': camera_rotation[1], 'rz': camera_rotation[2]},
'templates/particles.tmpl'))) | [
"\n Generate a plot from received instance of TrajectoryObserver and show it\n on IPython notebook.\n\n Parameters\n ----------\n obs : TrajectoryObserver\n TrajectoryObserver to render.\n width : float, default 350\n Width of the plotting area.\n height : float, default 350\n Height of the plotting area.\n config : dict, default {}\n Dict for configure default colors. Its values are colors unique\n to each particle. The dictionary will be updated during this plot.\n Colors included in config dict will never be used for other particles.\n camera_position : tuple, default (-30, 31, 42)\n camera_rotaiton : tuple, default (-0.6, 0.5, 0.6)\n Initial position and rotation of camera.\n plot_range : tuple, default None\n Range for plotting. A triplet of pairs suggesting (rangex, rangey, rangez).\n If None, the minimum volume containing all the trajectories is used.\n\n "
] |
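An illustrative call, assuming `obs` is a TrajectoryObserver that has already been filled by a simulator run (the observer setup itself is outside this snippet); only keyword arguments from the signature above are used.

# obs: a TrajectoryObserver already passed to a simulator's run() (assumed)
plot_trajectory_with_elegans(
    obs, max_count=5,
    plot_range=((0, 1), (0, 1), (0, 1)),   # fix the view to the unit cube
    camera_position=(-22, 23, 32), camera_rotation=(-0.6, 0.5, 0.6))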
Please provide a description of the function:def plot_world_with_matplotlib(
world, marker_size=3, figsize=6, grid=True,
wireframe=False, species_list=None, max_count=1000, angle=None,
legend=True, noaxis=False, **kwargs):
import matplotlib.pyplot as plt
if species_list is None:
species_list = [p.species().serial() for pid, p in world.list_particles()]
species_list = sorted(
set(species_list), key=species_list.index) # XXX: pick unique ones
fig, ax = __prepare_mplot3d_with_matplotlib(
__get_range_of_world(world), figsize, grid, wireframe, angle, noaxis)
scatters, plots = __scatter_world_with_matplotlib(
world, ax, species_list, marker_size, max_count, **kwargs)
# if legend:
# ax.legend(handles=plots, labels=species_list, loc='best', shadow=True)
if legend is not None and legend is not False:
legend_opts = {"loc": "best", "shadow": True}
if isinstance(legend, dict):
legend_opts.update(legend)
ax.legend(handles=plots, labels=species_list, **legend_opts)
plt.show() | [
"\n Generate a plot from received instance of World and show it on IPython notebook.\n\n Parameters\n ----------\n world : World or str\n World to render. A HDF5 filename is also acceptable.\n marker_size : float, default 3\n Marker size for all species. Size is passed to scatter function\n as argument, s=(2 ** marker_size).\n figsize : float, default 6\n Size of the plotting area. Given in inch.\n species_list : array of string, default None\n If set, plot_world will not search the list of species.\n max_count : Integer, default 1000\n The maximum number of particles to show for each species.\n None means no limitation.\n angle : tuple, default None\n A tuple of view angle which is given as (azim, elev, dist).\n If None, use default assumed to be (-60, 30, 10).\n legend : bool, default True\n\n "
] |
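A short, hedged example; `world` is assumed to be an already populated ecell4 World, and the species serials 'A' and 'B' are placeholders.

# world: an ecell4 World instance holding particles (assumed)
plot_world_with_matplotlib(
    world, marker_size=4, figsize=8,
    species_list=['A', 'B'],          # restrict the plot to these serials
    max_count=500, angle=(-60, 30, 10),
    legend={'loc': 'upper right'})    # a dict is merged into the matplotlib legend options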
Please provide a description of the function:def plot_trajectory_with_matplotlib(
obs, max_count=10, figsize=6, legend=True, angle=None,
wireframe=False, grid=True, noaxis=False, plot_range=None, **kwargs):
import matplotlib.pyplot as plt
data = obs.data()
if max_count is not None and len(data) > max_count:
data = random.sample(data, max_count)
fig, ax = __prepare_mplot3d_with_matplotlib(
__get_range_of_trajectories(data, plot_range),
figsize, grid, wireframe, angle, noaxis)
lines = []
for i, y in enumerate(data):
xarr, yarr, zarr = [], [], []
for pos in y:
xarr.append(pos[0])
yarr.append(pos[1])
zarr.append(pos[2])
lines.append((xarr, yarr, zarr))
__plot_trajectory_with_matplotlib(lines, ax, **kwargs)
# if legend:
# ax.legend(loc='best', shadow=True)
if legend is not None and legend is not False:
legend_opts = {"loc": "best", "shadow": True}
if isinstance(legend, dict):
legend_opts.update(legend)
ax.legend(**legend_opts)
plt.show() | [
"\n Generate a plot from received instance of TrajectoryObserver and show it\n on IPython notebook.\n\n Parameters\n ----------\n obs : TrajectoryObserver\n TrajectoryObserver to render.\n max_count : Integer, default 10\n The maximum number of particles to show. If None, show all.\n figsize : float, default 6\n Size of the plotting area. Given in inch.\n angle : tuple, default None\n A tuple of view angle which is given as (azim, elev, dist).\n If None, use default assumed to be (-60, 30, 10).\n legend : bool, default True\n plot_range : tuple, default None\n Range for plotting. A triplet of pairs suggesting (rangex, rangey, rangez).\n If None, the minimum volume containing all the trajectories is used.\n\n "
] |
Please provide a description of the function:def plot_trajectory2d_with_matplotlib(
obs, plane='xy', max_count=10, figsize=6, legend=True,
wireframe=False, grid=True, noaxis=False, plot_range=None, **kwargs):
import matplotlib.pyplot as plt
plane = plane.lower()
if len(plane) != 2 or plane[0] not in ('x', 'y', 'z') or plane[1] not in ('x', 'y', 'z'):
raise ValueError("invalid 'plane' argument [{}] was given.".format(repr(plane)))
xidx = 0 if plane[0] == 'x' else (1 if plane[0] == 'y' else 2)
yidx = 0 if plane[1] == 'x' else (1 if plane[1] == 'y' else 2)
data = obs.data()
if max_count is not None and len(data) > max_count:
data = random.sample(data, max_count)
wrange = __get_range_of_trajectories(data, plot_range)
wrange = (wrange['x'], wrange['y'], wrange['z'])
wrange = {'x': wrange[xidx], 'y': wrange[yidx]}
fig, ax = __prepare_plot_with_matplotlib(
wrange, figsize, grid, wireframe, noaxis)
ax.set_xlabel(plane[0].upper())
ax.set_ylabel(plane[1].upper())
lines = []
for i, y in enumerate(data):
xarr, yarr, zarr = [], [], []
for pos in y:
xarr.append(pos[xidx])
yarr.append(pos[yidx])
lines.append((xarr, yarr))
__plot_trajectory2d_with_matplotlib(lines, ax, **kwargs)
# if legend:
# ax.legend(loc='best', shadow=True)
if legend is not None and legend is not False:
legend_opts = {"loc": "best", "shadow": True}
if isinstance(legend, dict):
legend_opts.update(legend)
ax.legend(**legend_opts)
plt.show() | [
"\n Make a 2D plot from received instance of TrajectoryObserver and show it\n on IPython notebook.\n\n Parameters\n ----------\n obs : TrajectoryObserver\n TrajectoryObserver to render.\n plane : str, default 'xy'\n 'xy', 'yz', 'zx'.\n max_count : Integer, default 10\n The maximum number of particles to show. If None, show all.\n figsize : float, default 6\n Size of the plotting area. Given in inch.\n legend : bool, default True\n plot_range : tuple, default None\n Range for plotting. A triplet of pairs suggesting (rangex, rangey, rangez).\n If None, the minimum volume containing all the trajectories is used.\n\n "
] |
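A usage sketch projecting trajectories onto the z-x plane; `obs` is again an assumed, pre-filled TrajectoryObserver.

# obs: a TrajectoryObserver already filled by a simulator run (assumed)
plot_trajectory2d_with_matplotlib(obs, plane='zx', max_count=5, figsize=8)
# any plane other than a pair of 'x', 'y', 'z' raises ValueError, e.g. plane='xw'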
Please provide a description of the function:def plot_movie_of_trajectory2d_with_matplotlib(
obs, plane='xy', figsize=6, grid=True,
wireframe=False, max_count=None, angle=None, noaxis=False,
interval=0.16, repeat_delay=3000, stride=1, rotate=None,
legend=True, output=None, crf=10, bitrate='1M', plot_range=None, **kwargs):
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from IPython.display import display, HTML
from ecell4_base.core import Species, FixedIntervalHDF5Observer
from .simulation import load_world
import math
# print("Taking all data ...")
plane = plane.lower()
if len(plane) != 2 or plane[0] not in ('x', 'y', 'z') or plane[1] not in ('x', 'y', 'z'):
raise ValueError("invalid 'plane' argument [{}] was given.".format(repr(plane)))
xidx = 0 if plane[0] == 'x' else (1 if plane[0] == 'y' else 2)
yidx = 0 if plane[1] == 'x' else (1 if plane[1] == 'y' else 2)
data = obs.data()
if max_count is not None and len(data) > max_count:
data = random.sample(data, max_count)
lines = []
num_frames = 0
for i, y in enumerate(data):
xarr, yarr, zarr = [], [], []
for pos in y:
xarr.append(pos[xidx])
yarr.append(pos[yidx])
lines.append((xarr, yarr))
num_frames = max(num_frames, len(y))
num_frames = int(math.ceil(float(num_frames) / stride))
# print("Start preparing mplot3d ...")
wrange = __get_range_of_trajectories(data, plot_range)
wrange = (wrange['x'], wrange['y'], wrange['z'])
wrange = {'x': wrange[xidx], 'y': wrange[yidx]}
fig, ax = __prepare_plot_with_matplotlib(
wrange, figsize, grid, wireframe, noaxis)
ax.set_xlabel(plane[0].upper())
ax.set_ylabel(plane[1].upper())
def _update_plot(i, plots, lines):
upto = i * stride
for plot, line in zip(plots, lines):
plot.set_xdata(line[0][: upto])
plot.set_ydata(line[1][: upto])
fig.canvas.draw()
# print("Start making animation ...")
plots = __plot_trajectory2d_with_matplotlib(lines, ax, 0, **kwargs)
# if legend:
# ax.legend(loc='best', shadow=True)
if legend is not None and legend is not False:
legend_opts = {"loc": "best", "shadow": True}
if isinstance(legend, dict):
legend_opts.update(legend)
ax.legend(**legend_opts)
ani = animation.FuncAnimation(
fig, _update_plot, fargs=(plots, lines),
frames=num_frames, interval=interval, blit=False)
plt.close(ani._fig)
# print("Start generating a movie ...")
display_anim(ani, output, fps=1.0 / interval, crf=crf, bitrate=bitrate) | [
"\n Generate a move from the received list of instances of World,\n and show it on IPython notebook. This function may require ffmpeg.\n\n Parameters\n ----------\n worlds : list or FixedIntervalHDF5Observer\n A list of Worlds to render.\n plane : str, default 'xy'\n 'xy', 'yz', 'zx'.\n figsize : float, default 6\n Size of the plotting area. Given in inch.\n max_count : Integer, default None\n The maximum number of particles to show for each species.\n None means no limitation.\n interval : Integer, default 0.16\n Parameters for matplotlib.animation.ArtistAnimation.\n stride : Integer, default 1\n Stride per frame.\n legend : bool, default True\n output : str, default None\n An output filename. '.webm' or '.mp4' is only accepted.\n If None, display a movie on IPython Notebook.\n crf : int, default 10\n The CRF value can be from 4-63. Lower values mean better quality.\n bitrate : str, default '1M'\n Target bitrate\n plot_range : tuple, default None\n Range for plotting. A triplet of pairs suggesting (rangex, rangey, rangez).\n If None, the minimum volume containing all the trajectories is used.\n\n "
] |
Please provide a description of the function:def plot_movie_of_trajectory_with_matplotlib(
obs, figsize=6, grid=True,
wireframe=False, max_count=None, angle=None, noaxis=False,
interval=0.16, repeat_delay=3000, stride=1, rotate=None,
legend=True, output=None, crf=10, bitrate='1M', plot_range=None, **kwargs):
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from ecell4_base.core import Species, FixedIntervalHDF5Observer
from .simulation import load_world
import math
# print("Taking all data ...")
data = obs.data()
if max_count is not None and len(data) > max_count:
data = random.sample(data, max_count)
lines = []
num_frames = 0
for i, y in enumerate(data):
xarr, yarr, zarr = [], [], []
for pos in y:
xarr.append(pos[0])
yarr.append(pos[1])
zarr.append(pos[2])
lines.append((xarr, yarr, zarr))
num_frames = max(num_frames, len(y))
num_frames = int(math.ceil(float(num_frames) / stride))
# print("Start preparing mplot3d ...")
fig, ax = __prepare_mplot3d_with_matplotlib(
__get_range_of_trajectories(data, plot_range),
figsize, grid, wireframe, angle, noaxis)
def _update_plot(i, plots, lines):
upto = i * stride
for plot, line in zip(plots, lines):
plot.set_data(line[0][: upto], line[1][: upto])
plot.set_3d_properties(line[2][: upto])
if rotate is not None:
ax.elev += rotate[0]
ax.azim += rotate[1]
fig.canvas.draw()
# print("Start making animation ...")
plots = __plot_trajectory_with_matplotlib(lines, ax, 0, **kwargs)
# if legend:
# ax.legend(loc='best', shadow=True)
if legend is not None and legend is not False:
legend_opts = {"loc": "best", "shadow": True}
if isinstance(legend, dict):
legend_opts.update(legend)
ax.legend(**legend_opts)
ani = animation.FuncAnimation(
fig, _update_plot, fargs=(plots, lines),
frames=num_frames, interval=interval, blit=False)
plt.close(ani._fig)
# print("Start generating a movie ...")
display_anim(ani, output, fps=1.0 / interval, crf=crf, bitrate=bitrate) | [
"\n Generate a move from the received list of instances of World,\n and show it on IPython notebook. This function may require ffmpeg.\n\n Parameters\n ----------\n worlds : list or FixedIntervalHDF5Observer\n A list of Worlds to render.\n marker_size : float, default 3\n Marker size for all species. Size is passed to scatter function\n as argument, s=(2 ** marker_size).\n figsize : float, default 6\n Size of the plotting area. Given in inch.\n max_count : Integer, default None\n The maximum number of particles to show for each species.\n None means no limitation.\n angle : tuple, default None\n A tuple of view angle which is given as (azim, elev, dist).\n If None, use default assumed to be (-60, 30, 10).\n interval : Integer, default 0.16\n Parameters for matplotlib.animation.ArtistAnimation.\n stride : Integer, default 1\n Stride per frame.\n rotate : tuple, default None\n A pair of rotation angles, elev and azim, for animation.\n None means no rotation, same as (0, 0).\n legend : bool, default True\n output : str, default None\n An output filename. '.webm' or '.mp4' is only accepted.\n If None, display a movie on IPython Notebook.\n crf : int, default 10\n The CRF value can be from 4-63. Lower values mean better quality.\n bitrate : str, default '1M'\n Target bitrate\n plot_range : tuple, default None\n Range for plotting. A triplet of pairs suggesting (rangex, rangey, rangez).\n If None, the minimum volume containing all the trajectories is used.\n\n "
] |
Please provide a description of the function:def plot_world_with_attractive_mpl(
world, marker_size=6, figsize=6, grid=True,
wireframe=False, species_list=None, max_count=1000, angle=None,
legend=True, noaxis=False, whratio=1.33, scale=1.0, **kwargs):
import matplotlib.pyplot as plt
if species_list is None:
species_list = [p.species().serial() for pid, p in world.list_particles()]
species_list = sorted(
set(species_list), key=species_list.index) # XXX: pick unique ones
fig, ax = __prepare_mplot3d_with_attractive_mpl(
__get_range_of_world(world, scale), figsize, grid, wireframe, angle,
noaxis, whratio)
scatters, plots = __scatter_world_with_attractive_mpl(
world, ax, species_list, marker_size, max_count, scale, **kwargs)
# if legend:
# ax.legend(handles=plots, labels=species_list, loc='best', shadow=True)
if legend is not None and legend is not False:
legend_opts = {'loc': 'center left', 'bbox_to_anchor': (1.0, 0.5),
'shadow': False, 'frameon': False, 'fontsize': 'x-large',
'scatterpoints': 1}
if isinstance(legend, dict):
legend_opts.update(legend)
ax.legend(**legend_opts)
# ax.legend(handles=plots, labels=species_list, **legend_opts)
plt.show() | [
"\n Generate a plot from received instance of World and show it on IPython notebook.\n\n Parameters\n ----------\n world : World or str\n World to render. A HDF5 filename is also acceptable.\n marker_size : float, default 3\n Marker size for all species. Size is passed to scatter function\n as argument, s=(2 ** marker_size).\n figsize : float, default 6\n Size of the plotting area. Given in inch.\n species_list : array of string, default None\n If set, plot_world will not search the list of species.\n max_count : Integer, default 1000\n The maximum number of particles to show for each species.\n None means no limitation.\n angle : tuple, default None\n A tuple of view angle which is given as (azim, elev, dist).\n If None, use default assumed to be (-60, 30, 10).\n legend : bool, default True\n whratio : float, default 1.33\n A ratio between figure width and height.\n Customize this to keep a legend within the figure.\n scale : float, default 1\n A length-scaling factor\n\n "
] |
Please provide a description of the function:def plot_movie_with_attractive_mpl(
worlds, marker_size=6, figsize=6, grid=True,
wireframe=False, species_list=None, max_count=None, angle=None, noaxis=False,
interval=0.16, repeat_delay=3000, stride=1, rotate=None,
legend=True, whratio=1.33, scale=1, output=None, crf=10, bitrate='1M', **kwargs):
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from ecell4_base.core import Species, FixedIntervalHDF5Observer
from .simulation import load_world
import os.path
# print("Start generating species_list ...")
if isinstance(worlds, FixedIntervalHDF5Observer):
obs = worlds
worlds = []
for i in range(0, obs.num_steps(), stride):
filename = obs.filename(i)
if os.path.isfile(filename):
worlds.append(load_world(filename))
            elif len(worlds) > 0:
worlds.append(worlds[-1])
else:
worlds = worlds[:: stride]
if species_list is None:
species_list = []
for world in worlds:
species_list.extend(
[p.species().serial() for pid, p in world.list_particles()])
species_list = sorted(
set(species_list), key=species_list.index) # XXX: pick unique ones
# print("Start preparing mplot3d ...")
fig, ax = __prepare_mplot3d_with_attractive_mpl(
__get_range_of_world(worlds[0], scale), figsize, grid, wireframe, angle,
noaxis, whratio)
from mpl_toolkits.mplot3d.art3d import juggle_axes
def _update_plot(i, scatters, worlds, species_list):
world = worlds[i]
for i, name in enumerate(species_list):
xs, ys, zs = [], [], []
particles = world.list_particles_exact(Species(name))
if max_count is not None and len(particles) > max_count:
particles = random.sample(particles, max_count)
for pid, p in particles:
pos = p.position() * scale
xs.append(pos[0])
ys.append(pos[1])
zs.append(pos[2])
scatters[i]._offsets3d = juggle_axes(xs, ys, zs, 'z')
if rotate is not None:
ax.elev += rotate[0]
ax.azim += rotate[1]
fig.canvas.draw()
# print("Start making animation ...")
color_scale = attractive_mpl_color_scale({})
scatters = []
for i, name in enumerate(species_list):
opts = dict(marker='o', s=(2 ** marker_size), edgecolors='white', alpha=0.7)
opts.update(kwargs)
scatters.append(
ax.scatter(
[], [], [], facecolor=color_scale.get_color(name), label=name, **opts))
# if legend:
# ax.legend(loc='best', shadow=True)
if legend is not None and legend is not False:
legend_opts = {'loc': 'center left', 'bbox_to_anchor': (1.0, 0.5),
'shadow': False, 'frameon': False, 'fontsize': 'x-large',
'scatterpoints': 1}
if isinstance(legend, dict):
legend_opts.update(legend)
ax.legend(**legend_opts)
ani = animation.FuncAnimation(
fig, _update_plot, fargs=(scatters, worlds, species_list),
frames=len(worlds), interval=interval, blit=False)
plt.close(ani._fig)
# print("Start generating a movie ...")
display_anim(ani, output, fps=1.0 / interval, crf=crf, bitrate=bitrate) | [
"\n Generate a move from the received list of instances of World,\n and show it on IPython notebook. This function may require ffmpeg.\n\n Parameters\n ----------\n worlds : list or FixedIntervalHDF5Observer\n A list of Worlds to render.\n marker_size : float, default 3\n Marker size for all species. Size is passed to scatter function\n as argument, s=(2 ** marker_size).\n figsize : float, default 6\n Size of the plotting area. Given in inch.\n species_list : array of string, default None\n If set, plot_world will not search the list of species.\n max_count : Integer, default None\n The maximum number of particles to show for each species.\n None means no limitation.\n angle : tuple, default None\n A tuple of view angle which is given as (azim, elev, dist).\n If None, use default assumed to be (-60, 30, 10).\n interval : Integer, default 0.16\n Parameters for matplotlib.animation.ArtistAnimation.\n stride : Integer, default 1\n Stride per frame.\n rotate : tuple, default None\n A pair of rotation angles, elev and azim, for animation.\n None means no rotation, same as (0, 0).\n legend : bool, default True\n whratio : float, default 1.33\n A ratio between figure width and height.\n Customize this to keep a legend within the figure.\n scale : float, default 1\n A length-scaling factor\n crf : int, default 10\n The CRF value can be from 4-63. Lower values mean better quality.\n bitrate : str, default '1M'\n Target bitrate\n output : str, default None\n An output filename. '.webm' or '.mp4' is only accepted.\n If None, display a movie on IPython Notebook.\n\n "
] |
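A hedged example of producing a movie file. It assumes `obs` is a FixedIntervalHDF5Observer whose HDF5 snapshots are still on disk, and that ffmpeg is installed for the encoding step.

# obs: a FixedIntervalHDF5Observer from a finished simulation (assumed)
plot_movie_with_attractive_mpl(
    obs, stride=2, interval=0.1,      # use every 2nd snapshot
    rotate=(0, 1),                    # add 1 degree to the azimuth each frame
    output='movie.mp4', crf=18, bitrate='2M')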
Please provide a description of the function:def plot_world2d_with_matplotlib(
world, plane='xy', marker_size=3, figsize=6, grid=True,
wireframe=False, species_list=None, max_count=1000, angle=None,
legend=True, noaxis=False, scale=1.0, **kwargs):
import matplotlib.pyplot as plt
plane = plane.lower()
if len(plane) != 2 or plane[0] not in ('x', 'y', 'z') or plane[1] not in ('x', 'y', 'z'):
raise ValueError("invalid 'plane' argument [{}] was given.".format(repr(plane)))
xidx = 0 if plane[0] == 'x' else (1 if plane[0] == 'y' else 2)
yidx = 0 if plane[1] == 'x' else (1 if plane[1] == 'y' else 2)
if species_list is None:
species_list = [p.species().serial() for pid, p in world.list_particles()]
species_list = sorted(
set(species_list), key=species_list.index) # XXX: pick unique ones
wrange = __get_range_of_world(world, scale)
wrange = (wrange['x'], wrange['y'], wrange['z'])
wrange = {'x': wrange[xidx], 'y': wrange[yidx]}
fig, ax = __prepare_plot_with_matplotlib(
wrange, figsize, grid, wireframe, noaxis)
scatters, plots = __scatter_world2d_with_matplotlib(
world, (xidx, yidx), ax, species_list, marker_size, max_count, scale, **kwargs)
ax.set_xlabel(plane[0].upper())
ax.set_ylabel(plane[1].upper())
# if legend:
# ax.legend(handles=plots, labels=species_list, loc='best', shadow=True)
if legend is not None and legend is not False:
legend_opts = {'loc': 'center left', 'bbox_to_anchor': (1.0, 0.5),
'shadow': False, 'frameon': False, 'fontsize': 'x-large',
'scatterpoints': 1}
if isinstance(legend, dict):
legend_opts.update(legend)
ax.legend(**legend_opts)
# ax.legend(handles=plots, labels=species_list, **legend_opts)
plt.show() | [
"\n Make a 2D plot from received instance of World and show it on IPython notebook.\n\n Parameters\n ----------\n world : World or str\n World to render. A HDF5 filename is also acceptable.\n plane : str, default 'xy'\n 'xy', 'yz', 'zx'.\n marker_size : float, default 3\n Marker size for all species. Size is passed to scatter function\n as argument, s=(2 ** marker_size).\n figsize : float, default 6\n Size of the plotting area. Given in inch.\n species_list : array of string, default None\n If set, plot_world will not search the list of species.\n max_count : Integer, default 1000\n The maximum number of particles to show for each species.\n None means no limitation.\n angle : tuple, default None\n A tuple of view angle which is given as (azim, elev, dist).\n If None, use default assumed to be (-60, 30, 10).\n legend : bool, default True\n scale : float, default 1\n A length-scaling factor\n\n "
] |
Please provide a description of the function:def plot_movie2d_with_matplotlib(
worlds, plane='xy', marker_size=3, figsize=6, grid=True,
wireframe=False, species_list=None, max_count=None, angle=None, noaxis=False,
interval=0.16, repeat_delay=3000, stride=1, rotate=None,
legend=True, scale=1, output=None, crf=10, bitrate='1M', **kwargs):
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from ecell4_base.core import Species, FixedIntervalHDF5Observer
from .simulation import load_world
plane = plane.lower()
if len(plane) != 2 or plane[0] not in ('x', 'y', 'z') or plane[1] not in ('x', 'y', 'z'):
raise ValueError("invalid 'plane' argument [{}] was given.".format(repr(plane)))
xidx = 0 if plane[0] == 'x' else (1 if plane[0] == 'y' else 2)
yidx = 0 if plane[1] == 'x' else (1 if plane[1] == 'y' else 2)
if isinstance(worlds, FixedIntervalHDF5Observer):
obs = worlds
worlds = []
for i in range(0, obs.num_steps(), stride):
filename = obs.filename(i)
if os.path.isfile(filename):
worlds.append(load_world(filename))
            elif len(worlds) > 0:
worlds.append(worlds[-1])
else:
worlds = worlds[:: stride]
if species_list is None:
species_list = []
for world in worlds:
species_list.extend(
[p.species().serial() for pid, p in world.list_particles()])
species_list = sorted(
set(species_list), key=species_list.index) # XXX: pick unique ones
wrange = __get_range_of_world(worlds[0], scale)
wrange = (wrange['x'], wrange['y'], wrange['z'])
wrange = {'x': wrange[xidx], 'y': wrange[yidx]}
fig = plt.figure(figsize=(figsize, figsize))
ax = fig.gca()
color_scale = matplotlib_color_scale()
def _update_plot(i, worlds, species_list):
ax.cla()
ax.set_aspect('equal')
ax.grid(grid)
ax.set_xlim(*wrange['x'])
ax.set_ylim(*wrange['y'])
ax.set_xlabel(plane[0].upper())
ax.set_ylabel(plane[1].upper())
if noaxis:
ax.set_axis_off()
_legend = False
world = worlds[i]
for i, name in enumerate(species_list):
offsets = ([], [])
particles = world.list_particles_exact(Species(name))
if len(particles) == 0:
continue
_legend = True
if max_count is not None and len(particles) > max_count:
particles = random.sample(particles, max_count)
for pid, p in particles:
pos = p.position() * scale
offsets[0].append(pos[xidx])
offsets[1].append(pos[yidx])
ax.scatter(
offsets[0], offsets[1], marker='o', s=(2 ** marker_size),
lw=0, facecolor=color_scale.get_color(name), label=name)
if legend is not None and legend is not False and _legend:
legend_opts = {"loc": "upper right", "shadow": True}
if isinstance(legend, dict):
legend_opts.update(legend)
ax.legend(**legend_opts)
fig.canvas.draw()
ani = animation.FuncAnimation(
fig, _update_plot, fargs=(worlds, species_list),
frames=len(worlds), interval=interval, blit=False)
plt.close(ani._fig)
display_anim(ani, output, fps=1.0 / interval, crf=crf, bitrate=bitrate) | [
"\n Generate a movie projected on the given plane from the received list\n of instances of World, and show it on IPython notebook.\n This function may require ffmpeg.\n\n Parameters\n ----------\n worlds : list or FixedIntervalHDF5Observer\n A list of Worlds to render.\n plane : str, default 'xy'\n 'xy', 'yz', 'zx'.\n marker_size : float, default 3\n Marker size for all species. Size is passed to scatter function\n as argument, s=(2 ** marker_size).\n figsize : float, default 6\n Size of the plotting area. Given in inch.\n species_list : array of string, default None\n If set, plot_world will not search the list of species.\n max_count : Integer, default None\n The maximum number of particles to show for each species.\n None means no limitation.\n angle : tuple, default None\n A tuple of view angle which is given as (azim, elev, dist).\n If None, use default assumed to be (-60, 30, 10).\n interval : Integer, default 0.16\n Parameters for matplotlib.animation.ArtistAnimation.\n stride : Integer, default 1\n Stride per frame.\n rotate : tuple, default None\n A pair of rotation angles, elev and azim, for animation.\n None means no rotation, same as (0, 0).\n legend : bool, default True\n scale : float, default 1\n A length-scaling factor\n output : str, default None\n An output filename. '.webm' or '.mp4' is only accepted.\n If None, display a movie on IPython Notebook.\n crf : int, default 10\n The CRF value can be from 4-63. Lower values mean better quality.\n bitrate : str, default '1M'\n Target bitrate\n\n "
] |
Please provide a description of the function:def plot_world_with_plotly(world, species_list=None, max_count=1000):
if isinstance(world, str):
from .simulation import load_world
world = load_world(world)
if species_list is None:
species_list = [sp.serial() for sp in world.list_species()]
species_list.sort()
import random
from ecell4_base.core import Species
positions = {}
for serial in species_list:
x, y, z = [], [], []
particles = world.list_particles_exact(Species(serial))
if max_count is not None and len(particles) > max_count:
particles = random.sample(particles, max_count)
for pid, p in particles:
pos = p.position()
x.append(pos[0])
y.append(pos[1])
z.append(pos[2])
positions[serial] = (x, y, z)
import plotly
import plotly.graph_objs as go
plotly.offline.init_notebook_mode()
marker = dict(size=6, line=dict(color='rgb(204, 204, 204)', width=1),
opacity=0.9, symbol='circle')
data = []
for serial, (x, y, z) in positions.items():
trace = go.Scatter3d(
x=x, y=y, z=z, mode='markers',
marker=marker, name=serial)
data.append(trace)
layout = go.Layout(margin=dict(l=0, r=0, b=0, t=0))
fig = go.Figure(data=data, layout=layout)
plotly.offline.iplot(fig) | [
"\n Plot a World on IPython Notebook\n "
] |
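A brief example; 'world.h5' is a hypothetical HDF5 snapshot, which this function loads itself when a string is given.

# either a World instance or an HDF5 filename is accepted
plot_world_with_plotly('world.h5', species_list=['A'], max_count=300)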
Please provide a description of the function:def getUnitRegistry(length="meter", time="second", substance="item", volume=None, other=()):
ureg = pint.UnitRegistry()
ureg.define('item = mole / (avogadro_number * 1 mole)')
try:
        ureg.molar
# except UndefinedUnitError:
except AttributeError:
# https://github.com/hgrecco/pint/blob/master/pint/default_en.txt#L75-L77
ureg.define('[concentration] = [substance] / [volume]')
ureg.define('molar = mol / (1e-3 * m ** 3) = M')
base_units = [unit for unit in (length, time, substance, volume) if unit is not None]
base_units.extend(other)
_ = ureg.System.from_lines(
["@system local using international"] + base_units,
ureg.get_base_units)
ureg.default_system = 'local'
wrap_quantity(ureg.Quantity)
pint.set_application_registry(ureg) # for pickling
return ureg | [
"Return a pint.UnitRegistry made compatible with ecell4.\n\n Parameters\n ----------\n length : str, optional\n A default unit for '[length]'. 'meter' is its default.\n time : str, optional\n A default unit for '[time]'. 'second' is its default.\n substance : str, optional\n A default unit for '[substance]' (the number of molecules). 'item' is its default.\n volume : str, optional\n A default unit for '[volume]'. Its default is None, thus '[length]**3'.\n other : tuple, optional\n A list of user-defined default units other than the above.\n\n Returns\n -------\n ureg : pint.UnitRegistry\n\n "
] |
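A small sketch of working with the returned registry, assuming pint is installed; 'micromolar' works because the 'molar' unit is defined above and pint applies SI prefixes automatically.

ureg = getUnitRegistry(length='micrometer', time='second', volume='liter')
D = 1.0 * ureg.micrometer ** 2 / ureg.second   # a diffusion coefficient
conc = 10 * ureg.micromolar                    # relies on the 'molar' definition above
print(D.to_base_units(), conc.to_base_units())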
Please provide a description of the function:def interactor(self, geneList=None, org=None):
geneList = geneList or []
querydata = self.interactions(geneList, org)
returnData = {}
for i in querydata:
if not returnData.get(i["symB"]["name"]):
returnData[i["symB"]["name"]] = {"interactions": []}
returnData[i["symB"]["name"]]["interactions"].append(i)
return returnData | [
"\n Supposing geneList returns an unique item.\n "
] |
Please provide a description of the function:def parse_psimitab(content, fmt='tab27'):
columns = [
'Unique identifier for interactor A',
'Unique identifier for interactor B',
'Alternative identifier for interactor A',
'Alternative identifier for interactor B',
'Aliases for A',
'Aliases for B',
'Interaction detection methods',
'First author',
'Identifier of the publication',
'NCBI Taxonomy identifier for interactor A',
'NCBI Taxonomy identifier for interactor B',
'Interaction types',
'Source databases',
'Interaction identifier(s)',
'Confidence score']
columns += [
'Complex expansion',
'Biological role A', 'Biological role B',
'Experimental role A', 'Experimental role B',
'Interactor type A', 'Interactor type B',
'Xref for interactor A', 'Xref for interactor B',
'Xref for the interaction',
        'Annotations for interactor A', 'Annotations for interactor B',
'Annotations for the interaction',
'NCBI Taxonomy identifier for the host organism',
        'Parameters of the interaction',
        'Creation date', 'Update date',
'Checksum for the interactor A', 'Checksum for the interactor B',
'Checksum for the interaction',
'negative',
'Feature(s) for interactor A', 'Feature(s) for interactor B',
        'Stoichiometry for interactor A', 'Stoichiometry for interactor B',
'Participant identification method for interactor A',
'Participant identification method for interactor B'
]
if fmt == 'tab25':
columns = columns[: 15]
rexp = re.compile(r"(?P<fields>((\"([^\"]|((?<=\\)\"))*\")|([^\t\"])|((?<=\\)\"))+)(\t|$)")
retval = []
for line in content.split('\n'):
line = line.strip()
if line == '' or line[0] == '#':
continue
start = 0
tmp = []
for mobj in rexp.finditer(line):
if mobj.start() != start:
print(repr(line))
assert mobj.start() == start
start = mobj.end()
tmp.append(mobj.group('fields'))
assert len(tmp) == len(columns)
retval.append(dict(zip(columns, tmp)))
return retval | [
"https://code.google.com/archive/p/psimi/wikis/PsimiTab27Format.wiki\n "
] |
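A toy example of the 15-column tab25 variant; the single repeated identifier is obviously artificial, but it shows the returned list-of-dicts layout.

# a single, artificial PSI-MI TAB 2.5 line with 15 tab-separated fields
line = '\t'.join(['uniprotkb:P12345'] * 15)
records = parse_psimitab(line, fmt='tab25')
print(records[0]['Unique identifier for interactor A'])   # -> 'uniprotkb:P12345'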
Please provide a description of the function:def export_sbml(model, y0=None, volume=1.0, is_valid=True):
y0 = y0 or {}
import libsbml
document = libsbml.SBMLDocument(3, 1)
# ns = libsbml.XMLNamespaces()
# ns.add("http://www.ecell.org/ns/ecell4", "ecell4") #XXX: DUMMY URI
# document.setNamespaces(ns)
m = document.createModel()
comp1 = m.createCompartment()
comp1.setId('world')
comp1.setConstant(True)
if unit.HAS_PINT:
if isinstance(volume, unit._Quantity):
if unit.STRICT:
if isinstance(volume.magnitude, ecell4_base.core.Real3) and not unit.check_dimensionality(volume, '[length]'):
raise ValueError("Cannot convert [volume] from '{}' ({}) to '[length]'".format(
volume.dimensionality, volume.u))
elif not unit.check_dimensionality(volume, '[volume]'):
raise ValueError("Cannot convert [volume] from '{}' ({}) to '[volume]'".format(
volume.dimensionality, volume.u))
volume = volume.to_base_units().magnitude
y0 = y0.copy()
for key, value in y0.items():
if isinstance(value, unit._Quantity):
if not unit.STRICT:
y0[key] = value.to_base_units().magnitude
elif unit.check_dimensionality(value, '[substance]'):
y0[key] = value.to_base_units().magnitude
elif unit.check_dimensionality(value, '[concentration]'):
                    # convert a concentration into an amount using the volume given above
                    volume_ = (volume[0] * volume[1] * volume[2]
                               if isinstance(volume, ecell4_base.core.Real3) else volume)
                    y0[key] = value.to_base_units().magnitude * volume_
else:
raise ValueError(
"Cannot convert a quantity for [{}] from '{}' ({}) to '[substance]'".format(
key, value.dimensionality, value.u))
if isinstance(volume, ecell4_base.core.Real3):
comp1.setSize(volume[0] * volume[1] * volume[2])
else:
comp1.setSize(volume)
comp1.setSpatialDimensions(3)
species_list = []
for rr in model.reaction_rules():
for sp in itertools.chain(rr.reactants(), rr.products()):
species_list.append(sp)
species_list = list(set(species_list))
species_list.sort()
sid_map = {}
for cnt, sp in enumerate(species_list):
sid_map[sp.serial()] = "s{:d}".format(cnt)
for sp in species_list:
sid = sid_map[sp.serial()]
s1 = m.createSpecies()
s1.setId(sid)
s1.setName(sp.serial())
s1.setCompartment('world')
s1.setConstant(False)
if sp.serial() in y0.keys():
s1.setInitialAmount(y0[sp.serial()])
else:
s1.setInitialAmount(0)
s1.setBoundaryCondition(False)
s1.setHasOnlySubstanceUnits(False)
# s1.appendAnnotation('<annotation><ecell4:extension><ecell4:species serial="{:s}"/></ecell4:extension></annotation>'.format(sp.serial()))
for cnt, rr in enumerate(model.reaction_rules()):
desc = rr.get_descriptor()
r1 = m.createReaction()
r1.setId("r{:d}".format(cnt))
r1.setReversible(True)
r1.setFast(False)
kinetic_law = r1.createKineticLaw()
species_coef_map = {}
if desc is None:
for sp in rr.reactants():
if sp not in species_coef_map.keys():
species_coef_map[sp] = 1
else:
species_coef_map[sp] += 1
else:
for sp, coef in zip(rr.reactants(), desc.reactant_coefficients()):
if sp not in species_coef_map.keys():
species_coef_map[sp] = coef
else:
species_coef_map[sp] += coef
if desc is None or isinstance(desc, ecell4_base.core.ReactionRuleDescriptorMassAction):
p1 = m.createParameter()
p1.setId("k{:d}".format(cnt))
# p1 = kinetic_law.createLocalParameter()
# p1.setId("k")
p1.setConstant(True)
p1.setValue(rr.k() if desc is None else desc.k())
# math_exp = "k"
math_exp = "k{:d}".format(cnt)
for sp, coef in species_coef_map.items():
sid = sid_map[sp.serial()]
if coef == 1.0:
math_exp += "*{:s}".format(sid)
else:
math_exp += "*pow({:s},{:g})".format(sid, coef)
elif isinstance(desc, ecell4_base.core.ReactionRuleDescriptorPyfunc):
math_exp = desc.as_string()
if math_exp in ('', '<lambda>'):
warnings.warn(
"The given ReactionRuleDescriptorPyfunc [{:s}] might be invalid.".format(
rr.as_string()))
math_exp = replace_parseobj(math_exp, sid_map)
else:
raise RuntimeError('Unknown derived type of ReactionRuleDescriptor was given [{}].'.format(type(desc)))
for sp, coef in species_coef_map.items():
sid = sid_map[sp.serial()]
s1 = r1.createReactant()
s1.setSpecies(sid)
s1.setConstant(False)
s1.setStoichiometry(coef)
if desc is None:
for sp in rr.products():
if sp not in species_coef_map.keys():
species_coef_map[sp] = 1
else:
species_coef_map[sp] += 1
else:
species_coef_map = {}
for sp, coef in zip(rr.products(), desc.product_coefficients()):
if sp not in species_coef_map.keys():
species_coef_map[sp] = coef
else:
species_coef_map[sp] += coef
for sp, coef in species_coef_map.items():
sid = sid_map[sp.serial()]
s1 = r1.createProduct()
s1.setSpecies(sid)
s1.setConstant(False)
s1.setStoichiometry(coef)
math_ast = libsbml.parseL3Formula(math_exp)
kinetic_law.setMath(math_ast)
if is_valid:
document.validateSBML()
num_errors = (document.getNumErrors(libsbml.LIBSBML_SEV_ERROR)
+ document.getNumErrors(libsbml.LIBSBML_SEV_FATAL))
if num_errors > 0:
messages = "The generated document is not valid."
messages += " {} errors were found:\n".format(num_errors)
for i in range(document.getNumErrors(libsbml.LIBSBML_SEV_ERROR)):
err = document.getErrorWithSeverity(i, libsbml.LIBSBML_SEV_ERROR)
messages += "{}: {}\n".format(err.getSeverityAsString(), err.getShortMessage())
for i in range(document.getNumErrors(libsbml.LIBSBML_SEV_FATAL)):
err = document.getErrorWithSeverity(i, libsbml.LIBSBML_SEV_FATAL)
messages += "{}: {}\n".format(err.getSeverityAsString(), err.getShortMessage())
raise RuntimeError(messages)
return document | [
"\n Export a model as a SBMLDocument.\n\n Parameters\n ----------\n model : NetworkModel\n y0 : dict\n Initial condition.\n volume : Real or Real3, optional\n A size of the simulation volume. 1 as a default.\n is_valid : bool, optional\n Check if the generated model is valid. True as a default.\n\n "
] |
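A hedged usage sketch. It assumes `model` is an ecell4 NetworkModel whose rules are plain mass-action reactions, and uses libsbml only to serialize the returned document.

import libsbml

# model: an ecell4 NetworkModel with mass-action reaction rules (assumed)
document = export_sbml(model, y0={'A': 60.0, 'B': 0.0}, volume=1.0)
print(libsbml.writeSBMLToString(document)[:200])   # peek at the generated XML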
Please provide a description of the function:def save_sbml(filename, model, y0=None, volume=1.0, is_valid=True):
y0 = y0 or {}
import libsbml
document = export_sbml(model, y0, volume, is_valid)
# with open(filename, 'w') as fout:
# fout.write(libsbml.writeSBMLToString(document))
# writer = libsbml.SBMLWriter()
# writer.writeSBML(document, filename)
libsbml.writeSBML(document, filename) | [
"\n Save a model in the SBML format.\n\n Parameters\n ----------\n model : NetworkModel\n y0 : dict\n Initial condition.\n volume : Real or Real3, optional\n A size of the simulation volume.\n is_valid : bool, optional\n Check if the generated model is valid. True as a default.\n\n "
] |
Please provide a description of the function:def import_sbml(document):
from ecell4.util.decorator import generate_ratelaw
m = document.getModel()
if m.getNumCompartments() == 0:
raise RuntimeError("No compartment was found.")
elif m.getNumCompartments() > 1:
warnings.warn(
"[{:d}] compartments were found.".format(m.getNumCompartments())
+ " The second or later ones would be omitted.")
comp1 = m.getCompartment(0)
volume = comp1.getVolume()
y0 = {}
sid_map = {}
for s1 in m.getListOfSpecies():
sid = s1.getId()
serial = s1.getName()
sid_map[sid] = serial
value = s1.getInitialAmount()
if value != 0:
y0[serial] = value
kmap = {}
for p1 in m.getListOfParameters():
pid = p1.getId()
if not re.match("^k[0-9]+$", pid):
warnings.warn(
"Parameter [{:s}] was just ommited.".format(pid))
rid = "r{:s}".format(pid[1: ])
kmap[rid] = p1.getValue()
is_ode = False
rrs = []
for r1 in m.getListOfReactions():
rid = r1.getId()
print(rid)
is_massaction = (rid in kmap.keys())
if is_massaction:
k = kmap[rid]
else:
kinetic_law = r1.getKineticLaw()
formula = kinetic_law.getFormula()
k = replace_parseobj(formula, sid_map)
reactants, products = [], []
#XXX: The order of reactants is not consistent
for s1 in r1.getListOfReactants():
sid = s1.getSpecies()
if sid not in sid_map:
raise RuntimeError(
"Unknown Species' Id [{:s}] was given".format(sid))
serial = sid_map[sid]
coef = s1.getStoichiometry()
reactants.append((serial, coef))
#XXX: The order of products is not consistent
for s1 in r1.getListOfProducts():
sid = s1.getSpecies()
if sid not in sid_map:
raise RuntimeError(
"Unknown Species' Id [{:s}] was given".format(sid))
serial = sid_map[sid]
coef = s1.getStoichiometry()
products.append((serial, coef))
if (not is_massaction
or len(reactants) > 2
or any([coef not in (1, 2) for sp, coef in reactants])
or any([not coef.is_integer() for sp, coef in products])
or (len(reactants) == 2 and (reactants[0][1] == 2 or reactants[1][1] == 2))):
rr = ecell4_base.core.ReactionRule()
if is_massaction:
desc = ecell4_base.core.ReactionRuleDescriptorMassAction(k)
else:
func = generate_ratelaw(k, rr)
desc = ecell4_base.core.ReactionRuleDescriptorPyfunc(func, k)
desc.set_reactant_coefficients([coef for _, coef in reactants])
desc.set_product_coefficients([coef for _, coef in products])
rr.set_descriptor(desc)
else:
if len(reactants) == 1 and reactants[0][1] == 2:
reactants[0] = (reactants[0][0], 1)
reactants.append(reactants[0])
rr = ecell4_base.core.ReactionRule()
for serial, coef in reactants:
rr.add_reactant(ecell4_base.core.Species(serial))
for serial, coef in products:
for _ in range(int(coef)):
rr.add_product(ecell4_base.core.Species(serial))
rr.set_k(k)
rrs.append(rr)
m = ecell4_base.core.NetworkModel()
for rr in rrs:
m.add_reaction_rule(rr)
return m, y0, volume | [
"\n Import a model from a SBMLDocument.\n\n Parameters\n ----------\n document : SBMLDocument\n\n Returns\n -------\n model : NetworkModel\n y0 : dict\n Initial condition.\n volume : Real or Real3, optional\n A size of the simulation volume.\n\n "
] |
Please provide a description of the function:def load_sbml(filename):
import libsbml
document = libsbml.readSBML(filename)
document.validateSBML()
num_errors = (document.getNumErrors(libsbml.LIBSBML_SEV_ERROR)
+ document.getNumErrors(libsbml.LIBSBML_SEV_FATAL))
if num_errors > 0:
messages = "The generated document is not valid."
messages += " {} errors were found:\n".format(num_errors)
for i in range(document.getNumErrors(libsbml.LIBSBML_SEV_ERROR)):
err = document.getErrorWithSeverity(i, libsbml.LIBSBML_SEV_ERROR)
messages += "{}: {}\n".format(err.getSeverityAsString(), err.getShortMessage())
for i in range(document.getNumErrors(libsbml.LIBSBML_SEV_FATAL)):
err = document.getErrorWithSeverity(i, libsbml.LIBSBML_SEV_FATAL)
messages += "{}: {}\n".format(err.getSeverityAsString(), err.getShortMessage())
raise RuntimeError(messages)
return import_sbml(document) | [
"\n Load a model from a SBML file.\n\n Parameters\n ----------\n filename : str\n The input SBML filename.\n\n Returns\n -------\n model : NetworkModel\n y0 : dict\n Initial condition.\n volume : Real or Real3, optional\n A size of the simulation volume.\n\n "
] |
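A short round-trip example; 'model.xml' is a hypothetical, valid SBML Level 3 file such as one written by save_sbml above.

model, y0, volume = load_sbml('model.xml')   # 'model.xml' is hypothetical
print(volume, y0)
for rr in model.reaction_rules():
    print(rr.as_string())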
Please provide a description of the function:def get_model(is_netfree=False, without_reset=False, seeds=None, effective=False):
try:
if seeds is not None or is_netfree:
m = ecell4_base.core.NetfreeModel()
else:
m = ecell4_base.core.NetworkModel()
for sp in SPECIES_ATTRIBUTES:
m.add_species_attribute(sp)
for rr in REACTION_RULES:
m.add_reaction_rule(rr)
if not without_reset:
reset_model()
if seeds is not None:
return m.expand(seeds)
if isinstance(m, ecell4_base.core.NetfreeModel):
m.set_effective(effective)
except Exception as e:
reset_model()
raise e
return m | [
"\n Generate a model with parameters in the global scope, ``SPECIES_ATTRIBUTES``\n and ``REACTIONRULES``.\n\n Parameters\n ----------\n is_netfree : bool, optional\n Return ``NetfreeModel`` if True, and ``NetworkModel`` if else.\n Default is False.\n without_reset : bool, optional\n Do not reset the global variables after the generation if True.\n Default is False.\n seeds : list, optional\n A list of seed ``Species`` for expanding the model.\n If this is not None, generate a ``NetfreeModel`` once, and return a\n ``NetworkModel``, which is an expanded form of that with the given seeds.\n Default is None.\n effective : bool, optional\n See ``NetfreeModel.effective`` and ``Netfree.set_effective``.\n Only meaningfull with option ``is_netfree=True``.\n Default is False\n\n Returns\n -------\n model : NetworkModel, NetfreeModel\n\n "
] |
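An illustrative call. SPECIES_ATTRIBUTES and REACTION_RULES are module-level lists normally filled by the ecell4 model decorators before get_model is called; this sketch only shows the call and the reset behaviour, not the decorator setup.

# assumes REACTION_RULES / SPECIES_ATTRIBUTES were populated beforehand,
# e.g. inside a `with reaction_rules(): ...` block from ecell4.util.decorator
m = get_model(is_netfree=True, effective=True)
print(len(m.reaction_rules()))
# the global lists were reset here; pass without_reset=True to keep them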
Please provide a description of the function:def run_serial(target, jobs, n=1, **kwargs):
return [[target(copy.copy(job), i + 1, j + 1) for j in range(n)] for i, job in enumerate(jobs)] | [
"\n Evaluate the given function with each set of arguments, and return a list of results.\n This function does in series.\n\n Parameters\n ----------\n target : function\n A function to be evaluated. The function must accepts three arguments,\n which are a list of arguments given as `jobs`, a job and task id (int).\n jobs : list\n A list of arguments passed to the function.\n n : int, optional\n A number of tasks. Repeat the evaluation `n` times for each job.\n 1 for default.\n\n Returns\n -------\n results : list\n A list of results. Each element is a list containing `n` results.\n\n Examples\n --------\n >>> jobs = ((1, 'spam'), (2, 'ham'), (3, 'eggs'))\n\n >>> target = lambda args, job_id, task_id: (args[1] * args[0])\n >>> run_serial(target, jobs)\n [['spam'], ['hamham'], ['eggseggseggs']]\n\n >>> target = lambda args, job_id, task_id: \"{:d} {}\".format(task_id, args[1] * args[0])\n >>> run_serial(target, jobs, n=2)\n [['1 spam', '2 spam'], ['1 hamham', '2 hamham'], ['1 eggseggseggs', '2 eggseggseggs']]\n\n >>> seeds = genseeds(3)\n >>> def target(arg, job_id, task_id):\n ... from ecell4.extra.ensemble import getseed\n ... return getseed(arg, task_id)\n >>> run_serial(target, (seeds, ), n=3) # doctest: +SKIP\n [[127152315, 2028054913, 253611282]]\n\n See Also\n --------\n ecell4.extra.ensemble.run_serial\n ecell4.extra.ensemble.run_sge\n ecell4.extra.ensemble.run_slurm\n ecell4.extra.ensemble.run_multiprocessing\n ecell4.extra.ensemble.run_azure\n\n "
] |
Please provide a description of the function:def run_multiprocessing(target, jobs, n=1, nproc=None, **kwargs):
def consumer(f, q_in, q_out):
while True:
val = q_in.get()
if val is None:
q_in.task_done()
break
i, x = val
res = (i, f(*x))
q_in.task_done()
q_out.put(res)
def mulpmap(f, X, nproc):
nproc = nproc or multiprocessing.cpu_count()
q_in = multiprocessing.JoinableQueue()
q_out = multiprocessing.Queue()
workers = [multiprocessing.Process(target=consumer, args=(f, q_in, q_out), daemon=True) for _ in range(nproc)]
sent = [q_in.put((i, x)) for i, x in enumerate(X)]
num_tasks = len(sent)
[q_in.put(None) for _ in range(nproc)] #XXX: poison pill
[w.start() for w in workers]
# [w.join() for w in workers]
q_in.join()
res = [q_out.get() for _ in range(num_tasks)]
return [x for (_, x) in sorted(res)]
res = mulpmap(
target, ((job, i + 1, j + 1) for (i, job), j in itertools.product(enumerate(jobs), range(n))), nproc)
return [res[i: i + n] for i in range(0, len(res), n)] | [
"\n Evaluate the given function with each set of arguments, and return a list of results.\n This function does in parallel by using `multiprocessing`.\n\n Parameters\n ----------\n target : function\n A function to be evaluated. The function must accepts three arguments,\n which are a list of arguments given as `jobs`, a job and task id (int).\n jobs : list\n A list of arguments passed to the function.\n All the argument must be picklable.\n n : int, optional\n A number of tasks. Repeat the evaluation `n` times for each job.\n 1 for default.\n nproc : int, optional\n A number of cores available once.\n If nothing is given, all available cores are used.\n\n Returns\n -------\n results : list\n A list of results. Each element is a list containing `n` results.\n\n Examples\n --------\n >>> jobs = ((1, 'spam'), (2, 'ham'), (3, 'eggs'))\n\n >>> target = lambda args, job_id, task_id: (args[1] * args[0])\n >>> run_multiprocessing(target, jobs, nproc=2)\n [['spam'], ['hamham'], ['eggseggseggs']]\n\n >>> target = lambda args, job_id, task_id: \"{:d} {}\".format(task_id, args[1] * args[0])\n >>> run_multiprocessing(target, jobs, n=2, nproc=2)\n [['1 spam', '2 spam'], ['1 hamham', '2 hamham'], ['1 eggseggseggs', '2 eggseggseggs']]\n\n See Also\n --------\n ecell4.extra.ensemble.run_serial\n ecell4.extra.ensemble.run_sge\n ecell4.extra.ensemble.run_slurm\n ecell4.extra.ensemble.run_multiprocessing\n ecell4.extra.ensemble.run_azure\n\n "
] |
Please provide a description of the function:def run_sge(target, jobs, n=1, nproc=None, path='.', delete=True, wait=True, environ=None, modules=(), **kwargs):
logging.basicConfig(level=logging.DEBUG)
if isinstance(target, types.LambdaType) and target.__name__ == "<lambda>":
raise RuntimeError("A lambda function is not accepted")
# src = textwrap.dedent(inspect.getsource(singlerun)).replace(r'"', r'\"')
src = textwrap.dedent(inspect.getsource(target)).replace(r'"', r'\"')
    if re.match(r'[\s\t]+', src.split('\n')[0]) is not None:
raise RuntimeError(
"Wrong indentation was found in the source translated")
if not os.path.isdir(path):
os.makedirs(path) #XXX: MYOB
if environ is None:
environ = {}
keys = ("LD_LIBRARY_PATH", "PYTHONPATH")
for key in keys:
if key in os.environ.keys():
environ[key] = os.environ[key]
if "PYTHONPATH" in environ.keys() and environ["PYTHONPATH"].strip() != "":
environ["PYTHONPATH"] = "{}:{}".format(os.getcwd(), environ["PYTHONPATH"])
else:
environ["PYTHONPATH"] = os.getcwd()
cmds = []
pickleins = []
pickleouts = []
scripts = []
for i, job in enumerate(jobs):
(fd, picklein) = tempfile.mkstemp(suffix='.pickle', prefix='sge-', dir=path)
with os.fdopen(fd, 'wb') as fout:
pickle.dump(job, fout)
pickleins.append(picklein)
pickleouts.append([])
for j in range(n):
fd, pickleout = tempfile.mkstemp(suffix='.pickle', prefix='sge-', dir=path)
os.close(fd)
pickleouts[-1].append(pickleout)
# pickleouts.append(
# [tempfile.mkstemp(suffix='.pickle', prefix='sge-', dir=path)[1]
# for j in range(n)])
code = 'import sys\n'
code += 'import os\n'
code += 'import pickle\n'
code += 'with open(\'{}\', \'rb\') as fin:\n'.format(picklein)
code += ' job = pickle.load(fin)\n'
code += 'pass\n'
for m in modules:
code += "from {} import *\n".format(m)
code += src
code += '\ntid = int(os.environ[\'SGE_TASK_ID\'])'
code += '\nretval = {:s}(job, {:d}, tid)'.format(target.__name__, i + 1)
code += '\nfilenames = {:s}'.format(str(pickleouts[-1]))
code += '\npickle.dump(retval, open(filenames[tid - 1], \'wb\'))\n'
(fd, script) = tempfile.mkstemp(suffix='.py', prefix='sge-', dir=path, text=True)
with os.fdopen(fd, 'w') as fout:
fout.write(code)
scripts.append(script)
cmd = '#!/bin/bash\n'
for key, value in environ.items():
cmd += 'export {:s}={:s}\n'.format(key, value)
cmd += 'python3 {}'.format(script) #XXX: Use the same executer, python
# cmd += 'python3 -c "\n'
# cmd += 'import sys\n'
# cmd += 'import os\n'
# cmd += 'import pickle\n'
# cmd += 'with open(sys.argv[1], \'rb\') as fin:\n'
# cmd += ' job = pickle.load(fin)\n'
# cmd += 'pass\n'
# for m in modules:
# cmd += "from {} import *\n".format(m)
# cmd += src
# cmd += '\ntid = int(os.environ[\'SGE_TASK_ID\'])'
# cmd += '\nretval = {:s}(job, {:d}, tid)'.format(target.__name__, i + 1)
# cmd += '\nfilenames = {:s}'.format(str(pickleouts[-1]))
# cmd += '\npickle.dump(retval, open(filenames[tid - 1], \'wb\'))'
# cmd += '" {:s}\n'.format(picklein)
cmds.append(cmd)
if isinstance(wait, bool):
sync = 0 if not wait else 10
elif isinstance(wait, int):
sync = wait
else:
raise ValueError("'wait' must be either 'int' or 'bool'.")
jobids = sge.run(cmds, n=n, path=path, delete=delete, sync=sync, max_running_tasks=nproc, **kwargs)
if not (sync > 0):
return None
for jobid, name in jobids:
outputs = sge.collect(jobid, name, n=n, path=path, delete=delete)
for output in outputs:
print(output, end='')
retval = [[pickle.load(open(pickleout, 'rb')) for pickleout in tasks]
for tasks in pickleouts]
if delete:
for tmpname in itertools.chain(pickleins, scripts, *pickleouts):
os.remove(tmpname)
return retval | [
"\n Evaluate the given function with each set of arguments, and return a list of results.\n This function does in parallel on the Sun Grid Engine einvironment.\n\n Parameters\n ----------\n target : function\n A function to be evaluated. The function must accepts three arguments,\n which are a list of arguments given as `jobs`, a job and task id (int).\n This function can not be a lambda.\n jobs : list\n A list of arguments passed to the function.\n All the argument must be picklable.\n n : int, optional\n A number of tasks. Repeat the evaluation `n` times for each job.\n 1 for default.\n nproc : int, optional\n A number of cores available once.\n If nothing is given, it runs with no limit.\n path : str, optional\n A path for temporary files to be saved. The path is created if not exists.\n The current directory is used as its default.\n delete : bool, optional\n Whether it removes temporary files after the successful execution.\n True for default.\n wait : bool, optional\n Whether it waits until all jobs are finished. If False, it just submits jobs.\n True for default.\n environ : dict, optional\n An environment variables used when running jobs.\n \"PYTHONPATH\" and \"LD_LIBRARY_PATH\" is inherited when no `environ` is given.\n modules : list, optional\n A list of module names imported before evaluating the given function.\n The modules are loaded as: `from [module] import *`.\n\n Returns\n -------\n results : list\n A list of results. Each element is a list containing `n` results.\n\n Examples\n --------\n >>> jobs = ((1, 'spam'), (2, 'ham'), (3, 'eggs'))\n\n >>> def target(args, job_id, task_id):\n ... return (args[1] * args[0])\n ...\n >>> run_sge(target, jobs, nproc=2, path='.tmp')\n [['spam'], ['hamham'], ['eggseggseggs']]\n\n >>> def target(args, job_id, task_id):\n ... return \"{:d} {}\".format(task_id, args[1] * args[0])\n ...\n >>> run_sge(target, jobs, n=2, nproc=2, path='.tmp')\n [['1 spam', '2 spam'], ['1 hamham', '2 hamham'], ['1 eggseggseggs', '2 eggseggseggs']]\n\n See Also\n --------\n ecell4.extra.ensemble.run_serial\n ecell4.extra.ensemble.run_sge\n ecell4.extra.ensemble.run_slurm\n ecell4.extra.ensemble.run_multiprocessing\n ecell4.extra.ensemble.run_azure\n\n "
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.