Columns: text_prompt — string, lengths 100 to 17.7k, nullable | code_prompt — string, lengths 7 to 9.86k, nullable |
<SYSTEM_TASK:>
Get the most likely stem for a word. If a part of speech is supplied,
<END_TASK>
<USER_TASK:>
Description:
def morphy_stem(word, pos=None):
"""
Get the most likely stem for a word. If a part of speech is supplied,
the stem will be more accurate.
Valid parts of speech are:
- 'n' or 'NN' for nouns
- 'v' or 'VB' for verbs
- 'a' or 'JJ' for adjectives
- 'r' or 'RB' for adverbs
Any other part of speech will be treated as unknown.
""" |
word = word.lower()
if pos is not None:
if pos.startswith('NN'):
pos = 'n'
elif pos.startswith('VB'):
pos = 'v'
elif pos.startswith('JJ'):
pos = 'a'
elif pos.startswith('RB'):
pos = 'r'
if pos is None and word.endswith('ing') or word.endswith('ed'):
pos = 'v'
if pos is not None and pos not in 'nvar':
pos = None
if word in EXCEPTIONS:
return EXCEPTIONS[word]
if pos is None:
if word in AMBIGUOUS_EXCEPTIONS:
return AMBIGUOUS_EXCEPTIONS[word]
return _morphy_best(word, pos) or word |
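A brief sketch of how the stemmer above might be called; the exact outputs depend on the EXCEPTIONS tables and the underlying _morphy_best/WordNet data, so the results shown are only indicative.
morphy_stem('dogs')           # likely 'dog'
morphy_stem('running', 'VB')  # Penn tag 'VB' is mapped to WordNet's 'v'
morphy_stem('better', 'JJ')   # adjective lookup; 'JJ' is mapped to 'a'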
<SYSTEM_TASK:>
Get a list of word stems that appear in the text. Stopwords and an initial
<END_TASK>
<USER_TASK:>
Description:
def normalize_list(text):
"""
Get a list of word stems that appear in the text. Stopwords and an initial
'to' will be stripped, unless this leaves nothing in the stem.
>>> normalize_list('the dog')
['dog']
>>> normalize_list('big dogs')
['big', 'dog']
>>> normalize_list('the')
['the']
""" |
pieces = [morphy_stem(word) for word in tokenize(text)]
pieces = [piece for piece in pieces if good_lemma(piece)]
if not pieces:
return [text]
if pieces[0] == 'to':
pieces = pieces[1:]
return pieces |
<SYSTEM_TASK:>
Get a canonical representation of a Wikipedia topic, which may include
<END_TASK>
<USER_TASK:>
Description:
def normalize_topic(topic):
"""
Get a canonical representation of a Wikipedia topic, which may include
a disambiguation string in parentheses.
Returns (name, disambig), where "name" is the normalized topic name,
and "disambig" is a string corresponding to the disambiguation text or
None.
""" |
# find titles of the form Foo (bar)
topic = topic.replace('_', ' ')
match = re.match(r'([^(]+) \(([^)]+)\)', topic)
if not match:
return normalize(topic), None
else:
return normalize(match.group(1)), 'n/' + match.group(2).strip(' _') |
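A quick sketch of the behaviour above; the first element depends on the separate normalize() function, so it is shown symbolically.
normalize_topic('George_Washington_(inventor)')
# -> (normalize('George Washington'), 'n/inventor')
normalize_topic('George_Washington')
# -> (normalize('George Washington'), None)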
<SYSTEM_TASK:>
Get the install paths for EnergyPlus executable and weather files.
<END_TASK>
<USER_TASK:>
Description:
def install_paths(version=None, iddname=None):
"""Get the install paths for EnergyPlus executable and weather files.
We prefer to get the install path from the IDD name but fall back to
getting it from the version number for backwards compatibility and to
simplify tests.
Parameters
----------
version : str, optional
EnergyPlus version in the format "X-X-X", e.g. "8-7-0".
iddname : str, optional
File path to the IDD.
Returns
-------
eplus_exe : str
Full path to the EnergyPlus executable.
eplus_weather : str
Full path to the EnergyPlus weather directory.
""" |
try:
eplus_exe, eplus_home = paths_from_iddname(iddname)
except (AttributeError, TypeError, ValueError):
eplus_exe, eplus_home = paths_from_version(version)
eplus_weather = os.path.join(eplus_home, 'WeatherData')
return eplus_exe, eplus_weather |
<SYSTEM_TASK:>
Decorator to pass through the documentation from a wrapped function.
<END_TASK>
<USER_TASK:>
Description:
def wrapped_help_text(wrapped_func):
"""Decorator to pass through the documentation from a wrapped function.
""" |
def decorator(wrapper_func):
"""The decorator.
Parameters
----------
f : callable
The wrapped function.
"""
wrapper_func.__doc__ = ('This method wraps the following method:\n\n' +
pydoc.text.document(wrapped_func))
return wrapper_func
return decorator |
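A hypothetical use of the decorator above; wrapped() and wrapper() are made-up names, used only to show how the docstring is carried across.
def wrapped(x):
    """Return x doubled."""
    return x * 2

@wrapped_help_text(wrapped)
def wrapper(x):
    return wrapped(x)

# wrapper.__doc__ now starts with 'This method wraps the following method:'
# followed by pydoc's rendering of wrapped(), so help(wrapper) documents
# the wrapped function.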
<SYSTEM_TASK:>
Prepare run inputs for one of multiple EnergyPlus runs.
<END_TASK>
<USER_TASK:>
Description:
def prepare_run(run_id, run_data):
"""Prepare run inputs for one of multiple EnergyPlus runs.
:param run_id: An ID number for naming the IDF.
:param run_data: Tuple of the IDF and keyword args to pass to EnergyPlus executable.
:return: Tuple of the IDF path and EPW, and the keyword args.
""" |
idf, kwargs = run_data
epw = idf.epw
idf_dir = os.path.join('multi_runs', 'idf_%i' % run_id)
os.mkdir(idf_dir)
idf_path = os.path.join(idf_dir, 'in.idf')
idf.saveas(idf_path)
return (idf_path, epw), kwargs |
<SYSTEM_TASK:>
Wrapper around the EnergyPlus command line interface.
<END_TASK>
<USER_TASK:>
Description:
def run(idf=None, weather=None, output_directory='', annual=False,
design_day=False, idd=None, epmacro=False, expandobjects=False,
readvars=False, output_prefix=None, output_suffix=None, version=False,
verbose='v', ep_version=None):
"""
Wrapper around the EnergyPlus command line interface.
Parameters
----------
idf : str
Full or relative path to the IDF file to be run, or an IDF object.
weather : str
Full or relative path to the weather file.
output_directory : str, optional
Full or relative path to an output directory (default: 'run_outputs')
annual : bool, optional
If True then force annual simulation (default: False)
design_day : bool, optional
Force design-day-only simulation (default: False)
idd : str, optional
Input data dictionary (default: Energy+.idd in EnergyPlus directory)
epmacro : bool, optional
Run EPMacro prior to simulation (default: False).
expandobjects : bool, optional
Run ExpandObjects prior to simulation (default: False)
readvars : bool, optional
Run ReadVarsESO after simulation (default: False)
output_prefix : str, optional
Prefix for output file names (default: eplus)
output_suffix : str, optional
Suffix style for output file names (default: L)
L: Legacy (e.g., eplustbl.csv)
C: Capital (e.g., eplusTable.csv)
D: Dash (e.g., eplus-table.csv)
version : bool, optional
Display version information (default: False)
verbose : str, optional
Set verbosity of runtime messages (default: v)
v: verbose
q: quiet
ep_version : str
EnergyPlus version, used to find install directory. Required if run() is
called with an IDF file path rather than an IDF object.
Returns
-------
str : status
Raises
------
CalledProcessError
AttributeError
If no ep_version parameter is passed when calling with an IDF file path
rather than an IDF object.
""" |
args = locals().copy()
# get unneeded params out of args ready to pass the rest to energyplus.exe
verbose = args.pop('verbose')
idf = args.pop('idf')
iddname = args.get('idd')
if not isinstance(iddname, str):
args.pop('idd')
try:
idf_path = os.path.abspath(idf.idfname)
except AttributeError:
idf_path = os.path.abspath(idf)
ep_version = args.pop('ep_version')
# get version from IDF object or by parsing the IDF file for it
if not ep_version:
try:
ep_version = '-'.join(str(x) for x in idf.idd_version[:3])
except AttributeError:
raise AttributeError(
"The ep_version must be set when passing an IDF path. \
Alternatively, use IDF.run()")
eplus_exe_path, eplus_weather_path = install_paths(ep_version, iddname)
if version:
# just get EnergyPlus version number and return
cmd = [eplus_exe_path, '--version']
check_call(cmd)
return
# convert paths to absolute paths if required
if os.path.isfile(args['weather']):
args['weather'] = os.path.abspath(args['weather'])
else:
args['weather'] = os.path.join(eplus_weather_path, args['weather'])
output_dir = os.path.abspath(args['output_directory'])
args['output_directory'] = output_dir
# store the directory we start in
cwd = os.getcwd()
run_dir = os.path.abspath(tempfile.mkdtemp())
os.chdir(run_dir)
# build a list of command line arguments
cmd = [eplus_exe_path]
for arg in args:
if args[arg]:
if isinstance(args[arg], bool):
args[arg] = ''
cmd.extend(['--{}'.format(arg.replace('_', '-'))])
if args[arg] != "":
cmd.extend([args[arg]])
cmd.extend([idf_path])
try:
if verbose == 'v':
print("\r\n" + " ".join(cmd) + "\r\n")
check_call(cmd)
elif verbose == 'q':
check_call(cmd, stdout=open(os.devnull, 'w'))
except CalledProcessError:
message = parse_error(output_dir)
raise EnergyPlusRunError(message)
finally:
os.chdir(cwd)
return 'OK' |
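A hypothetical call to the wrapper above. The file names are placeholders; ep_version is required here because an IDF path (not an IDF object) is passed.
run(idf='model.idf',
    weather='USA_CO_Golden-NREL.724666_TMY3.epw',
    output_directory='run_outputs',
    expandobjects=True,
    readvars=True,
    ep_version='8-9-0')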
<SYSTEM_TASK:>
Combine the contents of stderr and eplusout.err into the exception message.
<END_TASK>
<USER_TASK:>
Description:
def parse_error(output_dir):
"""Add contents of stderr and eplusout.err and put it in the exception message.
:param output_dir: str
:return: str
""" |
sys.stderr.seek(0)
std_err = sys.stderr.read().decode('utf-8')
err_file = os.path.join(output_dir, "eplusout.err")
if os.path.isfile(err_file):
with open(err_file, "r") as f:
ep_err = f.read()
else:
ep_err = "<File not found>"
message = "\r\n{std_err}\r\nContents of EnergyPlus error file at {err_file}\r\n{ep_err}".format(**locals())
return message |
<SYSTEM_TASK:>
Test if two values are equal to a given number of places.
<END_TASK>
<USER_TASK:>
Description:
def almostequal(first, second, places=7, printit=True):
"""
Test if two values are equal to a given number of places.
This is based on python's unittest so may be covered by Python's
license.
""" |
if first == second:
return True
if round(abs(second - first), places) != 0:
if printit:
print(round(abs(second - first), places))
print("notalmost: %s != %s to %i places" % (first, second, places))
return False
else:
return True |
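For example, with the default of 7 places floating-point noise is ignored, while larger differences are reported.
almostequal(0.1 + 0.2, 0.3)                       # True: difference rounds to 0
almostequal(1.0, 1.04, places=1, printit=False)   # True: 0.04 rounds to 0.0
almostequal(1.0, 1.1, places=1, printit=False)    # False: 0.1 does not round to 0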
<SYSTEM_TASK:>
Make a new object for the given key.
<END_TASK>
<USER_TASK:>
Description:
def newrawobject(data, commdct, key, block=None, defaultvalues=True):
"""Make a new object for the given key.
Parameters
----------
data : Eplusdata object
Data dictionary and list of objects for the entire model.
commdct : list of dicts
Comments from the IDD file describing each item type in `data`.
key : str
Object type of the object to add (in ALL_CAPS).
Returns
-------
list
A list of field values for the new object.
""" |
dtls = data.dtls
key = key.upper()
key_i = dtls.index(key)
key_comm = commdct[key_i]
# set default values
if defaultvalues:
obj = [comm.get('default', [''])[0] for comm in key_comm]
else:
obj = ['' for comm in key_comm]
if not block:
inblock = ['does not start with N'] * len(obj)
else:
inblock = block[key_i]
for i, (f_comm, f_val, f_iddname) in enumerate(zip(key_comm, obj, inblock)):
if i == 0:
obj[i] = key
else:
obj[i] = convertafield(f_comm, f_val, f_iddname)
obj = poptrailing(obj) # remove the blank items in a repeating field.
return obj |
<SYSTEM_TASK:>
add a bunch to model.
<END_TASK>
<USER_TASK:>
Description:
def addthisbunch(bunchdt, data, commdct, thisbunch, theidf):
"""add a bunch to model.
abunch usually comes from another idf file
or it can be used to copy within the idf file""" |
key = thisbunch.key.upper()
obj = copy.copy(thisbunch.obj)
abunch = obj2bunch(data, commdct, obj)
bunchdt[key].append(abunch)
return abunch |
<SYSTEM_TASK:>
make a new bunch object using the data object
<END_TASK>
<USER_TASK:>
Description:
def obj2bunch(data, commdct, obj):
"""make a new bunch object using the data object""" |
dtls = data.dtls
key = obj[0].upper()
key_i = dtls.index(key)
abunch = makeabunch(commdct, obj, key_i)
return abunch |
<SYSTEM_TASK:>
get the object if you have the key and the name
<END_TASK>
<USER_TASK:>
Description:
def getobject(bunchdt, key, name):
"""get the object if you have the key and the name
returns a list of objects, in case you have more than one
You should not have more than one""" |
# TODO : throw exception if more than one object, or return more objects
idfobjects = bunchdt[key]
if idfobjects:
# second item in list is a unique ID
unique_id = idfobjects[0].objls[1]
theobjs = [idfobj for idfobj in idfobjects if
idfobj[unique_id].upper() == name.upper()]
try:
return theobjs[0]
except IndexError:
return None |
<SYSTEM_TASK:>
test if the idf object has the field values in kwargs
<END_TASK>
<USER_TASK:>
Description:
def __objecthasfields(bunchdt, data, commdct, idfobject, places=7, **kwargs):
"""test if the idf object has the field values in kwargs""" |
for key, value in list(kwargs.items()):
if not isfieldvalue(
bunchdt, data, commdct,
idfobject, key, value, places=places):
return False
return True |
<SYSTEM_TASK:>
from commdct, return the idd of the object key
<END_TASK>
<USER_TASK:>
Description:
def iddofobject(data, commdct, key):
"""from commdct, return the idd of the object key""" |
dtls = data.dtls
i = dtls.index(key)
return commdct[i] |
<SYSTEM_TASK:>
get the index of the first extensible item
<END_TASK>
<USER_TASK:>
Description:
def getextensibleindex(bunchdt, data, commdct, key, objname):
"""get the index of the first extensible item""" |
theobject = getobject(bunchdt, key, objname)
if theobject == None:
return None
theidd = iddofobject(data, commdct, key)
extensible_i = [
i for i in range(len(theidd)) if 'begin-extensible' in theidd[i]]
try:
extensible_i = extensible_i[0]
except IndexError:
return theobject |
<SYSTEM_TASK:>
remove the extensible items in the object
<END_TASK>
<USER_TASK:>
Description:
def removeextensibles(bunchdt, data, commdct, key, objname):
"""remove the extensible items in the object""" |
theobject = getobject(bunchdt, key, objname)
if theobject == None:
return theobject
theidd = iddofobject(data, commdct, key)
extensible_i = [
i for i in range(len(theidd)) if 'begin-extensible' in theidd[i]]
try:
extensible_i = extensible_i[0]
except IndexError:
return theobject
while True:
try:
popped = theobject.obj.pop(extensible_i)
except IndexError:
break
return theobject |
<SYSTEM_TASK:>
get the idd comment for the field
<END_TASK>
<USER_TASK:>
Description:
def getfieldcomm(bunchdt, data, commdct, idfobject, fieldname):
"""get the idd comment for the field""" |
key = idfobject.obj[0].upper()
keyi = data.dtls.index(key)
fieldi = idfobject.objls.index(fieldname)
thiscommdct = commdct[keyi][fieldi]
return thiscommdct |
<SYSTEM_TASK:>
test if case has to be retained for that field
<END_TASK>
<USER_TASK:>
Description:
def is_retaincase(bunchdt, data, commdct, idfobject, fieldname):
"""test if case has to be retained for that field""" |
thiscommdct = getfieldcomm(bunchdt, data, commdct, idfobject, fieldname)
return 'retaincase' in thiscommdct |
<SYSTEM_TASK:>
test if idfobj.field == value
<END_TASK>
<USER_TASK:>
Description:
def isfieldvalue(bunchdt, data, commdct, idfobj, fieldname, value, places=7):
"""test if idfobj.field == value""" |
# do a quick type check
# if type(idfobj[fieldname]) != type(value):
# return False # takes care of autocalculate and real
# check float
thiscommdct = getfieldcomm(bunchdt, data, commdct, idfobj, fieldname)
if 'type' in thiscommdct:
if thiscommdct['type'][0] in ('real', 'integer'):
# test for autocalculate
try:
if idfobj[fieldname].upper() == 'AUTOCALCULATE':
if value.upper() == 'AUTOCALCULATE':
return True
except AttributeError:
pass
return almostequal(float(idfobj[fieldname]), float(value), places, False)
# check retaincase
if is_retaincase(bunchdt, data, commdct, idfobj, fieldname):
return idfobj[fieldname] == value
else:
return idfobj[fieldname].upper() == value.upper() |
<SYSTEM_TASK:>
get the reference names for this object
<END_TASK>
<USER_TASK:>
Description:
def getrefnames(idf, objname):
"""get the reference names for this object""" |
iddinfo = idf.idd_info
dtls = idf.model.dtls
index = dtls.index(objname)
fieldidds = iddinfo[index]
for fieldidd in fieldidds:
if 'field' in fieldidd:
if fieldidd['field'][0].endswith('Name'):
if 'reference' in fieldidd:
return fieldidd['reference']
else:
return [] |
<SYSTEM_TASK:>
rename all the references to this objname
<END_TASK>
<USER_TASK:>
Description:
def rename(idf, objkey, objname, newname):
"""rename all the refrences to this objname""" |
refnames = getrefnames(idf, objkey)
for refname in refnames:
objlists = getallobjlists(idf, refname)
# [('OBJKEY', refname, fieldindexlist), ...]
for refname in refnames:
# TODO : there seems to be a duplication in this loop. Check.
# refname appears in both loops
for robjkey, refname, fieldindexlist in objlists:
idfobjects = idf.idfobjects[robjkey]
for idfobject in idfobjects:
for findex in fieldindexlist: # for each field
if idfobject[idfobject.objls[findex]] == objname:
idfobject[idfobject.objls[findex]] = newname
theobject = idf.getobject(objkey, objname)
fieldname = [item for item in theobject.objls if item.endswith('Name')][0]
theobject[fieldname] = newname
return theobject |
<SYSTEM_TASK:>
zone floor to roof height
<END_TASK>
<USER_TASK:>
Description:
def zone_floor2roofheight(idf, zonename, debug=False):
"""zone floor to roof height""" |
zone = idf.getobject('ZONE', zonename)
surfs = idf.idfobjects['BuildingSurface:Detailed'.upper()]
zone_surfs = [s for s in surfs if s.Zone_Name == zone.Name]
floors = [s for s in zone_surfs if s.Surface_Type.upper() == 'FLOOR']
roofs = [s for s in zone_surfs if s.Surface_Type.upper() == 'ROOF']
ceilings = [s for s in zone_surfs if s.Surface_Type.upper() == 'CEILING']
topsurfaces = roofs + ceilings
topz = []
for topsurface in topsurfaces:
for coord in topsurface.coords:
topz.append(coord[-1])
topz = max(topz)
botz = []
for floor in floors:
for coord in floor.coords:
botz.append(coord[-1])
botz = min(botz)
height = topz - botz
return height |
<SYSTEM_TASK:>
Set the path to the EnergyPlus IDD for the version of EnergyPlus which
<END_TASK>
<USER_TASK:>
Description:
def setiddname(cls, iddname, testing=False):
"""
Set the path to the EnergyPlus IDD for the version of EnergyPlus which
is to be used by eppy.
Parameters
----------
iddname : str
Path to the IDD file.
testing : bool
Flag to use if running tests since we may want to ignore the
`IDDAlreadySetError`.
Raises
------
IDDAlreadySetError
""" |
if cls.iddname == None:
cls.iddname = iddname
cls.idd_info = None
cls.block = None
elif cls.iddname == iddname:
pass
else:
if testing == False:
errortxt = "IDD file is set to: %s" % (cls.iddname,)
raise IDDAlreadySetError(errortxt) |
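Typical use of the class method above; the IDD path is a placeholder for wherever EnergyPlus is installed.
IDF.setiddname('/usr/local/EnergyPlus-8-9-0/Energy+.idd')
# A second call with a different path raises IDDAlreadySetError,
# unless testing=True is passed.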
<SYSTEM_TASK:>
Set the IDD to be used by eppy.
<END_TASK>
<USER_TASK:>
Description:
def setidd(cls, iddinfo, iddindex, block, idd_version):
"""Set the IDD to be used by eppy.
Parameters
----------
iddinfo : list
Comments and metadata about fields in the IDD.
block : list
Field names in the IDD.
""" |
cls.idd_info = iddinfo
cls.block = block
cls.idd_index = iddindex
cls.idd_version = idd_version |
<SYSTEM_TASK:>
Use the current IDD and read an IDF from file. If the IDD has not yet
<END_TASK>
<USER_TASK:>
Description:
def initread(self, idfname):
"""
Use the current IDD and read an IDF from file. If the IDD has not yet
been initialised then this is done first.
Parameters
----------
idf_name : str
Path to an IDF file.
""" |
with open(idfname, 'r') as _:
# raise nonexistent file error early if idfname doesn't exist
pass
iddfhandle = StringIO(iddcurrent.iddtxt)
if self.getiddname() == None:
self.setiddname(iddfhandle)
self.idfname = idfname
self.read() |
<SYSTEM_TASK:>
Use the current IDD and read an IDF from text data. If the IDD has not
<END_TASK>
<USER_TASK:>
Description:
def initreadtxt(self, idftxt):
"""
Use the current IDD and read an IDF from text data. If the IDD has not
yet been initialised then this is done first.
Parameters
----------
idftxt : str
Text representing an IDF file.
""" |
iddfhandle = StringIO(iddcurrent.iddtxt)
if self.getiddname() == None:
self.setiddname(iddfhandle)
idfhandle = StringIO(idftxt)
self.idfname = idfhandle
self.read() |
<SYSTEM_TASK:>
Read the IDF file and the IDD file. If the IDD file had already been
<END_TASK>
<USER_TASK:>
Description:
def read(self):
"""
Read the IDF file and the IDD file. If the IDD file had already been
read, it will not be read again.
Read populates the following data structures:
- idfobjects : list
- model : list
- idd_info : list
- idd_index : dict
""" |
if self.getiddname() == None:
errortxt = ("IDD file needed to read the idf file. "
"Set it using IDF.setiddname(iddfile)")
raise IDDNotSetError(errortxt)
readout = idfreader1(
self.idfname, self.iddname, self,
commdct=self.idd_info, block=self.block)
(self.idfobjects, block, self.model,
idd_info, idd_index, idd_version) = readout
self.__class__.setidd(idd_info, idd_index, block, idd_version) |
<SYSTEM_TASK:>
Use the current IDD and create a new empty IDF. If the IDD has not yet
<END_TASK>
<USER_TASK:>
Description:
def initnew(self, fname):
"""
Use the current IDD and create a new empty IDF. If the IDD has not yet
been initialised then this is done first.
Parameters
----------
fname : str, optional
Path to an IDF. This does not need to be set at this point.
""" |
iddfhandle = StringIO(iddcurrent.iddtxt)
if self.getiddname() == None:
self.setiddname(iddfhandle)
idfhandle = StringIO('')
self.idfname = idfhandle
self.read()
if fname:
self.idfname = fname |
<SYSTEM_TASK:>
Add a new idfobject to the model. If you don't specify a value for a
<END_TASK>
<USER_TASK:>
Description:
def newidfobject(self, key, aname='', defaultvalues=True, **kwargs):
"""
Add a new idfobject to the model. If you don't specify a value for a
field, the default value will be set.
For example ::
newidfobject("CONSTRUCTION")
newidfobject("CONSTRUCTION",
Name='Interior Ceiling_class',
Outside_Layer='LW Concrete',
Layer_2='soundmat')
Parameters
----------
key : str
The type of IDF object. This must be in ALL_CAPS.
aname : str, deprecated
This parameter is not used. It is left there for backward
compatibility.
defaultvalues: boolean
default is True. If True default values WILL be set.
If False, default values WILL NOT be set
**kwargs
Keyword arguments in the format `field=value` used to set the value
of fields in the IDF object when it is created.
Returns
-------
EpBunch object
""" |
obj = newrawobject(self.model, self.idd_info,
key, block=self.block, defaultvalues=defaultvalues)
abunch = obj2bunch(self.model, self.idd_info, obj)
if aname:
warnings.warn("The aname parameter should no longer be used.", UserWarning)
namebunch(abunch, aname)
self.idfobjects[key].append(abunch)
for k, v in list(kwargs.items()):
abunch[k] = v
return abunch |
<SYSTEM_TASK:>
Add an IDF object to the IDF.
<END_TASK>
<USER_TASK:>
Description:
def copyidfobject(self, idfobject):
"""Add an IDF object to the IDF.
Parameters
----------
idfobject : EpBunch object
The IDF object to copy. This usually comes from another idf file,
or it can be used to copy within this idf file.
""" |
return addthisbunch(self.idfobjects,
self.model,
self.idd_info,
idfobject, self) |
<SYSTEM_TASK:>
Get the index of the first extensible item.
<END_TASK>
<USER_TASK:>
Description:
def getextensibleindex(self, key, name):
"""
Get the index of the first extensible item.
Only for internal use. # TODO : hide this
Parameters
----------
key : str
The type of IDF object. This must be in ALL_CAPS.
name : str
The name of the object to fetch.
Returns
-------
int
""" |
return getextensibleindex(
self.idfobjects, self.model, self.idd_info,
key, name) |
<SYSTEM_TASK:>
Remove extensible items in the object of key and name.
<END_TASK>
<USER_TASK:>
Description:
def removeextensibles(self, key, name):
"""
Remove extensible items in the object of key and name.
Only for internal use. # TODO : hide this
Parameters
----------
key : str
The type of IDF object. This must be in ALL_CAPS.
name : str
The name of the object to fetch.
Returns
-------
EpBunch object
""" |
return removeextensibles(
self.idfobjects, self.model, self.idd_info,
key, name) |
<SYSTEM_TASK:>
Save the IDF as a text file with the optional filename passed, or with
<END_TASK>
<USER_TASK:>
Description:
def save(self, filename=None, lineendings='default', encoding='latin-1'):
"""
Save the IDF as a text file with the optional filename passed, or with
the current idfname of the IDF.
Parameters
----------
filename : str, optional
Filepath to save the file. If None then use the IDF.idfname
parameter. Also accepts a file handle.
lineendings : str, optional
Line endings to use in the saved file. Options are 'default',
'windows' and 'unix'. The default is 'default', which uses the line
endings for the current system.
encoding : str, optional
Encoding to use for the saved file. The default is 'latin-1' which
is compatible with the EnergyPlus IDFEditor.
""" |
if filename is None:
filename = self.idfname
s = self.idfstr()
if lineendings == 'default':
system = platform.system()
s = '!- {} Line endings \n'.format(system) + s
slines = s.splitlines()
s = os.linesep.join(slines)
elif lineendings == 'windows':
s = '!- Windows Line endings \n' + s
slines = s.splitlines()
s = '\r\n'.join(slines)
elif lineendings == 'unix':
s = '!- Unix Line endings \n' + s
slines = s.splitlines()
s = '\n'.join(slines)
s = s.encode(encoding)
try:
with open(filename, 'wb') as idf_out:
idf_out.write(s)
except TypeError: # in the case that filename is a file handle
try:
filename.write(s)
except TypeError:
filename.write(s.decode(encoding)) |
<SYSTEM_TASK:>
Save the IDF as a text file with the filename passed.
<END_TASK>
<USER_TASK:>
Description:
def saveas(self, filename, lineendings='default', encoding='latin-1'):
""" Save the IDF as a text file with the filename passed.
Parameters
----------
filename : str
Filepath to set the idfname attribute to and save the file as.
lineendings : str, optional
Line endings to use in the saved file. Options are 'default',
'windows' and 'unix'. The default is 'default', which uses the line
endings for the current system.
encoding : str, optional
Encoding to use for the saved file. The default is 'latin-1' which
is compatible with the EnergyPlus IDFEditor.
""" |
self.idfname = filename
self.save(filename, lineendings, encoding) |
<SYSTEM_TASK:>
Save a copy of the file with the filename passed.
<END_TASK>
<USER_TASK:>
Description:
def savecopy(self, filename, lineendings='default', encoding='latin-1'):
"""Save a copy of the file with the filename passed.
Parameters
----------
filename : str
Filepath to save the file.
lineendings : str, optional
Line endings to use in the saved file. Options are 'default',
'windows' and 'unix'. The default is 'default', which uses the line
endings for the current system.
encoding : str, optional
Encoding to use for the saved file. The default is 'latin-1' which
is compatible with the EnergyPlus IDFEditor.
""" |
self.save(filename, lineendings, encoding) |
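The three save methods above differ only in how they treat idfname; a short sketch with placeholder file names.
idf.save()                   # rewrite the file the IDF was loaded from
idf.saveas('variant.idf')    # save under a new name and make it the current idfname
idf.savecopy('backup.idf')   # save a copy; idfname is left unchanged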
<SYSTEM_TASK:>
Run an IDF file with a given EnergyPlus weather file. This is a
<END_TASK>
<USER_TASK:>
Description:
def run(self, **kwargs):
"""
Run an IDF file with a given EnergyPlus weather file. This is a
wrapper for the EnergyPlus command line interface.
Parameters
----------
**kwargs
See eppy.runner.functions.run()
""" |
# write the IDF to the current directory
self.saveas('in.idf')
# if `idd` is not passed explicitly, use the IDF.iddname
idd = kwargs.pop('idd', self.iddname)
epw = kwargs.pop('weather', self.epw)
try:
run(self, weather=epw, idd=idd, **kwargs)
finally:
os.remove('in.idf') |
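A hypothetical call to the method above; it saves in.idf to the current directory, delegates to the module-level run(), and deletes the temporary file afterwards.
idf.epw = 'USA_CO_Golden-NREL.724666_TMY3.epw'   # placeholder weather file
idf.run(output_directory='my_results', expandobjects=True, readvars=True)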
<SYSTEM_TASK:>
get the block bounded by start and end
<END_TASK>
<USER_TASK:>
Description:
def getoneblock(astr, start, end):
"""get the block bounded by start and end
doesn't work for multiple blocks""" |
alist = astr.split(start)
astr = alist[-1]
alist = astr.split(end)
astr = alist[0]
return astr |
<SYSTEM_TASK:>
in string astr replace all occurrences of thefind with thereplace
<END_TASK>
<USER_TASK:>
Description:
def myreplace(astr, thefind, thereplace):
"""in string astr replace all occurences of thefind with thereplace""" |
alist = astr.split(thefind)
new_s = thereplace.join(alist)
return new_s |
<SYSTEM_TASK:>
Return the slice of astr after the substring sub
<END_TASK>
<USER_TASK:>
Description:
def fsliceafter(astr, sub):
"""Return the slice after at sub in string astr""" |
findex = astr.find(sub)
return astr[findex + len(sub):] |
<SYSTEM_TASK:>
writes a string to file
<END_TASK>
<USER_TASK:>
Description:
def write_str2file(pathname, astr):
"""writes a string to file""" |
fname = pathname
fhandle = open(fname, 'wb')
fhandle.write(astr)
fhandle.close() |
<SYSTEM_TASK:>
test if the table has only strings in the cells
<END_TASK>
<USER_TASK:>
Description:
def is_simpletable(table):
"""test if the table has only strings in the cells""" |
tds = table('td')
for td in tds:
if td.contents != []:
td = tdbr2EOL(td)
if len(td.contents) == 1:
thecontents = td.contents[0]
if not isinstance(thecontents, NavigableString):
return False
else:
return False
return True |
<SYSTEM_TASK:>
convert a table to a list of lists - a 2D matrix
<END_TASK>
<USER_TASK:>
Description:
def table2matrix(table):
"""convert a table to a list of lists - a 2D matrix""" |
if not is_simpletable(table):
raise NotSimpleTable("Not able to read a cell in the table as a string")
rows = []
for tr in table('tr'):
row = []
for td in tr('td'):
td = tdbr2EOL(td) # convert any '<br>' in the td to line ending
try:
row.append(td.contents[0])
except IndexError:
row.append('')
rows.append(row)
return rows |
<SYSTEM_TASK:>
convert a table to a list of lists - a 2D matrix
<END_TASK>
<USER_TASK:>
Description:
def table2val_matrix(table):
"""convert a table to a list of lists - a 2D matrix
Converts numbers to float""" |
if not is_simpletable(table):
raise NotSimpleTable("Not able to read a cell in the table as a string")
rows = []
for tr in table('tr'):
row = []
for td in tr('td'):
td = tdbr2EOL(td)
try:
val = td.contents[0]
except IndexError:
row.append('')
else:
try:
val = float(val)
row.append(val)
except ValueError:
row.append(val)
rows.append(row)
return rows |
<SYSTEM_TASK:>
checks if soup_obj is really a soup object or just a string
<END_TASK>
<USER_TASK:>
Description:
def _has_name(soup_obj):
"""checks if soup_obj is really a soup object or just a string
If it has a name it is a soup object""" |
try:
name = soup_obj.name
if name == None:
return False
return True
except AttributeError:
return False |
<SYSTEM_TASK:>
return only legal chars
<END_TASK>
<USER_TASK:>
Description:
def onlylegalchar(name):
"""return only legal chars""" |
legalchar = ascii_letters + digits + ' '
return ''.join([s for s in name[:] if s in legalchar]) |
<SYSTEM_TASK:>
replace each integer in the string with replacewith
<END_TASK>
<USER_TASK:>
Description:
def replaceint(fname, replacewith='%s'):
"""replace int in lst""" |
words = fname.split()
for i, word in enumerate(words):
try:
word = int(word)
words[i] = replacewith
except ValueError:
pass
return ' '.join(words) |
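For example, whitespace-separated integers are swapped for the placeholder and other words are kept.
replaceint('Zone 12 Wall 3')       # -> 'Zone %s Wall %s'
replaceint('Zone 12 Wall', '%d')   # -> 'Zone %d Wall'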
<SYSTEM_TASK:>
make all the keys lower case
<END_TASK>
<USER_TASK:>
Description:
def cleaniddfield(acomm):
"""make all the keys lower case""" |
for key in list(acomm.keys()):
val = acomm[key]
acomm[key.lower()] = val
for key in list(acomm.keys()):
val = acomm[key]
if key != key.lower():
acomm.pop(key)
return acomm |
<SYSTEM_TASK:>
add heading row to table
<END_TASK>
<USER_TASK:>
Description:
def heading2table(soup, table, row):
"""add heading row to table""" |
tr = Tag(soup, name="tr")
table.append(tr)
for attr in row:
th = Tag(soup, name="th")
tr.append(th)
th.append(attr) |
<SYSTEM_TASK:>
extend the list so that it has an i-th value
<END_TASK>
<USER_TASK:>
Description:
def extendlist(lst, i, value=''):
"""extend the list so that you have i-th value""" |
if i < len(lst):
pass
else:
lst.extend([value, ] * (i - len(lst) + 1)) |
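A quick illustration of the helper above; the list is padded in place so that index i exists.
lst = ['a', 'b']
extendlist(lst, 4)
# lst is now ['a', 'b', '', '', '']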
<SYSTEM_TASK:>
get the ranges for this field
<END_TASK>
<USER_TASK:>
Description:
def getrange(bch, fieldname):
"""get the ranges for this field""" |
keys = ['maximum', 'minimum', 'maximum<', 'minimum>', 'type']
index = bch.objls.index(fieldname)
fielddct_orig = bch.objidd[index]
fielddct = copy.deepcopy(fielddct_orig)
therange = {}
for key in keys:
therange[key] = fielddct.setdefault(key, None)
if therange['type']:
therange['type'] = therange['type'][0]
if therange['type'] == 'real':
for key in keys[:-1]:
if therange[key]:
therange[key] = float(therange[key][0])
if therange['type'] == 'integer':
for key in keys[:-1]:
if therange[key]:
therange[key] = int(therange[key][0])
return therange |
<SYSTEM_TASK:>
throw an exception if the value is out of range
<END_TASK>
<USER_TASK:>
Description:
def checkrange(bch, fieldname):
"""throw exception if the out of range""" |
fieldvalue = bch[fieldname]
therange = bch.getrange(fieldname)
if therange['maximum'] != None:
if fieldvalue > therange['maximum']:
astr = "Value %s is not less or equal to the 'maximum' of %s"
astr = astr % (fieldvalue, therange['maximum'])
raise RangeError(astr)
if therange['minimum'] != None:
if fieldvalue < therange['minimum']:
astr = "Value %s is not greater or equal to the 'minimum' of %s"
astr = astr % (fieldvalue, therange['minimum'])
raise RangeError(astr)
if therange['maximum<'] != None:
if fieldvalue >= therange['maximum<']:
astr = "Value %s is not less than the 'maximum<' of %s"
astr = astr % (fieldvalue, therange['maximum<'])
raise RangeError(astr)
if therange['minimum>'] != None:
if fieldvalue <= therange['minimum>']:
astr = "Value %s is not greater than the 'minimum>' of %s"
astr = astr % (fieldvalue, therange['minimum>'])
raise RangeError(astr)
return fieldvalue
"""get the idd dict for this field
Will return {} if the fieldname does not exist""" |
<SYSTEM_TASK:>
return an item from the fieldidd, given the iddkey
<END_TASK>
<USER_TASK:>
Description:
def getfieldidd_item(bch, fieldname, iddkey):
"""return an item from the fieldidd, given the iddkey
will return an empty list if it does not have the iddkey
or if the fieldname does not exist""" |
fieldidd = getfieldidd(bch, fieldname)
try:
return fieldidd[iddkey]
except KeyError as e:
return [] |
<SYSTEM_TASK:>
return True if the field is equal to value
<END_TASK>
<USER_TASK:>
Description:
def isequal(bch, fieldname, value, places=7):
"""return True if the field is equal to value""" |
def equalalphanumeric(bch, fieldname, value):
if bch.get_retaincase(fieldname):
return bch[fieldname] == value
else:
return bch[fieldname].upper() == value.upper()
fieldidd = bch.getfieldidd(fieldname)
try:
ftype = fieldidd['type'][0]
if ftype in ['real', 'integer']:
return almostequal(bch[fieldname], float(value), places=places)
else:
return equalalphanumeric(bch, fieldname, value)
except KeyError as e:
return equalalphanumeric(bch, fieldname, value) |
<SYSTEM_TASK:>
Get an object referred to by a field in another object.
<END_TASK>
<USER_TASK:>
Description:
def get_referenced_object(referring_object, fieldname):
"""
Get an object referred to by a field in another object.
For example an object of type Construction has fields for each layer, each
of which refers to a Material. This functions allows the object
representing a Material to be fetched using the name of the layer.
Returns the first item found since if there is more than one matching item,
it is a malformed IDF.
Parameters
----------
referring_object : EpBunch
The object which contains a reference to another object,
fieldname : str
The name of the field in the referring object which contains the
reference to another object.
Returns
-------
EpBunch
""" |
idf = referring_object.theidf
object_list = referring_object.getfieldidd_item(fieldname, u'object-list')
for obj_type in idf.idfobjects:
for obj in idf.idfobjects[obj_type]:
valid_object_lists = obj.getfieldidd_item("Name", u'reference')
if set(object_list).intersection(set(valid_object_lists)):
referenced_obj_name = referring_object[fieldname]
if obj.Name == referenced_obj_name:
return obj |
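A hypothetical use of the function above, fetching the material named in a construction's Outside_Layer field (the object and field names are illustrative).
construction = idf.getobject('CONSTRUCTION', 'Interior Ceiling')
material = get_referenced_object(construction, 'Outside_Layer')
print(material.Name)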
<SYSTEM_TASK:>
return True if the field == value
<END_TASK>
<USER_TASK:>
Description:
def isequal(self, fieldname, value, places=7):
"""return True if the field == value
Will retain case if get_retaincase == True
for real value will compare to decimal 'places'
""" |
return isequal(self, fieldname, value, places=places) |
<SYSTEM_TASK:>
make the name2refs dict in the idd_index
<END_TASK>
<USER_TASK:>
Description:
def makename2refdct(commdct):
"""make the name2refs dict in the idd_index""" |
refdct = {}
for comm in commdct: # commdct is a list of dict
try:
idfobj = comm[0]['idfobj'].upper()
field1 = comm[1]
if 'Name' in field1['field']:
references = field1['reference']
refdct[idfobj] = references
except (KeyError, IndexError) as e:
continue # not the expected pattern for reference
return refdct |
<SYSTEM_TASK:>
make the ref2namesdct in the idd_index
<END_TASK>
<USER_TASK:>
Description:
def makeref2namesdct(name2refdct):
"""make the ref2namesdct in the idd_index""" |
ref2namesdct = {}
for key, values in name2refdct.items():
for value in values:
ref2namesdct.setdefault(value, set()).add(key)
return ref2namesdct |
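The inversion performed above can be seen with a small hand-made name2refdct (the keys and reference names here are made up).
name2refdct = {'ZONE': ['ZoneNames'],
               'MATERIAL': ['MaterialName', 'LayerNames']}
makeref2namesdct(name2refdct)
# -> {'ZoneNames': {'ZONE'}, 'MaterialName': {'MATERIAL'}, 'LayerNames': {'MATERIAL'}}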
<SYSTEM_TASK:>
get the previous component in the loop
<END_TASK>
<USER_TASK:>
Description:
def prevnode(edges, component):
"""get the pervious component in the loop""" |
e = edges
c = component
n2c = [(a, b) for a, b in e if type(a) == tuple]
c2n = [(a, b) for a, b in e if type(b) == tuple]
node2cs = [(a, b) for a, b in e if b == c]
c2nodes = []
for node2c in node2cs:
c2node = [(a, b) for a, b in c2n if b == node2c[0]]
if len(c2node) == 0:
# return []
c2nodes = []
break
c2nodes.append(c2node[0])
cs = [a for a, b in c2nodes]
# test for connections that have no nodes
# filter for no nodes
nonodes = [(a, b) for a, b in e if type(a) != tuple and type(b) != tuple]
for a, b in nonodes:
if b == component:
cs.append(a)
return cs |
<SYSTEM_TASK:>
return the coordinates of the surface
<END_TASK>
<USER_TASK:>
Description:
def getcoords(ddtt):
"""return the coordinates of the surface""" |
n_vertices_index = ddtt.objls.index('Number_of_Vertices')
first_x = n_vertices_index + 1 # X of first coordinate
pts = ddtt.obj[first_x:]
return list(grouper(3, pts)) |
<SYSTEM_TASK:>
massage the version number so it matches the format of install folder
<END_TASK>
<USER_TASK:>
Description:
def cleanupversion(ver):
"""massage the version number so it matches the format of install folder""" |
lst = ver.split(".")
if len(lst) == 1:
lst.extend(['0', '0'])
elif len(lst) == 2:
lst.extend(['0'])
elif len(lst) > 2:
lst = lst[:3]
lst[2] = '0' # ensure the 3rd number is 0
cleanver = '.'.join(lst)
return cleanver |
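For example, the version string is padded or truncated to three components and the patch number is forced to 0.
cleanupversion('8.9')     # -> '8.9.0'
cleanupversion('8.9.1')   # -> '8.9.0'
cleanupversion('9')       # -> '9.0.0'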
<SYSTEM_TASK:>
stuff file data into the blank dictionary
<END_TASK>
<USER_TASK:>
Description:
def makedict(self, dictfile, fnamefobject):
"""stuff file data into the blank dictionary""" |
#fname = './exapmlefiles/5ZoneDD.idf'
#fname = './1ZoneUncontrolled.idf'
if isinstance(dictfile, Idd):
localidd = copy.deepcopy(dictfile)
dt, dtls = localidd.dt, localidd.dtls
else:
dt, dtls = self.initdict(dictfile)
# astr = mylib2.readfile(fname)
astr = fnamefobject.read()
try:
astr = astr.decode('ISO-8859-2')
except AttributeError:
pass
fnamefobject.close()
nocom = removecomment(astr, '!')
idfst = nocom
# alist = string.split(idfst, ';')
alist = idfst.split(';')
lss = []
for element in alist:
# lst = string.split(element, ',')
lst = element.split(',')
lss.append(lst)
for i in range(0, len(lss)):
for j in range(0, len(lss[i])):
lss[i][j] = lss[i][j].strip()
for element in lss:
node = element[0].upper()
if node in dt:
# stuff data in this key
dt[node.upper()].append(element)
else:
# scream
if node == '':
continue
print('this node -%s-is not present in base dictionary' %
(node))
self.dt, self.dtls = dt, dtls
return dt, dtls |
<SYSTEM_TASK:>
replace the node here with the node from othereplus
<END_TASK>
<USER_TASK:>
Description:
def replacenode(self, othereplus, node):
"""replace the node here with the node from othereplus""" |
node = node.upper()
self.dt[node.upper()] = othereplus.dt[node.upper()] |
<SYSTEM_TASK:>
add the node here with the node from othereplus
<END_TASK>
<USER_TASK:>
Description:
def add2node(self, othereplus, node):
"""add the node here with the node from othereplus
this will potentially have duplicates""" |
node = node.upper()
self.dt[node.upper()] = self.dt[node.upper()] + \
othereplus.dt[node.upper()] |
<SYSTEM_TASK:>
reflist is got from getobjectref in parse_idd.py
<END_TASK>
<USER_TASK:>
Description:
def getrefs(self, reflist):
"""
reflist is got from getobjectref in parse_idd.py
getobjectref returns a dictionary.
reflist is an item in the dictionary
getrefs gathers all the fields referred to by reflist
""" |
alist = []
for element in reflist:
if element[0].upper() in self.dt:
for elm in self.dt[element[0].upper()]:
alist.append(elm[element[1]])
return alist |
<SYSTEM_TASK:>
draw a graph without the nodes
<END_TASK>
<USER_TASK:>
Description:
def dropnodes(edges):
"""draw a graph without the nodes""" |
newedges = []
added = False
for edge in edges:
if bothnodes(edge):
newtup = (edge[0][0], edge[1][0])
newedges.append(newtup)
added = True
elif firstisnode(edge):
for edge1 in edges:
if edge[0] == edge1[1]:
newtup = (edge1[0], edge[1])
try:
newedges.index(newtup)
except ValueError as e:
newedges.append(newtup)
added = True
elif secondisnode(edge):
for edge1 in edges:
if edge[1] == edge1[0]:
newtup = (edge[0], edge1[1])
try:
newedges.index(newtup)
except ValueError as e:
newedges.append(newtup)
added = True
# gets the hanging nodes - nodes with no connection
if not added:
if firstisnode(edge):
newedges.append((edge[0][0], edge[1]))
if secondisnode(edge):
newedges.append((edge[0], edge[1][0]))
added = False
return newedges |
<SYSTEM_TASK:>
gather the nodes from the edges
<END_TASK>
<USER_TASK:>
Description:
def edges2nodes(edges):
"""gather the nodes from the edges""" |
nodes = []
for e1, e2 in edges:
nodes.append(e1)
nodes.append(e2)
nodedict = dict([(n, None) for n in nodes])
justnodes = list(nodedict.keys())
# justnodes.sort()
justnodes = sorted(justnodes, key=lambda x: str(x[0]))
return justnodes |
<SYSTEM_TASK:>
make the diagram with the edges
<END_TASK>
<USER_TASK:>
Description:
def makediagram(edges):
"""make the diagram with the edges""" |
graph = pydot.Dot(graph_type='digraph')
nodes = edges2nodes(edges)
epnodes = [(node,
makeanode(node[0])) for node in nodes if nodetype(node)=="epnode"]
endnodes = [(node,
makeendnode(node[0])) for node in nodes if nodetype(node)=="EndNode"]
epbr = [(node, makeabranch(node)) for node in nodes if not istuple(node)]
nodedict = dict(epnodes + epbr + endnodes)
for value in list(nodedict.values()):
graph.add_node(value)
for e1, e2 in edges:
graph.add_edge(pydot.Edge(nodedict[e1], nodedict[e2]))
return graph |
<SYSTEM_TASK:>
return the edges joining the components of a branch
<END_TASK>
<USER_TASK:>
Description:
def makebranchcomponents(data, commdct, anode="epnode"):
"""return the edges jointing the components of a branch""" |
alledges = []
objkey = 'BRANCH'
cnamefield = "Component %s Name"
inletfield = "Component %s Inlet Node Name"
outletfield = "Component %s Outlet Node Name"
numobjects = len(data.dt[objkey])
cnamefields = loops.repeatingfields(data, commdct, objkey, cnamefield)
inletfields = loops.repeatingfields(data, commdct, objkey, inletfield)
outletfields = loops.repeatingfields(data, commdct, objkey, outletfield)
inlts = loops.extractfields(data, commdct,
objkey, [inletfields] * numobjects)
cmps = loops.extractfields(data, commdct,
objkey, [cnamefields] * numobjects)
otlts = loops.extractfields(data, commdct,
objkey, [outletfields] * numobjects)
zipped = list(zip(inlts, cmps, otlts))
tzipped = [transpose2d(item) for item in zipped]
for i in range(len(data.dt[objkey])):
tt = tzipped[i]
# branchname = data.dt[objkey][i][1]
edges = []
for t0 in tt:
edges = edges + [((t0[0], anode), t0[1]), (t0[1], (t0[2], anode))]
alledges = alledges + edges
return alledges |
<SYSTEM_TASK:>
remove the blank lines in astr
<END_TASK>
<USER_TASK:>
Description:
def removeblanklines(astr):
"""remove the blank lines in astr""" |
lines = astr.splitlines()
lines = [line for line in lines if line.strip() != ""]
return "\n".join(lines) |
<SYSTEM_TASK:>
copied from extractidddata below.
<END_TASK>
<USER_TASK:>
Description:
def _readfname(fname):
"""copied from extractidddata below.
It deals with all the types of fnames""" |
try:
if isinstance(fname, (file, StringIO)):
astr = fname.read()
else:
astr = open(fname, 'rb').read()
except NameError:
if isinstance(fname, (FileIO, StringIO)):
astr = fname.read()
else:
astr = mylib2.readfile(fname)
return astr |
<SYSTEM_TASK:>
insert group info into extracted idd
<END_TASK>
<USER_TASK:>
Description:
def embedgroupdata(extract_func, fname, debug):
"""insert group info into extracted idd""" |
astr = _readfname(fname)
# fname is exhausted by the above read
# reconstitute fname as a StringIO
fname = StringIO(astr)
try:
astr = astr.decode('ISO-8859-2')
except Exception as e:
pass # for python 3
glist = iddgroups.iddtxt2grouplist(astr)
blocklst, commlst, commdct = extract_func(fname)
# add group information to commlst and commdct
# glist = getglist(fname)
commlst = iddgroups.group2commlst(commlst, glist)
commdct = iddgroups.group2commdct(commdct, glist)
return blocklst, commlst, commdct |
<SYSTEM_TASK:>
get all the objects of objkey.
<END_TASK>
<USER_TASK:>
Description:
def extractfields(data, commdct, objkey, fieldlists):
"""get all the objects of objkey.
fieldlists will have a fieldlist for each of those objects.
return the contents of those fields""" |
# TODO : this assumes that the field list identical for
# each instance of the object. This is not true.
# So we should have a field list for each instance of the object
# and map them with a zip
objindex = data.dtls.index(objkey)
objcomm = commdct[objindex]
objfields = []
# get the field names of that object
for dct in objcomm[0:]:
try:
thefieldcomms = dct['field']
objfields.append(thefieldcomms[0])
except KeyError as err:
objfields.append(None)
fieldindexes = []
for fieldlist in fieldlists:
fieldindex = []
for item in fieldlist:
if isinstance(item, int):
fieldindex.append(item)
else:
fieldindex.append(objfields.index(item) + 0)
# the index starts at 1, not at 0
fieldindexes.append(fieldindex)
theobjects = data.dt[objkey]
fieldcontents = []
for theobject, fieldindex in zip(theobjects, fieldindexes):
innerlst = []
for item in fieldindex:
try:
innerlst.append(theobject[item])
except IndexError as err:
break
fieldcontents.append(innerlst)
# fieldcontents.append([theobject[item] for item in fieldindex])
return fieldcontents |
<SYSTEM_TASK:>
return the plantloopfield list
<END_TASK>
<USER_TASK:>
Description:
def plantloopfieldlists(data):
"""return the plantloopfield list""" |
objkey = 'plantloop'.upper()
numobjects = len(data.dt[objkey])
return [[
'Name',
'Plant Side Inlet Node Name',
'Plant Side Outlet Node Name',
'Plant Side Branch List Name',
'Demand Side Inlet Node Name',
'Demand Side Outlet Node Name',
'Demand Side Branch List Name']] * numobjects |
<SYSTEM_TASK:>
get plantloop fields to diagram it
<END_TASK>
<USER_TASK:>
Description:
def plantloopfields(data, commdct):
"""get plantloop fields to diagram it""" |
fieldlists = plantloopfieldlists(data)
objkey = 'plantloop'.upper()
return extractfields(data, commdct, objkey, fieldlists) |
<SYSTEM_TASK:>
get branches from the branchlist
<END_TASK>
<USER_TASK:>
Description:
def branchlist2branches(data, commdct, branchlist):
"""get branches from the branchlist""" |
objkey = 'BranchList'.upper()
theobjects = data.dt[objkey]
fieldlists = []
objnames = [obj[1] for obj in theobjects]
for theobject in theobjects:
fieldlists.append(list(range(2, len(theobject))))
blists = extractfields(data, commdct, objkey, fieldlists)
thebranches = [branches for name, branches in zip(objnames, blists)
if name == branchlist]
return thebranches[0] |
<SYSTEM_TASK:>
return the inlet and outlet of a branch
<END_TASK>
<USER_TASK:>
Description:
def branch_inlet_outlet(data, commdct, branchname):
"""return the inlet and outlet of a branch""" |
objkey = 'Branch'.upper()
theobjects = data.dt[objkey]
theobject = [obj for obj in theobjects if obj[1] == branchname]
theobject = theobject[0]
inletindex = 6
outletindex = len(theobject) - 2
return [theobject[inletindex], theobject[outletindex]] |
<SYSTEM_TASK:>
return the field lists for the splitter or mixer objects of objkey
<END_TASK>
<USER_TASK:>
Description:
def splittermixerfieldlists(data, commdct, objkey):
"""docstring for splittermixerfieldlists""" |
objkey = objkey.upper()
objindex = data.dtls.index(objkey)
objcomms = commdct[objindex]
theobjects = data.dt[objkey]
fieldlists = []
for theobject in theobjects:
fieldlist = list(range(1, len(theobject)))
fieldlists.append(fieldlist)
return fieldlists |
<SYSTEM_TASK:>
get splitter fields to diagram it
<END_TASK>
<USER_TASK:>
Description:
def splitterfields(data, commdct):
"""get splitter fields to diagram it""" |
objkey = "Connector:Splitter".upper()
fieldlists = splittermixerfieldlists(data, commdct, objkey)
return extractfields(data, commdct, objkey, fieldlists) |
<SYSTEM_TASK:>
get mixer fields to diagram it
<END_TASK>
<USER_TASK:>
Description:
def mixerfields(data, commdct):
"""get mixer fields to diagram it""" |
objkey = "Connector:Mixer".upper()
fieldlists = splittermixerfieldlists(data, commdct, objkey)
return extractfields(data, commdct, objkey, fieldlists) |
<SYSTEM_TASK:>
return the count of objects of key
<END_TASK>
<USER_TASK:>
Description:
def objectcount(data, key):
"""return the count of objects of key""" |
objkey = key.upper()
return len(data.dt[objkey]) |
<SYSTEM_TASK:>
given objkey and fieldname, return its index
<END_TASK>
<USER_TASK:>
Description:
def getfieldindex(data, commdct, objkey, fname):
"""given objkey and fieldname, return its index""" |
objindex = data.dtls.index(objkey)
objcomm = commdct[objindex]
for i_index, item in enumerate(objcomm):
try:
if item['field'] == [fname]:
break
except KeyError as err:
pass
return i_index |
<SYSTEM_TASK:>
make the dict adistu_inlets
<END_TASK>
<USER_TASK:>
Description:
def makeadistu_inlets(data, commdct):
"""make the dict adistu_inlets""" |
adistus = getadistus(data, commdct)
# assume that the inlet node has the words "Air Inlet Node Name"
airinletnode = "Air Inlet Node Name"
adistu_inlets = {}
for adistu in adistus:
objkey = adistu.upper()
objindex = data.dtls.index(objkey)
objcomm = commdct[objindex]
airinlets = []
for i, comm in enumerate(objcomm):
try:
if comm['field'][0].find(airinletnode) != -1:
airinlets.append(comm['field'][0])
except KeyError as err:
pass
adistu_inlets[adistu] = airinlets
return adistu_inlets |
<SYSTEM_TASK:>
get the version number from the E+ install folder
<END_TASK>
<USER_TASK:>
Description:
def folder2ver(folder):
"""get the version number from the E+ install folder""" |
ver = folder.split('EnergyPlus')[-1]
ver = ver[1:]
splitapp = ver.split('-')
ver = '.'.join(splitapp)
return ver |
<SYSTEM_TASK:>
just like the comment in python.
<END_TASK>
<USER_TASK:>
Description:
def nocomment(astr, com='!'):
"""
just like the comment in python.
removes any text on a line after the comment character com
""" |
alist = astr.splitlines()
for i in range(len(alist)):
element = alist[i]
pnt = element.find(com)
if pnt != -1:
alist[i] = element[:pnt]
return '\n'.join(alist) |
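For example, text after the comment character is dropped on each line.
nocomment('Version, 8.9; ! an IDF comment\nTimestep, 4;')
# -> 'Version, 8.9; \nTimestep, 4;'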
<SYSTEM_TASK:>
convert the idf text to a simple text
<END_TASK>
<USER_TASK:>
Description:
def idf2txt(txt):
"""convert the idf text to a simple text""" |
astr = nocomment(txt)
objs = astr.split(';')
objs = [obj.split(',') for obj in objs]
objs = [[line.strip() for line in obj] for obj in objs]
objs = [[_tofloat(line) for line in obj] for obj in objs]
objs = [tuple(obj) for obj in objs]
objs.sort()
lst = []
for obj in objs:
for field in obj[:-1]:
lst.append('%s,' % (field, ))
lst.append('%s;\n' % (obj[-1], ))
return '\n'.join(lst) |
<SYSTEM_TASK:>
given the idd file or filehandle, return the version as a tuple
<END_TASK>
<USER_TASK:>
Description:
def iddversiontuple(afile):
"""given the idd file or filehandle, return the version handle""" |
def versiontuple(vers):
"""version tuple"""
return tuple([int(num) for num in vers.split(".")])
try:
fhandle = open(afile, 'rb')
except TypeError:
fhandle = afile
line1 = fhandle.readline()
try:
line1 = line1.decode('ISO-8859-2')
except AttributeError:
pass
line = line1.strip()
if line1 == '':
return (0,)
vers = line.split()[-1]
return versiontuple(vers) |
<SYSTEM_TASK:>
make a bunch from the object
<END_TASK>
<USER_TASK:>
Description:
def makeabunch(commdct, obj, obj_i):
"""make a bunch from the object""" |
objidd = commdct[obj_i]
objfields = [comm.get('field') for comm in commdct[obj_i]]
objfields[0] = ['key']
objfields = [field[0] for field in objfields]
obj_fields = [bunchhelpers.makefieldname(field) for field in objfields]
bobj = EpBunch(obj, obj_fields, objidd)
return bobj |
<SYSTEM_TASK:>
convert the float and integer fields
<END_TASK>
<USER_TASK:>
Description:
def convertfields_old(key_comm, obj, inblock=None):
"""convert the float and interger fields""" |
convinidd = ConvInIDD()
typefunc = dict(integer=convinidd.integer, real=convinidd.real)
types = []
for comm in key_comm:
types.append(comm.get('type', [None])[0])
convs = [typefunc.get(typ, convinidd.no_type) for typ in types]
try:
inblock = list(inblock)
except TypeError as e:
inblock = ['does not start with N'] * len(obj)
for i, (val, conv, avar) in enumerate(zip(obj, convs, inblock)):
if i == 0:
# inblock[0] is the key
pass
else:
val = conv(val, inblock[i])
obj[i] = val
return obj |
<SYSTEM_TASK:>
convert field based on field info in IDD
<END_TASK>
<USER_TASK:>
Description:
def convertafield(field_comm, field_val, field_iddname):
"""convert field based on field info in IDD""" |
convinidd = ConvInIDD()
field_typ = field_comm.get('type', [None])[0]
conv = convinidd.conv_dict().get(field_typ, convinidd.no_type)
return conv(field_val, field_iddname) |
<SYSTEM_TASK:>
convert based on float, integer, and A1, N1
<END_TASK>
<USER_TASK:>
Description:
def convertfields(key_comm, obj, inblock=None):
"""convert based on float, integer, and A1, N1""" |
# f_ stands for field_
convinidd = ConvInIDD()
if not inblock:
inblock = ['does not start with N'] * len(obj)
for i, (f_comm, f_val, f_iddname) in enumerate(zip(key_comm, obj, inblock)):
if i == 0:
# inblock[0] is the iddobject key. No conversion here
pass
else:
obj[i] = convertafield(f_comm, f_val, f_iddname)
return obj |
<SYSTEM_TASK:>
convert the fields of all objects in data to their IDD types
<END_TASK>
<USER_TASK:>
Description:
def convertallfields(data, commdct, block=None):
"""docstring for convertallfields""" |
# import pdbdb; pdb.set_trace()
for key in list(data.dt.keys()):
objs = data.dt[key]
for i, obj in enumerate(objs):
key_i = data.dtls.index(key)
key_comm = commdct[key_i]
try:
inblock = block[key_i]
except TypeError as e:
inblock = None
obj = convertfields(key_comm, obj, inblock)
objs[i] = obj |
<SYSTEM_TASK:>
dictionary of conversion
<END_TASK>
<USER_TASK:>
Description:
def conv_dict(self):
"""dictionary of conversion""" |
return dict(integer=self.integer, real=self.real, no_type=self.no_type) |
<SYSTEM_TASK:>
Find out if anidfobject is mentioned in any object anywhere
<END_TASK>
<USER_TASK:>
Description:
def getanymentions(idf, anidfobject):
"""Find out if idjobject is mentioned an any object anywhere""" |
name = anidfobject.obj[1]
foundobjs = []
keys = idfobjectkeys(idf)
idfkeyobjects = [idf.idfobjects[key.upper()] for key in keys]
for idfobjects in idfkeyobjects:
for idfobject in idfobjects:
if name.upper() in [item.upper()
for item in idfobject.obj
if isinstance(item, basestring)]:
foundobjs.append(idfobject)
return foundobjs |
<SYSTEM_TASK:>
return a list of keys of idfobjects that have 'Node Name' fields
<END_TASK>
<USER_TASK:>
Description:
def getidfkeyswithnodes():
"""return a list of keys of idfobjects that hve 'None Name' fields""" |
idf = IDF(StringIO(""))
keys = idfobjectkeys(idf)
keysfieldnames = ((key, idf.newidfobject(key.upper()).fieldnames)
for key in keys)
keysnodefdnames = ((key, (name for name in fdnames
if (name.endswith('Node_Name'))))
for key, fdnames in keysfieldnames)
nodekeys = [key for key, fdnames in keysnodefdnames if list(fdnames)]
return nodekeys |
<SYSTEM_TASK:>
return all objects that mention this node name
<END_TASK>
<USER_TASK:>
Description:
def getobjectswithnode(idf, nodekeys, nodename):
"""return all objects that mention this node name""" |
keys = nodekeys
# TODO getidfkeyswithnodes needs to be done only once. take out of here
listofidfobjects = (idf.idfobjects[key.upper()]
for key in keys if idf.idfobjects[key.upper()])
idfobjects = [idfobj
for idfobjs in listofidfobjects
for idfobj in idfobjs]
objwithnodes = []
for obj in idfobjects:
values = obj.fieldvalues
fdnames = obj.fieldnames
for value, fdname in zip(values, fdnames):
if fdname.endswith('Node_Name'):
if value == nodename:
objwithnodes.append(obj)
break
return objwithnodes |
<SYSTEM_TASK:>
return a list of all idfobjects in idf
<END_TASK>
<USER_TASK:>
Description:
def getidfobjectlist(idf):
"""return a list of all idfobjects in idf""" |
idfobjects = idf.idfobjects
# idfobjlst = [idfobjects[key] for key in idfobjects if idfobjects[key]]
idfobjlst = [idfobjects[key] for key in idf.model.dtls if idfobjects[key]]
# `for key in idf.model.dtls` maintains the order
# `for key in idfobjects` does not have order
idfobjlst = itertools.chain.from_iterable(idfobjlst)
idfobjlst = list(idfobjlst)
return idfobjlst |
<SYSTEM_TASK:>
copy fromidf completely into toidf
<END_TASK>
<USER_TASK:>
Description:
def copyidfintoidf(toidf, fromidf):
"""copy fromidf completely into toidf""" |
idfobjlst = getidfobjectlist(fromidf)
for idfobj in idfobjlst:
toidf.copyidfobject(idfobj) |
<SYSTEM_TASK:>
return autosizeable field names in idfobject
<END_TASK>
<USER_TASK:>
Description:
def autosize_fieldname(idfobject):
"""return autsizeable field names in idfobject""" |
# undocumented stuff in this code
return [fname for (fname, dct) in zip(idfobject.objls,
idfobject['objidd'])
if 'autosizable' in dct] |
<SYSTEM_TASK:>
extract the groups from the idd file
<END_TASK>
<USER_TASK:>
Description:
def iddtxt2groups(txt):
"""extract the groups from the idd file""" |
try:
txt = txt.decode('ISO-8859-2')
except AttributeError as e:
pass # for python 3
txt = nocomment(txt, '!')
txt = txt.replace("\\group", "!-group") # retains group in next line
txt = nocomment(txt, '\\') # remove all other idd info
lines = txt.splitlines()
lines = [line.strip() for line in lines] # cleanup
lines = [line for line in lines if line != ''] # cleanup
txt = '\n'.join(lines)
gsplits = txt.split('!') # split into groups, since we have !-group
gsplits = [gsplit.splitlines() for gsplit in gsplits] # split group
gsplits[0].insert(0, None)
# Put None for the first group that does not have a group name
gdict = {}
for gsplit in gsplits:
gdict.update({gsplit[0]:gsplit[1:]})
# makes dict {groupname:[k1, k2], groupname2:[k3, k4]}
gdict = {k:'\n'.join(v) for k, v in gdict.items()}# joins lines back
gdict = {k:v.split(';') for k, v in gdict.items()} # splits into idfobjects
gdict = {k:[i.strip() for i in v] for k, v in gdict.items()} # cleanup
gdict = {k:[i.splitlines() for i in v] for k, v in gdict.items()}
# splits idfobjects into lines
gdict = {k:[i for i in v if len(i) > 0] for k, v in gdict.items()}
# cleanup - removes blank lines
gdict = {k:[i[0] for i in v] for k, v in gdict.items()} # use first line
gdict = {k:[i.split(',')[0] for i in v] for k, v in gdict.items()}
# remove ','
nvalue = gdict.pop(None) # remove group with no name
gdict = {k[len('-group '):]:v for k, v in gdict.items()} # get group name
gdict.update({None:nvalue}) # put back group with no name
return gdict |